--- /dev/null
+From d288298535d880aaee28c2bd0f8176c59611334c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Dec 2019 11:04:21 +0100
+Subject: ACPI: EC: Reference count query handlers under lock
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 3df663a147fe077a6ee8444ec626738946e65547 ]
+
+There is a race condition in acpi_ec_get_query_handler()
+theoretically allowing query handlers to go away before they
+are reference counted.
+
+In order to avoid it, call kref_get() on query handlers under
+ec->mutex.
+
+Also simplify the code a bit while at it.
+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/ec.c | 16 ++++------------
+ 1 file changed, 4 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 49e16f0090957..9415a0041aaf7 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -1080,29 +1080,21 @@ void acpi_ec_dispatch_gpe(void)
+ /* --------------------------------------------------------------------------
+ Event Management
+ -------------------------------------------------------------------------- */
+-static struct acpi_ec_query_handler *
+-acpi_ec_get_query_handler(struct acpi_ec_query_handler *handler)
+-{
+- if (handler)
+- kref_get(&handler->kref);
+- return handler;
+-}
+-
+ static struct acpi_ec_query_handler *
+ acpi_ec_get_query_handler_by_value(struct acpi_ec *ec, u8 value)
+ {
+ struct acpi_ec_query_handler *handler;
+- bool found = false;
+
+ mutex_lock(&ec->mutex);
+ list_for_each_entry(handler, &ec->list, node) {
+ if (value == handler->query_bit) {
+- found = true;
+- break;
++ kref_get(&handler->kref);
++ mutex_unlock(&ec->mutex);
++ return handler;
+ }
+ }
+ mutex_unlock(&ec->mutex);
+- return found ? acpi_ec_get_query_handler(handler) : NULL;
++ return NULL;
+ }
+
+ static void acpi_ec_query_handler_release(struct kref *kref)
+--
+2.25.1
+
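+The kref_get()-under-the-lock idiom used above applies to any lookup of
+a kref-managed object on a locked list. A minimal, generic sketch in
+kernel C (hypothetical names, not the ACPI EC code):
+
+  #include <linux/kref.h>
+  #include <linux/list.h>
+  #include <linux/mutex.h>
+  #include <linux/slab.h>
+  #include <linux/types.h>
+
+  struct item {
+          struct kref kref;
+          struct list_head node;
+          u8 key;
+  };
+
+  static LIST_HEAD(items);
+  static DEFINE_MUTEX(items_lock);
+
+  static void item_release(struct kref *kref)
+  {
+          kfree(container_of(kref, struct item, kref));
+  }
+
+  /* Look up an item and take a reference while still holding the lock,
+   * so the object cannot be released between lookup and kref_get(). */
+  static struct item *item_get(u8 key)
+  {
+          struct item *it;
+
+          mutex_lock(&items_lock);
+          list_for_each_entry(it, &items, node) {
+                  if (it->key == key) {
+                          kref_get(&it->kref);
+                          mutex_unlock(&items_lock);
+                          return it;
+                  }
+          }
+          mutex_unlock(&items_lock);
+          return NULL;
+  }
+
+  static void item_put(struct item *it)
+  {
+          kref_put(&it->kref, item_release);
+  }
+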
--- /dev/null
+From ca0e0139b4cc7717b0f49ff801bd87b0cc790165 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Feb 2020 15:40:53 +0530
+Subject: ALSA: hda: Clear RIRB status before reading WP
+
+From: Mohan Kumar <mkumard@nvidia.com>
+
+[ Upstream commit 6d011d5057ff88ee556c000ac6fe0be23bdfcd72 ]
+
+RIRB interrupt status getting cleared after the write pointer is read
+causes a race condition, where the last response(s) written into the
+RIRB may remain unserviced by the IRQ handler, eventually causing
+azx_rirb_get_response to fall back to polling mode. Clearing the RIRB
+interrupt status ahead of the write pointer access ensures that this
+condition is avoided.
+
+Signed-off-by: Mohan Kumar <mkumard@nvidia.com>
+Signed-off-by: Viswanath L <viswanathl@nvidia.com>
+Link: https://lore.kernel.org/r/1580983853-351-1-git-send-email-viswanathl@nvidia.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/hda_controller.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
+index fa261b27d8588..8198d2e53b7df 100644
+--- a/sound/pci/hda/hda_controller.c
++++ b/sound/pci/hda/hda_controller.c
+@@ -1169,16 +1169,23 @@ irqreturn_t azx_interrupt(int irq, void *dev_id)
+ if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
+ active = true;
+
+- /* clear rirb int */
+ status = azx_readb(chip, RIRBSTS);
+ if (status & RIRB_INT_MASK) {
++ /*
++ * Clearing the interrupt status here ensures that no
++ * interrupt gets masked after the RIRB wp is read in
++ * snd_hdac_bus_update_rirb. This avoids a possible
++ * race condition where codec response in RIRB may
++ * remain unserviced by IRQ, eventually falling back
++ * to polling mode in azx_rirb_get_response.
++ */
++ azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
+ active = true;
+ if (status & RIRB_INT_RESPONSE) {
+ if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
+ udelay(80);
+ snd_hdac_bus_update_rirb(bus);
+ }
+- azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
+ }
+ } while (active && ++repeat < 10);
+
+--
+2.25.1
+
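+The ordering rule behind this fix is generic: acknowledge the interrupt
+status before consuming the ring buffer, so that an entry arriving while
+the ring is being drained raises a fresh interrupt instead of being
+merged into the one already under service. A minimal sketch with
+hypothetical register names, not the HDA controller code:
+
+  #include <linux/interrupt.h>
+  #include <linux/io.h>
+  #include <linux/types.h>
+
+  #define RING_STATUS    0x5d   /* illustrative offset */
+  #define RING_INT_MASK  0x05   /* illustrative bits */
+
+  struct ring_dev {
+          void __iomem *regs;
+  };
+
+  static void consume_ring_entries(struct ring_dev *dev)
+  {
+          /* read the write pointer and process new entries */
+  }
+
+  static irqreturn_t ring_irq(int irq, void *dev_id)
+  {
+          struct ring_dev *dev = dev_id;
+          u8 status = readb(dev->regs + RING_STATUS);
+
+          if (!(status & RING_INT_MASK))
+                  return IRQ_NONE;
+
+          /* Ack first: a response landing after this point raises a new
+           * interrupt instead of being silently lost. */
+          writeb(RING_INT_MASK, dev->regs + RING_STATUS);
+
+          consume_ring_entries(dev);
+
+          return IRQ_HANDLED;
+  }
+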
--- /dev/null
+From e076969fbc2d2a7559c4fa8c223cb87c859751f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 16 May 2020 08:25:56 +0200
+Subject: ALSA: hda: Fix potential race in unsol event handler
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit c637fa151259c0f74665fde7cba5b7eac1417ae5 ]
+
+The unsol event handling code has a loop retrieving the read/write
+indices and the arrays without locking, while appends to the array
+may happen concurrently. This may lead to some inconsistency.
+Although there hasn't been any proof of bad results from this, it's
+still safer to protect the racy accesses.
+
+This patch adds spinlock protection around the unsol handling loop
+to address this. Here we take bus->reg_lock, since the writer side,
+snd_hdac_bus_queue_event(), is also protected by that lock.
+
+Link: https://lore.kernel.org/r/20200516062556.30951-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/hda/hdac_bus.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/sound/hda/hdac_bus.c b/sound/hda/hdac_bus.c
+index 714a51721a313..ab9236e4c157e 100644
+--- a/sound/hda/hdac_bus.c
++++ b/sound/hda/hdac_bus.c
+@@ -155,6 +155,7 @@ static void process_unsol_events(struct work_struct *work)
+ struct hdac_driver *drv;
+ unsigned int rp, caddr, res;
+
++ spin_lock_irq(&bus->reg_lock);
+ while (bus->unsol_rp != bus->unsol_wp) {
+ rp = (bus->unsol_rp + 1) % HDA_UNSOL_QUEUE_SIZE;
+ bus->unsol_rp = rp;
+@@ -166,10 +167,13 @@ static void process_unsol_events(struct work_struct *work)
+ codec = bus->caddr_tbl[caddr & 0x0f];
+ if (!codec || !codec->dev.driver)
+ continue;
++ spin_unlock_irq(&bus->reg_lock);
+ drv = drv_to_hdac_driver(codec->dev.driver);
+ if (drv->unsol_event)
+ drv->unsol_event(codec, res);
++ spin_lock_irq(&bus->reg_lock);
+ }
++ spin_unlock_irq(&bus->reg_lock);
+ }
+
+ /**
+--
+2.25.1
+
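+The shape of the fix is the usual locked ring-buffer drain that drops
+the lock around the per-entry callback. A condensed sketch of that
+pattern (hypothetical types, not the hdac code):
+
+  #include <linux/spinlock.h>
+  #include <linux/types.h>
+
+  #define QUEUE_SIZE 64
+
+  struct evt_queue {
+          spinlock_t lock;        /* also taken by the producer */
+          unsigned int rp, wp;
+          u32 ring[QUEUE_SIZE];
+  };
+
+  static void handle_event(u32 ev)
+  {
+          /* driver-specific processing, may take other locks */
+  }
+
+  static void drain_events(struct evt_queue *q)
+  {
+          u32 ev;
+
+          spin_lock_irq(&q->lock);
+          while (q->rp != q->wp) {
+                  q->rp = (q->rp + 1) % QUEUE_SIZE;
+                  ev = q->ring[q->rp];
+
+                  /* Drop the lock around the callback so the producer
+                   * is not blocked while the event is handled. */
+                  spin_unlock_irq(&q->lock);
+                  handle_event(ev);
+                  spin_lock_irq(&q->lock);
+          }
+          spin_unlock_irq(&q->lock);
+  }
+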
--- /dev/null
+From 12a9af9adce47631173322d98cf95ffd91a2c668 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2020 15:49:28 +0100
+Subject: ALSA: usb-audio: Don't create a mixer element with bogus volume range
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit e9a0ef0b5ddcbc0d56c65aefc0f18d16e6f71207 ]
+
+Some USB-audio descriptors provide a bogus volume range (e.g. volume
+min and max are identical), which confuses user-space.
+This patch makes the driver skip such a control element.
+
+BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=206221
+Link: https://lore.kernel.org/r/20200214144928.23628-1-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/mixer.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
+index 45bd3d54be54b..451b8ea383c61 100644
+--- a/sound/usb/mixer.c
++++ b/sound/usb/mixer.c
+@@ -1699,6 +1699,16 @@ static void __build_feature_ctl(struct usb_mixer_interface *mixer,
+ /* get min/max values */
+ get_min_max_with_quirks(cval, 0, kctl);
+
++ /* skip a bogus volume range */
++ if (cval->max <= cval->min) {
++ usb_audio_dbg(mixer->chip,
++ "[%d] FU [%s] skipped due to invalid volume\n",
++ cval->head.id, kctl->id.name);
++ snd_ctl_free_one(kctl);
++ return;
++ }
++
++
+ if (control == UAC_FU_VOLUME) {
+ check_mapped_dB(map, cval);
+ if (cval->dBmin < cval->dBmax || !cval->initialized) {
+--
+2.25.1
+
--- /dev/null
+From 8ddaa524f04e0242e3351388be5b497bee91ee3d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 31 Mar 2020 14:25:54 +0200
+Subject: ALSA: usb-audio: Fix case when USB MIDI interface has more than one
+ extra endpoint descriptor
+
+From: Andreas Steinmetz <ast@domdv.de>
+
+[ Upstream commit 5c6cd7021a05a02fcf37f360592d7c18d4d807fb ]
+
+The Miditech MIDIFACE 16x16 (USB ID 1290:1749) has more than one extra
+endpoint descriptor.
+
+The first extra descriptor is: 0x06 0x30 0x00 0x00 0x00 0x00
+
+As the code in snd_usbmidi_get_ms_info() looks only at the
+first extra descriptor to find USB_DT_CS_ENDPOINT, the device
+itself is recognized, but neither input nor output gets
+configured.
+
+The patch iterates through the extra descriptors to find the
+proper one. With this patch the device is correctly configured.
+
+Signed-off-by: Andreas Steinmetz <ast@domdv.de>
+Link: https://lore.kernel.org/r/1c3b431a86f69e1d60745b6110cdb93c299f120b.camel@domdv.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/usb/midi.c | 29 ++++++++++++++++++++++++-----
+ 1 file changed, 24 insertions(+), 5 deletions(-)
+
+diff --git a/sound/usb/midi.c b/sound/usb/midi.c
+index 28a3ad8b1d74b..137e1e8718d6f 100644
+--- a/sound/usb/midi.c
++++ b/sound/usb/midi.c
+@@ -1828,6 +1828,28 @@ static int snd_usbmidi_create_endpoints(struct snd_usb_midi *umidi,
+ return 0;
+ }
+
++static struct usb_ms_endpoint_descriptor *find_usb_ms_endpoint_descriptor(
++ struct usb_host_endpoint *hostep)
++{
++ unsigned char *extra = hostep->extra;
++ int extralen = hostep->extralen;
++
++ while (extralen > 3) {
++ struct usb_ms_endpoint_descriptor *ms_ep =
++ (struct usb_ms_endpoint_descriptor *)extra;
++
++ if (ms_ep->bLength > 3 &&
++ ms_ep->bDescriptorType == USB_DT_CS_ENDPOINT &&
++ ms_ep->bDescriptorSubtype == UAC_MS_GENERAL)
++ return ms_ep;
++ if (!extra[0])
++ break;
++ extralen -= extra[0];
++ extra += extra[0];
++ }
++ return NULL;
++}
++
+ /*
+ * Returns MIDIStreaming device capabilities.
+ */
+@@ -1865,11 +1887,8 @@ static int snd_usbmidi_get_ms_info(struct snd_usb_midi *umidi,
+ ep = get_ep_desc(hostep);
+ if (!usb_endpoint_xfer_bulk(ep) && !usb_endpoint_xfer_int(ep))
+ continue;
+- ms_ep = (struct usb_ms_endpoint_descriptor *)hostep->extra;
+- if (hostep->extralen < 4 ||
+- ms_ep->bLength < 4 ||
+- ms_ep->bDescriptorType != USB_DT_CS_ENDPOINT ||
+- ms_ep->bDescriptorSubtype != UAC_MS_GENERAL)
++ ms_ep = find_usb_ms_endpoint_descriptor(hostep);
++ if (!ms_ep)
+ continue;
+ if (usb_endpoint_dir_out(ep)) {
+ if (endpoints[epidx].out_ep) {
+--
+2.25.1
+
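+The underlying technique is the standard walk over a device's "extra"
+descriptor blob, advancing by each descriptor's bLength rather than
+assuming the wanted descriptor comes first. A generic sketch (assumed
+helper name, not the midi.c code):
+
+  #include <linux/types.h>
+
+  /* Return the first descriptor of the given type in a concatenated
+   * descriptor blob, or NULL. Stops on a malformed length byte. */
+  static void *find_class_desc(unsigned char *extra, int extralen, u8 type)
+  {
+          while (extralen > 2) {
+                  u8 len = extra[0];     /* bLength */
+
+                  if (len < 2 || len > extralen)
+                          break;
+                  if (extra[1] == type)  /* bDescriptorType */
+                          return extra;
+                  extralen -= len;
+                  extra += len;
+          }
+          return NULL;
+  }
+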
--- /dev/null
+From 66be461b3a79540bd42eb0d4feee6c55cf598e27 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jan 2020 14:11:25 +0300
+Subject: ar5523: Add USB ID of SMCWUSBT-G2 wireless adapter
+
+From: Mert Dirik <mertdirik@gmail.com>
+
+[ Upstream commit 5b362498a79631f283578b64bf6f4d15ed4cc19a ]
+
+Add the required USB ID for running SMCWUSBT-G2 wireless adapter (SMC
+"EZ Connect g").
+
+This device uses the ar5523 chipset and requires firmware to be loaded.
+Even though the PID of the device is 4507, this patch adds it as 4506
+so that the AR5523_DEVICE_UG macro can set the AR5523_FLAG_PRE_FIRMWARE
+flag for PID 4507.
+
+Signed-off-by: Mert Dirik <mertdirik@gmail.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ar5523/ar5523.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/net/wireless/ath/ar5523/ar5523.c b/drivers/net/wireless/ath/ar5523/ar5523.c
+index da2d179430ca5..4c57e79e5779a 100644
+--- a/drivers/net/wireless/ath/ar5523/ar5523.c
++++ b/drivers/net/wireless/ath/ar5523/ar5523.c
+@@ -1770,6 +1770,8 @@ static const struct usb_device_id ar5523_id_table[] = {
+ AR5523_DEVICE_UX(0x0846, 0x4300), /* Netgear / WG111U */
+ AR5523_DEVICE_UG(0x0846, 0x4250), /* Netgear / WG111T */
+ AR5523_DEVICE_UG(0x0846, 0x5f00), /* Netgear / WPN111 */
++ AR5523_DEVICE_UG(0x083a, 0x4506), /* SMC / EZ Connect
++ SMCWUSBT-G2 */
+ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / AR5523_1 */
+ AR5523_DEVICE_UX(0x157e, 0x3205), /* Umedia / AR5523_2 */
+ AR5523_DEVICE_UG(0x157e, 0x3006), /* Umedia / TEW444UBEU */
+--
+2.25.1
+
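+The "register PID - 1" trick works because the driver's ID macro expands
+to two table entries: one for the normal PID and one for PID + 1, which
+the hardware reports before its firmware has been loaded. Roughly, such
+a pairing macro looks like this (illustrative names, not the exact
+ar5523 definition):
+
+  #include <linux/bits.h>
+  #include <linux/usb.h>
+
+  #define MYDRV_FLAG_PRE_FIRMWARE  BIT(0)
+
+  /* one entry for the post-firmware PID, one for the pre-firmware PID */
+  #define MYDRV_DEVICE(vendor, device)                          \
+          { USB_DEVICE((vendor), (device)) },                   \
+          { USB_DEVICE((vendor), (device) + 1),                 \
+            .driver_info = MYDRV_FLAG_PRE_FIRMWARE }
+
+So adding the SMC adapter as 0x4506 makes the table match both 0x4506
+and 0x4507, with the pre-firmware flag set for the 0x4507 entry that the
+device actually enumerates with.
+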
--- /dev/null
+From 03b8104051773199b7d583e8371988b1df5de25a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Dec 2019 11:48:28 +0100
+Subject: ARM: 8948/1: Prevent OOB access in stacktrace
+
+From: Vincent Whitchurch <vincent.whitchurch@axis.com>
+
+[ Upstream commit 40ff1ddb5570284e039e0ff14d7a859a73dc3673 ]
+
+The stacktrace code can read beyond the stack size, when it attempts to
+read pt_regs from exception frames.
+
+This can happen on normal, non-corrupt stacks. Since the unwind
+information in the extable is not correct for function prologues, the
+unwinding code can return data from the stack which is not actually the
+caller function address, and if in_entry_text() happens to succeed on
+this value, we can end up reading data from outside the task's stack
+when attempting to read pt_regs, since there is no bounds check.
+
+Example:
+
+ [<8010e729>] (unwind_backtrace) from [<8010a9c9>] (show_stack+0x11/0x14)
+ [<8010a9c9>] (show_stack) from [<8057d8d7>] (dump_stack+0x87/0xac)
+ [<8057d8d7>] (dump_stack) from [<8012271d>] (tasklet_action_common.constprop.4+0xa5/0xa8)
+ [<8012271d>] (tasklet_action_common.constprop.4) from [<80102333>] (__do_softirq+0x11b/0x31c)
+ [<80102333>] (__do_softirq) from [<80122485>] (irq_exit+0xad/0xd8)
+ [<80122485>] (irq_exit) from [<8015f3d7>] (__handle_domain_irq+0x47/0x84)
+ [<8015f3d7>] (__handle_domain_irq) from [<8036a523>] (gic_handle_irq+0x43/0x78)
+ [<8036a523>] (gic_handle_irq) from [<80101a49>] (__irq_svc+0x69/0xb4)
+ Exception stack(0xeb491f58 to 0xeb491fa0)
+ 1f40: 7eb14794 00000000
+ 1f60: ffffffff 008dd32c 008dd324 ffffffff 008dd314 0000002a 801011e4 eb490000
+ 1f80: 0000002a 7eb1478c 50c5387d eb491fa8 80101001 8023d09c 40080033 ffffffff
+ [<80101a49>] (__irq_svc) from [<8023d09c>] (do_pipe2+0x0/0xac)
+ [<8023d09c>] (do_pipe2) from [<ffffffff>] (0xffffffff)
+ Exception stack(0xeb491fc8 to 0xeb492010)
+ 1fc0: 008dd314 0000002a 00511ad8 008de4c8 7eb14790 7eb1478c
+ 1fe0: 00511e34 7eb14774 004c8557 76f44098 60080030 7eb14794 00000000 00000000
+ 2000: 00000001 00000000 ea846c00 ea847cc0
+
+In this example, the stack limit is 0xeb492000, but 16 bytes outside the
+stack have been read.
+
+Fix it by adding bounds checks.
+
+Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm/kernel/stacktrace.c | 2 ++
+ arch/arm/kernel/traps.c | 6 ++++--
+ 2 files changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/arch/arm/kernel/stacktrace.c b/arch/arm/kernel/stacktrace.c
+index a4d4a28fe07df..d23ab9ec130a3 100644
+--- a/arch/arm/kernel/stacktrace.c
++++ b/arch/arm/kernel/stacktrace.c
+@@ -115,6 +115,8 @@ static int save_trace(struct stackframe *frame, void *d)
+ return 0;
+
+ regs = (struct pt_regs *)frame->sp;
++ if ((unsigned long)&regs[1] > ALIGN(frame->sp, THREAD_SIZE))
++ return 0;
+
+ trace->entries[trace->nr_entries++] = regs->ARM_pc;
+
+diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
+index badf02ca36938..aec533168f046 100644
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -67,14 +67,16 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
+
+ void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
+ {
++ unsigned long end = frame + 4 + sizeof(struct pt_regs);
++
+ #ifdef CONFIG_KALLSYMS
+ printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
+ #else
+ printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
+ #endif
+
+- if (in_entry_text(from))
+- dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
++ if (in_entry_text(from) && end <= ALIGN(frame, THREAD_SIZE))
++ dump_mem("", "Exception stack", frame + 4, end);
+ }
+
+ void dump_backtrace_stm(u32 *stack, u32 instruction)
+--
+2.25.1
+
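+The bounds check relies on kernel stacks being THREAD_SIZE sized and
+aligned, so the top of the stack that a given stack pointer lives on can
+be computed with ALIGN(). A small illustrative helper (an assumption
+about the configuration, not code from the patch):
+
+  #include <linux/kernel.h>
+  #include <linux/thread_info.h>
+
+  /* True if [ptr, ptr + size) stays on the same THREAD_SIZE-aligned
+   * stack that 'sp' points into. */
+  static bool fits_on_stack(unsigned long sp, unsigned long ptr,
+                            unsigned long size)
+  {
+          unsigned long top = ALIGN(sp, THREAD_SIZE);
+
+          return ptr >= sp && ptr <= top && size <= top - ptr;
+  }
+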
--- /dev/null
+From 2f11fa72ca10fb9507159dc27fd1c39f25cf64f4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 May 2020 15:10:39 +0530
+Subject: arm64/cpufeature: Drop TraceFilt feature exposure from ID_DFR0
+ register
+
+From: Anshuman Khandual <anshuman.khandual@arm.com>
+
+[ Upstream commit 1ed1b90a0594c8c9d31e8bb8be25a2b37717dc9e ]
+
+The ID_DFR0 based TraceFilt feature should not be exposed to guests.
+Hence let's drop it.
+
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-kernel@vger.kernel.org
+
+Suggested-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Anshuman Khandual <anshuman.khandual@arm.com>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Link: https://lore.kernel.org/r/1589881254-10082-3-git-send-email-anshuman.khandual@arm.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/cpufeature.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 095dec566275f..de6fa9b4abfa0 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -300,7 +300,7 @@ static const struct arm64_ftr_bits ftr_id_pfr0[] = {
+ };
+
+ static const struct arm64_ftr_bits ftr_id_dfr0[] = {
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
++ /* [31:28] TraceFilt */
+ S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0xf), /* PerfMon */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
+--
+2.25.1
+
--- /dev/null
+From 0f99b4e0803abddea53738e47c235ad1c09a9332 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Apr 2020 15:29:21 +0100
+Subject: arm64: cpufeature: Relax checks for AArch32 support at EL[0-2]
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit 98448cdfe7060dd5491bfbd3f7214ffe1395d58e ]
+
+We don't need to be quite as strict about mismatched AArch32 support,
+which is good because the friendly hardware folks have been busy
+mismatching this to their hearts' content.
+
+ * We don't care about EL2 or EL3 (there are silly comments concerning
+ the latter, so remove those)
+
+ * EL1 support is gated by the ARM64_HAS_32BIT_EL1 capability and handled
+ gracefully when a mismatch occurs
+
+ * EL0 support is gated by the ARM64_HAS_32BIT_EL0 capability and handled
+ gracefully when a mismatch occurs
+
+Relax the AArch32 checks to FTR_NONSTRICT.
+
+Tested-by: Sai Prakash Ranjan <saiprakash.ranjan@codeaurora.org>
+Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
+Link: https://lore.kernel.org/r/20200421142922.18950-8-will@kernel.org
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/cpufeature.c | 10 +++-------
+ 1 file changed, 3 insertions(+), 7 deletions(-)
+
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index ac3126aba0368..095dec566275f 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -155,11 +155,10 @@ static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_GIC_SHIFT, 4, 0),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_ASIMD_SHIFT, 4, ID_AA64PFR0_ASIMD_NI),
+ S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_FP_SHIFT, 4, ID_AA64PFR0_FP_NI),
+- /* Linux doesn't care about the EL3 */
+ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL3_SHIFT, 4, 0),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
+- ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL2_SHIFT, 4, 0),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SHIFT, 4, ID_AA64PFR0_EL1_64BIT_ONLY),
++ ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL0_SHIFT, 4, ID_AA64PFR0_EL0_64BIT_ONLY),
+ ARM64_FTR_END,
+ };
+
+@@ -671,9 +670,6 @@ void update_cpu_features(int cpu,
+ taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
+ info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
+
+- /*
+- * EL3 is not our concern.
+- */
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
+ info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
+ taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
+--
+2.25.1
+
--- /dev/null
+From 4f6ee8bc5358c241588d2cf46571161be3496939 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 May 2020 09:22:28 +0800
+Subject: ASoC: img-i2s-out: Fix runtime PM imbalance on error
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit 65bd91dd6957390c42a0491b9622cf31a2cdb140 ]
+
+pm_runtime_get_sync() increments the runtime PM usage counter even
+when the call returns an error code. Thus a pairing decrement is
+needed on the error handling path to keep the counter balanced.
+
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Link: https://lore.kernel.org/r/20200529012230.5863-1-dinghao.liu@zju.edu.cn
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/img/img-i2s-out.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/sound/soc/img/img-i2s-out.c b/sound/soc/img/img-i2s-out.c
+index fc2d1dac63339..798ab579564cb 100644
+--- a/sound/soc/img/img-i2s-out.c
++++ b/sound/soc/img/img-i2s-out.c
+@@ -350,8 +350,10 @@ static int img_i2s_out_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
+ chan_control_mask = IMG_I2S_OUT_CHAN_CTL_CLKT_MASK;
+
+ ret = pm_runtime_get_sync(i2s->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_noidle(i2s->dev);
+ return ret;
++ }
+
+ img_i2s_out_disable(i2s);
+
+@@ -491,8 +493,10 @@ static int img_i2s_out_probe(struct platform_device *pdev)
+ goto err_pm_disable;
+ }
+ ret = pm_runtime_get_sync(&pdev->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_noidle(&pdev->dev);
+ goto err_suspend;
++ }
+
+ reg = IMG_I2S_OUT_CTL_FRM_SIZE_MASK;
+ img_i2s_out_writel(i2s, reg, IMG_I2S_OUT_CTL);
+--
+2.25.1
+
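+The imbalance exists because pm_runtime_get_sync() increments the usage
+counter before attempting the resume and leaves it incremented even on
+failure. The canonical error-path pattern is shown below (a generic
+sketch; later kernels also added pm_runtime_resume_and_get(), which
+drops the count on failure for you):
+
+  #include <linux/device.h>
+  #include <linux/pm_runtime.h>
+
+  static int touch_hw(struct device *dev)
+  {
+          int ret;
+
+          ret = pm_runtime_get_sync(dev);
+          if (ret < 0) {
+                  /* resume failed, but the usage counter was still
+                   * incremented: drop it before bailing out */
+                  pm_runtime_put_noidle(dev);
+                  return ret;
+          }
+
+          /* ... access the hardware ... */
+
+          pm_runtime_put(dev);
+          return 0;
+  }
+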
--- /dev/null
+From 01ce435e5f37ebfeecd45d145220c43b483ef9ef Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 1 Sep 2020 10:06:23 +0200
+Subject: ASoC: Intel: bytcr_rt5640: Add quirk for MPMAN Converter9 2-in-1
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+[ Upstream commit 6a0137101f47301fff2da6ba4b9048383d569909 ]
+
+The MPMAN Converter9 2-in-1 almost fully works with our default settings.
+The only problem is that it has only 1 speaker, so any sound that plays
+only on the right channel gets lost.
+
+Add a quirk for this model using the default settings + MONO_SPEAKER.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Acked-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Link: https://lore.kernel.org/r/20200901080623.4987-1-hdegoede@redhat.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/intel/boards/bytcr_rt5640.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/sound/soc/intel/boards/bytcr_rt5640.c b/sound/soc/intel/boards/bytcr_rt5640.c
+index 0dcd249877c55..ec630127ef2f3 100644
+--- a/sound/soc/intel/boards/bytcr_rt5640.c
++++ b/sound/soc/intel/boards/bytcr_rt5640.c
+@@ -588,6 +588,16 @@ static const struct dmi_system_id byt_rt5640_quirk_table[] = {
+ BYT_RT5640_SSP0_AIF1 |
+ BYT_RT5640_MCLK_EN),
+ },
++ { /* MPMAN Converter 9, similar hw as the I.T.Works TW891 2-in-1 */
++ .matches = {
++ DMI_MATCH(DMI_SYS_VENDOR, "MPMAN"),
++ DMI_MATCH(DMI_PRODUCT_NAME, "Converter9"),
++ },
++ .driver_data = (void *)(BYTCR_INPUT_DEFAULTS |
++ BYT_RT5640_MONO_SPEAKER |
++ BYT_RT5640_SSP0_AIF1 |
++ BYT_RT5640_MCLK_EN),
++ },
+ {
+ /* MPMAN MPWIN895CL */
+ .matches = {
+--
+2.25.1
+
--- /dev/null
+From 19203881db1cd08a3aea370f7a31ed4fbbf58cbc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 23 Oct 2019 16:46:59 +0100
+Subject: ASoC: kirkwood: fix IRQ error handling
+
+From: Russell King <rmk+kernel@armlinux.org.uk>
+
+[ Upstream commit 175fc928198236037174e5c5c066fe3c4691903e ]
+
+Propagate the error code from request_irq(), rather than returning
+-EBUSY.
+
+Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
+Link: https://lore.kernel.org/r/E1iNIqh-0000tW-EZ@rmk-PC.armlinux.org.uk
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/kirkwood/kirkwood-dma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/sound/soc/kirkwood/kirkwood-dma.c b/sound/soc/kirkwood/kirkwood-dma.c
+index c6a58520d377a..255cc45905b81 100644
+--- a/sound/soc/kirkwood/kirkwood-dma.c
++++ b/sound/soc/kirkwood/kirkwood-dma.c
+@@ -136,7 +136,7 @@ static int kirkwood_dma_open(struct snd_pcm_substream *substream)
+ err = request_irq(priv->irq, kirkwood_dma_irq, IRQF_SHARED,
+ "kirkwood-i2s", priv);
+ if (err)
+- return -EBUSY;
++ return err;
+
+ /*
+ * Enable Error interrupts. We're only ack'ing them but
+--
+2.25.1
+
--- /dev/null
+From a82f9cfebcddb0b42ab21f71e2808da2c4d1a442 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 Nov 2019 15:31:12 +0800
+Subject: ASoC: max98090: remove msleep in PLL unlocked workaround
+
+From: Tzung-Bi Shih <tzungbi@google.com>
+
+[ Upstream commit acb874a7c049ec49d8fc66c893170fb42c01bdf7 ]
+
+It was observed that Baytrail-based chromebooks could hit a continuous
+PLL-unlocked condition when using a playback stream and a capture stream
+simultaneously; specifically, when starting a capture stream after a
+playback stream has been started. As a result, the audio data could
+become corrupted or turn completely silent.
+
+As the datasheet suggests, the maximum PLL lock time should be 7 msec.
+The workaround resets the codec softly by toggling SHDN off and on if
+the PLL fails to lock for 10 msec. Notably, there is no suggested hold
+time for SHDN off.
+
+On Baytrail-based chromebooks, the continuous PLL-unlocked condition
+would easily occur if there is a 10 msec delay between SHDN off and on.
+Remove the msleep().
+
+Signed-off-by: Tzung-Bi Shih <tzungbi@google.com>
+Link: https://lore.kernel.org/r/20191122073114.219945-2-tzungbi@google.com
+Reviewed-by: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/max98090.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
+index 89b6e187ac235..a5b0c40ee545f 100644
+--- a/sound/soc/codecs/max98090.c
++++ b/sound/soc/codecs/max98090.c
+@@ -2130,10 +2130,16 @@ static void max98090_pll_work(struct max98090_priv *max98090)
+
+ dev_info_ratelimited(component->dev, "PLL unlocked\n");
+
++ /*
++ * As the datasheet suggested, the maximum PLL lock time should be
++ * 7 msec. The workaround resets the codec softly by toggling SHDN
++ * off and on if PLL failed to lock for 10 msec. Notably, there is
++ * no suggested hold time for SHDN off.
++ */
++
+ /* Toggle shutdown OFF then ON */
+ snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
+ M98090_SHDNN_MASK, 0);
+- msleep(10);
+ snd_soc_component_update_bits(component, M98090_REG_DEVICE_SHUTDOWN,
+ M98090_SHDNN_MASK, M98090_SHDNN_MASK);
+
+--
+2.25.1
+
--- /dev/null
+From 7c3ba5cbecbb85fc789127fbbde2405652993720 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Aug 2020 19:33:57 +0200
+Subject: ASoC: wm8994: Ensure the device is resumed in wm89xx_mic_detect
+ functions
+
+From: Sylwester Nawrocki <s.nawrocki@samsung.com>
+
+[ Upstream commit f5a2cda4f1db89776b64c4f0f2c2ac609527ac70 ]
+
+When the wm8958_mic_detect and wm8994_mic_detect functions get called
+from the machine driver, e.g. from the card's late_probe() callback, the
+CODEC device may be PM runtime suspended and any regmap writes then have
+no effect.
+Add PM runtime calls to these functions to ensure the device registers
+are updated as expected.
+This suppresses an error during boot:
+"wm8994-codec: ASoC: error at snd_soc_component_update_bits on wm8994-codec"
+caused by the regmap access failing due to the cache_only flag being set.
+
+Signed-off-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
+Acked-by: Krzysztof Kozlowski <krzk@kernel.org>
+Acked-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Link: https://lore.kernel.org/r/20200827173357.31891-2-s.nawrocki@samsung.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/wm8994.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index cd089b4143029..e3e069277a3ff 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -3376,6 +3376,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
+ return -EINVAL;
+ }
+
++ pm_runtime_get_sync(component->dev);
++
+ switch (micbias) {
+ case 1:
+ micdet = &wm8994->micdet[0];
+@@ -3423,6 +3425,8 @@ int wm8994_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
+
+ snd_soc_dapm_sync(dapm);
+
++ pm_runtime_put(component->dev);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(wm8994_mic_detect);
+@@ -3790,6 +3794,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
+ return -EINVAL;
+ }
+
++ pm_runtime_get_sync(component->dev);
++
+ if (jack) {
+ snd_soc_dapm_force_enable_pin(dapm, "CLK_SYS");
+ snd_soc_dapm_sync(dapm);
+@@ -3858,6 +3864,8 @@ int wm8958_mic_detect(struct snd_soc_component *component, struct snd_soc_jack *
+ snd_soc_dapm_sync(dapm);
+ }
+
++ pm_runtime_put(component->dev);
++
+ return 0;
+ }
+ EXPORT_SYMBOL_GPL(wm8958_mic_detect);
+--
+2.25.1
+
--- /dev/null
+From 24bef123b622cff1979ad8c656b4ec80746fd283 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 27 Aug 2020 19:33:56 +0200
+Subject: ASoC: wm8994: Skip setting of the WM8994_MICBIAS register for WM1811
+
+From: Sylwester Nawrocki <s.nawrocki@samsung.com>
+
+[ Upstream commit 811c5494436789e7149487c06e0602b507ce274b ]
+
+The WM8994_MICBIAS register is not available in the WM1811 CODEC so skip
+initialization of that register for that device.
+This suppresses an error during boot:
+"wm8994-codec: ASoC: error at snd_soc_component_update_bits on wm8994-codec"
+
+Signed-off-by: Sylwester Nawrocki <s.nawrocki@samsung.com>
+Acked-by: Krzysztof Kozlowski <krzk@kernel.org>
+Acked-by: Charles Keepax <ckeepax@opensource.cirrus.com>
+Link: https://lore.kernel.org/r/20200827173357.31891-1-s.nawrocki@samsung.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/soc/codecs/wm8994.c | 2 ++
+ sound/soc/codecs/wm_hubs.c | 3 +++
+ sound/soc/codecs/wm_hubs.h | 1 +
+ 3 files changed, 6 insertions(+)
+
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 01acb8da2f48e..cd089b4143029 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -4051,11 +4051,13 @@ static int wm8994_component_probe(struct snd_soc_component *component)
+ wm8994->hubs.dcs_readback_mode = 2;
+ break;
+ }
++ wm8994->hubs.micd_scthr = true;
+ break;
+
+ case WM8958:
+ wm8994->hubs.dcs_readback_mode = 1;
+ wm8994->hubs.hp_startup_mode = 1;
++ wm8994->hubs.micd_scthr = true;
+
+ switch (control->revision) {
+ case 0:
+diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
+index fed6ea9b019f7..da7fa6f5459e6 100644
+--- a/sound/soc/codecs/wm_hubs.c
++++ b/sound/soc/codecs/wm_hubs.c
+@@ -1227,6 +1227,9 @@ int wm_hubs_handle_analogue_pdata(struct snd_soc_component *component,
+ snd_soc_component_update_bits(component, WM8993_ADDITIONAL_CONTROL,
+ WM8993_LINEOUT2_FB, WM8993_LINEOUT2_FB);
+
++ if (!hubs->micd_scthr)
++ return 0;
++
+ snd_soc_component_update_bits(component, WM8993_MICBIAS,
+ WM8993_JD_SCTHR_MASK | WM8993_JD_THR_MASK |
+ WM8993_MICB1_LVL | WM8993_MICB2_LVL,
+diff --git a/sound/soc/codecs/wm_hubs.h b/sound/soc/codecs/wm_hubs.h
+index ee339ad8514d1..1433d73e09bf8 100644
+--- a/sound/soc/codecs/wm_hubs.h
++++ b/sound/soc/codecs/wm_hubs.h
+@@ -31,6 +31,7 @@ struct wm_hubs_data {
+ int hp_startup_mode;
+ int series_startup;
+ int no_series_update;
++ bool micd_scthr;
+
+ bool no_cache_dac_hp_direct;
+ struct list_head dcs_cache;
+--
+2.25.1
+
--- /dev/null
+From 35e584e3f07a978a585d7b9e4176c03a1a45af65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Oct 2019 10:59:46 +0100
+Subject: ata: sata_mv, avoid triggerable BUG_ON
+
+From: Jiri Slaby <jslaby@suse.cz>
+
+[ Upstream commit e9f691d899188679746eeb96e6cb520459eda9b4 ]
+
+There are several reports that the BUG_ON on unsupported command in
+mv_qc_prep can be triggered under some circumstances:
+https://bugzilla.suse.com/show_bug.cgi?id=1110252
+https://serverfault.com/questions/888897/raid-problems-after-power-outage
+https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1652185
+https://bugs.centos.org/view.php?id=14998
+
+Let sata_mv handle the failure gracefully: warn about it, including
+the failed command number, and return an AC_ERR_INVALID error. We can
+do that now thanks to the previous patch.
+
+Remove also the long-standing FIXME.
+
+[v2] use %.2x as commands are defined as hexa.
+
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: linux-ide@vger.kernel.org
+Cc: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/ata/sata_mv.c | 8 +++-----
+ 1 file changed, 3 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
+index ab2e9f62ddc1a..25d24ed3385ab 100644
+--- a/drivers/ata/sata_mv.c
++++ b/drivers/ata/sata_mv.c
+@@ -2111,12 +2111,10 @@ static void mv_qc_prep(struct ata_queued_cmd *qc)
+ * non-NCQ mode are: [RW] STREAM DMA and W DMA FUA EXT, none
+ * of which are defined/used by Linux. If we get here, this
+ * driver needs work.
+- *
+- * FIXME: modify libata to give qc_prep a return value and
+- * return error here.
+ */
+- BUG_ON(tf->command);
+- break;
++ ata_port_err(ap, "%s: unsupported command: %.2x\n", __func__,
++ tf->command);
++ return AC_ERR_INVALID;
+ }
+ mv_crqb_pack_cmd(cw++, tf->nsect, ATA_REG_NSECT, 0);
+ mv_crqb_pack_cmd(cw++, tf->hob_lbal, ATA_REG_LBAL, 0);
+--
+2.25.1
+
--- /dev/null
+From 3beb6336bcfef1d5a349958473aaa23cdced7c6e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Oct 2019 16:18:08 +0800
+Subject: ath10k: fix array out-of-bounds access
+
+From: Miaoqing Pan <miaoqing@codeaurora.org>
+
+[ Upstream commit c5329b2d5b8b4e41be14d31ee8505b4f5607bf9b ]
+
+If firmware reports rate_max > WMI_TPC_RATE_MAX (WMI_TPC_FINAL_RATE_MAX)
+or num_tx_chain > WMI_TPC_TX_N_CHAIN, it will cause array out-of-bounds
+access, so print a warning and cap or reject the values to avoid memory
+corruption.
+
+Tested HW: QCA9984
+Tested FW: 10.4-3.9.0.2-00035
+
+Signed-off-by: Miaoqing Pan <miaoqing@codeaurora.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath10k/debug.c | 2 +-
+ drivers/net/wireless/ath/ath10k/wmi.c | 49 ++++++++++++++++---------
+ 2 files changed, 32 insertions(+), 19 deletions(-)
+
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index 0baaad90b8d18..aa333110eaba6 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -1521,7 +1521,7 @@ static void ath10k_tpc_stats_print(struct ath10k_tpc_stats *tpc_stats,
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "No. Preamble Rate_code ");
+
+- for (i = 0; i < WMI_TPC_TX_N_CHAIN; i++)
++ for (i = 0; i < tpc_stats->num_tx_chain; i++)
+ *len += scnprintf(buf + *len, buf_len - *len,
+ "tpc_value%d ", i);
+
+diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
+index 3372dfa0deccf..3f3fbee631c34 100644
+--- a/drivers/net/wireless/ath/ath10k/wmi.c
++++ b/drivers/net/wireless/ath/ath10k/wmi.c
+@@ -4550,16 +4550,13 @@ static void ath10k_tpc_config_disp_tables(struct ath10k *ar,
+ }
+
+ pream_idx = 0;
+- for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
++ for (i = 0; i < tpc_stats->rate_max; i++) {
+ memset(tpc_value, 0, sizeof(tpc_value));
+ memset(buff, 0, sizeof(buff));
+ if (i == pream_table[pream_idx])
+ pream_idx++;
+
+- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
+- if (j >= __le32_to_cpu(ev->num_tx_chain))
+- break;
+-
++ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
+ tpc[j] = ath10k_tpc_config_get_rate(ar, ev, i, j + 1,
+ rate_code[i],
+ type);
+@@ -4672,7 +4669,7 @@ void ath10k_wmi_tpc_config_get_rate_code(u8 *rate_code, u16 *pream_table,
+
+ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+ {
+- u32 num_tx_chain;
++ u32 num_tx_chain, rate_max;
+ u8 rate_code[WMI_TPC_RATE_MAX];
+ u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+ struct wmi_pdev_tpc_config_event *ev;
+@@ -4688,6 +4685,13 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+ return;
+ }
+
++ rate_max = __le32_to_cpu(ev->rate_max);
++ if (rate_max > WMI_TPC_RATE_MAX) {
++ ath10k_warn(ar, "number of rate is %d greater than TPC configured rate %d\n",
++ rate_max, WMI_TPC_RATE_MAX);
++ rate_max = WMI_TPC_RATE_MAX;
++ }
++
+ tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ if (!tpc_stats)
+ return;
+@@ -4704,8 +4708,8 @@ void ath10k_wmi_event_pdev_tpc_config(struct ath10k *ar, struct sk_buff *skb)
+ __le32_to_cpu(ev->twice_antenna_reduction);
+ tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+ tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+- tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+- tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
++ tpc_stats->num_tx_chain = num_tx_chain;
++ tpc_stats->rate_max = rate_max;
+
+ ath10k_tpc_config_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+@@ -4900,16 +4904,13 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
+ }
+
+ pream_idx = 0;
+- for (i = 0; i < __le32_to_cpu(ev->rate_max); i++) {
++ for (i = 0; i < tpc_stats->rate_max; i++) {
+ memset(tpc_value, 0, sizeof(tpc_value));
+ memset(buff, 0, sizeof(buff));
+ if (i == pream_table[pream_idx])
+ pream_idx++;
+
+- for (j = 0; j < WMI_TPC_TX_N_CHAIN; j++) {
+- if (j >= __le32_to_cpu(ev->num_tx_chain))
+- break;
+-
++ for (j = 0; j < tpc_stats->num_tx_chain; j++) {
+ tpc[j] = ath10k_wmi_tpc_final_get_rate(ar, ev, i, j + 1,
+ rate_code[i],
+ type, pream_idx);
+@@ -4925,7 +4926,7 @@ ath10k_wmi_tpc_stats_final_disp_tables(struct ath10k *ar,
+
+ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
+ {
+- u32 num_tx_chain;
++ u32 num_tx_chain, rate_max;
+ u8 rate_code[WMI_TPC_FINAL_RATE_MAX];
+ u16 pream_table[WMI_TPC_PREAM_TABLE_MAX];
+ struct wmi_pdev_tpc_final_table_event *ev;
+@@ -4933,12 +4934,24 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
+
+ ev = (struct wmi_pdev_tpc_final_table_event *)skb->data;
+
++ num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
++ if (num_tx_chain > WMI_TPC_TX_N_CHAIN) {
++ ath10k_warn(ar, "number of tx chain is %d greater than TPC final configured tx chain %d\n",
++ num_tx_chain, WMI_TPC_TX_N_CHAIN);
++ return;
++ }
++
++ rate_max = __le32_to_cpu(ev->rate_max);
++ if (rate_max > WMI_TPC_FINAL_RATE_MAX) {
++ ath10k_warn(ar, "number of rate is %d greater than TPC final configured rate %d\n",
++ rate_max, WMI_TPC_FINAL_RATE_MAX);
++ rate_max = WMI_TPC_FINAL_RATE_MAX;
++ }
++
+ tpc_stats = kzalloc(sizeof(*tpc_stats), GFP_ATOMIC);
+ if (!tpc_stats)
+ return;
+
+- num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+-
+ ath10k_wmi_tpc_config_get_rate_code(rate_code, pream_table,
+ num_tx_chain);
+
+@@ -4951,8 +4964,8 @@ void ath10k_wmi_event_tpc_final_table(struct ath10k *ar, struct sk_buff *skb)
+ __le32_to_cpu(ev->twice_antenna_reduction);
+ tpc_stats->power_limit = __le32_to_cpu(ev->power_limit);
+ tpc_stats->twice_max_rd_power = __le32_to_cpu(ev->twice_max_rd_power);
+- tpc_stats->num_tx_chain = __le32_to_cpu(ev->num_tx_chain);
+- tpc_stats->rate_max = __le32_to_cpu(ev->rate_max);
++ tpc_stats->num_tx_chain = num_tx_chain;
++ tpc_stats->rate_max = rate_max;
+
+ ath10k_wmi_tpc_stats_final_disp_tables(ar, ev, tpc_stats,
+ rate_code, pream_table,
+--
+2.25.1
+
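+The general rule the fix applies is to treat lengths reported by
+firmware as untrusted and cap them before they index local arrays. A
+compact sketch of that sanitisation (hypothetical names):
+
+  #include <linux/device.h>
+  #include <linux/types.h>
+
+  #define MAX_CHAINS 16   /* size of the local arrays */
+
+  static u32 sanitize_num_chains(struct device *dev, u32 reported)
+  {
+          if (reported > MAX_CHAINS) {
+                  dev_warn(dev, "firmware reported %u chains, capping to %u\n",
+                           reported, MAX_CHAINS);
+                  reported = MAX_CHAINS;
+          }
+          return reported;
+  }
+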
--- /dev/null
+From a55e24c8316396ae6f33e59e0714430383a967d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Oct 2019 16:18:09 +0800
+Subject: ath10k: fix memory leak for tpc_stats_final
+
+From: Miaoqing Pan <miaoqing@codeaurora.org>
+
+[ Upstream commit 486a8849843455298d49e694cca9968336ce2327 ]
+
+The memory of ar->debug.tpc_stats_final is reallocated on every debugfs
+read; the last allocation should be freed in ath10k_debug_destroy().
+
+Tested HW: QCA9984
+Tested FW: 10.4-3.9.0.2-00035
+
+Signed-off-by: Miaoqing Pan <miaoqing@codeaurora.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath10k/debug.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
+index aa333110eaba6..4e980e78ba95c 100644
+--- a/drivers/net/wireless/ath/ath10k/debug.c
++++ b/drivers/net/wireless/ath/ath10k/debug.c
+@@ -2365,6 +2365,7 @@ void ath10k_debug_destroy(struct ath10k *ar)
+ ath10k_debug_fw_stats_reset(ar);
+
+ kfree(ar->debug.tpc_stats);
++ kfree(ar->debug.tpc_stats_final);
+ }
+
+ int ath10k_debug_register(struct ath10k *ar)
+--
+2.25.1
+
--- /dev/null
+From 41880bf35667f2125efbd3fd7491496afe2117a2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2020 11:42:18 +0800
+Subject: ath10k: use kzalloc to read for ath10k_sdio_hif_diag_read
+
+From: Wen Gong <wgong@codeaurora.org>
+
+[ Upstream commit 402f2992b4d62760cce7c689ff216ea3bf4d6e8a ]
+
+When using a command to read values, the system crashed.
+
+command:
+dd if=/sys/kernel/debug/ieee80211/phy0/ath10k/mem_value count=1 bs=4 skip=$((0x100233))
+
+It calls ath10k_sdio_hif_diag_read with address = 0x4008cc and buf_len = 4.
+
+Then the system crashes:
+[ 1786.013258] Unable to handle kernel paging request at virtual address ffffffc00bd45000
+[ 1786.013273] Mem abort info:
+[ 1786.013281] ESR = 0x96000045
+[ 1786.013291] Exception class = DABT (current EL), IL = 32 bits
+[ 1786.013299] SET = 0, FnV = 0
+[ 1786.013307] EA = 0, S1PTW = 0
+[ 1786.013314] Data abort info:
+[ 1786.013322] ISV = 0, ISS = 0x00000045
+[ 1786.013330] CM = 0, WnR = 1
+[ 1786.013342] swapper pgtable: 4k pages, 39-bit VAs, pgdp = 000000008542a60e
+[ 1786.013350] [ffffffc00bd45000] pgd=0000000000000000, pud=0000000000000000
+[ 1786.013368] Internal error: Oops: 96000045 [#1] PREEMPT SMP
+[ 1786.013609] Process swapper/0 (pid: 0, stack limit = 0x0000000084b153c6)
+[ 1786.013623] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 4.19.86 #137
+[ 1786.013631] Hardware name: MediaTek krane sku176 board (DT)
+[ 1786.013643] pstate: 80000085 (Nzcv daIf -PAN -UAO)
+[ 1786.013662] pc : __memcpy+0x94/0x180
+[ 1786.013678] lr : swiotlb_tbl_unmap_single+0x84/0x150
+[ 1786.013686] sp : ffffff8008003c60
+[ 1786.013694] x29: ffffff8008003c90 x28: ffffffae96411f80
+[ 1786.013708] x27: ffffffae960d2018 x26: ffffff8019a4b9a8
+[ 1786.013721] x25: 0000000000000000 x24: 0000000000000001
+[ 1786.013734] x23: ffffffae96567000 x22: 00000000000051d4
+[ 1786.013747] x21: 0000000000000000 x20: 00000000fe6e9000
+[ 1786.013760] x19: 0000000000000004 x18: 0000000000000020
+[ 1786.013773] x17: 0000000000000001 x16: 0000000000000000
+[ 1786.013787] x15: 00000000ffffffff x14: 00000000000044c0
+[ 1786.013800] x13: 0000000000365ba4 x12: 0000000000000000
+[ 1786.013813] x11: 0000000000000001 x10: 00000037be6e9000
+[ 1786.013826] x9 : ffffffc940000000 x8 : 000000000bd45000
+[ 1786.013839] x7 : 0000000000000000 x6 : ffffffc00bd45000
+[ 1786.013852] x5 : 0000000000000000 x4 : 0000000000000000
+[ 1786.013865] x3 : 0000000000000c00 x2 : 0000000000000004
+[ 1786.013878] x1 : fffffff7be6e9004 x0 : ffffffc00bd45000
+[ 1786.013891] Call trace:
+[ 1786.013903] __memcpy+0x94/0x180
+[ 1786.013914] unmap_single+0x6c/0x84
+[ 1786.013925] swiotlb_unmap_sg_attrs+0x54/0x80
+[ 1786.013938] __swiotlb_unmap_sg_attrs+0x8c/0xa4
+[ 1786.013952] msdc_unprepare_data+0x6c/0x84
+[ 1786.013963] msdc_request_done+0x58/0x84
+[ 1786.013974] msdc_data_xfer_done+0x1a0/0x1c8
+[ 1786.013985] msdc_irq+0x12c/0x17c
+[ 1786.013996] __handle_irq_event_percpu+0xe4/0x250
+[ 1786.014006] handle_irq_event_percpu+0x28/0x68
+[ 1786.014015] handle_irq_event+0x48/0x78
+[ 1786.014026] handle_fasteoi_irq+0xd0/0x1a0
+[ 1786.014039] __handle_domain_irq+0x84/0xc4
+[ 1786.014050] gic_handle_irq+0x124/0x1a4
+[ 1786.014059] el1_irq+0xb0/0x128
+[ 1786.014072] cpuidle_enter_state+0x298/0x328
+[ 1786.014082] cpuidle_enter+0x30/0x40
+[ 1786.014094] do_idle+0x190/0x268
+[ 1786.014104] cpu_startup_entry+0x24/0x28
+[ 1786.014116] rest_init+0xd4/0xe0
+[ 1786.014126] start_kernel+0x30c/0x38c
+[ 1786.014139] Code: f8408423 f80084c3 36100062 b8404423 (b80044c3)
+[ 1786.014150] ---[ end trace 3b02ddb698ea69ee ]---
+[ 1786.015415] Kernel panic - not syncing: Fatal exception in interrupt
+[ 1786.015433] SMP: stopping secondary CPUs
+[ 1786.015447] Kernel Offset: 0x2e8d200000 from 0xffffff8008000000
+[ 1786.015458] CPU features: 0x0,2188200c
+[ 1786.015466] Memory Limit: none
+
+For SDIO chips, the transfer needs kmalloc'ed memory; if the buffer is
+vmalloc'ed, as in ath10k_mem_value_read, a memory error occurs. The
+kzalloc buffer used by ath10k_sdio_hif_diag_read32 is of the correct
+type, so add a kzalloc'ed bounce buffer in ath10k_sdio_hif_diag_read
+instead of using the vmalloc'ed buffer from ath10k_mem_value_read
+directly.
+
+This patch only affects SDIO chips.
+
+Tested with QCA6174 SDIO with firmware WLAN.RMH.4.4.1-00029.
+
+Signed-off-by: Wen Gong <wgong@codeaurora.org>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ath/ath10k/sdio.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/wireless/ath/ath10k/sdio.c b/drivers/net/wireless/ath/ath10k/sdio.c
+index 0ecaba824fb28..0cdaecb0e28a9 100644
+--- a/drivers/net/wireless/ath/ath10k/sdio.c
++++ b/drivers/net/wireless/ath/ath10k/sdio.c
+@@ -1567,23 +1567,33 @@ static int ath10k_sdio_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
+ size_t buf_len)
+ {
+ int ret;
++ void *mem;
++
++ mem = kzalloc(buf_len, GFP_KERNEL);
++ if (!mem)
++ return -ENOMEM;
+
+ /* set window register to start read cycle */
+ ret = ath10k_sdio_write32(ar, MBOX_WINDOW_READ_ADDR_ADDRESS, address);
+ if (ret) {
+ ath10k_warn(ar, "failed to set mbox window read address: %d", ret);
+- return ret;
++ goto out;
+ }
+
+ /* read the data */
+- ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, buf, buf_len);
++ ret = ath10k_sdio_read(ar, MBOX_WINDOW_DATA_ADDRESS, mem, buf_len);
+ if (ret) {
+ ath10k_warn(ar, "failed to read from mbox window data address: %d\n",
+ ret);
+- return ret;
++ goto out;
+ }
+
+- return 0;
++ memcpy(buf, mem, buf_len);
++
++out:
++ kfree(mem);
++
++ return ret;
+ }
+
+ static int ath10k_sdio_hif_diag_read32(struct ath10k *ar, u32 address,
+--
+2.25.1
+
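+The fix is a plain bounce buffer: SDIO transfers need DMA-safe,
+kmalloc'ed memory, while the debugfs caller may hand in a vmalloc'ed
+buffer, so the read goes through a temporary kmalloc allocation. The
+pattern in isolation (struct transport and transport_dma_read() are
+stand-ins, not ath10k symbols):
+
+  #include <linux/slab.h>
+  #include <linux/string.h>
+  #include <linux/types.h>
+
+  struct transport;
+  int transport_dma_read(struct transport *t, u32 addr, void *buf,
+                         size_t len);
+
+  static int bounce_read(struct transport *t, u32 addr, void *dst,
+                         size_t len)
+  {
+          void *tmp;
+          int ret;
+
+          tmp = kzalloc(len, GFP_KERNEL);   /* DMA-safe scratch buffer */
+          if (!tmp)
+                  return -ENOMEM;
+
+          ret = transport_dma_read(t, addr, tmp, len);
+          if (!ret)
+                  memcpy(dst, tmp, len);    /* dst may be vmalloc'ed */
+
+          kfree(tmp);
+          return ret;
+  }
+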
--- /dev/null
+From 2bc3e65963dc6258e352cbc0716375bb8c26821c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2020 11:11:09 -0700
+Subject: atm: fix a memory leak of vcc->user_back
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+[ Upstream commit 8d9f73c0ad2f20e9fed5380de0a3097825859d03 ]
+
+In lec_arp_clear_vccs() only entry->vcc is freed, but vcc
+could be installed on entry->recv_vcc too in lec_vcc_added().
+
+This fixes the following memory leak:
+
+unreferenced object 0xffff8880d9266b90 (size 16):
+ comm "atm2", pid 425, jiffies 4294907980 (age 23.488s)
+ hex dump (first 16 bytes):
+ 00 00 00 00 00 00 00 00 00 00 00 00 6b 6b 6b a5 ............kkk.
+ backtrace:
+ [<(____ptrval____)>] kmem_cache_alloc_trace+0x10e/0x151
+ [<(____ptrval____)>] lane_ioctl+0x4b3/0x569
+ [<(____ptrval____)>] do_vcc_ioctl+0x1ea/0x236
+ [<(____ptrval____)>] svc_ioctl+0x17d/0x198
+ [<(____ptrval____)>] sock_do_ioctl+0x47/0x12f
+ [<(____ptrval____)>] sock_ioctl+0x2f9/0x322
+ [<(____ptrval____)>] vfs_ioctl+0x1e/0x2b
+ [<(____ptrval____)>] ksys_ioctl+0x61/0x80
+ [<(____ptrval____)>] __x64_sys_ioctl+0x16/0x19
+ [<(____ptrval____)>] do_syscall_64+0x57/0x65
+ [<(____ptrval____)>] entry_SYSCALL_64_after_hwframe+0x49/0xb3
+
+Cc: Gengming Liu <l.dmxcsnsbh@gmail.com>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/atm/lec.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/atm/lec.c b/net/atm/lec.c
+index ad4f829193f05..5a6186b809874 100644
+--- a/net/atm/lec.c
++++ b/net/atm/lec.c
+@@ -1270,6 +1270,12 @@ static void lec_arp_clear_vccs(struct lec_arp_table *entry)
+ entry->vcc = NULL;
+ }
+ if (entry->recv_vcc) {
++ struct atm_vcc *vcc = entry->recv_vcc;
++ struct lec_vcc_priv *vpriv = LEC_VCC_PRIV(vcc);
++
++ kfree(vpriv);
++ vcc->user_back = NULL;
++
+ entry->recv_vcc->push = entry->old_recv_push;
+ vcc_release_async(entry->recv_vcc, -EPIPE);
+ entry->recv_vcc = NULL;
+--
+2.25.1
+
--- /dev/null
+From 62392c85e6cf123e8df7a2323f9f5ae1d587a76a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Jan 2020 17:29:16 -0500
+Subject: audit: CONFIG_CHANGE don't log internal bookkeeping as an event
+
+From: Steve Grubb <sgrubb@redhat.com>
+
+[ Upstream commit 70b3eeed49e8190d97139806f6fbaf8964306cdb ]
+
+Common Criteria calls out for any action that modifies the audit trail to
+be recorded. That usually is interpreted to mean insertion or removal of
+rules. It is not required to log modification of the inode information
+since the watch is still in effect. Additionally, if the rule is a never
+rule and the underlying file is one they do not want events for, they
+get an event for this bookkeeping update against their wishes.
+
+Since no device/inode info is logged at insertion and no device/inode
+information is logged on update, there is nothing meaningful being
+communicated to the admin by the CONFIG_CHANGE updated_rules event. One
+can assume that the rule was not "modified" because it is still watching
+the intended target. If the device or inode cannot be resolved, then
+audit_panic is called which is sufficient.
+
+The correct resolution is to drop logging config_update events since
+the watch is still in effect but just on another unknown inode.
+
+Signed-off-by: Steve Grubb <sgrubb@redhat.com>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/audit_watch.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/kernel/audit_watch.c b/kernel/audit_watch.c
+index 4f7262eba73d8..50952d6d81209 100644
+--- a/kernel/audit_watch.c
++++ b/kernel/audit_watch.c
+@@ -317,8 +317,6 @@ static void audit_update_watch(struct audit_parent *parent,
+ if (oentry->rule.exe)
+ audit_remove_mark(oentry->rule.exe);
+
+- audit_watch_log_rule_change(r, owatch, "updated_rules");
+-
+ call_rcu(&oentry->rcu, audit_free_rule_rcu);
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 8726c9b31a3533b91991c75f500ee9da7664c955 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 Nov 2019 16:03:16 +0800
+Subject: bcache: fix a lost wake-up problem caused by mca_cannibalize_lock
+
+From: Guoju Fang <fangguoju@gmail.com>
+
+[ Upstream commit 34cf78bf34d48dddddfeeadb44f9841d7864997a ]
+
+This patch fixes a lost wake-up problem caused by the race between
+mca_cannibalize_lock and bch_cannibalize_unlock.
+
+Consider two processes, A and B. Process A is executing
+mca_cannibalize_lock, while process B holds c->btree_cache_alloc_lock
+and is executing bch_cannibalize_unlock. The problem occurs after
+process A has executed cmpxchg but before it executes prepare_to_wait.
+In this timeslice process B executes wake_up, and only after that does
+process A execute prepare_to_wait and set its state to
+TASK_INTERRUPTIBLE. Process A then goes to sleep, but nothing will ever
+wake it up. This problem may cause the bcache device to hang.
+
+Signed-off-by: Guoju Fang <fangguoju@gmail.com>
+Signed-off-by: Coly Li <colyli@suse.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/bcache/bcache.h | 1 +
+ drivers/md/bcache/btree.c | 12 ++++++++----
+ drivers/md/bcache/super.c | 1 +
+ 3 files changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
+index 1cc6ae3e058c6..6a380ed4919a0 100644
+--- a/drivers/md/bcache/bcache.h
++++ b/drivers/md/bcache/bcache.h
+@@ -585,6 +585,7 @@ struct cache_set {
+ */
+ wait_queue_head_t btree_cache_wait;
+ struct task_struct *btree_cache_alloc_lock;
++ spinlock_t btree_cannibalize_lock;
+
+ /*
+ * When we free a btree node, we increment the gen of the bucket the
+diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
+index d320574b9a4c8..e388e7bb7b5db 100644
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -876,15 +876,17 @@ out:
+
+ static int mca_cannibalize_lock(struct cache_set *c, struct btree_op *op)
+ {
+- struct task_struct *old;
+-
+- old = cmpxchg(&c->btree_cache_alloc_lock, NULL, current);
+- if (old && old != current) {
++ spin_lock(&c->btree_cannibalize_lock);
++ if (likely(c->btree_cache_alloc_lock == NULL)) {
++ c->btree_cache_alloc_lock = current;
++ } else if (c->btree_cache_alloc_lock != current) {
+ if (op)
+ prepare_to_wait(&c->btree_cache_wait, &op->wait,
+ TASK_UNINTERRUPTIBLE);
++ spin_unlock(&c->btree_cannibalize_lock);
+ return -EINTR;
+ }
++ spin_unlock(&c->btree_cannibalize_lock);
+
+ return 0;
+ }
+@@ -919,10 +921,12 @@ static struct btree *mca_cannibalize(struct cache_set *c, struct btree_op *op,
+ */
+ static void bch_cannibalize_unlock(struct cache_set *c)
+ {
++ spin_lock(&c->btree_cannibalize_lock);
+ if (c->btree_cache_alloc_lock == current) {
+ c->btree_cache_alloc_lock = NULL;
+ wake_up(&c->btree_cache_wait);
+ }
++ spin_unlock(&c->btree_cannibalize_lock);
+ }
+
+ static struct btree *mca_alloc(struct cache_set *c, struct btree_op *op,
+diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
+index 825bfde10c694..7787ec42f81e1 100644
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -1737,6 +1737,7 @@ struct cache_set *bch_cache_set_alloc(struct cache_sb *sb)
+ sema_init(&c->sb_write_mutex, 1);
+ mutex_init(&c->bucket_lock);
+ init_waitqueue_head(&c->btree_cache_wait);
++ spin_lock_init(&c->btree_cannibalize_lock);
+ init_waitqueue_head(&c->bucket_wait);
+ init_waitqueue_head(&c->gc_wait);
+ sema_init(&c->uuid_write_mutex, 1);
+--
+2.25.1
+
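+The fix closes the classic lost wake-up window: the waiter must be on
+the wait queue before the owner can run its wake_up(), which is
+guaranteed by putting both sides under the same spinlock. A reduced
+sketch of the resulting pattern (generic names, not the bcache code):
+
+  #include <linux/sched.h>
+  #include <linux/spinlock.h>
+  #include <linux/wait.h>
+
+  static DEFINE_SPINLOCK(owner_lock);
+  static struct task_struct *owner;
+  static DECLARE_WAIT_QUEUE_HEAD(owner_wq);
+
+  /* Try to become the owner; if we lose, queue ourselves for a wake-up
+   * under the same lock the waker takes, so the wake-up cannot be lost
+   * between the check and the sleep. */
+  static int owner_trylock(struct wait_queue_entry *wait)
+  {
+          spin_lock(&owner_lock);
+          if (!owner) {
+                  owner = current;
+          } else if (owner != current) {
+                  prepare_to_wait(&owner_wq, wait, TASK_UNINTERRUPTIBLE);
+                  spin_unlock(&owner_lock);
+                  return -EINTR;
+          }
+          spin_unlock(&owner_lock);
+          return 0;
+  }
+
+  static void owner_unlock(void)
+  {
+          spin_lock(&owner_lock);
+          if (owner == current) {
+                  owner = NULL;
+                  wake_up(&owner_wq);
+          }
+          spin_unlock(&owner_lock);
+  }
+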
--- /dev/null
+From 615be4dfeff85066e9bcd30b2fea02771132718a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 14:48:27 -0700
+Subject: bdev: Reduce time holding bd_mutex in sync in blkdev_close()
+
+From: Douglas Anderson <dianders@chromium.org>
+
+[ Upstream commit b849dd84b6ccfe32622988b79b7b073861fcf9f7 ]
+
+While trying to "dd" to the block device for a USB stick, I
+encountered a hung task warning (blocked for > 120 seconds). I
+managed to come up with an easy way to reproduce this on my system
+(where /dev/sdb is the block device for my USB stick) with:
+
+ while true; do dd if=/dev/zero of=/dev/sdb bs=4M; done
+
+With my reproduction here are the relevant bits from the hung task
+detector:
+
+ INFO: task udevd:294 blocked for more than 122 seconds.
+ ...
+ udevd D 0 294 1 0x00400008
+ Call trace:
+ ...
+ mutex_lock_nested+0x40/0x50
+ __blkdev_get+0x7c/0x3d4
+ blkdev_get+0x118/0x138
+ blkdev_open+0x94/0xa8
+ do_dentry_open+0x268/0x3a0
+ vfs_open+0x34/0x40
+ path_openat+0x39c/0xdf4
+ do_filp_open+0x90/0x10c
+ do_sys_open+0x150/0x3c8
+ ...
+
+ ...
+ Showing all locks held in the system:
+ ...
+ 1 lock held by dd/2798:
+ #0: ffffff814ac1a3b8 (&bdev->bd_mutex){+.+.}, at: __blkdev_put+0x50/0x204
+ ...
+ dd D 0 2798 2764 0x00400208
+ Call trace:
+ ...
+ schedule+0x8c/0xbc
+ io_schedule+0x1c/0x40
+ wait_on_page_bit_common+0x238/0x338
+ __lock_page+0x5c/0x68
+ write_cache_pages+0x194/0x500
+ generic_writepages+0x64/0xa4
+ blkdev_writepages+0x24/0x30
+ do_writepages+0x48/0xa8
+ __filemap_fdatawrite_range+0xac/0xd8
+ filemap_write_and_wait+0x30/0x84
+ __blkdev_put+0x88/0x204
+ blkdev_put+0xc4/0xe4
+ blkdev_close+0x28/0x38
+ __fput+0xe0/0x238
+ ____fput+0x1c/0x28
+ task_work_run+0xb0/0xe4
+ do_notify_resume+0xfc0/0x14bc
+ work_pending+0x8/0x14
+
+The problem appears related to the fact that my USB disk is terribly
+slow and that I have a lot of RAM in my system to cache things.
+Specifically my writes seem to be happening at ~15 MB/s and I've got
+~4 GB of RAM in my system that can be used for buffering. To write 4
+GB of buffer to disk thus takes ~4000 MB / ~15 MB/s = ~267 seconds.
+
+The 267 second number is a problem because in __blkdev_put() we call
+sync_blockdev() while holding the bd_mutex. Any other callers who
+want the bd_mutex will be blocked for the whole time.
+
+The problem is made worse because I believe blkdev_put() specifically
+tells other tasks (namely udev) to go try to access the device at right
+around the same time we're going to hold the mutex for a long time.
+
+Putting some traces around this (after disabling the hung task detector),
+I could confirm:
+ dd: 437.608600: __blkdev_put() right before sync_blockdev() for sdb
+ udevd: 437.623901: blkdev_open() right before blkdev_get() for sdb
+ dd: 661.468451: __blkdev_put() right after sync_blockdev() for sdb
+ udevd: 663.820426: blkdev_open() right after blkdev_get() for sdb
+
+A simple fix for this is to realize that sync_blockdev() works fine if
+you're not holding the mutex. Also, it's not the end of the world if
+you sync a little early (though it can have performance impacts).
+Thus we can make a guess that we're going to need to do the sync and
+then do it without holding the mutex. We still do one last sync with
+the mutex but it should be much, much faster.
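+
+As a rough illustration of the idea (a simplified sketch, not the actual
+fs/block_dev.c code; the field and function names follow the 4.x block
+layer, but the body is abbreviated):
+
+  #include <linux/fs.h>
+  #include <linux/mutex.h>
+
+  /* Opportunistic flush outside bd_mutex, then a short re-sync inside. */
+  static void example_blkdev_put(struct block_device *bdev, int for_part)
+  {
+          /* Guess that we are the last opener; a wrong guess only costs
+           * an extra early sync, it never skips the locked one below. */
+          if (bdev->bd_openers == 1)
+                  sync_blockdev(bdev);
+
+          mutex_lock_nested(&bdev->bd_mutex, for_part);
+          if (!--bdev->bd_openers)
+                  sync_blockdev(bdev);    /* mostly clean already: fast */
+          mutex_unlock(&bdev->bd_mutex);
+  }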
+
+With this, my hung task warnings for my test case are gone.
+
+Signed-off-by: Douglas Anderson <dianders@chromium.org>
+Reviewed-by: Guenter Roeck <groeck@chromium.org>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/block_dev.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/fs/block_dev.c b/fs/block_dev.c
+index 8ac8f7469354b..9f3faac490259 100644
+--- a/fs/block_dev.c
++++ b/fs/block_dev.c
+@@ -1793,6 +1793,16 @@ static void __blkdev_put(struct block_device *bdev, fmode_t mode, int for_part)
+ struct gendisk *disk = bdev->bd_disk;
+ struct block_device *victim = NULL;
+
++ /*
++ * Sync early if it looks like we're the last one. If someone else
++ * opens the block device between now and the decrement of bd_openers
++ * then we did a sync that we didn't need to, but that's not the end
++ * of the world and we want to avoid long (could be several minute)
++ * syncs while holding the mutex.
++ */
++ if (bdev->bd_openers == 1)
++ sync_blockdev(bdev);
++
+ mutex_lock_nested(&bdev->bd_mutex, for_part);
+ if (for_part)
+ bdev->bd_part_count--;
+--
+2.25.1
+
--- /dev/null
+From 16cddef6dc59337039c036aa5935ff569765ac5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Jan 2020 19:15:35 +0200
+Subject: Bluetooth: btrtl: Use kvmalloc for FW allocations
+
+From: Maxim Mikityanskiy <maxtram95@gmail.com>
+
+[ Upstream commit 268d3636dfb22254324774de1f8875174b3be064 ]
+
+Currently, kmemdup is applied to the firmware data, and it invokes
+kmalloc under the hood. The firmware size and patch_length are big (more
+than PAGE_SIZE), and on some low-end systems (like ASUS E202SA) kmalloc
+may fail to allocate a contiguous chunk under high memory usage and
+fragmentation:
+
+Bluetooth: hci0: RTL: examining hci_ver=06 hci_rev=000a lmp_ver=06 lmp_subver=8821
+Bluetooth: hci0: RTL: rom_version status=0 version=1
+Bluetooth: hci0: RTL: loading rtl_bt/rtl8821a_fw.bin
+kworker/u9:2: page allocation failure: order:4, mode:0x40cc0(GFP_KERNEL|__GFP_COMP), nodemask=(null),cpuset=/,mems_allowed=0
+<stack trace follows>
+
+As firmware load happens on each resume, Bluetooth will stop working
+after several iterations, when the kernel fails to allocate an order-4
+page.
+
+This patch replaces kmemdup with kvmalloc+memcpy. It's not required to
+have a contiguous chunk here, because it's not mapped to the device
+directly.
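+
+A minimal sketch of the replacement pattern (kvdup_example() is a
+hypothetical helper, not part of the btrtl driver; it assumes the buffer
+is only ever touched by the CPU, so a vmalloc-backed region is fine):
+
+  #include <linux/mm.h>
+  #include <linux/string.h>
+
+  static void *kvdup_example(const void *src, size_t len)
+  {
+          /* Physically contiguous if possible, vmalloc fallback if not. */
+          void *buf = kvmalloc(len, GFP_KERNEL);
+
+          if (buf)
+                  memcpy(buf, src, len);
+          return buf;             /* must be released with kvfree() */
+  }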
+
+Signed-off-by: Maxim Mikityanskiy <maxtram95@gmail.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bluetooth/btrtl.c | 20 +++++++++++---------
+ 1 file changed, 11 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
+index 8d1cd2479e36f..cc51395d8b0e5 100644
+--- a/drivers/bluetooth/btrtl.c
++++ b/drivers/bluetooth/btrtl.c
+@@ -343,11 +343,11 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev,
+ * the end.
+ */
+ len = patch_length;
+- buf = kmemdup(btrtl_dev->fw_data + patch_offset, patch_length,
+- GFP_KERNEL);
++ buf = kvmalloc(patch_length, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
++ memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4);
+ memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
+
+ *_buf = buf;
+@@ -415,8 +415,10 @@ static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff)
+ if (ret < 0)
+ return ret;
+ ret = fw->size;
+- *buff = kmemdup(fw->data, ret, GFP_KERNEL);
+- if (!*buff)
++ *buff = kvmalloc(fw->size, GFP_KERNEL);
++ if (*buff)
++ memcpy(*buff, fw->data, ret);
++ else
+ ret = -ENOMEM;
+
+ release_firmware(fw);
+@@ -454,14 +456,14 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
+ goto out;
+
+ if (btrtl_dev->cfg_len > 0) {
+- tbuff = kzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
++ tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL);
+ if (!tbuff) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ memcpy(tbuff, fw_data, ret);
+- kfree(fw_data);
++ kvfree(fw_data);
+
+ memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len);
+ ret += btrtl_dev->cfg_len;
+@@ -474,7 +476,7 @@ static int btrtl_setup_rtl8723b(struct hci_dev *hdev,
+ ret = rtl_download_firmware(hdev, fw_data, ret);
+
+ out:
+- kfree(fw_data);
++ kvfree(fw_data);
+ return ret;
+ }
+
+@@ -501,8 +503,8 @@ static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev)
+
+ void btrtl_free(struct btrtl_device_info *btrtl_dev)
+ {
+- kfree(btrtl_dev->fw_data);
+- kfree(btrtl_dev->cfg_data);
++ kvfree(btrtl_dev->fw_data);
++ kvfree(btrtl_dev->cfg_data);
+ kfree(btrtl_dev);
+ }
+ EXPORT_SYMBOL_GPL(btrtl_free);
+--
+2.25.1
+
--- /dev/null
+From 606ef73c6e09013e3bfe7b479da70f6f28a3d783 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jan 2020 10:54:14 -0800
+Subject: Bluetooth: Fix refcount use-after-free issue
+
+From: Manish Mandlik <mmandlik@google.com>
+
+[ Upstream commit 6c08fc896b60893c5d673764b0668015d76df462 ]
+
+There is no lock preventing both l2cap_sock_release() and
+chan->ops->close() from running at the same time.
+
+If we consider Thread A running l2cap_chan_timeout() and Thread B running
+l2cap_sock_release(), expected behavior is:
+ A::l2cap_chan_timeout()->l2cap_chan_close()->l2cap_sock_teardown_cb()
+ A::l2cap_chan_timeout()->l2cap_sock_close_cb()->l2cap_sock_kill()
+ B::l2cap_sock_release()->sock_orphan()
+ B::l2cap_sock_release()->l2cap_sock_kill()
+
+where,
+sock_orphan() clears "sk->sk_socket" and l2cap_sock_teardown_cb() marks
+socket as SOCK_ZAPPED.
+
+In l2cap_sock_kill(), there is an "if-statement" that checks if both
+sock_orphan() and sock_teardown() have been run, i.e. sk->sk_socket is NULL
+and the socket is marked as SOCK_ZAPPED. The socket is killed if the
+condition is satisfied.
+
+In the race condition, following occurs:
+ A::l2cap_chan_timeout()->l2cap_chan_close()->l2cap_sock_teardown_cb()
+ B::l2cap_sock_release()->sock_orphan()
+ B::l2cap_sock_release()->l2cap_sock_kill()
+ A::l2cap_chan_timeout()->l2cap_sock_close_cb()->l2cap_sock_kill()
+
+In this scenario, the "if-statement" is true in both B::l2cap_sock_kill() and
+A::l2cap_sock_kill(), and we hit the "refcount: underflow; use-after-free" bug.
+
+Similar condition occurs at other places where teardown/sock_kill is
+happening:
+ l2cap_disconnect_rsp()->l2cap_chan_del()->l2cap_sock_teardown_cb()
+ l2cap_disconnect_rsp()->l2cap_sock_close_cb()->l2cap_sock_kill()
+
+ l2cap_conn_del()->l2cap_chan_del()->l2cap_sock_teardown_cb()
+ l2cap_conn_del()->l2cap_sock_close_cb()->l2cap_sock_kill()
+
+ l2cap_disconnect_req()->l2cap_chan_del()->l2cap_sock_teardown_cb()
+ l2cap_disconnect_req()->l2cap_sock_close_cb()->l2cap_sock_kill()
+
+ l2cap_sock_cleanup_listen()->l2cap_chan_close()->l2cap_sock_teardown_cb()
+ l2cap_sock_cleanup_listen()->l2cap_sock_kill()
+
+Protect teardown/sock_kill and orphan/sock_kill by taking a hold and lock
+on the l2cap channel, to ensure that the socket is killed only after it has
+been marked as zapped and orphaned.
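+
+A condensed sketch of that pattern applied to the release path (simplified
+and illustrative only; the real changes below also cover the timeout,
+disconnect and cleanup paths):
+
+  static int example_sock_release(struct socket *sock)
+  {
+          struct sock *sk = sock->sk;
+          struct l2cap_chan *chan = l2cap_pi(sk)->chan;
+
+          l2cap_chan_hold(chan);          /* keep chan alive across kill */
+          l2cap_chan_lock(chan);
+
+          sock_orphan(sk);
+          l2cap_sock_kill(sk);            /* frees only if zapped+orphan */
+
+          l2cap_chan_unlock(chan);
+          l2cap_chan_put(chan);
+          return 0;
+  }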
+
+Signed-off-by: Manish Mandlik <mmandlik@google.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/l2cap_core.c | 26 +++++++++++++++-----------
+ net/bluetooth/l2cap_sock.c | 16 +++++++++++++---
+ 2 files changed, 28 insertions(+), 14 deletions(-)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index 0d84d1f820d4c..b1f51cb007ea6 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -414,6 +414,9 @@ static void l2cap_chan_timeout(struct work_struct *work)
+ BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
+
+ mutex_lock(&conn->chan_lock);
++ /* __set_chan_timer() calls l2cap_chan_hold(chan) while scheduling
++ * this work. No need to call l2cap_chan_hold(chan) here again.
++ */
+ l2cap_chan_lock(chan);
+
+ if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
+@@ -426,12 +429,12 @@ static void l2cap_chan_timeout(struct work_struct *work)
+
+ l2cap_chan_close(chan, reason);
+
+- l2cap_chan_unlock(chan);
+-
+ chan->ops->close(chan);
+- mutex_unlock(&conn->chan_lock);
+
++ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
++
++ mutex_unlock(&conn->chan_lock);
+ }
+
+ struct l2cap_chan *l2cap_chan_create(void)
+@@ -1725,9 +1728,9 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
+
+ l2cap_chan_del(chan, err);
+
+- l2cap_chan_unlock(chan);
+-
+ chan->ops->close(chan);
++
++ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+ }
+
+@@ -4337,6 +4340,7 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+ return 0;
+ }
+
++ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
+
+ rsp.dcid = cpu_to_le16(chan->scid);
+@@ -4345,12 +4349,11 @@ static inline int l2cap_disconnect_req(struct l2cap_conn *conn,
+
+ chan->ops->set_shutdown(chan);
+
+- l2cap_chan_hold(chan);
+ l2cap_chan_del(chan, ECONNRESET);
+
+- l2cap_chan_unlock(chan);
+-
+ chan->ops->close(chan);
++
++ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+
+ mutex_unlock(&conn->chan_lock);
+@@ -4382,20 +4385,21 @@ static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn,
+ return 0;
+ }
+
++ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
+
+ if (chan->state != BT_DISCONN) {
+ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ mutex_unlock(&conn->chan_lock);
+ return 0;
+ }
+
+- l2cap_chan_hold(chan);
+ l2cap_chan_del(chan, 0);
+
+- l2cap_chan_unlock(chan);
+-
+ chan->ops->close(chan);
++
++ l2cap_chan_unlock(chan);
+ l2cap_chan_put(chan);
+
+ mutex_unlock(&conn->chan_lock);
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index a3a2cd55e23a9..d128750e47305 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1039,7 +1039,7 @@ done:
+ }
+
+ /* Kill socket (only if zapped and orphan)
+- * Must be called on unlocked socket.
++ * Must be called on unlocked socket, with l2cap channel lock.
+ */
+ static void l2cap_sock_kill(struct sock *sk)
+ {
+@@ -1200,8 +1200,15 @@ static int l2cap_sock_release(struct socket *sock)
+
+ err = l2cap_sock_shutdown(sock, 2);
+
++ l2cap_chan_hold(l2cap_pi(sk)->chan);
++ l2cap_chan_lock(l2cap_pi(sk)->chan);
++
+ sock_orphan(sk);
+ l2cap_sock_kill(sk);
++
++ l2cap_chan_unlock(l2cap_pi(sk)->chan);
++ l2cap_chan_put(l2cap_pi(sk)->chan);
++
+ return err;
+ }
+
+@@ -1219,12 +1226,15 @@ static void l2cap_sock_cleanup_listen(struct sock *parent)
+ BT_DBG("child chan %p state %s", chan,
+ state_to_string(chan->state));
+
++ l2cap_chan_hold(chan);
+ l2cap_chan_lock(chan);
++
+ __clear_chan_timer(chan);
+ l2cap_chan_close(chan, ECONNRESET);
+- l2cap_chan_unlock(chan);
+-
+ l2cap_sock_kill(sk);
++
++ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+ }
+ }
+
+--
+2.25.1
+
--- /dev/null
+From b7e9d10fe7428e67007f78defe1bfad0bbae9469 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2020 15:55:34 +0000
+Subject: Bluetooth: guard against controllers sending zero'd events
+
+From: Alain Michaud <alainm@chromium.org>
+
+[ Upstream commit 08bb4da90150e2a225f35e0f642cdc463958d696 ]
+
+Some controllers have been observed to send zero'd events under some
+conditions. This change guards against that case, and adds a trace to make
+it easier to diagnose.
+
+Signed-off-by: Alain Michaud <alainm@chromium.org>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_event.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index 2b4a7cf03041b..ec6b3a87b3e7f 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -5738,6 +5738,11 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ u8 status = 0, event = hdr->evt, req_evt = 0;
+ u16 opcode = HCI_OP_NOP;
+
++ if (!event) {
++ bt_dev_warn(hdev, "Received unexpected HCI Event 00000000");
++ goto done;
++ }
++
+ if (hdev->sent_cmd && bt_cb(hdev->sent_cmd)->hci.req_event == event) {
+ struct hci_command_hdr *cmd_hdr = (void *) hdev->sent_cmd->data;
+ opcode = __le16_to_cpu(cmd_hdr->opcode);
+@@ -5949,6 +5954,7 @@ void hci_event_packet(struct hci_dev *hdev, struct sk_buff *skb)
+ req_complete_skb(hdev, status, opcode, orig_skb);
+ }
+
++done:
+ kfree_skb(orig_skb);
+ kfree_skb(skb);
+ hdev->stat.evt_rx++;
+--
+2.25.1
+
--- /dev/null
+From 373b2f476135ad8cba18226b8bd5469fdc467444 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 May 2020 12:55:03 -0700
+Subject: Bluetooth: Handle Inquiry Cancel error after Inquiry Complete
+
+From: Sonny Sasaka <sonnysasaka@chromium.org>
+
+[ Upstream commit adf1d6926444029396861413aba8a0f2a805742a ]
+
+After sending the Inquiry Cancel command to the controller, it is possible
+that the Inquiry Complete event comes before the Inquiry Cancel command
+complete event. In this case the Inquiry Cancel command will have a status
+of Command Disallowed, since there is no Inquiry session to be cancelled.
+This case should not be treated as an error, otherwise we can reach an
+inconsistent state.
+
+Example of a btmon trace when this happened:
+
+< HCI Command: Inquiry Cancel (0x01|0x0002) plen 0
+> HCI Event: Inquiry Complete (0x01) plen 1
+ Status: Success (0x00)
+> HCI Event: Command Complete (0x0e) plen 4
+ Inquiry Cancel (0x01|0x0002) ncmd 1
+ Status: Command Disallowed (0x0c)
+
+Signed-off-by: Sonny Sasaka <sonnysasaka@chromium.org>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/hci_event.c | 19 +++++++++++++++++--
+ 1 file changed, 17 insertions(+), 2 deletions(-)
+
+diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
+index ec6b3a87b3e7f..310622086f74b 100644
+--- a/net/bluetooth/hci_event.c
++++ b/net/bluetooth/hci_event.c
+@@ -41,12 +41,27 @@
+
+ /* Handle HCI Event packets */
+
+-static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
++static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb,
++ u8 *new_status)
+ {
+ __u8 status = *((__u8 *) skb->data);
+
+ BT_DBG("%s status 0x%2.2x", hdev->name, status);
+
++ /* It is possible that we receive Inquiry Complete event right
++ * before we receive Inquiry Cancel Command Complete event, in
++ * which case the latter event should have status of Command
++ * Disallowed (0x0c). This should not be treated as error, since
++ * we actually achieve what Inquiry Cancel wants to achieve,
++ * which is to end the last Inquiry session.
++ */
++ if (status == 0x0c && !test_bit(HCI_INQUIRY, &hdev->flags)) {
++ bt_dev_warn(hdev, "Ignoring error of Inquiry Cancel command");
++ status = 0x00;
++ }
++
++ *new_status = status;
++
+ if (status)
+ return;
+
+@@ -3039,7 +3054,7 @@ static void hci_cmd_complete_evt(struct hci_dev *hdev, struct sk_buff *skb,
+
+ switch (*opcode) {
+ case HCI_OP_INQUIRY_CANCEL:
+- hci_cc_inquiry_cancel(hdev, skb);
++ hci_cc_inquiry_cancel(hdev, skb, status);
+ break;
+
+ case HCI_OP_PERIODIC_INQ:
+--
+2.25.1
+
--- /dev/null
+From f61bc37e59fcccecc4de402a33a246cceb0b36cf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Mar 2020 12:35:27 +0800
+Subject: Bluetooth: L2CAP: handle l2cap config request during open state
+
+From: Howard Chung <howardchung@google.com>
+
+[ Upstream commit 96298f640104e4cd9a913a6e50b0b981829b94ff ]
+
+According to Core Spec Version 5.2 | Vol 3, Part A 6.1.5,
+the incoming L2CAP_ConfigReq should be handled during
+OPEN state.
+
+The section below shows the btmon trace when running
+L2CAP/COS/CFD/BV-12-C before and after this change.
+
+=== Before ===
+...
+> ACL Data RX: Handle 256 flags 0x02 dlen 12 #22
+ L2CAP: Connection Request (0x02) ident 2 len 4
+ PSM: 1 (0x0001)
+ Source CID: 65
+< ACL Data TX: Handle 256 flags 0x00 dlen 16 #23
+ L2CAP: Connection Response (0x03) ident 2 len 8
+ Destination CID: 64
+ Source CID: 65
+ Result: Connection successful (0x0000)
+ Status: No further information available (0x0000)
+< ACL Data TX: Handle 256 flags 0x00 dlen 12 #24
+ L2CAP: Configure Request (0x04) ident 2 len 4
+ Destination CID: 65
+ Flags: 0x0000
+> HCI Event: Number of Completed Packets (0x13) plen 5 #25
+ Num handles: 1
+ Handle: 256
+ Count: 1
+> HCI Event: Number of Completed Packets (0x13) plen 5 #26
+ Num handles: 1
+ Handle: 256
+ Count: 1
+> ACL Data RX: Handle 256 flags 0x02 dlen 16 #27
+ L2CAP: Configure Request (0x04) ident 3 len 8
+ Destination CID: 64
+ Flags: 0x0000
+ Option: Unknown (0x10) [hint]
+ 01 00 ..
+< ACL Data TX: Handle 256 flags 0x00 dlen 18 #28
+ L2CAP: Configure Response (0x05) ident 3 len 10
+ Source CID: 65
+ Flags: 0x0000
+ Result: Success (0x0000)
+ Option: Maximum Transmission Unit (0x01) [mandatory]
+ MTU: 672
+> HCI Event: Number of Completed Packets (0x13) plen 5 #29
+ Num handles: 1
+ Handle: 256
+ Count: 1
+> ACL Data RX: Handle 256 flags 0x02 dlen 14 #30
+ L2CAP: Configure Response (0x05) ident 2 len 6
+ Source CID: 64
+ Flags: 0x0000
+ Result: Success (0x0000)
+> ACL Data RX: Handle 256 flags 0x02 dlen 20 #31
+ L2CAP: Configure Request (0x04) ident 3 len 12
+ Destination CID: 64
+ Flags: 0x0000
+ Option: Unknown (0x10) [hint]
+ 01 00 91 02 11 11 ......
+< ACL Data TX: Handle 256 flags 0x00 dlen 14 #32
+ L2CAP: Command Reject (0x01) ident 3 len 6
+ Reason: Invalid CID in request (0x0002)
+ Destination CID: 64
+ Source CID: 65
+> HCI Event: Number of Completed Packets (0x13) plen 5 #33
+ Num handles: 1
+ Handle: 256
+ Count: 1
+...
+=== After ===
+...
+> ACL Data RX: Handle 256 flags 0x02 dlen 12 #22
+ L2CAP: Connection Request (0x02) ident 2 len 4
+ PSM: 1 (0x0001)
+ Source CID: 65
+< ACL Data TX: Handle 256 flags 0x00 dlen 16 #23
+ L2CAP: Connection Response (0x03) ident 2 len 8
+ Destination CID: 64
+ Source CID: 65
+ Result: Connection successful (0x0000)
+ Status: No further information available (0x0000)
+< ACL Data TX: Handle 256 flags 0x00 dlen 12 #24
+ L2CAP: Configure Request (0x04) ident 2 len 4
+ Destination CID: 65
+ Flags: 0x0000
+> HCI Event: Number of Completed Packets (0x13) plen 5 #25
+ Num handles: 1
+ Handle: 256
+ Count: 1
+> HCI Event: Number of Completed Packets (0x13) plen 5 #26
+ Num handles: 1
+ Handle: 256
+ Count: 1
+> ACL Data RX: Handle 256 flags 0x02 dlen 16 #27
+ L2CAP: Configure Request (0x04) ident 3 len 8
+ Destination CID: 64
+ Flags: 0x0000
+ Option: Unknown (0x10) [hint]
+ 01 00 ..
+< ACL Data TX: Handle 256 flags 0x00 dlen 18 #28
+ L2CAP: Configure Response (0x05) ident 3 len 10
+ Source CID: 65
+ Flags: 0x0000
+ Result: Success (0x0000)
+ Option: Maximum Transmission Unit (0x01) [mandatory]
+ MTU: 672
+> HCI Event: Number of Completed Packets (0x13) plen 5 #29
+ Num handles: 1
+ Handle: 256
+ Count: 1
+> ACL Data RX: Handle 256 flags 0x02 dlen 14 #30
+ L2CAP: Configure Response (0x05) ident 2 len 6
+ Source CID: 64
+ Flags: 0x0000
+ Result: Success (0x0000)
+> ACL Data RX: Handle 256 flags 0x02 dlen 20 #31
+ L2CAP: Configure Request (0x04) ident 3 len 12
+ Destination CID: 64
+ Flags: 0x0000
+ Option: Unknown (0x10) [hint]
+ 01 00 91 02 11 11 .....
+< ACL Data TX: Handle 256 flags 0x00 dlen 18 #32
+ L2CAP: Configure Response (0x05) ident 3 len 10
+ Source CID: 65
+ Flags: 0x0000
+ Result: Success (0x0000)
+ Option: Maximum Transmission Unit (0x01) [mandatory]
+ MTU: 672
+< ACL Data TX: Handle 256 flags 0x00 dlen 12 #33
+ L2CAP: Configure Request (0x04) ident 3 len 4
+ Destination CID: 65
+ Flags: 0x0000
+> HCI Event: Number of Completed Packets (0x13) plen 5 #34
+ Num handles: 1
+ Handle: 256
+ Count: 1
+> HCI Event: Number of Completed Packets (0x13) plen 5 #35
+ Num handles: 1
+ Handle: 256
+ Count: 1
+...
+
+Signed-off-by: Howard Chung <howardchung@google.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/l2cap_core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
+index b1f51cb007ea6..c04107d446016 100644
+--- a/net/bluetooth/l2cap_core.c
++++ b/net/bluetooth/l2cap_core.c
+@@ -4117,7 +4117,8 @@ static inline int l2cap_config_req(struct l2cap_conn *conn,
+ return 0;
+ }
+
+- if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
++ if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2 &&
++ chan->state != BT_CONNECTED) {
+ cmd_reject_invalid_cid(conn, cmd->ident, chan->scid,
+ chan->dcid);
+ goto unlock;
+--
+2.25.1
+
--- /dev/null
+From 3bfef432cd06a2e850b3bd3e9e7ef003c04f955c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2020 10:31:59 +0800
+Subject: Bluetooth: prefetch channel before killing sock
+
+From: Hillf Danton <hdanton@sina.com>
+
+[ Upstream commit 2a154903cec20fb64ff4d7d617ca53c16f8fd53a ]
+
+Prefetch channel before killing sock in order to fix UAF like
+
+ BUG: KASAN: use-after-free in l2cap_sock_release+0x24c/0x290 net/bluetooth/l2cap_sock.c:1212
+ Read of size 8 at addr ffff8880944904a0 by task syz-fuzzer/9751
+
+Reported-by: syzbot+c3c5bdea7863886115dc@syzkaller.appspotmail.com
+Fixes: 6c08fc896b60 ("Bluetooth: Fix refcount use-after-free issue")
+Cc: Manish Mandlik <mmandlik@google.com>
+Signed-off-by: Hillf Danton <hdanton@sina.com>
+Signed-off-by: Marcel Holtmann <marcel@holtmann.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/bluetooth/l2cap_sock.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
+index d128750e47305..5572042f04531 100644
+--- a/net/bluetooth/l2cap_sock.c
++++ b/net/bluetooth/l2cap_sock.c
+@@ -1190,6 +1190,7 @@ static int l2cap_sock_release(struct socket *sock)
+ {
+ struct sock *sk = sock->sk;
+ int err;
++ struct l2cap_chan *chan;
+
+ BT_DBG("sock %p, sk %p", sock, sk);
+
+@@ -1199,15 +1200,16 @@ static int l2cap_sock_release(struct socket *sock)
+ bt_sock_unlink(&l2cap_sk_list, sk);
+
+ err = l2cap_sock_shutdown(sock, 2);
++ chan = l2cap_pi(sk)->chan;
+
+- l2cap_chan_hold(l2cap_pi(sk)->chan);
+- l2cap_chan_lock(l2cap_pi(sk)->chan);
++ l2cap_chan_hold(chan);
++ l2cap_chan_lock(chan);
+
+ sock_orphan(sk);
+ l2cap_sock_kill(sk);
+
+- l2cap_chan_unlock(l2cap_pi(sk)->chan);
+- l2cap_chan_put(l2cap_pi(sk)->chan);
++ l2cap_chan_unlock(chan);
++ l2cap_chan_put(chan);
+
+ return err;
+ }
+--
+2.25.1
+
--- /dev/null
+From 3743b28fcdae11a71c48b05795db47162b298429 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2020 15:01:39 +0100
+Subject: bpf: Remove recursion prevention from rcu free callback
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit 8a37963c7ac9ecb7f86f8ebda020e3f8d6d7b8a0 ]
+
+If an element is freed via RCU then recursion into BPF instrumentation
+functions is not a concern. The element is already detached from the map
+and the RCU callback does not hold any locks on which a kprobe, perf event
+or tracepoint attached BPF program could deadlock.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Link: https://lore.kernel.org/bpf/20200224145643.259118710@linutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/hashtab.c | 8 --------
+ 1 file changed, 8 deletions(-)
+
+diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
+index 1b28fb006763a..3f3ed33bd2fdc 100644
+--- a/kernel/bpf/hashtab.c
++++ b/kernel/bpf/hashtab.c
+@@ -667,15 +667,7 @@ static void htab_elem_free_rcu(struct rcu_head *head)
+ struct htab_elem *l = container_of(head, struct htab_elem, rcu);
+ struct bpf_htab *htab = l->htab;
+
+- /* must increment bpf_prog_active to avoid kprobe+bpf triggering while
+- * we're calling kfree, otherwise deadlock is possible if kprobes
+- * are placed somewhere inside of slub
+- */
+- preempt_disable();
+- __this_cpu_inc(bpf_prog_active);
+ htab_elem_free(htab, l);
+- __this_cpu_dec(bpf_prog_active);
+- preempt_enable();
+ }
+
+ static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
+--
+2.25.1
+
--- /dev/null
+From 0e7becfbb66a1281dc51840d82587d52dffaf958 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Feb 2020 15:05:53 +0100
+Subject: btrfs: don't force read-only after error in drop snapshot
+
+From: David Sterba <dsterba@suse.com>
+
+[ Upstream commit 7c09c03091ac562ddca2b393e5d65c1d37da79f1 ]
+
+Deleting a subvolume on a full filesystem leads to ENOSPC followed by a
+forced read-only. This is not a transaction abort and the filesystem is
+otherwise ok, so the error should be just propagated to the callers.
+
+This is caused by unnecessary call to btrfs_handle_fs_error for all
+errors, except EAGAIN. This does not make sense as the standard
+transaction abort mechanism is in btrfs_drop_snapshot so all relevant
+failures are handled.
+
+Originally in commit cb1b69f4508a ("Btrfs: forced readonly when
+btrfs_drop_snapshot() fails") there was no return value at all, so the
+btrfs_std_error made some sense, but once the error handling and
+propagation had been implemented we no longer needed it.
+
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/extent-tree.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 319a89d4d0735..ce5e0f6c6af4f 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -9098,8 +9098,6 @@ out:
+ */
+ if (!for_reloc && !root_dropped)
+ btrfs_add_dead_root(root);
+- if (err && err != -EAGAIN)
+- btrfs_handle_fs_error(fs_info, err, NULL);
+ return err;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From e1dab71e0a44fc4d05471fc96f02e0302bfd24eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jul 2020 15:12:05 +0800
+Subject: btrfs: qgroup: fix data leak caused by race between writeback and
+ truncate
+
+From: Qu Wenruo <wqu@suse.com>
+
+[ Upstream commit fa91e4aa1716004ea8096d5185ec0451e206aea0 ]
+
+[BUG]
+When running tests like generic/013 on test device with btrfs quota
+enabled, it can normally lead to data leak, detected at unmount time:
+
+ BTRFS warning (device dm-3): qgroup 0/5 has unreleased space, type 0 rsv 4096
+ ------------[ cut here ]------------
+ WARNING: CPU: 11 PID: 16386 at fs/btrfs/disk-io.c:4142 close_ctree+0x1dc/0x323 [btrfs]
+ RIP: 0010:close_ctree+0x1dc/0x323 [btrfs]
+ Call Trace:
+ btrfs_put_super+0x15/0x17 [btrfs]
+ generic_shutdown_super+0x72/0x110
+ kill_anon_super+0x18/0x30
+ btrfs_kill_super+0x17/0x30 [btrfs]
+ deactivate_locked_super+0x3b/0xa0
+ deactivate_super+0x40/0x50
+ cleanup_mnt+0x135/0x190
+ __cleanup_mnt+0x12/0x20
+ task_work_run+0x64/0xb0
+ __prepare_exit_to_usermode+0x1bc/0x1c0
+ __syscall_return_slowpath+0x47/0x230
+ do_syscall_64+0x64/0xb0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ ---[ end trace caf08beafeca2392 ]---
+ BTRFS error (device dm-3): qgroup reserved space leaked
+
+[CAUSE]
+In the offending case, the offending operations are:
+2/6: writev f2X[269 1 0 0 0 0] [1006997,67,288] 0
+2/7: truncate f2X[269 1 0 0 48 1026293] 18388 0
+
+The following sequence of events could happen after the writev():
+ CPU1 (writeback) | CPU2 (truncate)
+-----------------------------------------------------------------
+btrfs_writepages() |
+|- extent_write_cache_pages() |
+ |- Got page for 1003520 |
+ | 1003520 is Dirty, no writeback |
+ | So (!clear_page_dirty_for_io()) |
+ | gets called for it |
+ |- Now page 1003520 is Clean. |
+ | | btrfs_setattr()
+ | | |- btrfs_setsize()
+ | | |- truncate_setsize()
+ | | New i_size is 18388
+ |- __extent_writepage() |
+ | |- page_offset() > i_size |
+ |- btrfs_invalidatepage() |
+ |- Page is clean, so no qgroup |
+ callback executed
+
+This means, the qgroup reserved data space is not properly released in
+btrfs_invalidatepage() as the page is Clean.
+
+[FIX]
+Instead of checking the dirty bit of a page, call
+btrfs_qgroup_free_data() unconditionally in btrfs_invalidatepage().
+
+Since qgroup reservations are completely bound to the QGROUP_RESERVED bit
+of the io_tree, not to the page status, this won't cause double freeing
+anyway.
+
+Fixes: 0b34c261e235 ("btrfs: qgroup: Prevent qgroup->reserved from going subzero")
+CC: stable@vger.kernel.org # 4.14+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Qu Wenruo <wqu@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/inode.c | 23 ++++++++++-------------
+ 1 file changed, 10 insertions(+), 13 deletions(-)
+
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index bdfe159a60da6..64d459ca76d06 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -8913,20 +8913,17 @@ again:
+ /*
+ * Qgroup reserved space handler
+ * Page here will be either
+- * 1) Already written to disk
+- * In this case, its reserved space is released from data rsv map
+- * and will be freed by delayed_ref handler finally.
+- * So even we call qgroup_free_data(), it won't decrease reserved
+- * space.
+- * 2) Not written to disk
+- * This means the reserved space should be freed here. However,
+- * if a truncate invalidates the page (by clearing PageDirty)
+- * and the page is accounted for while allocating extent
+- * in btrfs_check_data_free_space() we let delayed_ref to
+- * free the entire extent.
++ * 1) Already written to disk or ordered extent already submitted
++ * Then its QGROUP_RESERVED bit in io_tree is already cleaned.
++ * Qgroup will be handled by its qgroup_record then.
++ * btrfs_qgroup_free_data() call will do nothing here.
++ *
++ * 2) Not written to disk yet
++ * Then btrfs_qgroup_free_data() call will clear the QGROUP_RESERVED
++ * bit of its io_tree, and free the qgroup reserved data space.
++ * Since the IO will never happen for this page.
+ */
+- if (PageDirty(page))
+- btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
++ btrfs_qgroup_free_data(inode, NULL, page_start, PAGE_SIZE);
+ if (!inode_evicting) {
+ clear_extent_bit(tree, page_start, page_end,
+ EXTENT_LOCKED | EXTENT_DIRTY |
+--
+2.25.1
+
--- /dev/null
+From a7a8f35ad4620234de76f5137bd420bbd779f2c2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Jan 2020 02:48:34 +0800
+Subject: bus: hisi_lpc: Fixup IO ports addresses to avoid use-after-free in
+ host removal
+
+From: John Garry <john.garry@huawei.com>
+
+[ Upstream commit a6dd255bdd7d00bbdbf78ba00bde9fc64f86c3a7 ]
+
+Some released ACPI FW for Huawei boards describes the port IO address
+range for child devices incorrectly, in that it tells us the IO port max
+range is 0x3fff for each child device, which is not correct. The address range
+should be [e4:e8) or similar. With this incorrect upper range, the child
+device IO port resources overlap.
+
+As such, the kernel thinks that the LPC host serial device is a child of
+the IPMI device:
+
+root@(none)$ more /proc/ioports
+[...]
+00ffc0e3-00ffffff : hisi-lpc-ipmi.0.auto
+ 00ffc0e3-00ffc0e3 : ipmi_si
+ 00ffc0e4-00ffc0e4 : ipmi_si
+ 00ffc0e5-00ffc0e5 : ipmi_si
+ 00ffc2f7-00ffffff : serial8250.1.auto
+ 00ffc2f7-00ffc2fe : serial
+root@(none)$
+
+They should both be siblings. Note that these are logical PIO addresses,
+which have a direct mapping from the FW IO port ranges.
+
+This shows up as a real issue when we enable CONFIG_KASAN and
+CONFIG_DEBUG_TEST_DRIVER_REMOVE - we see use-after-free warnings in the
+host removal path:
+
+==================================================================
+BUG: KASAN: use-after-free in release_resource+0x38/0xc8
+Read of size 8 at addr ffff0026accdbc38 by task swapper/0/1
+
+CPU: 2 PID: 1 Comm: swapper/0 Not tainted 5.5.0-rc6-00001-g68e186e77b5c-dirty #1593
+Hardware name: Huawei Taishan 2180 /D03, BIOS Hisilicon D03 IT20 Nemo 2.0 RC0 03/30/2018
+Call trace:
+dump_backtrace+0x0/0x290
+show_stack+0x14/0x20
+dump_stack+0xf0/0x14c
+print_address_description.isra.9+0x6c/0x3b8
+__kasan_report+0x12c/0x23c
+kasan_report+0xc/0x18
+__asan_load8+0x94/0xb8
+release_resource+0x38/0xc8
+platform_device_del.part.10+0x80/0xe0
+platform_device_unregister+0x20/0x38
+hisi_lpc_acpi_remove_subdev+0x10/0x20
+device_for_each_child+0xc8/0x128
+hisi_lpc_acpi_remove+0x4c/0xa8
+hisi_lpc_remove+0xbc/0xc0
+platform_drv_remove+0x3c/0x68
+really_probe+0x174/0x548
+driver_probe_device+0x7c/0x148
+device_driver_attach+0x94/0xa0
+__driver_attach+0xa4/0x110
+bus_for_each_dev+0xe8/0x158
+driver_attach+0x30/0x40
+bus_add_driver+0x234/0x2f0
+driver_register+0xbc/0x1d0
+__platform_driver_register+0x7c/0x88
+hisi_lpc_driver_init+0x18/0x20
+do_one_initcall+0xb4/0x258
+kernel_init_freeable+0x248/0x2c0
+kernel_init+0x10/0x118
+ret_from_fork+0x10/0x1c
+
+...
+
+The issue here is that the kernel created an incorrect parent-child
+resource dependency between two devices, and references the bogus parent
+node when deleting the second child device, even though that parent had
+already been deleted.
+
+Fix up the child device resources from FW to create proper IO port
+resource relationships for broken FW.
+
+With this, the IO port layout looks more healthy:
+
+root@(none)$ more /proc/ioports
+[...]
+00ffc0e3-00ffc0e7 : hisi-lpc-ipmi.0.auto
+ 00ffc0e3-00ffc0e3 : ipmi_si
+ 00ffc0e4-00ffc0e4 : ipmi_si
+ 00ffc0e5-00ffc0e5 : ipmi_si
+00ffc2f7-00ffc2ff : serial8250.1.auto
+ 00ffc2f7-00ffc2fe : serial
+
+Signed-off-by: John Garry <john.garry@huawei.com>
+Signed-off-by: Wei Xu <xuwei5@hisilicon.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/bus/hisi_lpc.c | 27 +++++++++++++++++++++++++--
+ 1 file changed, 25 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/bus/hisi_lpc.c b/drivers/bus/hisi_lpc.c
+index e31c02dc77709..cbd970fb02f18 100644
+--- a/drivers/bus/hisi_lpc.c
++++ b/drivers/bus/hisi_lpc.c
+@@ -358,6 +358,26 @@ static int hisi_lpc_acpi_xlat_io_res(struct acpi_device *adev,
+ return 0;
+ }
+
++/*
++ * Released firmware describes the IO port max address as 0x3fff, which is
++ * the max host bus address. Fixup to a proper range. This will probably
++ * never be fixed in firmware.
++ */
++static void hisi_lpc_acpi_fixup_child_resource(struct device *hostdev,
++ struct resource *r)
++{
++ if (r->end != 0x3fff)
++ return;
++
++ if (r->start == 0xe4)
++ r->end = 0xe4 + 0x04 - 1;
++ else if (r->start == 0x2f8)
++ r->end = 0x2f8 + 0x08 - 1;
++ else
++ dev_warn(hostdev, "unrecognised resource %pR to fixup, ignoring\n",
++ r);
++}
++
+ /*
+ * hisi_lpc_acpi_set_io_res - set the resources for a child
+ * @child: the device node to be updated the I/O resource
+@@ -419,8 +439,11 @@ static int hisi_lpc_acpi_set_io_res(struct device *child,
+ return -ENOMEM;
+ }
+ count = 0;
+- list_for_each_entry(rentry, &resource_list, node)
+- resources[count++] = *rentry->res;
++ list_for_each_entry(rentry, &resource_list, node) {
++ resources[count] = *rentry->res;
++ hisi_lpc_acpi_fixup_child_resource(hostdev, &resources[count]);
++ count++;
++ }
+
+ acpi_dev_free_resource_list(&resource_list);
+
+--
+2.25.1
+
--- /dev/null
+From b96d86467cbfac71ff54ba9df45c7adae75eec7d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Dec 2019 08:41:25 -0500
+Subject: ceph: ensure we have a new cap before continuing in fill_inode
+
+From: Jeff Layton <jlayton@kernel.org>
+
+[ Upstream commit 9a6bed4fe0c8bf57785cbc4db9f86086cb9b193d ]
+
+If the caller passes in a NULL cap_reservation, and we can't allocate
+one then ensure that we fail gracefully.
+
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/inode.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
+index 1e438e0faf77e..3c24fb77ef325 100644
+--- a/fs/ceph/inode.c
++++ b/fs/ceph/inode.c
+@@ -764,8 +764,11 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
+ info_caps = le32_to_cpu(info->cap.caps);
+
+ /* prealloc new cap struct */
+- if (info_caps && ceph_snap(inode) == CEPH_NOSNAP)
++ if (info_caps && ceph_snap(inode) == CEPH_NOSNAP) {
+ new_cap = ceph_get_cap(mdsc, caps_reservation);
++ if (!new_cap)
++ return -ENOMEM;
++ }
+
+ /*
+ * prealloc xattr data, if it looks like we'll need it. only
+--
+2.25.1
+
--- /dev/null
+From 71e4b92aaaf71107ddde2ddd8d689da242830748 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Mar 2020 16:45:45 -0400
+Subject: ceph: fix potential race in ceph_check_caps
+
+From: Jeff Layton <jlayton@kernel.org>
+
+[ Upstream commit dc3da0461cc4b76f2d0c5b12247fcb3b520edbbf ]
+
+Nothing ensures that session will still be valid by the time we
+dereference the pointer. Take and put a reference.
+
+In principle, we should always be able to get a reference here, but
+throw a warning if that's ever not the case.
+
+Signed-off-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/caps.c | 14 +++++++++++++-
+ 1 file changed, 13 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
+index a2d4eed27f804..c0dbf8b7762b4 100644
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -2015,12 +2015,24 @@ ack:
+ if (mutex_trylock(&session->s_mutex) == 0) {
+ dout("inverting session/ino locks on %p\n",
+ session);
++ session = ceph_get_mds_session(session);
+ spin_unlock(&ci->i_ceph_lock);
+ if (took_snap_rwsem) {
+ up_read(&mdsc->snap_rwsem);
+ took_snap_rwsem = 0;
+ }
+- mutex_lock(&session->s_mutex);
++ if (session) {
++ mutex_lock(&session->s_mutex);
++ ceph_put_mds_session(session);
++ } else {
++ /*
++ * Because we take the reference while
++ * holding the i_ceph_lock, it should
++ * never be NULL. Throw a warning if it
++ * ever is.
++ */
++ WARN_ON_ONCE(true);
++ }
+ goto retry;
+ }
+ }
+--
+2.25.1
+
--- /dev/null
+From f88514cd79643eb209269bd97d4393458c83bbed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Jun 2020 05:30:19 -0400
+Subject: cifs: Fix double add page to memcg when cifs_readpages
+
+From: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+
+[ Upstream commit 95a3d8f3af9b0d63b43f221b630beaab9739d13a ]
+
+When running xfstests generic/451, there is a BUG at mm/memcontrol.c:
+ page:ffffea000560f2c0 refcount:2 mapcount:0 mapping:000000008544e0ea
+ index:0xf
+ mapping->aops:cifs_addr_ops dentry name:"tst-aio-dio-cycle-write.451"
+ flags: 0x2fffff80000001(locked)
+ raw: 002fffff80000001 ffffc90002023c50 ffffea0005280088 ffff88815cda0210
+ raw: 000000000000000f 0000000000000000 00000002ffffffff ffff88817287d000
+ page dumped because: VM_BUG_ON_PAGE(page->mem_cgroup)
+ page->mem_cgroup:ffff88817287d000
+ ------------[ cut here ]------------
+ kernel BUG at mm/memcontrol.c:2659!
+ invalid opcode: 0000 [#1] SMP
+ CPU: 2 PID: 2038 Comm: xfs_io Not tainted 5.8.0-rc1 #44
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS ?-20190727_
+ 073836-buildvm-ppc64le-16.ppc.4
+ RIP: 0010:commit_charge+0x35/0x50
+ Code: 0d 48 83 05 54 b2 02 05 01 48 89 77 38 c3 48 c7
+ c6 78 4a ea ba 48 83 05 38 b2 02 05 01 e8 63 0d9
+ RSP: 0018:ffffc90002023a50 EFLAGS: 00010202
+ RAX: 0000000000000000 RBX: ffff88817287d000 RCX: 0000000000000000
+ RDX: 0000000000000000 RSI: ffff88817ac97ea0 RDI: ffff88817ac97ea0
+ RBP: ffffea000560f2c0 R08: 0000000000000203 R09: 0000000000000005
+ R10: 0000000000000030 R11: ffffc900020237a8 R12: 0000000000000000
+ R13: 0000000000000001 R14: 0000000000000001 R15: ffff88815a1272c0
+ FS: 00007f5071ab0800(0000) GS:ffff88817ac80000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 000055efcd5ca000 CR3: 000000015d312000 CR4: 00000000000006e0
+ DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ Call Trace:
+ mem_cgroup_charge+0x166/0x4f0
+ __add_to_page_cache_locked+0x4a9/0x710
+ add_to_page_cache_locked+0x15/0x20
+ cifs_readpages+0x217/0x1270
+ read_pages+0x29a/0x670
+ page_cache_readahead_unbounded+0x24f/0x390
+ __do_page_cache_readahead+0x3f/0x60
+ ondemand_readahead+0x1f1/0x470
+ page_cache_async_readahead+0x14c/0x170
+ generic_file_buffered_read+0x5df/0x1100
+ generic_file_read_iter+0x10c/0x1d0
+ cifs_strict_readv+0x139/0x170
+ new_sync_read+0x164/0x250
+ __vfs_read+0x39/0x60
+ vfs_read+0xb5/0x1e0
+ ksys_pread64+0x85/0xf0
+ __x64_sys_pread64+0x22/0x30
+ do_syscall_64+0x69/0x150
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ RIP: 0033:0x7f5071fcb1af
+ Code: Bad RIP value.
+ RSP: 002b:00007ffde2cdb8e0 EFLAGS: 00000293 ORIG_RAX: 0000000000000011
+ RAX: ffffffffffffffda RBX: 00007ffde2cdb990 RCX: 00007f5071fcb1af
+ RDX: 0000000000001000 RSI: 000055efcd5ca000 RDI: 0000000000000003
+ RBP: 0000000000000003 R08: 0000000000000000 R09: 0000000000000000
+ R10: 0000000000001000 R11: 0000000000000293 R12: 0000000000000001
+ R13: 000000000009f000 R14: 0000000000000000 R15: 0000000000001000
+ Modules linked in:
+ ---[ end trace 725fa14a3e1af65c ]---
+
+Since commit 3fea5a499d57 ("mm: memcontrol: convert page cache to a new
+mem_cgroup_charge() API") does not cancel the page charge, the pages may
+be added to the page cache twice:
+thread1 | thread2
+cifs_readpages
+readpages_get_pages
+ add_to_page_cache_locked(head,index=n)=0
+ | readpages_get_pages
+ | add_to_page_cache_locked(head,index=n+1)=0
+ add_to_page_cache_locked(head, index=n+1)=-EEXIST
+ then, will next loop with list head page's
+ index=n+1 and the page->mapping not NULL
+readpages_get_pages
+add_to_page_cache_locked(head, index=n+1)
+ commit_charge
+ VM_BUG_ON_PAGE
+
+So, we should not continue with the next loop iteration once adding any
+page to the page cache has failed.
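+
+Sketched as a loop-control change (the snippet is abbreviated and reuses
+the surrounding function's locals such as mapping, page_list, rsize,
+tmplist, offset and bytes; it is not the full cifs_readpages() logic):
+
+  int err = 0;
+
+  while (!list_empty(page_list) && !err) {
+          unsigned int nr_pages = 0;
+
+          err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
+                                    &nr_pages, &offset, &bytes);
+          if (!nr_pages)
+                  break;          /* nothing was added to the page cache */
+
+          /* issue the read for the nr_pages that were added */
+  }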
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Acked-by: Ronnie Sahlberg <lsahlber@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/file.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index e78b52c582f18..5cb15649adb07 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -3804,7 +3804,8 @@ readpages_get_pages(struct address_space *mapping, struct list_head *page_list,
+ break;
+
+ __SetPageLocked(page);
+- if (add_to_page_cache_locked(page, mapping, page->index, gfp)) {
++ rc = add_to_page_cache_locked(page, mapping, page->index, gfp);
++ if (rc) {
+ __ClearPageLocked(page);
+ break;
+ }
+@@ -3820,6 +3821,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
+ struct list_head *page_list, unsigned num_pages)
+ {
+ int rc;
++ int err = 0;
+ struct list_head tmplist;
+ struct cifsFileInfo *open_file = file->private_data;
+ struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
+@@ -3860,7 +3862,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
+ * the order of declining indexes. When we put the pages in
+ * the rdata->pages, then we want them in increasing order.
+ */
+- while (!list_empty(page_list)) {
++ while (!list_empty(page_list) && !err) {
+ unsigned int i, nr_pages, bytes, rsize;
+ loff_t offset;
+ struct page *page, *tpage;
+@@ -3883,9 +3885,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
+ return 0;
+ }
+
+- rc = readpages_get_pages(mapping, page_list, rsize, &tmplist,
++ nr_pages = 0;
++ err = readpages_get_pages(mapping, page_list, rsize, &tmplist,
+ &nr_pages, &offset, &bytes);
+- if (rc) {
++ if (!nr_pages) {
+ add_credits_and_wake_if(server, credits, 0);
+ break;
+ }
+--
+2.25.1
+
--- /dev/null
+From e64cc58f894552152e88c9ab0d2fe43180532cd7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 29 Oct 2019 16:51:19 -0700
+Subject: CIFS: Properly process SMB3 lease breaks
+
+From: Pavel Shilovsky <pshilov@microsoft.com>
+
+[ Upstream commit 9bd4540836684013aaad6070a65d6fcdd9006625 ]
+
+Currently we don't take into account that a server may break a lease
+from RWH to RW, which causes us to set a wrong lease state
+on a file and thus mistakenly flush data and byte-range
+locks and purge cached data on the client. This leads to
+performance degradation because subsequent IOs go directly
+to the server.
+
+Fix this by propagating new lease state and epoch values
+to the oplock break handler through cifsFileInfo structure
+and removing the use of cifsInodeInfo flags for that. This
+avoids races where several lease/oplock breaks use those
+flags in parallel.
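+
+The purge decision for SMB3 can be sketched roughly as follows (simplified
+from the new smb3_downgrade_oplock() in the diff below; the helper name is
+illustrative):
+
+  static bool example_should_purge(unsigned int old_state,
+                                   unsigned int new_state,
+                                   unsigned int old_epoch,
+                                   unsigned int epoch)
+  {
+          /* Read caching was lost entirely: cached data may be stale. */
+          if ((old_state & CIFS_CACHE_READ_FLG) &&
+              !(new_state & CIFS_CACHE_READ_FLG))
+                  return true;
+
+          /* Same state but we skipped an epoch: a break was missed. */
+          if (old_state == new_state && epoch - old_epoch > 1)
+                  return true;
+
+          return false;
+  }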
+
+Signed-off-by: Pavel Shilovsky <pshilov@microsoft.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/cifsglob.h | 9 ++++++---
+ fs/cifs/file.c | 10 +++++++---
+ fs/cifs/misc.c | 17 +++--------------
+ fs/cifs/smb1ops.c | 8 +++-----
+ fs/cifs/smb2misc.c | 32 +++++++-------------------------
+ fs/cifs/smb2ops.c | 44 ++++++++++++++++++++++++++++++--------------
+ fs/cifs/smb2pdu.h | 2 +-
+ 7 files changed, 57 insertions(+), 65 deletions(-)
+
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 71c2dd0c7f038..2c632793c88c5 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -259,8 +259,9 @@ struct smb_version_operations {
+ int (*check_message)(char *, unsigned int, struct TCP_Server_Info *);
+ bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
+ int (*handle_cancelled_mid)(char *, struct TCP_Server_Info *);
+- void (*downgrade_oplock)(struct TCP_Server_Info *,
+- struct cifsInodeInfo *, bool);
++ void (*downgrade_oplock)(struct TCP_Server_Info *server,
++ struct cifsInodeInfo *cinode, __u32 oplock,
++ unsigned int epoch, bool *purge_cache);
+ /* process transaction2 response */
+ bool (*check_trans2)(struct mid_q_entry *, struct TCP_Server_Info *,
+ char *, int);
+@@ -1160,6 +1161,8 @@ struct cifsFileInfo {
+ unsigned int f_flags;
+ bool invalidHandle:1; /* file closed via session abend */
+ bool oplock_break_cancelled:1;
++ unsigned int oplock_epoch; /* epoch from the lease break */
++ __u32 oplock_level; /* oplock/lease level from the lease break */
+ int count;
+ spinlock_t file_info_lock; /* protects four flag/count fields above */
+ struct mutex fh_mutex; /* prevents reopen race after dead ses*/
+@@ -1300,7 +1303,7 @@ struct cifsInodeInfo {
+ unsigned int epoch; /* used to track lease state changes */
+ #define CIFS_INODE_PENDING_OPLOCK_BREAK (0) /* oplock break in progress */
+ #define CIFS_INODE_PENDING_WRITERS (1) /* Writes in progress */
+-#define CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2 (2) /* Downgrade oplock to L2 */
++#define CIFS_INODE_FLAG_UNUSED (2) /* Unused flag */
+ #define CIFS_INO_DELETE_PENDING (3) /* delete pending on server */
+ #define CIFS_INO_INVALID_MAPPING (4) /* pagecache is invalid */
+ #define CIFS_INO_LOCK (5) /* lock bit for synchronization */
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 128cbd69911b4..e78b52c582f18 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -4185,12 +4185,13 @@ void cifs_oplock_break(struct work_struct *work)
+ struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
+ struct TCP_Server_Info *server = tcon->ses->server;
+ int rc = 0;
++ bool purge_cache = false;
+
+ wait_on_bit(&cinode->flags, CIFS_INODE_PENDING_WRITERS,
+ TASK_UNINTERRUPTIBLE);
+
+- server->ops->downgrade_oplock(server, cinode,
+- test_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2, &cinode->flags));
++ server->ops->downgrade_oplock(server, cinode, cfile->oplock_level,
++ cfile->oplock_epoch, &purge_cache);
+
+ if (!CIFS_CACHE_WRITE(cinode) && CIFS_CACHE_READ(cinode) &&
+ cifs_has_mand_locks(cinode)) {
+@@ -4205,18 +4206,21 @@ void cifs_oplock_break(struct work_struct *work)
+ else
+ break_lease(inode, O_WRONLY);
+ rc = filemap_fdatawrite(inode->i_mapping);
+- if (!CIFS_CACHE_READ(cinode)) {
++ if (!CIFS_CACHE_READ(cinode) || purge_cache) {
+ rc = filemap_fdatawait(inode->i_mapping);
+ mapping_set_error(inode->i_mapping, rc);
+ cifs_zap_mapping(inode);
+ }
+ cifs_dbg(FYI, "Oplock flush inode %p rc %d\n", inode, rc);
++ if (CIFS_CACHE_WRITE(cinode))
++ goto oplock_break_ack;
+ }
+
+ rc = cifs_push_locks(cfile);
+ if (rc)
+ cifs_dbg(VFS, "Push locks rc = %d\n", rc);
+
++oplock_break_ack:
+ /*
+ * releasing stale oplock after recent reconnect of smb session using
+ * a now incorrect file handle is not a data integrity issue but do
+diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
+index e45f8e321371c..dd67f56ea61e5 100644
+--- a/fs/cifs/misc.c
++++ b/fs/cifs/misc.c
+@@ -477,21 +477,10 @@ is_valid_oplock_break(char *buffer, struct TCP_Server_Info *srv)
+ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
+ &pCifsInode->flags);
+
+- /*
+- * Set flag if the server downgrades the oplock
+- * to L2 else clear.
+- */
+- if (pSMB->OplockLevel)
+- set_bit(
+- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+- &pCifsInode->flags);
+- else
+- clear_bit(
+- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+- &pCifsInode->flags);
+-
+- cifs_queue_oplock_break(netfile);
++ netfile->oplock_epoch = 0;
++ netfile->oplock_level = pSMB->OplockLevel;
+ netfile->oplock_break_cancelled = false;
++ cifs_queue_oplock_break(netfile);
+
+ spin_unlock(&tcon->open_file_lock);
+ spin_unlock(&cifs_tcp_ses_lock);
+diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
+index c7f0c85664425..0b7f924512848 100644
+--- a/fs/cifs/smb1ops.c
++++ b/fs/cifs/smb1ops.c
+@@ -381,12 +381,10 @@ coalesce_t2(char *second_buf, struct smb_hdr *target_hdr)
+
+ static void
+ cifs_downgrade_oplock(struct TCP_Server_Info *server,
+- struct cifsInodeInfo *cinode, bool set_level2)
++ struct cifsInodeInfo *cinode, __u32 oplock,
++ unsigned int epoch, bool *purge_cache)
+ {
+- if (set_level2)
+- cifs_set_oplock_level(cinode, OPLOCK_READ);
+- else
+- cifs_set_oplock_level(cinode, 0);
++ cifs_set_oplock_level(cinode, oplock);
+ }
+
+ static bool
+diff --git a/fs/cifs/smb2misc.c b/fs/cifs/smb2misc.c
+index 2fc96f7923ee5..7d875a47d0226 100644
+--- a/fs/cifs/smb2misc.c
++++ b/fs/cifs/smb2misc.c
+@@ -550,7 +550,7 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
+
+ cifs_dbg(FYI, "found in the open list\n");
+ cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
+- le32_to_cpu(rsp->NewLeaseState));
++ lease_state);
+
+ if (ack_req)
+ cfile->oplock_break_cancelled = false;
+@@ -559,17 +559,8 @@ smb2_tcon_has_lease(struct cifs_tcon *tcon, struct smb2_lease_break *rsp)
+
+ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK, &cinode->flags);
+
+- /*
+- * Set or clear flags depending on the lease state being READ.
+- * HANDLE caching flag should be added when the client starts
+- * to defer closing remote file handles with HANDLE leases.
+- */
+- if (lease_state & SMB2_LEASE_READ_CACHING_HE)
+- set_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+- &cinode->flags);
+- else
+- clear_bit(CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+- &cinode->flags);
++ cfile->oplock_epoch = le16_to_cpu(rsp->Epoch);
++ cfile->oplock_level = lease_state;
+
+ cifs_queue_oplock_break(cfile);
+ return true;
+@@ -599,7 +590,7 @@ smb2_tcon_find_pending_open_lease(struct cifs_tcon *tcon,
+
+ cifs_dbg(FYI, "found in the pending open list\n");
+ cifs_dbg(FYI, "lease key match, lease break 0x%x\n",
+- le32_to_cpu(rsp->NewLeaseState));
++ lease_state);
+
+ open->oplock = lease_state;
+ }
+@@ -732,18 +723,9 @@ smb2_is_valid_oplock_break(char *buffer, struct TCP_Server_Info *server)
+ set_bit(CIFS_INODE_PENDING_OPLOCK_BREAK,
+ &cinode->flags);
+
+- /*
+- * Set flag if the server downgrades the oplock
+- * to L2 else clear.
+- */
+- if (rsp->OplockLevel)
+- set_bit(
+- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+- &cinode->flags);
+- else
+- clear_bit(
+- CIFS_INODE_DOWNGRADE_OPLOCK_TO_L2,
+- &cinode->flags);
++ cfile->oplock_epoch = 0;
++ cfile->oplock_level = rsp->OplockLevel;
++
+ spin_unlock(&cfile->file_info_lock);
+
+ cifs_queue_oplock_break(cfile);
+diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
+index 2a523139a05fb..947a40069d246 100644
+--- a/fs/cifs/smb2ops.c
++++ b/fs/cifs/smb2ops.c
+@@ -2358,22 +2358,38 @@ static long smb3_fallocate(struct file *file, struct cifs_tcon *tcon, int mode,
+
+ static void
+ smb2_downgrade_oplock(struct TCP_Server_Info *server,
+- struct cifsInodeInfo *cinode, bool set_level2)
++ struct cifsInodeInfo *cinode, __u32 oplock,
++ unsigned int epoch, bool *purge_cache)
+ {
+- if (set_level2)
+- server->ops->set_oplock_level(cinode, SMB2_OPLOCK_LEVEL_II,
+- 0, NULL);
+- else
+- server->ops->set_oplock_level(cinode, 0, 0, NULL);
++ server->ops->set_oplock_level(cinode, oplock, 0, NULL);
+ }
+
+ static void
+-smb21_downgrade_oplock(struct TCP_Server_Info *server,
+- struct cifsInodeInfo *cinode, bool set_level2)
++smb21_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock,
++ unsigned int epoch, bool *purge_cache);
++
++static void
++smb3_downgrade_oplock(struct TCP_Server_Info *server,
++ struct cifsInodeInfo *cinode, __u32 oplock,
++ unsigned int epoch, bool *purge_cache)
+ {
+- server->ops->set_oplock_level(cinode,
+- set_level2 ? SMB2_LEASE_READ_CACHING_HE :
+- 0, 0, NULL);
++ unsigned int old_state = cinode->oplock;
++ unsigned int old_epoch = cinode->epoch;
++ unsigned int new_state;
++
++ if (epoch > old_epoch) {
++ smb21_set_oplock_level(cinode, oplock, 0, NULL);
++ cinode->epoch = epoch;
++ }
++
++ new_state = cinode->oplock;
++ *purge_cache = false;
++
++ if ((old_state & CIFS_CACHE_READ_FLG) != 0 &&
++ (new_state & CIFS_CACHE_READ_FLG) == 0)
++ *purge_cache = true;
++ else if (old_state == new_state && (epoch - old_epoch > 1))
++ *purge_cache = true;
+ }
+
+ static void
+@@ -3449,7 +3465,7 @@ struct smb_version_operations smb21_operations = {
+ .print_stats = smb2_print_stats,
+ .is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+- .downgrade_oplock = smb21_downgrade_oplock,
++ .downgrade_oplock = smb2_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .negotiate_wsize = smb2_negotiate_wsize,
+@@ -3546,7 +3562,7 @@ struct smb_version_operations smb30_operations = {
+ .dump_share_caps = smb2_dump_share_caps,
+ .is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+- .downgrade_oplock = smb21_downgrade_oplock,
++ .downgrade_oplock = smb3_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .negotiate_wsize = smb2_negotiate_wsize,
+@@ -3651,7 +3667,7 @@ struct smb_version_operations smb311_operations = {
+ .dump_share_caps = smb2_dump_share_caps,
+ .is_oplock_break = smb2_is_valid_oplock_break,
+ .handle_cancelled_mid = smb2_handle_cancelled_mid,
+- .downgrade_oplock = smb21_downgrade_oplock,
++ .downgrade_oplock = smb3_downgrade_oplock,
+ .need_neg = smb2_need_neg,
+ .negotiate = smb2_negotiate,
+ .negotiate_wsize = smb2_negotiate_wsize,
+diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
+index 308c682fa4d3b..44501f8cbd75e 100644
+--- a/fs/cifs/smb2pdu.h
++++ b/fs/cifs/smb2pdu.h
+@@ -1209,7 +1209,7 @@ struct smb2_oplock_break {
+ struct smb2_lease_break {
+ struct smb2_sync_hdr sync_hdr;
+ __le16 StructureSize; /* Must be 44 */
+- __le16 Reserved;
++ __le16 Epoch;
+ __le32 Flags;
+ __u8 LeaseKey[16];
+ __le32 CurrentLeaseState;
+--
+2.25.1
+
--- /dev/null
+From b936a867cc866af51fd8af5d8000ff7616a2ecb4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Jan 2020 10:07:25 -0600
+Subject: clk: stratix10: use do_div() for 64-bit calculation
+
+From: Dinh Nguyen <dinguyen@kernel.org>
+
+[ Upstream commit cc26ed7be46c5f5fa45f3df8161ed7ca3c4d318c ]
+
+Use the do_div() macro to perform the u64 division; it guards against
+overflow if the result is too large for the unsigned long return type.
+
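+A minimal sketch, not part of the patch, of the do_div() idiom (the helper
+name is illustrative): do_div() divides the 64-bit dividend in place and
+returns the remainder, so the quotient never overflows an intermediate type.
+
+    #include <asm/div64.h>
+
+    static unsigned long example_vco_rate(unsigned long long parent_rate,
+                                          u32 refdiv)
+    {
+        u64 vco = parent_rate;
+
+        do_div(vco, refdiv);    /* vco now holds parent_rate / refdiv */
+        return (unsigned long)vco;
+    }
+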
+Signed-off-by: Dinh Nguyen <dinguyen@kernel.org>
+Link: https://lkml.kernel.org/r/20200114160726.19771-1-dinguyen@kernel.org
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/socfpga/clk-pll-s10.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/clk/socfpga/clk-pll-s10.c b/drivers/clk/socfpga/clk-pll-s10.c
+index c4d0b6f6abf2e..fc2e2839fe570 100644
+--- a/drivers/clk/socfpga/clk-pll-s10.c
++++ b/drivers/clk/socfpga/clk-pll-s10.c
+@@ -38,7 +38,9 @@ static unsigned long clk_pll_recalc_rate(struct clk_hw *hwclk,
+ /* read VCO1 reg for numerator and denominator */
+ reg = readl(socfpgaclk->hw.reg);
+ refdiv = (reg & SOCFPGA_PLL_REFDIV_MASK) >> SOCFPGA_PLL_REFDIV_SHIFT;
+- vco_freq = (unsigned long long)parent_rate / refdiv;
++
++ vco_freq = parent_rate;
++ do_div(vco_freq, refdiv);
+
+ /* Read mdiv and fdiv from the fdbck register */
+ reg = readl(socfpgaclk->hw.reg + 0x4);
+--
+2.25.1
+
--- /dev/null
+From 3fa4650925eaa2b8f25df774766e68bae43cb6d4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 19 Oct 2019 16:06:34 +0200
+Subject: clk/ti/adpll: allocate room for terminating null
+
+From: Stephen Kitt <steve@sk2.org>
+
+[ Upstream commit 7f6ac72946b88b89ee44c1c527aa8591ac5ffcbe ]
+
+The buffer allocated in ti_adpll_clk_get_name doesn't account for the
+terminating null. This patch switches to devm_kasprintf to avoid
+overflowing.
+
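+A minimal sketch, not part of the patch, of the devm_kasprintf() idiom (the
+helper name is illustrative): the allocation is sized for the formatted
+string including its terminating NUL, and is freed automatically with the
+device.
+
+    #include <linux/device.h>
+    #include <linux/gfp.h>
+
+    static const char *example_clk_name(struct device *dev, unsigned long pa,
+                                        const char *postfix)
+    {
+        return devm_kasprintf(dev, GFP_KERNEL, "%08lx.adpll.%s", pa, postfix);
+    }
+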
+Signed-off-by: Stephen Kitt <steve@sk2.org>
+Link: https://lkml.kernel.org/r/20191019140634.15596-1-steve@sk2.org
+Acked-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/clk/ti/adpll.c | 11 ++---------
+ 1 file changed, 2 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/clk/ti/adpll.c b/drivers/clk/ti/adpll.c
+index 688e403333b91..14926e07d09ae 100644
+--- a/drivers/clk/ti/adpll.c
++++ b/drivers/clk/ti/adpll.c
+@@ -193,15 +193,8 @@ static const char *ti_adpll_clk_get_name(struct ti_adpll_data *d,
+ if (err)
+ return NULL;
+ } else {
+- const char *base_name = "adpll";
+- char *buf;
+-
+- buf = devm_kzalloc(d->dev, 8 + 1 + strlen(base_name) + 1 +
+- strlen(postfix), GFP_KERNEL);
+- if (!buf)
+- return NULL;
+- sprintf(buf, "%08lx.%s.%s", d->pa, base_name, postfix);
+- name = buf;
++ name = devm_kasprintf(d->dev, GFP_KERNEL, "%08lx.adpll.%s",
++ d->pa, postfix);
+ }
+
+ return name;
+--
+2.25.1
+
--- /dev/null
+From 69ea3f8169d1f72a4e0993c2a46d010d057ae633 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 Mar 2020 19:27:43 +0530
+Subject: cpufreq: powernv: Fix frame-size-overflow in powernv_cpufreq_work_fn
+
+From: Pratik Rajesh Sampat <psampat@linux.ibm.com>
+
+[ Upstream commit d95fe371ecd28901f11256c610b988ed44e36ee2 ]
+
+The patch avoids allocating cpufreq_policy on the stack, hence fixing the
+frame-size overflow in 'powernv_cpufreq_work_fn'.
+
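+A minimal sketch, not part of the patch, of the reference-counted accessor
+used instead of the on-stack copy (the helper name is illustrative):
+cpufreq_cpu_get() returns the shared policy, or NULL, and must be balanced
+by cpufreq_cpu_put().
+
+    #include <linux/cpufreq.h>
+
+    static void example_touch_policy(unsigned int cpu)
+    {
+        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
+
+        if (!policy)
+            return;
+        /* ... use policy->cur, policy->cpus ... */
+        cpufreq_cpu_put(policy);
+    }
+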
+Fixes: 227942809b52 ("cpufreq: powernv: Restore cpu frequency to policy->cur on unthrottling")
+Signed-off-by: Pratik Rajesh Sampat <psampat@linux.ibm.com>
+Reviewed-by: Daniel Axtens <dja@axtens.net>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20200316135743.57735-1-psampat@linux.ibm.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/powernv-cpufreq.c | 13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/cpufreq/powernv-cpufreq.c b/drivers/cpufreq/powernv-cpufreq.c
+index 687c92ef76440..79942f7057576 100644
+--- a/drivers/cpufreq/powernv-cpufreq.c
++++ b/drivers/cpufreq/powernv-cpufreq.c
+@@ -903,6 +903,7 @@ static struct notifier_block powernv_cpufreq_reboot_nb = {
+ void powernv_cpufreq_work_fn(struct work_struct *work)
+ {
+ struct chip *chip = container_of(work, struct chip, throttle);
++ struct cpufreq_policy *policy;
+ unsigned int cpu;
+ cpumask_t mask;
+
+@@ -917,12 +918,14 @@ void powernv_cpufreq_work_fn(struct work_struct *work)
+ chip->restore = false;
+ for_each_cpu(cpu, &mask) {
+ int index;
+- struct cpufreq_policy policy;
+
+- cpufreq_get_policy(&policy, cpu);
+- index = cpufreq_table_find_index_c(&policy, policy.cur);
+- powernv_cpufreq_target_index(&policy, index);
+- cpumask_andnot(&mask, &mask, policy.cpus);
++ policy = cpufreq_cpu_get(cpu);
++ if (!policy)
++ continue;
++ index = cpufreq_table_find_index_c(policy, policy->cur);
++ powernv_cpufreq_target_index(policy, index);
++ cpumask_andnot(&mask, &mask, policy->cpus);
++ cpufreq_cpu_put(policy);
+ }
+ out:
+ put_online_cpus();
+--
+2.25.1
+
--- /dev/null
+From e592d0d1fd6c14abf1e02cad25b6a493524b1d8b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Feb 2020 10:48:42 +0530
+Subject: crypto: chelsio - This fixes the kernel panic which occurs during a
+ libkcapi test
+
+From: Ayush Sawal <ayush.sawal@chelsio.com>
+
+[ Upstream commit 9195189e00a7db55e7d448cee973cae87c5a3c71 ]
+
+The libkcapi test which causes the kernel panic is the
+aead asynchronous vmsplice multiple test:
+
+./bin/kcapi -v -d 4 -x 10 -c "ccm(aes)"
+-q 4edb58e8d5eb6bc711c43a6f3693daebde2e5524f1b55297abb29f003236e43d
+-t a7877c99 -n 674742abd0f5ba -k 2861fd0253705d7875c95ba8a53171b4
+-a fb7bc304a3909e66e2e0c5ef952712dd884ce3e7324171369f2c5db1adc48c7d
+
+This patch avoids dma-mapping a zero-length sg, which causes the panic,
+by using sg_nents_for_len(), which maps only up to a specific length.
+
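+A minimal sketch, not part of the patch, of the sg_nents_for_len() idiom (the
+helper name is illustrative): only the entries covering the requested length
+are handed to dma_map_sg(), so a trailing zero-length entry is never mapped.
+
+    #include <linux/scatterlist.h>
+    #include <linux/dma-mapping.h>
+    #include <linux/errno.h>
+
+    static int example_map(struct device *dev, struct scatterlist *sg,
+                           unsigned int len)
+    {
+        int nents = sg_nents_for_len(sg, len);
+
+        if (nents <= 0)
+            return -EINVAL;
+        return dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL) ? 0 : -ENOMEM;
+    }
+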
+Signed-off-by: Ayush Sawal <ayush.sawal@chelsio.com>
+Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/chelsio/chcr_algo.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/crypto/chelsio/chcr_algo.c b/drivers/crypto/chelsio/chcr_algo.c
+index 9b3c259f081d3..ee508bbbb7504 100644
+--- a/drivers/crypto/chelsio/chcr_algo.c
++++ b/drivers/crypto/chelsio/chcr_algo.c
+@@ -2418,8 +2418,9 @@ int chcr_aead_dma_map(struct device *dev,
+ else
+ reqctx->b0_dma = 0;
+ if (req->src == req->dst) {
+- error = dma_map_sg(dev, req->src, sg_nents(req->src),
+- DMA_BIDIRECTIONAL);
++ error = dma_map_sg(dev, req->src,
++ sg_nents_for_len(req->src, dst_size),
++ DMA_BIDIRECTIONAL);
+ if (!error)
+ goto err;
+ } else {
+--
+2.25.1
+
--- /dev/null
+From 3e75d670ac2057a1dc01ba4ecd326eafe43f80e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 Nov 2019 19:20:21 +0900
+Subject: debugfs: Fix !DEBUG_FS debugfs_create_automount
+
+From: Kusanagi Kouichi <slash@ac.auone-net.jp>
+
+[ Upstream commit 4250b047039d324e0ff65267c8beb5bad5052a86 ]
+
+If DEBUG_FS=n, the build fails with the following error:
+
+kernel/trace/trace.c: In function 'tracing_init_dentry':
+kernel/trace/trace.c:8658:9: error: passing argument 3 of 'debugfs_create_automount' from incompatible pointer type [-Werror=incompatible-pointer-types]
+ 8658 | trace_automount, NULL);
+ | ^~~~~~~~~~~~~~~
+ | |
+ | struct vfsmount * (*)(struct dentry *, void *)
+In file included from kernel/trace/trace.c:24:
+./include/linux/debugfs.h:206:25: note: expected 'struct vfsmount * (*)(void *)' but argument is of type 'struct vfsmount * (*)(struct dentry *, void *)'
+ 206 | struct vfsmount *(*f)(void *),
+ | ~~~~~~~~~~~~~~~~~~~^~~~~~~~~~
+
+Signed-off-by: Kusanagi Kouichi <slash@ac.auone-net.jp>
+Link: https://lore.kernel.org/r/20191121102021787.MLMY.25002.ppp.dion.ne.jp@dmta0003.auone-net.jp
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/debugfs.h | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
+index 3b0ba54cc4d5b..3bc1034c57e66 100644
+--- a/include/linux/debugfs.h
++++ b/include/linux/debugfs.h
+@@ -54,6 +54,8 @@ static const struct file_operations __fops = { \
+ .llseek = no_llseek, \
+ }
+
++typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
++
+ #if defined(CONFIG_DEBUG_FS)
+
+ struct dentry *debugfs_lookup(const char *name, struct dentry *parent);
+@@ -75,7 +77,6 @@ struct dentry *debugfs_create_dir(const char *name, struct dentry *parent);
+ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
+ const char *dest);
+
+-typedef struct vfsmount *(*debugfs_automount_t)(struct dentry *, void *);
+ struct dentry *debugfs_create_automount(const char *name,
+ struct dentry *parent,
+ debugfs_automount_t f,
+@@ -204,7 +205,7 @@ static inline struct dentry *debugfs_create_symlink(const char *name,
+
+ static inline struct dentry *debugfs_create_automount(const char *name,
+ struct dentry *parent,
+- struct vfsmount *(*f)(void *),
++ debugfs_automount_t f,
+ void *data)
+ {
+ return ERR_PTR(-ENODEV);
+--
+2.25.1
+
--- /dev/null
+From ba071399536efcc10babb2faa7b0a44e4e636645 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 4 Oct 2019 11:11:40 +0100
+Subject: dma-fence: Serialise signal enabling (dma_fence_enable_sw_signaling)
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+[ Upstream commit 9c98f021e4e717ffd9948fa65340ea3ef12b7935 ]
+
+Make dma_fence_enable_sw_signaling() behave like its
+dma_fence_add_callback() and dma_fence_default_wait() counterparts and
+perform the test to enable signaling under the fence->lock, along with
+the action to do so. This ensures that, should an implementation be trying
+to flush the cb_list (by signaling) on retirement before freeing the
+fence, it can do so in a race-free manner.
+
+See also 0fc89b6802ba ("dma-fence: Simply wrap dma_fence_signal_locked
+with dma_fence_signal").
+
+v2: Refactor all 3 enable_signaling paths to use a common function.
+v3: Don't argue, just keep the tracepoint in the existing spot.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191004101140.32713-1-chris@chris-wilson.co.uk
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma-buf/dma-fence.c | 78 +++++++++++++++++--------------------
+ 1 file changed, 35 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/dma-buf/dma-fence.c b/drivers/dma-buf/dma-fence.c
+index 1551ca7df3941..8586cc05def17 100644
+--- a/drivers/dma-buf/dma-fence.c
++++ b/drivers/dma-buf/dma-fence.c
+@@ -244,6 +244,30 @@ void dma_fence_free(struct dma_fence *fence)
+ }
+ EXPORT_SYMBOL(dma_fence_free);
+
++static bool __dma_fence_enable_signaling(struct dma_fence *fence)
++{
++ bool was_set;
++
++ lockdep_assert_held(fence->lock);
++
++ was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
++ &fence->flags);
++
++ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++ return false;
++
++ if (!was_set && fence->ops->enable_signaling) {
++ trace_dma_fence_enable_signal(fence);
++
++ if (!fence->ops->enable_signaling(fence)) {
++ dma_fence_signal_locked(fence);
++ return false;
++ }
++ }
++
++ return true;
++}
++
+ /**
+ * dma_fence_enable_sw_signaling - enable signaling on fence
+ * @fence: the fence to enable
+@@ -256,19 +280,12 @@ void dma_fence_enable_sw_signaling(struct dma_fence *fence)
+ {
+ unsigned long flags;
+
+- if (!test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+- &fence->flags) &&
+- !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags) &&
+- fence->ops->enable_signaling) {
+- trace_dma_fence_enable_signal(fence);
+-
+- spin_lock_irqsave(fence->lock, flags);
+-
+- if (!fence->ops->enable_signaling(fence))
+- dma_fence_signal_locked(fence);
++ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++ return;
+
+- spin_unlock_irqrestore(fence->lock, flags);
+- }
++ spin_lock_irqsave(fence->lock, flags);
++ __dma_fence_enable_signaling(fence);
++ spin_unlock_irqrestore(fence->lock, flags);
+ }
+ EXPORT_SYMBOL(dma_fence_enable_sw_signaling);
+
+@@ -302,7 +319,6 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+ {
+ unsigned long flags;
+ int ret = 0;
+- bool was_set;
+
+ if (WARN_ON(!fence || !func))
+ return -EINVAL;
+@@ -314,25 +330,14 @@ int dma_fence_add_callback(struct dma_fence *fence, struct dma_fence_cb *cb,
+
+ spin_lock_irqsave(fence->lock, flags);
+
+- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+- &fence->flags);
+-
+- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+- ret = -ENOENT;
+- else if (!was_set && fence->ops->enable_signaling) {
+- trace_dma_fence_enable_signal(fence);
+-
+- if (!fence->ops->enable_signaling(fence)) {
+- dma_fence_signal_locked(fence);
+- ret = -ENOENT;
+- }
+- }
+-
+- if (!ret) {
++ if (__dma_fence_enable_signaling(fence)) {
+ cb->func = func;
+ list_add_tail(&cb->node, &fence->cb_list);
+- } else
++ } else {
+ INIT_LIST_HEAD(&cb->node);
++ ret = -ENOENT;
++ }
++
+ spin_unlock_irqrestore(fence->lock, flags);
+
+ return ret;
+@@ -432,7 +437,6 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
+ struct default_wait_cb cb;
+ unsigned long flags;
+ signed long ret = timeout ? timeout : 1;
+- bool was_set;
+
+ if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
+ return ret;
+@@ -444,21 +448,9 @@ dma_fence_default_wait(struct dma_fence *fence, bool intr, signed long timeout)
+ goto out;
+ }
+
+- was_set = test_and_set_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT,
+- &fence->flags);
+-
+- if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
++ if (!__dma_fence_enable_signaling(fence))
+ goto out;
+
+- if (!was_set && fence->ops->enable_signaling) {
+- trace_dma_fence_enable_signal(fence);
+-
+- if (!fence->ops->enable_signaling(fence)) {
+- dma_fence_signal_locked(fence);
+- goto out;
+- }
+- }
+-
+ if (!timeout) {
+ ret = 0;
+ goto out;
+--
+2.25.1
+
--- /dev/null
+From 2dca0ddaeae02fff05517f79bf879b94dbb3fbea Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 9 Nov 2019 17:05:23 +0530
+Subject: dmaengine: mediatek: hsdma_probe: fixed a memory leak when
+ devm_request_irq fails
+
+From: Satendra Singh Thakur <sst2005@gmail.com>
+
+[ Upstream commit 1ff95243257fad07290dcbc5f7a6ad79d6e703e2 ]
+
+Currently, when devm_request_irq fails, the function
+dma_async_device_unregister gets called. This doesn't free
+the resources allocated by of_dma_controller_register.
+Therefore, call of_dma_controller_free for this purpose.
+
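+A minimal sketch, not part of the patch, of the unwind order on the probe
+error path (the helper name is illustrative): undo
+of_dma_controller_register() before unregistering the dmaengine device.
+
+    #include <linux/of_dma.h>
+    #include <linux/dmaengine.h>
+
+    static void example_probe_unwind(struct device_node *node,
+                                     struct dma_device *dd)
+    {
+        of_dma_controller_free(node);       /* pairs with of_dma_controller_register() */
+        dma_async_device_unregister(dd);    /* pairs with dma_async_device_register() */
+    }
+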
+Signed-off-by: Satendra Singh Thakur <sst2005@gmail.com>
+Link: https://lore.kernel.org/r/20191109113523.6067-1-sst2005@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/mediatek/mtk-hsdma.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/mediatek/mtk-hsdma.c b/drivers/dma/mediatek/mtk-hsdma.c
+index b7ec56ae02a6e..fca232b1d4a64 100644
+--- a/drivers/dma/mediatek/mtk-hsdma.c
++++ b/drivers/dma/mediatek/mtk-hsdma.c
+@@ -997,7 +997,7 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
+ if (err) {
+ dev_err(&pdev->dev,
+ "request_irq failed with err %d\n", err);
+- goto err_unregister;
++ goto err_free;
+ }
+
+ platform_set_drvdata(pdev, hsdma);
+@@ -1006,6 +1006,8 @@ static int mtk_hsdma_probe(struct platform_device *pdev)
+
+ return 0;
+
++err_free:
++ of_dma_controller_free(pdev->dev.of_node);
+ err_unregister:
+ dma_async_device_unregister(dd);
+
+--
+2.25.1
+
--- /dev/null
+From 8b87eef356af320c96613f704217c91a0c27cade Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Jan 2020 16:36:28 +0100
+Subject: dmaengine: stm32-dma: use vchan_terminate_vdesc() in .terminate_all
+
+From: Amelie Delaunay <amelie.delaunay@st.com>
+
+[ Upstream commit d80cbef35bf89b763f06e03bb4ff8f933bf012c5 ]
+
+To avoid a race with vchan_complete, use the race-free way to terminate
+the running transfer.
+
+Move vdesc->node list_del in stm32_dma_start_transfer instead of in
+stm32_mdma_chan_complete to avoid another race in vchan_dma_desc_free_list.
+
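+A minimal sketch, not part of the patch, of the race-free termination helper
+(the function name is illustrative; "virt-dma.h" is the driver-local header
+in drivers/dma/): with the channel lock held, the in-flight descriptor is
+parked by vchan_terminate_vdesc() and typically freed later from
+vchan_synchronize(), so vchan_complete() cannot race with its removal.
+
+    #include "virt-dma.h"
+
+    static void example_terminate(struct virt_dma_chan *vc,
+                                  struct virt_dma_desc *vd)
+    {
+        /* caller holds vc->lock */
+        if (vd)
+            vchan_terminate_vdesc(vd);
+    }
+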
+Signed-off-by: Amelie Delaunay <amelie.delaunay@st.com>
+Link: https://lore.kernel.org/r/20200129153628.29329-9-amelie.delaunay@st.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/stm32-dma.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/dma/stm32-dma.c b/drivers/dma/stm32-dma.c
+index 4903a408fc146..ac7af440f8658 100644
+--- a/drivers/dma/stm32-dma.c
++++ b/drivers/dma/stm32-dma.c
+@@ -494,8 +494,10 @@ static int stm32_dma_terminate_all(struct dma_chan *c)
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+
+- if (chan->busy) {
+- stm32_dma_stop(chan);
++ if (chan->desc) {
++ vchan_terminate_vdesc(&chan->desc->vdesc);
++ if (chan->busy)
++ stm32_dma_stop(chan);
+ chan->desc = NULL;
+ }
+
+@@ -551,6 +553,8 @@ static void stm32_dma_start_transfer(struct stm32_dma_chan *chan)
+ if (!vdesc)
+ return;
+
++ list_del(&vdesc->node);
++
+ chan->desc = to_stm32_dma_desc(vdesc);
+ chan->next_sg = 0;
+ }
+@@ -628,7 +632,6 @@ static void stm32_dma_handle_chan_done(struct stm32_dma_chan *chan)
+ } else {
+ chan->busy = false;
+ if (chan->next_sg == chan->desc->num_sgs) {
+- list_del(&chan->desc->vdesc.node);
+ vchan_cookie_complete(&chan->desc->vdesc);
+ chan->desc = NULL;
+ }
+--
+2.25.1
+
--- /dev/null
+From 868be2f5292fdbf515282ed1491e3ba20ec12252 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Jan 2020 09:53:34 +0100
+Subject: dmaengine: stm32-mdma: use vchan_terminate_vdesc() in .terminate_all
+
+From: Amelie Delaunay <amelie.delaunay@st.com>
+
+[ Upstream commit dfc708812a2acfc0ca56f56233b3c3e7b0d4ffe7 ]
+
+To avoid a race with vchan_complete, use the race-free way to terminate
+the running transfer.
+
+Move vdesc->node list_del in stm32_mdma_start_transfer instead of in
+stm32_mdma_xfer_end to avoid another race in vchan_dma_desc_free_list.
+
+Signed-off-by: Amelie Delaunay <amelie.delaunay@st.com>
+Link: https://lore.kernel.org/r/20200127085334.13163-7-amelie.delaunay@st.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/stm32-mdma.c | 9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/dma/stm32-mdma.c b/drivers/dma/stm32-mdma.c
+index 8c3c3e5b812a8..9c6867916e890 100644
+--- a/drivers/dma/stm32-mdma.c
++++ b/drivers/dma/stm32-mdma.c
+@@ -1137,6 +1137,8 @@ static void stm32_mdma_start_transfer(struct stm32_mdma_chan *chan)
+ return;
+ }
+
++ list_del(&vdesc->node);
++
+ chan->desc = to_stm32_mdma_desc(vdesc);
+ hwdesc = chan->desc->node[0].hwdesc;
+ chan->curr_hwdesc = 0;
+@@ -1252,8 +1254,10 @@ static int stm32_mdma_terminate_all(struct dma_chan *c)
+ LIST_HEAD(head);
+
+ spin_lock_irqsave(&chan->vchan.lock, flags);
+- if (chan->busy) {
+- stm32_mdma_stop(chan);
++ if (chan->desc) {
++ vchan_terminate_vdesc(&chan->desc->vdesc);
++ if (chan->busy)
++ stm32_mdma_stop(chan);
+ chan->desc = NULL;
+ }
+ vchan_get_all_descriptors(&chan->vchan, &head);
+@@ -1341,7 +1345,6 @@ static enum dma_status stm32_mdma_tx_status(struct dma_chan *c,
+
+ static void stm32_mdma_xfer_end(struct stm32_mdma_chan *chan)
+ {
+- list_del(&chan->desc->vdesc.node);
+ vchan_cookie_complete(&chan->desc->vdesc);
+ chan->desc = NULL;
+ chan->busy = false;
+--
+2.25.1
+
--- /dev/null
+From 0df5350d536da5556878f2094d970a482e9e2c7d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 9 Feb 2020 19:33:41 +0300
+Subject: dmaengine: tegra-apb: Prevent race conditions on channel's freeing
+
+From: Dmitry Osipenko <digetx@gmail.com>
+
+[ Upstream commit 8e84172e372bdca20c305d92d51d33640d2da431 ]
+
+It's incorrect to check the channel's "busy" state without taking a lock.
+That shouldn't cause any real trouble; nevertheless, it's always better
+not to have any race conditions in the code.
+
+Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
+Acked-by: Jon Hunter <jonathanh@nvidia.com>
+Link: https://lore.kernel.org/r/20200209163356.6439-5-digetx@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/tegra20-apb-dma.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/dma/tegra20-apb-dma.c b/drivers/dma/tegra20-apb-dma.c
+index 15481aeaeecd1..5ccd24a46e381 100644
+--- a/drivers/dma/tegra20-apb-dma.c
++++ b/drivers/dma/tegra20-apb-dma.c
+@@ -1225,8 +1225,7 @@ static void tegra_dma_free_chan_resources(struct dma_chan *dc)
+
+ dev_dbg(tdc2dev(tdc), "Freeing channel %d\n", tdc->id);
+
+- if (tdc->busy)
+- tegra_dma_terminate_all(dc);
++ tegra_dma_terminate_all(dc);
+
+ spin_lock_irqsave(&tdc->lock, flags);
+ list_splice_init(&tdc->pending_sg_req, &sg_req_list);
+--
+2.25.1
+
--- /dev/null
+From d5df9998508b166313f1d5c2e028cd5e59d37b07 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Jan 2020 11:22:49 +0100
+Subject: dmaengine: zynqmp_dma: fix burst length configuration
+
+From: Matthias Fend <matthias.fend@wolfvision.net>
+
+[ Upstream commit cc88525ebffc757e00cc5a5d61da6271646c7f5f ]
+
+Since the dma engine expects the burst length register content as a
+power-of-2 value, the burst length needs to be converted first.
+Additionally, add a burst-length range check to avoid corrupting unrelated
+register bits.
+
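+A minimal sketch, not part of the patch, of the conversion (the names and
+the limit are illustrative): clamp the requested burst to the supported
+range, then store its base-2 logarithm, which is what the register field
+encodes.
+
+    #include <linux/kernel.h>
+    #include <linux/log2.h>
+
+    #define EXAMPLE_MAX_BURST_LEN 32768U    /* hypothetical hardware limit */
+
+    static u32 example_burst_field(u32 maxburst)
+    {
+        u32 burst = clamp(maxburst, 1U, EXAMPLE_MAX_BURST_LEN);
+
+        return __ilog2_u32(burst);  /* register expects the power-of-2 exponent */
+    }
+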
+Signed-off-by: Matthias Fend <matthias.fend@wolfvision.net>
+Link: https://lore.kernel.org/r/20200115102249.24398-1-matthias.fend@wolfvision.net
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/zynqmp_dma.c | 24 +++++++++++++++---------
+ 1 file changed, 15 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
+index 73de6a6179fcd..e002ff8413e2a 100644
+--- a/drivers/dma/xilinx/zynqmp_dma.c
++++ b/drivers/dma/xilinx/zynqmp_dma.c
+@@ -127,10 +127,12 @@
+ /* Max transfer size per descriptor */
+ #define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
+
++/* Max burst lengths */
++#define ZYNQMP_DMA_MAX_DST_BURST_LEN 32768U
++#define ZYNQMP_DMA_MAX_SRC_BURST_LEN 32768U
++
+ /* Reset values for data attributes */
+ #define ZYNQMP_DMA_AXCACHE_VAL 0xF
+-#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
+-#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
+
+ #define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
+
+@@ -536,17 +538,19 @@ static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
+
+ static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
+ {
+- u32 val;
++ u32 val, burst_val;
+
+ val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
+ val |= ZYNQMP_DMA_POINT_TYPE_SG;
+ writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
+
+ val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
++ burst_val = __ilog2_u32(chan->src_burst_len);
+ val = (val & ~ZYNQMP_DMA_ARLEN) |
+- (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
++ ((burst_val << ZYNQMP_DMA_ARLEN_OFST) & ZYNQMP_DMA_ARLEN);
++ burst_val = __ilog2_u32(chan->dst_burst_len);
+ val = (val & ~ZYNQMP_DMA_AWLEN) |
+- (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
++ ((burst_val << ZYNQMP_DMA_AWLEN_OFST) & ZYNQMP_DMA_AWLEN);
+ writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
+ }
+
+@@ -562,8 +566,10 @@ static int zynqmp_dma_device_config(struct dma_chan *dchan,
+ {
+ struct zynqmp_dma_chan *chan = to_chan(dchan);
+
+- chan->src_burst_len = config->src_maxburst;
+- chan->dst_burst_len = config->dst_maxburst;
++ chan->src_burst_len = clamp(config->src_maxburst, 1U,
++ ZYNQMP_DMA_MAX_SRC_BURST_LEN);
++ chan->dst_burst_len = clamp(config->dst_maxburst, 1U,
++ ZYNQMP_DMA_MAX_DST_BURST_LEN);
+
+ return 0;
+ }
+@@ -884,8 +890,8 @@ static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
+ return PTR_ERR(chan->regs);
+
+ chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
+- chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
+- chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
++ chan->dst_burst_len = ZYNQMP_DMA_MAX_DST_BURST_LEN;
++ chan->src_burst_len = ZYNQMP_DMA_MAX_SRC_BURST_LEN;
+ err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
+ if (err < 0) {
+ dev_err(&pdev->dev, "missing xlnx,bus-width property\n");
+--
+2.25.1
+
--- /dev/null
+From 3a56d4560107d36c2a8397778961140364a88e53 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Apr 2020 21:04:51 +0530
+Subject: drivers: char: tlclk.c: Avoid data race between init and interrupt
+ handler
+
+From: Madhuparna Bhowmik <madhuparnabhowmik10@gmail.com>
+
+[ Upstream commit 44b8fb6eaa7c3fb770bf1e37619cdb3902cca1fc ]
+
+After registering the character device, the file operation callbacks can be
+called. The open callback registers the interrupt handler.
+Therefore the interrupt handler can execute in parallel with the rest of the
+init function. To avoid such a data race, initialize the telclk_interrupt
+variable and struct alarm_events before registering the character device.
+
+Found by Linux Driver Verification project (linuxtesting.org).
+
+Signed-off-by: Madhuparna Bhowmik <madhuparnabhowmik10@gmail.com>
+Link: https://lore.kernel.org/r/20200417153451.1551-1-madhuparnabhowmik10@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/tlclk.c | 17 ++++++++++-------
+ 1 file changed, 10 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/char/tlclk.c b/drivers/char/tlclk.c
+index 8eeb4190207d1..dce22b7fc5449 100644
+--- a/drivers/char/tlclk.c
++++ b/drivers/char/tlclk.c
+@@ -776,17 +776,21 @@ static int __init tlclk_init(void)
+ {
+ int ret;
+
++ telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
++
++ alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
++ if (!alarm_events) {
++ ret = -ENOMEM;
++ goto out1;
++ }
++
+ ret = register_chrdev(tlclk_major, "telco_clock", &tlclk_fops);
+ if (ret < 0) {
+ printk(KERN_ERR "tlclk: can't get major %d.\n", tlclk_major);
++ kfree(alarm_events);
+ return ret;
+ }
+ tlclk_major = ret;
+- alarm_events = kzalloc( sizeof(struct tlclk_alarms), GFP_KERNEL);
+- if (!alarm_events) {
+- ret = -ENOMEM;
+- goto out1;
+- }
+
+ /* Read telecom clock IRQ number (Set by BIOS) */
+ if (!request_region(TLCLK_BASE, 8, "telco_clock")) {
+@@ -795,7 +799,6 @@ static int __init tlclk_init(void)
+ ret = -EBUSY;
+ goto out2;
+ }
+- telclk_interrupt = (inb(TLCLK_REG7) & 0x0f);
+
+ if (0x0F == telclk_interrupt ) { /* not MCPBL0010 ? */
+ printk(KERN_ERR "telclk_interrupt = 0x%x non-mcpbl0010 hw.\n",
+@@ -836,8 +839,8 @@ out3:
+ release_region(TLCLK_BASE, 8);
+ out2:
+ kfree(alarm_events);
+-out1:
+ unregister_chrdev(tlclk_major, "telco_clock");
++out1:
+ return ret;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From f48db46d3b18f18eb845de20a7cabac9ec89f6a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Jan 2020 14:46:53 -0500
+Subject: drm/amd/display: dal_ddc_i2c_payloads_create can fail causing panic
+
+From: Aric Cyr <aric.cyr@amd.com>
+
+[ Upstream commit 6a6c4a4d459ecacc9013c45dcbf2bc9747fdbdbd ]
+
+[Why]
+Since the i2c payload allocation can fail, we need to check return codes.
+
+[How]
+Clean up i2c payload allocations and check for errors
+
+Signed-off-by: Aric Cyr <aric.cyr@amd.com>
+Reviewed-by: Joshua Aberback <Joshua.Aberback@amd.com>
+Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Acked-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/amd/display/dc/core/dc_link_ddc.c | 52 +++++++++----------
+ 1 file changed, 25 insertions(+), 27 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+index 46c9cb47a96e5..145af3bb2dfcb 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+@@ -127,22 +127,16 @@ struct aux_payloads {
+ struct vector payloads;
+ };
+
+-static struct i2c_payloads *dal_ddc_i2c_payloads_create(struct dc_context *ctx, uint32_t count)
++static bool dal_ddc_i2c_payloads_create(
++ struct dc_context *ctx,
++ struct i2c_payloads *payloads,
++ uint32_t count)
+ {
+- struct i2c_payloads *payloads;
+-
+- payloads = kzalloc(sizeof(struct i2c_payloads), GFP_KERNEL);
+-
+- if (!payloads)
+- return NULL;
+-
+ if (dal_vector_construct(
+ &payloads->payloads, ctx, count, sizeof(struct i2c_payload)))
+- return payloads;
+-
+- kfree(payloads);
+- return NULL;
++ return true;
+
++ return false;
+ }
+
+ static struct i2c_payload *dal_ddc_i2c_payloads_get(struct i2c_payloads *p)
+@@ -155,14 +149,12 @@ static uint32_t dal_ddc_i2c_payloads_get_count(struct i2c_payloads *p)
+ return p->payloads.count;
+ }
+
+-static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads **p)
++static void dal_ddc_i2c_payloads_destroy(struct i2c_payloads *p)
+ {
+- if (!p || !*p)
++ if (!p)
+ return;
+- dal_vector_destruct(&(*p)->payloads);
+- kfree(*p);
+- *p = NULL;
+
++ dal_vector_destruct(&p->payloads);
+ }
+
+ static struct aux_payloads *dal_ddc_aux_payloads_create(struct dc_context *ctx, uint32_t count)
+@@ -580,9 +572,13 @@ bool dal_ddc_service_query_ddc_data(
+
+ uint32_t payloads_num = write_payloads + read_payloads;
+
++
+ if (write_size > EDID_SEGMENT_SIZE || read_size > EDID_SEGMENT_SIZE)
+ return false;
+
++ if (!payloads_num)
++ return false;
++
+ /*TODO: len of payload data for i2c and aux is uint8!!!!,
+ * but we want to read 256 over i2c!!!!*/
+ if (dal_ddc_service_is_in_aux_transaction_mode(ddc)) {
+@@ -613,23 +609,25 @@ bool dal_ddc_service_query_ddc_data(
+ dal_ddc_aux_payloads_destroy(&payloads);
+
+ } else {
+- struct i2c_payloads *payloads =
+- dal_ddc_i2c_payloads_create(ddc->ctx, payloads_num);
++ struct i2c_command command = {0};
++ struct i2c_payloads payloads;
+
+- struct i2c_command command = {
+- .payloads = dal_ddc_i2c_payloads_get(payloads),
+- .number_of_payloads = 0,
+- .engine = DDC_I2C_COMMAND_ENGINE,
+- .speed = ddc->ctx->dc->caps.i2c_speed_in_khz };
++ if (!dal_ddc_i2c_payloads_create(ddc->ctx, &payloads, payloads_num))
++ return false;
++
++ command.payloads = dal_ddc_i2c_payloads_get(&payloads);
++ command.number_of_payloads = 0;
++ command.engine = DDC_I2C_COMMAND_ENGINE;
++ command.speed = ddc->ctx->dc->caps.i2c_speed_in_khz;
+
+ dal_ddc_i2c_payloads_add(
+- payloads, address, write_size, write_buf, true);
++ &payloads, address, write_size, write_buf, true);
+
+ dal_ddc_i2c_payloads_add(
+- payloads, address, read_size, read_buf, false);
++ &payloads, address, read_size, read_buf, false);
+
+ command.number_of_payloads =
+- dal_ddc_i2c_payloads_get_count(payloads);
++ dal_ddc_i2c_payloads_get_count(&payloads);
+
+ ret = dm_helpers_submit_i2c(
+ ddc->ctx,
+--
+2.25.1
+
--- /dev/null
+From b5ec1bfcc148f339720845cddfaccd697f113a57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2020 10:13:37 -0500
+Subject: drm/amd/display: Stop if retimer is not available
+
+From: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+
+[ Upstream commit a0e40018dcc3f59a10ca21d58f8ea8ceb1b035ac ]
+
+Raven provides retimer feature support that requires i2c interaction in
+order to work well; all settings required for this configuration
+are loaded from the Atom bios, which includes the i2c address. If the
+retimer feature is not available, we should abort the attempt to set
+this feature; otherwise, it makes the following line return
+I2C_CHANNEL_OPERATION_NO_RESPONSE:
+
+ i2c_success = i2c_write(pipe_ctx, slave_address, buffer, sizeof(buffer));
+ ...
+ if (!i2c_success)
+ ASSERT(i2c_success);
+
+This ends up causing problems with hotplugging HDMI displays on Raven,
+and causes retimer settings to warn like so:
+
+WARNING: CPU: 1 PID: 429 at
+drivers/gpu/drm/amd/amdgpu/../dal/dc/core/dc_link.c:1998
+write_i2c_retimer_setting+0xc2/0x3c0 [amdgpu] Modules linked in:
+edac_mce_amd ccp kvm irqbypass binfmt_misc crct10dif_pclmul crc32_pclmul
+ghash_clmulni_intel snd_hda_codec_realtek snd_hda_codec_generic
+ledtrig_audio snd_hda_codec_hdmi snd_hda_intel amdgpu(+) snd_hda_codec
+snd_hda_core snd_hwdep snd_pcm snd_seq_midi snd_seq_midi_event
+snd_rawmidi aesni_intel snd_seq amd_iommu_v2 gpu_sched aes_x86_64
+crypto_simd cryptd glue_helper snd_seq_device ttm drm_kms_helper
+snd_timer eeepc_wmi wmi_bmof asus_wmi sparse_keymap drm mxm_wmi snd
+k10temp fb_sys_fops syscopyarea sysfillrect sysimgblt soundcore joydev
+input_leds mac_hid sch_fq_codel parport_pc ppdev lp parport ip_tables
+x_tables autofs4 igb i2c_algo_bit hid_generic usbhid i2c_piix4 dca ahci
+hid libahci video wmi gpio_amdpt gpio_generic CPU: 1 PID: 429 Comm:
+systemd-udevd Tainted: G W 5.2.0-rc1sept162019+ #1
+Hardware name: System manufacturer System Product Name/ROG STRIX B450-F
+GAMING, BIOS 2605 08/06/2019
+RIP: 0010:write_i2c_retimer_setting+0xc2/0x3c0 [amdgpu]
+Code: ff 0f b6 4d ce 44 0f b6 45 cf 44 0f b6 c8 45 89 cf 44 89 e2 48 c7
+c6 f0 34 bc c0 bf 04 00 00 00 e8 63 b0 90 ff 45 84 ff 75 02 <0f> 0b 42
+0f b6 04 73 8d 50 f6 80 fa 02 77 8c 3c 0a 0f 85 c8 00 00 RSP:
+0018:ffffa99d02726fd0 EFLAGS: 00010246
+RAX: 0000000000000000 RBX: ffffa99d02727035 RCX: 0000000000000006
+RDX: 0000000000000000 RSI: 0000000000000002 RDI: ffff976acc857440
+RBP: ffffa99d02727018 R08: 0000000000000002 R09: 000000000002a600
+R10: ffffe90610193680 R11: 00000000000005e3 R12: 000000000000005d
+R13: ffff976ac4b201b8 R14: 0000000000000001 R15: 0000000000000000
+FS: 00007f14f99e1680(0000) GS:ffff976acc840000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007fdf212843b8 CR3: 0000000408906000 CR4: 00000000003406e0
+Call Trace:
+ core_link_enable_stream+0x626/0x680 [amdgpu]
+ dce110_apply_ctx_to_hw+0x414/0x4e0 [amdgpu]
+ dc_commit_state+0x331/0x5e0 [amdgpu]
+ ? drm_calc_timestamping_constants+0xf9/0x150 [drm]
+ amdgpu_dm_atomic_commit_tail+0x395/0x1e00 [amdgpu]
+ ? dm_plane_helper_prepare_fb+0x20c/0x280 [amdgpu]
+ commit_tail+0x42/0x70 [drm_kms_helper]
+ drm_atomic_helper_commit+0x10c/0x120 [drm_kms_helper]
+ amdgpu_dm_atomic_commit+0x95/0xa0 [amdgpu]
+ drm_atomic_commit+0x4a/0x50 [drm]
+ restore_fbdev_mode_atomic+0x1c0/0x1e0 [drm_kms_helper]
+ restore_fbdev_mode+0x4c/0x160 [drm_kms_helper]
+ ? _cond_resched+0x19/0x40
+ drm_fb_helper_restore_fbdev_mode_unlocked+0x4e/0xa0 [drm_kms_helper]
+ drm_fb_helper_set_par+0x2d/0x50 [drm_kms_helper]
+ fbcon_init+0x471/0x630
+ visual_init+0xd5/0x130
+ do_bind_con_driver+0x20a/0x430
+ do_take_over_console+0x7d/0x1b0
+ do_fbcon_takeover+0x5c/0xb0
+ fbcon_event_notify+0x6cd/0x8a0
+ notifier_call_chain+0x4c/0x70
+ blocking_notifier_call_chain+0x43/0x60
+ fb_notifier_call_chain+0x1b/0x20
+ register_framebuffer+0x254/0x360
+ __drm_fb_helper_initial_config_and_unlock+0x2c5/0x510 [drm_kms_helper]
+ drm_fb_helper_initial_config+0x35/0x40 [drm_kms_helper]
+ amdgpu_fbdev_init+0xcd/0x100 [amdgpu]
+ amdgpu_device_init+0x1156/0x1930 [amdgpu]
+ amdgpu_driver_load_kms+0x8d/0x2e0 [amdgpu]
+ drm_dev_register+0x12b/0x1c0 [drm]
+ amdgpu_pci_probe+0xd3/0x160 [amdgpu]
+ local_pci_probe+0x47/0xa0
+ pci_device_probe+0x142/0x1b0
+ really_probe+0xf5/0x3d0
+ driver_probe_device+0x11b/0x130
+ device_driver_attach+0x58/0x60
+ __driver_attach+0xa3/0x140
+ ? device_driver_attach+0x60/0x60
+ ? device_driver_attach+0x60/0x60
+ bus_for_each_dev+0x74/0xb0
+ ? kmem_cache_alloc_trace+0x1a3/0x1c0
+ driver_attach+0x1e/0x20
+ bus_add_driver+0x147/0x220
+ ? 0xffffffffc0cb9000
+ driver_register+0x60/0x100
+ ? 0xffffffffc0cb9000
+ __pci_register_driver+0x5a/0x60
+ amdgpu_init+0x74/0x83 [amdgpu]
+ do_one_initcall+0x4a/0x1fa
+ ? _cond_resched+0x19/0x40
+ ? kmem_cache_alloc_trace+0x3f/0x1c0
+ ? __vunmap+0x1cc/0x200
+ do_init_module+0x5f/0x227
+ load_module+0x2330/0x2b40
+ __do_sys_finit_module+0xfc/0x120
+ ? __do_sys_finit_module+0xfc/0x120
+ __x64_sys_finit_module+0x1a/0x20
+ do_syscall_64+0x5a/0x130
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+RIP: 0033:0x7f14f9500839
+Code: 00 f3 c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89
+f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01
+f0 ff ff 73 01 c3 48 8b 0d 1f f6 2c 00 f7 d8 64 89 01 48
+RSP: 002b:00007fff9bc4f5a8 EFLAGS: 00000246 ORIG_RAX: 0000000000000139
+RAX: ffffffffffffffda RBX: 000055afb5abce30 RCX: 00007f14f9500839
+RDX: 0000000000000000 RSI: 000055afb5ace0f0 RDI: 0000000000000017
+RBP: 000055afb5ace0f0 R08: 0000000000000000 R09: 000000000000000a
+R10: 0000000000000017 R11: 0000000000000246 R12: 0000000000000000
+R13: 000055afb5aad800 R14: 0000000000020000 R15: 0000000000000000
+---[ end trace c286e96563966f08 ]---
+
+This commit reworks the way that we handle the i2c write for the retimer
+so that we abort this configuration if the feature is not available in
+the device. For debugging's sake, we keep a simple log message in case the
+retimer is not available.
+
+Signed-off-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Reviewed-by: Hersen Wu <hersenxs.wu@amd.com>
+Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 67 ++++++++-----------
+ 1 file changed, 29 insertions(+), 38 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+index 3abc0294c05f5..2fb2c683ad54b 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+@@ -1576,8 +1576,7 @@ static void write_i2c_retimer_setting(
+ buffer, sizeof(buffer));
+
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+ /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ * needs to be set to 1 on every 0xA-0xC write.
+@@ -1595,8 +1594,7 @@ static void write_i2c_retimer_setting(
+ pipe_ctx->stream->sink->link->ddc,
+ slave_address, &offset, 1, &value, 1);
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+ }
+
+ buffer[0] = offset;
+@@ -1605,8 +1603,7 @@ static void write_i2c_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+ }
+ }
+ }
+@@ -1623,8 +1620,7 @@ static void write_i2c_retimer_setting(
+ buffer, sizeof(buffer));
+
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+ /* Based on DP159 specs, APPLY_RX_TX_CHANGE bit in 0x0A
+ * needs to be set to 1 on every 0xA-0xC write.
+@@ -1642,8 +1638,7 @@ static void write_i2c_retimer_setting(
+ pipe_ctx->stream->sink->link->ddc,
+ slave_address, &offset, 1, &value, 1);
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+ }
+
+ buffer[0] = offset;
+@@ -1652,8 +1647,7 @@ static void write_i2c_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+ }
+ }
+ }
+@@ -1668,8 +1662,7 @@ static void write_i2c_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+ /* Write offset 0x00 to 0x23 */
+ buffer[0] = 0x00;
+@@ -1677,8 +1670,7 @@ static void write_i2c_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+ /* Write offset 0xff to 0x00 */
+ buffer[0] = 0xff;
+@@ -1686,10 +1678,14 @@ static void write_i2c_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+ }
++
++ return;
++
++i2c_write_fail:
++ DC_LOG_DEBUG("Set retimer failed");
+ }
+
+ static void write_i2c_default_retimer_setting(
+@@ -1710,8 +1706,7 @@ static void write_i2c_default_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+@@ -1719,8 +1714,7 @@ static void write_i2c_default_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+ /* Write offset 0x0B to 0xDA or 0xD8 */
+ buffer[0] = 0x0B;
+@@ -1728,8 +1722,7 @@ static void write_i2c_default_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+@@ -1737,8 +1730,7 @@ static void write_i2c_default_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+ /* Write offset 0x0C to 0x1D or 0x91 */
+ buffer[0] = 0x0C;
+@@ -1746,8 +1738,7 @@ static void write_i2c_default_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+ /* Write offset 0x0A to 0x17 */
+ buffer[0] = 0x0A;
+@@ -1755,8 +1746,7 @@ static void write_i2c_default_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+
+ if (is_vga_mode) {
+@@ -1768,8 +1758,7 @@ static void write_i2c_default_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+ /* Write offset 0x00 to 0x23 */
+ buffer[0] = 0x00;
+@@ -1777,8 +1766,7 @@ static void write_i2c_default_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+
+ /* Write offset 0xff to 0x00 */
+ buffer[0] = 0xff;
+@@ -1786,9 +1774,13 @@ static void write_i2c_default_retimer_setting(
+ i2c_success = i2c_write(pipe_ctx, slave_address,
+ buffer, sizeof(buffer));
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ goto i2c_write_fail;
+ }
++
++ return;
++
++i2c_write_fail:
++ DC_LOG_DEBUG("Set default retimer failed");
+ }
+
+ static void write_i2c_redriver_setting(
+@@ -1811,8 +1803,7 @@ static void write_i2c_redriver_setting(
+ buffer, sizeof(buffer));
+
+ if (!i2c_success)
+- /* Write failure */
+- ASSERT(i2c_success);
++ DC_LOG_DEBUG("Set redriver failed");
+ }
+
+ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
+--
+2.25.1
+
--- /dev/null
+From 0ce1e674c61a6e0ed3b750d7d548a7983e6080fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2020 17:48:56 +0800
+Subject: drm/amdgpu: increase atombios cmd timeout
+
+From: John Clements <john.clements@amd.com>
+
+[ Upstream commit 1b3460a8b19688ad3033b75237d40fa580a5a953 ]
+
+Mitigates a race condition on BACO reset between the GPU bootcode and
+driver reload.
+
+Reviewed-by: Hawking Zhang <Hawking.Zhang@amd.com>
+Signed-off-by: John Clements <john.clements@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/atom.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/atom.c b/drivers/gpu/drm/amd/amdgpu/atom.c
+index e9934de1b9cf8..0222bb7ea49b4 100644
+--- a/drivers/gpu/drm/amd/amdgpu/atom.c
++++ b/drivers/gpu/drm/amd/amdgpu/atom.c
+@@ -742,8 +742,8 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
+ cjiffies = jiffies;
+ if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+ cjiffies -= ctx->last_jump_jiffies;
+- if ((jiffies_to_msecs(cjiffies) > 5000)) {
+- DRM_ERROR("atombios stuck in loop for more than 5secs aborting\n");
++ if ((jiffies_to_msecs(cjiffies) > 10000)) {
++ DRM_ERROR("atombios stuck in loop for more than 10secs aborting\n");
+ ctx->abort = true;
+ }
+ } else {
+--
+2.25.1
+
--- /dev/null
+From f7730999b82ceaba79168e9bf913d1e396732e05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 Nov 2019 09:50:18 -0500
+Subject: drm/amdgpu/powerplay: fix AVFS handling with custom powerplay table
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 53dbc27ad5a93932ff1892a8e4ef266827d74a0f ]
+
+When a custom powerplay table is provided, we need to update
+the OD VDDC flag to avoid AVFS being enabled when it shouldn't be.
+
+Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205393
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+index ce459ea4ec3ad..da9e6923fa659 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+@@ -3591,6 +3591,13 @@ static int vega10_set_power_state_tasks(struct pp_hwmgr *hwmgr,
+ PP_ASSERT_WITH_CODE(!result,
+ "Failed to upload PPtable!", return result);
+
++ /*
++ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
++ * That effectively disables AVFS feature.
++ */
++ if(hwmgr->hardcode_pp_table != NULL)
++ data->need_update_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
++
+ vega10_update_avfs(hwmgr);
+
+ data->need_update_dpm_table &= DPMTABLE_OD_UPDATE_VDDC;
+--
+2.25.1
+
--- /dev/null
+From 45f399ec680515a6e13b9a2a7b7e3dae1cfcd761 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 Nov 2019 11:15:17 -0500
+Subject: drm/amdgpu/powerplay/smu7: fix AVFS handling with custom powerplay
+ table
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit 901245624c7812b6c95d67177bae850e783b5212 ]
+
+When a custom powerplay table is provided, we need to update
+the OD VDDC flag to avoid AVFS being enabled when it shouldn't be.
+
+Bug: https://bugzilla.kernel.org/show_bug.cgi?id=205393
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+index 72c0a2ae2dd4f..058898b321b8a 100644
+--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
++++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_hwmgr.c
+@@ -3970,6 +3970,13 @@ static int smu7_set_power_state_tasks(struct pp_hwmgr *hwmgr, const void *input)
+ "Failed to populate and upload SCLK MCLK DPM levels!",
+ result = tmp_result);
+
++ /*
++ * If a custom pp table is loaded, set DPMTABLE_OD_UPDATE_VDDC flag.
++ * That effectively disables AVFS feature.
++ */
++ if (hwmgr->hardcode_pp_table != NULL)
++ data->need_update_smu7_dpm_table |= DPMTABLE_OD_UPDATE_VDDC;
++
+ tmp_result = smu7_update_avfs(hwmgr);
+ PP_ASSERT_WITH_CODE((0 == tmp_result),
+ "Failed to update avfs voltages!",
+--
+2.25.1
+
--- /dev/null
+From 2df3023c53ba5723e5b9dcfaab4c31984a1febc5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 2 Sep 2020 17:11:09 +0800
+Subject: drm/amdkfd: fix a memory leak issue
+
+From: Dennis Li <Dennis.Li@amd.com>
+
+[ Upstream commit 087d764159996ae378b08c0fdd557537adfd6899 ]
+
+In the resume stage of GPU recovery, start_cpsch will call pm_init,
+which sets pm->allocated to false, so the next pm_release_ib has
+no chance to release the ib memory.
+
+Add pm_release_ib in stop_cpsch which will be called in the suspend
+stage of GPU recovery.
+
+Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Signed-off-by: Dennis Li <Dennis.Li@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+index 189212cb35475..bff39f561264e 100644
+--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
++++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+@@ -1101,6 +1101,8 @@ static int stop_cpsch(struct device_queue_manager *dqm)
+ unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ dqm_unlock(dqm);
+
++ pm_release_ib(&dqm->packets);
++
+ kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
+ pm_uninit(&dqm->packets);
+
+--
+2.25.1
+
--- /dev/null
+From c46de9fd90b779cb8013e10edbf427255e89abb0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Feb 2020 11:36:44 -0700
+Subject: drm/msm/a5xx: Always set an OPP supported hardware value
+
+From: Jordan Crouse <jcrouse@codeaurora.org>
+
+[ Upstream commit 0478b4fc5f37f4d494245fe7bcce3f531cf380e9 ]
+
+If the opp table specifies opp-supported-hw as a property but the driver
+has not set a supported hardware value, the OPP subsystem will reject
+all the table entries.
+
+Set a "default" value that will match the default table entries but not
+conflict with any possible real bin values. Also fix a small memory leak
+and free the buffer allocated by nvmem_cell_read().
+
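+A minimal sketch, not part of the patch, of the pattern (the helper name and
+the 0x80 default are illustrative): always program a supported-hw value, and
+free the buffer that nvmem_cell_read() allocates.
+
+    #include <linux/nvmem-consumer.h>
+    #include <linux/pm_opp.h>
+    #include <linux/slab.h>
+    #include <linux/err.h>
+
+    static void example_set_supported_hw(struct device *dev)
+    {
+        u32 val = 0x80; /* default that matches no real speed bin */
+        struct nvmem_cell *cell = nvmem_cell_get(dev, "speed_bin");
+
+        if (!IS_ERR(cell)) {
+            u8 *buf = nvmem_cell_read(cell, NULL);
+
+            if (!IS_ERR(buf)) {
+                val = 1 << *buf;    /* assumes the fused bin is < 32 */
+                kfree(buf);         /* nvmem_cell_read() allocates the buffer */
+            }
+            nvmem_cell_put(cell);
+        }
+
+        dev_pm_opp_set_supported_hw(dev, &val, 1);
+    }
+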
+Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
+Reviewed-by: Eric Anholt <eric@anholt.net>
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 27 ++++++++++++++++++++-------
+ 1 file changed, 20 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+index 1fc9a7fa37b45..d29a58bd2f7a3 100644
+--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
++++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+@@ -1474,18 +1474,31 @@ static const struct adreno_gpu_funcs funcs = {
+ static void check_speed_bin(struct device *dev)
+ {
+ struct nvmem_cell *cell;
+- u32 bin, val;
++ u32 val;
++
++ /*
++ * If the OPP table specifies a opp-supported-hw property then we have
++ * to set something with dev_pm_opp_set_supported_hw() or the table
++ * doesn't get populated so pick an arbitrary value that should
++ * ensure the default frequencies are selected but not conflict with any
++ * actual bins
++ */
++ val = 0x80;
+
+ cell = nvmem_cell_get(dev, "speed_bin");
+
+- /* If a nvmem cell isn't defined, nothing to do */
+- if (IS_ERR(cell))
+- return;
++ if (!IS_ERR(cell)) {
++ void *buf = nvmem_cell_read(cell, NULL);
++
++ if (!IS_ERR(buf)) {
++ u8 bin = *((u8 *) buf);
+
+- bin = *((u32 *) nvmem_cell_read(cell, NULL));
+- nvmem_cell_put(cell);
++ val = (1 << bin);
++ kfree(buf);
++ }
+
+- val = (1 << bin);
++ nvmem_cell_put(cell);
++ }
+
+ dev_pm_opp_set_supported_hw(dev, &val, 1);
+ }
+--
+2.25.1
+
--- /dev/null
+From d430567390c194a0bb3a27125686f10921cde603 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Mar 2020 11:14:10 +0100
+Subject: drm/msm: fix leaks if initialization fails
+
+From: Pavel Machek <pavel@denx.de>
+
+[ Upstream commit 66be340f827554cb1c8a1ed7dea97920b4085af2 ]
+
+We should free resources in the unlikely case of allocation failure.
+
+Signed-off-by: Pavel Machek <pavel@denx.de>
+Signed-off-by: Rob Clark <robdclark@chromium.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/msm/msm_drv.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
+index 7f45486b6650b..3ba3ae9749bec 100644
+--- a/drivers/gpu/drm/msm/msm_drv.c
++++ b/drivers/gpu/drm/msm/msm_drv.c
+@@ -495,8 +495,10 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
+ if (!dev->dma_parms) {
+ dev->dma_parms = devm_kzalloc(dev, sizeof(*dev->dma_parms),
+ GFP_KERNEL);
+- if (!dev->dma_parms)
+- return -ENOMEM;
++ if (!dev->dma_parms) {
++ ret = -ENOMEM;
++ goto err_msm_uninit;
++ }
+ }
+ dma_set_max_seg_size(dev, DMA_BIT_MASK(32));
+
+--
+2.25.1
+
--- /dev/null
+From e5bd67dcaa3eee5f650298d168ad0820206f82cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 May 2020 18:14:53 +0800
+Subject: drm/nouveau/debugfs: fix runtime pm imbalance on error
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit 00583fbe8031f69bba8b0a9a861efb75fb7131af ]
+
+pm_runtime_get_sync() increments the runtime PM usage counter even when
+the call returns an error code. Thus a pairing decrement is needed
+on the error handling path to keep the counter balanced.
+
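+A minimal sketch, not part of the patch, of the balanced error path (the
+function name is illustrative): the usage count taken by
+pm_runtime_get_sync() is dropped before bailing out, and again after the
+work is done.
+
+    #include <linux/pm_runtime.h>
+    #include <linux/errno.h>
+
+    static int example_runtime_op(struct device *dev)
+    {
+        int ret = pm_runtime_get_sync(dev);
+
+        if (ret < 0 && ret != -EACCES) {
+            pm_runtime_put_autosuspend(dev);    /* undo the failed get */
+            return ret;
+        }
+
+        /* ... access the hardware ... */
+
+        pm_runtime_put_autosuspend(dev);
+        return 0;
+    }
+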
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_debugfs.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_debugfs.c b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+index 9635704a1d864..4561a786fab07 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_debugfs.c
++++ b/drivers/gpu/drm/nouveau/nouveau_debugfs.c
+@@ -161,8 +161,11 @@ nouveau_debugfs_pstate_set(struct file *file, const char __user *ubuf,
+ }
+
+ ret = pm_runtime_get_sync(drm->dev);
+- if (ret < 0 && ret != -EACCES)
++ if (ret < 0 && ret != -EACCES) {
++ pm_runtime_put_autosuspend(drm->dev);
+ return ret;
++ }
++
+ ret = nvif_mthd(ctrl, NVIF_CONTROL_PSTATE_USER, &args, sizeof(args));
+ pm_runtime_put_autosuspend(drm->dev);
+ if (ret < 0)
+--
+2.25.1
+
--- /dev/null
+From e1b517ac0fe665fae1f3f2f4f57c8dc3d6cb3251 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 May 2020 18:47:48 +0800
+Subject: drm/nouveau/dispnv50: fix runtime pm imbalance on error
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit dc455f4c888365595c0a13da445e092422d55b8d ]
+
+pm_runtime_get_sync() increments the runtime PM usage counter even when
+the call returns an error code. Thus a pairing decrement is needed
+on the error handling path to keep the counter balanced.
+
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/dispnv50/disp.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+index e06ea8c8184cb..1bb0a9f6fa730 100644
+--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
++++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
+@@ -909,8 +909,10 @@ nv50_mstc_detect(struct drm_connector *connector, bool force)
+ return connector_status_disconnected;
+
+ ret = pm_runtime_get_sync(connector->dev->dev);
+- if (ret < 0 && ret != -EACCES)
++ if (ret < 0 && ret != -EACCES) {
++ pm_runtime_put_autosuspend(connector->dev->dev);
+ return connector_status_disconnected;
++ }
+
+ conn_status = drm_dp_mst_detect_port(connector, mstc->port->mgr,
+ mstc->port);
+--
+2.25.1
+
--- /dev/null
+From dee0ebbb0ef55c38a388b5246a787b3cce7429ee Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 May 2020 18:25:49 +0800
+Subject: drm/nouveau: fix runtime pm imbalance on error
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit d7372dfb3f7f1602b87e0663e8b8646da23ebca7 ]
+
+pm_runtime_get_sync() increments the runtime PM usage counter even when
+the call returns an error code. Thus a pairing decrement is needed
+on the error handling path to keep the counter balanced.
+
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/nouveau/nouveau_gem.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
+index 791f970714ed6..a98fccb0d32f9 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
++++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
+@@ -82,8 +82,10 @@ nouveau_gem_object_open(struct drm_gem_object *gem, struct drm_file *file_priv)
+ return ret;
+
+ ret = pm_runtime_get_sync(dev);
+- if (ret < 0 && ret != -EACCES)
++ if (ret < 0 && ret != -EACCES) {
++ pm_runtime_put_autosuspend(dev);
+ goto out;
++ }
+
+ ret = nouveau_vma_new(nvbo, &cli->vmm, &vma);
+ pm_runtime_mark_last_busy(dev);
+--
+2.25.1
+
--- /dev/null
+From 75582bdafb512c2d3a7c312d817a851313796388 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Apr 2019 10:58:32 +0800
+Subject: drm/omap: fix possible object reference leak
+
+From: Wen Yang <wen.yang99@zte.com.cn>
+
+[ Upstream commit 47340e46f34a3b1d80e40b43ae3d7a8da34a3541 ]
+
+The call to of_find_matching_node returns a node pointer with the refcount
+incremented, thus it must be explicitly decremented after the last
+usage.
+
+Detected by coccinelle with the following warnings:
+drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c:212:2-8: ERROR: missing of_node_put; acquired a node pointer with refcount incremented on line 209, but without a corresponding object release within this function.
+drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c:237:1-7: ERROR: missing of_node_put; acquired a node pointer with refcount incremented on line 209, but without a corresponding object release within this function.
+
+Signed-off-by: Wen Yang <wen.yang99@zte.com.cn>
+Reviewed-by: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Reviewed-by: Mukesh Ojha <mojha@codeaurora.org>
+Cc: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Cc: David Airlie <airlied@linux.ie>
+Cc: Daniel Vetter <daniel@ffwll.ch>
+Cc: Sebastian Reichel <sebastian.reichel@collabora.com>
+Cc: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
+Cc: dri-devel@lists.freedesktop.org
+Cc: linux-kernel@vger.kernel.org
+Cc: Markus Elfring <Markus.Elfring@web.de>
+Signed-off-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/1554692313-28882-2-git-send-email-wen.yang99@zte.com.cn
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+index 3bfb95d230e0e..d8fb686c1fda9 100644
+--- a/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
++++ b/drivers/gpu/drm/omapdrm/dss/omapdss-boot-init.c
+@@ -193,7 +193,7 @@ static int __init omapdss_boot_init(void)
+ dss = of_find_matching_node(NULL, omapdss_of_match);
+
+ if (dss == NULL || !of_device_is_available(dss))
+- return 0;
++ goto put_node;
+
+ omapdss_walk_device(dss, true);
+
+@@ -218,6 +218,8 @@ static int __init omapdss_boot_init(void)
+ kfree(n);
+ }
+
++put_node:
++ of_node_put(dss);
+ return 0;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 632ca693f594810c8194a8b40fcd7e341646f048 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2020 15:35:34 +0200
+Subject: dt-bindings: sound: wm8994: Correct required supplies based on actual
+ implementation
+
+From: Krzysztof Kozlowski <krzk@kernel.org>
+
+[ Upstream commit 8c149b7d75e53be47648742f40fc90d9fc6fa63a ]
+
+The required supplies in the bindings did not actually match the
+implementation, making the bindings incorrect and misleading. The Linux
+kernel driver requires all supplies to be present. Also, wlf,wm8994 uses
+just DBVDD-supply instead of DBVDDn-supply (n: <1,3>).
+
+Reported-by: Jonathan Bakker <xc-racer2@live.ca>
+Signed-off-by: Krzysztof Kozlowski <krzk@kernel.org>
+Link: https://lore.kernel.org/r/20200501133534.6706-1-krzk@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../devicetree/bindings/sound/wm8994.txt | 18 ++++++++++++------
+ 1 file changed, 12 insertions(+), 6 deletions(-)
+
+diff --git a/Documentation/devicetree/bindings/sound/wm8994.txt b/Documentation/devicetree/bindings/sound/wm8994.txt
+index 68cccc4653ba3..367b58ce1bb92 100644
+--- a/Documentation/devicetree/bindings/sound/wm8994.txt
++++ b/Documentation/devicetree/bindings/sound/wm8994.txt
+@@ -14,9 +14,15 @@ Required properties:
+ - #gpio-cells : Must be 2. The first cell is the pin number and the
+ second cell is used to specify optional parameters (currently unused).
+
+- - AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply, CPVDD-supply,
+- SPKVDD1-supply, SPKVDD2-supply : power supplies for the device, as covered
+- in Documentation/devicetree/bindings/regulator/regulator.txt
++ - power supplies for the device, as covered in
++ Documentation/devicetree/bindings/regulator/regulator.txt, depending
++ on compatible:
++ - for wlf,wm1811 and wlf,wm8958:
++ AVDD1-supply, AVDD2-supply, DBVDD1-supply, DBVDD2-supply, DBVDD3-supply,
++ DCVDD-supply, CPVDD-supply, SPKVDD1-supply, SPKVDD2-supply
++ - for wlf,wm8994:
++ AVDD1-supply, AVDD2-supply, DBVDD-supply, DCVDD-supply, CPVDD-supply,
++ SPKVDD1-supply, SPKVDD2-supply
+
+ Optional properties:
+
+@@ -73,11 +79,11 @@ wm8994: codec@1a {
+
+ lineout1-se;
+
++ AVDD1-supply = <®ulator>;
+ AVDD2-supply = <®ulator>;
+ CPVDD-supply = <®ulator>;
+- DBVDD1-supply = <®ulator>;
+- DBVDD2-supply = <®ulator>;
+- DBVDD3-supply = <®ulator>;
++ DBVDD-supply = <®ulator>;
++ DCVDD-supply = <®ulator>;
+ SPKVDD1-supply = <®ulator>;
+ SPKVDD2-supply = <®ulator>;
+ };
+--
+2.25.1
+
--- /dev/null
+From 2b16d89d9a63b5008cb9df08e10efb5b3e312169 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Apr 2020 09:35:31 -0700
+Subject: e1000: Do not perform reset in reset_task if we are already down
+
+From: Alexander Duyck <alexander.h.duyck@linux.intel.com>
+
+[ Upstream commit 49ee3c2ab5234757bfb56a0b3a3cb422f427e3a3 ]
+
+We are seeing a deadlock in e1000 down when NAPI is being disabled. Looking
+over the kernel function trace of the system, it appears that the interface
+is being closed and then a reset hits, which deadlocks the interface
+because NAPI is already disabled.
+
+To prevent this from happening I am disabling the reset task when
+__E1000_DOWN is already set. In addition, code has been added so that we set
+__E1000_DOWN while holding the __E1000_RESETTING flag in e1000_close in
+order to guarantee that the reset task will not run after we have started
+the close call.
+
+Signed-off-by: Alexander Duyck <alexander.h.duyck@linux.intel.com>
+Tested-by: Maxim Zhukov <mussitantesmortem@gmail.com>
+Signed-off-by: Jeff Kirsher <jeffrey.t.kirsher@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/intel/e1000/e1000_main.c | 18 ++++++++++++++----
+ 1 file changed, 14 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
+index 47b867c64b147..195108858f38f 100644
+--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
++++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
+@@ -542,8 +542,13 @@ void e1000_reinit_locked(struct e1000_adapter *adapter)
+ WARN_ON(in_interrupt());
+ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+ msleep(1);
+- e1000_down(adapter);
+- e1000_up(adapter);
++
++ /* only run the task if not already down */
++ if (!test_bit(__E1000_DOWN, &adapter->flags)) {
++ e1000_down(adapter);
++ e1000_up(adapter);
++ }
++
+ clear_bit(__E1000_RESETTING, &adapter->flags);
+ }
+
+@@ -1433,10 +1438,15 @@ int e1000_close(struct net_device *netdev)
+ struct e1000_hw *hw = &adapter->hw;
+ int count = E1000_CHECK_RESET_COUNT;
+
+- while (test_bit(__E1000_RESETTING, &adapter->flags) && count--)
++ while (test_and_set_bit(__E1000_RESETTING, &adapter->flags) && count--)
+ usleep_range(10000, 20000);
+
+- WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
++ WARN_ON(count < 0);
++
++ /* signal that we're down so that the reset task will no longer run */
++ set_bit(__E1000_DOWN, &adapter->flags);
++ clear_bit(__E1000_RESETTING, &adapter->flags);
++
+ e1000_down(adapter);
+ e1000_power_down_phy(adapter);
+ e1000_free_irq(adapter);
+--
+2.25.1
+
--- /dev/null
+From a95869e9e510db547dc154e6101ec628129f7ab1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Feb 2020 23:02:46 +0800
+Subject: ext4: fix a data race at inode->i_disksize
+
+From: Qiujun Huang <hqjagain@gmail.com>
+
+[ Upstream commit dce8e237100f60c28cc66effb526ba65a01d8cb3 ]
+
+KCSAN found that inode->i_disksize could be accessed concurrently.
+
+BUG: KCSAN: data-race in ext4_mark_iloc_dirty / ext4_write_end
+
+write (marked) to 0xffff8b8932f40090 of 8 bytes by task 66792 on cpu 0:
+ ext4_write_end+0x53f/0x5b0
+ ext4_da_write_end+0x237/0x510
+ generic_perform_write+0x1c4/0x2a0
+ ext4_buffered_write_iter+0x13a/0x210
+ ext4_file_write_iter+0xe2/0x9b0
+ new_sync_write+0x29c/0x3a0
+ __vfs_write+0x92/0xa0
+ vfs_write+0xfc/0x2a0
+ ksys_write+0xe8/0x140
+ __x64_sys_write+0x4c/0x60
+ do_syscall_64+0x8a/0x2a0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+read to 0xffff8b8932f40090 of 8 bytes by task 14414 on cpu 1:
+ ext4_mark_iloc_dirty+0x716/0x1190
+ ext4_mark_inode_dirty+0xc9/0x360
+ ext4_convert_unwritten_extents+0x1bc/0x2a0
+ ext4_convert_unwritten_io_end_vec+0xc5/0x150
+ ext4_put_io_end+0x82/0x130
+ ext4_writepages+0xae7/0x16f0
+ do_writepages+0x64/0x120
+ __writeback_single_inode+0x7d/0x650
+ writeback_sb_inodes+0x3a4/0x860
+ __writeback_inodes_wb+0xc4/0x150
+ wb_writeback+0x43f/0x510
+ wb_workfn+0x3b2/0x8a0
+ process_one_work+0x39b/0x7e0
+ worker_thread+0x88/0x650
+ kthread+0x1d4/0x1f0
+ ret_from_fork+0x35/0x40
+
+The plain read is outside of the inode->i_data_sem critical section,
+which results in a data race. Fix it by adding READ_ONCE().
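+
+As a minimal generic sketch of the annotation pattern (placeholder names,
+not the ext4 code itself), the lockless reader is marked so the compiler
+cannot tear or refetch the load, and KCSAN treats the access as intentional:
+
+	/* writer side, under the protecting lock */
+	WRITE_ONCE(obj->size, new_size);
+
+	/* lockless reader side */
+	if (READ_ONCE(obj->size) != cached_size)
+		resync(obj);	/* placeholder */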
+
+Signed-off-by: Qiujun Huang <hqjagain@gmail.com>
+Link: https://lore.kernel.org/r/1582556566-3909-1-git-send-email-hqjagain@gmail.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/inode.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index cd833f4e64ef1..52be4c9650241 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -5315,7 +5315,7 @@ static int ext4_do_update_inode(handle_t *handle,
+ raw_inode->i_file_acl_high =
+ cpu_to_le16(ei->i_file_acl >> 32);
+ raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
+- if (ei->i_disksize != ext4_isize(inode->i_sb, raw_inode)) {
++ if (READ_ONCE(ei->i_disksize) != ext4_isize(inode->i_sb, raw_inode)) {
+ ext4_isize_set(raw_inode, ei->i_disksize);
+ need_datasync = 1;
+ }
+--
+2.25.1
+
--- /dev/null
+From d9bd15972dbb74482fb7697e11aa3c39db682ded Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jan 2020 12:23:17 -0500
+Subject: ext4: make dioread_nolock the default
+
+From: Theodore Ts'o <tytso@mit.edu>
+
+[ Upstream commit 244adf6426ee31a83f397b700d964cff12a247d3 ]
+
+This fixes the direct I/O versus writeback race which can reveal stale
+data, and it improves the tail latency of commits on slow devices.
+
+Link: https://lore.kernel.org/r/20200125022254.1101588-1-tytso@mit.edu
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/super.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index 0c15ff19acbd4..0076ea7427e34 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -1538,6 +1538,7 @@ static const match_table_t tokens = {
+ {Opt_auto_da_alloc, "auto_da_alloc"},
+ {Opt_noauto_da_alloc, "noauto_da_alloc"},
+ {Opt_dioread_nolock, "dioread_nolock"},
++ {Opt_dioread_lock, "nodioread_nolock"},
+ {Opt_dioread_lock, "dioread_lock"},
+ {Opt_discard, "discard"},
+ {Opt_nodiscard, "nodiscard"},
+@@ -3712,6 +3713,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ set_opt(sb, NO_UID32);
+ /* xattr user namespace & acls are now defaulted on */
+ set_opt(sb, XATTR_USER);
++ set_opt(sb, DIOREAD_NOLOCK);
+ #ifdef CONFIG_EXT4_FS_POSIX_ACL
+ set_opt(sb, POSIX_ACL);
+ #endif
+@@ -3849,9 +3851,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ goto failed_mount;
+
+ if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
+- printk_once(KERN_WARNING "EXT4-fs: Warning: mounting "
+- "with data=journal disables delayed "
+- "allocation and O_DIRECT support!\n");
++ printk_once(KERN_WARNING "EXT4-fs: Warning: mounting with data=journal disables delayed allocation, dioread_nolock, and O_DIRECT support!\n");
++ clear_opt(sb, DIOREAD_NOLOCK);
+ if (test_opt2(sb, EXPLICIT_DELALLOC)) {
+ ext4_msg(sb, KERN_ERR, "can't mount with "
+ "both data=journal and delalloc");
+--
+2.25.1
+
--- /dev/null
+From 20f0ba37d0f106b9ad5f4ad086db121328a6e26a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2020 15:01:56 +0000
+Subject: ext4: mark block bitmap corrupted when found instead of BUGON
+
+From: Dmitry Monakhov <dmonakhov@gmail.com>
+
+[ Upstream commit eb5760863fc28feab28b567ddcda7e667e638da0 ]
+
+We already have similar code in ext4_mb_complex_scan_group(), but
+ext4_mb_simple_scan_group() is still affected.
+
+Other reports: https://www.spinics.net/lists/linux-ext4/msg60231.html
+
+Reviewed-by: Andreas Dilger <adilger@dilger.ca>
+Signed-off-by: Dmitry Monakhov <dmonakhov@gmail.com>
+Link: https://lore.kernel.org/r/20200310150156.641-1-dmonakhov@gmail.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/mballoc.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
+index 8dd54a8a03610..054cfdd007d69 100644
+--- a/fs/ext4/mballoc.c
++++ b/fs/ext4/mballoc.c
+@@ -1901,8 +1901,15 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
+ BUG_ON(buddy == NULL);
+
+ k = mb_find_next_zero_bit(buddy, max, 0);
+- BUG_ON(k >= max);
+-
++ if (k >= max) {
++ ext4_grp_locked_error(ac->ac_sb, e4b->bd_group, 0, 0,
++ "%d free clusters of order %d. But found 0",
++ grp->bb_counters[i], i);
++ ext4_mark_group_bitmap_corrupted(ac->ac_sb,
++ e4b->bd_group,
++ EXT4_GROUP_INFO_BBITMAP_CORRUPT);
++ break;
++ }
+ ac->ac_found++;
+
+ ac->ac_b_ex.fe_len = 1 << i;
+--
+2.25.1
+
--- /dev/null
+From eedc62d5e4352be2777caf5d63014c6504b1cdb4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Feb 2020 16:35:08 +0000
+Subject: firmware: arm_sdei: Use cpus_read_lock() to avoid races with cpuhp
+
+From: James Morse <james.morse@arm.com>
+
+[ Upstream commit 54f529a6806c9710947a4f2cdc15d6ea54121ccd ]
+
+SDEI has private events that need registering and enabling on each CPU.
+CPUs can come and go while we are trying to do this. SDEI tries to avoid
+these problems by setting the reregister flag before the register call,
+so any CPUs that come online register the event too. A sticking plaster
+like this doesn't work: if the register call fails, a CPU that
+subsequently comes online will still register the event before reregister
+is cleared.
+
+Take cpus_read_lock() around the register and enable calls. We don't
+want surprise CPUs to do the wrong thing if they race with these calls
+failing.
+
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/firmware/arm_sdei.c | 26 ++++++++++++++------------
+ 1 file changed, 14 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/firmware/arm_sdei.c b/drivers/firmware/arm_sdei.c
+index 05b528c7ed8fd..e809f4d9a9e93 100644
+--- a/drivers/firmware/arm_sdei.c
++++ b/drivers/firmware/arm_sdei.c
+@@ -410,14 +410,19 @@ int sdei_event_enable(u32 event_num)
+ return -ENOENT;
+ }
+
+- spin_lock(&sdei_list_lock);
+- event->reenable = true;
+- spin_unlock(&sdei_list_lock);
+
++ cpus_read_lock();
+ if (event->type == SDEI_EVENT_TYPE_SHARED)
+ err = sdei_api_event_enable(event->event_num);
+ else
+ err = sdei_do_cross_call(_local_event_enable, event);
++
++ if (!err) {
++ spin_lock(&sdei_list_lock);
++ event->reenable = true;
++ spin_unlock(&sdei_list_lock);
++ }
++ cpus_read_unlock();
+ mutex_unlock(&sdei_events_lock);
+
+ return err;
+@@ -619,21 +624,18 @@ int sdei_event_register(u32 event_num, sdei_event_callback *cb, void *arg)
+ break;
+ }
+
+- spin_lock(&sdei_list_lock);
+- event->reregister = true;
+- spin_unlock(&sdei_list_lock);
+-
++ cpus_read_lock();
+ err = _sdei_event_register(event);
+ if (err) {
+- spin_lock(&sdei_list_lock);
+- event->reregister = false;
+- event->reenable = false;
+- spin_unlock(&sdei_list_lock);
+-
+ sdei_event_destroy(event);
+ pr_warn("Failed to register event %u: %d\n", event_num,
+ err);
++ } else {
++ spin_lock(&sdei_list_lock);
++ event->reregister = true;
++ spin_unlock(&sdei_list_lock);
+ }
++ cpus_read_unlock();
+ } while (0);
+ mutex_unlock(&sdei_events_lock);
+
+--
+2.25.1
+
--- /dev/null
+From ec8c72aa7d84494d7d4542fe8c0d5903562e83d7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 31 Oct 2019 01:43:31 -0400
+Subject: fix dget_parent() fastpath race
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+[ Upstream commit e84009336711d2bba885fc9cea66348ddfce3758 ]
+
+We are overoptimistic about taking the fast path there; seeing
+the same value in ->d_parent after having grabbed a reference
+to that parent does *not* mean that it has remained our parent
+all along.
+
+That wouldn't be a big deal (in the end it is our parent and
+we have grabbed the reference we are about to return), but...
+the situation with barriers is messed up.
+
+We might have hit the following sequence:
+
+d is a dentry of /tmp/a/b
+CPU1:                                           CPU2:
+parent = d->d_parent (i.e. dentry of /tmp/a)
+                                                rename /tmp/a/b to /tmp/b
+                                                rmdir /tmp/a, making its dentry negative
+grab reference to parent,
+end up with cached parent->d_inode (NULL)
+                                                mkdir /tmp/a, rename /tmp/b to /tmp/a/b
+recheck d->d_parent, which is back to original
+decide that everything's fine and return the reference we'd got.
+
+The trouble is, the caller (on CPU1) will observe dget_parent()
+returning an apparently negative dentry. It actually is positive,
+but CPU1 has a stale ->d_inode cached.
+
+Use d->d_seq to see if it has been moved instead of rechecking ->d_parent.
+NOTE: we are *NOT* going to retry on any kind of ->d_seq mismatch;
+we just go into the slow path in such case. We don't wait for ->d_seq
+to become even either - again, if we are racing with renames, we
+can bloody well go to slow path anyway.
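+
+A minimal sketch of that fast path (placeholder object and field names,
+assuming <linux/seqlock.h> and <linux/lockref.h>; not the dcache code
+itself):
+
+	rcu_read_lock();
+	seq = raw_seqcount_begin(&obj->seq);	/* no waiting for an even count */
+	parent = READ_ONCE(obj->parent);
+	gotref = lockref_get_not_zero(&parent->ref);
+	rcu_read_unlock();
+	if (gotref && !read_seqcount_retry(&obj->seq, seq))
+		return parent;			/* nothing moved under us */
+	/* otherwise drop the reference and take the locked slow path */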
+
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dcache.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fs/dcache.c b/fs/dcache.c
+index 6e0022326afe3..20370a0997bf9 100644
+--- a/fs/dcache.c
++++ b/fs/dcache.c
+@@ -864,17 +864,19 @@ struct dentry *dget_parent(struct dentry *dentry)
+ {
+ int gotref;
+ struct dentry *ret;
++ unsigned seq;
+
+ /*
+ * Do optimistic parent lookup without any
+ * locking.
+ */
+ rcu_read_lock();
++ seq = raw_seqcount_begin(&dentry->d_seq);
+ ret = READ_ONCE(dentry->d_parent);
+ gotref = lockref_get_not_zero(&ret->d_lockref);
+ rcu_read_unlock();
+ if (likely(gotref)) {
+- if (likely(ret == READ_ONCE(dentry->d_parent)))
++ if (!read_seqcount_retry(&dentry->d_seq, seq))
+ return ret;
+ dput(ret);
+ }
+--
+2.25.1
+
--- /dev/null
+From ed81d474e6c6782696c099ea881a730212a13346 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 May 2020 14:50:37 +0200
+Subject: fuse: don't check refcount after stealing page
+
+From: Miklos Szeredi <mszeredi@redhat.com>
+
+[ Upstream commit 32f98877c57bee6bc27f443a96f49678a2cd6a50 ]
+
+page_count() is unstable. Unless there has been an RCU grace period
+between when the page was removed from the page cache and now, a
+speculative reference may exist from the page cache.
+
+Reported-by: Matthew Wilcox <willy@infradead.org>
+Signed-off-by: Miklos Szeredi <mszeredi@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/fuse/dev.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
+index 01e6ea11822bf..c51c9a6881e49 100644
+--- a/fs/fuse/dev.c
++++ b/fs/fuse/dev.c
+@@ -831,7 +831,6 @@ static int fuse_check_page(struct page *page)
+ {
+ if (page_mapcount(page) ||
+ page->mapping != NULL ||
+- page_count(page) != 1 ||
+ (page->flags & PAGE_FLAGS_CHECK_AT_PREP &
+ ~(1 << PG_locked |
+ 1 << PG_referenced |
+--
+2.25.1
+
--- /dev/null
+From f36424713063830cbe8d6710d9af71e3bfc39bfa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 19 Nov 2019 11:40:46 -0500
+Subject: gfs2: clean up iopen glock mess in gfs2_create_inode
+
+From: Bob Peterson <rpeterso@redhat.com>
+
+[ Upstream commit 2c47c1be51fbded1f7baa2ceaed90f97932f79be ]
+
+Before this patch, gfs2_create_inode had a use-after-free for the
+iopen glock in some error paths because it did this:
+
+ gfs2_glock_put(io_gl);
+fail_gunlock2:
+ if (io_gl)
+ clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
+
+In some cases, the io_gl was used for create and only had one
+reference, so the glock might be freed before the clear_bit().
+This patch tries to straighten it out by only jumping to the
+error paths where iopen is properly set, and moving the
+gfs2_glock_put after the clear_bit.
+
+Signed-off-by: Bob Peterson <rpeterso@redhat.com>
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/gfs2/inode.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
+index d968b5c5df217..a52b8b0dceeb9 100644
+--- a/fs/gfs2/inode.c
++++ b/fs/gfs2/inode.c
+@@ -715,7 +715,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+
+ error = gfs2_trans_begin(sdp, blocks, 0);
+ if (error)
+- goto fail_gunlock2;
++ goto fail_free_inode;
+
+ if (blocks > 1) {
+ ip->i_eattr = ip->i_no_addr + 1;
+@@ -726,7 +726,7 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+
+ error = gfs2_glock_get(sdp, ip->i_no_addr, &gfs2_iopen_glops, CREATE, &io_gl);
+ if (error)
+- goto fail_gunlock2;
++ goto fail_free_inode;
+
+ BUG_ON(test_and_set_bit(GLF_INODE_CREATING, &io_gl->gl_flags));
+
+@@ -735,7 +735,6 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ goto fail_gunlock2;
+
+ glock_set_object(ip->i_iopen_gh.gh_gl, ip);
+- gfs2_glock_put(io_gl);
+ gfs2_set_iop(inode);
+ insert_inode_hash(inode);
+
+@@ -768,6 +767,8 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+
+ mark_inode_dirty(inode);
+ d_instantiate(dentry, inode);
++ /* After instantiate, errors should result in evict which will destroy
++ * both inode and iopen glocks properly. */
+ if (file) {
+ file->f_mode |= FMODE_CREATED;
+ error = finish_open(file, dentry, gfs2_open_common);
+@@ -775,15 +776,15 @@ static int gfs2_create_inode(struct inode *dir, struct dentry *dentry,
+ gfs2_glock_dq_uninit(ghs);
+ gfs2_glock_dq_uninit(ghs + 1);
+ clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
++ gfs2_glock_put(io_gl);
+ return error;
+
+ fail_gunlock3:
+ glock_clear_object(io_gl, ip);
+ gfs2_glock_dq_uninit(&ip->i_iopen_gh);
+- gfs2_glock_put(io_gl);
+ fail_gunlock2:
+- if (io_gl)
+- clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
++ clear_bit(GLF_INODE_CREATING, &io_gl->gl_flags);
++ gfs2_glock_put(io_gl);
+ fail_free_inode:
+ if (ip->i_gl) {
+ glock_clear_object(ip->i_gl, ip);
+--
+2.25.1
+
--- /dev/null
+From 2f5be5220f158f42ec9a20fc5979bb6a36b3e5a3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 17 Oct 2019 23:29:53 -0500
+Subject: gma/gma500: fix a memory disclosure bug due to uninitialized bytes
+
+From: Kangjie Lu <kjlu@umn.edu>
+
+[ Upstream commit 57a25a5f754ce27da2cfa6f413cfd366f878db76 ]
+
+`best_clock` is an object that may be sent out. Object `clock`
+contains uninitialized bytes that are copied to `best_clock`,
+which leads to memory disclosure and an information leak.
+
+Signed-off-by: Kangjie Lu <kjlu@umn.edu>
+Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
+Link: https://patchwork.freedesktop.org/patch/msgid/20191018042953.31099-1-kjlu@umn.edu
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/gma500/cdv_intel_display.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/gpu/drm/gma500/cdv_intel_display.c b/drivers/gpu/drm/gma500/cdv_intel_display.c
+index 17db4b4749d5a..2e8479744ca4a 100644
+--- a/drivers/gpu/drm/gma500/cdv_intel_display.c
++++ b/drivers/gpu/drm/gma500/cdv_intel_display.c
+@@ -415,6 +415,8 @@ static bool cdv_intel_find_dp_pll(const struct gma_limit_t *limit,
+ struct gma_crtc *gma_crtc = to_gma_crtc(crtc);
+ struct gma_clock_t clock;
+
++ memset(&clock, 0, sizeof(clock));
++
+ switch (refclk) {
+ case 27000:
+ if (target < 200000) {
+--
+2.25.1
+
--- /dev/null
+From d3613063df358c3ad89245d2faecc94bbc0cc4f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Sep 2020 12:32:33 +0200
+Subject: i2c: core: Call i2c_acpi_install_space_handler() before
+ i2c_acpi_register_devices()
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+[ Upstream commit 21653a4181ff292480599dad996a2b759ccf050f ]
+
+The _STA method of some ACPI i2c-devices (which is used to detect if the
+device is present) uses autodetection code which probes which device is
+present over i2c. This requires the I2C ACPI OpRegion handler to be
+registered before we enumerate i2c-clients under the i2c-adapter.
+
+This fixes the i2c touchpad on the Lenovo ThinkBook 14-IIL and
+ThinkBook 15 IIL not getting an i2c-client instantiated and thus not
+working.
+
+BugLink: https://bugzilla.redhat.com/show_bug.cgi?id=1842039
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Reviewed-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Wolfram Sang <wsa@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/i2c/i2c-core-base.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/i2c/i2c-core-base.c b/drivers/i2c/i2c-core-base.c
+index f225bef1e043c..41dd0a08a625c 100644
+--- a/drivers/i2c/i2c-core-base.c
++++ b/drivers/i2c/i2c-core-base.c
+@@ -1292,8 +1292,8 @@ static int i2c_register_adapter(struct i2c_adapter *adap)
+
+ /* create pre-declared device nodes */
+ of_i2c_register_devices(adap);
+- i2c_acpi_register_devices(adap);
+ i2c_acpi_install_space_handler(adap);
++ i2c_acpi_register_devices(adap);
+
+ if (adap->nr < __i2c_first_dynamic_bus_num)
+ i2c_scan_static_board_info(adap);
+--
+2.25.1
+
--- /dev/null
+From 1c8d1307428c0af73c6bc4aac0a09d78fca321f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jan 2020 10:12:06 +0300
+Subject: ipv6_route_seq_next should increase position index
+
+From: Vasily Averin <vvs@virtuozzo.com>
+
+[ Upstream commit 4fc427e0515811250647d44de38d87d7b0e0790f ]
+
+If a seq_file .next function does not change the position index, a read
+after some lseek can generate unexpected output.
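+
+A minimal sketch of the expected ->next contract (placeholder names,
+assuming <linux/seq_file.h>; not the ip6_fib.c code): the position index
+is advanced unconditionally, even when the iterator has to return NULL or
+fall back to another lookup path:
+
+	static void *my_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+	{
+		++(*pos);			/* always advance the index */
+		return my_next_record(seq, v);	/* placeholder lookup */
+	}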
+
+https://bugzilla.kernel.org/show_bug.cgi?id=206283
+Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv6/ip6_fib.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
+index 05a206202e23d..b924941b96a31 100644
+--- a/net/ipv6/ip6_fib.c
++++ b/net/ipv6/ip6_fib.c
+@@ -2377,14 +2377,13 @@ static void *ipv6_route_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ struct net *net = seq_file_net(seq);
+ struct ipv6_route_iter *iter = seq->private;
+
++ ++(*pos);
+ if (!v)
+ goto iter_table;
+
+ n = rcu_dereference_bh(((struct fib6_info *)v)->fib6_next);
+- if (n) {
+- ++*pos;
++ if (n)
+ return n;
+- }
+
+ iter_table:
+ ipv6_route_check_sernum(iter);
+@@ -2392,8 +2391,6 @@ iter_table:
+ r = fib6_walk_continue(&iter->w);
+ spin_unlock_bh(&iter->tbl->tb6_lock);
+ if (r > 0) {
+- if (v)
+- ++*pos;
+ return iter->w.leaf;
+ } else if (r < 0) {
+ fib6_walker_unlink(net, &iter->w);
+--
+2.25.1
+
--- /dev/null
+From 1b898a9e54bf6c1da09419066ee1eebcc5d210de Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Dec 2019 16:50:53 -0800
+Subject: kernel/sys.c: avoid copying possible padding bytes in copy_to_user
+
+From: Joe Perches <joe@perches.com>
+
+[ Upstream commit 5e1aada08cd19ea652b2d32a250501d09b02ff2e ]
+
+Initialization is not guaranteed to zero padding bytes so use an
+explicit memset instead to avoid leaking any kernel content in any
+possible padding bytes.
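+
+A small userspace illustration of the difference (hypothetical struct and
+variable names, assuming <string.h>; not the kernel code): "= {}"
+zero-initializes the named members, but the compiler-inserted padding
+bytes are not guaranteed to be zeroed, while memset() clears every byte:
+
+	struct s {
+		char c;		/* padding is likely inserted after this */
+		long l;
+	};
+
+	struct s a = {};		/* members zeroed, padding unspecified */
+	struct s b;
+	memset(&b, 0, sizeof(b));	/* all bytes, padding included, are zero */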
+
+Link: http://lkml.kernel.org/r/dfa331c00881d61c8ee51577a082d8bebd61805c.camel@perches.com
+Signed-off-by: Joe Perches <joe@perches.com>
+Cc: Dan Carpenter <error27@gmail.com>
+Cc: Julia Lawall <julia.lawall@lip6.fr>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Kees Cook <keescook@chromium.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sys.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/sys.c b/kernel/sys.c
+index 096932a450466..baf60a3aa34b7 100644
+--- a/kernel/sys.c
++++ b/kernel/sys.c
+@@ -1275,11 +1275,13 @@ SYSCALL_DEFINE1(uname, struct old_utsname __user *, name)
+
+ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
+ {
+- struct oldold_utsname tmp = {};
++ struct oldold_utsname tmp;
+
+ if (!name)
+ return -EFAULT;
+
++ memset(&tmp, 0, sizeof(tmp));
++
+ down_read(&uts_sem);
+ memcpy(&tmp.sysname, &utsname()->sysname, __OLD_UTS_LEN);
+ memcpy(&tmp.nodename, &utsname()->nodename, __OLD_UTS_LEN);
+--
+2.25.1
+
--- /dev/null
+From 49fc115681e997e2767ab79238350905575d773a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 28 Nov 2019 14:38:48 +0800
+Subject: KVM: arm/arm64: vgic: Fix potential double free of dist->spis in
+ __kvm_vgic_destroy()
+
+From: Miaohe Lin <linmiaohe@huawei.com>
+
+[ Upstream commit 0bda9498dd45280e334bfe88b815ebf519602cc3 ]
+
+In kvm_vgic_dist_init(), called from kvm_vgic_map_resources(), if
+dist->vgic_model is invalid, dist->spis will be freed without setting
+dist->spis = NULL. In the vgicv2 resources clean-up path,
+__kvm_vgic_destroy() will then be called to free the allocated resources,
+and dist->spis will be freed again because we forgot to set
+dist->spis = NULL in the kvm_vgic_dist_init() failure path. So a double
+free would happen.
+
+Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Eric Auger <eric.auger@redhat.com>
+Link: https://lore.kernel.org/r/1574923128-19956-1-git-send-email-linmiaohe@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/arm/vgic/vgic-init.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/virt/kvm/arm/vgic/vgic-init.c b/virt/kvm/arm/vgic/vgic-init.c
+index cd75df25fe140..2fc1777da50d2 100644
+--- a/virt/kvm/arm/vgic/vgic-init.c
++++ b/virt/kvm/arm/vgic/vgic-init.c
+@@ -187,6 +187,7 @@ static int kvm_vgic_dist_init(struct kvm *kvm, unsigned int nr_spis)
+ break;
+ default:
+ kfree(dist->spis);
++ dist->spis = NULL;
+ return -EINVAL;
+ }
+ }
+--
+2.25.1
+
--- /dev/null
+From 14a611d9e804005bcd20ea0fd4a49d633293961c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Apr 2020 11:03:48 +0800
+Subject: KVM: arm64: vgic-its: Fix memory leak on the error path of
+ vgic_add_lpi()
+
+From: Zenghui Yu <yuzenghui@huawei.com>
+
+[ Upstream commit 57bdb436ce869a45881d8aa4bc5dac8e072dd2b6 ]
+
+If we're going to fail out of vgic_add_lpi(), let's make sure the
+allocated vgic_irq memory is also freed. Though it seems that both
+cases are unlikely to fail.
+
+Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Link: https://lore.kernel.org/r/20200414030349.625-3-yuzenghui@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/arm/vgic/vgic-its.c | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/virt/kvm/arm/vgic/vgic-its.c b/virt/kvm/arm/vgic/vgic-its.c
+index 9295addea7ecf..f139b1c62ca38 100644
+--- a/virt/kvm/arm/vgic/vgic-its.c
++++ b/virt/kvm/arm/vgic/vgic-its.c
+@@ -107,14 +107,21 @@ out_unlock:
+ * We "cache" the configuration table entries in our struct vgic_irq's.
+ * However we only have those structs for mapped IRQs, so we read in
+ * the respective config data from memory here upon mapping the LPI.
++ *
++ * Should any of these fail, behave as if we couldn't create the LPI
++ * by dropping the refcount and returning the error.
+ */
+ ret = update_lpi_config(kvm, irq, NULL, false);
+- if (ret)
++ if (ret) {
++ vgic_put_irq(kvm, irq);
+ return ERR_PTR(ret);
++ }
+
+ ret = vgic_v3_lpi_sync_pending_status(kvm, irq);
+- if (ret)
++ if (ret) {
++ vgic_put_irq(kvm, irq);
+ return ERR_PTR(ret);
++ }
+
+ return irq;
+ }
+--
+2.25.1
+
--- /dev/null
+From 005c69c664a570327852fb7b3d341313dcad3f31 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 12 Oct 2019 11:37:31 +0800
+Subject: KVM: fix overflow of zero page refcount with ksm running
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Zhuang Yanying <ann.zhuangyanying@huawei.com>
+
+[ Upstream commit 7df003c85218b5f5b10a7f6418208f31e813f38f ]
+
+We are testing virtual machines with KSM on a v5.4-rc2 kernel,
+and found a zero_page refcount overflow.
+The cause of the refcount overflow is that the refcount is increased in
+try_async_pf (get_user_page) without being decreased in mmu_set_spte()
+while handling an EPT violation.
+In kvm_release_pfn_clean(), only unreserved pages will call
+put_page. However, the zero page is reserved.
+So, as VMs are created and destroyed, the refcount of the
+zero page will continue to increase until it overflows.
+
+step1:
+echo 10000 > /sys/kernel/mm/ksm/pages_to_scan
+echo 1 > /sys/kernel/mm/ksm/run
+echo 1 > /sys/kernel/mm/ksm/use_zero_pages
+
+step2:
+Just create several normal qemu kvm VMs and destroy them after 10s.
+Repeat this action all the time.
+
+After a long period of time, all domains hang because
+the refcount of the zero page overflows.
+
+Qemu print error log as follow:
+ …
+ error: kvm run failed Bad address
+ EAX=00006cdc EBX=00000008 ECX=80202001 EDX=078bfbfd
+ ESI=ffffffff EDI=00000000 EBP=00000008 ESP=00006cc4
+ EIP=000efd75 EFL=00010002 [-------] CPL=0 II=0 A20=1 SMM=0 HLT=0
+ ES =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
+ CS =0008 00000000 ffffffff 00c09b00 DPL=0 CS32 [-RA]
+ SS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
+ DS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
+ FS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
+ GS =0010 00000000 ffffffff 00c09300 DPL=0 DS [-WA]
+ LDT=0000 00000000 0000ffff 00008200 DPL=0 LDT
+ TR =0000 00000000 0000ffff 00008b00 DPL=0 TSS32-busy
+ GDT= 000f7070 00000037
+ IDT= 000f70ae 00000000
+ CR0=00000011 CR2=00000000 CR3=00000000 CR4=00000000
+ DR0=0000000000000000 DR1=0000000000000000 DR2=0000000000000000 DR3=0000000000000000
+ DR6=00000000ffff0ff0 DR7=0000000000000400
+ EFER=0000000000000000
+ Code=00 01 00 00 00 e9 e8 00 00 00 c7 05 4c 55 0f 00 01 00 00 00 <8b> 35 00 00 01 00 8b 3d 04 00 01 00 b8 d8 d3 00 00 c1 e0 08 0c ea a3 00 00 01 00 c7 05 04
+ …
+
+Meanwhile, a kernel warning is emitted.
+
+ [40914.836375] WARNING: CPU: 3 PID: 82067 at ./include/linux/mm.h:987 try_get_page+0x1f/0x30
+ [40914.836412] CPU: 3 PID: 82067 Comm: CPU 0/KVM Kdump: loaded Tainted: G OE 5.2.0-rc2 #5
+ [40914.836415] RIP: 0010:try_get_page+0x1f/0x30
+ [40914.836417] Code: 40 00 c3 0f 1f 84 00 00 00 00 00 48 8b 47 08 a8 01 75 11 8b 47 34 85 c0 7e 10 f0 ff 47 34 b8 01 00 00 00 c3 48 8d 78 ff eb e9 <0f> 0b 31 c0 c3 66 90 66 2e 0f 1f 84 00 0
+ 0 00 00 00 48 8b 47 08 a8
+ [40914.836418] RSP: 0018:ffffb4144e523988 EFLAGS: 00010286
+ [40914.836419] RAX: 0000000080000000 RBX: 0000000000000326 RCX: 0000000000000000
+ [40914.836420] RDX: 0000000000000000 RSI: 00004ffdeba10000 RDI: ffffdf07093f6440
+ [40914.836421] RBP: ffffdf07093f6440 R08: 800000424fd91225 R09: 0000000000000000
+ [40914.836421] R10: ffff9eb41bfeebb8 R11: 0000000000000000 R12: ffffdf06bbd1e8a8
+ [40914.836422] R13: 0000000000000080 R14: 800000424fd91225 R15: ffffdf07093f6440
+ [40914.836423] FS: 00007fb60ffff700(0000) GS:ffff9eb4802c0000(0000) knlGS:0000000000000000
+ [40914.836425] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ [40914.836426] CR2: 0000000000000000 CR3: 0000002f220e6002 CR4: 00000000003626e0
+ [40914.836427] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+ [40914.836427] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+ [40914.836428] Call Trace:
+ [40914.836433] follow_page_pte+0x302/0x47b
+ [40914.836437] __get_user_pages+0xf1/0x7d0
+ [40914.836441] ? irq_work_queue+0x9/0x70
+ [40914.836443] get_user_pages_unlocked+0x13f/0x1e0
+ [40914.836469] __gfn_to_pfn_memslot+0x10e/0x400 [kvm]
+ [40914.836486] try_async_pf+0x87/0x240 [kvm]
+ [40914.836503] tdp_page_fault+0x139/0x270 [kvm]
+ [40914.836523] kvm_mmu_page_fault+0x76/0x5e0 [kvm]
+ [40914.836588] vcpu_enter_guest+0xb45/0x1570 [kvm]
+ [40914.836632] kvm_arch_vcpu_ioctl_run+0x35d/0x580 [kvm]
+ [40914.836645] kvm_vcpu_ioctl+0x26e/0x5d0 [kvm]
+ [40914.836650] do_vfs_ioctl+0xa9/0x620
+ [40914.836653] ksys_ioctl+0x60/0x90
+ [40914.836654] __x64_sys_ioctl+0x16/0x20
+ [40914.836658] do_syscall_64+0x5b/0x180
+ [40914.836664] entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ [40914.836666] RIP: 0033:0x7fb61cb6bfc7
+
+Signed-off-by: LinFeng <linfeng23@huawei.com>
+Signed-off-by: Zhuang Yanying <ann.zhuangyanying@huawei.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/kvm_main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
+index 6bd01d12df2ec..9312c7e750ed3 100644
+--- a/virt/kvm/kvm_main.c
++++ b/virt/kvm/kvm_main.c
+@@ -169,6 +169,7 @@ bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
+ */
+ if (pfn_valid(pfn))
+ return PageReserved(pfn_to_page(pfn)) &&
++ !is_zero_pfn(pfn) &&
+ !kvm_is_zone_device_pfn(pfn);
+
+ return true;
+--
+2.25.1
+
--- /dev/null
+From 39b749af6bc3a71ca869cdef756ae76b2acd5b88 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 21 Feb 2020 11:29:50 -0500
+Subject: KVM: PPC: Book3S HV: Treat TM-related invalid form instructions on P9
+ like the valid ones
+
+From: Gustavo Romero <gromero@linux.ibm.com>
+
+[ Upstream commit 1dff3064c764b5a51c367b949b341d2e38972bec ]
+
+On P9 DD2.2 due to a CPU defect some TM instructions need to be emulated by
+KVM. This is handled at first by the hardware raising a softpatch interrupt
+when certain TM instructions that need KVM assistance are executed in the
+guest. Although some TM instructions are invalid forms per the Power ISA,
+they can raise a softpatch interrupt too. For instance, the 'tresume.' instruction
+as defined in the ISA must have bit 31 set (1), but an instruction that
+matches 'tresume.' PO and XO opcode fields but has bit 31 not set (0), like
+0x7cfe9ddc, also raises a softpatch interrupt. Similarly for 'treclaim.'
+and 'trechkpt.' instructions with bit 31 = 0, i.e. 0x7c00075c and
+0x7c0007dc, respectively. Hence, if code like the following is executed
+in the guest it will raise a softpatch interrupt just like a 'tresume.'
+when the TM facility is enabled ('tabort. 0' in the example is used only
+to enable the TM facility):
+
+int main() { asm("tabort. 0; .long 0x7cfe9ddc;"); }
+
+Currently in such a case KVM throws a complete trace like:
+
+[345523.705984] WARNING: CPU: 24 PID: 64413 at arch/powerpc/kvm/book3s_hv_tm.c:211 kvmhv_p9_tm_emulation+0x68/0x620 [kvm_hv]
+[345523.705985] Modules linked in: kvm_hv(E) xt_conntrack ipt_REJECT nf_reject_ipv4 xt_tcpudp ip6table_mangle ip6table_nat
+iptable_mangle iptable_nat nf_nat nf_conntrack nf_defrag_ipv6 nf_defrag_ipv4 ebtable_filter ebtables ip6table_filter
+ip6_tables iptable_filter bridge stp llc sch_fq_codel ipmi_powernv at24 vmx_crypto ipmi_devintf ipmi_msghandler
+ibmpowernv uio_pdrv_genirq kvm opal_prd uio leds_powernv ib_iser rdma_cm iw_cm ib_cm ib_core iscsi_tcp libiscsi_tcp
+libiscsi scsi_transport_iscsi ip_tables x_tables autofs4 btrfs blake2b_generic zstd_compress raid10 raid456
+async_raid6_recov async_memcpy async_pq async_xor async_tx libcrc32c xor raid6_pq raid1 raid0 multipath linear tg3
+crct10dif_vpmsum crc32c_vpmsum ipr [last unloaded: kvm_hv]
+[345523.706030] CPU: 24 PID: 64413 Comm: CPU 0/KVM Tainted: G W E 5.5.0+ #1
+[345523.706031] NIP: c0080000072cb9c0 LR: c0080000072b5e80 CTR: c0080000085c7850
+[345523.706034] REGS: c000000399467680 TRAP: 0700 Tainted: G W E (5.5.0+)
+[345523.706034] MSR: 900000010282b033 <SF,HV,VEC,VSX,EE,FP,ME,IR,DR,RI,LE,TM[E]> CR: 24022428 XER: 00000000
+[345523.706042] CFAR: c0080000072b5e7c IRQMASK: 0
+ GPR00: c0080000072b5e80 c000000399467910 c0080000072db500 c000000375ccc720
+ GPR04: c000000375ccc720 00000003fbec0000 0000a10395dda5a6 0000000000000000
+ GPR08: 000000007cfe9ddc 7cfe9ddc000005dc 7cfe9ddc7c0005dc c0080000072cd530
+ GPR12: c0080000085c7850 c0000003fffeb800 0000000000000001 00007dfb737f0000
+ GPR16: c0002001edcca558 0000000000000000 0000000000000000 0000000000000001
+ GPR20: c000000001b21258 c0002001edcca558 0000000000000018 0000000000000000
+ GPR24: 0000000001000000 ffffffffffffffff 0000000000000001 0000000000001500
+ GPR28: c0002001edcc4278 c00000037dd80000 800000050280f033 c000000375ccc720
+[345523.706062] NIP [c0080000072cb9c0] kvmhv_p9_tm_emulation+0x68/0x620 [kvm_hv]
+[345523.706065] LR [c0080000072b5e80] kvmppc_handle_exit_hv.isra.53+0x3e8/0x798 [kvm_hv]
+[345523.706066] Call Trace:
+[345523.706069] [c000000399467910] [c000000399467940] 0xc000000399467940 (unreliable)
+[345523.706071] [c000000399467950] [c000000399467980] 0xc000000399467980
+[345523.706075] [c0000003994679f0] [c0080000072bd1c4] kvmhv_run_single_vcpu+0xa1c/0xb80 [kvm_hv]
+[345523.706079] [c000000399467ac0] [c0080000072bd8e0] kvmppc_vcpu_run_hv+0x5b8/0xb00 [kvm_hv]
+[345523.706087] [c000000399467b90] [c0080000085c93cc] kvmppc_vcpu_run+0x34/0x48 [kvm]
+[345523.706095] [c000000399467bb0] [c0080000085c582c] kvm_arch_vcpu_ioctl_run+0x244/0x420 [kvm]
+[345523.706101] [c000000399467c40] [c0080000085b7498] kvm_vcpu_ioctl+0x3d0/0x7b0 [kvm]
+[345523.706105] [c000000399467db0] [c0000000004adf9c] ksys_ioctl+0x13c/0x170
+[345523.706107] [c000000399467e00] [c0000000004adff8] sys_ioctl+0x28/0x80
+[345523.706111] [c000000399467e20] [c00000000000b278] system_call+0x5c/0x68
+[345523.706112] Instruction dump:
+[345523.706114] 419e0390 7f8a4840 409d0048 6d497c00 2f89075d 419e021c 6d497c00 2f8907dd
+[345523.706119] 419e01c0 6d497c00 2f8905dd 419e00a4 <0fe00000> 38210040 38600000 ebc1fff0
+
+and then treats the executed instruction as a 'nop'.
+
+However the POWER9 User's Manual, in section "4.6.10 Book II Invalid
+Forms", informs that for TM instructions bit 31 is in fact ignored, thus
+for the TM-related invalid forms ignoring bit 31 and handling them like the
+valid forms is an acceptable way to handle them. POWER8 behaves the same
+way too.
+
+This commit changes the handling of the cases described here by treating
+the TM-related invalid forms that can generate a softpatch interrupt
+just like their valid forms (w/ bit 31 = 1) instead of as a 'nop', and by
+gently reporting any other unrecognized case to the host and treating it as
+an illegal instruction instead of throwing a trace and treating it as a 'nop'.
+
+Signed-off-by: Gustavo Romero <gromero@linux.ibm.com>
+Reviewed-by: Segher Boessenkool <segher@kernel.crashing.org>
+Acked-By: Michael Neuling <mikey@neuling.org>
+Reviewed-by: Leonardo Bras <leonardo@linux.ibm.com>
+Signed-off-by: Paul Mackerras <paulus@ozlabs.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/include/asm/kvm_asm.h | 3 +++
+ arch/powerpc/kvm/book3s_hv_tm.c | 28 ++++++++++++++++++++-----
+ arch/powerpc/kvm/book3s_hv_tm_builtin.c | 16 ++++++++++++--
+ 3 files changed, 40 insertions(+), 7 deletions(-)
+
+diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
+index a790d5cf6ea37..684e8ae00d160 100644
+--- a/arch/powerpc/include/asm/kvm_asm.h
++++ b/arch/powerpc/include/asm/kvm_asm.h
+@@ -163,4 +163,7 @@
+
+ #define KVM_INST_FETCH_FAILED -1
+
++/* Extract PO and XOP opcode fields */
++#define PO_XOP_OPCODE_MASK 0xfc0007fe
++
+ #endif /* __POWERPC_KVM_ASM_H__ */
+diff --git a/arch/powerpc/kvm/book3s_hv_tm.c b/arch/powerpc/kvm/book3s_hv_tm.c
+index 31cd0f327c8a2..e7fd60cf97804 100644
+--- a/arch/powerpc/kvm/book3s_hv_tm.c
++++ b/arch/powerpc/kvm/book3s_hv_tm.c
+@@ -6,6 +6,8 @@
+ * published by the Free Software Foundation.
+ */
+
++#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
++
+ #include <linux/kvm_host.h>
+
+ #include <asm/kvm_ppc.h>
+@@ -47,7 +49,18 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ u64 newmsr, bescr;
+ int ra, rs;
+
+- switch (instr & 0xfc0007ff) {
++ /*
++ * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
++ * in these instructions, so masking bit 31 out doesn't change these
++ * instructions. For treclaim., tsr., and trechkpt. instructions if bit
++ * 31 = 0 then they are per ISA invalid forms, however P9 UM, in section
++ * 4.6.10 Book II Invalid Forms, informs specifically that ignoring bit
++ * 31 is an acceptable way to handle these invalid forms that have
++ * bit 31 = 0. Moreover, for emulation purposes both forms (w/ and wo/
++ * bit 31 set) can generate a softpatch interrupt. Hence both forms
++ * are handled below for these instructions so they behave the same way.
++ */
++ switch (instr & PO_XOP_OPCODE_MASK) {
+ case PPC_INST_RFID:
+ /* XXX do we need to check for PR=0 here? */
+ newmsr = vcpu->arch.shregs.srr1;
+@@ -108,7 +121,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ vcpu->arch.shregs.msr = newmsr;
+ return RESUME_GUEST;
+
+- case PPC_INST_TSR:
++ /* ignore bit 31, see comment above */
++ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
+ /* check for PR=1 and arch 2.06 bit set in PCR */
+ if ((msr & MSR_PR) && (vcpu->arch.vcore->pcr & PCR_ARCH_206)) {
+ /* generate an illegal instruction interrupt */
+@@ -143,7 +157,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ vcpu->arch.shregs.msr = msr;
+ return RESUME_GUEST;
+
+- case PPC_INST_TRECLAIM:
++ /* ignore bit 31, see comment above */
++ case (PPC_INST_TRECLAIM & PO_XOP_OPCODE_MASK):
+ /* check for TM disabled in the HFSCR or MSR */
+ if (!(vcpu->arch.hfscr & HFSCR_TM)) {
+ /* generate an illegal instruction interrupt */
+@@ -179,7 +194,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ vcpu->arch.shregs.msr &= ~MSR_TS_MASK;
+ return RESUME_GUEST;
+
+- case PPC_INST_TRECHKPT:
++ /* ignore bit 31, see comment above */
++ case (PPC_INST_TRECHKPT & PO_XOP_OPCODE_MASK):
+ /* XXX do we need to check for PR=0 here? */
+ /* check for TM disabled in the HFSCR or MSR */
+ if (!(vcpu->arch.hfscr & HFSCR_TM)) {
+@@ -211,6 +227,8 @@ int kvmhv_p9_tm_emulation(struct kvm_vcpu *vcpu)
+ }
+
+ /* What should we do here? We didn't recognize the instruction */
+- WARN_ON_ONCE(1);
++ kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
++ pr_warn_ratelimited("Unrecognized TM-related instruction %#x for emulation", instr);
++
+ return RESUME_GUEST;
+ }
+diff --git a/arch/powerpc/kvm/book3s_hv_tm_builtin.c b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+index 3cf5863bc06e8..3c7ca2fa19597 100644
+--- a/arch/powerpc/kvm/book3s_hv_tm_builtin.c
++++ b/arch/powerpc/kvm/book3s_hv_tm_builtin.c
+@@ -26,7 +26,18 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
+ u64 newmsr, msr, bescr;
+ int rs;
+
+- switch (instr & 0xfc0007ff) {
++ /*
++ * rfid, rfebb, and mtmsrd encode bit 31 = 0 since it's a reserved bit
++ * in these instructions, so masking bit 31 out doesn't change these
++ * instructions. For the tsr. instruction if bit 31 = 0 then it is per
++ * ISA an invalid form, however P9 UM, in section 4.6.10 Book II Invalid
++ * Forms, informs specifically that ignoring bit 31 is an acceptable way
++ * to handle TM-related invalid forms that have bit 31 = 0. Moreover,
++ * for emulation purposes both forms (w/ and wo/ bit 31 set) can
++ * generate a softpatch interrupt. Hence both forms are handled below
++ * for tsr. to make them behave the same way.
++ */
++ switch (instr & PO_XOP_OPCODE_MASK) {
+ case PPC_INST_RFID:
+ /* XXX do we need to check for PR=0 here? */
+ newmsr = vcpu->arch.shregs.srr1;
+@@ -76,7 +87,8 @@ int kvmhv_p9_tm_emulation_early(struct kvm_vcpu *vcpu)
+ vcpu->arch.shregs.msr = newmsr;
+ return 1;
+
+- case PPC_INST_TSR:
++ /* ignore bit 31, see comment above */
++ case (PPC_INST_TSR & PO_XOP_OPCODE_MASK):
+ /* we know the MSR has the TS field = S (0b01) here */
+ msr = vcpu->arch.shregs.msr;
+ /* check for PR=1 and arch 2.06 bit set in PCR */
+--
+2.25.1
+
--- /dev/null
+From 5c768a209f4ec6bbb23a8a63a57e393e4677bf42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Apr 2020 12:11:52 -0700
+Subject: KVM: Remove CREATE_IRQCHIP/SET_PIT2 race
+
+From: Steve Rutherford <srutherford@google.com>
+
+[ Upstream commit 7289fdb5dcdbc5155b5531529c44105868a762f2 ]
+
+Fixes a NULL pointer dereference, caused by the PIT firing an interrupt
+before the interrupt table has been initialized.
+
+SET_PIT2 can race with the creation of the IRQchip. In particular,
+if SET_PIT2 is called with a low PIT timer period (after the creation of
+the IOAPIC, but before the instantiation of the irq routes), the PIT can
+fire an interrupt at an uninitialized table.
+
+Signed-off-by: Steve Rutherford <srutherford@google.com>
+Signed-off-by: Jon Cargille <jcargill@google.com>
+Reviewed-by: Jim Mattson <jmattson@google.com>
+Message-Id: <20200416191152.259434-1-jcargill@google.com>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/x86.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
+index 430a4bc66f604..620ed1fa35119 100644
+--- a/arch/x86/kvm/x86.c
++++ b/arch/x86/kvm/x86.c
+@@ -4668,10 +4668,13 @@ set_identity_unlock:
+ r = -EFAULT;
+ if (copy_from_user(&u.ps, argp, sizeof u.ps))
+ goto out;
++ mutex_lock(&kvm->lock);
+ r = -ENXIO;
+ if (!kvm->arch.vpit)
+- goto out;
++ goto set_pit_out;
+ r = kvm_vm_ioctl_set_pit(kvm, &u.ps);
++set_pit_out:
++ mutex_unlock(&kvm->lock);
+ break;
+ }
+ case KVM_GET_PIT2: {
+@@ -4691,10 +4694,13 @@ set_identity_unlock:
+ r = -EFAULT;
+ if (copy_from_user(&u.ps2, argp, sizeof(u.ps2)))
+ goto out;
++ mutex_lock(&kvm->lock);
+ r = -ENXIO;
+ if (!kvm->arch.vpit)
+- goto out;
++ goto set_pit2_out;
+ r = kvm_vm_ioctl_set_pit2(kvm, &u.ps2);
++set_pit2_out:
++ mutex_unlock(&kvm->lock);
+ break;
+ }
+ case KVM_REINJECT_CONTROL: {
+--
+2.25.1
+
--- /dev/null
+From a120e5296d7dbd2c49189525f00c2f21835ee715 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Feb 2020 18:24:48 +0100
+Subject: KVM: x86: fix incorrect comparison in trace event
+
+From: Paolo Bonzini <pbonzini@redhat.com>
+
+[ Upstream commit 147f1a1fe5d7e6b01b8df4d0cbd6f9eaf6b6c73b ]
+
+The "u" field in the event has three states, -1/0/1. Using u8 however means that
+comparison with -1 will always fail, so change to signed char.
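+
+A small userspace illustration of why the old field type could never
+match -1 (not the trace code itself):
+
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned char u = -1;	/* stored as 255, like the old u8 field */
+		signed char   s = -1;	/* stored as -1 */
+
+		/* integer promotion turns u into 255, so u == -1 is never true */
+		printf("%d %d\n", u == -1, s == -1);	/* prints "0 1" */
+		return 0;
+	}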
+
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/kvm/mmutrace.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
+index cb41b036eb264..7e0dc8c7da2c0 100644
+--- a/arch/x86/kvm/mmutrace.h
++++ b/arch/x86/kvm/mmutrace.h
+@@ -339,7 +339,7 @@ TRACE_EVENT(
+ /* These depend on page entry type, so compute them now. */
+ __field(bool, r)
+ __field(bool, x)
+- __field(u8, u)
++ __field(signed char, u)
+ ),
+
+ TP_fast_assign(
+--
+2.25.1
+
--- /dev/null
+From 3885f9e7cf528743be4f432826758bc5a4b58da8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Oct 2019 10:24:30 +0300
+Subject: leds: mlxreg: Fix possible buffer overflow
+
+From: Oleh Kravchenko <oleg@kaa.org.ua>
+
+[ Upstream commit 7c6082b903ac28dc3f383fba57c6f9e7e2594178 ]
+
+Error was detected by PVS-Studio:
+V512 A call of the 'sprintf' function will lead to overflow of
+the buffer 'led_data->led_cdev_name'.
+
+Acked-by: Jacek Anaszewski <jacek.anaszewski@gmail.com>
+Acked-by: Pavel Machek <pavel@ucw.cz>
+Signed-off-by: Oleh Kravchenko <oleg@kaa.org.ua>
+Signed-off-by: Pavel Machek <pavel@ucw.cz>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/leds/leds-mlxreg.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/leds/leds-mlxreg.c b/drivers/leds/leds-mlxreg.c
+index 1ee48cb21df95..022e973dc7c31 100644
+--- a/drivers/leds/leds-mlxreg.c
++++ b/drivers/leds/leds-mlxreg.c
+@@ -209,8 +209,8 @@ static int mlxreg_led_config(struct mlxreg_led_priv_data *priv)
+ brightness = LED_OFF;
+ led_data->base_color = MLXREG_LED_GREEN_SOLID;
+ }
+- sprintf(led_data->led_cdev_name, "%s:%s", "mlxreg",
+- data->label);
++ snprintf(led_data->led_cdev_name, sizeof(led_data->led_cdev_name),
++ "mlxreg:%s", data->label);
+ led_cdev->name = led_data->led_cdev_name;
+ led_cdev->brightness = brightness;
+ led_cdev->max_brightness = LED_ON;
+--
+2.25.1
+
--- /dev/null
+From f1dc551931ac1a269b3884234d13518ffcc502d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Sep 2020 12:24:53 +0200
+Subject: lockdep: fix order in trace_hardirqs_off_caller()
+
+From: Sven Schnelle <svens@linux.ibm.com>
+
+[ Upstream commit 73ac74c7d489756d2313219a108809921dbfaea1 ]
+
+Switch order so that locking state is consistent even
+if the IRQ tracer calls into lockdep again.
+
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_preemptirq.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/trace/trace_preemptirq.c b/kernel/trace/trace_preemptirq.c
+index 71f553cceb3c1..0e373cb0106bb 100644
+--- a/kernel/trace/trace_preemptirq.c
++++ b/kernel/trace/trace_preemptirq.c
+@@ -59,14 +59,14 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
+
+ __visible void trace_hardirqs_off_caller(unsigned long caller_addr)
+ {
++ lockdep_hardirqs_off(CALLER_ADDR0);
++
+ if (!this_cpu_read(tracing_irq_cpu)) {
+ this_cpu_write(tracing_irq_cpu, 1);
+ tracer_hardirqs_off(CALLER_ADDR0, caller_addr);
+ if (!in_nmi())
+ trace_irq_disable_rcuidle(CALLER_ADDR0, caller_addr);
+ }
+-
+- lockdep_hardirqs_off(CALLER_ADDR0);
+ }
+ EXPORT_SYMBOL(trace_hardirqs_off_caller);
+ #endif /* CONFIG_TRACE_IRQFLAGS */
+--
+2.25.1
+
--- /dev/null
+From 6bc7a2540522c75e39076c1d2a1d97d7b0010b0a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Sep 2019 20:15:44 +0800
+Subject: m68k: q40: Fix info-leak in rtc_ioctl
+
+From: Fuqian Huang <huangfq.daxian@gmail.com>
+
+[ Upstream commit 7cf78b6b12fd5550545e4b73b35dca18bd46b44c ]
+
+When the option is RTC_PLL_GET, pll will be copied to userland
+via copy_to_user. pll is initialized using mach_get_rtc_pll indirect
+call and mach_get_rtc_pll is only assigned with function
+q40_get_rtc_pll in arch/m68k/q40/config.c.
+In function q40_get_rtc_pll, the field pll_ctrl is not initialized.
+This will leak uninitialized stack content to userland.
+Fix this by zeroing the uninitialized field.
+
+Signed-off-by: Fuqian Huang <huangfq.daxian@gmail.com>
+Link: https://lore.kernel.org/r/20190927121544.7650-1-huangfq.daxian@gmail.com
+Signed-off-by: Geert Uytterhoeven <geert@linux-m68k.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/m68k/q40/config.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/m68k/q40/config.c b/arch/m68k/q40/config.c
+index 96810d91da2bd..4a25ce6a1823d 100644
+--- a/arch/m68k/q40/config.c
++++ b/arch/m68k/q40/config.c
+@@ -273,6 +273,7 @@ static int q40_get_rtc_pll(struct rtc_pll_info *pll)
+ {
+ int tmp = Q40_RTC_CTRL;
+
++ pll->pll_ctrl = 0;
+ pll->pll_value = tmp & Q40_RTC_PLL_MASK;
+ if (tmp & Q40_RTC_PLL_SIGN)
+ pll->pll_value = -pll->pll_value;
+--
+2.25.1
+
--- /dev/null
+From bdd827c9d90c6f552cd3c552c835ae0d4ecd6416 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Feb 2020 16:45:27 +0100
+Subject: media: go7007: Fix URB type for interrupt handling
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Takashi Iwai <tiwai@suse.de>
+
+[ Upstream commit a3ea410cac41b19a5490aad7fe6d9a9a772e646e ]
+
+Josef reported that his old-and-good Plextor ConvertX M402U video
+converter spews lots of WARNINGs on recent kernels, and it turned
+out that the device uses a bulk endpoint for interrupt handling just
+like the 2250 board.
+
+To fix it, generalize the check by properly verifying the endpoint type
+instead of hard-coding a board type check.
+
+Fixes: 7e5219d18e93 ("[media] go7007: Fix 2250 urb type")
+Reported-and-tested-by: Josef Möllers <josef.moellers@suse.com>
+BugLink: https://bugzilla.suse.com/show_bug.cgi?id=1162583
+BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=206427
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/usb/go7007/go7007-usb.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/media/usb/go7007/go7007-usb.c b/drivers/media/usb/go7007/go7007-usb.c
+index 19c6a0354ce00..b84a6f6548610 100644
+--- a/drivers/media/usb/go7007/go7007-usb.c
++++ b/drivers/media/usb/go7007/go7007-usb.c
+@@ -1052,6 +1052,7 @@ static int go7007_usb_probe(struct usb_interface *intf,
+ struct go7007_usb *usb;
+ const struct go7007_usb_board *board;
+ struct usb_device *usbdev = interface_to_usbdev(intf);
++ struct usb_host_endpoint *ep;
+ unsigned num_i2c_devs;
+ char *name;
+ int video_pipe, i, v_urb_len;
+@@ -1148,7 +1149,8 @@ static int go7007_usb_probe(struct usb_interface *intf,
+ if (usb->intr_urb->transfer_buffer == NULL)
+ goto allocfail;
+
+- if (go->board_id == GO7007_BOARDID_SENSORAY_2250)
++ ep = usb->usbdev->ep_in[4];
++ if (usb_endpoint_type(&ep->desc) == USB_ENDPOINT_XFER_BULK)
+ usb_fill_bulk_urb(usb->intr_urb, usb->usbdev,
+ usb_rcvbulkpipe(usb->usbdev, 4),
+ usb->intr_urb->transfer_buffer, 2*sizeof(u16),
+--
+2.25.1
+
--- /dev/null
+From 1564eb33e4a8f065cb18fb6c68a541824709aa68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 18 Aug 2019 22:51:30 -0300
+Subject: media: mc-device.c: fix memleak in media_device_register_entity
+
+From: zhengbin <zhengbin13@huawei.com>
+
+[ Upstream commit 713f871b30a66dc4daff4d17b760c9916aaaf2e1 ]
+
+In media_device_register_entity, if media_graph_walk_init fails,
+the previously allocated memory needs to be freed.
+
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: zhengbin <zhengbin13@huawei.com>
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/media-device.c | 65 ++++++++++++++++++------------------
+ 1 file changed, 33 insertions(+), 32 deletions(-)
+
+diff --git a/drivers/media/media-device.c b/drivers/media/media-device.c
+index ed518b1f82e4a..d04ed438a45de 100644
+--- a/drivers/media/media-device.c
++++ b/drivers/media/media-device.c
+@@ -568,6 +568,38 @@ static void media_device_release(struct media_devnode *devnode)
+ dev_dbg(devnode->parent, "Media device released\n");
+ }
+
++static void __media_device_unregister_entity(struct media_entity *entity)
++{
++ struct media_device *mdev = entity->graph_obj.mdev;
++ struct media_link *link, *tmp;
++ struct media_interface *intf;
++ unsigned int i;
++
++ ida_free(&mdev->entity_internal_idx, entity->internal_idx);
++
++ /* Remove all interface links pointing to this entity */
++ list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
++ list_for_each_entry_safe(link, tmp, &intf->links, list) {
++ if (link->entity == entity)
++ __media_remove_intf_link(link);
++ }
++ }
++
++ /* Remove all data links that belong to this entity */
++ __media_entity_remove_links(entity);
++
++ /* Remove all pads that belong to this entity */
++ for (i = 0; i < entity->num_pads; i++)
++ media_gobj_destroy(&entity->pads[i].graph_obj);
++
++ /* Remove the entity */
++ media_gobj_destroy(&entity->graph_obj);
++
++ /* invoke entity_notify callbacks to handle entity removal?? */
++
++ entity->graph_obj.mdev = NULL;
++}
++
+ /**
+ * media_device_register_entity - Register an entity with a media device
+ * @mdev: The media device
+@@ -625,6 +657,7 @@ int __must_check media_device_register_entity(struct media_device *mdev,
+ */
+ ret = media_graph_walk_init(&new, mdev);
+ if (ret) {
++ __media_device_unregister_entity(entity);
+ mutex_unlock(&mdev->graph_mutex);
+ return ret;
+ }
+@@ -637,38 +670,6 @@ int __must_check media_device_register_entity(struct media_device *mdev,
+ }
+ EXPORT_SYMBOL_GPL(media_device_register_entity);
+
+-static void __media_device_unregister_entity(struct media_entity *entity)
+-{
+- struct media_device *mdev = entity->graph_obj.mdev;
+- struct media_link *link, *tmp;
+- struct media_interface *intf;
+- unsigned int i;
+-
+- ida_free(&mdev->entity_internal_idx, entity->internal_idx);
+-
+- /* Remove all interface links pointing to this entity */
+- list_for_each_entry(intf, &mdev->interfaces, graph_obj.list) {
+- list_for_each_entry_safe(link, tmp, &intf->links, list) {
+- if (link->entity == entity)
+- __media_remove_intf_link(link);
+- }
+- }
+-
+- /* Remove all data links that belong to this entity */
+- __media_entity_remove_links(entity);
+-
+- /* Remove all pads that belong to this entity */
+- for (i = 0; i < entity->num_pads; i++)
+- media_gobj_destroy(&entity->pads[i].graph_obj);
+-
+- /* Remove the entity */
+- media_gobj_destroy(&entity->graph_obj);
+-
+- /* invoke entity_notify callbacks to handle entity removal?? */
+-
+- entity->graph_obj.mdev = NULL;
+-}
+-
+ void media_device_unregister_entity(struct media_entity *entity)
+ {
+ struct media_device *mdev = entity->graph_obj.mdev;
+--
+2.25.1
+
--- /dev/null
+From 76860dd2d11e348d5c6eb066b8929c266cc9a3cc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Sep 2019 11:25:42 -0300
+Subject: media: smiapp: Fix error handling at NVM reading
+
+From: Sakari Ailus <sakari.ailus@linux.intel.com>
+
+[ Upstream commit a5b1d5413534607b05fb34470ff62bf395f5c8d0 ]
+
+If NVM reading failed, the device was left powered on. Fix that.
+
+Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+samsung@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/i2c/smiapp/smiapp-core.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
+index 4731e1c72f960..0a434bdce3b3b 100644
+--- a/drivers/media/i2c/smiapp/smiapp-core.c
++++ b/drivers/media/i2c/smiapp/smiapp-core.c
+@@ -2337,11 +2337,12 @@ smiapp_sysfs_nvm_read(struct device *dev, struct device_attribute *attr,
+ if (rval < 0) {
+ if (rval != -EBUSY && rval != -EAGAIN)
+ pm_runtime_set_active(&client->dev);
+- pm_runtime_put(&client->dev);
++ pm_runtime_put_noidle(&client->dev);
+ return -ENODEV;
+ }
+
+ if (smiapp_read_nvm(sensor, sensor->nvm)) {
++ pm_runtime_put(&client->dev);
+ dev_err(&client->dev, "nvm read failed\n");
+ return -ENODEV;
+ }
+--
+2.25.1
+
--- /dev/null
+From 6502fd468c4df956747bbcfee5e25b379d6c43a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Feb 2020 08:35:46 +0100
+Subject: media: staging/imx: Missing assignment in
+ imx_media_capture_device_register()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit ef0ed05dcef8a74178a8b480cce23a377b1de2b8 ]
+
+There was supposed to be a "ret = " assignment here, otherwise the
+error handling on the next line won't work.
+
+Fixes: 64b5a49df486 ("[media] media: imx: Add Capture Device Interface")
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Reviewed-by: Steve Longerbeam <slongerbeam@gmail.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/media/imx/imx-media-capture.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/staging/media/imx/imx-media-capture.c b/drivers/staging/media/imx/imx-media-capture.c
+index 256039ce561e6..81a3370551dbc 100644
+--- a/drivers/staging/media/imx/imx-media-capture.c
++++ b/drivers/staging/media/imx/imx-media-capture.c
+@@ -678,7 +678,7 @@ int imx_media_capture_device_register(struct imx_media_video_dev *vdev)
+ /* setup default format */
+ fmt_src.pad = priv->src_sd_pad;
+ fmt_src.which = V4L2_SUBDEV_FORMAT_ACTIVE;
+- v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
++ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt_src);
+ if (ret) {
+ v4l2_err(sd, "failed to get src_sd format\n");
+ goto unreg;
+--
+2.25.1
+
--- /dev/null
+From ea9566a963e781ab1cf648964f5532d84bf65b63 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Feb 2020 15:26:46 +0100
+Subject: media: tda10071: fix unsigned sign extension overflow
+
+From: Colin Ian King <colin.king@canonical.com>
+
+[ Upstream commit a7463e2dc698075132de9905b89f495df888bb79 ]
+
+buf[0] is promoted to a 32-bit signed int before being shifted left by
+24 bits, and the result is then sign-extended when converted to an
+unsigned long. In the unlikely event that the top bit of buf[0] is set,
+all of the upper bits end up being set as well because of the sign
+extension, and this corrupts the dev->post_bit_error sum.
+Fix this by using the temporary u32 variable bit_error to avoid
+the sign-extension promotion. This also removes the need to do the
+computation twice.
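+
+A small stand-alone sketch of the promotion (kernel-style u8/u32 types,
+not the driver code) shows why the u32 temporary matters:
+
+    u8 buf[4] = { 0x80, 0x00, 0x00, 0x00 };
+    unsigned long bad, good;
+    u32 bit_error;
+
+    /* buf[0] is promoted to a signed int before the shift, so the
+     * expression evaluates to the negative int 0x80000000; converting
+     * it to a 64-bit unsigned long sign-extends the upper 32 bits. */
+    bad = buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3];
+    /* bad == 0xffffffff80000000 in practice */
+
+    /* Storing through a u32 first keeps only the intended 32 bits. */
+    bit_error = buf[0] << 24 | buf[1] << 16 | buf[2] << 8 | buf[3];
+    good = bit_error;               /* good == 0x80000000 */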
+
+Addresses-Coverity: ("Unintended sign extension")
+
+Fixes: 267897a4708f ("[media] tda10071: implement DVBv5 statistics")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Signed-off-by: Sean Young <sean@mess.org>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/dvb-frontends/tda10071.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/media/dvb-frontends/tda10071.c b/drivers/media/dvb-frontends/tda10071.c
+index 097c42d3f8c26..df0c7243eafe4 100644
+--- a/drivers/media/dvb-frontends/tda10071.c
++++ b/drivers/media/dvb-frontends/tda10071.c
+@@ -483,10 +483,11 @@ static int tda10071_read_status(struct dvb_frontend *fe, enum fe_status *status)
+ goto error;
+
+ if (dev->delivery_system == SYS_DVBS) {
+- dev->dvbv3_ber = buf[0] << 24 | buf[1] << 16 |
+- buf[2] << 8 | buf[3] << 0;
+- dev->post_bit_error += buf[0] << 24 | buf[1] << 16 |
+- buf[2] << 8 | buf[3] << 0;
++ u32 bit_error = buf[0] << 24 | buf[1] << 16 |
++ buf[2] << 8 | buf[3] << 0;
++
++ dev->dvbv3_ber = bit_error;
++ dev->post_bit_error += bit_error;
+ c->post_bit_error.stat[0].scale = FE_SCALE_COUNTER;
+ c->post_bit_error.stat[0].uvalue = dev->post_bit_error;
+ dev->block_error += buf[4] << 8 | buf[5] << 0;
+--
+2.25.1
+
--- /dev/null
+From 9ce4b283c99376d8b0befb357a394d243b45034e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Nov 2019 15:53:33 +0100
+Subject: media: ti-vpe: cal: Restrict DMA to avoid memory corruption
+
+From: Nikhil Devshatwar <nikhil.nd@ti.com>
+
+[ Upstream commit 6e72eab2e7b7a157d554b8f9faed7676047be7c1 ]
+
+When setting up DMA for video capture from the CSI channel, if the DMA
+size is not given, it ends up writing as much data as the camera sends.
+
+This may overwrite the buffers and cause memory corruption; green
+lines were observed on the default framebuffer.
+
+Restrict the DMA to the maximum height specified in the S_FMT ioctl.
+
+Signed-off-by: Nikhil Devshatwar <nikhil.nd@ti.com>
+Signed-off-by: Benoit Parrot <bparrot@ti.com>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/ti-vpe/cal.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/media/platform/ti-vpe/cal.c b/drivers/media/platform/ti-vpe/cal.c
+index be3155275a6ba..d945323fc437d 100644
+--- a/drivers/media/platform/ti-vpe/cal.c
++++ b/drivers/media/platform/ti-vpe/cal.c
+@@ -684,12 +684,13 @@ static void pix_proc_config(struct cal_ctx *ctx)
+ }
+
+ static void cal_wr_dma_config(struct cal_ctx *ctx,
+- unsigned int width)
++ unsigned int width, unsigned int height)
+ {
+ u32 val;
+
+ val = reg_read(ctx->dev, CAL_WR_DMA_CTRL(ctx->csi2_port));
+ set_field(&val, ctx->csi2_port, CAL_WR_DMA_CTRL_CPORT_MASK);
++ set_field(&val, height, CAL_WR_DMA_CTRL_YSIZE_MASK);
+ set_field(&val, CAL_WR_DMA_CTRL_DTAG_PIX_DAT,
+ CAL_WR_DMA_CTRL_DTAG_MASK);
+ set_field(&val, CAL_WR_DMA_CTRL_MODE_CONST,
+@@ -1315,7 +1316,8 @@ static int cal_start_streaming(struct vb2_queue *vq, unsigned int count)
+ csi2_lane_config(ctx);
+ csi2_ctx_config(ctx);
+ pix_proc_config(ctx);
+- cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline);
++ cal_wr_dma_config(ctx, ctx->v_fmt.fmt.pix.bytesperline,
++ ctx->v_fmt.fmt.pix.height);
+ cal_wr_dma_addr(ctx, addr);
+ csi2_ppi_enable(ctx);
+
+--
+2.25.1
+
--- /dev/null
+From ccb9f54e299667407369d90c2f049a1f2d98de67 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 21 Oct 2019 10:16:34 +0100
+Subject: mfd: mfd-core: Protect against NULL call-back function pointer
+
+From: Lee Jones <lee.jones@linaro.org>
+
+[ Upstream commit b195e101580db390f50b0d587b7f66f241d2bc88 ]
+
+If a child device calls mfd_cell_{en,dis}able() without an appropriate
+call-back being set, we are likely to encounter a panic. Avoid this
+by adding suitable checking.
+
+Signed-off-by: Lee Jones <lee.jones@linaro.org>
+Reviewed-by: Daniel Thompson <daniel.thompson@linaro.org>
+Reviewed-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mfd/mfd-core.c | 10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+diff --git a/drivers/mfd/mfd-core.c b/drivers/mfd/mfd-core.c
+index 182973df1aed4..77c965c6a65f1 100644
+--- a/drivers/mfd/mfd-core.c
++++ b/drivers/mfd/mfd-core.c
+@@ -32,6 +32,11 @@ int mfd_cell_enable(struct platform_device *pdev)
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ int err = 0;
+
++ if (!cell->enable) {
++ dev_dbg(&pdev->dev, "No .enable() call-back registered\n");
++ return 0;
++ }
++
+ /* only call enable hook if the cell wasn't previously enabled */
+ if (atomic_inc_return(cell->usage_count) == 1)
+ err = cell->enable(pdev);
+@@ -49,6 +54,11 @@ int mfd_cell_disable(struct platform_device *pdev)
+ const struct mfd_cell *cell = mfd_get_cell(pdev);
+ int err = 0;
+
++ if (!cell->disable) {
++ dev_dbg(&pdev->dev, "No .disable() call-back registered\n");
++ return 0;
++ }
++
+ /* only disable if no other clients are using it */
+ if (atomic_dec_return(cell->usage_count) == 0)
+ err = cell->disable(pdev);
+--
+2.25.1
+
--- /dev/null
+From c4088ddeae51de29c7e604e6f23a9696e35a9092 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2020 22:28:32 -0800
+Subject: mm: avoid data corruption on CoW fault into PFN-mapped VMA
+
+From: Kirill A. Shutemov <kirill@shutemov.name>
+
+[ Upstream commit c3e5ea6ee574ae5e845a40ac8198de1fb63bb3ab ]
+
+Jeff Moyer has reported that one of xfstests triggers a warning when run
+on DAX-enabled filesystem:
+
+ WARNING: CPU: 76 PID: 51024 at mm/memory.c:2317 wp_page_copy+0xc40/0xd50
+ ...
+ wp_page_copy+0x98c/0xd50 (unreliable)
+ do_wp_page+0xd8/0xad0
+ __handle_mm_fault+0x748/0x1b90
+ handle_mm_fault+0x120/0x1f0
+ __do_page_fault+0x240/0xd70
+ do_page_fault+0x38/0xd0
+ handle_page_fault+0x10/0x30
+
+The warning happens on failed __copy_from_user_inatomic() which tries to
+copy data into a CoW page.
+
+This happens because of a race between MADV_DONTNEED and CoW page fault:
+
+ CPU0 CPU1
+ handle_mm_fault()
+ do_wp_page()
+ wp_page_copy()
+ do_wp_page()
+ madvise(MADV_DONTNEED)
+ zap_page_range()
+ zap_pte_range()
+ ptep_get_and_clear_full()
+ <TLB flush>
+ __copy_from_user_inatomic()
+ sees empty PTE and fails
+ WARN_ON_ONCE(1)
+ clear_page()
+
+The solution is to re-try __copy_from_user_inatomic() under the PTL
+after checking that the PTE still matches orig_pte.
+
+The second copy attempt can still fail, e.g. due to a non-readable PTE,
+but there's nothing reasonable we can do about that, except clearing
+the CoW page.
+
+Reported-by: Jeff Moyer <jmoyer@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Tested-by: Jeff Moyer <jmoyer@redhat.com>
+Cc: <stable@vger.kernel.org>
+Cc: Justin He <Justin.He@arm.com>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Link: http://lkml.kernel.org/r/20200218154151.13349-1-kirill.shutemov@linux.intel.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/memory.c | 35 +++++++++++++++++++++++++++--------
+ 1 file changed, 27 insertions(+), 8 deletions(-)
+
+diff --git a/mm/memory.c b/mm/memory.c
+index fcad8a0d943d3..eeae63bd95027 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -2353,7 +2353,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
+ bool ret;
+ void *kaddr;
+ void __user *uaddr;
+- bool force_mkyoung;
++ bool locked = false;
+ struct vm_area_struct *vma = vmf->vma;
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long addr = vmf->address;
+@@ -2378,11 +2378,11 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
+ * On architectures with software "accessed" bits, we would
+ * take a double page fault, so mark it accessed here.
+ */
+- force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte);
+- if (force_mkyoung) {
++ if (arch_faults_on_old_pte() && !pte_young(vmf->orig_pte)) {
+ pte_t entry;
+
+ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
++ locked = true;
+ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
+ /*
+ * Other thread has already handled the fault
+@@ -2406,18 +2406,37 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
+ * zeroes.
+ */
+ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
++ if (locked)
++ goto warn;
++
++ /* Re-validate under PTL if the page is still mapped */
++ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
++ locked = true;
++ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
++ /* The PTE changed under us. Retry page fault. */
++ ret = false;
++ goto pte_unlock;
++ }
++
+ /*
+- * Give a warn in case there can be some obscure
+- * use-case
++ * The same page can be mapped back since last copy attampt.
++ * Try to copy again under PTL.
+ */
+- WARN_ON_ONCE(1);
+- clear_page(kaddr);
++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
++ /*
++ * Give a warn in case there can be some obscure
++ * use-case
++ */
++warn:
++ WARN_ON_ONCE(1);
++ clear_page(kaddr);
++ }
+ }
+
+ ret = true;
+
+ pte_unlock:
+- if (force_mkyoung)
++ if (locked)
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ kunmap_atomic(kaddr);
+ flush_dcache_page(dst);
+--
+2.25.1
+
--- /dev/null
+From 9d9578e1fd57386225e86540a8c5e6a2fc6ad279 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Apr 2020 21:04:47 -0700
+Subject: mm/filemap.c: clear page error before actual read
+
+From: Xianting Tian <xianting_tian@126.com>
+
+[ Upstream commit faffdfa04fa11ccf048cebdde73db41ede0679e0 ]
+
+A mount failure happens in the following scenario: an application forks
+dozens of threads to mount the same number of cramfs images separately
+in docker, but several mounts fail with high probability. The mount
+fails because the page (read from the superblock of the loop dev) is
+not uptodate after wait_on_page_locked(page) returns in cramfs_read:
+
+ wait_on_page_locked(page);
+ if (!PageUptodate(page)) {
+ ...
+ }
+
+The reason the page is not uptodate: systemd-udevd reads the loopX dev
+before the mount. Because the status of loopX is Lo_unbound at this
+time, loop_make_request directly triggers the io_end handler
+end_buffer_async_read, which calls SetPageError(page). As a result the
+page cannot be set to uptodate in end_buffer_async_read:
+
+ if(page_uptodate && !PageError(page)) {
+ SetPageUptodate(page);
+ }
+
+Then the mount operation is performed and uses the same page that
+systemd-udevd just accessed above. Because this page is not uptodate,
+it launches an actual read via submit_bh and then waits on the page by
+calling wait_on_page_locked(page). When the I/O on the page is done,
+the io_end handler end_buffer_async_read is called. Since nothing
+cleared the page error (set by the systemd-udevd read) anywhere along
+the mount's read path, the page is still in "PageError" status and
+cannot be set to uptodate in end_buffer_async_read, so the mount fails.
+
+But sometimes the mount succeeds even though systemd-udevd read the
+loopX dev just before. The reason is that systemd-udevd launched
+another loopX read between steps 3.1 and 3.2; the steps are as below:
+
+1, loopX dev default status is Lo_unbound;
+2, systemd-udved read loopX dev (page is set to PageError);
+3, mount operation
+ 1) set loopX status to Lo_bound;
+ ==>systemd-udevd read loopX dev<==
+ 2) read loopX dev(page has no error)
+ 3) mount succeed
+
+Because the loopX dev status is set to Lo_bound after step 3.1, the
+second loopX read by systemd-udevd goes through the whole I/O stack;
+part of the call trace is as below:
+
+ SYS_read
+ vfs_read
+ do_sync_read
+ blkdev_aio_read
+ generic_file_aio_read
+ do_generic_file_read:
+ ClearPageError(page);
+ mapping->a_ops->readpage(filp, page);
+
+Here, mapping->a_ops->readpage() is blkdev_readpage. In the latest
+kernel, some function names have changed; the call trace is as below:
+
+ blkdev_read_iter
+ generic_file_read_iter
+ generic_file_buffered_read:
+ /*
+ * A previous I/O error may have been due to temporary
+ * failures, eg. mutipath errors.
+ * Pg_error will be set again if readpage fails.
+ */
+ ClearPageError(page);
+ /* Start the actual read. The read will unlock the page*/
+ error=mapping->a_ops->readpage(flip, page);
+
+We can see that ClearPageError(page) is called before the actual read,
+so the read in step 3.2 succeeds.
+
+This patch adds a call to ClearPageError just before the actual read in
+the read path of the cramfs mount. Without the patch, the call trace
+when performing a cramfs mount is as below:
+
+ do_mount
+ cramfs_read
+ cramfs_blkdev_read
+ read_cache_page
+ do_read_cache_page:
+ filler(data, page);
+ or
+ mapping->a_ops->readpage(data, page);
+
+With the patch, the call trace when performing the mount is as below:
+
+ do_mount
+ cramfs_read
+ cramfs_blkdev_read
+ read_cache_page:
+ do_read_cache_page:
+ ClearPageError(page); <== new add
+ filler(data, page);
+ or
+ mapping->a_ops->readpage(data, page);
+
+With the patch, the mount operation triggers the call to
+ClearPageError(page) before the actual read, so the page has no error
+as long as no additional page error happens when the I/O is done.
+
+Signed-off-by: Xianting Tian <xianting_tian@126.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Jan Kara <jack@suse.cz>
+Cc: <yubin@h3c.com>
+Link: http://lkml.kernel.org/r/1583318844-22971-1-git-send-email-xianting_tian@126.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/filemap.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/mm/filemap.c b/mm/filemap.c
+index 45f1c6d73b5b0..f2e777003b901 100644
+--- a/mm/filemap.c
++++ b/mm/filemap.c
+@@ -2889,6 +2889,14 @@ filler:
+ unlock_page(page);
+ goto out;
+ }
++
++ /*
++ * A previous I/O error may have been due to temporary
++ * failures.
++ * Clear page error before actual read, PG_error will be
++ * set again if read page fails.
++ */
++ ClearPageError(page);
+ goto filler;
+
+ out:
+--
+2.25.1
+
--- /dev/null
+From 14aa3a253607459713babd6e02d18b9705976b37 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 11 Oct 2019 22:09:39 +0800
+Subject: mm: fix double page fault on arm64 if PTE_AF is cleared
+
+From: Jia He <justin.he@arm.com>
+
+[ Upstream commit 83d116c53058d505ddef051e90ab27f57015b025 ]
+
+When we test pmdk unit test [1] vmmalloc_fork TEST3 on an arm64 guest,
+there is a double page fault in __copy_from_user_inatomic of cow_user_page.
+
+To reproduce the bug, the cmd is as follows after you deployed everything:
+make -C src/test/vmmalloc_fork/ TEST_TIME=60m check
+
+Below call trace is from arm64 do_page_fault for debugging purpose:
+[ 110.016195] Call trace:
+[ 110.016826] do_page_fault+0x5a4/0x690
+[ 110.017812] do_mem_abort+0x50/0xb0
+[ 110.018726] el1_da+0x20/0xc4
+[ 110.019492] __arch_copy_from_user+0x180/0x280
+[ 110.020646] do_wp_page+0xb0/0x860
+[ 110.021517] __handle_mm_fault+0x994/0x1338
+[ 110.022606] handle_mm_fault+0xe8/0x180
+[ 110.023584] do_page_fault+0x240/0x690
+[ 110.024535] do_mem_abort+0x50/0xb0
+[ 110.025423] el0_da+0x20/0x24
+
+The pte info before __copy_from_user_inatomic is (PTE_AF is cleared):
+[ffff9b007000] pgd=000000023d4f8003, pud=000000023da9b003,
+ pmd=000000023d4b3003, pte=360000298607bd3
+
+As told by Catalin: "On arm64 without hardware Access Flag, copying from
+user will fail because the pte is old and cannot be marked young. So we
+always end up with zeroed page after fork() + CoW for pfn mappings. we
+don't always have a hardware-managed access flag on arm64."
+
+This patch fixes it by calling pte_mkyoung. Also, the parameter list is
+changed because vmf should be passed to cow_user_page().
+
+Add a WARN_ON_ONCE when __copy_from_user_inatomic() returns an error,
+in case there is some obscure use-case (suggested by Kirill).
+
+[1] https://github.com/pmem/pmdk/tree/master/src/test/vmmalloc_fork
+
+Signed-off-by: Jia He <justin.he@arm.com>
+Reported-by: Yibo Cai <Yibo.Cai@arm.com>
+Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
+Acked-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/memory.c | 104 ++++++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 89 insertions(+), 15 deletions(-)
+
+diff --git a/mm/memory.c b/mm/memory.c
+index bbf0cc4066c84..fcad8a0d943d3 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -116,6 +116,18 @@ int randomize_va_space __read_mostly =
+ 2;
+ #endif
+
++#ifndef arch_faults_on_old_pte
++static inline bool arch_faults_on_old_pte(void)
++{
++ /*
++ * Those arches which don't have hw access flag feature need to
++ * implement their own helper. By default, "true" means pagefault
++ * will be hit on old pte.
++ */
++ return true;
++}
++#endif
++
+ static int __init disable_randmaps(char *s)
+ {
+ randomize_va_space = 0;
+@@ -2335,32 +2347,82 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
+ return same;
+ }
+
+-static inline void cow_user_page(struct page *dst, struct page *src, unsigned long va, struct vm_area_struct *vma)
++static inline bool cow_user_page(struct page *dst, struct page *src,
++ struct vm_fault *vmf)
+ {
++ bool ret;
++ void *kaddr;
++ void __user *uaddr;
++ bool force_mkyoung;
++ struct vm_area_struct *vma = vmf->vma;
++ struct mm_struct *mm = vma->vm_mm;
++ unsigned long addr = vmf->address;
++
+ debug_dma_assert_idle(src);
+
++ if (likely(src)) {
++ copy_user_highpage(dst, src, addr, vma);
++ return true;
++ }
++
+ /*
+ * If the source page was a PFN mapping, we don't have
+ * a "struct page" for it. We do a best-effort copy by
+ * just copying from the original user address. If that
+ * fails, we just zero-fill it. Live with it.
+ */
+- if (unlikely(!src)) {
+- void *kaddr = kmap_atomic(dst);
+- void __user *uaddr = (void __user *)(va & PAGE_MASK);
++ kaddr = kmap_atomic(dst);
++ uaddr = (void __user *)(addr & PAGE_MASK);
++
++ /*
++ * On architectures with software "accessed" bits, we would
++ * take a double page fault, so mark it accessed here.
++ */
++ force_mkyoung = arch_faults_on_old_pte() && !pte_young(vmf->orig_pte);
++ if (force_mkyoung) {
++ pte_t entry;
++
++ vmf->pte = pte_offset_map_lock(mm, vmf->pmd, addr, &vmf->ptl);
++ if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
++ /*
++ * Other thread has already handled the fault
++ * and we don't need to do anything. If it's
++ * not the case, the fault will be triggered
++ * again on the same address.
++ */
++ ret = false;
++ goto pte_unlock;
++ }
+
++ entry = pte_mkyoung(vmf->orig_pte);
++ if (ptep_set_access_flags(vma, addr, vmf->pte, entry, 0))
++ update_mmu_cache(vma, addr, vmf->pte);
++ }
++
++ /*
++ * This really shouldn't fail, because the page is there
++ * in the page tables. But it might just be unreadable,
++ * in which case we just give up and fill the result with
++ * zeroes.
++ */
++ if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE)) {
+ /*
+- * This really shouldn't fail, because the page is there
+- * in the page tables. But it might just be unreadable,
+- * in which case we just give up and fill the result with
+- * zeroes.
++ * Give a warn in case there can be some obscure
++ * use-case
+ */
+- if (__copy_from_user_inatomic(kaddr, uaddr, PAGE_SIZE))
+- clear_page(kaddr);
+- kunmap_atomic(kaddr);
+- flush_dcache_page(dst);
+- } else
+- copy_user_highpage(dst, src, va, vma);
++ WARN_ON_ONCE(1);
++ clear_page(kaddr);
++ }
++
++ ret = true;
++
++pte_unlock:
++ if (force_mkyoung)
++ pte_unmap_unlock(vmf->pte, vmf->ptl);
++ kunmap_atomic(kaddr);
++ flush_dcache_page(dst);
++
++ return ret;
+ }
+
+ static gfp_t __get_fault_gfp_mask(struct vm_area_struct *vma)
+@@ -2514,7 +2576,19 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
+ vmf->address);
+ if (!new_page)
+ goto oom;
+- cow_user_page(new_page, old_page, vmf->address, vma);
++
++ if (!cow_user_page(new_page, old_page, vmf)) {
++ /*
++ * COW failed, if the fault was solved by other,
++ * it's fine. If not, userspace would re-fault on
++ * the same address and we will handle the fault
++ * from the second attempt.
++ */
++ put_page(new_page);
++ if (old_page)
++ put_page(old_page);
++ return 0;
++ }
+ }
+
+ if (mem_cgroup_try_charge_delay(new_page, mm, GFP_KERNEL, &memcg, false))
+--
+2.25.1
+
--- /dev/null
+From 429a698b1f7c666696a99004331cb6cfd70e2e07 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Apr 2020 21:04:34 -0700
+Subject: mm/kmemleak.c: use address-of operator on section symbols
+
+From: Nathan Chancellor <natechancellor@gmail.com>
+
+[ Upstream commit b0d14fc43d39203ae025f20ef4d5d25d9ccf4be1 ]
+
+Clang warns:
+
+ mm/kmemleak.c:1955:28: warning: array comparison always evaluates to a constant [-Wtautological-compare]
+ if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
+ ^
+ mm/kmemleak.c:1955:60: warning: array comparison always evaluates to a constant [-Wtautological-compare]
+ if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
+
+These are not true arrays; they are linker-defined symbols, which are just
+addresses. Using the address-of operator silences the warning and does
+not change the resulting assembly with either clang/ld.lld or gcc/ld
+(tested with diff + objdump -Dr).
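+
+For illustration, section bounds are typically declared as incomplete
+char arrays, so comparing the symbols directly looks like an
+array-to-array comparison to clang; taking their addresses makes the
+intent explicit. This is only a sketch, not the kernel's exact
+declarations, and register_ro_after_init_separately() is a placeholder:
+
+    extern char _sdata[], _edata[];
+    extern char __start_ro_after_init[], __end_ro_after_init[];
+
+    /* same generated code, but no -Wtautological-compare warning */
+    if (&__start_ro_after_init < &_sdata ||
+        &__end_ro_after_init > &_edata)
+        register_ro_after_init_separately();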
+
+Suggested-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Catalin Marinas <catalin.marinas@arm.com>
+Link: https://github.com/ClangBuiltLinux/linux/issues/895
+Link: http://lkml.kernel.org/r/20200220051551.44000-1-natechancellor@gmail.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/kmemleak.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/kmemleak.c b/mm/kmemleak.c
+index 5eeabece0c178..f54734abf9466 100644
+--- a/mm/kmemleak.c
++++ b/mm/kmemleak.c
+@@ -2039,7 +2039,7 @@ void __init kmemleak_init(void)
+ create_object((unsigned long)__bss_start, __bss_stop - __bss_start,
+ KMEMLEAK_GREY, GFP_ATOMIC);
+ /* only register .data..ro_after_init if not within .data */
+- if (__start_ro_after_init < _sdata || __end_ro_after_init > _edata)
++ if (&__start_ro_after_init < &_sdata || &__end_ro_after_init > &_edata)
+ create_object((unsigned long)__start_ro_after_init,
+ __end_ro_after_init - __start_ro_after_init,
+ KMEMLEAK_GREY, GFP_ATOMIC);
+--
+2.25.1
+
--- /dev/null
+From 2540d97f0ffbb03cceea721d29f3248b15c405bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2020 14:32:48 -0700
+Subject: mm/mmap.c: initialize align_offset explicitly for vm_unmapped_area
+
+From: Jaewon Kim <jaewon31.kim@samsung.com>
+
+[ Upstream commit 09ef5283fd96ac424ef0e569626f359bf9ab86c9 ]
+
+When passing requirements to vm_unmapped_area, arch_get_unmapped_area and
+arch_get_unmapped_area_topdown did not set align_offset. Internally, in
+both unmapped_area and unmapped_area_topdown, if info->align_mask is 0,
+then info->align_offset is meaningless.
+
+But commit df529cabb7a2 ("mm: mmap: add trace point of
+vm_unmapped_area") always prints info->align_offset even though it is
+uninitialized.
+
+Fix this uninitialized value issue by setting it to 0 explicitly.
+
+Before:
+ vm_unmapped_area: addr=0x755b155000 err=0 total_vm=0x15aaf0 flags=0x1 len=0x109000 lo=0x8000 hi=0x75eed48000 mask=0x0 ofs=0x4022
+
+After:
+ vm_unmapped_area: addr=0x74a4ca1000 err=0 total_vm=0x168ab1 flags=0x1 len=0x9000 lo=0x8000 hi=0x753d94b000 mask=0x0 ofs=0x0
+
+Signed-off-by: Jaewon Kim <jaewon31.kim@samsung.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Michel Lespinasse <walken@google.com>
+Cc: Borislav Petkov <bp@suse.de>
+Link: http://lkml.kernel.org/r/20200409094035.19457-1-jaewon31.kim@samsung.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/mmap.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/mm/mmap.c b/mm/mmap.c
+index e84fd3347a518..f875386e7acd4 100644
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2077,6 +2077,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ info.align_mask = 0;
++ info.align_offset = 0;
+ return vm_unmapped_area(&info);
+ }
+ #endif
+@@ -2118,6 +2119,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
+ info.low_limit = max(PAGE_SIZE, mmap_min_addr);
+ info.high_limit = mm->mmap_base;
+ info.align_mask = 0;
++ info.align_offset = 0;
+ addr = vm_unmapped_area(&info);
+
+ /*
+--
+2.25.1
+
--- /dev/null
+From 7c4fbc588e37480328219686b4abdfa585453794 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Feb 2020 17:35:58 -0800
+Subject: mm: pagewalk: fix termination condition in walk_pte_range()
+
+From: Steven Price <steven.price@arm.com>
+
+[ Upstream commit c02a98753e0a36ba65a05818626fa6adeb4e7c97 ]
+
+If walk_pte_range() is called with an 'end' argument that is beyond the
+last page of memory (e.g. ~0UL), then the comparison between 'addr' and
+'end' will always fail and the loop will be infinite. Instead change the
+comparison to >= while accounting for overflow.
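+
+A short worked example (assuming PAGE_SIZE == 4096 and a 64-bit
+unsigned long) shows why the old loop never terminates:
+
+    end  = ~0UL = 0xffffffffffffffff      (not page aligned)
+    addr = 0xfffffffffffff000             (last page-aligned address)
+    addr += PAGE_SIZE  ->  wraps to 0x0, so "addr == end" is never true
+
+    with the fix: addr >= end - PAGE_SIZE, i.e.
+    0xfffffffffffff000 >= 0xffffffffffffefff  ->  the loop stops here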
+
+Link: http://lkml.kernel.org/r/20191218162402.45610-15-steven.price@arm.com
+Signed-off-by: Steven Price <steven.price@arm.com>
+Cc: Albert Ou <aou@eecs.berkeley.edu>
+Cc: Alexandre Ghiti <alex@ghiti.fr>
+Cc: Andy Lutomirski <luto@kernel.org>
+Cc: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Christian Borntraeger <borntraeger@de.ibm.com>
+Cc: Dave Hansen <dave.hansen@linux.intel.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: James Hogan <jhogan@kernel.org>
+Cc: James Morse <james.morse@arm.com>
+Cc: Jerome Glisse <jglisse@redhat.com>
+Cc: "Liang, Kan" <kan.liang@linux.intel.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Michael Ellerman <mpe@ellerman.id.au>
+Cc: Paul Burton <paul.burton@mips.com>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Paul Walmsley <paul.walmsley@sifive.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Ralf Baechle <ralf@linux-mips.org>
+Cc: Russell King <linux@armlinux.org.uk>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Cc: Vineet Gupta <vgupta@synopsys.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Zong Li <zong.li@sifive.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/pagewalk.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/mm/pagewalk.c b/mm/pagewalk.c
+index c3084ff2569d2..3c0930d94a295 100644
+--- a/mm/pagewalk.c
++++ b/mm/pagewalk.c
+@@ -15,9 +15,9 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
+ err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
+ if (err)
+ break;
+- addr += PAGE_SIZE;
+- if (addr == end)
++ if (addr >= end - PAGE_SIZE)
+ break;
++ addr += PAGE_SIZE;
+ pte++;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 92b87530127f5017c5cc8588009a6ea1b334f903 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 1 Jun 2020 21:48:40 -0700
+Subject: mm/swap_state: fix a data race in swapin_nr_pages
+
+From: Qian Cai <cai@lca.pw>
+
+[ Upstream commit d6c1f098f2a7ba62627c9bc17cda28f534ef9e4a ]
+
+"prev_offset" is a static variable in swapin_nr_pages() that can be
+accessed concurrently with only mmap_sem held in read mode as noticed by
+KCSAN,
+
+ BUG: KCSAN: data-race in swap_cluster_readahead / swap_cluster_readahead
+
+ write to 0xffffffff92763830 of 8 bytes by task 14795 on cpu 17:
+ swap_cluster_readahead+0x2a6/0x5e0
+ swapin_readahead+0x92/0x8dc
+ do_swap_page+0x49b/0xf20
+ __handle_mm_fault+0xcfb/0xd70
+ handle_mm_fault+0xfc/0x2f0
+ do_page_fault+0x263/0x715
+ page_fault+0x34/0x40
+
+ 1 lock held by (dnf)/14795:
+ #0: ffff897bd2e98858 (&mm->mmap_sem#2){++++}-{3:3}, at: do_page_fault+0x143/0x715
+ do_user_addr_fault at arch/x86/mm/fault.c:1405
+ (inlined by) do_page_fault at arch/x86/mm/fault.c:1535
+ irq event stamp: 83493
+ count_memcg_event_mm+0x1a6/0x270
+ count_memcg_event_mm+0x119/0x270
+ __do_softirq+0x365/0x589
+ irq_exit+0xa2/0xc0
+
+ read to 0xffffffff92763830 of 8 bytes by task 1 on cpu 22:
+ swap_cluster_readahead+0xfd/0x5e0
+ swapin_readahead+0x92/0x8dc
+ do_swap_page+0x49b/0xf20
+ __handle_mm_fault+0xcfb/0xd70
+ handle_mm_fault+0xfc/0x2f0
+ do_page_fault+0x263/0x715
+ page_fault+0x34/0x40
+
+ 1 lock held by systemd/1:
+ #0: ffff897c38f14858 (&mm->mmap_sem#2){++++}-{3:3}, at: do_page_fault+0x143/0x715
+ irq event stamp: 43530289
+ count_memcg_event_mm+0x1a6/0x270
+ count_memcg_event_mm+0x119/0x270
+ __do_softirq+0x365/0x589
+ irq_exit+0xa2/0xc0
+
+Signed-off-by: Qian Cai <cai@lca.pw>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Marco Elver <elver@google.com>
+Cc: Hugh Dickins <hughd@google.com>
+Link: http://lkml.kernel.org/r/20200402213748.2237-1-cai@lca.pw
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/swap_state.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/mm/swap_state.c b/mm/swap_state.c
+index 09731f4174c7e..3febffe0fca4a 100644
+--- a/mm/swap_state.c
++++ b/mm/swap_state.c
+@@ -537,10 +537,11 @@ static unsigned long swapin_nr_pages(unsigned long offset)
+ return 1;
+
+ hits = atomic_xchg(&swapin_readahead_hits, 0);
+- pages = __swapin_nr_pages(prev_offset, offset, hits, max_pages,
++ pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
++ max_pages,
+ atomic_read(&last_readahead_pages));
+ if (!hits)
+- prev_offset = offset;
++ WRITE_ONCE(prev_offset, offset);
+ atomic_set(&last_readahead_pages, pages);
+
+ return pages;
+--
+2.25.1
+
--- /dev/null
+From 4b1f57e2a134e8c34c58dbdf544d0fe42f93940a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Jan 2020 22:13:39 -0800
+Subject: mm/swapfile.c: swap_next should increase position index
+
+From: Vasily Averin <vvs@virtuozzo.com>
+
+[ Upstream commit 10c8d69f314d557d94d74ec492575ae6a4f1eb1c ]
+
+If a seq_file .next function does not change the position index, a read
+after some lseek can generate unexpected output.
+
+In Aug 2018 NeilBrown noticed commit 1f4aace60b0e ("fs/seq_file.c:
+simplify seq_file iteration code and interface") "Some ->next functions
+do not increment *pos when they return NULL... Note that such ->next
+functions are buggy and should be fixed. A simple demonstration is
+
+ dd if=/proc/swaps bs=1000 skip=1
+
+Choose any block size larger than the size of /proc/swaps. This will
+always show the whole last line of /proc/swaps"
+
+The described problem is still present: if you lseek into the middle of
+the last output line, the following read will output the end of the last
+line and then the whole last line once again.
+
+ $ dd if=/proc/swaps bs=1 # usual output
+ Filename Type Size Used Priority
+ /dev/dm-0 partition 4194812 97536 -2
+ 104+0 records in
+ 104+0 records out
+ 104 bytes copied
+
+ $ dd if=/proc/swaps bs=40 skip=1 # last line was generated twice
+ dd: /proc/swaps: cannot skip to specified offset
+ v/dm-0 partition 4194812 97536 -2
+ /dev/dm-0 partition 4194812 97536 -2
+ 3+1 records in
+ 3+1 records out
+ 131 bytes copied
+
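+A minimal sketch of the rule this patch enforces (hypothetical helpers,
+not the swapfile code): a seq_file .next implementation must advance
+*pos on every call, including the one that ends the iteration by
+returning NULL.
+
+    static void *example_next(struct seq_file *seq, void *v, loff_t *pos)
+    {
+        ++*pos;                     /* always advance, even at the end */
+        if (is_last_item(v))        /* hypothetical predicate */
+            return NULL;
+        return following_item(v);   /* hypothetical lookup */
+    }
+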
+https://bugzilla.kernel.org/show_bug.cgi?id=206283
+
+Link: http://lkml.kernel.org/r/bd8cfd7b-ac95-9b91-f9e7-e8438bd5047d@virtuozzo.com
+Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Jann Horn <jannh@google.com>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: Kees Cook <keescook@chromium.org>
+Cc: Hugh Dickins <hughd@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/swapfile.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/mm/swapfile.c b/mm/swapfile.c
+index 0047dcaf93697..c3684cfa9534e 100644
+--- a/mm/swapfile.c
++++ b/mm/swapfile.c
+@@ -2738,10 +2738,10 @@ static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
+ else
+ type = si->type + 1;
+
++ ++(*pos);
+ for (; (si = swap_type_to_swap_info(type)); type++) {
+ if (!(si->flags & SWP_USED) || !si->swap_map)
+ continue;
+- ++*pos;
+ return si;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 4614cabdb1e4a80e144cca3183770e0127f3bd5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Apr 2020 21:10:12 -0700
+Subject: mm/vmscan.c: fix data races using kswapd_classzone_idx
+
+From: Qian Cai <cai@lca.pw>
+
+[ Upstream commit 5644e1fbbfe15ad06785502bbfe5751223e5841d ]
+
+pgdat->kswapd_classzone_idx could be accessed concurrently in
+wakeup_kswapd(). Plain writes and reads without any lock protection
+result in data races. Fix them by adding a pair of READ|WRITE_ONCE() as
+well as saving a branch (compilers might well optimize the original code
+in an unintentional way anyway). While at it, also take care of
+pgdat->kswapd_order and non-kswapd threads in allow_direct_reclaim(). The
+data races were reported by KCSAN,
+
+ BUG: KCSAN: data-race in wakeup_kswapd / wakeup_kswapd
+
+ write to 0xffff9f427ffff2dc of 4 bytes by task 7454 on cpu 13:
+ wakeup_kswapd+0xf1/0x400
+ wakeup_kswapd at mm/vmscan.c:3967
+ wake_all_kswapds+0x59/0xc0
+ wake_all_kswapds at mm/page_alloc.c:4241
+ __alloc_pages_slowpath+0xdcc/0x1290
+ __alloc_pages_slowpath at mm/page_alloc.c:4512
+ __alloc_pages_nodemask+0x3bb/0x450
+ alloc_pages_vma+0x8a/0x2c0
+ do_anonymous_page+0x16e/0x6f0
+ __handle_mm_fault+0xcd5/0xd40
+ handle_mm_fault+0xfc/0x2f0
+ do_page_fault+0x263/0x6f9
+ page_fault+0x34/0x40
+
+ 1 lock held by mtest01/7454:
+ #0: ffff9f425afe8808 (&mm->mmap_sem#2){++++}, at:
+ do_page_fault+0x143/0x6f9
+ do_user_addr_fault at arch/x86/mm/fault.c:1405
+ (inlined by) do_page_fault at arch/x86/mm/fault.c:1539
+ irq event stamp: 6944085
+ count_memcg_event_mm+0x1a6/0x270
+ count_memcg_event_mm+0x119/0x270
+ __do_softirq+0x34c/0x57c
+ irq_exit+0xa2/0xc0
+
+ read to 0xffff9f427ffff2dc of 4 bytes by task 7472 on cpu 38:
+ wakeup_kswapd+0xc8/0x400
+ wake_all_kswapds+0x59/0xc0
+ __alloc_pages_slowpath+0xdcc/0x1290
+ __alloc_pages_nodemask+0x3bb/0x450
+ alloc_pages_vma+0x8a/0x2c0
+ do_anonymous_page+0x16e/0x6f0
+ __handle_mm_fault+0xcd5/0xd40
+ handle_mm_fault+0xfc/0x2f0
+ do_page_fault+0x263/0x6f9
+ page_fault+0x34/0x40
+
+ 1 lock held by mtest01/7472:
+ #0: ffff9f425a9ac148 (&mm->mmap_sem#2){++++}, at:
+ do_page_fault+0x143/0x6f9
+ irq event stamp: 6793561
+ count_memcg_event_mm+0x1a6/0x270
+ count_memcg_event_mm+0x119/0x270
+ __do_softirq+0x34c/0x57c
+ irq_exit+0xa2/0xc0
+
+ BUG: KCSAN: data-race in kswapd / wakeup_kswapd
+
+ write to 0xffff90973ffff2dc of 4 bytes by task 820 on cpu 6:
+ kswapd+0x27c/0x8d0
+ kthread+0x1e0/0x200
+ ret_from_fork+0x27/0x50
+
+ read to 0xffff90973ffff2dc of 4 bytes by task 6299 on cpu 0:
+ wakeup_kswapd+0xf3/0x450
+ wake_all_kswapds+0x59/0xc0
+ __alloc_pages_slowpath+0xdcc/0x1290
+ __alloc_pages_nodemask+0x3bb/0x450
+ alloc_pages_vma+0x8a/0x2c0
+ do_anonymous_page+0x170/0x700
+ __handle_mm_fault+0xc9f/0xd00
+ handle_mm_fault+0xfc/0x2f0
+ do_page_fault+0x263/0x6f9
+ page_fault+0x34/0x40
+
+Signed-off-by: Qian Cai <cai@lca.pw>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Marco Elver <elver@google.com>
+Cc: Matthew Wilcox <willy@infradead.org>
+Link: http://lkml.kernel.org/r/1582749472-5171-1-git-send-email-cai@lca.pw
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/vmscan.c | 45 ++++++++++++++++++++++++++-------------------
+ 1 file changed, 26 insertions(+), 19 deletions(-)
+
+diff --git a/mm/vmscan.c b/mm/vmscan.c
+index b93dc8fc6007f..b7d7f6d65bd5b 100644
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -3109,8 +3109,9 @@ static bool allow_direct_reclaim(pg_data_t *pgdat)
+
+ /* kswapd must be awake if processes are being throttled */
+ if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) {
+- pgdat->kswapd_classzone_idx = min(pgdat->kswapd_classzone_idx,
+- (enum zone_type)ZONE_NORMAL);
++ if (READ_ONCE(pgdat->kswapd_classzone_idx) > ZONE_NORMAL)
++ WRITE_ONCE(pgdat->kswapd_classzone_idx, ZONE_NORMAL);
++
+ wake_up_interruptible(&pgdat->kswapd_wait);
+ }
+
+@@ -3626,9 +3627,9 @@ out:
+ static enum zone_type kswapd_classzone_idx(pg_data_t *pgdat,
+ enum zone_type prev_classzone_idx)
+ {
+- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
+- return prev_classzone_idx;
+- return pgdat->kswapd_classzone_idx;
++ enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
++
++ return curr_idx == MAX_NR_ZONES ? prev_classzone_idx : curr_idx;
+ }
+
+ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_order,
+@@ -3672,8 +3673,11 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int alloc_order, int reclaim_o
+ * the previous request that slept prematurely.
+ */
+ if (remaining) {
+- pgdat->kswapd_classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
+- pgdat->kswapd_order = max(pgdat->kswapd_order, reclaim_order);
++ WRITE_ONCE(pgdat->kswapd_classzone_idx,
++ kswapd_classzone_idx(pgdat, classzone_idx));
++
++ if (READ_ONCE(pgdat->kswapd_order) < reclaim_order)
++ WRITE_ONCE(pgdat->kswapd_order, reclaim_order);
+ }
+
+ finish_wait(&pgdat->kswapd_wait, &wait);
+@@ -3755,12 +3759,12 @@ static int kswapd(void *p)
+ tsk->flags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
+ set_freezable();
+
+- pgdat->kswapd_order = 0;
+- pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
++ WRITE_ONCE(pgdat->kswapd_order, 0);
++ WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
+ for ( ; ; ) {
+ bool ret;
+
+- alloc_order = reclaim_order = pgdat->kswapd_order;
++ alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
+ classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
+
+ kswapd_try_sleep:
+@@ -3768,10 +3772,10 @@ kswapd_try_sleep:
+ classzone_idx);
+
+ /* Read the new order and classzone_idx */
+- alloc_order = reclaim_order = pgdat->kswapd_order;
++ alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order);
+ classzone_idx = kswapd_classzone_idx(pgdat, classzone_idx);
+- pgdat->kswapd_order = 0;
+- pgdat->kswapd_classzone_idx = MAX_NR_ZONES;
++ WRITE_ONCE(pgdat->kswapd_order, 0);
++ WRITE_ONCE(pgdat->kswapd_classzone_idx, MAX_NR_ZONES);
+
+ ret = try_to_freeze();
+ if (kthread_should_stop())
+@@ -3816,20 +3820,23 @@ void wakeup_kswapd(struct zone *zone, gfp_t gfp_flags, int order,
+ enum zone_type classzone_idx)
+ {
+ pg_data_t *pgdat;
++ enum zone_type curr_idx;
+
+ if (!managed_zone(zone))
+ return;
+
+ if (!cpuset_zone_allowed(zone, gfp_flags))
+ return;
++
+ pgdat = zone->zone_pgdat;
++ curr_idx = READ_ONCE(pgdat->kswapd_classzone_idx);
++
++ if (curr_idx == MAX_NR_ZONES || curr_idx < classzone_idx)
++ WRITE_ONCE(pgdat->kswapd_classzone_idx, classzone_idx);
++
++ if (READ_ONCE(pgdat->kswapd_order) < order)
++ WRITE_ONCE(pgdat->kswapd_order, order);
+
+- if (pgdat->kswapd_classzone_idx == MAX_NR_ZONES)
+- pgdat->kswapd_classzone_idx = classzone_idx;
+- else
+- pgdat->kswapd_classzone_idx = max(pgdat->kswapd_classzone_idx,
+- classzone_idx);
+- pgdat->kswapd_order = max(pgdat->kswapd_order, order);
+ if (!waitqueue_active(&pgdat->kswapd_wait))
+ return;
+
+--
+2.25.1
+
--- /dev/null
+From 5cd6c05ed0029c8aa5c3ca867971c01298718bd1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 16 Nov 2019 20:00:45 -0500
+Subject: mmc: core: Fix size overflow for mmc partitions
+
+From: Bradley Bolen <bradleybolen@gmail.com>
+
+[ Upstream commit f3d7c2292d104519195fdb11192daec13229c219 ]
+
+With large eMMC cards, it is possible to create general purpose
+partitions that are bigger than 4GB. The size member of the mmc_part
+struct is only an unsigned int which overflows for gp partitions larger
+than 4GB. Change this to a u64 to handle the overflow.
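+
+A quick illustration of the truncation (kernel-style types, standalone
+sketch): a general-purpose partition of 8 GiB expressed in bytes needs
+more than 32 bits.
+
+    unsigned int size32 = 8ULL << 30;   /* truncated: stores 0 */
+    u64          size64 = 8ULL << 30;   /* stores 8589934592 as expected */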
+
+Signed-off-by: Bradley Bolen <bradleybolen@gmail.com>
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/core/mmc.c | 9 ++++-----
+ include/linux/mmc/card.h | 2 +-
+ 2 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
+index 5ca53e225382d..4b18034537f53 100644
+--- a/drivers/mmc/core/mmc.c
++++ b/drivers/mmc/core/mmc.c
+@@ -300,7 +300,7 @@ static void mmc_manage_enhanced_area(struct mmc_card *card, u8 *ext_csd)
+ }
+ }
+
+-static void mmc_part_add(struct mmc_card *card, unsigned int size,
++static void mmc_part_add(struct mmc_card *card, u64 size,
+ unsigned int part_cfg, char *name, int idx, bool ro,
+ int area_type)
+ {
+@@ -316,7 +316,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
+ {
+ int idx;
+ u8 hc_erase_grp_sz, hc_wp_grp_sz;
+- unsigned int part_size;
++ u64 part_size;
+
+ /*
+ * General purpose partition feature support --
+@@ -346,8 +346,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
+ (ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3 + 1]
+ << 8) +
+ ext_csd[EXT_CSD_GP_SIZE_MULT + idx * 3];
+- part_size *= (size_t)(hc_erase_grp_sz *
+- hc_wp_grp_sz);
++ part_size *= (hc_erase_grp_sz * hc_wp_grp_sz);
+ mmc_part_add(card, part_size << 19,
+ EXT_CSD_PART_CONFIG_ACC_GP0 + idx,
+ "gp%d", idx, false,
+@@ -365,7 +364,7 @@ static void mmc_manage_gp_partitions(struct mmc_card *card, u8 *ext_csd)
+ static int mmc_decode_ext_csd(struct mmc_card *card, u8 *ext_csd)
+ {
+ int err = 0, idx;
+- unsigned int part_size;
++ u64 part_size;
+ struct device_node *np;
+ bool broken_hpi = false;
+
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index 8ef330027b134..3f8e84a80b4ad 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -227,7 +227,7 @@ struct mmc_queue_req;
+ * MMC Physical partitions
+ */
+ struct mmc_part {
+- unsigned int size; /* partition size (in bytes) */
++ u64 size; /* partition size (in bytes) */
+ unsigned int part_cfg; /* partition type */
+ char name[MAX_MMC_PART_NAME_LEN];
+ bool force_ro; /* to make boot parts RO by default */
+--
+2.25.1
+
--- /dev/null
+From d55897ca2ba229c731a396355328ee49ed3a20a8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Dec 2019 00:15:26 +0100
+Subject: mt76: clear skb pointers from rx aggregation reorder buffer during
+ cleanup
+
+From: Felix Fietkau <nbd@nbd.name>
+
+[ Upstream commit 9379df2fd9234e3b67a23101c2370c99f6af6d77 ]
+
+During the cleanup of the aggregation session, an rx handler (or release
+timer) on another CPU might still hold a pointer to the reorder buffer
+and could attempt to release some packets.
+Clearing the pointers during cleanup avoids a theoretical use-after-free
+bug here.
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/mediatek/mt76/agg-rx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/mediatek/mt76/agg-rx.c b/drivers/net/wireless/mediatek/mt76/agg-rx.c
+index d44d57e6eb27a..97df6b3a472b1 100644
+--- a/drivers/net/wireless/mediatek/mt76/agg-rx.c
++++ b/drivers/net/wireless/mediatek/mt76/agg-rx.c
+@@ -278,6 +278,7 @@ static void mt76_rx_aggr_shutdown(struct mt76_dev *dev, struct mt76_rx_tid *tid)
+ if (!skb)
+ continue;
+
++ tid->reorder_buf[i] = NULL;
+ tid->nframes--;
+ dev_kfree_skb(skb);
+ }
+--
+2.25.1
+
--- /dev/null
+From 709832305b88ef8255daa6ee8d1c7c3c89fa2cf1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Oct 2019 10:36:37 +0800
+Subject: mtd: cfi_cmdset_0002: don't free cfi->cfiq in error path of
+ cfi_amdstd_setup()
+
+From: Hou Tao <houtao1@huawei.com>
+
+[ Upstream commit 03976af89e3bd9489d542582a325892e6a8cacc0 ]
+
+Otherwise there may be a double-free problem, because cfi->cfiq will
+be freed by mtd_do_chip_probe() if both invocations of
+check_cmd_set() return failure.
+
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Reviewed-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Vignesh Raghavendra <vigneshr@ti.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/chips/cfi_cmdset_0002.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
+index 1dbc9554a0786..3ab75d3e2ce32 100644
+--- a/drivers/mtd/chips/cfi_cmdset_0002.c
++++ b/drivers/mtd/chips/cfi_cmdset_0002.c
+@@ -727,7 +727,6 @@ static struct mtd_info *cfi_amdstd_setup(struct mtd_info *mtd)
+ kfree(mtd->eraseregions);
+ kfree(mtd);
+ kfree(cfi->cmdset_priv);
+- kfree(cfi->cfiq);
+ return NULL;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From dc6902920ca3cd1086289c632333b26bc2f5d995 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Apr 2020 09:53:47 -0700
+Subject: mtd: parser: cmdline: Support MTD names containing one or more colons
+
+From: Boris Brezillon <boris.brezillon@collabora.com>
+
+[ Upstream commit eb13fa0227417e84aecc3bd9c029d376e33474d3 ]
+
+It looks like some drivers define MTD names with a colon in them, thus
+making mtdparts= parsing impossible. Let's fix the parser to gracefully
+handle that case: the last ':' in a partition definition sequence is
+considered instead of the first one.
+
+Signed-off-by: Boris Brezillon <boris.brezillon@collabora.com>
+Signed-off-by: Ron Minnich <rminnich@google.com>
+Tested-by: Ron Minnich <rminnich@google.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/cmdlinepart.c | 23 ++++++++++++++++++++---
+ 1 file changed, 20 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
+index 3ea44cff9b759..c29205ee82e20 100644
+--- a/drivers/mtd/cmdlinepart.c
++++ b/drivers/mtd/cmdlinepart.c
+@@ -231,12 +231,29 @@ static int mtdpart_setup_real(char *s)
+ struct cmdline_mtd_partition *this_mtd;
+ struct mtd_partition *parts;
+ int mtd_id_len, num_parts;
+- char *p, *mtd_id;
++ char *p, *mtd_id, *semicol;
++
++ /*
++ * Replace the first ';' by a NULL char so strrchr can work
++ * properly.
++ */
++ semicol = strchr(s, ';');
++ if (semicol)
++ *semicol = '\0';
+
+ mtd_id = s;
+
+- /* fetch <mtd-id> */
+- p = strchr(s, ':');
++ /*
++ * fetch <mtd-id>. We use strrchr to ignore all ':' that could
++ * be present in the MTD name, only the last one is interpreted
++ * as an <mtd-id>/<part-definition> separator.
++ */
++ p = strrchr(s, ':');
++
++ /* Restore the ';' now. */
++ if (semicol)
++ *semicol = ';';
++
+ if (!p) {
+ pr_err("no mtd-id\n");
+ return -EINVAL;
+--
+2.25.1
+
--- /dev/null
+From 2eaa8448350a29388ef71ceaffa591bb28e7af7b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 May 2020 18:40:06 +0800
+Subject: mtd: rawnand: omap_elm: Fix runtime PM imbalance on error
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit 37f7212148cf1d796135cdf8d0c7fee13067674b ]
+
+pm_runtime_get_sync() increments the runtime PM usage counter even
+when it returns an error code. Thus a pairing decrement is needed on
+the error handling path to keep the counter balanced.
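+
+The general pattern looks roughly like this (a sketch, not the driver
+code): balance the failed get with a put before bailing out.
+
+	pm_runtime_enable(dev);
+	ret = pm_runtime_get_sync(dev);
+	if (ret < 0) {
+		pm_runtime_put_sync(dev);	/* undo the usage counter bump */
+		pm_runtime_disable(dev);
+		return ret;
+	}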
+
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
+Link: https://lore.kernel.org/linux-mtd/20200522104008.28340-1-dinghao.liu@zju.edu.cn
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/nand/raw/omap_elm.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/mtd/nand/raw/omap_elm.c b/drivers/mtd/nand/raw/omap_elm.c
+index a3f32f939cc17..6736777a41567 100644
+--- a/drivers/mtd/nand/raw/omap_elm.c
++++ b/drivers/mtd/nand/raw/omap_elm.c
+@@ -421,6 +421,7 @@ static int elm_probe(struct platform_device *pdev)
+ pm_runtime_enable(&pdev->dev);
+ if (pm_runtime_get_sync(&pdev->dev) < 0) {
+ ret = -EINVAL;
++ pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_disable(&pdev->dev);
+ dev_err(&pdev->dev, "can't enable clock\n");
+ return ret;
+--
+2.25.1
+
--- /dev/null
+From 3a82c04253c557def6cb79cb16b609c3dd12bf2d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jan 2020 10:11:28 +0300
+Subject: neigh_stat_seq_next() should increase position index
+
+From: Vasily Averin <vvs@virtuozzo.com>
+
+[ Upstream commit 1e3f9f073c47bee7c23e77316b07bc12338c5bba ]
+
+If a seq_file .next function does not change the position index,
+a read after some lseek can generate unexpected output.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=206283
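+
+In general a seq_file .next implementation is expected to advance *pos
+even when it reaches the end of the sequence; a minimal sketch (items
+and nr_items are hypothetical):
+
+	static void *example_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+	{
+		(*pos)++;			/* always advance, even at the end */
+		if (*pos >= nr_items)
+			return NULL;
+		return &items[*pos];
+	}
+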
+Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/neighbour.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/core/neighbour.c b/net/core/neighbour.c
+index bf738ec68cb53..6e890f51b7d86 100644
+--- a/net/core/neighbour.c
++++ b/net/core/neighbour.c
+@@ -2844,6 +2844,7 @@ static void *neigh_stat_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ *pos = cpu+1;
+ return per_cpu_ptr(tbl->stats, cpu);
+ }
++ (*pos)++;
+ return NULL;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From be08c8429667f918767175803fe83273a1a484b8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2020 11:39:48 +0800
+Subject: net: openvswitch: use div_u64() for 64-by-32 divisions
+
+From: Tonghao Zhang <xiangxia.m.yue@gmail.com>
+
+[ Upstream commit 659d4587fe7233bfdff303744b20d6f41ad04362 ]
+
+When compiling the kernel for the 32-bit arm platform, the build
+warning below is found. To fix it, use div_u64() for the division.
+| net/openvswitch/meter.c:396: undefined reference to `__udivdi3'
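+
+On 32-bit architectures a plain '/' with a u64 dividend is lowered to
+the libgcc helper __udivdi3, which the kernel does not provide;
+div_u64() from <linux/math64.h> does the 64-by-32 division instead.
+A minimal sketch (illustrative values):
+
+	u64 bucket = 8000000000ULL;
+	u32 rate = 1000;
+	u64 delta = div_u64(bucket, rate);	/* OK on 32-bit; bucket / rate is not */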
+
+[add more commit msg, change reported tag, and use div_u64 instead
+of do_div by Tonghao]
+
+Fixes: e57358873bb5d6ca ("net: openvswitch: use u64 for meter bucket")
+Reported-by: kbuild test robot <lkp@intel.com>
+Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
+Tested-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/openvswitch/meter.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
+index 6f5131d1074b0..5ea2471ffc03f 100644
+--- a/net/openvswitch/meter.c
++++ b/net/openvswitch/meter.c
+@@ -256,7 +256,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
+ * Start with a full bucket.
+ */
+ band->bucket = (band->burst_size + band->rate) * 1000ULL;
+- band_max_delta_t = band->bucket / band->rate;
++ band_max_delta_t = div_u64(band->bucket, band->rate);
+ if (band_max_delta_t > meter->max_delta_t)
+ meter->max_delta_t = band_max_delta_t;
+ band++;
+--
+2.25.1
+
--- /dev/null
+From 635c1fb526a0b76ab5ed367bb2b103b5b5d9d6e4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 24 Apr 2020 08:08:06 +0800
+Subject: net: openvswitch: use u64 for meter bucket
+
+From: Tonghao Zhang <xiangxia.m.yue@gmail.com>
+
+[ Upstream commit e57358873bb5d6caa882b9684f59140912b37dde ]
+
+When setting the meter rate to 4+ Gbps, there is an overflow and the
+meters don't work as expected.
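+
+For example (illustrative numbers): the old u32 bucket wraps as soon as
+(burst_size + rate) * 1000 exceeds U32_MAX, while the same expression
+with a ULL multiplier is evaluated in 64 bits.
+
+	u32 rate = 4000000, burst_size = 4000000;
+	u32 bad  = (burst_size + rate) * 1000;		/* 8e9 wraps past U32_MAX */
+	u64 good = (burst_size + rate) * 1000ULL;	/* promoted to 64-bit math */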
+
+Cc: Pravin B Shelar <pshelar@ovn.org>
+Cc: Andy Zhou <azhou@ovn.org>
+Signed-off-by: Tonghao Zhang <xiangxia.m.yue@gmail.com>
+Acked-by: Pravin B Shelar <pshelar@ovn.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/openvswitch/meter.c | 2 +-
+ net/openvswitch/meter.h | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/openvswitch/meter.c b/net/openvswitch/meter.c
+index c038e021a5916..6f5131d1074b0 100644
+--- a/net/openvswitch/meter.c
++++ b/net/openvswitch/meter.c
+@@ -255,7 +255,7 @@ static struct dp_meter *dp_meter_create(struct nlattr **a)
+ *
+ * Start with a full bucket.
+ */
+- band->bucket = (band->burst_size + band->rate) * 1000;
++ band->bucket = (band->burst_size + band->rate) * 1000ULL;
+ band_max_delta_t = band->bucket / band->rate;
+ if (band_max_delta_t > meter->max_delta_t)
+ meter->max_delta_t = band_max_delta_t;
+diff --git a/net/openvswitch/meter.h b/net/openvswitch/meter.h
+index 964ace2650f89..970557ed5b5b6 100644
+--- a/net/openvswitch/meter.h
++++ b/net/openvswitch/meter.h
+@@ -26,7 +26,7 @@ struct dp_meter_band {
+ u32 type;
+ u32 rate;
+ u32 burst_size;
+- u32 bucket; /* 1/1000 packets, or in bits */
++ u64 bucket; /* 1/1000 packets, or in bits */
+ struct ovs_flow_stats stats;
+ };
+
+--
+2.25.1
+
--- /dev/null
+From 15dcce44d33aef3e4e91c6b62d827fd50021747e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Nov 2019 10:04:11 -0800
+Subject: net: silence data-races on sk_backlog.tail
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit 9ed498c6280a2f2b51d02df96df53037272ede49 ]
+
+sk->sk_backlog.tail might be read without holding the socket spinlock,
+so we need to add proper READ_ONCE()/WRITE_ONCE() annotations to
+silence the warnings.
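+
+The pattern in short (a sketch of the two sides, taken from the hunks
+below): the writer, which runs under the socket spinlock, uses
+WRITE_ONCE(), and the lockless readers use READ_ONCE(), so the
+compiler can neither tear nor fuse the accesses.
+
+	/* writer, under the socket spinlock */
+	WRITE_ONCE(sk->sk_backlog.tail, skb);
+
+	/* lockless reader */
+	if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+		break;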
+
+KCSAN reported :
+
+BUG: KCSAN: data-race in tcp_add_backlog / tcp_recvmsg
+
+write to 0xffff8881265109f8 of 8 bytes by interrupt on cpu 1:
+ __sk_add_backlog include/net/sock.h:907 [inline]
+ sk_add_backlog include/net/sock.h:938 [inline]
+ tcp_add_backlog+0x476/0xce0 net/ipv4/tcp_ipv4.c:1759
+ tcp_v4_rcv+0x1a70/0x1bd0 net/ipv4/tcp_ipv4.c:1947
+ ip_protocol_deliver_rcu+0x4d/0x420 net/ipv4/ip_input.c:204
+ ip_local_deliver_finish+0x110/0x140 net/ipv4/ip_input.c:231
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_local_deliver+0x133/0x210 net/ipv4/ip_input.c:252
+ dst_input include/net/dst.h:442 [inline]
+ ip_rcv_finish+0x121/0x160 net/ipv4/ip_input.c:413
+ NF_HOOK include/linux/netfilter.h:305 [inline]
+ NF_HOOK include/linux/netfilter.h:299 [inline]
+ ip_rcv+0x18f/0x1a0 net/ipv4/ip_input.c:523
+ __netif_receive_skb_one_core+0xa7/0xe0 net/core/dev.c:4929
+ __netif_receive_skb+0x37/0xf0 net/core/dev.c:5043
+ netif_receive_skb_internal+0x59/0x190 net/core/dev.c:5133
+ napi_skb_finish net/core/dev.c:5596 [inline]
+ napi_gro_receive+0x28f/0x330 net/core/dev.c:5629
+ receive_buf+0x284/0x30b0 drivers/net/virtio_net.c:1061
+ virtnet_receive drivers/net/virtio_net.c:1323 [inline]
+ virtnet_poll+0x436/0x7d0 drivers/net/virtio_net.c:1428
+ napi_poll net/core/dev.c:6311 [inline]
+ net_rx_action+0x3ae/0xa90 net/core/dev.c:6379
+ __do_softirq+0x115/0x33f kernel/softirq.c:292
+ invoke_softirq kernel/softirq.c:373 [inline]
+ irq_exit+0xbb/0xe0 kernel/softirq.c:413
+ exiting_irq arch/x86/include/asm/apic.h:536 [inline]
+ do_IRQ+0xa6/0x180 arch/x86/kernel/irq.c:263
+ ret_from_intr+0x0/0x19
+ native_safe_halt+0xe/0x10 arch/x86/kernel/paravirt.c:71
+ arch_cpu_idle+0x1f/0x30 arch/x86/kernel/process.c:571
+ default_idle_call+0x1e/0x40 kernel/sched/idle.c:94
+ cpuidle_idle_call kernel/sched/idle.c:154 [inline]
+ do_idle+0x1af/0x280 kernel/sched/idle.c:263
+ cpu_startup_entry+0x1b/0x20 kernel/sched/idle.c:355
+ start_secondary+0x208/0x260 arch/x86/kernel/smpboot.c:264
+ secondary_startup_64+0xa4/0xb0 arch/x86/kernel/head_64.S:241
+
+read to 0xffff8881265109f8 of 8 bytes by task 8057 on cpu 0:
+ tcp_recvmsg+0x46e/0x1b40 net/ipv4/tcp.c:2050
+ inet_recvmsg+0xbb/0x250 net/ipv4/af_inet.c:838
+ sock_recvmsg_nosec net/socket.c:871 [inline]
+ sock_recvmsg net/socket.c:889 [inline]
+ sock_recvmsg+0x92/0xb0 net/socket.c:885
+ sock_read_iter+0x15f/0x1e0 net/socket.c:967
+ call_read_iter include/linux/fs.h:1889 [inline]
+ new_sync_read+0x389/0x4f0 fs/read_write.c:414
+ __vfs_read+0xb1/0xc0 fs/read_write.c:427
+ vfs_read fs/read_write.c:461 [inline]
+ vfs_read+0x143/0x2c0 fs/read_write.c:446
+ ksys_read+0xd5/0x1b0 fs/read_write.c:587
+ __do_sys_read fs/read_write.c:597 [inline]
+ __se_sys_read fs/read_write.c:595 [inline]
+ __x64_sys_read+0x4c/0x60 fs/read_write.c:595
+ do_syscall_64+0xcc/0x370 arch/x86/entry/common.c:290
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+
+Reported by Kernel Concurrency Sanitizer on:
+CPU: 0 PID: 8057 Comm: syz-fuzzer Not tainted 5.4.0-rc6+ #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/crypto/chelsio/chtls/chtls_io.c | 10 +++++-----
+ include/net/sock.h | 4 ++--
+ net/ipv4/tcp.c | 2 +-
+ net/llc/af_llc.c | 2 +-
+ 4 files changed, 9 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
+index 1e0cc96306dd7..2c1f3ddb0cc79 100644
+--- a/drivers/crypto/chelsio/chtls/chtls_io.c
++++ b/drivers/crypto/chelsio/chtls/chtls_io.c
+@@ -1449,7 +1449,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ csk->wr_max_credits))
+ sk->sk_write_space(sk);
+
+- if (copied >= target && !sk->sk_backlog.tail)
++ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ break;
+
+ if (copied) {
+@@ -1482,7 +1482,7 @@ static int chtls_pt_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ break;
+ }
+ }
+- if (sk->sk_backlog.tail) {
++ if (READ_ONCE(sk->sk_backlog.tail)) {
+ release_sock(sk);
+ lock_sock(sk);
+ chtls_cleanup_rbuf(sk, copied);
+@@ -1627,7 +1627,7 @@ static int peekmsg(struct sock *sk, struct msghdr *msg,
+ break;
+ }
+
+- if (sk->sk_backlog.tail) {
++ if (READ_ONCE(sk->sk_backlog.tail)) {
+ /* Do not sleep, just process backlog. */
+ release_sock(sk);
+ lock_sock(sk);
+@@ -1759,7 +1759,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ csk->wr_max_credits))
+ sk->sk_write_space(sk);
+
+- if (copied >= target && !sk->sk_backlog.tail)
++ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ break;
+
+ if (copied) {
+@@ -1790,7 +1790,7 @@ int chtls_recvmsg(struct sock *sk, struct msghdr *msg, size_t len,
+ }
+ }
+
+- if (sk->sk_backlog.tail) {
++ if (READ_ONCE(sk->sk_backlog.tail)) {
+ release_sock(sk);
+ lock_sock(sk);
+ chtls_cleanup_rbuf(sk, copied);
+diff --git a/include/net/sock.h b/include/net/sock.h
+index 77f36257cac97..bc752237dff3f 100644
+--- a/include/net/sock.h
++++ b/include/net/sock.h
+@@ -900,11 +900,11 @@ static inline void __sk_add_backlog(struct sock *sk, struct sk_buff *skb)
+ skb_dst_force(skb);
+
+ if (!sk->sk_backlog.tail)
+- sk->sk_backlog.head = skb;
++ WRITE_ONCE(sk->sk_backlog.head, skb);
+ else
+ sk->sk_backlog.tail->next = skb;
+
+- sk->sk_backlog.tail = skb;
++ WRITE_ONCE(sk->sk_backlog.tail, skb);
+ skb->next = NULL;
+ }
+
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 616ff2970f4fc..4ce3397e6fcf7 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2038,7 +2038,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
+
+ /* Well, if we have backlog, try to process it now yet. */
+
+- if (copied >= target && !sk->sk_backlog.tail)
++ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ break;
+
+ if (copied) {
+diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
+index 6ead3c39f3566..bcba579e292ff 100644
+--- a/net/llc/af_llc.c
++++ b/net/llc/af_llc.c
+@@ -785,7 +785,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
+ }
+ /* Well, if we have backlog, try to process it now yet. */
+
+- if (copied >= target && !sk->sk_backlog.tail)
++ if (copied >= target && !READ_ONCE(sk->sk_backlog.tail))
+ break;
+
+ if (copied) {
+--
+2.25.1
+
--- /dev/null
+From f6efc4ec550077f638864383b368a62c738b89b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Apr 2020 13:04:49 -0400
+Subject: NFS: Fix races nfs_page_group_destroy() vs
+ nfs_destroy_unlinked_subrequests()
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+[ Upstream commit 08ca8b21f760c0ed5034a5c122092eec22ccf8f4 ]
+
+When a subrequest is being detached from the subgroup, we want to
+ensure that it is not holding the group lock, or in the process
+of waiting for the group lock.
+
+Fixes: 5b2b5187fa85 ("NFS: Fix nfs_page_group_destroy() and nfs_lock_and_join_requests() race cases")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/pagelist.c | 67 +++++++++++++++++++++++++++-------------
+ fs/nfs/write.c | 10 ++++--
+ include/linux/nfs_page.h | 2 ++
+ 3 files changed, 55 insertions(+), 24 deletions(-)
+
+diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
+index 5dae7c85d9b6e..2c7d76b4c5e18 100644
+--- a/fs/nfs/pagelist.c
++++ b/fs/nfs/pagelist.c
+@@ -132,47 +132,70 @@ nfs_async_iocounter_wait(struct rpc_task *task, struct nfs_lock_context *l_ctx)
+ EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
+
+ /*
+- * nfs_page_group_lock - lock the head of the page group
+- * @req - request in group that is to be locked
++ * nfs_page_set_headlock - set the request PG_HEADLOCK
++ * @req: request that is to be locked
+ *
+- * this lock must be held when traversing or modifying the page
+- * group list
++ * this lock must be held when modifying req->wb_head
+ *
+ * return 0 on success, < 0 on error
+ */
+ int
+-nfs_page_group_lock(struct nfs_page *req)
++nfs_page_set_headlock(struct nfs_page *req)
+ {
+- struct nfs_page *head = req->wb_head;
+-
+- WARN_ON_ONCE(head != head->wb_head);
+-
+- if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
++ if (!test_and_set_bit(PG_HEADLOCK, &req->wb_flags))
+ return 0;
+
+- set_bit(PG_CONTENDED1, &head->wb_flags);
++ set_bit(PG_CONTENDED1, &req->wb_flags);
+ smp_mb__after_atomic();
+- return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
++ return wait_on_bit_lock(&req->wb_flags, PG_HEADLOCK,
+ TASK_UNINTERRUPTIBLE);
+ }
+
+ /*
+- * nfs_page_group_unlock - unlock the head of the page group
+- * @req - request in group that is to be unlocked
++ * nfs_page_clear_headlock - clear the request PG_HEADLOCK
++ * @req: request that is to be locked
+ */
+ void
+-nfs_page_group_unlock(struct nfs_page *req)
++nfs_page_clear_headlock(struct nfs_page *req)
+ {
+- struct nfs_page *head = req->wb_head;
+-
+- WARN_ON_ONCE(head != head->wb_head);
+-
+ smp_mb__before_atomic();
+- clear_bit(PG_HEADLOCK, &head->wb_flags);
++ clear_bit(PG_HEADLOCK, &req->wb_flags);
+ smp_mb__after_atomic();
+- if (!test_bit(PG_CONTENDED1, &head->wb_flags))
++ if (!test_bit(PG_CONTENDED1, &req->wb_flags))
+ return;
+- wake_up_bit(&head->wb_flags, PG_HEADLOCK);
++ wake_up_bit(&req->wb_flags, PG_HEADLOCK);
++}
++
++/*
++ * nfs_page_group_lock - lock the head of the page group
++ * @req: request in group that is to be locked
++ *
++ * this lock must be held when traversing or modifying the page
++ * group list
++ *
++ * return 0 on success, < 0 on error
++ */
++int
++nfs_page_group_lock(struct nfs_page *req)
++{
++ int ret;
++
++ ret = nfs_page_set_headlock(req);
++ if (ret || req->wb_head == req)
++ return ret;
++ return nfs_page_set_headlock(req->wb_head);
++}
++
++/*
++ * nfs_page_group_unlock - unlock the head of the page group
++ * @req: request in group that is to be unlocked
++ */
++void
++nfs_page_group_unlock(struct nfs_page *req)
++{
++ if (req != req->wb_head)
++ nfs_page_clear_headlock(req->wb_head);
++ nfs_page_clear_headlock(req);
+ }
+
+ /*
+diff --git a/fs/nfs/write.c b/fs/nfs/write.c
+index 63d20308a9bb7..d419d89b91f7c 100644
+--- a/fs/nfs/write.c
++++ b/fs/nfs/write.c
+@@ -416,22 +416,28 @@ nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
+ destroy_list = (subreq->wb_this_page == old_head) ?
+ NULL : subreq->wb_this_page;
+
++ /* Note: lock subreq in order to change subreq->wb_head */
++ nfs_page_set_headlock(subreq);
+ WARN_ON_ONCE(old_head != subreq->wb_head);
+
+ /* make sure old group is not used */
+ subreq->wb_this_page = subreq;
++ subreq->wb_head = subreq;
+
+ clear_bit(PG_REMOVE, &subreq->wb_flags);
+
+ /* Note: races with nfs_page_group_destroy() */
+ if (!kref_read(&subreq->wb_kref)) {
+ /* Check if we raced with nfs_page_group_destroy() */
+- if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags))
++ if (test_and_clear_bit(PG_TEARDOWN, &subreq->wb_flags)) {
++ nfs_page_clear_headlock(subreq);
+ nfs_free_request(subreq);
++ } else
++ nfs_page_clear_headlock(subreq);
+ continue;
+ }
++ nfs_page_clear_headlock(subreq);
+
+- subreq->wb_head = subreq;
+ nfs_release_request(old_head);
+
+ if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags)) {
+diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
+index ad69430fd0eb5..5162fc1533c2f 100644
+--- a/include/linux/nfs_page.h
++++ b/include/linux/nfs_page.h
+@@ -142,6 +142,8 @@ extern void nfs_unlock_and_release_request(struct nfs_page *);
+ extern int nfs_page_group_lock(struct nfs_page *);
+ extern void nfs_page_group_unlock(struct nfs_page *);
+ extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
++extern int nfs_page_set_headlock(struct nfs_page *req);
++extern void nfs_page_clear_headlock(struct nfs_page *req);
+ extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
+
+ /*
+--
+2.25.1
+
--- /dev/null
+From 16343d890c0bd15422c7150bec7aa8e2dbc890a7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 1 Mar 2020 18:21:38 -0500
+Subject: nfsd: Don't add locks to closed or closing open stateids
+
+From: Trond Myklebust <trondmy@gmail.com>
+
+[ Upstream commit a451b12311aa8c96c6f6e01c783a86995dc3ec6b ]
+
+In NFSv4, the lock stateids are tied to the lockowner and the open stateid,
+so that the action of closing the file also results in either an automatic
+loss of the locks or an error of the form NFS4ERR_LOCKS_HELD.
+
+In practice this means we must not add new locks to the open stateid
+after the close process has been invoked. In fact, doing so can result
+in the following panic:
+
+ kernel BUG at lib/list_debug.c:51!
+ invalid opcode: 0000 [#1] SMP NOPTI
+ CPU: 2 PID: 1085 Comm: nfsd Not tainted 5.6.0-rc3+ #2
+ Hardware name: VMware, Inc. VMware7,1/440BX Desktop Reference Platform, BIOS VMW71.00V.14410784.B64.1908150010 08/15/2019
+ RIP: 0010:__list_del_entry_valid.cold+0x31/0x55
+ Code: 1a 3d 9b e8 74 10 c2 ff 0f 0b 48 c7 c7 f0 1a 3d 9b e8 66 10 c2 ff 0f 0b 48 89 f2 48 89 fe 48 c7 c7 b0 1a 3d 9b e8 52 10 c2 ff <0f> 0b 48 89 fe 4c 89 c2 48 c7 c7 78 1a 3d 9b e8 3e 10 c2 ff 0f 0b
+ RSP: 0018:ffffb296c1d47d90 EFLAGS: 00010246
+ RAX: 0000000000000054 RBX: ffff8ba032456ec8 RCX: 0000000000000000
+ RDX: 0000000000000000 RSI: ffff8ba039e99cc8 RDI: ffff8ba039e99cc8
+ RBP: ffff8ba032456e60 R08: 0000000000000781 R09: 0000000000000003
+ R10: 0000000000000000 R11: 0000000000000001 R12: ffff8ba009a4abe0
+ R13: ffff8ba032456e8c R14: 0000000000000000 R15: ffff8ba00adb01d8
+ FS: 0000000000000000(0000) GS:ffff8ba039e80000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 00007fb213f0b008 CR3: 00000001347de006 CR4: 00000000003606e0
+ Call Trace:
+ release_lock_stateid+0x2b/0x80 [nfsd]
+ nfsd4_free_stateid+0x1e9/0x210 [nfsd]
+ nfsd4_proc_compound+0x414/0x700 [nfsd]
+ ? nfs4svc_decode_compoundargs+0x407/0x4c0 [nfsd]
+ nfsd_dispatch+0xc1/0x200 [nfsd]
+ svc_process_common+0x476/0x6f0 [sunrpc]
+ ? svc_sock_secure_port+0x12/0x30 [sunrpc]
+ ? svc_recv+0x313/0x9c0 [sunrpc]
+ ? nfsd_svc+0x2d0/0x2d0 [nfsd]
+ svc_process+0xd4/0x110 [sunrpc]
+ nfsd+0xe3/0x140 [nfsd]
+ kthread+0xf9/0x130
+ ? nfsd_destroy+0x50/0x50 [nfsd]
+ ? kthread_park+0x90/0x90
+ ret_from_fork+0x1f/0x40
+
+The fix is to ensure that lock creation tests for whether or not the
+open stateid is unhashed, and to fail if that is the case.
+
+Fixes: 659aefb68eca ("nfsd: Ensure we don't recognise lock stateids after freeing them")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfsd/nfs4state.c | 73 ++++++++++++++++++++++++++-------------------
+ 1 file changed, 43 insertions(+), 30 deletions(-)
+
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index c24306af9758f..655079ae1dd1f 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -471,6 +471,8 @@ find_any_file(struct nfs4_file *f)
+ {
+ struct file *ret;
+
++ if (!f)
++ return NULL;
+ spin_lock(&f->fi_lock);
+ ret = __nfs4_get_fd(f, O_RDWR);
+ if (!ret) {
+@@ -1207,6 +1209,12 @@ static void nfs4_put_stateowner(struct nfs4_stateowner *sop)
+ nfs4_free_stateowner(sop);
+ }
+
++static bool
++nfs4_ol_stateid_unhashed(const struct nfs4_ol_stateid *stp)
++{
++ return list_empty(&stp->st_perfile);
++}
++
+ static bool unhash_ol_stateid(struct nfs4_ol_stateid *stp)
+ {
+ struct nfs4_file *fp = stp->st_stid.sc_file;
+@@ -1274,9 +1282,11 @@ static bool unhash_lock_stateid(struct nfs4_ol_stateid *stp)
+ {
+ lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+
++ if (!unhash_ol_stateid(stp))
++ return false;
+ list_del_init(&stp->st_locks);
+ nfs4_unhash_stid(&stp->st_stid);
+- return unhash_ol_stateid(stp);
++ return true;
+ }
+
+ static void release_lock_stateid(struct nfs4_ol_stateid *stp)
+@@ -1341,13 +1351,12 @@ static void release_open_stateid_locks(struct nfs4_ol_stateid *open_stp,
+ static bool unhash_open_stateid(struct nfs4_ol_stateid *stp,
+ struct list_head *reaplist)
+ {
+- bool unhashed;
+-
+ lockdep_assert_held(&stp->st_stid.sc_client->cl_lock);
+
+- unhashed = unhash_ol_stateid(stp);
++ if (!unhash_ol_stateid(stp))
++ return false;
+ release_open_stateid_locks(stp, reaplist);
+- return unhashed;
++ return true;
+ }
+
+ static void release_open_stateid(struct nfs4_ol_stateid *stp)
+@@ -5774,21 +5783,21 @@ alloc_init_lock_stateowner(unsigned int strhashval, struct nfs4_client *clp,
+ }
+
+ static struct nfs4_ol_stateid *
+-find_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fp)
++find_lock_stateid(const struct nfs4_lockowner *lo,
++ const struct nfs4_ol_stateid *ost)
+ {
+ struct nfs4_ol_stateid *lst;
+- struct nfs4_client *clp = lo->lo_owner.so_client;
+
+- lockdep_assert_held(&clp->cl_lock);
++ lockdep_assert_held(&ost->st_stid.sc_client->cl_lock);
+
+- list_for_each_entry(lst, &lo->lo_owner.so_stateids, st_perstateowner) {
+- if (lst->st_stid.sc_type != NFS4_LOCK_STID)
+- continue;
+- if (lst->st_stid.sc_file == fp) {
+- refcount_inc(&lst->st_stid.sc_count);
+- return lst;
++ /* If ost is not hashed, ost->st_locks will not be valid */
++ if (!nfs4_ol_stateid_unhashed(ost))
++ list_for_each_entry(lst, &ost->st_locks, st_locks) {
++ if (lst->st_stateowner == &lo->lo_owner) {
++ refcount_inc(&lst->st_stid.sc_count);
++ return lst;
++ }
+ }
+- }
+ return NULL;
+ }
+
+@@ -5804,11 +5813,11 @@ init_lock_stateid(struct nfs4_ol_stateid *stp, struct nfs4_lockowner *lo,
+ mutex_lock_nested(&stp->st_mutex, OPEN_STATEID_MUTEX);
+ retry:
+ spin_lock(&clp->cl_lock);
+- spin_lock(&fp->fi_lock);
+- retstp = find_lock_stateid(lo, fp);
++ if (nfs4_ol_stateid_unhashed(open_stp))
++ goto out_close;
++ retstp = find_lock_stateid(lo, open_stp);
+ if (retstp)
+- goto out_unlock;
+-
++ goto out_found;
+ refcount_inc(&stp->st_stid.sc_count);
+ stp->st_stid.sc_type = NFS4_LOCK_STID;
+ stp->st_stateowner = nfs4_get_stateowner(&lo->lo_owner);
+@@ -5817,22 +5826,26 @@ retry:
+ stp->st_access_bmap = 0;
+ stp->st_deny_bmap = open_stp->st_deny_bmap;
+ stp->st_openstp = open_stp;
++ spin_lock(&fp->fi_lock);
+ list_add(&stp->st_locks, &open_stp->st_locks);
+ list_add(&stp->st_perstateowner, &lo->lo_owner.so_stateids);
+ list_add(&stp->st_perfile, &fp->fi_stateids);
+-out_unlock:
+ spin_unlock(&fp->fi_lock);
+ spin_unlock(&clp->cl_lock);
+- if (retstp) {
+- if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
+- nfs4_put_stid(&retstp->st_stid);
+- goto retry;
+- }
+- /* To keep mutex tracking happy */
+- mutex_unlock(&stp->st_mutex);
+- stp = retstp;
+- }
+ return stp;
++out_found:
++ spin_unlock(&clp->cl_lock);
++ if (nfsd4_lock_ol_stateid(retstp) != nfs_ok) {
++ nfs4_put_stid(&retstp->st_stid);
++ goto retry;
++ }
++ /* To keep mutex tracking happy */
++ mutex_unlock(&stp->st_mutex);
++ return retstp;
++out_close:
++ spin_unlock(&clp->cl_lock);
++ mutex_unlock(&stp->st_mutex);
++ return NULL;
+ }
+
+ static struct nfs4_ol_stateid *
+@@ -5847,7 +5860,7 @@ find_or_create_lock_stateid(struct nfs4_lockowner *lo, struct nfs4_file *fi,
+
+ *new = false;
+ spin_lock(&clp->cl_lock);
+- lst = find_lock_stateid(lo, fi);
++ lst = find_lock_stateid(lo, ost);
+ spin_unlock(&clp->cl_lock);
+ if (lst != NULL) {
+ if (nfsd4_lock_ol_stateid(lst) == nfs_ok)
+--
+2.25.1
+
--- /dev/null
+From 68ade8ff1fe89916eb2aa01b7b7ccf92490ae8eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Jul 2020 13:11:59 +0200
+Subject: nvme: explicitly update mpath disk capacity on revalidation
+
+From: Anthony Iliopoulos <ailiop@suse.com>
+
+[ Upstream commit 05b29021fba5e725dd385151ef00b6340229b500 ]
+
+Commit 3b4b19721ec652 ("nvme: fix possible deadlock when I/O is
+blocked") reverted multipath head disk revalidation due to deadlocks
+caused by holding the bd_mutex during revalidate.
+
+Updating the multipath disk blockdev size is still required though for
+userspace to be able to observe any resizing while the device is
+mounted. Directly update the bdev inode size to avoid unnecessarily
+holding the bdev->bd_mutex.
+
+Fixes: 3b4b19721ec652 ("nvme: fix possible deadlock when I/O is blocked")
+
+Signed-off-by: Anthony Iliopoulos <ailiop@suse.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 1 +
+ drivers/nvme/host/nvme.h | 13 +++++++++++++
+ 2 files changed, 14 insertions(+)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 84fcfcdb8ba5f..33dad9774da01 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1599,6 +1599,7 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ if (ns->head->disk) {
+ nvme_update_disk_info(ns->head->disk, ns, id);
+ blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
++ nvme_mpath_update_disk_size(ns->head->disk);
+ }
+ #endif
+ }
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index a70b997060e68..9c2e7a151e400 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -504,6 +504,16 @@ static inline void nvme_mpath_check_last_path(struct nvme_ns *ns)
+ kblockd_schedule_work(&head->requeue_work);
+ }
+
++static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
++{
++ struct block_device *bdev = bdget_disk(disk, 0);
++
++ if (bdev) {
++ bd_set_size(bdev, get_capacity(disk) << SECTOR_SHIFT);
++ bdput(bdev);
++ }
++}
++
+ extern struct device_attribute dev_attr_ana_grpid;
+ extern struct device_attribute dev_attr_ana_state;
+
+@@ -570,6 +580,9 @@ static inline void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys)
+ static inline void nvme_mpath_start_freeze(struct nvme_subsystem *subsys)
+ {
+ }
++static inline void nvme_mpath_update_disk_size(struct gendisk *disk)
++{
++}
+ #endif /* CONFIG_NVME_MULTIPATH */
+
+ #ifdef CONFIG_NVM
+--
+2.25.1
+
--- /dev/null
+From c5ed08d9496342db85719c20c5a51171e3265007 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 17:29:43 +0200
+Subject: nvme: Fix controller creation races with teardown flow
+
+From: Israel Rukshin <israelr@mellanox.com>
+
+[ Upstream commit ce1518139e6976cf19c133b555083354fdb629b8 ]
+
+Calling nvme_sysfs_delete() when the controller is in the middle of
+creation may cause several bugs. If the controller is in the NEW state,
+we remove the delete_controller file and don't delete the controller.
+The user will not be able to use the nvme disconnect command on that
+controller again, although the controller may be active. Other bugs may
+happen if the controller is in the middle of the create_ctrl callback
+and nvme_do_delete_ctrl() starts. For example, freeing the I/O tagset
+at nvme_do_delete_ctrl() before it was allocated by the create_ctrl
+callback.
+
+To fix all those races don't allow the user to delete the controller
+before it was fully created.
+
+Signed-off-by: Israel Rukshin <israelr@mellanox.com>
+Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 5 +++++
+ drivers/nvme/host/nvme.h | 1 +
+ 2 files changed, 6 insertions(+)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 4b182ac15687e..faa7feebb6095 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -2856,6 +2856,10 @@ static ssize_t nvme_sysfs_delete(struct device *dev,
+ {
+ struct nvme_ctrl *ctrl = dev_get_drvdata(dev);
+
++ /* Can't delete non-created controllers */
++ if (!ctrl->created)
++ return -EBUSY;
++
+ if (device_remove_file_self(dev, attr))
+ nvme_delete_ctrl_sync(ctrl);
+ return count;
+@@ -3576,6 +3580,7 @@ void nvme_start_ctrl(struct nvme_ctrl *ctrl)
+ queue_work(nvme_wq, &ctrl->async_event_work);
+ nvme_start_queues(ctrl);
+ }
++ ctrl->created = true;
+ }
+ EXPORT_SYMBOL_GPL(nvme_start_ctrl);
+
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index 31c1496f938fb..a70b997060e68 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -206,6 +206,7 @@ struct nvme_ctrl {
+ struct nvme_command ka_cmd;
+ struct work_struct fw_act_work;
+ unsigned long events;
++ bool created;
+
+ #ifdef CONFIG_NVME_MULTIPATH
+ /* asymmetric namespace access: */
+--
+2.25.1
+
--- /dev/null
+From b1898f1e249269df29d0aafa4ac28f55f694276b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Jun 2020 01:53:08 -0700
+Subject: nvme: fix possible deadlock when I/O is blocked
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+[ Upstream commit 3b4b19721ec652ad2c4fe51dfbe5124212b5f581 ]
+
+Revert fab7772bfbcf ("nvme-multipath: revalidate nvme_ns_head gendisk
+in nvme_validate_ns")
+
+When adding a new namespace to the head disk (via nvme_mpath_set_live)
+we will see a partition scan, which triggers I/O on the mpath device
+node. This process will usually be triggered from the scan_work, which
+holds the scan_lock. If the I/O blocks (e.g. if we got an ANA change
+and currently have only paths that are not accessible), this can
+deadlock on the head disk bd_mutex, as the partition scan I/O takes it
+and head disk revalidation takes it to check for resize (also triggered
+from scan_work on a different path). See trace [1].
+
+The mpath disk revalidation was originally added to detect online disk
+size change, but this is no longer needed since commit cb224c3af4df
+("nvme: Convert to use set_capacity_revalidate_and_notify") which already
+updates resize info without unnecessarily revalidating the disk (the
+mpath disk doesn't even implement .revalidate_disk fop).
+
+[1]:
+--
+kernel: INFO: task kworker/u65:9:494 blocked for more than 241 seconds.
+kernel: Tainted: G OE 5.3.5-050305-generic #201910071830
+kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+kernel: kworker/u65:9 D 0 494 2 0x80004000
+kernel: Workqueue: nvme-wq nvme_scan_work [nvme_core]
+kernel: Call Trace:
+kernel: __schedule+0x2b9/0x6c0
+kernel: schedule+0x42/0xb0
+kernel: schedule_preempt_disabled+0xe/0x10
+kernel: __mutex_lock.isra.0+0x182/0x4f0
+kernel: __mutex_lock_slowpath+0x13/0x20
+kernel: mutex_lock+0x2e/0x40
+kernel: revalidate_disk+0x63/0xa0
+kernel: __nvme_revalidate_disk+0xfe/0x110 [nvme_core]
+kernel: nvme_revalidate_disk+0xa4/0x160 [nvme_core]
+kernel: ? evict+0x14c/0x1b0
+kernel: revalidate_disk+0x2b/0xa0
+kernel: nvme_validate_ns+0x49/0x940 [nvme_core]
+kernel: ? blk_mq_free_request+0xd2/0x100
+kernel: ? __nvme_submit_sync_cmd+0xbe/0x1e0 [nvme_core]
+kernel: nvme_scan_work+0x24f/0x380 [nvme_core]
+kernel: process_one_work+0x1db/0x380
+kernel: worker_thread+0x249/0x400
+kernel: kthread+0x104/0x140
+kernel: ? process_one_work+0x380/0x380
+kernel: ? kthread_park+0x80/0x80
+kernel: ret_from_fork+0x1f/0x40
+...
+kernel: INFO: task kworker/u65:1:2630 blocked for more than 241 seconds.
+kernel: Tainted: G OE 5.3.5-050305-generic #201910071830
+kernel: "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
+kernel: kworker/u65:1 D 0 2630 2 0x80004000
+kernel: Workqueue: nvme-wq nvme_scan_work [nvme_core]
+kernel: Call Trace:
+kernel: __schedule+0x2b9/0x6c0
+kernel: schedule+0x42/0xb0
+kernel: io_schedule+0x16/0x40
+kernel: do_read_cache_page+0x438/0x830
+kernel: ? __switch_to_asm+0x34/0x70
+kernel: ? file_fdatawait_range+0x30/0x30
+kernel: read_cache_page+0x12/0x20
+kernel: read_dev_sector+0x27/0xc0
+kernel: read_lba+0xc1/0x220
+kernel: ? kmem_cache_alloc_trace+0x19c/0x230
+kernel: efi_partition+0x1e6/0x708
+kernel: ? vsnprintf+0x39e/0x4e0
+kernel: ? snprintf+0x49/0x60
+kernel: check_partition+0x154/0x244
+kernel: rescan_partitions+0xae/0x280
+kernel: __blkdev_get+0x40f/0x560
+kernel: blkdev_get+0x3d/0x140
+kernel: __device_add_disk+0x388/0x480
+kernel: device_add_disk+0x13/0x20
+kernel: nvme_mpath_set_live+0x119/0x140 [nvme_core]
+kernel: nvme_update_ns_ana_state+0x5c/0x60 [nvme_core]
+kernel: nvme_set_ns_ana_state+0x1e/0x30 [nvme_core]
+kernel: nvme_parse_ana_log+0xa1/0x180 [nvme_core]
+kernel: ? nvme_update_ns_ana_state+0x60/0x60 [nvme_core]
+kernel: nvme_mpath_add_disk+0x47/0x90 [nvme_core]
+kernel: nvme_validate_ns+0x396/0x940 [nvme_core]
+kernel: ? blk_mq_free_request+0xd2/0x100
+kernel: nvme_scan_work+0x24f/0x380 [nvme_core]
+kernel: process_one_work+0x1db/0x380
+kernel: worker_thread+0x249/0x400
+kernel: kthread+0x104/0x140
+kernel: ? process_one_work+0x380/0x380
+kernel: ? kthread_park+0x80/0x80
+kernel: ret_from_fork+0x1f/0x40
+--
+
+Fixes: fab7772bfbcf ("nvme-multipath: revalidate nvme_ns_head gendisk in nvme_validate_ns")
+Signed-off-by: Anton Eidelman <anton@lightbitslabs.com>
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index faa7feebb6095..84fcfcdb8ba5f 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -1599,7 +1599,6 @@ static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id)
+ if (ns->head->disk) {
+ nvme_update_disk_info(ns->head->disk, ns, id);
+ blk_queue_stack_limits(ns->head->disk->queue, ns->queue);
+- revalidate_disk(ns->head->disk);
+ }
+ #endif
+ }
+--
+2.25.1
+
--- /dev/null
+From 959a01bd62712c0833db169642b77d9467d60591 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Feb 2020 10:05:38 +0900
+Subject: nvme-multipath: do not reset on unknown status
+
+From: John Meneghini <johnm@netapp.com>
+
+[ Upstream commit 764e9332098c0e60251386a507fe46ac91276120 ]
+
+The nvme multipath error handling defaults to controller reset if the
+error is unknown. There are, however, no existing nvme status codes that
+indicate a reset should be used, and resetting causes unnecessary
+disruption to the rest of IO.
+
+Change nvme's error handling to first check if failover should happen.
+If not, let the normal error handling take over rather than reset the
+controller.
+
+Based-on-a-patch-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Hannes Reinecke <hare@suse.de>
+Signed-off-by: John Meneghini <johnm@netapp.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 5 +----
+ drivers/nvme/host/multipath.c | 21 +++++++++------------
+ drivers/nvme/host/nvme.h | 5 +++--
+ 3 files changed, 13 insertions(+), 18 deletions(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 0d60f2f8f3eec..4b182ac15687e 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -255,11 +255,8 @@ void nvme_complete_rq(struct request *req)
+ trace_nvme_complete_rq(req);
+
+ if (unlikely(status != BLK_STS_OK && nvme_req_needs_retry(req))) {
+- if ((req->cmd_flags & REQ_NVME_MPATH) &&
+- blk_path_error(status)) {
+- nvme_failover_req(req);
++ if ((req->cmd_flags & REQ_NVME_MPATH) && nvme_failover_req(req))
+ return;
+- }
+
+ if (!blk_queue_dying(req->q)) {
+ nvme_req(req)->retries++;
+diff --git a/drivers/nvme/host/multipath.c b/drivers/nvme/host/multipath.c
+index 2e63c1106030b..e71075338ff5c 100644
+--- a/drivers/nvme/host/multipath.c
++++ b/drivers/nvme/host/multipath.c
+@@ -73,17 +73,12 @@ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ }
+ }
+
+-void nvme_failover_req(struct request *req)
++bool nvme_failover_req(struct request *req)
+ {
+ struct nvme_ns *ns = req->q->queuedata;
+ u16 status = nvme_req(req)->status;
+ unsigned long flags;
+
+- spin_lock_irqsave(&ns->head->requeue_lock, flags);
+- blk_steal_bios(&ns->head->requeue_list, req);
+- spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
+- blk_mq_end_request(req, 0);
+-
+ switch (status & 0x7ff) {
+ case NVME_SC_ANA_TRANSITION:
+ case NVME_SC_ANA_INACCESSIBLE:
+@@ -111,15 +106,17 @@ void nvme_failover_req(struct request *req)
+ nvme_mpath_clear_current_path(ns);
+ break;
+ default:
+- /*
+- * Reset the controller for any non-ANA error as we don't know
+- * what caused the error.
+- */
+- nvme_reset_ctrl(ns->ctrl);
+- break;
++ /* This was a non-ANA error so follow the normal error path. */
++ return false;
+ }
+
++ spin_lock_irqsave(&ns->head->requeue_lock, flags);
++ blk_steal_bios(&ns->head->requeue_list, req);
++ spin_unlock_irqrestore(&ns->head->requeue_lock, flags);
++ blk_mq_end_request(req, 0);
++
+ kblockd_schedule_work(&ns->head->requeue_work);
++ return true;
+ }
+
+ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
+diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
+index cc4273f119894..31c1496f938fb 100644
+--- a/drivers/nvme/host/nvme.h
++++ b/drivers/nvme/host/nvme.h
+@@ -477,7 +477,7 @@ void nvme_mpath_wait_freeze(struct nvme_subsystem *subsys);
+ void nvme_mpath_start_freeze(struct nvme_subsystem *subsys);
+ void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ struct nvme_ctrl *ctrl, int *flags);
+-void nvme_failover_req(struct request *req);
++bool nvme_failover_req(struct request *req);
+ void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl);
+ int nvme_mpath_alloc_disk(struct nvme_ctrl *ctrl,struct nvme_ns_head *head);
+ void nvme_mpath_add_disk(struct nvme_ns *ns, struct nvme_id_ns *id);
+@@ -521,8 +521,9 @@ static inline void nvme_set_disk_name(char *disk_name, struct nvme_ns *ns,
+ sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->head->instance);
+ }
+
+-static inline void nvme_failover_req(struct request *req)
++static inline bool nvme_failover_req(struct request *req)
+ {
++ return false;
+ }
+ static inline void nvme_kick_requeue_lists(struct nvme_ctrl *ctrl)
+ {
+--
+2.25.1
+
--- /dev/null
+From 4f255e0b71759e043f938f1bb0e3940fd81e9a4c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2020 11:02:28 +0000
+Subject: nvmet-rdma: fix double free of rdma queue
+
+From: Israel Rukshin <israelr@mellanox.com>
+
+[ Upstream commit 21f9024355e58772ec5d7fc3534aa5e29d72a8b6 ]
+
+In case rdma accept fails at nvmet_rdma_queue_connect(), release work is
+scheduled. Later on, a new RDMA CM event may arrive since we didn't
+destroy the cm-id and call nvmet_rdma_queue_connect_fail(), which
+schedules another release work. This causes nvmet_rdma_free_queue()
+to be called twice. To fix this, implicitly destroy the cm_id by
+returning a non-zero ret code, which guarantees that new rdma_cm
+events will not arrive afterwards. Also add a qp pointer to the
+nvmet_rdma_queue structure, so we can use it when the cm_id pointer
+is NULL or was destroyed.
+
+Signed-off-by: Israel Rukshin <israelr@mellanox.com>
+Suggested-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Max Gurtovoy <maxg@mellanox.com>
+Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/rdma.c | 30 ++++++++++++++++++------------
+ 1 file changed, 18 insertions(+), 12 deletions(-)
+
+diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
+index 08f997a390d5d..cfd26437aeaea 100644
+--- a/drivers/nvme/target/rdma.c
++++ b/drivers/nvme/target/rdma.c
+@@ -83,6 +83,7 @@ enum nvmet_rdma_queue_state {
+
+ struct nvmet_rdma_queue {
+ struct rdma_cm_id *cm_id;
++ struct ib_qp *qp;
+ struct nvmet_port *port;
+ struct ib_cq *cq;
+ atomic_t sq_wr_avail;
+@@ -471,7 +472,7 @@ static int nvmet_rdma_post_recv(struct nvmet_rdma_device *ndev,
+ if (ndev->srq)
+ ret = ib_post_srq_recv(ndev->srq, &cmd->wr, NULL);
+ else
+- ret = ib_post_recv(cmd->queue->cm_id->qp, &cmd->wr, NULL);
++ ret = ib_post_recv(cmd->queue->qp, &cmd->wr, NULL);
+
+ if (unlikely(ret))
+ pr_err("post_recv cmd failed\n");
+@@ -510,7 +511,7 @@ static void nvmet_rdma_release_rsp(struct nvmet_rdma_rsp *rsp)
+ atomic_add(1 + rsp->n_rdma, &queue->sq_wr_avail);
+
+ if (rsp->n_rdma) {
+- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
++ rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
+ queue->cm_id->port_num, rsp->req.sg,
+ rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
+ }
+@@ -594,7 +595,7 @@ static void nvmet_rdma_read_data_done(struct ib_cq *cq, struct ib_wc *wc)
+
+ WARN_ON(rsp->n_rdma <= 0);
+ atomic_add(rsp->n_rdma, &queue->sq_wr_avail);
+- rdma_rw_ctx_destroy(&rsp->rw, queue->cm_id->qp,
++ rdma_rw_ctx_destroy(&rsp->rw, queue->qp,
+ queue->cm_id->port_num, rsp->req.sg,
+ rsp->req.sg_cnt, nvmet_data_dir(&rsp->req));
+ rsp->n_rdma = 0;
+@@ -737,7 +738,7 @@ static bool nvmet_rdma_execute_command(struct nvmet_rdma_rsp *rsp)
+ }
+
+ if (nvmet_rdma_need_data_in(rsp)) {
+- if (rdma_rw_ctx_post(&rsp->rw, queue->cm_id->qp,
++ if (rdma_rw_ctx_post(&rsp->rw, queue->qp,
+ queue->cm_id->port_num, &rsp->read_cqe, NULL))
+ nvmet_req_complete(&rsp->req, NVME_SC_DATA_XFER_ERROR);
+ } else {
+@@ -1020,6 +1021,7 @@ static int nvmet_rdma_create_queue_ib(struct nvmet_rdma_queue *queue)
+ pr_err("failed to create_qp ret= %d\n", ret);
+ goto err_destroy_cq;
+ }
++ queue->qp = queue->cm_id->qp;
+
+ atomic_set(&queue->sq_wr_avail, qp_attr.cap.max_send_wr);
+
+@@ -1048,11 +1050,10 @@ err_destroy_cq:
+
+ static void nvmet_rdma_destroy_queue_ib(struct nvmet_rdma_queue *queue)
+ {
+- struct ib_qp *qp = queue->cm_id->qp;
+-
+- ib_drain_qp(qp);
+- rdma_destroy_id(queue->cm_id);
+- ib_destroy_qp(qp);
++ ib_drain_qp(queue->qp);
++ if (queue->cm_id)
++ rdma_destroy_id(queue->cm_id);
++ ib_destroy_qp(queue->qp);
+ ib_free_cq(queue->cq);
+ }
+
+@@ -1286,9 +1287,12 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
+
+ ret = nvmet_rdma_cm_accept(cm_id, queue, &event->param.conn);
+ if (ret) {
+- schedule_work(&queue->release_work);
+- /* Destroying rdma_cm id is not needed here */
+- return 0;
++ /*
++ * Don't destroy the cm_id in free path, as we implicitly
++ * destroy the cm_id here with non-zero ret code.
++ */
++ queue->cm_id = NULL;
++ goto free_queue;
+ }
+
+ mutex_lock(&nvmet_rdma_queue_mutex);
+@@ -1297,6 +1301,8 @@ static int nvmet_rdma_queue_connect(struct rdma_cm_id *cm_id,
+
+ return 0;
+
++free_queue:
++ nvmet_rdma_free_queue(queue);
+ put_device:
+ kref_put(&ndev->ref, nvmet_rdma_free_dev);
+
+--
+2.25.1
+
--- /dev/null
+From c9bc17f76df1ebb2aaa7e171b50158236c867538 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Sep 2020 10:24:57 -0500
+Subject: objtool: Fix noreturn detection for ignored functions
+
+From: Josh Poimboeuf <jpoimboe@redhat.com>
+
+[ Upstream commit db6c6a0df840e3f52c84cc302cc1a08ba11a4416 ]
+
+When a function is annotated with STACK_FRAME_NON_STANDARD, objtool
+doesn't validate its code paths. It also skips sibling call detection
+within the function.
+
+But sibling call detection is actually needed for the case where the
+ignored function doesn't have any return instructions. Otherwise
+objtool naively marks the function as implicit static noreturn, which
+affects the reachability of its callers, resulting in "unreachable
+instruction" warnings.
+
+Fix it by just enabling sibling call detection for ignored functions.
+The 'insn->ignore' check in add_jump_destinations() is no longer needed
+after
+
+ e6da9567959e ("objtool: Don't use ignore flag for fake jumps").
+
+Fixes the following warning:
+
+ arch/x86/kvm/vmx/vmx.o: warning: objtool: vmx_handle_exit_irqoff()+0x142: unreachable instruction
+
+which triggers on an allmodconfig with CONFIG_GCOV_KERNEL unset.
+
+Reported-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
+Link: https://lkml.kernel.org/r/5b1e2536cdbaa5246b60d7791b76130a74082c62.1599751464.git.jpoimboe@redhat.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/objtool/check.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/objtool/check.c b/tools/objtool/check.c
+index fd3071d83deae..c0ab27368a345 100644
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -503,7 +503,7 @@ static int add_jump_destinations(struct objtool_file *file)
+ insn->type != INSN_JUMP_UNCONDITIONAL)
+ continue;
+
+- if (insn->ignore || insn->offset == FAKE_JUMP_OFFSET)
++ if (insn->offset == FAKE_JUMP_OFFSET)
+ continue;
+
+ rela = find_rela_by_dest_range(insn->sec, insn->offset,
+--
+2.25.1
+
--- /dev/null
+From b4fab4aca10390dd4cc302c3a96f54fe0fceb9f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Feb 2020 15:31:13 +0100
+Subject: PCI: pciehp: Fix MSI interrupt race
+
+From: Stuart Hayes <stuart.w.hayes@gmail.com>
+
+[ Upstream commit 8edf5332c39340b9583cf9cba659eb7ec71f75b5 ]
+
+Without this commit, a PCIe hotplug port can stop generating interrupts on
+hotplug events, so device adds and removals will not be seen:
+
+The pciehp interrupt handler pciehp_isr() reads the Slot Status register
+and then writes back to it to clear the bits that caused the interrupt. If
+a different interrupt event bit gets set between the read and the write,
+pciehp_isr() returns without having cleared all of the interrupt event
+bits. If this happens when the MSI isn't masked (which by default it isn't
+in handle_edge_irq(), and which it will never be when MSI per-vector
+masking is not supported), we won't get any more hotplug interrupts from
+that device.
+
+That is expected behavior, according to the PCIe Base Spec r5.0, section
+6.7.3.4, "Software Notification of Hot-Plug Events".
+
+Because the Presence Detect Changed and Data Link Layer State Changed event
+bits can both get set at nearly the same time when a device is added or
+removed, this is more likely to happen than it might seem. The issue was
+found (and can be reproduced rather easily) by connecting and disconnecting
+an NVMe storage device on at least one system model where the NVMe devices
+were being connected to an AMD PCIe port (PCI device 0x1022/0x1483).
+
+Fix the issue by modifying pciehp_isr() to loop back and re-read the Slot
+Status register immediately after writing to it, until it sees that all of
+the event status bits have been cleared.
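+
+In rough outline the loop looks like this (a simplified sketch;
+EVENT_BITS stands in for the PDC/DLLSC/ABP/PFD/CC mask used by the
+driver):
+
+	u16 status, events = 0;
+
+	do {
+		pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
+		status &= EVENT_BITS;
+		events |= status;
+		if (status)
+			pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
+	} while (status && pci_dev_msi_enabled(pdev) && !pciehp_poll_mode);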
+
+[lukas: drop loop count limitation, write "events" instead of "status",
+don't loop back in INTx and poll modes, tweak code comment & commit msg]
+Link: https://lore.kernel.org/r/78b4ced5072bfe6e369d20e8b47c279b8c7af12e.1582121613.git.lukas@wunner.de
+Tested-by: Stuart Hayes <stuart.w.hayes@gmail.com>
+Signed-off-by: Stuart Hayes <stuart.w.hayes@gmail.com>
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Reviewed-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/hotplug/pciehp_hpc.c | 26 ++++++++++++++++++++------
+ 1 file changed, 20 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
+index 07940d1d83b70..005817e40ad39 100644
+--- a/drivers/pci/hotplug/pciehp_hpc.c
++++ b/drivers/pci/hotplug/pciehp_hpc.c
+@@ -530,7 +530,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
+ struct controller *ctrl = (struct controller *)dev_id;
+ struct pci_dev *pdev = ctrl_dev(ctrl);
+ struct device *parent = pdev->dev.parent;
+- u16 status, events;
++ u16 status, events = 0;
+
+ /*
+ * Interrupts only occur in D3hot or shallower (PCIe r4.0, sec 6.7.3.4).
+@@ -553,6 +553,7 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
+ }
+ }
+
++read_status:
+ pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &status);
+ if (status == (u16) ~0) {
+ ctrl_info(ctrl, "%s: no response from device\n", __func__);
+@@ -565,24 +566,37 @@ static irqreturn_t pciehp_isr(int irq, void *dev_id)
+ * Slot Status contains plain status bits as well as event
+ * notification bits; right now we only want the event bits.
+ */
+- events = status & (PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
+- PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
+- PCI_EXP_SLTSTA_DLLSC);
++ status &= PCI_EXP_SLTSTA_ABP | PCI_EXP_SLTSTA_PFD |
++ PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_CC |
++ PCI_EXP_SLTSTA_DLLSC;
+
+ /*
+ * If we've already reported a power fault, don't report it again
+ * until we've done something to handle it.
+ */
+ if (ctrl->power_fault_detected)
+- events &= ~PCI_EXP_SLTSTA_PFD;
++ status &= ~PCI_EXP_SLTSTA_PFD;
+
++ events |= status;
+ if (!events) {
+ if (parent)
+ pm_runtime_put(parent);
+ return IRQ_NONE;
+ }
+
+- pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
++ if (status) {
++ pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, events);
++
++ /*
++ * In MSI mode, all event bits must be zero before the port
++ * will send a new interrupt (PCIe Base Spec r5.0 sec 6.7.3.4).
++ * So re-read the Slot Status register in case a bit was set
++ * between read and write.
++ */
++ if (pci_dev_msi_enabled(pdev) && !pciehp_poll_mode)
++ goto read_status;
++ }
++
+ ctrl_dbg(ctrl, "pending interrupts %#06x from Slot Status\n", events);
+ if (parent)
+ pm_runtime_put(parent);
+--
+2.25.1
+
--- /dev/null
+From f80871ca6f2ce0153ba0013f90b285e110ae2c72 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 May 2020 10:47:09 +0800
+Subject: PCI: tegra: Fix runtime PM imbalance on error
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit fcee90cdf6f3a3a371add04d41528d5ba9c3b411 ]
+
+pm_runtime_get_sync() increments the runtime PM usage counter even
+when it returns an error code. Thus a pairing decrement is needed on
+the error handling path to keep the counter balanced.
+
+Also, call pm_runtime_disable() when pm_runtime_get_sync() returns
+an error code.
+
+Link: https://lore.kernel.org/r/20200521024709.2368-1-dinghao.liu@zju.edu.cn
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Acked-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pci/controller/pci-tegra.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/pci/controller/pci-tegra.c b/drivers/pci/controller/pci-tegra.c
+index 6f86583605a46..097c02197ec8f 100644
+--- a/drivers/pci/controller/pci-tegra.c
++++ b/drivers/pci/controller/pci-tegra.c
+@@ -2400,7 +2400,7 @@ static int tegra_pcie_probe(struct platform_device *pdev)
+ err = pm_runtime_get_sync(pcie->dev);
+ if (err < 0) {
+ dev_err(dev, "fail to enable pcie controller: %d\n", err);
+- goto teardown_msi;
++ goto pm_runtime_put;
+ }
+
+ err = tegra_pcie_request_resources(pcie);
+@@ -2440,7 +2440,6 @@ free_resources:
+ pm_runtime_put:
+ pm_runtime_put_sync(pcie->dev);
+ pm_runtime_disable(pcie->dev);
+-teardown_msi:
+ tegra_pcie_msi_teardown(pcie);
+ put_resources:
+ tegra_pcie_put_resources(pcie);
+--
+2.25.1
+
--- /dev/null
+From d858c4b86e58853dbc0861d6ae175c8401709dfb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2020 22:16:23 -0400
+Subject: PCI: Use ioremap(), not phys_to_virt() for platform ROM
+
+From: Mikel Rychliski <mikel@mikelr.com>
+
+[ Upstream commit 72e0ef0e5f067fd991f702f0b2635d911d0cf208 ]
+
+On some EFI systems, the video BIOS is provided by the EFI firmware. The
+boot stub code stores the physical address of the ROM image in pdev->rom.
+Currently we attempt to access this pointer using phys_to_virt(), which
+doesn't work with CONFIG_HIGHMEM.
+
+On these systems, attempting to load the radeon module on a x86_32 kernel
+can result in the following:
+
+ BUG: unable to handle page fault for address: 3e8ed03c
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ *pde = 00000000
+ Oops: 0000 [#1] PREEMPT SMP
+ CPU: 0 PID: 317 Comm: systemd-udevd Not tainted 5.6.0-rc3-next-20200228 #2
+ Hardware name: Apple Computer, Inc. MacPro1,1/Mac-F4208DC8, BIOS MP11.88Z.005C.B08.0707021221 07/02/07
+ EIP: radeon_get_bios+0x5ed/0xe50 [radeon]
+ Code: 00 00 84 c0 0f 85 12 fd ff ff c7 87 64 01 00 00 00 00 00 00 8b 47 08 8b 55 b0 e8 1e 83 e1 d6 85 c0 74 1a 8b 55 c0 85 d2 74 13 <80> 38 55 75 0e 80 78 01 aa 0f 84 a4 03 00 00 8d 74 26 00 68 dc 06
+ EAX: 3e8ed03c EBX: 00000000 ECX: 3e8ed03c EDX: 00010000
+ ESI: 00040000 EDI: eec04000 EBP: eef3fc60 ESP: eef3fbe0
+ DS: 007b ES: 007b FS: 00d8 GS: 00e0 SS: 0068 EFLAGS: 00010206
+ CR0: 80050033 CR2: 3e8ed03c CR3: 2ec77000 CR4: 000006d0
+ Call Trace:
+ r520_init+0x26/0x240 [radeon]
+ radeon_device_init+0x533/0xa50 [radeon]
+ radeon_driver_load_kms+0x80/0x220 [radeon]
+ drm_dev_register+0xa7/0x180 [drm]
+ radeon_pci_probe+0x10f/0x1a0 [radeon]
+ pci_device_probe+0xd4/0x140
+
+Fix the issue by updating all drivers which can access a platform provided
+ROM. Instead of calling the helper function pci_platform_rom() which uses
+phys_to_virt(), call ioremap() directly on the pdev->rom.
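+
+The resulting pattern is roughly (a simplified sketch of what the
+drivers below now do): map the physical ROM address, copy it out of
+I/O space, then unmap.
+
+	void __iomem *bios;
+	void *copy = kzalloc(pdev->romlen, GFP_KERNEL);
+
+	bios = ioremap(pdev->rom, pdev->romlen);	/* instead of phys_to_virt() */
+	if (copy && bios)
+		memcpy_fromio(copy, bios, pdev->romlen);
+	if (bios)
+		iounmap(bios);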
+
+radeon_read_platform_bios() previously directly accessed an __iomem
+pointer. Avoid this by calling memcpy_fromio() instead of kmemdup().
+
+pci_platform_rom() now has no remaining callers, so remove it.
+
+Link: https://lore.kernel.org/r/20200319021623.5426-1-mikel@mikelr.com
+Signed-off-by: Mikel Rychliski <mikel@mikelr.com>
+Signed-off-by: Bjorn Helgaas <bhelgaas@google.com>
+Acked-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 31 +++++++++++--------
+ .../drm/nouveau/nvkm/subdev/bios/shadowpci.c | 17 ++++++++--
+ drivers/gpu/drm/radeon/radeon_bios.c | 30 +++++++++++-------
+ drivers/pci/rom.c | 17 ----------
+ include/linux/pci.h | 1 -
+ 5 files changed, 52 insertions(+), 44 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+index a5df80d50d447..6cf3dd5edffda 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c
+@@ -191,30 +191,35 @@ static bool amdgpu_read_bios_from_rom(struct amdgpu_device *adev)
+
+ static bool amdgpu_read_platform_bios(struct amdgpu_device *adev)
+ {
+- uint8_t __iomem *bios;
+- size_t size;
++ phys_addr_t rom = adev->pdev->rom;
++ size_t romlen = adev->pdev->romlen;
++ void __iomem *bios;
+
+ adev->bios = NULL;
+
+- bios = pci_platform_rom(adev->pdev, &size);
+- if (!bios) {
++ if (!rom || romlen == 0)
+ return false;
+- }
+
+- adev->bios = kzalloc(size, GFP_KERNEL);
+- if (adev->bios == NULL)
++ adev->bios = kzalloc(romlen, GFP_KERNEL);
++ if (!adev->bios)
+ return false;
+
+- memcpy_fromio(adev->bios, bios, size);
++ bios = ioremap(rom, romlen);
++ if (!bios)
++ goto free_bios;
+
+- if (!check_atom_bios(adev->bios, size)) {
+- kfree(adev->bios);
+- return false;
+- }
++ memcpy_fromio(adev->bios, bios, romlen);
++ iounmap(bios);
+
+- adev->bios_size = size;
++ if (!check_atom_bios(adev->bios, romlen))
++ goto free_bios;
++
++ adev->bios_size = romlen;
+
+ return true;
++free_bios:
++ kfree(adev->bios);
++ return false;
+ }
+
+ #ifdef CONFIG_ACPI
+diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+index 9b91da09dc5f8..8d9812a51ef63 100644
+--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
++++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/shadowpci.c
+@@ -101,9 +101,13 @@ platform_init(struct nvkm_bios *bios, const char *name)
+ else
+ return ERR_PTR(-ENODEV);
+
++ if (!pdev->rom || pdev->romlen == 0)
++ return ERR_PTR(-ENODEV);
++
+ if ((priv = kmalloc(sizeof(*priv), GFP_KERNEL))) {
++ priv->size = pdev->romlen;
+ if (ret = -ENODEV,
+- (priv->rom = pci_platform_rom(pdev, &priv->size)))
++ (priv->rom = ioremap(pdev->rom, pdev->romlen)))
+ return priv;
+ kfree(priv);
+ }
+@@ -111,11 +115,20 @@ platform_init(struct nvkm_bios *bios, const char *name)
+ return ERR_PTR(ret);
+ }
+
++static void
++platform_fini(void *data)
++{
++ struct priv *priv = data;
++
++ iounmap(priv->rom);
++ kfree(priv);
++}
++
+ const struct nvbios_source
+ nvbios_platform = {
+ .name = "PLATFORM",
+ .init = platform_init,
+- .fini = (void(*)(void *))kfree,
++ .fini = platform_fini,
+ .read = pcirom_read,
+ .rw = true,
+ };
+diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
+index 04c0ed41374f1..dd0528cf98183 100644
+--- a/drivers/gpu/drm/radeon/radeon_bios.c
++++ b/drivers/gpu/drm/radeon/radeon_bios.c
+@@ -104,25 +104,33 @@ static bool radeon_read_bios(struct radeon_device *rdev)
+
+ static bool radeon_read_platform_bios(struct radeon_device *rdev)
+ {
+- uint8_t __iomem *bios;
+- size_t size;
++ phys_addr_t rom = rdev->pdev->rom;
++ size_t romlen = rdev->pdev->romlen;
++ void __iomem *bios;
+
+ rdev->bios = NULL;
+
+- bios = pci_platform_rom(rdev->pdev, &size);
+- if (!bios) {
++ if (!rom || romlen == 0)
+ return false;
+- }
+
+- if (size == 0 || bios[0] != 0x55 || bios[1] != 0xaa) {
++ rdev->bios = kzalloc(romlen, GFP_KERNEL);
++ if (!rdev->bios)
+ return false;
+- }
+- rdev->bios = kmemdup(bios, size, GFP_KERNEL);
+- if (rdev->bios == NULL) {
+- return false;
+- }
++
++ bios = ioremap(rom, romlen);
++ if (!bios)
++ goto free_bios;
++
++ memcpy_fromio(rdev->bios, bios, romlen);
++ iounmap(bios);
++
++ if (rdev->bios[0] != 0x55 || rdev->bios[1] != 0xaa)
++ goto free_bios;
+
+ return true;
++free_bios:
++ kfree(rdev->bios);
++ return false;
+ }
+
+ #ifdef CONFIG_ACPI
+diff --git a/drivers/pci/rom.c b/drivers/pci/rom.c
+index 137bf0cee897c..8fc9a4e911e3a 100644
+--- a/drivers/pci/rom.c
++++ b/drivers/pci/rom.c
+@@ -195,20 +195,3 @@ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom)
+ pci_disable_rom(pdev);
+ }
+ EXPORT_SYMBOL(pci_unmap_rom);
+-
+-/**
+- * pci_platform_rom - provides a pointer to any ROM image provided by the
+- * platform
+- * @pdev: pointer to pci device struct
+- * @size: pointer to receive size of pci window over ROM
+- */
+-void __iomem *pci_platform_rom(struct pci_dev *pdev, size_t *size)
+-{
+- if (pdev->rom && pdev->romlen) {
+- *size = pdev->romlen;
+- return phys_to_virt((phys_addr_t)pdev->rom);
+- }
+-
+- return NULL;
+-}
+-EXPORT_SYMBOL(pci_platform_rom);
+diff --git a/include/linux/pci.h b/include/linux/pci.h
+index 2517492dd1855..2fda9893962d1 100644
+--- a/include/linux/pci.h
++++ b/include/linux/pci.h
+@@ -1144,7 +1144,6 @@ int pci_enable_rom(struct pci_dev *pdev);
+ void pci_disable_rom(struct pci_dev *pdev);
+ void __iomem __must_check *pci_map_rom(struct pci_dev *pdev, size_t *size);
+ void pci_unmap_rom(struct pci_dev *pdev, void __iomem *rom);
+-void __iomem __must_check *pci_platform_rom(struct pci_dev *pdev, size_t *size);
+
+ /* Power management related routines */
+ int pci_save_state(struct pci_dev *dev);
+--
+2.25.1
+
--- /dev/null
+From 91487771777ef6328dfdc99c951cb7e01a5adaa2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 08:03:19 +0100
+Subject: perf cpumap: Fix snprintf overflow check
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit d74b181a028bb5a468f0c609553eff6a8fdf4887 ]
+
+'snprintf' returns the number of characters which would be generated for
+the given input.
+
+If the returned value is *greater than* or equal to the buffer size, it
+means that the output has been truncated.
+
+Fix the overflow test accordingly.
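+
+For illustration only (not part of this patch), a minimal userspace
+sketch of the corrected truncation check, assuming a PATH_MAX-sized
+buffer:
+
+  #include <limits.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+      char path[PATH_MAX];
+      /* snprintf() returns the length it wanted to write, which can
+       * be >= sizeof(path) even though only PATH_MAX - 1 characters
+       * (plus the terminating NUL) were actually stored.
+       */
+      int ret = snprintf(path, sizeof(path),
+                         "%s/devices/system/cpu/possible", "/sys");
+
+      if (ret >= (int)sizeof(path)) { /* ">=", not "==" */
+          fprintf(stderr, "path truncated\n");
+          return 1;
+      }
+      printf("%s\n", path);
+      return 0;
+  }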
+
+Fixes: 7780c25bae59f ("perf tools: Allow ability to map cpus to nodes easily")
+Fixes: 92a7e1278005b ("perf cpumap: Add cpu__max_present_cpu()")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Suggested-by: David Laight <David.Laight@ACULAB.COM>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Don Zickus <dzickus@redhat.com>
+Cc: He Zhe <zhe.he@windriver.com>
+Cc: Jan Stancek <jstancek@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: kernel-janitors@vger.kernel.org
+Link: http://lore.kernel.org/lkml/20200324070319.10901-1-christophe.jaillet@wanadoo.fr
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/cpumap.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
+index f93846edc1e0d..827d844f4efb1 100644
+--- a/tools/perf/util/cpumap.c
++++ b/tools/perf/util/cpumap.c
+@@ -462,7 +462,7 @@ static void set_max_cpu_num(void)
+
+ /* get the highest possible cpu number for a sparse allocation */
+ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/possible", mnt);
+- if (ret == PATH_MAX) {
++ if (ret >= PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ goto out;
+ }
+@@ -473,7 +473,7 @@ static void set_max_cpu_num(void)
+
+ /* get the highest present cpu number for a sparse allocation */
+ ret = snprintf(path, PATH_MAX, "%s/devices/system/cpu/present", mnt);
+- if (ret == PATH_MAX) {
++ if (ret >= PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ goto out;
+ }
+@@ -501,7 +501,7 @@ static void set_max_node_num(void)
+
+ /* get the highest possible cpu number for a sparse allocation */
+ ret = snprintf(path, PATH_MAX, "%s/devices/system/node/possible", mnt);
+- if (ret == PATH_MAX) {
++ if (ret >= PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ goto out;
+ }
+@@ -586,7 +586,7 @@ int cpu__setup_cpunode_map(void)
+ return 0;
+
+ n = snprintf(path, PATH_MAX, "%s/devices/system/node", mnt);
+- if (n == PATH_MAX) {
++ if (n >= PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ return -1;
+ }
+@@ -601,7 +601,7 @@ int cpu__setup_cpunode_map(void)
+ continue;
+
+ n = snprintf(buf, PATH_MAX, "%s/%s", path, dent1->d_name);
+- if (n == PATH_MAX) {
++ if (n >= PATH_MAX) {
+ pr_err("sysfs path crossed PATH_MAX(%d) size\n", PATH_MAX);
+ continue;
+ }
+--
+2.25.1
+
--- /dev/null
+From a9f17540b7f9a9a28102b16535e5f7bac85d552c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 May 2020 16:59:18 -0700
+Subject: perf evsel: Fix 2 memory leaks
+
+From: Ian Rogers <irogers@google.com>
+
+[ Upstream commit 3efc899d9afb3d03604f191a0be9669eabbfc4aa ]
+
+If allocated, perf_pkg_mask and metric_events need freeing.
+
+Signed-off-by: Ian Rogers <irogers@google.com>
+Reviewed-by: Andi Kleen <ak@linux.intel.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Link: http://lore.kernel.org/lkml/20200512235918.10732-1-irogers@google.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/evsel.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index e8586957562b3..1b7b6244c8cfe 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -1291,6 +1291,8 @@ void perf_evsel__exit(struct perf_evsel *evsel)
+ zfree(&evsel->group_name);
+ zfree(&evsel->name);
+ zfree(&evsel->pmu_name);
++ zfree(&evsel->per_pkg_mask);
++ zfree(&evsel->metric_events);
+ perf_evsel__object.fini(evsel);
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 284e582d00f8cb894fb8c7e0210ed5776137e866 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2020 19:08:01 +0800
+Subject: perf jevents: Fix leak of mapfile memory
+
+From: John Garry <john.garry@huawei.com>
+
+[ Upstream commit 3f5777fbaf04c58d940526a22a2e0c813c837936 ]
+
+The memory for the global pointer is never freed during normal program
+execution, so let's free it on exit from the main function as good
+programming practice.
+
+A stray blank line is also removed.
+
+Reported-by: Jiri Olsa <jolsa@redhat.com>
+Signed-off-by: John Garry <john.garry@huawei.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: James Clark <james.clark@arm.com>
+Cc: Joakim Zhang <qiangqing.zhang@nxp.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Cc: linuxarm@huawei.com
+Link: http://lore.kernel.org/lkml/1583406486-154841-2-git-send-email-john.garry@huawei.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/pmu-events/jevents.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/tools/perf/pmu-events/jevents.c b/tools/perf/pmu-events/jevents.c
+index c17e594041712..6631970f96832 100644
+--- a/tools/perf/pmu-events/jevents.c
++++ b/tools/perf/pmu-events/jevents.c
+@@ -1064,10 +1064,9 @@ static int process_one_file(const char *fpath, const struct stat *sb,
+ */
+ int main(int argc, char *argv[])
+ {
+- int rc;
++ int rc, ret = 0;
+ int maxfds;
+ char ldirname[PATH_MAX];
+-
+ const char *arch;
+ const char *output_file;
+ const char *start_dirname;
+@@ -1138,7 +1137,8 @@ int main(int argc, char *argv[])
+ /* Make build fail */
+ fclose(eventsfp);
+ free_arch_std_events();
+- return 1;
++ ret = 1;
++ goto out_free_mapfile;
+ } else if (rc) {
+ goto empty_map;
+ }
+@@ -1156,14 +1156,17 @@ int main(int argc, char *argv[])
+ /* Make build fail */
+ fclose(eventsfp);
+ free_arch_std_events();
+- return 1;
++ ret = 1;
+ }
+
+- return 0;
++
++ goto out_free_mapfile;
+
+ empty_map:
+ fclose(eventsfp);
+ create_empty_mapping(output_file);
+ free_arch_std_events();
+- return 0;
++out_free_mapfile:
++ free(mapfile);
++ return ret;
+ }
+--
+2.25.1
+
--- /dev/null
+From a10271528b68db9e4c8c9f5425ee5dd56c5049df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 May 2020 15:19:16 +0300
+Subject: perf kcore_copy: Fix module map when there are no modules loaded
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+[ Upstream commit 61f82e3fb697a8e85f22fdec786528af73dc36d1 ]
+
+In the absence of any modules, no "modules" map is created, but there
+are other executable pages to map, due to eBPF JIT, kprobe or ftrace.
+Map them by recognizing that the first "module" symbol is not
+necessarily from a module, and adjust the map accordingly.
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: H. Peter Anvin <hpa@zytor.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Cc: x86@kernel.org
+Link: http://lore.kernel.org/lkml/20200512121922.8997-10-adrian.hunter@intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/symbol-elf.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
+index a701a8a48f005..166c621e02235 100644
+--- a/tools/perf/util/symbol-elf.c
++++ b/tools/perf/util/symbol-elf.c
+@@ -1421,6 +1421,7 @@ struct kcore_copy_info {
+ u64 first_symbol;
+ u64 last_symbol;
+ u64 first_module;
++ u64 first_module_symbol;
+ u64 last_module_symbol;
+ size_t phnum;
+ struct list_head phdrs;
+@@ -1497,6 +1498,8 @@ static int kcore_copy__process_kallsyms(void *arg, const char *name, char type,
+ return 0;
+
+ if (strchr(name, '[')) {
++ if (!kci->first_module_symbol || start < kci->first_module_symbol)
++ kci->first_module_symbol = start;
+ if (start > kci->last_module_symbol)
+ kci->last_module_symbol = start;
+ return 0;
+@@ -1694,6 +1697,10 @@ static int kcore_copy__calc_maps(struct kcore_copy_info *kci, const char *dir,
+ kci->etext += page_size;
+ }
+
++ if (kci->first_module_symbol &&
++ (!kci->first_module || kci->first_module_symbol < kci->first_module))
++ kci->first_module = kci->first_module_symbol;
++
+ kci->first_module = round_down(kci->first_module, page_size);
+
+ if (kci->last_module_symbol) {
+--
+2.25.1
+
--- /dev/null
+From 50ed9d513a15ca6e51970587206cc4aa51388f26 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Mar 2020 11:23:47 -0700
+Subject: perf mem2node: Avoid double free related to realloc
+
+From: Ian Rogers <irogers@google.com>
+
+[ Upstream commit 266150c94c69429cf6d18e130237224a047f5061 ]
+
+Realloc of size zero is a free, not an error; avoid this causing a double
+free. Caught by clang's address sanitizer:
+
+==2634==ERROR: AddressSanitizer: attempting double-free on 0x6020000015f0 in thread T0:
+ #0 0x5649659297fd in free llvm/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp:123:3
+ #1 0x5649659e9251 in __zfree tools/lib/zalloc.c:13:2
+ #2 0x564965c0f92c in mem2node__exit tools/perf/util/mem2node.c:114:2
+ #3 0x564965a08b4c in perf_c2c__report tools/perf/builtin-c2c.c:2867:2
+ #4 0x564965a0616a in cmd_c2c tools/perf/builtin-c2c.c:2989:10
+ #5 0x564965944348 in run_builtin tools/perf/perf.c:312:11
+ #6 0x564965943235 in handle_internal_command tools/perf/perf.c:364:8
+ #7 0x5649659440c4 in run_argv tools/perf/perf.c:408:2
+ #8 0x564965942e41 in main tools/perf/perf.c:538:3
+
+0x6020000015f0 is located 0 bytes inside of 1-byte region [0x6020000015f0,0x6020000015f1)
+freed by thread T0 here:
+ #0 0x564965929da3 in realloc third_party/llvm/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp:164:3
+ #1 0x564965c0f55e in mem2node__init tools/perf/util/mem2node.c:97:16
+ #2 0x564965a08956 in perf_c2c__report tools/perf/builtin-c2c.c:2803:8
+ #3 0x564965a0616a in cmd_c2c tools/perf/builtin-c2c.c:2989:10
+ #4 0x564965944348 in run_builtin tools/perf/perf.c:312:11
+ #5 0x564965943235 in handle_internal_command tools/perf/perf.c:364:8
+ #6 0x5649659440c4 in run_argv tools/perf/perf.c:408:2
+ #7 0x564965942e41 in main tools/perf/perf.c:538:3
+
+previously allocated by thread T0 here:
+ #0 0x564965929c42 in calloc third_party/llvm/llvm-project/compiler-rt/lib/asan/asan_malloc_linux.cpp:154:3
+ #1 0x5649659e9220 in zalloc tools/lib/zalloc.c:8:9
+ #2 0x564965c0f32d in mem2node__init tools/perf/util/mem2node.c:61:12
+ #3 0x564965a08956 in perf_c2c__report tools/perf/builtin-c2c.c:2803:8
+ #4 0x564965a0616a in cmd_c2c tools/perf/builtin-c2c.c:2989:10
+ #5 0x564965944348 in run_builtin tools/perf/perf.c:312:11
+ #6 0x564965943235 in handle_internal_command tools/perf/perf.c:364:8
+ #7 0x5649659440c4 in run_argv tools/perf/perf.c:408:2
+ #8 0x564965942e41 in main tools/perf/perf.c:538:3
+
+v2: add a WARN_ON_ONCE when the free condition arises.
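+
+For illustration only (not from the patch), the hazard reduces to the
+following userspace pattern, where the element count may be 0 after
+merging:
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  int main(void)
+  {
+      size_t j = 0;   /* e.g. every entry was merged away */
+      int *entries = calloc(1, sizeof(*entries));
+      int *tmp = realloc(entries, sizeof(*entries) * j);
+
+      /* realloc(p, 0) may free p and return NULL; that is not a
+       * failure, so keeping the old pointer here would later cause a
+       * double free.  Only keep the old pointer when the requested
+       * size was non-zero and realloc() really failed.
+       */
+      if (tmp || j == 0)
+          entries = tmp;
+
+      free(entries);  /* safe: free(NULL) is a no-op */
+      return 0;
+  }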
+
+Signed-off-by: Ian Rogers <irogers@google.com>
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: clang-built-linux@googlegroups.com
+Link: http://lore.kernel.org/lkml/20200320182347.87675-1-irogers@google.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/mem2node.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tools/perf/util/mem2node.c b/tools/perf/util/mem2node.c
+index c6fd81c025863..81c5a2e438b7d 100644
+--- a/tools/perf/util/mem2node.c
++++ b/tools/perf/util/mem2node.c
+@@ -1,5 +1,6 @@
+ #include <errno.h>
+ #include <inttypes.h>
++#include <asm/bug.h>
+ #include <linux/bitmap.h>
+ #include "mem2node.h"
+ #include "util.h"
+@@ -92,7 +93,7 @@ int mem2node__init(struct mem2node *map, struct perf_env *env)
+
+ /* Cut unused entries, due to merging. */
+ tmp_entries = realloc(entries, sizeof(*entries) * j);
+- if (tmp_entries)
++ if (tmp_entries || WARN_ON_ONCE(j == 0))
+ entries = tmp_entries;
+
+ for (i = 0; i < j; i++) {
+--
+2.25.1
+
--- /dev/null
+From e1641fa878283907f9df4bcc3ff28029430fc73c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2020 22:36:24 -0700
+Subject: perf metricgroup: Free metric_events on error
+
+From: Ian Rogers <irogers@google.com>
+
+[ Upstream commit a159e2fe89b4d1f9fb54b0ae418b961e239bf617 ]
+
+Avoid a simple memory leak.
+
+Signed-off-by: Ian Rogers <irogers@google.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Alexei Starovoitov <ast@kernel.org>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Andrii Nakryiko <andriin@fb.com>
+Cc: Cong Wang <xiyou.wangcong@gmail.com>
+Cc: Daniel Borkmann <daniel@iogearbox.net>
+Cc: Jin Yao <yao.jin@linux.intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: John Fastabend <john.fastabend@gmail.com>
+Cc: John Garry <john.garry@huawei.com>
+Cc: Kajol Jain <kjain@linux.ibm.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Kim Phillips <kim.phillips@amd.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Martin KaFai Lau <kafai@fb.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: Yonghong Song <yhs@fb.com>
+Cc: bpf@vger.kernel.org
+Cc: kp singh <kpsingh@chromium.org>
+Cc: netdev@vger.kernel.org
+Link: http://lore.kernel.org/lkml/20200508053629.210324-10-irogers@google.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/metricgroup.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
+index 8b3dafe3fac3a..6dcc6e1182a54 100644
+--- a/tools/perf/util/metricgroup.c
++++ b/tools/perf/util/metricgroup.c
+@@ -171,6 +171,7 @@ static int metricgroup__setup_events(struct list_head *groups,
+ if (!evsel) {
+ pr_debug("Cannot resolve %s: %s\n",
+ eg->metric_name, eg->metric_expr);
++ free(metric_events);
+ continue;
+ }
+ for (i = 0; i < eg->idnum; i++)
+@@ -178,11 +179,13 @@ static int metricgroup__setup_events(struct list_head *groups,
+ me = metricgroup__lookup(metric_events_list, evsel, true);
+ if (!me) {
+ ret = -ENOMEM;
++ free(metric_events);
+ break;
+ }
+ expr = malloc(sizeof(struct metric_expr));
+ if (!expr) {
+ ret = -ENOMEM;
++ free(metric_events);
+ break;
+ }
+ expr->metric_expr = eg->metric_expr;
+--
+2.25.1
+
--- /dev/null
+From ae51d10e62dfc65367df11c9071b3820f21ec819 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 14 Mar 2020 10:03:56 -0700
+Subject: perf parse-events: Fix 3 use after frees found with clang ASAN
+
+From: Ian Rogers <irogers@google.com>
+
+[ Upstream commit d4953f7ef1a2e87ef732823af35361404d13fea8 ]
+
+Reproducible with a clang ASAN build and then running 'perf test', in
+particular 'Parse event definition strings'.
+
+Signed-off-by: Ian Rogers <irogers@google.com>
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: clang-built-linux@googlegroups.com
+Link: http://lore.kernel.org/lkml/20200314170356.62914-1-irogers@google.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/evsel.c | 1 +
+ tools/perf/util/parse-events.c | 4 ++--
+ 2 files changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index 4fad92213609f..68c5ab0e1800b 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -1290,6 +1290,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
+ thread_map__put(evsel->threads);
+ zfree(&evsel->group_name);
+ zfree(&evsel->name);
++ zfree(&evsel->pmu_name);
+ perf_evsel__object.fini(evsel);
+ }
+
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index 95043cae57740..6d087d9acd5ee 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -1261,7 +1261,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
+ attr.type = pmu->type;
+ evsel = __add_event(list, &parse_state->idx, &attr, NULL, pmu, NULL, auto_merge_stats);
+ if (evsel) {
+- evsel->pmu_name = name;
++ evsel->pmu_name = name ? strdup(name) : NULL;
+ evsel->use_uncore_alias = use_uncore_alias;
+ return 0;
+ } else {
+@@ -1302,7 +1302,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
+ evsel->snapshot = info.snapshot;
+ evsel->metric_expr = info.metric_expr;
+ evsel->metric_name = info.metric_name;
+- evsel->pmu_name = name;
++ evsel->pmu_name = name ? strdup(name) : NULL;
+ evsel->use_uncore_alias = use_uncore_alias;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From c9afe7992d1925265f5b454d09df79bfdf2b98b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 May 2020 10:20:26 -0300
+Subject: perf parse-events: Fix incorrect conversion of 'if () free()' to
+ 'zfree()'
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+[ Upstream commit 7fcdccd4237724931d9773d1e3039bfe053a6f52 ]
+
+When applying a patch by Ian I incorrectly converted to zfree() an
+expression that involved testing some other struct member, not the one
+being freed, which led to bugs reproducible by:
+
+ $ perf stat -e i/bs,tsc,L2/o sleep 1
+ WARNING: multiple event parsing errors
+ Segmentation fault (core dumped)
+ $
+
+Fix it by restoring the test for pos->free_str before freeing
+pos->val.str, but continue using zfree(&pos->val.str) to set that member
+to NULL after freeing it.
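+
+For illustration only (the struct below is a simplified stand-in for the
+real config-term type in parse-events.h), the pattern being fixed is
+freeing a union member that does not always hold an owned string:
+
+  #include <stdlib.h>
+  #include <string.h>
+
+  struct term {
+      union {
+          char *str;
+          unsigned long num;
+      } val;
+      int free_str;   /* true only when val.str owns heap memory */
+  };
+
+  static void term_free(struct term *t)
+  {
+      /* Freeing unconditionally would hand val.num (an integer) to
+       * free() whenever free_str is false.  Test the flag describing
+       * the union, then NULL the pointer after freeing it.
+       */
+      if (t->free_str) {
+          free(t->val.str);
+          t->val.str = NULL;
+      }
+      free(t);
+  }
+
+  int main(void)
+  {
+      struct term *t = calloc(1, sizeof(*t));
+
+      if (!t)
+          return 1;
+      t->val.str = strdup("config");
+      t->free_str = 1;
+      term_free(t);
+      return 0;
+  }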
+
+Reported-by: Ian Rogers <irogers@google.com>
+Fixes: e8dfb81838b1 ("perf parse-events: Fix memory leaks found on parse_events")
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: clang-built-linux@googlegroups.com
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/parse-events.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index cce96b05d24c9..426f1984c143e 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -1287,7 +1287,8 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
+
+ list_for_each_entry_safe(pos, tmp, &config_terms, list) {
+ list_del_init(&pos->list);
+- zfree(&pos->val.str);
++ if (pos->free_str)
++ zfree(&pos->val.str);
+ free(pos);
+ }
+ return -EINVAL;
+--
+2.25.1
+
--- /dev/null
+From 12af937c8babb84eb214501bdc2de94daa0b54b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Mar 2020 19:31:00 -0700
+Subject: perf parse-events: Fix memory leaks found on parse_events
+
+From: Ian Rogers <irogers@google.com>
+
+[ Upstream commit e8dfb81838b14f82521968343884665b996646ef ]
+
+Fix a memory leak found by applying LLVM's libfuzzer on parse_events().
+
+Signed-off-by: Ian Rogers <irogers@google.com>
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Leo Yan <leo.yan@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: clang-built-linux@googlegroups.com
+Link: http://lore.kernel.org/lkml/20200319023101.82458-1-irogers@google.com
+[ split from a larger patch, use zfree() ]
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/parse-events.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index 6d087d9acd5ee..cce96b05d24c9 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -1287,6 +1287,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
+
+ list_for_each_entry_safe(pos, tmp, &config_terms, list) {
+ list_del_init(&pos->list);
++ zfree(&pos->val.str);
+ free(pos);
+ }
+ return -EINVAL;
+--
+2.25.1
+
--- /dev/null
+From 5195a97aefd91798c80f6c0c80541d0be3d1d76c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2020 08:36:18 +0800
+Subject: perf parse-events: Use strcmp() to compare the PMU name
+
+From: Jin Yao <yao.jin@linux.intel.com>
+
+[ Upstream commit 8510895bafdbf7c4dd24c22946d925691135c2b2 ]
+
+A big uncore event group is split into multiple small groups which only
+include the uncore events from the same PMU. This has been supported in
+the commit 3cdc5c2cb924a ("perf parse-events: Handle uncore event
+aliases in small groups properly").
+
+If an event's PMU name starts to repeat, it must be a new leader event.
+That can be used to distinguish the leader from the other members.
+But currently only the pmu_name pointers are compared
+(leader->pmu_name == evsel->pmu_name), not the strings themselves.
+
+If we use "perf stat -M LLC_MISSES.PCIE_WRITE -a" on cascadelakex,
+the event list is:
+
+ evsel->name evsel->pmu_name
+ ---------------------------------------------------------------
+ unc_iio_data_req_of_cpu.mem_write.part0 uncore_iio_4 (as leader)
+ unc_iio_data_req_of_cpu.mem_write.part0 uncore_iio_2
+ unc_iio_data_req_of_cpu.mem_write.part0 uncore_iio_0
+ unc_iio_data_req_of_cpu.mem_write.part0 uncore_iio_5
+ unc_iio_data_req_of_cpu.mem_write.part0 uncore_iio_3
+ unc_iio_data_req_of_cpu.mem_write.part0 uncore_iio_1
+ unc_iio_data_req_of_cpu.mem_write.part1 uncore_iio_4
+ ......
+
+For the event "unc_iio_data_req_of_cpu.mem_write.part1" with
+"uncore_iio_4", it should be the event from PMU "uncore_iio_4".
+It's not a new leader for this PMU.
+
+But if we use "(leader->pmu_name == evsel->pmu_name)", the check
+fails and the event is stored in leaders[] as a new
+PMU leader.
+
+So this patch uses strcmp to compare the PMU name between events.
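+
+For illustration only (outside this patch): comparing the pointers only
+says whether two names share the same allocation, not whether they are
+equal strings, which matters now that each evsel strdup()s its own copy
+of the PMU name:
+
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+
+  int main(void)
+  {
+      /* equal contents, different storage */
+      char *leader_pmu = strdup("uncore_iio_4");
+      char *evsel_pmu  = strdup("uncore_iio_4");
+
+      if (!leader_pmu || !evsel_pmu)
+          return 1;
+
+      if (leader_pmu == evsel_pmu)
+          printf("pointer compare: same PMU\n");  /* never printed */
+
+      if (!strcmp(leader_pmu, evsel_pmu))
+          printf("strcmp: same PMU\n");           /* printed */
+
+      free(leader_pmu);
+      free(evsel_pmu);
+      return 0;
+  }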
+
+Fixes: d4953f7ef1a2 ("perf parse-events: Fix 3 use after frees found with clang ASAN")
+Signed-off-by: Jin Yao <yao.jin@linux.intel.com>
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Jin Yao <yao.jin@intel.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lore.kernel.org/lkml/20200430003618.17002-1-yao.jin@linux.intel.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/parse-events.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index 426f1984c143e..5ab5c69f50500 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -1423,12 +1423,11 @@ parse_events__set_leader_for_uncore_aliase(char *name, struct list_head *list,
+ * event. That can be used to distinguish the leader from
+ * other members, even they have the same event name.
+ */
+- if ((leader != evsel) && (leader->pmu_name == evsel->pmu_name)) {
++ if ((leader != evsel) &&
++ !strcmp(leader->pmu_name, evsel->pmu_name)) {
+ is_leader = false;
+ continue;
+ }
+- /* The name is always alias name */
+- WARN_ON(strcmp(leader->name, evsel->name));
+
+ /* Store the leader event for each PMU */
+ leaders[nr_pmu++] = (uintptr_t) evsel;
+--
+2.25.1
+
--- /dev/null
+From 8590ee6cd336f04737f4797e5752d395f64241cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 18 May 2020 15:14:45 +0200
+Subject: perf stat: Fix duration_time value for higher intervals
+
+From: Jiri Olsa <jolsa@kernel.org>
+
+[ Upstream commit ea9eb1f456a08c18feb485894185f7a4e31cc8a4 ]
+
+Joakim reported a wrong duration_time value for intervals bigger
+than 4000 [1].
+
+The problem is in the interval value we pass to the update_stats()
+function, which is typed as 'unsigned int' and overflows when
+we get over 2^32 (happens between intervals 4000 and 5000).
+
+Retyping the passed value to unsigned long long.
+
+[1] https://www.spinics.net/lists/linux-perf-users/msg11777.html
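+
+Illustrative userspace sketch (not part of the patch): with a 32-bit
+'unsigned int' the product wraps once the interval exceeds roughly
+4294 ms:
+
+  #include <stdio.h>
+
+  int main(void)
+  {
+      unsigned int interval = 5000;   /* perf stat -I 5000 */
+
+      /* 5000 * 1000000 = 5e9 does not fit in 32 bits ... */
+      unsigned long long wrong = interval * 1000000;
+      /* ... so force the multiplication to be done in 64 bits */
+      unsigned long long right = interval * 1000000ULL;
+
+      printf("wrong: %llu ns\n", wrong);  /* 705032704 */
+      printf("right: %llu ns\n", right);  /* 5000000000 */
+      return 0;
+  }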
+
+Fixes: b90f1333ef08 ("perf stat: Update walltime_nsecs_stats in interval mode")
+Reported-by: Joakim Zhang <qiangqing.zhang@nxp.com>
+Signed-off-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Michael Petlan <mpetlan@redhat.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lore.kernel.org/lkml/20200518131445.3745083-1-jolsa@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/builtin-stat.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
+index 6aae10ff954c7..adabe9d4dc866 100644
+--- a/tools/perf/builtin-stat.c
++++ b/tools/perf/builtin-stat.c
+@@ -422,7 +422,7 @@ static void process_interval(void)
+ }
+
+ init_stats(&walltime_nsecs_stats);
+- update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000);
++ update_stats(&walltime_nsecs_stats, stat_config.interval * 1000000ULL);
+ print_counters(&rs, 0, NULL);
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 95e34517788e76dc1811a2853d2b86f7dd0f6135 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Apr 2020 09:15:50 -0700
+Subject: perf stat: Force error in fallback on :k events
+
+From: Stephane Eranian <eranian@google.com>
+
+[ Upstream commit bec49a9e05db3dbdca696fa07c62c52638fb6371 ]
+
+When it is not possible for a non-privilege perf command to monitor at
+the kernel level (:k), the fallback code forces a :u. That works if the
+event was previously monitoring both levels. But if the event was
+already constrained to kernel only, then it does not make sense to
+restrict it to user only.
+
+Given the code works by exclusion, a kernel only event would have:
+
+ attr->exclude_user = 1
+
+The fallback code would add:
+
+ attr->exclude_kernel = 1
+
+In the end the event would not monitor either the user level or the kernel
+level. In other words, it would count nothing.
+
+An event programmed to monitor kernel only cannot be switched to user
+only without seriously warning the user.
+
+This patch forces an error in this case to make it clear the request
+cannot really be satisfied.
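+
+Illustrative only (simplified, hypothetical struct mirroring the exclude
+bits): with the old fallback both bits end up set, so neither level is
+counted:
+
+  #include <stdio.h>
+
+  struct attr {
+      unsigned int exclude_user   : 1;
+      unsigned int exclude_kernel : 1;
+  };
+
+  int main(void)
+  {
+      struct attr a = { .exclude_user = 1 };  /* cycles:k */
+
+      /* old fallback: blindly exclude the kernel as well */
+      a.exclude_kernel = 1;
+
+      if (a.exclude_user && a.exclude_kernel)
+          printf("event now counts nothing\n");
+      return 0;
+  }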
+
+Behavior with paranoid 1:
+
+ $ sudo bash -c "echo 1 > /proc/sys/kernel/perf_event_paranoid"
+ $ perf stat -e cycles:k sleep 1
+
+ Performance counter stats for 'sleep 1':
+
+ 1,520,413 cycles:k
+
+ 1.002361664 seconds time elapsed
+
+ 0.002480000 seconds user
+ 0.000000000 seconds sys
+
+Old behavior with paranoid 2:
+
+ $ sudo bash -c "echo 2 > /proc/sys/kernel/perf_event_paranoid"
+ $ perf stat -e cycles:k sleep 1
+ Performance counter stats for 'sleep 1':
+
+ 0 cycles:ku
+
+ 1.002358127 seconds time elapsed
+
+ 0.002384000 seconds user
+ 0.000000000 seconds sys
+
+New behavior with paranoid 2:
+
+ $ sudo bash -c "echo 2 > /proc/sys/kernel/perf_event_paranoid"
+ $ perf stat -e cycles:k sleep 1
+ Error:
+ You may not have permission to collect stats.
+
+ Consider tweaking /proc/sys/kernel/perf_event_paranoid,
+ which controls use of the performance events system by
+ unprivileged users (without CAP_PERFMON or CAP_SYS_ADMIN).
+
+ The current value is 2:
+
+ -1: Allow use of (almost) all events by all users
+ Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK
+ >= 0: Disallow ftrace function tracepoint by users without CAP_PERFMON or CAP_SYS_ADMIN
+ Disallow raw tracepoint access by users without CAP_SYS_PERFMON or CAP_SYS_ADMIN
+ >= 1: Disallow CPU event access by users without CAP_PERFMON or CAP_SYS_ADMIN
+ >= 2: Disallow kernel profiling by users without CAP_PERFMON or CAP_SYS_ADMIN
+
+ To make this setting permanent, edit /etc/sysctl.conf too, e.g.:
+
+ kernel.perf_event_paranoid = -1
+
+v2 of this patch addresses the review feedback from jolsa@redhat.com.
+
+Signed-off-by: Stephane Eranian <eranian@google.com>
+Reviewed-by: Ian Rogers <irogers@google.com>
+Acked-by: Jiri Olsa <jolsa@redhat.com>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lore.kernel.org/lkml/20200414161550.225588-1-irogers@google.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/evsel.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
+index 68c5ab0e1800b..e8586957562b3 100644
+--- a/tools/perf/util/evsel.c
++++ b/tools/perf/util/evsel.c
+@@ -2796,6 +2796,10 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
+ char *new_name;
+ const char *sep = ":";
+
++ /* If event has exclude user then don't exclude kernel. */
++ if (evsel->core.attr.exclude_user)
++ return false;
++
+ /* Is there already the separator in the name. */
+ if (strchr(name, '/') ||
+ strchr(name, ':'))
+--
+2.25.1
+
--- /dev/null
+From d448b313fce0cb97c26f9a8fa3fe2a4a96d1b405 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Feb 2020 11:21:11 +0100
+Subject: perf test: Fix test trace+probe_vfs_getname.sh on s390
+
+From: Thomas Richter <tmricht@linux.ibm.com>
+
+[ Upstream commit 2bbc83537614517730e9f2811195004b712de207 ]
+
+This test places a kprobe to function getname_flags() in the kernel
+which has the following prototype:
+
+ struct filename *getname_flags(const char __user *filename, int flags, int *empty)
+
+The 'filename' argument points to a filename located in user space memory.
+
+Looking at commit 88903c464321c ("tracing/probe: Add ustring type for
+user-space string") the kprobe should indicate that user space memory is
+accessed.
+
+Output before:
+
+ [root@m35lp76 perf]# ./perf test 66 67
+ 66: Use vfs_getname probe to get syscall args filenames : FAILED!
+ 67: Check open filename arg using perf trace + vfs_getname: FAILED!
+ [root@m35lp76 perf]#
+
+Output after:
+
+ [root@m35lp76 perf]# ./perf test 66 67
+ 66: Use vfs_getname probe to get syscall args filenames : Ok
+ 67: Check open filename arg using perf trace + vfs_getname: Ok
+ [root@m35lp76 perf]#
+
+Comments from Masami Hiramatsu:
+
+This bug doesn't happen on x86 or other archs on which user address
+space and kernel address space are the same. On some arches (ppc64 in
+this case?) user address space is partially or completely the same as
+kernel address space.
+
+(Yes, they switch the world when running into the kernel) In this case,
+we need to use different data access functions for each space.
+
+That is why I introduced the "ustring" type for kprobe events.
+
+As far as I can see, Thomas's patch is sane. Thomas, could you show us
+your result on your test environment?
+
+Comments from Thomas Richter:
+
+Test results for s/390 included above.
+
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
+Cc: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Cc: Vasily Gorbik <gor@linux.ibm.com>
+Link: http://lore.kernel.org/lkml/20200217102111.61137-1-tmricht@linux.ibm.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/tests/shell/lib/probe_vfs_getname.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/tests/shell/lib/probe_vfs_getname.sh b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+index 7cb99b433888b..c2cc42daf9242 100644
+--- a/tools/perf/tests/shell/lib/probe_vfs_getname.sh
++++ b/tools/perf/tests/shell/lib/probe_vfs_getname.sh
+@@ -14,7 +14,7 @@ add_probe_vfs_getname() {
+ if [ $had_vfs_getname -eq 1 ] ; then
+ line=$(perf probe -L getname_flags 2>&1 | egrep 'result.*=.*filename;' | sed -r 's/[[:space:]]+([[:digit:]]+)[[:space:]]+result->uptr.*/\1/')
+ perf probe -q "vfs_getname=getname_flags:${line} pathname=result->name:string" || \
+- perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:string"
++ perf probe $verbose "vfs_getname=getname_flags:${line} pathname=filename:ustring"
+ fi
+ }
+
+--
+2.25.1
+
--- /dev/null
+From b165d319469c3be18fec924f1cc1534c92f4950d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 5 Mar 2020 23:11:10 -0800
+Subject: perf trace: Fix the selection for architectures to generate the errno
+ name tables
+
+From: Ian Rogers <irogers@google.com>
+
+[ Upstream commit 7597ce89b3ed239f7a3408b930d2a6c7a4c938a1 ]
+
+Make the architecture test directory agree with the code comment.
+
+Committer notes:
+
+This was split from a larger patch.
+
+The code was assuming the developer always worked from tools/perf/, so make sure
+the 'test -d' checks $toolsdir/perf/arch/$arch, to match the intent expressed in the comment,
+just above that loop.
+
+Signed-off-by: Ian Rogers <irogers@google.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Alexios Zavras <alexios.zavras@intel.com>
+Cc: Andi Kleen <ak@linux.intel.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Igor Lubashev <ilubashe@akamai.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Wei Li <liwei391@huawei.com>
+Link: http://lore.kernel.org/lkml/20200306071110.130202-4-irogers@google.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/trace/beauty/arch_errno_names.sh | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/trace/beauty/arch_errno_names.sh b/tools/perf/trace/beauty/arch_errno_names.sh
+index 22c9fc900c847..f8c44a85650be 100755
+--- a/tools/perf/trace/beauty/arch_errno_names.sh
++++ b/tools/perf/trace/beauty/arch_errno_names.sh
+@@ -91,7 +91,7 @@ EoHEADER
+ # in tools/perf/arch
+ archlist=""
+ for arch in $(find $toolsdir/arch -maxdepth 1 -mindepth 1 -type d -printf "%f\n" | grep -v x86 | sort); do
+- test -d arch/$arch && archlist="$archlist $arch"
++ test -d $toolsdir/perf/arch/$arch && archlist="$archlist $arch"
+ done
+
+ for arch in x86 $archlist generic; do
+--
+2.25.1
+
--- /dev/null
+From bbeba7c08bf2606d6ce23eb16539950d5c725057 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 May 2020 21:32:17 +0800
+Subject: perf util: Fix memory leak of prefix_if_not_in
+
+From: Xie XiuQi <xiexiuqi@huawei.com>
+
+[ Upstream commit 07e9a6f538cbeecaf5c55b6f2991416f873cdcbd ]
+
+We need to free "str" before returning when asprintf() fails, in order
+to avoid a memory leak.
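+
+Illustrative userspace sketch of the fixed flow (the function name is
+hypothetical; on glibc, asprintf() leaves the output pointer undefined
+on failure, and the caller still owns 'str'):
+
+  #define _GNU_SOURCE
+  #include <stdio.h>
+  #include <stdlib.h>
+  #include <string.h>
+
+  static char *prefix_if_missing(const char *pre, char *str)
+  {
+      char *n;
+
+      if (!str || strstr(str, pre))
+          return str;
+
+      if (asprintf(&n, "%s,%s", pre, str) < 0)
+          n = NULL;   /* do not return early: 'str' still needs freeing */
+
+      free(str);
+      return n;
+  }
+
+  int main(void)
+  {
+      char *s = prefix_if_missing("overhead", strdup("symbol,dso"));
+
+      printf("%s\n", s ? s : "(allocation failed)");
+      free(s);
+      return 0;
+  }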
+
+Signed-off-by: Xie XiuQi <xiexiuqi@huawei.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Hongbo Yao <yaohongbo@huawei.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Li Bin <huawei.libin@huawei.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Link: http://lore.kernel.org/lkml/20200521133218.30150-4-liwei391@huawei.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/sort.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
+index 46daa22b86e3b..85ff4f68adc00 100644
+--- a/tools/perf/util/sort.c
++++ b/tools/perf/util/sort.c
+@@ -2690,7 +2690,7 @@ static char *prefix_if_not_in(const char *pre, char *str)
+ return str;
+
+ if (asprintf(&n, "%s,%s", pre, str) < 0)
+- return NULL;
++ n = NULL;
+
+ free(str);
+ return n;
+--
+2.25.1
+
--- /dev/null
+From 1d11a24a648a5876f047d2a27012d0c2119d36f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Apr 2020 10:36:33 -0700
+Subject: phy: samsung: s5pv210-usb2: Add delay after reset
+
+From: Jonathan Bakker <xc-racer2@live.ca>
+
+[ Upstream commit 05942b8c36c7eb5d3fc5e375d4b0d0c49562e85d ]
+
+The USB phy takes some time to reset, so make sure we give it enough time. The
+delay length was taken from the 4x12 phy driver.
+
+This manifested in issues with the DWC2 driver since commit fe369e1826b3
+("usb: dwc2: Make dwc2_readl/writel functions endianness-agnostic.")
+where the endianness check would read the DWC ID as 0 due to the phy still
+resetting, resulting in the wrong endian mode being chosen.
+
+Signed-off-by: Jonathan Bakker <xc-racer2@live.ca>
+Link: https://lore.kernel.org/r/BN6PR04MB06605D52502816E500683553A3D10@BN6PR04MB0660.namprd04.prod.outlook.com
+Signed-off-by: Kishon Vijay Abraham I <kishon@ti.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/phy/samsung/phy-s5pv210-usb2.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/phy/samsung/phy-s5pv210-usb2.c b/drivers/phy/samsung/phy-s5pv210-usb2.c
+index f6f72339bbc32..bb7fdf491c1c2 100644
+--- a/drivers/phy/samsung/phy-s5pv210-usb2.c
++++ b/drivers/phy/samsung/phy-s5pv210-usb2.c
+@@ -142,6 +142,10 @@ static void s5pv210_phy_pwr(struct samsung_usb2_phy_instance *inst, bool on)
+ udelay(10);
+ rst &= ~rstbits;
+ writel(rst, drv->reg_phy + S5PV210_UPHYRST);
++ /* The following delay is necessary for the reset sequence to be
++ * completed
++ */
++ udelay(80);
+ } else {
+ pwr = readl(drv->reg_phy + S5PV210_UPHYPWR);
+ pwr |= phypwr;
+--
+2.25.1
+
--- /dev/null
+From c8a6ef45693b3295a5297b48d342a1955e0de733 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Nov 2019 00:56:03 +0300
+Subject: PM / devfreq: tegra30: Fix integer overflow on CPU's freq max out
+
+From: Dmitry Osipenko <digetx@gmail.com>
+
+[ Upstream commit 53b4b2aeee26f42cde5ff2a16dd0d8590c51a55a ]
+
+There is another kHz-conversion bug in the code, resulting in integer
+overflow. Although this time the resulting value is 4294966296, which is
+close to ULONG_MAX, so it happens to be okay in this case.
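+
+A minimal sketch of the idea (illustrative only): pre-dividing the cap
+keeps a later kHz-to-Hz style multiplication from wrapping an unsigned
+long:
+
+  #include <limits.h>
+  #include <stdio.h>
+
+  #define KHZ     1000UL
+  #define KHZ_MAX (ULONG_MAX / KHZ)
+
+  int main(void)
+  {
+      unsigned long freq_khz = KHZ_MAX;
+
+      /* ULONG_MAX * KHZ would wrap; KHZ_MAX * KHZ stays in range */
+      printf("max freq: %lu Hz\n", freq_khz * KHZ);
+      return 0;
+  }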
+
+Reviewed-by: Chanwoo Choi <cw00.choi@samsung.com>
+Tested-by: Peter Geis <pgwipeout@gmail.com>
+Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
+Signed-off-by: Chanwoo Choi <cw00.choi@samsung.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/devfreq/tegra-devfreq.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
+index 06768074d2d82..479d9575e1245 100644
+--- a/drivers/devfreq/tegra-devfreq.c
++++ b/drivers/devfreq/tegra-devfreq.c
+@@ -80,6 +80,8 @@
+
+ #define KHZ 1000
+
++#define KHZ_MAX (ULONG_MAX / KHZ)
++
+ /* Assume that the bus is saturated if the utilization is 25% */
+ #define BUS_SATURATION_RATIO 25
+
+@@ -180,7 +182,7 @@ struct tegra_actmon_emc_ratio {
+ };
+
+ static struct tegra_actmon_emc_ratio actmon_emc_ratios[] = {
+- { 1400000, ULONG_MAX },
++ { 1400000, KHZ_MAX },
+ { 1200000, 750000 },
+ { 1100000, 600000 },
+ { 1000000, 500000 },
+--
+2.25.1
+
--- /dev/null
+From 5398a5ed4e5e662fac5dc3cacd27a41eccb1b72e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2020 15:12:58 -0700
+Subject: power: supply: max17040: Correct voltage reading
+
+From: Jonathan Bakker <xc-racer2@live.ca>
+
+[ Upstream commit 0383024f811aa469df258039807810fc3793a105 ]
+
+According to the datasheet available at (1), the bottom four
+bits are always zero and the actual voltage is 1.25x this value
+in mV. Since the kernel API specifies that voltages should be in
+uV, it should report 1250x the shifted value.
+
+1) https://datasheets.maximintegrated.com/en/ds/MAX17040-MAX17041.pdf
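+
+A small worked example (illustrative register value): with VCELL reading
+0xB900, the driver now reports (0xB900 >> 4) * 1250 = 3700000 uV,
+i.e. 3.7 V:
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+      uint16_t vcell = 0xB900;    /* example register reading */
+
+      /* low 4 bits are always zero; each step of the remaining
+       * 12-bit value is 1.25 mV, i.e. 1250 uV
+       */
+      unsigned int uv = (vcell >> 4) * 1250;
+
+      printf("%u uV (%.3f V)\n", uv, uv / 1000000.0);
+      return 0;
+  }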
+
+Signed-off-by: Jonathan Bakker <xc-racer2@live.ca>
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/power/supply/max17040_battery.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/power/supply/max17040_battery.c b/drivers/power/supply/max17040_battery.c
+index 33c40f79d23d5..2c35c13ad546f 100644
+--- a/drivers/power/supply/max17040_battery.c
++++ b/drivers/power/supply/max17040_battery.c
+@@ -109,7 +109,7 @@ static void max17040_get_vcell(struct i2c_client *client)
+
+ vcell = max17040_read_reg(client, MAX17040_VCELL);
+
+- chip->vcell = vcell;
++ chip->vcell = (vcell >> 4) * 1250;
+ }
+
+ static void max17040_get_soc(struct i2c_client *client)
+--
+2.25.1
+
--- /dev/null
+From e653263c07d28dfb3a1d1ba9e04eae5c06fd1cdf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Oct 2019 12:25:36 +1100
+Subject: powerpc/eeh: Only dump stack once if an MMIO loop is detected
+
+From: Oliver O'Halloran <oohall@gmail.com>
+
+[ Upstream commit 4e0942c0302b5ad76b228b1a7b8c09f658a1d58a ]
+
+Many drivers don't check for errors when they get a 0xFFs response from an
+MMIO load. As a result after an EEH event occurs a driver can get stuck in
+a polling loop unless it has some kind of internal timeout logic.
+
+Currently EEH tries to detect and report stuck drivers by dumping a stack
+trace after eeh_dev_check_failure() is called EEH_MAX_FAILS times on an
+already frozen PE. The value of EEH_MAX_FAILS was chosen so that a dump
+would occur every few seconds if the driver was spinning in a loop. This
+results in a lot of spurious stack traces in the kernel log.
+
+Fix this by limiting it to printing one stack trace for each PE freeze. If
+the driver is truly stuck, the kernel's hung task detector is better suited
+to reporting the problem anyway.
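+
+Illustrative only (EEH_MAX_FAILS is much larger in the kernel): the
+difference between the modulo test and the equality test, for a counter
+bumped on every check of an already-frozen PE:
+
+  #include <stdio.h>
+
+  #define EEH_MAX_FAILS 5 /* illustrative value */
+
+  int main(void)
+  {
+      for (int count = 1; count <= 20; count++) {
+          if (count % EEH_MAX_FAILS == 0)
+              printf("old: dump at %d\n", count);  /* 5, 10, 15, 20 */
+          if (count == EEH_MAX_FAILS)
+              printf("new: dump at %d\n", count);  /* once, at 5 */
+      }
+      return 0;
+  }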
+
+Signed-off-by: Oliver O'Halloran <oohall@gmail.com>
+Reviewed-by: Sam Bobroff <sbobroff@linux.ibm.com>
+Tested-by: Sam Bobroff <sbobroff@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20191016012536.22588-1-oohall@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/eeh.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
+index fe3c6f3bd3b62..d123cba0992d0 100644
+--- a/arch/powerpc/kernel/eeh.c
++++ b/arch/powerpc/kernel/eeh.c
+@@ -502,7 +502,7 @@ int eeh_dev_check_failure(struct eeh_dev *edev)
+ rc = 1;
+ if (pe->state & EEH_PE_ISOLATED) {
+ pe->check_count++;
+- if (pe->check_count % EEH_MAX_FAILS == 0) {
++ if (pe->check_count == EEH_MAX_FAILS) {
+ dn = pci_device_to_OF_node(dev);
+ if (dn)
+ location = of_get_property(dn, "ibm,loc-code",
+--
+2.25.1
+
--- /dev/null
+From 7832b90acbbc03d46669b29081628d7301e5b81b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2020 14:34:07 +1000
+Subject: powerpc/traps: Make unrecoverable NMIs die instead of panic
+
+From: Nicholas Piggin <npiggin@gmail.com>
+
+[ Upstream commit 265d6e588d87194c2fe2d6c240247f0264e0c19b ]
+
+System Reset and Machine Check interrupts that are not recoverable due
+to being nested or interrupting when RI=0 currently panic. This is not
+necessary; we can often just kill the current context and recover.
+
+Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Reviewed-by: Christophe Leroy <christophe.leroy@c-s.fr>
+Link: https://lore.kernel.org/r/20200508043408.886394-16-npiggin@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/powerpc/kernel/traps.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
+index d5f351f02c153..7781f0168ce8c 100644
+--- a/arch/powerpc/kernel/traps.c
++++ b/arch/powerpc/kernel/traps.c
+@@ -430,11 +430,11 @@ out:
+ #ifdef CONFIG_PPC_BOOK3S_64
+ BUG_ON(get_paca()->in_nmi == 0);
+ if (get_paca()->in_nmi > 1)
+- nmi_panic(regs, "Unrecoverable nested System Reset");
++ die("Unrecoverable nested System Reset", regs, SIGABRT);
+ #endif
+ /* Must die if the interrupt is not recoverable */
+ if (!(regs->msr & MSR_RI))
+- nmi_panic(regs, "Unrecoverable System Reset");
++ die("Unrecoverable System Reset", regs, SIGABRT);
+
+ if (!nested)
+ nmi_exit();
+@@ -775,7 +775,7 @@ void machine_check_exception(struct pt_regs *regs)
+
+ /* Must die if the interrupt is not recoverable */
+ if (!(regs->msr & MSR_RI))
+- nmi_panic(regs, "Unrecoverable Machine check");
++ die("Unrecoverable Machine check", regs, SIGBUS);
+
+ return;
+
+--
+2.25.1
+
--- /dev/null
+From 702cec586f6ee210213b0a6ba629230b8b8af02c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 22 May 2020 16:53:06 +1000
+Subject: printk: handle blank console arguments passed in.
+
+From: Shreyas Joshi <shreyas.joshi@biamp.com>
+
+[ Upstream commit 48021f98130880dd74286459a1ef48b5e9bc374f ]
+
+If uboot passes a blank string to console_setup then it results in
+trashed memory. Ultimately, the kernel crashes while freeing up
+that memory.
+
+This fix checks whether a blank parameter is being
+passed to console_setup from uboot. If it detects that
+the console parameter is blank, it doesn't set up the serial
+device and gracefully exits.
+
+Link: https://lore.kernel.org/r/20200522065306.83-1-shreyas.joshi@biamp.com
+Signed-off-by: Shreyas Joshi <shreyas.joshi@biamp.com>
+Acked-by: Sergey Senozhatsky <sergey.senozhatsky@gmail.com>
+[pmladek@suse.com: Better format the commit message and code, remove unnecessary brackets.]
+Signed-off-by: Petr Mladek <pmladek@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/printk/printk.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
+index 3cb0e5b479ff3..cf272aba362be 100644
+--- a/kernel/printk/printk.c
++++ b/kernel/printk/printk.c
+@@ -2148,6 +2148,9 @@ static int __init console_setup(char *str)
+ char *s, *options, *brl_options = NULL;
+ int idx;
+
++ if (str[0] == 0)
++ return 1;
++
+ if (_braille_console_setup(&str, &brl_options))
+ return 1;
+
+--
+2.25.1
+
--- /dev/null
+From ab9f8d9622d35134facd8a1744789b466c20a5d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 25 Feb 2020 11:27:04 -0500
+Subject: random: fix data races at timer_rand_state
+
+From: Qian Cai <cai@lca.pw>
+
+[ Upstream commit e00d996a4317aff5351c4338dd97d390225412c2 ]
+
+Fields in "struct timer_rand_state" could be accessed concurrently.
+Lockless plain reads and writes result in data races. Fix them by adding
+pairs of READ|WRITE_ONCE(). The data races were reported by KCSAN,
+
+ BUG: KCSAN: data-race in add_timer_randomness / add_timer_randomness
+
+ write to 0xffff9f320a0a01d0 of 8 bytes by interrupt on cpu 22:
+ add_timer_randomness+0x100/0x190
+ add_timer_randomness at drivers/char/random.c:1152
+ add_disk_randomness+0x85/0x280
+ scsi_end_request+0x43a/0x4a0
+ scsi_io_completion+0xb7/0x7e0
+ scsi_finish_command+0x1ed/0x2a0
+ scsi_softirq_done+0x1c9/0x1d0
+ blk_done_softirq+0x181/0x1d0
+ __do_softirq+0xd9/0x57c
+ irq_exit+0xa2/0xc0
+ do_IRQ+0x8b/0x190
+ ret_from_intr+0x0/0x42
+ cpuidle_enter_state+0x15e/0x980
+ cpuidle_enter+0x69/0xc0
+ call_cpuidle+0x23/0x40
+ do_idle+0x248/0x280
+ cpu_startup_entry+0x1d/0x1f
+ start_secondary+0x1b2/0x230
+ secondary_startup_64+0xb6/0xc0
+
+ no locks held by swapper/22/0.
+ irq event stamp: 32871382
+ _raw_spin_unlock_irqrestore+0x53/0x60
+ _raw_spin_lock_irqsave+0x21/0x60
+ _local_bh_enable+0x21/0x30
+ irq_exit+0xa2/0xc0
+
+ read to 0xffff9f320a0a01d0 of 8 bytes by interrupt on cpu 2:
+ add_timer_randomness+0xe8/0x190
+ add_disk_randomness+0x85/0x280
+ scsi_end_request+0x43a/0x4a0
+ scsi_io_completion+0xb7/0x7e0
+ scsi_finish_command+0x1ed/0x2a0
+ scsi_softirq_done+0x1c9/0x1d0
+ blk_done_softirq+0x181/0x1d0
+ __do_softirq+0xd9/0x57c
+ irq_exit+0xa2/0xc0
+ do_IRQ+0x8b/0x190
+ ret_from_intr+0x0/0x42
+ cpuidle_enter_state+0x15e/0x980
+ cpuidle_enter+0x69/0xc0
+ call_cpuidle+0x23/0x40
+ do_idle+0x248/0x280
+ cpu_startup_entry+0x1d/0x1f
+ start_secondary+0x1b2/0x230
+ secondary_startup_64+0xb6/0xc0
+
+ no locks held by swapper/2/0.
+ irq event stamp: 37846304
+ _raw_spin_unlock_irqrestore+0x53/0x60
+ _raw_spin_lock_irqsave+0x21/0x60
+ _local_bh_enable+0x21/0x30
+ irq_exit+0xa2/0xc0
+
+ Reported by Kernel Concurrency Sanitizer on:
+ Hardware name: HP ProLiant BL660c Gen9, BIOS I38 10/17/2018
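+
+A rough userspace analogue (illustrative; the kernel uses its own
+READ_ONCE()/WRITE_ONCE() macros rather than C11 atomics, and the real
+struct also tracks last_delta2):
+
+  #include <stdatomic.h>
+  #include <stdio.h>
+
+  struct rand_state {
+      _Atomic long last_time;
+      _Atomic long last_delta;
+  };
+
+  static void add_timer_randomness(struct rand_state *state, long now)
+  {
+      /* marked accesses: concurrent readers and writers see each
+       * field as one whole load/store instead of a data race
+       */
+      long delta = now - atomic_load_explicit(&state->last_time,
+                                              memory_order_relaxed);
+      atomic_store_explicit(&state->last_time, now,
+                            memory_order_relaxed);
+
+      long delta2 = delta - atomic_load_explicit(&state->last_delta,
+                                                 memory_order_relaxed);
+      atomic_store_explicit(&state->last_delta, delta,
+                            memory_order_relaxed);
+
+      printf("delta=%ld delta2=%ld\n", delta, delta2);
+  }
+
+  int main(void)
+  {
+      struct rand_state s = { 0, 0 };
+
+      add_timer_randomness(&s, 100);
+      add_timer_randomness(&s, 250);
+      return 0;
+  }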
+
+Link: https://lore.kernel.org/r/1582648024-13111-1-git-send-email-cai@lca.pw
+Signed-off-by: Qian Cai <cai@lca.pw>
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/random.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/char/random.c b/drivers/char/random.c
+index 6a5d4dfafc474..80dedecfe15c5 100644
+--- a/drivers/char/random.c
++++ b/drivers/char/random.c
+@@ -1150,14 +1150,14 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
+ * We take into account the first, second and third-order deltas
+ * in order to make our estimate.
+ */
+- delta = sample.jiffies - state->last_time;
+- state->last_time = sample.jiffies;
++ delta = sample.jiffies - READ_ONCE(state->last_time);
++ WRITE_ONCE(state->last_time, sample.jiffies);
+
+- delta2 = delta - state->last_delta;
+- state->last_delta = delta;
++ delta2 = delta - READ_ONCE(state->last_delta);
++ WRITE_ONCE(state->last_delta, delta);
+
+- delta3 = delta2 - state->last_delta2;
+- state->last_delta2 = delta2;
++ delta3 = delta2 - READ_ONCE(state->last_delta2);
++ WRITE_ONCE(state->last_delta2, delta2);
+
+ if (delta < 0)
+ delta = -delta;
+--
+2.25.1
+
--- /dev/null
+From 269c3c561e860427007b785b050a19b4cf483b2d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Jun 2020 16:51:21 -0700
+Subject: rapidio: avoid data race between file operation callbacks and
+ mport_cdev_add().
+
+From: Madhuparna Bhowmik <madhuparnabhowmik10@gmail.com>
+
+[ Upstream commit e1c3cdb26ab881b77486dc50370356a349077c74 ]
+
+Fields of md (mport_dev) are set after cdev_device_add(). However, the
+file operation callbacks can be called after cdev_device_add() and
+therefore accesses to fields of md in the callbacks can race with the rest
+of the mport_cdev_add() function.
+
+One such example is INIT_LIST_HEAD(&md->portwrites) in mport_cdev_add();
+the list is initialised after cdev_device_add(). This can race with
+list_add_tail(&pw_filter->md_node,&md->portwrites) in
+rio_mport_add_pw_filter() which is called by unlocked_ioctl.
+
+To avoid such data races use cdev_device_add() after initializing md.
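+
+A rough userspace analogue of the ordering rule (illustrative only; in
+the driver the "publication" step is cdev_device_add()): finish
+initialising an object before making it visible to code that may run
+concurrently:
+
+  #include <pthread.h>
+  #include <stdio.h>
+
+  struct dev {
+      int ready_value;
+  };
+
+  static struct dev *published;   /* what the callback can see */
+  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+
+  static void *callback(void *arg)
+  {
+      (void)arg;
+      pthread_mutex_lock(&lock);
+      if (published)
+          printf("callback sees %d\n", published->ready_value);
+      pthread_mutex_unlock(&lock);
+      return NULL;
+  }
+
+  int main(void)
+  {
+      static struct dev d;
+      pthread_t t;
+
+      /* initialise everything first ... */
+      d.ready_value = 42;
+
+      /* ... and only then publish the object */
+      pthread_mutex_lock(&lock);
+      published = &d;
+      pthread_mutex_unlock(&lock);
+
+      pthread_create(&t, NULL, callback, NULL);
+      pthread_join(t, NULL);
+      return 0;
+  }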
+
+Found by Linux Driver Verification project (linuxtesting.org).
+
+Signed-off-by: Madhuparna Bhowmik <madhuparnabhowmik10@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Alexandre Bounine <alex.bou9@gmail.com>
+Cc: Matt Porter <mporter@kernel.crashing.org>
+Cc: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: Mike Marshall <hubcap@omnibond.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Ira Weiny <ira.weiny@intel.com>
+Cc: Allison Randal <allison@lohutok.net>
+Cc: Pavel Andrianov <andrianov@ispras.ru>
+Link: http://lkml.kernel.org/r/20200426112950.1803-1-madhuparnabhowmik10@gmail.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/rapidio/devices/rio_mport_cdev.c | 14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/rapidio/devices/rio_mport_cdev.c b/drivers/rapidio/devices/rio_mport_cdev.c
+index 5940780648e0f..f36a8a5261a13 100644
+--- a/drivers/rapidio/devices/rio_mport_cdev.c
++++ b/drivers/rapidio/devices/rio_mport_cdev.c
+@@ -2385,13 +2385,6 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
+ cdev_init(&md->cdev, &mport_fops);
+ md->cdev.owner = THIS_MODULE;
+
+- ret = cdev_device_add(&md->cdev, &md->dev);
+- if (ret) {
+- rmcd_error("Failed to register mport %d (err=%d)",
+- mport->id, ret);
+- goto err_cdev;
+- }
+-
+ INIT_LIST_HEAD(&md->doorbells);
+ spin_lock_init(&md->db_lock);
+ INIT_LIST_HEAD(&md->portwrites);
+@@ -2411,6 +2404,13 @@ static struct mport_dev *mport_cdev_add(struct rio_mport *mport)
+ #else
+ md->properties.transfer_mode |= RIO_TRANSFER_MODE_TRANSFER;
+ #endif
++
++ ret = cdev_device_add(&md->cdev, &md->dev);
++ if (ret) {
++ rmcd_error("Failed to register mport %d (err=%d)",
++ mport->id, ret);
++ goto err_cdev;
++ }
+ ret = rio_query_mport(mport, &attr);
+ if (!ret) {
+ md->properties.flags = attr.flags;
+--
+2.25.1
+
--- /dev/null
+From 6d4e8872220fa51c4c505640b6338bdffde2ef17 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2020 11:25:33 +0200
+Subject: RDMA/cm: Remove a race freeing timewait_info
+
+From: Jason Gunthorpe <jgg@mellanox.com>
+
+[ Upstream commit bede86a39d9dc3387ac00dcb8e1ac221676b2f25 ]
+
+When creating a cm_id during REQ, the id immediately becomes visible to
+the other MAD handlers, and shortly afterwards the state is moved to
+IB_CM_REQ_RCVD.
+
+This allows cm_rej_handler() to run concurrently and free the work:
+
+ CPU 0 CPU1
+ cm_req_handler()
+ ib_create_cm_id()
+ cm_match_req()
+ id_priv->state = IB_CM_REQ_RCVD
+ cm_rej_handler()
+ cm_acquire_id()
+ spin_lock(&id_priv->lock)
+ switch (id_priv->state)
+ case IB_CM_REQ_RCVD:
+ cm_reset_to_idle()
+ kfree(id_priv->timewait_info);
+ goto destroy
+ destroy:
+ kfree(id_priv->timewait_info);
+ id_priv->timewait_info = NULL
+
+Causing a double free or worse.
+
+Do not free the timewait_info without also holding the
+id_priv->lock. Simplify this entire flow by making the free unconditional
+during cm_destroy_id() and removing the confusing special case error
+unwind during creation of the timewait_info.
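+
+A minimal userspace sketch of the free-under-lock idea (illustrative
+names, not the cm.c code): both paths take the same lock, and whichever
+frees the pointer also clears it, so the other path sees NULL instead of
+freeing it a second time.
+
+  #include <pthread.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct timewait { int unused; };
+
+  static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+  static struct timewait *tw;
+
+  /* Either cleanup path may run first; freeing and clearing the pointer
+   * under the lock makes a double free impossible. */
+  static void cleanup_path(const char *who)
+  {
+      pthread_mutex_lock(&lock);
+      if (tw) {
+          free(tw);
+          tw = NULL;
+          printf("%s freed timewait\n", who);
+      }
+      pthread_mutex_unlock(&lock);
+  }
+
+  static void *other_handler(void *arg)
+  {
+      (void)arg;
+      cleanup_path("handler");
+      return NULL;
+  }
+
+  int main(void)
+  {
+      pthread_t t;
+
+      tw = calloc(1, sizeof(*tw));
+      pthread_create(&t, NULL, other_handler, NULL);
+      cleanup_path("destroy");    /* may race with the handler, but safely */
+      pthread_join(t, NULL);
+      return 0;
+  }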
+
+This also fixes a leak of the timewait if cm_destroy_id() is called in
+IB_CM_ESTABLISHED with an XRC TGT QP. The state machine will be left in
+ESTABLISHED while it needed to transition through IB_CM_TIMEWAIT to
+release the timewait pointer.
+
+Also fix a leak of the timewait_info if the caller mis-uses the API and
+does ib_send_cm_reqs().
+
+Fixes: a977049dacde ("[PATCH] IB: Add the kernel CM implementation")
+Link: https://lore.kernel.org/r/20200310092545.251365-4-leon@kernel.org
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cm.c | 25 +++++++++++++++----------
+ 1 file changed, 15 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
+index 64f206e11d497..4ebf63360a697 100644
+--- a/drivers/infiniband/core/cm.c
++++ b/drivers/infiniband/core/cm.c
+@@ -1100,14 +1100,22 @@ retest:
+ break;
+ }
+
+- spin_lock_irq(&cm.lock);
++ spin_lock_irq(&cm_id_priv->lock);
++ spin_lock(&cm.lock);
++ /* Required for cleanup paths related cm_req_handler() */
++ if (cm_id_priv->timewait_info) {
++ cm_cleanup_timewait(cm_id_priv->timewait_info);
++ kfree(cm_id_priv->timewait_info);
++ cm_id_priv->timewait_info = NULL;
++ }
+ if (!list_empty(&cm_id_priv->altr_list) &&
+ (!cm_id_priv->altr_send_port_not_ready))
+ list_del(&cm_id_priv->altr_list);
+ if (!list_empty(&cm_id_priv->prim_list) &&
+ (!cm_id_priv->prim_send_port_not_ready))
+ list_del(&cm_id_priv->prim_list);
+- spin_unlock_irq(&cm.lock);
++ spin_unlock(&cm.lock);
++ spin_unlock_irq(&cm_id_priv->lock);
+
+ cm_free_id(cm_id->local_id);
+ cm_deref_id(cm_id_priv);
+@@ -1424,7 +1432,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
+ /* Verify that we're not in timewait. */
+ cm_id_priv = container_of(cm_id, struct cm_id_private, id);
+ spin_lock_irqsave(&cm_id_priv->lock, flags);
+- if (cm_id->state != IB_CM_IDLE) {
++ if (cm_id->state != IB_CM_IDLE || WARN_ON(cm_id_priv->timewait_info)) {
+ spin_unlock_irqrestore(&cm_id_priv->lock, flags);
+ ret = -EINVAL;
+ goto out;
+@@ -1442,12 +1450,12 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
+ param->ppath_sgid_attr, &cm_id_priv->av,
+ cm_id_priv);
+ if (ret)
+- goto error1;
++ goto out;
+ if (param->alternate_path) {
+ ret = cm_init_av_by_path(param->alternate_path, NULL,
+ &cm_id_priv->alt_av, cm_id_priv);
+ if (ret)
+- goto error1;
++ goto out;
+ }
+ cm_id->service_id = param->service_id;
+ cm_id->service_mask = ~cpu_to_be64(0);
+@@ -1465,7 +1473,7 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
+
+ ret = cm_alloc_msg(cm_id_priv, &cm_id_priv->msg);
+ if (ret)
+- goto error1;
++ goto out;
+
+ req_msg = (struct cm_req_msg *) cm_id_priv->msg->mad;
+ cm_format_req(req_msg, cm_id_priv, param);
+@@ -1488,7 +1496,6 @@ int ib_send_cm_req(struct ib_cm_id *cm_id,
+ return 0;
+
+ error2: cm_free_msg(cm_id_priv->msg);
+-error1: kfree(cm_id_priv->timewait_info);
+ out: return ret;
+ }
+ EXPORT_SYMBOL(ib_send_cm_req);
+@@ -1973,7 +1980,7 @@ static int cm_req_handler(struct cm_work *work)
+ pr_debug("%s: local_id %d, no listen_cm_id_priv\n", __func__,
+ be32_to_cpu(cm_id->local_id));
+ ret = -EINVAL;
+- goto free_timeinfo;
++ goto destroy;
+ }
+
+ cm_id_priv->id.cm_handler = listen_cm_id_priv->id.cm_handler;
+@@ -2057,8 +2064,6 @@ static int cm_req_handler(struct cm_work *work)
+ rejected:
+ atomic_dec(&cm_id_priv->refcount);
+ cm_deref_id(listen_cm_id_priv);
+-free_timeinfo:
+- kfree(cm_id_priv->timewait_info);
+ destroy:
+ ib_destroy_cm_id(cm_id);
+ return ret;
+--
+2.25.1
+
--- /dev/null
+From b8aafa3809e5f0e4520d47dd4e0e5710c71ba843 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Nov 2019 14:44:11 +0800
+Subject: RDMA/i40iw: Fix potential use after free
+
+From: Pan Bian <bianpan2016@163.com>
+
+[ Upstream commit da046d5f895fca18d63b15ac8faebd5bf784e23a ]
+
+Release the variable dst after logging dst->error to avoid a possible use
+after free.
+
+Link: https://lore.kernel.org/r/1573022651-37171-1-git-send-email-bianpan2016@163.com
+Signed-off-by: Pan Bian <bianpan2016@163.com>
+Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/i40iw/i40iw_cm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_cm.c b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+index 4321b9e3dbb4b..0273d0404e740 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_cm.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_cm.c
+@@ -2071,9 +2071,9 @@ static int i40iw_addr_resolve_neigh_ipv6(struct i40iw_device *iwdev,
+ dst = i40iw_get_dst_ipv6(&src_addr, &dst_addr);
+ if (!dst || dst->error) {
+ if (dst) {
+- dst_release(dst);
+ i40iw_pr_err("ip6_route_output returned dst->error = %d\n",
+ dst->error);
++ dst_release(dst);
+ }
+ return rc;
+ }
+--
+2.25.1
+
--- /dev/null
+From 222770a141d2767d6a188dba9218d37c5f890ac0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Sep 2019 21:07:46 +0200
+Subject: RDMA/iw_cgxb4: Fix an error handling path in 'c4iw_connect()'
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 9067f2f0b41d7e817fc8c5259bab1f17512b0147 ]
+
+We should jump to fail3 in order to undo the 'xa_insert_irq()' call.
+
+Link: https://lore.kernel.org/r/20190923190746.10964-1-christophe.jaillet@wanadoo.fr
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/cxgb4/cm.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index 16145b0a14583..3fd3dfa3478b7 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -3293,7 +3293,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ if (raddr->sin_addr.s_addr == htonl(INADDR_ANY)) {
+ err = pick_local_ipaddrs(dev, cm_id);
+ if (err)
+- goto fail2;
++ goto fail3;
+ }
+
+ /* find a route */
+@@ -3315,7 +3315,7 @@ int c4iw_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
+ if (ipv6_addr_type(&raddr6->sin6_addr) == IPV6_ADDR_ANY) {
+ err = pick_local_ip6addrs(dev, cm_id);
+ if (err)
+- goto fail2;
++ goto fail3;
+ }
+
+ /* find a route */
+--
+2.25.1
+
--- /dev/null
+From 58b55b80edf484eccbdb3616d6508a1f4ae7ee4b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 Nov 2019 14:23:54 +0800
+Subject: RDMA/qedr: Fix potential use after free
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Pan Bian <bianpan2016@163.com>
+
+[ Upstream commit 960657b732e1ce21b07be5ab48a7ad3913d72ba4 ]
+
+Move the release operation after the error log to avoid a possible use
+after free.
+
+Link: https://lore.kernel.org/r/1573021434-18768-1-git-send-email-bianpan2016@163.com
+Signed-off-by: Pan Bian <bianpan2016@163.com>
+Acked-by: Michal Kalderon <michal.kalderon@marvell.com>
+Reviewed-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/qedr/qedr_iw_cm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/qedr/qedr_iw_cm.c b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+index 2566715773675..e908dfbaa1378 100644
+--- a/drivers/infiniband/hw/qedr/qedr_iw_cm.c
++++ b/drivers/infiniband/hw/qedr/qedr_iw_cm.c
+@@ -460,10 +460,10 @@ qedr_addr6_resolve(struct qedr_dev *dev,
+
+ if ((!dst) || dst->error) {
+ if (dst) {
+- dst_release(dst);
+ DP_ERR(dev,
+ "ip6_route_output returned dst->error = %d\n",
+ dst->error);
++ dst_release(dst);
+ }
+ return -EINVAL;
+ }
+--
+2.25.1
+
--- /dev/null
+From b9f5ea08810c4b32c2abb20b9c18cf973ad2a41e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Feb 2020 12:57:14 -0800
+Subject: RDMA/rxe: Fix configuration of atomic queue pair attributes
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit fb3063d31995cc4cf1d47a406bb61d6fb1b1d58d ]
+
+From the comment above the definition of the roundup_pow_of_two() macro:
+
+ The result is undefined when n == 0.
+
+Hence only pass positive values to roundup_pow_of_two(). This patch fixes
+the following UBSAN complaint:
+
+ UBSAN: Undefined behaviour in ./include/linux/log2.h:57:13
+ shift exponent 64 is too large for 64-bit type 'long unsigned int'
+ Call Trace:
+ dump_stack+0xa5/0xe6
+ ubsan_epilogue+0x9/0x26
+ __ubsan_handle_shift_out_of_bounds.cold+0x4c/0xf9
+ rxe_qp_from_attr.cold+0x37/0x5d [rdma_rxe]
+ rxe_modify_qp+0x59/0x70 [rdma_rxe]
+ _ib_modify_qp+0x5aa/0x7c0 [ib_core]
+ ib_modify_qp+0x3b/0x50 [ib_core]
+ cma_modify_qp_rtr+0x234/0x260 [rdma_cm]
+ __rdma_accept+0x1a7/0x650 [rdma_cm]
+ nvmet_rdma_cm_handler+0x1286/0x14cd [nvmet_rdma]
+ cma_cm_event_handler+0x6b/0x330 [rdma_cm]
+ cma_ib_req_handler+0xe60/0x22d0 [rdma_cm]
+ cm_process_work+0x30/0x140 [ib_cm]
+ cm_req_handler+0x11f4/0x1cd0 [ib_cm]
+ cm_work_handler+0xb8/0x344e [ib_cm]
+ process_one_work+0x569/0xb60
+ worker_thread+0x7a/0x5d0
+ kthread+0x1e6/0x210
+ ret_from_fork+0x24/0x30
+
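+The guard itself is easy to see in a small standalone sketch; the helper
+below is only a stand-in for the kernel's roundup_pow_of_two(), which, as
+the quoted comment says, must not be called with n == 0.
+
+  #include <stdio.h>
+
+  /* Stand-in: smallest power of two >= n, meaningful only for n > 0. */
+  static unsigned long round_up_pow2(unsigned long n)
+  {
+      unsigned long r = 1;
+
+      while (r < n)
+          r <<= 1;
+      return r;
+  }
+
+  int main(void)
+  {
+      unsigned long values[] = { 0, 1, 3, 5, 8 };
+
+      for (int i = 0; i < 5; i++) {
+          unsigned long v = values[i];
+          /* The fix: only round up positive values, map 0 to 0. */
+          unsigned long rounded = v ? round_up_pow2(v) : 0;
+
+          printf("%lu -> %lu\n", v, rounded);
+      }
+      return 0;
+  }
+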
+Link: https://lore.kernel.org/r/20200217205714.26937-1-bvanassche@acm.org
+Fixes: 8700e3e7c485 ("Soft RoCE driver")
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Reviewed-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rxe/rxe_qp.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_qp.c b/drivers/infiniband/sw/rxe/rxe_qp.c
+index 230697fa31fe3..8a22ab8b29e9b 100644
+--- a/drivers/infiniband/sw/rxe/rxe_qp.c
++++ b/drivers/infiniband/sw/rxe/rxe_qp.c
+@@ -583,15 +583,16 @@ int rxe_qp_from_attr(struct rxe_qp *qp, struct ib_qp_attr *attr, int mask,
+ int err;
+
+ if (mask & IB_QP_MAX_QP_RD_ATOMIC) {
+- int max_rd_atomic = __roundup_pow_of_two(attr->max_rd_atomic);
++ int max_rd_atomic = attr->max_rd_atomic ?
++ roundup_pow_of_two(attr->max_rd_atomic) : 0;
+
+ qp->attr.max_rd_atomic = max_rd_atomic;
+ atomic_set(&qp->req.rd_atomic, max_rd_atomic);
+ }
+
+ if (mask & IB_QP_MAX_DEST_RD_ATOMIC) {
+- int max_dest_rd_atomic =
+- __roundup_pow_of_two(attr->max_dest_rd_atomic);
++ int max_dest_rd_atomic = attr->max_dest_rd_atomic ?
++ roundup_pow_of_two(attr->max_dest_rd_atomic) : 0;
+
+ qp->attr.max_dest_rd_atomic = max_dest_rd_atomic;
+
+--
+2.25.1
+
--- /dev/null
+From f43b92914206f38ce2fb64128506690e06539a7a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 23 Mar 2020 13:28:00 +0200
+Subject: RDMA/rxe: Set sys_image_guid to be aligned with HW IB devices
+
+From: Zhu Yanjun <yanjunz@mellanox.com>
+
+[ Upstream commit d0ca2c35dd15a3d989955caec02beea02f735ee6 ]
+
+The RXE driver doesn't set sys_image_guid, so user space applications see
+zeros. This causes pyverbs tests to fail with the following traceback,
+because the IBTA spec requires a valid sys_image_guid.
+
+ Traceback (most recent call last):
+ File "./tests/test_device.py", line 51, in test_query_device
+ self.verify_device_attr(attr)
+ File "./tests/test_device.py", line 74, in verify_device_attr
+ assert attr.sys_image_guid != 0
+
+In order to fix it, set sys_image_guid to be equal to node_guid.
+
+Before:
+ 5: rxe0: ... node_guid 5054:00ff:feaa:5363 sys_image_guid
+ 0000:0000:0000:0000
+
+After:
+ 5: rxe0: ... node_guid 5054:00ff:feaa:5363 sys_image_guid
+ 5054:00ff:feaa:5363
+
+Fixes: 8700e3e7c485 ("Soft RoCE driver")
+Link: https://lore.kernel.org/r/20200323112800.1444784-1-leon@kernel.org
+Signed-off-by: Zhu Yanjun <yanjunz@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rxe/rxe.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/infiniband/sw/rxe/rxe.c b/drivers/infiniband/sw/rxe/rxe.c
+index 94dedabe648c2..6589ff51eaf5c 100644
+--- a/drivers/infiniband/sw/rxe/rxe.c
++++ b/drivers/infiniband/sw/rxe/rxe.c
+@@ -121,6 +121,8 @@ static void rxe_init_device_param(struct rxe_dev *rxe)
+ rxe->attr.max_fast_reg_page_list_len = RXE_MAX_FMR_PAGE_LIST_LEN;
+ rxe->attr.max_pkeys = RXE_MAX_PKEYS;
+ rxe->attr.local_ca_ack_delay = RXE_LOCAL_CA_ACK_DELAY;
++ addrconf_addr_eui48((unsigned char *)&rxe->attr.sys_image_guid,
++ rxe->ndev->dev_addr);
+
+ rxe->max_ucontext = RXE_MAX_UCONTEXT;
+ }
+--
+2.25.1
+
--- /dev/null
+From 8457d9c578f3431d2508a57759130b7feb4bb365 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 24 Aug 2020 17:21:22 -0700
+Subject: RISC-V: Take text_mutex in ftrace_init_nop()
+
+From: Palmer Dabbelt <palmerdabbelt@google.com>
+
+[ Upstream commit 66d18dbda8469a944dfec6c49d26d5946efba218 ]
+
+Without this we get lockdep failures. They're spurious failures as SMP isn't
+up when ftrace_init_nop() is called. As far as I can tell the easiest fix is
+to just take the lock, which also seems like the safest fix.
+
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Acked-by: Guo Ren <guoren@kernel.org>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/include/asm/ftrace.h | 7 +++++++
+ arch/riscv/kernel/ftrace.c | 19 +++++++++++++++++++
+ 2 files changed, 26 insertions(+)
+
+diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
+index c6dcc5291f972..02fbc175142e2 100644
+--- a/arch/riscv/include/asm/ftrace.h
++++ b/arch/riscv/include/asm/ftrace.h
+@@ -63,4 +63,11 @@ do { \
+ * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
+ */
+ #define MCOUNT_INSN_SIZE 8
++
++#ifndef __ASSEMBLY__
++struct dyn_ftrace;
++int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
++#define ftrace_init_nop ftrace_init_nop
++#endif
++
+ #endif
+diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
+index 6d39f64e4dce4..fa8530f05ed4f 100644
+--- a/arch/riscv/kernel/ftrace.c
++++ b/arch/riscv/kernel/ftrace.c
+@@ -88,6 +88,25 @@ int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ return __ftrace_modify_call(rec->ip, addr, false);
+ }
+
++
++/*
++ * This is called early on, and isn't wrapped by
++ * ftrace_arch_code_modify_{prepare,post_process}() and therefor doesn't hold
++ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
++ * just directly poke the text, but it's simpler to just take the lock
++ * ourselves.
++ */
++int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
++{
++ int out;
++
++ ftrace_arch_code_modify_prepare();
++ out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
++ ftrace_arch_code_modify_post_process();
++
++ return out;
++}
++
+ int ftrace_update_ftrace_func(ftrace_func_t func)
+ {
+ int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
+--
+2.25.1
+
--- /dev/null
+From 22a4adbd767be1b87b5e5707704c5283a3f9d39a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jan 2020 10:11:35 +0300
+Subject: rt_cpu_seq_next should increase position index
+
+From: Vasily Averin <vvs@virtuozzo.com>
+
+[ Upstream commit a3ea86739f1bc7e121d921842f0f4a8ab1af94d9 ]
+
+if seq_file .next fuction does not change position index,
+read after some lseek can generate unexpected output.
+
+https://bugzilla.kernel.org/show_bug.cgi?id=206283
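+
+A userspace analogue of the .next contract (not the seq_file code
+itself): the iterator advances the position index even when it has
+nothing left to return, so a caller that seeks and retries never gets
+stuck at the same offset.
+
+  #include <stdio.h>
+
+  static int stats[4] = { 10, 20, 30, 40 };
+
+  /* Analogue of a seq_file .next callback: return the item at *pos and
+   * advance *pos; keep advancing even at the end of the data. */
+  static int *stat_next(long *pos)
+  {
+      int *item = NULL;
+
+      if (*pos < 4)
+          item = &stats[*pos];
+      (*pos)++;               /* the one-line fix: always move the index */
+      return item;
+  }
+
+  int main(void)
+  {
+      long pos = 0;
+      int *item;
+
+      while ((item = stat_next(&pos)))
+          printf("pos=%ld value=%d\n", pos, *item);
+      printf("final pos=%ld (moved past the end)\n", pos);
+      return 0;
+  }
+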
+Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/ipv4/route.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/ipv4/route.c b/net/ipv4/route.c
+index 84de87b7eedcd..3db428242b22d 100644
+--- a/net/ipv4/route.c
++++ b/net/ipv4/route.c
+@@ -274,6 +274,7 @@ static void *rt_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+ *pos = cpu+1;
+ return &per_cpu(rt_cache_stat, cpu);
+ }
++ (*pos)++;
+ return NULL;
+
+ }
+--
+2.25.1
+
--- /dev/null
+From 074612cfb3cfdab349dfe181557405e1fd987abc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Mar 2020 08:34:01 +0100
+Subject: rtc: ds1374: fix possible race condition
+
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+[ Upstream commit c11af8131a4e7ba1960faed731ee7e84c2c13c94 ]
+
+The RTC IRQ is requested before the struct rtc_device is allocated,
+which may lead to a NULL pointer dereference in the IRQ handler.
+
+To fix this issue, allocate the rtc_device struct before requesting
+the RTC IRQ using devm_rtc_allocate_device(), and use rtc_register_device()
+to register the RTC device.
+
+Link: https://lore.kernel.org/r/20200306073404.56921-1-alexandre.belloni@bootlin.com
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/rtc/rtc-ds1374.c | 15 +++++++++------
+ 1 file changed, 9 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/rtc/rtc-ds1374.c b/drivers/rtc/rtc-ds1374.c
+index 38a2e9e684df4..77a106e90124b 100644
+--- a/drivers/rtc/rtc-ds1374.c
++++ b/drivers/rtc/rtc-ds1374.c
+@@ -620,6 +620,10 @@ static int ds1374_probe(struct i2c_client *client,
+ if (!ds1374)
+ return -ENOMEM;
+
++ ds1374->rtc = devm_rtc_allocate_device(&client->dev);
++ if (IS_ERR(ds1374->rtc))
++ return PTR_ERR(ds1374->rtc);
++
+ ds1374->client = client;
+ i2c_set_clientdata(client, ds1374);
+
+@@ -641,12 +645,11 @@ static int ds1374_probe(struct i2c_client *client,
+ device_set_wakeup_capable(&client->dev, 1);
+ }
+
+- ds1374->rtc = devm_rtc_device_register(&client->dev, client->name,
+- &ds1374_rtc_ops, THIS_MODULE);
+- if (IS_ERR(ds1374->rtc)) {
+- dev_err(&client->dev, "unable to register the class device\n");
+- return PTR_ERR(ds1374->rtc);
+- }
++ ds1374->rtc->ops = &ds1374_rtc_ops;
++
++ ret = rtc_register_device(ds1374->rtc);
++ if (ret)
++ return ret;
+
+ #ifdef CONFIG_RTC_DRV_DS1374_WDT
+ save_client = client;
+--
+2.25.1
+
--- /dev/null
+From 4f38e773197e92a224d31ffe88f6f037b0a69563 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 6 Mar 2020 02:01:44 +0100
+Subject: rtc: sa1100: fix possible race condition
+
+From: Alexandre Belloni <alexandre.belloni@bootlin.com>
+
+[ Upstream commit f2997775b111c6d660c32a18d5d44d37cb7361b1 ]
+
+Both RTC IRQs are requested before the struct rtc_device is allocated,
+which may lead to a NULL pointer dereference in the IRQ handler.
+
+To fix this issue, allocate the rtc_device struct before requesting
+the IRQs using devm_rtc_allocate_device(), and use rtc_register_device()
+to register the RTC device.
+
+Link: https://lore.kernel.org/r/20200306010146.39762-1-alexandre.belloni@bootlin.com
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/rtc/rtc-sa1100.c | 18 ++++++++++--------
+ 1 file changed, 10 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
+index 304d905cb23fd..56f625371735f 100644
+--- a/drivers/rtc/rtc-sa1100.c
++++ b/drivers/rtc/rtc-sa1100.c
+@@ -186,7 +186,6 @@ static const struct rtc_class_ops sa1100_rtc_ops = {
+
+ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
+ {
+- struct rtc_device *rtc;
+ int ret;
+
+ spin_lock_init(&info->lock);
+@@ -215,15 +214,14 @@ int sa1100_rtc_init(struct platform_device *pdev, struct sa1100_rtc *info)
+ writel_relaxed(0, info->rcnr);
+ }
+
+- rtc = devm_rtc_device_register(&pdev->dev, pdev->name, &sa1100_rtc_ops,
+- THIS_MODULE);
+- if (IS_ERR(rtc)) {
++ info->rtc->ops = &sa1100_rtc_ops;
++ info->rtc->max_user_freq = RTC_FREQ;
++
++ ret = rtc_register_device(info->rtc);
++ if (ret) {
+ clk_disable_unprepare(info->clk);
+- return PTR_ERR(rtc);
++ return ret;
+ }
+- info->rtc = rtc;
+-
+- rtc->max_user_freq = RTC_FREQ;
+
+ /* Fix for a nasty initialization problem the in SA11xx RTSR register.
+ * See also the comments in sa1100_rtc_interrupt().
+@@ -272,6 +270,10 @@ static int sa1100_rtc_probe(struct platform_device *pdev)
+ info->irq_1hz = irq_1hz;
+ info->irq_alarm = irq_alarm;
+
++ info->rtc = devm_rtc_allocate_device(&pdev->dev);
++ if (IS_ERR(info->rtc))
++ return PTR_ERR(info->rtc);
++
+ ret = devm_request_irq(&pdev->dev, irq_1hz, sa1100_rtc_interrupt, 0,
+ "rtc 1Hz", &pdev->dev);
+ if (ret) {
+--
+2.25.1
+
--- /dev/null
+From d67a3370d310665a438d3fdede4f39e3f931e706 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Dec 2019 14:56:13 +0100
+Subject: s390/cpum_sf: Use kzalloc and minor changes
+
+From: Thomas Richter <tmricht@linux.ibm.com>
+
+[ Upstream commit 32dab6828c42f087439d3e2617dc7283546bd8f7 ]
+
+Use kzalloc() to allocate the auxiliary buffer structure initialized
+with all zeroes, to avoid random values in the trace output.
+
+Avoid double access to the SDB hardware flags.
+
+Signed-off-by: Thomas Richter <tmricht@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/perf_cpum_sf.c | 9 ++++-----
+ 1 file changed, 4 insertions(+), 5 deletions(-)
+
+diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
+index 74a296cea21cc..0e6d01225a670 100644
+--- a/arch/s390/kernel/perf_cpum_sf.c
++++ b/arch/s390/kernel/perf_cpum_sf.c
+@@ -1377,8 +1377,8 @@ static int aux_output_begin(struct perf_output_handle *handle,
+ idx = aux->empty_mark + 1;
+ for (i = 0; i < range_scan; i++, idx++) {
+ te = aux_sdb_trailer(aux, idx);
+- te->flags = te->flags & ~SDB_TE_BUFFER_FULL_MASK;
+- te->flags = te->flags & ~SDB_TE_ALERT_REQ_MASK;
++ te->flags &= ~(SDB_TE_BUFFER_FULL_MASK |
++ SDB_TE_ALERT_REQ_MASK);
+ te->overflow = 0;
+ }
+ /* Save the position of empty SDBs */
+@@ -1425,8 +1425,7 @@ static bool aux_set_alert(struct aux_buffer *aux, unsigned long alert_index,
+ te = aux_sdb_trailer(aux, alert_index);
+ do {
+ orig_flags = te->flags;
+- orig_overflow = te->overflow;
+- *overflow = orig_overflow;
++ *overflow = orig_overflow = te->overflow;
+ if (orig_flags & SDB_TE_BUFFER_FULL_MASK) {
+ /*
+ * SDB is already set by hardware.
+@@ -1660,7 +1659,7 @@ static void *aux_buffer_setup(struct perf_event *event, void **pages,
+ }
+
+ /* Allocate aux_buffer struct for the event */
+- aux = kmalloc(sizeof(struct aux_buffer), GFP_KERNEL);
++ aux = kzalloc(sizeof(struct aux_buffer), GFP_KERNEL);
+ if (!aux)
+ goto no_aux;
+ sfb = &aux->sfb;
+--
+2.25.1
+
--- /dev/null
+From 6273854f81cd91e5729b98687a89c449f43bbe82 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 9 Sep 2020 14:27:25 +0200
+Subject: s390/init: add missing __init annotations
+
+From: Ilya Leoshkevich <iii@linux.ibm.com>
+
+[ Upstream commit fcb2b70cdb194157678fb1a75f9ff499aeba3d2a ]
+
+Add __init to reserve_memory_end, reserve_oldmem and remove_oldmem.
+Sometimes these functions are not inlined, and then the build
+complains about a section mismatch.
+
+Signed-off-by: Ilya Leoshkevich <iii@linux.ibm.com>
+Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/s390/kernel/setup.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
+index 5f85e0dfa66d1..4bda9055daefa 100644
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -537,7 +537,7 @@ static struct notifier_block kdump_mem_nb = {
+ /*
+ * Make sure that the area behind memory_end is protected
+ */
+-static void reserve_memory_end(void)
++static void __init reserve_memory_end(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (ipl_info.type == IPL_TYPE_FCP_DUMP &&
+@@ -555,7 +555,7 @@ static void reserve_memory_end(void)
+ /*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+-static void reserve_oldmem(void)
++static void __init reserve_oldmem(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (OLDMEM_BASE)
+@@ -567,7 +567,7 @@ static void reserve_oldmem(void)
+ /*
+ * Make sure that oldmem, where the dump is stored, is protected
+ */
+-static void remove_oldmem(void)
++static void __init remove_oldmem(void)
+ {
+ #ifdef CONFIG_CRASH_DUMP
+ if (OLDMEM_BASE)
+--
+2.25.1
+
--- /dev/null
+From 9a08a4089e52ea471cc08a398ea198ad752ab74e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Feb 2020 16:29:31 -0800
+Subject: scsi: aacraid: Disabling TM path and only processing IOP reset
+
+From: Sagar Biradar <Sagar.Biradar@microchip.com>
+
+[ Upstream commit bef18d308a2215eff8c3411a23d7f34604ce56c3 ]
+
+Fix the occasional adapter panic when sg_reset is issued with the -d, -t,
+-b and -H flags. Remove the command type HBA_IU_TYPE_SCSI_TM_REQ from
+aac_hba_send() since iu_type, request_id and fib_flags are not populated
+for it. Device and target reset handlers are made to send TMF commands
+only when reset_state is 0.
+
+Link: https://lore.kernel.org/r/1581553771-25796-1-git-send-email-Sagar.Biradar@microchip.com
+Reviewed-by: Sagar Biradar <Sagar.Biradar@microchip.com>
+Signed-off-by: Sagar Biradar <Sagar.Biradar@microchip.com>
+Signed-off-by: Balsundar P <balsundar.p@microsemi.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/aacraid/commsup.c | 2 +-
+ drivers/scsi/aacraid/linit.c | 34 +++++++++++++++++++++++++---------
+ 2 files changed, 26 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
+index b7588de4484e5..4cb6ee6e1212e 100644
+--- a/drivers/scsi/aacraid/commsup.c
++++ b/drivers/scsi/aacraid/commsup.c
+@@ -743,7 +743,7 @@ int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
+ hbacmd->request_id =
+ cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
+ fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
+- } else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
++ } else
+ return -EINVAL;
+
+
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 1046947064a0b..0142547aaadd2 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -736,7 +736,11 @@ static int aac_eh_abort(struct scsi_cmnd* cmd)
+ status = aac_hba_send(HBA_IU_TYPE_SCSI_TM_REQ, fib,
+ (fib_callback) aac_hba_callback,
+ (void *) cmd);
+-
++ if (status != -EINPROGRESS) {
++ aac_fib_complete(fib);
++ aac_fib_free(fib);
++ return ret;
++ }
+ /* Wait up to 15 secs for completion */
+ for (count = 0; count < 15; ++count) {
+ if (cmd->SCp.sent_command) {
+@@ -915,11 +919,11 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
+
+ info = &aac->hba_map[bus][cid];
+
+- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
+- info->reset_state > 0)
++ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
++ !(info->reset_state > 0)))
+ return FAILED;
+
+- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
++ pr_err("%s: Host device reset request. SCSI hang ?\n",
+ AAC_DRIVERNAME);
+
+ fib = aac_fib_alloc(aac);
+@@ -934,7 +938,12 @@ static int aac_eh_dev_reset(struct scsi_cmnd *cmd)
+ status = aac_hba_send(command, fib,
+ (fib_callback) aac_tmf_callback,
+ (void *) info);
+-
++ if (status != -EINPROGRESS) {
++ info->reset_state = 0;
++ aac_fib_complete(fib);
++ aac_fib_free(fib);
++ return ret;
++ }
+ /* Wait up to 15 seconds for completion */
+ for (count = 0; count < 15; ++count) {
+ if (info->reset_state == 0) {
+@@ -973,11 +982,11 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
+
+ info = &aac->hba_map[bus][cid];
+
+- if (info->devtype != AAC_DEVTYPE_NATIVE_RAW &&
+- info->reset_state > 0)
++ if (!(info->devtype == AAC_DEVTYPE_NATIVE_RAW &&
++ !(info->reset_state > 0)))
+ return FAILED;
+
+- pr_err("%s: Host adapter reset request. SCSI hang ?\n",
++ pr_err("%s: Host target reset request. SCSI hang ?\n",
+ AAC_DRIVERNAME);
+
+ fib = aac_fib_alloc(aac);
+@@ -994,6 +1003,13 @@ static int aac_eh_target_reset(struct scsi_cmnd *cmd)
+ (fib_callback) aac_tmf_callback,
+ (void *) info);
+
++ if (status != -EINPROGRESS) {
++ info->reset_state = 0;
++ aac_fib_complete(fib);
++ aac_fib_free(fib);
++ return ret;
++ }
++
+ /* Wait up to 15 seconds for completion */
+ for (count = 0; count < 15; ++count) {
+ if (info->reset_state <= 0) {
+@@ -1046,7 +1062,7 @@ static int aac_eh_bus_reset(struct scsi_cmnd* cmd)
+ }
+ }
+
+- pr_err("%s: Host adapter reset request. SCSI hang ?\n", AAC_DRIVERNAME);
++ pr_err("%s: Host bus reset request. SCSI hang ?\n", AAC_DRIVERNAME);
+
+ /*
+ * Check the health of the controller
+--
+2.25.1
+
--- /dev/null
+From cb27a943983c7838681ddf82aca7a6a3014a561f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 12 Apr 2020 11:40:39 +0200
+Subject: scsi: aacraid: Fix error handling paths in aac_probe_one()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit f7854c382240c1686900b2f098b36430c6f5047e ]
+
+If 'scsi_host_alloc()' or 'kcalloc()' fails, 'error' is known to be 0. Set
+it explicitly to -ENOMEM before branching to the error handling path.
+
+While at it, remove 2 useless assignments to 'error'. These values are
+overwritten a few lines later.
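+
+A toy probe() in plain C shows the same goto-unwind convention (the
+allocations are stand-ins, not the aacraid ones): every failure branch
+sets a meaningful negative code before jumping to cleanup, so 0 is never
+returned by accident.
+
+  #include <errno.h>
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  static int probe(void)
+  {
+      int error;
+      char *host, *fibs;
+
+      host = malloc(64);
+      if (!host) {
+          error = -ENOMEM;        /* would otherwise be returned as 0 */
+          goto out;
+      }
+
+      fibs = calloc(32, sizeof(*fibs));
+      if (!fibs) {
+          error = -ENOMEM;
+          goto out_free_host;
+      }
+
+      printf("probe succeeded\n");
+      free(fibs);
+      free(host);
+      return 0;
+
+  out_free_host:
+      free(host);
+  out:
+      return error;
+  }
+
+  int main(void)
+  {
+      return probe() ? 1 : 0;
+  }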
+
+Link: https://lore.kernel.org/r/20200412094039.8822-1-christophe.jaillet@wanadoo.fr
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/aacraid/linit.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
+index 0142547aaadd2..eecffc03084c0 100644
+--- a/drivers/scsi/aacraid/linit.c
++++ b/drivers/scsi/aacraid/linit.c
+@@ -1620,7 +1620,7 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ struct Scsi_Host *shost;
+ struct aac_dev *aac;
+ struct list_head *insert = &aac_devices;
+- int error = -ENODEV;
++ int error;
+ int unique_id = 0;
+ u64 dmamask;
+ int mask_bits = 0;
+@@ -1645,7 +1645,6 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ error = pci_enable_device(pdev);
+ if (error)
+ goto out;
+- error = -ENODEV;
+
+ if (!(aac_drivers[index].quirks & AAC_QUIRK_SRC)) {
+ error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+@@ -1677,8 +1676,10 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ pci_set_master(pdev);
+
+ shost = scsi_host_alloc(&aac_driver_template, sizeof(struct aac_dev));
+- if (!shost)
++ if (!shost) {
++ error = -ENOMEM;
+ goto out_disable_pdev;
++ }
+
+ shost->irq = pdev->irq;
+ shost->unique_id = unique_id;
+@@ -1703,8 +1704,11 @@ static int aac_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
+ aac->fibs = kcalloc(shost->can_queue + AAC_NUM_MGT_FIB,
+ sizeof(struct fib),
+ GFP_KERNEL);
+- if (!aac->fibs)
++ if (!aac->fibs) {
++ error = -ENOMEM;
+ goto out_free_host;
++ }
++
+ spin_lock_init(&aac->fib_lock);
+
+ mutex_init(&aac->ioctl_mutex);
+--
+2.25.1
+
--- /dev/null
+From d520cfad29079b3ee78425913f35a98fcdcf3af6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 15 Oct 2019 11:51:58 +0530
+Subject: scsi: aacraid: fix illegal IO beyond last LBA
+
+From: Balsundar P <balsundar.p@microsemi.com>
+
+[ Upstream commit c86fbe484c10b2cd1e770770db2d6b2c88801c1d ]
+
+The driver fails to handle data read or written beyond the device-reported
+LBA, which triggers a kernel panic.
+
+Link: https://lore.kernel.org/r/1571120524-6037-2-git-send-email-balsundar.p@microsemi.com
+Signed-off-by: Balsundar P <balsundar.p@microsemi.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/aacraid/aachba.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/scsi/aacraid/aachba.c b/drivers/scsi/aacraid/aachba.c
+index 6e356325d8d98..54717fb84a54c 100644
+--- a/drivers/scsi/aacraid/aachba.c
++++ b/drivers/scsi/aacraid/aachba.c
+@@ -2481,13 +2481,13 @@ static int aac_read(struct scsi_cmnd * scsicmd)
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
++ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+- return 1;
++ return 0;
+ }
+
+ dprintk((KERN_DEBUG "aac_read[cpu %d]: lba = %llu, t = %ld.\n",
+@@ -2573,13 +2573,13 @@ static int aac_write(struct scsi_cmnd * scsicmd)
+ scsicmd->result = DID_OK << 16 | COMMAND_COMPLETE << 8 |
+ SAM_STAT_CHECK_CONDITION;
+ set_sense(&dev->fsa_dev[cid].sense_data,
+- HARDWARE_ERROR, SENCODE_INTERNAL_TARGET_FAILURE,
++ ILLEGAL_REQUEST, SENCODE_LBA_OUT_OF_RANGE,
+ ASENCODE_INTERNAL_TARGET_FAILURE, 0, 0);
+ memcpy(scsicmd->sense_buffer, &dev->fsa_dev[cid].sense_data,
+ min_t(size_t, sizeof(dev->fsa_dev[cid].sense_data),
+ SCSI_SENSE_BUFFERSIZE));
+ scsicmd->scsi_done(scsicmd);
+- return 1;
++ return 0;
+ }
+
+ dprintk((KERN_DEBUG "aac_write[cpu %d]: lba = %llu, t = %ld.\n",
+--
+2.25.1
+
--- /dev/null
+From 56189611d2712f1ba8aea4a45978a36bd4d4b8d6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2020 14:18:55 +0000
+Subject: scsi: cxlflash: Fix error return code in cxlflash_probe()
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+[ Upstream commit d0b1e4a638d670a09f42017a3e567dc846931ba8 ]
+
+Return the negative error code -ENOMEM from the create_afu error handling
+case instead of 0, as done elsewhere in this function.
+
+Link: https://lore.kernel.org/r/20200428141855.88704-1-weiyongjun1@huawei.com
+Acked-by: Matthew R. Ochs <mrochs@linux.ibm.com>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/cxlflash/main.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/scsi/cxlflash/main.c b/drivers/scsi/cxlflash/main.c
+index f987c40c47a13..443813feaef47 100644
+--- a/drivers/scsi/cxlflash/main.c
++++ b/drivers/scsi/cxlflash/main.c
+@@ -3749,6 +3749,7 @@ static int cxlflash_probe(struct pci_dev *pdev,
+ cfg->afu_cookie = cfg->ops->create_afu(pdev);
+ if (unlikely(!cfg->afu_cookie)) {
+ dev_err(dev, "%s: create_afu failed\n", __func__);
++ rc = -ENOMEM;
+ goto out_remove;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 770a523fe4c23e102ad34345a708f4673e7d8cec Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Nov 2019 23:26:22 +0800
+Subject: scsi: fnic: fix use after free
+
+From: Pan Bian <bianpan2016@163.com>
+
+[ Upstream commit ec990306f77fd4c58c3b27cc3b3c53032d6e6670 ]
+
+The memory chunk io_req is released by mempool_free(). Accessing
+io_req->start_time afterwards would result in a use-after-free bug. The
+local variable start_time is a backup of the timestamp, so use start_time
+here to avoid the use after free.
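+
+The pattern in miniature (invented structure, not the fnic code): copy
+the field that is still needed into a local before the object is
+released, and only ever use the copy afterwards.
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct io_req {
+      long start_time;            /* jiffies-like timestamp */
+  };
+
+  int main(void)
+  {
+      struct io_req *io_req = calloc(1, sizeof(*io_req));
+      long start_time, duration;
+
+      if (!io_req)
+          return 1;
+      io_req->start_time = 100;
+
+      /* Back up the timestamp while io_req is still valid ... */
+      start_time = io_req->start_time;
+      free(io_req);               /* ... after this, io_req must not be read */
+
+      duration = 250 - start_time;     /* use the copy, not io_req */
+      printf("io duration: %ld\n", duration);
+      return 0;
+  }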
+
+Link: https://lore.kernel.org/r/1572881182-37664-1-git-send-email-bianpan2016@163.com
+Signed-off-by: Pan Bian <bianpan2016@163.com>
+Reviewed-by: Satish Kharat <satishkh@cisco.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/fnic/fnic_scsi.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/fnic/fnic_scsi.c b/drivers/scsi/fnic/fnic_scsi.c
+index 73ffc16ec0225..b521fc7650cb9 100644
+--- a/drivers/scsi/fnic/fnic_scsi.c
++++ b/drivers/scsi/fnic/fnic_scsi.c
+@@ -1034,7 +1034,8 @@ static void fnic_fcpio_icmnd_cmpl_handler(struct fnic *fnic,
+ atomic64_inc(&fnic_stats->io_stats.io_completions);
+
+
+- io_duration_time = jiffies_to_msecs(jiffies) - jiffies_to_msecs(io_req->start_time);
++ io_duration_time = jiffies_to_msecs(jiffies) -
++ jiffies_to_msecs(start_time);
+
+ if(io_duration_time <= 10)
+ atomic64_inc(&fnic_stats->io_stats.io_btw_0_to_10_msec);
+--
+2.25.1
+
--- /dev/null
+From 8d783963324fc4c23b22adbcbb8e50dd6398c452 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Mar 2020 13:26:18 -0500
+Subject: scsi: hpsa: correct race condition in offload enabled
+
+From: Don Brace <don.brace@microsemi.com>
+
+[ Upstream commit 3e16e83a62edac7617bfd8dbb4e55d04ff6adbe1 ]
+
+Correct a race condition where ioaccel is re-enabled before the raid_map is
+updated. For RAID_1, RAID_1ADM, and RAID 5/6 this hits a BUG_ON, which
+is bad.
+
+ - Change event thread to disable ioaccel only. Send all requests down the
+ RAID path instead.
+
+ - Have rescan thread handle offload_enable.
+
+ - Since there is only one rescan allowed at a time, turning
+ offload_enabled on/off should not be racy. Each handler queues up a
+ rescan if one is already in progress.
+
+ - In the timing diagram below, offload_enabled is initially off due to a
+   change (transformation: splitmirror/remirror), ...
+
+ otbe = offload_to_be_enabled
+ oe = offload_enabled
+
+ Time Event Rescan Completion Request
+ Worker Worker Thread Thread
+ ---- ------ ------ ---------- -------
+ T0 | | + UA |
+ T1 | + rescan started | 0x3f |
+ T2 + Event | | 0x0e |
+ T3 + Ack msg | | |
+ T4 | + if (!dev[i]->oe && | |
+ T5 | | dev[i]->otbe) | |
+ T6 | | get_raid_map | |
+ T7 + otbe = 1 | | |
+ T8 | | | |
+ T9 | + oe = otbe | |
+ T10 | | | + ioaccel request
+ T11 * BUG_ON
+
+ T0 - I/O completion with UA 0x3f 0x0e sets rescan flag.
+ T1 - rescan worker thread starts a rescan.
+ T2 - event comes in
+ T3 - event thread starts and issues "Acknowledge" message
+ ...
+ T6 - rescan thread has bypassed code to reload new raid map.
+ ...
+ T7 - event thread runs and sets offload_to_be_enabled
+ ...
+ T9 - rescan thread turns on offload_enabled.
+ T10- request comes in and goes down ioaccel path.
+ T11- BUG_ON.
+
+ - After the patch is applied, ioaccel_enabled can only be re-enabled in
+ the re-scan thread.
+
+Link: https://lore.kernel.org/r/158472877894.14200.7077843399036368335.stgit@brunhilda
+Reviewed-by: Scott Teel <scott.teel@microsemi.com>
+Reviewed-by: Matt Perricone <matt.perricone@microsemi.com>
+Reviewed-by: Scott Benesh <scott.benesh@microsemi.com>
+Signed-off-by: Don Brace <don.brace@microsemi.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/hpsa.c | 80 ++++++++++++++++++++++++++++++++-------------
+ 1 file changed, 57 insertions(+), 23 deletions(-)
+
+diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
+index f570b8c5d857c..11de2198bb87d 100644
+--- a/drivers/scsi/hpsa.c
++++ b/drivers/scsi/hpsa.c
+@@ -507,6 +507,12 @@ static ssize_t host_store_rescan(struct device *dev,
+ return count;
+ }
+
++static void hpsa_turn_off_ioaccel_for_device(struct hpsa_scsi_dev_t *device)
++{
++ device->offload_enabled = 0;
++ device->offload_to_be_enabled = 0;
++}
++
+ static ssize_t host_show_firmware_revision(struct device *dev,
+ struct device_attribute *attr, char *buf)
+ {
+@@ -1743,8 +1749,7 @@ static void hpsa_figure_phys_disk_ptrs(struct ctlr_info *h,
+ __func__,
+ h->scsi_host->host_no, logical_drive->bus,
+ logical_drive->target, logical_drive->lun);
+- logical_drive->offload_enabled = 0;
+- logical_drive->offload_to_be_enabled = 0;
++ hpsa_turn_off_ioaccel_for_device(logical_drive);
+ logical_drive->queue_depth = 8;
+ }
+ }
+@@ -2496,8 +2501,7 @@ static void process_ioaccel2_completion(struct ctlr_info *h,
+ IOACCEL2_SERV_RESPONSE_FAILURE) {
+ if (c2->error_data.status ==
+ IOACCEL2_STATUS_SR_IOACCEL_DISABLED) {
+- dev->offload_enabled = 0;
+- dev->offload_to_be_enabled = 0;
++ hpsa_turn_off_ioaccel_for_device(dev);
+ }
+
+ return hpsa_retry_cmd(h, c);
+@@ -3676,10 +3680,17 @@ static void hpsa_get_ioaccel_status(struct ctlr_info *h,
+ this_device->offload_config =
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+ if (this_device->offload_config) {
+- this_device->offload_to_be_enabled =
++ bool offload_enabled =
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+- if (hpsa_get_raid_map(h, scsi3addr, this_device))
+- this_device->offload_to_be_enabled = 0;
++ /*
++ * Check to see if offload can be enabled.
++ */
++ if (offload_enabled) {
++ rc = hpsa_get_raid_map(h, scsi3addr, this_device);
++ if (rc) /* could not load raid_map */
++ goto out;
++ this_device->offload_to_be_enabled = 1;
++ }
+ }
+
+ out:
+@@ -3998,8 +4009,7 @@ static int hpsa_update_device_info(struct ctlr_info *h,
+ } else {
+ this_device->raid_level = RAID_UNKNOWN;
+ this_device->offload_config = 0;
+- this_device->offload_enabled = 0;
+- this_device->offload_to_be_enabled = 0;
++ hpsa_turn_off_ioaccel_for_device(this_device);
+ this_device->hba_ioaccel_enabled = 0;
+ this_device->volume_offline = 0;
+ this_device->queue_depth = h->nr_cmds;
+@@ -5213,8 +5223,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
+ /* Handles load balance across RAID 1 members.
+ * (2-drive R1 and R10 with even # of drives.)
+ * Appropriate for SSDs, not optimal for HDDs
++ * Ensure we have the correct raid_map.
+ */
+- BUG_ON(le16_to_cpu(map->layout_map_count) != 2);
++ if (le16_to_cpu(map->layout_map_count) != 2) {
++ hpsa_turn_off_ioaccel_for_device(dev);
++ return IO_ACCEL_INELIGIBLE;
++ }
+ if (dev->offload_to_mirror)
+ map_index += le16_to_cpu(map->data_disks_per_row);
+ dev->offload_to_mirror = !dev->offload_to_mirror;
+@@ -5222,8 +5236,12 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
+ case HPSA_RAID_ADM:
+ /* Handles N-way mirrors (R1-ADM)
+ * and R10 with # of drives divisible by 3.)
++ * Ensure we have the correct raid_map.
+ */
+- BUG_ON(le16_to_cpu(map->layout_map_count) != 3);
++ if (le16_to_cpu(map->layout_map_count) != 3) {
++ hpsa_turn_off_ioaccel_for_device(dev);
++ return IO_ACCEL_INELIGIBLE;
++ }
+
+ offload_to_mirror = dev->offload_to_mirror;
+ raid_map_helper(map, offload_to_mirror,
+@@ -5248,7 +5266,10 @@ static int hpsa_scsi_ioaccel_raid_map(struct ctlr_info *h,
+ r5or6_blocks_per_row =
+ le16_to_cpu(map->strip_size) *
+ le16_to_cpu(map->data_disks_per_row);
+- BUG_ON(r5or6_blocks_per_row == 0);
++ if (r5or6_blocks_per_row == 0) {
++ hpsa_turn_off_ioaccel_for_device(dev);
++ return IO_ACCEL_INELIGIBLE;
++ }
+ stripesize = r5or6_blocks_per_row *
+ le16_to_cpu(map->layout_map_count);
+ #if BITS_PER_LONG == 32
+@@ -8218,7 +8239,7 @@ static int detect_controller_lockup(struct ctlr_info *h)
+ *
+ * Called from monitor controller worker (hpsa_event_monitor_worker)
+ *
+- * A Volume (or Volumes that comprise an Array set may be undergoing a
++ * A Volume (or Volumes that comprise an Array set) may be undergoing a
+ * transformation, so we will be turning off ioaccel for all volumes that
+ * make up the Array.
+ */
+@@ -8241,6 +8262,9 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
+ * Run through current device list used during I/O requests.
+ */
+ for (i = 0; i < h->ndevices; i++) {
++ int offload_to_be_enabled = 0;
++ int offload_config = 0;
++
+ device = h->dev[i];
+
+ if (!device)
+@@ -8258,25 +8282,35 @@ static void hpsa_set_ioaccel_status(struct ctlr_info *h)
+ continue;
+
+ ioaccel_status = buf[IOACCEL_STATUS_BYTE];
+- device->offload_config =
++
++ /*
++ * Check if offload is still configured on
++ */
++ offload_config =
+ !!(ioaccel_status & OFFLOAD_CONFIGURED_BIT);
+- if (device->offload_config)
+- device->offload_to_be_enabled =
++ /*
++ * If offload is configured on, check to see if ioaccel
++ * needs to be enabled.
++ */
++ if (offload_config)
++ offload_to_be_enabled =
+ !!(ioaccel_status & OFFLOAD_ENABLED_BIT);
+
++ /*
++ * If ioaccel is to be re-enabled, re-enable later during the
++ * scan operation so the driver can get a fresh raidmap
++ * before turning ioaccel back on.
++ */
++ if (offload_to_be_enabled)
++ continue;
++
+ /*
+ * Immediately turn off ioaccel for any volume the
+ * controller tells us to. Some of the reasons could be:
+ * transformation - change to the LVs of an Array.
+ * degraded volume - component failure
+- *
+- * If ioaccel is to be re-enabled, re-enable later during the
+- * scan operation so the driver can get a fresh raidmap
+- * before turning ioaccel back on.
+- *
+ */
+- if (!device->offload_to_be_enabled)
+- device->offload_enabled = 0;
++ hpsa_turn_off_ioaccel_for_device(device);
+ }
+
+ kfree(buf);
+--
+2.25.1
+
--- /dev/null
+From 03dcedfbeaa19ddfb27134cd93f03d10b571be0d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 22 Jun 2020 03:12:11 -0700
+Subject: scsi: libfc: Handling of extra kref
+
+From: Javed Hasan <jhasan@marvell.com>
+
+[ Upstream commit 71f2bf85e90d938d4a9ef9dd9bfa8d9b0b6a03f7 ]
+
+Handle the extra kref taken by the lookup when rdata is already present
+in the list.
+
+This issue was leading to a memory leak. Trace from the KMEMLEAK tool:
+
+ unreferenced object 0xffff8888259e8780 (size 512):
+ comm "kworker/2:1", pid 182614, jiffies 4433237386 (age 113021.971s)
+ hex dump (first 32 bytes):
+ 58 0a ec cf 83 88 ff ff 00 00 00 00 00 00 00 00
+ 01 00 00 00 08 00 00 00 13 7d f0 1e 0e 00 00 10
+ backtrace:
+ [<000000006b25760f>] fc_rport_recv_req+0x3c6/0x18f0 [libfc]
+ [<00000000f208d994>] fc_lport_recv_els_req+0x120/0x8a0 [libfc]
+ [<00000000a9c437b8>] fc_lport_recv+0xb9/0x130 [libfc]
+ [<00000000ad5be37b>] qedf_ll2_process_skb+0x73d/0xad0 [qedf]
+ [<00000000e0eb6893>] process_one_work+0x382/0x6c0
+ [<000000002dfd9e21>] worker_thread+0x57/0x5c0
+ [<00000000b648204f>] kthread+0x1a0/0x1c0
+ [<0000000072f5ab20>] ret_from_fork+0x35/0x40
+ [<000000001d5c05d8>] 0xffffffffffffffff
+
+Below is the log sequence which leads to the memory leak. Here we get a
+nested "Received PLOGI request" for the same port, and this request leads
+to fc_rport_create() being called twice for the same rport.
+
+ kernel: host1: rport fffce5: Received PLOGI request
+ kernel: host1: rport fffce5: Received PLOGI in INIT state
+ kernel: host1: rport fffce5: Port is Ready
+ kernel: host1: rport fffce5: Received PRLI request while in state Ready
+ kernel: host1: rport fffce5: PRLI rspp type 8 active 1 passive 0
+ kernel: host1: rport fffce5: Received LOGO request while in state Ready
+ kernel: host1: rport fffce5: Delete port
+ kernel: host1: rport fffce5: Received PLOGI request
+ kernel: host1: rport fffce5: Received PLOGI in state Delete - send busy
+
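+A small standalone sketch of the reference rule (invented refcounting,
+not the libfc kref): when the lookup finds an existing object it returns
+it with an extra reference, so the create path must drop that reference
+before handing the object back.
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  struct rdata {
+      int refcount;
+  };
+
+  /* Lookup-style helper: on success it returns the object with an extra
+   * reference, like fc_rport_lookup() taking a kref. */
+  static struct rdata *lookup(struct rdata *existing)
+  {
+      if (existing)
+          existing->refcount++;
+      return existing;
+  }
+
+  static void put(struct rdata *r)
+  {
+      if (--r->refcount == 0) {
+          free(r);
+          printf("object freed\n");
+      }
+  }
+
+  /* create(): if the object already exists, drop the reference the lookup
+   * took, so the list keeps exactly one reference either way. */
+  static struct rdata *create(struct rdata **list)
+  {
+      struct rdata *r = lookup(*list);
+
+      if (r) {
+          put(r);               /* the fix: release the extra reference */
+          return r;
+      }
+      r = calloc(1, sizeof(*r));
+      r->refcount = 1;
+      *list = r;
+      return r;
+  }
+
+  int main(void)
+  {
+      struct rdata *list = NULL;
+      struct rdata *a = create(&list);   /* allocates, refcount 1 */
+      struct rdata *b = create(&list);   /* finds existing, still 1 */
+
+      printf("same object: %d, refcount: %d\n", a == b, b->refcount);
+      put(b);                            /* final put frees the object */
+      return 0;
+  }
+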
+Link: https://lore.kernel.org/r/20200622101212.3922-2-jhasan@marvell.com
+Reviewed-by: Girish Basrur <gbasrur@marvell.com>
+Reviewed-by: Saurav Kashyap <skashyap@marvell.com>
+Reviewed-by: Shyam Sundar <ssundar@marvell.com>
+Signed-off-by: Javed Hasan <jhasan@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/libfc/fc_rport.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
+index 90a748551ede5..f39d2d62b002f 100644
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -145,8 +145,10 @@ struct fc_rport_priv *fc_rport_create(struct fc_lport *lport, u32 port_id)
+ lockdep_assert_held(&lport->disc.disc_mutex);
+
+ rdata = fc_rport_lookup(lport, port_id);
+- if (rdata)
++ if (rdata) {
++ kref_put(&rdata->kref, fc_rport_destroy);
+ return rdata;
++ }
+
+ if (lport->rport_priv_size > 0)
+ rport_priv_size = lport->rport_priv_size;
+--
+2.25.1
+
--- /dev/null
+From 2d588689106934aab2ad191c283f34c6042d415c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jun 2020 02:49:59 -0700
+Subject: scsi: libfc: Skip additional kref updating work event
+
+From: Javed Hasan <jhasan@marvell.com>
+
+[ Upstream commit 823a65409c8990f64c5693af98ce0e7819975cba ]
+
+When an rport event (RPORT_EV_READY) is updated without work being queued,
+avoid taking an additional reference.
+
+This issue was leading to a memory leak. Trace from the KMEMLEAK tool:
+
+ unreferenced object 0xffff8888259e8780 (size 512):
+ comm "kworker/2:1", jiffies 4433237386 (age 113021.971s)
+ hex dump (first 32 bytes):
+ 58 0a ec cf 83 88 ff ff 00 00 00 00 00 00 00 00
+ 01 00 00 00 08 00 00 00 13 7d f0 1e 0e 00 00 10
+ backtrace:
+ [<000000006b25760f>] fc_rport_recv_req+0x3c6/0x18f0 [libfc]
+ [<00000000f208d994>] fc_lport_recv_els_req+0x120/0x8a0 [libfc]
+ [<00000000a9c437b8>] fc_lport_recv+0xb9/0x130 [libfc]
+ [<00000000a9c437b8>] fc_lport_recv+0xb9/0x130 [libfc]
+ [<00000000ad5be37b>] qedf_ll2_process_skb+0x73d/0xad0 [qedf]
+ [<00000000e0eb6893>] process_one_work+0x382/0x6c0
+ [<000000002dfd9e21>] worker_thread+0x57/0x5c0
+ [<00000000b648204f>] kthread+0x1a0/0x1c0
+ [<0000000072f5ab20>] ret_from_fork+0x35/0x40
+ [<000000001d5c05d8>] 0xffffffffffffffff
+
+Below is the log sequence which leads to memory leak. Here we get the
+RPORT_EV_READY and RPORT_EV_STOP back to back, which lead to overwrite the
+event RPORT_EV_READY by event RPORT_EV_STOP. Because of this, kref_count
+gets incremented by 1.
+
+ kernel: host0: rport fffce5: Received PLOGI request
+ kernel: host0: rport fffce5: Received PLOGI in INIT state
+ kernel: host0: rport fffce5: Port is Ready
+ kernel: host0: rport fffce5: Received PRLI request while in state Ready
+ kernel: host0: rport fffce5: PRLI rspp type 8 active 1 passive 0
+ kernel: host0: rport fffce5: Received LOGO request while in state Ready
+ kernel: host0: rport fffce5: Delete port
+ kernel: host0: rport fffce5: Received PLOGI request
+ kernel: host0: rport fffce5: Received PLOGI in state Delete - send busy
+ kernel: host0: rport fffce5: work event 3
+ kernel: host0: rport fffce5: lld callback ev 3
+ kernel: host0: rport fffce5: work delete
+
+Link: https://lore.kernel.org/r/20200626094959.32151-1-jhasan@marvell.com
+Reviewed-by: Girish Basrur <gbasrur@marvell.com>
+Reviewed-by: Saurav Kashyap <skashyap@marvell.com>
+Reviewed-by: Shyam Sundar <ssundar@marvell.com>
+Signed-off-by: Javed Hasan <jhasan@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/libfc/fc_rport.c | 9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
+index f39d2d62b002f..2b3239765c249 100644
+--- a/drivers/scsi/libfc/fc_rport.c
++++ b/drivers/scsi/libfc/fc_rport.c
+@@ -495,10 +495,11 @@ static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
+
+ fc_rport_state_enter(rdata, RPORT_ST_DELETE);
+
+- kref_get(&rdata->kref);
+- if (rdata->event == RPORT_EV_NONE &&
+- !queue_work(rport_event_queue, &rdata->event_work))
+- kref_put(&rdata->kref, fc_rport_destroy);
++ if (rdata->event == RPORT_EV_NONE) {
++ kref_get(&rdata->kref);
++ if (!queue_work(rport_event_queue, &rdata->event_work))
++ kref_put(&rdata->kref, fc_rport_destroy);
++ }
+
+ rdata->event = event;
+ }
+--
+2.25.1
+
--- /dev/null
+From acf4ddbb6ebf7d4414a82cdcf838730a32e49908 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Jan 2020 16:23:07 -0800
+Subject: scsi: lpfc: Fix coverity errors in fmdi attribute handling
+
+From: James Smart <jsmart2021@gmail.com>
+
+[ Upstream commit 4cb9e1ddaa145be9ed67b6a7de98ca705a43f998 ]
+
+Coverity reported a memory corruption error for the fdmi attribute
+routines:
+
+ CID 15768 [Memory Corruption] Out-of-bounds access on FDMI
+
+Sloppy coding of the fdmi structures. In both the lpfc_fdmi_attr_def and
+lpfc_fdmi_reg_port_list structures, a field was placed at the start of a
+payload that may have variable content. The field was given an arbitrary
+type (uint32_t). The code then used the field name to derive an address,
+which it used in things such as memset and memcpy. The memset sizes or
+memcpy lengths were larger than the arbitrary type, so Coverity reported
+an error.
+
+Fix by replacing the arbitrary fields with the real field structures
+describing the payload.
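+
+Illustrated with a tiny standalone program (the structures are invented
+for the example, not the lpfc definitions): sizing memset()/memcpy() from
+the real payload field keeps the writes inside the enclosing object.
+
+  #include <stdio.h>
+  #include <string.h>
+
+  /* Invented stand-ins for the FDMI attribute layout. */
+  struct attr_entry {
+      char AttrString[256];
+  };
+
+  struct attr_def {
+      unsigned int AttrType;
+      unsigned int AttrLen;
+      struct attr_entry AttrValue;   /* the real payload type, not a u32 */
+  };
+
+  int main(void)
+  {
+      struct attr_def ad;
+      struct attr_entry *ae = &ad.AttrValue;
+
+      /* Bounded by the actual field, so it cannot run past 'ad'. */
+      memset(ae, 0, sizeof(*ae));
+      strncpy(ae->AttrString, "example-model", sizeof(ae->AttrString) - 1);
+
+      printf("attr: %s\n", ae->AttrString);
+      return 0;
+  }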
+
+Link: https://lore.kernel.org/r/20200128002312.16346-8-jsmart2021@gmail.com
+Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_ct.c | 137 ++++++++++++++++++------------------
+ drivers/scsi/lpfc/lpfc_hw.h | 36 +++++-----
+ 2 files changed, 85 insertions(+), 88 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
+index 384f5cd7c3c81..99b4ff78f9dce 100644
+--- a/drivers/scsi/lpfc/lpfc_ct.c
++++ b/drivers/scsi/lpfc/lpfc_ct.c
+@@ -1737,8 +1737,8 @@ lpfc_fdmi_hba_attr_wwnn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, sizeof(struct lpfc_name));
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+@@ -1754,8 +1754,8 @@ lpfc_fdmi_hba_attr_manufacturer(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ /* This string MUST be consistent with other FC platforms
+ * supported by Broadcom.
+@@ -1779,8 +1779,8 @@ lpfc_fdmi_hba_attr_sn(struct lpfc_vport *vport, struct lpfc_fdmi_attr_def *ad)
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ strncpy(ae->un.AttrString, phba->SerialNumber,
+ sizeof(ae->un.AttrString));
+@@ -1801,8 +1801,8 @@ lpfc_fdmi_hba_attr_model(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ strncpy(ae->un.AttrString, phba->ModelName,
+ sizeof(ae->un.AttrString));
+@@ -1822,8 +1822,8 @@ lpfc_fdmi_hba_attr_description(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ strncpy(ae->un.AttrString, phba->ModelDesc,
+ sizeof(ae->un.AttrString));
+@@ -1845,8 +1845,8 @@ lpfc_fdmi_hba_attr_hdw_ver(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t i, j, incr, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ /* Convert JEDEC ID to ascii for hardware version */
+ incr = vp->rev.biuRev;
+@@ -1875,8 +1875,8 @@ lpfc_fdmi_hba_attr_drvr_ver(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ strncpy(ae->un.AttrString, lpfc_release_version,
+ sizeof(ae->un.AttrString));
+@@ -1897,8 +1897,8 @@ lpfc_fdmi_hba_attr_rom_ver(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ if (phba->sli_rev == LPFC_SLI_REV4)
+ lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
+@@ -1922,8 +1922,8 @@ lpfc_fdmi_hba_attr_fmw_ver(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
+ len = strnlen(ae->un.AttrString,
+@@ -1942,8 +1942,8 @@ lpfc_fdmi_hba_attr_os_ver(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s %s %s",
+ init_utsname()->sysname,
+@@ -1965,7 +1965,7 @@ lpfc_fdmi_hba_attr_ct_len(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+
+ ae->un.AttrInt = cpu_to_be32(LPFC_MAX_CT_SIZE);
+ size = FOURBYTES + sizeof(uint32_t);
+@@ -1981,8 +1981,8 @@ lpfc_fdmi_hba_attr_symbolic_name(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ len = lpfc_vport_symbolic_node_name(vport,
+ ae->un.AttrString, 256);
+@@ -2000,7 +2000,7 @@ lpfc_fdmi_hba_attr_vendor_info(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+
+ /* Nothing is defined for this currently */
+ ae->un.AttrInt = cpu_to_be32(0);
+@@ -2017,7 +2017,7 @@ lpfc_fdmi_hba_attr_num_ports(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+
+ /* Each driver instance corresponds to a single port */
+ ae->un.AttrInt = cpu_to_be32(1);
+@@ -2034,8 +2034,8 @@ lpfc_fdmi_hba_attr_fabric_wwnn(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, sizeof(struct lpfc_name));
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ memcpy(&ae->un.AttrWWN, &vport->fabric_nodename,
+ sizeof(struct lpfc_name));
+@@ -2053,8 +2053,8 @@ lpfc_fdmi_hba_attr_bios_ver(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ lpfc_decode_firmware_rev(phba, ae->un.AttrString, 1);
+ len = strnlen(ae->un.AttrString,
+@@ -2073,7 +2073,7 @@ lpfc_fdmi_hba_attr_bios_state(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+
+ /* Driver doesn't have access to this information */
+ ae->un.AttrInt = cpu_to_be32(0);
+@@ -2090,8 +2090,8 @@ lpfc_fdmi_hba_attr_vendor_id(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ strncpy(ae->un.AttrString, "EMULEX",
+ sizeof(ae->un.AttrString));
+@@ -2112,8 +2112,8 @@ lpfc_fdmi_port_attr_fc4type(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 32);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+@@ -2134,7 +2134,7 @@ lpfc_fdmi_port_attr_support_speed(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+
+ ae->un.AttrInt = 0;
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+@@ -2186,7 +2186,7 @@ lpfc_fdmi_port_attr_speed(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+
+ if (!(phba->hba_flag & HBA_FCOE_MODE)) {
+ switch (phba->fc_linkspeed) {
+@@ -2253,7 +2253,7 @@ lpfc_fdmi_port_attr_max_frame(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+
+ hsp = (struct serv_parm *)&vport->fc_sparam;
+ ae->un.AttrInt = (((uint32_t) hsp->cmn.bbRcvSizeMsb) << 8) |
+@@ -2273,8 +2273,8 @@ lpfc_fdmi_port_attr_os_devname(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ snprintf(ae->un.AttrString, sizeof(ae->un.AttrString),
+ "/sys/class/scsi_host/host%d", shost->host_no);
+@@ -2294,8 +2294,8 @@ lpfc_fdmi_port_attr_host_name(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ snprintf(ae->un.AttrString, sizeof(ae->un.AttrString), "%s",
+ init_utsname()->nodename);
+@@ -2315,8 +2315,8 @@ lpfc_fdmi_port_attr_wwnn(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, sizeof(struct lpfc_name));
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ memcpy(&ae->un.AttrWWN, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+@@ -2333,8 +2333,8 @@ lpfc_fdmi_port_attr_wwpn(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, sizeof(struct lpfc_name));
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ memcpy(&ae->un.AttrWWN, &vport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+@@ -2351,8 +2351,8 @@ lpfc_fdmi_port_attr_symbolic_name(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ len = lpfc_vport_symbolic_port_name(vport, ae->un.AttrString, 256);
+ len += (len & 3) ? (4 - (len & 3)) : 4;
+@@ -2370,7 +2370,7 @@ lpfc_fdmi_port_attr_port_type(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+ if (phba->fc_topology == LPFC_TOPOLOGY_LOOP)
+ ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTTYPE_NLPORT);
+ else
+@@ -2388,7 +2388,7 @@ lpfc_fdmi_port_attr_class(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+ ae->un.AttrInt = cpu_to_be32(FC_COS_CLASS2 | FC_COS_CLASS3);
+ size = FOURBYTES + sizeof(uint32_t);
+ ad->AttrLen = cpu_to_be16(size);
+@@ -2403,8 +2403,8 @@ lpfc_fdmi_port_attr_fabric_wwpn(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, sizeof(struct lpfc_name));
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ memcpy(&ae->un.AttrWWN, &vport->fabric_portname,
+ sizeof(struct lpfc_name));
+@@ -2421,8 +2421,8 @@ lpfc_fdmi_port_attr_active_fc4type(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 32);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ ae->un.AttrTypes[3] = 0x02; /* Type 0x1 - ELS */
+ ae->un.AttrTypes[2] = 0x01; /* Type 0x8 - FCP */
+@@ -2442,7 +2442,7 @@ lpfc_fdmi_port_attr_port_state(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+ /* Link Up - operational */
+ ae->un.AttrInt = cpu_to_be32(LPFC_FDMI_PORTSTATE_ONLINE);
+ size = FOURBYTES + sizeof(uint32_t);
+@@ -2458,7 +2458,7 @@ lpfc_fdmi_port_attr_num_disc(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+ vport->fdmi_num_disc = lpfc_find_map_node(vport);
+ ae->un.AttrInt = cpu_to_be32(vport->fdmi_num_disc);
+ size = FOURBYTES + sizeof(uint32_t);
+@@ -2474,7 +2474,7 @@ lpfc_fdmi_port_attr_nportid(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+ ae->un.AttrInt = cpu_to_be32(vport->fc_myDID);
+ size = FOURBYTES + sizeof(uint32_t);
+ ad->AttrLen = cpu_to_be16(size);
+@@ -2489,8 +2489,8 @@ lpfc_fdmi_smart_attr_service(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ strncpy(ae->un.AttrString, "Smart SAN Initiator",
+ sizeof(ae->un.AttrString));
+@@ -2510,8 +2510,8 @@ lpfc_fdmi_smart_attr_guid(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ memcpy(&ae->un.AttrString, &vport->fc_sparam.nodeName,
+ sizeof(struct lpfc_name));
+@@ -2531,8 +2531,8 @@ lpfc_fdmi_smart_attr_version(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ strncpy(ae->un.AttrString, "Smart SAN Version 2.0",
+ sizeof(ae->un.AttrString));
+@@ -2553,8 +2553,8 @@ lpfc_fdmi_smart_attr_model(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t len, size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
+- memset(ae, 0, 256);
++ ae = &ad->AttrValue;
++ memset(ae, 0, sizeof(*ae));
+
+ strncpy(ae->un.AttrString, phba->ModelName,
+ sizeof(ae->un.AttrString));
+@@ -2573,7 +2573,7 @@ lpfc_fdmi_smart_attr_port_info(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+
+ /* SRIOV (type 3) is not supported */
+ if (vport->vpi)
+@@ -2593,7 +2593,7 @@ lpfc_fdmi_smart_attr_qos(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+ ae->un.AttrInt = cpu_to_be32(0);
+ size = FOURBYTES + sizeof(uint32_t);
+ ad->AttrLen = cpu_to_be16(size);
+@@ -2608,7 +2608,7 @@ lpfc_fdmi_smart_attr_security(struct lpfc_vport *vport,
+ struct lpfc_fdmi_attr_entry *ae;
+ uint32_t size;
+
+- ae = (struct lpfc_fdmi_attr_entry *)&ad->AttrValue;
++ ae = &ad->AttrValue;
+ ae->un.AttrInt = cpu_to_be32(1);
+ size = FOURBYTES + sizeof(uint32_t);
+ ad->AttrLen = cpu_to_be16(size);
+@@ -2756,7 +2756,8 @@ lpfc_fdmi_cmd(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
+ /* Registered Port List */
+ /* One entry (port) per adapter */
+ rh->rpl.EntryCnt = cpu_to_be32(1);
+- memcpy(&rh->rpl.pe, &phba->pport->fc_sparam.portName,
++ memcpy(&rh->rpl.pe.PortName,
++ &phba->pport->fc_sparam.portName,
+ sizeof(struct lpfc_name));
+
+ /* point to the HBA attribute block */
+diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
+index 009aa0eee0408..48d4d576d588e 100644
+--- a/drivers/scsi/lpfc/lpfc_hw.h
++++ b/drivers/scsi/lpfc/lpfc_hw.h
+@@ -1333,25 +1333,8 @@ struct fc_rdp_res_frame {
+ /* lpfc_sli_ct_request defines the CT_IU preamble for FDMI commands */
+ #define SLI_CT_FDMI_Subtypes 0x10 /* Management Service Subtype */
+
+-/*
+- * Registered Port List Format
+- */
+-struct lpfc_fdmi_reg_port_list {
+- uint32_t EntryCnt;
+- uint32_t pe; /* Variable-length array */
+-};
+-
+-
+ /* Definitions for HBA / Port attribute entries */
+
+-struct lpfc_fdmi_attr_def { /* Defined in TLV format */
+- /* Structure is in Big Endian format */
+- uint32_t AttrType:16;
+- uint32_t AttrLen:16;
+- uint32_t AttrValue; /* Marks start of Value (ATTRIBUTE_ENTRY) */
+-};
+-
+-
+ /* Attribute Entry */
+ struct lpfc_fdmi_attr_entry {
+ union {
+@@ -1362,7 +1345,13 @@ struct lpfc_fdmi_attr_entry {
+ } un;
+ };
+
+-#define LPFC_FDMI_MAX_AE_SIZE sizeof(struct lpfc_fdmi_attr_entry)
++struct lpfc_fdmi_attr_def { /* Defined in TLV format */
++ /* Structure is in Big Endian format */
++ uint32_t AttrType:16;
++ uint32_t AttrLen:16;
++ /* Marks start of Value (ATTRIBUTE_ENTRY) */
++ struct lpfc_fdmi_attr_entry AttrValue;
++} __packed;
+
+ /*
+ * HBA Attribute Block
+@@ -1386,13 +1375,20 @@ struct lpfc_fdmi_hba_ident {
+ struct lpfc_name PortName;
+ };
+
++/*
++ * Registered Port List Format
++ */
++struct lpfc_fdmi_reg_port_list {
++ uint32_t EntryCnt;
++ struct lpfc_fdmi_port_entry pe;
++} __packed;
++
+ /*
+ * Register HBA(RHBA)
+ */
+ struct lpfc_fdmi_reg_hba {
+ struct lpfc_fdmi_hba_ident hi;
+- struct lpfc_fdmi_reg_port_list rpl; /* variable-length array */
+-/* struct lpfc_fdmi_attr_block ab; */
++ struct lpfc_fdmi_reg_port_list rpl;
+ };
+
+ /*
+--
+2.25.1
+
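+The layout problem fixed above is easy to reproduce outside the driver:
+when a variable-length payload is represented by a lone uint32_t
+placeholder, sizeof-based clears and copies cannot be bounded by the
+type, whereas embedding the real payload structure makes
+memset(ae, 0, sizeof(*ae)) naturally safe. A standalone sketch with
+hypothetical type names (not the lpfc definitions):
+
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+
+struct attr_entry {                 /* plays the role of the attr payload */
+	union {
+		uint32_t val;
+		char     str[256];
+	} un;
+};
+
+struct attr_def_old {               /* old style: arbitrary placeholder */
+	uint16_t type;
+	uint16_t len;
+	uint32_t value;             /* memset(&value, 0, 256) runs past the struct */
+};
+
+struct attr_def_new {               /* new style: real payload embedded */
+	uint16_t type;
+	uint16_t len;
+	struct attr_entry value;
+};
+
+int main(void)
+{
+	struct attr_def_new ad;
+	struct attr_entry *ae = &ad.value;
+
+	memset(ae, 0, sizeof(*ae));             /* bounded by the real type */
+	strncpy(ae->un.str, "example", sizeof(ae->un.str) - 1);
+	printf("payload size: %zu bytes, value: %s\n", sizeof(*ae), ae->un.str);
+	return 0;
+}
+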
--- /dev/null
+From 65b07c07c7aa007d658c49ae66fe7aef164f5d8e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Nov 2019 16:57:00 -0800
+Subject: scsi: lpfc: Fix kernel crash at lpfc_nvme_info_show during remote
+ port bounce
+
+From: James Smart <jsmart2021@gmail.com>
+
+[ Upstream commit 6c1e803eac846f886cd35131e6516fc51a8414b9 ]
+
+When reading the sysfs nvme_info file while a remote port leaves and
+comes back, a NULL pointer is encountered. The issue is due to ndlp list
+corruption, as nvme_info_show does not use the same lock as the rest of
+the code.
+
+Correct this by removing the rcu_xxx_lock calls and replacing them with
+the host_lock and phba->hbaLock spinlocks that are used by the rest of
+the driver. Given we're called from sysfs, we are safe to use _irq
+rather than _irqsave.
+
+Link: https://lore.kernel.org/r/20191105005708.7399-4-jsmart2021@gmail.com
+Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_attr.c | 35 ++++++++++++++++++-----------------
+ 1 file changed, 18 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
+index fe084d47ed9e5..3447d19d4147a 100644
+--- a/drivers/scsi/lpfc/lpfc_attr.c
++++ b/drivers/scsi/lpfc/lpfc_attr.c
+@@ -332,7 +332,6 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+ if (strlcat(buf, "\nNVME Initiator Enabled\n", PAGE_SIZE) >= PAGE_SIZE)
+ goto buffer_done;
+
+- rcu_read_lock();
+ scnprintf(tmp, sizeof(tmp),
+ "XRI Dist lpfc%d Total %d NVME %d SCSI %d ELS %d\n",
+ phba->brd_no,
+@@ -341,7 +340,7 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+ phba->sli4_hba.scsi_xri_max,
+ lpfc_sli4_get_els_iocb_cnt(phba));
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+- goto rcu_unlock_buf_done;
++ goto buffer_done;
+
+ /* Port state is only one of two values for now. */
+ if (localport->port_id)
+@@ -357,7 +356,9 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+ wwn_to_u64(vport->fc_nodename.u.wwn),
+ localport->port_id, statep);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+- goto rcu_unlock_buf_done;
++ goto buffer_done;
++
++ spin_lock_irq(shost->host_lock);
+
+ list_for_each_entry(ndlp, &vport->fc_nodes, nlp_listp) {
+ nrport = NULL;
+@@ -384,39 +385,39 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+
+ /* Tab in to show lport ownership. */
+ if (strlcat(buf, "NVME RPORT ", PAGE_SIZE) >= PAGE_SIZE)
+- goto rcu_unlock_buf_done;
++ goto unlock_buf_done;
+ if (phba->brd_no >= 10) {
+ if (strlcat(buf, " ", PAGE_SIZE) >= PAGE_SIZE)
+- goto rcu_unlock_buf_done;
++ goto unlock_buf_done;
+ }
+
+ scnprintf(tmp, sizeof(tmp), "WWPN x%llx ",
+ nrport->port_name);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+- goto rcu_unlock_buf_done;
++ goto unlock_buf_done;
+
+ scnprintf(tmp, sizeof(tmp), "WWNN x%llx ",
+ nrport->node_name);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+- goto rcu_unlock_buf_done;
++ goto unlock_buf_done;
+
+ scnprintf(tmp, sizeof(tmp), "DID x%06x ",
+ nrport->port_id);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+- goto rcu_unlock_buf_done;
++ goto unlock_buf_done;
+
+ /* An NVME rport can have multiple roles. */
+ if (nrport->port_role & FC_PORT_ROLE_NVME_INITIATOR) {
+ if (strlcat(buf, "INITIATOR ", PAGE_SIZE) >= PAGE_SIZE)
+- goto rcu_unlock_buf_done;
++ goto unlock_buf_done;
+ }
+ if (nrport->port_role & FC_PORT_ROLE_NVME_TARGET) {
+ if (strlcat(buf, "TARGET ", PAGE_SIZE) >= PAGE_SIZE)
+- goto rcu_unlock_buf_done;
++ goto unlock_buf_done;
+ }
+ if (nrport->port_role & FC_PORT_ROLE_NVME_DISCOVERY) {
+ if (strlcat(buf, "DISCSRVC ", PAGE_SIZE) >= PAGE_SIZE)
+- goto rcu_unlock_buf_done;
++ goto unlock_buf_done;
+ }
+ if (nrport->port_role & ~(FC_PORT_ROLE_NVME_INITIATOR |
+ FC_PORT_ROLE_NVME_TARGET |
+@@ -424,14 +425,14 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+ scnprintf(tmp, sizeof(tmp), "UNKNOWN ROLE x%x",
+ nrport->port_role);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+- goto rcu_unlock_buf_done;
++ goto unlock_buf_done;
+ }
+
+ scnprintf(tmp, sizeof(tmp), "%s\n", statep);
+ if (strlcat(buf, tmp, PAGE_SIZE) >= PAGE_SIZE)
+- goto rcu_unlock_buf_done;
++ goto unlock_buf_done;
+ }
+- rcu_read_unlock();
++ spin_unlock_irq(shost->host_lock);
+
+ if (!lport)
+ goto buffer_done;
+@@ -491,11 +492,11 @@ lpfc_nvme_info_show(struct device *dev, struct device_attribute *attr,
+ atomic_read(&lport->cmpl_fcp_err));
+ strlcat(buf, tmp, PAGE_SIZE);
+
+- /* RCU is already unlocked. */
++ /* host_lock is already unlocked. */
+ goto buffer_done;
+
+- rcu_unlock_buf_done:
+- rcu_read_unlock();
++ unlock_buf_done:
++ spin_unlock_irq(shost->host_lock);
+
+ buffer_done:
+ len = strnlen(buf, PAGE_SIZE);
+--
+2.25.1
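+The conversion above keeps the driver's single-exit error handling:
+every "would the next chunk overflow the page?" check jumps to one
+label that drops the lock before joining the common cleanup path. A
+compact userspace sketch of that goto-unlock idiom, with hypothetical
+names and a pthread mutex standing in for the host lock:
+
+#include <pthread.h>
+#include <stdio.h>
+#include <string.h>
+
+#define BUF_SIZE 64
+
+static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
+static const char *entries[] = { "rport-a ", "rport-b ", "rport-c " };
+
+static size_t report(char *buf)
+{
+	size_t i;
+
+	strcpy(buf, "NVME RPORTs: ");
+
+	pthread_mutex_lock(&list_lock);
+	for (i = 0; i < sizeof(entries) / sizeof(entries[0]); i++) {
+		/* Every overflow check exits through the same unlock label. */
+		if (strlen(buf) + strlen(entries[i]) + 1 > BUF_SIZE)
+			goto unlock_done;
+		strcat(buf, entries[i]);
+	}
+unlock_done:
+	pthread_mutex_unlock(&list_lock);
+
+	return strlen(buf);
+}
+
+int main(void)
+{
+	char buf[BUF_SIZE];
+
+	printf("%zu bytes: %s\n", report(buf), buf);
+	return 0;
+}
+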
+
--- /dev/null
+From cb4a624c69b876fe21a672443b89c70325f74f40 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Jan 2020 16:23:01 -0800
+Subject: scsi: lpfc: Fix RQ buffer leakage when no IOCBs available
+
+From: James Smart <jsmart2021@gmail.com>
+
+[ Upstream commit 39c4f1a965a9244c3ba60695e8ff8da065ec6ac4 ]
+
+The driver is occasionally seeing the following SLI Port error, requiring
+reset and reinit:
+
+ Port Status Event: ... error 1=0x52004a01, error 2=0x218
+
+The failure means an RQ timeout. That is, the adapter had received
+asynchronous receive frames, ran out of buffer slots to place the frames,
+and the driver did not replenish the buffer slots before a timeout
+occurred. The driver should not be so slow in replenishing buffers that a
+timeout can occur.
+
+When the driver receives all the frames of a sequence, it allocates an IOCB
+to put the frames in. In a situation where there was no IOCB available for
+the frame of a sequence, the RQ buffer corresponding to the first frame of
+the sequence was not returned to the FW. Eventually, with enough traffic
+encountering the situation, the timeout occurred.
+
+Fix by releasing the buffer back to firmware whenever there is no IOCB for
+the first frame.
+
+[mkp: typo]
+
+Link: https://lore.kernel.org/r/20200128002312.16346-2-jsmart2021@gmail.com
+Signed-off-by: Dick Kennedy <dick.kennedy@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_sli.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index a56a939792ac1..2ab351260e815 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -17413,6 +17413,10 @@ lpfc_prep_seq(struct lpfc_vport *vport, struct hbq_dmabuf *seq_dmabuf)
+ list_add_tail(&iocbq->list, &first_iocbq->list);
+ }
+ }
++ /* Free the sequence's header buffer */
++ if (!first_iocbq)
++ lpfc_in_buf_free(vport->phba, &seq_dmabuf->dbuf);
++
+ return first_iocbq;
+ }
+
+--
+2.25.1
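+The change above follows a general ownership rule: when a routine
+consumes a buffer but cannot allocate the wrapper that would carry it
+onward, the buffer must be handed back (here, returned to firmware)
+before bailing out, or it leaks on every failure. A small userspace
+sketch of that rule with hypothetical names, not the lpfc code:
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct frame {                      /* plays the role of the RQ header buffer */
+	size_t len;
+	unsigned char data[64];
+};
+
+struct sequence {                   /* plays the role of the iocbq wrapper */
+	struct frame *first;
+};
+
+/* Consumes 'f': on success the sequence owns it, on failure it is freed here. */
+static struct sequence *build_sequence(struct frame *f)
+{
+	struct sequence *s = malloc(sizeof(*s));
+
+	if (!s) {
+		free(f);            /* no wrapper available: release the buffer */
+		return NULL;
+	}
+	s->first = f;
+	return s;
+}
+
+int main(void)
+{
+	struct frame *f = calloc(1, sizeof(*f));
+	struct sequence *s;
+
+	f->len = 4;
+	memcpy(f->data, "\x01\x02\x03\x04", 4);
+
+	s = build_sequence(f);
+	if (!s)
+		return 1;           /* f was already released inside build_sequence() */
+
+	printf("sequence starts with a %zu byte frame\n", s->first->len);
+	free(s->first);
+	free(s);
+	return 0;
+}
+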
+
--- /dev/null
+From 7ba875b4312258704195e3a7a52ab1bce066fb78 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Nov 2019 15:39:06 +0530
+Subject: scsi: pm80xx: Cleanup command when a reset times out
+
+From: peter chang <dpf@google.com>
+
+[ Upstream commit 51c1c5f6ed64c2b65a8cf89dac136273d25ca540 ]
+
+Add a fix so that if the driver properly sent the abort, it tries to
+remove the command from the firmware's list of outstanding commands
+regardless of the abort status. This means that the task gets freed
+'now' rather than possibly getting freed later, when the SCSI layer
+thinks it has leaked but it is still valid.
+
+Link: https://lore.kernel.org/r/20191114100910.6153-10-deepak.ukey@microchip.com
+Acked-by: Jack Wang <jinpu.wang@cloud.ionos.com>
+Signed-off-by: peter chang <dpf@google.com>
+Signed-off-by: Deepak Ukey <deepak.ukey@microchip.com>
+Signed-off-by: Viswas G <Viswas.G@microchip.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/pm8001/pm8001_sas.c | 50 +++++++++++++++++++++++---------
+ 1 file changed, 37 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c
+index ba79b37d8cf7e..5becdde3ea324 100644
+--- a/drivers/scsi/pm8001/pm8001_sas.c
++++ b/drivers/scsi/pm8001/pm8001_sas.c
+@@ -1184,8 +1184,8 @@ int pm8001_abort_task(struct sas_task *task)
+ pm8001_ha = pm8001_find_ha_by_dev(dev);
+ device_id = pm8001_dev->device_id;
+ phy_id = pm8001_dev->attached_phy;
+- rc = pm8001_find_tag(task, &tag);
+- if (rc == 0) {
++ ret = pm8001_find_tag(task, &tag);
++ if (ret == 0) {
+ pm8001_printk("no tag for task:%p\n", task);
+ return TMF_RESP_FUNC_FAILED;
+ }
+@@ -1223,26 +1223,50 @@ int pm8001_abort_task(struct sas_task *task)
+
+ /* 2. Send Phy Control Hard Reset */
+ reinit_completion(&completion);
++ phy->port_reset_status = PORT_RESET_TMO;
+ phy->reset_success = false;
+ phy->enable_completion = &completion;
+ phy->reset_completion = &completion_reset;
+ ret = PM8001_CHIP_DISP->phy_ctl_req(pm8001_ha, phy_id,
+ PHY_HARD_RESET);
+- if (ret)
+- goto out;
+- PM8001_MSG_DBG(pm8001_ha,
+- pm8001_printk("Waiting for local phy ctl\n"));
+- wait_for_completion(&completion);
+- if (!phy->reset_success)
++ if (ret) {
++ phy->enable_completion = NULL;
++ phy->reset_completion = NULL;
+ goto out;
++ }
+
+- /* 3. Wait for Port Reset complete / Port reset TMO */
++ /* In the case of the reset timeout/fail we still
++ * abort the command at the firmware. The assumption
++ * here is that the drive is off doing something so
++ * that it's not processing requests, and we want to
++ * avoid getting a completion for this and either
++ * leaking the task in libsas or losing the race and
++ * getting a double free.
++ */
+ PM8001_MSG_DBG(pm8001_ha,
++ pm8001_printk("Waiting for local phy ctl\n"));
++ ret = wait_for_completion_timeout(&completion,
++ PM8001_TASK_TIMEOUT * HZ);
++ if (!ret || !phy->reset_success) {
++ phy->enable_completion = NULL;
++ phy->reset_completion = NULL;
++ } else {
++ /* 3. Wait for Port Reset complete or
++ * Port reset TMO
++ */
++ PM8001_MSG_DBG(pm8001_ha,
+ pm8001_printk("Waiting for Port reset\n"));
+- wait_for_completion(&completion_reset);
+- if (phy->port_reset_status) {
+- pm8001_dev_gone_notify(dev);
+- goto out;
++ ret = wait_for_completion_timeout(
++ &completion_reset,
++ PM8001_TASK_TIMEOUT * HZ);
++ if (!ret)
++ phy->reset_completion = NULL;
++ WARN_ON(phy->port_reset_status ==
++ PORT_RESET_TMO);
++ if (phy->port_reset_status == PORT_RESET_TMO) {
++ pm8001_dev_gone_notify(dev);
++ goto out;
++ }
+ }
+
+ /*
+--
+2.25.1
+
--- /dev/null
+From 92a30b3e7cf8923ee5bf983101edfc4ac7c5ffbe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2020 23:43:32 -0700
+Subject: scsi: qedi: Fix termination timeouts in session logout
+
+From: Nilesh Javali <njavali@marvell.com>
+
+[ Upstream commit b9b97e6903032ec56e6dcbe137a9819b74a17fea ]
+
+The destroy connection ramrod timed out during session logout. Fix the
+wait delay for graceful vs abortive termination as per the FW requirements.
+
+Link: https://lore.kernel.org/r/20200408064332.19377-7-mrangankar@marvell.com
+Reviewed-by: Lee Duncan <lduncan@suse.com>
+Signed-off-by: Nilesh Javali <njavali@marvell.com>
+Signed-off-by: Manish Rangankar <mrangankar@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/qedi/qedi_iscsi.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/scsi/qedi/qedi_iscsi.c b/drivers/scsi/qedi/qedi_iscsi.c
+index 751941a3ed303..aa451c8b49e56 100644
+--- a/drivers/scsi/qedi/qedi_iscsi.c
++++ b/drivers/scsi/qedi/qedi_iscsi.c
+@@ -1065,6 +1065,9 @@ static void qedi_ep_disconnect(struct iscsi_endpoint *ep)
+ break;
+ }
+
++ if (!abrt_conn)
++ wait_delay += qedi->pf_params.iscsi_pf_params.two_msl_timer;
++
+ qedi_ep->state = EP_STATE_DISCONN_START;
+ ret = qedi_ops->destroy_conn(qedi->cdev, qedi_ep->handle, abrt_conn);
+ if (ret) {
+--
+2.25.1
+
--- /dev/null
+From 0a8e81da08c5376c3942d3a809fbf0819c87d717 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Dec 2019 14:02:46 -0800
+Subject: scsi: ufs: Fix a race condition in the tracing code
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit eacf36f5bebde5089dddb3d5bfcbeab530b01f8a ]
+
+Starting execution of a command before tracing a command may cause the
+completion handler to free data while it is being traced. Fix this race by
+tracing a command before it is submitted.
+
+Cc: Bean Huo <beanhuo@micron.com>
+Cc: Can Guo <cang@codeaurora.org>
+Cc: Avri Altman <avri.altman@wdc.com>
+Cc: Stanley Chu <stanley.chu@mediatek.com>
+Cc: Tomas Winkler <tomas.winkler@intel.com>
+Link: https://lore.kernel.org/r/20191224220248.30138-5-bvanassche@acm.org
+Reviewed-by: Alim Akhtar <alim.akhtar@samsung.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/ufs/ufshcd.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index faf1959981784..b2cbdd01ab10b 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -1910,12 +1910,12 @@ void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
+ {
+ hba->lrb[task_tag].issue_time_stamp = ktime_get();
+ hba->lrb[task_tag].compl_time_stamp = ktime_set(0, 0);
++ ufshcd_add_command_trace(hba, task_tag, "send");
+ ufshcd_clk_scaling_start_busy(hba);
+ __set_bit(task_tag, &hba->outstanding_reqs);
+ ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
+ /* Make sure that doorbell is committed immediately */
+ wmb();
+- ufshcd_add_command_trace(hba, task_tag, "send");
+ }
+
+ /**
+--
+2.25.1
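+The race fixed above is the generic "log after hand-off" problem: once
+a command is visible to the completion path, its data may be freed
+concurrently, so any tracing has to happen before submission. A
+pthread-based sketch of that ordering with a hypothetical request type,
+not the UFS code:
+
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+struct request {
+	char description[32];
+};
+
+static void *completion_handler(void *arg)
+{
+	struct request *rq = arg;
+
+	/* The completion path owns the request now and may free it any time. */
+	free(rq);
+	return NULL;
+}
+
+static void submit(struct request *rq, pthread_t *worker)
+{
+	/*
+	 * Trace while this thread still owns the request; tracing after
+	 * pthread_create() would race with free() in the handler.
+	 */
+	printf("send: %s\n", rq->description);
+
+	pthread_create(worker, NULL, completion_handler, rq);
+}
+
+int main(void)
+{
+	pthread_t worker;
+	struct request *rq = malloc(sizeof(*rq));
+
+	strcpy(rq->description, "read block 42");
+	submit(rq, &worker);
+	pthread_join(worker, NULL);
+	return 0;
+}
+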
+
--- /dev/null
+From 1bfd59f501432edc1f590c970c42cbc17977a4c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Dec 2019 14:02:44 -0800
+Subject: scsi: ufs: Make ufshcd_add_command_trace() easier to read
+
+From: Bart Van Assche <bvanassche@acm.org>
+
+[ Upstream commit e4d2add7fd5bc64ee3e388eabe6b9e081cb42e11 ]
+
+Since the lrbp->cmd expression occurs multiple times, introduce a new local
+variable to hold that pointer. This patch does not change any
+functionality.
+
+Cc: Bean Huo <beanhuo@micron.com>
+Cc: Can Guo <cang@codeaurora.org>
+Cc: Avri Altman <avri.altman@wdc.com>
+Cc: Stanley Chu <stanley.chu@mediatek.com>
+Cc: Tomas Winkler <tomas.winkler@intel.com>
+Link: https://lore.kernel.org/r/20191224220248.30138-3-bvanassche@acm.org
+Reviewed-by: Stanley Chu <stanley.chu@mediatek.com>
+Reviewed-by: Can Guo <cang@codeaurora.org>
+Reviewed-by: Alim Akhtar <alim.akhtar@samsung.com>
+Signed-off-by: Bart Van Assche <bvanassche@acm.org>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/ufs/ufshcd.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index eb10a5cacd90c..faf1959981784 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -353,27 +353,27 @@ static void ufshcd_add_command_trace(struct ufs_hba *hba,
+ u8 opcode = 0;
+ u32 intr, doorbell;
+ struct ufshcd_lrb *lrbp = &hba->lrb[tag];
++ struct scsi_cmnd *cmd = lrbp->cmd;
+ int transfer_len = -1;
+
+ if (!trace_ufshcd_command_enabled()) {
+ /* trace UPIU W/O tracing command */
+- if (lrbp->cmd)
++ if (cmd)
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
+ return;
+ }
+
+- if (lrbp->cmd) { /* data phase exists */
++ if (cmd) { /* data phase exists */
+ /* trace UPIU also */
+ ufshcd_add_cmd_upiu_trace(hba, tag, str);
+- opcode = (u8)(*lrbp->cmd->cmnd);
++ opcode = cmd->cmnd[0];
+ if ((opcode == READ_10) || (opcode == WRITE_10)) {
+ /*
+ * Currently we only fully trace read(10) and write(10)
+ * commands
+ */
+- if (lrbp->cmd->request && lrbp->cmd->request->bio)
+- lba =
+- lrbp->cmd->request->bio->bi_iter.bi_sector;
++ if (cmd->request && cmd->request->bio)
++ lba = cmd->request->bio->bi_iter.bi_sector;
+ transfer_len = be32_to_cpu(
+ lrbp->ucd_req_ptr->sc.exp_data_transfer_len);
+ }
+--
+2.25.1
+
--- /dev/null
+From 34d1534bc9ddaa9632b6dc040113063995bde969 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 26 Dec 2019 12:29:17 +0000
+Subject: sctp: move trace_sctp_probe_path into sctp_outq_sack
+
+From: Kevin Kou <qdkevin.kou@gmail.com>
+
+[ Upstream commit f643ee295c1c63bc117fb052d4da681354d6f732 ]
+
+The original patch that brought in the "SCTP ACK tracking trace event"
+feature was committed on Dec. 20, 2017. It replaced jprobe usage
+with trace events and brought in two trace events: one is
+TRACE_EVENT(sctp_probe), the other is TRACE_EVENT(sctp_probe_path).
+The original patch intended to trigger trace_sctp_probe_path from
+within TRACE_EVENT(sctp_probe), as in the code below:
+
++TRACE_EVENT(sctp_probe,
++
++ TP_PROTO(const struct sctp_endpoint *ep,
++ const struct sctp_association *asoc,
++ struct sctp_chunk *chunk),
++
++ TP_ARGS(ep, asoc, chunk),
++
++ TP_STRUCT__entry(
++ __field(__u64, asoc)
++ __field(__u32, mark)
++ __field(__u16, bind_port)
++ __field(__u16, peer_port)
++ __field(__u32, pathmtu)
++ __field(__u32, rwnd)
++ __field(__u16, unack_data)
++ ),
++
++ TP_fast_assign(
++ struct sk_buff *skb = chunk->skb;
++
++ __entry->asoc = (unsigned long)asoc;
++ __entry->mark = skb->mark;
++ __entry->bind_port = ep->base.bind_addr.port;
++ __entry->peer_port = asoc->peer.port;
++ __entry->pathmtu = asoc->pathmtu;
++ __entry->rwnd = asoc->peer.rwnd;
++ __entry->unack_data = asoc->unack_data;
++
++ if (trace_sctp_probe_path_enabled()) {
++ struct sctp_transport *sp;
++
++ list_for_each_entry(sp, &asoc->peer.transport_addr_list,
++ transports) {
++ trace_sctp_probe_path(sp, asoc);
++ }
++ }
++ ),
+
+But when testing I found it did not work: trace_sctp_probe_path produced
+no output. I finally found that there is a trace buffer lock
+operation (trace_event_buffer_reserve) in include/trace/trace_events.h:
+
+static notrace void \
+trace_event_raw_event_##call(void *__data, proto) \
+{ \
+ struct trace_event_file *trace_file = __data; \
+ struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
+ struct trace_event_buffer fbuffer; \
+ struct trace_event_raw_##call *entry; \
+ int __data_size; \
+ \
+ if (trace_trigger_soft_disabled(trace_file)) \
+ return; \
+ \
+ __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
+ \
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
+ sizeof(*entry) + __data_size); \
+ \
+ if (!entry) \
+ return; \
+ \
+ tstruct \
+ \
+ { assign; } \
+ \
+ trace_event_buffer_commit(&fbuffer); \
+}
+
+The reason trace_sctp_probe_path produces no output is that it is
+written in the TP_fast_assign part of TRACE_EVENT(sctp_probe), and it
+is placed ( { assign; } ) after the trace_event_buffer_reserve() call
+when the compiler expands the macro,
+
+ entry = trace_event_buffer_reserve(&fbuffer, trace_file, \
+ sizeof(*entry) + __data_size); \
+ \
+ if (!entry) \
+ return; \
+ \
+ tstruct \
+ \
+ { assign; } \
+
+so trace_sctp_probe_path ultimately cannot acquire the trace event
+buffer and produces no output; that is to say, nesting tracepoint entry
+functions is not allowed. The function call flow is:
+
+trace_sctp_probe()
+-> trace_event_raw_event_sctp_probe()
+ -> lock buffer
+ -> trace_sctp_probe_path()
+ -> trace_event_raw_event_sctp_probe_path() --nested
+ -> buffer has been locked and return no output.
+
+This patch removes trace_sctp_probe_path from the TP_fast_assign
+part of TRACE_EVENT(sctp_probe) to avoid nesting entry functions,
+and triggers trace_sctp_probe_path in sctp_outq_sack instead.
+
+After this patch, you can enable both events individually,
+ # cd /sys/kernel/debug/tracing
+ # echo 1 > events/sctp/sctp_probe/enable
+ # echo 1 > events/sctp/sctp_probe_path/enable
+
+Or, you can enable all the events under sctp.
+
+ # echo 1 > events/sctp/enable
+
+Signed-off-by: Kevin Kou <qdkevin.kou@gmail.com>
+Acked-by: Marcelo Ricardo Leitner <marcelo.leitner@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/trace/events/sctp.h | 9 ---------
+ net/sctp/outqueue.c | 6 ++++++
+ 2 files changed, 6 insertions(+), 9 deletions(-)
+
+diff --git a/include/trace/events/sctp.h b/include/trace/events/sctp.h
+index 7475c7be165aa..d4aac34365955 100644
+--- a/include/trace/events/sctp.h
++++ b/include/trace/events/sctp.h
+@@ -75,15 +75,6 @@ TRACE_EVENT(sctp_probe,
+ __entry->pathmtu = asoc->pathmtu;
+ __entry->rwnd = asoc->peer.rwnd;
+ __entry->unack_data = asoc->unack_data;
+-
+- if (trace_sctp_probe_path_enabled()) {
+- struct sctp_transport *sp;
+-
+- list_for_each_entry(sp, &asoc->peer.transport_addr_list,
+- transports) {
+- trace_sctp_probe_path(sp, asoc);
+- }
+- }
+ ),
+
+ TP_printk("asoc=%#llx mark=%#x bind_port=%d peer_port=%d pathmtu=%d "
+diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c
+index 7bb8e5603298d..d6e83a37a1adf 100644
+--- a/net/sctp/outqueue.c
++++ b/net/sctp/outqueue.c
+@@ -51,6 +51,7 @@
+ #include <net/sctp/sctp.h>
+ #include <net/sctp/sm.h>
+ #include <net/sctp/stream_sched.h>
++#include <trace/events/sctp.h>
+
+ /* Declare internal functions here. */
+ static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
+@@ -1257,6 +1258,11 @@ int sctp_outq_sack(struct sctp_outq *q, struct sctp_chunk *chunk)
+ /* Grab the association's destination address list. */
+ transport_list = &asoc->peer.transport_addr_list;
+
++ /* SCTP path tracepoint for congestion control debugging. */
++ list_for_each_entry(transport, transport_list, transports) {
++ trace_sctp_probe_path(transport, asoc);
++ }
++
+ sack_ctsn = ntohl(sack->cum_tsn_ack);
+ gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
+ asoc->stats.gapcnt += gap_ack_blocks;
+--
+2.25.1
+
--- /dev/null
+From 8ec268eb924456502cc32367a8598a46e1bef10f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jan 2020 09:30:29 +0100
+Subject: selftests/ftrace: fix glob selftest
+
+From: Sven Schnelle <svens@linux.ibm.com>
+
+[ Upstream commit af4ddd607dff7aabd466a4a878e01b9f592a75ab ]
+
+test.d/ftrace/func-filter-glob.tc is failing on s390 because it has
+ARCH_INLINE_SPIN_LOCK and friends set to 'y'. So the usual
+__raw_spin_lock symbol isn't in the ftrace function list. Change
+'*aw*lock' to '*pin*lock', which would hopefully match some of the
+locking functions on all platforms.
+
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+index 27a54a17da65d..f4e92afab14b2 100644
+--- a/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
++++ b/tools/testing/selftests/ftrace/test.d/ftrace/func-filter-glob.tc
+@@ -30,7 +30,7 @@ ftrace_filter_check '*schedule*' '^.*schedule.*$'
+ ftrace_filter_check 'schedule*' '^schedule.*$'
+
+ # filter by *mid*end
+-ftrace_filter_check '*aw*lock' '.*aw.*lock$'
++ftrace_filter_check '*pin*lock' '.*pin.*lock$'
+
+ # filter by start*mid*
+ ftrace_filter_check 'mutex*try*' '^mutex.*try.*'
+--
+2.25.1
+
--- /dev/null
+From b3674911656b639faca25145c87b874dd7cc07f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 26 Jun 2020 10:21:15 -0700
+Subject: selftests/x86/syscall_nt: Clear weird flags after each test
+
+From: Andy Lutomirski <luto@kernel.org>
+
+[ Upstream commit a61fa2799ef9bf6c4f54cf7295036577cececc72 ]
+
+Clear the weird flags before logging to improve strace output --
+logging results while, say, TF is set does no one any favors.
+
+Signed-off-by: Andy Lutomirski <luto@kernel.org>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lkml.kernel.org/r/907bfa5a42d4475b8245e18b67a04b13ca51ffdb.1593191971.git.luto@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/x86/syscall_nt.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/tools/testing/selftests/x86/syscall_nt.c b/tools/testing/selftests/x86/syscall_nt.c
+index 43fcab367fb0a..74e6b3fc2d09e 100644
+--- a/tools/testing/selftests/x86/syscall_nt.c
++++ b/tools/testing/selftests/x86/syscall_nt.c
+@@ -67,6 +67,7 @@ static void do_it(unsigned long extraflags)
+ set_eflags(get_eflags() | extraflags);
+ syscall(SYS_getpid);
+ flags = get_eflags();
++ set_eflags(X86_EFLAGS_IF | X86_EFLAGS_FIXED);
+ if ((flags & extraflags) == extraflags) {
+ printf("[OK]\tThe syscall worked and flags are still set\n");
+ } else {
+--
+2.25.1
+
--- /dev/null
+From 3da6b22e34d955795543027ba6536bb4ba36ab89 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Sep 2019 09:30:07 -0400
+Subject: selinux: allow labeling before policy is loaded
+
+From: Jonathan Lebon <jlebon@redhat.com>
+
+[ Upstream commit 3e3e24b42043eceb97ed834102c2d094dfd7aaa6 ]
+
+Currently, the SELinux LSM prevents one from setting the
+`security.selinux` xattr on an inode without a policy first being
+loaded. However, this restriction is problematic: it makes it impossible
+to have newly created files with the correct label before actually
+loading the policy.
+
+This is relevant in distributions like Fedora, where the policy is
+loaded by systemd shortly after pivoting out of the initrd. In such
+instances, all files created prior to pivoting will be unlabeled. One
+then has to relabel them after pivoting, an operation which inherently
+races with other processes trying to access those same files.
+
+Going further, there are use cases for creating the entire root
+filesystem on first boot from the initrd (e.g. Container Linux supports
+this today[1], and we'd like to support it in Fedora CoreOS as well[2]).
+One can imagine doing this in two ways: at the block device level (e.g.
+laying down a disk image), or at the filesystem level. In the former,
+labeling can simply be part of the image. But even in the latter
+scenario, one still really wants to be able to set the right labels when
+populating the new filesystem.
+
+This patch enables this by changing behaviour in the following two ways:
+1. allow `setxattr` if we're not initialized
+2. don't try to set the in-core inode SID if we're not initialized;
+ instead leave it as `LABEL_INVALID` so that revalidation may be
+ attempted at a later time
+
+Note the first hunk of this patch is mostly the same as a previously
+discussed one[3], though it was part of a larger series which wasn't
+accepted.
+
+[1] https://coreos.com/os/docs/latest/root-filesystem-placement.html
+[2] https://github.com/coreos/fedora-coreos-tracker/issues/94
+[3] https://www.spinics.net/lists/linux-initramfs/msg04593.html
+
+Co-developed-by: Victor Kamensky <kamensky@cisco.com>
+Signed-off-by: Victor Kamensky <kamensky@cisco.com>
+Signed-off-by: Jonathan Lebon <jlebon@redhat.com>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/selinux/hooks.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index 452254fd89f87..250b725f5754c 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -3304,6 +3304,9 @@ static int selinux_inode_setxattr(struct dentry *dentry, const char *name,
+ return dentry_has_perm(current_cred(), dentry, FILE__SETATTR);
+ }
+
++ if (!selinux_state.initialized)
++ return (inode_owner_or_capable(inode) ? 0 : -EPERM);
++
+ sbsec = inode->i_sb->s_security;
+ if (!(sbsec->flags & SBLABEL_MNT))
+ return -EOPNOTSUPP;
+@@ -3387,6 +3390,15 @@ static void selinux_inode_post_setxattr(struct dentry *dentry, const char *name,
+ return;
+ }
+
++ if (!selinux_state.initialized) {
++ /* If we haven't even been initialized, then we can't validate
++ * against a policy, so leave the label as invalid. It may
++ * resolve to a valid label on the next revalidation try if
++ * we've since initialized.
++ */
++ return;
++ }
++
+ rc = security_context_to_sid_force(&selinux_state, value, size,
+ &newsid);
+ if (rc) {
+--
+2.25.1
+
--- /dev/null
+From 42c593bcebd26bdbfdf545899e424f69f29cfe57 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 1 Feb 2020 10:47:47 +0300
+Subject: selinux: sel_avc_get_stat_idx should increase position index
+
+From: Vasily Averin <vvs@virtuozzo.com>
+
+[ Upstream commit 8d269a8e2a8f0bca89022f4ec98de460acb90365 ]
+
+If seq_file .next function does not change position index,
+read after some lseek can generate unexpected output.
+
+$ dd if=/sys/fs/selinux/avc/cache_stats # usual output
+lookups hits misses allocations reclaims frees
+817223 810034 7189 7189 6992 7037
+1934894 1926896 7998 7998 7632 7683
+1322812 1317176 5636 5636 5456 5507
+1560571 1551548 9023 9023 9056 9115
+0+1 records in
+0+1 records out
+189 bytes copied, 5,1564e-05 s, 3,7 MB/s
+
+$# read after lseek to middle of last line
+$ dd if=/sys/fs/selinux/avc/cache_stats bs=180 skip=1
+dd: /sys/fs/selinux/avc/cache_stats: cannot skip to specified offset
+056 9115 <<<< end of last line
+1560571 1551548 9023 9023 9056 9115 <<< whole last line once again
+0+1 records in
+0+1 records out
+45 bytes copied, 8,7221e-05 s, 516 kB/s
+
+$# read after lseek beyond end of file
+$ dd if=/sys/fs/selinux/avc/cache_stats bs=1000 skip=1
+dd: /sys/fs/selinux/avc/cache_stats: cannot skip to specified offset
+1560571 1551548 9023 9023 9056 9115 <<<< generates whole last line
+0+1 records in
+0+1 records out
+36 bytes copied, 9,0934e-05 s, 396 kB/s
+
+https://bugzilla.kernel.org/show_bug.cgi?id=206283
+
+Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
+Acked-by: Stephen Smalley <sds@tycho.nsa.gov>
+Signed-off-by: Paul Moore <paul@paul-moore.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ security/selinux/selinuxfs.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
+index f3a5a138a096d..60b3f16bb5c7b 100644
+--- a/security/selinux/selinuxfs.c
++++ b/security/selinux/selinuxfs.c
+@@ -1509,6 +1509,7 @@ static struct avc_cache_stats *sel_avc_get_stat_idx(loff_t *idx)
+ *idx = cpu + 1;
+ return &per_cpu(avc_cache_stats, cpu);
+ }
++ (*idx)++;
+ return NULL;
+ }
+
+--
+2.25.1
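+The one-line fix above enforces the seq_file contract that a .next
+callback must advance the position index even when it has nothing more
+to return; otherwise a reader resuming from an lseek offset is handed
+the last record again. A userspace sketch of a cursor-style iterator
+honouring that contract (hypothetical helper, not the selinuxfs code):
+
+#include <stdio.h>
+
+static const int stats[] = { 10, 20, 30 };
+#define NSTATS (sizeof(stats) / sizeof(stats[0]))
+
+/*
+ * Return the record at *idx and bump the cursor. Crucially the cursor is
+ * bumped even when we are past the end, so a caller resuming from a saved
+ * position sees "end of data" instead of the last record twice.
+ */
+static const int *get_stat_idx(long *idx)
+{
+	const int *p = NULL;
+
+	if (*idx < (long)NSTATS)
+		p = &stats[*idx];
+	(*idx)++;
+	return p;
+}
+
+int main(void)
+{
+	long pos = 0;
+	const int *p;
+
+	while ((p = get_stat_idx(&pos)))
+		printf("%d\n", *p);
+
+	/* Resuming from the saved position yields no duplicate record. */
+	if (!get_stat_idx(&pos))
+		printf("end of data\n");
+	return 0;
+}
+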
+
--- /dev/null
+From f7c966cb3915685a41e0fcd5173da79be2072a1d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 Nov 2019 19:03:00 +0100
+Subject: seqlock: Require WRITE_ONCE surrounding raw_seqcount_barrier
+
+From: Marco Elver <elver@google.com>
+
+[ Upstream commit bf07132f96d426bcbf2098227fb680915cf44498 ]
+
+This patch proposes to require marked atomic accesses surrounding
+raw_write_seqcount_barrier. We reason that otherwise there is no way to
+guarantee propagation nor atomicity of writes before/after the barrier
+[1]. For example, consider that the compiler tears stores either before
+or after the barrier; in this case, readers may observe a partial value,
+and because readers are unaware that writes are going on (writes are not
+in a seq-writer critical section), they will complete the seq-reader
+critical section while having observed some partial state.
+[1] https://lwn.net/Articles/793253/
+
+This came up when designing and implementing KCSAN, because KCSAN would
+flag these accesses as data-races. After careful analysis, our reasoning
+as above led us to conclude that the best thing to do is to propose an
+amendment to the raw_seqcount_barrier usage.
+
+Signed-off-by: Marco Elver <elver@google.com>
+Acked-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Paul E. McKenney <paulmck@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/seqlock.h | 11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
+index bcf4cf26b8c89..a42a29952889c 100644
+--- a/include/linux/seqlock.h
++++ b/include/linux/seqlock.h
+@@ -243,6 +243,13 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
+ * usual consistency guarantee. It is one wmb cheaper, because we can
+ * collapse the two back-to-back wmb()s.
+ *
++ * Note that, writes surrounding the barrier should be declared atomic (e.g.
++ * via WRITE_ONCE): a) to ensure the writes become visible to other threads
++ * atomically, avoiding compiler optimizations; b) to document which writes are
++ * meant to propagate to the reader critical section. This is necessary because
++ * neither writes before and after the barrier are enclosed in a seq-writer
++ * critical section that would ensure readers are aware of ongoing writes.
++ *
+ * seqcount_t seq;
+ * bool X = true, Y = false;
+ *
+@@ -262,11 +269,11 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
+ *
+ * void write(void)
+ * {
+- * Y = true;
++ * WRITE_ONCE(Y, true);
+ *
+ * raw_write_seqcount_barrier(seq);
+ *
+- * X = false;
++ * WRITE_ONCE(X, false);
+ * }
+ */
+ static inline void raw_write_seqcount_barrier(seqcount_t *s)
+--
+2.25.1
+
--- /dev/null
+From d03b9c17eca19652a1dff76faf81d6217c433294 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Mar 2020 16:33:39 +0530
+Subject: serial: 8250: 8250_omap: Terminate DMA before pushing data on RX
+ timeout
+
+From: Vignesh Raghavendra <vigneshr@ti.com>
+
+[ Upstream commit 7cf4df30a98175033e9849f7f16c46e96ba47f41 ]
+
+Terminate and flush DMA internal buffers, before pushing RX data to
+higher layer. Otherwise, this will lead to data corruption, as driver
+would end up pushing stale buffer data to higher layer while actual data
+is still stuck inside DMA hardware and has yet not arrived at the
+memory.
+While at it, replace the deprecated dmaengine_terminate_all() with
+dmaengine_terminate_async().
+
+Signed-off-by: Vignesh Raghavendra <vigneshr@ti.com>
+Link: https://lore.kernel.org/r/20200319110344.21348-2-vigneshr@ti.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/8250/8250_omap.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index a7e555e413a69..cbd006fb7fbb9 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -781,7 +781,10 @@ static void __dma_rx_do_complete(struct uart_8250_port *p)
+ dmaengine_tx_status(dma->rxchan, dma->rx_cookie, &state);
+
+ count = dma->rx_size - state.residue;
+-
++ if (count < dma->rx_size)
++ dmaengine_terminate_async(dma->rxchan);
++ if (!count)
++ goto unlock;
+ ret = tty_insert_flip_string(tty_port, dma->rx_buf, count);
+
+ p->port.icount.rx += ret;
+@@ -843,7 +846,6 @@ static void omap_8250_rx_dma_flush(struct uart_8250_port *p)
+ spin_unlock_irqrestore(&priv->rx_dma_lock, flags);
+
+ __dma_rx_do_complete(p);
+- dmaengine_terminate_all(dma->rxchan);
+ }
+
+ static int omap_8250_rx_dma(struct uart_8250_port *p)
+--
+2.25.1
+
--- /dev/null
+From a12d6e58b2296e08c5f40e31f72bb2dacaf8a3ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 20 Mar 2020 14:52:00 +0200
+Subject: serial: 8250_omap: Fix sleeping function called from invalid context
+ during probe
+
+From: Peter Ujfalusi <peter.ujfalusi@ti.com>
+
+[ Upstream commit 4ce35a3617c0ac758c61122b2218b6c8c9ac9398 ]
+
+When booting j721e the following bug is printed:
+
+[ 1.154821] BUG: sleeping function called from invalid context at kernel/sched/completion.c:99
+[ 1.154827] in_atomic(): 0, irqs_disabled(): 128, non_block: 0, pid: 12, name: kworker/0:1
+[ 1.154832] 3 locks held by kworker/0:1/12:
+[ 1.154836] #0: ffff000840030728 ((wq_completion)events){+.+.}, at: process_one_work+0x1d4/0x6e8
+[ 1.154852] #1: ffff80001214fdd8 (deferred_probe_work){+.+.}, at: process_one_work+0x1d4/0x6e8
+[ 1.154860] #2: ffff00084060b170 (&dev->mutex){....}, at: __device_attach+0x38/0x138
+[ 1.154872] irq event stamp: 63096
+[ 1.154881] hardirqs last enabled at (63095): [<ffff800010b74318>] _raw_spin_unlock_irqrestore+0x70/0x78
+[ 1.154887] hardirqs last disabled at (63096): [<ffff800010b740d8>] _raw_spin_lock_irqsave+0x28/0x80
+[ 1.154893] softirqs last enabled at (62254): [<ffff800010080c88>] _stext+0x488/0x564
+[ 1.154899] softirqs last disabled at (62247): [<ffff8000100fdb3c>] irq_exit+0x114/0x140
+[ 1.154906] CPU: 0 PID: 12 Comm: kworker/0:1 Not tainted 5.6.0-rc6-next-20200318-00094-g45e4089b0bd3 #221
+[ 1.154911] Hardware name: Texas Instruments K3 J721E SoC (DT)
+[ 1.154917] Workqueue: events deferred_probe_work_func
+[ 1.154923] Call trace:
+[ 1.154928] dump_backtrace+0x0/0x190
+[ 1.154933] show_stack+0x14/0x20
+[ 1.154940] dump_stack+0xe0/0x148
+[ 1.154946] ___might_sleep+0x150/0x1f0
+[ 1.154952] __might_sleep+0x4c/0x80
+[ 1.154957] wait_for_completion_timeout+0x40/0x140
+[ 1.154964] ti_sci_set_device_state+0xa0/0x158
+[ 1.154969] ti_sci_cmd_get_device_exclusive+0x14/0x20
+[ 1.154977] ti_sci_dev_start+0x34/0x50
+[ 1.154984] genpd_runtime_resume+0x78/0x1f8
+[ 1.154991] __rpm_callback+0x3c/0x140
+[ 1.154996] rpm_callback+0x20/0x80
+[ 1.155001] rpm_resume+0x568/0x758
+[ 1.155007] __pm_runtime_resume+0x44/0xb0
+[ 1.155013] omap8250_probe+0x2b4/0x508
+[ 1.155019] platform_drv_probe+0x50/0xa0
+[ 1.155023] really_probe+0xd4/0x318
+[ 1.155028] driver_probe_device+0x54/0xe8
+[ 1.155033] __device_attach_driver+0x80/0xb8
+[ 1.155039] bus_for_each_drv+0x74/0xc0
+[ 1.155044] __device_attach+0xdc/0x138
+[ 1.155049] device_initial_probe+0x10/0x18
+[ 1.155053] bus_probe_device+0x98/0xa0
+[ 1.155058] deferred_probe_work_func+0x74/0xb0
+[ 1.155063] process_one_work+0x280/0x6e8
+[ 1.155068] worker_thread+0x48/0x430
+[ 1.155073] kthread+0x108/0x138
+[ 1.155079] ret_from_fork+0x10/0x18
+
+To fix the bug, pm_runtime_enable() needs to be called before any other
+pm_runtime calls.
+
+Reported-by: Tomi Valkeinen <tomi.valkeinen@ti.com>
+Signed-off-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Link: https://lore.kernel.org/r/20200320125200.6772-1-peter.ujfalusi@ti.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/8250/8250_omap.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/tty/serial/8250/8250_omap.c b/drivers/tty/serial/8250/8250_omap.c
+index a019286f8bb65..a7e555e413a69 100644
+--- a/drivers/tty/serial/8250/8250_omap.c
++++ b/drivers/tty/serial/8250/8250_omap.c
+@@ -1227,11 +1227,11 @@ static int omap8250_probe(struct platform_device *pdev)
+ spin_lock_init(&priv->rx_dma_lock);
+
+ device_init_wakeup(&pdev->dev, true);
++ pm_runtime_enable(&pdev->dev);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_set_autosuspend_delay(&pdev->dev, -1);
+
+ pm_runtime_irq_safe(&pdev->dev);
+- pm_runtime_enable(&pdev->dev);
+
+ pm_runtime_get_sync(&pdev->dev);
+
+--
+2.25.1
+
--- /dev/null
+From 549efb29dc6f945a57120d54ffdc5bddaf7f9263 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 19 Mar 2020 16:02:29 +0530
+Subject: serial: 8250_port: Don't service RX FIFO if throttled
+
+From: Vignesh Raghavendra <vigneshr@ti.com>
+
+[ Upstream commit f19c3f6c8109b8bab000afd35580929958e087a9 ]
+
+When a port's throttle callback is called, it should stop pushing any
+more data into the TTY buffer to avoid buffer overflow. This means the
+driver has to stop the HW from receiving more data and assert HW flow
+control. For UARTs with auto HW flow control (such as 8250_omap),
+manual assertion of the flow control line is not possible and the only
+way is to allow the RX FIFO to fill up, thus triggering the auto HW
+flow control logic.
+
+Therefore make sure that the 8250 generic IRQ handler does not drain
+data when the port is stopped (i.e. UART_LSR_DR is unset in
+read_status_mask). Not servicing the RX FIFO would trigger auto HW flow
+control when FIFO occupancy reaches the preset threshold, thus halting
+RX. Since error conditions in the UART_LSR register are cleared just by
+reading the register, data has to be drained in case there are FIFO
+errors, else error information will be lost.
+
+Signed-off-by: Vignesh Raghavendra <vigneshr@ti.com>
+Link: https://lore.kernel.org/r/20200319103230.16867-2-vigneshr@ti.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/8250/8250_port.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/tty/serial/8250/8250_port.c b/drivers/tty/serial/8250/8250_port.c
+index 09f0dc3b967b1..60ca19eca1f63 100644
+--- a/drivers/tty/serial/8250/8250_port.c
++++ b/drivers/tty/serial/8250/8250_port.c
+@@ -1861,6 +1861,7 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+ unsigned char status;
+ unsigned long flags;
+ struct uart_8250_port *up = up_to_u8250p(port);
++ bool skip_rx = false;
+
+ if (iir & UART_IIR_NO_INT)
+ return 0;
+@@ -1869,7 +1870,20 @@ int serial8250_handle_irq(struct uart_port *port, unsigned int iir)
+
+ status = serial_port_in(port, UART_LSR);
+
+- if (status & (UART_LSR_DR | UART_LSR_BI)) {
++ /*
++ * If port is stopped and there are no error conditions in the
++ * FIFO, then don't drain the FIFO, as this may lead to TTY buffer
++ * overflow. Not servicing, RX FIFO would trigger auto HW flow
++ * control when FIFO occupancy reaches preset threshold, thus
++ * halting RX. This only works when auto HW flow control is
++ * available.
++ */
++ if (!(status & (UART_LSR_FIFOE | UART_LSR_BRK_ERROR_BITS)) &&
++ (port->status & (UPSTAT_AUTOCTS | UPSTAT_AUTORTS)) &&
++ !(port->read_status_mask & UART_LSR_DR))
++ skip_rx = true;
++
++ if (status & (UART_LSR_DR | UART_LSR_BI) && !skip_rx) {
+ if (!up->dma || handle_rx_dma(up, iir))
+ status = serial8250_rx_chars(up, status);
+ }
+--
+2.25.1
+
--- /dev/null
+From ea95bd7590fd4a0d55c743c91ad14d226ee06e37 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Apr 2020 11:56:02 +0530
+Subject: serial: uartps: Wait for tx_empty in console setup
+
+From: Raviteja Narayanam <raviteja.narayanam@xilinx.com>
+
+[ Upstream commit 42e11948ddf68b9f799cad8c0ddeab0a39da33e8 ]
+
+On some platforms, the log is corrupted while the console is being
+registered. It is observed that when set_termios is called, there
+are still some bytes in the FIFO to be transmitted.
+
+So, wait for tx_empty inside cdns_uart_console_setup before calling
+set_termios.
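+
+In essence (mirroring the hunk below), console setup now does a bounded
+poll before touching the line settings; TX_TIMEOUT is the driver's
+existing timeout constant:
+
+    unsigned long time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT);
+
+    while (time_before(jiffies, time_out) &&
+           cdns_uart_tx_empty(port) != TIOCSER_TEMT)
+        cpu_relax();
+
+    return uart_set_options(port, co, baud, parity, bits, flow);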
+
+Signed-off-by: Raviteja Narayanam <raviteja.narayanam@xilinx.com>
+Reviewed-by: Shubhrajyoti Datta <shubhrajyoti.datta@xilinx.com>
+Link: https://lore.kernel.org/r/1586413563-29125-2-git-send-email-raviteja.narayanam@xilinx.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/xilinx_uartps.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/tty/serial/xilinx_uartps.c b/drivers/tty/serial/xilinx_uartps.c
+index 31950a38f0fb7..23f9b0cdff086 100644
+--- a/drivers/tty/serial/xilinx_uartps.c
++++ b/drivers/tty/serial/xilinx_uartps.c
+@@ -1236,6 +1236,7 @@ static int cdns_uart_console_setup(struct console *co, char *options)
+ int bits = 8;
+ int parity = 'n';
+ int flow = 'n';
++ unsigned long time_out;
+
+ if (!port->membase) {
+ pr_debug("console on " CDNS_UART_TTY_NAME "%i not present\n",
+@@ -1246,6 +1247,13 @@ static int cdns_uart_console_setup(struct console *co, char *options)
+ if (options)
+ uart_parse_options(options, &baud, &parity, &bits, &flow);
+
++ /* Wait for tx_empty before setting up the console */
++ time_out = jiffies + usecs_to_jiffies(TX_TIMEOUT);
++
++ while (time_before(jiffies, time_out) &&
++ cdns_uart_tx_empty(port) != TIOCSER_TEMT)
++ cpu_relax();
++
+ return uart_set_options(port, co, baud, parity, bits, flow);
+ }
+
+--
+2.25.1
+
--- /dev/null
+selinux-allow-labeling-before-policy-is-loaded.patch
+media-mc-device.c-fix-memleak-in-media_device_regist.patch
+dma-fence-serialise-signal-enabling-dma_fence_enable.patch
+ath10k-fix-array-out-of-bounds-access.patch
+ath10k-fix-memory-leak-for-tpc_stats_final.patch
+mm-fix-double-page-fault-on-arm64-if-pte_af-is-clear.patch
+scsi-aacraid-fix-illegal-io-beyond-last-lba.patch
+m68k-q40-fix-info-leak-in-rtc_ioctl.patch
+gma-gma500-fix-a-memory-disclosure-bug-due-to-uninit.patch
+asoc-kirkwood-fix-irq-error-handling.patch
+media-smiapp-fix-error-handling-at-nvm-reading.patch
+ata-sata_mv-avoid-trigerrable-bug_on.patch
+leds-mlxreg-fix-possible-buffer-overflow.patch
+pm-devfreq-tegra30-fix-integer-overflow-on-cpu-s-fre.patch
+scsi-fnic-fix-use-after-free.patch
+scsi-lpfc-fix-kernel-crash-at-lpfc_nvme_info_show-du.patch
+net-silence-data-races-on-sk_backlog.tail.patch
+clk-ti-adpll-allocate-room-for-terminating-null.patch
+drm-amdgpu-powerplay-fix-avfs-handling-with-custom-p.patch
+mtd-cfi_cmdset_0002-don-t-free-cfi-cfiq-in-error-pat.patch
+mfd-mfd-core-protect-against-null-call-back-function.patch
+drm-amdgpu-powerplay-smu7-fix-avfs-handling-with-cus.patch
+tpm_crb-fix-ftpm-on-amd-zen-cpus.patch
+tracing-adding-null-checks-for-trace_array-descripto.patch
+bcache-fix-a-lost-wake-up-problem-caused-by-mca_cann.patch
+dmaengine-mediatek-hsdma_probe-fixed-a-memory-leak-w.patch
+rdma-qedr-fix-potential-use-after-free.patch
+rdma-i40iw-fix-potential-use-after-free.patch
+fix-dget_parent-fastpath-race.patch
+xfs-fix-attr-leaf-header-freemap.size-underflow.patch
+rdma-iw_cgxb4-fix-an-error-handling-path-in-c4iw_con.patch
+ubi-fix-producing-anchor-pebs.patch
+mmc-core-fix-size-overflow-for-mmc-partitions.patch
+gfs2-clean-up-iopen-glock-mess-in-gfs2_create_inode.patch
+scsi-pm80xx-cleanup-command-when-a-reset-times-out.patch
+debugfs-fix-debug_fs-debugfs_create_automount.patch
+cifs-properly-process-smb3-lease-breaks.patch
+asoc-max98090-remove-msleep-in-pll-unlocked-workarou.patch
+kernel-sys.c-avoid-copying-possible-padding-bytes-in.patch
+kvm-arm-arm64-vgic-fix-potential-double-free-dist-sp.patch
+xfs-fix-log-reservation-overflows-when-allocating-la.patch
+neigh_stat_seq_next-should-increase-position-index.patch
+rt_cpu_seq_next-should-increase-position-index.patch
+ipv6_route_seq_next-should-increase-position-index.patch
+seqlock-require-write_once-surrounding-raw_seqcount_.patch
+media-ti-vpe-cal-restrict-dma-to-avoid-memory-corrup.patch
+sctp-move-trace_sctp_probe_path-into-sctp_outq_sack.patch
+acpi-ec-reference-count-query-handlers-under-lock.patch
+scsi-ufs-make-ufshcd_add_command_trace-easier-to-rea.patch
+scsi-ufs-fix-a-race-condition-in-the-tracing-code.patch
+dmaengine-zynqmp_dma-fix-burst-length-configuration.patch
+s390-cpum_sf-use-kzalloc-and-minor-changes.patch
+powerpc-eeh-only-dump-stack-once-if-an-mmio-loop-is-.patch
+bluetooth-btrtl-use-kvmalloc-for-fw-allocations.patch
+tracing-set-kernel_stack-s-caller-size-properly.patch
+ext4-make-dioread_nolock-the-default.patch
+arm-8948-1-prevent-oob-access-in-stacktrace.patch
+ar5523-add-usb-id-of-smcwusbt-g2-wireless-adapter.patch
+ceph-ensure-we-have-a-new-cap-before-continuing-in-f.patch
+selftests-ftrace-fix-glob-selftest.patch
+tools-power-x86-intel_pstate_tracer-changes-for-pyth.patch
+bluetooth-fix-refcount-use-after-free-issue.patch
+mm-swapfile.c-swap_next-should-increase-position-ind.patch
+mm-pagewalk-fix-termination-condition-in-walk_pte_ra.patch
+bluetooth-prefetch-channel-before-killing-sock.patch
+kvm-fix-overflow-of-zero-page-refcount-with-ksm-runn.patch
+alsa-hda-clear-rirb-status-before-reading-wp.patch
+skbuff-fix-a-data-race-in-skb_queue_len.patch
+audit-config_change-don-t-log-internal-bookkeeping-a.patch
+selinux-sel_avc_get_stat_idx-should-increase-positio.patch
+scsi-lpfc-fix-rq-buffer-leakage-when-no-iocbs-availa.patch
+scsi-lpfc-fix-coverity-errors-in-fmdi-attribute-hand.patch
+drm-omap-fix-possible-object-reference-leak.patch
+clk-stratix10-use-do_div-for-64-bit-calculation.patch
+crypto-chelsio-this-fixes-the-kernel-panic-which-occ.patch
+mt76-clear-skb-pointers-from-rx-aggregation-reorder-.patch
+alsa-usb-audio-don-t-create-a-mixer-element-with-bog.patch
+perf-test-fix-test-trace-probe_vfs_getname.sh-on-s39.patch
+rdma-rxe-fix-configuration-of-atomic-queue-pair-attr.patch
+kvm-x86-fix-incorrect-comparison-in-trace-event.patch
+dmaengine-stm32-mdma-use-vchan_terminate_vdesc-in-.t.patch
+media-staging-imx-missing-assignment-in-imx_media_ca.patch
+x86-pkeys-add-check-for-pkey-overflow.patch
+bpf-remove-recursion-prevention-from-rcu-free-callba.patch
+dmaengine-stm32-dma-use-vchan_terminate_vdesc-in-.te.patch
+dmaengine-tegra-apb-prevent-race-conditions-on-chann.patch
+drm-amd-display-dal_ddc_i2c_payloads_create-can-fail.patch
+firmware-arm_sdei-use-cpus_read_lock-to-avoid-races-.patch
+random-fix-data-races-at-timer_rand_state.patch
+bus-hisi_lpc-fixup-io-ports-addresses-to-avoid-use-a.patch
+media-go7007-fix-urb-type-for-interrupt-handling.patch
+bluetooth-guard-against-controllers-sending-zero-d-e.patch
+timekeeping-prevent-32bit-truncation-in-scale64_chec.patch
+ext4-fix-a-data-race-at-inode-i_disksize.patch
+perf-jevents-fix-leak-of-mapfile-memory.patch
+mm-avoid-data-corruption-on-cow-fault-into-pfn-mappe.patch
+drm-amdgpu-increase-atombios-cmd-timeout.patch
+drm-amd-display-stop-if-retimer-is-not-available.patch
+ath10k-use-kzalloc-to-read-for-ath10k_sdio_hif_diag_.patch
+scsi-aacraid-disabling-tm-path-and-only-processing-i.patch
+bluetooth-l2cap-handle-l2cap-config-request-during-o.patch
+media-tda10071-fix-unsigned-sign-extension-overflow.patch
+xfs-don-t-ever-return-a-stale-pointer-from-__xfs_dir.patch
+xfs-mark-dir-corrupt-when-lookup-by-hash-fails.patch
+ext4-mark-block-bitmap-corrupted-when-found-instead-.patch
+tpm-ibmvtpm-wait-for-buffer-to-be-set-before-proceed.patch
+rtc-sa1100-fix-possible-race-condition.patch
+rtc-ds1374-fix-possible-race-condition.patch
+nfsd-don-t-add-locks-to-closed-or-closing-open-state.patch
+rdma-cm-remove-a-race-freeing-timewait_info.patch
+kvm-ppc-book3s-hv-treat-tm-related-invalid-form-inst.patch
+drm-msm-fix-leaks-if-initialization-fails.patch
+drm-msm-a5xx-always-set-an-opp-supported-hardware-va.patch
+tracing-use-address-of-operator-on-section-symbols.patch
+thermal-rcar_thermal-handle-probe-error-gracefully.patch
+perf-parse-events-fix-3-use-after-frees-found-with-c.patch
+serial-8250_port-don-t-service-rx-fifo-if-throttled.patch
+serial-8250_omap-fix-sleeping-function-called-from-i.patch
+serial-8250-8250_omap-terminate-dma-before-pushing-d.patch
+perf-cpumap-fix-snprintf-overflow-check.patch
+cpufreq-powernv-fix-frame-size-overflow-in-powernv_c.patch
+tools-gpio-hammer-avoid-potential-overflow-in-main.patch
+nvme-multipath-do-not-reset-on-unknown-status.patch
+nvme-fix-controller-creation-races-with-teardown-flo.patch
+rdma-rxe-set-sys_image_guid-to-be-aligned-with-hw-ib.patch
+scsi-hpsa-correct-race-condition-in-offload-enabled.patch
+sunrpc-fix-a-potential-buffer-overflow-in-svc_print_.patch
+svcrdma-fix-leak-of-transport-addresses.patch
+pci-use-ioremap-not-phys_to_virt-for-platform-rom.patch
+ubifs-fix-out-of-bounds-memory-access-caused-by-abno.patch
+alsa-usb-audio-fix-case-when-usb-midi-interface-has-.patch
+pci-pciehp-fix-msi-interrupt-race.patch
+nfs-fix-races-nfs_page_group_destroy-vs-nfs_destroy_.patch
+mm-kmemleak.c-use-address-of-operator-on-section-sym.patch
+mm-filemap.c-clear-page-error-before-actual-read.patch
+mm-vmscan.c-fix-data-races-using-kswapd_classzone_id.patch
+nvmet-rdma-fix-double-free-of-rdma-queue.patch
+mm-mmap.c-initialize-align_offset-explicitly-for-vm_.patch
+scsi-qedi-fix-termination-timeouts-in-session-logout.patch
+serial-uartps-wait-for-tx_empty-in-console-setup.patch
+kvm-remove-create_irqchip-set_pit2-race.patch
+perf-stat-force-error-in-fallback-on-k-events.patch
+bdev-reduce-time-holding-bd_mutex-in-sync-in-blkdev_.patch
+drivers-char-tlclk.c-avoid-data-race-between-init-an.patch
+kvm-arm64-vgic-its-fix-memory-leak-on-the-error-path.patch
+net-openvswitch-use-u64-for-meter-bucket.patch
+scsi-aacraid-fix-error-handling-paths-in-aac_probe_o.patch
+staging-r8188eu-avoid-skb_clone-for-amsdu-to-msdu-co.patch
+sparc64-vcc-fix-error-return-code-in-vcc_probe.patch
+arm64-cpufeature-relax-checks-for-aarch32-support-at.patch
+dt-bindings-sound-wm8994-correct-required-supplies-b.patch
+atm-fix-a-memory-leak-of-vcc-user_back.patch
+perf-parse-events-fix-memory-leaks-found-on-parse_ev.patch
+perf-mem2node-avoid-double-free-related-to-realloc.patch
+power-supply-max17040-correct-voltage-reading.patch
+phy-samsung-s5pv210-usb2-add-delay-after-reset.patch
+bluetooth-handle-inquiry-cancel-error-after-inquiry-.patch
+usb-ehci-ehci-mv-fix-error-handling-in-mv_ehci_probe.patch
+tipc-fix-memory-leak-in-service-subscripting.patch
+tty-serial-samsung-correct-clock-selection-logic.patch
+alsa-hda-fix-potential-race-in-unsol-event-handler.patch
+powerpc-traps-make-unrecoverable-nmis-die-instead-of.patch
+fuse-don-t-check-refcount-after-stealing-page.patch
+usb-ehci-ehci-mv-fix-less-than-zero-comparison-of-an.patch
+scsi-cxlflash-fix-error-return-code-in-cxlflash_prob.patch
+arm64-cpufeature-drop-tracefilt-feature-exposure-fro.patch
+e1000-do-not-perform-reset-in-reset_task-if-we-are-a.patch
+drm-nouveau-debugfs-fix-runtime-pm-imbalance-on-erro.patch
+drm-nouveau-fix-runtime-pm-imbalance-on-error.patch
+drm-nouveau-dispnv50-fix-runtime-pm-imbalance-on-err.patch
+printk-handle-blank-console-arguments-passed-in.patch
+usb-dwc3-increase-timeout-for-cmdact-cleared-by-devi.patch
+btrfs-don-t-force-read-only-after-error-in-drop-snap.patch
+vfio-pci-fix-memory-leaks-of-eventfd-ctx.patch
+perf-parse-events-fix-incorrect-conversion-of-if-fre.patch
+perf-evsel-fix-2-memory-leaks.patch
+perf-trace-fix-the-selection-for-architectures-to-ge.patch
+perf-stat-fix-duration_time-value-for-higher-interva.patch
+perf-util-fix-memory-leak-of-prefix_if_not_in.patch
+perf-metricgroup-free-metric_events-on-error.patch
+perf-kcore_copy-fix-module-map-when-there-are-no-mod.patch
+asoc-img-i2s-out-fix-runtime-pm-imbalance-on-error.patch
+wlcore-fix-runtime-pm-imbalance-in-wl1271_tx_work.patch
+wlcore-fix-runtime-pm-imbalance-in-wlcore_regdomain_.patch
+mtd-rawnand-omap_elm-fix-runtime-pm-imbalance-on-err.patch
+pci-tegra-fix-runtime-pm-imbalance-on-error.patch
+ceph-fix-potential-race-in-ceph_check_caps.patch
+mm-swap_state-fix-a-data-race-in-swapin_nr_pages.patch
+rapidio-avoid-data-race-between-file-operation-callb.patch
+mtd-parser-cmdline-support-mtd-names-containing-one-.patch
+x86-speculation-mds-mark-mds_user_clear_cpu_buffers-.patch
+vfio-pci-clear-error-and-request-eventfd-ctx-after-r.patch
+cifs-fix-double-add-page-to-memcg-when-cifs_readpage.patch
+nvme-fix-possible-deadlock-when-i-o-is-blocked.patch
+scsi-libfc-handling-of-extra-kref.patch
+scsi-libfc-skip-additional-kref-updating-work-event.patch
+selftests-x86-syscall_nt-clear-weird-flags-after-eac.patch
+vfio-pci-fix-racy-on-error-and-request-eventfd-ctx.patch
+btrfs-qgroup-fix-data-leak-caused-by-race-between-wr.patch
+ubi-fastmap-free-unused-fastmap-anchor-peb-during-de.patch
+perf-parse-events-use-strcmp-to-compare-the-pmu-name.patch
+net-openvswitch-use-div_u64-for-64-by-32-divisions.patch
+nvme-explicitly-update-mpath-disk-capacity-on-revali.patch
+asoc-wm8994-skip-setting-of-the-wm8994_micbias-regis.patch
+asoc-wm8994-ensure-the-device-is-resumed-in-wm89xx_m.patch
+asoc-intel-bytcr_rt5640-add-quirk-for-mpman-converte.patch
+risc-v-take-text_mutex-in-ftrace_init_nop.patch
+s390-init-add-missing-__init-annotations.patch
+lockdep-fix-order-in-trace_hardirqs_off_caller.patch
+drm-amdkfd-fix-a-memory-leak-issue.patch
+i2c-core-call-i2c_acpi_install_space_handler-before-.patch
+objtool-fix-noreturn-detection-for-ignored-functions.patch
--- /dev/null
+From 3e7feae2dfb013b44b8be7c400de32af7772b1c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Feb 2020 13:40:29 -0500
+Subject: skbuff: fix a data race in skb_queue_len()
+
+From: Qian Cai <cai@lca.pw>
+
+[ Upstream commit 86b18aaa2b5b5bb48e609cd591b3d2d0fdbe0442 ]
+
+sk_buff.qlen can be accessed concurrently as noticed by KCSAN,
+
+ BUG: KCSAN: data-race in __skb_try_recv_from_queue / unix_dgram_sendmsg
+
+ read to 0xffff8a1b1d8a81c0 of 4 bytes by task 5371 on cpu 96:
+ unix_dgram_sendmsg+0x9a9/0xb70 include/linux/skbuff.h:1821
+ net/unix/af_unix.c:1761
+ ____sys_sendmsg+0x33e/0x370
+ ___sys_sendmsg+0xa6/0xf0
+ __sys_sendmsg+0x69/0xf0
+ __x64_sys_sendmsg+0x51/0x70
+ do_syscall_64+0x91/0xb47
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+ write to 0xffff8a1b1d8a81c0 of 4 bytes by task 1 on cpu 99:
+ __skb_try_recv_from_queue+0x327/0x410 include/linux/skbuff.h:2029
+ __skb_try_recv_datagram+0xbe/0x220
+ unix_dgram_recvmsg+0xee/0x850
+ ____sys_recvmsg+0x1fb/0x210
+ ___sys_recvmsg+0xa2/0xf0
+ __sys_recvmsg+0x66/0xf0
+ __x64_sys_recvmsg+0x51/0x70
+ do_syscall_64+0x91/0xb47
+ entry_SYSCALL_64_after_hwframe+0x49/0xbe
+
+Since only the read side is lockless, load tearing could introduce a
+logic bug in unix_recvq_full(). Fix it by adding
+a lockless variant of skb_queue_len() and unix_recvq_full() where
+READ_ONCE() is on the read while WRITE_ONCE() is on the write similar to
+the commit d7d16a89350a ("net: add skb_queue_empty_lockless()").
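+
+As a minimal illustration of the pattern (the struct and helper names
+below are made up for the example, they are not part of this patch):
+the writer still holds the queue lock but marks its store, while the
+lockless reader marks its load, so neither side can observe a torn
+value.
+
+    struct my_queue {
+        unsigned int qlen;              /* written under the queue lock */
+    };
+
+    static void my_queue_dec(struct my_queue *q)
+    {
+        WRITE_ONCE(q->qlen, q->qlen - 1);
+    }
+
+    static unsigned int my_queue_len_lockless(const struct my_queue *q)
+    {
+        return READ_ONCE(q->qlen);      /* safe without the lock */
+    }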
+
+Signed-off-by: Qian Cai <cai@lca.pw>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/skbuff.h | 14 +++++++++++++-
+ net/unix/af_unix.c | 11 +++++++++--
+ 2 files changed, 22 insertions(+), 3 deletions(-)
+
+diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
+index cbc0294f39899..703ce71caeacb 100644
+--- a/include/linux/skbuff.h
++++ b/include/linux/skbuff.h
+@@ -1688,6 +1688,18 @@ static inline __u32 skb_queue_len(const struct sk_buff_head *list_)
+ return list_->qlen;
+ }
+
++/**
++ * skb_queue_len_lockless - get queue length
++ * @list_: list to measure
++ *
++ * Return the length of an &sk_buff queue.
++ * This variant can be used in lockless contexts.
++ */
++static inline __u32 skb_queue_len_lockless(const struct sk_buff_head *list_)
++{
++ return READ_ONCE(list_->qlen);
++}
++
+ /**
+ * __skb_queue_head_init - initialize non-spinlock portions of sk_buff_head
+ * @list: queue to initialize
+@@ -1895,7 +1907,7 @@ static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
+ {
+ struct sk_buff *next, *prev;
+
+- list->qlen--;
++ WRITE_ONCE(list->qlen, list->qlen - 1);
+ next = skb->next;
+ prev = skb->prev;
+ skb->next = skb->prev = NULL;
+diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
+index 2318e2e2748f4..2020306468af4 100644
+--- a/net/unix/af_unix.c
++++ b/net/unix/af_unix.c
+@@ -192,11 +192,17 @@ static inline int unix_may_send(struct sock *sk, struct sock *osk)
+ return unix_peer(osk) == NULL || unix_our_peer(sk, osk);
+ }
+
+-static inline int unix_recvq_full(struct sock const *sk)
++static inline int unix_recvq_full(const struct sock *sk)
+ {
+ return skb_queue_len(&sk->sk_receive_queue) > sk->sk_max_ack_backlog;
+ }
+
++static inline int unix_recvq_full_lockless(const struct sock *sk)
++{
++ return skb_queue_len_lockless(&sk->sk_receive_queue) >
++ READ_ONCE(sk->sk_max_ack_backlog);
++}
++
+ struct sock *unix_peer_get(struct sock *s)
+ {
+ struct sock *peer;
+@@ -1788,7 +1794,8 @@ restart_locked:
+ * - unix_peer(sk) == sk by time of get but disconnected before lock
+ */
+ if (other != sk &&
+- unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
++ unlikely(unix_peer(other) != sk &&
++ unix_recvq_full_lockless(other))) {
+ if (timeo) {
+ timeo = unix_wait_for_peer(other, timeo);
+
+--
+2.25.1
+
--- /dev/null
+From 26117cea05a0428361fe170a04d2654c29ce0803 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2020 12:24:15 +0000
+Subject: sparc64: vcc: Fix error return code in vcc_probe()
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+[ Upstream commit ff62255a2a5c1228a28f2bb063646f948115a309 ]
+
+Return the negative error code -ENOMEM from this error handling path
+instead of 0, as is done elsewhere in this function.
+
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Link: https://lore.kernel.org/r/20200427122415.47416-1-weiyongjun1@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/vcc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/tty/vcc.c b/drivers/tty/vcc.c
+index 58b454c34560a..10a832a2135e2 100644
+--- a/drivers/tty/vcc.c
++++ b/drivers/tty/vcc.c
+@@ -604,6 +604,7 @@ static int vcc_probe(struct vio_dev *vdev, const struct vio_device_id *id)
+ port->index = vcc_table_add(port);
+ if (port->index == -1) {
+ pr_err("VCC: no more TTY indices left for allocation\n");
++ rv = -ENOMEM;
+ goto free_ldc;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From 27c12c81a1b786dabe1d3675b1eac8893905d516 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Apr 2020 22:14:04 +0300
+Subject: staging:r8188eu: avoid skb_clone for amsdu to msdu conversion
+
+From: Ivan Safonov <insafonov@gmail.com>
+
+[ Upstream commit 628cbd971a927abe6388d44320e351c337b331e4 ]
+
+skb clones share the same data buffer, so the tail of one skb gets
+corrupted by the beginning of the next skb.
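+
+For illustration (a hypothetical snippet, not taken from this patch):
+skb_clone() only duplicates the sk_buff metadata, so both clones keep
+pointing at the same payload memory, which is why adjusting data/tail on
+one subframe clobbers the next one. A fresh allocation, as used by the
+fix, gets its own buffer:
+
+    struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
+    /* clone->head == skb->head: the payload is shared, not copied */
+
+    struct sk_buff *copy = dev_alloc_skb(len + 12);
+    if (copy) {
+        skb_reserve(copy, 12);
+        skb_put_data(copy, payload, len);   /* private buffer */
+    }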
+
+Signed-off-by: Ivan Safonov <insafonov@gmail.com>
+Link: https://lore.kernel.org/r/20200423191404.12028-1-insafonov@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/staging/rtl8188eu/core/rtw_recv.c | 19 ++++++-------------
+ 1 file changed, 6 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
+index 17b4b9257b495..0ddf41b5a734a 100644
+--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
++++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
+@@ -1535,21 +1535,14 @@ static int amsdu_to_msdu(struct adapter *padapter, struct recv_frame *prframe)
+
+ /* Allocate new skb for releasing to upper layer */
+ sub_skb = dev_alloc_skb(nSubframe_Length + 12);
+- if (sub_skb) {
+- skb_reserve(sub_skb, 12);
+- skb_put_data(sub_skb, pdata, nSubframe_Length);
+- } else {
+- sub_skb = skb_clone(prframe->pkt, GFP_ATOMIC);
+- if (sub_skb) {
+- sub_skb->data = pdata;
+- sub_skb->len = nSubframe_Length;
+- skb_set_tail_pointer(sub_skb, nSubframe_Length);
+- } else {
+- DBG_88E("skb_clone() Fail!!! , nr_subframes=%d\n", nr_subframes);
+- break;
+- }
++ if (!sub_skb) {
++ DBG_88E("dev_alloc_skb() Fail!!! , nr_subframes=%d\n", nr_subframes);
++ break;
+ }
+
++ skb_reserve(sub_skb, 12);
++ skb_put_data(sub_skb, pdata, nSubframe_Length);
++
+ subframes[nr_subframes++] = sub_skb;
+
+ if (nr_subframes >= MAX_SUBFRAME_COUNT) {
+--
+2.25.1
+
--- /dev/null
+From d971170ce50cb41e45a516d94fed4b489fce3d06 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 27 Mar 2020 17:15:39 +0100
+Subject: SUNRPC: Fix a potential buffer overflow in 'svc_print_xprts()'
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit b25b60d7bfb02a74bc3c2d998e09aab159df8059 ]
+
+'maxlen' is the total size of the destination buffer. There is only one
+caller and this value is 256.
+
+When we compute the size already used and what we would like to add to
+the buffer, the trailing NUL character is not taken into account.
+However, this trailing character will be added by 'strcat' once we
+have checked that there is enough space.
+
+So there is an off-by-one issue and 1 byte of the stack could be
+erroneously overwritten.
+
+Take the trailing NUL into account when checking whether there is
+enough space in the destination buffer.
+
+While at it, also replace a 'sprintf' with a safer 'snprintf', check for
+output truncation and avoid a superfluous 'strlen'.
+
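+A hypothetical sketch of the fixed check (illustrative names; see the
+hunk below for the real code): the '>=' comparisons reserve room for
+the NUL byte that strcat() will copy into 'buf'.
+
+    static int append_entry(char *buf, int maxlen, int len,
+                            const char *name, int payload)
+    {
+        char tmp[64];
+        int slen;
+
+        slen = snprintf(tmp, sizeof(tmp), "%s %d\n", name, payload);
+        if (slen >= sizeof(tmp) || len + slen >= maxlen)
+            return len;                 /* truncated or no space left */
+        strcat(buf, tmp);
+        return len + slen;
+    }
+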
+Fixes: dc9a16e49dbba ("svc: Add /proc/sys/sunrpc/transport files")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+[ cel: very minor fix to documenting comment ]
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/svc_xprt.c | 19 ++++++++++++++-----
+ 1 file changed, 14 insertions(+), 5 deletions(-)
+
+diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
+index c8ee8e801edb8..709c082dc9059 100644
+--- a/net/sunrpc/svc_xprt.c
++++ b/net/sunrpc/svc_xprt.c
+@@ -103,8 +103,17 @@ void svc_unreg_xprt_class(struct svc_xprt_class *xcl)
+ }
+ EXPORT_SYMBOL_GPL(svc_unreg_xprt_class);
+
+-/*
+- * Format the transport list for printing
++/**
++ * svc_print_xprts - Format the transport list for printing
++ * @buf: target buffer for formatted address
++ * @maxlen: length of target buffer
++ *
++ * Fills in @buf with a string containing a list of transport names, each name
++ * terminated with '\n'. If the buffer is too small, some entries may be
++ * missing, but it is guaranteed that all lines in the output buffer are
++ * complete.
++ *
++ * Returns positive length of the filled-in string.
+ */
+ int svc_print_xprts(char *buf, int maxlen)
+ {
+@@ -117,9 +126,9 @@ int svc_print_xprts(char *buf, int maxlen)
+ list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) {
+ int slen;
+
+- sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload);
+- slen = strlen(tmpstr);
+- if (len + slen > maxlen)
++ slen = snprintf(tmpstr, sizeof(tmpstr), "%s %d\n",
++ xcl->xcl_name, xcl->xcl_max_payload);
++ if (slen >= sizeof(tmpstr) || len + slen >= maxlen)
+ break;
+ len += slen;
+ strcat(buf, tmpstr);
+--
+2.25.1
+
--- /dev/null
+From 89eca1d72abd311fdaab4e12cd9af461101b9873 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 24 Mar 2020 16:53:59 -0400
+Subject: svcrdma: Fix leak of transport addresses
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 1a33d8a284b1e85e03b8c7b1ea8fb985fccd1d71 ]
+
+Kernel memory leak detected:
+
+unreferenced object 0xffff888849cdf480 (size 8):
+ comm "kworker/u8:3", pid 2086, jiffies 4297898756 (age 4269.856s)
+ hex dump (first 8 bytes):
+ 30 00 cd 49 88 88 ff ff 0..I....
+ backtrace:
+ [<00000000acfc370b>] __kmalloc_track_caller+0x137/0x183
+ [<00000000a2724354>] kstrdup+0x2b/0x43
+ [<0000000082964f84>] xprt_rdma_format_addresses+0x114/0x17d [rpcrdma]
+ [<00000000dfa6ed00>] xprt_setup_rdma_bc+0xc0/0x10c [rpcrdma]
+ [<0000000073051a83>] xprt_create_transport+0x3f/0x1a0 [sunrpc]
+ [<0000000053531a8e>] rpc_create+0x118/0x1cd [sunrpc]
+ [<000000003a51b5f8>] setup_callback_client+0x1a5/0x27d [nfsd]
+ [<000000001bd410af>] nfsd4_process_cb_update.isra.7+0x16c/0x1ac [nfsd]
+ [<000000007f4bbd56>] nfsd4_run_cb_work+0x4c/0xbd [nfsd]
+ [<0000000055c5586b>] process_one_work+0x1b2/0x2fe
+ [<00000000b1e3e8ef>] worker_thread+0x1a6/0x25a
+ [<000000005205fb78>] kthread+0xf6/0xfb
+ [<000000006d2dc057>] ret_from_fork+0x3a/0x50
+
+Introduce a call to xprt_rdma_free_addresses() similar to the way
+that the TCP backchannel releases a transport's peer address
+strings.
+
+Fixes: 5d252f90a800 ("svcrdma: Add class for RDMA backwards direction transport")
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/xprtrdma/svc_rdma_backchannel.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+index b9827665ff355..d183d4aee822c 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_backchannel.c
+@@ -256,6 +256,7 @@ xprt_rdma_bc_put(struct rpc_xprt *xprt)
+ {
+ dprintk("svcrdma: %s: xprt %p\n", __func__, xprt);
+
++ xprt_rdma_free_addresses(xprt);
+ xprt_free(xprt);
+ module_put(THIS_MODULE);
+ }
+--
+2.25.1
+
--- /dev/null
+From 29f0f3c4b1d3249ecf04be2cb2ba490019768d86 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 10 Mar 2020 12:47:09 +0100
+Subject: thermal: rcar_thermal: Handle probe error gracefully
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+
+[ Upstream commit 39056e8a989ef52486e063e34b4822b341e47b0e ]
+
+If the common register memory resource is not available, the driver
+needs to fail gracefully and still disable PM. Instead of returning the
+error directly, store it in ret and use the already existing error path.
+
+Signed-off-by: Niklas Söderlund <niklas.soderlund+renesas@ragnatech.se>
+Reviewed-by: Geert Uytterhoeven <geert+renesas@glider.be>
+Signed-off-by: Daniel Lezcano <daniel.lezcano@linaro.org>
+Link: https://lore.kernel.org/r/20200310114709.1483860-1-niklas.soderlund+renesas@ragnatech.se
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/thermal/rcar_thermal.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
+index 4dc30e7890f6c..140386d7c75a3 100644
+--- a/drivers/thermal/rcar_thermal.c
++++ b/drivers/thermal/rcar_thermal.c
+@@ -505,8 +505,10 @@ static int rcar_thermal_probe(struct platform_device *pdev)
+ res = platform_get_resource(pdev, IORESOURCE_MEM,
+ mres++);
+ common->base = devm_ioremap_resource(dev, res);
+- if (IS_ERR(common->base))
+- return PTR_ERR(common->base);
++ if (IS_ERR(common->base)) {
++ ret = PTR_ERR(common->base);
++ goto error_unregister;
++ }
+
+ idle = 0; /* polling delay is not needed */
+ }
+--
+2.25.1
+
--- /dev/null
+From ba77dec56773a5bc3b0bda32e4e3098efc46e6d5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Jan 2020 18:05:23 +0800
+Subject: timekeeping: Prevent 32bit truncation in scale64_check_overflow()
+
+From: Wen Yang <wenyang@linux.alibaba.com>
+
+[ Upstream commit 4cbbc3a0eeed675449b1a4d080008927121f3da3 ]
+
+While unlikely, the divisor in scale64_check_overflow() could be wider
+than 32 bits. do_div() truncates the divisor to 32 bits, at least on
+32-bit platforms.
+
+Use div64_u64() instead to avoid the truncation to 32 bits.
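+
+A hypothetical illustration of the difference, with values chosen so
+that the divisor does not fit in 32 bits:
+
+    u64 num = 0x200000002ULL;
+    u64 div = 0x100000001ULL;           /* low 32 bits are just 1 */
+
+    /*
+     * do_div(num, div) would divide by the truncated 32-bit value
+     * (1 here) and leave num unchanged; div64_u64() uses the full
+     * 64-bit divisor and yields 2.
+     */
+    u64 quot = div64_u64(num, div);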
+
+[ tglx: Massaged changelog ]
+
+Signed-off-by: Wen Yang <wenyang@linux.alibaba.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Link: https://lkml.kernel.org/r/20200120100523.45656-1-wenyang@linux.alibaba.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/time/timekeeping.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
+index 81ee5b83c9200..c66fd11d94bc4 100644
+--- a/kernel/time/timekeeping.c
++++ b/kernel/time/timekeeping.c
+@@ -1004,9 +1004,8 @@ static int scale64_check_overflow(u64 mult, u64 div, u64 *base)
+ ((int)sizeof(u64)*8 - fls64(mult) < fls64(rem)))
+ return -EOVERFLOW;
+ tmp *= mult;
+- rem *= mult;
+
+- do_div(rem, div);
++ rem = div64_u64(rem * mult, div);
+ *base = tmp + rem;
+ return 0;
+ }
+--
+2.25.1
+
--- /dev/null
+From 9f664a79c018ae25a460c584fd2a4b4d2835d422 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 May 2020 19:33:17 +0700
+Subject: tipc: fix memory leak in service subscripting
+
+From: Tuong Lien <tuong.t.lien@dektech.com.au>
+
+[ Upstream commit 0771d7df819284d46cf5cfb57698621b503ec17f ]
+
+Upon receipt of a service subscription request from a user via a
+topology connection, a 'sub' object is allocated in the kernel, so that
+an event for the service, if any, can later be sent to that user.
+Also, in case of any failure, the connection will be shut down and all
+the pertaining 'sub' objects will be freed.
+
+However, there is a race condition as follows, resulting in a memory leak:
+
+ receive-work connection send-work
+ | | |
+ sub-1 |<------//-------| |
+ sub-2 |<------//-------| |
+ | |<---------------| evt for sub-x
+ sub-3 |<------//-------| |
+ : : :
+ : : :
+ | /--------| |
+ | | * peer closed |
+ | | | |
+ | | |<-------X-------| evt for sub-y
+ | | |<===============|
+ sub-n |<------/ X shutdown |
+ -> orphan | |
+
+That is, the 'receive-work' may get the last subscription request while
+the 'send-work' is shutting down the connection due to peer close.
+
+We have a 'lock' on the connection, so the two actions cannot be carried
+out simultaneously. If the last subscription, e.g. 'sub-n', is allocated
+before the 'send-work' closes the connection, there is no issue at all,
+as the 'sub' objects will be freed. Otherwise, the last subscription
+becomes an orphan since the connection was closed and all references
+have been released.
+
+This commit fixes the issue by adding a test of whether the connection
+is still in the 'connected' state right after we obtain the connection
+lock: if so, a subscription object can be created as usual, otherwise
+the request is ignored.
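+
+In pattern form (the names below are illustrative, not the actual TIPC
+symbols): the connection state is re-checked only after the lock has
+been taken, so a request racing with shutdown is dropped instead of
+leaking a subscription.
+
+    static int conn_rcv_subscribe(struct conn *con, struct sub_req *req)
+    {
+        int ret = -ENOTCONN;
+
+        spin_lock(&con->lock);
+        if (con->state == CONNECTED)    /* re-check under the lock */
+            ret = create_subscription(con, req);
+        spin_unlock(&con->lock);
+        return ret;
+    }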
+
+Acked-by: Ying Xue <ying.xue@windriver.com>
+Acked-by: Jon Maloy <jmaloy@redhat.com>
+Reported-by: Thang Ngo <thang.h.ngo@dektech.com.au>
+Signed-off-by: Tuong Lien <tuong.t.lien@dektech.com.au>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/topsrv.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
+index 41f4464ac6cc5..ec9a7137d2677 100644
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -407,7 +407,9 @@ static int tipc_conn_rcv_from_sock(struct tipc_conn *con)
+ return -EWOULDBLOCK;
+ if (ret == sizeof(s)) {
+ read_lock_bh(&sk->sk_callback_lock);
+- ret = tipc_conn_rcv_sub(srv, con, &s);
++ /* RACE: the connection can be closed in the meantime */
++ if (likely(connected(con)))
++ ret = tipc_conn_rcv_sub(srv, con, &s);
+ read_unlock_bh(&sk->sk_callback_lock);
+ if (!ret)
+ return 0;
+--
+2.25.1
+
--- /dev/null
+From e47e21e3672e5e53d0c4de97ca061eb0df6b8a32 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Mar 2020 15:50:21 +0100
+Subject: tools: gpio-hammer: Avoid potential overflow in main
+
+From: Gabriel Ravier <gabravier@gmail.com>
+
+[ Upstream commit d1ee7e1f5c9191afb69ce46cc7752e4257340a31 ]
+
+If '-o' was used more than 64 times in a single invocation of gpio-hammer,
+this could lead to an overflow of the 'lines' array. This commit fixes
+this by avoiding the overflow and giving a proper diagnostic back to the
+user.
+
+Signed-off-by: Gabriel Ravier <gabravier@gmail.com>
+Signed-off-by: Bartosz Golaszewski <bgolaszewski@baylibre.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/gpio/gpio-hammer.c | 17 ++++++++++++++++-
+ 1 file changed, 16 insertions(+), 1 deletion(-)
+
+diff --git a/tools/gpio/gpio-hammer.c b/tools/gpio/gpio-hammer.c
+index 4bcb234c0fcab..3da5462a0c7d3 100644
+--- a/tools/gpio/gpio-hammer.c
++++ b/tools/gpio/gpio-hammer.c
+@@ -138,7 +138,14 @@ int main(int argc, char **argv)
+ device_name = optarg;
+ break;
+ case 'o':
+- lines[i] = strtoul(optarg, NULL, 10);
++ /*
++ * Avoid overflow. Do not immediately error, we want to
++ * be able to accurately report on the amount of times
++ * '-o' was given to give an accurate error message
++ */
++ if (i < GPIOHANDLES_MAX)
++ lines[i] = strtoul(optarg, NULL, 10);
++
+ i++;
+ break;
+ case '?':
+@@ -146,6 +153,14 @@ int main(int argc, char **argv)
+ return -1;
+ }
+ }
++
++ if (i >= GPIOHANDLES_MAX) {
++ fprintf(stderr,
++ "Only %d occurences of '-o' are allowed, %d were found\n",
++ GPIOHANDLES_MAX, i + 1);
++ return -1;
++ }
++
+ nlines = i;
+
+ if (!device_name || !nlines) {
+--
+2.25.1
+
--- /dev/null
+From 773153c40642173d7c3a2179d43fe9644eaaea0c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Jan 2020 19:59:56 -0800
+Subject: tools/power/x86/intel_pstate_tracer: changes for python 3
+ compatibility
+
+From: Doug Smythies <doug.smythies@gmail.com>
+
+[ Upstream commit e749e09db30c38f1a275945814b0109e530a07b0 ]
+
+Some syntax needs to be more rigorous for Python 3.
+Backwards compatibility was tested with Python 2.7.
+
+Signed-off-by: Doug Smythies <dsmythies@telus.net>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../intel_pstate_tracer.py | 22 +++++++++----------
+ 1 file changed, 11 insertions(+), 11 deletions(-)
+
+diff --git a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+index 2fa3c5757bcb5..dbed3d213bf17 100755
+--- a/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
++++ b/tools/power/x86/intel_pstate_tracer/intel_pstate_tracer.py
+@@ -10,11 +10,11 @@ then this utility enables and collects trace data for a user specified interval
+ and generates performance plots.
+
+ Prerequisites:
+- Python version 2.7.x
++ Python version 2.7.x or higher
+ gnuplot 5.0 or higher
+- gnuplot-py 1.8
++ gnuplot-py 1.8 or higher
+ (Most of the distributions have these required packages. They may be called
+- gnuplot-py, phython-gnuplot. )
++ gnuplot-py, phython-gnuplot or phython3-gnuplot, gnuplot-nox, ... )
+
+ HWP (Hardware P-States are disabled)
+ Kernel config for Linux trace is enabled
+@@ -180,7 +180,7 @@ def plot_pstate_cpu_with_sample():
+ g_plot('set xlabel "Samples"')
+ g_plot('set ylabel "P-State"')
+ g_plot('set title "{} : cpu pstate vs. sample : {:%F %H:%M}"'.format(testname, datetime.now()))
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_SAMPLE, C_TO)
+ g_plot('title_list = "{}"'.format(title_list))
+ g_plot(plot_str)
+@@ -197,7 +197,7 @@ def plot_pstate_cpu():
+ # the following command is really cool, but doesn't work with the CPU masking option because it aborts on the first missing file.
+ # plot_str = 'plot for [i=0:*] file=sprintf("cpu%03d.csv",i) title_s=sprintf("cpu%03d",i) file using 16:7 pt 7 ps 1 title title_s'
+ #
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_TO)
+ g_plot('title_list = "{}"'.format(title_list))
+ g_plot(plot_str)
+@@ -211,7 +211,7 @@ def plot_load_cpu():
+ g_plot('set ylabel "CPU load (percent)"')
+ g_plot('set title "{} : cpu loads : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_LOAD)
+ g_plot('title_list = "{}"'.format(title_list))
+ g_plot(plot_str)
+@@ -225,7 +225,7 @@ def plot_frequency_cpu():
+ g_plot('set ylabel "CPU Frequency (GHz)"')
+ g_plot('set title "{} : cpu frequencies : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_FREQ)
+ g_plot('title_list = "{}"'.format(title_list))
+ g_plot(plot_str)
+@@ -240,7 +240,7 @@ def plot_duration_cpu():
+ g_plot('set ylabel "Timer Duration (MilliSeconds)"')
+ g_plot('set title "{} : cpu durations : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_DURATION)
+ g_plot('title_list = "{}"'.format(title_list))
+ g_plot(plot_str)
+@@ -254,7 +254,7 @@ def plot_scaled_cpu():
+ g_plot('set ylabel "Scaled Busy (Unitless)"')
+ g_plot('set title "{} : cpu scaled busy : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_SCALED)
+ g_plot('title_list = "{}"'.format(title_list))
+ g_plot(plot_str)
+@@ -268,7 +268,7 @@ def plot_boost_cpu():
+ g_plot('set ylabel "CPU IO Boost (percent)"')
+ g_plot('set title "{} : cpu io boost : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_BOOST)
+ g_plot('title_list = "{}"'.format(title_list))
+ g_plot(plot_str)
+@@ -282,7 +282,7 @@ def plot_ghz_cpu():
+ g_plot('set ylabel "TSC Frequency (GHz)"')
+ g_plot('set title "{} : cpu TSC Frequencies (Sanity check calculation) : {:%F %H:%M}"'.format(testname, datetime.now()))
+
+- title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).replace('\n', ' ')
++ title_list = subprocess.check_output('ls cpu???.csv | sed -e \'s/.csv//\'',shell=True).decode('utf-8').replace('\n', ' ')
+ plot_str = "plot for [i in title_list] i.'.csv' using {:d}:{:d} pt 7 ps 1 title i".format(C_ELAPSED, C_GHZ)
+ g_plot('title_list = "{}"'.format(title_list))
+ g_plot(plot_str)
+--
+2.25.1
+
--- /dev/null
+From 4bb29fa9660892e52e8a4a0283c024f0192e1dc7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 12 Mar 2020 11:53:31 -0400
+Subject: tpm: ibmvtpm: Wait for buffer to be set before proceeding
+
+From: Stefan Berger <stefanb@linux.ibm.com>
+
+[ Upstream commit d8d74ea3c00214aee1e1826ca18e77944812b9b4 ]
+
+Synchronize with the results from the CRQs before continuing with
+the initialization. This avoids trying to send TPM commands while
+the rtce buffer has not been allocated yet.
+
+This patch fixes an existing race condition that may occur if the
+hypervisor does not quickly respond to the VTPM_GET_RTCE_BUFFER_SIZE
+request sent during initialization and therefore the ibmvtpm->rtce_buf
+has not been allocated at the time the first TPM command is sent.
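+
+Condensed, the synchronisation added below has three pieces (all taken
+from the hunks that follow): a waitqueue in the CRQ queue structure, a
+wake-up from the interrupt path once a response has been processed, and
+a bounded wait in probe() before the chip is registered.
+
+    init_waitqueue_head(&crq_q->wq);                    /* probe */
+
+    wake_up_interruptible(&ibmvtpm->crq_queue.wq);      /* IRQ handler */
+
+    if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
+                            ibmvtpm->rtce_buf != NULL, HZ))
+        goto init_irq_cleanup;                          /* no response */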
+
+Fixes: 132f76294744 ("drivers/char/tpm: Add new device driver to support IBM vTPM")
+Signed-off-by: Stefan Berger <stefanb@linux.ibm.com>
+Acked-by: Nayna Jain <nayna@linux.ibm.com>
+Tested-by: Nayna Jain <nayna@linux.ibm.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/tpm/tpm_ibmvtpm.c | 9 +++++++++
+ drivers/char/tpm/tpm_ibmvtpm.h | 1 +
+ 2 files changed, 10 insertions(+)
+
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.c b/drivers/char/tpm/tpm_ibmvtpm.c
+index 569e93e1f06cc..3ba67bc6baba0 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.c
++++ b/drivers/char/tpm/tpm_ibmvtpm.c
+@@ -588,6 +588,7 @@ static irqreturn_t ibmvtpm_interrupt(int irq, void *vtpm_instance)
+ */
+ while ((crq = ibmvtpm_crq_get_next(ibmvtpm)) != NULL) {
+ ibmvtpm_crq_process(crq, ibmvtpm);
++ wake_up_interruptible(&ibmvtpm->crq_queue.wq);
+ crq->valid = 0;
+ smp_wmb();
+ }
+@@ -635,6 +636,7 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ }
+
+ crq_q->num_entry = CRQ_RES_BUF_SIZE / sizeof(*crq_q->crq_addr);
++ init_waitqueue_head(&crq_q->wq);
+ ibmvtpm->crq_dma_handle = dma_map_single(dev, crq_q->crq_addr,
+ CRQ_RES_BUF_SIZE,
+ DMA_BIDIRECTIONAL);
+@@ -687,6 +689,13 @@ static int tpm_ibmvtpm_probe(struct vio_dev *vio_dev,
+ if (rc)
+ goto init_irq_cleanup;
+
++ if (!wait_event_timeout(ibmvtpm->crq_queue.wq,
++ ibmvtpm->rtce_buf != NULL,
++ HZ)) {
++ dev_err(dev, "CRQ response timed out\n");
++ goto init_irq_cleanup;
++ }
++
+ return tpm_chip_register(chip);
+ init_irq_cleanup:
+ do {
+diff --git a/drivers/char/tpm/tpm_ibmvtpm.h b/drivers/char/tpm/tpm_ibmvtpm.h
+index 91dfe766d0800..4f6a124601db4 100644
+--- a/drivers/char/tpm/tpm_ibmvtpm.h
++++ b/drivers/char/tpm/tpm_ibmvtpm.h
+@@ -31,6 +31,7 @@ struct ibmvtpm_crq_queue {
+ struct ibmvtpm_crq *crq_addr;
+ u32 index;
+ u32 num_entry;
++ wait_queue_head_t wq;
+ };
+
+ struct ibmvtpm_dev {
+--
+2.25.1
+
--- /dev/null
+From 35c84874487094d69fb9caffbd85618c350a75b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 16 Oct 2019 21:28:14 +0300
+Subject: tpm_crb: fix fTPM on AMD Zen+ CPUs
+
+From: Ivan Lazeev <ivan.lazeev@gmail.com>
+
+[ Upstream commit 3ef193822b25e9ee629974f66dc1ff65167f770c ]
+
+Bug link: https://bugzilla.kernel.org/show_bug.cgi?id=195657
+
+cmd/rsp buffers are expected to be in the same ACPI region.
+For Zen+ CPUs, BIOSes might report two different regions, and some of
+them also report region sizes inconsistent with values from TPM
+registers.
+
+Memory configuration on ASRock x470 ITX:
+
+db0a0000-dc59efff : Reserved
+ dc57e000-dc57efff : MSFT0101:00
+ dc582000-dc582fff : MSFT0101:00
+
+Work around the issue by storing the ACPI regions declared for the
+device in a fixed array and adding an array of pointers to the
+corresponding possibly allocated resources in the crb_map_io() function.
+This data was previously held for a single resource
+in struct crb_priv (the iobase field) and the local variable io_res in
+crb_map_io(). The ACPI resources array is used to find the index of the
+corresponding region for each buffer and to make the buffer size
+consistent with the region's length. The array of pointers to allocated
+resources is used to map each region at most once.
+
+Signed-off-by: Ivan Lazeev <ivan.lazeev@gmail.com>
+Tested-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Tested-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Reviewed-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Jarkko Sakkinen <jarkko.sakkinen@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/char/tpm/tpm_crb.c | 123 +++++++++++++++++++++++++++----------
+ 1 file changed, 90 insertions(+), 33 deletions(-)
+
+diff --git a/drivers/char/tpm/tpm_crb.c b/drivers/char/tpm/tpm_crb.c
+index 763fc7e6c0058..20f27100708bd 100644
+--- a/drivers/char/tpm/tpm_crb.c
++++ b/drivers/char/tpm/tpm_crb.c
+@@ -26,6 +26,7 @@
+ #include "tpm.h"
+
+ #define ACPI_SIG_TPM2 "TPM2"
++#define TPM_CRB_MAX_RESOURCES 3
+
+ static const guid_t crb_acpi_start_guid =
+ GUID_INIT(0x6BBF6CAB, 0x5463, 0x4714,
+@@ -95,7 +96,6 @@ enum crb_status {
+ struct crb_priv {
+ u32 sm;
+ const char *hid;
+- void __iomem *iobase;
+ struct crb_regs_head __iomem *regs_h;
+ struct crb_regs_tail __iomem *regs_t;
+ u8 __iomem *cmd;
+@@ -438,21 +438,27 @@ static const struct tpm_class_ops tpm_crb = {
+
+ static int crb_check_resource(struct acpi_resource *ares, void *data)
+ {
+- struct resource *io_res = data;
++ struct resource *iores_array = data;
+ struct resource_win win;
+ struct resource *res = &(win.res);
++ int i;
+
+ if (acpi_dev_resource_memory(ares, res) ||
+ acpi_dev_resource_address_space(ares, &win)) {
+- *io_res = *res;
+- io_res->name = NULL;
++ for (i = 0; i < TPM_CRB_MAX_RESOURCES + 1; ++i) {
++ if (resource_type(iores_array + i) != IORESOURCE_MEM) {
++ iores_array[i] = *res;
++ iores_array[i].name = NULL;
++ break;
++ }
++ }
+ }
+
+ return 1;
+ }
+
+-static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
+- struct resource *io_res, u64 start, u32 size)
++static void __iomem *crb_map_res(struct device *dev, struct resource *iores,
++ void __iomem **iobase_ptr, u64 start, u32 size)
+ {
+ struct resource new_res = {
+ .start = start,
+@@ -464,10 +470,16 @@ static void __iomem *crb_map_res(struct device *dev, struct crb_priv *priv,
+ if (start != new_res.start)
+ return (void __iomem *) ERR_PTR(-EINVAL);
+
+- if (!resource_contains(io_res, &new_res))
++ if (!iores)
+ return devm_ioremap_resource(dev, &new_res);
+
+- return priv->iobase + (new_res.start - io_res->start);
++ if (!*iobase_ptr) {
++ *iobase_ptr = devm_ioremap_resource(dev, iores);
++ if (IS_ERR(*iobase_ptr))
++ return *iobase_ptr;
++ }
++
++ return *iobase_ptr + (new_res.start - iores->start);
+ }
+
+ /*
+@@ -494,9 +506,13 @@ static u64 crb_fixup_cmd_size(struct device *dev, struct resource *io_res,
+ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ struct acpi_table_tpm2 *buf)
+ {
+- struct list_head resources;
+- struct resource io_res;
++ struct list_head acpi_resource_list;
++ struct resource iores_array[TPM_CRB_MAX_RESOURCES + 1] = { {0} };
++ void __iomem *iobase_array[TPM_CRB_MAX_RESOURCES] = {NULL};
+ struct device *dev = &device->dev;
++ struct resource *iores;
++ void __iomem **iobase_ptr;
++ int i;
+ u32 pa_high, pa_low;
+ u64 cmd_pa;
+ u32 cmd_size;
+@@ -505,21 +521,41 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ u32 rsp_size;
+ int ret;
+
+- INIT_LIST_HEAD(&resources);
+- ret = acpi_dev_get_resources(device, &resources, crb_check_resource,
+- &io_res);
++ INIT_LIST_HEAD(&acpi_resource_list);
++ ret = acpi_dev_get_resources(device, &acpi_resource_list,
++ crb_check_resource, iores_array);
+ if (ret < 0)
+ return ret;
+- acpi_dev_free_resource_list(&resources);
++ acpi_dev_free_resource_list(&acpi_resource_list);
+
+- if (resource_type(&io_res) != IORESOURCE_MEM) {
++ if (resource_type(iores_array) != IORESOURCE_MEM) {
+ dev_err(dev, FW_BUG "TPM2 ACPI table does not define a memory resource\n");
+ return -EINVAL;
++ } else if (resource_type(iores_array + TPM_CRB_MAX_RESOURCES) ==
++ IORESOURCE_MEM) {
++ dev_warn(dev, "TPM2 ACPI table defines too many memory resources\n");
++ memset(iores_array + TPM_CRB_MAX_RESOURCES,
++ 0, sizeof(*iores_array));
++ iores_array[TPM_CRB_MAX_RESOURCES].flags = 0;
+ }
+
+- priv->iobase = devm_ioremap_resource(dev, &io_res);
+- if (IS_ERR(priv->iobase))
+- return PTR_ERR(priv->iobase);
++ iores = NULL;
++ iobase_ptr = NULL;
++ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
++ if (buf->control_address >= iores_array[i].start &&
++ buf->control_address + sizeof(struct crb_regs_tail) - 1 <=
++ iores_array[i].end) {
++ iores = iores_array + i;
++ iobase_ptr = iobase_array + i;
++ break;
++ }
++ }
++
++ priv->regs_t = crb_map_res(dev, iores, iobase_ptr, buf->control_address,
++ sizeof(struct crb_regs_tail));
++
++ if (IS_ERR(priv->regs_t))
++ return PTR_ERR(priv->regs_t);
+
+ /* The ACPI IO region starts at the head area and continues to include
+ * the control area, as one nice sane region except for some older
+@@ -527,9 +563,10 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ */
+ if ((priv->sm == ACPI_TPM2_COMMAND_BUFFER) ||
+ (priv->sm == ACPI_TPM2_MEMORY_MAPPED)) {
+- if (buf->control_address == io_res.start +
++ if (iores &&
++ buf->control_address == iores->start +
+ sizeof(*priv->regs_h))
+- priv->regs_h = priv->iobase;
++ priv->regs_h = *iobase_ptr;
+ else
+ dev_warn(dev, FW_BUG "Bad ACPI memory layout");
+ }
+@@ -538,13 +575,6 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ if (ret)
+ return ret;
+
+- priv->regs_t = crb_map_res(dev, priv, &io_res, buf->control_address,
+- sizeof(struct crb_regs_tail));
+- if (IS_ERR(priv->regs_t)) {
+- ret = PTR_ERR(priv->regs_t);
+- goto out_relinquish_locality;
+- }
+-
+ /*
+ * PTT HW bug w/a: wake up the device to access
+ * possibly not retained registers.
+@@ -556,13 +586,26 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+ pa_high = ioread32(&priv->regs_t->ctrl_cmd_pa_high);
+ pa_low = ioread32(&priv->regs_t->ctrl_cmd_pa_low);
+ cmd_pa = ((u64)pa_high << 32) | pa_low;
+- cmd_size = crb_fixup_cmd_size(dev, &io_res, cmd_pa,
+- ioread32(&priv->regs_t->ctrl_cmd_size));
++ cmd_size = ioread32(&priv->regs_t->ctrl_cmd_size);
++
++ iores = NULL;
++ iobase_ptr = NULL;
++ for (i = 0; iores_array[i].end; ++i) {
++ if (cmd_pa >= iores_array[i].start &&
++ cmd_pa <= iores_array[i].end) {
++ iores = iores_array + i;
++ iobase_ptr = iobase_array + i;
++ break;
++ }
++ }
++
++ if (iores)
++ cmd_size = crb_fixup_cmd_size(dev, iores, cmd_pa, cmd_size);
+
+ dev_dbg(dev, "cmd_hi = %X cmd_low = %X cmd_size %X\n",
+ pa_high, pa_low, cmd_size);
+
+- priv->cmd = crb_map_res(dev, priv, &io_res, cmd_pa, cmd_size);
++ priv->cmd = crb_map_res(dev, iores, iobase_ptr, cmd_pa, cmd_size);
+ if (IS_ERR(priv->cmd)) {
+ ret = PTR_ERR(priv->cmd);
+ goto out;
+@@ -570,11 +613,25 @@ static int crb_map_io(struct acpi_device *device, struct crb_priv *priv,
+
+ memcpy_fromio(&__rsp_pa, &priv->regs_t->ctrl_rsp_pa, 8);
+ rsp_pa = le64_to_cpu(__rsp_pa);
+- rsp_size = crb_fixup_cmd_size(dev, &io_res, rsp_pa,
+- ioread32(&priv->regs_t->ctrl_rsp_size));
++ rsp_size = ioread32(&priv->regs_t->ctrl_rsp_size);
++
++ iores = NULL;
++ iobase_ptr = NULL;
++ for (i = 0; resource_type(iores_array + i) == IORESOURCE_MEM; ++i) {
++ if (rsp_pa >= iores_array[i].start &&
++ rsp_pa <= iores_array[i].end) {
++ iores = iores_array + i;
++ iobase_ptr = iobase_array + i;
++ break;
++ }
++ }
++
++ if (iores)
++ rsp_size = crb_fixup_cmd_size(dev, iores, rsp_pa, rsp_size);
+
+ if (cmd_pa != rsp_pa) {
+- priv->rsp = crb_map_res(dev, priv, &io_res, rsp_pa, rsp_size);
++ priv->rsp = crb_map_res(dev, iores, iobase_ptr,
++ rsp_pa, rsp_size);
+ ret = PTR_ERR_OR_ZERO(priv->rsp);
+ goto out;
+ }
+--
+2.25.1
+
--- /dev/null
+From 18ef724b53d9667516e80c2fbfe3c5310dab85fa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 14 Aug 2019 10:55:25 -0700
+Subject: tracing: Adding NULL checks for trace_array descriptor pointer
+
+From: Divya Indi <divya.indi@oracle.com>
+
+[ Upstream commit 953ae45a0c25e09428d4a03d7654f97ab8a36647 ]
+
+As part of commit f45d1225adb0 ("tracing: Kernel access to Ftrace
+instances") we exported certain functions. Here, we are adding some additional
+NULL checks to ensure safe usage by users of these APIs.
+
+Link: http://lkml.kernel.org/r/1565805327-579-4-git-send-email-divya.indi@oracle.com
+
+Signed-off-by: Divya Indi <divya.indi@oracle.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.c | 3 +++
+ kernel/trace/trace_events.c | 2 ++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 4966410bb0f4d..17505a22d800b 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -3037,6 +3037,9 @@ int trace_array_printk(struct trace_array *tr,
+ if (!(global_trace.trace_flags & TRACE_ITER_PRINTK))
+ return 0;
+
++ if (!tr)
++ return -ENOENT;
++
+ va_start(ap, fmt);
+ ret = trace_array_vprintk(tr, ip, fmt, ap);
+ va_end(ap);
+diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
+index 27726121d332c..0fc06a7da87fb 100644
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -800,6 +800,8 @@ static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
+ char *event = NULL, *sub = NULL, *match;
+ int ret;
+
++ if (!tr)
++ return -ENOENT;
+ /*
+ * The buf format can be <subsystem>:<event-name>
+ * *:<event-name> means any event by that name.
+--
+2.25.1
+
--- /dev/null
+From d85457dd9f13ece980723cad026cd03513e18946 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Sep 2014 16:14:12 -0400
+Subject: tracing: Set kernel_stack's caller size properly
+
+From: Josef Bacik <jbacik@fb.com>
+
+[ Upstream commit cbc3b92ce037f5e7536f6db157d185cd8b8f615c ]
+
+I noticed when trying to use the trace-cmd python interface that reading the raw
+buffer wasn't working for kernel_stack events. This is because it uses a
+stubbed version of __dynamic_array that doesn't do the __data_loc trick and
+encode the length of the array into the field. Instead it just shows up as a
+size of 0. So change this to __array and set the len to FTRACE_STACK_ENTRIES
+since this is what we actually do in practice and matches how user_stack_trace
+works.
+
+Link: http://lkml.kernel.org/r/1411589652-1318-1-git-send-email-jbacik@fb.com
+
+Signed-off-by: Josef Bacik <jbacik@fb.com>
+[ Pulled from the archeological digging of my INBOX ]
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace_entries.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/trace/trace_entries.h b/kernel/trace/trace_entries.h
+index 06bb2fd9a56c5..a97aad105d367 100644
+--- a/kernel/trace/trace_entries.h
++++ b/kernel/trace/trace_entries.h
+@@ -179,7 +179,7 @@ FTRACE_ENTRY(kernel_stack, stack_entry,
+
+ F_STRUCT(
+ __field( int, size )
+- __dynamic_array(unsigned long, caller )
++ __array( unsigned long, caller, FTRACE_STACK_ENTRIES )
+ ),
+
+ F_printk("\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n\t=> (" IP_FMT ")\n"
+--
+2.25.1
+
--- /dev/null
+From 5df6d63e212bde70e2f726fc3dfbe35723ee427b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Feb 2020 22:10:12 -0700
+Subject: tracing: Use address-of operator on section symbols
+
+From: Nathan Chancellor <natechancellor@gmail.com>
+
+[ Upstream commit bf2cbe044da275021b2de5917240411a19e5c50d ]
+
+Clang warns:
+
+../kernel/trace/trace.c:9335:33: warning: array comparison always
+evaluates to true [-Wtautological-compare]
+ if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
+ ^
+1 warning generated.
+
+These are not true arrays; they are linker-defined symbols, which are
+just addresses. Using the address-of operator silences the warning and
+does not change the runtime result of the check (tested with some print
+statements compiled in with clang + ld.lld and gcc + ld.bfd in QEMU).
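+
+A small stand-alone illustration (the section name below is made up):
+
+    /* Linker-generated markers: symbols, not real arrays. */
+    extern char __start_my_section[];
+    extern char __stop_my_section[];
+
+    static bool my_section_has_entries(void)
+    {
+        /*
+         * Taking the address keeps the same runtime semantics but
+         * avoids clang's "array comparison always evaluates to true"
+         * warning.
+         */
+        return &__stop_my_section != &__start_my_section;
+    }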
+
+Link: http://lkml.kernel.org/r/20200220051011.26113-1-natechancellor@gmail.com
+
+Link: https://github.com/ClangBuiltLinux/linux/issues/893
+Suggested-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Nathan Chancellor <natechancellor@gmail.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 17505a22d800b..6bf617ff03694 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -8529,7 +8529,7 @@ __init static int tracer_alloc_buffers(void)
+ goto out_free_buffer_mask;
+
+ /* Only allocate trace_printk buffers if a trace_printk exists */
+- if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
++ if (&__stop___trace_bprintk_fmt != &__start___trace_bprintk_fmt)
+ /* Must be called before global_trace.buffer is allocated */
+ trace_printk_init_buffers();
+
+--
+2.25.1
+
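+For illustration only: a minimal userspace sketch (the section name and
+symbols below are invented for the example, and an ELF toolchain on Linux
+is assumed) showing that linker-provided start/stop symbols are bare
+addresses, and that comparing them through the address-of operator keeps
+the same runtime meaning while avoiding clang's tautological-compare
+warning:
+
+  #include <stdio.h>
+
+  /* Two dummy objects placed in a custom section; for section names that
+   * are valid C identifiers the linker emits __start_<name> and
+   * __stop_<name> bounding symbols automatically. */
+  static int a __attribute__((used, section("demo_sec"))) = 1;
+  static int b __attribute__((used, section("demo_sec"))) = 2;
+
+  extern int __start_demo_sec[];   /* linker-defined, not a real array */
+  extern int __stop_demo_sec[];
+
+  int main(void)
+  {
+          /* Comparing the symbols as arrays is what clang flags as always
+           * true; taking their addresses expresses the same emptiness
+           * check without the warning. */
+          if (&__stop_demo_sec != &__start_demo_sec)
+                  printf("section has %td entries\n",
+                         __stop_demo_sec - __start_demo_sec);
+          return 0;
+  }
+
+Compiled with gcc or clang, this should report the two placed entries.
+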
--- /dev/null
+From 22dfd14b2ef5338233ddf6dbaf367a0dc3025c17 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2020 18:34:33 -0700
+Subject: tty: serial: samsung: Correct clock selection logic
+
+From: Jonathan Bakker <xc-racer2@live.ca>
+
+[ Upstream commit 7d31676a8d91dd18e08853efd1cb26961a38c6a6 ]
+
+Some variants of the samsung tty driver can pick which clock
+to use for their baud rate generation. In the DT conversion,
+a default clock was selected to be used if a specific one wasn't
+assigned and then a comparison of which clock rate worked better
+was done. Unfortunately, the comparison was implemented in such
+a way that only the default clock was ever actually compared.
+Fix this by iterating through all possible clocks, except when a
+specific clock has already been picked via clk_sel (which is
+only possible via board files).
+
+Signed-off-by: Jonathan Bakker <xc-racer2@live.ca>
+Reviewed-by: Krzysztof Kozlowski <krzk@kernel.org>
+Link: https://lore.kernel.org/r/BN6PR04MB06604E63833EA41837EBF77BA3A30@BN6PR04MB0660.namprd04.prod.outlook.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/samsung.c | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
+index fcb89bf2524d1..1528a7ba2bf4d 100644
+--- a/drivers/tty/serial/samsung.c
++++ b/drivers/tty/serial/samsung.c
+@@ -1187,14 +1187,14 @@ static unsigned int s3c24xx_serial_getclk(struct s3c24xx_uart_port *ourport,
+ struct s3c24xx_uart_info *info = ourport->info;
+ struct clk *clk;
+ unsigned long rate;
+- unsigned int cnt, baud, quot, clk_sel, best_quot = 0;
++ unsigned int cnt, baud, quot, best_quot = 0;
+ char clkname[MAX_CLK_NAME_LENGTH];
+ int calc_deviation, deviation = (1 << 30) - 1;
+
+- clk_sel = (ourport->cfg->clk_sel) ? ourport->cfg->clk_sel :
+- ourport->info->def_clk_sel;
+ for (cnt = 0; cnt < info->num_clks; cnt++) {
+- if (!(clk_sel & (1 << cnt)))
++ /* Keep selected clock if provided */
++ if (ourport->cfg->clk_sel &&
++ !(ourport->cfg->clk_sel & (1 << cnt)))
+ continue;
+
+ sprintf(clkname, "clk_uart_baud%d", cnt);
+--
+2.25.1
+
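+A rough userspace model of the corrected selection logic may help; the
+clock rates, target baud rate and clk_sel value below are invented for
+the example. Every candidate clock is considered unless a non-zero
+clk_sel mask (a board-file selection) restricts the set, and the
+candidate with the smallest baud-rate deviation wins:
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  int main(void)
+  {
+          /* Invented candidate clock rates (Hz) and target baud rate. */
+          const unsigned long rates[] = { 66000000, 32768, 24000000 };
+          const unsigned int nclks = 3, baud = 115200;
+          const unsigned int clk_sel = 0;  /* 0: no board-file restriction */
+          unsigned int cnt, best = 0, best_dev = ~0u;
+
+          for (cnt = 0; cnt < nclks; cnt++) {
+                  unsigned long quot;
+                  unsigned int dev;
+
+                  /* Keep a board-file selected clock if one was provided. */
+                  if (clk_sel && !(clk_sel & (1 << cnt)))
+                          continue;
+
+                  quot = rates[cnt] / (16 * baud);  /* 16x oversampling */
+                  if (!quot)
+                          continue;
+                  dev = abs((int)(rates[cnt] / (16 * quot)) - (int)baud);
+                  if (dev < best_dev) {
+                          best_dev = dev;
+                          best = cnt;
+                  }
+          }
+          printf("picked clock %u (deviation %u Hz)\n", best, best_dev);
+          return 0;
+  }
+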
--- /dev/null
+From f9be09791e8ca9deb0f449432e0984fb2135b07e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Feb 2020 21:26:34 +0800
+Subject: ubi: fastmap: Free unused fastmap anchor peb during detach
+
+From: Hou Tao <houtao1@huawei.com>
+
+[ Upstream commit c16f39d14a7e0ec59881fbdb22ae494907534384 ]
+
+When CONFIG_MTD_UBI_FASTMAP is enabled, fm_anchor will be assigned
+a free PEB during ubi_wl_init() or ubi_update_fastmap(). However,
+if fastmap is not used or is disabled on the MTD device, the
+ubi_wl_entry related to that PEB will not be freed during detach.
+
+So fix it by freeing the unused fastmap anchor during detach.
+
+Fixes: f9c34bb52997 ("ubi: Fix producing anchor PEBs")
+Reported-by: syzbot+f317896aae32eb281a58@syzkaller.appspotmail.com
+Reviewed-by: Sascha Hauer <s.hauer@pengutronix.de>
+Signed-off-by: Hou Tao <houtao1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/ubi/fastmap-wl.c | 15 +++++++++++++--
+ 1 file changed, 13 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
+index 13efebb400225..e08f6b4637dda 100644
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -48,6 +48,13 @@ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
+ return victim;
+ }
+
++static inline void return_unused_peb(struct ubi_device *ubi,
++ struct ubi_wl_entry *e)
++{
++ wl_tree_add(e, &ubi->free);
++ ubi->free_count++;
++}
++
+ /**
+ * return_unused_pool_pebs - returns unused PEB to the free tree.
+ * @ubi: UBI device description object
+@@ -61,8 +68,7 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
+
+ for (i = pool->used; i < pool->size; i++) {
+ e = ubi->lookuptbl[pool->pebs[i]];
+- wl_tree_add(e, &ubi->free);
+- ubi->free_count++;
++ return_unused_peb(ubi, e);
+ }
+ }
+
+@@ -370,6 +376,11 @@ static void ubi_fastmap_close(struct ubi_device *ubi)
+ return_unused_pool_pebs(ubi, &ubi->fm_pool);
+ return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);
+
++ if (ubi->fm_anchor) {
++ return_unused_peb(ubi, ubi->fm_anchor);
++ ubi->fm_anchor = NULL;
++ }
++
+ if (ubi->fm) {
+ for (i = 0; i < ubi->fm->used_blocks; i++)
+ kfree(ubi->fm->e[i]);
+--
+2.25.1
+
--- /dev/null
+From dcd3ba2daf64ec2c87f999d7f376e9f026ddd411 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Nov 2019 09:12:51 +0100
+Subject: ubi: Fix producing anchor PEBs
+
+From: Sascha Hauer <s.hauer@pengutronix.de>
+
+[ Upstream commit f9c34bb529975fe9f85b870a80c53a83a3c5a182 ]
+
+When a new fastmap is about to be written UBI must make sure it has a
+free block for a fastmap anchor available. For this ubi_update_fastmap()
+calls ubi_ensure_anchor_pebs(). This stopped working with 2e8f08deabbc
+("ubi: Fix races around ubi_refill_pools()"), with this commit the wear
+leveling code is blocked and can no longer produce free PEBs. UBI then
+more often than not falls back to writing the new fastmap anchor to the
+same block it was already on, which means the same erase block gets
+erased during each fastmap write and wears out quite fast.
+
+As the locking prevents us from producing the anchor PEB when we
+actually need it, this patch changes the strategy for creating the
+anchor PEB. We no longer create it on demand right before we want to
+write a fastmap, but instead we create an anchor PEB right after we have
+written a fastmap. This gives us enough time to produce a new anchor PEB
+before it is needed. To make sure we have an anchor PEB for the very
+first fastmap write we call ubi_ensure_anchor_pebs() during
+initialisation as well.
+
+Fixes: 2e8f08deabbc ("ubi: Fix races around ubi_refill_pools()")
+Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mtd/ubi/fastmap-wl.c | 31 ++++++++++++++++++-------------
+ drivers/mtd/ubi/fastmap.c | 14 +++++---------
+ drivers/mtd/ubi/ubi.h | 6 ++++--
+ drivers/mtd/ubi/wl.c | 32 ++++++++++++++------------------
+ drivers/mtd/ubi/wl.h | 1 -
+ 5 files changed, 41 insertions(+), 43 deletions(-)
+
+diff --git a/drivers/mtd/ubi/fastmap-wl.c b/drivers/mtd/ubi/fastmap-wl.c
+index 98f7d6be8d1fc..13efebb400225 100644
+--- a/drivers/mtd/ubi/fastmap-wl.c
++++ b/drivers/mtd/ubi/fastmap-wl.c
+@@ -66,18 +66,6 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
+ }
+ }
+
+-static int anchor_pebs_available(struct rb_root *root)
+-{
+- struct rb_node *p;
+- struct ubi_wl_entry *e;
+-
+- ubi_rb_for_each_entry(p, e, root, u.rb)
+- if (e->pnum < UBI_FM_MAX_START)
+- return 1;
+-
+- return 0;
+-}
+-
+ /**
+ * ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
+ * @ubi: UBI device description object
+@@ -286,8 +274,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
+ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+ {
+ struct ubi_work *wrk;
++ struct ubi_wl_entry *anchor;
+
+ spin_lock(&ubi->wl_lock);
++
++ /* Do we already have an anchor? */
++ if (ubi->fm_anchor) {
++ spin_unlock(&ubi->wl_lock);
++ return 0;
++ }
++
++ /* See if we can find an anchor PEB on the list of free PEBs */
++ anchor = ubi_wl_get_fm_peb(ubi, 1);
++ if (anchor) {
++ ubi->fm_anchor = anchor;
++ spin_unlock(&ubi->wl_lock);
++ return 0;
++ }
++
++ /* No luck, trigger wear leveling to produce a new anchor PEB */
++ ubi->fm_do_produce_anchor = 1;
+ if (ubi->wl_scheduled) {
+ spin_unlock(&ubi->wl_lock);
+ return 0;
+@@ -303,7 +309,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
+ return -ENOMEM;
+ }
+
+- wrk->anchor = 1;
+ wrk->func = &wear_leveling_worker;
+ __schedule_ubi_work(ubi, wrk);
+ return 0;
+diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
+index 8e292992f84c7..b88ef875236cc 100644
+--- a/drivers/mtd/ubi/fastmap.c
++++ b/drivers/mtd/ubi/fastmap.c
+@@ -1552,14 +1552,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
+ return 0;
+ }
+
+- ret = ubi_ensure_anchor_pebs(ubi);
+- if (ret) {
+- up_write(&ubi->fm_eba_sem);
+- up_write(&ubi->work_sem);
+- up_write(&ubi->fm_protect);
+- return ret;
+- }
+-
+ new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
+ if (!new_fm) {
+ up_write(&ubi->fm_eba_sem);
+@@ -1630,7 +1622,8 @@ int ubi_update_fastmap(struct ubi_device *ubi)
+ }
+
+ spin_lock(&ubi->wl_lock);
+- tmp_e = ubi_wl_get_fm_peb(ubi, 1);
++ tmp_e = ubi->fm_anchor;
++ ubi->fm_anchor = NULL;
+ spin_unlock(&ubi->wl_lock);
+
+ if (old_fm) {
+@@ -1682,6 +1675,9 @@ out_unlock:
+ up_write(&ubi->work_sem);
+ up_write(&ubi->fm_protect);
+ kfree(old_fm);
++
++ ubi_ensure_anchor_pebs(ubi);
++
+ return ret;
+
+ err:
+diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
+index d47b9e436e673..d248ec371cc17 100644
+--- a/drivers/mtd/ubi/ubi.h
++++ b/drivers/mtd/ubi/ubi.h
+@@ -504,6 +504,8 @@ struct ubi_debug_info {
+ * @fm_work: fastmap work queue
+ * @fm_work_scheduled: non-zero if fastmap work was scheduled
+ * @fast_attach: non-zero if UBI was attached by fastmap
++ * @fm_anchor: The next anchor PEB to use for fastmap
++ * @fm_do_produce_anchor: If true produce an anchor PEB in wl
+ *
+ * @used: RB-tree of used physical eraseblocks
+ * @erroneous: RB-tree of erroneous used physical eraseblocks
+@@ -612,6 +614,8 @@ struct ubi_device {
+ struct work_struct fm_work;
+ int fm_work_scheduled;
+ int fast_attach;
++ struct ubi_wl_entry *fm_anchor;
++ int fm_do_produce_anchor;
+
+ /* Wear-leveling sub-system's stuff */
+ struct rb_root used;
+@@ -802,7 +806,6 @@ struct ubi_attach_info {
+ * @vol_id: the volume ID on which this erasure is being performed
+ * @lnum: the logical eraseblock number
+ * @torture: if the physical eraseblock has to be tortured
+- * @anchor: produce a anchor PEB to by used by fastmap
+ *
+ * The @func pointer points to the worker function. If the @shutdown argument is
+ * not zero, the worker has to free the resources and exit immediately as the
+@@ -818,7 +821,6 @@ struct ubi_work {
+ int vol_id;
+ int lnum;
+ int torture;
+- int anchor;
+ };
+
+ #include "debug.h"
+diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
+index 6f2ac865ff05e..80d64d7e7a8be 100644
+--- a/drivers/mtd/ubi/wl.c
++++ b/drivers/mtd/ubi/wl.c
+@@ -331,13 +331,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
+ }
+ }
+
+- /* If no fastmap has been written and this WL entry can be used
+- * as anchor PEB, hold it back and return the second best WL entry
+- * such that fastmap can use the anchor PEB later. */
+- if (prev_e && !ubi->fm_disabled &&
+- !ubi->fm && e->pnum < UBI_FM_MAX_START)
+- return prev_e;
+-
+ return e;
+ }
+
+@@ -648,9 +641,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ {
+ int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
+ int erase = 0, keep = 0, vol_id = -1, lnum = -1;
+-#ifdef CONFIG_MTD_UBI_FASTMAP
+- int anchor = wrk->anchor;
+-#endif
+ struct ubi_wl_entry *e1, *e2;
+ struct ubi_vid_io_buf *vidb;
+ struct ubi_vid_hdr *vid_hdr;
+@@ -690,11 +680,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ }
+
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+- /* Check whether we need to produce an anchor PEB */
+- if (!anchor)
+- anchor = !anchor_pebs_available(&ubi->free);
+-
+- if (anchor) {
++ if (ubi->fm_do_produce_anchor) {
+ e1 = find_anchor_wl_entry(&ubi->used);
+ if (!e1)
+ goto out_cancel;
+@@ -705,6 +691,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
+ self_check_in_wl_tree(ubi, e1, &ubi->used);
+ rb_erase(&e1->u.rb, &ubi->used);
+ dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
++ ubi->fm_do_produce_anchor = 0;
+ } else if (!ubi->scrub.rb_node) {
+ #else
+ if (!ubi->scrub.rb_node) {
+@@ -1037,7 +1024,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
+ goto out_cancel;
+ }
+
+- wrk->anchor = 0;
+ wrk->func = &wear_leveling_worker;
+ if (nested)
+ __schedule_ubi_work(ubi, wrk);
+@@ -1079,8 +1065,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
+ err = sync_erase(ubi, e, wl_wrk->torture);
+ if (!err) {
+ spin_lock(&ubi->wl_lock);
+- wl_tree_add(e, &ubi->free);
+- ubi->free_count++;
++
++ if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
++ ubi->fm_anchor = e;
++ ubi->fm_do_produce_anchor = 0;
++ } else {
++ wl_tree_add(e, &ubi->free);
++ ubi->free_count++;
++ }
++
+ spin_unlock(&ubi->wl_lock);
+
+ /*
+@@ -1724,6 +1717,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
+ if (err)
+ goto out_free;
+
++#ifdef CONFIG_MTD_UBI_FASTMAP
++ ubi_ensure_anchor_pebs(ubi);
++#endif
+ return 0;
+
+ out_free:
+diff --git a/drivers/mtd/ubi/wl.h b/drivers/mtd/ubi/wl.h
+index a9e2d669acd81..c93a532937863 100644
+--- a/drivers/mtd/ubi/wl.h
++++ b/drivers/mtd/ubi/wl.h
+@@ -2,7 +2,6 @@
+ #ifndef UBI_WL_H
+ #define UBI_WL_H
+ #ifdef CONFIG_MTD_UBI_FASTMAP
+-static int anchor_pebs_available(struct rb_root *root);
+ static void update_fastmap_work_fn(struct work_struct *wrk);
+ static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
+ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
+--
+2.25.1
+
--- /dev/null
+From 7d280caee85c8a74f6ad766f81a75df9c7ab3631 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Jan 2020 23:36:07 +0800
+Subject: ubifs: Fix out-of-bounds memory access caused by abnormal value of
+ node_len
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Liu Song <liu.song11@zte.com.cn>
+
+[ Upstream commit acc5af3efa303d5f36cc8c0f61716161f6ca1384 ]
+
+In “ubifs_check_node”, when the value of "node_len" is abnormal, the
+code jumps to the "out_len" label. Then, in the subsequent
+"ubifs_dump_node", if the node type is "UBIFS_DATA_NODE", an
+out-of-bounds access may occur in "print_hex_dump" due to the wrong
+"ch->len".
+
+Therefore, when the value of "node_len" is abnormal, the data length
+should be adjusted to a reasonable safe range. At this time,
+structured data is not credible, so dump the corrupted data directly
+for analysis.
+
+Signed-off-by: Liu Song <liu.song11@zte.com.cn>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ubifs/io.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
+index 099bec94b8207..fab29f899f913 100644
+--- a/fs/ubifs/io.c
++++ b/fs/ubifs/io.c
+@@ -237,7 +237,7 @@ int ubifs_is_mapped(const struct ubifs_info *c, int lnum)
+ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
+ int offs, int quiet, int must_chk_crc)
+ {
+- int err = -EINVAL, type, node_len;
++ int err = -EINVAL, type, node_len, dump_node = 1;
+ uint32_t crc, node_crc, magic;
+ const struct ubifs_ch *ch = buf;
+
+@@ -290,10 +290,22 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
+ out_len:
+ if (!quiet)
+ ubifs_err(c, "bad node length %d", node_len);
++ if (type == UBIFS_DATA_NODE && node_len > UBIFS_DATA_NODE_SZ)
++ dump_node = 0;
+ out:
+ if (!quiet) {
+ ubifs_err(c, "bad node at LEB %d:%d", lnum, offs);
+- ubifs_dump_node(c, buf);
++ if (dump_node) {
++ ubifs_dump_node(c, buf);
++ } else {
++ int safe_len = min3(node_len, c->leb_size - offs,
++ (int)UBIFS_MAX_DATA_NODE_SZ);
++ pr_err("\tprevent out-of-bounds memory access\n");
++ pr_err("\ttruncated data node length %d\n", safe_len);
++ pr_err("\tcorrupted data node:\n");
++ print_hex_dump(KERN_ERR, "\t", DUMP_PREFIX_OFFSET, 32, 1,
++ buf, safe_len, 0);
++ }
+ dump_stack();
+ }
+ return err;
+--
+2.25.1
+
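+The core idea of the fix above, clamping an untrusted length before it is
+used as a dump size, can be shown with a small self-contained sketch (the
+buffer, sizes and helper below are made up for illustration):
+
+  #include <stdio.h>
+  #include <string.h>
+
+  #define MIN3(a, b, c) \
+          ((a) < (b) ? ((a) < (c) ? (a) : (c)) : ((b) < (c) ? (b) : (c)))
+
+  /* Dump at most 'safe_len' bytes even though the (untrusted) header
+   * claims 'claimed_len'; the bound is whatever provably fits. */
+  static void dump_untrusted(const unsigned char *buf, int buf_size,
+                             int claimed_len, int max_node_size)
+  {
+          int safe_len = MIN3(claimed_len, buf_size, max_node_size);
+          int i;
+
+          printf("truncated dump of %d bytes (header claimed %d):\n",
+                 safe_len, claimed_len);
+          for (i = 0; i < safe_len; i++)
+                  printf("%02x%c", buf[i], (i % 16 == 15) ? '\n' : ' ');
+          printf("\n");
+  }
+
+  int main(void)
+  {
+          unsigned char node[64];
+
+          memset(node, 0xab, sizeof(node));
+          /* A corrupted length field far larger than the buffer must not
+           * be used directly as a dump length. */
+          dump_untrusted(node, sizeof(node), 1 << 20, 4096);
+          return 0;
+  }
+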
--- /dev/null
+From c403b7e69561015e73f748b0e0ec870f6fdc6eff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 21 May 2020 16:46:43 +0800
+Subject: usb: dwc3: Increase timeout for CmdAct cleared by device controller
+
+From: Yu Chen <chenyu56@huawei.com>
+
+[ Upstream commit 1c0e69ae1b9f9004fd72978612ae3463791edc56 ]
+
+If the SS PHY is in P3 there is no pipe_clk, so the HW may fall back to
+suspend_clk for operation. Because suspend_clk is slow, EP commands need
+more time to complete; e.g. on imx8M the suspend_clk is 32 kHz, and
+setting an endpoint configuration takes about 380us per the trace time
+stamps below (44.286278 - 44.285897 = 0.000381):
+
+configfs_acm.sh-822 [000] d..1 44.285896: dwc3_writel: addr
+000000006d59aae1 value 00000401
+configfs_acm.sh-822 [000] d..1 44.285897: dwc3_readl: addr
+000000006d59aae1 value 00000401
+... ...
+configfs_acm.sh-822 [000] d..1 44.286278: dwc3_readl: addr
+000000006d59aae1 value 00000001
+configfs_acm.sh-822 [000] d..1 44.286279: dwc3_gadget_ep_cmd:
+ep0out: cmd 'Set Endpoint Configuration' [401] params 00001000
+00000500 00000000 --> status: Successful
+
+This was originally found on Hisilicon Kirin Soc that need more time
+for the device controller to clear the CmdAct of DEPCMD.
+
+Signed-off-by: Yu Chen <chenyu56@huawei.com>
+Signed-off-by: John Stultz <john.stultz@linaro.org>
+Signed-off-by: Li Jun <jun.li@nxp.com>
+Signed-off-by: Felipe Balbi <balbi@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/dwc3/gadget.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
+index 7bf2573dd459e..37cc3fd7c3cad 100644
+--- a/drivers/usb/dwc3/gadget.c
++++ b/drivers/usb/dwc3/gadget.c
+@@ -270,7 +270,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd,
+ {
+ const struct usb_endpoint_descriptor *desc = dep->endpoint.desc;
+ struct dwc3 *dwc = dep->dwc;
+- u32 timeout = 1000;
++ u32 timeout = 5000;
+ u32 saved_config = 0;
+ u32 reg;
+
+--
+2.25.1
+
--- /dev/null
+From 734e943b839e3586249f62422ddbdec74f161c4e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2020 19:43:05 +0800
+Subject: USB: EHCI: ehci-mv: fix error handling in mv_ehci_probe()
+
+From: Tang Bin <tangbin@cmss.chinamobile.com>
+
+[ Upstream commit c856b4b0fdb5044bca4c0acf9a66f3b5cc01a37a ]
+
+If the function platform_get_irq() fails, the negative value it
+returns is not detected here, so fix the error handling in
+mv_ehci_probe(). Also, platform_get_irq() already logs an error
+message when getting the IRQ fails, so remove the redundant message
+here.
+
+Signed-off-by: Zhang Shengju <zhangshengju@cmss.chinamobile.com>
+Signed-off-by: Tang Bin <tangbin@cmss.chinamobile.com>
+Link: https://lore.kernel.org/r/20200508114305.15740-1-tangbin@cmss.chinamobile.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/host/ehci-mv.c | 5 ++---
+ 1 file changed, 2 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
+index de764459e05a6..4edcd7536a01b 100644
+--- a/drivers/usb/host/ehci-mv.c
++++ b/drivers/usb/host/ehci-mv.c
+@@ -193,9 +193,8 @@ static int mv_ehci_probe(struct platform_device *pdev)
+ hcd->regs = ehci_mv->op_regs;
+
+ hcd->irq = platform_get_irq(pdev, 0);
+- if (!hcd->irq) {
+- dev_err(&pdev->dev, "Cannot get irq.");
+- retval = -ENODEV;
++ if (hcd->irq < 0) {
++ retval = hcd->irq;
+ goto err_disable_clk;
+ }
+
+--
+2.25.1
+
--- /dev/null
+From e59db2674ff07e4e8b74a460aad1923d6dd095b1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 May 2020 17:54:53 +0100
+Subject: USB: EHCI: ehci-mv: fix less than zero comparison of an unsigned int
+
+From: Colin Ian King <colin.king@canonical.com>
+
+[ Upstream commit a7f40c233a6b0540d28743267560df9cfb571ca9 ]
+
+The comparison of hcd->irq against less than zero as an error check can
+never be true because hcd->irq is an unsigned int. Fix this by
+assigning the return value of platform_get_irq to the signed int retval,
+checking that for a negative error condition, and only then assigning
+retval to hcd->irq.
+
+Addresses-Coverity: ("Unsigned compared against 0")
+Fixes: c856b4b0fdb5 ("USB: EHCI: ehci-mv: fix error handling in mv_ehci_probe()")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Link: https://lore.kernel.org/r/20200515165453.104028-1-colin.king@canonical.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/host/ehci-mv.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/usb/host/ehci-mv.c b/drivers/usb/host/ehci-mv.c
+index 4edcd7536a01b..9d93e7441bbca 100644
+--- a/drivers/usb/host/ehci-mv.c
++++ b/drivers/usb/host/ehci-mv.c
+@@ -192,11 +192,10 @@ static int mv_ehci_probe(struct platform_device *pdev)
+ hcd->rsrc_len = resource_size(r);
+ hcd->regs = ehci_mv->op_regs;
+
+- hcd->irq = platform_get_irq(pdev, 0);
+- if (hcd->irq < 0) {
+- retval = hcd->irq;
++ retval = platform_get_irq(pdev, 0);
++ if (retval < 0)
+ goto err_disable_clk;
+- }
++ hcd->irq = retval;
+
+ ehci = hcd_to_ehci(hcd);
+ ehci->caps = (struct ehci_caps *) ehci_mv->cap_regs;
+--
+2.25.1
+
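+A hedged sketch of the pattern the two ehci-mv patches above converge on,
+assuming the usual probe() context of a platform driver (kernel headers
+such as linux/platform_device.h are implied, so this is not a standalone
+program): platform_get_irq() returns a signed int, so its result must be
+checked in a signed variable before being stored in an unsigned field
+like hcd->irq.
+
+  static int example_probe(struct platform_device *pdev)
+  {
+          unsigned int irq;       /* mirrors hcd->irq: unsigned, never < 0 */
+          int retval;
+
+          retval = platform_get_irq(pdev, 0);     /* logs its own error */
+          if (retval < 0)
+                  return retval;                  /* e.g. -EPROBE_DEFER */
+          irq = retval;
+
+          dev_info(&pdev->dev, "using irq %u\n", irq);
+          return 0;
+  }
+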
--- /dev/null
+From 6f3828b939cb1039bd265146e240ee6af51d74b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 16 Jun 2020 15:26:36 -0600
+Subject: vfio/pci: Clear error and request eventfd ctx after releasing
+
+From: Alex Williamson <alex.williamson@redhat.com>
+
+[ Upstream commit 5c5866c593bbd444d0339ede6a8fb5f14ff66d72 ]
+
+The next use of the device will generate an underflow from the
+stale reference.
+
+Cc: Qian Cai <cai@lca.pw>
+Fixes: 1518ac272e78 ("vfio/pci: fix memory leaks of eventfd ctx")
+Reported-by: Daniel Wagner <dwagner@suse.de>
+Reviewed-by: Cornelia Huck <cohuck@redhat.com>
+Tested-by: Daniel Wagner <dwagner@suse.de>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 86cd8bdfa9f28..94fad366312f1 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -409,10 +409,14 @@ static void vfio_pci_release(void *device_data)
+ if (!(--vdev->refcnt)) {
+ vfio_spapr_pci_eeh_release(vdev->pdev);
+ vfio_pci_disable(vdev);
+- if (vdev->err_trigger)
++ if (vdev->err_trigger) {
+ eventfd_ctx_put(vdev->err_trigger);
+- if (vdev->req_trigger)
++ vdev->err_trigger = NULL;
++ }
++ if (vdev->req_trigger) {
+ eventfd_ctx_put(vdev->req_trigger);
++ vdev->req_trigger = NULL;
++ }
+ }
+
+ mutex_unlock(&driver_lock);
+--
+2.25.1
+
--- /dev/null
+From c0a9076859244a30d3b669150ac1eee494dfb06b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 May 2020 00:34:50 -0400
+Subject: vfio/pci: fix memory leaks of eventfd ctx
+
+From: Qian Cai <cai@lca.pw>
+
+[ Upstream commit 1518ac272e789cae8c555d69951b032a275b7602 ]
+
+Finishing a qemu-kvm run (-device vfio-pci,host=0001:01:00.0) triggers a
+few memory leaks after a while because vfio_pci_set_ctx_trigger_single()
+calls eventfd_ctx_fdget() without the matching eventfd_ctx_put() later.
+Fix it by calling eventfd_ctx_put() for those contexts in
+vfio_pci_release() before vfio_device_release().
+
+unreferenced object 0xebff008981cc2b00 (size 128):
+ comm "qemu-kvm", pid 4043, jiffies 4294994816 (age 9796.310s)
+ hex dump (first 32 bytes):
+ 01 00 00 00 6b 6b 6b 6b 00 00 00 00 ad 4e ad de ....kkkk.....N..
+ ff ff ff ff 6b 6b 6b 6b ff ff ff ff ff ff ff ff ....kkkk........
+ backtrace:
+ [<00000000917e8f8d>] slab_post_alloc_hook+0x74/0x9c
+ [<00000000df0f2aa2>] kmem_cache_alloc_trace+0x2b4/0x3d4
+ [<000000005fcec025>] do_eventfd+0x54/0x1ac
+ [<0000000082791a69>] __arm64_sys_eventfd2+0x34/0x44
+ [<00000000b819758c>] do_el0_svc+0x128/0x1dc
+ [<00000000b244e810>] el0_sync_handler+0xd0/0x268
+ [<00000000d495ef94>] el0_sync+0x164/0x180
+unreferenced object 0x29ff008981cc4180 (size 128):
+ comm "qemu-kvm", pid 4043, jiffies 4294994818 (age 9796.290s)
+ hex dump (first 32 bytes):
+ 01 00 00 00 6b 6b 6b 6b 00 00 00 00 ad 4e ad de ....kkkk.....N..
+ ff ff ff ff 6b 6b 6b 6b ff ff ff ff ff ff ff ff ....kkkk........
+ backtrace:
+ [<00000000917e8f8d>] slab_post_alloc_hook+0x74/0x9c
+ [<00000000df0f2aa2>] kmem_cache_alloc_trace+0x2b4/0x3d4
+ [<000000005fcec025>] do_eventfd+0x54/0x1ac
+ [<0000000082791a69>] __arm64_sys_eventfd2+0x34/0x44
+ [<00000000b819758c>] do_el0_svc+0x128/0x1dc
+ [<00000000b244e810>] el0_sync_handler+0xd0/0x268
+ [<00000000d495ef94>] el0_sync+0x164/0x180
+
+Signed-off-by: Qian Cai <cai@lca.pw>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 9f72a6ee13b53..86cd8bdfa9f28 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -409,6 +409,10 @@ static void vfio_pci_release(void *device_data)
+ if (!(--vdev->refcnt)) {
+ vfio_spapr_pci_eeh_release(vdev->pdev);
+ vfio_pci_disable(vdev);
++ if (vdev->err_trigger)
++ eventfd_ctx_put(vdev->err_trigger);
++ if (vdev->req_trigger)
++ eventfd_ctx_put(vdev->req_trigger);
+ }
+
+ mutex_unlock(&driver_lock);
+--
+2.25.1
+
--- /dev/null
+From 87a1357af5f766d9e767d088c0dff186d9738abe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Jul 2020 15:34:41 +0800
+Subject: vfio/pci: fix racy on error and request eventfd ctx
+
+From: Zeng Tao <prime.zeng@hisilicon.com>
+
+[ Upstream commit b872d0640840018669032b20b6375a478ed1f923 ]
+
+The vfio_pci_release call frees and clears the error and request
+eventfd ctxs while those ctxs could be in use at the same time in
+functions like vfio_pci_request. They are expected to be protected by
+the vdev->igate mutex, which vfio_pci_release does not take.
+
+This issue was introduced by commit 1518ac272e78 ("vfio/pci: fix memory
+leaks of eventfd ctx"), and since commit 5c5866c593bb ("vfio/pci: Clear
+error and request eventfd ctx after releasing") it is very easy to
+trigger a kernel panic like this:
+
+[ 9513.904346] Unable to handle kernel NULL pointer dereference at virtual address 0000000000000008
+[ 9513.913091] Mem abort info:
+[ 9513.915871] ESR = 0x96000006
+[ 9513.918912] EC = 0x25: DABT (current EL), IL = 32 bits
+[ 9513.924198] SET = 0, FnV = 0
+[ 9513.927238] EA = 0, S1PTW = 0
+[ 9513.930364] Data abort info:
+[ 9513.933231] ISV = 0, ISS = 0x00000006
+[ 9513.937048] CM = 0, WnR = 0
+[ 9513.940003] user pgtable: 4k pages, 48-bit VAs, pgdp=0000007ec7d12000
+[ 9513.946414] [0000000000000008] pgd=0000007ec7d13003, p4d=0000007ec7d13003, pud=0000007ec728c003, pmd=0000000000000000
+[ 9513.956975] Internal error: Oops: 96000006 [#1] PREEMPT SMP
+[ 9513.962521] Modules linked in: vfio_pci vfio_virqfd vfio_iommu_type1 vfio hclge hns3 hnae3 [last unloaded: vfio_pci]
+[ 9513.972998] CPU: 4 PID: 1327 Comm: bash Tainted: G W 5.8.0-rc4+ #3
+[ 9513.980443] Hardware name: Huawei TaiShan 2280 V2/BC82AMDC, BIOS 2280-V2 CS V3.B270.01 05/08/2020
+[ 9513.989274] pstate: 80400089 (Nzcv daIf +PAN -UAO BTYPE=--)
+[ 9513.994827] pc : _raw_spin_lock_irqsave+0x48/0x88
+[ 9513.999515] lr : eventfd_signal+0x6c/0x1b0
+[ 9514.003591] sp : ffff800038a0b960
+[ 9514.006889] x29: ffff800038a0b960 x28: ffff007ef7f4da10
+[ 9514.012175] x27: ffff207eefbbfc80 x26: ffffbb7903457000
+[ 9514.017462] x25: ffffbb7912191000 x24: ffff007ef7f4d400
+[ 9514.022747] x23: ffff20be6e0e4c00 x22: 0000000000000008
+[ 9514.028033] x21: 0000000000000000 x20: 0000000000000000
+[ 9514.033321] x19: 0000000000000008 x18: 0000000000000000
+[ 9514.038606] x17: 0000000000000000 x16: ffffbb7910029328
+[ 9514.043893] x15: 0000000000000000 x14: 0000000000000001
+[ 9514.049179] x13: 0000000000000000 x12: 0000000000000002
+[ 9514.054466] x11: 0000000000000000 x10: 0000000000000a00
+[ 9514.059752] x9 : ffff800038a0b840 x8 : ffff007ef7f4de60
+[ 9514.065038] x7 : ffff007fffc96690 x6 : fffffe01faffb748
+[ 9514.070324] x5 : 0000000000000000 x4 : 0000000000000000
+[ 9514.075609] x3 : 0000000000000000 x2 : 0000000000000001
+[ 9514.080895] x1 : ffff007ef7f4d400 x0 : 0000000000000000
+[ 9514.086181] Call trace:
+[ 9514.088618] _raw_spin_lock_irqsave+0x48/0x88
+[ 9514.092954] eventfd_signal+0x6c/0x1b0
+[ 9514.096691] vfio_pci_request+0x84/0xd0 [vfio_pci]
+[ 9514.101464] vfio_del_group_dev+0x150/0x290 [vfio]
+[ 9514.106234] vfio_pci_remove+0x30/0x128 [vfio_pci]
+[ 9514.111007] pci_device_remove+0x48/0x108
+[ 9514.115001] device_release_driver_internal+0x100/0x1b8
+[ 9514.120200] device_release_driver+0x28/0x38
+[ 9514.124452] pci_stop_bus_device+0x68/0xa8
+[ 9514.128528] pci_stop_and_remove_bus_device+0x20/0x38
+[ 9514.133557] pci_iov_remove_virtfn+0xb4/0x128
+[ 9514.137893] sriov_disable+0x3c/0x108
+[ 9514.141538] pci_disable_sriov+0x28/0x38
+[ 9514.145445] hns3_pci_sriov_configure+0x48/0xb8 [hns3]
+[ 9514.150558] sriov_numvfs_store+0x110/0x198
+[ 9514.154724] dev_attr_store+0x44/0x60
+[ 9514.158373] sysfs_kf_write+0x5c/0x78
+[ 9514.162018] kernfs_fop_write+0x104/0x210
+[ 9514.166010] __vfs_write+0x48/0x90
+[ 9514.169395] vfs_write+0xbc/0x1c0
+[ 9514.172694] ksys_write+0x74/0x100
+[ 9514.176079] __arm64_sys_write+0x24/0x30
+[ 9514.179987] el0_svc_common.constprop.4+0x110/0x200
+[ 9514.184842] do_el0_svc+0x34/0x98
+[ 9514.188144] el0_svc+0x14/0x40
+[ 9514.191185] el0_sync_handler+0xb0/0x2d0
+[ 9514.195088] el0_sync+0x140/0x180
+[ 9514.198389] Code: b9001020 d2800000 52800022 f9800271 (885ffe61)
+[ 9514.204455] ---[ end trace 648de00c8406465f ]---
+[ 9514.212308] note: bash[1327] exited with preempt_count 1
+
+Cc: Qian Cai <cai@lca.pw>
+Cc: Alex Williamson <alex.williamson@redhat.com>
+Fixes: 1518ac272e78 ("vfio/pci: fix memory leaks of eventfd ctx")
+Signed-off-by: Zeng Tao <prime.zeng@hisilicon.com>
+Signed-off-by: Alex Williamson <alex.williamson@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/vfio/pci/vfio_pci.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/vfio/pci/vfio_pci.c b/drivers/vfio/pci/vfio_pci.c
+index 94fad366312f1..58e7336b2748b 100644
+--- a/drivers/vfio/pci/vfio_pci.c
++++ b/drivers/vfio/pci/vfio_pci.c
+@@ -409,14 +409,19 @@ static void vfio_pci_release(void *device_data)
+ if (!(--vdev->refcnt)) {
+ vfio_spapr_pci_eeh_release(vdev->pdev);
+ vfio_pci_disable(vdev);
++ mutex_lock(&vdev->igate);
+ if (vdev->err_trigger) {
+ eventfd_ctx_put(vdev->err_trigger);
+ vdev->err_trigger = NULL;
+ }
++ mutex_unlock(&vdev->igate);
++
++ mutex_lock(&vdev->igate);
+ if (vdev->req_trigger) {
+ eventfd_ctx_put(vdev->req_trigger);
+ vdev->req_trigger = NULL;
+ }
++ mutex_unlock(&vdev->igate);
+ }
+
+ mutex_unlock(&driver_lock);
+--
+2.25.1
+
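+Taken together, the three vfio-pci patches above arrive at a common
+pattern: drop each eventfd context exactly once, clear the pointer so
+later users see NULL, and do both under the same mutex that the signaling
+paths take. A hedged kernel-context sketch (the struct and function names
+are hypothetical, and the two critical sections are merged here for
+brevity):
+
+  struct example_dev {
+          struct mutex igate;
+          struct eventfd_ctx *err_trigger;
+          struct eventfd_ctx *req_trigger;
+  };
+
+  static void example_release_triggers(struct example_dev *d)
+  {
+          mutex_lock(&d->igate);
+          if (d->err_trigger) {
+                  eventfd_ctx_put(d->err_trigger);
+                  d->err_trigger = NULL;
+          }
+          if (d->req_trigger) {
+                  eventfd_ctx_put(d->req_trigger);
+                  d->req_trigger = NULL;
+          }
+          mutex_unlock(&d->igate);
+  }
+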
--- /dev/null
+From 95bbc68394c5a800f2f6b6b529afe594fe19dfb5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 May 2020 20:42:38 +0800
+Subject: wlcore: fix runtime pm imbalance in wl1271_tx_work
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit 9604617e998b49f7695fea1479ed82421ef8c9f0 ]
+
+There are two error handling paths in this function. When
+wlcore_tx_work_locked() returns an error code, we should
+decrease the runtime PM usage counter the same way as the
+error handling path beginning from pm_runtime_get_sync().
+
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Acked-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/20200520124241.9931-1-dinghao.liu@zju.edu.cn
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ti/wlcore/tx.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/net/wireless/ti/wlcore/tx.c b/drivers/net/wireless/ti/wlcore/tx.c
+index b6e19c2d66b0a..250bcbf4ea2f2 100644
+--- a/drivers/net/wireless/ti/wlcore/tx.c
++++ b/drivers/net/wireless/ti/wlcore/tx.c
+@@ -877,6 +877,7 @@ void wl1271_tx_work(struct work_struct *work)
+
+ ret = wlcore_tx_work_locked(wl);
+ if (ret < 0) {
++ pm_runtime_put_noidle(wl->dev);
+ wl12xx_queue_recovery_work(wl);
+ goto out;
+ }
+--
+2.25.1
+
--- /dev/null
+From 3601b364e32151f3d5a200f00711a449f6ff3351 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 May 2020 20:46:47 +0800
+Subject: wlcore: fix runtime pm imbalance in wlcore_regdomain_config
+
+From: Dinghao Liu <dinghao.liu@zju.edu.cn>
+
+[ Upstream commit 282a04bf1d8029eb98585cb5db3fd70fe8bc91f7 ]
+
+pm_runtime_get_sync() increments the runtime PM usage counter even when
+the call returns an error code. Thus a pairing decrement is needed
+on the error handling path to keep the counter balanced.
+
+Signed-off-by: Dinghao Liu <dinghao.liu@zju.edu.cn>
+Acked-by: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Kalle Valo <kvalo@codeaurora.org>
+Link: https://lore.kernel.org/r/20200520124649.10848-1-dinghao.liu@zju.edu.cn
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wireless/ti/wlcore/main.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c
+index 2ca5658bbc2ab..43c7b37dec0c9 100644
+--- a/drivers/net/wireless/ti/wlcore/main.c
++++ b/drivers/net/wireless/ti/wlcore/main.c
+@@ -3671,8 +3671,10 @@ void wlcore_regdomain_config(struct wl1271 *wl)
+ goto out;
+
+ ret = pm_runtime_get_sync(wl->dev);
+- if (ret < 0)
++ if (ret < 0) {
++ pm_runtime_put_autosuspend(wl->dev);
+ goto out;
++ }
+
+ ret = wlcore_cmd_regdomain_config_locked(wl);
+ if (ret < 0) {
+--
+2.25.1
+
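+Both wlcore fixes follow the same usage-counter rule, sketched below for
+an ordinary driver context (example_locked_work() is a hypothetical
+helper standing in for the real locked work, and which pm_runtime_put_*
+variant is appropriate depends on the driver): pm_runtime_get_sync()
+bumps the usage counter even when it fails, so every early exit after it
+must drop the counter exactly once.
+
+  static int example_do_work(struct device *dev)
+  {
+          int ret;
+
+          ret = pm_runtime_get_sync(dev);
+          if (ret < 0) {
+                  pm_runtime_put_noidle(dev);     /* undo the failed get */
+                  return ret;
+          }
+
+          ret = example_locked_work(dev);         /* hypothetical helper */
+          if (ret < 0) {
+                  pm_runtime_put_noidle(dev);     /* keep counter balanced */
+                  return ret;
+          }
+
+          pm_runtime_mark_last_busy(dev);
+          pm_runtime_put_autosuspend(dev);
+          return 0;
+  }
+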
--- /dev/null
+From ffa2d9c817c9754b448ca977771f8134ecd8a16e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Jan 2020 08:53:46 -0800
+Subject: x86/pkeys: Add check for pkey "overflow"
+
+From: Dave Hansen <dave.hansen@linux.intel.com>
+
+[ Upstream commit 16171bffc829272d5e6014bad48f680cb50943d9 ]
+
+Alex Shi reported the pkey macros above arch_set_user_pkey_access()
+to be unused. They are unused, and even refer to a nonexistent
+CONFIG option.
+
+But, they might have served a good use, which was to ensure that
+the code does not try to set values that would not fit in the
+PKRU register. As it stands, a too-large 'pkey' value would
+be likely to silently overflow the u32 new_pkru_bits.
+
+Add a check to look for overflows. Also add a comment to remind
+any future developer to closely examine the types used to store
+pkey values if arch_max_pkey() ever changes.
+
+This boots and passes the x86 pkey selftests.
+
+Reported-by: Alex Shi <alex.shi@linux.alibaba.com>
+Signed-off-by: Dave Hansen <dave.hansen@intel.com>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Link: https://lkml.kernel.org/r/20200122165346.AD4DA150@viggo.jf.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/pkeys.h | 5 +++++
+ arch/x86/kernel/fpu/xstate.c | 9 +++++++--
+ 2 files changed, 12 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/pkeys.h b/arch/x86/include/asm/pkeys.h
+index 19b137f1b3beb..2ff9b98812b76 100644
+--- a/arch/x86/include/asm/pkeys.h
++++ b/arch/x86/include/asm/pkeys.h
+@@ -4,6 +4,11 @@
+
+ #define ARCH_DEFAULT_PKEY 0
+
++/*
++ * If more than 16 keys are ever supported, a thorough audit
++ * will be necessary to ensure that the types that store key
++ * numbers and masks have sufficient capacity.
++ */
+ #define arch_max_pkey() (boot_cpu_has(X86_FEATURE_OSPKE) ? 16 : 1)
+
+ extern int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
+index 4b900035f2202..601a5da1d196a 100644
+--- a/arch/x86/kernel/fpu/xstate.c
++++ b/arch/x86/kernel/fpu/xstate.c
+@@ -907,8 +907,6 @@ const void *get_xsave_field_ptr(int xsave_state)
+
+ #ifdef CONFIG_ARCH_HAS_PKEYS
+
+-#define NR_VALID_PKRU_BITS (CONFIG_NR_PROTECTION_KEYS * 2)
+-#define PKRU_VALID_MASK (NR_VALID_PKRU_BITS - 1)
+ /*
+ * This will go out and modify PKRU register to set the access
+ * rights for @pkey to @init_val.
+@@ -927,6 +925,13 @@ int arch_set_user_pkey_access(struct task_struct *tsk, int pkey,
+ if (!boot_cpu_has(X86_FEATURE_OSPKE))
+ return -EINVAL;
+
++ /*
++ * This code should only be called with valid 'pkey'
++ * values originating from in-kernel users. Complain
++ * if a bad value is observed.
++ */
++ WARN_ON_ONCE(pkey >= arch_max_pkey());
++
+ /* Set the bits we need in PKRU: */
+ if (init_val & PKEY_DISABLE_ACCESS)
+ new_pkru_bits |= PKRU_AD_BIT;
+--
+2.25.1
+
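+The overflow being guarded against is easy to demonstrate in userspace;
+in the sketch below the PKRU bit definitions match the x86 layout, while
+the loop bounds are chosen only to show where a 32-bit value runs out of
+room:
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  #define PKRU_AD_BIT 0x1u        /* access-disable, as in the PKRU layout */
+  #define PKRU_WD_BIT 0x2u        /* write-disable */
+
+  int main(void)
+  {
+          uint32_t new_pkru_bits = 0;
+          int pkey;
+
+          /* PKRU holds 16 keys x 2 bits = 32 bits.  For pkey 15 the shift
+           * is 30 and still fits; for pkey 16 it would be 32, which no
+           * longer fits in a 32-bit value -- the case the new
+           * WARN_ON_ONCE(pkey >= arch_max_pkey()) check complains about. */
+          for (pkey = 14; pkey <= 16; pkey++) {
+                  int shift = pkey * 2;
+
+                  if (shift >= 32) {
+                          printf("pkey %d: shift %d does not fit in PKRU\n",
+                                 pkey, shift);
+                          continue;
+                  }
+                  new_pkru_bits = (PKRU_AD_BIT | PKRU_WD_BIT) << shift;
+                  printf("pkey %d: PKRU bits 0x%08x\n",
+                         pkey, (unsigned int)new_pkru_bits);
+          }
+          return 0;
+  }
+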
--- /dev/null
+From b336f10f58b5e1670dede88a3940a756f8cd5fe7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 4 Mar 2020 12:49:18 +0100
+Subject: x86/speculation/mds: Mark mds_user_clear_cpu_buffers()
+ __always_inline
+
+From: Thomas Gleixner <tglx@linutronix.de>
+
+[ Upstream commit a7ef9ba986b5fae9d80f8a7b31db0423687efe4e ]
+
+Prevent the compiler from uninlining and creating traceable/probable
+functions as this is invoked _after_ context tracking switched to
+CONTEXT_USER and rcu idle.
+
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Reviewed-by: Alexandre Chartre <alexandre.chartre@oracle.com>
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Link: https://lkml.kernel.org/r/20200505134340.902709267@linutronix.de
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/nospec-branch.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
+index e3f70c60e8ccd..62f9903544b59 100644
+--- a/arch/x86/include/asm/nospec-branch.h
++++ b/arch/x86/include/asm/nospec-branch.h
+@@ -330,7 +330,7 @@ DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
+ * combination with microcode which triggers a CPU buffer flush when the
+ * instruction is executed.
+ */
+-static inline void mds_clear_cpu_buffers(void)
++static __always_inline void mds_clear_cpu_buffers(void)
+ {
+ static const u16 ds = __KERNEL_DS;
+
+@@ -351,7 +351,7 @@ static inline void mds_clear_cpu_buffers(void)
+ *
+ * Clear CPU buffers if the corresponding static key is enabled
+ */
+-static inline void mds_user_clear_cpu_buffers(void)
++static __always_inline void mds_user_clear_cpu_buffers(void)
+ {
+ if (static_branch_likely(&mds_user_clear))
+ mds_clear_cpu_buffers();
+--
+2.25.1
+
--- /dev/null
+From 7cf96d3fc5dacc71ad32a3ec8c57c38e1e6711c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Mar 2020 10:37:55 -0700
+Subject: xfs: don't ever return a stale pointer from __xfs_dir3_free_read
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit 1cb5deb5bc095c070c09a4540c45f9c9ba24be43 ]
+
+If we decide that a directory free block is corrupt, we must take care
+not to leak a buffer pointer to the caller. After xfs_trans_brelse
+returns, the buffer can be freed or reused, which means that we have to
+set *bpp back to NULL.
+
+Callers are supposed to notice the nonzero return value and not use the
+buffer pointer, but we should code more defensively, even if all current
+callers handle this situation correctly.
+
+Fixes: de14c5f541e7 ("xfs: verify free block header fields")
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/libxfs/xfs_dir2_node.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/xfs/libxfs/xfs_dir2_node.c b/fs/xfs/libxfs/xfs_dir2_node.c
+index f1bb3434f51c7..01e99806b941f 100644
+--- a/fs/xfs/libxfs/xfs_dir2_node.c
++++ b/fs/xfs/libxfs/xfs_dir2_node.c
+@@ -214,6 +214,7 @@ __xfs_dir3_free_read(
+ if (fa) {
+ xfs_verifier_error(*bpp, -EFSCORRUPTED, fa);
+ xfs_trans_brelse(tp, *bpp);
++ *bpp = NULL;
+ return -EFSCORRUPTED;
+ }
+
+--
+2.25.1
+
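+The defensive rule applied above, never hand a released buffer back
+through an out parameter, can be modelled with a short userspace sketch
+(the function and sizes are invented for illustration):
+
+  #include <stdio.h>
+  #include <stdlib.h>
+
+  /* Toy model: if the callee frees the object on an error path, it must
+   * also clear the caller's pointer so a sloppy caller cannot
+   * dereference freed memory. */
+  static int read_block(int corrupt, char **bpp)
+  {
+          *bpp = malloc(64);
+          if (!*bpp)
+                  return -1;
+
+          if (corrupt) {
+                  free(*bpp);     /* analogous to xfs_trans_brelse() */
+                  *bpp = NULL;    /* never hand back a stale pointer */
+                  return -1;
+          }
+          return 0;
+  }
+
+  int main(void)
+  {
+          char *buf = NULL;
+
+          if (read_block(1, &buf))
+                  printf("error path: buf is %s\n", buf ? "stale!" : "NULL");
+          free(buf);      /* safe: free(NULL) is a no-op */
+          return 0;
+  }
+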
--- /dev/null
+From ba3fe398165ded46eca5abe76ae6ebb90d099a02 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Nov 2019 21:15:08 -0800
+Subject: xfs: fix attr leaf header freemap.size underflow
+
+From: Brian Foster <bfoster@redhat.com>
+
+[ Upstream commit 2a2b5932db67586bacc560cc065d62faece5b996 ]
+
+The leaf format xattr addition helper xfs_attr3_leaf_add_work()
+adjusts the block freemap in a couple places. The first update drops
+the size of the freemap that the caller had already selected to
+place the xattr name/value data. Before the function returns, it
+also checks whether the entries array has encroached on a freemap
+range by virtue of the new entry addition. This is necessary because
+the entries array grows from the start of the block (but end of the
+block header) towards the end of the block while the name/value data
+grows from the end of the block in the opposite direction. If the
+associated freemap is already empty, however, size is zero and the
+subtraction underflows the field and causes corruption.
+
+This is reproduced rarely by generic/070. The observed behavior is
+that a smaller sized freemap is aligned to the end of the entries
+list, several subsequent xattr additions land in larger freemaps and
+the entries list expands into the smaller freemap until it is fully
+consumed and then underflows. Note that it is not otherwise a
+corruption for the entries array to consume an empty freemap because
+the nameval list (i.e. the firstused pointer in the xattr header)
+starts beyond the end of the corrupted freemap.
+
+Update the freemap size modification to account for the fact that
+the freemap entry can be empty and thus stale.
+
+Signed-off-by: Brian Foster <bfoster@redhat.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/libxfs/xfs_attr_leaf.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/fs/xfs/libxfs/xfs_attr_leaf.c b/fs/xfs/libxfs/xfs_attr_leaf.c
+index bd37f4a292c3b..efb586ea508bf 100644
+--- a/fs/xfs/libxfs/xfs_attr_leaf.c
++++ b/fs/xfs/libxfs/xfs_attr_leaf.c
+@@ -1438,7 +1438,9 @@ xfs_attr3_leaf_add_work(
+ for (i = 0; i < XFS_ATTR_LEAF_MAPSIZE; i++) {
+ if (ichdr->freemap[i].base == tmp) {
+ ichdr->freemap[i].base += sizeof(xfs_attr_leaf_entry_t);
+- ichdr->freemap[i].size -= sizeof(xfs_attr_leaf_entry_t);
++ ichdr->freemap[i].size -=
++ min_t(uint16_t, ichdr->freemap[i].size,
++ sizeof(xfs_attr_leaf_entry_t));
+ }
+ }
+ ichdr->usedbytes += xfs_attr_leaf_entsize(leaf, args->index);
+--
+2.25.1
+
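+The underflow itself is easy to reproduce outside the filesystem; in this
+small sketch the 8-byte entry size merely stands in for
+sizeof(xfs_attr_leaf_entry_t), and the clamp mirrors the min_t() guard
+added above:
+
+  #include <stdio.h>
+  #include <stdint.h>
+
+  #define MIN_U16(a, b) ((uint16_t)((a) < (b) ? (a) : (b)))
+
+  int main(void)
+  {
+          uint16_t size = 0;          /* an already-empty freemap entry */
+          const uint16_t entsize = 8; /* stand-in for the entry size */
+          uint16_t naive, clamped;
+
+          naive = size - entsize;                   /* wraps to 65528 */
+          clamped = size - MIN_U16(size, entsize);  /* stays 0 */
+
+          printf("naive: %u, clamped: %u\n",
+                 (unsigned int)naive, (unsigned int)clamped);
+          return 0;
+  }
+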
--- /dev/null
+From 6201de7214a75371344ab0d4ee2342d9bbaf8bd3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Dec 2019 13:19:07 -0800
+Subject: xfs: fix log reservation overflows when allocating large rt extents
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit b1de6fc7520fe12949c070af0e8c0e4044cd3420 ]
+
+Omar Sandoval reported that a 4G fallocate on the realtime device causes
+filesystem shutdowns due to a log reservation overflow that happens when
+we log the rtbitmap updates. Factor rtbitmap/rtsummary updates into the
+the tr_write and tr_itruncate log reservation calculation.
+
+"The following reproducer results in a transaction log overrun warning
+for me:
+
+ mkfs.xfs -f -r rtdev=/dev/vdc -d rtinherit=1 -m reflink=0 /dev/vdb
+ mount -o rtdev=/dev/vdc /dev/vdb /mnt
+ fallocate -l 4G /mnt/foo"
+
+Reported-by: Omar Sandoval <osandov@osandov.com>
+Tested-by: Omar Sandoval <osandov@osandov.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Brian Foster <bfoster@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/libxfs/xfs_trans_resv.c | 96 +++++++++++++++++++++++++++-------
+ 1 file changed, 77 insertions(+), 19 deletions(-)
+
+diff --git a/fs/xfs/libxfs/xfs_trans_resv.c b/fs/xfs/libxfs/xfs_trans_resv.c
+index f99a7aefe4184..2b3cc5a8ced1b 100644
+--- a/fs/xfs/libxfs/xfs_trans_resv.c
++++ b/fs/xfs/libxfs/xfs_trans_resv.c
+@@ -197,6 +197,24 @@ xfs_calc_inode_chunk_res(
+ return res;
+ }
+
++/*
++ * Per-extent log reservation for the btree changes involved in freeing or
++ * allocating a realtime extent. We have to be able to log as many rtbitmap
++ * blocks as needed to mark inuse MAXEXTLEN blocks' worth of realtime extents,
++ * as well as the realtime summary block.
++ */
++unsigned int
++xfs_rtalloc_log_count(
++ struct xfs_mount *mp,
++ unsigned int num_ops)
++{
++ unsigned int blksz = XFS_FSB_TO_B(mp, 1);
++ unsigned int rtbmp_bytes;
++
++ rtbmp_bytes = (MAXEXTLEN / mp->m_sb.sb_rextsize) / NBBY;
++ return (howmany(rtbmp_bytes, blksz) + 1) * num_ops;
++}
++
+ /*
+ * Various log reservation values.
+ *
+@@ -219,13 +237,21 @@ xfs_calc_inode_chunk_res(
+
+ /*
+ * In a write transaction we can allocate a maximum of 2
+- * extents. This gives:
++ * extents. This gives (t1):
+ * the inode getting the new extents: inode size
+ * the inode's bmap btree: max depth * block size
+ * the agfs of the ags from which the extents are allocated: 2 * sector
+ * the superblock free block counter: sector size
+ * the allocation btrees: 2 exts * 2 trees * (2 * max depth - 1) * block size
+- * And the bmap_finish transaction can free bmap blocks in a join:
++ * Or, if we're writing to a realtime file (t2):
++ * the inode getting the new extents: inode size
++ * the inode's bmap btree: max depth * block size
++ * the agfs of the ags from which the extents are allocated: 2 * sector
++ * the superblock free block counter: sector size
++ * the realtime bitmap: ((MAXEXTLEN / rtextsize) / NBBY) bytes
++ * the realtime summary: 1 block
++ * the allocation btrees: 2 trees * (2 * max depth - 1) * block size
++ * And the bmap_finish transaction can free bmap blocks in a join (t3):
+ * the agfs of the ags containing the blocks: 2 * sector size
+ * the agfls of the ags containing the blocks: 2 * sector size
+ * the super block free block counter: sector size
+@@ -235,40 +261,72 @@ STATIC uint
+ xfs_calc_write_reservation(
+ struct xfs_mount *mp)
+ {
+- return XFS_DQUOT_LOGRES(mp) +
+- max((xfs_calc_inode_res(mp, 1) +
++ unsigned int t1, t2, t3;
++ unsigned int blksz = XFS_FSB_TO_B(mp, 1);
++
++ t1 = xfs_calc_inode_res(mp, 1) +
++ xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK), blksz) +
++ xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
++
++ if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
++ t2 = xfs_calc_inode_res(mp, 1) +
+ xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK),
+- XFS_FSB_TO_B(mp, 1)) +
++ blksz) +
+ xfs_calc_buf_res(3, mp->m_sb.sb_sectsize) +
+- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
+- XFS_FSB_TO_B(mp, 1))),
+- (xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
+- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2),
+- XFS_FSB_TO_B(mp, 1))));
++ xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 1), blksz) +
++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 1), blksz);
++ } else {
++ t2 = 0;
++ }
++
++ t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
++
++ return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
+ }
+
+ /*
+- * In truncating a file we free up to two extents at once. We can modify:
++ * In truncating a file we free up to two extents at once. We can modify (t1):
+ * the inode being truncated: inode size
+ * the inode's bmap btree: (max depth + 1) * block size
+- * And the bmap_finish transaction can free the blocks and bmap blocks:
++ * And the bmap_finish transaction can free the blocks and bmap blocks (t2):
+ * the agf for each of the ags: 4 * sector size
+ * the agfl for each of the ags: 4 * sector size
+ * the super block to reflect the freed blocks: sector size
+ * worst case split in allocation btrees per extent assuming 4 extents:
+ * 4 exts * 2 trees * (2 * max depth - 1) * block size
++ * Or, if it's a realtime file (t3):
++ * the agf for each of the ags: 2 * sector size
++ * the agfl for each of the ags: 2 * sector size
++ * the super block to reflect the freed blocks: sector size
++ * the realtime bitmap: 2 exts * ((MAXEXTLEN / rtextsize) / NBBY) bytes
++ * the realtime summary: 2 exts * 1 block
++ * worst case split in allocation btrees per extent assuming 2 extents:
++ * 2 exts * 2 trees * (2 * max depth - 1) * block size
+ */
+ STATIC uint
+ xfs_calc_itruncate_reservation(
+ struct xfs_mount *mp)
+ {
+- return XFS_DQUOT_LOGRES(mp) +
+- max((xfs_calc_inode_res(mp, 1) +
+- xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1,
+- XFS_FSB_TO_B(mp, 1))),
+- (xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
+- xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4),
+- XFS_FSB_TO_B(mp, 1))));
++ unsigned int t1, t2, t3;
++ unsigned int blksz = XFS_FSB_TO_B(mp, 1);
++
++ t1 = xfs_calc_inode_res(mp, 1) +
++ xfs_calc_buf_res(XFS_BM_MAXLEVELS(mp, XFS_DATA_FORK) + 1, blksz);
++
++ t2 = xfs_calc_buf_res(9, mp->m_sb.sb_sectsize) +
++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 4), blksz);
++
++ if (xfs_sb_version_hasrealtime(&mp->m_sb)) {
++ t3 = xfs_calc_buf_res(5, mp->m_sb.sb_sectsize) +
++ xfs_calc_buf_res(xfs_rtalloc_log_count(mp, 2), blksz) +
++ xfs_calc_buf_res(xfs_allocfree_log_count(mp, 2), blksz);
++ } else {
++ t3 = 0;
++ }
++
++ return XFS_DQUOT_LOGRES(mp) + max3(t1, t2, t3);
+ }
+
+ /*
+--
+2.25.1
+
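+To make the new xfs_rtalloc_log_count() arithmetic concrete, here is a
+small userspace calculation using illustrative geometry (4k filesystem
+blocks, a realtime extent size of one block); the values are examples,
+not mkfs defaults:
+
+  #include <stdio.h>
+
+  #define NBBY            8
+  #define MAXEXTLEN       ((1 << 21) - 1) /* max blocks in one bmap extent */
+  #define howmany(x, y)   (((x) + ((y) - 1)) / (y))
+
+  int main(void)
+  {
+          unsigned int blksz = 4096, rextsize = 1, num_ops = 1;
+          unsigned int rtbmp_bytes = (MAXEXTLEN / rextsize) / NBBY;
+          unsigned int blocks = (howmany(rtbmp_bytes, blksz) + 1) * num_ops;
+
+          printf("rtbitmap bytes touched: %u, log blocks reserved: %u\n",
+                 rtbmp_bytes, blocks);
+          return 0;
+  }
+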
--- /dev/null
+From b8e09e0505ed750ca1e259b762655f581c491cc9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Mar 2020 10:37:57 -0700
+Subject: xfs: mark dir corrupt when lookup-by-hash fails
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit 2e107cf869eecc770e3f630060bb4e5f547d0fd8 ]
+
+In xchk_dir_actor, we attempt to validate the directory hash structures
+by performing a directory entry lookup by (hashed) name. If the lookup
+returns ENOENT, that means that the hash information is corrupt. The
+_process_error functions don't catch this, so we have to add that
+explicitly.
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Dave Chinner <dchinner@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/scrub/dir.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/fs/xfs/scrub/dir.c b/fs/xfs/scrub/dir.c
+index cd3e4d768a18c..33dfcba72c7a0 100644
+--- a/fs/xfs/scrub/dir.c
++++ b/fs/xfs/scrub/dir.c
+@@ -156,6 +156,9 @@ xchk_dir_actor(
+ xname.type = XFS_DIR3_FT_UNKNOWN;
+
+ error = xfs_dir_lookup(sdc->sc->tp, ip, &xname, &lookup_ino, NULL);
++ /* ENOENT means the hash lookup failed and the dir is corrupt */
++ if (error == -ENOENT)
++ error = -EFSCORRUPTED;
+ if (!xchk_fblock_process_error(sdc->sc, XFS_DATA_FORK, offset,
+ &error))
+ goto out;
+--
+2.25.1
+