--- /dev/null
+From f57870578d3694f78022f54c2d4aee22d688ebb8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 9 May 2020 10:44:41 +0200
+Subject: ACPI: EC: PM: Avoid premature returns from acpi_s2idle_wake()
+
+From: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+
+[ Upstream commit 7b301750f7f8f6503e11f1af4a03832525f58c66 ]
+
+If the EC GPE status is not set after checking all of the other GPEs,
+acpi_s2idle_wake() returns 'false', to indicate that the SCI event
+that has just triggered is not a system wakeup one, but it does that
+without canceling the pending wakeup and re-arming the SCI for system
+wakeup, which is a mistake because it may cause s2idle_loop() to busy
+spin until the next valid wakeup event. [If that happens, the first
+spurious wakeup is still pending after acpi_s2idle_wake() has
+returned, so s2idle_enter() does nothing, acpi_s2idle_wake()
+is called again and it sees that the SCI has triggered, but no GPEs
+are active, so 'false' is returned again, and so on.]
+
+Fix that by moving all of the GPE checking logic from
+acpi_s2idle_wake() to acpi_ec_dispatch_gpe() and making the
+latter return 'true' only if a non-EC GPE has triggered and
+'false' otherwise, which will cause acpi_s2idle_wake() to
+cancel the pending SCI wakeup and re-arm the SCI for system
+wakeup regardless of the EC GPE status.
+
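+In other words, after the change the dispatch helper decides the wakeup
+roughly as follows (a simplified sketch of the logic in the diff below,
+not the literal kernel code):
+
+	bool acpi_ec_dispatch_gpe(void)
+	{
+		if (!first_ec)
+			return acpi_any_gpe_status_set(U32_MAX);
+
+		/* Report a wakeup only if a non-EC GPE is active. */
+		if (acpi_any_gpe_status_set(first_ec->gpe))
+			return true;
+
+		/* Handle the EC GPE in-band, but never report a wakeup for
+		 * it, so the caller always cancels and re-arms the SCI. */
+		if (!ec_no_wakeup)
+			acpi_dispatch_gpe(NULL, first_ec->gpe);
+
+		return false;
+	}
+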
+This also addresses a lockup observed on an Elitegroup EF20EA laptop
+after attempting to wake it up from suspend-to-idle by a key press.
+
+Fixes: d5406284ff80 ("ACPI: PM: s2idle: Refine active GPEs check")
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=207603
+Reported-by: Todd Brandt <todd.e.brandt@linux.intel.com>
+Fixes: fdde0ff8590b ("ACPI: PM: s2idle: Prevent spurious SCIs from waking up the system")
+Link: https://lore.kernel.org/linux-acpi/CAB4CAwdqo7=MvyG_PE+PGVfeA17AHF5i5JucgaKqqMX6mjArbQ@mail.gmail.com/
+Reported-by: Chris Chiu <chiu@endlessm.com>
+Tested-by: Chris Chiu <chiu@endlessm.com>
+Cc: 5.4+ <stable@vger.kernel.org> # 5.4+
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/ec.c | 24 ++++++++++++++++--------
+ drivers/acpi/internal.h | 1 -
+ drivers/acpi/sleep.c | 14 ++------------
+ 3 files changed, 18 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
+index 35dd2f1fb0e61..03b3067811c99 100644
+--- a/drivers/acpi/ec.c
++++ b/drivers/acpi/ec.c
+@@ -2042,23 +2042,31 @@ void acpi_ec_set_gpe_wake_mask(u8 action)
+ acpi_set_gpe_wake_mask(NULL, first_ec->gpe, action);
+ }
+
+-bool acpi_ec_other_gpes_active(void)
+-{
+- return acpi_any_gpe_status_set(first_ec ? first_ec->gpe : U32_MAX);
+-}
+-
+ bool acpi_ec_dispatch_gpe(void)
+ {
+ u32 ret;
+
+ if (!first_ec)
++ return acpi_any_gpe_status_set(U32_MAX);
++
++ /*
++ * Report wakeup if the status bit is set for any enabled GPE other
++ * than the EC one.
++ */
++ if (acpi_any_gpe_status_set(first_ec->gpe))
++ return true;
++
++ if (ec_no_wakeup)
+ return false;
+
++ /*
++ * Dispatch the EC GPE in-band, but do not report wakeup in any case
++ * to allow the caller to process events properly after that.
++ */
+ ret = acpi_dispatch_gpe(NULL, first_ec->gpe);
+- if (ret == ACPI_INTERRUPT_HANDLED) {
++ if (ret == ACPI_INTERRUPT_HANDLED)
+ pm_pr_dbg("EC GPE dispatched\n");
+- return true;
+- }
++
+ return false;
+ }
+ #endif /* CONFIG_PM_SLEEP */
+diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
+index d44c591c4ee4d..3616daec650b1 100644
+--- a/drivers/acpi/internal.h
++++ b/drivers/acpi/internal.h
+@@ -202,7 +202,6 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit);
+
+ #ifdef CONFIG_PM_SLEEP
+ void acpi_ec_flush_work(void);
+-bool acpi_ec_other_gpes_active(void);
+ bool acpi_ec_dispatch_gpe(void);
+ #endif
+
+diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
+index 4edc8a3ce40fd..3850704570c0c 100644
+--- a/drivers/acpi/sleep.c
++++ b/drivers/acpi/sleep.c
+@@ -1013,20 +1013,10 @@ static bool acpi_s2idle_wake(void)
+ if (acpi_check_wakeup_handlers())
+ return true;
+
+- /*
+- * If the status bit is set for any enabled GPE other than the
+- * EC one, the wakeup is regarded as a genuine one.
+- */
+- if (acpi_ec_other_gpes_active())
++ /* Check non-EC GPE wakeups and dispatch the EC GPE. */
++ if (acpi_ec_dispatch_gpe())
+ return true;
+
+- /*
+- * If the EC GPE status bit has not been set, the wakeup is
+- * regarded as a spurious one.
+- */
+- if (!acpi_ec_dispatch_gpe())
+- return false;
+-
+ /*
+ * Cancel the wakeup and process all pending events in case
+ * there are any wakeup ones in there.
+--
+2.20.1
+
--- /dev/null
+From 68d903a3874e0ff0d56833965387b512940260ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 3 May 2020 13:57:18 +0900
+Subject: ALSA: firewire-lib: fix 'function sizeof not defined' error of
+ tracepoints format
+
+From: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+
+[ Upstream commit 1034872123a06b759aba772b1c99612ccb8e632a ]
+
+The snd-firewire-lib.ko module has an 'amdtp-packet' tracepoints event. The
+current printk format for the event includes the 'sizeof(u8)' macro, which is
+expected to be expanded at compilation time. However, this does not happen.
+As a result, perf tools cannot parse the event for printing:
+
+$ mount -l -t debugfs
+debugfs on /sys/kernel/debug type debugfs (rw,nosuid,nodev,noexec,relatime)
+$ cat /sys/kernel/debug/tracing/events/snd_firewire_lib/amdtp_packet/format
+...
+print fmt: "%02u %04u %04x %04x %02d %03u %02u %03u %02u %01u %02u %s",
+ REC->second, REC->cycle, REC->src, REC->dest, REC->channel,
+ REC->payload_quadlets, REC->data_blocks, REC->data_block_counter,
+ REC->packet_index, REC->irq, REC->index,
+ __print_array(__get_dynamic_array(cip_header),
+ __get_dynamic_array_len(cip_header),
+ sizeof(u8))
+
+$ sudo perf record -e snd_firewire_lib:amdtp_packet
+ [snd_firewire_lib:amdtp_packet] function sizeof not defined
+ Error: expected type 5 but read 0
+
+This commit fixes it by replacing the macro with the actual size.
+
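+The third argument of __print_array() is the element size in bytes. Because
+the format string is exported verbatim to user space (as shown above), it has
+to be a literal constant rather than a sizeof() expression, i.e.:
+
+	__print_array(__get_dynamic_array(cip_header),
+		      __get_dynamic_array_len(cip_header), 1)
+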
+Cc: <stable@vger.kernel.org>
+Fixes: bde2bbdb307a ("ALSA: firewire-lib: use dynamic array for CIP header of tracing events")
+Signed-off-by: Takashi Sakamoto <o-takashi@sakamocchi.jp>
+Link: https://lore.kernel.org/r/20200503045718.86337-1-o-takashi@sakamocchi.jp
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/firewire/amdtp-stream-trace.h | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/sound/firewire/amdtp-stream-trace.h b/sound/firewire/amdtp-stream-trace.h
+index 16c7f6605511e..26e7cb555d3c5 100644
+--- a/sound/firewire/amdtp-stream-trace.h
++++ b/sound/firewire/amdtp-stream-trace.h
+@@ -66,8 +66,7 @@ TRACE_EVENT(amdtp_packet,
+ __entry->irq,
+ __entry->index,
+ __print_array(__get_dynamic_array(cip_header),
+- __get_dynamic_array_len(cip_header),
+- sizeof(u8)))
++ __get_dynamic_array_len(cip_header), 1))
+ );
+
+ #endif
+--
+2.20.1
+
--- /dev/null
+From 6a5f6ccd5dea6e0baa06e824e033576d3ad027f6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2020 15:38:36 +0300
+Subject: ALSA: hda/hdmi: fix race in monitor detection during probe
+
+From: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+
+[ Upstream commit ca76282b6faffc83601c25bd2a95f635c03503ef ]
+
+A race exists between the build_pcms() and build_controls() phases of codec
+setup. build_pcms() sets up the notifier for jack events. If a monitor event
+is received before build_controls() is run, the initial jack state is
+lost and never reported via mixer controls.
+
+The problem can be hit at least with SOF as the controller driver. SOF
+calls snd_hda_codec_build_controls() in its workqueue-based probe and
+this can be delayed enough to hit the race condition.
+
+Fix the issue by invalidating the per-pin ELD information when
+build_controls() is called. The existing call to hdmi_present_sense()
+will update the ELD contents. This ensures the initial monitor state is
+correctly reflected via mixer controls.
+
+BugLink: https://github.com/thesofproject/linux/issues/1687
+Signed-off-by: Kai Vehmanen <kai.vehmanen@linux.intel.com>
+Link: https://lore.kernel.org/r/20200428123836.24512-1-kai.vehmanen@linux.intel.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_hdmi.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index 0c1a59d5ad59d..0f3250417b955 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -2320,7 +2320,9 @@ static int generic_hdmi_build_controls(struct hda_codec *codec)
+
+ for (pin_idx = 0; pin_idx < spec->num_pins; pin_idx++) {
+ struct hdmi_spec_per_pin *per_pin = get_pin(spec, pin_idx);
++ struct hdmi_eld *pin_eld = &per_pin->sink_eld;
+
++ pin_eld->eld_valid = false;
+ hdmi_present_sense(per_pin, 0);
+ }
+
+--
+2.20.1
+
--- /dev/null
+From f7b50a8019dcc33201f5a7c146dfc0d321dfadcb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 3 May 2020 23:24:47 +0800
+Subject: ALSA: hda/realtek - Fix S3 pop noise on Dell Wyse
+
+From: Kai-Heng Feng <kai.heng.feng@canonical.com>
+
+[ Upstream commit 52e4e36807aeac1cdd07b14e509c8a64101e1a09 ]
+
+Commit 317d9313925c ("ALSA: hda/realtek - Set default power save node to
+0") causes the ALC225 to produce pop noise on S3 resume and cold boot.
+
+The previous fix enabled the power save node universally for ALC225; however,
+it makes some ALC225 systems unable to produce any sound.
+
+So let's only enable the power save node for the affected Dell Wyse
+platform.
+
+Fixes: 317d9313925c ("ALSA: hda/realtek - Set default power save node to 0")
+BugLink: https://bugs.launchpad.net/bugs/1866357
+Signed-off-by: Kai-Heng Feng <kai.heng.feng@canonical.com>
+Link: https://lore.kernel.org/r/20200503152449.22761-2-kai.heng.feng@canonical.com
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ sound/pci/hda/patch_realtek.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index da4863d7f7f24..611498270c5e6 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5743,6 +5743,15 @@ static void alc233_alc662_fixup_lenovo_dual_codecs(struct hda_codec *codec,
+ }
+ }
+
++static void alc225_fixup_s3_pop_noise(struct hda_codec *codec,
++ const struct hda_fixup *fix, int action)
++{
++ if (action != HDA_FIXUP_ACT_PRE_PROBE)
++ return;
++
++ codec->power_save_node = 1;
++}
++
+ /* Forcibly assign NID 0x03 to HP/LO while NID 0x02 to SPK for EQ */
+ static void alc274_fixup_bind_dacs(struct hda_codec *codec,
+ const struct hda_fixup *fix, int action)
+@@ -5932,6 +5941,7 @@ enum {
+ ALC233_FIXUP_ACER_HEADSET_MIC,
+ ALC294_FIXUP_LENOVO_MIC_LOCATION,
+ ALC225_FIXUP_DELL_WYSE_MIC_NO_PRESENCE,
++ ALC225_FIXUP_S3_POP_NOISE,
+ ALC700_FIXUP_INTEL_REFERENCE,
+ ALC274_FIXUP_DELL_BIND_DACS,
+ ALC274_FIXUP_DELL_AIO_LINEOUT_VERB,
+@@ -6817,6 +6827,12 @@ static const struct hda_fixup alc269_fixups[] = {
+ { }
+ },
+ .chained = true,
++ .chain_id = ALC225_FIXUP_S3_POP_NOISE
++ },
++ [ALC225_FIXUP_S3_POP_NOISE] = {
++ .type = HDA_FIXUP_FUNC,
++ .v.func = alc225_fixup_s3_pop_noise,
++ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
+ [ALC700_FIXUP_INTEL_REFERENCE] = {
+--
+2.20.1
+
--- /dev/null
+From 84fc828f488c8dace3ddd933147904589552f9a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 10 May 2020 09:54:41 +0200
+Subject: arm64: fix the flush_icache_range arguments in machine_kexec
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit d51c214541c5154dda3037289ee895ea3ded5ebd ]
+
+The second argument is the end "pointer", not the length.
+
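+For reference, the call before and after the fix (taken from the hunk below):
+
+	/* before: the second argument was actually a length */
+	__flush_icache_range((uintptr_t)reboot_code_buffer,
+			     arm64_relocate_new_kernel_size);
+
+	/* after: pass the end address, i.e. start + size */
+	__flush_icache_range((uintptr_t)reboot_code_buffer,
+			     (uintptr_t)reboot_code_buffer +
+			     arm64_relocate_new_kernel_size);
+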
+Fixes: d28f6df1305a ("arm64/kexec: Add core kexec support")
+Cc: <stable@vger.kernel.org> # 4.8.x-
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/kernel/machine_kexec.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/arch/arm64/kernel/machine_kexec.c b/arch/arm64/kernel/machine_kexec.c
+index 8e9c924423b4e..a0b144cfaea71 100644
+--- a/arch/arm64/kernel/machine_kexec.c
++++ b/arch/arm64/kernel/machine_kexec.c
+@@ -177,6 +177,7 @@ void machine_kexec(struct kimage *kimage)
+ * the offline CPUs. Therefore, we must use the __* variant here.
+ */
+ __flush_icache_range((uintptr_t)reboot_code_buffer,
++ (uintptr_t)reboot_code_buffer +
+ arm64_relocate_new_kernel_size);
+
+ /* Flush the kimage list and its buffers. */
+--
+2.20.1
+
--- /dev/null
+From 6cb85e9644f3f9f9d2e4b1e991d78ebd0106d09f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 16 May 2020 20:45:27 -0400
+Subject: bpf: Fix bug in mmap() implementation for BPF array map
+
+[ Upstream commit 333291ce5055f2039afc907badaf5b66bc1adfdc ]
+
+The mmap() subsystem allows a user-space application to memory-map a region
+with an initial page offset. This wasn't taken into account in the initial
+implementation of BPF array memory-mapping, so the wrong pages, ignoring the
+requested page shift, could be memory-mapped into user space. This patch
+fixes this gap and adds a test for such a scenario.
+
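+A minimal user-space illustration of the affected case (map_fd and page_size
+are placeholders; the map is assumed to be created with BPF_F_MMAPABLE, cf.
+the selftest change below):
+
+	/* map one page starting at pg_off = 1 page into the array's data */
+	void *p = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED,
+		       map_fd, page_size);
+
+Before this fix the requested offset was effectively ignored, so the mapping
+started at the wrong pages; with the fix the offset is honored and requests
+extending past the value area fail with -EINVAL.
+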
+Fixes: fc9702273e2e ("bpf: Add mmap() support for BPF_MAP_TYPE_ARRAY")
+Signed-off-by: Andrii Nakryiko <andriin@fb.com>
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Acked-by: Yonghong Song <yhs@fb.com>
+Link: https://lore.kernel.org/bpf/20200512235925.3817805-1-andriin@fb.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/arraymap.c | 7 ++++++-
+ tools/testing/selftests/bpf/prog_tests/mmap.c | 9 +++++++++
+ 2 files changed, 15 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/bpf/arraymap.c b/kernel/bpf/arraymap.c
+index 95d77770353c9..1d6120fd5ba68 100644
+--- a/kernel/bpf/arraymap.c
++++ b/kernel/bpf/arraymap.c
+@@ -486,7 +486,12 @@ static int array_map_mmap(struct bpf_map *map, struct vm_area_struct *vma)
+ if (!(map->map_flags & BPF_F_MMAPABLE))
+ return -EINVAL;
+
+- return remap_vmalloc_range(vma, array_map_vmalloc_addr(array), pgoff);
++ if (vma->vm_pgoff * PAGE_SIZE + (vma->vm_end - vma->vm_start) >
++ PAGE_ALIGN((u64)array->map.max_entries * array->elem_size))
++ return -EINVAL;
++
++ return remap_vmalloc_range(vma, array_map_vmalloc_addr(array),
++ vma->vm_pgoff + pgoff);
+ }
+
+ const struct bpf_map_ops array_map_ops = {
+diff --git a/tools/testing/selftests/bpf/prog_tests/mmap.c b/tools/testing/selftests/bpf/prog_tests/mmap.c
+index 16a814eb4d645..b0e789678aa46 100644
+--- a/tools/testing/selftests/bpf/prog_tests/mmap.c
++++ b/tools/testing/selftests/bpf/prog_tests/mmap.c
+@@ -197,6 +197,15 @@ void test_mmap(void)
+ CHECK_FAIL(map_data->val[far] != 3 * 321);
+
+ munmap(tmp2, 4 * page_size);
++
++ /* map all 4 pages, but with pg_off=1 page, should fail */
++ tmp1 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED,
++ data_map_fd, page_size /* initial page shift */);
++ if (CHECK(tmp1 != MAP_FAILED, "adv_mmap7", "unexpected success")) {
++ munmap(tmp1, 4 * page_size);
++ goto cleanup;
++ }
++
+ cleanup:
+ if (bss_mmaped)
+ CHECK_FAIL(munmap(bss_mmaped, bss_sz));
+--
+2.20.1
+
--- /dev/null
+From 35146c67d00da8aeb7babe8504169117152c45fe Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2020 08:18:51 +0000
+Subject: bpf: Fix error return code in map_lookup_and_delete_elem()
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+[ Upstream commit 7f645462ca01d01abb94d75e6768c8b3ed3a188b ]
+
+Fix to return negative error code -EFAULT from the copy_to_user() error
+handling case instead of 0, as done elsewhere in this function.
+
+Fixes: bd513cd08f10 ("bpf: add MAP_LOOKUP_AND_DELETE_ELEM syscall")
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Link: https://lore.kernel.org/bpf/20200430081851.166996-1-weiyongjun1@huawei.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/syscall.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
+index 3b92aea18ae75..e04ea4c8f9358 100644
+--- a/kernel/bpf/syscall.c
++++ b/kernel/bpf/syscall.c
+@@ -1480,8 +1480,10 @@ static int map_lookup_and_delete_elem(union bpf_attr *attr)
+ if (err)
+ goto free_value;
+
+- if (copy_to_user(uvalue, value, value_size) != 0)
++ if (copy_to_user(uvalue, value, value_size) != 0) {
++ err = -EFAULT;
+ goto free_value;
++ }
+
+ err = 0;
+
+--
+2.20.1
+
--- /dev/null
+From d6c2a36b31fb07f4da82b273aa6b76ee5feb481f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2020 10:21:44 -0700
+Subject: bpf, sockmap: bpf_tcp_ingress needs to subtract bytes from sg.size
+
+From: John Fastabend <john.fastabend@gmail.com>
+
+[ Upstream commit 81aabbb9fb7b4b1efd073b62f0505d3adad442f3 ]
+
+In bpf_tcp_ingress we used apply_bytes to subtract bytes from sg.size,
+which is used to track the total bytes in a message. But this is not
+correct, because apply_bytes is itself modified in the main loop doing
+the mem_charge.
+
+At the end of this we have sg.size incorrectly set and out of sync with
+the actual sk values. We can then get a splat if we try to cork the data
+later and again try to redirect the msg to ingress. To fix this, instead
+of trying to track msg.size, do the easy thing and include it as part of
+the sk_msg_xfer logic, so that when the msg is moved the sg.size is
+always correct.
+
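+With the change below, the size accounting happens where the scatterlist
+entry is actually transferred; the resulting helper (as in the hunk below)
+reads:
+
+	static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
+				       int which, u32 size)
+	{
+		dst->sg.data[which] = src->sg.data[which];
+		dst->sg.data[which].length = size;
+		dst->sg.size += size;
+		src->sg.size -= size;	/* keep the source total in sync */
+		src->sg.data[which].length -= size;
+		src->sg.data[which].offset += size;
+	}
+
+so bpf_tcp_ingress() no longer needs the incorrect "msg->sg.size -=
+apply_bytes" adjustment at the end.
+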
+To reproduce the splat below, users will need ingress + cork and to hit an
+error path that will then try to 'free' the skmsg.
+
+[ 173.699981] BUG: KASAN: null-ptr-deref in sk_msg_free_elem+0xdd/0x120
+[ 173.699987] Read of size 8 at addr 0000000000000008 by task test_sockmap/5317
+
+[ 173.700000] CPU: 2 PID: 5317 Comm: test_sockmap Tainted: G I 5.7.0-rc1+ #43
+[ 173.700005] Hardware name: Dell Inc. Precision 5820 Tower/002KVM, BIOS 1.9.2 01/24/2019
+[ 173.700009] Call Trace:
+[ 173.700021] dump_stack+0x8e/0xcb
+[ 173.700029] ? sk_msg_free_elem+0xdd/0x120
+[ 173.700034] ? sk_msg_free_elem+0xdd/0x120
+[ 173.700042] __kasan_report+0x102/0x15f
+[ 173.700052] ? sk_msg_free_elem+0xdd/0x120
+[ 173.700060] kasan_report+0x32/0x50
+[ 173.700070] sk_msg_free_elem+0xdd/0x120
+[ 173.700080] __sk_msg_free+0x87/0x150
+[ 173.700094] tcp_bpf_send_verdict+0x179/0x4f0
+[ 173.700109] tcp_bpf_sendpage+0x3ce/0x5d0
+
+Fixes: 604326b41a6fb ("bpf, sockmap: convert to generic sk_msg interface")
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Jakub Sitnicki <jakub@cloudflare.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Link: https://lore.kernel.org/bpf/158861290407.14306.5327773422227552482.stgit@john-Precision-5820-Tower
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/skmsg.h | 1 +
+ net/ipv4/tcp_bpf.c | 1 -
+ 2 files changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/include/linux/skmsg.h b/include/linux/skmsg.h
+index 14d61bba0b79b..71db17927a9da 100644
+--- a/include/linux/skmsg.h
++++ b/include/linux/skmsg.h
+@@ -187,6 +187,7 @@ static inline void sk_msg_xfer(struct sk_msg *dst, struct sk_msg *src,
+ dst->sg.data[which] = src->sg.data[which];
+ dst->sg.data[which].length = size;
+ dst->sg.size += size;
++ src->sg.size -= size;
+ src->sg.data[which].length -= size;
+ src->sg.data[which].offset += size;
+ }
+diff --git a/net/ipv4/tcp_bpf.c b/net/ipv4/tcp_bpf.c
+index 8a01428f80c1c..19bd10e6ab830 100644
+--- a/net/ipv4/tcp_bpf.c
++++ b/net/ipv4/tcp_bpf.c
+@@ -200,7 +200,6 @@ static int bpf_tcp_ingress(struct sock *sk, struct sk_psock *psock,
+
+ if (!ret) {
+ msg->sg.start = i;
+- msg->sg.size -= apply_bytes;
+ sk_psock_queue_msg(psock, tmp);
+ sk_psock_data_ready(sk, psock);
+ } else {
+--
+2.20.1
+
--- /dev/null
+From 18498f47af2a5efcd492adf22cd3b79badbdd091 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2020 10:21:23 -0700
+Subject: bpf, sockmap: msg_pop_data can incorrectly set an sge length
+
+From: John Fastabend <john.fastabend@gmail.com>
+
+[ Upstream commit 3e104c23816220919ea1b3fd93fabe363c67c484 ]
+
+When sk_msg_pop() is called where the pop operation is working on
+the end of a sge element and there is no additional trailing data
+and there _is_ data in front of pop, like the following case,
+
+ |____________a_____________|__pop__|
+
+We have out-of-order operations where we incorrectly set the pop
+variable so that, instead of zeroing pop, we effectively leave it
+untouched. This can cause later logic to shift the buffers around,
+believing it should pop extra space. The result is we have 'popped'
+more data than we expected, potentially breaking program logic.
+
+It took us a while to hit this case because we typically pop headers,
+which rarely seem to be at the end of scatterlist elements, but we
+can't rely on this.
+
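+To make the out-of-order update concrete, the old code did
+
+	sge->length = a;
+	pop -= (sge->length - a);	/* now evaluates to pop -= 0 */
+
+so the length was truncated first and the subtraction always removed zero,
+leaving pop with its stale value. The fix below performs the subtraction
+before truncating sge->length.
+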
+Fixes: 7246d8ed4dcce ("bpf: helper to pop data from messages")
+Signed-off-by: John Fastabend <john.fastabend@gmail.com>
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: Jakub Sitnicki <jakub@cloudflare.com>
+Acked-by: Martin KaFai Lau <kafai@fb.com>
+Link: https://lore.kernel.org/bpf/158861288359.14306.7654891716919968144.stgit@john-Precision-5820-Tower
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/core/filter.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/core/filter.c b/net/core/filter.c
+index c180871e606d8..083fbe92662ec 100644
+--- a/net/core/filter.c
++++ b/net/core/filter.c
+@@ -2590,8 +2590,8 @@ BPF_CALL_4(bpf_msg_pop_data, struct sk_msg *, msg, u32, start,
+ }
+ pop = 0;
+ } else if (pop >= sge->length - a) {
+- sge->length = a;
+ pop -= (sge->length - a);
++ sge->length = a;
+ }
+ }
+
+--
+2.20.1
+
--- /dev/null
+From c7fa32bffba4b11678ffc1cb1d02036f6e0f4c10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2020 16:12:55 +0100
+Subject: cachefiles: Fix corruption of the return value in
+ cachefiles_read_or_alloc_pages()
+
+From: David Howells <dhowells@redhat.com>
+
+[ Upstream commit c5f9d9db83d9f84d2b4aae5a1b29d9b582ccff2f ]
+
+The patch which changed cachefiles from calling ->bmap() to using the
+bmap() wrapper overwrote the running return value with the result of
+calling bmap(). This causes an assertion failure elsewhere in the code.
+
+Fix this by using ret2 rather than ret to hold the return value.
+
+The oops looks like:
+
+ kernel BUG at fs/nfs/fscache.c:468!
+ ...
+ RIP: 0010:__nfs_readpages_from_fscache+0x18b/0x190 [nfs]
+ ...
+ Call Trace:
+ nfs_readpages+0xbf/0x1c0 [nfs]
+ ? __alloc_pages_nodemask+0x16c/0x320
+ read_pages+0x67/0x1a0
+ __do_page_cache_readahead+0x1cf/0x1f0
+ ondemand_readahead+0x172/0x2b0
+ page_cache_async_readahead+0xaa/0xe0
+ generic_file_buffered_read+0x852/0xd50
+ ? mem_cgroup_commit_charge+0x6e/0x140
+ ? nfs4_have_delegation+0x19/0x30 [nfsv4]
+ generic_file_read_iter+0x100/0x140
+ ? nfs_revalidate_mapping+0x176/0x2b0 [nfs]
+ nfs_file_read+0x6d/0xc0 [nfs]
+ new_sync_read+0x11a/0x1c0
+ __vfs_read+0x29/0x40
+ vfs_read+0x8e/0x140
+ ksys_read+0x61/0xd0
+ __x64_sys_read+0x1a/0x20
+ do_syscall_64+0x60/0x1e0
+ entry_SYSCALL_64_after_hwframe+0x44/0xa9
+ RIP: 0033:0x7f5d148267e0
+
+Fixes: 10d83e11a582 ("cachefiles: drop direct usage of ->bmap method.")
+Reported-by: David Wysochanski <dwysocha@redhat.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Tested-by: David Wysochanski <dwysocha@redhat.com>
+cc: Carlos Maiolino <cmaiolino@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cachefiles/rdwr.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
+index 1dc97f2d62013..d3d78176b23ce 100644
+--- a/fs/cachefiles/rdwr.c
++++ b/fs/cachefiles/rdwr.c
+@@ -398,7 +398,7 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
+ struct inode *inode;
+ sector_t block;
+ unsigned shift;
+- int ret;
++ int ret, ret2;
+
+ object = container_of(op->op.object,
+ struct cachefiles_object, fscache);
+@@ -430,8 +430,8 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
+ block = page->index;
+ block <<= shift;
+
+- ret = bmap(inode, &block);
+- ASSERT(ret < 0);
++ ret2 = bmap(inode, &block);
++ ASSERT(ret2 == 0);
+
+ _debug("%llx -> %llx",
+ (unsigned long long) (page->index << shift),
+@@ -739,8 +739,8 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
+ block = page->index;
+ block <<= shift;
+
+- ret = bmap(inode, &block);
+- ASSERT(!ret);
++ ret2 = bmap(inode, &block);
++ ASSERT(ret2 == 0);
+
+ _debug("%llx -> %llx",
+ (unsigned long long) (page->index << shift),
+--
+2.20.1
+
--- /dev/null
+From dfff8cd9f7b27cbcf93aaec27a47d05eb75947dd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Apr 2020 20:26:29 +0100
+Subject: cpufreq: intel_pstate: Only mention the BIOS disabling turbo mode
+ once
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+[ Upstream commit 8c539776ac83c0857395e1ccc9c6b516521a2d32 ]
+
+Make a note of the first time we discover the turbo mode has been
+disabled by the BIOS, as otherwise we complain every time we try to
+update the mode.
+
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/intel_pstate.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index c81e1ff290697..b4c014464a208 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1058,7 +1058,7 @@ static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
+
+ update_turbo_state();
+ if (global.turbo_disabled) {
+- pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
++ pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
+ mutex_unlock(&intel_pstate_limits_lock);
+ mutex_unlock(&intel_pstate_driver_lock);
+ return -EPERM;
+--
+2.20.1
+
--- /dev/null
+From 64f44caae06b1c0ededacf0a624304aa2f773cbd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 19 Apr 2020 18:49:06 +0200
+Subject: dmaengine: mmp_tdma: Do not ignore slave config validation errors
+
+From: Lubomir Rintel <lkundrak@v3.sk>
+
+[ Upstream commit 363c32701c7fdc8265a84b21a6a4f45d1202b9ca ]
+
+With an invalid dma_slave_config set previously,
+mmp_tdma_prep_dma_cyclic() would detect an error whilst configuring the
+channel, but proceed happily on:
+
+ [ 120.756530] mmp-tdma d42a0800.adma: mmp_tdma: unknown burst size.
+
+Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
+Link: https://lore.kernel.org/r/20200419164912.670973-2-lkundrak@v3.sk
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/mmp_tdma.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
+index 10117f271b12b..51e08c16756ae 100644
+--- a/drivers/dma/mmp_tdma.c
++++ b/drivers/dma/mmp_tdma.c
+@@ -443,7 +443,8 @@ static struct dma_async_tx_descriptor *mmp_tdma_prep_dma_cyclic(
+ if (!desc)
+ goto err_out;
+
+- mmp_tdma_config_write(chan, direction, &tdmac->slave_config);
++ if (mmp_tdma_config_write(chan, direction, &tdmac->slave_config))
++ goto err_out;
+
+ while (buf < buf_len) {
+ desc = &tdmac->desc_arr[i];
+--
+2.20.1
+
--- /dev/null
+From 35c1745abf5776876e50e7feef6eeeeda887452d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 19 Apr 2020 18:49:09 +0200
+Subject: dmaengine: mmp_tdma: Reset channel error on release
+
+From: Lubomir Rintel <lkundrak@v3.sk>
+
+[ Upstream commit 0c89446379218698189a47871336cb30286a7197 ]
+
+When a channel configuration fails, the status of the channel is set to
+DEV_ERROR so that an attempt to submit it fails. However, this status
+sticks until the heat death of the universe, making it impossible to
+recover from the error.
+
+Let's reset it when the channel is released so that further use of the
+channel with correct configuration is not impacted.
+
+Signed-off-by: Lubomir Rintel <lkundrak@v3.sk>
+Link: https://lore.kernel.org/r/20200419164912.670973-5-lkundrak@v3.sk
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/mmp_tdma.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/drivers/dma/mmp_tdma.c b/drivers/dma/mmp_tdma.c
+index 51e08c16756ae..d683232d7fea0 100644
+--- a/drivers/dma/mmp_tdma.c
++++ b/drivers/dma/mmp_tdma.c
+@@ -363,6 +363,8 @@ static void mmp_tdma_free_descriptor(struct mmp_tdma_chan *tdmac)
+ gen_pool_free(gpool, (unsigned long)tdmac->desc_arr,
+ size);
+ tdmac->desc_arr = NULL;
++ if (tdmac->status == DMA_ERROR)
++ tdmac->status = DMA_COMPLETE;
+
+ return;
+ }
+--
+2.20.1
+
--- /dev/null
+From 718bca0b21e262174f454ab5e9d5ff31338336e2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Apr 2020 11:53:35 +0530
+Subject: dmaengine: pch_dma.c: Avoid data race between probe and irq handler
+
+From: Madhuparna Bhowmik <madhuparnabhowmik10@gmail.com>
+
+[ Upstream commit 2e45676a4d33af47259fa186ea039122ce263ba9 ]
+
+pd->dma.dev is read in the irq handler pd_irq().
+However, it is set to pdev->dev after request_irq().
+Therefore, set pd->dma.dev to pdev->dev before request_irq() to
+avoid a data race between pch_dma_probe() and pd_irq().
+
+Found by Linux Driver Verification project (linuxtesting.org).
+
+Signed-off-by: Madhuparna Bhowmik <madhuparnabhowmik10@gmail.com>
+Link: https://lore.kernel.org/r/20200416062335.29223-1-madhuparnabhowmik10@gmail.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/pch_dma.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
+index 581e7a290d98e..a3b0b4c56a190 100644
+--- a/drivers/dma/pch_dma.c
++++ b/drivers/dma/pch_dma.c
+@@ -865,6 +865,7 @@ static int pch_dma_probe(struct pci_dev *pdev,
+ }
+
+ pci_set_master(pdev);
++ pd->dma.dev = &pdev->dev;
+
+ err = request_irq(pdev->irq, pd_irq, IRQF_SHARED, DRV_NAME, pd);
+ if (err) {
+@@ -880,7 +881,6 @@ static int pch_dma_probe(struct pci_dev *pdev,
+ goto err_free_irq;
+ }
+
+- pd->dma.dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&pd->dma.channels);
+
+--
+2.20.1
+
--- /dev/null
+From 9c58697b70610e0fd108bd31cf76595f3d8f7b56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 3 Mar 2020 14:05:18 +0100
+Subject: dmaengine: xilinx_dma: Add missing check for empty list
+
+From: Sebastian von Ohr <vonohr@smaract.com>
+
+[ Upstream commit b269426011bcfd97b7c3101abfe1a99147b6f40b ]
+
+The DMA transfer might finish just after checking the state with
+dma_cookie_status, but before the lock is acquired. Not checking
+for an empty list in xilinx_dma_tx_status may result in reading
+random data or data corruption when desc is written to. This can
+be reliably triggered by using dma_sync_wait to wait for DMA
+completion.
+
+Signed-off-by: Sebastian von Ohr <vonohr@smaract.com>
+Tested-by: Radhey Shyam Pandey <radhey.shyam.pandey@xilinx.com>
+Link: https://lore.kernel.org/r/20200303130518.333-1-vonohr@smaract.com
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/dma/xilinx/xilinx_dma.c | 20 ++++++++++----------
+ 1 file changed, 10 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/dma/xilinx/xilinx_dma.c b/drivers/dma/xilinx/xilinx_dma.c
+index a9c5d5cc9f2bd..5d5f1d0ce16cb 100644
+--- a/drivers/dma/xilinx/xilinx_dma.c
++++ b/drivers/dma/xilinx/xilinx_dma.c
+@@ -1229,16 +1229,16 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
+ return ret;
+
+ spin_lock_irqsave(&chan->lock, flags);
+-
+- desc = list_last_entry(&chan->active_list,
+- struct xilinx_dma_tx_descriptor, node);
+- /*
+- * VDMA and simple mode do not support residue reporting, so the
+- * residue field will always be 0.
+- */
+- if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
+- residue = xilinx_dma_get_residue(chan, desc);
+-
++ if (!list_empty(&chan->active_list)) {
++ desc = list_last_entry(&chan->active_list,
++ struct xilinx_dma_tx_descriptor, node);
++ /*
++ * VDMA and simple mode do not support residue reporting, so the
++ * residue field will always be 0.
++ */
++ if (chan->has_sg && chan->xdev->dma_config->dmatype != XDMA_TYPE_VDMA)
++ residue = xilinx_dma_get_residue(chan, desc);
++ }
+ spin_unlock_irqrestore(&chan->lock, flags);
+
+ dma_set_residue(txstate, residue);
+--
+2.20.1
+
--- /dev/null
+From 22a01eae62434abb8f05b44cbc871add926c36d1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Apr 2020 17:37:40 +0800
+Subject: drm/amd/display: blank dp stream before re-train the link
+
+From: Xiaodong Yan <Xiaodong.Yan@amd.com>
+
+[ Upstream commit 718a5569b6fa6e1f49f1ae76a3c18acb4ddb74f1 ]
+
+[Why]
+When link loss happens, the monitor cannot light up if we only re-train
+the link.
+
+[How]
+Blank all the DP streams on this link before re-training the link, and then
+unblank the streams.
+
+Signed-off-by: Xiaodong Yan <Xiaodong.Yan@amd.com>
+Reviewed-by: Tony Cheng <Tony.Cheng@amd.com>
+Acked-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+index fd9e69634c50a..1b6c75a4dd60a 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+@@ -2885,6 +2885,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
+ sizeof(hpd_irq_dpcd_data),
+ "Status: ");
+
++ for (i = 0; i < MAX_PIPES; i++) {
++ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
++ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
++ link->dc->hwss.blank_stream(pipe_ctx);
++ }
++
+ for (i = 0; i < MAX_PIPES; i++) {
+ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
+ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
+@@ -2904,6 +2910,12 @@ bool dc_link_handle_hpd_rx_irq(struct dc_link *link, union hpd_irq_data *out_hpd
+ if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST)
+ dc_link_reallocate_mst_payload(link);
+
++ for (i = 0; i < MAX_PIPES; i++) {
++ pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i];
++ if (pipe_ctx && pipe_ctx->stream && pipe_ctx->stream->link == link)
++ link->dc->hwss.unblank_stream(pipe_ctx, &previous_link_settings);
++ }
++
+ status = false;
+ if (out_link_loss)
+ *out_link_loss = true;
+--
+2.20.1
+
--- /dev/null
+From 069cba232c79063fe15a21b94b00a32e1587961c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Apr 2020 18:07:52 -0400
+Subject: drm/amd/display: check if REFCLK_CNTL register is present
+
+From: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+
+[ Upstream commit 3159d41db3a04330c31ece32f8b29752fc114848 ]
+
+Check before programming the register since it isn't present on
+all IPs using this code.
+
+Signed-off-by: Dmytro Laktyushkin <Dmytro.Laktyushkin@amd.com>
+Reviewed-by: Eric Bernstein <Eric.Bernstein@amd.com>
+Acked-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index a444fed941849..ad422e00f9fec 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -2306,7 +2306,8 @@ void dcn20_fpga_init_hw(struct dc *dc)
+
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_REFDIV, 2);
+ REG_UPDATE(DCHUBBUB_GLOBAL_TIMER_CNTL, DCHUBBUB_GLOBAL_TIMER_ENABLE, 1);
+- REG_WRITE(REFCLK_CNTL, 0);
++ if (REG(REFCLK_CNTL))
++ REG_WRITE(REFCLK_CNTL, 0);
+ //
+
+
+--
+2.20.1
+
--- /dev/null
+From 146ed2dc4084703339fabe90de214c211737a3e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Apr 2020 18:07:56 -0400
+Subject: drm/amd/display: Defer cursor update around VUPDATE for all ASIC
+
+From: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+
+[ Upstream commit fdfd2a858590d318cfee483bd1c73e00f77533af ]
+
+[Why]
+Fixes the following scenario:
+
+- Flip has been prepared sometime during the frame, update pending
+- Cursor update happens right when VUPDATE would happen
+- OPTC lock acquired, VUPDATE is blocked until next frame
+- Flip is delayed potentially infinitely
+
+With the igt@kms_cursor_legacy cursor-vs-flip-legacy test we can
+observe nearly *13* frames of delay for some flips on Navi.
+
+[How]
+Apply the Raven workaround generically. When close enough to VUPDATE
+block cursor updates from occurring from the dc_stream_set_cursor_*
+helpers.
+
+This could perhaps be a little smarter by checking if there were
+pending updates or flips earlier in the frame on the HUBP side before
+applying the delay, but this should be fine for now.
+
+This fixes the kms_cursor_legacy test.
+
+Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
+Reviewed-by: Aric Cyr <Aric.Cyr@amd.com>
+Acked-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/amd/display/dc/core/dc_stream.c | 28 +++++++++----------
+ 1 file changed, 14 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+index 6ddbb00ed37a5..8c20e9e907b2f 100644
+--- a/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
++++ b/drivers/gpu/drm/amd/display/dc/core/dc_stream.c
+@@ -239,24 +239,24 @@ static void delay_cursor_until_vupdate(struct pipe_ctx *pipe_ctx, struct dc *dc)
+ struct dc_stream_state *stream = pipe_ctx->stream;
+ unsigned int us_per_line;
+
+- if (stream->ctx->asic_id.chip_family == FAMILY_RV &&
+- ASICREV_IS_RAVEN(stream->ctx->asic_id.hw_internal_rev)) {
++ if (!dc->hwss.get_vupdate_offset_from_vsync)
++ return;
+
+- vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
+- if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
+- return;
++ vupdate_line = dc->hwss.get_vupdate_offset_from_vsync(pipe_ctx);
++ if (!dc_stream_get_crtc_position(dc, &stream, 1, &vpos, &nvpos))
++ return;
+
+- if (vpos >= vupdate_line)
+- return;
++ if (vpos >= vupdate_line)
++ return;
+
+- us_per_line = stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
+- lines_to_vupdate = vupdate_line - vpos;
+- us_to_vupdate = lines_to_vupdate * us_per_line;
++ us_per_line =
++ stream->timing.h_total * 10000 / stream->timing.pix_clk_100hz;
++ lines_to_vupdate = vupdate_line - vpos;
++ us_to_vupdate = lines_to_vupdate * us_per_line;
+
+- /* 70 us is a conservative estimate of cursor update time*/
+- if (us_to_vupdate < 70)
+- udelay(us_to_vupdate);
+- }
++ /* 70 us is a conservative estimate of cursor update time*/
++ if (us_to_vupdate < 70)
++ udelay(us_to_vupdate);
+ #endif
+ }
+
+--
+2.20.1
+
--- /dev/null
+From efe8d627d880b30204c216be34c055fc0f654fb9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Apr 2020 18:07:57 -0400
+Subject: drm/amd/display: Update downspread percent to match spreadsheet for
+ DCN2.1
+
+From: Sung Lee <sung.lee@amd.com>
+
+[ Upstream commit 668a6741f809f2d15d125cfe2b39661e8f1655ea ]
+
+[WHY]
+The downspread percentage was copied over from a previous version
+of the display_mode_lib spreadsheet. This value has been updated,
+and the previous value is too high to allow for such modes as
+4K120hz. The new value is sufficient for such modes.
+
+[HOW]
+Update the value in dcn21_resource to match the spreadsheet.
+
+Signed-off-by: Sung Lee <sung.lee@amd.com>
+Reviewed-by: Yongqiang Sun <yongqiang.sun@amd.com>
+Acked-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+index 33d0a176841a5..122d3e734c59a 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn21/dcn21_resource.c
+@@ -250,7 +250,7 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_1_soc = {
+ .dram_channel_width_bytes = 4,
+ .fabric_datapath_to_dcn_data_return_bytes = 32,
+ .dcn_downspread_percent = 0.5,
+- .downspread_percent = 0.5,
++ .downspread_percent = 0.38,
+ .dram_page_open_time_ns = 50.0,
+ .dram_rw_turnaround_time_ns = 17.5,
+ .dram_return_buffer_per_channel_bytes = 8192,
+--
+2.20.1
+
--- /dev/null
+From 1b059c4319e21ad2ac8ced1066dbd5b6ce17aef3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Apr 2020 19:03:17 +0800
+Subject: drm/amd/powerplay: avoid using pm_en before it is initialized revised
+
+From: Tiecheng Zhou <Tiecheng.Zhou@amd.com>
+
+[ Upstream commit 690ae30be163d5262feae01335b2a6f30569e5aa ]
+
+hwmgr->pm_en is initialized in hwmgr_hw_init().
+
+During amdgpu_device_init(), amdgpu_asic_reset() calls
+soc15_asic_reset() (for the V320 use case, a Vega10 ASIC), in which:
+1) soc15_asic_reset_method() calls pp_get_asic_baco_capability() (pm_en)
+2) soc15_asic_baco_reset() calls pp_set_asic_baco_state() (pm_en)
+
+pm_en is used in the above two cases while it has not yet been initialized.
+
+So avoid using pm_en in the above two functions for V320 passthrough.
+
+Reviewed-by: Evan Quan <evan.quan@amd.com>
+Signed-off-by: Tiecheng Zhou <Tiecheng.Zhou@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+index c195575366a3b..e4e5a53b2b4ea 100644
+--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
++++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+@@ -1435,7 +1435,8 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
+ if (!hwmgr)
+ return -EINVAL;
+
+- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_capability)
++ if (!(hwmgr->not_vf && amdgpu_dpm) ||
++ !hwmgr->hwmgr_func->get_asic_baco_capability)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+@@ -1469,7 +1470,8 @@ static int pp_set_asic_baco_state(void *handle, int state)
+ if (!hwmgr)
+ return -EINVAL;
+
+- if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_asic_baco_state)
++ if (!(hwmgr->not_vf && amdgpu_dpm) ||
++ !hwmgr->hwmgr_func->set_asic_baco_state)
+ return 0;
+
+ mutex_lock(&hwmgr->smu_lock);
+--
+2.20.1
+
--- /dev/null
+From eb1227b6f438ccaa61a7bac2fbd5f33483d31f5b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2020 16:25:39 -0400
+Subject: drm/amdgpu: bump version for invalidate L2 before SDMA IBs
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marek Olšák <marek.olsak@amd.com>
+
+[ Upstream commit 9017a4897a20658f010bebea825262963c10afa6 ]
+
+This fixes GPU hangs due to cache coherency issues.
+Bump the driver version. Split out from the original patch.
+
+Signed-off-by: Marek Olšák <marek.olsak@amd.com>
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Tested-by: Pierre-Eric Pelloux-Prayer <pierre-eric.pelloux-prayer@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+index 42f4febe24c6d..8d45a2b662aeb 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+@@ -85,9 +85,10 @@
+ * - 3.34.0 - Non-DC can flip correctly between buffers with different pitches
+ * - 3.35.0 - Add drm_amdgpu_info_device::tcc_disabled_mask
+ * - 3.36.0 - Allow reading more status registers on si/cik
++ * - 3.37.0 - L2 is invalidated before SDMA IBs, needed for correctness
+ */
+ #define KMS_DRIVER_MAJOR 3
+-#define KMS_DRIVER_MINOR 36
++#define KMS_DRIVER_MINOR 37
+ #define KMS_DRIVER_PATCHLEVEL 0
+
+ int amdgpu_vram_limit = 0;
+--
+2.20.1
+
--- /dev/null
+From 5d7993f9dbfa9e1f24df5bb2f7ab77f51c0830b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 May 2020 09:42:26 -0400
+Subject: drm/amdgpu: force fbdev into vram
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Alex Deucher <alexander.deucher@amd.com>
+
+[ Upstream commit a6aacb2b26e85aa619cf0c6f98d0ca77314cd2a1 ]
+
+We set the fb smem pointer to the offset into the BAR, so keep
+the fbdev bo in vram.
+
+Bug: https://bugzilla.kernel.org/show_bug.cgi?id=207581
+Fixes: 6c8d74caa2fa33 ("drm/amdgpu: Enable scatter gather display support")
+Reviewed-by: Christian König <christian.koenig@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+index 2672dc64a3101..6a76ab16500fa 100644
+--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
++++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+@@ -133,8 +133,7 @@ static int amdgpufb_create_pinned_object(struct amdgpu_fbdev *rfbdev,
+ u32 cpp;
+ u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
+ AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+- AMDGPU_GEM_CREATE_VRAM_CLEARED |
+- AMDGPU_GEM_CREATE_CPU_GTT_USWC;
++ AMDGPU_GEM_CREATE_VRAM_CLEARED;
+
+ info = drm_get_format_info(adev->ddev, mode_cmd);
+ cpp = info->cpp[0];
+--
+2.20.1
+
--- /dev/null
+From c2c994d171a9dbf2334778ba6d3b7bf0fa0f4f1b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2020 14:46:54 -0700
+Subject: drm/i915: Don't enable WaIncreaseLatencyIPCEnabled when IPC is
+ disabled
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Sultan Alsawaf <sultan@kerneltoast.com>
+
+[ Upstream commit 421abe200321a2c907ede1a6208c558284ba0b75 ]
+
+In commit 5a7d202b1574, a logical AND was erroneously changed to an OR,
+causing WaIncreaseLatencyIPCEnabled to be enabled unconditionally for
+kabylake and coffeelake, even when IPC is disabled. Fix the logic so
+that WaIncreaseLatencyIPCEnabled is only used when IPC is enabled.
+
+Fixes: 5a7d202b1574 ("drm/i915: Drop WaIncreaseLatencyIPCEnabled/1140 for cnl")
+Cc: stable@vger.kernel.org # 5.3.x+
+Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
+Signed-off-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200430214654.51314-1-sultan@kerneltoast.com
+(cherry picked from commit 690d22dafa88b82453516387b475664047a6bd14)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/intel_pm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
+index bd2d30ecc030f..53c7b1a1b3551 100644
+--- a/drivers/gpu/drm/i915/intel_pm.c
++++ b/drivers/gpu/drm/i915/intel_pm.c
+@@ -4722,7 +4722,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state,
+ * WaIncreaseLatencyIPCEnabled: kbl,cfl
+ * Display WA #1141: kbl,cfl
+ */
+- if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) ||
++ if ((IS_KABYLAKE(dev_priv) || IS_COFFEELAKE(dev_priv)) &&
+ dev_priv->ipc_enabled)
+ latency += 4;
+
+--
+2.20.1
+
--- /dev/null
+From cbd211615e6bedfc50a48b3cb3f1836f4da1223c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Apr 2020 13:53:55 +0100
+Subject: drm/i915/gem: Remove object_is_locked assertion from
+ unpin_from_display_plane
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+[ Upstream commit 47bf7b7a7151bad568b9523d14477a353a450066 ]
+
+Since moving the obj->vma.list to a spin_lock, and the vm->bound_list to
+its vm->mutex, along with tracking shrinkable status under its own
+spinlock, we no longer require the object to be locked by the caller.
+
+This is fortunate as it appears we can be called without the lock along an
+error path in flipping:
+
+<4> [139.942851] WARN_ON(debug_locks && !lock_is_held(&(&((obj)->base.resv)->lock.base)->dep_map))
+<4> [139.943242] WARNING: CPU: 0 PID: 1203 at drivers/gpu/drm/i915/gem/i915_gem_domain.c:405 i915_gem_object_unpin_from_display_plane+0x70/0x130 [i915]
+<4> [139.943263] Modules linked in: snd_hda_intel i915 vgem snd_hda_codec_realtek snd_hda_codec_generic coretemp snd_intel_dspcfg snd_hda_codec snd_hwdep snd_hda_core r8169 lpc_ich snd_pcm realtek prime_numbers [last unloaded: i915]
+<4> [139.943347] CPU: 0 PID: 1203 Comm: kms_flip Tainted: G U 5.6.0-gd0fda5c2cf3f1-drmtip_474+ #1
+<4> [139.943363] Hardware name: /D510MO, BIOS MOPNV10J.86A.0311.2010.0802.2346 08/02/2010
+<4> [139.943589] RIP: 0010:i915_gem_object_unpin_from_display_plane+0x70/0x130 [i915]
+<4> [139.943589] Code: 85 28 01 00 00 be ff ff ff ff 48 8d 78 60 e8 d7 9b f0 e2 85 c0 75 b9 48 c7 c6 50 b9 38 c0 48 c7 c7 e9 48 3c c0 e8 20 d4 e9 e2 <0f> 0b eb a2 48 c7 c1 08 bb 38 c0 ba 0a 01 00 00 48 c7 c6 88 a3 35
+<4> [139.943589] RSP: 0018:ffffb774c0603b48 EFLAGS: 00010282
+<4> [139.943589] RAX: 0000000000000000 RBX: ffff9a142fa36e80 RCX: 0000000000000006
+<4> [139.943589] RDX: 000000000000160d RSI: ffff9a142c1a88f8 RDI: ffffffffa434a64d
+<4> [139.943589] RBP: ffff9a1410a513c0 R08: ffff9a142c1a88f8 R09: 0000000000000000
+<4> [139.943589] R10: 0000000000000000 R11: 0000000000000000 R12: ffff9a1436ee94b8
+<4> [139.943589] R13: 0000000000000001 R14: 00000000ffffffff R15: ffff9a1410960000
+<4> [139.943589] FS: 00007fc73a744e40(0000) GS:ffff9a143da00000(0000) knlGS:0000000000000000
+<4> [139.943589] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+<4> [139.943589] CR2: 00007fc73997e098 CR3: 000000002f5fe000 CR4: 00000000000006f0
+<4> [139.943589] Call Trace:
+<4> [139.943589] intel_pin_and_fence_fb_obj+0x1c9/0x1f0 [i915]
+<4> [139.943589] intel_plane_pin_fb+0x3f/0xd0 [i915]
+<4> [139.943589] intel_prepare_plane_fb+0x13b/0x5c0 [i915]
+<4> [139.943589] drm_atomic_helper_prepare_planes+0x85/0x110
+<4> [139.943589] intel_atomic_commit+0xda/0x390 [i915]
+<4> [139.943589] drm_atomic_helper_page_flip+0x9c/0xd0
+<4> [139.943589] ? drm_event_reserve_init+0x46/0x60
+<4> [139.943589] drm_mode_page_flip_ioctl+0x587/0x5d0
+
+This completes the symmetry lost in commit 8b1c78e06e61 ("drm/i915: Avoid
+calling i915_gem_object_unbind holding object lock").
+
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1743
+Fixes: 8b1c78e06e61 ("drm/i915: Avoid calling i915_gem_object_unbind holding object lock")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Matthew Auld <matthew.auld@intel.com>
+Cc: Andi Shyti <andi.shyti@intel.com>
+Cc: <stable@vger.kernel.org> # v5.6+
+Reviewed-by: Matthew Auld <matthew.auld@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200420125356.26614-1-chris@chris-wilson.co.uk
+(cherry picked from commit a95f3ac21d64d62c746f836598d1467d5837fa28)
+(cherry picked from commit 2208b85fa1766ee4821a9435d548578b67090531)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gem/i915_gem_domain.c | 7 +------
+ 1 file changed, 1 insertion(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/gem/i915_gem_domain.c b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+index 0cc40e77bbd2f..4f96c8788a2ec 100644
+--- a/drivers/gpu/drm/i915/gem/i915_gem_domain.c
++++ b/drivers/gpu/drm/i915/gem/i915_gem_domain.c
+@@ -368,7 +368,6 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+ struct drm_i915_private *i915 = to_i915(obj->base.dev);
+ struct i915_vma *vma;
+
+- GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+ if (!atomic_read(&obj->bind_count))
+ return;
+
+@@ -400,12 +399,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
+ void
+ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
+ {
+- struct drm_i915_gem_object *obj = vma->obj;
+-
+- assert_object_held(obj);
+-
+ /* Bump the LRU to try and avoid premature eviction whilst flipping */
+- i915_gem_object_bump_inactive_ggtt(obj);
++ i915_gem_object_bump_inactive_ggtt(vma->obj);
+
+ i915_vma_unpin(vma);
+ }
+--
+2.20.1
+
--- /dev/null
+From 8f3a1058c66db20cdbb903433325a6cb78e6974b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2020 13:22:49 +0100
+Subject: drm/i915/gt: Make timeslicing an explicit engine property
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+[ Upstream commit fe5a708267911d55cce42910d93e303924b088fd ]
+
+In order to allow userspace to rely on timeslicing to reorder their
+batches, we must support preemption of those user batches. Declare
+timeslicing as an explicit property that is a combination of having the
+kernel support and HW support.
+
+Suggested-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Fixes: 8ee36e048c98 ("drm/i915/execlists: Minimalistic timeslicing")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200501122249.12417-1-chris@chris-wilson.co.uk
+(cherry picked from commit a211da9c771bf97395a3ced83a3aa383372b13a7)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gt/intel_engine.h | 9 ---------
+ drivers/gpu/drm/i915/gt/intel_engine_types.h | 18 ++++++++++++++----
+ drivers/gpu/drm/i915/gt/intel_lrc.c | 5 ++++-
+ 3 files changed, 18 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
+index 5df003061e442..beb3211a6249d 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine.h
+@@ -338,13 +338,4 @@ intel_engine_has_preempt_reset(const struct intel_engine_cs *engine)
+ return intel_engine_has_preemption(engine);
+ }
+
+-static inline bool
+-intel_engine_has_timeslices(const struct intel_engine_cs *engine)
+-{
+- if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
+- return false;
+-
+- return intel_engine_has_semaphores(engine);
+-}
+-
+ #endif /* _INTEL_RINGBUFFER_H_ */
+diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
+index 92be41a6903c0..4ea067e1508a5 100644
+--- a/drivers/gpu/drm/i915/gt/intel_engine_types.h
++++ b/drivers/gpu/drm/i915/gt/intel_engine_types.h
+@@ -473,10 +473,11 @@ struct intel_engine_cs {
+ #define I915_ENGINE_SUPPORTS_STATS BIT(1)
+ #define I915_ENGINE_HAS_PREEMPTION BIT(2)
+ #define I915_ENGINE_HAS_SEMAPHORES BIT(3)
+-#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(4)
+-#define I915_ENGINE_IS_VIRTUAL BIT(5)
+-#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(6)
+-#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(7)
++#define I915_ENGINE_HAS_TIMESLICES BIT(4)
++#define I915_ENGINE_NEEDS_BREADCRUMB_TASKLET BIT(5)
++#define I915_ENGINE_IS_VIRTUAL BIT(6)
++#define I915_ENGINE_HAS_RELATIVE_MMIO BIT(7)
++#define I915_ENGINE_REQUIRES_CMD_PARSER BIT(8)
+ unsigned int flags;
+
+ /*
+@@ -573,6 +574,15 @@ intel_engine_has_semaphores(const struct intel_engine_cs *engine)
+ return engine->flags & I915_ENGINE_HAS_SEMAPHORES;
+ }
+
++static inline bool
++intel_engine_has_timeslices(const struct intel_engine_cs *engine)
++{
++ if (!IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
++ return false;
++
++ return engine->flags & I915_ENGINE_HAS_TIMESLICES;
++}
++
+ static inline bool
+ intel_engine_needs_breadcrumb_tasklet(const struct intel_engine_cs *engine)
+ {
+diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
+index 31455eceeb0c6..5bebda4a2d0b4 100644
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -4194,8 +4194,11 @@ void intel_execlists_set_default_submission(struct intel_engine_cs *engine)
+ engine->flags |= I915_ENGINE_SUPPORTS_STATS;
+ if (!intel_vgpu_active(engine->i915)) {
+ engine->flags |= I915_ENGINE_HAS_SEMAPHORES;
+- if (HAS_LOGICAL_RING_PREEMPTION(engine->i915))
++ if (HAS_LOGICAL_RING_PREEMPTION(engine->i915)) {
+ engine->flags |= I915_ENGINE_HAS_PREEMPTION;
++ if (IS_ACTIVE(CONFIG_DRM_I915_TIMESLICE_DURATION))
++ engine->flags |= I915_ENGINE_HAS_TIMESLICES;
++ }
+ }
+
+ if (INTEL_GEN(engine->i915) >= 12)
+--
+2.20.1
+
--- /dev/null
+From 8a457183496ba0c685f8a9349d3447121b9d4dad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 May 2020 17:59:18 +0800
+Subject: drm/i915/gvt: Fix kernel oops for 3-level ppgtt guest
+
+From: Zhenyu Wang <zhenyuw@linux.intel.com>
+
+[ Upstream commit 72a7a9925e2beea09b109dffb3384c9bf920d9da ]
+
+Since i915 won't allocate extra PDPs for the current default PML4 table,
+a 3-level ppgtt guest would hit a kernel pointer access failure on the
+extra PDP pointers. Bypass those pointers for now. This won't impact the
+real shadow PPGTT setup, so the guest context still works.
+
+This was verified on a 4.15 guest kernel with i915.enable_ppgtt=1
+to force the old aliasing ppgtt behavior.
+
+Fixes: 4f15665ccbba ("drm/i915: Add ppgtt to GVT GEM context")
+Reviewed-by: Xiong Zhang <xiong.y.zhang@intel.com>
+Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/20200506095918.124913-1-zhenyuw@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gvt/scheduler.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
+index 685d1e04a5ff6..709ad181bc94a 100644
+--- a/drivers/gpu/drm/i915/gvt/scheduler.c
++++ b/drivers/gpu/drm/i915/gvt/scheduler.c
+@@ -375,7 +375,11 @@ static void set_context_ppgtt_from_shadow(struct intel_vgpu_workload *workload,
+ for (i = 0; i < GVT_RING_CTX_NR_PDPS; i++) {
+ struct i915_page_directory * const pd =
+ i915_pd_entry(ppgtt->pd, i);
+-
++ /* skip now as current i915 ppgtt alloc won't allocate
++ top level pdp for non 4-level table, won't impact
++ shadow ppgtt. */
++ if (!pd)
++ break;
+ px_dma(pd) = mm->ppgtt_mm.shadow_pdps[i];
+ }
+ }
+--
+2.20.1
+
--- /dev/null
+From e32f87dc59aba60a50e356b641560d201ba4049c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 9 May 2020 12:52:17 +0100
+Subject: drm/i915: Handle idling during i915_gem_evict_something busy loops
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+[ Upstream commit 955da9d77435acac066139e9d7f7723ce7204a1d ]
+
+i915_gem_evict_something() is charged with finding a slot within the GTT
+that we may reuse. Since our goal is not to stall, we first look for a
+slot that only overlaps idle vma. To this end, on the first pass we move
+any active vma to the end of the search list. However, we only stop
+moving active vma after we have seen the first active vma twice. If,
+during the search, that first active vma completes, we do not notice
+and keep on extending the search list.
+
+Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/1746
+Fixes: 2850748ef876 ("drm/i915: Pull i915_vma_pin under the vm->mutex")
+Fixes: b1e3177bd1d8 ("drm/i915: Coordinate i915_active with its own mutex")
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: <stable@vger.kernel.org> # v5.5+
+Reviewed-by: Mika Kuoppala <mika.kuoppala@linux.intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200509115217.26853-1-chris@chris-wilson.co.uk
+(cherry picked from commit 73e28cc40bf00b5d168cb8f5cff1ae63e9097446)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/i915_gem_evict.c | 26 ++++++++++++--------------
+ 1 file changed, 12 insertions(+), 14 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c
+index 0697bedebeef2..d99df9c337089 100644
+--- a/drivers/gpu/drm/i915/i915_gem_evict.c
++++ b/drivers/gpu/drm/i915/i915_gem_evict.c
+@@ -130,6 +130,13 @@ i915_gem_evict_something(struct i915_address_space *vm,
+ active = NULL;
+ INIT_LIST_HEAD(&eviction_list);
+ list_for_each_entry_safe(vma, next, &vm->bound_list, vm_link) {
++ if (vma == active) { /* now seen this vma twice */
++ if (flags & PIN_NONBLOCK)
++ break;
++
++ active = ERR_PTR(-EAGAIN);
++ }
++
+ /*
+ * We keep this list in a rough least-recently scanned order
+ * of active elements (inactive elements are cheap to reap).
+@@ -145,21 +152,12 @@ i915_gem_evict_something(struct i915_address_space *vm,
+ * To notice when we complete one full cycle, we record the
+ * first active element seen, before moving it to the tail.
+ */
+- if (i915_vma_is_active(vma)) {
+- if (vma == active) {
+- if (flags & PIN_NONBLOCK)
+- break;
+-
+- active = ERR_PTR(-EAGAIN);
+- }
+-
+- if (active != ERR_PTR(-EAGAIN)) {
+- if (!active)
+- active = vma;
++ if (active != ERR_PTR(-EAGAIN) && i915_vma_is_active(vma)) {
++ if (!active)
++ active = vma;
+
+- list_move_tail(&vma->vm_link, &vm->bound_list);
+- continue;
+- }
++ list_move_tail(&vma->vm_link, &vm->bound_list);
++ continue;
+ }
+
+ if (mark_free(&scan, vma, flags, &eviction_list))
+--
+2.20.1
+
--- /dev/null
+From dcddce723898856dd0de508dd4a444e3540e5941 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2020 16:51:09 +0100
+Subject: drm/i915: Mark concurrent submissions with a weak-dependency
+
+From: Chris Wilson <chris@chris-wilson.co.uk>
+
+[ Upstream commit a9d094dcf7845af85f82adcad9f793e51e4d14c8 ]
+
+We recorded the dependencies for WAIT_FOR_SUBMIT in order that we could
+correctly perform priority inheritance from the parallel branches to the
+common trunk. However, for the purpose of timeslicing and reset
+handling, the dependency is weak -- the pair of requests is allowed
+to run in parallel and not in strict succession.
+
+The real significance though is that this allows us to rearrange
+groups of WAIT_FOR_SUBMIT linked requests along the single engine, and
+so can resolve user level inter-batch scheduling dependencies from user
+semaphores.
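+
+As a rough sketch of the distinction (simplified, not the exact driver
+code), the change below tags submit-fence dependencies as weak and has
+the timeslicing defer path skip them, while ordinary completion
+dependencies remain strong:
+
+	/* await another request's completion: strong ordering */
+	i915_sched_node_add_dependency(&to->sched, &from->sched,
+				       I915_DEPENDENCY_EXTERNAL);
+
+	/* await another request's submission only: weak ordering */
+	i915_sched_node_add_dependency(&to->sched, &from->sched,
+				       I915_DEPENDENCY_WEAK);
+
+	/* in defer_request(): do not drag weakly linked waiters along */
+	if (p->flags & I915_DEPENDENCY_WEAK)
+		continue;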
+
+Fixes: c81471f5e95c ("drm/i915: Copy across scheduler behaviour flags across submit fences")
+Testcase: igt/gem_exec_fence/submit
+Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
+Cc: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Cc: <stable@vger.kernel.org> # v5.6+
+Reviewed-by: Tvrtko Ursulin <tvrtko.ursulin@intel.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20200507155109.8892-1-chris@chris-wilson.co.uk
+(cherry picked from commit 6b6cd2ebd8d071e55998e32b648bb8081f7f02bb)
+Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/i915/gt/intel_lrc.c | 3 +++
+ drivers/gpu/drm/i915/i915_request.c | 8 ++++++--
+ drivers/gpu/drm/i915/i915_scheduler.c | 6 +++---
+ drivers/gpu/drm/i915/i915_scheduler.h | 3 ++-
+ drivers/gpu/drm/i915/i915_scheduler_types.h | 1 +
+ 5 files changed, 15 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
+index 5bebda4a2d0b4..637c03ee1a57f 100644
+--- a/drivers/gpu/drm/i915/gt/intel_lrc.c
++++ b/drivers/gpu/drm/i915/gt/intel_lrc.c
+@@ -1626,6 +1626,9 @@ static void defer_request(struct i915_request *rq, struct list_head * const pl)
+ struct i915_request *w =
+ container_of(p->waiter, typeof(*w), sched);
+
++ if (p->flags & I915_DEPENDENCY_WEAK)
++ continue;
++
+ /* Leave semaphores spinning on the other engines */
+ if (w->engine != rq->engine)
+ continue;
+diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
+index a18b2a2447066..32ab154db788c 100644
+--- a/drivers/gpu/drm/i915/i915_request.c
++++ b/drivers/gpu/drm/i915/i915_request.c
+@@ -951,7 +951,9 @@ i915_request_await_request(struct i915_request *to, struct i915_request *from)
+ return 0;
+
+ if (to->engine->schedule) {
+- ret = i915_sched_node_add_dependency(&to->sched, &from->sched);
++ ret = i915_sched_node_add_dependency(&to->sched,
++ &from->sched,
++ I915_DEPENDENCY_EXTERNAL);
+ if (ret < 0)
+ return ret;
+ }
+@@ -1084,7 +1086,9 @@ __i915_request_await_execution(struct i915_request *to,
+
+ /* Couple the dependency tree for PI on this exposed to->fence */
+ if (to->engine->schedule) {
+- err = i915_sched_node_add_dependency(&to->sched, &from->sched);
++ err = i915_sched_node_add_dependency(&to->sched,
++ &from->sched,
++ I915_DEPENDENCY_WEAK);
+ if (err < 0)
+ return err;
+ }
+diff --git a/drivers/gpu/drm/i915/i915_scheduler.c b/drivers/gpu/drm/i915/i915_scheduler.c
+index 34b654b4e58af..8e419d897c2b4 100644
+--- a/drivers/gpu/drm/i915/i915_scheduler.c
++++ b/drivers/gpu/drm/i915/i915_scheduler.c
+@@ -455,7 +455,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+ }
+
+ int i915_sched_node_add_dependency(struct i915_sched_node *node,
+- struct i915_sched_node *signal)
++ struct i915_sched_node *signal,
++ unsigned long flags)
+ {
+ struct i915_dependency *dep;
+
+@@ -464,8 +465,7 @@ int i915_sched_node_add_dependency(struct i915_sched_node *node,
+ return -ENOMEM;
+
+ if (!__i915_sched_node_add_dependency(node, signal, dep,
+- I915_DEPENDENCY_EXTERNAL |
+- I915_DEPENDENCY_ALLOC))
++ flags | I915_DEPENDENCY_ALLOC))
+ i915_dependency_free(dep);
+
+ return 0;
+diff --git a/drivers/gpu/drm/i915/i915_scheduler.h b/drivers/gpu/drm/i915/i915_scheduler.h
+index d1dc4efef77b5..6f0bf00fc5690 100644
+--- a/drivers/gpu/drm/i915/i915_scheduler.h
++++ b/drivers/gpu/drm/i915/i915_scheduler.h
+@@ -34,7 +34,8 @@ bool __i915_sched_node_add_dependency(struct i915_sched_node *node,
+ unsigned long flags);
+
+ int i915_sched_node_add_dependency(struct i915_sched_node *node,
+- struct i915_sched_node *signal);
++ struct i915_sched_node *signal,
++ unsigned long flags);
+
+ void i915_sched_node_fini(struct i915_sched_node *node);
+
+diff --git a/drivers/gpu/drm/i915/i915_scheduler_types.h b/drivers/gpu/drm/i915/i915_scheduler_types.h
+index d18e705500542..7186875088a0a 100644
+--- a/drivers/gpu/drm/i915/i915_scheduler_types.h
++++ b/drivers/gpu/drm/i915/i915_scheduler_types.h
+@@ -78,6 +78,7 @@ struct i915_dependency {
+ unsigned long flags;
+ #define I915_DEPENDENCY_ALLOC BIT(0)
+ #define I915_DEPENDENCY_EXTERNAL BIT(1)
++#define I915_DEPENDENCY_WEAK BIT(2)
+ };
+
+ #endif /* _I915_SCHEDULER_TYPES_H_ */
+--
+2.20.1
+
--- /dev/null
+From aa2009d59f92cfd0b38e1596be49370045fa2b17 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Apr 2020 12:34:36 +0300
+Subject: drm/qxl: lost qxl_bo_kunmap_atomic_page in qxl_image_init_helper()
+
+From: Vasily Averin <vvs@virtuozzo.com>
+
+[ Upstream commit 5b5703dbafae74adfbe298a56a81694172caf5e6 ]
+
+v2: removed TODO reminder
+
+Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
+Link: http://patchwork.freedesktop.org/patch/msgid/a4e0ae09-a73c-1c62-04ef-3f990d41bea9@virtuozzo.com
+Signed-off-by: Gerd Hoffmann <kraxel@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/qxl/qxl_image.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/qxl/qxl_image.c b/drivers/gpu/drm/qxl/qxl_image.c
+index 43688ecdd8a04..60ab7151b84dc 100644
+--- a/drivers/gpu/drm/qxl/qxl_image.c
++++ b/drivers/gpu/drm/qxl/qxl_image.c
+@@ -212,7 +212,8 @@ qxl_image_init_helper(struct qxl_device *qdev,
+ break;
+ default:
+ DRM_ERROR("unsupported image bit depth\n");
+- return -EINVAL; /* TODO: cleanup */
++ qxl_bo_kunmap_atomic_page(qdev, image_bo, ptr);
++ return -EINVAL;
+ }
+ image->u.bitmap.flags = QXL_BITMAP_TOP_DOWN;
+ image->u.bitmap.x = width;
+--
+2.20.1
+
--- /dev/null
+From 44436bd24fb453bdf57f56afb14b8a4230473084 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 25 Mar 2020 21:16:03 +0100
+Subject: drm/tegra: Fix SMMU support on Tegra124 and Tegra210
+
+From: Thierry Reding <treding@nvidia.com>
+
+[ Upstream commit 501be6c1c72417eab05e7413671a38ea991a8ebc ]
+
+When testing whether or not to enable the use of the SMMU, consult the
+supported DMA mask rather than the actually configured DMA mask, since
+the latter might already have been restricted.
+
+Fixes: 2d9384ff9177 ("drm/tegra: Relax IOMMU usage criteria on old Tegra")
+Tested-by: Jon Hunter <jonathanh@nvidia.com>
+Signed-off-by: Thierry Reding <treding@nvidia.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/tegra/drm.c | 3 ++-
+ drivers/gpu/host1x/dev.c | 13 +++++++++++++
+ include/linux/host1x.h | 3 +++
+ 3 files changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
+index bd268028fb3d6..583cd6e0ae27f 100644
+--- a/drivers/gpu/drm/tegra/drm.c
++++ b/drivers/gpu/drm/tegra/drm.c
+@@ -1039,6 +1039,7 @@ void tegra_drm_free(struct tegra_drm *tegra, size_t size, void *virt,
+
+ static bool host1x_drm_wants_iommu(struct host1x_device *dev)
+ {
++ struct host1x *host1x = dev_get_drvdata(dev->dev.parent);
+ struct iommu_domain *domain;
+
+ /*
+@@ -1076,7 +1077,7 @@ static bool host1x_drm_wants_iommu(struct host1x_device *dev)
+ * sufficient and whether or not the host1x is attached to an IOMMU
+ * doesn't matter.
+ */
+- if (!domain && dma_get_mask(dev->dev.parent) <= DMA_BIT_MASK(32))
++ if (!domain && host1x_get_dma_mask(host1x) <= DMA_BIT_MASK(32))
+ return true;
+
+ return domain != NULL;
+diff --git a/drivers/gpu/host1x/dev.c b/drivers/gpu/host1x/dev.c
+index 388bcc2889aaf..40a4b9f8b861a 100644
+--- a/drivers/gpu/host1x/dev.c
++++ b/drivers/gpu/host1x/dev.c
+@@ -502,6 +502,19 @@ static void __exit tegra_host1x_exit(void)
+ }
+ module_exit(tegra_host1x_exit);
+
++/**
++ * host1x_get_dma_mask() - query the supported DMA mask for host1x
++ * @host1x: host1x instance
++ *
++ * Note that this returns the supported DMA mask for host1x, which can be
++ * different from the applicable DMA mask under certain circumstances.
++ */
++u64 host1x_get_dma_mask(struct host1x *host1x)
++{
++ return host1x->info->dma_mask;
++}
++EXPORT_SYMBOL(host1x_get_dma_mask);
++
+ MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
+ MODULE_AUTHOR("Terje Bergstrom <tbergstrom@nvidia.com>");
+ MODULE_DESCRIPTION("Host1x driver for Tegra products");
+diff --git a/include/linux/host1x.h b/include/linux/host1x.h
+index 62d216ff10979..c230b4e70d759 100644
+--- a/include/linux/host1x.h
++++ b/include/linux/host1x.h
+@@ -17,9 +17,12 @@ enum host1x_class {
+ HOST1X_CLASS_GR3D = 0x60,
+ };
+
++struct host1x;
+ struct host1x_client;
+ struct iommu_group;
+
++u64 host1x_get_dma_mask(struct host1x *host1x);
++
+ /**
+ * struct host1x_client_ops - host1x client operations
+ * @init: host1x client initialization code
+--
+2.20.1
+
--- /dev/null
+From a203bef06025d6ca1b52bf1170cbb9aebfc5a96d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 May 2020 17:50:38 -0700
+Subject: epoll: call final ep_events_available() check under the lock
+
+From: Roman Penyaev <rpenyaev@suse.de>
+
+[ Upstream commit 65759097d804d2a9ad2b687db436319704ba7019 ]
+
+There is a possible race when ep_scan_ready_list() leaves ->rdllist and
+->ovflist empty for a short period of time although some events are
+pending. It is quite likely that ep_events_available() observes empty
+lists and goes to sleep.
+
+Since commit 339ddb53d373 ("fs/epoll: remove unnecessary wakeups of
+nested epoll") we are conservative in wakeups (there is only one place
+for wakeup and this is ep_poll_callback()), thus ep_events_available()
+must always observe the correct state of the two lists.
+
+The easiest and correct way is to do the final check under the lock.
+This does not impact the performance, since the lock is taken anyway for
+adding a wait entry to the wait queue.
+
+The discussion of the problem can be found here:
+
+ https://lore.kernel.org/linux-fsdevel/a2f22c3c-c25a-4bda-8339-a7bdaf17849e@akamai.com/
+
+In this patch barrierless __set_current_state() is used. This is safe
+since waitqueue_active() is called under the same lock on wakeup side.
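+
+As a simplified sketch of the resulting sleep-side sequence (illustrative
+only, not the verbatim fs/eventpoll.c code), both the final
+ep_events_available() check and the wait-queue insertion happen with
+ep->lock held, which is what makes the barrierless state change safe:
+
+	write_lock_irq(&ep->lock);
+	__set_current_state(TASK_INTERRUPTIBLE);
+	eavail = ep_events_available(ep);	/* final check, under lock */
+	if (!eavail)
+		__add_wait_queue_exclusive(&ep->wq, &wait);
+	write_unlock_irq(&ep->lock);
+
+	if (!eavail)
+		schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS);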
+
+The short-circuit for fatal signals (i.e. the fatal_signal_pending() check)
+is moved to the line just before the actual event harvesting routine. This
+is fully compliant with what is said in the comment of the patch where the
+actual fatal_signal_pending() check was added: c257a340ede0 ("fs, epoll:
+short circuit fetching events if thread has been killed").
+
+Fixes: 339ddb53d373 ("fs/epoll: remove unnecessary wakeups of nested epoll")
+Reported-by: Jason Baron <jbaron@akamai.com>
+Reported-by: Randy Dunlap <rdunlap@infradead.org>
+Signed-off-by: Roman Penyaev <rpenyaev@suse.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Jason Baron <jbaron@akamai.com>
+Cc: Khazhismel Kumykov <khazhy@google.com>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200505145609.1865152-1-rpenyaev@suse.de
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/eventpoll.c | 48 ++++++++++++++++++++++++++++--------------------
+ 1 file changed, 28 insertions(+), 20 deletions(-)
+
+diff --git a/fs/eventpoll.c b/fs/eventpoll.c
+index b0a097274cfeb..f5a481089893a 100644
+--- a/fs/eventpoll.c
++++ b/fs/eventpoll.c
+@@ -1857,34 +1857,33 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+ * event delivery.
+ */
+ init_wait(&wait);
+- write_lock_irq(&ep->lock);
+- __add_wait_queue_exclusive(&ep->wq, &wait);
+- write_unlock_irq(&ep->lock);
+
++ write_lock_irq(&ep->lock);
+ /*
+- * We don't want to sleep if the ep_poll_callback() sends us
+- * a wakeup in between. That's why we set the task state
+- * to TASK_INTERRUPTIBLE before doing the checks.
++ * Barrierless variant, waitqueue_active() is called under
++ * the same lock on wakeup ep_poll_callback() side, so it
++ * is safe to avoid an explicit barrier.
+ */
+- set_current_state(TASK_INTERRUPTIBLE);
++ __set_current_state(TASK_INTERRUPTIBLE);
++
+ /*
+- * Always short-circuit for fatal signals to allow
+- * threads to make a timely exit without the chance of
+- * finding more events available and fetching
+- * repeatedly.
++ * Do the final check under the lock. ep_scan_ready_list()
++ * plays with two lists (->rdllist and ->ovflist) and there
++ * is always a race when both lists are empty for short
++ * period of time although events are pending, so lock is
++ * important.
+ */
+- if (fatal_signal_pending(current)) {
+- res = -EINTR;
+- break;
++ eavail = ep_events_available(ep);
++ if (!eavail) {
++ if (signal_pending(current))
++ res = -EINTR;
++ else
++ __add_wait_queue_exclusive(&ep->wq, &wait);
+ }
++ write_unlock_irq(&ep->lock);
+
+- eavail = ep_events_available(ep);
+- if (eavail)
+- break;
+- if (signal_pending(current)) {
+- res = -EINTR;
++ if (eavail || res)
+ break;
+- }
+
+ if (!schedule_hrtimeout_range(to, slack, HRTIMER_MODE_ABS)) {
+ timed_out = 1;
+@@ -1905,6 +1904,15 @@ static int ep_poll(struct eventpoll *ep, struct epoll_event __user *events,
+ }
+
+ send_events:
++ if (fatal_signal_pending(current)) {
++ /*
++ * Always short-circuit for fatal signals to allow
++ * threads to make a timely exit without the chance of
++ * finding more events available and fetching
++ * repeatedly.
++ */
++ res = -EINTR;
++ }
+ /*
+ * Try to transfer events to user space. In case we get 0 events and
+ * there's still timeout left over, we go trying again in search of
+--
+2.20.1
+
--- /dev/null
+From dc73766b7cdbdf92707a90b0053b0f340bb68bf6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2020 07:57:46 -0700
+Subject: fibmap: Warn and return an error in case of block > INT_MAX
+
+From: Ritesh Harjani <riteshh@linux.ibm.com>
+
+[ Upstream commit b75dfde1212991b24b220c3995101c60a7b8ae74 ]
+
+We had better warn the fibmap user and not return a truncated, and
+therefore incorrect, block map address if the block address returned by
+bmap() is greater than INT_MAX (since the user supplied an integer pointer).
+
+It's better to pr_warn() all users of ioctl_fibmap() and return a proper
+error code rather than silently letting a FS corruption happen if the
+user tries to fiddle around with the returned block map address.
+
+We fix this by returning an error code of -ERANGE and returning 0 as the
+block mapping address in case it is > INT_MAX.
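+
+As a small userspace illustration of the truncation being guarded against
+(the block number is made up), a 64-bit block address above INT_MAX does
+not survive the cast into the user's int:
+
+	#include <limits.h>
+	#include <stdint.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		uint64_t block = (uint64_t)INT_MAX + 1;	/* block 2^31 */
+		int ur_block = (int)block;	/* what FIBMAP would copy out */
+
+		printf("real block %llu, truncated result %d\n",
+		       (unsigned long long)block, ur_block);
+		return 0;
+	}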
+
+Now iomap_bmap() can be called from either of two paths: when a user
+calls the ioctl_fibmap() interface to get the block mapping address, or
+by some filesystem via the internal bmap() kernel API. The bmap() kernel
+API is well equipped to handle u64 addresses.
+
+The WARN condition in iomap_bmap_actor() was mainly added to warn all
+the fibmap users. Now that we have directly added this warning for all
+fibmap users and also made sure to return 0 as the block map address in
+case addr > INT_MAX, we can remove this logic from iomap_bmap_actor().
+
+Signed-off-by: Ritesh Harjani <riteshh@linux.ibm.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ioctl.c | 8 ++++++++
+ fs/iomap/fiemap.c | 5 +----
+ 2 files changed, 9 insertions(+), 4 deletions(-)
+
+diff --git a/fs/ioctl.c b/fs/ioctl.c
+index 282d45be6f453..5e80b40bc1b5c 100644
+--- a/fs/ioctl.c
++++ b/fs/ioctl.c
+@@ -55,6 +55,7 @@ EXPORT_SYMBOL(vfs_ioctl);
+ static int ioctl_fibmap(struct file *filp, int __user *p)
+ {
+ struct inode *inode = file_inode(filp);
++ struct super_block *sb = inode->i_sb;
+ int error, ur_block;
+ sector_t block;
+
+@@ -71,6 +72,13 @@ static int ioctl_fibmap(struct file *filp, int __user *p)
+ block = ur_block;
+ error = bmap(inode, &block);
+
++ if (block > INT_MAX) {
++ error = -ERANGE;
++ pr_warn_ratelimited("[%s/%d] FS: %s File: %pD4 would truncate fibmap result\n",
++ current->comm, task_pid_nr(current),
++ sb->s_id, filp);
++ }
++
+ if (error)
+ ur_block = 0;
+ else
+diff --git a/fs/iomap/fiemap.c b/fs/iomap/fiemap.c
+index bccf305ea9ce2..d55e8f491a5e5 100644
+--- a/fs/iomap/fiemap.c
++++ b/fs/iomap/fiemap.c
+@@ -117,10 +117,7 @@ iomap_bmap_actor(struct inode *inode, loff_t pos, loff_t length,
+
+ if (iomap->type == IOMAP_MAPPED) {
+ addr = (pos - iomap->offset + iomap->addr) >> inode->i_blkbits;
+- if (addr > INT_MAX)
+- WARN(1, "would truncate bmap result\n");
+- else
+- *bno = addr;
++ *bno = addr;
+ }
+ return 0;
+ }
+--
+2.20.1
+
--- /dev/null
+From ab40967eb71378b6c6c25845eee695d057f5c3e3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2020 12:32:14 +0200
+Subject: fork: prevent accidental access to clone3 features
+
+From: Christian Brauner <christian.brauner@ubuntu.com>
+
+[ Upstream commit 3f2c788a13143620c5471ac96ac4f033fc9ac3f3 ]
+
+Jan reported an issue where an interaction between sign-extending clone's
+flag argument on ppc64le and the new CLONE_INTO_CGROUP feature causes
+clone() to consistently fail with EBADF.
+
+The whole story is a little longer. The legacy clone() syscall is odd in a
+bunch of ways and here two things interact. First, legacy clone's flag
+argument is word-size dependent, i.e. it's an unsigned long whereas most
+system calls with flag arguments use int or unsigned int. Second, legacy
+clone() ignores unknown and deprecated flags. The two of them taken
+together mean that users on 64bit systems have always been able to pass
+garbage in the upper 32 bits of the clone() syscall flags and things would
+just work fine.
+Just try this on a 64bit kernel prior to v5.7-rc1 where this will succeed
+and on v5.7-rc1 where this will fail with EBADF:
+
+int main(int argc, char *argv[])
+{
+ pid_t pid;
+
+ /* Note that legacy clone() has different argument ordering on
+ * different architectures so this won't work everywhere.
+ *
+ * Only set the upper 32 bits.
+ */
+ pid = syscall(__NR_clone, 0xffffffff00000000 | SIGCHLD,
+ NULL, NULL, NULL, NULL);
+ if (pid < 0)
+ exit(EXIT_FAILURE);
+ if (pid == 0)
+ exit(EXIT_SUCCESS);
+ if (wait(NULL) != pid)
+ exit(EXIT_FAILURE);
+
+ exit(EXIT_SUCCESS);
+}
+
+Since legacy clone() couldn't be extended this was not a problem so far and
+nobody really noticed or cared since nothing in the kernel ever bothered to
+look at the upper 32 bits.
+
+But once we introduced clone3() and expanded the flag argument in struct
+clone_args to 64 bit we opened this can of worms. With the first flag-based
+extension to clone3() making use of the upper 32 bits of the flag argument
+we've effectively made it possible for the legacy clone() syscall to reach
+clone3()-only flags. The sign extension scenario is just the odd
+corner case that made us notice this.
+
+The reason we just realized this now and not already when we introduced
+CLONE_CLEAR_SIGHAND was that CLONE_INTO_CGROUP assumes that a valid cgroup
+file descriptor has been given. So the sign extension (or the user
+accidently passing garbage for the upper 32 bits) caused the
+CLONE_INTO_CGROUP bit to be raised and the kernel to error out when it
+didn't find a valid cgroup file descriptor.
+
+Let's fix this by always capping the upper 32 bits for all codepaths that
+are not aware of clone3() features. This ensures that we can't reach
+clone3() only features by accident via legacy clone as with the sign
+extension case and also that legacy clone() works exactly like before, i.e.
+ignoring any unknown flags. This solution risks no regressions and is also
+pretty clean.
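+
+As a small userspace sketch of the capping (the garbage value is made up
+for illustration), masking the legacy clone() flags to their lower 32 bits
+discards whatever ended up in the upper half before it can be mistaken for
+a clone3()-only flag such as CLONE_INTO_CGROUP:
+
+	#include <signal.h>
+	#include <stdint.h>
+	#include <stdio.h>
+
+	int main(void)
+	{
+		unsigned long long flags = 0xffffffff00000000ULL | SIGCHLD;
+		uint32_t capped = (uint32_t)flags; /* lower_32_bits() equivalent */
+
+		printf("raw flags:    %#llx\n", flags);
+		printf("capped flags: %#x\n", (unsigned int)capped);
+		return 0;
+	}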
+
+Fixes: 7f192e3cd316 ("fork: add clone3")
+Fixes: ef2c41cf38a7 ("clone3: allow spawning processes into cgroups")
+Reported-by: Jan Stancek <jstancek@redhat.com>
+Signed-off-by: Christian Brauner <christian.brauner@ubuntu.com>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Cc: Dmitry V. Levin <ldv@altlinux.org>
+Cc: Andreas Schwab <schwab@linux-m68k.org>
+Cc: Florian Weimer <fw@deneb.enyo.de>
+Cc: libc-alpha@sourceware.org
+Cc: stable@vger.kernel.org # 5.3+
+Link: https://sourceware.org/pipermail/libc-alpha/2020-May/113596.html
+Link: https://lore.kernel.org/r/20200507103214.77218-1-christian.brauner@ubuntu.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/fork.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/kernel/fork.c b/kernel/fork.c
+index d90af13431c7e..c9ba2b7bfef9d 100644
+--- a/kernel/fork.c
++++ b/kernel/fork.c
+@@ -2486,11 +2486,11 @@ long do_fork(unsigned long clone_flags,
+ int __user *child_tidptr)
+ {
+ struct kernel_clone_args args = {
+- .flags = (clone_flags & ~CSIGNAL),
++ .flags = (lower_32_bits(clone_flags) & ~CSIGNAL),
+ .pidfd = parent_tidptr,
+ .child_tid = child_tidptr,
+ .parent_tid = parent_tidptr,
+- .exit_signal = (clone_flags & CSIGNAL),
++ .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL),
+ .stack = stack_start,
+ .stack_size = stack_size,
+ };
+@@ -2508,8 +2508,9 @@ long do_fork(unsigned long clone_flags,
+ pid_t kernel_thread(int (*fn)(void *), void *arg, unsigned long flags)
+ {
+ struct kernel_clone_args args = {
+- .flags = ((flags | CLONE_VM | CLONE_UNTRACED) & ~CSIGNAL),
+- .exit_signal = (flags & CSIGNAL),
++ .flags = ((lower_32_bits(flags) | CLONE_VM |
++ CLONE_UNTRACED) & ~CSIGNAL),
++ .exit_signal = (lower_32_bits(flags) & CSIGNAL),
+ .stack = (unsigned long)fn,
+ .stack_size = (unsigned long)arg,
+ };
+@@ -2570,11 +2571,11 @@ SYSCALL_DEFINE5(clone, unsigned long, clone_flags, unsigned long, newsp,
+ #endif
+ {
+ struct kernel_clone_args args = {
+- .flags = (clone_flags & ~CSIGNAL),
++ .flags = (lower_32_bits(clone_flags) & ~CSIGNAL),
+ .pidfd = parent_tidptr,
+ .child_tid = child_tidptr,
+ .parent_tid = parent_tidptr,
+- .exit_signal = (clone_flags & CSIGNAL),
++ .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL),
+ .stack = newsp,
+ .tls = tls,
+ };
+--
+2.20.1
+
--- /dev/null
+From c0bb334fa20ad338a0560d6d4daa3d96fd43685c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 Apr 2020 19:42:04 +0200
+Subject: gfs2: Another gfs2_walk_metadata fix
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+[ Upstream commit 566a2ab3c9005f62e784bd39022d58d34ef4365c ]
+
+Make sure we don't walk past the end of the metadata in gfs2_walk_metadata: the
+inode holds fewer pointers than indirect blocks.
+
+Slightly clean up gfs2_iomap_get.
+
+Fixes: a27a0c9b6a20 ("gfs2: gfs2_walk_metadata fix")
+Cc: stable@vger.kernel.org # v5.3+
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Bob Peterson <rpeterso@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/gfs2/bmap.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
+index 08f6fbb3655e2..31ed264356253 100644
+--- a/fs/gfs2/bmap.c
++++ b/fs/gfs2/bmap.c
+@@ -528,10 +528,12 @@ static int gfs2_walk_metadata(struct inode *inode, struct metapath *mp,
+
+ /* Advance in metadata tree. */
+ (mp->mp_list[hgt])++;
+- if (mp->mp_list[hgt] >= sdp->sd_inptrs) {
+- if (!hgt)
++ if (hgt) {
++ if (mp->mp_list[hgt] >= sdp->sd_inptrs)
++ goto lower_metapath;
++ } else {
++ if (mp->mp_list[hgt] >= sdp->sd_diptrs)
+ break;
+- goto lower_metapath;
+ }
+
+ fill_up_metapath:
+@@ -876,10 +878,9 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
+ ret = -ENOENT;
+ goto unlock;
+ } else {
+- /* report a hole */
+ iomap->offset = pos;
+ iomap->length = length;
+- goto do_alloc;
++ goto hole_found;
+ }
+ }
+ iomap->length = size;
+@@ -933,8 +934,6 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
+ return ret;
+
+ do_alloc:
+- iomap->addr = IOMAP_NULL_ADDR;
+- iomap->type = IOMAP_HOLE;
+ if (flags & IOMAP_REPORT) {
+ if (pos >= size)
+ ret = -ENOENT;
+@@ -956,6 +955,9 @@ static int gfs2_iomap_get(struct inode *inode, loff_t pos, loff_t length,
+ if (pos < size && height == ip->i_height)
+ ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
+ }
++hole_found:
++ iomap->addr = IOMAP_NULL_ADDR;
++ iomap->type = IOMAP_HOLE;
+ goto out;
+ }
+
+--
+2.20.1
+
--- /dev/null
+From 9f298ddb12253414d6b8c5234cbcd067747efdb5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Apr 2020 01:15:41 +0200
+Subject: gfs2: More gfs2_find_jhead fixes
+
+From: Andreas Gruenbacher <agruenba@redhat.com>
+
+[ Upstream commit aa83da7f47b26c9587bade6c4bc4736ffa308f0a ]
+
+It turns out that when extending an existing bio, gfs2_find_jhead fails to
+check if the block number is consecutive, which leads to incorrect reads for
+fragmented journals.
+
+In addition, limit the maximum bio size to an arbitrary value of 2 megabytes:
+since commit 07173c3ec276 ("block: enable multipage bvecs"), if we just keep
+adding pages until bio_add_page fails, bios will grow much larger than useful,
+which pins more memory than necessary with barely any additional performance
+gains.
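+
+Condensed into a single condition (a simplification of the patch below,
+which also has to handle chained bios), a page is only added to the
+current bio when the bio is still under the 2 MB cap and the next
+filesystem block is physically contiguous with what the bio already
+contains:
+
+	if (bio && bio->bi_iter.bi_size < max_bio_size &&
+	    bio_end_sector(bio) == (dblock << sdp->sd_fsb2bb_shift))
+		sz = bio_add_page(bio, page, bsize, off);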
+
+Fixes: f4686c26ecc3 ("gfs2: read journal in large chunks")
+Cc: stable@vger.kernel.org # v5.2+
+Signed-off-by: Andreas Gruenbacher <agruenba@redhat.com>
+Signed-off-by: Bob Peterson <rpeterso@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/gfs2/lops.c | 19 ++++++++++++-------
+ 1 file changed, 12 insertions(+), 7 deletions(-)
+
+diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
+index c090d5ad3f221..3a020bdc358cd 100644
+--- a/fs/gfs2/lops.c
++++ b/fs/gfs2/lops.c
+@@ -259,7 +259,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
+ struct super_block *sb = sdp->sd_vfs;
+ struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);
+
+- bio->bi_iter.bi_sector = blkno << (sb->s_blocksize_bits - 9);
++ bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
+ bio_set_dev(bio, sb->s_bdev);
+ bio->bi_end_io = end_io;
+ bio->bi_private = sdp;
+@@ -505,7 +505,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
+ unsigned int bsize = sdp->sd_sb.sb_bsize, off;
+ unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
+ unsigned int shift = PAGE_SHIFT - bsize_shift;
+- unsigned int readahead_blocks = BIO_MAX_PAGES << shift;
++ unsigned int max_bio_size = 2 * 1024 * 1024;
+ struct gfs2_journal_extent *je;
+ int sz, ret = 0;
+ struct bio *bio = NULL;
+@@ -533,12 +533,17 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
+ off = 0;
+ }
+
+- if (!bio || (bio_chained && !off)) {
++ if (!bio || (bio_chained && !off) ||
++ bio->bi_iter.bi_size >= max_bio_size) {
+ /* start new bio */
+ } else {
+- sz = bio_add_page(bio, page, bsize, off);
+- if (sz == bsize)
+- goto block_added;
++ sector_t sector = dblock << sdp->sd_fsb2bb_shift;
++
++ if (bio_end_sector(bio) == sector) {
++ sz = bio_add_page(bio, page, bsize, off);
++ if (sz == bsize)
++ goto block_added;
++ }
+ if (off) {
+ unsigned int blocks =
+ (PAGE_SIZE - off) >> bsize_shift;
+@@ -564,7 +569,7 @@ int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
+ off += bsize;
+ if (off == PAGE_SIZE)
+ page = NULL;
+- if (blocks_submitted < blocks_read + readahead_blocks) {
++ if (blocks_submitted < 2 * max_bio_size >> bsize_shift) {
+ /* Keep at least one bio in flight */
+ continue;
+ }
+--
+2.20.1
+
--- /dev/null
+From d21179a7684e126309caa463e0e15a5f12ba7781 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 May 2020 13:02:19 +0200
+Subject: hwmon: (da9052) Synchronize access with mfd
+
+From: Samu Nuutamo <samu.nuutamo@vincit.fi>
+
+[ Upstream commit 333e22db228f0bd0c839553015a6a8d3db4ba569 ]
+
+When tsi-as-adc is configured, it is possible for an in7[0123]_input read
+to return an incorrect value if a concurrent read of in[456]_input is
+performed. This is caused by a concurrent manipulation of the mux
+channel without proper locking as hwmon and mfd use different locks for
+synchronization.
+
+Switch hwmon to use the same lock as mfd when accessing the TSI channel.
+
+Fixes: 4f16cab19a3d5 ("hwmon: da9052: Add support for TSI channel")
+Signed-off-by: Samu Nuutamo <samu.nuutamo@vincit.fi>
+[rebase to current master, reword commit message slightly]
+Signed-off-by: Sebastian Reichel <sebastian.reichel@collabora.com>
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/da9052-hwmon.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
+index 53b517dbe7e6e..4af2fc309c286 100644
+--- a/drivers/hwmon/da9052-hwmon.c
++++ b/drivers/hwmon/da9052-hwmon.c
+@@ -244,9 +244,9 @@ static ssize_t da9052_tsi_show(struct device *dev,
+ int channel = to_sensor_dev_attr(devattr)->index;
+ int ret;
+
+- mutex_lock(&hwmon->hwmon_lock);
++ mutex_lock(&hwmon->da9052->auxadc_lock);
+ ret = __da9052_read_tsi(dev, channel);
+- mutex_unlock(&hwmon->hwmon_lock);
++ mutex_unlock(&hwmon->da9052->auxadc_lock);
+
+ if (ret < 0)
+ return ret;
+--
+2.20.1
+
--- /dev/null
+From cf9f297fd6e9101aaede28efa91e6203f24098fb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 9 May 2020 13:13:33 -0700
+Subject: hwmon: (drivetemp) Fix SCT support if SCT data tables are not
+ supported
+
+From: Guenter Roeck <linux@roeck-us.net>
+
+[ Upstream commit bcb543cc3d4034da3f3fd8bc4296a26dfeadf47d ]
+
+If SCT is supported but SCT data tables are not, the driver unnecessarily
+tries to fall back to SMART. Use SCT without data tables instead in this
+situation.
+
+Fixes: 5b46903d8bf3 ("hwmon: Driver for disk and solid state drives with temperature sensors")
+Signed-off-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hwmon/drivetemp.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hwmon/drivetemp.c b/drivers/hwmon/drivetemp.c
+index 9179460c2d9d5..0d4f3d97ffc61 100644
+--- a/drivers/hwmon/drivetemp.c
++++ b/drivers/hwmon/drivetemp.c
+@@ -346,7 +346,7 @@ static int drivetemp_identify_sata(struct drivetemp_data *st)
+ st->have_temp_highest = temp_is_valid(buf[SCT_STATUS_TEMP_HIGHEST]);
+
+ if (!have_sct_data_table)
+- goto skip_sct;
++ goto skip_sct_data;
+
+ /* Request and read temperature history table */
+ memset(buf, '\0', sizeof(st->smartdata));
+--
+2.20.1
+
--- /dev/null
+From cc973071cce6d361d542a0b6e74bd8db90685fe0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 22 Apr 2020 12:22:11 +0300
+Subject: i40iw: Fix error handling in i40iw_manage_arp_cache()
+
+From: Dan Carpenter <dan.carpenter@oracle.com>
+
+[ Upstream commit 37e31d2d26a4124506c24e95434e9baf3405a23a ]
+
+The i40iw_arp_table() function can return -EOVERFLOW if
+i40iw_alloc_resource() fails, so we can't just test for "== -1".
+
+Fixes: 4e9042e647ff ("i40iw: add hw and utils files")
+Link: https://lore.kernel.org/r/20200422092211.GA195357@mwanda
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Acked-by: Shiraz Saleem <shiraz.saleem@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/i40iw/i40iw_hw.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/infiniband/hw/i40iw/i40iw_hw.c b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+index 55a1fbf0e670c..ae8b97c306657 100644
+--- a/drivers/infiniband/hw/i40iw/i40iw_hw.c
++++ b/drivers/infiniband/hw/i40iw/i40iw_hw.c
+@@ -534,7 +534,7 @@ void i40iw_manage_arp_cache(struct i40iw_device *iwdev,
+ int arp_index;
+
+ arp_index = i40iw_arp_table(iwdev, ip_addr, ipv4, mac_addr, action);
+- if (arp_index == -1)
++ if (arp_index < 0)
+ return;
+ cqp_request = i40iw_get_cqp_request(&iwdev->cqp, false);
+ if (!cqp_request)
+--
+2.20.1
+
--- /dev/null
+From 2fc825a447a2e578b25eefe36f50d39879fdb9ed Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2020 10:10:12 +0300
+Subject: IB/core: Fix potential NULL pointer dereference in pkey cache
+
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+
+[ Upstream commit 1901b91f99821955eac2bd48fe25ee983385dc00 ]
+
+The IB core pkey cache is populated by procedure ib_cache_update().
+Initially, the pkey cache pointer is NULL. ib_cache_update allocates a
+buffer and populates it with the device's pkeys, via repeated calls to
+procedure ib_query_pkey().
+
+If there is a failure in populating the pkey buffer via ib_query_pkey(),
+ib_cache_update does not replace the old pkey buffer cache with the
+updated one -- it leaves the old cache as is.
+
+Since initially the pkey buffer cache is NULL, when calling
+ib_cache_update the first time, a failure in ib_query_pkey() will cause
+the pkey buffer cache pointer to remain NULL.
+
+In this situation, any subsequent calls to ib_get_cached_pkey(),
+ib_find_cached_pkey(), or ib_find_cached_pkey_exact() will try to
+dereference the NULL pkey cache pointer, causing a kernel panic.
+
+Fix this by checking the ib_cache_update() return value.
+
+Fixes: 8faea9fd4a39 ("RDMA/cache: Move the cache per-port data into the main ib_port_data")
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Link: https://lore.kernel.org/r/20200507071012.100594-1-leon@kernel.org
+Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/cache.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/core/cache.c b/drivers/infiniband/core/cache.c
+index 17bfedd24cc34..4619629b958cd 100644
+--- a/drivers/infiniband/core/cache.c
++++ b/drivers/infiniband/core/cache.c
+@@ -1536,8 +1536,11 @@ int ib_cache_setup_one(struct ib_device *device)
+ if (err)
+ return err;
+
+- rdma_for_each_port (device, p)
+- ib_cache_update(device, p, true);
++ rdma_for_each_port (device, p) {
++ err = ib_cache_update(device, p, true);
++ if (err)
++ return err;
++ }
+
+ return 0;
+ }
+--
+2.20.1
+
--- /dev/null
+From 82fa0a214489e3c8817db5d118d187eb06fd6a9c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2020 09:09:17 -0400
+Subject: IB/hfi1: Fix another case where pq is left on waitlist
+
+From: Mike Marciniszyn <mike.marciniszyn@intel.com>
+
+[ Upstream commit fa8dac3968635dec8518a13ac78d662f2aa88e4d ]
+
+The commit noted below fixed a case where a pq is left on the sdma wait
+list.
+
+It however missed another case.
+
+user_sdma_send_pkts() is called twice from hfi1_user_sdma_process_request().
+
+If the first one fails as indicated by -EBUSY, the pq will be placed on
+the waitlist, as designed.
+
+If the second call then succeeds, the pq is still on the waitlist, setting
+up a race with the interrupt handler if a subsequent request uses a
+different SDMA engine.
+
+Fix by deleting the first call.
+
+The use of pcount and the intent to send a short burst of packets followed
+by the larger balance of packets was never correctly implemented, because
+the two calls always send pcount packets no matter what. A subsequent
+patch will correct that issue.
+
+Fixes: 9a293d1e21a6 ("IB/hfi1: Ensure pq is not left on waitlist")
+Link: https://lore.kernel.org/r/20200504130917.175613.43231.stgit@awfm-01.aw.intel.com
+Cc: <stable@vger.kernel.org>
+Reviewed-by: Kaike Wan <kaike.wan@intel.com>
+Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/hfi1/user_sdma.c | 4 ----
+ 1 file changed, 4 deletions(-)
+
+diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
+index 13e4203497b33..a92346e88628b 100644
+--- a/drivers/infiniband/hw/hfi1/user_sdma.c
++++ b/drivers/infiniband/hw/hfi1/user_sdma.c
+@@ -589,10 +589,6 @@ int hfi1_user_sdma_process_request(struct hfi1_filedata *fd,
+
+ set_comp_state(pq, cq, info.comp_idx, QUEUED, 0);
+ pq->state = SDMA_PKT_Q_ACTIVE;
+- /* Send the first N packets in the request to buy us some time */
+- ret = user_sdma_send_pkts(req, pcount);
+- if (unlikely(ret < 0 && ret != -EBUSY))
+- goto free_req;
+
+ /*
+ * This is a somewhat blocking send implementation.
+--
+2.20.1
+
--- /dev/null
+From c3c6582f0be58899ad2fe8b52dab046ddc8a0fbb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Apr 2020 10:59:21 +0300
+Subject: IB/mlx4: Test return value of calls to ib_get_cached_pkey
+
+From: Jack Morgenstein <jackm@dev.mellanox.co.il>
+
+[ Upstream commit 6693ca95bd4330a0ad7326967e1f9bcedd6b0800 ]
+
+In the mlx4_ib_post_send() flow, some functions call ib_get_cached_pkey()
+without checking its return value. If ib_get_cached_pkey() returns an
+error code, these functions should return failure.
+
+Fixes: 1ffeb2eb8be9 ("IB/mlx4: SR-IOV IB context objects and proxy/tunnel SQP support")
+Fixes: 225c7b1feef1 ("IB/mlx4: Add a driver Mellanox ConnectX InfiniBand adapters")
+Fixes: e622f2f4ad21 ("IB: split struct ib_send_wr")
+Link: https://lore.kernel.org/r/20200426075921.130074-1-leon@kernel.org
+Signed-off-by: Jack Morgenstein <jackm@dev.mellanox.co.il>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/mlx4/qp.c | 14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
+index 26425dd2d960f..a2b1f6af5ba3f 100644
+--- a/drivers/infiniband/hw/mlx4/qp.c
++++ b/drivers/infiniband/hw/mlx4/qp.c
+@@ -2891,6 +2891,7 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
+ int send_size;
+ int header_size;
+ int spc;
++ int err;
+ int i;
+
+ if (wr->wr.opcode != IB_WR_SEND)
+@@ -2925,7 +2926,9 @@ static int build_sriov_qp0_header(struct mlx4_ib_sqp *sqp,
+
+ sqp->ud_header.lrh.virtual_lane = 0;
+ sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
+- ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
++ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, 0, &pkey);
++ if (err)
++ return err;
+ sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
+ if (sqp->qp.mlx4_ib_qp_type == MLX4_IB_QPT_TUN_SMI_OWNER)
+ sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
+@@ -3212,9 +3215,14 @@ static int build_mlx_header(struct mlx4_ib_sqp *sqp, const struct ib_ud_wr *wr,
+ }
+ sqp->ud_header.bth.solicited_event = !!(wr->wr.send_flags & IB_SEND_SOLICITED);
+ if (!sqp->qp.ibqp.qp_num)
+- ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index, &pkey);
++ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, sqp->pkey_index,
++ &pkey);
+ else
+- ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index, &pkey);
++ err = ib_get_cached_pkey(ib_dev, sqp->qp.port, wr->pkey_index,
++ &pkey);
++ if (err)
++ return err;
++
+ sqp->ud_header.bth.pkey = cpu_to_be16(pkey);
+ sqp->ud_header.bth.destination_qpn = cpu_to_be32(wr->remote_qpn);
+ sqp->ud_header.bth.psn = cpu_to_be32((sqp->send_psn++) & ((1 << 24) - 1));
+--
+2.20.1
+
--- /dev/null
+From 6d6f67a74f7443635b1904b3955cc61ab3f1ea95 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2020 17:09:37 +0300
+Subject: io_uring: check non-sync defer_list carefully
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 4ee3631451c9a62e6b6bc7ee51fb9a5b34e33509 ]
+
+io_req_defer() does double-checked locking. Use the proper helper for
+that, i.e. list_empty_careful().
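+
+The pattern this guards against misreading (an illustrative sketch, not
+the actual io_uring function): the unlocked peek uses list_empty_careful(),
+which checks both the prev and next pointers so a list in the middle of a
+concurrent update is not mistaken for empty, and the result is then
+re-checked once the lock is held:
+
+	if (list_empty_careful(&ctx->defer_list))	/* lockless peek */
+		return 0;
+
+	spin_lock_irq(&ctx->completion_lock);
+	if (list_empty(&ctx->defer_list)) {		/* re-check under lock */
+		spin_unlock_irq(&ctx->completion_lock);
+		return 0;
+	}
+	/* queue the request on ctx->defer_list while still holding the lock */
+	spin_unlock_irq(&ctx->completion_lock);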
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 01f71b9efb88f..832e042531bc4 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -4258,7 +4258,7 @@ static int io_req_defer(struct io_kiocb *req, const struct io_uring_sqe *sqe)
+ int ret;
+
+ /* Still need defer if there is pending req in defer list. */
+- if (!req_need_defer(req) && list_empty(&ctx->defer_list))
++ if (!req_need_defer(req) && list_empty_careful(&ctx->defer_list))
+ return 0;
+
+ if (!req->io && io_alloc_async_ctx(req))
+--
+2.20.1
+
--- /dev/null
+From 0e217c289cf4c7599bcc7aa323b97c86e03118d8 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 1 May 2020 08:52:56 +0800
+Subject: io_uring: use cond_resched() in io_ring_ctx_wait_and_kill()
+
+From: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
+
+[ Upstream commit 3fd44c86711f71156b586c22b0495c58f69358bb ]
+
+While working on making io_uring sqpoll mode support syscalls that need
+struct files_struct, I got a cpu soft lockup in io_ring_ctx_wait_and_kill(),
+
+ while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
+ cpu_relax();
+
+the above loop never has a chance to exit. This is because preemption
+isn't enabled in the kernel, the context calling
+io_ring_ctx_wait_and_kill() and io_sq_thread() run on the same cpu, and
+if io_sq_thread() calls cond_resched() to yield the cpu and another
+context enters the above loop, io_sq_thread() will stay on the runqueue
+forever and never exit.
+
+Using cond_resched() fixes this issue.
+
+Reported-by: syzbot+66243bb7126c410cefe6@syzkaller.appspotmail.com
+Signed-off-by: Xiaoguang Wang <xiaoguang.wang@linux.alibaba.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 9690c845a3e4b..01f71b9efb88f 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -6451,7 +6451,7 @@ static void io_ring_ctx_wait_and_kill(struct io_ring_ctx *ctx)
+ * it could cause shutdown to hang.
+ */
+ while (ctx->sqo_thread && !wq_has_sleeper(&ctx->sqo_wait))
+- cpu_relax();
++ cond_resched();
+
+ io_kill_timeouts(ctx);
+ io_poll_remove_all(ctx);
+--
+2.20.1
+
--- /dev/null
+From 669deb9ce1a92804c1e54cf038a2e420ed335b6c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 May 2020 17:50:48 -0700
+Subject: ipc/util.c: sysvipc_find_ipc() incorrectly updates position index
+
+From: Vasily Averin <vvs@virtuozzo.com>
+
+[ Upstream commit 5e698222c70257d13ae0816720dde57c56f81e15 ]
+
+Commit 89163f93c6f9 ("ipc/util.c: sysvipc_find_ipc() should increase
+position index") is causing this bug (seen on 5.6.8):
+
+ # ipcs -q
+
+ ------ Message Queues --------
+ key msqid owner perms used-bytes messages
+
+ # ipcmk -Q
+ Message queue id: 0
+ # ipcs -q
+
+ ------ Message Queues --------
+ key msqid owner perms used-bytes messages
+ 0x82db8127 0 root 644 0 0
+
+ # ipcmk -Q
+ Message queue id: 1
+ # ipcs -q
+
+ ------ Message Queues --------
+ key msqid owner perms used-bytes messages
+ 0x82db8127 0 root 644 0 0
+ 0x76d1fb2a 1 root 644 0 0
+
+ # ipcrm -q 0
+ # ipcs -q
+
+ ------ Message Queues --------
+ key msqid owner perms used-bytes messages
+ 0x76d1fb2a 1 root 644 0 0
+ 0x76d1fb2a 1 root 644 0 0
+
+ # ipcmk -Q
+ Message queue id: 2
+ # ipcrm -q 2
+ # ipcs -q
+
+ ------ Message Queues --------
+ key msqid owner perms used-bytes messages
+ 0x76d1fb2a 1 root 644 0 0
+ 0x76d1fb2a 1 root 644 0 0
+
+ # ipcmk -Q
+ Message queue id: 3
+ # ipcrm -q 1
+ # ipcs -q
+
+ ------ Message Queues --------
+ key msqid owner perms used-bytes messages
+ 0x7c982867 3 root 644 0 0
+ 0x7c982867 3 root 644 0 0
+ 0x7c982867 3 root 644 0 0
+ 0x7c982867 3 root 644 0 0
+
+Whenever an IPC item with a low id is deleted, the items with higher ids
+are duplicated, as if filling a hole.
+
+new_pos should skip over the hole of unused ids; pos can be updated
+inside the "for" loop.
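+
+Sketched without the locking details, the corrected iterator advances pos
+past unused ids inside the loop and only then derives the next position
+from where it actually stopped, so holes are skipped instead of replayed:
+
+	ipc = NULL;
+	for (; pos < ipc_mni; pos++) {
+		ipc = idr_find(&ids->ipcs_idr, pos);
+		if (ipc != NULL)
+			break;		/* next live object */
+	}
+	*new_pos = pos + 1;		/* resume after the slot we stopped at */
+	return ipc;			/* NULL terminates the iteration */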
+
+Fixes: 89163f93c6f9 ("ipc/util.c: sysvipc_find_ipc() should increase position index")
+Reported-by: Andreas Schwab <schwab@suse.de>
+Reported-by: Randy Dunlap <rdunlap@infradead.org>
+Signed-off-by: Vasily Averin <vvs@virtuozzo.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Acked-by: Waiman Long <longman@redhat.com>
+Cc: NeilBrown <neilb@suse.com>
+Cc: Steven Rostedt <rostedt@goodmis.org>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Peter Oberparleiter <oberpar@linux.ibm.com>
+Cc: Davidlohr Bueso <dave@stgolabs.net>
+Cc: Manfred Spraul <manfred@colorfullife.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/4921fe9b-9385-a2b4-1dc4-1099be6d2e39@virtuozzo.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ ipc/util.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/ipc/util.c b/ipc/util.c
+index 2d70f25f64b83..c4a67982ec008 100644
+--- a/ipc/util.c
++++ b/ipc/util.c
+@@ -764,21 +764,21 @@ static struct kern_ipc_perm *sysvipc_find_ipc(struct ipc_ids *ids, loff_t pos,
+ total++;
+ }
+
+- *new_pos = pos + 1;
++ ipc = NULL;
+ if (total >= ids->in_use)
+- return NULL;
++ goto out;
+
+ for (; pos < ipc_mni; pos++) {
+ ipc = idr_find(&ids->ipcs_idr, pos);
+ if (ipc != NULL) {
+ rcu_read_lock();
+ ipc_lock_object(ipc);
+- return ipc;
++ break;
+ }
+ }
+-
+- /* Out of range - return NULL to terminate iteration */
+- return NULL;
++out:
++ *new_pos = pos + 1;
++ return ipc;
+ }
+
+ static void *sysvipc_proc_next(struct seq_file *s, void *it, loff_t *pos)
+--
+2.20.1
+
--- /dev/null
+From 5318c41ffaf7d2d8c2bd2cecfa27ebebfa5fb0b7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 May 2020 17:50:34 -0700
+Subject: mm, memcg: fix inconsistent oom event behavior
+
+From: Yafang Shao <laoar.shao@gmail.com>
+
+[ Upstream commit 04fd61a4e01028210a91f0efc408c8bc61a3018c ]
+
+A recent commit 9852ae3fe529 ("mm, memcg: consider subtrees in
+memory.events") changed the behavior of memcg events, which will now
+consider subtrees in memory.events.
+
+But the oom_kill event is a special one as it is used in both cgroup1 and
+cgroup2. In cgroup1, it is displayed in memory.oom_control. The file
+memory.oom_control exists in both the root memcg and non-root memcgs,
+unlike memory.events, which only exists in non-root memcgs. That commit
+is okay for cgroup2, but it is not okay for cgroup1 as it causes
+inconsistent behavior between the root memcg and non-root memcgs.
+
+Here's an example of why this behavior is inconsistent in cgroup1.
+
+ root memcg
+ /
+ memcg foo
+ /
+ memcg bar
+
+Suppose there's an oom_kill in memcg bar, then the oom_kill will be
+
+ root memcg : memory.oom_control(oom_kill) 0
+ /
+ memcg foo : memory.oom_control(oom_kill) 1
+ /
+ memcg bar : memory.oom_control(oom_kill) 1
+
+For the non-root memcg, its memory.oom_control(oom_kill) includes its
+descendants' oom_kill, but for the root memcg it doesn't include its
+descendants' oom_kill. That means memory.oom_control(oom_kill) has
+different meanings in different memcgs. That is inconsistent. Then the
+user has to know whether the memcg is root or not.
+
+If we can't fully support it in cgroup1, for example by adding
+memory.events.local into cgroup1 as well, then let's not touch its
+original behavior.
+
+Fixes: 9852ae3fe529 ("mm, memcg: consider subtrees in memory.events")
+Reported-by: Randy Dunlap <rdunlap@infradead.org>
+Signed-off-by: Yafang Shao <laoar.shao@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Shakeel Butt <shakeelb@google.com>
+Acked-by: Johannes Weiner <hannes@cmpxchg.org>
+Acked-by: Chris Down <chris@chrisdown.name>
+Acked-by: Michal Hocko <mhocko@suse.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200502141055.7378-1-laoar.shao@gmail.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/memcontrol.h | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
+index e9ba01336d4e7..bc5a3621a9d7d 100644
+--- a/include/linux/memcontrol.h
++++ b/include/linux/memcontrol.h
+@@ -783,6 +783,8 @@ static inline void memcg_memory_event(struct mem_cgroup *memcg,
+ atomic_long_inc(&memcg->memory_events[event]);
+ cgroup_file_notify(&memcg->events_file);
+
++ if (!cgroup_subsys_on_dfl(memory_cgrp_subsys))
++ break;
+ if (cgrp_dfl_root.flags & CGRP_ROOT_MEMORY_LOCAL_EVENTS)
+ break;
+ } while ((memcg = parent_mem_cgroup(memcg)) &&
+--
+2.20.1
+
--- /dev/null
+From c1b25b9ef43a427ef86a66cb460b8a881a202e76 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Apr 2020 22:23:55 +0200
+Subject: mmc: alcor: Fix a resource leak in the error path for ->probe()
+
+From: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+
+[ Upstream commit 7c277dd2b0ff6a16f1732a66c2c52a29f067163e ]
+
+If devm_request_threaded_irq() fails, the allocated struct mmc_host needs
+to be freed by calling mmc_free_host(), so let's do that.
+
+Fixes: c5413ad815a6 ("mmc: add new Alcor Micro Cardreader SD/MMC driver")
+Signed-off-by: Christophe JAILLET <christophe.jaillet@wanadoo.fr>
+Link: https://lore.kernel.org/r/20200426202355.43055-1-christophe.jaillet@wanadoo.fr
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/alcor.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/alcor.c b/drivers/mmc/host/alcor.c
+index 1aee485d56d4c..026ca9194ce5b 100644
+--- a/drivers/mmc/host/alcor.c
++++ b/drivers/mmc/host/alcor.c
+@@ -1104,7 +1104,7 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
+
+ if (ret) {
+ dev_err(&pdev->dev, "Failed to get irq for data line\n");
+- return ret;
++ goto free_host;
+ }
+
+ mutex_init(&host->cmd_mutex);
+@@ -1116,6 +1116,10 @@ static int alcor_pci_sdmmc_drv_probe(struct platform_device *pdev)
+ dev_set_drvdata(&pdev->dev, host);
+ mmc_add_host(mmc);
+ return 0;
++
++free_host:
++ mmc_free_host(mmc);
++ return ret;
+ }
+
+ static int alcor_pci_sdmmc_drv_remove(struct platform_device *pdev)
+--
+2.20.1
+
--- /dev/null
+From 70a7eb9aed3c28b6361ad464ecb02f2f68006b65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 8 May 2020 09:22:27 +0300
+Subject: mmc: block: Fix request completion in the CQE timeout path
+
+From: Adrian Hunter <adrian.hunter@intel.com>
+
+[ Upstream commit c077dc5e0620508a29497dac63a2822324ece52a ]
+
+First, it should be noted that the CQE timeout (60 seconds) is substantial
+so a CQE request that times out is really stuck, and the race between
+timeout and completion is extremely unlikely. Nevertheless this patch
+fixes an issue with it.
+
+Commit ad73d6feadbd7b ("mmc: complete requests from ->timeout")
+preserved the existing functionality of completing the request.
+However, that had only been necessary because the block layer
+timeout handler had been marking the request to prevent it from being
+completed normally. That restriction was removed at the same time, with
+the result that a request which has already gone away will have been
+completed anyway. That is, the completion was unnecessary.
+
+At the time, the unnecessary completion was harmless because the block
+layer would ignore it, although that changed in kernel v5.0.
+
+Note for stable, this patch will not apply cleanly without patch "mmc:
+core: Fix recursive locking issue in CQE recovery path"
+
+Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
+Fixes: ad73d6feadbd7b ("mmc: complete requests from ->timeout")
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20200508062227.23144-1-adrian.hunter@intel.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/core/queue.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
+index 4d1e468d39823..9c0ccb3744c28 100644
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -110,8 +110,7 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
+ mmc_cqe_recovery_notifier(mrq);
+ return BLK_EH_RESET_TIMER;
+ }
+- /* No timeout (XXX: huh? comment doesn't make much sense) */
+- blk_mq_complete_request(req);
++ /* The request has gone already */
+ return BLK_EH_DONE;
+ default:
+ /* Timeout is handled by mmc core */
+--
+2.20.1
+
--- /dev/null
+From 97fb71831a9e8a8a97e65415a833f2a41e56167f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 6 May 2020 20:04:02 +0530
+Subject: mmc: core: Check request type before completing the request
+
+From: Veerabhadrarao Badiganti <vbadigan@codeaurora.org>
+
+[ Upstream commit e6bfb1bf00852b55f4c771f47ae67004c04d3c87 ]
+
+In the request completion path with CQE, the request type is checked
+after the request has been completed. This results in the wrong request
+type being returned and leads to an IO hang.
+
+The ASYNC request type is returned for DCMD type requests. Because of
+this mismatch, the mq->cqe_busy flag is never cleared and the driver
+never invokes blk_mq_run_hw_queue(), so requests are not dispatched to
+the LLD from the block layer.
+
+All of this eventually leads to an IO hang.
+So, get the request type before completing the request.
+
+Cc: <stable@vger.kernel.org>
+Fixes: 1e8e55b67030 ("mmc: block: Add CQE support")
+Signed-off-by: Veerabhadrarao Badiganti <vbadigan@codeaurora.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/1588775643-18037-2-git-send-email-vbadigan@codeaurora.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/core/block.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 663d87924e5e8..32db16f6debc7 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1417,6 +1417,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+ struct mmc_request *mrq = &mqrq->brq.mrq;
+ struct request_queue *q = req->q;
+ struct mmc_host *host = mq->card->host;
++ enum mmc_issue_type issue_type = mmc_issue_type(mq, req);
+ unsigned long flags;
+ bool put_card;
+ int err;
+@@ -1446,7 +1447,7 @@ static void mmc_blk_cqe_complete_rq(struct mmc_queue *mq, struct request *req)
+
+ spin_lock_irqsave(&mq->lock, flags);
+
+- mq->in_flight[mmc_issue_type(mq, req)] -= 1;
++ mq->in_flight[issue_type] -= 1;
+
+ put_card = (mmc_tot_in_flight(mq) == 0);
+
+--
+2.20.1
+
--- /dev/null
+From 0ae58252a28ffa4e4f8143f45e8a09215f5d2f56 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2020 21:45:33 +0530
+Subject: mmc: core: Fix recursive locking issue in CQE recovery path
+
+From: Sarthak Garg <sartgarg@codeaurora.org>
+
+[ Upstream commit 39a22f73744d5baee30b5f134ae2e30b668b66ed ]
+
+Consider the following stack trace
+
+-001|raw_spin_lock_irqsave
+-002|mmc_blk_cqe_complete_rq
+-003|__blk_mq_complete_request(inline)
+-003|blk_mq_complete_request(rq)
+-004|mmc_cqe_timed_out(inline)
+-004|mmc_mq_timed_out
+
+mmc_mq_timed_out() acquires the queue lock first. mmc_blk_cqe_complete_rq()
+then tries to acquire the same queue lock, resulting in recursive locking:
+the task spins on a lock it has already acquired, which eventually leads
+to a watchdog bark.
+
+Fix this issue by holding the lock only for the required critical section.
+
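+To illustrate the deadlock shape (a simplified sketch, not the exact
+driver code), the timeout handler effectively did:
+
+    spin_lock_irqsave(&mq->lock, flags);
+    ...
+    blk_mq_complete_request(req);  /* ends up in mmc_blk_cqe_complete_rq(),
+                                    * which takes mq->lock again */
+    ...
+    spin_unlock_irqrestore(&mq->lock, flags);
+
+Since kernel spinlocks are not recursive, the second acquisition on the
+same CPU can never succeed and the CPU spins until the watchdog fires.
+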
+Cc: <stable@vger.kernel.org>
+Fixes: 1e8e55b67030 ("mmc: block: Add CQE support")
+Suggested-by: Sahitya Tummala <stummala@codeaurora.org>
+Signed-off-by: Sarthak Garg <sartgarg@codeaurora.org>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/1588868135-31783-1-git-send-email-vbadigan@codeaurora.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/core/queue.c | 13 ++++---------
+ 1 file changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
+index 9edc08685e86d..4d1e468d39823 100644
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -107,7 +107,7 @@ static enum blk_eh_timer_return mmc_cqe_timed_out(struct request *req)
+ case MMC_ISSUE_DCMD:
+ if (host->cqe_ops->cqe_timeout(host, mrq, &recovery_needed)) {
+ if (recovery_needed)
+- __mmc_cqe_recovery_notifier(mq);
++ mmc_cqe_recovery_notifier(mrq);
+ return BLK_EH_RESET_TIMER;
+ }
+ /* No timeout (XXX: huh? comment doesn't make much sense) */
+@@ -125,18 +125,13 @@ static enum blk_eh_timer_return mmc_mq_timed_out(struct request *req,
+ struct request_queue *q = req->q;
+ struct mmc_queue *mq = q->queuedata;
+ unsigned long flags;
+- int ret;
++ bool ignore_tout;
+
+ spin_lock_irqsave(&mq->lock, flags);
+-
+- if (mq->recovery_needed || !mq->use_cqe)
+- ret = BLK_EH_RESET_TIMER;
+- else
+- ret = mmc_cqe_timed_out(req);
+-
++ ignore_tout = mq->recovery_needed || !mq->use_cqe;
+ spin_unlock_irqrestore(&mq->lock, flags);
+
+- return ret;
++ return ignore_tout ? BLK_EH_RESET_TIMER : mmc_cqe_timed_out(req);
+ }
+
+ static void mmc_mq_recovery_handler(struct work_struct *work)
+--
+2.20.1
+
--- /dev/null
+From df8b552daa54dd10ba6d3238d531b912e686bb92 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 May 2020 14:39:57 +0800
+Subject: mmc: sdhci-pci-gli: Fix can not access GL9750 after reboot from
+ Windows 10
+
+From: Ben Chuang <ben.chuang@genesyslogic.com.tw>
+
+[ Upstream commit b56ff195c317ad28c05d354aeecbb9995b8e08c1 ]
+
+Some bits in a vendor-defined register need to be cleared after a reboot
+from Windows 10; otherwise the GL9750 cannot be accessed.
+
+Fixes: e51df6ce668a ("mmc: host: sdhci-pci: Add Genesys Logic GL975x support")
+Reported-by: Grzegorz Kowal <custos.mentis@gmail.com>
+Signed-off-by: Ben Chuang <ben.chuang@genesyslogic.com.tw>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Tested-by: Grzegorz Kowal <custos.mentis@gmail.com>
+Link: https://lore.kernel.org/r/20200504063957.6638-1-benchuanggli@gmail.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/sdhci-pci-gli.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index ff39d81a5742c..fd76aa672e020 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -26,6 +26,9 @@
+ #define SDHCI_GLI_9750_DRIVING_2 GENMASK(27, 26)
+ #define GLI_9750_DRIVING_1_VALUE 0xFFF
+ #define GLI_9750_DRIVING_2_VALUE 0x3
++#define SDHCI_GLI_9750_SEL_1 BIT(29)
++#define SDHCI_GLI_9750_SEL_2 BIT(31)
++#define SDHCI_GLI_9750_ALL_RST (BIT(24)|BIT(25)|BIT(28)|BIT(30))
+
+ #define SDHCI_GLI_9750_PLL 0x864
+ #define SDHCI_GLI_9750_PLL_TX2_INV BIT(23)
+@@ -122,6 +125,8 @@ static void gli_set_9750(struct sdhci_host *host)
+ GLI_9750_DRIVING_1_VALUE);
+ driving_value |= FIELD_PREP(SDHCI_GLI_9750_DRIVING_2,
+ GLI_9750_DRIVING_2_VALUE);
++ driving_value &= ~(SDHCI_GLI_9750_SEL_1|SDHCI_GLI_9750_SEL_2|SDHCI_GLI_9750_ALL_RST);
++ driving_value |= SDHCI_GLI_9750_SEL_2;
+ sdhci_writel(host, driving_value, SDHCI_GLI_9750_DRIVING);
+
+ sw_ctrl_value &= ~SDHCI_GLI_9750_SW_CTRL_4;
+--
+2.20.1
+
--- /dev/null
+From 14d49c154c362893bdfa5be48b08f996e6962c9e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 27 Apr 2020 18:30:48 +0800
+Subject: mmc: sdhci-pci-gli: Fix no irq handler from suspend
+
+From: Ben Chuang <ben.chuang@genesyslogic.com.tw>
+
+[ Upstream commit 282ede76e47048eebc8ce5324b412890f0ec0a69 ]
+
+The kernel prints a message similar to
+"[ 28.881959] do_IRQ: 5.36 No irq handler for vector"
+when GL975x resumes from suspend. Implement a resume callback to fix this.
+
+Fixes: 31e43f31890c ("mmc: sdhci-pci-gli: Enable MSI interrupt for GL975x")
+Co-developed-by: Renius Chen <renius.chen@genesyslogic.com.tw>
+Signed-off-by: Renius Chen <renius.chen@genesyslogic.com.tw>
+Tested-by: Dave Flogeras <dflogeras2@gmail.com>
+Signed-off-by: Ben Chuang <ben.chuang@genesyslogic.com.tw>
+Tested-by: Vineeth Pillai <vineethrp@gmail.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Link: https://lore.kernel.org/r/20200427103048.20785-1-benchuanggli@gmail.com
+Cc: stable@vger.kernel.org
+Signed-off-by: Samuel Zou <zou_wei@huawei.com>
+[Samuel Zou: Make sdhci_pci_gli_resume() static]
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/sdhci-pci-gli.c | 18 ++++++++++++++++++
+ 1 file changed, 18 insertions(+)
+
+diff --git a/drivers/mmc/host/sdhci-pci-gli.c b/drivers/mmc/host/sdhci-pci-gli.c
+index ce15a05f23d41..ff39d81a5742c 100644
+--- a/drivers/mmc/host/sdhci-pci-gli.c
++++ b/drivers/mmc/host/sdhci-pci-gli.c
+@@ -334,6 +334,18 @@ static u32 sdhci_gl9750_readl(struct sdhci_host *host, int reg)
+ return value;
+ }
+
++#ifdef CONFIG_PM_SLEEP
++static int sdhci_pci_gli_resume(struct sdhci_pci_chip *chip)
++{
++ struct sdhci_pci_slot *slot = chip->slots[0];
++
++ pci_free_irq_vectors(slot->chip->pdev);
++ gli_pcie_enable_msi(slot);
++
++ return sdhci_pci_resume_host(chip);
++}
++#endif
++
+ static const struct sdhci_ops sdhci_gl9755_ops = {
+ .set_clock = sdhci_set_clock,
+ .enable_dma = sdhci_pci_enable_dma,
+@@ -348,6 +360,9 @@ const struct sdhci_pci_fixes sdhci_gl9755 = {
+ .quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
+ .probe_slot = gli_probe_slot_gl9755,
+ .ops = &sdhci_gl9755_ops,
++#ifdef CONFIG_PM_SLEEP
++ .resume = sdhci_pci_gli_resume,
++#endif
+ };
+
+ static const struct sdhci_ops sdhci_gl9750_ops = {
+@@ -366,4 +381,7 @@ const struct sdhci_pci_fixes sdhci_gl9750 = {
+ .quirks2 = SDHCI_QUIRK2_BROKEN_DDR50,
+ .probe_slot = gli_probe_slot_gl9750,
+ .ops = &sdhci_gl9750_ops,
++#ifdef CONFIG_PM_SLEEP
++ .resume = sdhci_pci_gli_resume,
++#endif
+ };
+--
+2.20.1
+
--- /dev/null
+From 5270b58e23c7fbef37fecb962547c2e45ac85aaa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2020 23:30:48 +0200
+Subject: netfilter: conntrack: avoid gcc-10 zero-length-bounds warning
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+[ Upstream commit 2c407aca64977ede9b9f35158e919773cae2082f ]
+
+gcc-10 warns about a suspicious access to a zero-length array member:
+
+net/netfilter/nf_conntrack_core.c: In function '__nf_conntrack_alloc':
+net/netfilter/nf_conntrack_core.c:1522:9: warning: array subscript 0 is outside the bounds of an interior zero-length array 'u8[0]' {aka 'unsigned char[0]'} [-Wzero-length-bounds]
+ 1522 | memset(&ct->__nfct_init_offset[0], 0,
+ | ^~~~~~~~~~~~~~~~~~~~~~~~~~
+In file included from net/netfilter/nf_conntrack_core.c:37:
+include/net/netfilter/nf_conntrack.h:90:5: note: while referencing '__nfct_init_offset'
+ 90 | u8 __nfct_init_offset[0];
+ | ^~~~~~~~~~~~~~~~~~
+
+The code is correct but a bit unusual. Rework it slightly in a way that
+does not trigger the warning, using an empty struct instead of an empty
+array. There are probably more elegant ways to do this, but this is the
+smallest change.
+
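+As an illustrative sketch of the technique (standalone code with made-up
+names, not the conntrack structure itself), an empty struct member can
+serve as a zero-size marker for the start of the region to be zeroed,
+avoiding the zero-length array entirely:
+
+    struct foo {
+            int keep;                 /* not cleared */
+            struct { } __init_start;  /* zero-size marker (GNU C) */
+            int a;                    /* cleared */
+            int b;                    /* cleared */
+    };
+
+    static void foo_reset(struct foo *f)
+    {
+            memset(&f->__init_start, 0,
+                   sizeof(*f) - offsetof(struct foo, __init_start));
+    }
+
+GCC gives the empty struct a size of zero, so offsetof() on it marks the
+first byte to be cleared without triggering the zero-length-bounds
+warning.
+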
+Fixes: c41884ce0562 ("netfilter: conntrack: avoid zeroing timer")
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/netfilter/nf_conntrack.h | 2 +-
+ net/netfilter/nf_conntrack_core.c | 4 ++--
+ 2 files changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
+index 9f551f3b69c65..90690e37a56f0 100644
+--- a/include/net/netfilter/nf_conntrack.h
++++ b/include/net/netfilter/nf_conntrack.h
+@@ -87,7 +87,7 @@ struct nf_conn {
+ struct hlist_node nat_bysource;
+ #endif
+ /* all members below initialized via memset */
+- u8 __nfct_init_offset[0];
++ struct { } __nfct_init_offset;
+
+ /* If we were expected by an expectation, this will be it */
+ struct nf_conn *master;
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 1927fc296f951..6a978d7e0d639 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -1517,9 +1517,9 @@ __nf_conntrack_alloc(struct net *net,
+ ct->status = 0;
+ ct->timeout = 0;
+ write_pnet(&ct->ct_net, net);
+- memset(&ct->__nfct_init_offset[0], 0,
++ memset(&ct->__nfct_init_offset, 0,
+ offsetof(struct nf_conn, proto) -
+- offsetof(struct nf_conn, __nfct_init_offset[0]));
++ offsetof(struct nf_conn, __nfct_init_offset));
+
+ nf_ct_zone_add(ct, zone);
+
+--
+2.20.1
+
--- /dev/null
+From 97c51489e1cf1bbce2834f7f836761838f176b92 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 10 May 2020 14:28:07 +0200
+Subject: netfilter: conntrack: fix infinite loop on rmmod
+
+From: Florian Westphal <fw@strlen.de>
+
+[ Upstream commit 54ab49fde95605a1077f759ce454d94e84b5ca45 ]
+
+'rmmod nf_conntrack' can hang forever, because the netns exit
+gets stuck in nf_conntrack_cleanup_net_list():
+
+i_see_dead_people:
+ busy = 0;
+ list_for_each_entry(net, net_exit_list, exit_list) {
+ nf_ct_iterate_cleanup(kill_all, net, 0, 0);
+ if (atomic_read(&net->ct.count) != 0)
+ busy = 1;
+ }
+ if (busy) {
+ schedule();
+ goto i_see_dead_people;
+ }
+
+When nf_ct_iterate_cleanup iterates the conntrack table, all nf_conn
+structures can be found twice:
+once for the original tuple and once for the conntrack's reply tuple.
+
+get_next_corpse() only calls the iterator when the entry is
+in original direction -- the idea was to avoid unneeded invocations
+of the iterator callback.
+
+When support for clashing entries was added, the assumption that
+all nf_conn objects are added twice (once for the original tuple, once
+for the reply tuple) no longer holds -- NF_CLASH_BIT entries are only
+added in the non-clashing reply direction.
+
+Thus, if at least one NF_CLASH entry is in the list then
+nf_conntrack_cleanup_net_list() always skips it completely.
+
+During normal netns destruction, this causes a hang of several
+seconds, until the gc worker removes the entry (NF_CLASH entries
+always have a 1 second timeout).
+
+But in the rmmod case, the gc worker has already been stopped, so
+ct.count never becomes 0.
+
+We can fix this in two ways:
+
+1. Add a second test for CLASH_BIT and call iterator for those
+ entries as well, or:
+2. Skip the original tuple direction and use the reply tuple.
+
+2) is simpler, so do that.
+
+Fixes: 6a757c07e51f80ac ("netfilter: conntrack: allow insertion of clashing entries")
+Reported-by: Chen Yi <yiche@redhat.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_conntrack_core.c | 13 ++++++++++++-
+ 1 file changed, 12 insertions(+), 1 deletion(-)
+
+diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
+index 6a978d7e0d639..d11a583481334 100644
+--- a/net/netfilter/nf_conntrack_core.c
++++ b/net/netfilter/nf_conntrack_core.c
+@@ -2137,8 +2137,19 @@ get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
+ nf_conntrack_lock(lockp);
+ if (*bucket < nf_conntrack_htable_size) {
+ hlist_nulls_for_each_entry(h, n, &nf_conntrack_hash[*bucket], hnnode) {
+- if (NF_CT_DIRECTION(h) != IP_CT_DIR_ORIGINAL)
++ if (NF_CT_DIRECTION(h) != IP_CT_DIR_REPLY)
+ continue;
++ /* All nf_conn objects are added to hash table twice, one
++ * for original direction tuple, once for the reply tuple.
++ *
++ * Exception: In the IPS_NAT_CLASH case, only the reply
++ * tuple is added (the original tuple already existed for
++ * a different object).
++ *
++ * We only need to call the iterator once for each
++ * conntrack, so we just use the 'reply' direction
++ * tuple while iterating.
++ */
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ if (iter(ct, data))
+ goto found;
+--
+2.20.1
+
--- /dev/null
+From 535fd215b53c4e880184f85b3eb6355885fd4eda Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 May 2020 11:54:31 +0200
+Subject: netfilter: flowtable: set NF_FLOW_TEARDOWN flag on entry expiration
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 9ed81c8e0deb7bd2aa0d69371e4a0f9a7b31205d ]
+
+If the flow timer expires, the gc sets the NF_FLOW_TEARDOWN flag.
+Otherwise, the flowtable software path might race to refresh the
+timeout, leaving the state machine in an inconsistent state.
+
+Fixes: c29f74e0df7a ("netfilter: nf_flow_table: hardware offload support")
+Reported-by: Paul Blakey <paulb@mellanox.com>
+Reviewed-by: Roi Dayan <roid@mellanox.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_flow_table_core.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
+index 70ebebaf5bc12..0ee78a1663786 100644
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -271,7 +271,7 @@ static void flow_offload_del(struct nf_flowtable *flow_table,
+
+ if (nf_flow_has_expired(flow))
+ flow_offload_fixup_ct(flow->ct);
+- else if (test_bit(NF_FLOW_TEARDOWN, &flow->flags))
++ else
+ flow_offload_fixup_ct_timeout(flow->ct);
+
+ flow_offload_free(flow);
+@@ -348,8 +348,10 @@ static void nf_flow_offload_gc_step(struct flow_offload *flow, void *data)
+ {
+ struct nf_flowtable *flow_table = data;
+
+- if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct) ||
+- test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
++ if (nf_flow_has_expired(flow) || nf_ct_is_dying(flow->ct))
++ set_bit(NF_FLOW_TEARDOWN, &flow->flags);
++
++ if (test_bit(NF_FLOW_TEARDOWN, &flow->flags)) {
+ if (test_bit(NF_FLOW_HW, &flow->flags)) {
+ if (!test_bit(NF_FLOW_HW_DYING, &flow->flags))
+ nf_flow_offload_del(flow_table, flow);
+--
+2.20.1
+
--- /dev/null
+From d73d2451cf4090ed603955b37f0205b65ce8b438 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 May 2020 15:31:41 +0200
+Subject: netfilter: nft_set_rbtree: Add missing expired checks
+
+From: Phil Sutter <phil@nwl.cc>
+
+[ Upstream commit 340eaff651160234bdbce07ef34b92a8e45cd540 ]
+
+Expired intervals would still match and be dumped to user space until
+garbage collection wiped them out. Make sure they stop matching and
+disappear (from users' perspective) as soon as they expire.
+
+Fixes: 8d8540c4f5e03 ("netfilter: nft_set_rbtree: add timeout support")
+Signed-off-by: Phil Sutter <phil@nwl.cc>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nft_set_rbtree.c | 11 +++++++++++
+ 1 file changed, 11 insertions(+)
+
+diff --git a/net/netfilter/nft_set_rbtree.c b/net/netfilter/nft_set_rbtree.c
+index 46d976969ca30..accbb54c2b714 100644
+--- a/net/netfilter/nft_set_rbtree.c
++++ b/net/netfilter/nft_set_rbtree.c
+@@ -79,6 +79,10 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+ parent = rcu_dereference_raw(parent->rb_left);
+ continue;
+ }
++
++ if (nft_set_elem_expired(&rbe->ext))
++ return false;
++
+ if (nft_rbtree_interval_end(rbe)) {
+ if (nft_set_is_anonymous(set))
+ return false;
+@@ -94,6 +98,7 @@ static bool __nft_rbtree_lookup(const struct net *net, const struct nft_set *set
+
+ if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+ nft_set_elem_active(&interval->ext, genmask) &&
++ !nft_set_elem_expired(&interval->ext) &&
+ nft_rbtree_interval_start(interval)) {
+ *ext = &interval->ext;
+ return true;
+@@ -154,6 +159,9 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
+ continue;
+ }
+
++ if (nft_set_elem_expired(&rbe->ext))
++ return false;
++
+ if (!nft_set_ext_exists(&rbe->ext, NFT_SET_EXT_FLAGS) ||
+ (*nft_set_ext_flags(&rbe->ext) & NFT_SET_ELEM_INTERVAL_END) ==
+ (flags & NFT_SET_ELEM_INTERVAL_END)) {
+@@ -170,6 +178,7 @@ static bool __nft_rbtree_get(const struct net *net, const struct nft_set *set,
+
+ if (set->flags & NFT_SET_INTERVAL && interval != NULL &&
+ nft_set_elem_active(&interval->ext, genmask) &&
++ !nft_set_elem_expired(&interval->ext) &&
+ ((!nft_rbtree_interval_end(interval) &&
+ !(flags & NFT_SET_ELEM_INTERVAL_END)) ||
+ (nft_rbtree_interval_end(interval) &&
+@@ -418,6 +427,8 @@ static void nft_rbtree_walk(const struct nft_ctx *ctx,
+
+ if (iter->count < iter->skip)
+ goto cont;
++ if (nft_set_elem_expired(&rbe->ext))
++ goto cont;
+ if (!nft_set_elem_active(&rbe->ext, iter->genmask))
+ goto cont;
+
+--
+2.20.1
+
--- /dev/null
+From 26aa6f25962141595248a09ab65c576240b746cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Apr 2020 16:14:42 -0400
+Subject: NFS: Fix fscache super_cookie allocation
+
+From: Dave Wysochanski <dwysocha@redhat.com>
+
+[ Upstream commit 15751612734ca0c419ac43ce986c9badcb5e2829 ]
+
+Commit f2aedb713c28 ("NFS: Add fs_context support.") reworked
+NFS mount code paths for fs_context support which included
+super_block initialization. In the process there was an extra
+return left in the code and so we never call
+nfs_fscache_get_super_cookie even if 'fsc' is given on as mount
+option. In addition, there is an extra check inside
+nfs_fscache_get_super_cookie for the NFS_OPTION_FSCACHE which
+is unnecessary since the only caller nfs_get_cache_cookie
+checks this flag.
+
+Fixes: f2aedb713c28 ("NFS: Add fs_context support.")
+Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/fscache.c | 2 --
+ fs/nfs/super.c | 1 -
+ 2 files changed, 3 deletions(-)
+
+diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
+index 8eff1fd806b1c..f517184156068 100644
+--- a/fs/nfs/fscache.c
++++ b/fs/nfs/fscache.c
+@@ -118,8 +118,6 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
+
+ nfss->fscache_key = NULL;
+ nfss->fscache = NULL;
+- if (!(nfss->options & NFS_OPTION_FSCACHE))
+- return;
+ if (!uniq) {
+ uniq = "";
+ ulen = 1;
+diff --git a/fs/nfs/super.c b/fs/nfs/super.c
+index dada09b391c65..c0d5240b8a0ac 100644
+--- a/fs/nfs/super.c
++++ b/fs/nfs/super.c
+@@ -1154,7 +1154,6 @@ static void nfs_get_cache_cookie(struct super_block *sb,
+ uniq = ctx->fscache_uniq;
+ ulen = strlen(ctx->fscache_uniq);
+ }
+- return;
+ }
+
+ nfs_fscache_get_super_cookie(sb, uniq, ulen);
+--
+2.20.1
+
--- /dev/null
+From 30f35be37b459100cd4c4fc9ca4ab8b96f85479f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Apr 2020 16:14:41 -0400
+Subject: NFS: Fix fscache super_cookie index_key from changing after umount
+
+From: Dave Wysochanski <dwysocha@redhat.com>
+
+[ Upstream commit d9bfced1fbcb35b28d8fbed4e785d2807055ed2b ]
+
+Commit 402cb8dda949 ("fscache: Attach the index key and aux data to
+the cookie") added the index_key and index_key_len parameters to
+fscache_acquire_cookie(), and updated the callers in the NFS client.
+One of the callers was inside nfs_fscache_get_super_cookie()
+and was changed to use the full struct nfs_fscache_key as the
+index_key. However, a couple members of this structure contain
+pointers and thus will change each time the same NFS share is
+remounted. Since index_key is used for fscache_cookie->key_hash
+and this subsequently is used to compare cookies, the effectiveness
+of fscache with NFS is reduced to the point at which a umount
+occurs. Any subsequent remount of the same share will cause a
+unique NFS super_block index_key and key_hash to be generated for
+the same data, rendering any prior fscache data unable to be
+found. A simple reproducer demonstrates the problem.
+
+1. Mount share with 'fsc', create a file, drop page cache
+systemctl start cachefilesd
+mount -o vers=3,fsc 127.0.0.1:/export /mnt
+dd if=/dev/zero of=/mnt/file1.bin bs=4096 count=1
+echo 3 > /proc/sys/vm/drop_caches
+
+2. Read file into page cache and fscache, then unmount
+dd if=/mnt/file1.bin of=/dev/null bs=4096 count=1
+umount /mnt
+
+3. Remount and re-read which should come from fscache
+mount -o vers=3,fsc 127.0.0.1:/export /mnt
+echo 3 > /proc/sys/vm/drop_caches
+dd if=/mnt/file1.bin of=/dev/null bs=4096 count=1
+
+4. Check for READ ops in mountstats - there should be none
+grep READ: /proc/self/mountstats
+
+Looking at the history and the removed function, nfs_super_get_key(),
+we should only use nfs_fscache_key.key plus any uniquifier for
+the fscache index_key.
+
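+To see why the old key breaks cache matching, consider a simplified
+sketch of such a key structure (hypothetical field names, not the exact
+layout of struct nfs_fscache_key):
+
+    struct cache_key {
+            struct rb_node     node;   /* list linkage - per mount   */
+            struct nfs_client *clp;    /* kernel pointer - per mount */
+            struct stable_key  key;    /* server address, fsid, ...  */
+    };
+
+Hashing the whole structure mixes the transient pointer and linkage
+values into fscache_cookie->key_hash, so a remount of the same export
+never produces the same hash again; hashing only the stable 'key'
+member (plus the uniquifier) keeps the hash identical across mounts.
+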
+Fixes: 402cb8dda949 ("fscache: Attach the index key and aux data to the cookie")
+Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/fscache.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
+index 1abf126c2df45..8eff1fd806b1c 100644
+--- a/fs/nfs/fscache.c
++++ b/fs/nfs/fscache.c
+@@ -188,7 +188,8 @@ void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int
+ /* create a cache index for looking up filehandles */
+ nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
+ &nfs_fscache_super_index_def,
+- key, sizeof(*key) + ulen,
++ &key->key,
++ sizeof(key->key) + ulen,
+ NULL, 0,
+ nfss, 0, true);
+ dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
+--
+2.20.1
+
--- /dev/null
+From 7276c120114cc14209c66eccb0f0140a9fdc09b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 11 May 2020 10:02:48 -0400
+Subject: nfs: fix NULL deference in nfs4_get_valid_delegation
+
+From: J. Bruce Fields <bfields@redhat.com>
+
+[ Upstream commit 29fe839976266bc7c55b927360a1daae57477723 ]
+
+We add the new state to the nfsi->open_states list, making it
+potentially visible to other threads, before we've finished initializing
+it.
+
+That wasn't a problem when all the readers were also taking the i_lock
+(as we do here), but since we switched to RCU, there's now a possibility
+that a reader could see the partially initialized state.
+
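+The general rule being applied (shown here as a generic sketch, not the
+exact NFS code) is that an object must be fully initialized before it
+is published on an RCU-protected list:
+
+    /* Wrong: lockless readers may observe obj->inode == NULL */
+    list_add_rcu(&obj->node, &head);
+    obj->inode = inode;
+
+    /* Right: publish only once every field a reader needs is valid */
+    obj->inode = inode;
+    list_add_rcu(&obj->node, &head);
+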
+Symptoms observed were a crash when another thread called
+nfs4_get_valid_delegation() on a NULL inode, resulting in an oops like:
+
+ BUG: unable to handle page fault for address: ffffffffffffffb0 ...
+ RIP: 0010:nfs4_get_valid_delegation+0x6/0x30 [nfsv4] ...
+ Call Trace:
+ nfs4_open_prepare+0x80/0x1c0 [nfsv4]
+ __rpc_execute+0x75/0x390 [sunrpc]
+ ? finish_task_switch+0x75/0x260
+ rpc_async_schedule+0x29/0x40 [sunrpc]
+ process_one_work+0x1ad/0x370
+ worker_thread+0x30/0x390
+ ? create_worker+0x1a0/0x1a0
+ kthread+0x10c/0x130
+ ? kthread_park+0x80/0x80
+ ret_from_fork+0x22/0x30
+
+Fixes: 9ae075fdd190 ("NFSv4: Convert open state lookup to use RCU")
+Reviewed-by: Seiichi Ikarashi <s.ikarashi@fujitsu.com>
+Tested-by: Daisuke Matsuda <matsuda-daisuke@fujitsu.com>
+Tested-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
+Signed-off-by: J. Bruce Fields <bfields@redhat.com>
+Cc: stable@vger.kernel.org # v4.20+
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/nfs4state.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
+index f7723d221945b..459c7fb5d103a 100644
+--- a/fs/nfs/nfs4state.c
++++ b/fs/nfs/nfs4state.c
+@@ -734,9 +734,9 @@ nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
+ state = new;
+ state->owner = owner;
+ atomic_inc(&owner->so_count);
+- list_add_rcu(&state->inode_states, &nfsi->open_states);
+ ihold(inode);
+ state->inode = inode;
++ list_add_rcu(&state->inode_states, &nfsi->open_states);
+ spin_unlock(&inode->i_lock);
+ /* Note: The reclaim code dictates that we add stateless
+ * and read-only stateids to the end of the list */
+--
+2.20.1
+
--- /dev/null
+From 2f217b6eaa2f80a33e71a2cd30938b667f99cd68 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 14 May 2020 16:09:40 -0400
+Subject: NFSv3: fix rpc receive buffer size for MOUNT call
+
+From: Olga Kornievskaia <olga.kornievskaia@gmail.com>
+
+[ Upstream commit 8eed292bc8cbf737e46fb1c119d4c8f6dcb00650 ]
+
+Prior to commit e3d3ab64dd66 ("SUNRPC: Use au_rslack when
+computing reply buffer size"), there was enough slack in the reply
+buffer to accommodate filehandles of size 60 bytes. However, the real
+problem was that the reply buffer size for the MOUNT operation was
+not correctly calculated: the receive buffer size used the filehandle
+size for NFSv2 (32 bytes), which is much smaller than the maximum
+filehandle size allowed for v3 mounts.
+
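+For a rough sense of the numbers (assuming the usual NFS2_FHSIZE = 32
+and NFS3_FHSIZE = 64 byte limits), XDR_QUADLEN(32) reserves only 8
+32-bit words for the filehandle, whereas XDR_QUADLEN(64) reserves 16.
+A 60-byte v3 filehandle therefore needs 15 words and no longer fits in
+a v2-sized reply buffer once the extra slack is gone.
+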
+Fix the reply buffer size (decode arguments size) for the MNT command.
+
+Fixes: 2c94b8eca1a2 ("SUNRPC: Use au_rslack when computing reply buffer size")
+Signed-off-by: Olga Kornievskaia <kolga@netapp.com>
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/mount_clnt.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/nfs/mount_clnt.c b/fs/nfs/mount_clnt.c
+index 35c8cb2d76372..dda5c3e65d8d6 100644
+--- a/fs/nfs/mount_clnt.c
++++ b/fs/nfs/mount_clnt.c
+@@ -30,6 +30,7 @@
+ #define encode_dirpath_sz (1 + XDR_QUADLEN(MNTPATHLEN))
+ #define MNT_status_sz (1)
+ #define MNT_fhandle_sz XDR_QUADLEN(NFS2_FHSIZE)
++#define MNT_fhandlev3_sz XDR_QUADLEN(NFS3_FHSIZE)
+ #define MNT_authflav3_sz (1 + NFS_MAX_SECFLAVORS)
+
+ /*
+@@ -37,7 +38,7 @@
+ */
+ #define MNT_enc_dirpath_sz encode_dirpath_sz
+ #define MNT_dec_mountres_sz (MNT_status_sz + MNT_fhandle_sz)
+-#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandle_sz + \
++#define MNT_dec_mountres3_sz (MNT_status_sz + MNT_fhandlev3_sz + \
+ MNT_authflav3_sz)
+
+ /*
+--
+2.20.1
+
--- /dev/null
+From 8c69881fa24f0bf89cabd12018afc3a981a5cdba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 16 Apr 2020 06:06:08 -0400
+Subject: NFSv4: Fix fscache cookie aux_data to ensure change_attr is included
+
+From: Dave Wysochanski <dwysocha@redhat.com>
+
+[ Upstream commit 50eaa652b54df1e2b48dc398d9e6114c9ed080eb ]
+
+Commit 402cb8dda949 ("fscache: Attach the index key and aux data to
+the cookie") added the aux_data and aux_data_len to parameters to
+fscache_acquire_cookie(), and updated the callers in the NFS client.
+In the process it modified the aux_data to include the change_attr,
+but missed adding change_attr in a couple of places where aux_data was
+used. Specifically, when a file is opened without the change_attr in
+the aux_data, the subsequent attempt to look up an object fails inside
+cachefiles_check_object_xattr() with -116, because
+nfs_fscache_inode_check_aux() fails the memcmp on the auxdata and
+returns FSCACHE_CHECKAUX_OBSOLETE.
+
+Fix this by adding nfs_fscache_update_auxdata() to set the auxdata
+from all relevant fields in the inode, including the change_attr.
+
+Fixes: 402cb8dda949 ("fscache: Attach the index key and aux data to the cookie")
+Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/nfs/fscache.c | 34 ++++++++++++++++------------------
+ 1 file changed, 16 insertions(+), 18 deletions(-)
+
+diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
+index f517184156068..a60df88efc404 100644
+--- a/fs/nfs/fscache.c
++++ b/fs/nfs/fscache.c
+@@ -225,6 +225,19 @@ void nfs_fscache_release_super_cookie(struct super_block *sb)
+ }
+ }
+
++static void nfs_fscache_update_auxdata(struct nfs_fscache_inode_auxdata *auxdata,
++ struct nfs_inode *nfsi)
++{
++ memset(auxdata, 0, sizeof(*auxdata));
++ auxdata->mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
++ auxdata->mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
++ auxdata->ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
++ auxdata->ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
++
++ if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
++ auxdata->change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
++}
++
+ /*
+ * Initialise the per-inode cache cookie pointer for an NFS inode.
+ */
+@@ -238,14 +251,7 @@ void nfs_fscache_init_inode(struct inode *inode)
+ if (!(nfss->fscache && S_ISREG(inode->i_mode)))
+ return;
+
+- memset(&auxdata, 0, sizeof(auxdata));
+- auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
+- auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+- auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
+- auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
+-
+- if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
+- auxdata.change_attr = inode_peek_iversion_raw(&nfsi->vfs_inode);
++ nfs_fscache_update_auxdata(&auxdata, nfsi);
+
+ nfsi->fscache = fscache_acquire_cookie(NFS_SB(inode->i_sb)->fscache,
+ &nfs_fscache_inode_object_def,
+@@ -265,11 +271,7 @@ void nfs_fscache_clear_inode(struct inode *inode)
+
+ dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n", nfsi, cookie);
+
+- memset(&auxdata, 0, sizeof(auxdata));
+- auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
+- auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+- auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
+- auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
++ nfs_fscache_update_auxdata(&auxdata, nfsi);
+ fscache_relinquish_cookie(cookie, &auxdata, false);
+ nfsi->fscache = NULL;
+ }
+@@ -309,11 +311,7 @@ void nfs_fscache_open_file(struct inode *inode, struct file *filp)
+ if (!fscache_cookie_valid(cookie))
+ return;
+
+- memset(&auxdata, 0, sizeof(auxdata));
+- auxdata.mtime_sec = nfsi->vfs_inode.i_mtime.tv_sec;
+- auxdata.mtime_nsec = nfsi->vfs_inode.i_mtime.tv_nsec;
+- auxdata.ctime_sec = nfsi->vfs_inode.i_ctime.tv_sec;
+- auxdata.ctime_nsec = nfsi->vfs_inode.i_ctime.tv_nsec;
++ nfs_fscache_update_auxdata(&auxdata, nfsi);
+
+ if (inode_is_open_for_write(inode)) {
+ dfprintk(FSCACHE, "NFS: nfsi 0x%p disabling cache\n", nfsi);
+--
+2.20.1
+
--- /dev/null
+From 6029e3722277c1a08668e2d2a581a06ea95e79d2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 11 Dec 2019 19:32:54 +0200
+Subject: pinctrl: baytrail: Enable pin configuration setting for GPIO chip
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit ccd025eaddaeb99e982029446197c544252108e2 ]
+
+It appears that pin configuration for the GPIO chip hasn't been enabled
+yet due to the absence of a ->set_config() callback.
+
+Enable it here for Intel Baytrail.
+
+Fixes: c501d0b149de ("pinctrl: baytrail: Add pin control operations")
+Depends-on: 2956b5d94a76 ("pinctrl / gpio: Introduce .set_config() callback for GPIO chips")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/intel/pinctrl-baytrail.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/pinctrl/intel/pinctrl-baytrail.c b/drivers/pinctrl/intel/pinctrl-baytrail.c
+index b409642f168d6..9b821c9cbd16a 100644
+--- a/drivers/pinctrl/intel/pinctrl-baytrail.c
++++ b/drivers/pinctrl/intel/pinctrl-baytrail.c
+@@ -1286,6 +1286,7 @@ static const struct gpio_chip byt_gpio_chip = {
+ .direction_output = byt_gpio_direction_output,
+ .get = byt_gpio_get,
+ .set = byt_gpio_set,
++ .set_config = gpiochip_generic_config,
+ .dbg_show = byt_gpio_dbg_show,
+ };
+
+--
+2.20.1
+
--- /dev/null
+From dea485aac6a2dc134b707b5a80e4831be8f3a74f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 Apr 2020 12:11:54 +0800
+Subject: pinctrl: cherryview: Add missing spinlock usage in
+ chv_gpio_irq_handler
+
+From: Grace Kao <grace.kao@intel.com>
+
+[ Upstream commit 69388e15f5078c961b9e5319e22baea4c57deff1 ]
+
+According to the Braswell NDA Specification Update (#557593),
+concurrent read accesses may return 0xffffffff and write instructions
+may be dropped. Protect the register read in the IRQ handler with the
+driver spinlock, mirroring the earlier fix in commit
+cdca06e4e859 ("pinctrl: baytrail: Add missing spinlock usage in
+byt_gpio_irq_handler").
+
+Fixes: 0bd50d719b00 ("pinctrl: cherryview: prevent concurrent access to GPIO controllers")
+Signed-off-by: Grace Kao <grace.kao@intel.com>
+Reported-by: Brian Norris <briannorris@chromium.org>
+Reviewed-by: Brian Norris <briannorris@chromium.org>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/intel/pinctrl-cherryview.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c
+index 4c74fdde576d0..1093a6105d40c 100644
+--- a/drivers/pinctrl/intel/pinctrl-cherryview.c
++++ b/drivers/pinctrl/intel/pinctrl-cherryview.c
+@@ -1479,11 +1479,15 @@ static void chv_gpio_irq_handler(struct irq_desc *desc)
+ struct chv_pinctrl *pctrl = gpiochip_get_data(gc);
+ struct irq_chip *chip = irq_desc_get_chip(desc);
+ unsigned long pending;
++ unsigned long flags;
+ u32 intr_line;
+
+ chained_irq_enter(chip, desc);
+
++ raw_spin_lock_irqsave(&chv_lock, flags);
+ pending = readl(pctrl->regs + CHV_INTSTAT);
++ raw_spin_unlock_irqrestore(&chv_lock, flags);
++
+ for_each_set_bit(intr_line, &pending, pctrl->community->nirqs) {
+ unsigned int irq, offset;
+
+--
+2.20.1
+
--- /dev/null
+From de08290fb287af9c6fb9130a5e4e4dd0b561c7b9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Apr 2020 02:37:26 +0200
+Subject: pinctrl: qcom: fix wrong write in update_dual_edge
+
+From: Ansuel Smith <ansuelsmth@gmail.com>
+
+[ Upstream commit 90bcb0c3ca0809d1ed358bfbf838df4b3d4e58e0 ]
+
+Fix a typo in the readl/writel accessor conversion where val is used
+instead of pol, changing the behavior of the original code.
+
+Cc: stable@vger.kernel.org
+Fixes: 6c73698904aa ("pinctrl: qcom: Introduce readl/writel accessors")
+Signed-off-by: Ansuel Smith <ansuelsmth@gmail.com>
+Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
+Link: https://lore.kernel.org/r/20200414003726.25347-1-ansuelsmth@gmail.com
+Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/qcom/pinctrl-msm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
+index 1a948c3f54b7c..9f1c9951949ea 100644
+--- a/drivers/pinctrl/qcom/pinctrl-msm.c
++++ b/drivers/pinctrl/qcom/pinctrl-msm.c
+@@ -692,7 +692,7 @@ static void msm_gpio_update_dual_edge_pos(struct msm_pinctrl *pctrl,
+
+ pol = msm_readl_intr_cfg(pctrl, g);
+ pol ^= BIT(g->intr_polarity_bit);
+- msm_writel_intr_cfg(val, pctrl, g);
++ msm_writel_intr_cfg(pol, pctrl, g);
+
+ val2 = msm_readl_io(pctrl, g) & BIT(g->in_bit);
+ intstat = msm_readl_intr_status(pctrl, g);
+--
+2.20.1
+
--- /dev/null
+From 4dadea572c47f6dd74f4319290d9c75009975fba Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 1 Apr 2020 11:53:00 +0300
+Subject: pinctrl: sunrisepoint: Fix PAD lock register offset for SPT-H
+
+From: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+
+[ Upstream commit 6b7275c87717652daace4c0b8131eb184c7d7516 ]
+
+It appears that the SPT-H variant has a different offset for the PAD
+locking registers. Fix it here.
+
+Fixes: 551fa5801ef1 ("pinctrl: intel: sunrisepoint: Add Intel Sunrisepoint-H support")
+Signed-off-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/pinctrl/intel/pinctrl-sunrisepoint.c | 15 ++++++++-------
+ 1 file changed, 8 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+index 330c8f077b73a..4d7a86a5a37b0 100644
+--- a/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
++++ b/drivers/pinctrl/intel/pinctrl-sunrisepoint.c
+@@ -15,17 +15,18 @@
+
+ #include "pinctrl-intel.h"
+
+-#define SPT_PAD_OWN 0x020
+-#define SPT_PADCFGLOCK 0x0a0
+-#define SPT_HOSTSW_OWN 0x0d0
+-#define SPT_GPI_IS 0x100
+-#define SPT_GPI_IE 0x120
++#define SPT_PAD_OWN 0x020
++#define SPT_H_PADCFGLOCK 0x090
++#define SPT_LP_PADCFGLOCK 0x0a0
++#define SPT_HOSTSW_OWN 0x0d0
++#define SPT_GPI_IS 0x100
++#define SPT_GPI_IE 0x120
+
+ #define SPT_COMMUNITY(b, s, e) \
+ { \
+ .barno = (b), \
+ .padown_offset = SPT_PAD_OWN, \
+- .padcfglock_offset = SPT_PADCFGLOCK, \
++ .padcfglock_offset = SPT_LP_PADCFGLOCK, \
+ .hostown_offset = SPT_HOSTSW_OWN, \
+ .is_offset = SPT_GPI_IS, \
+ .ie_offset = SPT_GPI_IE, \
+@@ -47,7 +48,7 @@
+ { \
+ .barno = (b), \
+ .padown_offset = SPT_PAD_OWN, \
+- .padcfglock_offset = SPT_PADCFGLOCK, \
++ .padcfglock_offset = SPT_H_PADCFGLOCK, \
+ .hostown_offset = SPT_HOSTSW_OWN, \
+ .is_offset = SPT_GPI_IS, \
+ .ie_offset = SPT_GPI_IE, \
+--
+2.20.1
+
--- /dev/null
+From c7a24d96c6273c55d7cd765051ee7606d9076ddf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 7 May 2020 09:29:42 +0300
+Subject: RDMA/core: Fix double put of resource
+
+From: Maor Gottlieb <maorg@mellanox.com>
+
+[ Upstream commit 50bbe3d34fea74b7c0fabe553c40c2f4a48bb9c3 ]
+
+Do not decrease the reference count of the resource tracker object twice
+in the error flow of res_get_common_doit().
+
+Fixes: c5dfe0ea6ffa ("RDMA/nldev: Add resource tracker doit callback")
+Link: https://lore.kernel.org/r/20200507062942.98305-1-leon@kernel.org
+Signed-off-by: Maor Gottlieb <maorg@mellanox.com>
+Signed-off-by: Leon Romanovsky <leonro@mellanox.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/core/nldev.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/infiniband/core/nldev.c b/drivers/infiniband/core/nldev.c
+index 9eec26d10d7b1..e16105be2eb23 100644
+--- a/drivers/infiniband/core/nldev.c
++++ b/drivers/infiniband/core/nldev.c
+@@ -1292,11 +1292,10 @@ static int res_get_common_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
+ has_cap_net_admin = netlink_capable(skb, CAP_NET_ADMIN);
+
+ ret = fill_func(msg, has_cap_net_admin, res, port);
+-
+- rdma_restrack_put(res);
+ if (ret)
+ goto err_free;
+
++ rdma_restrack_put(res);
+ nlmsg_end(msg, nlh);
+ ib_device_put(device);
+ return rdma_nl_unicast(sock_net(skb->sk), msg, NETLINK_CB(skb).portid);
+--
+2.20.1
+
--- /dev/null
+From 84e748b0bf219a39d5e2504801e0e66d9ceb7c4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 May 2020 00:26:08 +0530
+Subject: RDMA/iw_cxgb4: Fix incorrect function parameters
+
+From: Potnuri Bharat Teja <bharat@chelsio.com>
+
+[ Upstream commit c8b1f340e54158662acfa41d6dee274846370282 ]
+
+While reading the TCB field in t4_tcb_get_field32(), the wrong mask is
+passed as a parameter. This eventually leads the driver to a kernel
+panic or application segfault caused by access to an illegal SRQ index
+while flushing the SRQ completions during connection teardown.
+
+Fixes: 11a27e2121a5 ("iw_cxgb4: complete the cached SRQ buffers")
+Link: https://lore.kernel.org/r/20200511185608.5202-1-bharat@chelsio.com
+Signed-off-by: Potnuri Bharat Teja <bharat@chelsio.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/hw/cxgb4/cm.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
+index d69dece3b1d54..30e08bcc9afb5 100644
+--- a/drivers/infiniband/hw/cxgb4/cm.c
++++ b/drivers/infiniband/hw/cxgb4/cm.c
+@@ -2891,8 +2891,7 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
+ srqidx = ABORT_RSS_SRQIDX_G(
+ be32_to_cpu(req->srqidx_status));
+ if (srqidx) {
+- complete_cached_srq_buffers(ep,
+- req->srqidx_status);
++ complete_cached_srq_buffers(ep, srqidx);
+ } else {
+ /* Hold ep ref until finish_peer_abort() */
+ c4iw_get_ep(&ep->com);
+@@ -3878,8 +3877,8 @@ static int read_tcb_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
+ return 0;
+ }
+
+- ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_W,
+- TCB_RQ_START_S);
++ ep->srqe_idx = t4_tcb_get_field32(tcb, TCB_RQ_START_W, TCB_RQ_START_M,
++ TCB_RQ_START_S);
+ cleanup:
+ pr_debug("ep %p tid %u %016x\n", ep, ep->hwtid, ep->srqe_idx);
+
+--
+2.20.1
+
--- /dev/null
+From baa68bfb90d02ecc56e8f9a34442aab892a69b9c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 26 Apr 2020 00:35:45 +0100
+Subject: RDMA/rxe: Always return ERR_PTR from rxe_create_mmap_info()
+
+From: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+
+[ Upstream commit bb43c8e382e5da0ee253e3105d4099820ff4d922 ]
+
+The commit below modified rxe_create_mmap_info() to return ERR_PTR values
+but didn't update the callers to handle them. Modify rxe_create_mmap_info()
+to only return ERR_PTR values and fix all error checking after
+rxe_create_mmap_info() is called.
+
+Ensure that all other exit paths properly set the error return.
+
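+As a minimal sketch of the convention being enforced (hypothetical
+function, not the rxe code), such a helper returns either a valid
+pointer or an errno encoded with ERR_PTR(), and callers must check it
+with IS_ERR()/PTR_ERR() rather than against NULL:
+
+    struct thing *thing_create(void)
+    {
+            struct thing *t = kmalloc(sizeof(*t), GFP_KERNEL);
+
+            if (!t)
+                    return ERR_PTR(-ENOMEM);  /* encode errno in pointer */
+            return t;
+    }
+
+    t = thing_create();
+    if (IS_ERR(t))
+            return PTR_ERR(t);                /* recover -ENOMEM etc. */
+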
+Fixes: ff23dfa13457 ("IB: Pass only ib_udata in function prototypes")
+Link: https://lore.kernel.org/r/20200425233545.17210-1-sudipm.mukherjee@gmail.com
+Link: https://lore.kernel.org/r/20200511183742.GB225608@mwanda
+Cc: stable@vger.kernel.org [5.4+]
+Signed-off-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Dan Carpenter <dan.carpenter@oracle.com>
+Signed-off-by: Jason Gunthorpe <jgg@mellanox.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/infiniband/sw/rxe/rxe_mmap.c | 2 +-
+ drivers/infiniband/sw/rxe/rxe_queue.c | 11 +++++++----
+ 2 files changed, 8 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_mmap.c b/drivers/infiniband/sw/rxe/rxe_mmap.c
+index 48f48122ddcb8..6a413d73b95dd 100644
+--- a/drivers/infiniband/sw/rxe/rxe_mmap.c
++++ b/drivers/infiniband/sw/rxe/rxe_mmap.c
+@@ -151,7 +151,7 @@ struct rxe_mmap_info *rxe_create_mmap_info(struct rxe_dev *rxe, u32 size,
+
+ ip = kmalloc(sizeof(*ip), GFP_KERNEL);
+ if (!ip)
+- return NULL;
++ return ERR_PTR(-ENOMEM);
+
+ size = PAGE_ALIGN(size);
+
+diff --git a/drivers/infiniband/sw/rxe/rxe_queue.c b/drivers/infiniband/sw/rxe/rxe_queue.c
+index ff92704de32ff..245040c3a35d0 100644
+--- a/drivers/infiniband/sw/rxe/rxe_queue.c
++++ b/drivers/infiniband/sw/rxe/rxe_queue.c
+@@ -45,12 +45,15 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
+
+ if (outbuf) {
+ ip = rxe_create_mmap_info(rxe, buf_size, udata, buf);
+- if (!ip)
++ if (IS_ERR(ip)) {
++ err = PTR_ERR(ip);
+ goto err1;
++ }
+
+- err = copy_to_user(outbuf, &ip->info, sizeof(ip->info));
+- if (err)
++ if (copy_to_user(outbuf, &ip->info, sizeof(ip->info))) {
++ err = -EFAULT;
+ goto err2;
++ }
+
+ spin_lock_bh(&rxe->pending_lock);
+ list_add(&ip->pending_mmaps, &rxe->pending_mmaps);
+@@ -64,7 +67,7 @@ int do_mmap_info(struct rxe_dev *rxe, struct mminfo __user *outbuf,
+ err2:
+ kfree(ip);
+ err1:
+- return -EINVAL;
++ return err;
+ }
+
+ inline void rxe_queue_reset(struct rxe_queue *q)
+--
+2.20.1
+
--- /dev/null
+From 161a7013e7eea1fbaea40c8e3ac832d4ce73431f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 Apr 2020 17:29:58 +0300
+Subject: riscv: fix vdso build with lld
+
+From: Ilie Halip <ilie.halip@gmail.com>
+
+[ Upstream commit 3c1918c8f54166598195d938564072664a8275b1 ]
+
+When building with the LLVM linker, this error occurs:
+ LD arch/riscv/kernel/vdso/vdso-syms.o
+ ld.lld: error: no input files
+
+This happens because lld treats -R as an alias for -rpath, as opposed
+to GNU ld, where -R means --just-symbols.
+
+Use the long option name for compatibility between the two.
+
+Link: https://github.com/ClangBuiltLinux/linux/issues/805
+Reported-by: Dmitry Golovin <dima@golovin.in>
+Reviewed-by: Nick Desaulniers <ndesaulniers@google.com>
+Signed-off-by: Ilie Halip <ilie.halip@gmail.com>
+Reviewed-by: Fangrui Song <maskray@google.com>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/vdso/Makefile | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
+index 33b16f4212f7a..a4ee3a0e7d20d 100644
+--- a/arch/riscv/kernel/vdso/Makefile
++++ b/arch/riscv/kernel/vdso/Makefile
+@@ -33,15 +33,15 @@ $(obj)/vdso.so.dbg: $(src)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold)
+
+ # We also create a special relocatable object that should mirror the symbol
+-# table and layout of the linked DSO. With ld -R we can then refer to
+-# these symbols in the kernel code rather than hand-coded addresses.
++# table and layout of the linked DSO. With ld --just-symbols we can then
++# refer to these symbols in the kernel code rather than hand-coded addresses.
+
+ SYSCFLAGS_vdso.so.dbg = -shared -s -Wl,-soname=linux-vdso.so.1 \
+ -Wl,--build-id -Wl,--hash-style=both
+ $(obj)/vdso-dummy.o: $(src)/vdso.lds $(obj)/rt_sigreturn.o FORCE
+ $(call if_changed,vdsold)
+
+-LDFLAGS_vdso-syms.o := -r -R
++LDFLAGS_vdso-syms.o := -r --just-symbols
+ $(obj)/vdso-syms.o: $(obj)/vdso-dummy.o FORCE
+ $(call if_changed,ld)
+
+--
+2.20.1
+
--- /dev/null
+From 01100d690dc66ec4ecba80ec8e4223817d65e167 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 13 May 2020 09:42:29 +0200
+Subject: s390/ism: fix error return code in ism_probe()
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+[ Upstream commit 29b74cb75e3572d83708745e81e24d37837415f9 ]
+
+Return a negative error code (-ENOMEM) from the smcd_alloc_dev() error
+handling path instead of 0, as done elsewhere in this function.
+
+Fixes: 684b89bc39ce ("s390/ism: add device driver for internal shared memory")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Signed-off-by: Ursula Braun <ubraun@linux.ibm.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/s390/net/ism_drv.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/s390/net/ism_drv.c b/drivers/s390/net/ism_drv.c
+index 4fc2056bd2272..e615dc240150b 100644
+--- a/drivers/s390/net/ism_drv.c
++++ b/drivers/s390/net/ism_drv.c
+@@ -521,8 +521,10 @@ static int ism_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+
+ ism->smcd = smcd_alloc_dev(&pdev->dev, dev_name(&pdev->dev), &ism_ops,
+ ISM_NR_DMBS);
+- if (!ism->smcd)
++ if (!ism->smcd) {
++ ret = -ENOMEM;
+ goto err_resource;
++ }
+
+ ism->smcd->priv = ism;
+ ret = ism_dev_init(ism);
+--
+2.20.1
+
--- /dev/null
+From 8dc75421ec91c40fbee5eb4f7744ae310e985dff Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 Apr 2020 14:34:19 +0800
+Subject: selftests/ftrace: Check the first record for kprobe_args_type.tc
+
+From: Xiao Yang <yangx.jy@cn.fujitsu.com>
+
+[ Upstream commit f0c0d0cf590f71b2213b29a7ded2cde3d0a1a0ba ]
+
+It is possible to get multiple records from the trace during the test, in
+which case more than 4 arguments are assigned to ARGS. This situation
+results in the failure of kprobe_args_type.tc. For example:
+-----------------------------------------------------------
+grep testprobe trace
+ ftracetest-5902 [001] d... 111195.682227: testprobe: (_do_fork+0x0/0x460) arg1=334823024 arg2=334823024 arg3=0x13f4fe70 arg4=7
+ pmlogger-5949 [000] d... 111195.709898: testprobe: (_do_fork+0x0/0x460) arg1=345308784 arg2=345308784 arg3=0x1494fe70 arg4=7
+ grep testprobe trace
+ sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'
+ARGS='334823024 334823024 0x13f4fe70 7
+345308784 345308784 0x1494fe70 7'
+-----------------------------------------------------------
+
+We don't care which process calls do_fork, so just check the first record
+to fix the issue.
+
+Signed-off-by: Xiao Yang <yangx.jy@cn.fujitsu.com>
+Acked-by: Masami Hiramatsu <mhiramat@kernel.org>
+Signed-off-by: Shuah Khan <skhan@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
+index 1bcb67dcae267..81490ecaaa927 100644
+--- a/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
++++ b/tools/testing/selftests/ftrace/test.d/kprobe/kprobe_args_type.tc
+@@ -38,7 +38,7 @@ for width in 64 32 16 8; do
+ echo 0 > events/kprobes/testprobe/enable
+
+ : "Confirm the arguments is recorded in given types correctly"
+- ARGS=`grep "testprobe" trace | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'`
++ ARGS=`grep "testprobe" trace | head -n 1 | sed -e 's/.* arg1=\(.*\) arg2=\(.*\) arg3=\(.*\) arg4=\(.*\)/\1 \2 \3 \4/'`
+ check_types $ARGS $width
+
+ : "Clear event for next loop"
+--
+2.20.1
+
r8169-re-establish-support-for-rtl8401-chip-version.patch
umh-fix-memory-leak-on-execve-failure.patch
net-broadcom-select-broadcom_phy-for-bcmgenet.patch
+dmaengine-xilinx_dma-add-missing-check-for-empty-lis.patch
+riscv-fix-vdso-build-with-lld.patch
+dmaengine-pch_dma.c-avoid-data-race-between-probe-an.patch
+dmaengine-mmp_tdma-do-not-ignore-slave-config-valida.patch
+dmaengine-mmp_tdma-reset-channel-error-on-release.patch
+drm-amd-display-blank-dp-stream-before-re-train-the-.patch
+selftests-ftrace-check-the-first-record-for-kprobe_a.patch
+cpufreq-intel_pstate-only-mention-the-bios-disabling.patch
+alsa-hda-hdmi-fix-race-in-monitor-detection-during-p.patch
+drm-amd-powerplay-avoid-using-pm_en-before-it-is-ini.patch
+drm-amdgpu-bump-version-for-invalidate-l2-before-sdm.patch
+drm-amd-display-check-if-refclk_cntl-register-is-pre.patch
+drm-amd-display-defer-cursor-update-around-vupdate-f.patch
+drm-amd-display-update-downspread-percent-to-match-s.patch
+drm-qxl-lost-qxl_bo_kunmap_atomic_page-in-qxl_image_.patch
+fibmap-warn-and-return-an-error-in-case-of-block-int.patch
+io_uring-use-cond_resched-in-io_ring_ctx_wait_and_ki.patch
+io_uring-check-non-sync-defer_list-carefully.patch
+ipc-util.c-sysvipc_find_ipc-incorrectly-updates-posi.patch
+alsa-hda-realtek-fix-s3-pop-noise-on-dell-wyse.patch
+gfs2-another-gfs2_walk_metadata-fix.patch
+mmc-sdhci-pci-gli-fix-no-irq-handler-from-suspend.patch
+ib-hfi1-fix-another-case-where-pq-is-left-on-waitlis.patch
+acpi-ec-pm-avoid-premature-returns-from-acpi_s2idle_.patch
+pinctrl-sunrisepoint-fix-pad-lock-register-offset-fo.patch
+pinctrl-baytrail-enable-pin-configuration-setting-fo.patch
+pinctrl-qcom-fix-wrong-write-in-update_dual_edge.patch
+pinctrl-cherryview-add-missing-spinlock-usage-in-chv.patch
+drm-tegra-fix-smmu-support-on-tegra124-and-tegra210.patch
+bpf-fix-error-return-code-in-map_lookup_and_delete_e.patch
+alsa-firewire-lib-fix-function-sizeof-not-defined-er.patch
+cachefiles-fix-corruption-of-the-return-value-in-cac.patch
+i40iw-fix-error-handling-in-i40iw_manage_arp_cache.patch
+drm-i915-gt-make-timeslicing-an-explicit-engine-prop.patch
+drm-i915-don-t-enable-waincreaselatencyipcenabled-wh.patch
+bpf-sockmap-msg_pop_data-can-incorrecty-set-an-sge-l.patch
+bpf-sockmap-bpf_tcp_ingress-needs-to-subtract-bytes-.patch
+drm-i915-gem-remove-object_is_locked-assertion-from-.patch
+mmc-alcor-fix-a-resource-leak-in-the-error-path-for-.patch
+mmc-sdhci-pci-gli-fix-can-not-access-gl9750-after-re.patch
+mmc-core-check-request-type-before-completing-the-re.patch
+mmc-core-fix-recursive-locking-issue-in-cqe-recovery.patch
+mmc-block-fix-request-completion-in-the-cqe-timeout-.patch
+gfs2-more-gfs2_find_jhead-fixes.patch
+fork-prevent-accidental-access-to-clone3-features.patch
+drm-amdgpu-force-fbdev-into-vram.patch
+nfs-fix-fscache-super_cookie-index_key-from-changing.patch
+nfs-fix-fscache-super_cookie-allocation.patch
+nfsv4-fix-fscache-cookie-aux_data-to-ensure-change_a.patch
+hwmon-drivetemp-fix-sct-support-if-sct-data-tables-a.patch
+netfilter-conntrack-avoid-gcc-10-zero-length-bounds-.patch
+drm-i915-gvt-fix-kernel-oops-for-3-level-ppgtt-guest.patch
+arm64-fix-the-flush_icache_range-arguments-in-machin.patch
+netfilter-conntrack-fix-infinite-loop-on-rmmod.patch
+drm-i915-mark-concurrent-submissions-with-a-weak-dep.patch
+nfs-fix-null-deference-in-nfs4_get_valid_delegation.patch
+sunrpc-signalled-async-tasks-need-to-exit.patch
+tracing-wait-for-preempt-irq-delay-thread-to-execute.patch
+netfilter-flowtable-set-nf_flow_teardown-flag-on-ent.patch
+netfilter-nft_set_rbtree-add-missing-expired-checks.patch
+rdma-rxe-always-return-err_ptr-from-rxe_create_mmap_.patch
+ib-mlx4-test-return-value-of-calls-to-ib_get_cached_.patch
+ib-core-fix-potential-null-pointer-dereference-in-pk.patch
+rdma-core-fix-double-put-of-resource.patch
+rdma-iw_cxgb4-fix-incorrect-function-parameters.patch
+x86-ftrace-have-ftrace-trampolines-turn-read-only-at.patch
+hwmon-da9052-synchronize-access-with-mfd.patch
+s390-ism-fix-error-return-code-in-ism_probe.patch
+drm-i915-handle-idling-during-i915_gem_evict_somethi.patch
+mm-memcg-fix-inconsistent-oom-event-behavior.patch
+epoll-call-final-ep_events_available-check-under-the.patch
+bpf-fix-bug-in-mmap-implementation-for-bpf-array-map.patch
+nfsv3-fix-rpc-receive-buffer-size-for-mount-call.patch
--- /dev/null
+From 99d39e6dc59311f57ab005f408d70b1a75664623 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 9 May 2020 14:07:13 -0400
+Subject: SUNRPC: Signalled ASYNC tasks need to exit
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit ce99aa62e1eb793e259d023c7f6ccb7c4879917b ]
+
+Ensure that signalled ASYNC rpc_tasks exit immediately instead of
+spinning until a timeout (or forever).
+
+To avoid checking for the signal flag on every scheduler iteration,
+the check is instead introduced in the client's finite state
+machine.
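+
+As a minimal sketch of that idea (illustrative only; it simply mirrors the
+hunk below), the state machine's timeout check bails out as soon as the
+task has been signalled, instead of retrying the request:
+
+	static void
+	rpc_check_timeout(struct rpc_task *task)
+	{
+		/* A signalled task must exit rather than spin on retries. */
+		if (RPC_SIGNALLED(task)) {
+			rpc_call_rpcerror(task, -ERESTARTSYS);
+			return;
+		}
+
+		/* ... existing timeout/retransmit handling continues here ... */
+	}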
+
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Fixes: ae67bd3821bb ("SUNRPC: Fix up task signalling")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/clnt.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
+index 7324b21f923e6..3ceaefb2f0bcf 100644
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -2416,6 +2416,11 @@ rpc_check_timeout(struct rpc_task *task)
+ {
+ struct rpc_clnt *clnt = task->tk_client;
+
++ if (RPC_SIGNALLED(task)) {
++ rpc_call_rpcerror(task, -ERESTARTSYS);
++ return;
++ }
++
+ if (xprt_adjust_timeout(task->tk_rqstp) == 0)
+ return;
+
+--
+2.20.1
+
--- /dev/null
+From 2e33a786e9200af943b098d07eafdaeca4f9d2a1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 10 May 2020 11:35:10 -0400
+Subject: tracing: Wait for preempt irq delay thread to execute
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+[ Upstream commit 8b1fac2e73e84ef0d6391051880a8e1d7044c847 ]
+
+A bug report was posted that running the preempt irq delay module on a slow
+machine, and removing it quickly, could lead to the thread created by the
+module executing after the module is removed, which could cause the kernel
+to crash. The fix for this was to call kthread_stop() after creating the
+thread to make sure it finishes before allowing the module to be removed.
+
+Now this caused the opposite problem on fast machines: kthread_stop() can be
+called before the kthread ever gets to run, so the test never executes. To
+fix this, add a completion, wait for the kthread to execute the test, and
+then wait for it to end.
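+
+A minimal sketch of the resulting synchronization (illustrative only; the
+function names here are shortened stand-ins for the ones in the hunks
+below) looks like this:
+
+	static struct completion done;
+
+	static int test_thread(void *data)
+	{
+		/* ... run the test body ... */
+		complete(&done);	/* tell the creator the test has run */
+
+		set_current_state(TASK_INTERRUPTIBLE);
+		while (!kthread_should_stop()) {
+			schedule();
+			set_current_state(TASK_INTERRUPTIBLE);
+		}
+		__set_current_state(TASK_RUNNING);
+		return 0;
+	}
+
+	static int run_test(void)
+	{
+		struct task_struct *task;
+
+		init_completion(&done);
+		task = kthread_run(test_thread, NULL, "test");
+		if (IS_ERR(task))
+			return PTR_ERR(task);
+		wait_for_completion(&done);	/* the test body has executed */
+		kthread_stop(task);		/* now safe to stop and unload */
+		return 0;
+	}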
+
+This issue caused the ftracetest selftests to fail on the preemptirq tests.
+
+Link: https://lore.kernel.org/r/20200510114210.15d9e4af@oasis.local.home
+
+Cc: stable@vger.kernel.org
+Fixes: d16a8c31077e ("tracing: Wait for preempt irq delay thread to finish")
+Reviewed-by: Joel Fernandes (Google) <joel@joelfernandes.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/preemptirq_delay_test.c | 12 ++++++++++--
+ 1 file changed, 10 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/trace/preemptirq_delay_test.c b/kernel/trace/preemptirq_delay_test.c
+index c4c86de63cf91..312d1a0ca3b60 100644
+--- a/kernel/trace/preemptirq_delay_test.c
++++ b/kernel/trace/preemptirq_delay_test.c
+@@ -16,6 +16,7 @@
+ #include <linux/printk.h>
+ #include <linux/string.h>
+ #include <linux/sysfs.h>
++#include <linux/completion.h>
+
+ static ulong delay = 100;
+ static char test_mode[12] = "irq";
+@@ -28,6 +29,8 @@ MODULE_PARM_DESC(delay, "Period in microseconds (100 us default)");
+ MODULE_PARM_DESC(test_mode, "Mode of the test such as preempt, irq, or alternate (default irq)");
+ MODULE_PARM_DESC(burst_size, "The size of a burst (default 1)");
+
++static struct completion done;
++
+ #define MIN(x, y) ((x) < (y) ? (x) : (y))
+
+ static void busy_wait(ulong time)
+@@ -114,6 +117,8 @@ static int preemptirq_delay_run(void *data)
+ for (i = 0; i < s; i++)
+ (testfuncs[i])(i);
+
++ complete(&done);
++
+ set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ schedule();
+@@ -128,15 +133,18 @@ static int preemptirq_delay_run(void *data)
+ static int preemptirq_run_test(void)
+ {
+ struct task_struct *task;
+-
+ char task_name[50];
+
++ init_completion(&done);
++
+ snprintf(task_name, sizeof(task_name), "%s_test", test_mode);
+ task = kthread_run(preemptirq_delay_run, NULL, task_name);
+ if (IS_ERR(task))
+ return PTR_ERR(task);
+- if (task)
++ if (task) {
++ wait_for_completion(&done);
+ kthread_stop(task);
++ }
+ return 0;
+ }
+
+--
+2.20.1
+
--- /dev/null
+From 70617fa9ff5a6664c3cf89ff5adbf66d0cb3e945 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 30 Apr 2020 20:21:47 -0400
+Subject: x86/ftrace: Have ftrace trampolines turn read-only at the end of
+ system boot up
+
+From: Steven Rostedt (VMware) <rostedt@goodmis.org>
+
+[ Upstream commit 59566b0b622e3e6ea928c0b8cac8a5601b00b383 ]
+
+Booting one of my machines, it triggered the following crash:
+
+ Kernel/User page tables isolation: enabled
+ ftrace: allocating 36577 entries in 143 pages
+ Starting tracer 'function'
+ BUG: unable to handle page fault for address: ffffffffa000005c
+ #PF: supervisor write access in kernel mode
+ #PF: error_code(0x0003) - permissions violation
+ PGD 2014067 P4D 2014067 PUD 2015063 PMD 7b253067 PTE 7b252061
+ Oops: 0003 [#1] PREEMPT SMP PTI
+ CPU: 0 PID: 0 Comm: swapper Not tainted 5.4.0-test+ #24
+ Hardware name: To Be Filled By O.E.M. To Be Filled By O.E.M./To be filled by O.E.M., BIOS SDBLI944.86P 05/08/2007
+ RIP: 0010:text_poke_early+0x4a/0x58
+ Code: 34 24 48 89 54 24 08 e8 bf 72 0b 00 48 8b 34 24 48 8b 4c 24 08 84 c0 74 0b 48 89 df f3 a4 48 83 c4 10 5b c3 9c 58 fa 48 89 df <f3> a4 50 9d 48 83 c4 10 5b e9 d6 f9 ff ff
+0 41 57 49
+ RSP: 0000:ffffffff82003d38 EFLAGS: 00010046
+ RAX: 0000000000000046 RBX: ffffffffa000005c RCX: 0000000000000005
+ RDX: 0000000000000005 RSI: ffffffff825b9a90 RDI: ffffffffa000005c
+ RBP: ffffffffa000005c R08: 0000000000000000 R09: ffffffff8206e6e0
+ R10: ffff88807b01f4c0 R11: ffffffff8176c106 R12: ffffffff8206e6e0
+ R13: ffffffff824f2440 R14: 0000000000000000 R15: ffffffff8206eac0
+ FS: 0000000000000000(0000) GS:ffff88807d400000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: ffffffffa000005c CR3: 0000000002012000 CR4: 00000000000006b0
+ Call Trace:
+ text_poke_bp+0x27/0x64
+ ? mutex_lock+0x36/0x5d
+ arch_ftrace_update_trampoline+0x287/0x2d5
+ ? ftrace_replace_code+0x14b/0x160
+ ? ftrace_update_ftrace_func+0x65/0x6c
+ __register_ftrace_function+0x6d/0x81
+ ftrace_startup+0x23/0xc1
+ register_ftrace_function+0x20/0x37
+ func_set_flag+0x59/0x77
+ __set_tracer_option.isra.19+0x20/0x3e
+ trace_set_options+0xd6/0x13e
+ apply_trace_boot_options+0x44/0x6d
+ register_tracer+0x19e/0x1ac
+ early_trace_init+0x21b/0x2c9
+ start_kernel+0x241/0x518
+ ? load_ucode_intel_bsp+0x21/0x52
+ secondary_startup_64+0xa4/0xb0
+
+I was able to trigger it on other machines by adding both "ftrace=function"
+and "trace_options=func_stack_trace" to the kernel command line.
+
+The cause is that "ftrace=function" registers the function tracer and
+creates a trampoline, which is set executable and read-only. Then
+"trace_options=func_stack_trace" updates the same trampoline to include
+the stack tracer version of the function tracer. But since the trampoline
+already exists, it is updated with text_poke_bp(). The problem is that
+when text_poke_bp() is called while system_state == SYSTEM_BOOTING, it
+simply does a memcpy() instead of going through the text poking page
+mapping, as it assumes the text is still read-write. But in this case it
+is not, and we take a fault and crash.
+
+Instead, let's keep the ftrace trampolines read-write during boot up,
+and then, when the kernel executable text is set to read-only, set the
+ftrace trampolines to read-only as well.
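+
+A minimal sketch of the two-step approach (illustrative only; it follows
+the hunks below, except that the size computation is simplified to use
+ops->trampoline_size rather than recomputing it from the caller layout):
+
+	/* At trampoline creation time, defer the RO protection while booting. */
+	if (likely(system_state != SYSTEM_BOOTING))
+		set_memory_ro((unsigned long)trampoline, npages);
+	set_memory_x((unsigned long)trampoline, npages);
+
+	/* Called from mark_rodata_ro() once the kernel text goes read-only. */
+	void set_ftrace_ops_ro(void)
+	{
+		struct ftrace_ops *ops;
+		unsigned long npages;
+
+		do_for_each_ftrace_op(ops, ftrace_ops_list) {
+			if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
+				continue;
+
+			npages = DIV_ROUND_UP(ops->trampoline_size, PAGE_SIZE);
+			set_memory_ro((unsigned long)ops->trampoline, npages);
+		} while_for_each_ftrace_op(ops);
+	}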
+
+Link: https://lkml.kernel.org/r/20200430202147.4dc6e2de@oasis.local.home
+
+Cc: Ingo Molnar <mingo@kernel.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Andy Lutomirski <luto@amacapital.net>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Josh Poimboeuf <jpoimboe@redhat.com>
+Cc: "H. Peter Anvin" <hpa@zytor.com>
+Cc: stable@vger.kernel.org
+Fixes: 768ae4406a5c ("x86/ftrace: Use text_poke()")
+Acked-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/ftrace.h | 6 ++++++
+ arch/x86/kernel/ftrace.c | 29 ++++++++++++++++++++++++++++-
+ arch/x86/mm/init_64.c | 3 +++
+ include/linux/ftrace.h | 23 +++++++++++++++++++++++
+ kernel/trace/ftrace_internal.h | 22 ----------------------
+ 5 files changed, 60 insertions(+), 23 deletions(-)
+
+diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
+index 85be2f5062728..89af0d2c62aab 100644
+--- a/arch/x86/include/asm/ftrace.h
++++ b/arch/x86/include/asm/ftrace.h
+@@ -56,6 +56,12 @@ struct dyn_arch_ftrace {
+
+ #ifndef __ASSEMBLY__
+
++#if defined(CONFIG_FUNCTION_TRACER) && defined(CONFIG_DYNAMIC_FTRACE)
++extern void set_ftrace_ops_ro(void);
++#else
++static inline void set_ftrace_ops_ro(void) { }
++#endif
++
+ #define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+ static inline bool arch_syscall_match_sym_name(const char *sym, const char *name)
+ {
+diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
+index 37a0aeaf89e77..b0e641793be4f 100644
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -407,7 +407,8 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+
+ set_vm_flush_reset_perms(trampoline);
+
+- set_memory_ro((unsigned long)trampoline, npages);
++ if (likely(system_state != SYSTEM_BOOTING))
++ set_memory_ro((unsigned long)trampoline, npages);
+ set_memory_x((unsigned long)trampoline, npages);
+ return (unsigned long)trampoline;
+ fail:
+@@ -415,6 +416,32 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+ return 0;
+ }
+
++void set_ftrace_ops_ro(void)
++{
++ struct ftrace_ops *ops;
++ unsigned long start_offset;
++ unsigned long end_offset;
++ unsigned long npages;
++ unsigned long size;
++
++ do_for_each_ftrace_op(ops, ftrace_ops_list) {
++ if (!(ops->flags & FTRACE_OPS_FL_ALLOC_TRAMP))
++ continue;
++
++ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
++ start_offset = (unsigned long)ftrace_regs_caller;
++ end_offset = (unsigned long)ftrace_regs_caller_end;
++ } else {
++ start_offset = (unsigned long)ftrace_caller;
++ end_offset = (unsigned long)ftrace_epilogue;
++ }
++ size = end_offset - start_offset;
++ size = size + RET_SIZE + sizeof(void *);
++ npages = DIV_ROUND_UP(size, PAGE_SIZE);
++ set_memory_ro((unsigned long)ops->trampoline, npages);
++ } while_for_each_ftrace_op(ops);
++}
++
+ static unsigned long calc_trampoline_call_offset(bool save_regs)
+ {
+ unsigned long start_offset;
+diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
+index abbdecb75fad8..023e1ec5e1537 100644
+--- a/arch/x86/mm/init_64.c
++++ b/arch/x86/mm/init_64.c
+@@ -54,6 +54,7 @@
+ #include <asm/init.h>
+ #include <asm/uv/uv.h>
+ #include <asm/setup.h>
++#include <asm/ftrace.h>
+
+ #include "mm_internal.h"
+
+@@ -1288,6 +1289,8 @@ void mark_rodata_ro(void)
+ all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
+ set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
+
++ set_ftrace_ops_ro();
++
+ #ifdef CONFIG_CPA_DEBUG
+ printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
+ set_memory_rw(start, (end-start) >> PAGE_SHIFT);
+diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
+index db95244a62d44..ab4bd15cbcdb3 100644
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -210,6 +210,29 @@ struct ftrace_ops {
+ #endif
+ };
+
++extern struct ftrace_ops __rcu *ftrace_ops_list;
++extern struct ftrace_ops ftrace_list_end;
++
++/*
++ * Traverse the ftrace_global_list, invoking all entries. The reason that we
++ * can use rcu_dereference_raw_check() is that elements removed from this list
++ * are simply leaked, so there is no need to interact with a grace-period
++ * mechanism. The rcu_dereference_raw_check() calls are needed to handle
++ * concurrent insertions into the ftrace_global_list.
++ *
++ * Silly Alpha and silly pointer-speculation compiler optimizations!
++ */
++#define do_for_each_ftrace_op(op, list) \
++ op = rcu_dereference_raw_check(list); \
++ do
++
++/*
++ * Optimized for just a single item in the list (as that is the normal case).
++ */
++#define while_for_each_ftrace_op(op) \
++ while (likely(op = rcu_dereference_raw_check((op)->next)) && \
++ unlikely((op) != &ftrace_list_end))
++
+ /*
+ * Type of the current tracing.
+ */
+diff --git a/kernel/trace/ftrace_internal.h b/kernel/trace/ftrace_internal.h
+index 0456e0a3dab14..382775edf6902 100644
+--- a/kernel/trace/ftrace_internal.h
++++ b/kernel/trace/ftrace_internal.h
+@@ -4,28 +4,6 @@
+
+ #ifdef CONFIG_FUNCTION_TRACER
+
+-/*
+- * Traverse the ftrace_global_list, invoking all entries. The reason that we
+- * can use rcu_dereference_raw_check() is that elements removed from this list
+- * are simply leaked, so there is no need to interact with a grace-period
+- * mechanism. The rcu_dereference_raw_check() calls are needed to handle
+- * concurrent insertions into the ftrace_global_list.
+- *
+- * Silly Alpha and silly pointer-speculation compiler optimizations!
+- */
+-#define do_for_each_ftrace_op(op, list) \
+- op = rcu_dereference_raw_check(list); \
+- do
+-
+-/*
+- * Optimized for just a single item in the list (as that is the normal case).
+- */
+-#define while_for_each_ftrace_op(op) \
+- while (likely(op = rcu_dereference_raw_check((op)->next)) && \
+- unlikely((op) != &ftrace_list_end))
+-
+-extern struct ftrace_ops __rcu *ftrace_ops_list;
+-extern struct ftrace_ops ftrace_list_end;
+ extern struct mutex ftrace_lock;
+ extern struct ftrace_ops global_ops;
+
+--
+2.20.1
+