--- /dev/null
+From 4df284d4c3951b78a2b5db65ed015ecf0878a5f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Aug 2020 18:33:54 -0700
+Subject: alpha: fix annotation of io{read,write}{16,32}be()
+
+From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
+
+[ Upstream commit bd72866b8da499e60633ff28f8a4f6e09ca78efe ]
+
+These accessors must be used to read/write a big-endian bus. The value
+returned or written is native-endian.
+
+However, these accessors are defined using be{16,32}_to_cpu() or
+cpu_to_be{16,32}() to do the endian conversion, but these expect a
+__be{16,32} argument when none is present. Keeping them would need a force
+cast that would solve nothing at all.
+
+So, do the conversion using swab{16,32}(), as is done in asm-generic for
+similar situations.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Richard Henderson <rth@twiddle.net>
+Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
+Cc: Matt Turner <mattst88@gmail.com>
+Cc: Stephen Boyd <sboyd@kernel.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Link: http://lkml.kernel.org/r/20200622114232.80039-1-luc.vanoostenryck@gmail.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/alpha/include/asm/io.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
+index d123ff90f7a83..9995bed6e92e2 100644
+--- a/arch/alpha/include/asm/io.h
++++ b/arch/alpha/include/asm/io.h
+@@ -493,10 +493,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
+ }
+ #endif
+
+-#define ioread16be(p) be16_to_cpu(ioread16(p))
+-#define ioread32be(p) be32_to_cpu(ioread32(p))
+-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
+-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
++#define ioread16be(p) swab16(ioread16(p))
++#define ioread32be(p) swab32(ioread32(p))
++#define iowrite16be(v,p) iowrite16(swab16(v), (p))
++#define iowrite32be(v,p) iowrite32(swab32(v), (p))
+
+ #define inb_p inb
+ #define inw_p inw
+--
+2.25.1
+
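For illustration, a tiny user-space sketch (assumed values, not the kernel
code) of what the swab-based definition computes: ioread16() already hands
back a native-endian u16, so the big-endian accessor reduces to an
unconditional byte swap and no __be16 type ever appears.

    #include <stdint.h>
    #include <stdio.h>

    /* Same operation as the kernel's swab16(): unconditionally swap the
     * two bytes of a native-endian 16-bit value. */
    static uint16_t swab16(uint16_t x)
    {
            return (uint16_t)((x << 8) | (x >> 8));
    }

    int main(void)
    {
            uint16_t raw = 0x3412;   /* stand-in for what ioread16() returns */

            /* ioread16be(p) now expands to swab16(ioread16(p)): u16 in, u16 out. */
            printf("0x%04x\n", swab16(raw));   /* prints 0x1234 */
            return 0;
    }
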
--- /dev/null
+From c6240afe99dd02fc8a86eb1191e78e1f8963f82f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Aug 2020 11:37:20 -0700
+Subject: cpufreq: intel_pstate: Fix cpuinfo_max_freq when
+ MSR_TURBO_RATIO_LIMIT is 0
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+[ Upstream commit 4daca379c703ff55edc065e8e5173dcfeecf0148 ]
+
+The MSR_TURBO_RATIO_LIMIT can be 0. This is not an error. Users can update
+this MSR via BIOS settings on some systems, or can use msr tools to update
+it. Also, some systems boot with its value = 0.
+
+This results in cpufreq/cpuinfo_max_freq being displayed incorrectly: it
+will be equal to cpufreq/base_frequency, even though turbo is enabled.
+
+But the platform will still function normally in HWP mode, as we get the
+max 1-core frequency from MSR_HWP_CAPABILITIES. This MSR is already used
+to calculate cpu->pstate.turbo_freq, which is used to set
+policy->cpuinfo.max_freq. But cpu->pstate.turbo_pstate is used in some
+other places, for example to set policy->max.
+
+To fix this, also update cpu->pstate.turbo_pstate when updating
+cpu->pstate.turbo_freq.
+
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/intel_pstate.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 1aa0b05c8cbdf..5c41dc9aaa46d 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1378,6 +1378,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+
+ intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+ cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
++ cpu->pstate.turbo_pstate = phy_max;
+ } else {
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ }
+--
+2.25.1
+
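As a rough sketch of why the one-liner matters (toy struct and invented
numbers; only the field names match the driver), other code derives limits
from the cached turbo_pstate, so it has to track the HWP-derived ceiling just
as turbo_freq does.

    #include <stdio.h>

    /* Toy model of the cached values, not the real struct cpudata. */
    struct pstate {
            int turbo_pstate;
            int turbo_freq;
            int scaling;
    };

    int main(void)
    {
            struct pstate p = { .turbo_pstate = 24, .scaling = 100000 };
            int phy_max = 36;   /* pretend 1-core max from MSR_HWP_CAPABILITIES */

            p.turbo_freq = phy_max * p.scaling;   /* already done before the patch */
            p.turbo_pstate = phy_max;             /* the fix: keep the pstate in step */

            /* Anything later computing a limit from turbo_pstate now agrees. */
            printf("derived max = %d kHz\n", p.turbo_pstate * p.scaling);
            return 0;
    }
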
--- /dev/null
+From 575661bf8e826485e92827fffdaf9423ecf80899 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Jul 2020 22:24:07 -0700
+Subject: Input: psmouse - add a newline when printing 'proto' by sysfs
+
+From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+
+[ Upstream commit 4aec14de3a15cf9789a0e19c847f164776f49473 ]
+
+When I cat the parameter 'proto' via sysfs, it displays as follows. It's
+better to add a newline for easier reading.
+
+root@syzkaller:~# cat /sys/module/psmouse/parameters/proto
+autoroot@syzkaller:~#
+
+Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+Link: https://lore.kernel.org/r/20200720073846.120724-1-wangxiongfeng2@huawei.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/input/mouse/psmouse-base.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
+index 8ac9e03c05b45..ca8f726dab2e7 100644
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -2012,7 +2012,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
+ {
+ int type = *((unsigned int *)kp->arg);
+
+- return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
++ return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
+ }
+
+ static int __init psmouse_init(void)
+--
+2.25.1
+
--- /dev/null
+From b243c24d7b3b3cb7cb3acdb38599850c2f262aa3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Jun 2020 17:06:35 +0800
+Subject: jffs2: fix UAF problem
+
+From: Zhe Li <lizhe67@huawei.com>
+
+[ Upstream commit 798b7347e4f29553db4b996393caf12f5b233daf ]
+
+The log of the UAF problem is listed below.
+BUG: KASAN: use-after-free in jffs2_rmdir+0xa4/0x1cc [jffs2] at addr c1f165fc
+Read of size 4 by task rm/8283
+=============================================================================
+BUG kmalloc-32 (Tainted: P B O ): kasan: bad access detected
+-----------------------------------------------------------------------------
+
+INFO: Allocated in 0xbbbbbbbb age=3054364 cpu=0 pid=0
+ 0xb0bba6ef
+ jffs2_write_dirent+0x11c/0x9c8 [jffs2]
+ __slab_alloc.isra.21.constprop.25+0x2c/0x44
+ __kmalloc+0x1dc/0x370
+ jffs2_write_dirent+0x11c/0x9c8 [jffs2]
+ jffs2_do_unlink+0x328/0x5fc [jffs2]
+ jffs2_rmdir+0x110/0x1cc [jffs2]
+ vfs_rmdir+0x180/0x268
+ do_rmdir+0x2cc/0x300
+ ret_from_syscall+0x0/0x3c
+INFO: Freed in 0x205b age=3054364 cpu=0 pid=0
+ 0x2e9173
+ jffs2_add_fd_to_list+0x138/0x1dc [jffs2]
+ jffs2_add_fd_to_list+0x138/0x1dc [jffs2]
+ jffs2_garbage_collect_dirent.isra.3+0x21c/0x288 [jffs2]
+ jffs2_garbage_collect_live+0x16bc/0x1800 [jffs2]
+ jffs2_garbage_collect_pass+0x678/0x11d4 [jffs2]
+ jffs2_garbage_collect_thread+0x1e8/0x3b0 [jffs2]
+ kthread+0x1a8/0x1b0
+ ret_from_kernel_thread+0x5c/0x64
+Call Trace:
+[c17ddd20] [c02452d4] kasan_report.part.0+0x298/0x72c (unreliable)
+[c17ddda0] [d2509680] jffs2_rmdir+0xa4/0x1cc [jffs2]
+[c17dddd0] [c026da04] vfs_rmdir+0x180/0x268
+[c17dde00] [c026f4e4] do_rmdir+0x2cc/0x300
+[c17ddf40] [c001a658] ret_from_syscall+0x0/0x3c
+
+The root cause is that we don't take "jffs2_inode_info.sem" before
+we scan the list "jffs2_inode_info.dents" in jffs2_rmdir().
+This patch adds code to take "jffs2_inode_info.sem" before scanning
+"jffs2_inode_info.dents" to solve the UAF problem.
+
+Signed-off-by: Zhe Li <lizhe67@huawei.com>
+Reviewed-by: Hou Tao <houtao1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jffs2/dir.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
+index e5a6deb38e1e1..f4a5ec92f5dc7 100644
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
+ int ret;
+ uint32_t now = get_seconds();
+
++ mutex_lock(&f->sem);
+ for (fd = f->dents ; fd; fd = fd->next) {
+- if (fd->ino)
++ if (fd->ino) {
++ mutex_unlock(&f->sem);
+ return -ENOTEMPTY;
++ }
+ }
++ mutex_unlock(&f->sem);
+
+ ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
+ dentry->d_name.len, f, now);
+--
+2.25.1
+
--- /dev/null
+From dbaba08b25c9fc277d85ace353008edf6a6208e6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Aug 2020 11:27:25 +0100
+Subject: KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not
+ set
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit b5331379bc62611d1026173a09c73573384201d9 ]
+
+When an MMU notifier call results in unmapping a range that spans multiple
+PGDs, we end up calling into cond_resched_lock() when crossing a PGD boundary,
+since this avoids running into RCU stalls during VM teardown. Unfortunately,
+if the VM is destroyed as a result of OOM, then blocking is not permitted
+and the call to the scheduler triggers the following BUG():
+
+ | BUG: sleeping function called from invalid context at arch/arm64/kvm/mmu.c:394
+ | in_atomic(): 1, irqs_disabled(): 0, non_block: 1, pid: 36, name: oom_reaper
+ | INFO: lockdep is turned off.
+ | CPU: 3 PID: 36 Comm: oom_reaper Not tainted 5.8.0 #1
+ | Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015
+ | Call trace:
+ | dump_backtrace+0x0/0x284
+ | show_stack+0x1c/0x28
+ | dump_stack+0xf0/0x1a4
+ | ___might_sleep+0x2bc/0x2cc
+ | unmap_stage2_range+0x160/0x1ac
+ | kvm_unmap_hva_range+0x1a0/0x1c8
+ | kvm_mmu_notifier_invalidate_range_start+0x8c/0xf8
+ | __mmu_notifier_invalidate_range_start+0x218/0x31c
+ | mmu_notifier_invalidate_range_start_nonblock+0x78/0xb0
+ | __oom_reap_task_mm+0x128/0x268
+ | oom_reap_task+0xac/0x298
+ | oom_reaper+0x178/0x17c
+ | kthread+0x1e4/0x1fc
+ | ret_from_fork+0x10/0x30
+
+Use the new 'flags' argument to kvm_unmap_hva_range() to ensure that we
+only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is set in the notifier
+flags.
+
+Cc: <stable@vger.kernel.org>
+Fixes: 8b3405e345b5 ("kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd")
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Message-Id: <20200811102725.7121-3-will@kernel.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/arm/mmu.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index 3814cdad643a5..f7de27efdd1f2 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -288,7 +288,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
+ * destroying the VM), otherwise another faulting VCPU may come in and mess
+ * with things behind our backs.
+ */
+-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
++static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,
++ bool may_block)
+ {
+ pgd_t *pgd;
+ phys_addr_t addr = start, end = start + size;
+@@ -311,11 +312,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+ * If the range is too large, release the kvm->mmu_lock
+ * to prevent starvation and lockup detector warnings.
+ */
+- if (next != end)
++ if (may_block && next != end)
+ cond_resched_lock(&kvm->mmu_lock);
+ } while (pgd++, addr = next, addr != end);
+ }
+
++static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
++{
++ __unmap_stage2_range(mmu, start, size, true);
++}
++
+ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+ phys_addr_t addr, phys_addr_t end)
+ {
+@@ -1626,7 +1632,10 @@ static int handle_hva_to_gpa(struct kvm *kvm,
+
+ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+ {
+- unmap_stage2_range(kvm, gpa, size);
++ unsigned flags = *(unsigned *)data;
++ bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
++
++ __unmap_stage2_range(kvm, gpa, size, may_block);
+ return 0;
+ }
+
+@@ -1649,7 +1658,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
+ return 0;
+
+ trace_kvm_unmap_hva_range(start, end);
+- handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
++ handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
+ return 0;
+ }
+
+--
+2.25.1
+
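Conceptually (toy code with invented names, not the KVM sources), the change
threads the notifier's blockable hint down to the loop that wants to drop the
lock, so a non-blocking caller such as the OOM reaper never reaches a
sleeping call.

    #include <stdbool.h>
    #include <stdio.h>

    #define CHUNK 0x1000UL

    /* Toy walker: only take the optional resched break when the caller said
     * blocking is allowed. */
    static void unmap_range(unsigned long start, unsigned long size, bool may_block)
    {
            unsigned long addr = start, end = start + size;

            while (addr != end) {
                    unsigned long next = addr + CHUNK;

                    /* ... tear down [addr, next) here ... */
                    if (may_block && next != end)
                            printf("would cond_resched_lock() after %#lx\n", addr);
                    addr = next;
            }
    }

    int main(void)
    {
            unmap_range(0, 3 * CHUNK, true);    /* normal teardown: may sleep   */
            unmap_range(0, 3 * CHUNK, false);   /* OOM-reaper path: never sleep */
            return 0;
    }
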
--- /dev/null
+From af5f42ab2062b1a12b519f7063f9c005e8a40202 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Jun 2020 17:17:52 +1000
+Subject: m68knommu: fix overwriting of bits in ColdFire V3 cache control
+
+From: Greg Ungerer <gerg@linux-m68k.org>
+
+[ Upstream commit bdee0e793cea10c516ff48bf3ebb4ef1820a116b ]
+
+The Cache Control Register (CACR) of the ColdFire V3 has bits that
+control high level caching functions, and also enable/disable the use
+of the alternate stack pointer register (the EUSP bit) to provide
+separate supervisor and user stack pointer registers. The code as
+it is today will blindly clear the EUSP bit on cache actions like
+invalidation. So it is broken for this case - and that will result
+in failed booting (interrupt entry and exit processing will be
+completely hosed).
+
+This only affects ColdFire V3 parts that support the alternate stack
+register (like the 5329 for example) - generally speaking new parts do,
+older parts don't. It has no impact on ColdFire V3 parts with the single
+stack pointer, like the 5307 for example.
+
+Fix the cache bit defines used, so they maintain the EUSP bit when
+carrying out cache actions through the CACR register.
+
+Signed-off-by: Greg Ungerer <gerg@linux-m68k.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/m68k/include/asm/m53xxacr.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
+index 9138a624c5c81..692f90e7fecc1 100644
+--- a/arch/m68k/include/asm/m53xxacr.h
++++ b/arch/m68k/include/asm/m53xxacr.h
+@@ -89,9 +89,9 @@
+ * coherency though in all cases. And for copyback caches we will need
+ * to push cached data as well.
+ */
+-#define CACHE_INIT CACR_CINVA
+-#define CACHE_INVALIDATE CACR_CINVA
+-#define CACHE_INVALIDATED CACR_CINVA
++#define CACHE_INIT (CACHE_MODE + CACR_CINVA - CACR_EC)
++#define CACHE_INVALIDATE (CACHE_MODE + CACR_CINVA)
++#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
+
+ #define ACR0_MODE ((CONFIG_RAMBASE & 0xff000000) + \
+ (0x000f0000) + \
+--
+2.25.1
+
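For illustration (illustrative bit values; only the macro names match the
header), the old defines wrote a CACR value containing nothing but the
invalidate bit, implicitly clearing every other control bit including EUSP,
while the new ones compose the invalidate request with the configured cache
mode so those bits survive.

    #include <stdio.h>

    #define CACR_EC    0x80000000u   /* enable cache (illustrative value)       */
    #define CACR_EUSP  0x00000020u   /* enable user stack pointer (illustrative) */
    #define CACR_CINVA 0x01000000u   /* invalidate all (illustrative)            */

    #define CACHE_MODE (CACR_EC + CACR_EUSP)

    int main(void)
    {
            unsigned int old_write = CACR_CINVA;              /* EUSP cleared   */
            unsigned int new_write = CACHE_MODE + CACR_CINVA; /* EUSP preserved */

            printf("old CACR write %#010x: EUSP %s\n", old_write,
                   (old_write & CACR_EUSP) ? "kept" : "cleared");
            printf("new CACR write %#010x: EUSP %s\n", new_write,
                   (new_write & CACR_EUSP) ? "kept" : "cleared");
            return 0;
    }
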
--- /dev/null
+From c473dab60d9dde95a13d5a9d0116101ca15e9663 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Jun 2020 18:17:28 +0200
+Subject: media: budget-core: Improve exception handling in budget_register()
+
+From: Chuhong Yuan <hslester96@gmail.com>
+
+[ Upstream commit fc0456458df8b3421dba2a5508cd817fbc20ea71 ]
+
+budget_register() has no error handling after its failures.
+Add the missing undo functions for error handling to fix it.
+
+Signed-off-by: Chuhong Yuan <hslester96@gmail.com>
+Signed-off-by: Sean Young <sean@mess.org>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/pci/ttpci/budget-core.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
+index 97499b2af7144..20524376b83be 100644
+--- a/drivers/media/pci/ttpci/budget-core.c
++++ b/drivers/media/pci/ttpci/budget-core.c
+@@ -383,20 +383,25 @@ static int budget_register(struct budget *budget)
+ ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
+
+ if (ret < 0)
+- return ret;
++ goto err_release_dmx;
+
+ budget->mem_frontend.source = DMX_MEMORY_FE;
+ ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
+ if (ret < 0)
+- return ret;
++ goto err_release_dmx;
+
+ ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
+ if (ret < 0)
+- return ret;
++ goto err_release_dmx;
+
+ dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
+
+ return 0;
++
++err_release_dmx:
++ dvb_dmxdev_release(&budget->dmxdev);
++ dvb_dmx_release(&budget->demux);
++ return ret;
+ }
+
+ static void budget_unregister(struct budget *budget)
+--
+2.25.1
+
--- /dev/null
+From 6f38659c37b3bd5c623305d316ed7eef9508f746 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Jul 2020 11:02:23 +0200
+Subject: media: vpss: clean up resources in init
+
+From: Evgeny Novikov <novikov@ispras.ru>
+
+[ Upstream commit 9c487b0b0ea7ff22127fe99a7f67657d8730ff94 ]
+
+If platform_driver_register() fails within vpss_init(), resources are not
+cleaned up. The patch fixes this issue by introducing the corresponding
+error handling.
+
+Found by Linux Driver Verification project (linuxtesting.org).
+
+Signed-off-by: Evgeny Novikov <novikov@ispras.ru>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/davinci/vpss.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
+index 2ee4cd9e6d80f..d984f45c03149 100644
+--- a/drivers/media/platform/davinci/vpss.c
++++ b/drivers/media/platform/davinci/vpss.c
+@@ -514,19 +514,31 @@ static void vpss_exit(void)
+
+ static int __init vpss_init(void)
+ {
++ int ret;
++
+ if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
+ return -EBUSY;
+
+ oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+ if (unlikely(!oper_cfg.vpss_regs_base2)) {
+- release_mem_region(VPSS_CLK_CTRL, 4);
+- return -ENOMEM;
++ ret = -ENOMEM;
++ goto err_ioremap;
+ }
+
+ writel(VPSS_CLK_CTRL_VENCCLKEN |
+- VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
++ VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
++
++ ret = platform_driver_register(&vpss_driver);
++ if (ret)
++ goto err_pd_register;
++
++ return 0;
+
+- return platform_driver_register(&vpss_driver);
++err_pd_register:
++ iounmap(oper_cfg.vpss_regs_base2);
++err_ioremap:
++ release_mem_region(VPSS_CLK_CTRL, 4);
++ return ret;
+ }
+ subsys_initcall(vpss_init);
+ module_exit(vpss_exit);
+--
+2.25.1
+
--- /dev/null
+From 159304e9cf8813204f4caf6dac6cab3bf3c94819 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 20 Jun 2020 20:04:43 +0800
+Subject: rtc: goldfish: Enable interrupt in set_alarm() when necessary
+
+From: Huacai Chen <chenhc@lemote.com>
+
+[ Upstream commit 22f8d5a1bf230cf8567a4121fc3789babb46336d ]
+
+When using the goldfish rtc, the "hwclock" command fails with "select() to
+/dev/rtc to wait for clock tick timed out". This is because "hwclock"
+needs the set_alarm() hook to enable the interrupt when alrm->enabled is
+true. This operation is missing in the goldfish rtc (other rtc drivers,
+such as cmos rtc, enable the interrupt here), so add it.
+
+Signed-off-by: Huacai Chen <chenhc@lemote.com>
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Link: https://lore.kernel.org/r/1592654683-31314-1-git-send-email-chenhc@lemote.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/rtc/rtc-goldfish.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
+index a1c44d0c85578..30cbe22c57a8e 100644
+--- a/drivers/rtc/rtc-goldfish.c
++++ b/drivers/rtc/rtc-goldfish.c
+@@ -87,6 +87,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
+ rtc_alarm64 = rtc_alarm * NSEC_PER_SEC;
+ writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
+ writel(rtc_alarm64, base + TIMER_ALARM_LOW);
++ writel(1, base + TIMER_IRQ_ENABLED);
+ } else {
+ /*
+ * if this function was called with enabled=0
+--
+2.25.1
+
--- /dev/null
+From 3326160f8cfeca6345de9b7e4799a5c77b5e0e42 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Jul 2020 01:18:23 -0700
+Subject: scsi: libfc: Free skb in fc_disc_gpn_id_resp() for valid cases
+
+From: Javed Hasan <jhasan@marvell.com>
+
+[ Upstream commit ec007ef40abb6a164d148b0dc19789a7a2de2cc8 ]
+
+In fc_disc_gpn_id_resp(), the skb is supposed to get freed in all cases
+except for PTR_ERR. However, in some cases it didn't.
+
+This fix is to call fc_frame_free(fp) before the function returns.
+
+Link: https://lore.kernel.org/r/20200729081824.30996-2-jhasan@marvell.com
+Reviewed-by: Girish Basrur <gbasrur@marvell.com>
+Reviewed-by: Santosh Vernekar <svernekar@marvell.com>
+Reviewed-by: Saurav Kashyap <skashyap@marvell.com>
+Reviewed-by: Shyam Sundar <ssundar@marvell.com>
+Signed-off-by: Javed Hasan <jhasan@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/libfc/fc_disc.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
+index 28b50ab2fbb01..62f83cc151b22 100644
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -605,8 +605,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+
+ if (PTR_ERR(fp) == -FC_EX_CLOSED)
+ goto out;
+- if (IS_ERR(fp))
+- goto redisc;
++ if (IS_ERR(fp)) {
++ mutex_lock(&disc->disc_mutex);
++ fc_disc_restart(disc);
++ mutex_unlock(&disc->disc_mutex);
++ goto out;
++ }
+
+ cp = fc_frame_payload_get(fp, sizeof(*cp));
+ if (!cp)
+@@ -633,7 +637,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+ new_rdata->disc_id = disc->disc_id;
+ fc_rport_login(new_rdata);
+ }
+- goto out;
++ goto free_fp;
+ }
+ rdata->disc_id = disc->disc_id;
+ mutex_unlock(&rdata->rp_mutex);
+@@ -650,6 +654,8 @@ redisc:
+ fc_disc_restart(disc);
+ mutex_unlock(&disc->disc_mutex);
+ }
++free_fp:
++ fc_frame_free(fp);
+ out:
+ kref_put(&rdata->kref, fc_rport_destroy);
+ if (!IS_ERR(fp))
+--
+2.25.1
+
--- /dev/null
+From 3a4eff78f899fba9cb97e89124aa20878cb89bd0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Jun 2020 09:26:24 +0800
+Subject: scsi: ufs: Add DELAY_BEFORE_LPM quirk for Micron devices
+
+From: Stanley Chu <stanley.chu@mediatek.com>
+
+[ Upstream commit c0a18ee0ce78d7957ec1a53be35b1b3beba80668 ]
+
+It is confirmed that Micron devices need the DELAY_BEFORE_LPM quirk to have
+a delay before VCC is powered off. Add the Micron vendor ID and this quirk
+for Micron devices.
+
+Link: https://lore.kernel.org/r/20200612012625.6615-2-stanley.chu@mediatek.com
+Reviewed-by: Bean Huo <beanhuo@micron.com>
+Reviewed-by: Alim Akhtar <alim.akhtar@samsung.com>
+Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/ufs/ufs_quirks.h | 1 +
+ drivers/scsi/ufs/ufshcd.c | 2 ++
+ 2 files changed, 3 insertions(+)
+
+diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
+index 71f73d1d1ad1f..6c944fbefd40a 100644
+--- a/drivers/scsi/ufs/ufs_quirks.h
++++ b/drivers/scsi/ufs/ufs_quirks.h
+@@ -21,6 +21,7 @@
+ #define UFS_ANY_VENDOR 0xFFFF
+ #define UFS_ANY_MODEL "ANY_MODEL"
+
++#define UFS_VENDOR_MICRON 0x12C
+ #define UFS_VENDOR_TOSHIBA 0x198
+ #define UFS_VENDOR_SAMSUNG 0x1CE
+ #define UFS_VENDOR_SKHYNIX 0x1AD
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 1e2a97a10033b..11e917b44a0f1 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -189,6 +189,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+
+ static struct ufs_dev_fix ufs_fixups[] = {
+ /* UFS cards deviations table */
++ UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
++ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+ UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+ UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL, UFS_DEVICE_NO_VCCQ),
+--
+2.25.1
+
ext4-fix-checking-of-directory-entry-validity-for-inline-directories.patch
jbd2-add-the-missing-unlock_buffer-in-the-error-path-of-jbd2_write_superblock.patch
mm-memory.c-skip-spurious-tlb-flush-for-retried-page-fault.patch
+kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch
+spi-prevent-adding-devices-below-an-unregistering-co.patch
+scsi-ufs-add-delay_before_lpm-quirk-for-micron-devic.patch
+media-budget-core-improve-exception-handling-in-budg.patch
+rtc-goldfish-enable-interrupt-in-set_alarm-when-nece.patch
+media-vpss-clean-up-resources-in-init.patch
+input-psmouse-add-a-newline-when-printing-proto-by-s.patch
+m68knommu-fix-overwriting-of-bits-in-coldfire-v3-cac.patch
+xfs-fix-inode-quota-reservation-checks.patch
+jffs2-fix-uaf-problem.patch
+cpufreq-intel_pstate-fix-cpuinfo_max_freq-when-msr_t.patch
+scsi-libfc-free-skb-in-fc_disc_gpn_id_resp-for-valid.patch
+virtio_ring-avoid-loop-when-vq-is-broken-in-virtqueu.patch
+xfs-fix-ubsan-null-ptr-deref-in-xfs_sysfs_init.patch
+alpha-fix-annotation-of-io-read-write-16-32-be.patch
--- /dev/null
+From ac8b00ce2acb9e417cd34bde3cbc8c1a02433dab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Aug 2020 13:09:01 +0200
+Subject: spi: Prevent adding devices below an unregistering controller
+
+From: Lukas Wunner <lukas@wunner.de>
+
+[ Upstream commit ddf75be47ca748f8b12d28ac64d624354fddf189 ]
+
+CONFIG_OF_DYNAMIC and CONFIG_ACPI allow adding SPI devices at runtime
+using a DeviceTree overlay or DSDT patch. CONFIG_SPI_SLAVE allows the
+same via sysfs.
+
+But there are no precautions to prevent adding a device below a
+controller that's being removed. Such a device is unusable and may not
+even be able to unbind cleanly as it becomes inaccessible once the
+controller has been torn down. E.g. it is then impossible to quiesce
+the device's interrupt.
+
+of_spi_notify() and acpi_spi_notify() do hold a ref on the controller,
+but otherwise run lockless against spi_unregister_controller().
+
+Fix by holding the spi_add_lock in spi_unregister_controller() and
+bailing out of spi_add_device() if the controller has been unregistered
+concurrently.
+
+Fixes: ce79d54ae447 ("spi/of: Add OF notifier handler")
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Cc: stable@vger.kernel.org # v3.19+
+Cc: Geert Uytterhoeven <geert+renesas@glider.be>
+Cc: Octavian Purdila <octavian.purdila@intel.com>
+Cc: Pantelis Antoniou <pantelis.antoniou@konsulko.com>
+Link: https://lore.kernel.org/r/a8c3205088a969dc8410eec1eba9aface60f36af.1596451035.git.lukas@wunner.de
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/spi/Kconfig | 3 +++
+ drivers/spi/spi.c | 21 ++++++++++++++++++++-
+ 2 files changed, 23 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
+index a75f2a2cf7805..4b6a1629969f3 100644
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -827,4 +827,7 @@ config SPI_SLAVE_SYSTEM_CONTROL
+
+ endif # SPI_SLAVE
+
++config SPI_DYNAMIC
++ def_bool ACPI || OF_DYNAMIC || SPI_SLAVE
++
+ endif # SPI
+diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
+index 49eee894f51d4..ab6a4f85bcde7 100644
+--- a/drivers/spi/spi.c
++++ b/drivers/spi/spi.c
+@@ -428,6 +428,12 @@ static LIST_HEAD(spi_controller_list);
+ */
+ static DEFINE_MUTEX(board_lock);
+
++/*
++ * Prevents addition of devices with same chip select and
++ * addition of devices below an unregistering controller.
++ */
++static DEFINE_MUTEX(spi_add_lock);
++
+ /**
+ * spi_alloc_device - Allocate a new SPI device
+ * @ctlr: Controller to which device is connected
+@@ -506,7 +512,6 @@ static int spi_dev_check(struct device *dev, void *data)
+ */
+ int spi_add_device(struct spi_device *spi)
+ {
+- static DEFINE_MUTEX(spi_add_lock);
+ struct spi_controller *ctlr = spi->controller;
+ struct device *dev = ctlr->dev.parent;
+ int status;
+@@ -534,6 +539,13 @@ int spi_add_device(struct spi_device *spi)
+ goto done;
+ }
+
++ /* Controller may unregister concurrently */
++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC) &&
++ !device_is_registered(&ctlr->dev)) {
++ status = -ENODEV;
++ goto done;
++ }
++
+ if (ctlr->cs_gpios)
+ spi->cs_gpio = ctlr->cs_gpios[spi->chip_select];
+
+@@ -2265,6 +2277,10 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ struct spi_controller *found;
+ int id = ctlr->bus_num;
+
++ /* Prevent addition of new devices, unregister existing ones */
++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
++ mutex_lock(&spi_add_lock);
++
+ device_for_each_child(&ctlr->dev, NULL, __unregister);
+
+ /* First make sure that this controller was ever added */
+@@ -2285,6 +2301,9 @@ void spi_unregister_controller(struct spi_controller *ctlr)
+ if (found == ctlr)
+ idr_remove(&spi_master_idr, id);
+ mutex_unlock(&board_lock);
++
++ if (IS_ENABLED(CONFIG_SPI_DYNAMIC))
++ mutex_unlock(&spi_add_lock);
+ }
+ EXPORT_SYMBOL_GPL(spi_unregister_controller);
+
+--
+2.25.1
+
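In outline (toy user-space model, invented names; only the locking idea
matches the patch), device addition and controller unregistration now
serialize on one mutex, and addition re-checks the controller's registered
state once it holds that lock.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* One lock protects both "is the controller still registered?" and the
     * act of adding a device under it. */
    static pthread_mutex_t add_lock = PTHREAD_MUTEX_INITIALIZER;
    static bool ctlr_registered = true;

    static int add_device(const char *name)
    {
            int ret = 0;

            pthread_mutex_lock(&add_lock);
            if (!ctlr_registered)
                    ret = -19;                 /* -ENODEV: controller went away */
            else
                    printf("added %s\n", name);
            pthread_mutex_unlock(&add_lock);
            return ret;
    }

    static void unregister_controller(void)
    {
            pthread_mutex_lock(&add_lock);
            ctlr_registered = false;   /* no new children past this point */
            /* ... unregister existing children while holding the lock ... */
            pthread_mutex_unlock(&add_lock);
    }

    int main(void)
    {
            add_device("spi0.0");
            unregister_controller();
            printf("late add returns %d\n", add_device("spi0.1"));
            return 0;
    }
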
--- /dev/null
+From 921c5740080f0a0482d59c48e4b8180c7e51d408 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Aug 2020 15:44:09 +0800
+Subject: virtio_ring: Avoid loop when vq is broken in virtqueue_poll
+
+From: Mao Wenan <wenan.mao@linux.alibaba.com>
+
+[ Upstream commit 481a0d7422db26fb63e2d64f0652667a5c6d0f3e ]
+
+The loop can occur if vq->broken is true: virtqueue_get_buf_ctx_packed()
+or virtqueue_get_buf_ctx_split() will return NULL, so virtnet_poll() will
+reschedule napi to receive packets, which drives cpu usage (si) up to 100%.
+
+call trace as below:
+virtnet_poll
+ virtnet_receive
+ virtqueue_get_buf_ctx
+ virtqueue_get_buf_ctx_packed
+ virtqueue_get_buf_ctx_split
+ virtqueue_napi_complete
+ virtqueue_poll //return true
+ virtqueue_napi_schedule //it will reschedule napi
+
+To fix this, return false in virtqueue_poll() if the vq is broken.
+
+Signed-off-by: Mao Wenan <wenan.mao@linux.alibaba.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Link: https://lore.kernel.org/r/1596354249-96204-1-git-send-email-wenan.mao@linux.alibaba.com
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/virtio/virtio_ring.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index b82bb0b081615..51278f8bd3ab3 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -829,6 +829,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+ {
+ struct vring_virtqueue *vq = to_vvq(_vq);
+
++ if (unlikely(vq->broken))
++ return false;
++
+ virtio_mb(vq->weak_barriers);
+ return (u16)last_used_idx != virtio16_to_cpu(_vq->vdev, vq->vring.used->idx);
+ }
+--
+2.25.1
+
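As a toy model of the spin (invented names, not the virtio code): if poll
keeps reporting "more work" for a broken queue that can never deliver a
buffer, the napi caller respins forever, so a broken queue must report that
nothing is pending.

    #include <stdbool.h>
    #include <stdio.h>

    struct vq {
            bool broken;
            int pending;
    };

    static bool vq_poll(const struct vq *q)
    {
            if (q->broken)
                    return false;   /* the fix: a broken queue delivers nothing */
            return q->pending > 0;
    }

    int main(void)
    {
            struct vq q = { .broken = true, .pending = 1 };
            int spins = 0;

            while (vq_poll(&q) && spins < 5)   /* bounded here; unbounded in the bug */
                    spins++;
            printf("respun %d times\n", spins);
            return 0;
    }
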
--- /dev/null
+From 2a23ea0369b5d62806bcc330ca8ed84efb4be4a4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Jul 2020 10:36:09 -0700
+Subject: xfs: fix inode quota reservation checks
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit f959b5d037e71a4d69b5bf71faffa065d9269b4a ]
+
+xfs_trans_dqresv is the function that we use to make reservations
+against resource quotas. Each resource contains two counters: the
+q_core counter, which tracks resources allocated on disk; and the dquot
+reservation counter, which tracks how much of that resource has either
+been allocated or reserved by threads that are working on metadata
+updates.
+
+For disk blocks, we compare the proposed reservation counter against the
+hard and soft limits to decide if we're going to fail the operation.
+However, for inodes we inexplicably compare against the q_core counter,
+not the incore reservation count.
+
+Since the q_core counter is always lower than the reservation count and
+we unlock the dquot between reservation and transaction commit, this
+means that multiple threads can reserve the last inode count before we
+hit the hard limit, and when they commit, we'll be well over the hard
+limit.
+
+Fix this by checking against the incore inode reservation counter, since
+we would appear to maintain that correctly (and that's what we report in
+GETQUOTA).
+
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Allison Collins <allison.henderson@oracle.com>
+Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/xfs_trans_dquot.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
+index c3d547211d160..9c42e50a5cb7e 100644
+--- a/fs/xfs/xfs_trans_dquot.c
++++ b/fs/xfs/xfs_trans_dquot.c
+@@ -669,7 +669,7 @@ xfs_trans_dqresv(
+ }
+ }
+ if (ninos > 0) {
+- total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
++ total_count = dqp->q_res_icount + ninos;
+ timer = be32_to_cpu(dqp->q_core.d_itimer);
+ warns = be16_to_cpu(dqp->q_core.d_iwarns);
+ warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
+--
+2.25.1
+
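A worked toy example of the window described above (made-up numbers): with a
hard limit of 100 inodes, 99 committed on disk and one more already reserved
in-core by another transaction, checking against the on-disk count still
admits a new reservation, while checking the in-core count correctly refuses
it.

    #include <stdio.h>

    int main(void)
    {
            long hard_limit = 100;
            long on_disk_icount = 99;    /* q_core.d_icount: committed on disk        */
            long incore_icount = 100;    /* q_res_icount: disk + pending reservations */
            long ninos = 1;

            printf("check against q_core admits request:   %d\n",
                   on_disk_icount + ninos <= hard_limit);   /* 1: over-commit allowed */
            printf("check against in-core count admits it: %d\n",
                   incore_icount + ninos <= hard_limit);    /* 0: correctly refused   */
            return 0;
    }
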
--- /dev/null
+From e6b3b87828e8b6f2e01c7337896a2d2eb64fa88c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Aug 2020 15:18:48 -0700
+Subject: xfs: Fix UBSAN null-ptr-deref in xfs_sysfs_init
+
+From: Eiichi Tsukata <devel@etsukata.com>
+
+[ Upstream commit 96cf2a2c75567ff56195fe3126d497a2e7e4379f ]
+
+If xfs_sysfs_init is called with parent_kobj == NULL, UBSAN
+shows the following warning:
+
+ UBSAN: null-ptr-deref in ./fs/xfs/xfs_sysfs.h:37:23
+ member access within null pointer of type 'struct xfs_kobj'
+ Call Trace:
+ dump_stack+0x10e/0x195
+ ubsan_type_mismatch_common+0x241/0x280
+ __ubsan_handle_type_mismatch_v1+0x32/0x40
+ init_xfs_fs+0x12b/0x28f
+ do_one_initcall+0xdd/0x1d0
+ do_initcall_level+0x151/0x1b6
+ do_initcalls+0x50/0x8f
+ do_basic_setup+0x29/0x2b
+ kernel_init_freeable+0x19f/0x20b
+ kernel_init+0x11/0x1e0
+ ret_from_fork+0x22/0x30
+
+Fix it by checking parent_kobj before the code accesses its member.
+
+Signed-off-by: Eiichi Tsukata <devel@etsukata.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+[darrick: minor whitespace edits]
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/xfs_sysfs.h | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
+index d04637181ef21..980c9429abec5 100644
+--- a/fs/xfs/xfs_sysfs.h
++++ b/fs/xfs/xfs_sysfs.h
+@@ -44,9 +44,11 @@ xfs_sysfs_init(
+ struct xfs_kobj *parent_kobj,
+ const char *name)
+ {
++ struct kobject *parent;
++
++ parent = parent_kobj ? &parent_kobj->kobject : NULL;
+ init_completion(&kobj->complete);
+- return kobject_init_and_add(&kobj->kobject, ktype,
+- &parent_kobj->kobject, "%s", name);
++ return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
+ }
+
+ static inline void
+--
+2.25.1
+
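For completeness, a tiny sketch (invented struct, not the XFS code) of the
undefined behaviour UBSAN flags: even though &parent_kobj->kobject is never
dereferenced by the caller, forming the address already performs a member
access on a null pointer, so the fix computes the parent only when the
wrapper is non-NULL.

    #include <stddef.h>
    #include <stdio.h>

    struct xfs_kobj {
            int kobject;   /* toy stand-in for the embedded kobject */
    };

    static void *parent_of(struct xfs_kobj *parent_kobj)
    {
            /* Guard before taking the address of a member, instead of
             * evaluating &parent_kobj->kobject with parent_kobj == NULL. */
            return parent_kobj ? (void *)&parent_kobj->kobject : NULL;
    }

    int main(void)
    {
            printf("%p\n", parent_of(NULL));   /* safely NULL, no UBSAN report */
            return 0;
    }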