Fixes for 5.7
author Sasha Levin <sashal@kernel.org>
Sun, 23 Aug 2020 21:41:32 +0000 (17:41 -0400)
committer Sasha Levin <sashal@kernel.org>
Sun, 23 Aug 2020 21:41:32 +0000 (17:41 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
33 files changed:
queue-5.7/alpha-fix-annotation-of-io-read-write-16-32-be.patch [new file with mode: 0644]
queue-5.7/ceph-fix-use-after-free-for-fsc-mdsc.patch [new file with mode: 0644]
queue-5.7/cpufreq-intel_pstate-fix-cpuinfo_max_freq-when-msr_t.patch [new file with mode: 0644]
queue-5.7/drm-ttm-fix-offset-in-vmas-with-a-pg_offs-in-ttm_bo_.patch [new file with mode: 0644]
queue-5.7/f2fs-fix-to-check-page-dirty-status-before-writeback.patch [new file with mode: 0644]
queue-5.7/fs-signalfd.c-fix-inconsistent-return-codes-for-sign.patch [new file with mode: 0644]
queue-5.7/input-psmouse-add-a-newline-when-printing-proto-by-s.patch [new file with mode: 0644]
queue-5.7/io-wq-add-an-option-to-cancel-all-matched-reqs.patch [new file with mode: 0644]
queue-5.7/io-wq-reorder-cancellation-pending-running.patch [new file with mode: 0644]
queue-5.7/io_uring-cancel-all-task-s-requests-on-exit.patch [new file with mode: 0644]
queue-5.7/io_uring-find-and-cancel-head-link-async-work-on-fil.patch [new file with mode: 0644]
queue-5.7/jffs2-fix-uaf-problem.patch [new file with mode: 0644]
queue-5.7/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch [new file with mode: 0644]
queue-5.7/m68knommu-fix-overwriting-of-bits-in-coldfire-v3-cac.patch [new file with mode: 0644]
queue-5.7/media-budget-core-improve-exception-handling-in-budg.patch [new file with mode: 0644]
queue-5.7/media-camss-fix-memory-leaks-on-error-handling-paths.patch [new file with mode: 0644]
queue-5.7/media-vpss-clean-up-resources-in-init.patch [new file with mode: 0644]
queue-5.7/mips-fix-unable-to-reserve-memory-for-crash-kernel.patch [new file with mode: 0644]
queue-5.7/opp-enable-resources-again-if-they-were-disabled-ear.patch [new file with mode: 0644]
queue-5.7/opp-put-opp-table-in-dev_pm_opp_set_rate-for-empty-t.patch [new file with mode: 0644]
queue-5.7/opp-reorder-the-code-for-target_freq-case.patch [new file with mode: 0644]
queue-5.7/riscv-fixup-static_obj-fail.patch [new file with mode: 0644]
queue-5.7/rtc-goldfish-enable-interrupt-in-set_alarm-when-nece.patch [new file with mode: 0644]
queue-5.7/scsi-libfc-free-skb-in-fc_disc_gpn_id_resp-for-valid.patch [new file with mode: 0644]
queue-5.7/scsi-target-tcmu-fix-crash-in-tcmu_flush_dcache_rang.patch [new file with mode: 0644]
queue-5.7/scsi-ufs-add-delay_before_lpm-quirk-for-micron-devic.patch [new file with mode: 0644]
queue-5.7/series
queue-5.7/svcrdma-fix-another-receive-buffer-leak.patch [new file with mode: 0644]
queue-5.7/swiotlb-xen-use-vmalloc_to_page-on-vmalloc-virt-addr.patch [new file with mode: 0644]
queue-5.7/tools-testing-selftests-cgroup-cgroup_util.c-cg_read.patch [new file with mode: 0644]
queue-5.7/virtio_ring-avoid-loop-when-vq-is-broken-in-virtqueu.patch [new file with mode: 0644]
queue-5.7/xfs-fix-inode-quota-reservation-checks.patch [new file with mode: 0644]
queue-5.7/xfs-fix-ubsan-null-ptr-deref-in-xfs_sysfs_init.patch [new file with mode: 0644]

diff --git a/queue-5.7/alpha-fix-annotation-of-io-read-write-16-32-be.patch b/queue-5.7/alpha-fix-annotation-of-io-read-write-16-32-be.patch
new file mode 100644 (file)
index 0000000..ff3a606
--- /dev/null
@@ -0,0 +1,57 @@
+From e1b072ee7aada9de636338bfb8ef2515fc52a734 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Aug 2020 18:33:54 -0700
+Subject: alpha: fix annotation of io{read,write}{16,32}be()
+
+From: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
+
+[ Upstream commit bd72866b8da499e60633ff28f8a4f6e09ca78efe ]
+
+These accessors must be used to read/write a big-endian bus.  The value
+returned or written is native-endian.
+
+However, these accessors are defined using be{16,32}_to_cpu() or
+cpu_to_be{16,32}() to make the endian conversion but these expect a
+__be{16,32} when none is present.  Keeping them would need a force cast
+that would solve nothing at all.
+
+So, do the conversion using swab{16,32}, like done in asm-generic for
+similar situations.
+
+Reported-by: kernel test robot <lkp@intel.com>
+Signed-off-by: Luc Van Oostenryck <luc.vanoostenryck@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Richard Henderson <rth@twiddle.net>
+Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
+Cc: Matt Turner <mattst88@gmail.com>
+Cc: Stephen Boyd <sboyd@kernel.org>
+Cc: Arnd Bergmann <arnd@arndb.de>
+Link: http://lkml.kernel.org/r/20200622114232.80039-1-luc.vanoostenryck@gmail.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/alpha/include/asm/io.h | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/arch/alpha/include/asm/io.h b/arch/alpha/include/asm/io.h
+index e6225cf40de57..b09dd6bc98a12 100644
+--- a/arch/alpha/include/asm/io.h
++++ b/arch/alpha/include/asm/io.h
+@@ -490,10 +490,10 @@ extern inline void writeq(u64 b, volatile void __iomem *addr)
+ }
+ #endif
+-#define ioread16be(p) be16_to_cpu(ioread16(p))
+-#define ioread32be(p) be32_to_cpu(ioread32(p))
+-#define iowrite16be(v,p) iowrite16(cpu_to_be16(v), (p))
+-#define iowrite32be(v,p) iowrite32(cpu_to_be32(v), (p))
++#define ioread16be(p) swab16(ioread16(p))
++#define ioread32be(p) swab32(ioread32(p))
++#define iowrite16be(v,p) iowrite16(swab16(v), (p))
++#define iowrite32be(v,p) iowrite32(swab32(v), (p))
+ #define inb_p         inb
+ #define inw_p         inw
+-- 
+2.25.1
+
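
A side note on the change above: Alpha is little-endian, so be16_to_cpu()/be32_to_cpu() and swab16()/swab32() perform the same byte swap there; the patch only changes which helper (and thus which sparse annotation) is used. A minimal userspace sketch of that swap, with a hand-rolled swap16() standing in for the kernel helper and a purely illustrative value:

#include <stdint.h>
#include <stdio.h>

/* Hand-rolled equivalent of the kernel's swab16(); illustrative only. */
static uint16_t swap16(uint16_t v)
{
        return (uint16_t)((v << 8) | (v >> 8));
}

int main(void)
{
        uint16_t bus_value = 0x1234;  /* 16-bit value as it appears on a big-endian bus */

        /* On a little-endian host, the native-endian view is the byte-swapped value. */
        printf("native-endian view: 0x%04x\n", swap16(bus_value));  /* prints 0x3412 */
        return 0;
}
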
diff --git a/queue-5.7/ceph-fix-use-after-free-for-fsc-mdsc.patch b/queue-5.7/ceph-fix-use-after-free-for-fsc-mdsc.patch
new file mode 100644 (file)
index 0000000..c6528e3
--- /dev/null
@@ -0,0 +1,44 @@
+From d169282a0650b9aa8e4ac12baa78d1eebb3203f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jul 2020 15:32:25 +0800
+Subject: ceph: fix use-after-free for fsc->mdsc
+
+From: Xiubo Li <xiubli@redhat.com>
+
+[ Upstream commit a7caa88f8b72c136f9a401f498471b8a8e35370d ]
+
+If ceph_mdsc_init() fails, it already frees the mdsc, leaving fsc->mdsc dangling.
+
+Reported-by: syzbot+b57f46d8d6ea51960b8c@syzkaller.appspotmail.com
+Signed-off-by: Xiubo Li <xiubli@redhat.com>
+Reviewed-by: Jeff Layton <jlayton@kernel.org>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ceph/mds_client.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
+index 95272ae36b058..e32935b68d0a4 100644
+--- a/fs/ceph/mds_client.c
++++ b/fs/ceph/mds_client.c
+@@ -4337,7 +4337,6 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
+               return -ENOMEM;
+       }
+-      fsc->mdsc = mdsc;
+       init_completion(&mdsc->safe_umount_waiters);
+       init_waitqueue_head(&mdsc->session_close_wq);
+       INIT_LIST_HEAD(&mdsc->waiting_for_map);
+@@ -4390,6 +4389,8 @@ int ceph_mdsc_init(struct ceph_fs_client *fsc)
+       strscpy(mdsc->nodename, utsname()->nodename,
+               sizeof(mdsc->nodename));
++
++      fsc->mdsc = mdsc;
+       return 0;
+ }
+-- 
+2.25.1
+
diff --git a/queue-5.7/cpufreq-intel_pstate-fix-cpuinfo_max_freq-when-msr_t.patch b/queue-5.7/cpufreq-intel_pstate-fix-cpuinfo_max_freq-when-msr_t.patch
new file mode 100644 (file)
index 0000000..56e0d5f
--- /dev/null
@@ -0,0 +1,48 @@
+From fdc48889fa22620bac000521b011ea1ac3aa8f12 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 3 Aug 2020 11:37:20 -0700
+Subject: cpufreq: intel_pstate: Fix cpuinfo_max_freq when
+ MSR_TURBO_RATIO_LIMIT is 0
+
+From: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+
+[ Upstream commit 4daca379c703ff55edc065e8e5173dcfeecf0148 ]
+
+The MSR_TURBO_RATIO_LIMIT can be 0. This is not an error: a user can update
+this MSR via BIOS settings on some systems or with msr tools, and some
+systems simply boot with the value 0.
+
+This results in cpufreq/cpuinfo_max_freq being displayed wrongly: the value
+will be equal to cpufreq/base_frequency, even though turbo is enabled.
+
+But the platform will still function normally in HWP mode, as we get the max
+1-core frequency from MSR_HWP_CAPABILITIES. This MSR is already used
+to calculate cpu->pstate.turbo_freq, which is used to set
+policy->cpuinfo.max_freq. But in some other places cpu->pstate.turbo_pstate
+is used, for example to set policy->max.
+
+To fix this, also update cpu->pstate.turbo_pstate when updating
+cpu->pstate.turbo_freq.
+
+Signed-off-by: Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/cpufreq/intel_pstate.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
+index 4d3429b2058fc..8c4d86032c7a3 100644
+--- a/drivers/cpufreq/intel_pstate.c
++++ b/drivers/cpufreq/intel_pstate.c
+@@ -1572,6 +1572,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
+               intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+               cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
++              cpu->pstate.turbo_pstate = phy_max;
+       } else {
+               cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+       }
+-- 
+2.25.1
+
diff --git a/queue-5.7/drm-ttm-fix-offset-in-vmas-with-a-pg_offs-in-ttm_bo_.patch b/queue-5.7/drm-ttm-fix-offset-in-vmas-with-a-pg_offs-in-ttm_bo_.patch
new file mode 100644 (file)
index 0000000..808fc2c
--- /dev/null
@@ -0,0 +1,44 @@
+From c9ed7e271a0676537a3a84d17d893cc172d3ffb3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 28 Jul 2020 14:27:04 -0400
+Subject: drm/ttm: fix offset in VMAs with a pg_offs in ttm_bo_vm_access
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Felix Kuehling <Felix.Kuehling@amd.com>
+
+[ Upstream commit c0001213d195d1bac83e0744c06ff06dd5a8ba53 ]
+
+VMAs with a pg_offs that's offset from the start of the vma_node need
+to adjust the offset within the BO accordingly. This matches the
+offset calculation in ttm_bo_vm_fault_reserved.
+
+Signed-off-by: Felix Kuehling <Felix.Kuehling@amd.com>
+Tested-by: Laurent Morichetti <laurent.morichetti@amd.com>
+Signed-off-by: Christian König <christian.koenig@amd.com>
+Link: https://patchwork.freedesktop.org/patch/381169/
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/ttm/ttm_bo_vm.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+index 72100b84c7a90..b08fdfa4291b2 100644
+--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
+@@ -505,8 +505,10 @@ static int ttm_bo_vm_access_kmap(struct ttm_buffer_object *bo,
+ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr,
+                    void *buf, int len, int write)
+ {
+-      unsigned long offset = (addr) - vma->vm_start;
+       struct ttm_buffer_object *bo = vma->vm_private_data;
++      unsigned long offset = (addr) - vma->vm_start +
++              ((vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node))
++               << PAGE_SHIFT);
+       int ret;
+       if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->num_pages)
+-- 
+2.25.1
+
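
To make the corrected arithmetic concrete, here is a small standalone sketch with hypothetical numbers (none of them come from the patch): the VMA's vm_pgoff starts 0x10 pages past the start of the BO's vma_node, so an access 0x3000 bytes into the VMA must map to offset 0x13000 within the BO.

#include <stdio.h>

int main(void)
{
        /* Hypothetical numbers, purely to illustrate the corrected calculation. */
        unsigned long vm_start   = 0x7f0000000000UL;
        unsigned long addr       = vm_start + 0x3000;  /* address being accessed       */
        unsigned long vm_pgoff   = 0x110;              /* VMA begins 0x10 pages past...*/
        unsigned long node_start = 0x100;              /* ...the BO's vma_node start   */
        unsigned long page_shift = 12;

        unsigned long old_offset = addr - vm_start;                        /* 0x3000  */
        unsigned long new_offset = old_offset +
                        ((vm_pgoff - node_start) << page_shift);           /* 0x13000 */

        printf("old offset %#lx, fixed offset %#lx\n", old_offset, new_offset);
        return 0;
}
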
diff --git a/queue-5.7/f2fs-fix-to-check-page-dirty-status-before-writeback.patch b/queue-5.7/f2fs-fix-to-check-page-dirty-status-before-writeback.patch
new file mode 100644 (file)
index 0000000..871b218
--- /dev/null
@@ -0,0 +1,40 @@
+From 6d702c8d2851055ec067f5fe560644d9e77f57e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Jun 2020 17:14:19 +0800
+Subject: f2fs: fix to check page dirty status before writeback
+
+From: Chao Yu <yuchao0@huawei.com>
+
+[ Upstream commit eb1353cfa9c1e9415b03dc117f8399969fa02102 ]
+
+In f2fs_write_raw_pages(), we need to check page dirty status before
+writeback, because a racer (e.g. the reclaimer) could have already
+written back the dirty page.
+
+Signed-off-by: Chao Yu <yuchao0@huawei.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/compress.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
+index 527d50edcb956..b397121dfa107 100644
+--- a/fs/f2fs/compress.c
++++ b/fs/f2fs/compress.c
+@@ -1207,6 +1207,12 @@ retry_write:
+                               congestion_wait(BLK_RW_ASYNC,
+                                               DEFAULT_IO_TIMEOUT);
+                               lock_page(cc->rpages[i]);
++
++                              if (!PageDirty(cc->rpages[i])) {
++                                      unlock_page(cc->rpages[i]);
++                                      continue;
++                              }
++
+                               clear_page_dirty_for_io(cc->rpages[i]);
+                               goto retry_write;
+                       }
+-- 
+2.25.1
+
diff --git a/queue-5.7/fs-signalfd.c-fix-inconsistent-return-codes-for-sign.patch b/queue-5.7/fs-signalfd.c-fix-inconsistent-return-codes-for-sign.patch
new file mode 100644 (file)
index 0000000..8af5677
--- /dev/null
@@ -0,0 +1,60 @@
+From d4aa69cf1b98a7420a837b858f8fa238c30aa38a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Aug 2020 18:36:04 -0700
+Subject: fs/signalfd.c: fix inconsistent return codes for signalfd4
+
+From: Helge Deller <deller@gmx.de>
+
+[ Upstream commit a089e3fd5a82aea20f3d9ec4caa5f4c65cc2cfcc ]
+
+The kernel signalfd4() syscall returns different error codes when called
+either in compat or native mode.  This behaviour makes correct emulation
+in qemu and testing programs like LTP more complicated.
+
+Fix the code to always return, in both modes, EFAULT for inaccessible
+user memory, and EINVAL when called with an invalid signal mask.
+
+Signed-off-by: Helge Deller <deller@gmx.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Alexander Viro <viro@zeniv.linux.org.uk>
+Cc: Laurent Vivier <laurent@vivier.eu>
+Link: http://lkml.kernel.org/r/20200530100707.GA10159@ls3530.fritz.box
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/signalfd.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/fs/signalfd.c b/fs/signalfd.c
+index 44b6845b071c3..5b78719be4455 100644
+--- a/fs/signalfd.c
++++ b/fs/signalfd.c
+@@ -314,9 +314,10 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
+ {
+       sigset_t mask;
+-      if (sizemask != sizeof(sigset_t) ||
+-          copy_from_user(&mask, user_mask, sizeof(mask)))
++      if (sizemask != sizeof(sigset_t))
+               return -EINVAL;
++      if (copy_from_user(&mask, user_mask, sizeof(mask)))
++              return -EFAULT;
+       return do_signalfd4(ufd, &mask, flags);
+ }
+@@ -325,9 +326,10 @@ SYSCALL_DEFINE3(signalfd, int, ufd, sigset_t __user *, user_mask,
+ {
+       sigset_t mask;
+-      if (sizemask != sizeof(sigset_t) ||
+-          copy_from_user(&mask, user_mask, sizeof(mask)))
++      if (sizemask != sizeof(sigset_t))
+               return -EINVAL;
++      if (copy_from_user(&mask, user_mask, sizeof(mask)))
++              return -EFAULT;
+       return do_signalfd4(ufd, &mask, 0);
+ }
+-- 
+2.25.1
+
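
The difference is observable from userspace through the raw syscall (the glibc signalfd() wrapper always passes the expected size). A hedged sketch, assuming a 64-bit Linux host where the kernel-side sigset size is 8 bytes:

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        sigset_t mask;

        sigemptyset(&mask);
        sigaddset(&mask, SIGUSR1);

        /* Wrong sizemask: EINVAL in both native and compat mode. */
        errno = 0;
        long fd = syscall(SYS_signalfd4, -1, &mask, (size_t)4, 0);
        printf("bad size:    ret=%ld (%s)\n", fd, strerror(errno));

        /* Unreadable user pointer with a valid size (8 assumed): EFAULT after this fix. */
        errno = 0;
        fd = syscall(SYS_signalfd4, -1, (void *)1, (size_t)8, 0);
        printf("bad pointer: ret=%ld (%s)\n", fd, strerror(errno));
        return 0;
}
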
diff --git a/queue-5.7/input-psmouse-add-a-newline-when-printing-proto-by-s.patch b/queue-5.7/input-psmouse-add-a-newline-when-printing-proto-by-s.patch
new file mode 100644 (file)
index 0000000..c1f0a05
--- /dev/null
@@ -0,0 +1,39 @@
+From 5276e152772216fc32028499dad2c7c02a8fc9f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 21 Jul 2020 22:24:07 -0700
+Subject: Input: psmouse - add a newline when printing 'proto' by sysfs
+
+From: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+
+[ Upstream commit 4aec14de3a15cf9789a0e19c847f164776f49473 ]
+
+When I cat the 'proto' parameter via sysfs, it displays as follows. It's
+better to add a newline for easier reading.
+
+root@syzkaller:~# cat /sys/module/psmouse/parameters/proto
+autoroot@syzkaller:~#
+
+Signed-off-by: Xiongfeng Wang <wangxiongfeng2@huawei.com>
+Link: https://lore.kernel.org/r/20200720073846.120724-1-wangxiongfeng2@huawei.com
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/input/mouse/psmouse-base.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
+index 527ae0b9a191e..0b4a3039f312f 100644
+--- a/drivers/input/mouse/psmouse-base.c
++++ b/drivers/input/mouse/psmouse-base.c
+@@ -2042,7 +2042,7 @@ static int psmouse_get_maxproto(char *buffer, const struct kernel_param *kp)
+ {
+       int type = *((unsigned int *)kp->arg);
+-      return sprintf(buffer, "%s", psmouse_protocol_by_type(type)->name);
++      return sprintf(buffer, "%s\n", psmouse_protocol_by_type(type)->name);
+ }
+ static int __init psmouse_init(void)
+-- 
+2.25.1
+
diff --git a/queue-5.7/io-wq-add-an-option-to-cancel-all-matched-reqs.patch b/queue-5.7/io-wq-add-an-option-to-cancel-all-matched-reqs.patch
new file mode 100644 (file)
index 0000000..d9ade1c
--- /dev/null
@@ -0,0 +1,193 @@
+From b5f8cdbfa9e5c74ddfd54fb167f634ae88a3f18d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 10:24:03 +0300
+Subject: io-wq: add an option to cancel all matched reqs
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 4f26bda1522c35d2701fc219368c7101c17005c1 ]
+
+This adds support for cancelling all io-wq works matching a predicate.
+It isn't used yet, so no change in observable behaviour.
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io-wq.c    | 60 +++++++++++++++++++++++++++++----------------------
+ fs/io-wq.h    |  2 +-
+ fs/io_uring.c |  2 +-
+ 3 files changed, 36 insertions(+), 28 deletions(-)
+
+diff --git a/fs/io-wq.c b/fs/io-wq.c
+index 3283f8c5b5a18..6d2e8ccc229e3 100644
+--- a/fs/io-wq.c
++++ b/fs/io-wq.c
+@@ -907,13 +907,15 @@ void io_wq_cancel_all(struct io_wq *wq)
+ struct io_cb_cancel_data {
+       work_cancel_fn *fn;
+       void *data;
++      int nr_running;
++      int nr_pending;
++      bool cancel_all;
+ };
+ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
+ {
+       struct io_cb_cancel_data *match = data;
+       unsigned long flags;
+-      bool ret = false;
+       /*
+        * Hold the lock to avoid ->cur_work going out of scope, caller
+@@ -924,55 +926,55 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
+           !(worker->cur_work->flags & IO_WQ_WORK_NO_CANCEL) &&
+           match->fn(worker->cur_work, match->data)) {
+               send_sig(SIGINT, worker->task, 1);
+-              ret = true;
++              match->nr_running++;
+       }
+       spin_unlock_irqrestore(&worker->lock, flags);
+-      return ret;
++      return match->nr_running && !match->cancel_all;
+ }
+-static bool io_wqe_cancel_pending_work(struct io_wqe *wqe,
++static void io_wqe_cancel_pending_work(struct io_wqe *wqe,
+                                      struct io_cb_cancel_data *match)
+ {
+       struct io_wq_work_node *node, *prev;
+       struct io_wq_work *work;
+       unsigned long flags;
+-      bool found = false;
++retry:
+       spin_lock_irqsave(&wqe->lock, flags);
+       wq_list_for_each(node, prev, &wqe->work_list) {
+               work = container_of(node, struct io_wq_work, list);
++              if (!match->fn(work, match->data))
++                      continue;
+-              if (match->fn(work, match->data)) {
+-                      wq_list_del(&wqe->work_list, node, prev);
+-                      found = true;
+-                      break;
+-              }
++              wq_list_del(&wqe->work_list, node, prev);
++              spin_unlock_irqrestore(&wqe->lock, flags);
++              io_run_cancel(work, wqe);
++              match->nr_pending++;
++              if (!match->cancel_all)
++                      return;
++
++              /* not safe to continue after unlock */
++              goto retry;
+       }
+       spin_unlock_irqrestore(&wqe->lock, flags);
+-
+-      if (found)
+-              io_run_cancel(work, wqe);
+-      return found;
+ }
+-static bool io_wqe_cancel_running_work(struct io_wqe *wqe,
++static void io_wqe_cancel_running_work(struct io_wqe *wqe,
+                                      struct io_cb_cancel_data *match)
+ {
+-      bool found;
+-
+       rcu_read_lock();
+-      found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
++      io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
+       rcu_read_unlock();
+-      return found;
+ }
+ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+-                                void *data)
++                                void *data, bool cancel_all)
+ {
+       struct io_cb_cancel_data match = {
+-              .fn     = cancel,
+-              .data   = data,
++              .fn             = cancel,
++              .data           = data,
++              .cancel_all     = cancel_all,
+       };
+       int node;
+@@ -984,7 +986,8 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+       for_each_node(node) {
+               struct io_wqe *wqe = wq->wqes[node];
+-              if (io_wqe_cancel_pending_work(wqe, &match))
++              io_wqe_cancel_pending_work(wqe, &match);
++              if (match.nr_pending && !match.cancel_all)
+                       return IO_WQ_CANCEL_OK;
+       }
+@@ -997,10 +1000,15 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+       for_each_node(node) {
+               struct io_wqe *wqe = wq->wqes[node];
+-              if (io_wqe_cancel_running_work(wqe, &match))
++              io_wqe_cancel_running_work(wqe, &match);
++              if (match.nr_running && !match.cancel_all)
+                       return IO_WQ_CANCEL_RUNNING;
+       }
++      if (match.nr_running)
++              return IO_WQ_CANCEL_RUNNING;
++      if (match.nr_pending)
++              return IO_WQ_CANCEL_OK;
+       return IO_WQ_CANCEL_NOTFOUND;
+ }
+@@ -1011,7 +1019,7 @@ static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
+ enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
+ {
+-      return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork);
++      return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
+ }
+ static bool io_wq_pid_match(struct io_wq_work *work, void *data)
+@@ -1025,7 +1033,7 @@ enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
+ {
+       void *data = (void *) (unsigned long) pid;
+-      return io_wq_cancel_cb(wq, io_wq_pid_match, data);
++      return io_wq_cancel_cb(wq, io_wq_pid_match, data, false);
+ }
+ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
+diff --git a/fs/io-wq.h b/fs/io-wq.h
+index 5ba12de7572f0..8902903831f25 100644
+--- a/fs/io-wq.h
++++ b/fs/io-wq.h
+@@ -134,7 +134,7 @@ enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid);
+ typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
+ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+-                                      void *data);
++                                      void *data, bool cancel_all);
+ struct task_struct *io_wq_get_task(struct io_wq *wq);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index b33d4a97a8774..cf32705546773 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -5023,7 +5023,7 @@ static int io_async_cancel_one(struct io_ring_ctx *ctx, void *sqe_addr)
+       enum io_wq_cancel cancel_ret;
+       int ret = 0;
+-      cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr);
++      cancel_ret = io_wq_cancel_cb(ctx->io_wq, io_cancel_cb, sqe_addr, false);
+       switch (cancel_ret) {
+       case IO_WQ_CANCEL_OK:
+               ret = 0;
+-- 
+2.25.1
+
diff --git a/queue-5.7/io-wq-reorder-cancellation-pending-running.patch b/queue-5.7/io-wq-reorder-cancellation-pending-running.patch
new file mode 100644 (file)
index 0000000..eacdda3
--- /dev/null
@@ -0,0 +1,120 @@
+From 3570c621d726e27758fcafbb2a81f56e87d24951 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 10:24:02 +0300
+Subject: io-wq: reorder cancellation pending -> running
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit f4c2665e33f48904f2766d644df33fb3fd54b5ec ]
+
+Go over all pending lists and cancel works there, and only then
+try to match running requests. No functional changes here, just a
+preparation for bulk cancellation.
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io-wq.c | 54 ++++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 32 insertions(+), 22 deletions(-)
+
+diff --git a/fs/io-wq.c b/fs/io-wq.c
+index 4023c98468608..3283f8c5b5a18 100644
+--- a/fs/io-wq.c
++++ b/fs/io-wq.c
+@@ -931,19 +931,14 @@ static bool io_wq_worker_cancel(struct io_worker *worker, void *data)
+       return ret;
+ }
+-static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
+-                                          struct io_cb_cancel_data *match)
++static bool io_wqe_cancel_pending_work(struct io_wqe *wqe,
++                                     struct io_cb_cancel_data *match)
+ {
+       struct io_wq_work_node *node, *prev;
+       struct io_wq_work *work;
+       unsigned long flags;
+       bool found = false;
+-      /*
+-       * First check pending list, if we're lucky we can just remove it
+-       * from there. CANCEL_OK means that the work is returned as-new,
+-       * no completion will be posted for it.
+-       */
+       spin_lock_irqsave(&wqe->lock, flags);
+       wq_list_for_each(node, prev, &wqe->work_list) {
+               work = container_of(node, struct io_wq_work, list);
+@@ -956,21 +951,20 @@ static enum io_wq_cancel io_wqe_cancel_work(struct io_wqe *wqe,
+       }
+       spin_unlock_irqrestore(&wqe->lock, flags);
+-      if (found) {
++      if (found)
+               io_run_cancel(work, wqe);
+-              return IO_WQ_CANCEL_OK;
+-      }
++      return found;
++}
++
++static bool io_wqe_cancel_running_work(struct io_wqe *wqe,
++                                     struct io_cb_cancel_data *match)
++{
++      bool found;
+-      /*
+-       * Now check if a free (going busy) or busy worker has the work
+-       * currently running. If we find it there, we'll return CANCEL_RUNNING
+-       * as an indication that we attempt to signal cancellation. The
+-       * completion will run normally in this case.
+-       */
+       rcu_read_lock();
+       found = io_wq_for_each_worker(wqe, io_wq_worker_cancel, match);
+       rcu_read_unlock();
+-      return found ? IO_WQ_CANCEL_RUNNING : IO_WQ_CANCEL_NOTFOUND;
++      return found;
+ }
+ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+@@ -980,18 +974,34 @@ enum io_wq_cancel io_wq_cancel_cb(struct io_wq *wq, work_cancel_fn *cancel,
+               .fn     = cancel,
+               .data   = data,
+       };
+-      enum io_wq_cancel ret = IO_WQ_CANCEL_NOTFOUND;
+       int node;
++      /*
++       * First check pending list, if we're lucky we can just remove it
++       * from there. CANCEL_OK means that the work is returned as-new,
++       * no completion will be posted for it.
++       */
+       for_each_node(node) {
+               struct io_wqe *wqe = wq->wqes[node];
+-              ret = io_wqe_cancel_work(wqe, &match);
+-              if (ret != IO_WQ_CANCEL_NOTFOUND)
+-                      break;
++              if (io_wqe_cancel_pending_work(wqe, &match))
++                      return IO_WQ_CANCEL_OK;
+       }
+-      return ret;
++      /*
++       * Now check if a free (going busy) or busy worker has the work
++       * currently running. If we find it there, we'll return CANCEL_RUNNING
++       * as an indication that we attempt to signal cancellation. The
++       * completion will run normally in this case.
++       */
++      for_each_node(node) {
++              struct io_wqe *wqe = wq->wqes[node];
++
++              if (io_wqe_cancel_running_work(wqe, &match))
++                      return IO_WQ_CANCEL_RUNNING;
++      }
++
++      return IO_WQ_CANCEL_NOTFOUND;
+ }
+ static bool io_wq_io_cb_cancel_data(struct io_wq_work *work, void *data)
+-- 
+2.25.1
+
diff --git a/queue-5.7/io_uring-cancel-all-task-s-requests-on-exit.patch b/queue-5.7/io_uring-cancel-all-task-s-requests-on-exit.patch
new file mode 100644 (file)
index 0000000..ca0ce73
--- /dev/null
@@ -0,0 +1,93 @@
+From b26de0fd71f4035b5ef51becbf79ee227513b8b4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Jun 2020 10:24:04 +0300
+Subject: io_uring: cancel all task's requests on exit
+
+From: Pavel Begunkov <asml.silence@gmail.com>
+
+[ Upstream commit 44e728b8aae0bb6d4229129083974f9dea43f50b ]
+
+If a process is going away, io_uring_flush() will cancel only one
+request with a matching pid. Cancel all of them.
+
+Signed-off-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io-wq.c    | 14 --------------
+ fs/io-wq.h    |  1 -
+ fs/io_uring.c | 14 ++++++++++++--
+ 3 files changed, 12 insertions(+), 17 deletions(-)
+
+diff --git a/fs/io-wq.c b/fs/io-wq.c
+index 6d2e8ccc229e3..2bfa9117bc289 100644
+--- a/fs/io-wq.c
++++ b/fs/io-wq.c
+@@ -1022,20 +1022,6 @@ enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork)
+       return io_wq_cancel_cb(wq, io_wq_io_cb_cancel_data, (void *)cwork, false);
+ }
+-static bool io_wq_pid_match(struct io_wq_work *work, void *data)
+-{
+-      pid_t pid = (pid_t) (unsigned long) data;
+-
+-      return work->task_pid == pid;
+-}
+-
+-enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid)
+-{
+-      void *data = (void *) (unsigned long) pid;
+-
+-      return io_wq_cancel_cb(wq, io_wq_pid_match, data, false);
+-}
+-
+ struct io_wq *io_wq_create(unsigned bounded, struct io_wq_data *data)
+ {
+       int ret = -ENOMEM, node;
+diff --git a/fs/io-wq.h b/fs/io-wq.h
+index 8902903831f25..df8a4cd3236db 100644
+--- a/fs/io-wq.h
++++ b/fs/io-wq.h
+@@ -129,7 +129,6 @@ static inline bool io_wq_is_hashed(struct io_wq_work *work)
+ void io_wq_cancel_all(struct io_wq *wq);
+ enum io_wq_cancel io_wq_cancel_work(struct io_wq *wq, struct io_wq_work *cwork);
+-enum io_wq_cancel io_wq_cancel_pid(struct io_wq *wq, pid_t pid);
+ typedef bool (work_cancel_fn)(struct io_wq_work *, void *);
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index cf32705546773..9bb23edf2363a 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -7720,6 +7720,13 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+       }
+ }
++static bool io_cancel_pid_cb(struct io_wq_work *work, void *data)
++{
++      pid_t pid = (pid_t) (unsigned long) data;
++
++      return work->task_pid == pid;
++}
++
+ static int io_uring_flush(struct file *file, void *data)
+ {
+       struct io_ring_ctx *ctx = file->private_data;
+@@ -7729,8 +7736,11 @@ static int io_uring_flush(struct file *file, void *data)
+       /*
+        * If the task is going away, cancel work it may have pending
+        */
+-      if (fatal_signal_pending(current) || (current->flags & PF_EXITING))
+-              io_wq_cancel_pid(ctx->io_wq, task_pid_vnr(current));
++      if (fatal_signal_pending(current) || (current->flags & PF_EXITING)) {
++              void *data = (void *) (unsigned long)task_pid_vnr(current);
++
++              io_wq_cancel_cb(ctx->io_wq, io_cancel_pid_cb, data, true);
++      }
+       return 0;
+ }
+-- 
+2.25.1
+
diff --git a/queue-5.7/io_uring-find-and-cancel-head-link-async-work-on-fil.patch b/queue-5.7/io_uring-find-and-cancel-head-link-async-work-on-fil.patch
new file mode 100644 (file)
index 0000000..a1a182f
--- /dev/null
@@ -0,0 +1,81 @@
+From c6c642718480be66b38d910ca487a91c5a4d4238 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 16 Aug 2020 08:23:05 -0700
+Subject: io_uring: find and cancel head link async work on files exit
+
+From: Jens Axboe <axboe@kernel.dk>
+
+[ Upstream commit b711d4eaf0c408a811311ee3e94d6e9e5a230a9a ]
+
+Commit f254ac04c874 ("io_uring: enable lookup of links holding inflight files")
+only handled two out of the three head link cases we have; we also need to
+look up and cancel work that is blocked in io-wq if that work has a link
+that's holding a reference to the files structure.
+
+Put the "cancel head links that hold this request pending" logic into
+io_attempt_cancel(), which will to through the motions of finding and
+canceling head links that hold the current inflight files stable request
+pending.
+
+Cc: stable@vger.kernel.org
+Reported-by: Pavel Begunkov <asml.silence@gmail.com>
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/io_uring.c | 33 +++++++++++++++++++++++++++++----
+ 1 file changed, 29 insertions(+), 4 deletions(-)
+
+diff --git a/fs/io_uring.c b/fs/io_uring.c
+index 9bb23edf2363a..0822a16bed9aa 100644
+--- a/fs/io_uring.c
++++ b/fs/io_uring.c
+@@ -7659,6 +7659,33 @@ static bool io_timeout_remove_link(struct io_ring_ctx *ctx,
+       return found;
+ }
++static bool io_cancel_link_cb(struct io_wq_work *work, void *data)
++{
++      return io_match_link(container_of(work, struct io_kiocb, work), data);
++}
++
++static void io_attempt_cancel(struct io_ring_ctx *ctx, struct io_kiocb *req)
++{
++      enum io_wq_cancel cret;
++
++      /* cancel this particular work, if it's running */
++      cret = io_wq_cancel_work(ctx->io_wq, &req->work);
++      if (cret != IO_WQ_CANCEL_NOTFOUND)
++              return;
++
++      /* find links that hold this pending, cancel those */
++      cret = io_wq_cancel_cb(ctx->io_wq, io_cancel_link_cb, req, true);
++      if (cret != IO_WQ_CANCEL_NOTFOUND)
++              return;
++
++      /* if we have a poll link holding this pending, cancel that */
++      if (io_poll_remove_link(ctx, req))
++              return;
++
++      /* final option, timeout link is holding this req pending */
++      io_timeout_remove_link(ctx, req);
++}
++
+ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+                                 struct files_struct *files)
+ {
+@@ -7708,10 +7735,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx,
+                               continue;
+                       }
+               } else {
+-                      io_wq_cancel_work(ctx->io_wq, &cancel_req->work);
+-                      /* could be a link, check and remove if it is */
+-                      if (!io_poll_remove_link(ctx, cancel_req))
+-                              io_timeout_remove_link(ctx, cancel_req);
++                      /* cancel this request, or head link requests */
++                      io_attempt_cancel(ctx, cancel_req);
+                       io_put_req(cancel_req);
+               }
+-- 
+2.25.1
+
diff --git a/queue-5.7/jffs2-fix-uaf-problem.patch b/queue-5.7/jffs2-fix-uaf-problem.patch
new file mode 100644 (file)
index 0000000..5a02095
--- /dev/null
@@ -0,0 +1,80 @@
+From 00460b3475bfb5f909d5d1140b8308c0e4350ca4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 19 Jun 2020 17:06:35 +0800
+Subject: jffs2: fix UAF problem
+
+From: Zhe Li <lizhe67@huawei.com>
+
+[ Upstream commit 798b7347e4f29553db4b996393caf12f5b233daf ]
+
+The log of UAF problem is listed below.
+BUG: KASAN: use-after-free in jffs2_rmdir+0xa4/0x1cc [jffs2] at addr c1f165fc
+Read of size 4 by task rm/8283
+=============================================================================
+BUG kmalloc-32 (Tainted: P    B      O   ): kasan: bad access detected
+-----------------------------------------------------------------------------
+
+INFO: Allocated in 0xbbbbbbbb age=3054364 cpu=0 pid=0
+        0xb0bba6ef
+        jffs2_write_dirent+0x11c/0x9c8 [jffs2]
+        __slab_alloc.isra.21.constprop.25+0x2c/0x44
+        __kmalloc+0x1dc/0x370
+        jffs2_write_dirent+0x11c/0x9c8 [jffs2]
+        jffs2_do_unlink+0x328/0x5fc [jffs2]
+        jffs2_rmdir+0x110/0x1cc [jffs2]
+        vfs_rmdir+0x180/0x268
+        do_rmdir+0x2cc/0x300
+        ret_from_syscall+0x0/0x3c
+INFO: Freed in 0x205b age=3054364 cpu=0 pid=0
+        0x2e9173
+        jffs2_add_fd_to_list+0x138/0x1dc [jffs2]
+        jffs2_add_fd_to_list+0x138/0x1dc [jffs2]
+        jffs2_garbage_collect_dirent.isra.3+0x21c/0x288 [jffs2]
+        jffs2_garbage_collect_live+0x16bc/0x1800 [jffs2]
+        jffs2_garbage_collect_pass+0x678/0x11d4 [jffs2]
+        jffs2_garbage_collect_thread+0x1e8/0x3b0 [jffs2]
+        kthread+0x1a8/0x1b0
+        ret_from_kernel_thread+0x5c/0x64
+Call Trace:
+[c17ddd20] [c02452d4] kasan_report.part.0+0x298/0x72c (unreliable)
+[c17ddda0] [d2509680] jffs2_rmdir+0xa4/0x1cc [jffs2]
+[c17dddd0] [c026da04] vfs_rmdir+0x180/0x268
+[c17dde00] [c026f4e4] do_rmdir+0x2cc/0x300
+[c17ddf40] [c001a658] ret_from_syscall+0x0/0x3c
+
+The root cause is that we don't take "jffs2_inode_info.sem" before
+we scan the list "jffs2_inode_info.dents" in function jffs2_rmdir.
+This patch adds code to take "jffs2_inode_info.sem" before we scan
+"jffs2_inode_info.dents" to solve the UAF problem.
+
+Signed-off-by: Zhe Li <lizhe67@huawei.com>
+Reviewed-by: Hou Tao <houtao1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/jffs2/dir.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+diff --git a/fs/jffs2/dir.c b/fs/jffs2/dir.c
+index f20cff1194bb6..776493713153f 100644
+--- a/fs/jffs2/dir.c
++++ b/fs/jffs2/dir.c
+@@ -590,10 +590,14 @@ static int jffs2_rmdir (struct inode *dir_i, struct dentry *dentry)
+       int ret;
+       uint32_t now = JFFS2_NOW();
++      mutex_lock(&f->sem);
+       for (fd = f->dents ; fd; fd = fd->next) {
+-              if (fd->ino)
++              if (fd->ino) {
++                      mutex_unlock(&f->sem);
+                       return -ENOTEMPTY;
++              }
+       }
++      mutex_unlock(&f->sem);
+       ret = jffs2_do_unlink(c, dir_f, dentry->d_name.name,
+                             dentry->d_name.len, f, now);
+-- 
+2.25.1
+
diff --git a/queue-5.7/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch b/queue-5.7/kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch
new file mode 100644 (file)
index 0000000..f32716e
--- /dev/null
@@ -0,0 +1,110 @@
+From 655785290d4ee911a633b3376dc61d5ce1419e43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Aug 2020 11:27:25 +0100
+Subject: KVM: arm64: Only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is not
+ set
+
+From: Will Deacon <will@kernel.org>
+
+[ Upstream commit b5331379bc62611d1026173a09c73573384201d9 ]
+
+When an MMU notifier call results in unmapping a range that spans multiple
+PGDs, we end up calling into cond_resched_lock() when crossing a PGD boundary,
+since this avoids running into RCU stalls during VM teardown. Unfortunately,
+if the VM is destroyed as a result of OOM, then blocking is not permitted
+and the call to the scheduler triggers the following BUG():
+
+ | BUG: sleeping function called from invalid context at arch/arm64/kvm/mmu.c:394
+ | in_atomic(): 1, irqs_disabled(): 0, non_block: 1, pid: 36, name: oom_reaper
+ | INFO: lockdep is turned off.
+ | CPU: 3 PID: 36 Comm: oom_reaper Not tainted 5.8.0 #1
+ | Hardware name: QEMU QEMU Virtual Machine, BIOS 0.0.0 02/06/2015
+ | Call trace:
+ |  dump_backtrace+0x0/0x284
+ |  show_stack+0x1c/0x28
+ |  dump_stack+0xf0/0x1a4
+ |  ___might_sleep+0x2bc/0x2cc
+ |  unmap_stage2_range+0x160/0x1ac
+ |  kvm_unmap_hva_range+0x1a0/0x1c8
+ |  kvm_mmu_notifier_invalidate_range_start+0x8c/0xf8
+ |  __mmu_notifier_invalidate_range_start+0x218/0x31c
+ |  mmu_notifier_invalidate_range_start_nonblock+0x78/0xb0
+ |  __oom_reap_task_mm+0x128/0x268
+ |  oom_reap_task+0xac/0x298
+ |  oom_reaper+0x178/0x17c
+ |  kthread+0x1e4/0x1fc
+ |  ret_from_fork+0x10/0x30
+
+Use the new 'flags' argument to kvm_unmap_hva_range() to ensure that we
+only reschedule if MMU_NOTIFIER_RANGE_BLOCKABLE is set in the notifier
+flags.
+
+Cc: <stable@vger.kernel.org>
+Fixes: 8b3405e345b5 ("kvm: arm/arm64: Fix locking for kvm_free_stage2_pgd")
+Cc: Marc Zyngier <maz@kernel.org>
+Cc: Suzuki K Poulose <suzuki.poulose@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Signed-off-by: Will Deacon <will@kernel.org>
+Message-Id: <20200811102725.7121-3-will@kernel.org>
+Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ virt/kvm/arm/mmu.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
+index 8a9d13e8e904f..6ee6770694953 100644
+--- a/virt/kvm/arm/mmu.c
++++ b/virt/kvm/arm/mmu.c
+@@ -331,7 +331,8 @@ static void unmap_stage2_puds(struct kvm *kvm, pgd_t *pgd,
+  * destroying the VM), otherwise another faulting VCPU may come in and mess
+  * with things behind our backs.
+  */
+-static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
++static void __unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size,
++                              bool may_block)
+ {
+       pgd_t *pgd;
+       phys_addr_t addr = start, end = start + size;
+@@ -356,11 +357,16 @@ static void unmap_stage2_range(struct kvm *kvm, phys_addr_t start, u64 size)
+                * If the range is too large, release the kvm->mmu_lock
+                * to prevent starvation and lockup detector warnings.
+                */
+-              if (next != end)
++              if (may_block && next != end)
+                       cond_resched_lock(&kvm->mmu_lock);
+       } while (pgd++, addr = next, addr != end);
+ }
++static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size)
++{
++      __unmap_stage2_range(mmu, start, size, true);
++}
++
+ static void stage2_flush_ptes(struct kvm *kvm, pmd_t *pmd,
+                             phys_addr_t addr, phys_addr_t end)
+ {
+@@ -2041,7 +2047,10 @@ static int handle_hva_to_gpa(struct kvm *kvm,
+ static int kvm_unmap_hva_handler(struct kvm *kvm, gpa_t gpa, u64 size, void *data)
+ {
+-      unmap_stage2_range(kvm, gpa, size);
++      unsigned flags = *(unsigned *)data;
++      bool may_block = flags & MMU_NOTIFIER_RANGE_BLOCKABLE;
++
++      __unmap_stage2_range(kvm, gpa, size, may_block);
+       return 0;
+ }
+@@ -2052,7 +2061,7 @@ int kvm_unmap_hva_range(struct kvm *kvm,
+               return 0;
+       trace_kvm_unmap_hva_range(start, end);
+-      handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL);
++      handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, &flags);
+       return 0;
+ }
+-- 
+2.25.1
+
diff --git a/queue-5.7/m68knommu-fix-overwriting-of-bits-in-coldfire-v3-cac.patch b/queue-5.7/m68knommu-fix-overwriting-of-bits-in-coldfire-v3-cac.patch
new file mode 100644 (file)
index 0000000..6c6cd5a
--- /dev/null
@@ -0,0 +1,52 @@
+From 35a8008016af865dd75c7e08cbb31b1b5fe4ea2e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 13 Jun 2020 17:17:52 +1000
+Subject: m68knommu: fix overwriting of bits in ColdFire V3 cache control
+
+From: Greg Ungerer <gerg@linux-m68k.org>
+
+[ Upstream commit bdee0e793cea10c516ff48bf3ebb4ef1820a116b ]
+
+The Cache Control Register (CACR) of the ColdFire V3 has bits that
+control high level caching functions, and also enable/disable the use
+of the alternate stack pointer register (the EUSP bit) to provide
+separate supervisor and user stack pointer registers. The code as
+it is today will blindly clear the EUSP bit on cache actions like
+invalidation. So it is broken for this case - and that will result
+in failed booting (interrupt entry and exit processing will be
+completely hosed).
+
+This only affects ColdFire V3 parts that support the alternate stack
+register (like the 5329 for example) - generally speaking new parts do,
+older parts don't. It has no impact on ColdFire V3 parts with the single
+stack pointer, like the 5307 for example.
+
+Fix the cache bit defines used, so they maintain the EUSP bit when
+carrying out cache actions through the CACR register.
+
+Signed-off-by: Greg Ungerer <gerg@linux-m68k.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/m68k/include/asm/m53xxacr.h | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/arch/m68k/include/asm/m53xxacr.h b/arch/m68k/include/asm/m53xxacr.h
+index 9138a624c5c81..692f90e7fecc1 100644
+--- a/arch/m68k/include/asm/m53xxacr.h
++++ b/arch/m68k/include/asm/m53xxacr.h
+@@ -89,9 +89,9 @@
+  * coherency though in all cases. And for copyback caches we will need
+  * to push cached data as well.
+  */
+-#define CACHE_INIT      CACR_CINVA
+-#define CACHE_INVALIDATE  CACR_CINVA
+-#define CACHE_INVALIDATED CACR_CINVA
++#define CACHE_INIT        (CACHE_MODE + CACR_CINVA - CACR_EC)
++#define CACHE_INVALIDATE  (CACHE_MODE + CACR_CINVA)
++#define CACHE_INVALIDATED (CACHE_MODE + CACR_CINVA)
+ #define ACR0_MODE     ((CONFIG_RAMBASE & 0xff000000) + \
+                        (0x000f0000) + \
+-- 
+2.25.1
+
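
The effect of the new defines can be illustrated with a standalone sketch; the bit positions below are invented for illustration and are not the real ColdFire CACR layout. The point is only that writing the invalidate bit by itself drops previously enabled mode bits such as EUSP, while adding it to the cached mode bits preserves them:

#include <stdio.h>

int main(void)
{
        /* Invented bit positions, NOT the real ColdFire CACR layout. */
        unsigned int CACR_EUSP  = 1u << 5;                   /* enable user stack pointer */
        unsigned int CACR_CINVA = 1u << 24;                  /* invalidate all            */
        unsigned int cache_mode = CACR_EUSP | (1u << 31);    /* mode bits set up at boot  */

        unsigned int old_write = CACR_CINVA;                 /* old define: EUSP cleared  */
        unsigned int new_write = cache_mode + CACR_CINVA;    /* new define: EUSP kept     */

        printf("old write %#010x keeps EUSP? %s\n", old_write,
               (old_write & CACR_EUSP) ? "yes" : "no");
        printf("new write %#010x keeps EUSP? %s\n", new_write,
               (new_write & CACR_EUSP) ? "yes" : "no");
        return 0;
}
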
diff --git a/queue-5.7/media-budget-core-improve-exception-handling-in-budg.patch b/queue-5.7/media-budget-core-improve-exception-handling-in-budg.patch
new file mode 100644 (file)
index 0000000..d021cd7
--- /dev/null
@@ -0,0 +1,56 @@
+From a0bb47d781dd939658543834a60b6a4484b3e215 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 5 Jun 2020 18:17:28 +0200
+Subject: media: budget-core: Improve exception handling in budget_register()
+
+From: Chuhong Yuan <hslester96@gmail.com>
+
+[ Upstream commit fc0456458df8b3421dba2a5508cd817fbc20ea71 ]
+
+budget_register() does not undo earlier registrations when a later step fails.
+Add the missing cleanup calls on the error paths to fix it.
+
+Signed-off-by: Chuhong Yuan <hslester96@gmail.com>
+Signed-off-by: Sean Young <sean@mess.org>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/pci/ttpci/budget-core.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/media/pci/ttpci/budget-core.c b/drivers/media/pci/ttpci/budget-core.c
+index fadbdeeb44955..293867b9e7961 100644
+--- a/drivers/media/pci/ttpci/budget-core.c
++++ b/drivers/media/pci/ttpci/budget-core.c
+@@ -369,20 +369,25 @@ static int budget_register(struct budget *budget)
+       ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->hw_frontend);
+       if (ret < 0)
+-              return ret;
++              goto err_release_dmx;
+       budget->mem_frontend.source = DMX_MEMORY_FE;
+       ret = dvbdemux->dmx.add_frontend(&dvbdemux->dmx, &budget->mem_frontend);
+       if (ret < 0)
+-              return ret;
++              goto err_release_dmx;
+       ret = dvbdemux->dmx.connect_frontend(&dvbdemux->dmx, &budget->hw_frontend);
+       if (ret < 0)
+-              return ret;
++              goto err_release_dmx;
+       dvb_net_init(&budget->dvb_adapter, &budget->dvb_net, &dvbdemux->dmx);
+       return 0;
++
++err_release_dmx:
++      dvb_dmxdev_release(&budget->dmxdev);
++      dvb_dmx_release(&budget->demux);
++      return ret;
+ }
+ static void budget_unregister(struct budget *budget)
+-- 
+2.25.1
+
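
The shape of the fix is the common goto-unwind error-handling idiom. A generic, self-contained sketch of that idiom with hypothetical stub functions (nothing below is taken from the driver):

#include <stdio.h>

/* Hypothetical stubs standing in for the demux/frontend registration steps. */
static int step_one(void)  { return 0; }
static int step_two(void)  { return -1; }            /* pretend this one fails */
static void undo_step_one(void) { puts("undoing step one"); }

static int register_things(void)
{
        int ret;

        ret = step_one();
        if (ret < 0)
                return ret;              /* nothing to undo yet */

        ret = step_two();
        if (ret < 0)
                goto err_undo_one;       /* release what was set up before failing */

        return 0;

err_undo_one:
        undo_step_one();
        return ret;
}

int main(void)
{
        printf("register_things() = %d\n", register_things());
        return 0;
}
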
diff --git a/queue-5.7/media-camss-fix-memory-leaks-on-error-handling-paths.patch b/queue-5.7/media-camss-fix-memory-leaks-on-error-handling-paths.patch
new file mode 100644 (file)
index 0000000..98e7228
--- /dev/null
@@ -0,0 +1,99 @@
+From dcefd0243523a63e2826c3b77710f03bd48c6f08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 23 Jul 2020 19:46:51 +0200
+Subject: media: camss: fix memory leaks on error handling paths in probe
+
+From: Evgeny Novikov <novikov@ispras.ru>
+
+[ Upstream commit f45882cfb152f5d3a421fd58f177f227e44843b9 ]
+
+camss_probe() does not free camss on error handling paths. The patch
+introduces an additional error label for this purpose. Besides, it
+removes the call to v4l2_async_notifier_cleanup() from
+camss_of_parse_ports() since its caller, camss_probe(), cleans up all
+its resources itself.
+
+Found by Linux Driver Verification project (linuxtesting.org).
+
+Signed-off-by: Evgeny Novikov <novikov@ispras.ru>
+Co-developed-by: Anton Vasilyev <vasilyev@ispras.ru>
+Signed-off-by: Anton Vasilyev <vasilyev@ispras.ru>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/qcom/camss/camss.c | 30 +++++++++++++++--------
+ 1 file changed, 20 insertions(+), 10 deletions(-)
+
+diff --git a/drivers/media/platform/qcom/camss/camss.c b/drivers/media/platform/qcom/camss/camss.c
+index 3fdc9f964a3c6..2483641799dfb 100644
+--- a/drivers/media/platform/qcom/camss/camss.c
++++ b/drivers/media/platform/qcom/camss/camss.c
+@@ -504,7 +504,6 @@ static int camss_of_parse_ports(struct camss *camss)
+       return num_subdevs;
+ err_cleanup:
+-      v4l2_async_notifier_cleanup(&camss->notifier);
+       of_node_put(node);
+       return ret;
+ }
+@@ -835,29 +834,38 @@ static int camss_probe(struct platform_device *pdev)
+               camss->csid_num = 4;
+               camss->vfe_num = 2;
+       } else {
+-              return -EINVAL;
++              ret = -EINVAL;
++              goto err_free;
+       }
+       camss->csiphy = devm_kcalloc(dev, camss->csiphy_num,
+                                    sizeof(*camss->csiphy), GFP_KERNEL);
+-      if (!camss->csiphy)
+-              return -ENOMEM;
++      if (!camss->csiphy) {
++              ret = -ENOMEM;
++              goto err_free;
++      }
+       camss->csid = devm_kcalloc(dev, camss->csid_num, sizeof(*camss->csid),
+                                  GFP_KERNEL);
+-      if (!camss->csid)
+-              return -ENOMEM;
++      if (!camss->csid) {
++              ret = -ENOMEM;
++              goto err_free;
++      }
+       camss->vfe = devm_kcalloc(dev, camss->vfe_num, sizeof(*camss->vfe),
+                                 GFP_KERNEL);
+-      if (!camss->vfe)
+-              return -ENOMEM;
++      if (!camss->vfe) {
++              ret = -ENOMEM;
++              goto err_free;
++      }
+       v4l2_async_notifier_init(&camss->notifier);
+       num_subdevs = camss_of_parse_ports(camss);
+-      if (num_subdevs < 0)
+-              return num_subdevs;
++      if (num_subdevs < 0) {
++              ret = num_subdevs;
++              goto err_cleanup;
++      }
+       ret = camss_init_subdevices(camss);
+       if (ret < 0)
+@@ -936,6 +944,8 @@ err_register_entities:
+       v4l2_device_unregister(&camss->v4l2_dev);
+ err_cleanup:
+       v4l2_async_notifier_cleanup(&camss->notifier);
++err_free:
++      kfree(camss);
+       return ret;
+ }
+-- 
+2.25.1
+
diff --git a/queue-5.7/media-vpss-clean-up-resources-in-init.patch b/queue-5.7/media-vpss-clean-up-resources-in-init.patch
new file mode 100644 (file)
index 0000000..5aaef6b
--- /dev/null
@@ -0,0 +1,66 @@
+From 2200addc38051a7f5e4bcde36c82cfc2e30edc10 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Jul 2020 11:02:23 +0200
+Subject: media: vpss: clean up resources in init
+
+From: Evgeny Novikov <novikov@ispras.ru>
+
+[ Upstream commit 9c487b0b0ea7ff22127fe99a7f67657d8730ff94 ]
+
+If platform_driver_register() fails within vpss_init(), resources are not
+cleaned up. The patch fixes this issue by introducing the corresponding
+error handling.
+
+Found by Linux Driver Verification project (linuxtesting.org).
+
+Signed-off-by: Evgeny Novikov <novikov@ispras.ru>
+Signed-off-by: Hans Verkuil <hverkuil-cisco@xs4all.nl>
+Signed-off-by: Mauro Carvalho Chehab <mchehab+huawei@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/media/platform/davinci/vpss.c | 20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/media/platform/davinci/vpss.c b/drivers/media/platform/davinci/vpss.c
+index d38d2bbb6f0f8..7000f0bf0b353 100644
+--- a/drivers/media/platform/davinci/vpss.c
++++ b/drivers/media/platform/davinci/vpss.c
+@@ -505,19 +505,31 @@ static void vpss_exit(void)
+ static int __init vpss_init(void)
+ {
++      int ret;
++
+       if (!request_mem_region(VPSS_CLK_CTRL, 4, "vpss_clock_control"))
+               return -EBUSY;
+       oper_cfg.vpss_regs_base2 = ioremap(VPSS_CLK_CTRL, 4);
+       if (unlikely(!oper_cfg.vpss_regs_base2)) {
+-              release_mem_region(VPSS_CLK_CTRL, 4);
+-              return -ENOMEM;
++              ret = -ENOMEM;
++              goto err_ioremap;
+       }
+       writel(VPSS_CLK_CTRL_VENCCLKEN |
+-                   VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
++             VPSS_CLK_CTRL_DACCLKEN, oper_cfg.vpss_regs_base2);
++
++      ret = platform_driver_register(&vpss_driver);
++      if (ret)
++              goto err_pd_register;
++
++      return 0;
+-      return platform_driver_register(&vpss_driver);
++err_pd_register:
++      iounmap(oper_cfg.vpss_regs_base2);
++err_ioremap:
++      release_mem_region(VPSS_CLK_CTRL, 4);
++      return ret;
+ }
+ subsys_initcall(vpss_init);
+ module_exit(vpss_exit);
+-- 
+2.25.1
+
diff --git a/queue-5.7/mips-fix-unable-to-reserve-memory-for-crash-kernel.patch b/queue-5.7/mips-fix-unable-to-reserve-memory-for-crash-kernel.patch
new file mode 100644 (file)
index 0000000..775de22
--- /dev/null
@@ -0,0 +1,82 @@
+From 96c68080b984f2bac49796fec968bdc8789e94ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 25 Jul 2020 13:56:38 +0800
+Subject: MIPS: Fix unable to reserve memory for Crash kernel
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jinyang He <hejinyang@loongson.cn>
+
+[ Upstream commit b1ce9716f3b5ed3b49badf1f003b9e34b7ead0f9 ]
+
+Using 0 as the align parameter in memblock_find_in_range() is
+incorrect when we reserve memory for the crash kernel.
+
+The environment as follows:
+[    0.000000] MIPS: machine is loongson,loongson64c-4core-rs780e
+...
+[    1.951016]     crashkernel=64M@128M
+
+The warning as follows:
+[    0.000000] Invalid memory region reserved for crash kernel
+
+And the iomem as follows:
+00200000-0effffff : System RAM
+  04000000-0484009f : Kernel code
+  048400a0-04ad7fff : Kernel data
+  04b40000-05c4c6bf : Kernel bss
+1a000000-1bffffff : pci@1a000000
+...
+
+The align parameter may be finally used by round_down() or round_up().
+Like the following call tree:
+
+mips-next: mm/memblock.c
+
+memblock_find_in_range
+└── memblock_find_in_range_node
+    ├── __memblock_find_range_bottom_up
+    │   └── round_up
+    └── __memblock_find_range_top_down
+        └── round_down
+\#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+\#define round_down(x, y) ((x) & ~__round_mask(x, y))
+\#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+
+The second parameter of round_down() (or round_up()) must be a power of 2.
+If the second parameter is 0, both will return 0.
+
+Use 1 as the parameter to fix the bug; the iomem then looks as follows:
+00200000-0effffff : System RAM
+  04000000-0484009f : Kernel code
+  048400a0-04ad7fff : Kernel data
+  04b40000-05c4c6bf : Kernel bss
+  08000000-0bffffff : Crash kernel
+1a000000-1bffffff : pci@1a000000
+...
+
+Signed-off-by: Jinyang He <hejinyang@loongson.cn>
+Reviewed-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/kernel/setup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
+index 573509e0f2d4e..3ace115740dd1 100644
+--- a/arch/mips/kernel/setup.c
++++ b/arch/mips/kernel/setup.c
+@@ -497,7 +497,7 @@ static void __init mips_parse_crashkernel(void)
+       if (ret != 0 || crash_size <= 0)
+               return;
+-      if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 0)) {
++      if (!memblock_find_in_range(crash_base, crash_base + crash_size, crash_size, 1)) {
+               pr_warn("Invalid memory region reserved for crash kernel\n");
+               return;
+       }
+-- 
+2.25.1
+
diff --git a/queue-5.7/opp-enable-resources-again-if-they-were-disabled-ear.patch b/queue-5.7/opp-enable-resources-again-if-they-were-disabled-ear.patch
new file mode 100644 (file)
index 0000000..d5d7f6c
--- /dev/null
@@ -0,0 +1,58 @@
+From 20101faf84cab5a0848381cb4b1d3a54c4fd3875 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Aug 2020 12:36:19 +0530
+Subject: opp: Enable resources again if they were disabled earlier
+
+From: Rajendra Nayak <rnayak@codeaurora.org>
+
+[ Upstream commit a4501bac0e553bed117b7e1b166d49731caf7260 ]
+
+dev_pm_opp_set_rate() can now be called with freq = 0 in order
+to either drop performance or bandwidth votes or to disable
+regulators on platforms which support them.
+
+In such cases, a subsequent call to dev_pm_opp_set_rate() with the
+same frequency ends up returning early because 'old_freq == freq'.
+
+Instead, make it fall through and put back the dropped performance and
+bandwidth votes and/or re-enable the regulators.
+
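+A rough sketch of the call sequence this fixes (illustrative only, not
+taken from the patch):
+
+        dev_pm_opp_set_rate(dev, 0);    /* drop votes, disable regulators   */
+        /* ... device idle ... */
+        dev_pm_opp_set_rate(dev, freq); /* may see freq == old_freq, must   */
+                                        /* still re-enable votes/regulators */
+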
+Cc: v5.3+ <stable@vger.kernel.org> # v5.3+
+Fixes: cd7ea582866f ("opp: Make dev_pm_opp_set_rate() handle freq = 0 to drop performance votes")
+Reported-by: Sajida Bhanu <sbhanu@codeaurora.org>
+Reviewed-by: Sibi Sankar <sibis@codeaurora.org>
+Reported-by: Matthias Kaehlcke <mka@chromium.org>
+Tested-by: Matthias Kaehlcke <mka@chromium.org>
+Reviewed-by: Stephen Boyd <sboyd@kernel.org>
+Signed-off-by: Rajendra Nayak <rnayak@codeaurora.org>
+[ Viresh: Don't skip clk_set_rate() and massaged changelog ]
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/opp/core.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index e4f01e7771a22..195fcaff18448 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -845,10 +845,12 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+       /* Return early if nothing to do */
+       if (old_freq == freq) {
+-              dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
+-                      __func__, freq);
+-              ret = 0;
+-              goto put_opp_table;
++              if (!opp_table->required_opp_tables && !opp_table->regulators) {
++                      dev_dbg(dev, "%s: old/new frequencies (%lu Hz) are same, nothing to do\n",
++                              __func__, freq);
++                      ret = 0;
++                      goto put_opp_table;
++              }
+       }
+       /*
+-- 
+2.25.1
+
diff --git a/queue-5.7/opp-put-opp-table-in-dev_pm_opp_set_rate-for-empty-t.patch b/queue-5.7/opp-put-opp-table-in-dev_pm_opp_set_rate-for-empty-t.patch
new file mode 100644 (file)
index 0000000..5dd069b
--- /dev/null
@@ -0,0 +1,44 @@
+From ab718afd2e52173997156c9bf751617408b0fff6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Aug 2020 14:28:36 -0700
+Subject: opp: Put opp table in dev_pm_opp_set_rate() for empty tables
+
+From: Stephen Boyd <swboyd@chromium.org>
+
+[ Upstream commit 8979ef70850eb469e1094279259d1ef393ffe85f ]
+
+We get the opp_table pointer (and a reference) at the top of the
+function, so we should put the pointer at the end of the function,
+like all other exit paths from this function do.
+
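+The pattern being restored looks roughly like this (simplified sketch
+of dev_pm_opp_set_rate(), not the literal code):
+
+        opp_table = _find_opp_table(dev);       /* takes a reference      */
+        ...
+        if (!_get_opp_count(opp_table)) {
+                ret = 0;
+                goto put_opp_table;             /* was: return 0, leaking */
+        }
+        ...
+put_opp_table:
+        dev_pm_opp_put_opp_table(opp_table);    /* dropped on every exit  */
+        return ret;
+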
+Cc: v5.7+ <stable@vger.kernel.org> # v5.7+
+Fixes: aca48b61f963 ("opp: Manage empty OPP tables with clk handle")
+Reviewed-by: Rajendra Nayak <rnayak@codeaurora.org>
+Signed-off-by: Stephen Boyd <swboyd@chromium.org>
+[ Viresh: Split the patch into two ]
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/opp/core.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 2d3880b3d6ee0..a55d083e5be21 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -822,8 +822,10 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+                * have OPP table for the device, while others don't and
+                * opp_set_rate() just needs to behave like clk_set_rate().
+                */
+-              if (!_get_opp_count(opp_table))
+-                      return 0;
++              if (!_get_opp_count(opp_table)) {
++                      ret = 0;
++                      goto put_opp_table;
++              }
+               if (!opp_table->required_opp_tables) {
+                       dev_err(dev, "target frequency can't be 0\n");
+-- 
+2.25.1
+
diff --git a/queue-5.7/opp-reorder-the-code-for-target_freq-case.patch b/queue-5.7/opp-reorder-the-code-for-target_freq-case.patch
new file mode 100644 (file)
index 0000000..9fde8c0
--- /dev/null
@@ -0,0 +1,57 @@
+From 17348c7c323236a894ba345cf6a541a7bbdd502d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 May 2020 12:37:24 +0530
+Subject: opp: Reorder the code for !target_freq case
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Viresh Kumar <viresh.kumar@linaro.org>
+
+[ Upstream commit b23dfa3543f31fbb8c0098925bf90fc23193d17a ]
+
+Reorder the code a bit to make it more readable, and add an
+explanatory comment as well.
+
+Tested-by: Marek Szyprowski <m.szyprowski@samsung.com>
+Acked-by: Clément Péron <peron.clem@gmail.com>
+Tested-by: Clément Péron <peron.clem@gmail.com>
+Signed-off-by: Viresh Kumar <viresh.kumar@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/opp/core.c | 14 ++++++++++----
+ 1 file changed, 10 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/opp/core.c b/drivers/opp/core.c
+index 195fcaff18448..2d3880b3d6ee0 100644
+--- a/drivers/opp/core.c
++++ b/drivers/opp/core.c
+@@ -817,15 +817,21 @@ int dev_pm_opp_set_rate(struct device *dev, unsigned long target_freq)
+       }
+       if (unlikely(!target_freq)) {
+-              if (opp_table->required_opp_tables) {
+-                      ret = _set_required_opps(dev, opp_table, NULL);
+-              } else if (!_get_opp_count(opp_table)) {
++              /*
++               * Some drivers need to support cases where some platforms may
++               * have OPP table for the device, while others don't and
++               * opp_set_rate() just needs to behave like clk_set_rate().
++               */
++              if (!_get_opp_count(opp_table))
+                       return 0;
+-              } else {
++
++              if (!opp_table->required_opp_tables) {
+                       dev_err(dev, "target frequency can't be 0\n");
+                       ret = -EINVAL;
++                      goto put_opp_table;
+               }
++              ret = _set_required_opps(dev, opp_table, NULL);
+               goto put_opp_table;
+       }
+-- 
+2.25.1
+
diff --git a/queue-5.7/riscv-fixup-static_obj-fail.patch b/queue-5.7/riscv-fixup-static_obj-fail.patch
new file mode 100644 (file)
index 0000000..68ef19a
--- /dev/null
@@ -0,0 +1,79 @@
+From c5d6d844894c731126280fc4640c34775cdb46c9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 27 Jun 2020 13:57:06 +0000
+Subject: riscv: Fixup static_obj() fail
+
+From: Guo Ren <guoren@linux.alibaba.com>
+
+[ Upstream commit 6184358da0004c8fd940afda6c0a0fa4027dc911 ]
+
+When LOCKDEP is enabled, static_obj() fails, because some __initdata
+static variables are placed before _stext:
+
+static int static_obj(const void *obj)
+{
+        unsigned long start = (unsigned long) &_stext,
+                      end   = (unsigned long) &_end,
+                      addr  = (unsigned long) obj;
+
+        /*
+         * static variable?
+         */
+        if ((addr >= start) && (addr < end))
+                return 1;
+
+[    0.067192] INFO: trying to register non-static key.
+[    0.067325] the code is fine but needs lockdep annotation.
+[    0.067449] turning off the locking correctness validator.
+[    0.067718] CPU: 0 PID: 0 Comm: swapper/0 Not tainted 5.7.0-rc7-dirty #44
+[    0.067945] Call Trace:
+[    0.068369] [<ffffffe00020323c>] walk_stackframe+0x0/0xa4
+[    0.068506] [<ffffffe000203422>] show_stack+0x2a/0x34
+[    0.068631] [<ffffffe000521e4e>] dump_stack+0x94/0xca
+[    0.068757] [<ffffffe000255a4e>] register_lock_class+0x5b8/0x5bc
+[    0.068969] [<ffffffe000255abe>] __lock_acquire+0x6c/0x1d5c
+[    0.069101] [<ffffffe0002550fe>] lock_acquire+0xae/0x312
+[    0.069228] [<ffffffe000989a8e>] _raw_spin_lock_irqsave+0x40/0x5a
+[    0.069357] [<ffffffe000247c64>] complete+0x1e/0x50
+[    0.069479] [<ffffffe000984c38>] rest_init+0x1b0/0x28a
+[    0.069660] [<ffffffe0000016a2>] 0xffffffe0000016a2
+[    0.069779] [<ffffffe000001b84>] 0xffffffe000001b84
+[    0.069953] [<ffffffe000001092>] 0xffffffe000001092
+
+static __initdata DECLARE_COMPLETION(kthreadd_done);
+
+noinline void __ref rest_init(void)
+{
+       ...
+       complete(&kthreadd_done);
+
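+A minimal sketch of why the check above fails before this fix
+(addresses are illustrative):
+
+        addr  = (unsigned long)&kthreadd_done;  /* __initdata, placed     */
+                                                /* before the old _stext  */
+        start = (unsigned long)&_stext;         /* was the start of .text */
+        /* addr < start -> static_obj() == 0 -> "non-static key" warning  */
+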
+Signed-off-by: Guo Ren <guoren@linux.alibaba.com>
+Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/kernel/vmlinux.lds.S | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
+index 0339b6bbe11ab..bf3f34dbe630b 100644
+--- a/arch/riscv/kernel/vmlinux.lds.S
++++ b/arch/riscv/kernel/vmlinux.lds.S
+@@ -22,6 +22,7 @@ SECTIONS
+       /* Beginning of code and text segment */
+       . = LOAD_OFFSET;
+       _start = .;
++      _stext = .;
+       HEAD_TEXT_SECTION
+       . = ALIGN(PAGE_SIZE);
+@@ -49,7 +50,6 @@ SECTIONS
+       . = ALIGN(SECTION_ALIGN);
+       .text : {
+               _text = .;
+-              _stext = .;
+               TEXT_TEXT
+               SCHED_TEXT
+               CPUIDLE_TEXT
+-- 
+2.25.1
+
diff --git a/queue-5.7/rtc-goldfish-enable-interrupt-in-set_alarm-when-nece.patch b/queue-5.7/rtc-goldfish-enable-interrupt-in-set_alarm-when-nece.patch
new file mode 100644 (file)
index 0000000..cda2187
--- /dev/null
@@ -0,0 +1,39 @@
+From bbf1bd955a1ed8fdf3531e687c856c17634291f5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 20 Jun 2020 20:04:43 +0800
+Subject: rtc: goldfish: Enable interrupt in set_alarm() when necessary
+
+From: Huacai Chen <chenhc@lemote.com>
+
+[ Upstream commit 22f8d5a1bf230cf8567a4121fc3789babb46336d ]
+
+When using the goldfish rtc, the "hwclock" command fails with "select()
+to /dev/rtc to wait for clock tick timed out". This is because "hwclock"
+needs the set_alarm() hook to enable the interrupt when alrm->enabled is
+true. This operation is missing in the goldfish rtc (other rtc drivers,
+such as the cmos rtc, enable the interrupt here), so add it.
+
+Signed-off-by: Huacai Chen <chenhc@lemote.com>
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Link: https://lore.kernel.org/r/1592654683-31314-1-git-send-email-chenhc@lemote.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/rtc/rtc-goldfish.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/rtc/rtc-goldfish.c b/drivers/rtc/rtc-goldfish.c
+index cb6b0ad7ec3f2..5dd92147f1680 100644
+--- a/drivers/rtc/rtc-goldfish.c
++++ b/drivers/rtc/rtc-goldfish.c
+@@ -73,6 +73,7 @@ static int goldfish_rtc_set_alarm(struct device *dev,
+               rtc_alarm64 = rtc_tm_to_time64(&alrm->time) * NSEC_PER_SEC;
+               writel((rtc_alarm64 >> 32), base + TIMER_ALARM_HIGH);
+               writel(rtc_alarm64, base + TIMER_ALARM_LOW);
++              writel(1, base + TIMER_IRQ_ENABLED);
+       } else {
+               /*
+                * if this function was called with enabled=0
+-- 
+2.25.1
+
diff --git a/queue-5.7/scsi-libfc-free-skb-in-fc_disc_gpn_id_resp-for-valid.patch b/queue-5.7/scsi-libfc-free-skb-in-fc_disc_gpn_id_resp-for-valid.patch
new file mode 100644 (file)
index 0000000..f1ec4ff
--- /dev/null
@@ -0,0 +1,66 @@
+From fb594bb6ff4d63718082270e73521b0324c0c11d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 Jul 2020 01:18:23 -0700
+Subject: scsi: libfc: Free skb in fc_disc_gpn_id_resp() for valid cases
+
+From: Javed Hasan <jhasan@marvell.com>
+
+[ Upstream commit ec007ef40abb6a164d148b0dc19789a7a2de2cc8 ]
+
+In fc_disc_gpn_id_resp(), the skb is supposed to be freed in all cases
+except for the PTR_ERR case. However, in some cases it was not.
+
+Fix this by calling fc_frame_free(fp) before the function returns.
+
+Link: https://lore.kernel.org/r/20200729081824.30996-2-jhasan@marvell.com
+Reviewed-by: Girish Basrur <gbasrur@marvell.com>
+Reviewed-by: Santosh Vernekar <svernekar@marvell.com>
+Reviewed-by: Saurav Kashyap <skashyap@marvell.com>
+Reviewed-by: Shyam Sundar <ssundar@marvell.com>
+Signed-off-by: Javed Hasan <jhasan@marvell.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/libfc/fc_disc.c | 12 +++++++++---
+ 1 file changed, 9 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
+index 2b865c6423e29..e00dc4693fcbd 100644
+--- a/drivers/scsi/libfc/fc_disc.c
++++ b/drivers/scsi/libfc/fc_disc.c
+@@ -581,8 +581,12 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+       if (PTR_ERR(fp) == -FC_EX_CLOSED)
+               goto out;
+-      if (IS_ERR(fp))
+-              goto redisc;
++      if (IS_ERR(fp)) {
++              mutex_lock(&disc->disc_mutex);
++              fc_disc_restart(disc);
++              mutex_unlock(&disc->disc_mutex);
++              goto out;
++      }
+       cp = fc_frame_payload_get(fp, sizeof(*cp));
+       if (!cp)
+@@ -609,7 +613,7 @@ static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+                               new_rdata->disc_id = disc->disc_id;
+                               fc_rport_login(new_rdata);
+                       }
+-                      goto out;
++                      goto free_fp;
+               }
+               rdata->disc_id = disc->disc_id;
+               mutex_unlock(&rdata->rp_mutex);
+@@ -626,6 +630,8 @@ redisc:
+               fc_disc_restart(disc);
+               mutex_unlock(&disc->disc_mutex);
+       }
++free_fp:
++      fc_frame_free(fp);
+ out:
+       kref_put(&rdata->kref, fc_rport_destroy);
+       if (!IS_ERR(fp))
+-- 
+2.25.1
+
diff --git a/queue-5.7/scsi-target-tcmu-fix-crash-in-tcmu_flush_dcache_rang.patch b/queue-5.7/scsi-target-tcmu-fix-crash-in-tcmu_flush_dcache_rang.patch
new file mode 100644 (file)
index 0000000..6d99efc
--- /dev/null
@@ -0,0 +1,94 @@
+From c740f7fe20d146f3c51b7661b54d0b8545790761 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Jun 2020 15:16:32 +0200
+Subject: scsi: target: tcmu: Fix crash in tcmu_flush_dcache_range on ARM
+
+From: Bodo Stroesser <bstroesser@ts.fujitsu.com>
+
+[ Upstream commit 3145550a7f8b08356c8ff29feaa6c56aca12901d ]
+
+This patch fixes the following crash (see
+https://bugzilla.kernel.org/show_bug.cgi?id=208045)
+
+ Process iscsi_trx (pid: 7496, stack limit = 0x0000000010dd111a)
+ CPU: 0 PID: 7496 Comm: iscsi_trx Not tainted 4.19.118-0419118-generic
+        #202004230533
+ Hardware name: Greatwall QingTian DF720/F601, BIOS 601FBE20 Sep 26 2019
+ pstate: 80400005 (Nzcv daif +PAN -UAO)
+ pc : flush_dcache_page+0x18/0x40
+ lr : is_ring_space_avail+0x68/0x2f8 [target_core_user]
+ sp : ffff000015123a80
+ x29: ffff000015123a80 x28: 0000000000000000
+ x27: 0000000000001000 x26: ffff000023ea5000
+ x25: ffffcfa25bbe08b8 x24: 0000000000000078
+ x23: ffff7e0000000000 x22: ffff000023ea5001
+ x21: ffffcfa24b79c000 x20: 0000000000000fff
+ x19: ffff7e00008fa940 x18: 0000000000000000
+ x17: 0000000000000000 x16: ffff2d047e709138
+ x15: 0000000000000000 x14: 0000000000000000
+ x13: 0000000000000000 x12: ffff2d047fbd0a40
+ x11: 0000000000000000 x10: 0000000000000030
+ x9 : 0000000000000000 x8 : ffffc9a254820a00
+ x7 : 00000000000013b0 x6 : 000000000000003f
+ x5 : 0000000000000040 x4 : ffffcfa25bbe08e8
+ x3 : 0000000000001000 x2 : 0000000000000078
+ x1 : ffffcfa25bbe08b8 x0 : ffff2d040bc88a18
+ Call trace:
+  flush_dcache_page+0x18/0x40
+  is_ring_space_avail+0x68/0x2f8 [target_core_user]
+  queue_cmd_ring+0x1f8/0x680 [target_core_user]
+  tcmu_queue_cmd+0xe4/0x158 [target_core_user]
+  __target_execute_cmd+0x30/0xf0 [target_core_mod]
+  target_execute_cmd+0x294/0x390 [target_core_mod]
+  transport_generic_new_cmd+0x1e8/0x358 [target_core_mod]
+  transport_handle_cdb_direct+0x50/0xb0 [target_core_mod]
+  iscsit_execute_cmd+0x2b4/0x350 [iscsi_target_mod]
+  iscsit_sequence_cmd+0xd8/0x1d8 [iscsi_target_mod]
+  iscsit_process_scsi_cmd+0xac/0xf8 [iscsi_target_mod]
+  iscsit_get_rx_pdu+0x404/0xd00 [iscsi_target_mod]
+  iscsi_target_rx_thread+0xb8/0x130 [iscsi_target_mod]
+  kthread+0x130/0x138
+  ret_from_fork+0x10/0x18
+ Code: f9000bf3 aa0003f3 aa1e03e0 d503201f (f9400260)
+ ---[ end trace 1e451c73f4266776 ]---
+
+The solution is based on patch:
+
+  "scsi: target: tcmu: Optimize use of flush_dcache_page"
+
+which restricts the use of tcmu_flush_dcache_range() to addresses from
+vmalloc'ed areas only.
+
+This patch now replaces the virt_to_page() call in
+tcmu_flush_dcache_range() - which is wrong for vmalloc'ed addresses -
+with vmalloc_to_page().
+
+The patch was tested on ARM with kernels 4.19.118 and 5.7.2.
+
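+For background (an illustrative sketch mirroring the swiotlb-xen change
+in this same batch, not the tcmu code itself): virt_to_page() is only
+valid for linear-mapping addresses, so vmalloc'ed buffers need the
+page-table walk:
+
+        if (is_vmalloc_addr(vaddr))
+                page = vmalloc_to_page(vaddr);  /* walks the page tables */
+        else
+                page = virt_to_page(vaddr);     /* linear mapping only   */
+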
+Link: https://lore.kernel.org/r/20200618131632.32748-3-bstroesser@ts.fujitsu.com
+Tested-by: JiangYu <lnsyyj@hotmail.com>
+Tested-by: Daniel Meyerholt <dxm523@gmail.com>
+Acked-by: Mike Christie <michael.christie@oracle.com>
+Signed-off-by: Bodo Stroesser <bstroesser@ts.fujitsu.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/target/target_core_user.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
+index b63a1e0c4aa6d..a55114975b00d 100644
+--- a/drivers/target/target_core_user.c
++++ b/drivers/target/target_core_user.c
+@@ -601,7 +601,7 @@ static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
+       size = round_up(size+offset, PAGE_SIZE);
+       while (size) {
+-              flush_dcache_page(virt_to_page(start));
++              flush_dcache_page(vmalloc_to_page(start));
+               start += PAGE_SIZE;
+               size -= PAGE_SIZE;
+       }
+-- 
+2.25.1
+
diff --git a/queue-5.7/scsi-ufs-add-delay_before_lpm-quirk-for-micron-devic.patch b/queue-5.7/scsi-ufs-add-delay_before_lpm-quirk-for-micron-devic.patch
new file mode 100644 (file)
index 0000000..a7c9d02
--- /dev/null
@@ -0,0 +1,52 @@
+From 143069352e0fe2e9c287d23af10972f074068cc4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 12 Jun 2020 09:26:24 +0800
+Subject: scsi: ufs: Add DELAY_BEFORE_LPM quirk for Micron devices
+
+From: Stanley Chu <stanley.chu@mediatek.com>
+
+[ Upstream commit c0a18ee0ce78d7957ec1a53be35b1b3beba80668 ]
+
+It is confirmed that Micron devices need the DELAY_BEFORE_LPM quirk to
+have a delay before VCC is powered off. Add the Micron vendor ID and
+this quirk for Micron devices.
+
+Link: https://lore.kernel.org/r/20200612012625.6615-2-stanley.chu@mediatek.com
+Reviewed-by: Bean Huo <beanhuo@micron.com>
+Reviewed-by: Alim Akhtar <alim.akhtar@samsung.com>
+Signed-off-by: Stanley Chu <stanley.chu@mediatek.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/ufs/ufs_quirks.h | 1 +
+ drivers/scsi/ufs/ufshcd.c     | 2 ++
+ 2 files changed, 3 insertions(+)
+
+diff --git a/drivers/scsi/ufs/ufs_quirks.h b/drivers/scsi/ufs/ufs_quirks.h
+index df7a1e6805a3b..c3af72c58805d 100644
+--- a/drivers/scsi/ufs/ufs_quirks.h
++++ b/drivers/scsi/ufs/ufs_quirks.h
+@@ -12,6 +12,7 @@
+ #define UFS_ANY_VENDOR 0xFFFF
+ #define UFS_ANY_MODEL  "ANY_MODEL"
++#define UFS_VENDOR_MICRON      0x12C
+ #define UFS_VENDOR_TOSHIBA     0x198
+ #define UFS_VENDOR_SAMSUNG     0x1CE
+ #define UFS_VENDOR_SKHYNIX     0x1AD
+diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
+index 477b6cfff381b..2c02967f159ea 100644
+--- a/drivers/scsi/ufs/ufshcd.c
++++ b/drivers/scsi/ufs/ufshcd.c
+@@ -211,6 +211,8 @@ ufs_get_desired_pm_lvl_for_dev_link_state(enum ufs_dev_pwr_mode dev_state,
+ static struct ufs_dev_fix ufs_fixups[] = {
+       /* UFS cards deviations table */
++      UFS_FIX(UFS_VENDOR_MICRON, UFS_ANY_MODEL,
++              UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+               UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM),
+       UFS_FIX(UFS_VENDOR_SAMSUNG, UFS_ANY_MODEL,
+-- 
+2.25.1
+
index c3d250cacd20dd706f15ea956d084a424c8de6b8..df74698070d7c81d630e37a0caacefa3e3b82868 100644 (file)
@@ -25,3 +25,35 @@ drm-amd-display-fix-edid-parsing-after-resume-from-suspend.patch
 drm-amd-display-blank-stream-before-destroying-hdcp-session.patch
 drm-amd-display-fix-dfpstate-hang-due-to-view-port-changed.patch
 drm-amd-display-fix-pow-crashing-when-given-base-0.patch
+io-wq-reorder-cancellation-pending-running.patch
+io-wq-add-an-option-to-cancel-all-matched-reqs.patch
+io_uring-cancel-all-task-s-requests-on-exit.patch
+io_uring-find-and-cancel-head-link-async-work-on-fil.patch
+opp-enable-resources-again-if-they-were-disabled-ear.patch
+kvm-arm64-only-reschedule-if-mmu_notifier_range_bloc.patch
+opp-reorder-the-code-for-target_freq-case.patch
+opp-put-opp-table-in-dev_pm_opp_set_rate-for-empty-t.patch
+scsi-ufs-add-delay_before_lpm-quirk-for-micron-devic.patch
+scsi-target-tcmu-fix-crash-in-tcmu_flush_dcache_rang.patch
+media-budget-core-improve-exception-handling-in-budg.patch
+f2fs-fix-to-check-page-dirty-status-before-writeback.patch
+rtc-goldfish-enable-interrupt-in-set_alarm-when-nece.patch
+media-vpss-clean-up-resources-in-init.patch
+input-psmouse-add-a-newline-when-printing-proto-by-s.patch
+mips-fix-unable-to-reserve-memory-for-crash-kernel.patch
+m68knommu-fix-overwriting-of-bits-in-coldfire-v3-cac.patch
+svcrdma-fix-another-receive-buffer-leak.patch
+xfs-fix-inode-quota-reservation-checks.patch
+drm-ttm-fix-offset-in-vmas-with-a-pg_offs-in-ttm_bo_.patch
+riscv-fixup-static_obj-fail.patch
+jffs2-fix-uaf-problem.patch
+ceph-fix-use-after-free-for-fsc-mdsc.patch
+swiotlb-xen-use-vmalloc_to_page-on-vmalloc-virt-addr.patch
+cpufreq-intel_pstate-fix-cpuinfo_max_freq-when-msr_t.patch
+scsi-libfc-free-skb-in-fc_disc_gpn_id_resp-for-valid.patch
+virtio_ring-avoid-loop-when-vq-is-broken-in-virtqueu.patch
+media-camss-fix-memory-leaks-on-error-handling-paths.patch
+tools-testing-selftests-cgroup-cgroup_util.c-cg_read.patch
+xfs-fix-ubsan-null-ptr-deref-in-xfs_sysfs_init.patch
+alpha-fix-annotation-of-io-read-write-16-32-be.patch
+fs-signalfd.c-fix-inconsistent-return-codes-for-sign.patch
diff --git a/queue-5.7/svcrdma-fix-another-receive-buffer-leak.patch b/queue-5.7/svcrdma-fix-another-receive-buffer-leak.patch
new file mode 100644 (file)
index 0000000..5cc7fe7
--- /dev/null
@@ -0,0 +1,45 @@
+From e515e070ff3f882fa8ac8257334189a37a3150bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 30 Jun 2020 15:55:45 -0400
+Subject: svcrdma: Fix another Receive buffer leak
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+[ Upstream commit 64d26422516b2e347b32e6d9b1d40b3c19a62aae ]
+
+During a connection tear down, the Receive queue is flushed before
+the device resources are freed. Typically, all the Receives flush
+with IB_WR_FLUSH_ERR.
+
+However, any pending successful Receives flush with IB_WR_SUCCESS,
+and the server automatically posts a fresh Receive to replace the
+completing one. This happens even after the connection has closed
+and the RQ is drained. Receives that are posted after the RQ is
+drained appear never to complete, causing a Receive resource leak.
+The leaked Receive buffer is left DMA-mapped.
+
+To prevent these late-posted recv_ctxt's from leaking, block new
+Receive posting after XPT_CLOSE is set.
+
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sunrpc/xprtrdma/svc_rdma_recvfrom.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+diff --git a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+index efa5fcb5793f7..952b8f1908500 100644
+--- a/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
++++ b/net/sunrpc/xprtrdma/svc_rdma_recvfrom.c
+@@ -265,6 +265,8 @@ static int svc_rdma_post_recv(struct svcxprt_rdma *rdma)
+ {
+       struct svc_rdma_recv_ctxt *ctxt;
++      if (test_bit(XPT_CLOSE, &rdma->sc_xprt.xpt_flags))
++              return 0;
+       ctxt = svc_rdma_recv_ctxt_get(rdma);
+       if (!ctxt)
+               return -ENOMEM;
+-- 
+2.25.1
+
diff --git a/queue-5.7/swiotlb-xen-use-vmalloc_to_page-on-vmalloc-virt-addr.patch b/queue-5.7/swiotlb-xen-use-vmalloc_to_page-on-vmalloc-virt-addr.patch
new file mode 100644 (file)
index 0000000..8aae72d
--- /dev/null
@@ -0,0 +1,63 @@
+From 4c4cdf7f4e52ad4e35dd7513d8ac62629f1af5c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Jul 2020 15:34:17 -0700
+Subject: swiotlb-xen: use vmalloc_to_page on vmalloc virt addresses
+
+From: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+
+[ Upstream commit 8b1e868f66076490189a36d984fcce286cdd6295 ]
+
+xen_alloc_coherent_pages might return pages for which virt_to_phys and
+virt_to_page don't work, e.g. ioremap'ed pages.
+
+So in xen_swiotlb_free_coherent we can't assume that virt_to_page works.
+Instead add an is_vmalloc_addr check and use vmalloc_to_page on vmalloc
+virt addresses.
+
+This patch fixes the following crash at boot on RPi4 (the underlying
+issue is not RPi4 specific):
+https://marc.info/?l=xen-devel&m=158862573216800
+
+Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Signed-off-by: Stefano Stabellini <stefano.stabellini@xilinx.com>
+Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
+Tested-by: Corey Minyard <cminyard@mvista.com>
+Tested-by: Roman Shaposhnik <roman@zededa.com>
+Link: https://lore.kernel.org/r/20200710223427.6897-1-sstabellini@kernel.org
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/xen/swiotlb-xen.c | 8 +++++++-
+ 1 file changed, 7 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/xen/swiotlb-xen.c b/drivers/xen/swiotlb-xen.c
+index b6d27762c6f8c..5fbadd07819bd 100644
+--- a/drivers/xen/swiotlb-xen.c
++++ b/drivers/xen/swiotlb-xen.c
+@@ -335,6 +335,7 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+       int order = get_order(size);
+       phys_addr_t phys;
+       u64 dma_mask = DMA_BIT_MASK(32);
++      struct page *page;
+       if (hwdev && hwdev->coherent_dma_mask)
+               dma_mask = hwdev->coherent_dma_mask;
+@@ -346,9 +347,14 @@ xen_swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
+       /* Convert the size to actually allocated. */
+       size = 1UL << (order + XEN_PAGE_SHIFT);
++      if (is_vmalloc_addr(vaddr))
++              page = vmalloc_to_page(vaddr);
++      else
++              page = virt_to_page(vaddr);
++
+       if (!WARN_ON((dev_addr + size - 1 > dma_mask) ||
+                    range_straddles_page_boundary(phys, size)) &&
+-          TestClearPageXenRemapped(virt_to_page(vaddr)))
++          TestClearPageXenRemapped(page))
+               xen_destroy_contiguous_region(phys, order);
+       xen_free_coherent_pages(hwdev, size, vaddr, (dma_addr_t)phys, attrs);
+-- 
+2.25.1
+
diff --git a/queue-5.7/tools-testing-selftests-cgroup-cgroup_util.c-cg_read.patch b/queue-5.7/tools-testing-selftests-cgroup-cgroup_util.c-cg_read.patch
new file mode 100644 (file)
index 0000000..743f0b0
--- /dev/null
@@ -0,0 +1,44 @@
+From dd0ac5b062eed05c4953a191840288039a05f7c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Aug 2020 23:17:25 -0700
+Subject: tools/testing/selftests/cgroup/cgroup_util.c: cg_read_strcmp: fix
+ null pointer dereference
+
+From: Gaurav Singh <gaurav1086@gmail.com>
+
+[ Upstream commit d830020656c5b68ced962ed3cb51a90e0a89d4c4 ]
+
+Haven't reproduced this issue. This patch does a minor code cleanup.
+
+Signed-off-by: Gaurav Singh <gaurav1086@gmail.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Shuah Khan <shuah@kernel.org>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Michal Koutn <mkoutny@suse.com>
+Cc: Roman Gushchin <guro@fb.com>
+Cc: Christian Brauner <christian.brauner@ubuntu.com>
+Cc: Chris Down <chris@chrisdown.name>
+Link: http://lkml.kernel.org/r/20200726013808.22242-1-gaurav1086@gmail.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/cgroup/cgroup_util.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/tools/testing/selftests/cgroup/cgroup_util.c b/tools/testing/selftests/cgroup/cgroup_util.c
+index 8a637ca7d73a4..05853b0b88318 100644
+--- a/tools/testing/selftests/cgroup/cgroup_util.c
++++ b/tools/testing/selftests/cgroup/cgroup_util.c
+@@ -106,7 +106,7 @@ int cg_read_strcmp(const char *cgroup, const char *control,
+       /* Handle the case of comparing against empty string */
+       if (!expected)
+-              size = 32;
++              return -1;
+       else
+               size = strlen(expected) + 1;
+-- 
+2.25.1
+
diff --git a/queue-5.7/virtio_ring-avoid-loop-when-vq-is-broken-in-virtqueu.patch b/queue-5.7/virtio_ring-avoid-loop-when-vq-is-broken-in-virtqueu.patch
new file mode 100644 (file)
index 0000000..df61ff6
--- /dev/null
@@ -0,0 +1,53 @@
+From af7e374842d5f6a07d7ce6d9b857e2e17ba2d872 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 2 Aug 2020 15:44:09 +0800
+Subject: virtio_ring: Avoid loop when vq is broken in virtqueue_poll
+
+From: Mao Wenan <wenan.mao@linux.alibaba.com>
+
+[ Upstream commit 481a0d7422db26fb63e2d64f0652667a5c6d0f3e ]
+
+The loop may occur if vq->broken is true: virtqueue_get_buf_ctx_packed
+or virtqueue_get_buf_ctx_split will return NULL, so virtnet_poll will
+reschedule napi to receive packets, which drives cpu usage (si) to
+100%.
+
+call trace as below:
+virtnet_poll
+       virtnet_receive
+               virtqueue_get_buf_ctx
+                       virtqueue_get_buf_ctx_packed
+                       virtqueue_get_buf_ctx_split
+       virtqueue_napi_complete
+               virtqueue_poll           //return true
+               virtqueue_napi_schedule //it will reschedule napi
+
+To fix this, return false in virtqueue_poll() if the vq is broken.
+
+Signed-off-by: Mao Wenan <wenan.mao@linux.alibaba.com>
+Acked-by: Michael S. Tsirkin <mst@redhat.com>
+Link: https://lore.kernel.org/r/1596354249-96204-1-git-send-email-wenan.mao@linux.alibaba.com
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Jason Wang <jasowang@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/virtio/virtio_ring.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c
+index 58b96baa8d488..4f7c73e6052f6 100644
+--- a/drivers/virtio/virtio_ring.c
++++ b/drivers/virtio/virtio_ring.c
+@@ -1960,6 +1960,9 @@ bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx)
+ {
+       struct vring_virtqueue *vq = to_vvq(_vq);
++      if (unlikely(vq->broken))
++              return false;
++
+       virtio_mb(vq->weak_barriers);
+       return vq->packed_ring ? virtqueue_poll_packed(_vq, last_used_idx) :
+                                virtqueue_poll_split(_vq, last_used_idx);
+-- 
+2.25.1
+
diff --git a/queue-5.7/xfs-fix-inode-quota-reservation-checks.patch b/queue-5.7/xfs-fix-inode-quota-reservation-checks.patch
new file mode 100644 (file)
index 0000000..f9c6653
--- /dev/null
@@ -0,0 +1,56 @@
+From d5e69d71df95a3d92e4d96791c835de474f3bdd2 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 14 Jul 2020 10:36:09 -0700
+Subject: xfs: fix inode quota reservation checks
+
+From: Darrick J. Wong <darrick.wong@oracle.com>
+
+[ Upstream commit f959b5d037e71a4d69b5bf71faffa065d9269b4a ]
+
+xfs_trans_dqresv is the function that we use to make reservations
+against resource quotas.  Each resource contains two counters: the
+q_core counter, which tracks resources allocated on disk; and the dquot
+reservation counter, which tracks how much of that resource has either
+been allocated or reserved by threads that are working on metadata
+updates.
+
+For disk blocks, we compare the proposed reservation counter against the
+hard and soft limits to decide if we're going to fail the operation.
+However, for inodes we inexplicably compare against the q_core counter,
+not the incore reservation count.
+
+Since the q_core counter is always lower than the reservation count and
+we unlock the dquot between reservation and transaction commit, this
+means that multiple threads can reserve the last inode count before we
+hit the hard limit, and when they commit, we'll be well over the hard
+limit.
+
+Fix this by checking against the incore inode reservation counter, since
+we would appear to maintain that correctly (and that's what we report in
+GETQUOTA).
+
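+A worked example of the race (limits and counts invented for
+illustration): assume a hard limit of 100 inodes, with d_icount = 99 on
+disk and q_res_icount = 99:
+
+        thread A: d_icount (99) + 1 <= 100  -> allowed, q_res_icount -> 100
+        thread B: d_icount (99) + 1 <= 100  -> allowed, q_res_icount -> 101
+        both commit                         -> d_icount = 101, over the limit
+
+Checking q_res_icount instead makes thread B's reservation fail.
+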
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Reviewed-by: Allison Collins <allison.henderson@oracle.com>
+Reviewed-by: Chandan Babu R <chandanrlinux@gmail.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/xfs_trans_dquot.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/xfs/xfs_trans_dquot.c b/fs/xfs/xfs_trans_dquot.c
+index d1b9869bc5fa6..af3636a99bf60 100644
+--- a/fs/xfs/xfs_trans_dquot.c
++++ b/fs/xfs/xfs_trans_dquot.c
+@@ -647,7 +647,7 @@ xfs_trans_dqresv(
+                       }
+               }
+               if (ninos > 0) {
+-                      total_count = be64_to_cpu(dqp->q_core.d_icount) + ninos;
++                      total_count = dqp->q_res_icount + ninos;
+                       timer = be32_to_cpu(dqp->q_core.d_itimer);
+                       warns = be16_to_cpu(dqp->q_core.d_iwarns);
+                       warnlimit = dqp->q_mount->m_quotainfo->qi_iwarnlimit;
+-- 
+2.25.1
+
diff --git a/queue-5.7/xfs-fix-ubsan-null-ptr-deref-in-xfs_sysfs_init.patch b/queue-5.7/xfs-fix-ubsan-null-ptr-deref-in-xfs_sysfs_init.patch
new file mode 100644 (file)
index 0000000..1c75321
--- /dev/null
@@ -0,0 +1,59 @@
+From 3c5c495e96223cd5fb700d8dd405f0e8bb44d77a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Aug 2020 15:18:48 -0700
+Subject: xfs: Fix UBSAN null-ptr-deref in xfs_sysfs_init
+
+From: Eiichi Tsukata <devel@etsukata.com>
+
+[ Upstream commit 96cf2a2c75567ff56195fe3126d497a2e7e4379f ]
+
+If xfs_sysfs_init is called with parent_kobj == NULL, UBSAN
+shows the following warning:
+
+  UBSAN: null-ptr-deref in ./fs/xfs/xfs_sysfs.h:37:23
+  member access within null pointer of type 'struct xfs_kobj'
+  Call Trace:
+   dump_stack+0x10e/0x195
+   ubsan_type_mismatch_common+0x241/0x280
+   __ubsan_handle_type_mismatch_v1+0x32/0x40
+   init_xfs_fs+0x12b/0x28f
+   do_one_initcall+0xdd/0x1d0
+   do_initcall_level+0x151/0x1b6
+   do_initcalls+0x50/0x8f
+   do_basic_setup+0x29/0x2b
+   kernel_init_freeable+0x19f/0x20b
+   kernel_init+0x11/0x1e0
+   ret_from_fork+0x22/0x30
+
+Fix it by checking parent_kobj before the code accesses its member.
+
+Signed-off-by: Eiichi Tsukata <devel@etsukata.com>
+Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
+[darrick: minor whitespace edits]
+Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/xfs/xfs_sysfs.h | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/fs/xfs/xfs_sysfs.h b/fs/xfs/xfs_sysfs.h
+index e9f810fc67317..43585850f1546 100644
+--- a/fs/xfs/xfs_sysfs.h
++++ b/fs/xfs/xfs_sysfs.h
+@@ -32,9 +32,11 @@ xfs_sysfs_init(
+       struct xfs_kobj         *parent_kobj,
+       const char              *name)
+ {
++      struct kobject          *parent;
++
++      parent = parent_kobj ? &parent_kobj->kobject : NULL;
+       init_completion(&kobj->complete);
+-      return kobject_init_and_add(&kobj->kobject, ktype,
+-                                  &parent_kobj->kobject, "%s", name);
++      return kobject_init_and_add(&kobj->kobject, ktype, parent, "%s", name);
+ }
+ static inline void
+-- 
+2.25.1
+