git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.6-stable patches
author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 25 May 2020 14:06:10 +0000 (16:06 +0200)
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>
           Mon, 25 May 2020 14:06:10 +0000 (16:06 +0200)
added patches:
device-dax-don-t-leak-kernel-memory-to-user-space-after-unloading-kmem.patch
ipack-tpci200-fix-error-return-code-in-tpci200_register.patch
kasan-disable-branch-tracing-for-core-runtime.patch
rapidio-fix-an-error-in-get_user_pages_fast-error-handling.patch
s390-kaslr-add-support-for-r_390_jmp_slot-relocation-type.patch
s390-pci-fix-s390_mmio_read-write-with-mio.patch
sh-include-linux-time_types.h-for-sockios.patch
sparc32-fix-page-table-traversal-in-srmmu_nocache_init.patch
sparc32-use-pud-rather-than-pgd-to-get-pmd-in-srmmu_nocache_init.patch
z3fold-fix-use-after-free-when-freeing-handles.patch

queue-5.6/device-dax-don-t-leak-kernel-memory-to-user-space-after-unloading-kmem.patch [new file with mode: 0644]
queue-5.6/ipack-tpci200-fix-error-return-code-in-tpci200_register.patch [new file with mode: 0644]
queue-5.6/kasan-disable-branch-tracing-for-core-runtime.patch [new file with mode: 0644]
queue-5.6/rapidio-fix-an-error-in-get_user_pages_fast-error-handling.patch [new file with mode: 0644]
queue-5.6/s390-kaslr-add-support-for-r_390_jmp_slot-relocation-type.patch [new file with mode: 0644]
queue-5.6/s390-pci-fix-s390_mmio_read-write-with-mio.patch [new file with mode: 0644]
queue-5.6/series
queue-5.6/sh-include-linux-time_types.h-for-sockios.patch [new file with mode: 0644]
queue-5.6/sparc32-fix-page-table-traversal-in-srmmu_nocache_init.patch [new file with mode: 0644]
queue-5.6/sparc32-use-pud-rather-than-pgd-to-get-pmd-in-srmmu_nocache_init.patch [new file with mode: 0644]
queue-5.6/z3fold-fix-use-after-free-when-freeing-handles.patch [new file with mode: 0644]

diff --git a/queue-5.6/device-dax-don-t-leak-kernel-memory-to-user-space-after-unloading-kmem.patch b/queue-5.6/device-dax-don-t-leak-kernel-memory-to-user-space-after-unloading-kmem.patch
new file mode 100644 (file)
index 0000000..f1bfa64
--- /dev/null
@@ -0,0 +1,129 @@
+From 60858c00e5f018eda711a3aa84cf62214ef62d61 Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Fri, 22 May 2020 22:22:42 -0700
+Subject: device-dax: don't leak kernel memory to user space after unloading kmem
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 60858c00e5f018eda711a3aa84cf62214ef62d61 upstream.
+
+Assume we have kmem configured and loaded:
+
+  [root@localhost ~]# cat /proc/iomem
+  ...
+  140000000-33fffffff : Persistent Memory$
+    140000000-1481fffff : namespace0.0
+    150000000-33fffffff : dax0.0
+      150000000-33fffffff : System RAM
+
+Assume we try to unload kmem. This force-unloading will work, even if
+memory cannot get removed from the system.
+
+  [root@localhost ~]# rmmod kmem
+  [   86.380228] removing memory fails, because memory [0x0000000150000000-0x0000000157ffffff] is onlined
+  ...
+  [   86.431225] kmem dax0.0: DAX region [mem 0x150000000-0x33fffffff] cannot be hotremoved until the next reboot
+
+Now, we can reconfigure the namespace:
+
+  [root@localhost ~]# ndctl create-namespace --force --reconfig=namespace0.0 --mode=devdax
+  [  131.409351] nd_pmem namespace0.0: could not reserve region [mem 0x140000000-0x33fffffff]dax
+  [  131.410147] nd_pmem: probe of namespace0.0 failed with error -16namespace0.0 --mode=devdax
+  ...
+
+This fails as expected due to the busy memory resource, and the memory
+cannot be used.  However, the dax0.0 device is removed, and along with
+it its name.
+
+The name of the memory resource now points at freed memory (name of the
+device):
+
+  [root@localhost ~]# cat /proc/iomem
+  ...
+  140000000-33fffffff : Persistent Memory
+    140000000-1481fffff : namespace0.0
+    150000000-33fffffff : �_�^7_��/_��wR��WQ���^��� ...
+    150000000-33fffffff : System RAM
+
+We have to make sure to duplicate the string.  While at it, remove the
+superfluous setting of the name and fixup a stale comment.
+
+Fixes: 9f960da72b25 ("device-dax: "Hotremove" persistent memory that is used like normal RAM")
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Dan Williams <dan.j.williams@intel.com>
+Cc: Vishal Verma <vishal.l.verma@intel.com>
+Cc: Dave Jiang <dave.jiang@intel.com>
+Cc: Pavel Tatashin <pasha.tatashin@soleen.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: <stable@vger.kernel.org>   [5.3]
+Link: http://lkml.kernel.org/r/20200508084217.9160-2-david@redhat.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dax/kmem.c |   14 +++++++++++---
+ 1 file changed, 11 insertions(+), 3 deletions(-)
+
+--- a/drivers/dax/kmem.c
++++ b/drivers/dax/kmem.c
+@@ -22,6 +22,7 @@ int dev_dax_kmem_probe(struct device *de
+       resource_size_t kmem_size;
+       resource_size_t kmem_end;
+       struct resource *new_res;
++      const char *new_res_name;
+       int numa_node;
+       int rc;
+@@ -48,11 +49,16 @@ int dev_dax_kmem_probe(struct device *de
+       kmem_size &= ~(memory_block_size_bytes() - 1);
+       kmem_end = kmem_start + kmem_size;
+-      /* Region is permanently reserved.  Hot-remove not yet implemented. */
+-      new_res = request_mem_region(kmem_start, kmem_size, dev_name(dev));
++      new_res_name = kstrdup(dev_name(dev), GFP_KERNEL);
++      if (!new_res_name)
++              return -ENOMEM;
++
++      /* Region is permanently reserved if hotremove fails. */
++      new_res = request_mem_region(kmem_start, kmem_size, new_res_name);
+       if (!new_res) {
+               dev_warn(dev, "could not reserve region [%pa-%pa]\n",
+                        &kmem_start, &kmem_end);
++              kfree(new_res_name);
+               return -EBUSY;
+       }
+@@ -63,12 +69,12 @@ int dev_dax_kmem_probe(struct device *de
+        * unknown to us that will break add_memory() below.
+        */
+       new_res->flags = IORESOURCE_SYSTEM_RAM;
+-      new_res->name = dev_name(dev);
+       rc = add_memory(numa_node, new_res->start, resource_size(new_res));
+       if (rc) {
+               release_resource(new_res);
+               kfree(new_res);
++              kfree(new_res_name);
+               return rc;
+       }
+       dev_dax->dax_kmem_res = new_res;
+@@ -83,6 +89,7 @@ static int dev_dax_kmem_remove(struct de
+       struct resource *res = dev_dax->dax_kmem_res;
+       resource_size_t kmem_start = res->start;
+       resource_size_t kmem_size = resource_size(res);
++      const char *res_name = res->name;
+       int rc;
+       /*
+@@ -102,6 +109,7 @@ static int dev_dax_kmem_remove(struct de
+       /* Release and free dax resources */
+       release_resource(res);
+       kfree(res);
++      kfree(res_name);
+       dev_dax->dax_kmem_res = NULL;
+       return 0;
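
The fix above comes down to string ownership: struct resource stores only a pointer to its name, so passing dev_name(dev) leaves that pointer dangling once the device (and its name) is freed, which is exactly the garbage seen in /proc/iomem. Below is a minimal userspace C sketch of the same ownership rule, not part of the queued patch; fake_resource and the names used are hypothetical stand-ins meant only to show why the patch switches to kstrdup()/kfree().

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* stand-in for struct resource: it stores only a pointer to its name */
  struct fake_resource {
          const char *name;
  };

  int main(void)
  {
          char *device_name = strdup("dax0.0");       /* owned by the "device" */
          struct fake_resource res;

          res.name = device_name;                     /* buggy: borrow the owner's string */
          const char *copy = strdup(device_name);     /* safe: independent copy (kstrdup) */

          printf("before unload: %s\n", res.name);
          free(device_name);                          /* the "device" goes away */

          /* res.name now dangles (dereferencing it is undefined behaviour); the
           * duplicated string is still valid and must be freed separately,
           * mirroring kfree(new_res_name) in the patch */
          printf("after unload:  %s\n", copy);
          free((void *)copy);
          return 0;
  }
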
diff --git a/queue-5.6/ipack-tpci200-fix-error-return-code-in-tpci200_register.patch b/queue-5.6/ipack-tpci200-fix-error-return-code-in-tpci200_register.patch
new file mode 100644 (file)
index 0000000..683863f
--- /dev/null
@@ -0,0 +1,34 @@
+From 133317479f0324f6faaf797c4f5f3e9b1b36ce35 Mon Sep 17 00:00:00 2001
+From: Wei Yongjun <weiyongjun1@huawei.com>
+Date: Thu, 7 May 2020 09:42:37 +0000
+Subject: ipack: tpci200: fix error return code in tpci200_register()
+
+From: Wei Yongjun <weiyongjun1@huawei.com>
+
+commit 133317479f0324f6faaf797c4f5f3e9b1b36ce35 upstream.
+
+Fix to return negative error code -ENOMEM from the ioremap() error handling
+case instead of 0, as done elsewhere in this function.
+
+Fixes: 43986798fd50 ("ipack: add error handling for ioremap_nocache")
+Reported-by: Hulk Robot <hulkci@huawei.com>
+Signed-off-by: Wei Yongjun <weiyongjun1@huawei.com>
+Cc: stable <stable@vger.kernel.org>
+Acked-by: Samuel Iglesias Gonsalvez <siglesias@igalia.com>
+Link: https://lore.kernel.org/r/20200507094237.13599-1-weiyongjun1@huawei.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/ipack/carriers/tpci200.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/ipack/carriers/tpci200.c
++++ b/drivers/ipack/carriers/tpci200.c
+@@ -306,6 +306,7 @@ static int tpci200_register(struct tpci2
+                       "(bn 0x%X, sn 0x%X) failed to map driver user space!",
+                       tpci200->info->pdev->bus->number,
+                       tpci200->info->pdev->devfn);
++              res = -ENOMEM;
+               goto out_release_mem8_space;
+       }
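
The tpci200 change fixes a common goto-cleanup bug: an error branch jumps to the unwind label without setting the return variable, so the function reports success (0) on failure. A minimal sketch of the pattern follows; it uses hypothetical names (map_registers(), register_device()) rather than the driver's code, and simply assumes the usual single-exit error-handling idiom.

  #include <errno.h>
  #include <stdio.h>
  #include <stdlib.h>

  /* hypothetical helper standing in for ioremap() */
  static void *map_registers(int fail)
  {
          return fail ? NULL : malloc(16);
  }

  static int register_device(int fail_map)
  {
          int res = 0;
          void *regs;

          regs = map_registers(fail_map);
          if (!regs) {
                  /* without this assignment the function falls through to
                   * "return res;" and reports 0 (success) -- the tpci200 bug */
                  res = -ENOMEM;
                  goto out_release;
          }

          /* ... use regs ... */
          free(regs);

  out_release:
          /* release previously acquired resources here */
          return res;
  }

  int main(void)
  {
          printf("ok path:   %d\n", register_device(0));
          printf("fail path: %d\n", register_device(1));
          return 0;
  }
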
diff --git a/queue-5.6/kasan-disable-branch-tracing-for-core-runtime.patch b/queue-5.6/kasan-disable-branch-tracing-for-core-runtime.patch
new file mode 100644 (file)
index 0000000..bcc30ba
--- /dev/null
@@ -0,0 +1,80 @@
+From 33cd65e73abd693c00c4156cf23677c453b41b3b Mon Sep 17 00:00:00 2001
+From: Marco Elver <elver@google.com>
+Date: Fri, 22 May 2020 22:22:59 -0700
+Subject: kasan: disable branch tracing for core runtime
+
+From: Marco Elver <elver@google.com>
+
+commit 33cd65e73abd693c00c4156cf23677c453b41b3b upstream.
+
+During early boot, while KASAN is not yet initialized, it is possible to
+enter the reporting code path and end up in kasan_report().
+
+While uninitialized, the branch there prevents generating any reports.
+However, under certain circumstances, when branches are being traced
+(TRACE_BRANCH_PROFILING), we may recurse deep enough to cause kernel
+reboots without warning.
+
+To prevent similar issues in future, we should disable branch tracing
+for the core runtime.
+
+[elver@google.com: remove duplicate DISABLE_BRANCH_PROFILING, per Qian Cai]
+  Link: https://lore.kernel.org/lkml/20200517011732.GE24705@shao2-debian/
+  Link: http://lkml.kernel.org/r/20200522075207.157349-1-elver@google.com
+Reported-by: kernel test robot <rong.a.chen@intel.com>
+Signed-off-by: Marco Elver <elver@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Andrey Konovalov <andreyknvl@google.com>
+Cc: Dmitry Vyukov <dvyukov@google.com>
+Cc: Alexander Potapenko <glider@google.com>
+Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
+Cc: Qian Cai <cai@lca.pw>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r//20200517011732.GE24705@shao2-debian/
+Link: http://lkml.kernel.org/r/20200519182459.87166-1-elver@google.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/kasan/Makefile  |    8 ++++----
+ mm/kasan/generic.c |    1 -
+ mm/kasan/tags.c    |    1 -
+ 3 files changed, 4 insertions(+), 6 deletions(-)
+
+--- a/mm/kasan/Makefile
++++ b/mm/kasan/Makefile
+@@ -14,10 +14,10 @@ CFLAGS_REMOVE_tags.o = $(CC_FLAGS_FTRACE
+ # Function splitter causes unnecessary splits in __asan_load1/__asan_store1
+ # see: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=63533
+-CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+-CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+-CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
+-CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector)
++CFLAGS_common.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING
++CFLAGS_generic.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING
++CFLAGS_generic_report.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING
++CFLAGS_tags.o := $(call cc-option, -fno-conserve-stack -fno-stack-protector) -DDISABLE_BRANCH_PROFILING
+ obj-$(CONFIG_KASAN) := common.o init.o report.o
+ obj-$(CONFIG_KASAN_GENERIC) += generic.o generic_report.o quarantine.o
+--- a/mm/kasan/generic.c
++++ b/mm/kasan/generic.c
+@@ -15,7 +15,6 @@
+  */
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+-#define DISABLE_BRANCH_PROFILING
+ #include <linux/export.h>
+ #include <linux/interrupt.h>
+--- a/mm/kasan/tags.c
++++ b/mm/kasan/tags.c
+@@ -12,7 +12,6 @@
+  */
+ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+-#define DISABLE_BRANCH_PROFILING
+ #include <linux/export.h>
+ #include <linux/interrupt.h>
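
The recursion described above comes from branch profiling itself: with TRACE_BRANCH_PROFILING, every likely()/unlikely() expands to code that calls a profiling hook, so any hook (or runtime it calls into) that is itself compiled with profiling re-enters on every branch. The toy C program below sketches that feedback loop under clearly stated assumptions: LIKELY_PROFILED and trace_branch are made-up stand-ins, and the recursion is artificially bounded so the demo terminates instead of exhausting the stack as the kernel would.

  #include <stdio.h>

  static int depth;
  static long taken_count;

  static int trace_branch(int taken);

  /* stand-in for likely()/unlikely() when branch profiling is compiled in */
  #define LIKELY_PROFILED(x) (trace_branch(!!(x)), (x))

  /* if the profiling hook is built with profiling enabled, every branch in it
   * re-enters the hook -- which is why the patch adds -DDISABLE_BRANCH_PROFILING
   * to the KASAN runtime objects */
  static int trace_branch(int taken)
  {
          if (++depth > 5) {      /* bounded only so this demo terminates */
                  printf("recursed to depth %d; the kernel would never return\n", depth);
                  return taken;
          }
          if (LIKELY_PROFILED(taken))     /* an instrumented branch inside the hook */
                  taken_count++;
          depth--;
          return taken;
  }

  int main(void)
  {
          if (LIKELY_PROFILED(1))
                  printf("branch taken; hook recorded %ld branches\n", taken_count);
          return 0;
  }
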
diff --git a/queue-5.6/rapidio-fix-an-error-in-get_user_pages_fast-error-handling.patch b/queue-5.6/rapidio-fix-an-error-in-get_user_pages_fast-error-handling.patch
new file mode 100644 (file)
index 0000000..0b1f437
--- /dev/null
@@ -0,0 +1,48 @@
+From ffca476a0a8d26de767cc41d62b8ca7f540ecfdd Mon Sep 17 00:00:00 2001
+From: John Hubbard <jhubbard@nvidia.com>
+Date: Fri, 22 May 2020 22:22:48 -0700
+Subject: rapidio: fix an error in get_user_pages_fast() error handling
+
+From: John Hubbard <jhubbard@nvidia.com>
+
+commit ffca476a0a8d26de767cc41d62b8ca7f540ecfdd upstream.
+
+In the case of get_user_pages_fast() returning fewer pages than
+requested, rio_dma_transfer() does not quite do the right thing.  It
+attempts to release all the pages that were requested, rather than just
+the pages that were pinned.
+
+Fix the error handling so that only the pages that were successfully
+pinned are released.
+
+Fixes: e8de370188d0 ("rapidio: add mport char device driver")
+Signed-off-by: John Hubbard <jhubbard@nvidia.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Matt Porter <mporter@kernel.crashing.org>
+Cc: Alexandre Bounine <alex.bou9@gmail.com>
+Cc: Sumit Semwal <sumit.semwal@linaro.org>
+Cc: Dan Carpenter <dan.carpenter@oracle.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200517235620.205225-2-jhubbard@nvidia.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/rapidio/devices/rio_mport_cdev.c |    5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/rapidio/devices/rio_mport_cdev.c
++++ b/drivers/rapidio/devices/rio_mport_cdev.c
+@@ -877,6 +877,11 @@ rio_dma_transfer(struct file *filp, u32
+                               rmcd_error("pinned %ld out of %ld pages",
+                                          pinned, nr_pages);
+                       ret = -EFAULT;
++                      /*
++                       * Set nr_pages up to mean "how many pages to unpin, in
++                       * the error handler:
++                       */
++                      nr_pages = pinned;
+                       goto err_pg;
+               }
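
The rule behind this fix applies to any partial-success API such as get_user_pages_fast(): on error, undo only what was actually acquired, not what was requested. A small, self-contained C sketch of that cleanup pattern is shown below; pin_items() and unpin_item() are hypothetical stand-ins, not the mport driver's helpers.

  #include <stdio.h>

  /* pretend to pin up to 'want' items but succeed only for 'avail' of them */
  static long pin_items(long want, long avail, int *pinned_flags)
  {
          long i, got = want < avail ? want : avail;

          for (i = 0; i < got; i++)
                  pinned_flags[i] = 1;
          return got;                     /* may be fewer than requested */
  }

  static void unpin_item(int *flag)
  {
          *flag = 0;
  }

  int main(void)
  {
          long i, nr_pages = 8, pinned;
          int flags[8] = { 0 };

          pinned = pin_items(nr_pages, 5, flags);
          if (pinned < nr_pages) {
                  fprintf(stderr, "pinned %ld out of %ld pages\n", pinned, nr_pages);
                  /* as in the patch: clamp the count so the cleanup below
                   * unpins only the pages that really were pinned */
                  nr_pages = pinned;
          }

          /* error/cleanup path */
          for (i = 0; i < nr_pages; i++)
                  unpin_item(&flags[i]);
          return 0;
  }
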
diff --git a/queue-5.6/s390-kaslr-add-support-for-r_390_jmp_slot-relocation-type.patch b/queue-5.6/s390-kaslr-add-support-for-r_390_jmp_slot-relocation-type.patch
new file mode 100644 (file)
index 0000000..cc1c96b
--- /dev/null
@@ -0,0 +1,41 @@
+From 4c1cbcbd6c56c79de2c07159be4f55386bb0bef2 Mon Sep 17 00:00:00 2001
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Date: Wed, 6 May 2020 13:45:52 +0200
+Subject: s390/kaslr: add support for R_390_JMP_SLOT relocation type
+
+From: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+
+commit 4c1cbcbd6c56c79de2c07159be4f55386bb0bef2 upstream.
+
+With certain kernel configurations, the R_390_JMP_SLOT relocation type
+might be generated, which is not expected by the KASLR relocation code,
+and the kernel stops with the message "Unknown relocation type".
+
+This was found with a zfcpdump kernel config, where CONFIG_MODULES=n
+and CONFIG_VFIO=n. In that case, symbol_get() is used on undefined
+__weak symbols in virt/kvm/vfio.c, which results in the generation
+of R_390_JMP_SLOT relocation types.
+
+Fix this by handling R_390_JMP_SLOT similar to R_390_GLOB_DAT.
+
+Fixes: 805bc0bc238f ("s390/kernel: build a relocatable kernel")
+Cc: <stable@vger.kernel.org> # v5.2+
+Signed-off-by: Gerald Schaefer <gerald.schaefer@de.ibm.com>
+Reviewed-by: Philipp Rudo <prudo@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/machine_kexec_reloc.c |    1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/s390/kernel/machine_kexec_reloc.c
++++ b/arch/s390/kernel/machine_kexec_reloc.c
+@@ -28,6 +28,7 @@ int arch_kexec_do_relocs(int r_type, voi
+               break;
+       case R_390_64:          /* Direct 64 bit.  */
+       case R_390_GLOB_DAT:
++      case R_390_JMP_SLOT:
+               *(u64 *)loc = val;
+               break;
+       case R_390_PC16:        /* PC relative 16 bit.  */
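
The fix boils down to the relocator's dispatch: an R_390_JMP_SLOT (PLT slot) is filled in exactly like R_390_GLOB_DAT (GOT slot), by writing the 64-bit target address. The following is a compact, hypothetical userspace sketch of that dispatch, not the kernel's arch_kexec_do_relocs(); the R_390_* values are assumed to match glibc's <elf.h>.

  #include <stdint.h>
  #include <stdio.h>

  /* relocation type numbers assumed from glibc's <elf.h> */
  #define R_390_GLOB_DAT 10
  #define R_390_JMP_SLOT 11
  #define R_390_64       22

  static int apply_reloc(int r_type, uint64_t *loc, uint64_t val)
  {
          switch (r_type) {
          case R_390_64:          /* direct 64 bit */
          case R_390_GLOB_DAT:    /* GOT entry */
          case R_390_JMP_SLOT:    /* PLT entry -- now handled the same way */
                  *loc = val;
                  return 0;
          default:
                  return 1;       /* "Unknown relocation type" in the real code */
          }
  }

  int main(void)
  {
          uint64_t slot = 0;

          if (apply_reloc(R_390_JMP_SLOT, &slot, 0x12345678abcdef00ULL))
                  fprintf(stderr, "Unknown relocation type\n");
          printf("slot = 0x%llx\n", (unsigned long long)slot);
          return 0;
  }
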
diff --git a/queue-5.6/s390-pci-fix-s390_mmio_read-write-with-mio.patch b/queue-5.6/s390-pci-fix-s390_mmio_read-write-with-mio.patch
new file mode 100644 (file)
index 0000000..ecdd1ab
--- /dev/null
@@ -0,0 +1,335 @@
+From f058599e22d59e594e5aae1dc10560568d8f4a8b Mon Sep 17 00:00:00 2001
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+Date: Thu, 26 Mar 2020 12:22:50 +0100
+Subject: s390/pci: Fix s390_mmio_read/write with MIO
+
+From: Niklas Schnelle <schnelle@linux.ibm.com>
+
+commit f058599e22d59e594e5aae1dc10560568d8f4a8b upstream.
+
+The s390_mmio_read/write syscalls are currently broken when running with
+MIO.
+
+The new pcistb_mio/pcistg_mio/pcilg_mio instructions are executed
+similarly to normal load/store instructions and do address translation
+in the current address space. That means inside the kernel they are
+aware of mappings into kernel address space while outside the kernel
+they use user space mappings (usually created through mmap'ing a PCI
+device file).
+
+Now when existing user space applications use the s390_pci_mmio_write
+and s390_pci_mmio_read syscalls, they pass I/O addresses that are mapped
+into user space so as to be usable with the new instructions without
+needing a syscall. Accessing these addresses with the old instructions
+as done currently leads to a kernel panic.
+
+Also, for such a user space mapping there may not exist an equivalent
+kernel space mapping which means we can't just use the new instructions
+in kernel space.
+
+Instead of replicating user mappings in the kernel which then might
+collide with other mappings, we can conceptually execute the new
+instructions as if executed by the user space application using the
+secondary address space. This even allows us to directly store to the
+user pointer without the need for copy_to/from_user().
+
+Cc: stable@vger.kernel.org
+Fixes: 71ba41c9b1d9 ("s390/pci: provide support for MIO instructions")
+Signed-off-by: Niklas Schnelle <schnelle@linux.ibm.com>
+Reviewed-by: Sven Schnelle <svens@linux.ibm.com>
+Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/include/asm/pci_io.h |   10 +
+ arch/s390/pci/pci_mmio.c       |  213 ++++++++++++++++++++++++++++++++++++++++-
+ 2 files changed, 219 insertions(+), 4 deletions(-)
+
+--- a/arch/s390/include/asm/pci_io.h
++++ b/arch/s390/include/asm/pci_io.h
+@@ -8,6 +8,10 @@
+ #include <linux/slab.h>
+ #include <asm/pci_insn.h>
++/* I/O size constraints */
++#define ZPCI_MAX_READ_SIZE    8
++#define ZPCI_MAX_WRITE_SIZE   128
++
+ /* I/O Map */
+ #define ZPCI_IOMAP_SHIFT              48
+ #define ZPCI_IOMAP_ADDR_BASE          0x8000000000000000UL
+@@ -140,7 +144,8 @@ static inline int zpci_memcpy_fromio(voi
+       while (n > 0) {
+               size = zpci_get_max_write_size((u64 __force) src,
+-                                             (u64) dst, n, 8);
++                                             (u64) dst, n,
++                                             ZPCI_MAX_READ_SIZE);
+               rc = zpci_read_single(dst, src, size);
+               if (rc)
+                       break;
+@@ -161,7 +166,8 @@ static inline int zpci_memcpy_toio(volat
+       while (n > 0) {
+               size = zpci_get_max_write_size((u64 __force) dst,
+-                                             (u64) src, n, 128);
++                                             (u64) src, n,
++                                             ZPCI_MAX_WRITE_SIZE);
+               if (size > 8) /* main path */
+                       rc = zpci_write_block(dst, src, size);
+               else
+--- a/arch/s390/pci/pci_mmio.c
++++ b/arch/s390/pci/pci_mmio.c
+@@ -11,6 +11,113 @@
+ #include <linux/mm.h>
+ #include <linux/errno.h>
+ #include <linux/pci.h>
++#include <asm/pci_io.h>
++#include <asm/pci_debug.h>
++
++static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
++{
++      struct {
++              u64 offset;
++              u8 cc;
++              u8 status;
++      } data = {offset, cc, status};
++
++      zpci_err_hex(&data, sizeof(data));
++}
++
++static inline int __pcistb_mio_inuser(
++              void __iomem *ioaddr, const void __user *src,
++              u64 len, u8 *status)
++{
++      int cc = -ENXIO;
++
++      asm volatile (
++              "       sacf 256\n"
++              "0:     .insn   rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
++              "1:     ipm     %[cc]\n"
++              "       srl     %[cc],28\n"
++              "2:     sacf 768\n"
++              EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
++              : [cc] "+d" (cc), [len] "+d" (len)
++              : [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
++              : "cc", "memory");
++      *status = len >> 24 & 0xff;
++      return cc;
++}
++
++static inline int __pcistg_mio_inuser(
++              void __iomem *ioaddr, const void __user *src,
++              u64 ulen, u8 *status)
++{
++      register u64 addr asm("2") = (u64 __force) ioaddr;
++      register u64 len asm("3") = ulen;
++      int cc = -ENXIO;
++      u64 val = 0;
++      u64 cnt = ulen;
++      u8 tmp;
++
++      /*
++       * copy 0 < @len <= 8 bytes from @src into the right most bytes of
++       * a register, then store it to PCI at @ioaddr while in secondary
++       * address space. pcistg then uses the user mappings.
++       */
++      asm volatile (
++              "       sacf    256\n"
++              "0:     llgc    %[tmp],0(%[src])\n"
++              "       sllg    %[val],%[val],8\n"
++              "       aghi    %[src],1\n"
++              "       ogr     %[val],%[tmp]\n"
++              "       brctg   %[cnt],0b\n"
++              "1:     .insn   rre,0xb9d40000,%[val],%[ioaddr]\n"
++              "2:     ipm     %[cc]\n"
++              "       srl     %[cc],28\n"
++              "3:     sacf    768\n"
++              EX_TABLE(0b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
++              :
++              [src] "+a" (src), [cnt] "+d" (cnt),
++              [val] "+d" (val), [tmp] "=d" (tmp),
++              [len] "+d" (len), [cc] "+d" (cc),
++              [ioaddr] "+a" (addr)
++              :: "cc", "memory");
++      *status = len >> 24 & 0xff;
++
++      /* did we read everything from user memory? */
++      if (!cc && cnt != 0)
++              cc = -EFAULT;
++
++      return cc;
++}
++
++static inline int __memcpy_toio_inuser(void __iomem *dst,
++                                 const void __user *src, size_t n)
++{
++      int size, rc = 0;
++      u8 status = 0;
++      mm_segment_t old_fs;
++
++      if (!src)
++              return -EINVAL;
++
++      old_fs = enable_sacf_uaccess();
++      while (n > 0) {
++              size = zpci_get_max_write_size((u64 __force) dst,
++                                             (u64 __force) src, n,
++                                             ZPCI_MAX_WRITE_SIZE);
++              if (size > 8) /* main path */
++                      rc = __pcistb_mio_inuser(dst, src, size, &status);
++              else
++                      rc = __pcistg_mio_inuser(dst, src, size, &status);
++              if (rc)
++                      break;
++              src += size;
++              dst += size;
++              n -= size;
++      }
++      disable_sacf_uaccess(old_fs);
++      if (rc)
++              zpci_err_mmio(rc, status, (__force u64) dst);
++      return rc;
++}
+ static long get_pfn(unsigned long user_addr, unsigned long access,
+                   unsigned long *pfn)
+@@ -46,6 +153,20 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, uns
+       if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
+               return -EINVAL;
++
++      /*
++       * Only support read access to MIO capable devices on a MIO enabled
++       * system. Otherwise we would have to check for every address if it is
++       * a special ZPCI_ADDR and we would have to do a get_pfn() which we
++       * don't need for MIO capable devices.
++       */
++      if (static_branch_likely(&have_mio)) {
++              ret = __memcpy_toio_inuser((void  __iomem *) mmio_addr,
++                                      user_buffer,
++                                      length);
++              return ret;
++      }
++
+       if (length > 64) {
+               buf = kmalloc(length, GFP_KERNEL);
+               if (!buf)
+@@ -56,7 +177,8 @@ SYSCALL_DEFINE3(s390_pci_mmio_write, uns
+       ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
+       if (ret)
+               goto out;
+-      io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
++      io_addr = (void __iomem *)((pfn << PAGE_SHIFT) |
++                      (mmio_addr & ~PAGE_MASK));
+       ret = -EFAULT;
+       if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+@@ -72,6 +194,78 @@ out:
+       return ret;
+ }
++static inline int __pcilg_mio_inuser(
++              void __user *dst, const void __iomem *ioaddr,
++              u64 ulen, u8 *status)
++{
++      register u64 addr asm("2") = (u64 __force) ioaddr;
++      register u64 len asm("3") = ulen;
++      u64 cnt = ulen;
++      int shift = ulen * 8;
++      int cc = -ENXIO;
++      u64 val, tmp;
++
++      /*
++       * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
++       * user space) into a register using pcilg then store these bytes at
++       * user address @dst
++       */
++      asm volatile (
++              "       sacf    256\n"
++              "0:     .insn   rre,0xb9d60000,%[val],%[ioaddr]\n"
++              "1:     ipm     %[cc]\n"
++              "       srl     %[cc],28\n"
++              "       ltr     %[cc],%[cc]\n"
++              "       jne     4f\n"
++              "2:     ahi     %[shift],-8\n"
++              "       srlg    %[tmp],%[val],0(%[shift])\n"
++              "3:     stc     %[tmp],0(%[dst])\n"
++              "       aghi    %[dst],1\n"
++              "       brctg   %[cnt],2b\n"
++              "4:     sacf    768\n"
++              EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b)
++              :
++              [cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len),
++              [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
++              [shift] "+d" (shift)
++              :
++              [ioaddr] "a" (addr)
++              : "cc", "memory");
++
++      /* did we write everything to the user space buffer? */
++      if (!cc && cnt != 0)
++              cc = -EFAULT;
++
++      *status = len >> 24 & 0xff;
++      return cc;
++}
++
++static inline int __memcpy_fromio_inuser(void __user *dst,
++                                   const void __iomem *src,
++                                   unsigned long n)
++{
++      int size, rc = 0;
++      u8 status;
++      mm_segment_t old_fs;
++
++      old_fs = enable_sacf_uaccess();
++      while (n > 0) {
++              size = zpci_get_max_write_size((u64 __force) src,
++                                             (u64 __force) dst, n,
++                                             ZPCI_MAX_READ_SIZE);
++              rc = __pcilg_mio_inuser(dst, src, size, &status);
++              if (rc)
++                      break;
++              src += size;
++              dst += size;
++              n -= size;
++      }
++      disable_sacf_uaccess(old_fs);
++      if (rc)
++              zpci_err_mmio(rc, status, (__force u64) dst);
++      return rc;
++}
++
+ SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
+               void __user *, user_buffer, size_t, length)
+ {
+@@ -86,12 +280,27 @@ SYSCALL_DEFINE3(s390_pci_mmio_read, unsi
+       if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
+               return -EINVAL;
++
++      /*
++       * Only support write access to MIO capable devices on a MIO enabled
++       * system. Otherwise we would have to check for every address if it is
++       * a special ZPCI_ADDR and we would have to do a get_pfn() which we
++       * don't need for MIO capable devices.
++       */
++      if (static_branch_likely(&have_mio)) {
++              ret = __memcpy_fromio_inuser(
++                              user_buffer, (const void __iomem *)mmio_addr,
++                              length);
++              return ret;
++      }
++
+       if (length > 64) {
+               buf = kmalloc(length, GFP_KERNEL);
+               if (!buf)
+                       return -ENOMEM;
+-      } else
++      } else {
+               buf = local_buf;
++      }
+       ret = get_pfn(mmio_addr, VM_READ, &pfn);
+       if (ret)
diff --git a/queue-5.6/series b/queue-5.6/series
index a61307fa555d30526690da666a59d33e26755ab9..46d01bca702394a89e485cd1b4984170dbb156c7 100644 (file)
--- a/queue-5.6/series
@@ -102,3 +102,13 @@ driver-core-fix-handling-of-sync_state_only-stateless-device-links.patch
 misc-rtsx-add-short-delay-after-exit-from-aspm.patch
 tty-serial-add-missing-spin_lock_init-for-sifive-serial-console.patch
 mei-release-me_cl-object-reference.patch
+ipack-tpci200-fix-error-return-code-in-tpci200_register.patch
+s390-pci-fix-s390_mmio_read-write-with-mio.patch
+s390-kaslr-add-support-for-r_390_jmp_slot-relocation-type.patch
+device-dax-don-t-leak-kernel-memory-to-user-space-after-unloading-kmem.patch
+rapidio-fix-an-error-in-get_user_pages_fast-error-handling.patch
+kasan-disable-branch-tracing-for-core-runtime.patch
+sh-include-linux-time_types.h-for-sockios.patch
+sparc32-use-pud-rather-than-pgd-to-get-pmd-in-srmmu_nocache_init.patch
+sparc32-fix-page-table-traversal-in-srmmu_nocache_init.patch
+z3fold-fix-use-after-free-when-freeing-handles.patch
diff --git a/queue-5.6/sh-include-linux-time_types.h-for-sockios.patch b/queue-5.6/sh-include-linux-time_types.h-for-sockios.patch
new file mode 100644 (file)
index 0000000..9d9a08c
--- /dev/null
@@ -0,0 +1,46 @@
+From fc94cf2092c7c1267fa2deb8388d624f50eba808 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 22 May 2020 22:23:02 -0700
+Subject: sh: include linux/time_types.h for sockios
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit fc94cf2092c7c1267fa2deb8388d624f50eba808 upstream.
+
+Using the socket ioctls on arch/sh (and only there) causes build time
+problems when __kernel_old_timeval/__kernel_old_timespec are not already
+visible to the compiler.
+
+Add an explicit include line for the header that defines these
+structures.
+
+Fixes: 8c709f9a0693 ("y2038: sh: remove timeval/timespec usage from headers")
+Fixes: 0768e17073dc ("net: socket: implement 64-bit timestamps")
+Reported-by: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Tested-by: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Cc: Yoshinori Sato <ysato@users.sourceforge.jp>
+Cc: Rich Felker <dalias@libc.org>
+Cc: "David S. Miller" <davem@davemloft.net>
+Cc: John Paul Adrian Glaubitz <glaubitz@physik.fu-berlin.de>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200519131327.1836482-1-arnd@arndb.de
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/sh/include/uapi/asm/sockios.h |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/sh/include/uapi/asm/sockios.h
++++ b/arch/sh/include/uapi/asm/sockios.h
+@@ -2,6 +2,8 @@
+ #ifndef __ASM_SH_SOCKIOS_H
+ #define __ASM_SH_SOCKIOS_H
++#include <linux/time_types.h>
++
+ /* Socket-level I/O control calls. */
+ #define FIOGETOWN     _IOR('f', 123, int)
+ #define FIOSETOWN     _IOW('f', 124, int)
diff --git a/queue-5.6/sparc32-fix-page-table-traversal-in-srmmu_nocache_init.patch b/queue-5.6/sparc32-fix-page-table-traversal-in-srmmu_nocache_init.patch
new file mode 100644 (file)
index 0000000..f2c96e9
--- /dev/null
@@ -0,0 +1,44 @@
+From 0cfc8a8d70dcd51db783e8e87917e02149c71458 Mon Sep 17 00:00:00 2001
+From: Mike Rapoport <rppt@linux.ibm.com>
+Date: Sat, 23 May 2020 22:57:18 +0300
+Subject: sparc32: fix page table traversal in srmmu_nocache_init()
+
+From: Mike Rapoport <rppt@linux.ibm.com>
+
+commit 0cfc8a8d70dcd51db783e8e87917e02149c71458 upstream.
+
+The srmmu_nocache_init() uses __nocache_fix() macro to add an offset to
+page table entry to access srmmu_nocache_pool.
+
+But since sparc32 has only three actual page table levels, pgd, p4d and
+pud are essentially the same thing and pgd_offset() and p4d_offset() are
+no-ops, the __nocache_fix() should be done only at PUD level.
+
+Remove __nocache_fix() for p4d_offset() and pud_offset() and keep it
+only for PUD and lower levels.
+
+Fixes: c2bc26f7ca1f ("sparc32: use PUD rather than PGD to get PMD in srmmu_nocache_init()")
+Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Anatoly Pugachev <matorola@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/sparc/mm/srmmu.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/sparc/mm/srmmu.c
++++ b/arch/sparc/mm/srmmu.c
+@@ -331,8 +331,8 @@ static void __init srmmu_nocache_init(vo
+       while (vaddr < srmmu_nocache_end) {
+               pgd = pgd_offset_k(vaddr);
+-              p4d = p4d_offset(__nocache_fix(pgd), vaddr);
+-              pud = pud_offset(__nocache_fix(p4d), vaddr);
++              p4d = p4d_offset(pgd, vaddr);
++              pud = pud_offset(p4d, vaddr);
+               pmd = pmd_offset(__nocache_fix(pud), vaddr);
+               pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
diff --git a/queue-5.6/sparc32-use-pud-rather-than-pgd-to-get-pmd-in-srmmu_nocache_init.patch b/queue-5.6/sparc32-use-pud-rather-than-pgd-to-get-pmd-in-srmmu_nocache_init.patch
new file mode 100644 (file)
index 0000000..53e9231
--- /dev/null
@@ -0,0 +1,50 @@
+From c2bc26f7ca1ff1165bb6669a7a4cccc20ffd2ced Mon Sep 17 00:00:00 2001
+From: Mike Rapoport <rppt@linux.ibm.com>
+Date: Fri, 22 May 2020 22:23:09 -0700
+Subject: sparc32: use PUD rather than PGD to get PMD in srmmu_nocache_init()
+
+From: Mike Rapoport <rppt@linux.ibm.com>
+
+commit c2bc26f7ca1ff1165bb6669a7a4cccc20ffd2ced upstream.
+
+The kbuild test robot reported the following warning:
+
+  arch/sparc/mm/srmmu.c: In function 'srmmu_nocache_init': arch/sparc/mm/srmmu.c:300:9: error: variable 'pud' set but not used [-Werror=unused-but-set-variable]
+  300 |  pud_t *pud;
+
+This warning is caused by a misprint in the page table traversal in
+the srmmu_nocache_init() function, which accessed a PMD entry using
+PGD rather than PUD.
+
+Since sparc32 has only 3 page table levels, the PGD and PUD are
+essentially the same and usage of __nocache_fix() removed the type
+checking.
+
+Use PUD for consistency and to silence the compiler warning.
+
+Fixes: 7235db268a2777bc38 ("sparc32: use pgtable-nopud instead of 4level-fixup")
+Reported-by: kbuild test robot <lkp@intel.com>
+Signed-off-by: Mike Rapoport <rppt@linux.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: David S. Miller <davem@davemloft.net>
+Cc: Anatoly Pugachev <matorola@gmail.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200520132005.GM1059226@linux.ibm.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/sparc/mm/srmmu.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/sparc/mm/srmmu.c
++++ b/arch/sparc/mm/srmmu.c
+@@ -333,7 +333,7 @@ static void __init srmmu_nocache_init(vo
+               pgd = pgd_offset_k(vaddr);
+               p4d = p4d_offset(__nocache_fix(pgd), vaddr);
+               pud = pud_offset(__nocache_fix(p4d), vaddr);
+-              pmd = pmd_offset(__nocache_fix(pgd), vaddr);
++              pmd = pmd_offset(__nocache_fix(pud), vaddr);
+               pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
+               pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
diff --git a/queue-5.6/z3fold-fix-use-after-free-when-freeing-handles.patch b/queue-5.6/z3fold-fix-use-after-free-when-freeing-handles.patch
new file mode 100644 (file)
index 0000000..9dd20cb
--- /dev/null
@@ -0,0 +1,87 @@
+From d8f117abb380ba968b5e3ef2042d901c02872a4c Mon Sep 17 00:00:00 2001
+From: Uladzislau Rezki <uladzislau.rezki@sony.com>
+Date: Fri, 22 May 2020 22:23:12 -0700
+Subject: z3fold: fix use-after-free when freeing handles
+
+From: Uladzislau Rezki <uladzislau.rezki@sony.com>
+
+commit d8f117abb380ba968b5e3ef2042d901c02872a4c upstream.
+
+free_handle() for a foreign handle may race with inter-page compaction,
+which can lead to memory corruption.
+
+To avoid that, take the write lock rather than the read lock in
+free_handle() to be synchronized with __release_z3fold_page().
+
+For example KASAN can detect it:
+
+  ==================================================================
+  BUG: KASAN: use-after-free in LZ4_decompress_safe+0x2c4/0x3b8
+  Read of size 1 at addr ffffffc976695ca3 by task GoogleApiHandle/4121
+
+  CPU: 0 PID: 4121 Comm: GoogleApiHandle Tainted: P S         OE     4.19.81-perf+ #162
+  Hardware name: Sony Mobile Communications. PDX-203(KONA) (DT)
+  Call trace:
+     LZ4_decompress_safe+0x2c4/0x3b8
+     lz4_decompress_crypto+0x3c/0x70
+     crypto_decompress+0x58/0x70
+     zcomp_decompress+0xd4/0x120
+     ...
+
+Apart from that, initialize zhdr->mapped_count in init_z3fold_page() and
+remove "newpage" variable because it is not used anywhere.
+
+Signed-off-by: Uladzislau Rezki <uladzislau.rezki@sony.com>
+Signed-off-by: Vitaly Wool <vitaly.wool@konsulko.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Cc: Qian Cai <cai@lca.pw>
+Cc: Raymond Jennings <shentino@gmail.com>
+Cc: <stable@vger.kernel.org>
+Link: http://lkml.kernel.org/r/20200520082100.28876-1-vitaly.wool@konsulko.com
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/z3fold.c |   11 ++++++-----
+ 1 file changed, 6 insertions(+), 5 deletions(-)
+
+--- a/mm/z3fold.c
++++ b/mm/z3fold.c
+@@ -318,16 +318,16 @@ static inline void free_handle(unsigned
+       slots = handle_to_slots(handle);
+       write_lock(&slots->lock);
+       *(unsigned long *)handle = 0;
+-      write_unlock(&slots->lock);
+-      if (zhdr->slots == slots)
++      if (zhdr->slots == slots) {
++              write_unlock(&slots->lock);
+               return; /* simple case, nothing else to do */
++      }
+       /* we are freeing a foreign handle if we are here */
+       zhdr->foreign_handles--;
+       is_free = true;
+-      read_lock(&slots->lock);
+       if (!test_bit(HANDLES_ORPHANED, &slots->pool)) {
+-              read_unlock(&slots->lock);
++              write_unlock(&slots->lock);
+               return;
+       }
+       for (i = 0; i <= BUDDY_MASK; i++) {
+@@ -336,7 +336,7 @@ static inline void free_handle(unsigned
+                       break;
+               }
+       }
+-      read_unlock(&slots->lock);
++      write_unlock(&slots->lock);
+       if (is_free) {
+               struct z3fold_pool *pool = slots_to_pool(slots);
+@@ -422,6 +422,7 @@ static struct z3fold_header *init_z3fold
+       zhdr->start_middle = 0;
+       zhdr->cpu = -1;
+       zhdr->foreign_handles = 0;
++      zhdr->mapped_count = 0;
+       zhdr->slots = slots;
+       zhdr->pool = pool;
+       INIT_LIST_HEAD(&zhdr->buddy);
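
The underlying rule in this last patch is lock choice: a path that mutates shared state (here, clearing handle slots that __release_z3fold_page() may free) is a writer and must hold the write lock; holding only the read lock lets it race with the release path. The pthread sketch below illustrates that rule with made-up names unrelated to z3fold; it is an illustration of reader/writer locking, not the z3fold code.

  #include <pthread.h>
  #include <stdio.h>

  static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
  static int slots[4];

  /* readers may share the lock: they only inspect the slots */
  static int count_used(void)
  {
          int i, used = 0;

          pthread_rwlock_rdlock(&lock);
          for (i = 0; i < 4; i++)
                  used += slots[i] != 0;
          pthread_rwlock_unlock(&lock);
          return used;
  }

  /* a path that clears slots is a writer; taking the read lock here would let
   * it run concurrently with another mutator -- the bug class the patch fixes */
  static void release_slot(int i)
  {
          pthread_rwlock_wrlock(&lock);
          slots[i] = 0;
          pthread_rwlock_unlock(&lock);
  }

  int main(void)
  {
          slots[0] = slots[1] = 1;
          release_slot(0);
          printf("used slots: %d\n", count_used());
          return 0;
  }
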