5.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 7 Jan 2020 09:34:56 +0000 (10:34 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 7 Jan 2020 09:34:56 +0000 (10:34 +0100)
added patches:
arm64-revert-support-for-execute-only-user-mappings.patch
ftrace-avoid-potential-division-by-zero-in-function-profiler.patch
spi-spi-fsl-dspi-fix-16-bit-word-order-in-32-bit-xspi-mode.patch

queue-5.4/arm64-revert-support-for-execute-only-user-mappings.patch [new file with mode: 0644]
queue-5.4/ftrace-avoid-potential-division-by-zero-in-function-profiler.patch [new file with mode: 0644]
queue-5.4/series
queue-5.4/spi-spi-fsl-dspi-fix-16-bit-word-order-in-32-bit-xspi-mode.patch [new file with mode: 0644]

diff --git a/queue-5.4/arm64-revert-support-for-execute-only-user-mappings.patch b/queue-5.4/arm64-revert-support-for-execute-only-user-mappings.patch
new file mode 100644 (file)
index 0000000..ecaf7f5
--- /dev/null
@@ -0,0 +1,115 @@
+From 24cecc37746393432d994c0dbc251fb9ac7c5d72 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Mon, 6 Jan 2020 14:35:39 +0000
+Subject: arm64: Revert support for execute-only user mappings
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 24cecc37746393432d994c0dbc251fb9ac7c5d72 upstream.
+
+The ARMv8 64-bit architecture supports execute-only user permissions by
+clearing the PTE_USER and PTE_UXN bits, in practice making it a mostly
+privileged mapping, but one from which a user at EL0 can still execute.
+
+The downside, however, is that the kernel at EL1 inadvertently reading
+such a mapping would not trip over the PAN (privileged access never)
+protection.
+
+Revert the relevant bits from commit cab15ce604e5 ("arm64: Introduce
+execute-only page access permissions") so that PROT_EXEC implies
+PROT_READ (and therefore PTE_USER) until the architecture gains proper
+support for execute-only user mappings.
+
+Fixes: cab15ce604e5 ("arm64: Introduce execute-only page access permissions")
+Cc: <stable@vger.kernel.org> # 4.9.x-
+Acked-by: Will Deacon <will@kernel.org>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/pgtable-prot.h |    5 ++---
+ arch/arm64/include/asm/pgtable.h      |   10 +++-------
+ arch/arm64/mm/fault.c                 |    2 +-
+ mm/mmap.c                             |    6 ------
+ 4 files changed, 6 insertions(+), 17 deletions(-)
+
+--- a/arch/arm64/include/asm/pgtable-prot.h
++++ b/arch/arm64/include/asm/pgtable-prot.h
+@@ -85,13 +85,12 @@
+ #define PAGE_SHARED_EXEC      __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
+ #define PAGE_READONLY         __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
+ #define PAGE_READONLY_EXEC    __pgprot(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
+-#define PAGE_EXECONLY         __pgprot(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
+ #define __P000  PAGE_NONE
+ #define __P001  PAGE_READONLY
+ #define __P010  PAGE_READONLY
+ #define __P011  PAGE_READONLY
+-#define __P100  PAGE_EXECONLY
++#define __P100  PAGE_READONLY_EXEC
+ #define __P101  PAGE_READONLY_EXEC
+ #define __P110  PAGE_READONLY_EXEC
+ #define __P111  PAGE_READONLY_EXEC
+@@ -100,7 +99,7 @@
+ #define __S001  PAGE_READONLY
+ #define __S010  PAGE_SHARED
+ #define __S011  PAGE_SHARED
+-#define __S100  PAGE_EXECONLY
++#define __S100  PAGE_READONLY_EXEC
+ #define __S101  PAGE_READONLY_EXEC
+ #define __S110  PAGE_SHARED_EXEC
+ #define __S111  PAGE_SHARED_EXEC
+--- a/arch/arm64/include/asm/pgtable.h
++++ b/arch/arm64/include/asm/pgtable.h
+@@ -96,12 +96,8 @@ extern unsigned long empty_zero_page[PAG
+ #define pte_dirty(pte)                (pte_sw_dirty(pte) || pte_hw_dirty(pte))
+ #define pte_valid(pte)                (!!(pte_val(pte) & PTE_VALID))
+-/*
+- * Execute-only user mappings do not have the PTE_USER bit set. All valid
+- * kernel mappings have the PTE_UXN bit set.
+- */
+ #define pte_valid_not_user(pte) \
+-      ((pte_val(pte) & (PTE_VALID | PTE_USER | PTE_UXN)) == (PTE_VALID | PTE_UXN))
++      ((pte_val(pte) & (PTE_VALID | PTE_USER)) == PTE_VALID)
+ #define pte_valid_young(pte) \
+       ((pte_val(pte) & (PTE_VALID | PTE_AF)) == (PTE_VALID | PTE_AF))
+ #define pte_valid_user(pte) \
+@@ -117,8 +113,8 @@ extern unsigned long empty_zero_page[PAG
+ /*
+  * p??_access_permitted() is true for valid user mappings (subject to the
+- * write permission check) other than user execute-only which do not have the
+- * PTE_USER bit set. PROT_NONE mappings do not have the PTE_VALID bit set.
++ * write permission check). PROT_NONE mappings do not have the PTE_VALID bit
++ * set.
+  */
+ #define pte_access_permitted(pte, write) \
+       (pte_valid_user(pte) && (!(write) || pte_write(pte)))
+--- a/arch/arm64/mm/fault.c
++++ b/arch/arm64/mm/fault.c
+@@ -454,7 +454,7 @@ static int __kprobes do_page_fault(unsig
+       const struct fault_info *inf;
+       struct mm_struct *mm = current->mm;
+       vm_fault_t fault, major = 0;
+-      unsigned long vm_flags = VM_READ | VM_WRITE;
++      unsigned long vm_flags = VM_READ | VM_WRITE | VM_EXEC;
+       unsigned int mm_flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE;
+       if (kprobe_page_fault(regs, esr))
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -90,12 +90,6 @@ static void unmap_region(struct mm_struc
+  * MAP_PRIVATE        r: (no) no      r: (yes) yes    r: (no) yes     r: (no) yes
+  *            w: (no) no      w: (no) no      w: (copy) copy  w: (no) no
+  *            x: (no) no      x: (no) yes     x: (no) yes     x: (yes) yes
+- *
+- * On arm64, PROT_EXEC has the following behaviour for both MAP_SHARED and
+- * MAP_PRIVATE:
+- *                                                            r: (no) no
+- *                                                            w: (no) no
+- *                                                            x: (yes) yes
+  */
+ pgprot_t protection_map[16] __ro_after_init = {
+       __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
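To make the effect of the revert concrete, here is a minimal userspace sketch (an illustration, not part of the patch): before the revert, arm64 honoured mmap() with PROT_EXEC alone via PAGE_EXECONLY; after it, the same request falls back to PAGE_READONLY_EXEC through __P100/__S100, so the mapping becomes readable as well as executable.

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;

	/* Request execute-only: PROT_EXEC without PROT_READ or PROT_WRITE. */
	void *p = mmap(NULL, len, PROT_EXEC,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	printf("mapped %zu bytes at %p\n", len, p);
	munmap(p, len);
	return 0;
}

On a kernel with execute-only support the resulting VMA shows up as "--xp" in /proc/self/maps; with this revert applied it shows up as "r-xp" instead.
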
diff --git a/queue-5.4/ftrace-avoid-potential-division-by-zero-in-function-profiler.patch b/queue-5.4/ftrace-avoid-potential-division-by-zero-in-function-profiler.patch
new file mode 100644 (file)
index 0000000..dd4bd86
--- /dev/null
@@ -0,0 +1,49 @@
+From e31f7939c1c27faa5d0e3f14519eaf7c89e8a69d Mon Sep 17 00:00:00 2001
+From: Wen Yang <wenyang@linux.alibaba.com>
+Date: Fri, 3 Jan 2020 11:02:48 +0800
+Subject: ftrace: Avoid potential division by zero in function profiler
+
+From: Wen Yang <wenyang@linux.alibaba.com>
+
+commit e31f7939c1c27faa5d0e3f14519eaf7c89e8a69d upstream.
+
+ftrace_profile->counter is an unsigned long, and do_div() truncates it
+to 32 bits, which means the counter can test as non-zero yet be
+truncated to zero for the division.
+Fix this issue by using div64_ul() instead.
+
+Link: http://lkml.kernel.org/r/20200103030248.14516-1-wenyang@linux.alibaba.com
+
+Cc: stable@vger.kernel.org
+Fixes: e330b3bcd8319 ("tracing: Show sample std dev in function profiling")
+Fixes: 34886c8bc590f ("tracing: add average time in function to function profiler")
+Signed-off-by: Wen Yang <wenyang@linux.alibaba.com>
+Signed-off-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ftrace.c |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -524,8 +524,7 @@ static int function_stat_show(struct seq
+       }
+ #ifdef CONFIG_FUNCTION_GRAPH_TRACER
+-      avg = rec->time;
+-      do_div(avg, rec->counter);
++      avg = div64_ul(rec->time, rec->counter);
+       if (tracing_thresh && (avg < tracing_thresh))
+               goto out;
+ #endif
+@@ -551,7 +550,8 @@ static int function_stat_show(struct seq
+                * Divide only 1000 for ns^2 -> us^2 conversion.
+                * trace_print_graph_duration will divide 1000 again.
+                */
+-              do_div(stddev, rec->counter * (rec->counter - 1) * 1000);
++              stddev = div64_ul(stddev,
++                                rec->counter * (rec->counter - 1) * 1000);
+       }
+       trace_seq_init(&s);
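A minimal userspace sketch of the failure mode described above (the values are assumed for illustration, not taken from a real trace): do_div() only accepts a 32-bit divisor, so a 64-bit counter whose low 32 bits happen to be zero tests as non-zero yet becomes zero once truncated for the division, while div64_ul() keeps the full divisor.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t total_ns = 123456789ULL;   /* stands in for rec->time */
	uint64_t counter  = 0x100000000ULL; /* non-zero, but low 32 bits are zero */

	/* do_div() only takes a u32 divisor, so the counter is truncated: */
	uint32_t truncated = (uint32_t)counter;
	printf("counter = %llu, truncated divisor = %u\n",
	       (unsigned long long)counter, truncated);
	if (truncated == 0)
		printf("do_div() would divide by zero here\n");

	/* div64_ul() keeps the full 64-bit divisor, as in the fix: */
	printf("full 64-bit division gives %llu\n",
	       (unsigned long long)(total_ns / counter));
	return 0;
}
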
diff --git a/queue-5.4/series b/queue-5.4/series
index 35ec18efe0e109ace98e275b7c04b398c3734541..2d3f96309b3ba6903e8f3382fa7d7de094294a20 100644 (file)
@@ -127,3 +127,6 @@ alsa-pcm-yet-another-missing-check-of-non-cached-buffer-type.patch
 alsa-firewire-motu-correct-a-typo-in-the-clock-proc-string.patch
 scsi-lpfc-fix-rpi-release-when-deleting-vport.patch
 exit-panic-before-exit_mm-on-global-init-exit.patch
+arm64-revert-support-for-execute-only-user-mappings.patch
+ftrace-avoid-potential-division-by-zero-in-function-profiler.patch
+spi-spi-fsl-dspi-fix-16-bit-word-order-in-32-bit-xspi-mode.patch
diff --git a/queue-5.4/spi-spi-fsl-dspi-fix-16-bit-word-order-in-32-bit-xspi-mode.patch b/queue-5.4/spi-spi-fsl-dspi-fix-16-bit-word-order-in-32-bit-xspi-mode.patch
new file mode 100644 (file)
index 0000000..928fae2
--- /dev/null
@@ -0,0 +1,90 @@
+From ca59d5a51690d5b9340343dc36792a252e9414ae Mon Sep 17 00:00:00 2001
+From: Vladimir Oltean <olteanv@gmail.com>
+Date: Sat, 28 Dec 2019 15:55:36 +0200
+Subject: spi: spi-fsl-dspi: Fix 16-bit word order in 32-bit XSPI mode
+
+From: Vladimir Oltean <olteanv@gmail.com>
+
+commit ca59d5a51690d5b9340343dc36792a252e9414ae upstream.
+
+When used in Extended SPI mode on LS1021A, the DSPI controller wants to
+have the least significant 16-bit word written first to the TX FIFO.
+
+In fact, the LS1021A reference manual says:
+
+33.5.2.4.2 Draining the TX FIFO
+
+When Extended SPI Mode (DSPIx_MCR[XSPI]) is enabled, if the frame size
+of SPI Data to be transmitted is more than 16 bits, then it causes two
+Data entries to be popped from TX FIFO simultaneously which are
+transferred to the shift register. The first of the two popped entries
+forms the 16 least significant bits of the SPI frame to be transmitted.
+
+So given the following TX buffer:
+
+ +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+
+ | 0x0 | 0x1 | 0x2 | 0x3 | 0x4 | 0x5 | 0x6 | 0x7 | 0x8 | 0x9 | 0xa | 0xb |
+ +-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+-----+
+ |     32-bit word 1     |     32-bit word 2     |     32-bit word 3     |
+ +-----------------------+-----------------------+-----------------------+
+
+The correct way that a little-endian system should transmit it on the
+wire when bits_per_word is 32 is:
+
+0x03020100
+0x07060504
+0x0b0a0908
+
+But it is actually transmitted as follows, as seen with a scope:
+
+0x01000302
+0x05040706
+0x09080b0a
+
+It appears that this patch has been submitted at least once before:
+https://lkml.org/lkml/2018/9/21/286
+but in that case Chuanhua Han did not manage to explain the problem
+clearly enough and the patch did not get merged, leaving XSPI mode
+broken.
+
+Fixes: 8fcd151d2619 ("spi: spi-fsl-dspi: XSPI FIFO handling (in TCFQ mode)")
+Cc: Esben Haabendal <eha@deif.com>
+Cc: Chuanhua Han <chuanhua.han@nxp.com>
+Signed-off-by: Vladimir Oltean <olteanv@gmail.com>
+Link: https://lore.kernel.org/r/20191228135536.14284-1-olteanv@gmail.com
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/spi/spi-fsl-dspi.c |   15 ++++-----------
+ 1 file changed, 4 insertions(+), 11 deletions(-)
+
+--- a/drivers/spi/spi-fsl-dspi.c
++++ b/drivers/spi/spi-fsl-dspi.c
+@@ -583,21 +583,14 @@ static void dspi_tcfq_write(struct fsl_d
+       dspi->tx_cmd |= SPI_PUSHR_CMD_CTCNT;
+       if (dspi->devtype_data->xspi_mode && dspi->bits_per_word > 16) {
+-              /* Write two TX FIFO entries first, and then the corresponding
+-               * CMD FIFO entry.
++              /* Write the CMD FIFO entry first, and then the two
++               * corresponding TX FIFO entries.
+                */
+               u32 data = dspi_pop_tx(dspi);
+-              if (dspi->cur_chip->ctar_val & SPI_CTAR_LSBFE) {
+-                      /* LSB */
+-                      tx_fifo_write(dspi, data & 0xFFFF);
+-                      tx_fifo_write(dspi, data >> 16);
+-              } else {
+-                      /* MSB */
+-                      tx_fifo_write(dspi, data >> 16);
+-                      tx_fifo_write(dspi, data & 0xFFFF);
+-              }
+               cmd_fifo_write(dspi);
++              tx_fifo_write(dspi, data & 0xFFFF);
++              tx_fifo_write(dspi, data >> 16);
+       } else {
+               /* Write one entry to both TX FIFO and CMD FIFO
+                * simultaneously.
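
A minimal sketch of the corrected fill order for one frame wider than 16 bits (the helper mirrors the driver's tx_fifo_write() in name only; this is an illustration, not driver code): the CMD FIFO entry is written first, then the least significant 16-bit half of the data word, then the most significant half, matching the manual's rule that the first popped TX entry forms the 16 least significant bits of the frame.

#include <stdint.h>
#include <stdio.h>

static void tx_fifo_write(uint16_t half)
{
	/* Stands in for the DSPI TX FIFO register write. */
	printf("TX FIFO <- 0x%04x\n", half);
}

int main(void)
{
	uint32_t data = 0x03020100; /* first 32-bit word of the example buffer */

	/* cmd_fifo_write(dspi) would go here in the driver, */
	tx_fifo_write(data & 0xFFFF); /* then the least significant half: 0x0100 */
	tx_fifo_write(data >> 16);    /* then the most significant half:  0x0302 */
	return 0;
}

With the example TX buffer above, this yields 0x03020100 on the wire, as the commit message expects for a little-endian system at bits_per_word = 32.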