git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.2-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 6 Nov 2015 05:57:46 +0000 (21:57 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 6 Nov 2015 05:57:46 +0000 (21:57 -0800)
added patches:
arm-8445-1-fix-vdsomunge-not-to-depend-on-glibc-specific-byteswap.h.patch
arm-8449-1-fix-bug-in-vdsomunge-swab32-macro.patch
arm64-kernel-fix-tcr_el1.t0sz-restore-on-systems-with-extended-idmap.patch
revert-arm64-unwind-fix-pc-calculation.patch

queue-4.2/arm-8445-1-fix-vdsomunge-not-to-depend-on-glibc-specific-byteswap.h.patch [new file with mode: 0644]
queue-4.2/arm-8449-1-fix-bug-in-vdsomunge-swab32-macro.patch [new file with mode: 0644]
queue-4.2/arm64-kernel-fix-tcr_el1.t0sz-restore-on-systems-with-extended-idmap.patch [new file with mode: 0644]
queue-4.2/revert-arm64-unwind-fix-pc-calculation.patch [new file with mode: 0644]
queue-4.2/series

diff --git a/queue-4.2/arm-8445-1-fix-vdsomunge-not-to-depend-on-glibc-specific-byteswap.h.patch b/queue-4.2/arm-8445-1-fix-vdsomunge-not-to-depend-on-glibc-specific-byteswap.h.patch
new file mode 100644 (file)
index 0000000..ad11709
--- /dev/null
@@ -0,0 +1,82 @@
+From 8a603f91cc4848ab1a0458bc065aa9f64322e123 Mon Sep 17 00:00:00 2001
+From: "H. Nikolaus Schaller" <hns@goldelico.com>
+Date: Fri, 16 Oct 2015 22:19:06 +0100
+Subject: ARM: 8445/1: fix vdsomunge not to depend on glibc specific byteswap.h
+
+From: "H. Nikolaus Schaller" <hns@goldelico.com>
+
+commit 8a603f91cc4848ab1a0458bc065aa9f64322e123 upstream.
+
+If the host toolchain is not glibc based then the arm kernel build
+fails with
+
+  HOSTCC  arch/arm/vdso/vdsomunge
+  arch/arm/vdso/vdsomunge.c:48:22: fatal error: byteswap.h: No such file or directory
+
+Observed: with omap2plus_defconfig, compiling on Mac OS X with an ARM ELF
+cross-compiler.
+
+Reason: byteswap.h is a glibc only header.
+
+Solution: replace by private byte-swapping macros (taken from
+arch/mips/boot/elf2ecoff.c and kindly improved by Russell King)
+
+Tested to compile on Mac OS X 10.9.5 host.
+
+Signed-off-by: H. Nikolaus Schaller <hns@goldelico.com>
+Signed-off-by: Nathan Lynch <nathan_lynch@mentor.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/vdso/vdsomunge.c |   17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/arch/arm/vdso/vdsomunge.c
++++ b/arch/arm/vdso/vdsomunge.c
+@@ -45,7 +45,6 @@
+  * it does.
+  */
+-#include <byteswap.h>
+ #include <elf.h>
+ #include <errno.h>
+ #include <fcntl.h>
+@@ -59,6 +58,16 @@
+ #include <sys/types.h>
+ #include <unistd.h>
++#define swab16(x) \
++      ((((x) & 0x00ff) << 8) | \
++       (((x) & 0xff00) >> 8))
++
++#define swab32(x) \
++      ((((x) & 0x000000ff) << 24) | \
++       (((x) & 0x0000ff00) <<  8) | \
++       (((x) & 0x00ff0000) >>  8) | \
++       (((x) & 0xff000000) << 24))
++
+ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ #define HOST_ORDER ELFDATA2LSB
+ #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
+@@ -104,17 +113,17 @@ static void cleanup(void)
+ static Elf32_Word read_elf_word(Elf32_Word word, bool swap)
+ {
+-      return swap ? bswap_32(word) : word;
++      return swap ? swab32(word) : word;
+ }
+ static Elf32_Half read_elf_half(Elf32_Half half, bool swap)
+ {
+-      return swap ? bswap_16(half) : half;
++      return swap ? swab16(half) : half;
+ }
+ static void write_elf_word(Elf32_Word val, Elf32_Word *dst, bool swap)
+ {
+-      *dst = swap ? bswap_32(val) : val;
++      *dst = swap ? swab32(val) : val;
+ }
+ int main(int argc, char **argv)
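
For illustration only, not part of the patch: a minimal, hypothetical host-side C sketch of the approach the patch takes -- detect the host byte order with the compiler-provided __BYTE_ORDER__ macro and swap ELF words only when the file's endianness differs from the host's, with no dependency on glibc's byteswap.h. The swab32() here is the corrected form from the follow-up fix below; read_word() and HOST_LITTLE_ENDIAN are illustrative names, not taken from vdsomunge.c.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Private byte-swapping macro, corrected form (see ARM: 8449/1 below). */
#define swab32(x) \
        ((((x) & 0x000000ff) << 24) | \
         (((x) & 0x0000ff00) <<  8) | \
         (((x) & 0x00ff0000) >>  8) | \
         (((x) & 0xff000000) >> 24))

/* Host byte order from the compiler, no libc header needed. */
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
#define HOST_LITTLE_ENDIAN true
#else
#define HOST_LITTLE_ENDIAN false
#endif

/* Mirrors the read_elf_word() pattern: swap only if file and host differ. */
static uint32_t read_word(uint32_t word, bool file_is_little_endian)
{
        bool swap = (file_is_little_endian != HOST_LITTLE_ENDIAN);

        return swap ? swab32(word) : word;
}

int main(void)
{
        /* Pretend the file's byte order is the opposite of the host's. */
        printf("0x%08x\n",
               (unsigned int)read_word(0x11223344u, !HOST_LITTLE_ENDIAN));
        return 0;
}

Built with a plain cc invocation, this prints 0x44332211 regardless of the host's endianness.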
diff --git a/queue-4.2/arm-8449-1-fix-bug-in-vdsomunge-swab32-macro.patch b/queue-4.2/arm-8449-1-fix-bug-in-vdsomunge-swab32-macro.patch
new file mode 100644 (file)
index 0000000..75e9e9e
--- /dev/null
@@ -0,0 +1,35 @@
+From 38850d786a799c3ff2de0dc1980902c3263698dc Mon Sep 17 00:00:00 2001
+From: "H. Nikolaus Schaller" <hns@goldelico.com>
+Date: Wed, 28 Oct 2015 19:00:26 +0100
+Subject: ARM: 8449/1: fix bug in vdsomunge swab32 macro
+
+From: "H. Nikolaus Schaller" <hns@goldelico.com>
+
+commit 38850d786a799c3ff2de0dc1980902c3263698dc upstream.
+
+Commit 8a603f91cc48 ("ARM: 8445/1: fix vdsomunge not to depend on
+glibc specific byteswap.h") unfortunately introduced a bug that crept in
+during discussion and patch simplification and went unnoticed at the time.
+
+Reported-by: Efraim Yawitz <efraim.yawitz@gmail.com>
+Signed-off-by: H. Nikolaus Schaller <hns@goldelico.com>
+Fixes: 8a603f91cc48 ("ARM: 8445/1: fix vdsomunge not to depend on glibc specific byteswap.h")
+Signed-off-by: Nathan Lynch <nathan_lynch@mentor.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/vdso/vdsomunge.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/vdso/vdsomunge.c
++++ b/arch/arm/vdso/vdsomunge.c
+@@ -66,7 +66,7 @@
+       ((((x) & 0x000000ff) << 24) | \
+        (((x) & 0x0000ff00) <<  8) | \
+        (((x) & 0x00ff0000) >>  8) | \
+-       (((x) & 0xff000000) << 24))
++       (((x) & 0xff000000) >> 24))
+ #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
+ #define HOST_ORDER ELFDATA2LSB
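
For illustration only, not part of the patch: a standalone check of why the original last term was wrong, assuming ordinary 32-bit unsigned arithmetic. Masking out the top byte and then shifting it left by 24 pushes it out of a 32-bit value entirely, so the low byte of the swapped result is silently zero; shifting right by 24 moves it where it belongs.

#include <stdint.h>
#include <stdio.h>

#define swab32_buggy(x) \
        ((((x) & 0x000000ff) << 24) | \
         (((x) & 0x0000ff00) <<  8) | \
         (((x) & 0x00ff0000) >>  8) | \
         (((x) & 0xff000000) << 24))    /* wrong: 0xXX000000 << 24 == 0 */

#define swab32_fixed(x) \
        ((((x) & 0x000000ff) << 24) | \
         (((x) & 0x0000ff00) <<  8) | \
         (((x) & 0x00ff0000) >>  8) | \
         (((x) & 0xff000000) >> 24))    /* right: top byte becomes low byte */

int main(void)
{
        uint32_t v = 0x11223344u;

        printf("buggy: 0x%08x\n", (unsigned int)swab32_buggy(v)); /* 0x44332200 */
        printf("fixed: 0x%08x\n", (unsigned int)swab32_fixed(v)); /* 0x44332211 */
        return 0;
}

In vdsomunge the values are Elf32_Word, i.e. unsigned 32-bit, so the buggy macro silently corrupted the low byte of every swapped word rather than failing loudly.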
diff --git a/queue-4.2/arm64-kernel-fix-tcr_el1.t0sz-restore-on-systems-with-extended-idmap.patch b/queue-4.2/arm64-kernel-fix-tcr_el1.t0sz-restore-on-systems-with-extended-idmap.patch
new file mode 100644 (file)
index 0000000..8c4ebf4
--- /dev/null
@@ -0,0 +1,85 @@
+From e13d918a19a7b6cba62b32884f5e336e764c2cc6 Mon Sep 17 00:00:00 2001
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Date: Tue, 27 Oct 2015 17:29:10 +0000
+Subject: arm64: kernel: fix tcr_el1.t0sz restore on systems with extended idmap
+
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+
+commit e13d918a19a7b6cba62b32884f5e336e764c2cc6 upstream.
+
+Commit dd006da21646 ("arm64: mm: increase VA range of identity map")
+introduced a mechanism to extend the virtual memory map range
+to support arm64 systems with system RAM located at very high offset,
+where the identity mapping used to enable/disable the MMU requires
+additional translation levels to map the physical memory at an equal
+virtual offset.
+
+The kernel detects at boot time the tcr_el1.t0sz value required by the
+identity mapping and sets up the tcr_el1.t0sz register field accordingly,
+any time the identity map is required in the kernel (i.e. when enabling
+the MMU).
+
+In the cold boot path, after enabling the MMU the kernel resets
+tcr_el1.t0sz to its default value (i.e. the actual configuration value for
+the system virtual address space) so that the memory space translated by
+ttbr0_el1 is restored as expected.
+
+Commit dd006da21646 ("arm64: mm: increase VA range of identity map")
+also added code to set up the tcr_el1.t0sz value when the kernel resumes
+from low-power states with the MMU off through cpu_resume(), so that the
+identity mapping can be used to re-enable the MMU. However, it failed to
+add the code required to restore tcr_el1.t0sz to its default value once
+the core returns to the kernel with the MMU enabled. The kernel might
+therefore end up running with a tcr_el1.t0sz value set up for the identity
+mapping, which can be lower than the value required by the actual virtual
+address space, resulting in an erroneous set-up.
+
+This patch adds code in the resume path that restores the tcr_el1.t0sz
+default value upon core resume, mirroring the cold boot path behaviour
+and thereby fixing the issue.
+
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Fixes: dd006da21646 ("arm64: mm: increase VA range of identity map")
+Acked-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Signed-off-by: James Morse <james.morse@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/suspend.c |   22 +++++++++++++---------
+ 1 file changed, 13 insertions(+), 9 deletions(-)
+
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -80,17 +80,21 @@ int cpu_suspend(unsigned long arg, int (
+       if (ret == 0) {
+               /*
+                * We are resuming from reset with TTBR0_EL1 set to the
+-               * idmap to enable the MMU; restore the active_mm mappings in
+-               * TTBR0_EL1 unless the active_mm == &init_mm, in which case
+-               * the thread entered cpu_suspend with TTBR0_EL1 set to
+-               * reserved TTBR0 page tables and should be restored as such.
++               * idmap to enable the MMU; set the TTBR0 to the reserved
++               * page tables to prevent speculative TLB allocations, flush
++               * the local tlb and set the default tcr_el1.t0sz so that
++               * the TTBR0 address space set-up is properly restored.
++               * If the current active_mm != &init_mm we entered cpu_suspend
++               * with mappings in TTBR0 that must be restored, so we switch
++               * them back to complete the address space configuration
++               * restoration before returning.
+                */
+-              if (mm == &init_mm)
+-                      cpu_set_reserved_ttbr0();
+-              else
+-                      cpu_switch_mm(mm->pgd, mm);
+-
++              cpu_set_reserved_ttbr0();
+               flush_tlb_all();
++              cpu_set_default_tcr_t0sz();
++
++              if (mm != &init_mm)
++                      cpu_switch_mm(mm->pgd, mm);
+               /*
+                * Restore per-cpu offset before any kernel
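
For illustration only, this is not arm64 kernel code: a hypothetical userspace sketch of the ordering the fix establishes in cpu_suspend()'s resume path. The helpers are stubs with simplified signatures; the point is that the reserved TTBR0 tables, the TLB flush and the default tcr_el1.t0sz are restored first, and the saved mm is switched back in only afterwards, and only when it is not init_mm.

#include <stdio.h>

struct mm_struct { const char *name; };

static struct mm_struct init_mm = { "init_mm" };

/* Stubs standing in for the real arm64 helpers (simplified signatures). */
static void cpu_set_reserved_ttbr0(void)   { puts("TTBR0 <- reserved tables"); }
static void flush_tlb_all(void)            { puts("flush local TLB"); }
static void cpu_set_default_tcr_t0sz(void) { puts("tcr_el1.t0sz <- default"); }
static void cpu_switch_mm(struct mm_struct *mm) { printf("TTBR0 <- %s\n", mm->name); }

/* Mirrors the fixed resume path: t0sz is always restored before any switch. */
static void resume_path(struct mm_struct *mm)
{
        cpu_set_reserved_ttbr0();
        flush_tlb_all();
        cpu_set_default_tcr_t0sz();     /* the step the original code missed */

        if (mm != &init_mm)
                cpu_switch_mm(mm);
}

int main(void)
{
        struct mm_struct task_mm = { "task mm" };

        resume_path(&task_mm);   /* resuming into a user mm */
        resume_path(&init_mm);   /* resuming with init_mm: keep reserved tables */
        return 0;
}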
diff --git a/queue-4.2/revert-arm64-unwind-fix-pc-calculation.patch b/queue-4.2/revert-arm64-unwind-fix-pc-calculation.patch
new file mode 100644 (file)
index 0000000..dd37f47
--- /dev/null
@@ -0,0 +1,44 @@
+From 9702970c7bd3e2d6fecb642a190269131d4ac16c Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Wed, 28 Oct 2015 16:56:13 +0000
+Subject: Revert "ARM64: unwind: Fix PC calculation"
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 9702970c7bd3e2d6fecb642a190269131d4ac16c upstream.
+
+This reverts commit e306dfd06fcb44d21c80acb8e5a88d55f3d1cf63.
+
+With this patch applied, we were the only architecture making this sort
+of adjustment to the PC calculation in the unwinder. This causes
+problems for ftrace, where the PC values are matched against the
+contents of the stack frames in the callchain and fail to match any
+records after the address adjustment.
+
+Whilst there has been some effort to change ftrace to work around this,
+those patches are not yet ready for mainline and, since we're the odd
+architecture in this regard, let's just step in line with other
+architectures (like arch/arm/) for now.
+
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/stacktrace.c |    6 +-----
+ 1 file changed, 1 insertion(+), 5 deletions(-)
+
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -48,11 +48,7 @@ int notrace unwind_frame(struct stackfra
+       frame->sp = fp + 0x10;
+       frame->fp = *(unsigned long *)(fp);
+-      /*
+-       * -4 here because we care about the PC at time of bl,
+-       * not where the return will go.
+-       */
+-      frame->pc = *(unsigned long *)(fp + 8) - 4;
++      frame->pc = *(unsigned long *)(fp + 8);
+       return 0;
+ }
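
For illustration only, not kernel code: a small standalone sketch of the AArch64 frame-record layout the unwinder relies on. Each record stored at the frame pointer is {previous fp, saved lr}, so the saved return address lives at fp + 8; with the revert, the unwinder reports that value exactly as stored (matching what ftrace finds in the stack frames and what arch/arm does) instead of subtracting 4 to point back at the bl instruction.

#include <stdint.h>
#include <stdio.h>

/* An AArch64 frame record as pointed to by the frame pointer (x29). */
struct frame_record {
        struct frame_record *prev_fp;   /* at fp + 0 */
        uint64_t lr;                    /* at fp + 8: saved return address */
};

static void unwind(const struct frame_record *fp)
{
        while (fp) {
                /* Report the return address exactly as saved in the record. */
                printf("pc = 0x%016llx\n", (unsigned long long)fp->lr);
                fp = fp->prev_fp;
        }
}

int main(void)
{
        /* A fake three-deep call chain; addresses are made up. */
        struct frame_record outer = { NULL,   0x400100 };
        struct frame_record mid   = { &outer, 0x400200 };
        struct frame_record inner = { &mid,   0x400300 };

        unwind(&inner);
        return 0;
}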
diff --git a/queue-4.2/series b/queue-4.2/series
index 4c1018bc5d662ea16bfde7a0b0347e2f36d1a962..614f5b6230f81514dab21abfc3a217949dd11047 100644 (file)
@@ -59,3 +59,7 @@ arm-dts-imx7d-fix-uart2-base-address.patch
 arm-dts-am57xx-beagle-x15-set-vdd_sd-to-always-on.patch
 arm-ux500-modify-initial-levelshifter-status.patch
 arm-omap1-fix-incorrect-int_dma_lcd.patch
+arm-8445-1-fix-vdsomunge-not-to-depend-on-glibc-specific-byteswap.h.patch
+arm-8449-1-fix-bug-in-vdsomunge-swab32-macro.patch
+revert-arm64-unwind-fix-pc-calculation.patch
+arm64-kernel-fix-tcr_el1.t0sz-restore-on-systems-with-extended-idmap.patch