4.3-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 31 Jan 2016 19:23:00 +0000 (11:23 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Sun, 31 Jan 2016 19:23:00 +0000 (11:23 -0800)
added patches:
arm64-kernel-fix-architected-pmu-registers-unconditional-access.patch
recordmcount-fix-endianness-handling-bug-for-nop_mcount.patch

queue-4.3/arm64-kernel-fix-architected-pmu-registers-unconditional-access.patch [new file with mode: 0644]
queue-4.3/arm64-mm-use-correct-mapping-granularity-under-debug_rodata.patch [deleted file]
queue-4.3/recordmcount-fix-endianness-handling-bug-for-nop_mcount.patch [new file with mode: 0644]
queue-4.3/series

diff --git a/queue-4.3/arm64-kernel-fix-architected-pmu-registers-unconditional-access.patch b/queue-4.3/arm64-kernel-fix-architected-pmu-registers-unconditional-access.patch
new file mode 100644 (file)
index 0000000..0882278
--- /dev/null
@@ -0,0 +1,90 @@
+From f436b2ac90a095746beb6729b8ee8ed87c9eaede Mon Sep 17 00:00:00 2001
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Date: Wed, 13 Jan 2016 14:50:03 +0000
+Subject: arm64: kernel: fix architected PMU registers unconditional access
+
+From: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+
+commit f436b2ac90a095746beb6729b8ee8ed87c9eaede upstream.
+
+The Performance Monitors extension is an optional feature of the
+AArch64 architecture; therefore, in order to access Performance
+Monitors registers safely, the kernel should detect the presence of
+the architected PMU through the PMUVer field of the ID_AA64DFR0_EL1
+register before accessing them.
+
+This patch implements a guard that reads the ID_AA64DFR0_EL1 register
+PMUVer field to detect the presence of the architected PMU and prevents
+access to PMU system registers if the Performance Monitors extension is
+not implemented in the core (a C sketch of the check follows this
+patch).
+
+Cc: Peter Maydell <peter.maydell@linaro.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Fixes: 60792ad349f3 ("arm64: kernel: enforce pmuserenr_el0 initialization and restore")
+Signed-off-by: Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Guenter Roeck <linux@roeck-us.net>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/head.S    |    5 +++++
+ arch/arm64/mm/proc-macros.S |   12 ++++++++++++
+ arch/arm64/mm/proc.S        |    4 ++--
+ 3 files changed, 19 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/kernel/head.S
++++ b/arch/arm64/kernel/head.S
+@@ -524,9 +524,14 @@ CPU_LE(   movk    x0, #0x30d0, lsl #16    )       // C
+ #endif
+
+       /* EL2 debug */
++      mrs     x0, id_aa64dfr0_el1             // Check ID_AA64DFR0_EL1 PMUVer
++      sbfx    x0, x0, #8, #4
++      cmp     x0, #1
++      b.lt    4f                              // Skip if no PMU present
+       mrs     x0, pmcr_el0                    // Disable debug access traps
+       ubfx    x0, x0, #11, #5                 // to EL2 and allow access to
+       msr     mdcr_el2, x0                    // all PMU counters from EL1
++4:
+
+       /* Stage-2 translation */
+       msr     vttbr_el2, xzr
+--- a/arch/arm64/mm/proc-macros.S
++++ b/arch/arm64/mm/proc-macros.S
+@@ -62,3 +62,15 @@
+       bfi     \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH
+ #endif
+       .endm
++
++/*
++ * reset_pmuserenr_el0 - reset PMUSERENR_EL0 if PMUv3 present
++ */
++      .macro  reset_pmuserenr_el0, tmpreg
++      mrs     \tmpreg, id_aa64dfr0_el1        // Check ID_AA64DFR0_EL1 PMUVer
++      sbfx    \tmpreg, \tmpreg, #8, #4
++      cmp     \tmpreg, #1                     // Skip if no PMU present
++      b.lt    9000f
++      msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
++9000:
++      .endm
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -115,7 +115,7 @@ ENTRY(cpu_do_resume)
+        */
+       ubfx    x11, x11, #1, #1
+       msr     oslar_el1, x11
+-      msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
++      reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
+       mov     x0, x12
+       dsb     nsh             // Make sure local tlb invalidation completed
+       isb
+@@ -154,7 +154,7 @@ ENTRY(__cpu_setup)
+       msr     cpacr_el1, x0                   // Enable FP/ASIMD
+       mov     x0, #1 << 12                    // Reset mdscr_el1 and disable
+       msr     mdscr_el1, x0                   // access to the DCC from EL0
+-      msr     pmuserenr_el0, xzr              // Disable PMU access from EL0
++      reset_pmuserenr_el0 x0                  // Disable PMU access from EL0
+       /*
+        * Memory region attributes for LPAE:
+        *
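
The guard above keys off the PMUVer field of ID_AA64DFR0_EL1, bits
[11:8]. Note that it uses sbfx, a *signed* bitfield extract: PMUVer is
0x0 when no PMU is implemented and 0xf for an IMPLEMENTATION DEFINED
(non-architected) PMU, which sign-extends to -1, so "cmp x0, #1" plus
"b.lt" skips the PMU register writes in both cases. A rough C rendering
of the check, for illustration only (the helper name is made up and
this is not kernel code):

	#include <stdint.h>
	#include <stdbool.h>

	/* Does this core implement an architected PMU (PMUv3 or later)? */
	static bool architected_pmu_present(uint64_t id_aa64dfr0)
	{
		/* Equivalent of "sbfx x0, x0, #8, #4": signed extract
		 * of the PMUVer field, bits [11:8]. */
		int64_t pmuver = (int64_t)(id_aa64dfr0 << 52) >> 60;

		/* 0x0 -> 0 (no PMU), 0xf -> -1 (impl. defined): both < 1 */
		return pmuver >= 1;
	}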
diff --git a/queue-4.3/arm64-mm-use-correct-mapping-granularity-under-debug_rodata.patch b/queue-4.3/arm64-mm-use-correct-mapping-granularity-under-debug_rodata.patch
deleted file mode 100644 (file)
index f4ebda2..0000000
+++ /dev/null
@@ -1,82 +0,0 @@
-From 4fee9f364b9b99f76732f2a6fd6df679a237fa74 Mon Sep 17 00:00:00 2001
-From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
-Date: Mon, 16 Nov 2015 11:18:14 +0100
-Subject: arm64: mm: use correct mapping granularity under DEBUG_RODATA
-
-From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
-
-commit 4fee9f364b9b99f76732f2a6fd6df679a237fa74 upstream.
-
-When booting a 64k pages kernel that is built with CONFIG_DEBUG_RODATA
-and resides at an offset that is not a multiple of 512 MB, the rounding
-that occurs in __map_memblock() and fixup_executable() results in
-incorrect regions being mapped.
-
-The following snippet from /sys/kernel/debug/kernel_page_tables shows
-how, when the kernel is loaded 2 MB above the base of DRAM at 0x40000000,
-the first 2 MB of memory (which may be inaccessible from non-secure EL1
-or just reserved by the firmware) is inadvertently mapped into the end of
-the module region.
-
-  ---[ Modules start ]---
-  0xfffffdffffe00000-0xfffffe0000000000     2M RW NX ... UXN MEM/NORMAL
-  ---[ Modules end ]---
-  ---[ Kernel Mapping ]---
-  0xfffffe0000000000-0xfffffe0000090000   576K RW NX ... UXN MEM/NORMAL
-  0xfffffe0000090000-0xfffffe0000200000  1472K ro x  ... UXN MEM/NORMAL
-  0xfffffe0000200000-0xfffffe0000800000     6M ro x  ... UXN MEM/NORMAL
-  0xfffffe0000800000-0xfffffe0000810000    64K ro x  ... UXN MEM/NORMAL
-  0xfffffe0000810000-0xfffffe0000a00000  1984K RW NX ... UXN MEM/NORMAL
-  0xfffffe0000a00000-0xfffffe00ffe00000  4084M RW NX ... UXN MEM/NORMAL
-
-The same issue is likely to occur on 16k-page kernels whose load
-address is not a multiple of 32 MB (i.e., SECTION_SIZE). So round to
-SWAPPER_BLOCK_SIZE instead of SECTION_SIZE (see the worked example
-after this patch).
-
-Fixes: da141706aea5 ("arm64: add better page protections to arm64")
-Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
-Acked-by: Mark Rutland <mark.rutland@arm.com>
-Acked-by: Laura Abbott <labbott@redhat.com>
-Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
-Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
-
----
- arch/arm64/mm/mmu.c |   12 ++++++------
- 1 file changed, 6 insertions(+), 6 deletions(-)
-
---- a/arch/arm64/mm/mmu.c
-+++ b/arch/arm64/mm/mmu.c
-@@ -308,8 +308,8 @@ static void __init __map_memblock(phys_a
-        * for now. This will get more fine grained later once all memory
-        * is mapped
-        */
--      unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
--      unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
-+      unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
-+      unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
-
-       if (end < kernel_x_start) {
-               create_mapping(start, __phys_to_virt(start),
-@@ -397,18 +397,18 @@ void __init fixup_executable(void)
- {
- #ifdef CONFIG_DEBUG_RODATA
-       /* now that we are actually fully mapped, make the start/end more fine grained */
--      if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
-+      if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
-               unsigned long aligned_start = round_down(__pa(_stext),
--                                                      SECTION_SIZE);
-+                                                       SWAPPER_BLOCK_SIZE);
-
-       create_mapping(aligned_start, __phys_to_virt(aligned_start),
-                               __pa(_stext) - aligned_start,
-                               PAGE_KERNEL);
-       }
-
--      if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
-+      if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
-               unsigned long aligned_end = round_up(__pa(__init_end),
--                                                      SECTION_SIZE);
-+                                                        SWAPPER_BLOCK_SIZE);
-               create_mapping(__pa(__init_end), (unsigned long)__init_end,
-                               aligned_end - __pa(__init_end),
-                               PAGE_KERNEL);
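
The arithmetic in the commit message is easy to reproduce. With 64 KB
pages the early page tables map the kernel with 64 KB blocks
(SWAPPER_BLOCK_SIZE is PAGE_SIZE when the swapper does not use section
maps), while SECTION_SIZE, one PMD entry, covers 512 MB, so rounding
_stext down to SECTION_SIZE can reach far below the kernel image. A
small self-contained sketch, using the scenario from the message above
(DRAM at 0x40000000, kernel loaded 2 MB up; illustration only):

	#include <stdio.h>
	#include <stdint.h>

	#define ROUND_DOWN(x, a)	((x) & ~((uint64_t)(a) - 1))

	int main(void)
	{
		/* physical address of _stext: DRAM base + 2 MB */
		uint64_t stext = 0x40000000ULL + (2ULL << 20);

		/* before the fix: SECTION_SIZE is 512 MB on 64 KB pages */
		printf("SECTION_SIZE:       %#llx\n",
		       (unsigned long long)ROUND_DOWN(stext, 512ULL << 20));
		/* after the fix: SWAPPER_BLOCK_SIZE is 64 KB here */
		printf("SWAPPER_BLOCK_SIZE: %#llx\n",
		       (unsigned long long)ROUND_DOWN(stext, 64ULL << 10));
		return 0;
	}

This prints 0x40000000 and 0x40200000: the old rounding pulls the first
2 MB of DRAM, which may be inaccessible or firmware-reserved, into the
executable kernel mapping, which is exactly the stray 2 MB region shown
at the end of the module area in the page-table dump above.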
diff --git a/queue-4.3/recordmcount-fix-endianness-handling-bug-for-nop_mcount.patch b/queue-4.3/recordmcount-fix-endianness-handling-bug-for-nop_mcount.patch
new file mode 100644 (file)
index 0000000..cb03ae0
--- /dev/null
@@ -0,0 +1,34 @@
+From c84da8b9ad3761eef43811181c7e896e9834b26b Mon Sep 17 00:00:00 2001
+From: libin <huawei.libin@huawei.com>
+Date: Tue, 3 Nov 2015 08:58:47 +0800
+Subject: recordmcount: Fix endianness handling bug for nop_mcount
+
+From: libin <huawei.libin@huawei.com>
+
+commit c84da8b9ad3761eef43811181c7e896e9834b26b upstream.
+
+In nop_mcount, shdr->sh_offset and relp->r_offset should be converted
+for endianness; otherwise the addition produces a bogus file offset and
+triggers a segmentation fault when the recordmcount binary and file.o
+have different endianness (see the illustration after this patch).
+
+Link: http://lkml.kernel.org/r/563806C7.7070606@huawei.com
+
+Signed-off-by: Li Bin <huawei.libin@huawei.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ scripts/recordmcount.h |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/scripts/recordmcount.h
++++ b/scripts/recordmcount.h
+@@ -377,7 +377,7 @@ static void nop_mcount(Elf_Shdr const *c
+
+               if (mcountsym == Elf_r_sym(relp) && !is_fake_mcount(relp)) {
+                       if (make_nop)
+-                              ret = make_nop((void *)ehdr, shdr->sh_offset + relp->r_offset);
++                              ret = make_nop((void *)ehdr, _w(shdr->sh_offset) + _w(relp->r_offset));
+                       if (warn_on_notrace_sect && !once) {
+                               printf("Section %s has mcount callers being ignored\n",
+                                      txtname);
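
The failure mode is simple to demonstrate. ELF fields are stored in the
object file's byte order, and recordmcount's _w() wrapper byte-swaps a
word when the host and the object disagree; reading sh_offset and
r_offset verbatim on a cross-endian host yields a huge bogus file
offset, and make_nop() then writes far outside the mmap'd object, hence
the segfault. A self-contained illustration (not the actual
recordmcount code; the offsets are made up):

	#include <stdio.h>
	#include <stdint.h>

	/* stand-in for the w()/_w() conversion recordmcount selects
	 * when host and object endianness differ */
	static uint32_t bswap32(uint32_t x)
	{
		return (x >> 24) | ((x >> 8) & 0x0000ff00U) |
		       ((x << 8) & 0x00ff0000U) | (x << 24);
	}

	int main(void)
	{
		/* true values as stored by a big-endian object file */
		uint32_t sh_offset = 0x340;	/* section's file offset */
		uint32_t r_offset  = 0x18;	/* call site within section */

		/* a little-endian host reading the fields verbatim */
		uint32_t raw   = bswap32(sh_offset) + bswap32(r_offset);
		/* with _w()-style conversion applied to each field */
		uint32_t fixed = bswap32(bswap32(sh_offset)) +
				 bswap32(bswap32(r_offset));

		printf("unconverted: %#x (far past EOF)\n", raw);
		printf("converted:   %#x\n", fixed);
		return 0;
	}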
diff --git a/queue-4.3/series b/queue-4.3/series
index 759c83b15a51b7f94d421040c04ba066219e610f..3e5d5294420c70ba90de35471bb9bc91766d7797 100644 (file)
@@ -133,7 +133,6 @@ recordmcount-arm64-replace-the-ignored-mcount-call-into-nop.patch
 arm64-bpf-fix-div-by-zero-case.patch
 arm64-bpf-fix-mod-by-zero-case.patch
 arm64-cmpxchg_dbl-fix-return-value-type.patch
-arm64-mm-use-correct-mapping-granularity-under-debug_rodata.patch
 arm64-kernel-pause-unpause-function-graph-tracer-in-cpu_suspend.patch
 arm-arm64-kvm-test-properly-for-a-pte-s-uncachedness.patch
 arm64-kvm-fix-aarch32-to-aarch64-register-mapping.patch
@@ -155,3 +154,5 @@ fix-the-regression-from-direct-io-fix-negative-return-from-dio-read-beyond-eof.p
 mn10300-select-config_have_uid16-to-fix-build-failure.patch
 arm64-restore-bogomips-information-in-proc-cpuinfo.patch
 arm64-kvm-add-workaround-for-cortex-a57-erratum-834220.patch
+arm64-kernel-fix-architected-pmu-registers-unconditional-access.patch
+recordmcount-fix-endianness-handling-bug-for-nop_mcount.patch