git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.10-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 17 Jul 2015 00:38:49 +0000 (17:38 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Fri, 17 Jul 2015 00:38:49 +0000 (17:38 -0700)
added patches:
arc-add-compiler-barrier-to-llsc-based-cmpxchg.patch
arm64-do-not-attempt-to-use-init_mm-in-reset_context.patch
arm64-mm-fix-freeing-of-the-wrong-memmap-entries-with-sparsemem_vmemmap.patch
arm64-vdso-work-around-broken-elf-toolchains-in-makefile.patch

queue-3.10/arc-add-compiler-barrier-to-llsc-based-cmpxchg.patch [new file with mode: 0644]
queue-3.10/arm64-do-not-attempt-to-use-init_mm-in-reset_context.patch [new file with mode: 0644]
queue-3.10/arm64-mm-fix-freeing-of-the-wrong-memmap-entries-with-sparsemem_vmemmap.patch [new file with mode: 0644]
queue-3.10/arm64-vdso-work-around-broken-elf-toolchains-in-makefile.patch [new file with mode: 0644]
queue-3.10/series

diff --git a/queue-3.10/arc-add-compiler-barrier-to-llsc-based-cmpxchg.patch b/queue-3.10/arc-add-compiler-barrier-to-llsc-based-cmpxchg.patch
new file mode 100644
index 0000000..1ae82f9
--- /dev/null
+++ b/queue-3.10/arc-add-compiler-barrier-to-llsc-based-cmpxchg.patch
@@ -0,0 +1,60 @@
+From d57f727264f1425a94689bafc7e99e502cb135b5 Mon Sep 17 00:00:00 2001
+From: Vineet Gupta <vgupta@synopsys.com>
+Date: Thu, 13 Nov 2014 15:54:01 +0530
+Subject: ARC: add compiler barrier to LLSC based cmpxchg
+
+From: Vineet Gupta <vgupta@synopsys.com>
+
+commit d57f727264f1425a94689bafc7e99e502cb135b5 upstream.
+
+When auditing cmpxchg call sites, Chuck noted that gcc was optimizing
+away some of the desired LDs.
+
+|      do {
+|              new = old = *ipi_data_ptr;
+|              new |= 1U << msg;
+|      } while (cmpxchg(ipi_data_ptr, old, new) != old);
+
+was being compiled to the code below
+
+| 8015cef8:    ld         r2,[r4,0]  <-- First LD
+| 8015cefc:    bset       r1,r2,r1
+|
+| 8015cf00:    llock      r3,[r4]  <-- atomic op
+| 8015cf04:    brne       r3,r2,8015cf10
+| 8015cf08:    scond      r1,[r4]
+| 8015cf0c:    bnz        8015cf00
+|
+| 8015cf10:    brne       r3,r2,8015cf00  <-- Branch doesn't go to orig LD
+
+Although this was fixed by adding an ACCESS_ONCE() at this call site, it
+seems safer (for now at least) to add a compiler barrier to the LLSC-based
+cmpxchg.
+
+Reported-by: Chuck Jordan <cjordan@synopsys.com>
+Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/include/asm/cmpxchg.h |    9 +++++----
+ 1 file changed, 5 insertions(+), 4 deletions(-)
+
+--- a/arch/arc/include/asm/cmpxchg.h
++++ b/arch/arc/include/asm/cmpxchg.h
+@@ -25,10 +25,11 @@ __cmpxchg(volatile void *ptr, unsigned l
+       "       scond   %3, [%1]        \n"
+       "       bnz     1b              \n"
+       "2:                             \n"
+-      : "=&r"(prev)
+-      : "r"(ptr), "ir"(expected),
+-        "r"(new) /* can't be "ir". scond can't take limm for "b" */
+-      : "cc");
++      : "=&r"(prev)   /* Early clobber, to prevent reg reuse */
++      : "r"(ptr),     /* Not "m": llock only supports reg direct addr mode */
++        "ir"(expected),
++        "r"(new)      /* can't be "ir". scond can't take LIMM for "b" */
++      : "cc", "memory"); /* so that gcc knows memory is being written here */
+ 
+       return prev;
+ }
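
The call-site pattern that tripped gcc, and the ACCESS_ONCE() alternative mentioned in the commit message above, look roughly like the sketch below. This is a minimal illustration rather than the actual arch/arc IPI code; the ipi_data_ptr and msg names simply mirror the excerpt quoted in the commit message (ACCESS_ONCE() comes from <linux/compiler.h>, cmpxchg() from the arch headers).

/*
 * A typical cmpxchg() retry loop over a shared word.  Without a "memory"
 * clobber in cmpxchg() (or an ACCESS_ONCE() on the load), gcc may hoist
 * the load out of the loop, so the retry path keeps comparing against a
 * stale value, as in the disassembly quoted above.
 */
static void ipi_set_msg_bit(unsigned long *ipi_data_ptr, unsigned int msg)
{
	unsigned long old, new;

	do {
		old = ACCESS_ONCE(*ipi_data_ptr);  /* force a fresh load each iteration */
		new = old | (1UL << msg);
	} while (cmpxchg(ipi_data_ptr, old, new) != old);
}
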
diff --git a/queue-3.10/arm64-do-not-attempt-to-use-init_mm-in-reset_context.patch b/queue-3.10/arm64-do-not-attempt-to-use-init_mm-in-reset_context.patch
new file mode 100644
index 0000000..3832376
--- /dev/null
+++ b/queue-3.10/arm64-do-not-attempt-to-use-init_mm-in-reset_context.patch
@@ -0,0 +1,39 @@
+From 565630d503ef24e44c252bed55571b3a0d68455f Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Fri, 12 Jun 2015 11:24:41 +0100
+Subject: arm64: Do not attempt to use init_mm in reset_context()
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 565630d503ef24e44c252bed55571b3a0d68455f upstream.
+
+After secondary CPU boot or hotplug, the active_mm of the idle thread is
+&init_mm. The init_mm.pgd (swapper_pg_dir) is only meant for TTBR1_EL1
+and must not be set in TTBR0_EL1. Since when active_mm == &init_mm the
+TTBR0_EL1 is already set to the reserved value, there is no need to
+perform any context reset.
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/context.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/arch/arm64/mm/context.c
++++ b/arch/arm64/mm/context.c
+@@ -92,6 +92,14 @@ static void reset_context(void *info)
+       unsigned int asid;
+       unsigned int cpu = smp_processor_id();
+       struct mm_struct *mm = current->active_mm;
+ 
++      /*
++       * current->active_mm could be init_mm for the idle thread immediately
++       * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to
++       * the reserved value, so no need to reset any context.
++       */
++      if (mm == &init_mm)
++              return;
++
+       smp_rmb();
+       asid = cpu_last_asid + cpu;
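
The "reserved value" relied on above is installed by cpu_set_reserved_ttbr0(), which points TTBR0_EL1 at the empty zero page so that user-half translations always fault while swapper_pg_dir stays confined to TTBR1_EL1. A rough sketch of that helper as it looked in arm64 kernels of this era (simplified; see arch/arm64/include/asm/mmu_context.h for the exact code):

static inline void cpu_set_reserved_ttbr0(void)
{
	/*
	 * The zero page contains no valid table entries, so any walk via
	 * TTBR0 faults.  This is the reserved state that reset_context()
	 * can now rely on when active_mm == &init_mm.
	 */
	unsigned long ttbr = page_to_phys(empty_zero_page);

	asm volatile(
	"	msr	ttbr0_el1, %0	// point TTBR0 at the reserved page\n"
	"	isb"
	: : "r" (ttbr));
}
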
diff --git a/queue-3.10/arm64-mm-fix-freeing-of-the-wrong-memmap-entries-with-sparsemem_vmemmap.patch b/queue-3.10/arm64-mm-fix-freeing-of-the-wrong-memmap-entries-with-sparsemem_vmemmap.patch
new file mode 100644
index 0000000..34c8ba2
--- /dev/null
+++ b/queue-3.10/arm64-mm-fix-freeing-of-the-wrong-memmap-entries-with-sparsemem_vmemmap.patch
@@ -0,0 +1,49 @@
+From b9bcc919931611498e856eae9bf66337330d04cc Mon Sep 17 00:00:00 2001
+From: Dave P Martin <Dave.Martin@arm.com>
+Date: Tue, 16 Jun 2015 17:38:47 +0100
+Subject: arm64: mm: Fix freeing of the wrong memmap entries with !SPARSEMEM_VMEMMAP
+
+From: Dave P Martin <Dave.Martin@arm.com>
+
+commit b9bcc919931611498e856eae9bf66337330d04cc upstream.
+
+The memmap freeing code in free_unused_memmap() computes the end of
+each memblock by adding the memblock size onto the base.  However,
+if SPARSEMEM is enabled then the value (start) used for the base
+may already have been rounded downwards to work out which memmap
+entries to free after the previous memblock.
+
+This may cause memmap entries that are in use to get freed.
+
+In general, you're not likely to hit this problem unless there
+are at least 2 memblocks and one of them is not aligned to a
+sparsemem section boundary.  Note that carve-outs can increase
+the number of memblocks by splitting the regions listed in the
+device tree.
+
+This problem doesn't occur with SPARSEMEM_VMEMMAP, because the
+vmemmap code deals with freeing the unused regions of the memmap
+instead of requiring the arch code to do it.
+
+This patch gets the memblock base out of the memblock directly when
+computing the block end address to ensure the correct value is used.
+
+Signed-off-by: Dave Martin <Dave.Martin@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/mm/init.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm64/mm/init.c
++++ b/arch/arm64/mm/init.c
+@@ -262,7 +262,7 @@ static void __init free_unused_memmap(vo
+                * memmap entries are valid from the bank end aligned to
+                * MAX_ORDER_NR_PAGES.
+                */
+-              prev_end = ALIGN(start + __phys_to_pfn(reg->size),
++              prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
+                                MAX_ORDER_NR_PAGES);
+       }
+ 
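
To make the failure mode concrete, here is a small worked illustration with invented pfn numbers; the addresses and granularities are hypothetical, and only the shape of the arithmetic follows the commit message above.

/*
 * Hypothetical second memblock, !SPARSEMEM_VMEMMAP:
 *   reg->base  -> pfn 0x88100   (not aligned to a sparsemem section)
 *   reg->size  -> 0x300 pages, so the bank really ends at pfn 0x88400
 *
 * The SPARSEMEM rounding applied while freeing the gap after the previous
 * memblock has already pulled 'start' down to the section boundary 0x88000.
 *
 *   old: start + __phys_to_pfn(reg->size)        = 0x88000 + 0x300 = 0x88300
 *   new: __phys_to_pfn(reg->base + reg->size)    = 0x88400
 *
 * The old expression undershoots the true bank end by the 0x100 pages that
 * 'start' was rounded down by, so prev_end (after the MAX_ORDER_NR_PAGES
 * alignment) can land below the real end of the bank, and memmap entries
 * still in use may later be freed.
 */
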
diff --git a/queue-3.10/arm64-vdso-work-around-broken-elf-toolchains-in-makefile.patch b/queue-3.10/arm64-vdso-work-around-broken-elf-toolchains-in-makefile.patch
new file mode 100644
index 0000000..0257ff0
--- /dev/null
+++ b/queue-3.10/arm64-vdso-work-around-broken-elf-toolchains-in-makefile.patch
@@ -0,0 +1,45 @@
+From 6f1a6ae87c0c60d7c462ef8fd071f291aa7a9abb Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 19 Jun 2015 13:56:33 +0100
+Subject: arm64: vdso: work-around broken ELF toolchains in Makefile
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 6f1a6ae87c0c60d7c462ef8fd071f291aa7a9abb upstream.
+
+When building the kernel with a bare-metal (ELF) toolchain, the -shared
+option may not be passed down to collect2, resulting in silent corruption
+of the vDSO image (in particular, the DYNAMIC section is omitted).
+
+The effect of this corruption is that the dynamic linker fails to find
+the vDSO symbols and libc is instead used for the syscalls that we
+intended to optimise (e.g. gettimeofday). Functionally, there is no
+issue as the sigreturn trampoline is still intact and located by the
+kernel.
+
+This patch fixes the problem by explicitly passing -shared to the linker
+when building the vDSO.
+
+Reported-by: Szabolcs Nagy <Szabolcs.Nagy@arm.com>
+Reported-by: James Greenhalgh <james.greenhalgh@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/vdso/Makefile |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/arm64/kernel/vdso/Makefile
++++ b/arch/arm64/kernel/vdso/Makefile
+@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-bu
+ ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \
+               $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
+ 
++# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared
++# down to collect2, resulting in silent corruption of the vDSO image.
++ccflags-y += -Wl,-shared
++
+ obj-y += vdso.o
+ extra-y += vdso.lds vdso-offsets.h
+ CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
diff --git a/queue-3.10/series b/queue-3.10/series
index 14958dfb5da2e41adcf62b864cf2aee54a5b70d5..9bb1d6e3eeb1afbcedf6460012a3fe786694b5ef 100644
--- a/queue-3.10/series
+++ b/queue-3.10/series
@@ -2,3 +2,7 @@ ipr-increase-default-adapter-init-stage-change-timeout.patch
 disable-write-buffering-on-toshiba-topic95.patch
 alsa-hda-add-headset-support-to-acer-aspire-v5.patch
 alsa-hda-fix-the-dock-headphone-output-on-fujitsu-lifebook-e780.patch
+arc-add-compiler-barrier-to-llsc-based-cmpxchg.patch
+arm64-do-not-attempt-to-use-init_mm-in-reset_context.patch
+arm64-mm-fix-freeing-of-the-wrong-memmap-entries-with-sparsemem_vmemmap.patch
+arm64-vdso-work-around-broken-elf-toolchains-in-makefile.patch