From: Greg Kroah-Hartman Date: Fri, 17 Jul 2015 01:02:52 +0000 (-0700) Subject: 3.14-stable patches X-Git-Tag: v4.0.9~10 X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=71a24214e83b5ed57db0c7ee4d8665aeedf0d097;p=thirdparty%2Fkernel%2Fstable-queue.git 3.14-stable patches added patches: arc-add-compiler-barrier-to-llsc-based-cmpxchg.patch arm64-do-not-attempt-to-use-init_mm-in-reset_context.patch arm64-mm-fix-freeing-of-the-wrong-memmap-entries-with-sparsemem_vmemmap.patch arm64-vdso-work-around-broken-elf-toolchains-in-makefile.patch --- diff --git a/queue-3.14/arc-add-compiler-barrier-to-llsc-based-cmpxchg.patch b/queue-3.14/arc-add-compiler-barrier-to-llsc-based-cmpxchg.patch new file mode 100644 index 00000000000..f8c3b292a15 --- /dev/null +++ b/queue-3.14/arc-add-compiler-barrier-to-llsc-based-cmpxchg.patch @@ -0,0 +1,60 @@ +From d57f727264f1425a94689bafc7e99e502cb135b5 Mon Sep 17 00:00:00 2001 +From: Vineet Gupta +Date: Thu, 13 Nov 2014 15:54:01 +0530 +Subject: ARC: add compiler barrier to LLSC based cmpxchg + +From: Vineet Gupta + +commit d57f727264f1425a94689bafc7e99e502cb135b5 upstream. + +When auditing cmpxchg call sites, Chuck noted that gcc was optimizing +away some of the desired LDs. 
+ +| do { +| new = old = *ipi_data_ptr; +| new |= 1U << msg; +| } while (cmpxchg(ipi_data_ptr, old, new) != old); + +was generating to below + +| 8015cef8: ld r2,[r4,0] <-- First LD +| 8015cefc: bset r1,r2,r1 +| +| 8015cf00: llock r3,[r4] <-- atomic op +| 8015cf04: brne r3,r2,8015cf10 +| 8015cf08: scond r1,[r4] +| 8015cf0c: bnz 8015cf00 +| +| 8015cf10: brne r3,r2,8015cf00 <-- Branch doesn't go to orig LD + +Although this was fixed by adding a ACCESS_ONCE in this call site, it +seems safer (for now at least) to add compiler barrier to LLSC based +cmpxchg + +Reported-by: Chuck Jordan +Acked-by: Peter Zijlstra (Intel) +Signed-off-by: Vineet Gupta +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arc/include/asm/cmpxchg.h | 9 +++++---- + 1 file changed, 5 insertions(+), 4 deletions(-) + +--- a/arch/arc/include/asm/cmpxchg.h ++++ b/arch/arc/include/asm/cmpxchg.h +@@ -25,10 +25,11 @@ __cmpxchg(volatile void *ptr, unsigned l + " scond %3, [%1] \n" + " bnz 1b \n" + "2: \n" +- : "=&r"(prev) +- : "r"(ptr), "ir"(expected), +- "r"(new) /* can't be "ir". scond can't take limm for "b" */ +- : "cc"); ++ : "=&r"(prev) /* Early clobber, to prevent reg reuse */ ++ : "r"(ptr), /* Not "m": llock only supports reg direct addr mode */ ++ "ir"(expected), ++ "r"(new) /* can't be "ir". 
scond can't take LIMM for "b" */ ++ : "cc", "memory"); /* so that gcc knows memory is being written here */ + + return prev; + } diff --git a/queue-3.14/arm64-do-not-attempt-to-use-init_mm-in-reset_context.patch b/queue-3.14/arm64-do-not-attempt-to-use-init_mm-in-reset_context.patch new file mode 100644 index 00000000000..38323766cb2 --- /dev/null +++ b/queue-3.14/arm64-do-not-attempt-to-use-init_mm-in-reset_context.patch @@ -0,0 +1,39 @@ +From 565630d503ef24e44c252bed55571b3a0d68455f Mon Sep 17 00:00:00 2001 +From: Catalin Marinas +Date: Fri, 12 Jun 2015 11:24:41 +0100 +Subject: arm64: Do not attempt to use init_mm in reset_context() + +From: Catalin Marinas + +commit 565630d503ef24e44c252bed55571b3a0d68455f upstream. + +After secondary CPU boot or hotplug, the active_mm of the idle thread is +&init_mm. The init_mm.pgd (swapper_pg_dir) is only meant for TTBR1_EL1 +and must not be set in TTBR0_EL1. Since when active_mm == &init_mm the +TTBR0_EL1 is already set to the reserved value, there is no need to +perform any context reset. + +Signed-off-by: Catalin Marinas +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arm64/mm/context.c | 8 ++++++++ + 1 file changed, 8 insertions(+) + +--- a/arch/arm64/mm/context.c ++++ b/arch/arm64/mm/context.c +@@ -92,6 +92,14 @@ static void reset_context(void *info) + unsigned int cpu = smp_processor_id(); + struct mm_struct *mm = current->active_mm; + ++ /* ++ * current->active_mm could be init_mm for the idle thread immediately ++ * after secondary CPU boot or hotplug. TTBR0_EL1 is already set to ++ * the reserved value, so no need to reset any context. 
++ */ ++ if (mm == &init_mm) ++ return; ++ + smp_rmb(); + asid = cpu_last_asid + cpu; + diff --git a/queue-3.14/arm64-mm-fix-freeing-of-the-wrong-memmap-entries-with-sparsemem_vmemmap.patch b/queue-3.14/arm64-mm-fix-freeing-of-the-wrong-memmap-entries-with-sparsemem_vmemmap.patch new file mode 100644 index 00000000000..6673781c6f9 --- /dev/null +++ b/queue-3.14/arm64-mm-fix-freeing-of-the-wrong-memmap-entries-with-sparsemem_vmemmap.patch @@ -0,0 +1,49 @@ +From b9bcc919931611498e856eae9bf66337330d04cc Mon Sep 17 00:00:00 2001 +From: Dave P Martin +Date: Tue, 16 Jun 2015 17:38:47 +0100 +Subject: arm64: mm: Fix freeing of the wrong memmap entries with !SPARSEMEM_VMEMMAP + +From: Dave P Martin + +commit b9bcc919931611498e856eae9bf66337330d04cc upstream. + +The memmap freeing code in free_unused_memmap() computes the end of +each memblock by adding the memblock size onto the base. However, +if SPARSEMEM is enabled then the value (start) used for the base +may already have been rounded downwards to work out which memmap +entries to free after the previous memblock. + +This may cause memmap entries that are in use to get freed. + +In general, you're not likely to hit this problem unless there +are at least 2 memblocks and one of them is not aligned to a +sparsemem section boundary. Note that carve-outs can increase +the number of memblocks by splitting the regions listed in the +device tree. + +This problem doesn't occur with SPARSEMEM_VMEMMAP, because the +vmemmap code deals with freeing the unused regions of the memmap +instead of requiring the arch code to do it. + +This patch gets the memblock base out of the memblock directly when +computing the block end address to ensure the correct value is used. 
+ +Signed-off-by: Dave Martin +Signed-off-by: Catalin Marinas +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arm64/mm/init.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/arch/arm64/mm/init.c ++++ b/arch/arm64/mm/init.c +@@ -243,7 +243,7 @@ static void __init free_unused_memmap(vo + * memmap entries are valid from the bank end aligned to + * MAX_ORDER_NR_PAGES. + */ +- prev_end = ALIGN(start + __phys_to_pfn(reg->size), ++ prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size), + MAX_ORDER_NR_PAGES); + } + diff --git a/queue-3.14/arm64-vdso-work-around-broken-elf-toolchains-in-makefile.patch b/queue-3.14/arm64-vdso-work-around-broken-elf-toolchains-in-makefile.patch new file mode 100644 index 00000000000..0257ff05662 --- /dev/null +++ b/queue-3.14/arm64-vdso-work-around-broken-elf-toolchains-in-makefile.patch @@ -0,0 +1,45 @@ +From 6f1a6ae87c0c60d7c462ef8fd071f291aa7a9abb Mon Sep 17 00:00:00 2001 +From: Will Deacon +Date: Fri, 19 Jun 2015 13:56:33 +0100 +Subject: arm64: vdso: work-around broken ELF toolchains in Makefile + +From: Will Deacon + +commit 6f1a6ae87c0c60d7c462ef8fd071f291aa7a9abb upstream. + +When building the kernel with a bare-metal (ELF) toolchain, the -shared +option may not be passed down to collect2, resulting in silent corruption +of the vDSO image (in particular, the DYNAMIC section is omitted). + +The effect of this corruption is that the dynamic linker fails to find +the vDSO symbols and libc is instead used for the syscalls that we +intended to optimise (e.g. gettimeofday). Functionally, there is no +issue as the sigreturn trampoline is still intact and located by the +kernel. + +This patch fixes the problem by explicitly passing -shared to the linker +when building the vDSO. 
+ +Reported-by: Szabolcs Nagy +Reported-by: James Greenhalgh +Signed-off-by: Will Deacon +Signed-off-by: Catalin Marinas +Signed-off-by: Greg Kroah-Hartman + +--- + arch/arm64/kernel/vdso/Makefile | 4 ++++ + 1 file changed, 4 insertions(+) + +--- a/arch/arm64/kernel/vdso/Makefile ++++ b/arch/arm64/kernel/vdso/Makefile +@@ -15,6 +15,10 @@ ccflags-y := -shared -fno-common -fno-bu + ccflags-y += -nostdlib -Wl,-soname=linux-vdso.so.1 \ + $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + ++# Workaround for bare-metal (ELF) toolchains that neglect to pass -shared ++# down to collect2, resulting in silent corruption of the vDSO image. ++ccflags-y += -Wl,-shared ++ + obj-y += vdso.o + extra-y += vdso.lds vdso-offsets.h + CPPFLAGS_vdso.lds += -P -C -U$(ARCH) diff --git a/queue-3.14/series b/queue-3.14/series index 9b7bd399592..3340f375f7e 100644 --- a/queue-3.14/series +++ b/queue-3.14/series @@ -4,3 +4,7 @@ disable-write-buffering-on-toshiba-topic95.patch alsa-hda-add-headset-support-to-acer-aspire-v5.patch alsa-hda-fix-the-dock-headphone-output-on-fujitsu-lifebook-e780.patch acpi-init-switch-over-platform-to-the-acpi-mode-later.patch +arc-add-compiler-barrier-to-llsc-based-cmpxchg.patch +arm64-do-not-attempt-to-use-init_mm-in-reset_context.patch +arm64-mm-fix-freeing-of-the-wrong-memmap-entries-with-sparsemem_vmemmap.patch +arm64-vdso-work-around-broken-elf-toolchains-in-makefile.patch