git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.0-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 9 Aug 2012 18:03:39 +0000 (11:03 -0700)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 9 Aug 2012 18:03:39 +0000 (11:03 -0700)
added patches:
alsa-hda-remove-quirk-for-dell-vostro-1015.patch
arm-7467-1-mutex-use-generic-xchg-based-implementation-for-armv6.patch
arm-7477-1-vfp-always-save-vfp-state-in-vfp_pm_suspend-on-up.patch
arm-7478-1-errata-extend-workaround-for-erratum-720789.patch
arm-7479-1-mm-avoid-null-dereference-when-flushing-gate_vma-with-vivt-caches.patch

queue-3.0/alsa-hda-remove-quirk-for-dell-vostro-1015.patch [new file with mode: 0644]
queue-3.0/arm-7467-1-mutex-use-generic-xchg-based-implementation-for-armv6.patch [new file with mode: 0644]
queue-3.0/arm-7477-1-vfp-always-save-vfp-state-in-vfp_pm_suspend-on-up.patch [new file with mode: 0644]
queue-3.0/arm-7478-1-errata-extend-workaround-for-erratum-720789.patch [new file with mode: 0644]
queue-3.0/arm-7479-1-mm-avoid-null-dereference-when-flushing-gate_vma-with-vivt-caches.patch [new file with mode: 0644]
queue-3.0/series

diff --git a/queue-3.0/alsa-hda-remove-quirk-for-dell-vostro-1015.patch b/queue-3.0/alsa-hda-remove-quirk-for-dell-vostro-1015.patch
new file mode 100644 (file)
index 0000000..4906235
--- /dev/null
@@ -0,0 +1,30 @@
+From e9fc83cb2e5877801a255a37ddbc5be996ea8046 Mon Sep 17 00:00:00 2001
+From: David Henningsson <david.henningsson@canonical.com>
+Date: Tue, 7 Aug 2012 14:03:29 +0200
+Subject: ALSA: hda - remove quirk for Dell Vostro 1015
+
+From: David Henningsson <david.henningsson@canonical.com>
+
+commit e9fc83cb2e5877801a255a37ddbc5be996ea8046 upstream.
+
+This computer is confirmed working with model=auto on kernel 3.2.
+Also, parsing fails with hda-emu with the current model.
+
+Signed-off-by: David Henningsson <david.henningsson@canonical.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_conexant.c |    1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -3089,7 +3089,6 @@ static const struct snd_pci_quirk cxt506
+       SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
+       SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
+-      SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
+       SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
+       SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
diff --git a/queue-3.0/arm-7467-1-mutex-use-generic-xchg-based-implementation-for-armv6.patch b/queue-3.0/arm-7467-1-mutex-use-generic-xchg-based-implementation-for-armv6.patch
new file mode 100644 (file)
index 0000000..8067dd6
--- /dev/null
@@ -0,0 +1,162 @@
+From a76d7bd96d65fa5119adba97e1b58d95f2e78829 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 13 Jul 2012 19:15:40 +0100
+Subject: ARM: 7467/1: mutex: use generic xchg-based implementation for ARMv6+
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit a76d7bd96d65fa5119adba97e1b58d95f2e78829 upstream.
+
+The open-coded mutex implementation for ARMv6+ cores suffers from a
+severe lack of barriers, so in the uncontended case we don't actually
+protect any accesses performed during the critical section.
+
+Furthermore, the code is largely a duplication of the ARMv6+ atomic_dec
+code but optimised to remove a branch instruction, as the mutex fastpath
+was previously inlined. Now that this is executed out-of-line, we can
+reuse the atomic access code for the locking (in fact, we use the xchg
+code as this produces shorter critical sections).
+
+This patch uses the generic xchg based implementation for mutexes on
+ARMv6+, which introduces barriers to the lock/unlock operations and also
+has the benefit of removing a fair amount of inline assembly code.
+
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Nicolas Pitre <nico@linaro.org>
+Reported-by: Shan Kang <kangshan0910@gmail.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/mutex.h |  119 +------------------------------------------
+ 1 file changed, 4 insertions(+), 115 deletions(-)
+
+--- a/arch/arm/include/asm/mutex.h
++++ b/arch/arm/include/asm/mutex.h
+@@ -7,121 +7,10 @@
+  */
+ #ifndef _ASM_MUTEX_H
+ #define _ASM_MUTEX_H
+-
+-#if __LINUX_ARM_ARCH__ < 6
+-/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
+-# include <asm-generic/mutex-xchg.h>
+-#else
+-
+ /*
+- * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
+- * atomic decrement (it is not a reliable atomic decrement but it satisfies
+- * the defined semantics for our purpose, while being smaller and faster
+- * than a real atomic decrement or atomic swap.  The idea is to attempt
+- * decrementing the lock value only once.  If once decremented it isn't zero,
+- * or if its store-back fails due to a dispute on the exclusive store, we
+- * simply bail out immediately through the slow path where the lock will be
+- * reattempted until it succeeds.
++ * On pre-ARMv6 hardware this results in a swp-based implementation,
++ * which is the most efficient. For ARMv6+, we emit a pair of exclusive
++ * accesses instead.
+  */
+-static inline void
+-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+-{
+-      int __ex_flag, __res;
+-
+-      __asm__ (
+-
+-              "ldrex  %0, [%2]        \n\t"
+-              "sub    %0, %0, #1      \n\t"
+-              "strex  %1, %0, [%2]    "
+-
+-              : "=&r" (__res), "=&r" (__ex_flag)
+-              : "r" (&(count)->counter)
+-              : "cc","memory" );
+-
+-      __res |= __ex_flag;
+-      if (unlikely(__res != 0))
+-              fail_fn(count);
+-}
+-
+-static inline int
+-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+-{
+-      int __ex_flag, __res;
+-
+-      __asm__ (
+-
+-              "ldrex  %0, [%2]        \n\t"
+-              "sub    %0, %0, #1      \n\t"
+-              "strex  %1, %0, [%2]    "
+-
+-              : "=&r" (__res), "=&r" (__ex_flag)
+-              : "r" (&(count)->counter)
+-              : "cc","memory" );
+-
+-      __res |= __ex_flag;
+-      if (unlikely(__res != 0))
+-              __res = fail_fn(count);
+-      return __res;
+-}
+-
+-/*
+- * Same trick is used for the unlock fast path. However the original value,
+- * rather than the result, is used to test for success in order to have
+- * better generated assembly.
+- */
+-static inline void
+-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+-{
+-      int __ex_flag, __res, __orig;
+-
+-      __asm__ (
+-
+-              "ldrex  %0, [%3]        \n\t"
+-              "add    %1, %0, #1      \n\t"
+-              "strex  %2, %1, [%3]    "
+-
+-              : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+-              : "r" (&(count)->counter)
+-              : "cc","memory" );
+-
+-      __orig |= __ex_flag;
+-      if (unlikely(__orig != 0))
+-              fail_fn(count);
+-}
+-
+-/*
+- * If the unlock was done on a contended lock, or if the unlock simply fails
+- * then the mutex remains locked.
+- */
+-#define __mutex_slowpath_needs_to_unlock()    1
+-
+-/*
+- * For __mutex_fastpath_trylock we use another construct which could be
+- * described as a "single value cmpxchg".
+- *
+- * This provides the needed trylock semantics like cmpxchg would, but it is
+- * lighter and less generic than a true cmpxchg implementation.
+- */
+-static inline int
+-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+-{
+-      int __ex_flag, __res, __orig;
+-
+-      __asm__ (
+-
+-              "1: ldrex       %0, [%3]        \n\t"
+-              "subs           %1, %0, #1      \n\t"
+-              "strexeq        %2, %1, [%3]    \n\t"
+-              "movlt          %0, #0          \n\t"
+-              "cmpeq          %2, #0          \n\t"
+-              "bgt            1b              "
+-
+-              : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+-              : "r" (&count->counter)
+-              : "cc", "memory" );
+-
+-      return __orig;
+-}
+-
+-#endif
++#include <asm-generic/mutex-xchg.h>
+ #endif
diff --git a/queue-3.0/arm-7477-1-vfp-always-save-vfp-state-in-vfp_pm_suspend-on-up.patch b/queue-3.0/arm-7477-1-vfp-always-save-vfp-state-in-vfp_pm_suspend-on-up.patch
new file mode 100644 (file)
index 0000000..cc91423
--- /dev/null
@@ -0,0 +1,55 @@
+From 24b35521b8ddf088531258f06f681bb7b227bf47 Mon Sep 17 00:00:00 2001
+From: Colin Cross <ccross@android.com>
+Date: Fri, 20 Jul 2012 02:03:42 +0100
+Subject: ARM: 7477/1: vfp: Always save VFP state in vfp_pm_suspend on UP
+
+From: Colin Cross <ccross@android.com>
+
+commit 24b35521b8ddf088531258f06f681bb7b227bf47 upstream.
+
+vfp_pm_suspend should save the VFP state in suspend after
+any lazy context switch.  If it only saves when the VFP is enabled,
+the state can get lost when, on a UP system:
+  Thread 1 uses the VFP
+  Context switch occurs to thread 2, VFP is disabled but the
+     VFP context is not saved
+  Thread 2 initiates suspend
+  vfp_pm_suspend is called with the VFP disabled, and the unsaved
+     VFP context of Thread 1 in the registers
+
+Modify vfp_pm_suspend to save the VFP context whenever
+vfp_current_hw_state is not NULL.
+
+Includes a fix from Ido Yariv <ido@wizery.com>, who pointed out that on
+SMP systems, the state pointer can be pointing to a freed task struct if
+a task exited on another cpu, fixed by using #ifndef CONFIG_SMP in the
+new if clause.
+
+Signed-off-by: Colin Cross <ccross@android.com>
+Cc: Barry Song <bs14@csr.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Ido Yariv <ido@wizery.com>
+Cc: Daniel Drake <dsd@laptop.org>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/vfp/vfpmodule.c |    6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -412,6 +412,12 @@ static int vfp_pm_suspend(void)
+               /* disable, just in case */
+               fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
++      } else if (vfp_current_hw_state[ti->cpu]) {
++#ifndef CONFIG_SMP
++              fmxr(FPEXC, fpexc | FPEXC_EN);
++              vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
++              fmxr(FPEXC, fpexc);
++#endif
+       }
+       /* clear any information we had about last context state */
diff --git a/queue-3.0/arm-7478-1-errata-extend-workaround-for-erratum-720789.patch b/queue-3.0/arm-7478-1-errata-extend-workaround-for-erratum-720789.patch
new file mode 100644 (file)
index 0000000..25c881c
--- /dev/null
@@ -0,0 +1,59 @@
+From 5a783cbc48367cfc7b65afc75430953dfe60098f Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 20 Jul 2012 18:24:55 +0100
+Subject: ARM: 7478/1: errata: extend workaround for erratum #720789
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 5a783cbc48367cfc7b65afc75430953dfe60098f upstream.
+
+Commit cdf357f1 ("ARM: 6299/1: errata: TLBIASIDIS and TLBIMVAIS
+operations can broadcast a faulty ASID") replaced by-ASID TLB flushing
+operations with all-ASID variants to workaround A9 erratum #720789.
+
+This patch extends the workaround to include the tlb_range operations,
+which were overlooked by the original patch.
+
+Tested-by: Steve Capper <steve.capper@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mm/tlb-v7.S |   12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/arm/mm/tlb-v7.S
++++ b/arch/arm/mm/tlb-v7.S
+@@ -38,11 +38,19 @@ ENTRY(v7wbi_flush_user_tlb_range)
+       dsb
+       mov     r0, r0, lsr #PAGE_SHIFT         @ align address
+       mov     r1, r1, lsr #PAGE_SHIFT
++#ifdef CONFIG_ARM_ERRATA_720789
++      mov     r3, #0
++#else
+       asid    r3, r3                          @ mask ASID
++#endif
+       orr     r0, r3, r0, lsl #PAGE_SHIFT     @ Create initial MVA
+       mov     r1, r1, lsl #PAGE_SHIFT
+ 1:
++#ifdef CONFIG_ARM_ERRATA_720789
++      ALT_SMP(mcr     p15, 0, r0, c8, c3, 3)  @ TLB invalidate U MVA all ASID (shareable)
++#else
+       ALT_SMP(mcr     p15, 0, r0, c8, c3, 1)  @ TLB invalidate U MVA (shareable)
++#endif
+       ALT_UP(mcr      p15, 0, r0, c8, c7, 1)  @ TLB invalidate U MVA
+       add     r0, r0, #PAGE_SZ
+@@ -70,7 +78,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
+       mov     r0, r0, lsl #PAGE_SHIFT
+       mov     r1, r1, lsl #PAGE_SHIFT
+ 1:
++#ifdef CONFIG_ARM_ERRATA_720789
++      ALT_SMP(mcr     p15, 0, r0, c8, c3, 3)  @ TLB invalidate U MVA all ASID (shareable)
++#else
+       ALT_SMP(mcr     p15, 0, r0, c8, c3, 1)  @ TLB invalidate U MVA (shareable)
++#endif
+       ALT_UP(mcr      p15, 0, r0, c8, c7, 1)  @ TLB invalidate U MVA
+       add     r0, r0, #PAGE_SZ
+       cmp     r0, r1
diff --git a/queue-3.0/arm-7479-1-mm-avoid-null-dereference-when-flushing-gate_vma-with-vivt-caches.patch b/queue-3.0/arm-7479-1-mm-avoid-null-dereference-when-flushing-gate_vma-with-vivt-caches.patch
new file mode 100644 (file)
index 0000000..2bbfc18
--- /dev/null
@@ -0,0 +1,56 @@
+From b74253f78400f9a4b42da84bb1de7540b88ce7c4 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 23 Jul 2012 14:18:13 +0100
+Subject: ARM: 7479/1: mm: avoid NULL dereference when flushing gate_vma with VIVT caches
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit b74253f78400f9a4b42da84bb1de7540b88ce7c4 upstream.
+
+The vivt_flush_cache_{range,page} functions check that the mm_struct
+of the VMA being flushed has been active on the current CPU before
+performing the cache maintenance.
+
+The gate_vma has a NULL mm_struct pointer and, as such, will cause a
+kernel fault if we try to flush it with the above operations. This
+happens during ELF core dumps, which include the gate_vma as it may be
+useful for debugging purposes.
+
+This patch adds checks to the VIVT cache flushing functions so that VMAs
+with a NULL mm_struct are flushed unconditionally (the vectors page may
+be dirty if we use it to store the current TLS pointer).
+
+Reported-by: Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+Tested-by: Uros Bizjak <ubizjak@gmail.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/cacheflush.h |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(s
+ static inline void
+ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+ {
+-      if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
++      struct mm_struct *mm = vma->vm_mm;
++
++      if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+               __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
+                                       vma->vm_flags);
+ }
+@@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_st
+ static inline void
+ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+ {
+-      if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
++      struct mm_struct *mm = vma->vm_mm;
++
++      if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
+               unsigned long addr = user_addr & PAGE_MASK;
+               __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
+       }
index 8a6666ac3bf81c70d2c87612036fae817042efb7..181586bda9ae94d9cfd88e1e1dd923dd19e60627 100644 (file)
@@ -3,3 +3,8 @@ sunrpc-return-negative-value-in-case-rpcbind-client-creation-error.patch
 nilfs2-fix-deadlock-issue-between-chcp-and-thaw-ioctls.patch
 pcdp-use-early_ioremap-early_iounmap-to-access-pcdp-table.patch
 mm-fix-wrong-argument-of-migrate_huge_pages-in-soft_offline_huge_page.patch
+arm-7467-1-mutex-use-generic-xchg-based-implementation-for-armv6.patch
+arm-7477-1-vfp-always-save-vfp-state-in-vfp_pm_suspend-on-up.patch
+arm-7478-1-errata-extend-workaround-for-erratum-720789.patch
+arm-7479-1-mm-avoid-null-dereference-when-flushing-gate_vma-with-vivt-caches.patch
+alsa-hda-remove-quirk-for-dell-vostro-1015.patch