--- /dev/null
+From 4407be6ba217514b1bc01488f8b56467d309e416 Mon Sep 17 00:00:00 2001
+From: "Philipp A. Mohrenweiser" <phiamo@googlemail.com>
+Date: Mon, 6 Aug 2012 13:14:18 +0200
+Subject: ALSA: hda - add dock support for Thinkpad T430s
+
+From: "Philipp A. Mohrenweiser" <phiamo@googlemail.com>
+
+commit 4407be6ba217514b1bc01488f8b56467d309e416 upstream.
+
+Add a model/fixup string "lenovo-dock" for the Thinkpad T430s to allow
+sound in the docking station.
+
+Tested on Lenovo T430s with ThinkPad Mini Dock Plus Series 3
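+
+For reference, the new model string also lets the fixup be forced from
+userspace via the driver's model option, e.g. in a modprobe.d snippet
+(the file name varies by distribution):
+
+  options snd-hda-intel model=lenovo-dock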
+
+Signed-off-by: Philipp A. Mohrenweiser <phiamo@googlemail.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6157,6 +6157,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
--- /dev/null
+From c8415a48fcb7a29889f4405d38c57db351e4b50a Mon Sep 17 00:00:00 2001
+From: Felix Kaechele <felix@fetzig.org>
+Date: Mon, 6 Aug 2012 23:02:01 +0200
+Subject: ALSA: hda - add dock support for Thinkpad X230
+
+From: Felix Kaechele <felix@fetzig.org>
+
+commit c8415a48fcb7a29889f4405d38c57db351e4b50a upstream.
+
+As with the ThinkPad models X230 Tablet and T530, the X230 needs a quirk
+to correctly set up the pins for the dock port.
+
+Signed-off-by: Felix Kaechele <felix@fetzig.org>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6157,6 +6157,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
--- /dev/null
+From 012e7eb1e501d0120e0383b81477f63091f5e365 Mon Sep 17 00:00:00 2001
+From: David Henningsson <david.henningsson@canonical.com>
+Date: Wed, 8 Aug 2012 08:43:37 +0200
+Subject: ALSA: hda - Fix double quirk for Quanta FL1 / Lenovo Ideapad
+
+From: David Henningsson <david.henningsson@canonical.com>
+
+commit 012e7eb1e501d0120e0383b81477f63091f5e365 upstream.
+
+The same PCI ID appears twice in the quirk table, so the second entry is
+never used.
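+
+Conceptually, applying a chained fixup follows the chain link, so the
+single remaining quirk entry now triggers both fixups. A sketch of the
+walk (not the driver's exact code):
+
+  const struct alc_fixup *fix = &alc269_fixups[ALC269_FIXUP_PCM_44K];
+
+  while (fix) {
+          if (fix->type == ALC_FIXUP_FUNC && fix->v.func)
+                  fix->v.func(codec, fix, ALC_FIXUP_ACT_PRE_PROBE);
+          fix = fix->chained ? &alc269_fixups[fix->chain_id] : NULL;
+  }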
+
+Signed-off-by: David Henningsson <david.henningsson@canonical.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_realtek.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -6056,6 +6056,8 @@ static const struct alc_fixup alc269_fix
+ [ALC269_FIXUP_PCM_44K] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc269_fixup_pcm_44k,
++ .chained = true,
++ .chain_id = ALC269_FIXUP_QUANTA_MUTE
+ },
+ [ALC269_FIXUP_STEREO_DMIC] = {
+ .type = ALC_FIXUP_FUNC,
+@@ -6160,8 +6162,7 @@ static const struct snd_pci_quirk alc269
+ SND_PCI_QUIRK(0x17aa, 0x21fa, "Thinkpad X230", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x21fb, "Thinkpad T430s", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
+- SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
++ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+
+ #if 0
--- /dev/null
+From e9fc83cb2e5877801a255a37ddbc5be996ea8046 Mon Sep 17 00:00:00 2001
+From: David Henningsson <david.henningsson@canonical.com>
+Date: Tue, 7 Aug 2012 14:03:29 +0200
+Subject: ALSA: hda - remove quirk for Dell Vostro 1015
+
+From: David Henningsson <david.henningsson@canonical.com>
+
+commit e9fc83cb2e5877801a255a37ddbc5be996ea8046 upstream.
+
+This computer is confirmed to work with model=auto on kernel 3.2.
+Also, parsing with hda-emu fails with the current model.
+
+Signed-off-by: David Henningsson <david.henningsson@canonical.com>
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/pci/hda/patch_conexant.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+--- a/sound/pci/hda/patch_conexant.c
++++ b/sound/pci/hda/patch_conexant.c
+@@ -2975,7 +2975,6 @@ static const struct snd_pci_quirk cxt506
+ SND_PCI_QUIRK(0x1028, 0x02d8, "Dell Vostro", CXT5066_DELL_VOSTRO),
+ SND_PCI_QUIRK(0x1028, 0x02f5, "Dell Vostro 320", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x0401, "Dell Vostro 1014", CXT5066_DELL_VOSTRO),
+- SND_PCI_QUIRK(0x1028, 0x0402, "Dell Vostro", CXT5066_DELL_VOSTRO),
+ SND_PCI_QUIRK(0x1028, 0x0408, "Dell Inspiron One 19T", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x050f, "Dell Inspiron", CXT5066_IDEAPAD),
+ SND_PCI_QUIRK(0x1028, 0x0510, "Dell Vostro", CXT5066_IDEAPAD),
--- /dev/null
+From 98bd8b96b26db3399a48202318dca4aaa2515355 Mon Sep 17 00:00:00 2001
+From: Shawn Guo <shawn.guo@linaro.org>
+Date: Fri, 13 Jul 2012 08:19:34 +0100
+Subject: ARM: 7466/1: disable interrupt before spinning endlessly
+
+From: Shawn Guo <shawn.guo@linaro.org>
+
+commit 98bd8b96b26db3399a48202318dca4aaa2515355 upstream.
+
+The CPU will spin endlessly at the end of the machine_halt and
+machine_restart calls. However, this leads to a soft-lockup
+warning after about 20 seconds, if CONFIG_LOCKUP_DETECTOR is enabled,
+as the system timer is still alive.
+
+Disable interrupts before spinning endlessly, so that the lockup
+warning is never seen.
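+
+The resulting tail of machine_halt() is effectively (a sketch, not the
+literal function body):
+
+  machine_shutdown();
+  local_irq_disable();    /* timer interrupts stop firing here ...       */
+  while (1)
+          ;               /* ... so the soft-lockup detector stays quiet */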
+
+Reported-by: Marek Vasut <marex@denx.de>
+Signed-off-by: Shawn Guo <shawn.guo@linaro.org>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/process.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/arm/kernel/process.c
++++ b/arch/arm/kernel/process.c
+@@ -267,6 +267,7 @@ void machine_shutdown(void)
+ void machine_halt(void)
+ {
+ machine_shutdown();
++ local_irq_disable();
+ while (1);
+ }
+
+@@ -288,6 +289,7 @@ void machine_restart(char *cmd)
+
+ /* Whoops - the platform was unable to reboot. Tell the user! */
+ printk("Reboot failed -- System halted\n");
++ local_irq_disable();
+ while (1);
+ }
+
--- /dev/null
+From a76d7bd96d65fa5119adba97e1b58d95f2e78829 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 13 Jul 2012 19:15:40 +0100
+Subject: ARM: 7467/1: mutex: use generic xchg-based implementation for ARMv6+
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit a76d7bd96d65fa5119adba97e1b58d95f2e78829 upstream.
+
+The open-coded mutex implementation for ARMv6+ cores suffers from a
+severe lack of barriers, so in the uncontended case we don't actually
+protect any accesses performed during the critical section.
+
+Furthermore, the code is largely a duplication of the ARMv6+ atomic_dec
+code but optimised to remove a branch instruction, as the mutex fastpath
+was previously inlined. Now that this is executed out-of-line, we can
+reuse the atomic access code for the locking (in fact, we use the xchg
+code as this produces shorter critical sections).
+
+This patch uses the generic xchg based implementation for mutexes on
+ARMv6+, which introduces barriers to the lock/unlock operations and also
+has the benefit of removing a fair amount of inline assembly code.
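+
+For reference, the generic lock fastpath being switched to looks
+roughly like this (a sketch of asm-generic/mutex-xchg.h as of this
+era):
+
+  static inline void
+  __mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+  {
+          if (unlikely(atomic_xchg(count, 0) != 1))
+                  fail_fn(count);    /* contended: take the slow path */
+  }
+
+On ARMv6+, atomic_xchg() implies the memory barriers that the
+open-coded ldrex/strex sequences below lacked.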
+
+Acked-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Nicolas Pitre <nico@linaro.org>
+Reported-by: Shan Kang <kangshan0910@gmail.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/mutex.h | 119 +------------------------------------------
+ 1 file changed, 4 insertions(+), 115 deletions(-)
+
+--- a/arch/arm/include/asm/mutex.h
++++ b/arch/arm/include/asm/mutex.h
+@@ -7,121 +7,10 @@
+ */
+ #ifndef _ASM_MUTEX_H
+ #define _ASM_MUTEX_H
+-
+-#if __LINUX_ARM_ARCH__ < 6
+-/* On pre-ARMv6 hardware the swp based implementation is the most efficient. */
+-# include <asm-generic/mutex-xchg.h>
+-#else
+-
+ /*
+- * Attempting to lock a mutex on ARMv6+ can be done with a bastardized
+- * atomic decrement (it is not a reliable atomic decrement but it satisfies
+- * the defined semantics for our purpose, while being smaller and faster
+- * than a real atomic decrement or atomic swap. The idea is to attempt
+- * decrementing the lock value only once. If once decremented it isn't zero,
+- * or if its store-back fails due to a dispute on the exclusive store, we
+- * simply bail out immediately through the slow path where the lock will be
+- * reattempted until it succeeds.
++ * On pre-ARMv6 hardware this results in a swp-based implementation,
++ * which is the most efficient. For ARMv6+, we emit a pair of exclusive
++ * accesses instead.
+ */
+-static inline void
+-__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res;
+-
+- __asm__ (
+-
+- "ldrex %0, [%2] \n\t"
+- "sub %0, %0, #1 \n\t"
+- "strex %1, %0, [%2] "
+-
+- : "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&(count)->counter)
+- : "cc","memory" );
+-
+- __res |= __ex_flag;
+- if (unlikely(__res != 0))
+- fail_fn(count);
+-}
+-
+-static inline int
+-__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res;
+-
+- __asm__ (
+-
+- "ldrex %0, [%2] \n\t"
+- "sub %0, %0, #1 \n\t"
+- "strex %1, %0, [%2] "
+-
+- : "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&(count)->counter)
+- : "cc","memory" );
+-
+- __res |= __ex_flag;
+- if (unlikely(__res != 0))
+- __res = fail_fn(count);
+- return __res;
+-}
+-
+-/*
+- * Same trick is used for the unlock fast path. However the original value,
+- * rather than the result, is used to test for success in order to have
+- * better generated assembly.
+- */
+-static inline void
+-__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res, __orig;
+-
+- __asm__ (
+-
+- "ldrex %0, [%3] \n\t"
+- "add %1, %0, #1 \n\t"
+- "strex %2, %1, [%3] "
+-
+- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&(count)->counter)
+- : "cc","memory" );
+-
+- __orig |= __ex_flag;
+- if (unlikely(__orig != 0))
+- fail_fn(count);
+-}
+-
+-/*
+- * If the unlock was done on a contended lock, or if the unlock simply fails
+- * then the mutex remains locked.
+- */
+-#define __mutex_slowpath_needs_to_unlock() 1
+-
+-/*
+- * For __mutex_fastpath_trylock we use another construct which could be
+- * described as a "single value cmpxchg".
+- *
+- * This provides the needed trylock semantics like cmpxchg would, but it is
+- * lighter and less generic than a true cmpxchg implementation.
+- */
+-static inline int
+-__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
+-{
+- int __ex_flag, __res, __orig;
+-
+- __asm__ (
+-
+- "1: ldrex %0, [%3] \n\t"
+- "subs %1, %0, #1 \n\t"
+- "strexeq %2, %1, [%3] \n\t"
+- "movlt %0, #0 \n\t"
+- "cmpeq %2, #0 \n\t"
+- "bgt 1b "
+-
+- : "=&r" (__orig), "=&r" (__res), "=&r" (__ex_flag)
+- : "r" (&count->counter)
+- : "cc", "memory" );
+-
+- return __orig;
+-}
+-
+-#endif
++#include <asm-generic/mutex-xchg.h>
+ #endif
--- /dev/null
+From a84b895a2348f0dbff31b71ddf954f70a6cde368 Mon Sep 17 00:00:00 2001
+From: Colin Cross <ccross@android.com>
+Date: Fri, 20 Jul 2012 02:03:43 +0100
+Subject: ARM: 7476/1: vfp: only clear vfp state for current cpu in vfp_pm_suspend
+
+From: Colin Cross <ccross@android.com>
+
+commit a84b895a2348f0dbff31b71ddf954f70a6cde368 upstream.
+
+vfp_pm_suspend runs on each CPU, so clear the hardware state
+pointer only for the current CPU. This prevents a possible crash
+if one CPU clears the hw state pointer while another CPU has
+already checked that it is valid.
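+
+One possible interleaving (illustrative, not a captured trace):
+
+  CPU0: vfp_pm_suspend()        CPU1: vfp_pm_suspend()
+                                  sees vfp_current_hw_state[1] != NULL
+  memset() wipes all entries
+                                  dereferences the now-NULL entry -> crash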
+
+Signed-off-by: Colin Cross <ccross@android.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/vfp/vfpmodule.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -460,7 +460,7 @@ static int vfp_pm_suspend(void)
+ }
+
+ /* clear any information we had about last context state */
+- memset(vfp_current_hw_state, 0, sizeof(vfp_current_hw_state));
++ vfp_current_hw_state[ti->cpu] = NULL;
+
+ return 0;
+ }
--- /dev/null
+From 24b35521b8ddf088531258f06f681bb7b227bf47 Mon Sep 17 00:00:00 2001
+From: Colin Cross <ccross@android.com>
+Date: Fri, 20 Jul 2012 02:03:42 +0100
+Subject: ARM: 7477/1: vfp: Always save VFP state in vfp_pm_suspend on UP
+
+From: Colin Cross <ccross@android.com>
+
+commit 24b35521b8ddf088531258f06f681bb7b227bf47 upstream.
+
+vfp_pm_suspend should save the VFP state in suspend after
+any lazy context switch. If it only saves when the VFP is enabled,
+the state can get lost when, on a UP system:
+ Thread 1 uses the VFP
+ Context switch occurs to thread 2, VFP is disabled but the
+ VFP context is not saved
+ Thread 2 initiates suspend
+ vfp_pm_suspend is called with the VFP disabled, and the unsaved
+ VFP context of Thread 1 in the registers
+
+Modify vfp_pm_suspend to save the VFP context whenever
+vfp_current_hw_state is not NULL.
+
+This includes a fix from Ido Yariv <ido@wizery.com>, who pointed out
+that on SMP systems the state pointer can point to a freed task struct
+if a task exited on another CPU; that case is avoided by guarding the
+new if clause with #ifndef CONFIG_SMP.
+
+Signed-off-by: Colin Cross <ccross@android.com>
+Cc: Barry Song <bs14@csr.com>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: Ido Yariv <ido@wizery.com>
+Cc: Daniel Drake <dsd@laptop.org>
+Cc: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/vfp/vfpmodule.c | 6 ++++++
+ 1 file changed, 6 insertions(+)
+
+--- a/arch/arm/vfp/vfpmodule.c
++++ b/arch/arm/vfp/vfpmodule.c
+@@ -457,6 +457,12 @@ static int vfp_pm_suspend(void)
+
+ /* disable, just in case */
+ fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
++ } else if (vfp_current_hw_state[ti->cpu]) {
++#ifndef CONFIG_SMP
++ fmxr(FPEXC, fpexc | FPEXC_EN);
++ vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
++ fmxr(FPEXC, fpexc);
++#endif
+ }
+
+ /* clear any information we had about last context state */
--- /dev/null
+From 5a783cbc48367cfc7b65afc75430953dfe60098f Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Fri, 20 Jul 2012 18:24:55 +0100
+Subject: ARM: 7478/1: errata: extend workaround for erratum #720789
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit 5a783cbc48367cfc7b65afc75430953dfe60098f upstream.
+
+Commit cdf357f1 ("ARM: 6299/1: errata: TLBIASIDIS and TLBIMVAIS
+operations can broadcast a faulty ASID") replaced by-ASID TLB flushing
+operations with all-ASID variants to workaround A9 erratum #720789.
+
+This patch extends the workaround to include the tlb_range operations,
+which were overlooked by the original patch.
+
+Tested-by: Steve Capper <steve.capper@arm.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mm/tlb-v7.S | 12 ++++++++++++
+ 1 file changed, 12 insertions(+)
+
+--- a/arch/arm/mm/tlb-v7.S
++++ b/arch/arm/mm/tlb-v7.S
+@@ -38,11 +38,19 @@ ENTRY(v7wbi_flush_user_tlb_range)
+ dsb
+ mov r0, r0, lsr #PAGE_SHIFT @ align address
+ mov r1, r1, lsr #PAGE_SHIFT
++#ifdef CONFIG_ARM_ERRATA_720789
++ mov r3, #0
++#else
+ asid r3, r3 @ mask ASID
++#endif
+ orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
+ mov r1, r1, lsl #PAGE_SHIFT
+ 1:
++#ifdef CONFIG_ARM_ERRATA_720789
++ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
++#else
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
++#endif
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
+
+ add r0, r0, #PAGE_SZ
+@@ -67,7 +75,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
+ mov r0, r0, lsl #PAGE_SHIFT
+ mov r1, r1, lsl #PAGE_SHIFT
+ 1:
++#ifdef CONFIG_ARM_ERRATA_720789
++ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
++#else
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
++#endif
+ ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
+ add r0, r0, #PAGE_SZ
+ cmp r0, r1
--- /dev/null
+From b74253f78400f9a4b42da84bb1de7540b88ce7c4 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Mon, 23 Jul 2012 14:18:13 +0100
+Subject: ARM: 7479/1: mm: avoid NULL dereference when flushing gate_vma with VIVT caches
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit b74253f78400f9a4b42da84bb1de7540b88ce7c4 upstream.
+
+The vivt_flush_cache_{range,page} functions check that the mm_struct
+of the VMA being flushed has been active on the current CPU before
+performing the cache maintenance.
+
+The gate_vma has a NULL mm_struct pointer and, as such, will cause a
+kernel fault if we try to flush it with the above operations. This
+happens during ELF core dumps, which include the gate_vma as it may be
+useful for debugging purposes.
+
+This patch adds checks to the VIVT cache flushing functions so that VMAs
+with a NULL mm_struct are flushed unconditionally (the vectors page may
+be dirty if we use it to store the current TLS pointer).
+
+Reported-by: Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+Tested-by: Uros Bizjak <ubizjak@gmail.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/include/asm/cacheflush.h | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/arch/arm/include/asm/cacheflush.h
++++ b/arch/arm/include/asm/cacheflush.h
+@@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(s
+ static inline void
+ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+ {
+- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
++ struct mm_struct *mm = vma->vm_mm;
++
++ if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
+ __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
+ vma->vm_flags);
+ }
+@@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_st
+ static inline void
+ vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+ {
+- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
++ struct mm_struct *mm = vma->vm_mm;
++
++ if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
+ unsigned long addr = user_addr & PAGE_MASK;
+ __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
+ }
--- /dev/null
+From c5dff4ffd327088d85035bec535b7d0c9ea03151 Mon Sep 17 00:00:00 2001
+From: Javier Martinez Canillas <javier@dowhile0.org>
+Date: Sat, 28 Jul 2012 15:19:55 +0100
+Subject: ARM: 7480/1: only call smp_send_stop() on SMP
+
+From: Javier Martinez Canillas <javier@dowhile0.org>
+
+commit c5dff4ffd327088d85035bec535b7d0c9ea03151 upstream.
+
+On reboot or poweroff (machine_shutdown()), a call to smp_send_stop() is
+made (to stop the other CPUs) when CONFIG_SMP=y.
+
+arch/arm/kernel/process.c:
+
+void machine_shutdown(void)
+{
+ #ifdef CONFIG_SMP
+ smp_send_stop();
+ #endif
+}
+
+smp_send_stop() calls the function pointer smp_cross_call(), which is set
+on the smp_init_cpus() function for OMAP processors.
+
+arch/arm/mach-omap2/omap-smp.c:
+
+void __init smp_init_cpus(void)
+{
+...
+ set_smp_cross_call(gic_raise_softirq);
+...
+}
+
+But the ARM setup_arch() function only calls smp_init_cpus()
+if CONFIG_SMP=y && is_smp().
+
+arm/kernel/setup.c:
+
+void __init setup_arch(char **cmdline_p)
+{
+...
+ #ifdef CONFIG_SMP
+ if (is_smp())
+ smp_init_cpus();
+ #endif
+...
+}
+
+Newer OMAP CPUs are SMP machines, so omap2plus_defconfig sets
+CONFIG_SMP=y. Unfortunately, on an OMAP UP machine is_smp()
+returns false, so smp_init_cpus() is never called and the
+smp_cross_call() function pointer remains NULL.
+
+If the machine is rebooted or powered off, smp_send_stop() will
+be called (since CONFIG_SMP=y) leading to the following error:
+
+[ 42.815551] Restarting system.
+[ 42.819030] Unable to handle kernel NULL pointer dereference at virtual address 00000000
+[ 42.827667] pgd = d7a74000
+[ 42.830566] [00000000] *pgd=96ce7831, *pte=00000000, *ppte=00000000
+[ 42.837249] Internal error: Oops: 80000007 [#1] SMP ARM
+[ 42.842773] Modules linked in:
+[ 42.846008] CPU: 0 Not tainted (3.5.0-rc3-next-20120622-00002-g62e87ba-dirty #44)
+[ 42.854278] PC is at 0x0
+[ 42.856994] LR is at smp_send_stop+0x4c/0xe4
+[ 42.861511] pc : [<00000000>] lr : [<c00183a4>] psr: 60000013
+[ 42.861511] sp : d6c85e70 ip : 00000000 fp : 00000000
+[ 42.873626] r10: 00000000 r9 : d6c84000 r8 : 00000002
+[ 42.879150] r7 : c07235a0 r6 : c06dd2d0 r5 : 000f4241 r4 : d6c85e74
+[ 42.886047] r3 : 00000000 r2 : 00000000 r1 : 00000006 r0 : d6c85e74
+[ 42.892944] Flags: nZCv IRQs on FIQs on Mode SVC_32 ISA ARM Segment user
+[ 42.900482] Control: 10c5387d Table: 97a74019 DAC: 00000015
+[ 42.906555] Process reboot (pid: 1166, stack limit = 0xd6c842f8)
+[ 42.912902] Stack: (0xd6c85e70 to 0xd6c86000)
+[ 42.917510] 5e60: c07235a0 00000000 00000000 d6c84000
+[ 42.926177] 5e80: 01234567 c00143d0 4321fedc c00511bc d6c85ebc 00000168 00000460 00000000
+[ 42.934814] 5ea0: c1017950 a0000013 c1017900 d8014390 d7ec3858 c0498e48 c1017950 00000000
+[ 42.943481] 5ec0: d6ddde10 d6c85f78 00000003 00000000 d6ddde10 d6c84000 00000000 00000000
+[ 42.952117] 5ee0: 00000002 00000000 00000000 c0088c88 00000002 00000000 00000000 c00f4b90
+[ 42.960784] 5f00: 00000000 d6c85ebc d8014390 d7e311c8 60000013 00000103 00000002 d6c84000
+[ 42.969421] 5f20: c00f3274 d6e00a00 00000001 60000013 d6c84000 00000000 00000000 c00895d4
+[ 42.978057] 5f40: 00000002 d8007c80 d781f000 c00f6150 d8010cc0 c00f3274 d781f000 d6c84000
+[ 42.986694] 5f60: c0013020 d6e00a00 00000001 20000010 0001257c ef000000 00000000 c00895d4
+[ 42.995361] 5f80: 00000002 00000001 00000003 00000000 00000001 00000003 00000000 00000058
+[ 43.003997] 5fa0: c00130c8 c0012f00 00000001 00000003 fee1dead 28121969 01234567 00000002
+[ 43.012634] 5fc0: 00000001 00000003 00000000 00000058 00012584 0001257c 00000001 00000000
+[ 43.021270] 5fe0: 000124bc bec5cc6c 00008f9c 4a2f7c40 20000010 fee1dead 00000000 00000000
+[ 43.029968] [<c00183a4>] (smp_send_stop+0x4c/0xe4) from [<c00143d0>] (machine_restart+0xc/0x4c)
+[ 43.039154] [<c00143d0>] (machine_restart+0xc/0x4c) from [<c00511bc>] (sys_reboot+0x144/0x1f0)
+[ 43.048278] [<c00511bc>] (sys_reboot+0x144/0x1f0) from [<c0012f00>] (ret_fast_syscall+0x0/0x3c)
+[ 43.057464] Code: bad PC value
+[ 43.060760] ---[ end trace c3988d1dd0b8f0fb ]---
+
+Add a check so smp_cross_call() is only called when there is more than one CPU online.
+
+Signed-off-by: Javier Martinez Canillas <javier@dowhile0.org>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/smp.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm/kernel/smp.c
++++ b/arch/arm/kernel/smp.c
+@@ -590,7 +590,8 @@ void smp_send_stop(void)
+
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+- smp_cross_call(&mask, IPI_CPU_STOP);
++ if (!cpumask_empty(&mask))
++ smp_cross_call(&mask, IPI_CPU_STOP);
+
+ /* Wait up to one second for other CPUs to stop */
+ timeout = USEC_PER_SEC;
--- /dev/null
+From 15ac49b65024f55c4371a53214879a9c77c4fbf9 Mon Sep 17 00:00:00 2001
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+Date: Mon, 30 Jul 2012 19:42:10 +0100
+Subject: ARM: Fix undefined instruction exception handling
+
+From: Russell King <rmk+kernel@arm.linux.org.uk>
+
+commit 15ac49b65024f55c4371a53214879a9c77c4fbf9 upstream.
+
+While trying to get a v3.5 kernel booted on the cubox, I noticed that
+VFP does not work correctly with VFP bounce handling. This is because
+of the confusion over 16-bit vs 32-bit instructions, and where the PC
+is supposed to point.
+
+The rule is that FP handlers are entered with regs->ARM_pc pointing at
+the _next_ instruction to be executed. However, if the exception is
+not handled, regs->ARM_pc points at the faulting instruction.
+
+This is easy for ARM mode, because we know that the next and previous
+instructions are always separated by four bytes. This is not true of
+Thumb-2, though.
+
+Since all FP instructions are 32-bit in Thumb-2, this makes things easy.
+We just need to select the appropriate adjustment. Do this by moving
+the adjustment out of do_undefinstr() into the assembly code, as only
+the assembly code knows whether it's dealing with a 32-bit or 16-bit
+instruction.
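+
+A sketch of the correction now applied in assembly (__und_fault gets
+the adjustment in r1):
+
+  /* On entry regs->ARM_pc points at the *next* instruction. */
+  if (!thumb_mode(regs))
+          regs->ARM_pc -= 4;      /* ARM: always 4 bytes back      */
+  else if (is_wide_instruction(instr))
+          regs->ARM_pc -= 4;      /* 32-bit Thumb-2 (all FP insns) */
+  else
+          regs->ARM_pc -= 2;      /* 16-bit Thumb                  */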
+
+Acked-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/kernel/entry-armv.S | 111 +++++++++++++++++++++++++++----------------
+ arch/arm/kernel/traps.c | 8 ---
+ arch/arm/vfp/entry.S | 16 +++---
+ arch/arm/vfp/vfphw.S | 19 ++++---
+ 4 files changed, 92 insertions(+), 62 deletions(-)
+
+--- a/arch/arm/kernel/entry-armv.S
++++ b/arch/arm/kernel/entry-armv.S
+@@ -244,6 +244,19 @@ svc_preempt:
+ b 1b
+ #endif
+
++__und_fault:
++ @ Correct the PC such that it is pointing at the instruction
++ @ which caused the fault. If the faulting instruction was ARM
++ @ the PC will be pointing at the next instruction, and have to
++ @ subtract 4. Otherwise, it is Thumb, and the PC will be
++ @ pointing at the second half of the Thumb instruction. We
++ @ have to subtract 2.
++ ldr r2, [r0, #S_PC]
++ sub r2, r2, r1
++ str r2, [r0, #S_PC]
++ b do_undefinstr
++ENDPROC(__und_fault)
++
+ .align 5
+ __und_svc:
+ #ifdef CONFIG_KPROBES
+@@ -261,25 +274,32 @@ __und_svc:
+ @
+ @ r0 - instruction
+ @
+-#ifndef CONFIG_THUMB2_KERNEL
++#ifndef CONFIG_THUMB2_KERNEL
+ ldr r0, [r4, #-4]
+ #else
++ mov r1, #2
+ ldrh r0, [r4, #-2] @ Thumb instruction at LR - 2
+ cmp r0, #0xe800 @ 32-bit instruction if xx >= 0
+- ldrhhs r9, [r4] @ bottom 16 bits
+- orrhs r0, r9, r0, lsl #16
++ blo __und_svc_fault
++ ldrh r9, [r4] @ bottom 16 bits
++ add r4, r4, #2
++ str r4, [sp, #S_PC]
++ orr r0, r9, r0, lsl #16
+ #endif
+- adr r9, BSYM(1f)
++ adr r9, BSYM(__und_svc_finish)
+ mov r2, r4
+ bl call_fpe
+
++ mov r1, #4 @ PC correction to apply
++__und_svc_fault:
+ mov r0, sp @ struct pt_regs *regs
+- bl do_undefinstr
++ bl __und_fault
+
+ @
+ @ IRQs off again before pulling preserved data off the stack
+ @
+-1: disable_irq_notrace
++__und_svc_finish:
++ disable_irq_notrace
+
+ @
+ @ restore SPSR and restart the instruction
+@@ -423,25 +443,33 @@ __und_usr:
+ mov r2, r4
+ mov r3, r5
+
++ @ r2 = regs->ARM_pc, which is either 2 or 4 bytes ahead of the
++ @ faulting instruction depending on Thumb mode.
++ @ r3 = regs->ARM_cpsr
+ @
+- @ fall through to the emulation code, which returns using r9 if
+- @ it has emulated the instruction, or the more conventional lr
+- @ if we are to treat this as a real undefined instruction
+- @
+- @ r0 - instruction
++ @ The emulation code returns using r9 if it has emulated the
++ @ instruction, or the more conventional lr if we are to treat
++ @ this as a real undefined instruction
+ @
+ adr r9, BSYM(ret_from_exception)
+- adr lr, BSYM(__und_usr_unknown)
++
+ tst r3, #PSR_T_BIT @ Thumb mode?
+- itet eq @ explicit IT needed for the 1f label
+- subeq r4, r2, #4 @ ARM instr at LR - 4
+- subne r4, r2, #2 @ Thumb instr at LR - 2
+-1: ldreqt r0, [r4]
++ bne __und_usr_thumb
++ sub r4, r2, #4 @ ARM instr at LR - 4
++1: ldrt r0, [r4]
+ #ifdef CONFIG_CPU_ENDIAN_BE8
+- reveq r0, r0 @ little endian instruction
++ rev r0, r0 @ little endian instruction
+ #endif
+- beq call_fpe
++ @ r0 = 32-bit ARM instruction which caused the exception
++ @ r2 = PC value for the following instruction (:= regs->ARM_pc)
++ @ r4 = PC value for the faulting instruction
++ @ lr = 32-bit undefined instruction function
++ adr lr, BSYM(__und_usr_fault_32)
++ b call_fpe
++
++__und_usr_thumb:
+ @ Thumb instruction
++ sub r4, r2, #2 @ First half of thumb instr at LR - 2
+ #if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
+ /*
+ * Thumb-2 instruction handling. Note that because pre-v6 and >= v6 platforms
+@@ -455,7 +483,7 @@ __und_usr:
+ ldr r5, .LCcpu_architecture
+ ldr r5, [r5]
+ cmp r5, #CPU_ARCH_ARMv7
+- blo __und_usr_unknown
++ blo __und_usr_fault_16 @ 16bit undefined instruction
+ /*
+ * The following code won't get run unless the running CPU really is v7, so
+ * coding round the lack of ldrht on older arches is pointless. Temporarily
+@@ -463,15 +491,18 @@ __und_usr:
+ */
+ .arch armv6t2
+ #endif
+-2:
+- ARM( ldrht r5, [r4], #2 )
+- THUMB( ldrht r5, [r4] )
+- THUMB( add r4, r4, #2 )
++2: ldrht r5, [r4]
+ cmp r5, #0xe800 @ 32bit instruction if xx != 0
+- blo __und_usr_unknown
+-3: ldrht r0, [r4]
++ blo __und_usr_fault_16 @ 16bit undefined instruction
++3: ldrht r0, [r2]
+ add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
++ str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
+ orr r0, r0, r5, lsl #16
++ adr lr, BSYM(__und_usr_fault_32)
++ @ r0 = the two 16-bit Thumb instructions which caused the exception
++ @ r2 = PC value for the following Thumb instruction (:= regs->ARM_pc)
++ @ r4 = PC value for the first 16-bit Thumb instruction
++ @ lr = 32bit undefined instruction function
+
+ #if __LINUX_ARM_ARCH__ < 7
+ /* If the target arch was overridden, change it back: */
+@@ -482,17 +513,13 @@ __und_usr:
+ #endif
+ #endif /* __LINUX_ARM_ARCH__ < 7 */
+ #else /* !(CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7) */
+- b __und_usr_unknown
++ b __und_usr_fault_16
+ #endif
+- UNWIND(.fnend )
++ UNWIND(.fnend)
+ ENDPROC(__und_usr)
+
+- @
+- @ fallthrough to call_fpe
+- @
+-
+ /*
+- * The out of line fixup for the ldrt above.
++ * The out of line fixup for the ldrt instructions above.
+ */
+ .pushsection .fixup, "ax"
+ 4: mov pc, r9
+@@ -523,11 +550,12 @@ ENDPROC(__und_usr)
+ * NEON handler code.
+ *
+ * Emulators may wish to make use of the following registers:
+- * r0 = instruction opcode.
+- * r2 = PC+4
++ * r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
++ * r2 = PC value to resume execution after successful emulation
+ * r9 = normal "successful" return address
+- * r10 = this threads thread_info structure.
++ * r10 = this threads thread_info structure
+ * lr = unrecognised instruction return address
++ * IRQs disabled, FIQs enabled.
+ */
+ @
+ @ Fall-through from Thumb-2 __und_usr
+@@ -662,12 +690,17 @@ ENTRY(no_fp)
+ mov pc, lr
+ ENDPROC(no_fp)
+
+-__und_usr_unknown:
+- enable_irq
++__und_usr_fault_32:
++ mov r1, #4
++ b 1f
++__und_usr_fault_16:
++ mov r1, #2
++1: enable_irq
+ mov r0, sp
+ adr lr, BSYM(ret_from_exception)
+- b do_undefinstr
+-ENDPROC(__und_usr_unknown)
++ b __und_fault
++ENDPROC(__und_usr_fault_32)
++ENDPROC(__und_usr_fault_16)
+
+ .align 5
+ __pabt_usr:
+--- a/arch/arm/kernel/traps.c
++++ b/arch/arm/kernel/traps.c
+@@ -370,18 +370,10 @@ static int call_undef_hook(struct pt_reg
+
+ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
+ {
+- unsigned int correction = thumb_mode(regs) ? 2 : 4;
+ unsigned int instr;
+ siginfo_t info;
+ void __user *pc;
+
+- /*
+- * According to the ARM ARM, PC is 2 or 4 bytes ahead,
+- * depending whether we're in Thumb mode or not.
+- * Correct this offset.
+- */
+- regs->ARM_pc -= correction;
+-
+ pc = (void __user *)instruction_pointer(regs);
+
+ if (processor_mode(regs) == SVC_MODE) {
+--- a/arch/arm/vfp/entry.S
++++ b/arch/arm/vfp/entry.S
+@@ -7,18 +7,20 @@
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+- *
+- * Basic entry code, called from the kernel's undefined instruction trap.
+- * r0 = faulted instruction
+- * r5 = faulted PC+4
+- * r9 = successful return
+- * r10 = thread_info structure
+- * lr = failure return
+ */
+ #include <asm/thread_info.h>
+ #include <asm/vfpmacros.h>
+ #include "../kernel/entry-header.S"
+
++@ VFP entry point.
++@
++@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
++@ r2 = PC value to resume execution after successful emulation
++@ r9 = normal "successful" return address
++@ r10 = this threads thread_info structure
++@ lr = unrecognised instruction return address
++@ IRQs disabled.
++@
+ ENTRY(do_vfp)
+ #ifdef CONFIG_PREEMPT
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
+--- a/arch/arm/vfp/vfphw.S
++++ b/arch/arm/vfp/vfphw.S
+@@ -61,13 +61,13 @@
+
+ @ VFP hardware support entry point.
+ @
+-@ r0 = faulted instruction
+-@ r2 = faulted PC+4
+-@ r9 = successful return
++@ r0 = instruction opcode (32-bit ARM or two 16-bit Thumb)
++@ r2 = PC value to resume execution after successful emulation
++@ r9 = normal "successful" return address
+ @ r10 = vfp_state union
+ @ r11 = CPU number
+-@ lr = failure return
+-
++@ lr = unrecognised instruction return address
++@ IRQs enabled.
+ ENTRY(vfp_support_entry)
+ DBGSTR3 "instr %08x pc %08x state %p", r0, r2, r10
+
+@@ -161,9 +161,12 @@ vfp_hw_state_valid:
+ @ exception before retrying branch
+ @ out before setting an FPEXC that
+ @ stops us reading stuff
+- VFPFMXR FPEXC, r1 @ restore FPEXC last
+- sub r2, r2, #4
+- str r2, [sp, #S_PC] @ retry the instruction
++ VFPFMXR FPEXC, r1 @ Restore FPEXC last
++ sub r2, r2, #4 @ Retry current instruction - if Thumb
++ str r2, [sp, #S_PC] @ mode it's two 16-bit instructions,
++ @ else it's one 32-bit instruction, so
++ @ always subtract 4 from the following
++ @ instruction address.
+ #ifdef CONFIG_PREEMPT
+ get_thread_info r10
+ ldr r4, [r10, #TI_PREEMPT] @ get preempt count
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
-@@ -1431,8 +1431,8 @@ static int soft_offline_huge_page(struct
+@@ -1433,8 +1433,8 @@ static int soft_offline_huge_page(struct
/* Keep page count to indicate a given hugepage is isolated. */
list_add(&hpage->lru, &pagelist);
if (ret) {
struct page *page1, *page2;
list_for_each_entry_safe(page1, page2, &pagelist, lru)
-@@ -1561,7 +1561,7 @@ int soft_offline_page(struct page *page,
+@@ -1563,7 +1563,7 @@ int soft_offline_page(struct page *page,
page_is_file_cache(page));
list_add(&page->lru, &pagelist);
ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
media-ene_ir-fix-driver-initialisation.patch
pcdp-use-early_ioremap-early_iounmap-to-access-pcdp-table.patch
mm-fix-wrong-argument-of-migrate_huge_pages-in-soft_offline_huge_page.patch
+arm-7466-1-disable-interrupt-before-spinning-endlessly.patch
+arm-7467-1-mutex-use-generic-xchg-based-implementation-for-armv6.patch
+arm-7476-1-vfp-only-clear-vfp-state-for-current-cpu-in-vfp_pm_suspend.patch
+arm-7477-1-vfp-always-save-vfp-state-in-vfp_pm_suspend-on-up.patch
+arm-7478-1-errata-extend-workaround-for-erratum-720789.patch
+arm-7479-1-mm-avoid-null-dereference-when-flushing-gate_vma-with-vivt-caches.patch
+arm-7480-1-only-call-smp_send_stop-on-smp.patch
+arm-fix-undefined-instruction-exception-handling.patch
+alsa-hda-add-dock-support-for-thinkpad-t430s.patch
+alsa-hda-add-dock-support-for-thinkpad-x230.patch
+alsa-hda-remove-quirk-for-dell-vostro-1015.patch
+alsa-hda-fix-double-quirk-for-quanta-fl1-lenovo-ideapad.patch