--- /dev/null
+From e306dfd06fcb44d21c80acb8e5a88d55f3d1cf63 Mon Sep 17 00:00:00 2001
+From: Olof Johansson <olof@lixom.net>
+Date: Fri, 14 Feb 2014 19:35:15 +0000
+Subject: ARM64: unwind: Fix PC calculation
+
+From: Olof Johansson <olof@lixom.net>
+
+commit e306dfd06fcb44d21c80acb8e5a88d55f3d1cf63 upstream.
+
+The frame PC value in the unwind code used to just take the saved LR
+value and use that. That's incorrect as a stack trace, since it shows
+the return path stack, not the call path stack.
+
+In particular, it shows faulty information in case the bl is done as
+the very last instruction of one label, since the return point will be
+in the next label. That can easily be seen with tail calls to panic(),
+which is marked __noreturn and thus doesn't have anything useful after it.
+
+Easiest here is to just correct the unwind code and do a -4, to get the
+actual call site for the backtrace instead of the return site.
+
+Signed-off-by: Olof Johansson <olof@lixom.net>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/stacktrace.c | 6 +++++-
+ 1 file changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/kernel/stacktrace.c
++++ b/arch/arm64/kernel/stacktrace.c
+@@ -48,7 +48,11 @@ int unwind_frame(struct stackframe *fram
+
+ frame->sp = fp + 0x10;
+ frame->fp = *(unsigned long *)(fp);
+- frame->pc = *(unsigned long *)(fp + 8);
++ /*
++ * -4 here because we care about the PC at time of bl,
++ * not where the return will go.
++ */
++ frame->pc = *(unsigned long *)(fp + 8) - 4;
+
+ return 0;
+ }
--- /dev/null
+From c4204960e9d0ba99459dbf1db918f99a45e7a62a Mon Sep 17 00:00:00 2001
+From: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
+Date: Tue, 18 Feb 2014 15:22:12 +0000
+Subject: Input - arizona-haptics: Fix double lock of dapm_mutex
+
+From: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
+
+commit c4204960e9d0ba99459dbf1db918f99a45e7a62a upstream.
+
+snd_soc_dapm_sync takes the dapm_mutex internally, but we currently take
+it externally as well. This patch fixes this.
+
+Signed-off-by: Charles Keepax <ckeepax@opensource.wolfsonmicro.com>
+Signed-off-by: Mark Brown <broonie@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/input/misc/arizona-haptics.c | 11 ++++-------
+ 1 file changed, 4 insertions(+), 7 deletions(-)
+
+--- a/drivers/input/misc/arizona-haptics.c
++++ b/drivers/input/misc/arizona-haptics.c
+@@ -77,16 +77,14 @@ static void arizona_haptics_work(struct
+ return;
+ }
+
++ mutex_unlock(dapm_mutex);
++
+ ret = snd_soc_dapm_sync(arizona->dapm);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
+ ret);
+- mutex_unlock(dapm_mutex);
+ return;
+ }
+-
+- mutex_unlock(dapm_mutex);
+-
+ } else {
+ /* This disable sequence will be a noop if already enabled */
+ mutex_lock_nested(dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
+@@ -99,16 +97,15 @@ static void arizona_haptics_work(struct
+ return;
+ }
+
++ mutex_unlock(dapm_mutex);
++
+ ret = snd_soc_dapm_sync(arizona->dapm);
+ if (ret != 0) {
+ dev_err(arizona->dev, "Failed to sync DAPM: %d\n",
+ ret);
+- mutex_unlock(dapm_mutex);
+ return;
+ }
+
+- mutex_unlock(dapm_mutex);
+-
+ ret = regmap_update_bits(arizona->regmap,
+ ARIZONA_HAPTICS_CONTROL_1,
+ ARIZONA_HAP_CTRL_MASK,
--- /dev/null
+From f229006ec6beabf7b844653d92fa61f025fe3dcf Mon Sep 17 00:00:00 2001
+From: James Hogan <james.hogan@imgtec.com>
+Date: Tue, 25 Feb 2014 22:05:35 +0000
+Subject: irq-metag*: stop set_affinity vectoring to offline cpus
+
+From: James Hogan <james.hogan@imgtec.com>
+
+commit f229006ec6beabf7b844653d92fa61f025fe3dcf upstream.
+
+Fix irq_set_affinity callbacks in the Meta IRQ chip drivers to AND
+cpu_online_mask into the cpumask when picking a CPU to vector the
+interrupt to.
+
+As Thomas pointed out, the /proc/irq/$N/smp_affinity interface doesn't
+filter out offline CPUs, so without this patch if you offline CPU0 and
+set an IRQ affinity to 0x3 it vectors the interrupt onto CPU0 even
+though it is offline.
+
+Reported-by: Thomas Gleixner <tglx@linutronix.de>
+Signed-off-by: James Hogan <james.hogan@imgtec.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: linux-metag@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/irqchip/irq-metag-ext.c | 2 +-
+ drivers/irqchip/irq-metag.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/irqchip/irq-metag-ext.c
++++ b/drivers/irqchip/irq-metag-ext.c
+@@ -515,7 +515,7 @@ static int meta_intc_set_affinity(struct
+ * one cpu (the interrupt code doesn't support it), so we just
+ * pick the first cpu we find in 'cpumask'.
+ */
+- cpu = cpumask_any(cpumask);
++ cpu = cpumask_any_and(cpumask, cpu_online_mask);
+ thread = cpu_2_hwthread_id[cpu];
+
+ metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR2(thread)), vec_addr);
+--- a/drivers/irqchip/irq-metag.c
++++ b/drivers/irqchip/irq-metag.c
+@@ -201,7 +201,7 @@ static int metag_internal_irq_set_affini
+ * one cpu (the interrupt code doesn't support it), so we just
+ * pick the first cpu we find in 'cpumask'.
+ */
+- cpu = cpumask_any(cpumask);
++ cpu = cpumask_any_and(cpumask, cpu_online_mask);
+ thread = cpu_2_hwthread_id[cpu];
+
+ metag_out32(TBI_TRIG_VEC(TBID_SIGNUM_TR1(thread)),
--- /dev/null
+From 9845cbbd113fbb5b769a45d8e88dc47bc12df4e0 Mon Sep 17 00:00:00 2001
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+Date: Tue, 25 Feb 2014 15:01:42 -0800
+Subject: mm, thp: fix infinite loop on memcg OOM
+
+From: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
+
+commit 9845cbbd113fbb5b769a45d8e88dc47bc12df4e0 upstream.
+
+Masayoshi Mizuma reported a bug with the hang of an application under
+the memcg limit. It happens on a write-protection fault to the huge zero page.
+
+If we successfully allocate a huge page to replace zero page but hit the
+memcg limit we need to split the zero page with split_huge_page_pmd()
+and fallback to small pages.
+
+The other part of the problem is that VM_FAULT_OOM has special meaning
+in do_huge_pmd_wp_page() context. __handle_mm_fault() expects the page
+to be split if it sees VM_FAULT_OOM and it will retry page fault
+handling. This causes an infinite loop if the page was not split.
+
+do_huge_pmd_wp_zero_page_fallback() can return VM_FAULT_OOM if it failed
+to allocate one small page, so fallback to small pages will not help.
+
+The solution for this part is to replace VM_FAULT_OOM with
+VM_FAULT_FALLBACK if fallback is required.
+
+Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
+Reported-by: Masayoshi Mizuma <m.mizuma@jp.fujitsu.com>
+Reviewed-by: Michal Hocko <mhocko@suse.cz>
+Cc: Johannes Weiner <hannes@cmpxchg.org>
+Cc: Andrea Arcangeli <aarcange@redhat.com>
+Cc: David Rientjes <rientjes@google.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/huge_memory.c | 9 ++++++---
+ mm/memory.c | 14 +++-----------
+ 2 files changed, 9 insertions(+), 14 deletions(-)
+
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1160,8 +1160,10 @@ alloc:
+ } else {
+ ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
+ pmd, orig_pmd, page, haddr);
+- if (ret & VM_FAULT_OOM)
++ if (ret & VM_FAULT_OOM) {
+ split_huge_page(page);
++ ret |= VM_FAULT_FALLBACK;
++ }
+ put_page(page);
+ }
+ count_vm_event(THP_FAULT_FALLBACK);
+@@ -1173,9 +1175,10 @@ alloc:
+ if (page) {
+ split_huge_page(page);
+ put_page(page);
+- }
++ } else
++ split_huge_page_pmd(vma, address, pmd);
++ ret |= VM_FAULT_FALLBACK;
+ count_vm_event(THP_FAULT_FALLBACK);
+- ret |= VM_FAULT_OOM;
+ goto out;
+ }
+
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -3700,7 +3700,6 @@ static int __handle_mm_fault(struct mm_s
+ if (unlikely(is_vm_hugetlb_page(vma)))
+ return hugetlb_fault(mm, vma, address, flags);
+
+-retry:
+ pgd = pgd_offset(mm, address);
+ pud = pud_alloc(mm, pgd, address);
+ if (!pud)
+@@ -3738,20 +3737,13 @@ retry:
+ if (dirty && !pmd_write(orig_pmd)) {
+ ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
+ orig_pmd);
+- /*
+- * If COW results in an oom, the huge pmd will
+- * have been split, so retry the fault on the
+- * pte for a smaller charge.
+- */
+- if (unlikely(ret & VM_FAULT_OOM))
+- goto retry;
+- return ret;
++ if (!(ret & VM_FAULT_FALLBACK))
++ return ret;
+ } else {
+ huge_pmd_set_accessed(mm, vma, address, pmd,
+ orig_pmd, dirty);
++ return 0;
+ }
+-
+- return 0;
+ }
+ }
+
ioat-fix-tasklet-tear-down.patch
quota-fix-race-between-dqput-and-dquot_scan_active.patch
ipc-mqueue-remove-limits-for-the-amount-of-system-wide.patch
+input-arizona-haptics-fix-double-lock-of-dapm_mutex.patch
+mm-thp-fix-infinite-loop-on-memcg-oom.patch
+irq-metag-stop-set_affinity-vectoring-to-offline-cpus.patch
+arm64-unwind-fix-pc-calculation.patch