--- /dev/null
+From 263782c1c95bbddbb022dc092fd89a36bb8d5577 Mon Sep 17 00:00:00 2001
+From: Benjamin LaHaise <bcrl@kvack.org>
+Date: Mon, 14 Jul 2014 12:49:26 -0400
+Subject: aio: protect reqs_available updates from changes in interrupt handlers
+
+From: Benjamin LaHaise <bcrl@kvack.org>
+
+commit 263782c1c95bbddbb022dc092fd89a36bb8d5577 upstream.
+
+As of commit f8567a3845ac05bb28f3c1b478ef752762bd39ef it is now possible to
+have put_reqs_available() called from irq context. While put_reqs_available()
+is per cpu, it did not protect itself from interrupts on the same CPU. This
+led to aio_complete() corrupting the available io requests count when run
+under heavy O_DIRECT workloads, as reported by Robert Elliott. Fix this by
+disabling irqs around the per-cpu batch updates of reqs_available.
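+
+To illustrate the race (a minimal sketch of the old put_reqs_available()
+path, not the literal patch below):
+
+  preempt_disable();
+  kcpu = this_cpu_ptr(ctx->cpu);
+  /* The += below is a read-modify-write:
+   *   tmp = kcpu->reqs_available;      <- irq fires here; aio_complete()
+   *                                       calls put_reqs_available() and
+   *                                       updates the same counter
+   *   kcpu->reqs_available = tmp + nr; <- the irq's update is overwritten
+   */
+  kcpu->reqs_available += nr;
+  preempt_enable();
+
+Masking interrupts across the update (local_irq_save()/local_irq_restore(),
+as in the hunks below) closes that window; disabling preemption alone only
+keeps the task on the CPU, it does not keep interrupt handlers off it.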
+
+Many thanks to Robert and folks for testing and tracking this down.
+
+Reported-by: Robert Elliott <Elliott@hp.com>
+Tested-by: Robert Elliott <Elliott@hp.com>
+Signed-off-by: Benjamin LaHaise <bcrl@kvack.org>
+Cc: Jens Axboe <axboe@kernel.dk>, Christoph Hellwig <hch@infradead.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/aio.c | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+--- a/fs/aio.c
++++ b/fs/aio.c
+@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
+ static void put_reqs_available(struct kioctx *ctx, unsigned nr)
+ {
+ struct kioctx_cpu *kcpu;
++ unsigned long flags;
+
+ preempt_disable();
+ kcpu = this_cpu_ptr(ctx->cpu);
+
++ local_irq_save(flags);
+ kcpu->reqs_available += nr;
++
+ while (kcpu->reqs_available >= ctx->req_batch * 2) {
+ kcpu->reqs_available -= ctx->req_batch;
+ atomic_add(ctx->req_batch, &ctx->reqs_available);
+ }
+
++ local_irq_restore(flags);
+ preempt_enable();
+ }
+
+@@ -847,10 +851,12 @@ static bool get_reqs_available(struct ki
+ {
+ struct kioctx_cpu *kcpu;
+ bool ret = false;
++ unsigned long flags;
+
+ preempt_disable();
+ kcpu = this_cpu_ptr(ctx->cpu);
+
++ local_irq_save(flags);
+ if (!kcpu->reqs_available) {
+ int old, avail = atomic_read(&ctx->reqs_available);
+
+@@ -869,6 +875,7 @@ static bool get_reqs_available(struct ki
+ ret = true;
+ kcpu->reqs_available--;
+ out:
++ local_irq_restore(flags);
+ preempt_enable();
+ return ret;
+ }
--- /dev/null
+From a4b6cb735b25aa84a462a1985e3e43bebaf5beb4 Mon Sep 17 00:00:00 2001
+From: Anton Kolesov <Anton.Kolesov@synopsys.com>
+Date: Fri, 20 Jun 2014 20:28:39 +0400
+Subject: ARC: Implement ptrace(PTRACE_GET_THREAD_AREA)
+
+From: Anton Kolesov <Anton.Kolesov@synopsys.com>
+
+commit a4b6cb735b25aa84a462a1985e3e43bebaf5beb4 upstream.
+
+This patch adds an implementation of the PTRACE_GET_THREAD_AREA ptrace
+request type, which is required by GDB to debug NPTL applications.
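+
+For illustration, a tracer would use the new request roughly like this (a
+hedged userspace sketch, assuming the tracee is already ptrace-stopped;
+PTRACE_GET_THREAD_AREA is the value 25 added to the uapi header below):
+
+  #include <sys/ptrace.h>
+  #include <sys/types.h>
+
+  #ifndef PTRACE_GET_THREAD_AREA
+  #define PTRACE_GET_THREAD_AREA 25  /* from asm/ptrace.h on ARC */
+  #endif
+
+  /* Fetch the tracee's TLS pointer (thr_ptr); the kernel writes it
+   * through the data argument with put_user(). */
+  static long get_tls(pid_t pid, unsigned long *tls)
+  {
+          return ptrace((enum __ptrace_request)PTRACE_GET_THREAD_AREA,
+                        pid, NULL, tls);
+  }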
+
+Signed-off-by: Anton Kolesov <Anton.Kolesov@synopsys.com>
+Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arc/include/uapi/asm/ptrace.h | 1 +
+ arch/arc/kernel/ptrace.c | 4 ++++
+ 2 files changed, 5 insertions(+)
+
+--- a/arch/arc/include/uapi/asm/ptrace.h
++++ b/arch/arc/include/uapi/asm/ptrace.h
+@@ -11,6 +11,7 @@
+ #ifndef _UAPI__ASM_ARC_PTRACE_H
+ #define _UAPI__ASM_ARC_PTRACE_H
+
++#define PTRACE_GET_THREAD_AREA 25
+
+ #ifndef __ASSEMBLY__
+ /*
+--- a/arch/arc/kernel/ptrace.c
++++ b/arch/arc/kernel/ptrace.c
+@@ -146,6 +146,10 @@ long arch_ptrace(struct task_struct *chi
+ pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
+
+ switch (request) {
++ case PTRACE_GET_THREAD_AREA:
++ ret = put_user(task_thread_info(child)->thr_ptr,
++ (unsigned long __user *)data);
++ break;
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
--- /dev/null
+From 22970070e027cbbb9b2878f8f7c31d0d7f29e94d Mon Sep 17 00:00:00 2001
+From: Marek Vasut <marex@denx.de>
+Date: Fri, 28 Feb 2014 12:58:41 +0100
+Subject: ARM: dts: imx: Add alias for ethernet controller
+
+From: Marek Vasut <marex@denx.de>
+
+commit 22970070e027cbbb9b2878f8f7c31d0d7f29e94d upstream.
+
+Add an alias for the FEC ethernet controller on i.MX so that bootloaders
+(like U-Boot) can patch in the MAC address for the FEC using this alias.
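+
+A bootloader can resolve such an alias with libfdt along these lines (a
+hedged sketch of the general technique, not U-Boot's actual fixup code):
+
+  #include <libfdt.h>
+
+  /* Follow /aliases/ethernet0 to the FEC node and write the MAC there. */
+  static int fixup_mac(void *fdt, const uint8_t mac[6])
+  {
+          const char *path = fdt_get_alias(fdt, "ethernet0");
+          int node;
+
+          if (!path)
+                  return -FDT_ERR_NOTFOUND;
+          node = fdt_path_offset(fdt, path);
+          if (node < 0)
+                  return node;
+          return fdt_setprop(fdt, node, "local-mac-address", mac, 6);
+  }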
+
+Signed-off-by: Marek Vasut <marex@denx.de>
+Signed-off-by: Shawn Guo <shawn.guo@linaro.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/boot/dts/imx25.dtsi | 1 +
+ arch/arm/boot/dts/imx27.dtsi | 1 +
+ arch/arm/boot/dts/imx51.dtsi | 1 +
+ arch/arm/boot/dts/imx53.dtsi | 1 +
+ 4 files changed, 4 insertions(+)
+
+--- a/arch/arm/boot/dts/imx25.dtsi
++++ b/arch/arm/boot/dts/imx25.dtsi
+@@ -30,6 +30,7 @@
+ spi2 = &spi3;
+ usb0 = &usbotg;
+ usb1 = &usbhost1;
++ ethernet0 = &fec;
+ };
+
+ cpus {
+--- a/arch/arm/boot/dts/imx27.dtsi
++++ b/arch/arm/boot/dts/imx27.dtsi
+@@ -30,6 +30,7 @@
+ spi0 = &cspi1;
+ spi1 = &cspi2;
+ spi2 = &cspi3;
++ ethernet0 = &fec;
+ };
+
+ aitc: aitc-interrupt-controller@e0000000 {
+--- a/arch/arm/boot/dts/imx51.dtsi
++++ b/arch/arm/boot/dts/imx51.dtsi
+@@ -27,6 +27,7 @@
+ spi0 = &ecspi1;
+ spi1 = &ecspi2;
+ spi2 = &cspi;
++ ethernet0 = &fec;
+ };
+
+ tzic: tz-interrupt-controller@e0000000 {
+--- a/arch/arm/boot/dts/imx53.dtsi
++++ b/arch/arm/boot/dts/imx53.dtsi
+@@ -33,6 +33,7 @@
+ spi0 = &ecspi1;
+ spi1 = &ecspi2;
+ spi2 = &cspi;
++ ethernet0 = &fec;
+ };
+
+ cpus {
--- /dev/null
+From b738d764652dc5aab1c8939f637112981fce9e0e Mon Sep 17 00:00:00 2001
+From: Linus Torvalds <torvalds@linux-foundation.org>
+Date: Sun, 8 Jun 2014 14:17:00 -0700
+Subject: Don't trigger congestion wait on dirty-but-not-writeout pages
+
+From: Linus Torvalds <torvalds@linux-foundation.org>
+
+commit b738d764652dc5aab1c8939f637112981fce9e0e upstream.
+
+shrink_inactive_list() used to wait 0.1s to avoid congestion when all
+the pages that were isolated from the inactive list were dirty but not
+under active writeback. That makes no real sense, and has apparently caused
+major interactivity issues under some loads since 3.11.
+
+The ostensible reason for it was to wait for kswapd to start writing
+pages, but that seems questionable, since the congestion wait code
+triggers for kswapd itself as well. Also, the logic behind delaying
+anything when we haven't actually started writeback is unclear - it
+only delays actually starting that writeback.
+
+We'll still trigger the congestion waiting if
+
+ (a) the process is kswapd, and we hit pages flagged for immediate
+ reclaim
+
+ (b) the process is not kswapd, and the zone backing dev writeback is
+ actually congested.
+
+This probably needs to be revisited, but as it is this fixes a reported
+regression.
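+
+In code terms, the stalls that survive look roughly like this (a condensed
+sketch of the mm/vmscan.c logic after this patch, not a literal quote;
+nr_immediate is only accumulated for kswapd):
+
+  /* (a) kswapd hit pages flagged for immediate reclaim */
+  if (nr_immediate)
+          congestion_wait(BLK_RW_ASYNC, HZ/10);
+
+  /* (b) direct reclaim waits only if the zone's backing device is
+   *     actually congested */
+  if (!current_is_kswapd())
+          wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);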
+
+Reported-by: Felipe Contreras <felipe.contreras@gmail.com>
+Pinpointed-by: Hillf Danton <dhillf@gmail.com>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+[mhocko@suse.cz: backport to 3.12 stable tree]
+Fixes: e2be15f6c3ee ('mm: vmscan: stall page reclaim and writeback pages based on dirty/writepage pages encountered')
+Reported-by: Felipe Contreras <felipe.contreras@gmail.com>
+Pinpointed-by: Hillf Danton <dhillf@gmail.com>
+Cc: Michal Hocko <mhocko@suse.cz>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Mel Gorman <mgorman@suse.de>
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Michal Hocko <mhocko@suse.cz>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ mm/vmscan.c | 11 +++++------
+ 1 file changed, 5 insertions(+), 6 deletions(-)
+
+--- a/mm/vmscan.c
++++ b/mm/vmscan.c
+@@ -1540,19 +1540,18 @@ shrink_inactive_list(unsigned long nr_to
+ * If dirty pages are scanned that are not queued for IO, it
+ * implies that flushers are not keeping up. In this case, flag
+ * the zone ZONE_TAIL_LRU_DIRTY and kswapd will start writing
+- * pages from reclaim context. It will forcibly stall in the
+- * next check.
++ * pages from reclaim context.
+ */
+ if (nr_unqueued_dirty == nr_taken)
+ zone_set_flag(zone, ZONE_TAIL_LRU_DIRTY);
+
+ /*
+- * In addition, if kswapd scans pages marked marked for
+- * immediate reclaim and under writeback (nr_immediate), it
+- * implies that pages are cycling through the LRU faster than
++ * If kswapd scans pages marked marked for immediate
++ * reclaim and under writeback (nr_immediate), it implies
++ * that pages are cycling through the LRU faster than
+ * they are written so also forcibly stall.
+ */
+- if (nr_unqueued_dirty == nr_taken || nr_immediate)
++ if (nr_immediate)
+ congestion_wait(BLK_RW_ASYNC, HZ/10);
+ }
+
--- /dev/null
+From dc271ee0d04d12d6bfabacbec803289a7072fbd9 Mon Sep 17 00:00:00 2001
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Date: Thu, 3 Jul 2014 20:46:35 +0300
+Subject: iwlwifi: mvm: disable CTS to Self
+
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+
+commit dc271ee0d04d12d6bfabacbec803289a7072fbd9 upstream.
+
+Firmware folks say that this flag can cause trouble, so drop it. The
+advantage of CTS to self is that it slightly reduces the cost of the
+protection, but it makes the protection less reliable.
+
+Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c | 8 ++------
+ 1 file changed, 2 insertions(+), 6 deletions(-)
+
+--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
++++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+@@ -651,13 +651,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(
+ if (vif->bss_conf.qos)
+ cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
+
+- /* Don't use cts to self as the fw doesn't support it currently. */
+- if (vif->bss_conf.use_cts_prot) {
++ if (vif->bss_conf.use_cts_prot)
+ cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
+- if (IWL_UCODE_API(mvm->fw->ucode_ver) >= 8)
+- cmd->protection_flags |=
+- cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN);
+- }
++
+ IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
+ vif->bss_conf.use_cts_prot,
+ vif->bss_conf.ht_operation_mode);
--- /dev/null
+From b0ab99e7736af88b8ac1b7ae50ea287fffa2badc Mon Sep 17 00:00:00 2001
+From: Mateusz Guzik <mguzik@redhat.com>
+Date: Sat, 14 Jun 2014 15:00:09 +0200
+Subject: sched: Fix possible divide by zero in avg_atom() calculation
+
+From: Mateusz Guzik <mguzik@redhat.com>
+
+commit b0ab99e7736af88b8ac1b7ae50ea287fffa2badc upstream.
+
+proc_sched_show_task() does:
+
+ if (nr_switches)
+ do_div(avg_atom, nr_switches);
+
+nr_switches is unsigned long and do_div() truncates it to 32 bits, which
+means it can test as non-zero on e.g. x86-64 and still be truncated to
+zero for the division.
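+
+The truncation is easy to demonstrate in plain userspace C (a hedged
+sketch; do_div() takes its divisor as a 32-bit value, and unsigned long
+is 64 bits on an LP64 target such as x86-64):
+
+  #include <stdint.h>
+  #include <stdio.h>
+
+  int main(void)
+  {
+          unsigned long nr_switches = 1UL << 32;     /* non-zero 64-bit */
+          uint32_t divisor = (uint32_t)nr_switches;  /* what do_div() sees */
+
+          /* nr_switches tests non-zero, yet the 32-bit divisor is 0 */
+          printf("nr_switches=%lu divisor=%u\n", nr_switches, divisor);
+          return 0;
+  }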
+
+Fix the problem by using div64_ul() instead.
+
+As a side effect, avg_atom is now calculated correctly for large nr_switches.
+
+Signed-off-by: Mateusz Guzik <mguzik@redhat.com>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Link: http://lkml.kernel.org/r/1402750809-31991-1-git-send-email-mguzik@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/debug.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/sched/debug.c
++++ b/kernel/sched/debug.c
+@@ -608,7 +608,7 @@ void proc_sched_show_task(struct task_st
+
+ avg_atom = p->se.sum_exec_runtime;
+ if (nr_switches)
+- do_div(avg_atom, nr_switches);
++ avg_atom = div64_ul(avg_atom, nr_switches);
+ else
+ avg_atom = -1LL;
+
dm-cache-metadata-do-not-allow-the-data-block-size-to-change.patch
pm-sleep-fix-request_firmware-error-at-resume.patch
locking-mutex-disable-optimistic-spinning-on-some-architectures.patch
+sched-fix-possible-divide-by-zero-in-avg_atom-calculation.patch
+aio-protect-reqs_available-updates-from-changes-in-interrupt-handlers.patch
+arm-dts-imx-add-alias-for-ethernet-controller.patch
+iwlwifi-mvm-disable-cts-to-self.patch
+don-t-trigger-congestion-wait-on-dirty-but-not-writeout-pages.patch
+arc-implement-ptrace-ptrace_get_thread_area.patch