--- /dev/null
+From 6d4c88304794442055eaea1c07f3c7b988b8c924 Mon Sep 17 00:00:00 2001
+From: Suman Anna <s-anna@ti.com>
+Date: Mon, 23 Dec 2013 16:53:11 -0600
+Subject: ARM: OMAP2+: hwmod_data: fix missing OMAP_INTC_START in irq data
+
+From: Suman Anna <s-anna@ti.com>
+
+commit 6d4c88304794442055eaea1c07f3c7b988b8c924 upstream.
+
+Commit 7d7e1eb (ARM: OMAP2+: Prepare for irqs.h removal) and commit
+ec2c082 (ARM: OMAP2+: Remove hardcoded IRQs and enable SPARSE_IRQ)
+updated the way interrupts for OMAP2/3 devices are defined in the
+HWMOD data structures: each IRQ is now an index plus a fixed offset
+(defined by OMAP_INTC_START).
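+
+For illustration only (this sketch, with a made-up example_irqs array, is
+not part of the hunks below), a hwmod IRQ entry is expected to encode the
+INTC line number plus the OMAP_INTC_START offset:
+
+	/* sketch: INTC-relative line 20 encoded with the fixed offset */
+	static struct omap_hwmod_irq_info example_irqs[] = {
+		{ .irq = 20 + OMAP_INTC_START },
+		{ .irq = -1 }	/* terminator */
+	};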
+
+A couple of IRQs in the OMAP2/3 hwmod data were completely misconfigured
+because they were missing this OMAP_INTC_START relative offset. Add the
+offset back to fix the incorrect IRQ data for the following modules:
+ OMAP2 - GPMC, RNG
+ OMAP3 - GPMC, ISP MMU & IVA MMU
+
+Signed-off-by: Suman Anna <s-anna@ti.com>
+Fixes: 7d7e1eba7e92 ("ARM: OMAP2+: Prepare for irqs.h removal")
+Fixes: ec2c0825ca31 ("ARM: OMAP2+: Remove hardcoded IRQs and enable SPARSE_IRQ")
+Cc: Tony Lindgren <tony@atomide.com>
+Signed-off-by: Paul Walmsley <paul@pwsan.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c | 4 ++--
+ arch/arm/mach-omap2/omap_hwmod_3xxx_data.c | 6 +++---
+ 2 files changed, 5 insertions(+), 5 deletions(-)
+
+--- a/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_2xxx_ipblock_data.c
+@@ -796,7 +796,7 @@ struct omap_hwmod omap2xxx_counter_32k_h
+
+ /* gpmc */
+ static struct omap_hwmod_irq_info omap2xxx_gpmc_irqs[] = {
+- { .irq = 20 },
++ { .irq = 20 + OMAP_INTC_START, },
+ { .irq = -1 }
+ };
+
+@@ -841,7 +841,7 @@ static struct omap_hwmod_class omap2_rng
+ };
+
+ static struct omap_hwmod_irq_info omap2_rng_mpu_irqs[] = {
+- { .irq = 52 },
++ { .irq = 52 + OMAP_INTC_START, },
+ { .irq = -1 }
+ };
+
+--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
++++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+@@ -2152,7 +2152,7 @@ static struct omap_hwmod_class omap3xxx_
+ };
+
+ static struct omap_hwmod_irq_info omap3xxx_gpmc_irqs[] = {
+- { .irq = 20 },
++ { .irq = 20 + OMAP_INTC_START, },
+ { .irq = -1 }
+ };
+
+@@ -2986,7 +2986,7 @@ static struct omap_mmu_dev_attr mmu_isp_
+
+ static struct omap_hwmod omap3xxx_mmu_isp_hwmod;
+ static struct omap_hwmod_irq_info omap3xxx_mmu_isp_irqs[] = {
+- { .irq = 24 },
++ { .irq = 24 + OMAP_INTC_START, },
+ { .irq = -1 }
+ };
+
+@@ -3028,7 +3028,7 @@ static struct omap_mmu_dev_attr mmu_iva_
+
+ static struct omap_hwmod omap3xxx_mmu_iva_hwmod;
+ static struct omap_hwmod_irq_info omap3xxx_mmu_iva_irqs[] = {
+- { .irq = 28 },
++ { .irq = 28 + OMAP_INTC_START, },
+ { .irq = -1 }
+ };
+
--- /dev/null
+From cdc27c27843248ae7eb0df5fc261dd004eaa5670 Mon Sep 17 00:00:00 2001
+From: Will Deacon <will.deacon@arm.com>
+Date: Tue, 17 Dec 2013 17:09:08 +0000
+Subject: arm64: ptrace: avoid using HW_BREAKPOINT_EMPTY for disabled events
+
+From: Will Deacon <will.deacon@arm.com>
+
+commit cdc27c27843248ae7eb0df5fc261dd004eaa5670 upstream.
+
+Commit 8f34a1da35ae ("arm64: ptrace: use HW_BREAKPOINT_EMPTY type for
+disabled breakpoints") fixed an issue with GDB trying to zero breakpoint
+control registers. The problem there is that the arch hw_breakpoint code
+will attempt to create a (disabled) execute breakpoint of length 0.
+
+This will fail validation and report unexpected failure to GDB. To avoid
+this, we treated disabled breakpoints as HW_BREAKPOINT_EMPTY, but that
+seems to have broken with recent kernels, causing watchpoints to be
+treated as TYPE_INST in the core code and returning ENOSPC for any
+further breakpoints.
+
+This patch fixes the problem by prioritising the `enable' field of the
+breakpoint: if it is cleared, we simply update the perf_event_attr to
+indicate that the thing is disabled and don't bother changing either the
+type or the length. This reinforces the behaviour that the breakpoint
+control register is essentially read-only apart from the enable bit
+when disabling a breakpoint.
+
+Reported-by: Aaron Liu <liucy214@gmail.com>
+Signed-off-by: Will Deacon <will.deacon@arm.com>
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/kernel/ptrace.c | 36 +++++++++++++++++-------------------
+ 1 file changed, 17 insertions(+), 19 deletions(-)
+
+--- a/arch/arm64/kernel/ptrace.c
++++ b/arch/arm64/kernel/ptrace.c
+@@ -236,31 +236,29 @@ static int ptrace_hbp_fill_attr_ctrl(uns
+ {
+ int err, len, type, disabled = !ctrl.enabled;
+
+- if (disabled) {
+- len = 0;
+- type = HW_BREAKPOINT_EMPTY;
+- } else {
+- err = arch_bp_generic_fields(ctrl, &len, &type);
+- if (err)
+- return err;
++ attr->disabled = disabled;
++ if (disabled)
++ return 0;
+
+- switch (note_type) {
+- case NT_ARM_HW_BREAK:
+- if ((type & HW_BREAKPOINT_X) != type)
+- return -EINVAL;
+- break;
+- case NT_ARM_HW_WATCH:
+- if ((type & HW_BREAKPOINT_RW) != type)
+- return -EINVAL;
+- break;
+- default:
++ err = arch_bp_generic_fields(ctrl, &len, &type);
++ if (err)
++ return err;
++
++ switch (note_type) {
++ case NT_ARM_HW_BREAK:
++ if ((type & HW_BREAKPOINT_X) != type)
++ return -EINVAL;
++ break;
++ case NT_ARM_HW_WATCH:
++ if ((type & HW_BREAKPOINT_RW) != type)
+ return -EINVAL;
+- }
++ break;
++ default:
++ return -EINVAL;
+ }
+
+ attr->bp_len = len;
+ attr->bp_type = type;
+- attr->disabled = disabled;
+
+ return 0;
+ }
--- /dev/null
+From 4ecf7ccb1973fd826456b6ab1e6dfafe9023c753 Mon Sep 17 00:00:00 2001
+From: Catalin Marinas <catalin.marinas@arm.com>
+Date: Fri, 31 May 2013 16:30:58 +0100
+Subject: arm64: spinlock: retry trylock operation if strex fails on free lock
+
+From: Catalin Marinas <catalin.marinas@arm.com>
+
+commit 4ecf7ccb1973fd826456b6ab1e6dfafe9023c753 upstream.
+
+An exclusive store instruction may fail for reasons other than lock
+contention (e.g. a cache eviction during the critical section) so, in
+line with other architectures using similar exclusive instructions
+(alpha, mips, powerpc), retry the trylock operation if the lock appears
+to be free but the strex reported failure.
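+
+In rough pseudo-C (illustrative only; load_exclusive()/store_exclusive()
+are hypothetical stand-ins for the ldaxr/stxr instructions in the real
+asm), the trylock now retries instead of failing when the exclusive store
+is unsuccessful on a free lock:
+
+	for (;;) {
+		tmp = load_exclusive(&lock->lock);	/* ldaxr */
+		if (tmp)
+			return 0;			/* lock held: fail */
+		if (!store_exclusive(&lock->lock, 1))	/* stxr succeeded */
+			return 1;
+		/* stxr failed (e.g. cache eviction): retry */
+	}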
+
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Reported-by: Tony Thompson <anthony.thompson@arm.com>
+Acked-by: Will Deacon <will.deacon@arm.com>
+Cc: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/arm64/include/asm/spinlock.h | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/arm64/include/asm/spinlock.h
++++ b/arch/arm64/include/asm/spinlock.h
+@@ -59,9 +59,10 @@ static inline int arch_spin_trylock(arch
+ unsigned int tmp;
+
+ asm volatile(
+- " ldaxr %w0, %1\n"
++ "2: ldaxr %w0, %1\n"
+ " cbnz %w0, 1f\n"
+ " stxr %w0, %w2, %1\n"
++ " cbnz %w0, 2b\n"
+ "1:\n"
+ : "=&r" (tmp), "+Q" (lock->lock)
+ : "r" (1)
--- /dev/null
+From c4602c1c818bd6626178d6d3fcc152d9f2f48ac0 Mon Sep 17 00:00:00 2001
+From: Miao Xie <miaox@cn.fujitsu.com>
+Date: Mon, 16 Dec 2013 15:20:01 +0800
+Subject: ftrace: Initialize the ftrace profiler for each possible cpu
+
+From: Miao Xie <miaox@cn.fujitsu.com>
+
+commit c4602c1c818bd6626178d6d3fcc152d9f2f48ac0 upstream.
+
+Ftrace currently initializes only the online CPUs. This implementation has
+two problems:
+- If we online a CPU after we enable the function profile, and then run the
+ test, we will lose the trace information on that CPU.
+ Steps to reproduce:
+ # echo 0 > /sys/devices/system/cpu/cpu1/online
+ # cd <debugfs>/tracing/
+ # echo <some function name> >> set_ftrace_filter
+ # echo 1 > function_profile_enabled
+ # echo 1 > /sys/devices/system/cpu/cpu1/online
+ # run test
+- If we offline a CPU before we enable the function profile, we will not clear
+  the trace information when we enable the function profile. The stale data
+  will confuse the users.
+ Steps to reproduce:
+ # cd <debugfs>/tracing/
+ # echo <some function name> >> set_ftrace_filter
+ # echo 1 > function_profile_enabled
+ # run test
+ # cat trace_stat/function*
+ # echo 0 > /sys/devices/system/cpu/cpu1/online
+ # echo 0 > function_profile_enabled
+ # echo 1 > function_profile_enabled
+ # cat trace_stat/function*
+ # run test
+ # cat trace_stat/function*
+
+So it is better to initialize the ftrace profiler for each possible cpu
+every time we enable the function profile, instead of for just the online CPUs.
+
+Link: http://lkml.kernel.org/r/1387178401-10619-1-git-send-email-miaox@cn.fujitsu.com
+
+Signed-off-by: Miao Xie <miaox@cn.fujitsu.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/ftrace.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -750,7 +750,7 @@ static int ftrace_profile_init(void)
+ int cpu;
+ int ret = 0;
+
+- for_each_online_cpu(cpu) {
++ for_each_possible_cpu(cpu) {
+ ret = ftrace_profile_init_cpu(cpu);
+ if (ret)
+ break;
iser-target-fix-error-return-code-in-isert_create_device_ib_res.patch
iscsi-target-fix-up-all-zero-data-length-cdbs-with-r-w_bit-set.patch
target-file-update-hw_max_sectors-based-on-current-block_size.patch
+ftrace-initialize-the-ftrace-profiler-for-each-possible-cpu.patch
+arm64-ptrace-avoid-using-hw_breakpoint_empty-for-disabled-events.patch
+arm64-spinlock-retry-trylock-operation-if-strex-fails-on-free-lock.patch
+arm-omap2-hwmod_data-fix-missing-omap_intc_start-in-irq-data.patch
+x86-idle-repair-large-server-50-watt-idle-power-regression.patch
--- /dev/null
+From 40e2d7f9b5dae048789c64672bf3027fbb663ffa Mon Sep 17 00:00:00 2001
+From: Len Brown <len.brown@intel.com>
+Date: Wed, 18 Dec 2013 16:44:57 -0500
+Subject: x86 idle: Repair large-server 50-watt idle-power regression
+
+From: Len Brown <len.brown@intel.com>
+
+commit 40e2d7f9b5dae048789c64672bf3027fbb663ffa upstream.
+
+Linux 3.10 changed the timing of how thread_info->flags is touched:
+
+ x86: Use generic idle loop
+ (7d1a941731fabf27e5fb6edbebb79fe856edb4e5)
+
+This caused Intel NHM-EX and WSM-EX servers to experience a large number
+of immediate MONITOR/MWAIT break wakeups, which caused cpuidle to demote
+from deep C-states to shallow C-states, which caused these platforms
+to experience a significant increase in idle power.
+
+Note that this issue was already present before the commit above;
+however, it wasn't seen often enough to be noticed in power measurements.
+
+Here we extend an errata workaround from the Core2 EX "Dunnington"
+to NHM-EX and WSM-EX, to prevent these immediate
+returns from MWAIT, reducing idle power on these platforms.
+
+While only acpi_idle ran on Dunnington, intel_idle
+may also run on these two newer systems.
+As of today, there are no other models that are known
+to need this tweak.
+
+Link: http://lkml.kernel.org/r/CAJvTdK=%2BaNN66mYpCGgbHGCHhYQAKx-vB0kJSWjVpsNb_hOAtQ@mail.gmail.com
+Signed-off-by: Len Brown <len.brown@intel.com>
+Link: http://lkml.kernel.org/r/baff264285f6e585df757d58b17788feabc68918.1387403066.git.len.brown@intel.com
+Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/intel.c | 3 ++-
+ drivers/idle/intel_idle.c | 3 +++
+ 2 files changed, 5 insertions(+), 1 deletion(-)
+
+--- a/arch/x86/kernel/cpu/intel.c
++++ b/arch/x86/kernel/cpu/intel.c
+@@ -387,7 +387,8 @@ static void __cpuinit init_intel(struct
+ set_cpu_cap(c, X86_FEATURE_PEBS);
+ }
+
+- if (c->x86 == 6 && c->x86_model == 29 && cpu_has_clflush)
++ if (c->x86 == 6 && cpu_has_clflush &&
++ (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
+ set_cpu_cap(c, X86_FEATURE_CLFLUSH_MONITOR);
+
+ #ifdef CONFIG_X86_64
+--- a/drivers/idle/intel_idle.c
++++ b/drivers/idle/intel_idle.c
+@@ -361,6 +361,9 @@ static int intel_idle(struct cpuidle_dev
+
+ if (!current_set_polling_and_test()) {
+
++ if (this_cpu_has(X86_FEATURE_CLFLUSH_MONITOR))
++			clflush((void *)&current_thread_info()->flags);
++
+ 		__monitor((void *)&current_thread_info()->flags, 0, 0);
+ smp_mb();
+ if (!need_resched())