--- /dev/null
+From d949b4fe6d23dd92b5fa48cbf7af90ca32beed2e Mon Sep 17 00:00:00 2001
+From: David Daney <david.daney@cavium.com>
+Date: Wed, 12 Jun 2013 17:28:33 +0000
+Subject: MIPS: Octeon: Don't clobber bootloader data structures.
+
+From: David Daney <david.daney@cavium.com>
+
+commit d949b4fe6d23dd92b5fa48cbf7af90ca32beed2e upstream.
+
+Commit abe77f90dc (MIPS: Octeon: Add kexec and kdump support) added a
+bootmem region for the kernel image itself. The problem is that this
+is rounded up to a 0x100000 boundary, which is memory that may not be
+owned by the kernel. Depending on the kernel's configuration based
+size, this 'extra' memory may contain data passed from the bootloader
+to the kernel itself, which if clobbered makes the kernel crash in
+various ways.
+
+The fix: Quit rounding the size up, so that we only use memory
+assigned to the kernel.
+
+Signed-off-by: David Daney <david.daney@cavium.com>
+Cc: linux-mips@linux-mips.org
+Patchwork: https://patchwork.linux-mips.org/patch/5449/
+Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/mips/cavium-octeon/setup.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/mips/cavium-octeon/setup.c
++++ b/arch/mips/cavium-octeon/setup.c
+@@ -996,7 +996,7 @@ void __init plat_mem_setup(void)
+ cvmx_bootmem_unlock();
+ /* Add the memory region for the kernel. */
+ kernel_start = (unsigned long) _text;
+- kernel_size = ALIGN(_end - _text, 0x100000);
++ kernel_size = _end - _text;
+
+ /* Adjust for physical offset. */
+ kernel_start &= ~0xffffffff80000000ULL;
--- /dev/null
+From 734df5ab549ca44f40de0f07af1c8803856dfb18 Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@redhat.com>
+Date: Tue, 9 Jul 2013 17:44:10 +0200
+Subject: perf: Clone child context from parent context pmu
+
+From: Jiri Olsa <jolsa@redhat.com>
+
+commit 734df5ab549ca44f40de0f07af1c8803856dfb18 upstream.
+
+Currently when the child context for inherited events is
+created, it's based on the pmu object of the first event
+of the parent context.
+
+This is wrong for the following scenario:
+
+ - HW context having HW and SW event
+ - HW event got removed (closed)
+ - SW event stays in HW context as the only event
+ and its pmu is used to clone the child context
+
+The issue starts when the cpu context object is touched
+based on the pmu context object (__get_cpu_context). In
+this case the HW context will work with SW cpu context
+ending up with following WARN below.
+
+Fixing this by using parent context pmu object to clone
+from child context.
+
+Addresses the following warning reported by Vince Weaver:
+
+[ 2716.472065] ------------[ cut here ]------------
+[ 2716.476035] WARNING: at kernel/events/core.c:2122 task_ctx_sched_out+0x3c/0x)
+[ 2716.476035] Modules linked in: nfsd auth_rpcgss oid_registry nfs_acl nfs locn
+[ 2716.476035] CPU: 0 PID: 3164 Comm: perf_fuzzer Not tainted 3.10.0-rc4 #2
+[ 2716.476035] Hardware name: AOpen DE7000/nMCP7ALPx-DE R1.06 Oct.19.2012, BI2
+[ 2716.476035] 0000000000000000 ffffffff8102e215 0000000000000000 ffff88011fc18
+[ 2716.476035] ffff8801175557f0 0000000000000000 ffff880119fda88c ffffffff810ad
+[ 2716.476035] ffff880119fda880 ffffffff810af02a 0000000000000009 ffff880117550
+[ 2716.476035] Call Trace:
+[ 2716.476035] [<ffffffff8102e215>] ? warn_slowpath_common+0x5b/0x70
+[ 2716.476035] [<ffffffff810ab2bd>] ? task_ctx_sched_out+0x3c/0x5f
+[ 2716.476035] [<ffffffff810af02a>] ? perf_event_exit_task+0xbf/0x194
+[ 2716.476035] [<ffffffff81032a37>] ? do_exit+0x3e7/0x90c
+[ 2716.476035] [<ffffffff810cd5ab>] ? __do_fault+0x359/0x394
+[ 2716.476035] [<ffffffff81032fe6>] ? do_group_exit+0x66/0x98
+[ 2716.476035] [<ffffffff8103dbcd>] ? get_signal_to_deliver+0x479/0x4ad
+[ 2716.476035] [<ffffffff810ac05c>] ? __perf_event_task_sched_out+0x230/0x2d1
+[ 2716.476035] [<ffffffff8100205d>] ? do_signal+0x3c/0x432
+[ 2716.476035] [<ffffffff810abbf9>] ? ctx_sched_in+0x43/0x141
+[ 2716.476035] [<ffffffff810ac2ca>] ? perf_event_context_sched_in+0x7a/0x90
+[ 2716.476035] [<ffffffff810ac311>] ? __perf_event_task_sched_in+0x31/0x118
+[ 2716.476035] [<ffffffff81050dd9>] ? mmdrop+0xd/0x1c
+[ 2716.476035] [<ffffffff81051a39>] ? finish_task_switch+0x7d/0xa6
+[ 2716.476035] [<ffffffff81002473>] ? do_notify_resume+0x20/0x5d
+[ 2716.476035] [<ffffffff813654f5>] ? retint_signal+0x3d/0x78
+[ 2716.476035] ---[ end trace 827178d8a5966c3d ]---
+
+Reported-by: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Jiri Olsa <jolsa@redhat.com>
+Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/1373384651-6109-1-git-send-email-jolsa@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -7228,7 +7228,7 @@ inherit_task_group(struct perf_event *ev
+ * child.
+ */
+
+- child_ctx = alloc_perf_context(event->pmu, child);
++ child_ctx = alloc_perf_context(parent_ctx->pmu, child);
+ if (!child_ctx)
+ return -ENOMEM;
+
--- /dev/null
+From 058ebd0eba3aff16b144eabf4510ed9510e1416e Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Fri, 12 Jul 2013 11:08:33 +0200
+Subject: perf: Fix perf_lock_task_context() vs RCU
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 058ebd0eba3aff16b144eabf4510ed9510e1416e upstream.
+
+Jiri managed to trigger this warning:
+
+ [] ======================================================
+ [] [ INFO: possible circular locking dependency detected ]
+ [] 3.10.0+ #228 Tainted: G W
+ [] -------------------------------------------------------
+ [] p/6613 is trying to acquire lock:
+ [] (rcu_node_0){..-...}, at: [<ffffffff810ca797>] rcu_read_unlock_special+0xa7/0x250
+ []
+ [] but task is already holding lock:
+ [] (&ctx->lock){-.-...}, at: [<ffffffff810f2879>] perf_lock_task_context+0xd9/0x2c0
+ []
+ [] which lock already depends on the new lock.
+ []
+ [] the existing dependency chain (in reverse order) is:
+ []
+ [] -> #4 (&ctx->lock){-.-...}:
+ [] -> #3 (&rq->lock){-.-.-.}:
+ [] -> #2 (&p->pi_lock){-.-.-.}:
+ [] -> #1 (&rnp->nocb_gp_wq[1]){......}:
+ [] -> #0 (rcu_node_0){..-...}:
+
+Paul was quick to explain that due to preemptible RCU we cannot call
+rcu_read_unlock() while holding scheduler (or nested) locks when part
+of the read side critical section was preemptible.
+
+Therefore solve it by making the entire RCU read side non-preemptible.
+
+Also pull out the retry from under the non-preempt to play nice with RT.
+
+Reported-by: Jiri Olsa <jolsa@redhat.com>
+Helped-out-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c | 15 ++++++++++++++-
+ 1 file changed, 14 insertions(+), 1 deletion(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -761,8 +761,18 @@ perf_lock_task_context(struct task_struc
+ {
+ struct perf_event_context *ctx;
+
+- rcu_read_lock();
+ retry:
++ /*
++ * One of the few rules of preemptible RCU is that one cannot do
++ * rcu_read_unlock() while holding a scheduler (or nested) lock when
++ * part of the read side critical section was preemptible -- see
++ * rcu_read_unlock_special().
++ *
++ * Since ctx->lock nests under rq->lock we must ensure the entire read
++ * side critical section is non-preemptible.
++ */
++ preempt_disable();
++ rcu_read_lock();
+ ctx = rcu_dereference(task->perf_event_ctxp[ctxn]);
+ if (ctx) {
+ /*
+@@ -778,6 +788,8 @@ retry:
+ raw_spin_lock_irqsave(&ctx->lock, *flags);
+ if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) {
+ raw_spin_unlock_irqrestore(&ctx->lock, *flags);
++ rcu_read_unlock();
++ preempt_enable();
+ goto retry;
+ }
+
+@@ -787,6 +799,7 @@ retry:
+ }
+ }
+ rcu_read_unlock();
++ preempt_enable();
+ return ctx;
+ }
+
--- /dev/null
+From 06f417968beac6e6b614e17b37d347aa6a6b1d30 Mon Sep 17 00:00:00 2001
+From: Jiri Olsa <jolsa@redhat.com>
+Date: Tue, 9 Jul 2013 17:44:11 +0200
+Subject: perf: Remove WARN_ON_ONCE() check in __perf_event_enable() for valid scenario
+
+From: Jiri Olsa <jolsa@redhat.com>
+
+commit 06f417968beac6e6b614e17b37d347aa6a6b1d30 upstream.
+
+The '!ctx->is_active' check has a valid scenario, so
+there's no need for the warning.
+
+The reason is that there's a time window between the
+'ctx->is_active' check in the perf_event_enable() function
+and the __perf_event_enable() function having:
+
+ - IRQs on
+ - ctx->lock unlocked
+
+where the task could be killed and 'ctx' deactivated by
+perf_event_exit_task(), ending up with the warning below.
+
+So remove the WARN_ON_ONCE() check and add comments to
+explain it all.
+
+This addresses the following warning reported by Vince Weaver:
+
+[ 324.983534] ------------[ cut here ]------------
+[ 324.984420] WARNING: at kernel/events/core.c:1953 __perf_event_enable+0x187/0x190()
+[ 324.984420] Modules linked in:
+[ 324.984420] CPU: 19 PID: 2715 Comm: nmi_bug_snb Not tainted 3.10.0+ #246
+[ 324.984420] Hardware name: Supermicro X8DTN/X8DTN, BIOS 4.6.3 01/08/2010
+[ 324.984420] 0000000000000009 ffff88043fce3ec8 ffffffff8160ea0b ffff88043fce3f00
+[ 324.984420] ffffffff81080ff0 ffff8802314fdc00 ffff880231a8f800 ffff88043fcf7860
+[ 324.984420] 0000000000000286 ffff880231a8f800 ffff88043fce3f10 ffffffff8108103a
+[ 324.984420] Call Trace:
+[ 324.984420] <IRQ> [<ffffffff8160ea0b>] dump_stack+0x19/0x1b
+[ 324.984420] [<ffffffff81080ff0>] warn_slowpath_common+0x70/0xa0
+[ 324.984420] [<ffffffff8108103a>] warn_slowpath_null+0x1a/0x20
+[ 324.984420] [<ffffffff81134437>] __perf_event_enable+0x187/0x190
+[ 324.984420] [<ffffffff81130030>] remote_function+0x40/0x50
+[ 324.984420] [<ffffffff810e51de>] generic_smp_call_function_single_interrupt+0xbe/0x130
+[ 324.984420] [<ffffffff81066a47>] smp_call_function_single_interrupt+0x27/0x40
+[ 324.984420] [<ffffffff8161fd2f>] call_function_single_interrupt+0x6f/0x80
+[ 324.984420] <EOI> [<ffffffff816161a1>] ? _raw_spin_unlock_irqrestore+0x41/0x70
+[ 324.984420] [<ffffffff8113799d>] perf_event_exit_task+0x14d/0x210
+[ 324.984420] [<ffffffff810acd04>] ? switch_task_namespaces+0x24/0x60
+[ 324.984420] [<ffffffff81086946>] do_exit+0x2b6/0xa40
+[ 324.984420] [<ffffffff8161615c>] ? _raw_spin_unlock_irq+0x2c/0x30
+[ 324.984420] [<ffffffff81087279>] do_group_exit+0x49/0xc0
+[ 324.984420] [<ffffffff81096854>] get_signal_to_deliver+0x254/0x620
+[ 324.984420] [<ffffffff81043057>] do_signal+0x57/0x5a0
+[ 324.984420] [<ffffffff8161a164>] ? __do_page_fault+0x2a4/0x4e0
+[ 324.984420] [<ffffffff8161665c>] ? retint_restore_args+0xe/0xe
+[ 324.984420] [<ffffffff816166cd>] ? retint_signal+0x11/0x84
+[ 324.984420] [<ffffffff81043605>] do_notify_resume+0x65/0x80
+[ 324.984420] [<ffffffff81616702>] retint_signal+0x46/0x84
+[ 324.984420] ---[ end trace 442ec2f04db3771a ]---
+
+Reported-by: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Jiri Olsa <jolsa@redhat.com>
+Suggested-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
+Cc: Frederic Weisbecker <fweisbec@gmail.com>
+Cc: Ingo Molnar <mingo@elte.hu>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Paul Mackerras <paulus@samba.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Peter Zijlstra <peterz@infradead.org>
+Link: http://lkml.kernel.org/r/1373384651-6109-2-git-send-email-jolsa@redhat.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c | 11 ++++++++++-
+ 1 file changed, 10 insertions(+), 1 deletion(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1761,7 +1761,16 @@ static int __perf_event_enable(void *inf
+ struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+ int err;
+
+- if (WARN_ON_ONCE(!ctx->is_active))
++ /*
++ * There's a time window between 'ctx->is_active' check
++ * in perf_event_enable function and this place having:
++ * - IRQs on
++ * - ctx->lock unlocked
++ *
++ * where the task could be killed and 'ctx' deactivated
++ * by perf_event_exit_task.
++ */
++ if (!ctx->is_active)
+ return -EINVAL;
+
+ raw_spin_lock(&ctx->lock);
drm-nva3-disp-fix-hdmi-audio-regression.patch
drm-nv50-disp-use-output-specific-mask-in-interrupt.patch
iommu-amd-only-unmap-large-pages-from-the-first-pte.patch
+sunrpc-split-client-creation-routine-into-setup-and-registration.patch
+sunrpc-fix-a-deadlock-in-rpc_client_register.patch
+xtensa-adjust-boot-parameters-address-when-initialize_xtensa_mmu_inside_vmlinux-is-selected.patch
+thermal-cpu_cooling-fix-stub-function.patch
+mips-octeon-don-t-clobber-bootloader-data-structures.patch
+staging-line6-fix-unlocked-snd_pcm_stop-call.patch
+perf-clone-child-context-from-parent-context-pmu.patch
+perf-remove-warn_on_once-check-in-__perf_event_enable-for-valid-scenario.patch
+perf-fix-perf_lock_task_context-vs-rcu.patch
+tracing-failed-to-create-system-directory.patch
+tracing-fix-irqs-off-tag-display-in-syscall-tracing.patch
+tracing-make-trace_marker-use-the-correct-per-instance-buffer.patch
+tracing-protect-ftrace_trace_arrays-list-in-trace_events.c.patch
+tracing-add-trace_array_get-put-to-handle-instance-refs-better.patch
+tracing-get-trace_array-ref-counts-when-accessing-trace-files.patch
+tracing-fix-race-between-deleting-buffer-and-setting-events.patch
+tracing-add-trace_array_get-put-to-event-handling.patch
--- /dev/null
+From 86f0b5b86d142b9323432fef078a6cf0fb5dda74 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Thu, 11 Jul 2013 18:02:38 +0200
+Subject: staging: line6: Fix unlocked snd_pcm_stop() call
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit 86f0b5b86d142b9323432fef078a6cf0fb5dda74 upstream.
+
+snd_pcm_stop() must be called in the PCM substream lock context.
+
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/staging/line6/pcm.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+--- a/drivers/staging/line6/pcm.c
++++ b/drivers/staging/line6/pcm.c
+@@ -385,8 +385,11 @@ static int snd_line6_pcm_free(struct snd
+ */
+ static void pcm_disconnect_substream(struct snd_pcm_substream *substream)
+ {
+- if (substream->runtime && snd_pcm_running(substream))
++ if (substream->runtime && snd_pcm_running(substream)) {
++ snd_pcm_stream_lock_irq(substream);
+ snd_pcm_stop(substream, SNDRV_PCM_STATE_DISCONNECTED);
++ snd_pcm_stream_unlock_irq(substream);
++ }
+ }
+
+ /*
--- /dev/null
+From eeee245268c951262b861bc1be4e9dc812352499 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+Date: Wed, 10 Jul 2013 15:33:01 -0400
+Subject: SUNRPC: Fix a deadlock in rpc_client_register()
+
+From: Trond Myklebust <Trond.Myklebust@netapp.com>
+
+commit eeee245268c951262b861bc1be4e9dc812352499 upstream.
+
+Commit 384816051ca9125cd54750e59c780c2a2655fa4f (SUNRPC: fix races on
+PipeFS MOUNT notifications) introduces a regression when we call
+rpc_setup_pipedir() with RPCSEC_GSS as the auth flavour.
+
+By calling rpcauth_create() while holding the sn->pipefs_sb_lock, we
+end up deadlocking in gss_pipes_dentries_create_net().
+Fix is to register the client and release the mutex before calling
+rpcauth_create().
+
+Reported-by: Weston Andros Adamson <dros@netapp.com>
+Tested-by: Weston Andros Adamson <dros@netapp.com>
+Cc: Stanislav Kinsbursky <skinsbursky@parallels.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/clnt.c | 16 +++++++++-------
+ 1 file changed, 9 insertions(+), 7 deletions(-)
+
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -288,7 +288,7 @@ static int rpc_client_register(const str
+ struct rpc_auth *auth;
+ struct net *net = rpc_net_ns(clnt);
+ struct super_block *pipefs_sb;
+- int err = 0;
++ int err;
+
+ pipefs_sb = rpc_get_sb_net(net);
+ if (pipefs_sb) {
+@@ -297,6 +297,10 @@ static int rpc_client_register(const str
+ goto out;
+ }
+
++ rpc_register_client(clnt);
++ if (pipefs_sb)
++ rpc_put_sb_net(net);
++
+ auth = rpcauth_create(args->authflavor, clnt);
+ if (IS_ERR(auth)) {
+ dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
+@@ -304,16 +308,14 @@ static int rpc_client_register(const str
+ err = PTR_ERR(auth);
+ goto err_auth;
+ }
+-
+- rpc_register_client(clnt);
++ return 0;
++err_auth:
++ pipefs_sb = rpc_get_sb_net(net);
++ __rpc_clnt_remove_pipedir(clnt);
+ out:
+ if (pipefs_sb)
+ rpc_put_sb_net(net);
+ return err;
+-
+-err_auth:
+- __rpc_clnt_remove_pipedir(clnt);
+- goto out;
+ }
+
+ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
--- /dev/null
+From e73f4cc051199799aee4320f300f28ffb82f3eb1 Mon Sep 17 00:00:00 2001
+From: Stanislav Kinsbursky <skinsbursky@parallels.com>
+Date: Mon, 24 Jun 2013 11:52:52 +0400
+Subject: SUNRPC: split client creation routine into setup and registration
+
+From: Stanislav Kinsbursky <skinsbursky@parallels.com>
+
+commit e73f4cc051199799aee4320f300f28ffb82f3eb1 upstream.
+
+This helper moves all "registration" code to the new rpc_client_register()
+helper.
+This helper will be used later in the series to synchronize against PipeFS
+MOUNT/UMOUNT events.
+
+Signed-off-by: Stanislav Kinsbursky <skinsbursky@parallels.com>
+Signed-off-by: Trond Myklebust <Trond.Myklebust@netapp.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/clnt.c | 64 ++++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 39 insertions(+), 25 deletions(-)
+
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -281,14 +281,47 @@ static void rpc_clnt_set_nodename(struct
+ memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
+ }
+
++static int rpc_client_register(const struct rpc_create_args *args,
++ struct rpc_clnt *clnt)
++{
++ const struct rpc_program *program = args->program;
++ struct rpc_auth *auth;
++ struct net *net = rpc_net_ns(clnt);
++ struct super_block *pipefs_sb;
++ int err = 0;
++
++ pipefs_sb = rpc_get_sb_net(net);
++ if (pipefs_sb) {
++ err = rpc_setup_pipedir(clnt, program->pipe_dir_name, pipefs_sb);
++ if (err)
++ goto out;
++ }
++
++ auth = rpcauth_create(args->authflavor, clnt);
++ if (IS_ERR(auth)) {
++ dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
++ args->authflavor);
++ err = PTR_ERR(auth);
++ goto err_auth;
++ }
++
++ rpc_register_client(clnt);
++out:
++ if (pipefs_sb)
++ rpc_put_sb_net(net);
++ return err;
++
++err_auth:
++ __rpc_clnt_remove_pipedir(clnt);
++ goto out;
++}
++
+ static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args, struct rpc_xprt *xprt)
+ {
+ const struct rpc_program *program = args->program;
+ const struct rpc_version *version;
+ struct rpc_clnt *clnt = NULL;
+- struct rpc_auth *auth;
+ int err;
+- struct super_block *pipefs_sb;
+
+ /* sanity check the name before trying to print it */
+ dprintk("RPC: creating %s client for %s (xprt %p)\n",
+@@ -347,34 +380,15 @@ static struct rpc_clnt * rpc_new_client(
+
+ atomic_set(&clnt->cl_count, 1);
+
+- pipefs_sb = rpc_get_sb_net(rpc_net_ns(clnt));
+- if (pipefs_sb) {
+- err = rpc_setup_pipedir(clnt, program->pipe_dir_name, pipefs_sb);
+- if (err)
+- goto out_no_path;
+- }
+-
+- auth = rpcauth_create(args->authflavor, clnt);
+- if (IS_ERR(auth)) {
+- dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
+- args->authflavor);
+- err = PTR_ERR(auth);
+- goto out_no_auth;
+- }
+-
+ /* save the nodename */
+ rpc_clnt_set_nodename(clnt, utsname()->nodename);
+- rpc_register_client(clnt);
+- if (pipefs_sb)
+- rpc_put_sb_net(rpc_net_ns(clnt));
++
++ err = rpc_client_register(args, clnt);
++ if (err)
++ goto out_no_path;
+ return clnt;
+
+-out_no_auth:
+- if (pipefs_sb)
+- __rpc_clnt_remove_pipedir(clnt);
+ out_no_path:
+- if (pipefs_sb)
+- rpc_put_sb_net(rpc_net_ns(clnt));
+ kfree(clnt->cl_principal);
+ out_no_principal:
+ rpc_free_iostats(clnt->cl_metrics);
--- /dev/null
+From e8d39240d635ed9bcaddbec898b1c9f063c5dbb2 Mon Sep 17 00:00:00 2001
+From: Arnd Bergmann <arnd@arndb.de>
+Date: Fri, 5 Jul 2013 17:40:13 +0200
+Subject: thermal: cpu_cooling: fix stub function
+
+From: Arnd Bergmann <arnd@arndb.de>
+
+commit e8d39240d635ed9bcaddbec898b1c9f063c5dbb2 upstream.
+
+The function stub for cpufreq_cooling_get_level introduced
+in 57df81069 "Thermal: exynos: fix cooling state translation"
+is not syntactically correct C and needs to be fixed to avoid
+this error:
+
+In file included from drivers/thermal/db8500_thermal.c:20:0:
+ include/linux/cpu_cooling.h: In function 'cpufreq_cooling_get_level':
+include/linux/cpu_cooling.h:57:1:
+ error: parameter name omitted unsigned long cpufreq_cooling_get_level(unsigned int, unsigned int) ^
+ include/linux/cpu_cooling.h:57:1: error: parameter name omitted
+
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Acked-by: Eduardo Valentin <eduardo.valentin@ti.com>
+Cc: Zhang Rui <rui.zhang@intel.com>
+Cc: Amit Daniel kachhap <amit.daniel@samsung.com>
+Signed-off-by: Eduardo Valentin <eduardo.valentin@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/linux/cpu_cooling.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/include/linux/cpu_cooling.h
++++ b/include/linux/cpu_cooling.h
+@@ -41,7 +41,7 @@ cpufreq_cooling_register(const struct cp
+ */
+ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev);
+
+-unsigned long cpufreq_cooling_get_level(unsigned int, unsigned int);
++unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq);
+ #else /* !CONFIG_CPU_THERMAL */
+ static inline struct thermal_cooling_device *
+ cpufreq_cooling_register(const struct cpumask *clip_cpus)
+@@ -54,7 +54,7 @@ void cpufreq_cooling_unregister(struct t
+ return;
+ }
+ static inline
+-unsigned long cpufreq_cooling_get_level(unsigned int, unsigned int)
++unsigned long cpufreq_cooling_get_level(unsigned int cpu, unsigned int freq)
+ {
+ return THERMAL_CSTATE_INVALID;
+ }
--- /dev/null
+From 8e2e2fa47129532a30cff6c25a47078dc97d9260 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Tue, 2 Jul 2013 15:30:53 -0400
+Subject: tracing: Add trace_array_get/put() to event handling
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 8e2e2fa47129532a30cff6c25a47078dc97d9260 upstream.
+
+Commit a695cb58162 "tracing: Prevent deleting instances when they are being read"
+tried to fix a race between deleting a trace instance and reading contents
+of a trace file. But it wasn't good enough. The following could crash the kernel:
+
+ # cd /sys/kernel/debug/tracing/instances
+ # ( while :; do mkdir foo; rmdir foo; done ) &
+ # ( while :; do echo 1 > foo/events/sched/sched_switch 2> /dev/null; done ) &
+
+Luckily this can only be done by root user, but it should be fixed regardless.
+
+The problem is that a delete of the file can happen after the write to the event
+is opened, but before the enabling happens.
+
+The solution is to make sure the trace_array is available before succeeding in
+opening for write, and increment the ref counter while opened.
+
+Now the instance can be deleted when the events are writing to the buffer,
+but the deletion of the instance will disable all events before the instance
+is actually deleted.
+
+Reported-by: Alexander Lam <azl@google.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.h | 3 ++
+ kernel/trace/trace_events.c | 55 ++++++++++++++++++++++++++++++++++++++++----
+ 2 files changed, 54 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -226,6 +226,9 @@ extern struct list_head ftrace_trace_arr
+
+ extern struct mutex trace_types_lock;
+
++extern int trace_array_get(struct trace_array *tr);
++extern void trace_array_put(struct trace_array *tr);
++
+ /*
+ * The global tracer (top) should be the first trace array added,
+ * but we check the flag anyway.
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -413,6 +413,35 @@ static void put_system(struct ftrace_sub
+ }
+
+ /*
++ * Open and update trace_array ref count.
++ * Must have the current trace_array passed to it.
++ */
++static int tracing_open_generic_file(struct inode *inode, struct file *filp)
++{
++ struct ftrace_event_file *file = inode->i_private;
++ struct trace_array *tr = file->tr;
++ int ret;
++
++ if (trace_array_get(tr) < 0)
++ return -ENODEV;
++
++ ret = tracing_open_generic(inode, filp);
++ if (ret < 0)
++ trace_array_put(tr);
++ return ret;
++}
++
++static int tracing_release_generic_file(struct inode *inode, struct file *filp)
++{
++ struct ftrace_event_file *file = inode->i_private;
++ struct trace_array *tr = file->tr;
++
++ trace_array_put(tr);
++
++ return 0;
++}
++
++/*
+ * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
+ */
+ static int
+@@ -1046,9 +1075,17 @@ static int subsystem_open(struct inode *
+ /* Some versions of gcc think dir can be uninitialized here */
+ WARN_ON(!dir);
+
++ /* Still need to increment the ref count of the system */
++ if (trace_array_get(tr) < 0) {
++ put_system(dir);
++ return -ENODEV;
++ }
++
+ ret = tracing_open_generic(inode, filp);
+- if (ret < 0)
++ if (ret < 0) {
++ trace_array_put(tr);
+ put_system(dir);
++ }
+
+ return ret;
+ }
+@@ -1059,16 +1096,23 @@ static int system_tr_open(struct inode *
+ struct trace_array *tr = inode->i_private;
+ int ret;
+
++ if (trace_array_get(tr) < 0)
++ return -ENODEV;
++
+ /* Make a temporary dir that has no system but points to tr */
+ dir = kzalloc(sizeof(*dir), GFP_KERNEL);
+- if (!dir)
++ if (!dir) {
++ trace_array_put(tr);
+ return -ENOMEM;
++ }
+
+ dir->tr = tr;
+
+ ret = tracing_open_generic(inode, filp);
+- if (ret < 0)
++ if (ret < 0) {
++ trace_array_put(tr);
+ kfree(dir);
++ }
+
+ filp->private_data = dir;
+
+@@ -1079,6 +1123,8 @@ static int subsystem_release(struct inod
+ {
+ struct ftrace_subsystem_dir *dir = file->private_data;
+
++ trace_array_put(dir->tr);
++
+ /*
+ * If dir->subsystem is NULL, then this is a temporary
+ * descriptor that was made for a trace_array to enable
+@@ -1206,9 +1252,10 @@ static const struct file_operations ftra
+ };
+
+ static const struct file_operations ftrace_enable_fops = {
+- .open = tracing_open_generic,
++ .open = tracing_open_generic_file,
+ .read = event_enable_read,
+ .write = event_enable_write,
++ .release = tracing_release_generic_file,
+ .llseek = default_llseek,
+ };
+
--- /dev/null
+From ff451961a8b2a17667a7bfa39c86fb9b351445db Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Mon, 1 Jul 2013 22:50:29 -0400
+Subject: tracing: Add trace_array_get/put() to handle instance refs better
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit ff451961a8b2a17667a7bfa39c86fb9b351445db upstream.
+
+Commit a695cb58162 "tracing: Prevent deleting instances when they are being read"
+tried to fix a race between deleting a trace instance and reading contents
+of a trace file. But it wasn't good enough. The following could crash the kernel:
+
+ # cd /sys/kernel/debug/tracing/instances
+ # ( while :; do mkdir foo; rmdir foo; done ) &
+ # ( while :; do cat foo/trace &> /dev/null; done ) &
+
+Luckily this can only be done by root user, but it should be fixed regardless.
+
+The problem is that a delete of the file can happen after the reader starts
+to open the file but before it grabs the trace_types_mutex.
+
+The solution is to validate the trace array before using it. If the trace
+array does not exist in the list of trace arrays, then it returns -ENODEV.
+
+There's a possibility that a trace_array could be deleted and a new one
+created and the open would open its file instead. But that is very minor as
+it will just return the data of the new trace array, it may confuse the user
+but it will not crash the system. As this can only be done by root anyway,
+the race will only occur if root is deleting what its trying to read at
+the same time.
+
+Reported-by: Alexander Lam <azl@google.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 83 +++++++++++++++++++++++++++++++++++++++------------
+ 1 file changed, 65 insertions(+), 18 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -193,6 +193,37 @@ static struct trace_array global_trace;
+
+ LIST_HEAD(ftrace_trace_arrays);
+
++int trace_array_get(struct trace_array *this_tr)
++{
++ struct trace_array *tr;
++ int ret = -ENODEV;
++
++ mutex_lock(&trace_types_lock);
++ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
++ if (tr == this_tr) {
++ tr->ref++;
++ ret = 0;
++ break;
++ }
++ }
++ mutex_unlock(&trace_types_lock);
++
++ return ret;
++}
++
++static void __trace_array_put(struct trace_array *this_tr)
++{
++ WARN_ON(!this_tr->ref);
++ this_tr->ref--;
++}
++
++void trace_array_put(struct trace_array *this_tr)
++{
++ mutex_lock(&trace_types_lock);
++ __trace_array_put(this_tr);
++ mutex_unlock(&trace_types_lock);
++}
++
+ int filter_current_check_discard(struct ring_buffer *buffer,
+ struct ftrace_event_call *call, void *rec,
+ struct ring_buffer_event *event)
+@@ -2768,10 +2799,9 @@ static const struct seq_operations trace
+ };
+
+ static struct trace_iterator *
+-__tracing_open(struct inode *inode, struct file *file, bool snapshot)
++__tracing_open(struct trace_array *tr, struct trace_cpu *tc,
++ struct inode *inode, struct file *file, bool snapshot)
+ {
+- struct trace_cpu *tc = inode->i_private;
+- struct trace_array *tr = tc->tr;
+ struct trace_iterator *iter;
+ int cpu;
+
+@@ -2850,8 +2880,6 @@ __tracing_open(struct inode *inode, stru
+ tracing_iter_reset(iter, cpu);
+ }
+
+- tr->ref++;
+-
+ mutex_unlock(&trace_types_lock);
+
+ return iter;
+@@ -2881,17 +2909,20 @@ static int tracing_release(struct inode
+ struct trace_array *tr;
+ int cpu;
+
+- if (!(file->f_mode & FMODE_READ))
++ /* Writes do not use seq_file, need to grab tr from inode */
++ if (!(file->f_mode & FMODE_READ)) {
++ struct trace_cpu *tc = inode->i_private;
++
++ trace_array_put(tc->tr);
+ return 0;
++ }
+
+ iter = m->private;
+ tr = iter->tr;
++ trace_array_put(tr);
+
+ mutex_lock(&trace_types_lock);
+
+- WARN_ON(!tr->ref);
+- tr->ref--;
+-
+ for_each_tracing_cpu(cpu) {
+ if (iter->buffer_iter[cpu])
+ ring_buffer_read_finish(iter->buffer_iter[cpu]);
+@@ -2910,20 +2941,23 @@ static int tracing_release(struct inode
+ kfree(iter->trace);
+ kfree(iter->buffer_iter);
+ seq_release_private(inode, file);
++
+ return 0;
+ }
+
+ static int tracing_open(struct inode *inode, struct file *file)
+ {
++ struct trace_cpu *tc = inode->i_private;
++ struct trace_array *tr = tc->tr;
+ struct trace_iterator *iter;
+ int ret = 0;
+
++ if (trace_array_get(tr) < 0)
++ return -ENODEV;
++
+ /* If this file was open for write, then erase contents */
+ if ((file->f_mode & FMODE_WRITE) &&
+ (file->f_flags & O_TRUNC)) {
+- struct trace_cpu *tc = inode->i_private;
+- struct trace_array *tr = tc->tr;
+-
+ if (tc->cpu == RING_BUFFER_ALL_CPUS)
+ tracing_reset_online_cpus(&tr->trace_buffer);
+ else
+@@ -2931,12 +2965,16 @@ static int tracing_open(struct inode *in
+ }
+
+ if (file->f_mode & FMODE_READ) {
+- iter = __tracing_open(inode, file, false);
++ iter = __tracing_open(tr, tc, inode, file, false);
+ if (IS_ERR(iter))
+ ret = PTR_ERR(iter);
+ else if (trace_flags & TRACE_ITER_LATENCY_FMT)
+ iter->iter_flags |= TRACE_FILE_LAT_FMT;
+ }
++
++ if (ret < 0)
++ trace_array_put(tr);
++
+ return ret;
+ }
+
+@@ -4512,12 +4550,16 @@ struct ftrace_buffer_info {
+ static int tracing_snapshot_open(struct inode *inode, struct file *file)
+ {
+ struct trace_cpu *tc = inode->i_private;
++ struct trace_array *tr = tc->tr;
+ struct trace_iterator *iter;
+ struct seq_file *m;
+ int ret = 0;
+
++ if (trace_array_get(tr) < 0)
++ return -ENODEV;
++
+ if (file->f_mode & FMODE_READ) {
+- iter = __tracing_open(inode, file, true);
++ iter = __tracing_open(tr, tc, inode, file, true);
+ if (IS_ERR(iter))
+ ret = PTR_ERR(iter);
+ } else {
+@@ -4530,13 +4572,16 @@ static int tracing_snapshot_open(struct
+ kfree(m);
+ return -ENOMEM;
+ }
+- iter->tr = tc->tr;
++ iter->tr = tr;
+ iter->trace_buffer = &tc->tr->max_buffer;
+ iter->cpu_file = tc->cpu;
+ m->private = iter;
+ file->private_data = m;
+ }
+
++ if (ret < 0)
++ trace_array_put(tr);
++
+ return ret;
+ }
+
+@@ -4617,9 +4662,12 @@ out:
+ static int tracing_snapshot_release(struct inode *inode, struct file *file)
+ {
+ struct seq_file *m = file->private_data;
++ int ret;
++
++ ret = tracing_release(inode, file);
+
+ if (file->f_mode & FMODE_READ)
+- return tracing_release(inode, file);
++ return ret;
+
+ /* If write only, the seq_file is just a stub */
+ if (m)
+@@ -4864,8 +4912,7 @@ static int tracing_buffers_release(struc
+
+ mutex_lock(&trace_types_lock);
+
+- WARN_ON(!iter->tr->ref);
+- iter->tr->ref--;
++ __trace_array_put(iter->tr);
+
+ if (info->spare)
+ ring_buffer_free_read_page(iter->trace_buffer->buffer, info->spare);
--- /dev/null
+From 6e94a780374ed31b280f939d4757e8d7858dff16 Mon Sep 17 00:00:00 2001
+From: Steven Rostedt <rostedt@goodmis.org>
+Date: Thu, 27 Jun 2013 10:58:31 -0400
+Subject: tracing: Failed to create system directory
+
+From: Steven Rostedt <rostedt@goodmis.org>
+
+commit 6e94a780374ed31b280f939d4757e8d7858dff16 upstream.
+
+Running the following:
+
+ # cd /sys/kernel/debug/tracing
+ # echo p:i do_sys_open > kprobe_events
+ # echo p:j schedule >> kprobe_events
+ # cat kprobe_events
+p:kprobes/i do_sys_open
+p:kprobes/j schedule
+ # echo p:i do_sys_open >> kprobe_events
+ # cat kprobe_events
+p:kprobes/j schedule
+p:kprobes/i do_sys_open
+ # ls /sys/kernel/debug/tracing/events/kprobes/
+enable filter j
+
+Notice that the 'i' is missing from the kprobes directory.
+
+The console produces:
+
+"Failed to create system directory kprobes"
+
+This is because kprobes passes in an allocated name for the system
+and the ftrace event subsystem saves off that name instead of creating
+a duplicate for it. But kprobes may free the system name, making
+the pointer to it invalid.
+
+This bug was introduced by 92edca073c37 "tracing: Use direct field, type
+and system names" which switched from using kstrdup() on the system name
+in favor of just keeping a pointer to it, as the internal ftrace event
+system names are static and exist for the life of the computer being booted.
+
+Instead of reverting back to duplicating system names again, we can use
+core_kernel_data() to determine if the passed in name was allocated or
+static. Then use the MSB of the ref_count to be a flag to keep track if
+the name was allocated or not. Then we can still save from having to duplicate
+strings that will always exist, but still copy the ones that may be freed.
+
+Reported-by: "zhangwei(Jovi)" <jovi.zhangwei@huawei.com>
+Reported-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+Tested-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events.c | 41 +++++++++++++++++++++++++++++++++++------
+ 1 file changed, 35 insertions(+), 6 deletions(-)
+
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -41,6 +41,23 @@ static LIST_HEAD(ftrace_common_fields);
+ static struct kmem_cache *field_cachep;
+ static struct kmem_cache *file_cachep;
+
++#define SYSTEM_FL_FREE_NAME (1 << 31)
++
++static inline int system_refcount(struct event_subsystem *system)
++{
++ return system->ref_count & ~SYSTEM_FL_FREE_NAME;
++}
++
++static int system_refcount_inc(struct event_subsystem *system)
++{
++ return (system->ref_count++) & ~SYSTEM_FL_FREE_NAME;
++}
++
++static int system_refcount_dec(struct event_subsystem *system)
++{
++ return (--system->ref_count) & ~SYSTEM_FL_FREE_NAME;
++}
++
+ /* Double loops, do not use break, only goto's work */
+ #define do_for_each_event_file(tr, file) \
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) { \
+@@ -349,8 +366,8 @@ static void __put_system(struct event_su
+ {
+ struct event_filter *filter = system->filter;
+
+- WARN_ON_ONCE(system->ref_count == 0);
+- if (--system->ref_count)
++ WARN_ON_ONCE(system_refcount(system) == 0);
++ if (system_refcount_dec(system))
+ return;
+
+ list_del(&system->list);
+@@ -359,13 +376,15 @@ static void __put_system(struct event_su
+ kfree(filter->filter_string);
+ kfree(filter);
+ }
++ if (system->ref_count & SYSTEM_FL_FREE_NAME)
++ kfree(system->name);
+ kfree(system);
+ }
+
+ static void __get_system(struct event_subsystem *system)
+ {
+- WARN_ON_ONCE(system->ref_count == 0);
+- system->ref_count++;
++ WARN_ON_ONCE(system_refcount(system) == 0);
++ system_refcount_inc(system);
+ }
+
+ static void __get_system_dir(struct ftrace_subsystem_dir *dir)
+@@ -379,7 +398,7 @@ static void __put_system_dir(struct ftra
+ {
+ WARN_ON_ONCE(dir->ref_count == 0);
+ /* If the subsystem is about to be freed, the dir must be too */
+- WARN_ON_ONCE(dir->subsystem->ref_count == 1 && dir->ref_count != 1);
++ WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);
+
+ __put_system(dir->subsystem);
+ if (!--dir->ref_count)
+@@ -1279,7 +1298,15 @@ create_new_subsystem(const char *name)
+ return NULL;
+
+ system->ref_count = 1;
+- system->name = name;
++
++ /* Only allocate if dynamic (kprobes and modules) */
++ if (!core_kernel_data((unsigned long)name)) {
++ system->ref_count |= SYSTEM_FL_FREE_NAME;
++ system->name = kstrdup(name, GFP_KERNEL);
++ if (!system->name)
++ goto out_free;
++ } else
++ system->name = name;
+
+ system->filter = NULL;
+
+@@ -1292,6 +1319,8 @@ create_new_subsystem(const char *name)
+ return system;
+
+ out_free:
++ if (system->ref_count & SYSTEM_FL_FREE_NAME)
++ kfree(system->name);
+ kfree(system);
+ return NULL;
+ }
--- /dev/null
+From 11034ae9c20f4057a6127fc965906417978e69b2 Mon Sep 17 00:00:00 2001
+From: "zhangwei(Jovi)" <jovi.zhangwei@huawei.com>
+Date: Wed, 10 Apr 2013 11:26:23 +0800
+Subject: tracing: Fix irqs-off tag display in syscall tracing
+
+From: "zhangwei(Jovi)" <jovi.zhangwei@huawei.com>
+
+commit 11034ae9c20f4057a6127fc965906417978e69b2 upstream.
+
+All syscall tracing irqs-off tags are wrong, the syscall enter entry doesn't
+disable irqs.
+
+ [root@jovi tracing]#echo "syscalls:sys_enter_open" > set_event
+ [root@jovi tracing]# cat trace
+ # tracer: nop
+ #
+ # entries-in-buffer/entries-written: 13/13 #P:2
+ #
+ # _-----=> irqs-off
+ # / _----=> need-resched
+ # | / _---=> hardirq/softirq
+ # || / _--=> preempt-depth
+ # ||| / delay
+ # TASK-PID CPU# |||| TIMESTAMP FUNCTION
+ # | | | |||| | |
+ irqbalance-513 [000] d... 56115.496766: sys_open(filename: 804e1a6, flags: 0, mode: 1b6)
+ irqbalance-513 [000] d... 56115.497008: sys_open(filename: 804e1bb, flags: 0, mode: 1b6)
+ sendmail-771 [000] d... 56115.827982: sys_open(filename: b770e6d1, flags: 0, mode: 1b6)
+
+The reason is syscall tracing doesn't record irq_flags into buffer.
+The proper display is:
+
+ [root@jovi tracing]#echo "syscalls:sys_enter_open" > set_event
+ [root@jovi tracing]# cat trace
+ # tracer: nop
+ #
+ # entries-in-buffer/entries-written: 14/14 #P:2
+ #
+ # _-----=> irqs-off
+ # / _----=> need-resched
+ # | / _---=> hardirq/softirq
+ # || / _--=> preempt-depth
+ # ||| / delay
+ # TASK-PID CPU# |||| TIMESTAMP FUNCTION
+ # | | | |||| | |
+ irqbalance-514 [001] .... 46.213921: sys_open(filename: 804e1a6, flags: 0, mode: 1b6)
+ irqbalance-514 [001] .... 46.214160: sys_open(filename: 804e1bb, flags: 0, mode: 1b6)
+ <...>-920 [001] .... 47.307260: sys_open(filename: 4e82a0c5, flags: 80000, mode: 0)
+
+Link: http://lkml.kernel.org/r/1365564393-10972-3-git-send-email-jovi.zhangwei@huawei.com
+
+Signed-off-by: zhangwei(Jovi) <jovi.zhangwei@huawei.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_syscalls.c | 21 +++++++++++++++++----
+ 1 file changed, 17 insertions(+), 4 deletions(-)
+
+--- a/kernel/trace/trace_syscalls.c
++++ b/kernel/trace/trace_syscalls.c
+@@ -306,6 +306,8 @@ static void ftrace_syscall_enter(void *d
+ struct syscall_metadata *sys_data;
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
++ unsigned long irq_flags;
++ int pc;
+ int syscall_nr;
+ int size;
+
+@@ -321,9 +323,12 @@ static void ftrace_syscall_enter(void *d
+
+ size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args;
+
++ local_save_flags(irq_flags);
++ pc = preempt_count();
++
+ buffer = tr->trace_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer,
+- sys_data->enter_event->event.type, size, 0, 0);
++ sys_data->enter_event->event.type, size, irq_flags, pc);
+ if (!event)
+ return;
+
+@@ -333,7 +338,8 @@ static void ftrace_syscall_enter(void *d
+
+ if (!filter_current_check_discard(buffer, sys_data->enter_event,
+ entry, event))
+- trace_current_buffer_unlock_commit(buffer, event, 0, 0);
++ trace_current_buffer_unlock_commit(buffer, event,
++ irq_flags, pc);
+ }
+
+ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
+@@ -343,6 +349,8 @@ static void ftrace_syscall_exit(void *da
+ struct syscall_metadata *sys_data;
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
++ unsigned long irq_flags;
++ int pc;
+ int syscall_nr;
+
+ syscall_nr = trace_get_syscall_nr(current, regs);
+@@ -355,9 +363,13 @@ static void ftrace_syscall_exit(void *da
+ if (!sys_data)
+ return;
+
++ local_save_flags(irq_flags);
++ pc = preempt_count();
++
+ buffer = tr->trace_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer,
+- sys_data->exit_event->event.type, sizeof(*entry), 0, 0);
++ sys_data->exit_event->event.type, sizeof(*entry),
++ irq_flags, pc);
+ if (!event)
+ return;
+
+@@ -367,7 +379,8 @@ static void ftrace_syscall_exit(void *da
+
+ if (!filter_current_check_discard(buffer, sys_data->exit_event,
+ entry, event))
+- trace_current_buffer_unlock_commit(buffer, event, 0, 0);
++ trace_current_buffer_unlock_commit(buffer, event,
++ irq_flags, pc);
+ }
+
+ static int reg_event_syscall_enter(struct ftrace_event_file *file,
--- /dev/null
+From 2a6c24afab70dbcfee49f4c76e1511eec1a3298b Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Tue, 2 Jul 2013 14:48:23 -0400
+Subject: tracing: Fix race between deleting buffer and setting events
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 2a6c24afab70dbcfee49f4c76e1511eec1a3298b upstream.
+
+While analyzing the code, I discovered that there's a potential race between
+deleting a trace instance and setting events. There are a few races that can
+occur if events are being traced as the buffer is being deleted. Mostly the
+problem comes with freeing the descriptor used by the trace event callback.
+To prevent problems like this, the events are disabled before the buffer is
+deleted. The problem with the current solution is that the event_mutex is let
+go between disabling the events and freeing the files, which means that the events
+could be enabled again while the freeing takes place.
+
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace_events.c | 23 +++++++++++++++++------
+ 1 file changed, 17 insertions(+), 6 deletions(-)
+
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -415,14 +415,14 @@ static void put_system(struct ftrace_sub
+ /*
+ * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
+ */
+-static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
+- const char *sub, const char *event, int set)
++static int
++__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
++ const char *sub, const char *event, int set)
+ {
+ struct ftrace_event_file *file;
+ struct ftrace_event_call *call;
+ int ret = -EINVAL;
+
+- mutex_lock(&event_mutex);
+ list_for_each_entry(file, &tr->events, list) {
+
+ call = file->event_call;
+@@ -448,6 +448,17 @@ static int __ftrace_set_clr_event(struct
+
+ ret = 0;
+ }
++
++ return ret;
++}
++
++static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
++ const char *sub, const char *event, int set)
++{
++ int ret;
++
++ mutex_lock(&event_mutex);
++ ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
+ mutex_unlock(&event_mutex);
+
+ return ret;
+@@ -2367,11 +2378,11 @@ early_event_add_tracer(struct dentry *pa
+
+ int event_trace_del_tracer(struct trace_array *tr)
+ {
+- /* Disable any running events */
+- __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
+-
+ mutex_lock(&event_mutex);
+
++ /* Disable any running events */
++ __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
++
+ down_write(&trace_event_sem);
+ __trace_remove_event_dirs(tr);
+ debugfs_remove_recursive(tr->event_dir);
--- /dev/null
+From 7b85af63034818e43aee6c1d7bf1c7c6796a9073 Mon Sep 17 00:00:00 2001
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+Date: Mon, 1 Jul 2013 23:34:22 -0400
+Subject: tracing: Get trace_array ref counts when accessing trace files
+
+From: "Steven Rostedt (Red Hat)" <rostedt@goodmis.org>
+
+commit 7b85af63034818e43aee6c1d7bf1c7c6796a9073 upstream.
+
+When a trace file is opened that may access a trace array, it must
+increment its ref count to prevent it from being deleted.
+
+Reported-by: Alexander Lam <azl@google.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 121 +++++++++++++++++++++++++++++++++++++++++++++++----
+ 1 file changed, 112 insertions(+), 9 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -2902,6 +2902,43 @@ int tracing_open_generic(struct inode *i
+ return 0;
+ }
+
++/*
++ * Open and update trace_array ref count.
++ * Must have the current trace_array passed to it.
++ */
++int tracing_open_generic_tr(struct inode *inode, struct file *filp)
++{
++ struct trace_array *tr = inode->i_private;
++
++ if (tracing_disabled)
++ return -ENODEV;
++
++ if (trace_array_get(tr) < 0)
++ return -ENODEV;
++
++ filp->private_data = inode->i_private;
++
++ return 0;
++
++}
++
++int tracing_open_generic_tc(struct inode *inode, struct file *filp)
++{
++ struct trace_cpu *tc = inode->i_private;
++ struct trace_array *tr = tc->tr;
++
++ if (tracing_disabled)
++ return -ENODEV;
++
++ if (trace_array_get(tr) < 0)
++ return -ENODEV;
++
++ filp->private_data = inode->i_private;
++
++ return 0;
++
++}
++
+ static int tracing_release(struct inode *inode, struct file *file)
+ {
+ struct seq_file *m = file->private_data;
+@@ -2945,6 +2982,32 @@ static int tracing_release(struct inode
+ return 0;
+ }
+
++static int tracing_release_generic_tr(struct inode *inode, struct file *file)
++{
++ struct trace_array *tr = inode->i_private;
++
++ trace_array_put(tr);
++ return 0;
++}
++
++static int tracing_release_generic_tc(struct inode *inode, struct file *file)
++{
++ struct trace_cpu *tc = inode->i_private;
++ struct trace_array *tr = tc->tr;
++
++ trace_array_put(tr);
++ return 0;
++}
++
++static int tracing_single_release_tr(struct inode *inode, struct file *file)
++{
++ struct trace_array *tr = inode->i_private;
++
++ trace_array_put(tr);
++
++ return single_release(inode, file);
++}
++
+ static int tracing_open(struct inode *inode, struct file *file)
+ {
+ struct trace_cpu *tc = inode->i_private;
+@@ -3331,9 +3394,14 @@ tracing_trace_options_write(struct file
+
+ static int tracing_trace_options_open(struct inode *inode, struct file *file)
+ {
++ struct trace_array *tr = inode->i_private;
++
+ if (tracing_disabled)
+ return -ENODEV;
+
++ if (trace_array_get(tr) < 0)
++ return -ENODEV;
++
+ return single_open(file, tracing_trace_options_show, inode->i_private);
+ }
+
+@@ -3341,7 +3409,7 @@ static const struct file_operations trac
+ .open = tracing_trace_options_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = single_release,
++ .release = tracing_single_release_tr,
+ .write = tracing_trace_options_write,
+ };
+
+@@ -3829,6 +3897,9 @@ static int tracing_open_pipe(struct inod
+ if (tracing_disabled)
+ return -ENODEV;
+
++ if (trace_array_get(tr) < 0)
++ return -ENODEV;
++
+ mutex_lock(&trace_types_lock);
+
+ /* create a buffer to store the information to pass to userspace */
+@@ -3881,6 +3952,7 @@ out:
+ fail:
+ kfree(iter->trace);
+ kfree(iter);
++ __trace_array_put(tr);
+ mutex_unlock(&trace_types_lock);
+ return ret;
+ }
+@@ -3888,6 +3960,8 @@ fail:
+ static int tracing_release_pipe(struct inode *inode, struct file *file)
+ {
+ struct trace_iterator *iter = file->private_data;
++ struct trace_cpu *tc = inode->i_private;
++ struct trace_array *tr = tc->tr;
+
+ mutex_lock(&trace_types_lock);
+
+@@ -3901,6 +3975,8 @@ static int tracing_release_pipe(struct i
+ kfree(iter->trace);
+ kfree(iter);
+
++ trace_array_put(tr);
++
+ return 0;
+ }
+
+@@ -4358,6 +4434,8 @@ tracing_free_buffer_release(struct inode
+ /* resize the ring buffer to 0 */
+ tracing_resize_ring_buffer(tr, 0, RING_BUFFER_ALL_CPUS);
+
++ trace_array_put(tr);
++
+ return 0;
+ }
+
+@@ -4534,10 +4612,20 @@ static ssize_t tracing_clock_write(struc
+
+ static int tracing_clock_open(struct inode *inode, struct file *file)
+ {
++ struct trace_array *tr = inode->i_private;
++ int ret;
++
+ if (tracing_disabled)
+ return -ENODEV;
+
+- return single_open(file, tracing_clock_show, inode->i_private);
++ if (trace_array_get(tr))
++ return -ENODEV;
++
++ ret = single_open(file, tracing_clock_show, inode->i_private);
++ if (ret < 0)
++ trace_array_put(tr);
++
++ return ret;
+ }
+
+ struct ftrace_buffer_info {
+@@ -4733,34 +4821,38 @@ static const struct file_operations trac
+ };
+
+ static const struct file_operations tracing_entries_fops = {
+- .open = tracing_open_generic,
++ .open = tracing_open_generic_tc,
+ .read = tracing_entries_read,
+ .write = tracing_entries_write,
+ .llseek = generic_file_llseek,
++ .release = tracing_release_generic_tc,
+ };
+
+ static const struct file_operations tracing_total_entries_fops = {
+- .open = tracing_open_generic,
++ .open = tracing_open_generic_tr,
+ .read = tracing_total_entries_read,
+ .llseek = generic_file_llseek,
++ .release = tracing_release_generic_tr,
+ };
+
+ static const struct file_operations tracing_free_buffer_fops = {
++ .open = tracing_open_generic_tr,
+ .write = tracing_free_buffer_write,
+ .release = tracing_free_buffer_release,
+ };
+
+ static const struct file_operations tracing_mark_fops = {
+- .open = tracing_open_generic,
++ .open = tracing_open_generic_tr,
+ .write = tracing_mark_write,
+ .llseek = generic_file_llseek,
++ .release = tracing_release_generic_tr,
+ };
+
+ static const struct file_operations trace_clock_fops = {
+ .open = tracing_clock_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+- .release = single_release,
++ .release = tracing_single_release_tr,
+ .write = tracing_clock_write,
+ };
+
+@@ -4788,13 +4880,19 @@ static int tracing_buffers_open(struct i
+ struct trace_cpu *tc = inode->i_private;
+ struct trace_array *tr = tc->tr;
+ struct ftrace_buffer_info *info;
++ int ret;
+
+ if (tracing_disabled)
+ return -ENODEV;
+
++ if (trace_array_get(tr) < 0)
++ return -ENODEV;
++
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+- if (!info)
++ if (!info) {
++ trace_array_put(tr);
+ return -ENOMEM;
++ }
+
+ mutex_lock(&trace_types_lock);
+
+@@ -4812,7 +4910,11 @@ static int tracing_buffers_open(struct i
+
+ mutex_unlock(&trace_types_lock);
+
+- return nonseekable_open(inode, filp);
++ ret = nonseekable_open(inode, filp);
++ if (ret < 0)
++ trace_array_put(tr);
++
++ return ret;
+ }
+
+ static unsigned int
+@@ -5707,9 +5809,10 @@ rb_simple_write(struct file *filp, const
+ }
+
+ static const struct file_operations rb_simple_fops = {
+- .open = tracing_open_generic,
++ .open = tracing_open_generic_tr,
+ .read = rb_simple_read,
+ .write = rb_simple_write,
++ .release = tracing_release_generic_tr,
+ .llseek = default_llseek,
+ };
+
--- /dev/null
+From 2d71619c59fac95a5415a326162fa046161b938c Mon Sep 17 00:00:00 2001
+From: Alexander Z Lam <azl@google.com>
+Date: Mon, 1 Jul 2013 15:31:24 -0700
+Subject: tracing: Make trace_marker use the correct per-instance buffer
+
+From: Alexander Z Lam <azl@google.com>
+
+commit 2d71619c59fac95a5415a326162fa046161b938c upstream.
+
+The trace_marker file was present for each new instance created, but it
+added the trace mark to the global trace buffer instead of to
+the instance's buffer.
+
+Link: http://lkml.kernel.org/r/1372717885-4543-2-git-send-email-azl@google.com
+
+Signed-off-by: Alexander Z Lam <azl@google.com>
+Cc: David Sharp <dhsharp@google.com>
+Cc: Vaibhav Nagarnaik <vnagarnaik@google.com>
+Cc: Alexander Z Lam <lambchop468@gmail.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -4328,6 +4328,7 @@ tracing_mark_write(struct file *filp, co
+ size_t cnt, loff_t *fpos)
+ {
+ unsigned long addr = (unsigned long)ubuf;
++ struct trace_array *tr = filp->private_data;
+ struct ring_buffer_event *event;
+ struct ring_buffer *buffer;
+ struct print_entry *entry;
+@@ -4387,7 +4388,7 @@ tracing_mark_write(struct file *filp, co
+
+ local_save_flags(irq_flags);
+ size = sizeof(*entry) + cnt + 2; /* possible \n added */
+- buffer = global_trace.trace_buffer.buffer;
++ buffer = tr->trace_buffer.buffer;
+ event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
+ irq_flags, preempt_count());
+ if (!event) {
--- /dev/null
+From a82274151af2b075163e3c42c828529dee311487 Mon Sep 17 00:00:00 2001
+From: Alexander Z Lam <azl@google.com>
+Date: Mon, 1 Jul 2013 19:37:54 -0700
+Subject: tracing: Protect ftrace_trace_arrays list in trace_events.c
+
+From: Alexander Z Lam <azl@google.com>
+
+commit a82274151af2b075163e3c42c828529dee311487 upstream.
+
+There are multiple places where the ftrace_trace_arrays list is accessed in
+trace_events.c without the trace_types_lock held.
+
+Link: http://lkml.kernel.org/r/1372732674-22726-1-git-send-email-azl@google.com
+
+Signed-off-by: Alexander Z Lam <azl@google.com>
+Cc: Vaibhav Nagarnaik <vnagarnaik@google.com>
+Cc: David Sharp <dhsharp@google.com>
+Cc: Alexander Z Lam <lambchop468@gmail.com>
+Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/trace/trace.c | 2 +-
+ kernel/trace/trace.h | 2 ++
+ kernel/trace/trace_events.c | 11 ++++++++++-
+ 3 files changed, 13 insertions(+), 2 deletions(-)
+
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -240,7 +240,7 @@ static struct tracer *trace_types __rea
+ /*
+ * trace_types_lock is used to protect the trace_types list.
+ */
+-static DEFINE_MUTEX(trace_types_lock);
++DEFINE_MUTEX(trace_types_lock);
+
+ /*
+ * serialize the access of the ring buffer
+--- a/kernel/trace/trace.h
++++ b/kernel/trace/trace.h
+@@ -224,6 +224,8 @@ enum {
+
+ extern struct list_head ftrace_trace_arrays;
+
++extern struct mutex trace_types_lock;
++
+ /*
+ * The global tracer (top) should be the first trace array added,
+ * but we check the flag anyway.
+--- a/kernel/trace/trace_events.c
++++ b/kernel/trace/trace_events.c
+@@ -1011,6 +1011,7 @@ static int subsystem_open(struct inode *
+ int ret;
+
+ /* Make sure the system still exists */
++ mutex_lock(&trace_types_lock);
+ mutex_lock(&event_mutex);
+ list_for_each_entry(tr, &ftrace_trace_arrays, list) {
+ list_for_each_entry(dir, &tr->systems, list) {
+@@ -1026,6 +1027,7 @@ static int subsystem_open(struct inode *
+ }
+ exit_loop:
+ mutex_unlock(&event_mutex);
++ mutex_unlock(&trace_types_lock);
+
+ if (!system)
+ return -ENODEV;
+@@ -1620,6 +1622,7 @@ static void __add_event_to_tracers(struc
+ int trace_add_event_call(struct ftrace_event_call *call)
+ {
+ int ret;
++ mutex_lock(&trace_types_lock);
+ mutex_lock(&event_mutex);
+
+ ret = __register_event(call, NULL);
+@@ -1627,11 +1630,13 @@ int trace_add_event_call(struct ftrace_e
+ __add_event_to_tracers(call, NULL);
+
+ mutex_unlock(&event_mutex);
++ mutex_unlock(&trace_types_lock);
+ return ret;
+ }
+
+ /*
+- * Must be called under locking both of event_mutex and trace_event_sem.
++ * Must be called under locking of trace_types_lock, event_mutex and
++ * trace_event_sem.
+ */
+ static void __trace_remove_event_call(struct ftrace_event_call *call)
+ {
+@@ -1643,11 +1648,13 @@ static void __trace_remove_event_call(st
+ /* Remove an event_call */
+ void trace_remove_event_call(struct ftrace_event_call *call)
+ {
++ mutex_lock(&trace_types_lock);
+ mutex_lock(&event_mutex);
+ down_write(&trace_event_sem);
+ __trace_remove_event_call(call);
+ up_write(&trace_event_sem);
+ mutex_unlock(&event_mutex);
++ mutex_unlock(&trace_types_lock);
+ }
+
+ #define for_each_event(event, start, end) \
+@@ -1791,6 +1798,7 @@ static int trace_module_notify(struct no
+ {
+ struct module *mod = data;
+
++ mutex_lock(&trace_types_lock);
+ mutex_lock(&event_mutex);
+ switch (val) {
+ case MODULE_STATE_COMING:
+@@ -1801,6 +1809,7 @@ static int trace_module_notify(struct no
+ break;
+ }
+ mutex_unlock(&event_mutex);
++ mutex_unlock(&trace_types_lock);
+
+ return 0;
+ }
--- /dev/null
+From c5a771d0678f9613e9f89cf1a5bdcfa5b08b225b Mon Sep 17 00:00:00 2001
+From: Max Filippov <jcmvbkbc@gmail.com>
+Date: Sun, 9 Jun 2013 04:52:11 +0400
+Subject: xtensa: adjust boot parameters address when INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is selected
+
+From: Max Filippov <jcmvbkbc@gmail.com>
+
+commit c5a771d0678f9613e9f89cf1a5bdcfa5b08b225b upstream.
+
+The virtual address of boot parameters chain is passed to the kernel via
+a2 register. Adjust it in case it is remapped during MMUv3 -> MMUv2
+mapping change, i.e. when it is in the first 128M.
+
+Also fix interpretation of initrd and FDT addresses passed in the boot
+parameters: these are physical addresses.
+
+Reported-by: Baruch Siach <baruch@tkos.co.il>
+Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
+Signed-off-by: Chris Zankel <chris@zankel.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/xtensa/kernel/head.S | 9 +++++++++
+ arch/xtensa/kernel/setup.c | 6 +++---
+ 2 files changed, 12 insertions(+), 3 deletions(-)
+
+--- a/arch/xtensa/kernel/head.S
++++ b/arch/xtensa/kernel/head.S
+@@ -68,6 +68,15 @@ _SetupMMU:
+
+ #ifdef CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX
+ initialize_mmu
++#if defined(CONFIG_MMU) && XCHAL_HAVE_PTP_MMU && XCHAL_HAVE_SPANNING_WAY
++ rsr a2, excsave1
++ movi a3, 0x08000000
++ bgeu a2, a3, 1f
++ movi a3, 0xd0000000
++ add a2, a2, a3
++ wsr a2, excsave1
++1:
++#endif
+ #endif
+ .end no-absolute-literals
+
+--- a/arch/xtensa/kernel/setup.c
++++ b/arch/xtensa/kernel/setup.c
+@@ -152,8 +152,8 @@ static int __init parse_tag_initrd(const
+ {
+ meminfo_t* mi;
+ mi = (meminfo_t*)(tag->data);
+- initrd_start = (void*)(mi->start);
+- initrd_end = (void*)(mi->end);
++ initrd_start = __va(mi->start);
++ initrd_end = __va(mi->end);
+
+ return 0;
+ }
+@@ -164,7 +164,7 @@ __tagtable(BP_TAG_INITRD, parse_tag_init
+
+ static int __init parse_tag_fdt(const bp_tag_t *tag)
+ {
+- dtb_start = (void *)(tag->data[0]);
++ dtb_start = __va(tag->data[0]);
+ return 0;
+ }
+