git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.5-stable patches
author: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 21 May 2016 05:33:46 +0000 (22:33 -0700)
committer: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 21 May 2016 05:33:46 +0000 (22:33 -0700)
added patches:
perf-core-fix-perf_event_open-vs.-execve-race.patch
perf-x86-intel-pt-generate-pmi-in-the-stop-region-as-well.patch

queue-4.5/perf-core-fix-perf_event_open-vs.-execve-race.patch [new file with mode: 0644]
queue-4.5/perf-x86-intel-pt-generate-pmi-in-the-stop-region-as-well.patch [new file with mode: 0644]
queue-4.5/series

diff --git a/queue-4.5/perf-core-fix-perf_event_open-vs.-execve-race.patch b/queue-4.5/perf-core-fix-perf_event_open-vs.-execve-race.patch
new file mode 100644 (file)
index 0000000..5c4399c
--- /dev/null
@@ -0,0 +1,170 @@
+From 79c9ce57eb2d5f1497546a3946b4ae21b6fdc438 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 26 Apr 2016 11:36:53 +0200
+Subject: perf/core: Fix perf_event_open() vs. execve() race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 79c9ce57eb2d5f1497546a3946b4ae21b6fdc438 upstream.
+
+Jann reported that the ptrace_may_access() check in
+find_lively_task_by_vpid() is racy against exec().
+
+Specifically:
+
+  perf_event_open()            execve()
+
+  ptrace_may_access()
+                               commit_creds()
+  ...                          if (get_dumpable() != SUID_DUMP_USER)
+                                 perf_event_exit_task();
+  perf_install_in_context()
+
+would result in installing a counter across the creds boundary.
+
+Fix this by wrapping lots of perf_event_open() in cred_guard_mutex.
+This should be fine as perf_event_exit_task() is already called with
+cred_guard_mutex held, so all perf locks already nest inside it.
+
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: He Kuang <hekuang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c |   52 +++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 36 insertions(+), 16 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -1090,6 +1090,7 @@ static void put_ctx(struct perf_event_co
+  * function.
+  *
+  * Lock order:
++ *    cred_guard_mutex
+  *    task_struct::perf_event_mutex
+  *      perf_event_context::mutex
+  *        perf_event::child_mutex;
+@@ -3415,7 +3416,6 @@ static struct task_struct *
+ find_lively_task_by_vpid(pid_t vpid)
+ {
+       struct task_struct *task;
+-      int err;
+       rcu_read_lock();
+       if (!vpid)
+@@ -3429,16 +3429,7 @@ find_lively_task_by_vpid(pid_t vpid)
+       if (!task)
+               return ERR_PTR(-ESRCH);
+-      /* Reuse ptrace permission checks for now. */
+-      err = -EACCES;
+-      if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+-              goto errout;
+-
+       return task;
+-errout:
+-      put_task_struct(task);
+-      return ERR_PTR(err);
+-
+ }
+ /*
+@@ -8360,6 +8351,24 @@ SYSCALL_DEFINE5(perf_event_open,
+       get_online_cpus();
++      if (task) {
++              err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
++              if (err)
++                      goto err_cpus;
++
++              /*
++               * Reuse ptrace permission checks for now.
++               *
++               * We must hold cred_guard_mutex across this and any potential
++               * perf_install_in_context() call for this new event to
++               * serialize against exec() altering our credentials (and the
++               * perf_event_exit_task() that could imply).
++               */
++              err = -EACCES;
++              if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
++                      goto err_cred;
++      }
++
+       if (flags & PERF_FLAG_PID_CGROUP)
+               cgroup_fd = pid;
+@@ -8367,7 +8376,7 @@ SYSCALL_DEFINE5(perf_event_open,
+                                NULL, NULL, cgroup_fd);
+       if (IS_ERR(event)) {
+               err = PTR_ERR(event);
+-              goto err_cpus;
++              goto err_cred;
+       }
+       if (is_sampling_event(event)) {
+@@ -8426,11 +8435,6 @@ SYSCALL_DEFINE5(perf_event_open,
+               goto err_context;
+       }
+-      if (task) {
+-              put_task_struct(task);
+-              task = NULL;
+-      }
+-
+       /*
+        * Look up the group leader (we will attach this event to it):
+        */
+@@ -8528,6 +8532,11 @@ SYSCALL_DEFINE5(perf_event_open,
+       WARN_ON_ONCE(ctx->parent_ctx);
++      /*
++       * This is the point on no return; we cannot fail hereafter. This is
++       * where we start modifying current state.
++       */
++
+       if (move_group) {
+               /*
+                * See perf_event_ctx_lock() for comments on the details
+@@ -8599,6 +8608,11 @@ SYSCALL_DEFINE5(perf_event_open,
+               mutex_unlock(&gctx->mutex);
+       mutex_unlock(&ctx->mutex);
++      if (task) {
++              mutex_unlock(&task->signal->cred_guard_mutex);
++              put_task_struct(task);
++      }
++
+       put_online_cpus();
+       mutex_lock(&current->perf_event_mutex);
+@@ -8631,6 +8645,9 @@ err_alloc:
+        */
+       if (!event_file)
+               free_event(event);
++err_cred:
++      if (task)
++              mutex_unlock(&task->signal->cred_guard_mutex);
+ err_cpus:
+       put_online_cpus();
+ err_task:
+@@ -8915,6 +8932,9 @@ static void perf_event_exit_task_context
+ /*
+  * When a child task exits, feed back event values to parent events.
++ *
++ * Can be called with cred_guard_mutex held when called from
++ * install_exec_creds().
+  */
+ void perf_event_exit_task(struct task_struct *child)
+ {
diff --git a/queue-4.5/perf-x86-intel-pt-generate-pmi-in-the-stop-region-as-well.patch b/queue-4.5/perf-x86-intel-pt-generate-pmi-in-the-stop-region-as-well.patch
new file mode 100644 (file)
index 0000000..aacf729
--- /dev/null
@@ -0,0 +1,62 @@
+From ab92b232ae05c382c3df0e3d6a5c6d16b639ac8c Mon Sep 17 00:00:00 2001
+From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Date: Tue, 10 May 2016 16:18:32 +0300
+Subject: perf/x86/intel/pt: Generate PMI in the STOP region as well
+
+From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+
+commit ab92b232ae05c382c3df0e3d6a5c6d16b639ac8c upstream.
+
+Currently, the PT driver always sets the PMI bit one region (page) before
+the STOP region so that we can wake up the consumer before we run out of
+room in the buffer and have to disable the event. However, we also need
+an interrupt in the last output region, so that we actually get to disable
+the event (if no more room from new data is available at that point),
+otherwise hardware just quietly refuses to start, but the event is
+scheduled in and we end up losing trace data till the event gets removed.
+
+For a cpu-wide event it is even worse since there may not be any
+re-scheduling at all and no chance for the ring buffer code to notice
+that its buffer is filled up and the event needs to be disabled (so that
+the consumer can re-enable it when it finishes reading the data out). In
+other words, all the trace data will be lost after the buffer gets filled
+up.
+
+This patch makes PT also generate a PMI when the last output region is
+full.
+
+Reported-by: Markus Metzger <markus.t.metzger@intel.com>
+Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: vince@deater.net
+Link: http://lkml.kernel.org/r/1462886313-13660-2-git-send-email-alexander.shishkin@linux.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
+@@ -709,6 +709,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
+       /* clear STOP and INT from current entry */
+       buf->topa_index[buf->stop_pos]->stop = 0;
++      buf->topa_index[buf->stop_pos]->intr = 0;
+       buf->topa_index[buf->intr_pos]->intr = 0;
+       /* how many pages till the STOP marker */
+@@ -733,6 +734,7 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
+       buf->intr_pos = idx;
+       buf->topa_index[buf->stop_pos]->stop = 1;
++      buf->topa_index[buf->stop_pos]->intr = 1;
+       buf->topa_index[buf->intr_pos]->intr = 1;
+       return 0;
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..2583666e830f0d00c6a6b434b0bb97c1addbcc45 100644 (file)
@@ -0,0 +1,2 @@
+perf-x86-intel-pt-generate-pmi-in-the-stop-region-as-well.patch
+perf-core-fix-perf_event_open-vs.-execve-race.patch