git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 21 May 2016 05:33:35 +0000 (22:33 -0700)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Sat, 21 May 2016 05:33:35 +0000 (22:33 -0700)
added patches:
perf-core-fix-perf_event_open-vs.-execve-race.patch
perf-test-fix-build-of-bpf-and-llvm-on-older-glibc-libraries.patch
perf-x86-intel-pt-generate-pmi-in-the-stop-region-as-well.patch

queue-4.4/perf-core-fix-perf_event_open-vs.-execve-race.patch [new file with mode: 0644]
queue-4.4/perf-test-fix-build-of-bpf-and-llvm-on-older-glibc-libraries.patch [new file with mode: 0644]
queue-4.4/perf-x86-intel-pt-generate-pmi-in-the-stop-region-as-well.patch [new file with mode: 0644]
queue-4.4/series

diff --git a/queue-4.4/perf-core-fix-perf_event_open-vs.-execve-race.patch b/queue-4.4/perf-core-fix-perf_event_open-vs.-execve-race.patch
new file mode 100644 (file)
index 0000000..fbacef2
--- /dev/null
@@ -0,0 +1,170 @@
+From 79c9ce57eb2d5f1497546a3946b4ae21b6fdc438 Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 26 Apr 2016 11:36:53 +0200
+Subject: perf/core: Fix perf_event_open() vs. execve() race
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 79c9ce57eb2d5f1497546a3946b4ae21b6fdc438 upstream.
+
+Jann reported that the ptrace_may_access() check in
+find_lively_task_by_vpid() is racy against exec().
+
+Specifically:
+
+  perf_event_open()            execve()
+
+  ptrace_may_access()
+                               commit_creds()
+  ...                          if (get_dumpable() != SUID_DUMP_USER)
+                                 perf_event_exit_task();
+  perf_install_in_context()
+
+would result in installing a counter across the creds boundary.
+
+Fix this by wrapping lots of perf_event_open() in cred_guard_mutex.
+This should be fine as perf_event_exit_task() is already called with
+cred_guard_mutex held, so all perf locks already nest inside it.
+
+Reported-by: Jann Horn <jannh@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: He Kuang <hekuang@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c |   52 +++++++++++++++++++++++++++++++++++----------------
+ 1 file changed, 36 insertions(+), 16 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -946,6 +946,7 @@ static void put_ctx(struct perf_event_co
+  * function.
+  *
+  * Lock order:
++ *    cred_guard_mutex
+  *    task_struct::perf_event_mutex
+  *      perf_event_context::mutex
+  *        perf_event_context::lock
+@@ -3418,7 +3419,6 @@ static struct task_struct *
+ find_lively_task_by_vpid(pid_t vpid)
+ {
+       struct task_struct *task;
+-      int err;
+       rcu_read_lock();
+       if (!vpid)
+@@ -3432,16 +3432,7 @@ find_lively_task_by_vpid(pid_t vpid)
+       if (!task)
+               return ERR_PTR(-ESRCH);
+-      /* Reuse ptrace permission checks for now. */
+-      err = -EACCES;
+-      if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
+-              goto errout;
+-
+       return task;
+-errout:
+-      put_task_struct(task);
+-      return ERR_PTR(err);
+-
+ }
+ /*
+@@ -8328,6 +8319,24 @@ SYSCALL_DEFINE5(perf_event_open,
+       get_online_cpus();
++      if (task) {
++              err = mutex_lock_interruptible(&task->signal->cred_guard_mutex);
++              if (err)
++                      goto err_cpus;
++
++              /*
++               * Reuse ptrace permission checks for now.
++               *
++               * We must hold cred_guard_mutex across this and any potential
++               * perf_install_in_context() call for this new event to
++               * serialize against exec() altering our credentials (and the
++               * perf_event_exit_task() that could imply).
++               */
++              err = -EACCES;
++              if (!ptrace_may_access(task, PTRACE_MODE_READ_REALCREDS))
++                      goto err_cred;
++      }
++
+       if (flags & PERF_FLAG_PID_CGROUP)
+               cgroup_fd = pid;
+@@ -8335,7 +8344,7 @@ SYSCALL_DEFINE5(perf_event_open,
+                                NULL, NULL, cgroup_fd);
+       if (IS_ERR(event)) {
+               err = PTR_ERR(event);
+-              goto err_cpus;
++              goto err_cred;
+       }
+       if (is_sampling_event(event)) {
+@@ -8394,11 +8403,6 @@ SYSCALL_DEFINE5(perf_event_open,
+               goto err_context;
+       }
+-      if (task) {
+-              put_task_struct(task);
+-              task = NULL;
+-      }
+-
+       /*
+        * Look up the group leader (we will attach this event to it):
+        */
+@@ -8486,6 +8490,11 @@ SYSCALL_DEFINE5(perf_event_open,
+       WARN_ON_ONCE(ctx->parent_ctx);
++      /*
++       * This is the point on no return; we cannot fail hereafter. This is
++       * where we start modifying current state.
++       */
++
+       if (move_group) {
+               /*
+                * See perf_event_ctx_lock() for comments on the details
+@@ -8555,6 +8564,11 @@ SYSCALL_DEFINE5(perf_event_open,
+               mutex_unlock(&gctx->mutex);
+       mutex_unlock(&ctx->mutex);
++      if (task) {
++              mutex_unlock(&task->signal->cred_guard_mutex);
++              put_task_struct(task);
++      }
++
+       put_online_cpus();
+       event->owner = current;
+@@ -8589,6 +8603,9 @@ err_alloc:
+        */
+       if (!event_file)
+               free_event(event);
++err_cred:
++      if (task)
++              mutex_unlock(&task->signal->cred_guard_mutex);
+ err_cpus:
+       put_online_cpus();
+ err_task:
+@@ -8868,6 +8885,9 @@ static void perf_event_exit_task_context
+ /*
+  * When a child task exits, feed back event values to parent events.
++ *
++ * Can be called with cred_guard_mutex held when called from
++ * install_exec_creds().
+  */
+ void perf_event_exit_task(struct task_struct *child)
+ {
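To make the race concrete, here is a minimal user-space sketch (not part of the patch) of the scenario it closes: one process opens a counter on a target pid while that pid exec()s a set-uid binary. The wrapper name, the choice of /usr/bin/passwd as the set-uid target and the event configuration are illustrative assumptions; whether the open succeeds at all also depends on kernel.perf_event_paranoid and the target's dumpability. The point is only the ordering: pre-patch, the ptrace_may_access() check could pass against the old credentials while perf_install_in_context() ran after commit_creds(); with the patch, both run under the target's cred_guard_mutex.

  #include <linux/perf_event.h>
  #include <sys/syscall.h>
  #include <sys/types.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  /* glibc has no wrapper for perf_event_open(), so use the raw syscall */
  static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
                             int cpu, int group_fd, unsigned long flags)
  {
          return (int)syscall(__NR_perf_event_open, attr, pid, cpu,
                              group_fd, flags);
  }

  int main(void)
  {
          struct perf_event_attr attr;
          pid_t child;
          int fd;

          memset(&attr, 0, sizeof(attr));
          attr.size   = sizeof(attr);
          attr.type   = PERF_TYPE_HARDWARE;
          attr.config = PERF_COUNT_HW_INSTRUCTIONS;

          child = fork();
          if (child == 0) {
                  /* exec of a set-uid binary commits new creds and clears dumpable */
                  execl("/usr/bin/passwd", "passwd", (char *)NULL);
                  _exit(127);
          }

          /* races with the child's execve(); the fix makes the permission
           * check and the counter installation atomic w.r.t. exec() */
          fd = perf_event_open(&attr, child, -1, -1, 0);
          if (fd < 0)
                  perror("perf_event_open");
          else
                  close(fd);
          return 0;
  }
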
diff --git a/queue-4.4/perf-test-fix-build-of-bpf-and-llvm-on-older-glibc-libraries.patch b/queue-4.4/perf-test-fix-build-of-bpf-and-llvm-on-older-glibc-libraries.patch
new file mode 100644 (file)
index 0000000..cdf2132
--- /dev/null
@@ -0,0 +1,118 @@
+From 916d4092a1d2d7bb50630497be71ee4c4c2807fa Mon Sep 17 00:00:00 2001
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+Date: Wed, 18 Nov 2015 17:38:49 -0300
+Subject: perf test: Fix build of BPF and LLVM on older glibc libraries
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Arnaldo Carvalho de Melo <acme@redhat.com>
+
+commit 916d4092a1d2d7bb50630497be71ee4c4c2807fa upstream.
+
+  $ rpm -q glibc
+  glibc-2.12-1.166.el6_7.1.x86_64
+
+<SNIP>
+    CC       /tmp/build/perf/tests/llvm.o
+  cc1: warnings being treated as errors
+  tests/llvm.c: In function ‘test_llvm__fetch_bpf_obj’:
+  tests/llvm.c:53: error: declaration of ‘index’ shadows a global declaration
+  /usr/include/string.h:489: error: shadowed declaration is here
+<SNIP>
+    CC       /tmp/build/perf/tests/bpf.o
+  cc1: warnings being treated as errors
+  tests/bpf.c: In function ‘__test__bpf’:
+  tests/bpf.c:149: error: declaration of ‘index’ shadows a global declaration
+  /usr/include/string.h:489: error: shadowed declaration is here
+<SNIP>
+
+Cc: He Kuang <hekuang@huawei.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: pi3orama@163.com
+Cc: Wang Nan <wangnan0@huawei.com>
+Cc: Zefan Li <lizefan@huawei.com>
+Fixes: b31de018a628 ("perf test: Enhance the LLVM test: update basic BPF test program")
+Fixes: ba1fae431e74 ("perf test: Add 'perf test BPF'")
+Link: http://lkml.kernel.org/n/tip-akpo4r750oya2phxoh9e3447@git.kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Nikolay Borisov <kernel@kyup.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/tests/bpf.c  |   14 +++++++-------
+ tools/perf/tests/llvm.c |    8 ++++----
+ 2 files changed, 11 insertions(+), 11 deletions(-)
+
+--- a/tools/perf/tests/bpf.c
++++ b/tools/perf/tests/bpf.c
+@@ -146,7 +146,7 @@ prepare_bpf(void *obj_buf, size_t obj_bu
+       return obj;
+ }
+-static int __test__bpf(int index)
++static int __test__bpf(int idx)
+ {
+       int ret;
+       void *obj_buf;
+@@ -154,27 +154,27 @@ static int __test__bpf(int index)
+       struct bpf_object *obj;
+       ret = test_llvm__fetch_bpf_obj(&obj_buf, &obj_buf_sz,
+-                                     bpf_testcase_table[index].prog_id,
++                                     bpf_testcase_table[idx].prog_id,
+                                      true);
+       if (ret != TEST_OK || !obj_buf || !obj_buf_sz) {
+               pr_debug("Unable to get BPF object, %s\n",
+-                       bpf_testcase_table[index].msg_compile_fail);
+-              if (index == 0)
++                       bpf_testcase_table[idx].msg_compile_fail);
++              if (idx == 0)
+                       return TEST_SKIP;
+               else
+                       return TEST_FAIL;
+       }
+       obj = prepare_bpf(obj_buf, obj_buf_sz,
+-                        bpf_testcase_table[index].name);
++                        bpf_testcase_table[idx].name);
+       if (!obj) {
+               ret = TEST_FAIL;
+               goto out;
+       }
+       ret = do_test(obj,
+-                    bpf_testcase_table[index].target_func,
+-                    bpf_testcase_table[index].expect_result);
++                    bpf_testcase_table[idx].target_func,
++                    bpf_testcase_table[idx].expect_result);
+ out:
+       bpf__clear();
+       return ret;
+--- a/tools/perf/tests/llvm.c
++++ b/tools/perf/tests/llvm.c
+@@ -50,7 +50,7 @@ static struct {
+ int
+ test_llvm__fetch_bpf_obj(void **p_obj_buf,
+                        size_t *p_obj_buf_sz,
+-                       enum test_llvm__testcase index,
++                       enum test_llvm__testcase idx,
+                        bool force)
+ {
+       const char *source;
+@@ -59,11 +59,11 @@ test_llvm__fetch_bpf_obj(void **p_obj_bu
+       char *tmpl_new = NULL, *clang_opt_new = NULL;
+       int err, old_verbose, ret = TEST_FAIL;
+-      if (index >= __LLVM_TESTCASE_MAX)
++      if (idx >= __LLVM_TESTCASE_MAX)
+               return TEST_FAIL;
+-      source = bpf_source_table[index].source;
+-      desc = bpf_source_table[index].desc;
++      source = bpf_source_table[idx].source;
++      desc = bpf_source_table[idx].desc;
+       perf_config(perf_config_cb, NULL);
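The build failure itself is easy to reproduce outside perf: older glibc declares the legacy index(3) function in <string.h>, so any local variable or parameter named "index" trips gcc's -Wshadow, which the perf build promotes to an error via -Werror. Below is a minimal stand-alone reproducer (file and function names are made up for illustration); note that newer compilers no longer warn when a variable shadows a function, so this only reproduces with a toolchain of roughly the vintage shown in the log above. Renaming the identifier to idx, as the patch does throughout bpf.c and llvm.c, is the whole fix.

  /* shadow.c - compile with: gcc -c -Wshadow -Werror shadow.c */
  #include <string.h>   /* older glibc declares: char *index(const char *, int); */

  int lookup(int index)         /* error: declaration of 'index' shadows a global declaration */
  {
          return index * 2;
  }

  int lookup_fixed(int idx)     /* renamed parameter compiles cleanly */
  {
          return idx * 2;
  }
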
diff --git a/queue-4.4/perf-x86-intel-pt-generate-pmi-in-the-stop-region-as-well.patch b/queue-4.4/perf-x86-intel-pt-generate-pmi-in-the-stop-region-as-well.patch
new file mode 100644 (file)
index 0000000..a495649
--- /dev/null
@@ -0,0 +1,66 @@
+From ab92b232ae05c382c3df0e3d6a5c6d16b639ac8c Mon Sep 17 00:00:00 2001
+From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Date: Tue, 10 May 2016 16:18:32 +0300
+Subject: perf/x86/intel/pt: Generate PMI in the STOP region as well
+
+From: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+
+commit ab92b232ae05c382c3df0e3d6a5c6d16b639ac8c upstream.
+
+Currently, the PT driver always sets the PMI bit one region (page) before
+the STOP region so that we can wake up the consumer before we run out of
+room in the buffer and have to disable the event. However, we also need
+an interrupt in the last output region, so that we actually get to disable
+the event (if no more room from new data is available at that point),
+otherwise hardware just quietly refuses to start, but the event is
+scheduled in and we end up losing trace data till the event gets removed.
+
+For a cpu-wide event it is even worse since there may not be any
+re-scheduling at all and no chance for the ring buffer code to notice
+that its buffer is filled up and the event needs to be disabled (so that
+the consumer can re-enable it when it finishes reading the data out). In
+other words, all the trace data will be lost after the buffer gets filled
+up.
+
+This patch makes PT also generate a PMI when the last output region is
+full.
+
+Reported-by: Markus Metzger <markus.t.metzger@intel.com>
+Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
+Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
+Cc: Borislav Petkov <bp@alien8.de>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: Vince Weaver <vincent.weaver@maine.edu>
+Cc: vince@deater.net
+Link: http://lkml.kernel.org/r/1462886313-13660-2-git-send-email-alexander.shishkin@linux.intel.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/x86/kernel/cpu/perf_event_intel_pt.c |    2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/arch/x86/kernel/cpu/perf_event_intel_pt.c
++++ b/arch/x86/kernel/cpu/perf_event_intel_pt.c
+@@ -694,6 +694,7 @@ static int pt_buffer_reset_markers(struc
+       /* clear STOP and INT from current entry */
+       buf->topa_index[buf->stop_pos]->stop = 0;
++      buf->topa_index[buf->stop_pos]->intr = 0;
+       buf->topa_index[buf->intr_pos]->intr = 0;
+       /* how many pages till the STOP marker */
+@@ -718,6 +719,7 @@ static int pt_buffer_reset_markers(struc
+       buf->intr_pos = idx;
+       buf->topa_index[buf->stop_pos]->stop = 1;
++      buf->topa_index[buf->stop_pos]->intr = 1;
+       buf->topa_index[buf->intr_pos]->intr = 1;
+       return 0;
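Conceptually, the change is one extra marker bit: the trace buffer is a chain of output regions, the STOP region is where tracing must end, and the INT bit asks the hardware to raise a PMI when a region fills. The sketch below is a deliberately simplified illustration of the before/after marker layout using a made-up pt_region type; it is not the driver's real struct topa_entry or pt_buffer_reset_markers() logic.

  #include <stdbool.h>
  #include <stddef.h>

  /* Hypothetical, simplified stand-in for a ToPA entry: just the two
   * marker bits the patch is about. */
  struct pt_region {
          bool stop;      /* hardware must not trace past this region */
          bool intr;      /* raise a PMI when this region fills up */
  };

  /* Before the patch: only the region ahead of STOP raises a PMI, so once
   * the final region fills there is no interrupt left to disable the
   * event, and further trace data is silently lost. */
  static void mark_regions_old(struct pt_region *r, size_t stop_pos,
                               size_t intr_pos)
  {
          r[stop_pos].stop = true;
          r[intr_pos].intr = true;    /* intr_pos is typically stop_pos - 1 */
  }

  /* After the patch: the STOP region itself also raises a PMI, giving the
   * driver a chance to disable the event when the buffer is truly full. */
  static void mark_regions_new(struct pt_region *r, size_t stop_pos,
                               size_t intr_pos)
  {
          r[stop_pos].stop = true;
          r[stop_pos].intr = true;
          r[intr_pos].intr = true;
  }
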
diff --git a/queue-4.4/series b/queue-4.4/series
index 19ebce931467e36db3b4eb2dfffaee8e64316b85..7cbc19d4c15ccb5b3eede44a9e23722de0fd3a4c 100644 (file)
--- a/queue-4.4/series
@@ -1 +1,4 @@
 btrfs-don-t-use-src-fd-for-printk.patch
+perf-x86-intel-pt-generate-pmi-in-the-stop-region-as-well.patch
+perf-core-fix-perf_event_open-vs.-execve-race.patch
+perf-test-fix-build-of-bpf-and-llvm-on-older-glibc-libraries.patch