git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.9-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 13 Feb 2017 12:17:02 +0000 (04:17 -0800)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 13 Feb 2017 12:17:02 +0000 (04:17 -0800)
added patches:
perf-core-fix-crash-in-perf_event_read.patch
perf-diff-fix-o-order-option-behavior-again.patch
perf-diff-fix-segfault-on-perf-diff-o-n-option.patch
stacktrace-lockdep-fix-address-newline-ugliness.patch

queue-4.9/perf-core-fix-crash-in-perf_event_read.patch [new file with mode: 0644]
queue-4.9/perf-diff-fix-o-order-option-behavior-again.patch [new file with mode: 0644]
queue-4.9/perf-diff-fix-segfault-on-perf-diff-o-n-option.patch [new file with mode: 0644]
queue-4.9/series
queue-4.9/stacktrace-lockdep-fix-address-newline-ugliness.patch [new file with mode: 0644]

diff --git a/queue-4.9/perf-core-fix-crash-in-perf_event_read.patch b/queue-4.9/perf-core-fix-crash-in-perf_event_read.patch
new file mode 100644 (file)
index 0000000..5337c62
--- /dev/null
@@ -0,0 +1,94 @@
+From 451d24d1e5f40bad000fa9abe36ddb16fc9928cb Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 31 Jan 2017 11:27:10 +0100
+Subject: perf/core: Fix crash in perf_event_read()
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 451d24d1e5f40bad000fa9abe36ddb16fc9928cb upstream.
+
+Alexei had his box explode because doing read() on a package
+(rapl/uncore) event that isn't currently scheduled in ends up doing an
+out-of-bounds load.
+
+Rework the code to more explicitly deal with event->oncpu being -1.
+
+Reported-by: Alexei Starovoitov <alexei.starovoitov@gmail.com>
+Tested-by: Alexei Starovoitov <ast@kernel.org>
+Tested-by: David Carrillo-Cisneros <davidcc@google.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: eranian@google.com
+Fixes: d6a2f9035bfc ("perf/core: Introduce PMU_EV_CAP_READ_ACTIVE_PKG")
+Link: http://lkml.kernel.org/r/20170131102710.GL6515@twins.programming.kicks-ass.net
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/events/core.c |   25 +++++++++++++++----------
+ 1 file changed, 15 insertions(+), 10 deletions(-)
+
+--- a/kernel/events/core.c
++++ b/kernel/events/core.c
+@@ -3461,14 +3461,15 @@ struct perf_read_data {
+       int ret;
+ };
+-static int find_cpu_to_read(struct perf_event *event, int local_cpu)
++static int __perf_event_read_cpu(struct perf_event *event, int event_cpu)
+ {
+-      int event_cpu = event->oncpu;
+       u16 local_pkg, event_pkg;
+       if (event->group_caps & PERF_EV_CAP_READ_ACTIVE_PKG) {
+-              event_pkg =  topology_physical_package_id(event_cpu);
+-              local_pkg =  topology_physical_package_id(local_cpu);
++              int local_cpu = smp_processor_id();
++
++              event_pkg = topology_physical_package_id(event_cpu);
++              local_pkg = topology_physical_package_id(local_cpu);
+               if (event_pkg == local_pkg)
+                       return local_cpu;
+@@ -3598,7 +3599,7 @@ u64 perf_event_read_local(struct perf_ev
+ static int perf_event_read(struct perf_event *event, bool group)
+ {
+-      int ret = 0, cpu_to_read, local_cpu;
++      int event_cpu, ret = 0;
+       /*
+        * If event is enabled and currently active on a CPU, update the
+@@ -3611,21 +3612,25 @@ static int perf_event_read(struct perf_e
+                       .ret = 0,
+               };
+-              local_cpu = get_cpu();
+-              cpu_to_read = find_cpu_to_read(event, local_cpu);
+-              put_cpu();
++              event_cpu = READ_ONCE(event->oncpu);
++              if ((unsigned)event_cpu >= nr_cpu_ids)
++                      return 0;
++
++              preempt_disable();
++              event_cpu = __perf_event_read_cpu(event, event_cpu);
+               /*
+                * Purposely ignore the smp_call_function_single() return
+                * value.
+                *
+-               * If event->oncpu isn't a valid CPU it means the event got
++               * If event_cpu isn't a valid CPU it means the event got
+                * scheduled out and that will have updated the event count.
+                *
+                * Therefore, either way, we'll have an up-to-date event count
+                * after this.
+                */
+-              (void)smp_call_function_single(cpu_to_read, __perf_event_read, &data, 1);
++              (void)smp_call_function_single(event_cpu, __perf_event_read, &data, 1);
++              preempt_enable();
+               ret = data.ret;
+       } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
+               struct perf_event_context *ctx = event->ctx;
diff --git a/queue-4.9/perf-diff-fix-o-order-option-behavior-again.patch b/queue-4.9/perf-diff-fix-o-order-option-behavior-again.patch
new file mode 100644 (file)
index 0000000..f0c3c5e
--- /dev/null
@@ -0,0 +1,81 @@
+From a1c9f97f0b64e6337d9cfcc08c134450934fdd90 Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung@kernel.org>
+Date: Wed, 18 Jan 2017 14:14:57 +0900
+Subject: perf diff: Fix -o/--order option behavior (again)
+
+From: Namhyung Kim <namhyung@kernel.org>
+
+commit a1c9f97f0b64e6337d9cfcc08c134450934fdd90 upstream.
+
+Commit 21e6d8428664 ("perf diff: Use perf_hpp__register_sort_field
+interface") changed list_add() to perf_hpp__register_sort_field().
+
+This resulted in a behavior change since the field was added to the tail
+instead of the head.  So the -o option is mostly ignored due to its
+order in the list.
+
+This patch fixes it by adding perf_hpp__prepend_sort_field().
+
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Fixes: 21e6d8428664 ("perf diff: Use perf_hpp__register_sort_field interface")
+Link: http://lkml.kernel.org/r/20170118051457.30946-2-namhyung@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/builtin-diff.c |    2 +-
+ tools/perf/ui/hist.c      |    6 ++++++
+ tools/perf/util/hist.h    |    7 +++++++
+ 3 files changed, 14 insertions(+), 1 deletion(-)
+
+--- a/tools/perf/builtin-diff.c
++++ b/tools/perf/builtin-diff.c
+@@ -1199,7 +1199,7 @@ static int ui_init(void)
+               BUG_ON(1);
+       }
+-      perf_hpp__register_sort_field(fmt);
++      perf_hpp__prepend_sort_field(fmt);
+       return 0;
+ }
+--- a/tools/perf/ui/hist.c
++++ b/tools/perf/ui/hist.c
+@@ -521,6 +521,12 @@ void perf_hpp_list__register_sort_field(
+       list_add_tail(&format->sort_list, &list->sorts);
+ }
++void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
++                                     struct perf_hpp_fmt *format)
++{
++      list_add(&format->sort_list, &list->sorts);
++}
++
+ void perf_hpp__column_unregister(struct perf_hpp_fmt *format)
+ {
+       list_del(&format->list);
+--- a/tools/perf/util/hist.h
++++ b/tools/perf/util/hist.h
+@@ -282,6 +282,8 @@ void perf_hpp_list__column_register(stru
+                                   struct perf_hpp_fmt *format);
+ void perf_hpp_list__register_sort_field(struct perf_hpp_list *list,
+                                       struct perf_hpp_fmt *format);
++void perf_hpp_list__prepend_sort_field(struct perf_hpp_list *list,
++                                     struct perf_hpp_fmt *format);
+ static inline void perf_hpp__column_register(struct perf_hpp_fmt *format)
+ {
+@@ -293,6 +295,11 @@ static inline void perf_hpp__register_so
+       perf_hpp_list__register_sort_field(&perf_hpp_list, format);
+ }
++static inline void perf_hpp__prepend_sort_field(struct perf_hpp_fmt *format)
++{
++      perf_hpp_list__prepend_sort_field(&perf_hpp_list, format);
++}
++
+ #define perf_hpp_list__for_each_format(_list, format) \
+       list_for_each_entry(format, &(_list)->fields, list)
diff --git a/queue-4.9/perf-diff-fix-segfault-on-perf-diff-o-n-option.patch b/queue-4.9/perf-diff-fix-segfault-on-perf-diff-o-n-option.patch
new file mode 100644 (file)
index 0000000..6ad1706
--- /dev/null
@@ -0,0 +1,51 @@
+From 8381cdd0e32dd748bd34ca3ace476949948bd793 Mon Sep 17 00:00:00 2001
+From: Namhyung Kim <namhyung@kernel.org>
+Date: Wed, 18 Jan 2017 14:14:56 +0900
+Subject: perf diff: Fix segfault on 'perf diff -o N' option
+
+From: Namhyung Kim <namhyung@kernel.org>
+
+commit 8381cdd0e32dd748bd34ca3ace476949948bd793 upstream.
+
+The -o/--order option is to select column number to sort a diff result.
+
+It does the job by adding a hpp field at the beginning of the sort list.
+But it should not be added to the output field list as it has no
+callbacks required by a output field.
+
+During the setup_sorting(), the perf_hpp__setup_output_field() appends
+the given sort keys to the output field if it's not there already.
+
+Originally it was checked by fmt->list being non-empty.  But commit
+3f931f2c4274 ("perf hists: Make hpp setup function generic") changed it
+to check the ->equal callback.
+
+Anyways, we don't need to add the pseudo hpp field to the output field
+list since it won't be used for output.  So just skip fields if they
+have no ->color or ->entry callbacks.
+
+Signed-off-by: Namhyung Kim <namhyung@kernel.org>
+Acked-by: Jiri Olsa <jolsa@kernel.org>
+Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
+Fixes: 3f931f2c4274 ("perf hists: Make hpp setup function generic")
+Link: http://lkml.kernel.org/r/20170118051457.30946-1-namhyung@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ tools/perf/ui/hist.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/tools/perf/ui/hist.c
++++ b/tools/perf/ui/hist.c
+@@ -566,6 +566,10 @@ void perf_hpp__setup_output_field(struct
+       perf_hpp_list__for_each_sort_list(list, fmt) {
+               struct perf_hpp_fmt *pos;
++              /* skip sort-only fields ("sort_compute" in perf diff) */
++              if (!fmt->entry && !fmt->color)
++                      continue;
++
+               perf_hpp_list__for_each_format(list, pos) {
+                       if (fmt_equal(fmt, pos))
+                               goto next;
index 37cc9689cbdc83279082d1e0d1a7eac954611af3..6421fc54a7b79f6e60956d7f9d96e40348d7d2a0 100644 (file)
@@ -54,3 +54,7 @@ x86-cpu-amd-bring-back-compute-unit-id.patch
 x86-cpu-amd-fix-zen-smt-topology.patch
 ib-rxe-fix-resid-update.patch
 ib-rxe-fix-mem_check_range-integer-overflow.patch
+stacktrace-lockdep-fix-address-newline-ugliness.patch
+perf-diff-fix-o-order-option-behavior-again.patch
+perf-diff-fix-segfault-on-perf-diff-o-n-option.patch
+perf-core-fix-crash-in-perf_event_read.patch
diff --git a/queue-4.9/stacktrace-lockdep-fix-address-newline-ugliness.patch b/queue-4.9/stacktrace-lockdep-fix-address-newline-ugliness.patch
new file mode 100644 (file)
index 0000000..95cb116
--- /dev/null
@@ -0,0 +1,96 @@
+From bfeda41d06d85ad9d52f2413cfc2b77be5022f75 Mon Sep 17 00:00:00 2001
+From: Omar Sandoval <osandov@fb.com>
+Date: Tue, 7 Feb 2017 15:33:20 -0800
+Subject: stacktrace, lockdep: Fix address, newline ugliness
+
+From: Omar Sandoval <osandov@fb.com>
+
+commit bfeda41d06d85ad9d52f2413cfc2b77be5022f75 upstream.
+
+Since KERN_CONT became meaningful again, lockdep stack traces have had
+annoying extra newlines, like this:
+
+[    5.561122] -> #1 (B){+.+...}:
+[    5.561528]
+[    5.561532] [<ffffffff810d8873>] lock_acquire+0xc3/0x210
+[    5.562178]
+[    5.562181] [<ffffffff816f6414>] mutex_lock_nested+0x74/0x6d0
+[    5.562861]
+[    5.562880] [<ffffffffa01aa3c3>] init_btrfs_fs+0x21/0x196 [btrfs]
+[    5.563717]
+[    5.563721] [<ffffffff81000472>] do_one_initcall+0x52/0x1b0
+[    5.564554]
+[    5.564559] [<ffffffff811a3af6>] do_init_module+0x5f/0x209
+[    5.565357]
+[    5.565361] [<ffffffff81122f4d>] load_module+0x218d/0x2b80
+[    5.566020]
+[    5.566021] [<ffffffff81123beb>] SyS_finit_module+0xeb/0x120
+[    5.566694]
+[    5.566696] [<ffffffff816fd241>] entry_SYSCALL_64_fastpath+0x1f/0xc2
+
+That's happening because each printk() call now gets printed on its own
+line, and we do a separate call to print the spaces before the symbol.
+Fix it by doing the printk() directly instead of using the
+print_ip_sym() helper.
+
+Additionally, the symbol address isn't very helpful, so let's get rid of
+that, too. The final result looks like this:
+
+[    5.194518] -> #1 (B){+.+...}:
+[    5.195002]        lock_acquire+0xc3/0x210
+[    5.195439]        mutex_lock_nested+0x74/0x6d0
+[    5.196491]        do_one_initcall+0x52/0x1b0
+[    5.196939]        do_init_module+0x5f/0x209
+[    5.197355]        load_module+0x218d/0x2b80
+[    5.197792]        SyS_finit_module+0xeb/0x120
+[    5.198251]        entry_SYSCALL_64_fastpath+0x1f/0xc2
+
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Omar Sandoval <osandov@fb.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: kernel-team@fb.com
+Fixes: 4bcc595ccd80 ("printk: reinstate KERN_CONT for printing continuation lines")
+Link: http://lkml.kernel.org/r/43b4e114724b2bdb0308fa86cb33aa07d3d67fad.1486510315.git.osandov@fb.com
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/stacktrace.c |   12 ++++--------
+ 1 file changed, 4 insertions(+), 8 deletions(-)
+
+--- a/kernel/stacktrace.c
++++ b/kernel/stacktrace.c
+@@ -18,10 +18,8 @@ void print_stack_trace(struct stack_trac
+       if (WARN_ON(!trace->entries))
+               return;
+-      for (i = 0; i < trace->nr_entries; i++) {
+-              printk("%*c", 1 + spaces, ' ');
+-              print_ip_sym(trace->entries[i]);
+-      }
++      for (i = 0; i < trace->nr_entries; i++)
++              printk("%*c%pS\n", 1 + spaces, ' ', (void *)trace->entries[i]);
+ }
+ EXPORT_SYMBOL_GPL(print_stack_trace);
+@@ -29,7 +27,6 @@ int snprint_stack_trace(char *buf, size_
+                       struct stack_trace *trace, int spaces)
+ {
+       int i;
+-      unsigned long ip;
+       int generated;
+       int total = 0;
+@@ -37,9 +34,8 @@ int snprint_stack_trace(char *buf, size_
+               return 0;
+       for (i = 0; i < trace->nr_entries; i++) {
+-              ip = trace->entries[i];
+-              generated = snprintf(buf, size, "%*c[<%p>] %pS\n",
+-                              1 + spaces, ' ', (void *) ip, (void *) ip);
++              generated = snprintf(buf, size, "%*c%pS\n", 1 + spaces, ' ',
++                                   (void *)trace->entries[i]);
+               total += generated;