git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.10-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 24 Mar 2015 09:59:00 +0000 (10:59 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Tue, 24 Mar 2015 09:59:00 +0000 (10:59 +0100)
added patches:
sparc-perf-make-counting-mode-actually-work.patch
sparc-perf-remove-redundant-perf_pmu_-en-dis-able-calls.patch
sparc-semtimedop-unreachable-due-to-comparison-error.patch
sparc-touch-nmi-watchdog-when-walking-cpus-and-calling-printk.patch
sparc32-destroy_context-and-switch_mm-needs-to-disable-interrupts.patch
sparc64-fix-several-bugs-in-memmove.patch

queue-3.10/series [new file with mode: 0644]
queue-3.10/sparc-perf-make-counting-mode-actually-work.patch [new file with mode: 0644]
queue-3.10/sparc-perf-remove-redundant-perf_pmu_-en-dis-able-calls.patch [new file with mode: 0644]
queue-3.10/sparc-semtimedop-unreachable-due-to-comparison-error.patch [new file with mode: 0644]
queue-3.10/sparc-touch-nmi-watchdog-when-walking-cpus-and-calling-printk.patch [new file with mode: 0644]
queue-3.10/sparc32-destroy_context-and-switch_mm-needs-to-disable-interrupts.patch [new file with mode: 0644]
queue-3.10/sparc64-fix-several-bugs-in-memmove.patch [new file with mode: 0644]
queue-3.19/series [new file with mode: 0644]

diff --git a/queue-3.10/series b/queue-3.10/series
new file mode 100644 (file)
index 0000000..7c1c80e
--- /dev/null
@@ -0,0 +1,6 @@
+sparc32-destroy_context-and-switch_mm-needs-to-disable-interrupts.patch
+sparc-semtimedop-unreachable-due-to-comparison-error.patch
+sparc-perf-remove-redundant-perf_pmu_-en-dis-able-calls.patch
+sparc-perf-make-counting-mode-actually-work.patch
+sparc-touch-nmi-watchdog-when-walking-cpus-and-calling-printk.patch
+sparc64-fix-several-bugs-in-memmove.patch
diff --git a/queue-3.10/sparc-perf-make-counting-mode-actually-work.patch b/queue-3.10/sparc-perf-make-counting-mode-actually-work.patch
new file mode 100644 (file)
index 0000000..72970fb
--- /dev/null
@@ -0,0 +1,94 @@
+From foo@baz Tue Mar 24 10:57:46 CET 2015
+From: David Ahern <david.ahern@oracle.com>
+Date: Thu, 19 Mar 2015 16:06:17 -0400
+Subject: sparc: perf: Make counting mode actually work
+
+From: David Ahern <david.ahern@oracle.com>
+
+[ Upstream commit d51291cb8f32bfae6b331e1838651f3ddefa73a5 ]
+
+Currently perf-stat (aka, counting mode) does not work:
+
+$ perf stat ls
+...
+ Performance counter stats for 'ls':
+
+          1.585665      task-clock (msec)         #    0.580 CPUs utilized
+                24      context-switches          #    0.015 M/sec
+                 0      cpu-migrations            #    0.000 K/sec
+                86      page-faults               #    0.054 M/sec
+   <not supported>      cycles
+   <not supported>      stalled-cycles-frontend
+   <not supported>      stalled-cycles-backend
+   <not supported>      instructions
+   <not supported>      branches
+   <not supported>      branch-misses
+
+       0.002735100 seconds time elapsed
+
+The reason is that the event state is never reset (it stays with
+PERF_HES_UPTODATE set).  Add a call to sparc_pmu_enable_event during the
+added_event handling.  Clean up the encoding logic, since sparc_pmu_start
+calls sparc_pmu_enable_event, which does the same.  Passing PERF_EF_RELOAD
+to sparc_pmu_start means the call to sparc_perf_event_set_period can be
+removed as well.
+
+With this patch:
+
+$ perf stat ls
+...
+ Performance counter stats for 'ls':
+
+          1.552890      task-clock (msec)         #    0.552 CPUs utilized
+                24      context-switches          #    0.015 M/sec
+                 0      cpu-migrations            #    0.000 K/sec
+                86      page-faults               #    0.055 M/sec
+         5,748,997      cycles                    #    3.702 GHz
+   <not supported>      stalled-cycles-frontend:HG
+   <not supported>      stalled-cycles-backend:HG
+         1,684,362      instructions:HG           #    0.29  insns per cycle
+           295,133      branches:HG               #  190.054 M/sec
+            28,007      branch-misses:HG          #    9.49% of all branches
+
+       0.002815665 seconds time elapsed
+
+Signed-off-by: David Ahern <david.ahern@oracle.com>
+Acked-by: Bob Picco <bob.picco@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/perf_event.c |   11 +++--------
+ 1 file changed, 3 insertions(+), 8 deletions(-)
+
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -960,6 +960,8 @@ out:
+       cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
+ }
++static void sparc_pmu_start(struct perf_event *event, int flags);
++
+ /* On this PMU each PIC has it's own PCR control register.  */
+ static void calculate_multiple_pcrs(struct cpu_hw_events *cpuc)
+ {
+@@ -972,20 +974,13 @@ static void calculate_multiple_pcrs(stru
+               struct perf_event *cp = cpuc->event[i];
+               struct hw_perf_event *hwc = &cp->hw;
+               int idx = hwc->idx;
+-              u64 enc;
+               if (cpuc->current_idx[i] != PIC_NO_INDEX)
+                       continue;
+-              sparc_perf_event_set_period(cp, hwc, idx);
+               cpuc->current_idx[i] = idx;
+-              enc = perf_event_get_enc(cpuc->events[i]);
+-              cpuc->pcr[idx] &= ~mask_for_index(idx);
+-              if (hwc->state & PERF_HES_STOPPED)
+-                      cpuc->pcr[idx] |= nop_for_index(idx);
+-              else
+-                      cpuc->pcr[idx] |= event_encoding(enc, idx);
++              sparc_pmu_start(cp, PERF_EF_RELOAD);
+       }
+ out:
+       for (i = 0; i < cpuc->n_events; i++) {
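
Assembled from the hunk above, the modified loop in calculate_multiple_pcrs()
ends up reading roughly as follows; this is a paraphrase for orientation, not
an excerpt of the whole function:

	for (i = 0; i < cpuc->n_events; i++) {
		struct perf_event *cp = cpuc->event[i];
		struct hw_perf_event *hwc = &cp->hw;
		int idx = hwc->idx;

		/* Only events just added (still at PIC_NO_INDEX) need work. */
		if (cpuc->current_idx[i] != PIC_NO_INDEX)
			continue;

		cpuc->current_idx[i] = idx;
		/* Per the changelog, sparc_pmu_start() calls
		 * sparc_pmu_enable_event(), and PERF_EF_RELOAD makes it set
		 * the period, so the removed open-coded encoding is no
		 * longer needed.
		 */
		sparc_pmu_start(cp, PERF_EF_RELOAD);
	}
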
diff --git a/queue-3.10/sparc-perf-remove-redundant-perf_pmu_-en-dis-able-calls.patch b/queue-3.10/sparc-perf-remove-redundant-perf_pmu_-en-dis-able-calls.patch
new file mode 100644 (file)
index 0000000..1b0443f
--- /dev/null
@@ -0,0 +1,57 @@
+From foo@baz Tue Mar 24 10:57:46 CET 2015
+From: David Ahern <david.ahern@oracle.com>
+Date: Thu, 19 Mar 2015 16:05:57 -0400
+Subject: sparc: perf: Remove redundant perf_pmu_{en|dis}able calls
+
+From: David Ahern <david.ahern@oracle.com>
+
+[ Upstream commit 5b0d4b5514bbcce69b516d0742f2cfc84ebd6db3 ]
+
+perf_pmu_disable is called by core perf code before pmu->del and the
+enable function is called by core perf code afterwards. No need to
+call again within sparc_pmu_del.
+
+Ditto for pmu->add and sparc_pmu_add.
+
+Signed-off-by: David Ahern <david.ahern@oracle.com>
+Acked-by: Bob Picco <bob.picco@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/perf_event.c |    4 ----
+ 1 file changed, 4 deletions(-)
+
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -1101,7 +1101,6 @@ static void sparc_pmu_del(struct perf_ev
+       int i;
+       local_irq_save(flags);
+-      perf_pmu_disable(event->pmu);
+       for (i = 0; i < cpuc->n_events; i++) {
+               if (event == cpuc->event[i]) {
+@@ -1127,7 +1126,6 @@ static void sparc_pmu_del(struct perf_ev
+               }
+       }
+-      perf_pmu_enable(event->pmu);
+       local_irq_restore(flags);
+ }
+@@ -1361,7 +1359,6 @@ static int sparc_pmu_add(struct perf_eve
+       unsigned long flags;
+       local_irq_save(flags);
+-      perf_pmu_disable(event->pmu);
+       n0 = cpuc->n_events;
+       if (n0 >= sparc_pmu->max_hw_events)
+@@ -1394,7 +1391,6 @@ nocheck:
+       ret = 0;
+ out:
+-      perf_pmu_enable(event->pmu);
+       local_irq_restore(flags);
+       return ret;
+ }
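
Spelled out, the convention the changelog relies on looks like the sketch
below (reconstructed from the changelog, not quoted from kernel/events/core.c);
the same bracketing surrounds pmu->add(), which is why sparc_pmu_add() can
drop its own pair as well:

	perf_pmu_disable(event->pmu);	/* done by core perf code */
	event->pmu->del(event, 0);	/* sparc_pmu_del() runs here */
	perf_pmu_enable(event->pmu);	/* done by core perf code */
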
diff --git a/queue-3.10/sparc-semtimedop-unreachable-due-to-comparison-error.patch b/queue-3.10/sparc-semtimedop-unreachable-due-to-comparison-error.patch
new file mode 100644 (file)
index 0000000..7adeea8
--- /dev/null
@@ -0,0 +1,38 @@
+From foo@baz Tue Mar 24 10:57:46 CET 2015
+From: Rob Gardner <rob.gardner@oracle.com>
+Date: Mon, 2 Mar 2015 23:16:55 -0700
+Subject: sparc: semtimedop() unreachable due to comparison error
+
+From: Rob Gardner <rob.gardner@oracle.com>
+
+[ Upstream commit 53eb2516972b8c4628651dfcb926cb9ef8b2864a ]
+
+A bug was reported that the semtimedop() system call was always
+failing with ENOSYS.
+
+Since SEMCTL is defined as 3, and SEMTIMEDOP is defined as 4,
+the comparison "call <= SEMCTL" will always prevent SEMTIMEDOP
+from getting through to the semaphore ops switch statement.
+
+This is corrected by changing the comparison to "call <= SEMTIMEDOP".
+
+Orabug: 20633375
+
+Signed-off-by: Rob Gardner <rob.gardner@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/sys_sparc_64.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/sparc/kernel/sys_sparc_64.c
++++ b/arch/sparc/kernel/sys_sparc_64.c
+@@ -336,7 +336,7 @@ SYSCALL_DEFINE6(sparc_ipc, unsigned int,
+       long err;
+       /* No need for backward compatibility. We can start fresh... */
+-      if (call <= SEMCTL) {
++      if (call <= SEMTIMEDOP) {
+               switch (call) {
+               case SEMOP:
+                       err = sys_semtimedop(first, ptr,
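
A quick user-space check for the symptom in the bug report -- semtimedop()
failing with ENOSYS -- can look like the sketch below.  It is an illustration
written for this changelog, not part of the queued patch, and error handling
is minimal:

	#define _GNU_SOURCE
	#include <stdio.h>
	#include <errno.h>
	#include <sys/types.h>
	#include <sys/ipc.h>
	#include <sys/sem.h>
	#include <time.h>

	int main(void)
	{
		/* One private semaphore, then a trivial timed "up" on it. */
		int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
		struct sembuf op = { .sem_num = 0, .sem_op = 1, .sem_flg = 0 };
		struct timespec ts = { .tv_sec = 1, .tv_nsec = 0 };

		if (semtimedop(id, &op, 1, &ts) == -1 && errno == ENOSYS)
			printf("affected: semtimedop() returns ENOSYS\n");
		else
			printf("ok: semtimedop() reached the semaphore code\n");

		semctl(id, 0, IPC_RMID);	/* clean up the semaphore set */
		return 0;
	}
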
diff --git a/queue-3.10/sparc-touch-nmi-watchdog-when-walking-cpus-and-calling-printk.patch b/queue-3.10/sparc-touch-nmi-watchdog-when-walking-cpus-and-calling-printk.patch
new file mode 100644 (file)
index 0000000..96eae22
--- /dev/null
@@ -0,0 +1,42 @@
+From foo@baz Tue Mar 24 10:57:46 CET 2015
+From: David Ahern <david.ahern@oracle.com>
+Date: Thu, 19 Mar 2015 16:06:53 -0400
+Subject: sparc: Touch NMI watchdog when walking cpus and calling printk
+
+From: David Ahern <david.ahern@oracle.com>
+
+[ Upstream commit 31aaa98c248da766ece922bbbe8cc78cfd0bc920 ]
+
+With the increase in the number of CPUs, calls to functions that dump
+output to the console (e.g., arch_trigger_all_cpu_backtrace) can take
+a long time to complete. If IRQs are disabled, the NMI watchdog
+eventually kicks in and creates more havoc. Avoid this by telling the
+NMI watchdog that everything is ok.
+
+Signed-off-by: David Ahern <david.ahern@oracle.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/process_64.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -280,6 +280,8 @@ void arch_trigger_all_cpu_backtrace(void
+                       printk("             TPC[%lx] O7[%lx] I7[%lx] RPC[%lx]\n",
+                              gp->tpc, gp->o7, gp->i7, gp->rpc);
+               }
++
++              touch_nmi_watchdog();
+       }
+       memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
+@@ -352,6 +354,8 @@ static void pmu_snapshot_all_cpus(void)
+                      (cpu == this_cpu ? '*' : ' '), cpu,
+                      pp->pcr[0], pp->pcr[1], pp->pcr[2], pp->pcr[3],
+                      pp->pic[0], pp->pic[1], pp->pic[2], pp->pic[3]);
++
++              touch_nmi_watchdog();
+       }
+       memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
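
Both hunks apply the same simple pattern, sketched below (illustration only,
not kernel source): any loop that prints one line per cpu while interrupts
may be off should pet the NMI watchdog on each iteration, so the slow console
path is not mistaken for a lockup.

	int cpu;

	for_each_online_cpu(cpu) {
		printk(KERN_INFO "cpu %d: ... one line of state ...\n", cpu);
		touch_nmi_watchdog();	/* tell the NMI watchdog all is well */
	}
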
diff --git a/queue-3.10/sparc32-destroy_context-and-switch_mm-needs-to-disable-interrupts.patch b/queue-3.10/sparc32-destroy_context-and-switch_mm-needs-to-disable-interrupts.patch
new file mode 100644 (file)
index 0000000..9549c84
--- /dev/null
@@ -0,0 +1,62 @@
+From foo@baz Tue Mar 24 10:57:46 CET 2015
+From: Andreas Larsson <andreas@gaisler.com>
+Date: Thu, 18 Dec 2014 13:23:23 +0100
+Subject: sparc32: destroy_context() and switch_mm() needs to disable interrupts.
+
+From: Andreas Larsson <andreas@gaisler.com>
+
+[ Upstream commit 66d0f7ec9f1038452178b1993fc07fd96d30fd38 ]
+
+Load balancing can be triggered in the critical sections protected by
+srmmu_context_spinlock in destroy_context() and switch_mm(), and can hang
+the cpu waiting for the rq lock of another cpu that, in turn, has called
+switch_mm() and is hanging on srmmu_context_spinlock, leading to deadlock.
+
+So, disable interrupts while taking srmmu_context_spinlock in
+destroy_context() and switch_mm() so we don't deadlock.
+
+See also commit 77b838fa1ef0 ("[SPARC64]: destroy_context() needs to disable
+interrupts.")
+
+Signed-off-by: Andreas Larsson <andreas@gaisler.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/mm/srmmu.c |   11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+--- a/arch/sparc/mm/srmmu.c
++++ b/arch/sparc/mm/srmmu.c
+@@ -455,10 +455,12 @@ static void __init sparc_context_init(in
+ void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
+              struct task_struct *tsk)
+ {
++      unsigned long flags;
++
+       if (mm->context == NO_CONTEXT) {
+-              spin_lock(&srmmu_context_spinlock);
++              spin_lock_irqsave(&srmmu_context_spinlock, flags);
+               alloc_context(old_mm, mm);
+-              spin_unlock(&srmmu_context_spinlock);
++              spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
+               srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
+       }
+@@ -983,14 +985,15 @@ int init_new_context(struct task_struct
+ void destroy_context(struct mm_struct *mm)
+ {
++      unsigned long flags;
+       if (mm->context != NO_CONTEXT) {
+               flush_cache_mm(mm);
+               srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
+               flush_tlb_mm(mm);
+-              spin_lock(&srmmu_context_spinlock);
++              spin_lock_irqsave(&srmmu_context_spinlock, flags);
+               free_context(mm->context);
+-              spin_unlock(&srmmu_context_spinlock);
++              spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
+               mm->context = NO_CONTEXT;
+       }
+ }
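
The deadlock the changelog describes, drawn out; this is an annotated
illustration of the scenario, not kernel source, and the locking lines at the
end restate the pattern the hunks above switch to:

	/*
	 *   CPU A                                  CPU B
	 *   -----                                  -----
	 *   destroy_context() / switch_mm()        scheduler, holding CPU B's rq lock
	 *     spin_lock(&srmmu_context_spinlock)      switch_mm()
	 *     <IRQ> -> load balancing                   spins on srmmu_context_spinlock
	 *       waits for CPU B's rq lock   <-->   neither cpu can make progress
	 *
	 * Holding the lock with interrupts disabled keeps load balancing off
	 * this cpu for the short critical section, so the cycle cannot form:
	 */
	unsigned long flags;

	spin_lock_irqsave(&srmmu_context_spinlock, flags);
	/* alloc_context() or free_context() runs here, as in the hunks above */
	spin_unlock_irqrestore(&srmmu_context_spinlock, flags);
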
diff --git a/queue-3.10/sparc64-fix-several-bugs-in-memmove.patch b/queue-3.10/sparc64-fix-several-bugs-in-memmove.patch
new file mode 100644 (file)
index 0000000..cde8a9e
--- /dev/null
@@ -0,0 +1,98 @@
+From foo@baz Tue Mar 24 10:57:46 CET 2015
+From: "David S. Miller" <davem@davemloft.net>
+Date: Mon, 23 Mar 2015 09:22:10 -0700
+Subject: sparc64: Fix several bugs in memmove().
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 2077cef4d5c29cf886192ec32066f783d6a80db8 ]
+
+Firstly, handle zero length calls properly.  Believe it or not there
+are a few of these happening during early boot.
+
+Next, we can't just drop to a memcpy() call in the forward copy case
+where dst <= src.  The reason is that the cache initializing stores
+used in the Niagara memcpy() implementations can end up clearing out
+cache lines before we've sourced their original contents completely.
+
+For example, considering NG4memcpy, the main unrolled loop begins like
+this:
+
+     load   src + 0x00
+     load   src + 0x08
+     load   src + 0x10
+     load   src + 0x18
+     load   src + 0x20
+     store  dst + 0x00
+
+Assume dst is 64-byte aligned and that dst is src - 8 for this
+memcpy() call.  The store at the end is the one to the first word of
+the cache line; the cache-initializing store clears the whole line,
+which clobbers "src + 0x28" before it even gets loaded.
+
+To avoid this, just fall through to a simple copy only mildly
+optimized for the case where src and dst are 8 byte aligned and the
+length is a multiple of 8 as well.  We could get fancy and call
+GENmemcpy() but this is good enough for how this thing is actually
+used.
+
+Reported-by: David Ahern <david.ahern@oracle.com>
+Reported-by: Bob Picco <bpicco@meloft.net>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/lib/memmove.S |   35 ++++++++++++++++++++++++++++++++---
+ 1 file changed, 32 insertions(+), 3 deletions(-)
+
+--- a/arch/sparc/lib/memmove.S
++++ b/arch/sparc/lib/memmove.S
+@@ -8,9 +8,11 @@
+       .text
+ ENTRY(memmove) /* o0=dst o1=src o2=len */
+-      mov             %o0, %g1
++      brz,pn          %o2, 99f
++       mov            %o0, %g1
++
+       cmp             %o0, %o1
+-      bleu,pt         %xcc, memcpy
++      bleu,pt         %xcc, 2f
+        add            %o1, %o2, %g7
+       cmp             %g7, %o0
+       bleu,pt         %xcc, memcpy
+@@ -24,7 +26,34 @@ ENTRY(memmove) /* o0=dst o1=src o2=len *
+       stb             %g7, [%o0]
+       bne,pt          %icc, 1b
+        sub            %o0, 1, %o0
+-
++99:
+       retl
+        mov            %g1, %o0
++
++      /* We can't just call memcpy for these memmove cases.  On some
++       * chips the memcpy uses cache initializing stores and when dst
++       * and src are close enough, those can clobber the source data
++       * before we've loaded it in.
++       */
++2:    or              %o0, %o1, %g7
++      or              %o2, %g7, %g7
++      andcc           %g7, 0x7, %g0
++      bne,pn          %xcc, 4f
++       nop
++
++3:    ldx             [%o1], %g7
++      add             %o1, 8, %o1
++      subcc           %o2, 8, %o2
++      add             %o0, 8, %o0
++      bne,pt          %icc, 3b
++       stx            %g7, [%o0 - 0x8]
++      ba,a,pt         %xcc, 99b
++
++4:    ldub            [%o1], %g7
++      add             %o1, 1, %o1
++      subcc           %o2, 1, %o2
++      add             %o0, 1, %o0
++      bne,pt          %icc, 4b
++       stb            %g7, [%o0 - 0x1]
++      ba,a,pt         %xcc, 99b
+ ENDPROC(memmove)
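
A C paraphrase of the fallback copy added above; labels 2, 3 and 4 in the
assembly map to the alignment test, the 8-byte loop and the byte loop below.
This is an illustration written for the changelog (the name forward_copy
exists only here), not a replacement for the assembly:

	static void *forward_copy(void *dst, const void *src, unsigned long len)
	{
		/* Label 2: if dst, src or len is not 8-byte aligned, fall back
		 * to the byte loop; otherwise copy 8 bytes at a time.  Copying
		 * forward is safe here because dst <= src and no
		 * cache-initializing stores are involved.
		 */
		if (((unsigned long)dst | (unsigned long)src | len) & 0x7) {
			unsigned char *d = dst;
			const unsigned char *s = src;

			while (len--)			/* label 4: ldub/stb */
				*d++ = *s++;
		} else {
			unsigned long *d = dst;
			const unsigned long *s = src;

			for (; len; len -= 8)		/* label 3: ldx/stx */
				*d++ = *s++;
		}
		return dst;	/* like memmove(), return the original dst */
	}
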
diff --git a/queue-3.19/series b/queue-3.19/series
new file mode 100644 (file)
index 0000000..5721d9b
--- /dev/null
@@ -0,0 +1,5 @@
+sparc-semtimedop-unreachable-due-to-comparison-error.patch
+sparc-perf-remove-redundant-perf_pmu_-en-dis-able-calls.patch
+sparc-perf-make-counting-mode-actually-work.patch
+sparc-touch-nmi-watchdog-when-walking-cpus-and-calling-printk.patch
+sparc64-fix-several-bugs-in-memmove.patch