net-fix-pskb_trim_rcsum_slow-with-odd-trim-offset.patch
net-mlx5-wq-fixes-for-fragmented-wq-buffers-api.patch
mlxsw-core-fix-devlink-unregister-flow.patch
+sparc64-export-__node_distance.patch
+sparc64-make-corrupted-user-stacks-more-debuggable.patch
+sparc64-make-proc_id-signed.patch
+sparc64-set-l4-properly-on-trap-return-after-handling-signals.patch
+sparc64-wire-up-compat-getpeername-and-getsockname.patch
+sparc-fix-single-pcr-perf-event-counter-management.patch
+sparc-fix-syscall-fallback-bugs-in-vdso.patch
+sparc-throttle-perf-events-properly.patch
--- /dev/null
+From foo@baz Fri Nov 2 06:28:44 CET 2018
+From: "David S. Miller" <davem@davemloft.net>
+Date: Fri, 12 Oct 2018 10:31:58 -0700
+Subject: sparc: Fix single-pcr perf event counter management.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit cfdc3170d214046b9509183fe9b9544dc644d40b ]
+
+It is important to clear the hw->state value for non-stopped events
+when they are added into the PMU. Otherwise when the event is
+scheduled out, we won't read the counter because HES_UPTODATE is still
+set. This breaks 'perf stat' and similar use cases, causing all the
+events to show zero.
+
+This worked for multi-pcr because we make explicit sparc_pmu_start()
+calls in calculate_multiple_pcrs(). calculate_single_pcr() doesn't do
+this because the idea there is to accumulate all of the counter
+settings into the single pcr value. So we have to add explicit
+hw->state handling there.
+
+Like x86, we use the PERF_HES_ARCH bit to track truly stopped events
+so that we don't accidentally start them on a reload.
+
+Related to all of this, sparc_pmu_start() is missing a userpage update
+so add it.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/perf_event.c | 17 +++++++++++++----
+ 1 file changed, 13 insertions(+), 4 deletions(-)
+
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -927,6 +927,8 @@ static void read_in_all_counters(struct
+ sparc_perf_event_update(cp, &cp->hw,
+ cpuc->current_idx[i]);
+ cpuc->current_idx[i] = PIC_NO_INDEX;
++ if (cp->hw.state & PERF_HES_STOPPED)
++ cp->hw.state |= PERF_HES_ARCH;
+ }
+ }
+ }
+@@ -959,10 +961,12 @@ static void calculate_single_pcr(struct
+
+ enc = perf_event_get_enc(cpuc->events[i]);
+ cpuc->pcr[0] &= ~mask_for_index(idx);
+- if (hwc->state & PERF_HES_STOPPED)
++ if (hwc->state & PERF_HES_ARCH) {
+ cpuc->pcr[0] |= nop_for_index(idx);
+- else
++ } else {
+ cpuc->pcr[0] |= event_encoding(enc, idx);
++ hwc->state = 0;
++ }
+ }
+ out:
+ cpuc->pcr[0] |= cpuc->event[0]->hw.config_base;
+@@ -988,6 +992,9 @@ static void calculate_multiple_pcrs(stru
+
+ cpuc->current_idx[i] = idx;
+
++ if (cp->hw.state & PERF_HES_ARCH)
++ continue;
++
+ sparc_pmu_start(cp, PERF_EF_RELOAD);
+ }
+ out:
+@@ -1079,6 +1086,8 @@ static void sparc_pmu_start(struct perf_
+ event->hw.state = 0;
+
+ sparc_pmu_enable_event(cpuc, &event->hw, idx);
++
++ perf_event_update_userpage(event);
+ }
+
+ static void sparc_pmu_stop(struct perf_event *event, int flags)
+@@ -1371,9 +1380,9 @@ static int sparc_pmu_add(struct perf_eve
+ cpuc->events[n0] = event->hw.event_base;
+ cpuc->current_idx[n0] = PIC_NO_INDEX;
+
+- event->hw.state = PERF_HES_UPTODATE;
++ event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+ if (!(ef_flags & PERF_EF_START))
+- event->hw.state |= PERF_HES_STOPPED;
++ event->hw.state |= PERF_HES_ARCH;
+
+ /*
+ * If group events scheduling transaction was started,
--- /dev/null
+From foo@baz Fri Nov 2 06:28:44 CET 2018
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 17 Oct 2018 21:28:01 -0700
+Subject: sparc: Fix syscall fallback bugs in VDSO.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 776ca1543b5fe673aaf1beb244fcc2429d378083 ]
+
+First, the trap number for 32-bit syscalls is 0x10.
+
+Also, only negate the return value when syscall error is indicated by
+the carry bit being set.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/vdso/vclock_gettime.c | 12 +++++++++++-
+ 1 file changed, 11 insertions(+), 1 deletion(-)
+
+--- a/arch/sparc/vdso/vclock_gettime.c
++++ b/arch/sparc/vdso/vclock_gettime.c
+@@ -33,9 +33,19 @@
+ #define TICK_PRIV_BIT (1ULL << 63)
+ #endif
+
++#ifdef CONFIG_SPARC64
+ #define SYSCALL_STRING \
+ "ta 0x6d;" \
+- "sub %%g0, %%o0, %%o0;" \
++ "bcs,a 1f;" \
++ " sub %%g0, %%o0, %%o0;" \
++ "1:"
++#else
++#define SYSCALL_STRING \
++ "ta 0x10;" \
++ "bcs,a 1f;" \
++ " sub %%g0, %%o0, %%o0;" \
++ "1:"
++#endif
+
+ #define SYSCALL_CLOBBERS \
+ "f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", \
--- /dev/null
+From foo@baz Fri Nov 2 06:28:44 CET 2018
+From: "David S. Miller" <davem@davemloft.net>
+Date: Fri, 12 Oct 2018 10:33:20 -0700
+Subject: sparc: Throttle perf events properly.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 455adb3174d2c8518cef1a61140c211f6ac224d2 ]
+
+Like x86 and arm, call perf_sample_event_took() in perf event
+NMI interrupt handler.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/perf_event.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+--- a/arch/sparc/kernel/perf_event.c
++++ b/arch/sparc/kernel/perf_event.c
+@@ -24,6 +24,7 @@
+ #include <asm/cpudata.h>
+ #include <linux/uaccess.h>
+ #include <linux/atomic.h>
++#include <linux/sched/clock.h>
+ #include <asm/nmi.h>
+ #include <asm/pcr.h>
+ #include <asm/cacheflush.h>
+@@ -1612,6 +1613,8 @@ static int __kprobes perf_event_nmi_hand
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc;
+ struct pt_regs *regs;
++ u64 finish_clock;
++ u64 start_clock;
+ int i;
+
+ if (!atomic_read(&active_events))
+@@ -1625,6 +1628,8 @@ static int __kprobes perf_event_nmi_hand
+ return NOTIFY_DONE;
+ }
+
++ start_clock = sched_clock();
++
+ regs = args->regs;
+
+ cpuc = this_cpu_ptr(&cpu_hw_events);
+@@ -1663,6 +1668,10 @@ static int __kprobes perf_event_nmi_hand
+ sparc_pmu_stop(event, 0);
+ }
+
++ finish_clock = sched_clock();
++
++ perf_sample_event_took(finish_clock - start_clock);
++
+ return NOTIFY_STOP;
+ }
+
--- /dev/null
+From foo@baz Fri Nov 2 06:28:44 CET 2018
+From: "David S. Miller" <davem@davemloft.net>
+Date: Fri, 26 Oct 2018 15:11:56 -0700
+Subject: sparc64: Export __node_distance.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 2b4792eaa9f553764047d157365ed8b7787751a3 ]
+
+Some drivers reference it via node_distance(), for example the
+NVME host driver core.
+
+ERROR: "__node_distance" [drivers/nvme/host/nvme-core.ko] undefined!
+make[1]: *** [scripts/Makefile.modpost:92: __modpost] Error 1
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/mm/init_64.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/arch/sparc/mm/init_64.c
++++ b/arch/sparc/mm/init_64.c
+@@ -1383,6 +1383,7 @@ int __node_distance(int from, int to)
+ }
+ return numa_latency[from][to];
+ }
++EXPORT_SYMBOL(__node_distance);
+
+ static int __init find_best_numa_node_for_mlgroup(struct mdesc_mlgroup *grp)
+ {
--- /dev/null
+From foo@baz Fri Nov 2 06:28:44 CET 2018
+From: David Miller <davem@redhat.com>
+Date: Thu, 25 Oct 2018 20:36:46 -0700
+Subject: sparc64: Make corrupted user stacks more debuggable.
+
+From: David Miller <davem@redhat.com>
+
+[ Upstream commit 5b4fc3882a649c9411dd0dcad2ddb78e911d340e ]
+
+Right now if we get a corrupted user stack frame we do a
+do_exit(SIGILL) which is not helpful.
+
+If under a debugger, this behavior causes the inferior process to
+exit. So the register and other state cannot be examined at the time
+of the event.
+
+Instead, conditionally log a rate-limited kernel log message and then
+force a SIGSEGV.
+
+With bits and ideas borrowed (as usual) from powerpc.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/include/asm/switch_to_64.h | 3 ++-
+ arch/sparc/kernel/process_64.c | 25 +++++++++++++++++++------
+ arch/sparc/kernel/rtrap_64.S | 1 +
+ arch/sparc/kernel/signal32.c | 12 ++++++++++--
+ arch/sparc/kernel/signal_64.c | 6 +++++-
+ 5 files changed, 37 insertions(+), 10 deletions(-)
+
+--- a/arch/sparc/include/asm/switch_to_64.h
++++ b/arch/sparc/include/asm/switch_to_64.h
+@@ -67,6 +67,7 @@ do { save_and_clear_fpu(); \
+ } while(0)
+
+ void synchronize_user_stack(void);
+-void fault_in_user_windows(void);
++struct pt_regs;
++void fault_in_user_windows(struct pt_regs *);
+
+ #endif /* __SPARC64_SWITCH_TO_64_H */
+--- a/arch/sparc/kernel/process_64.c
++++ b/arch/sparc/kernel/process_64.c
+@@ -36,6 +36,7 @@
+ #include <linux/sysrq.h>
+ #include <linux/nmi.h>
+ #include <linux/context_tracking.h>
++#include <linux/signal.h>
+
+ #include <linux/uaccess.h>
+ #include <asm/page.h>
+@@ -521,7 +522,12 @@ static void stack_unaligned(unsigned lon
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *) sp, 0, current);
+ }
+
+-void fault_in_user_windows(void)
++static const char uwfault32[] = KERN_INFO \
++ "%s[%d]: bad register window fault: SP %08lx (orig_sp %08lx) TPC %08lx O7 %08lx\n";
++static const char uwfault64[] = KERN_INFO \
++ "%s[%d]: bad register window fault: SP %016lx (orig_sp %016lx) TPC %08lx O7 %016lx\n";
++
++void fault_in_user_windows(struct pt_regs *regs)
+ {
+ struct thread_info *t = current_thread_info();
+ unsigned long window;
+@@ -534,9 +540,9 @@ void fault_in_user_windows(void)
+ do {
+ struct reg_window *rwin = &t->reg_window[window];
+ int winsize = sizeof(struct reg_window);
+- unsigned long sp;
++ unsigned long sp, orig_sp;
+
+- sp = t->rwbuf_stkptrs[window];
++ orig_sp = sp = t->rwbuf_stkptrs[window];
+
+ if (test_thread_64bit_stack(sp))
+ sp += STACK_BIAS;
+@@ -547,8 +553,16 @@ void fault_in_user_windows(void)
+ stack_unaligned(sp);
+
+ if (unlikely(copy_to_user((char __user *)sp,
+- rwin, winsize)))
++ rwin, winsize))) {
++ if (show_unhandled_signals)
++ printk_ratelimited(is_compat_task() ?
++ uwfault32 : uwfault64,
++ current->comm, current->pid,
++ sp, orig_sp,
++ regs->tpc,
++ regs->u_regs[UREG_I7]);
+ goto barf;
++ }
+ } while (window--);
+ }
+ set_thread_wsaved(0);
+@@ -556,8 +570,7 @@ void fault_in_user_windows(void)
+
+ barf:
+ set_thread_wsaved(window + 1);
+- user_exit();
+- do_exit(SIGILL);
++ force_sig(SIGSEGV, current);
+ }
+
+ asmlinkage long sparc_do_fork(unsigned long clone_flags,
+--- a/arch/sparc/kernel/rtrap_64.S
++++ b/arch/sparc/kernel/rtrap_64.S
+@@ -39,6 +39,7 @@ __handle_preemption:
+ wrpr %g0, RTRAP_PSTATE_IRQOFF, %pstate
+
+ __handle_user_windows:
++ add %sp, PTREGS_OFF, %o0
+ call fault_in_user_windows
+ 661: wrpr %g0, RTRAP_PSTATE, %pstate
+ /* If userspace is using ADI, it could potentially pass
+--- a/arch/sparc/kernel/signal32.c
++++ b/arch/sparc/kernel/signal32.c
+@@ -371,7 +371,11 @@ static int setup_frame32(struct ksignal
+ get_sigframe(ksig, regs, sigframe_size);
+
+ if (invalid_frame_pointer(sf, sigframe_size)) {
+- do_exit(SIGILL);
++ if (show_unhandled_signals)
++ pr_info("%s[%d] bad frame in setup_frame32: %08lx TPC %08lx O7 %08lx\n",
++ current->comm, current->pid, (unsigned long)sf,
++ regs->tpc, regs->u_regs[UREG_I7]);
++ force_sigsegv(ksig->sig, current);
+ return -EINVAL;
+ }
+
+@@ -501,7 +505,11 @@ static int setup_rt_frame32(struct ksign
+ get_sigframe(ksig, regs, sigframe_size);
+
+ if (invalid_frame_pointer(sf, sigframe_size)) {
+- do_exit(SIGILL);
++ if (show_unhandled_signals)
++ pr_info("%s[%d] bad frame in setup_rt_frame32: %08lx TPC %08lx O7 %08lx\n",
++ current->comm, current->pid, (unsigned long)sf,
++ regs->tpc, regs->u_regs[UREG_I7]);
++ force_sigsegv(ksig->sig, current);
+ return -EINVAL;
+ }
+
+--- a/arch/sparc/kernel/signal_64.c
++++ b/arch/sparc/kernel/signal_64.c
+@@ -370,7 +370,11 @@ setup_rt_frame(struct ksignal *ksig, str
+ get_sigframe(ksig, regs, sf_size);
+
+ if (invalid_frame_pointer (sf)) {
+- do_exit(SIGILL); /* won't return, actually */
++ if (show_unhandled_signals)
++ pr_info("%s[%d] bad frame in setup_rt_frame: %016lx TPC %016lx O7 %016lx\n",
++ current->comm, current->pid, (unsigned long)sf,
++ regs->tpc, regs->u_regs[UREG_I7]);
++ force_sigsegv(ksig->sig, current);
+ return -EINVAL;
+ }
+
--- /dev/null
+From foo@baz Fri Nov 2 06:28:44 CET 2018
+From: "David S. Miller" <davem@davemloft.net>
+Date: Sun, 14 Oct 2018 20:19:31 -0700
+Subject: sparc64: Make proc_id signed.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit b3e1eb8e7ac9aaa283989496651d99267c4cad6c ]
+
+So that when it is unset, i.e. '-1', userspace can see it
+properly.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/include/asm/cpudata_64.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/sparc/include/asm/cpudata_64.h
++++ b/arch/sparc/include/asm/cpudata_64.h
+@@ -28,7 +28,7 @@ typedef struct {
+ unsigned short sock_id; /* physical package */
+ unsigned short core_id;
+ unsigned short max_cache_id; /* groupings of highest shared cache */
+- unsigned short proc_id; /* strand (aka HW thread) id */
++ signed short proc_id; /* strand (aka HW thread) id */
+ } cpuinfo_sparc;
+
+ DECLARE_PER_CPU(cpuinfo_sparc, __cpu_data);
--- /dev/null
+From foo@baz Fri Nov 2 06:28:44 CET 2018
+From: "David S. Miller" <davem@davemloft.net>
+Date: Sun, 14 Oct 2018 20:22:28 -0700
+Subject: sparc64: Set %l4 properly on trap return after handling signals.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit d1f1f98c6d1708a90436e1a3b2aff5e93946731b ]
+
+If we did some signal processing, we have to reload the pt_regs
+tstate register because its value may have changed.
+
+In doing so we also have to extract the %pil value contained in there
+and load that into %l4.
+
+This value is at bit 20 and thus needs to be shifted down before we
+later write it into the %pil register.
+
+Most of the time this is harmless as we are returning to userspace
+and the %pil is zero for that case.
+
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/rtrap_64.S | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/arch/sparc/kernel/rtrap_64.S
++++ b/arch/sparc/kernel/rtrap_64.S
+@@ -85,8 +85,9 @@ __handle_signal:
+ ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
+ sethi %hi(0xf << 20), %l4
+ and %l1, %l4, %l4
++ andn %l1, %l4, %l1
+ ba,pt %xcc, __handle_preemption_continue
+- andn %l1, %l4, %l1
++ srl %l4, 20, %l4
+
+ /* When returning from a NMI (%pil==15) interrupt we want to
+ * avoid running softirqs, doing IRQ tracing, preempting, etc.
--- /dev/null
+From foo@baz Fri Nov 2 06:28:44 CET 2018
+From: "David S. Miller" <davem@davemloft.net>
+Date: Wed, 31 Oct 2018 18:30:21 -0700
+Subject: sparc64: Wire up compat getpeername and getsockname.
+
+From: "David S. Miller" <davem@davemloft.net>
+
+[ Upstream commit 1f2b5b8e2df4591fbca430aff9c5a072dcc0f408 ]
+
+Fixes: 8b30ca73b7cc ("sparc: Add all necessary direct socket system calls.")
+Reported-by: Joseph Myers <joseph@codesourcery.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/sparc/kernel/systbls_64.S | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/sparc/kernel/systbls_64.S
++++ b/arch/sparc/kernel/systbls_64.S
+@@ -47,9 +47,9 @@ sys_call_table32:
+ .word sys_recvfrom, sys_setreuid16, sys_setregid16, sys_rename, compat_sys_truncate
+ /*130*/ .word compat_sys_ftruncate, sys_flock, compat_sys_lstat64, sys_sendto, sys_shutdown
+ .word sys_socketpair, sys_mkdir, sys_rmdir, compat_sys_utimes, compat_sys_stat64
+-/*140*/ .word sys_sendfile64, sys_nis_syscall, compat_sys_futex, sys_gettid, compat_sys_getrlimit
++/*140*/ .word sys_sendfile64, sys_getpeername, compat_sys_futex, sys_gettid, compat_sys_getrlimit
+ .word compat_sys_setrlimit, sys_pivot_root, sys_prctl, sys_pciconfig_read, sys_pciconfig_write
+-/*150*/ .word sys_nis_syscall, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
++/*150*/ .word sys_getsockname, sys_inotify_init, sys_inotify_add_watch, sys_poll, sys_getdents64
+ .word compat_sys_fcntl64, sys_inotify_rm_watch, compat_sys_statfs, compat_sys_fstatfs, sys_oldumount
+ /*160*/ .word compat_sys_sched_setaffinity, compat_sys_sched_getaffinity, sys_getdomainname, sys_setdomainname, sys_nis_syscall
+ .word sys_quotactl, sys_set_tid_address, compat_sys_mount, compat_sys_ustat, sys_setxattr