--- /dev/null
+From 86a86804e4f18fc3880541b3d5a07f4df0fe29cb Mon Sep 17 00:00:00 2001
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Date: Mon, 18 Feb 2019 18:10:08 +0100
+Subject: s390/setup: fix boot crash for machine without EDAT-1
+
+From: Martin Schwidefsky <schwidefsky@de.ibm.com>
+
+commit 86a86804e4f18fc3880541b3d5a07f4df0fe29cb upstream.
+
+The fix to make WARN work in the early boot code created a problem
+on older machines without EDAT-1. The setup_lowcore_dat_on function
+uses the pointer from lowcore_ptr[0] to set the DAT bit in the new
+PSWs. That does not work if the kernel page table is set up with
+4K pages as the prefix address maps to absolute zero.
+
+To make this work the PSWs need to be changed via address 0 in
+form of the S390_lowcore definition.
+
+Reported-by: Guenter Roeck <linux@roeck-us.net>
+Tested-by: Cornelia Huck <cohuck@redhat.com>
+Fixes: 94f85ed3e2f8 ("s390/setup: fix early warning messages")
+Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ arch/s390/kernel/setup.c | 13 ++++++-------
+ 1 file changed, 6 insertions(+), 7 deletions(-)
+
+--- a/arch/s390/kernel/setup.c
++++ b/arch/s390/kernel/setup.c
+@@ -451,13 +451,12 @@ static void __init setup_lowcore_dat_off
+
+ static void __init setup_lowcore_dat_on(void)
+ {
+- struct lowcore *lc;
+-
+- lc = lowcore_ptr[0];
+- lc->external_new_psw.mask |= PSW_MASK_DAT;
+- lc->svc_new_psw.mask |= PSW_MASK_DAT;
+- lc->program_new_psw.mask |= PSW_MASK_DAT;
+- lc->io_new_psw.mask |= PSW_MASK_DAT;
++ __ctl_clear_bit(0, 28);
++ S390_lowcore.external_new_psw.mask |= PSW_MASK_DAT;
++ S390_lowcore.svc_new_psw.mask |= PSW_MASK_DAT;
++ S390_lowcore.program_new_psw.mask |= PSW_MASK_DAT;
++ S390_lowcore.io_new_psw.mask |= PSW_MASK_DAT;
++ __ctl_set_bit(0, 28);
+ }
+
+ static struct resource code_resource = {
drm-fb-helper-generic-fix-drm_fbdev_client_restore.patch
drm-radeon-evergreen_cs-fix-missing-break-in-switch-statement.patch
drm-amd-powerplay-correct-power-reading-on-fiji.patch
-drm-amd-display-add-msse2-to-prevent-clang-from-emitting-libcalls-to-undefined-sw-fp-routines.patch
drm-amd-display-don-t-call-dm_pp_-function-from-an-fpu-block.patch
kvm-call-kvm_arch_memslots_updated-before-updating-memslots.patch
kvm-vmx-compare-only-a-single-byte-for-vmcs-launched-in-vcpu-run.patch
kvm-nvmx-check-a-single-byte-for-vmcs-launched-in-nested-early-checks.patch
net-dsa-lantiq_gswip-fix-use-after-free-on-failed-probe.patch
net-dsa-lantiq_gswip-fix-of-child-node-lookups.patch
+s390-setup-fix-boot-crash-for-machine-without-edat-1.patch
+sunrpc-prevent-thundering-herd-when-the-socket-is-not-connected.patch
+sunrpc-fix-up-rpc-back-channel-transmission.patch
+sunrpc-respect-rpc-call-timeouts-when-retrying-transmission.patch
--- /dev/null
+From 477687e1116ad16180caf8633dd830b296a5ce73 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Tue, 5 Mar 2019 07:30:48 -0500
+Subject: SUNRPC: Fix up RPC back channel transmission
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 477687e1116ad16180caf8633dd830b296a5ce73 upstream.
+
+Now that transmissions happen through a queue, we require the RPC tasks
+to handle error conditions that may have been set while they were
+sleeping. The back channel does not currently do this, but assumes
+that any error condition happens during its own call to xprt_transmit().
+
+The solution is to ensure that the back channel splits out the
+error handling just like the forward channel does.
+
+Fixes: 89f90fe1ad8b ("SUNRPC: Allow calls to xprt_transmit() to drain...")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/clnt.c | 61 +++++++++++++++++++++++++++++-------------------------
+ 1 file changed, 33 insertions(+), 28 deletions(-)
+
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -66,9 +66,6 @@ static void call_decode(struct rpc_task
+ static void call_bind(struct rpc_task *task);
+ static void call_bind_status(struct rpc_task *task);
+ static void call_transmit(struct rpc_task *task);
+-#if defined(CONFIG_SUNRPC_BACKCHANNEL)
+-static void call_bc_transmit(struct rpc_task *task);
+-#endif /* CONFIG_SUNRPC_BACKCHANNEL */
+ static void call_status(struct rpc_task *task);
+ static void call_transmit_status(struct rpc_task *task);
+ static void call_refresh(struct rpc_task *task);
+@@ -1131,6 +1128,8 @@ rpc_call_async(struct rpc_clnt *clnt, co
+ EXPORT_SYMBOL_GPL(rpc_call_async);
+
+ #if defined(CONFIG_SUNRPC_BACKCHANNEL)
++static void call_bc_encode(struct rpc_task *task);
++
+ /**
+ * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
+ * rpc_execute against it
+@@ -1152,7 +1151,7 @@ struct rpc_task *rpc_run_bc_task(struct
+ task = rpc_new_task(&task_setup_data);
+ xprt_init_bc_request(req, task);
+
+- task->tk_action = call_bc_transmit;
++ task->tk_action = call_bc_encode;
+ atomic_inc(&task->tk_count);
+ WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
+ rpc_execute(task);
+@@ -2064,6 +2063,16 @@ call_transmit_status(struct rpc_task *ta
+ }
+
+ #if defined(CONFIG_SUNRPC_BACKCHANNEL)
++static void call_bc_transmit(struct rpc_task *task);
++static void call_bc_transmit_status(struct rpc_task *task);
++
++static void
++call_bc_encode(struct rpc_task *task)
++{
++ xprt_request_enqueue_transmit(task);
++ task->tk_action = call_bc_transmit;
++}
++
+ /*
+ * 5b. Send the backchannel RPC reply. On error, drop the reply. In
+ * addition, disconnect on connectivity errors.
+@@ -2071,26 +2080,23 @@ call_transmit_status(struct rpc_task *ta
+ static void
+ call_bc_transmit(struct rpc_task *task)
+ {
+- struct rpc_rqst *req = task->tk_rqstp;
+-
+- if (rpc_task_need_encode(task))
+- xprt_request_enqueue_transmit(task);
+- if (!test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate))
+- goto out_wakeup;
+-
+- if (!xprt_prepare_transmit(task))
+- goto out_retry;
+-
+- if (task->tk_status < 0) {
+- printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+- "error: %d\n", task->tk_status);
+- goto out_done;
++ task->tk_action = call_bc_transmit_status;
++ if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
++ if (!xprt_prepare_transmit(task))
++ return;
++ task->tk_status = 0;
++ xprt_transmit(task);
+ }
++ xprt_end_transmit(task);
++}
+
+- xprt_transmit(task);
++static void
++call_bc_transmit_status(struct rpc_task *task)
++{
++ struct rpc_rqst *req = task->tk_rqstp;
+
+- xprt_end_transmit(task);
+ dprint_status(task);
++
+ switch (task->tk_status) {
+ case 0:
+ /* Success */
+@@ -2104,8 +2110,14 @@ call_bc_transmit(struct rpc_task *task)
+ case -ENOTCONN:
+ case -EPIPE:
+ break;
++ case -ENOBUFS:
++ rpc_delay(task, HZ>>2);
++ /* fall through */
++ case -EBADSLT:
+ case -EAGAIN:
+- goto out_retry;
++ task->tk_status = 0;
++ task->tk_action = call_bc_transmit;
++ return;
+ case -ETIMEDOUT:
+ /*
+ * Problem reaching the server. Disconnect and let the
+@@ -2124,18 +2136,11 @@ call_bc_transmit(struct rpc_task *task)
+ * We were unable to reply and will have to drop the
+ * request. The server should reconnect and retransmit.
+ */
+- WARN_ON_ONCE(task->tk_status == -EAGAIN);
+ printk(KERN_NOTICE "RPC: Could not send backchannel reply "
+ "error: %d\n", task->tk_status);
+ break;
+ }
+-out_wakeup:
+- rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
+-out_done:
+ task->tk_action = rpc_exit_task;
+- return;
+-out_retry:
+- task->tk_status = 0;
+ }
+ #endif /* CONFIG_SUNRPC_BACKCHANNEL */
+
--- /dev/null
+From ed7dc973bd91da234d93aff6d033a5206a6c9885 Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Mon, 4 Mar 2019 14:19:31 -0500
+Subject: SUNRPC: Prevent thundering herd when the socket is not connected
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit ed7dc973bd91da234d93aff6d033a5206a6c9885 upstream.
+
+If the socket is not connected, then we want to initiate a reconnect
+rather than trying to transmit requests. If there is a large number
+of requests queued and waiting for the lock in call_transmit(),
+then it can take a while for one of them to loop back and retake
+the lock in call_connect.
+
+Fixes: 89f90fe1ad8b ("SUNRPC: Allow calls to xprt_transmit() to drain...")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/clnt.c | 21 +++++++++++++++++----
+ 1 file changed, 17 insertions(+), 4 deletions(-)
+
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -1786,7 +1786,12 @@ call_encode(struct rpc_task *task)
+ xprt_request_enqueue_receive(task);
+ xprt_request_enqueue_transmit(task);
+ out:
+- task->tk_action = call_bind;
++ task->tk_action = call_transmit;
++ /* Check that the connection is OK */
++ if (!xprt_bound(task->tk_xprt))
++ task->tk_action = call_bind;
++ else if (!xprt_connected(task->tk_xprt))
++ task->tk_action = call_connect;
+ }
+
+ /*
+@@ -1978,13 +1983,19 @@ call_transmit(struct rpc_task *task)
+ {
+ dprint_status(task);
+
+- task->tk_status = 0;
++ task->tk_action = call_transmit_status;
+ if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
+ if (!xprt_prepare_transmit(task))
+ return;
+- xprt_transmit(task);
++ task->tk_status = 0;
++ if (test_bit(RPC_TASK_NEED_XMIT, &task->tk_runstate)) {
++ if (!xprt_connected(task->tk_xprt)) {
++ task->tk_status = -ENOTCONN;
++ return;
++ }
++ xprt_transmit(task);
++ }
+ }
+- task->tk_action = call_transmit_status;
+ xprt_end_transmit(task);
+ }
+
+@@ -2046,6 +2057,8 @@ call_transmit_status(struct rpc_task *ta
+ case -EADDRINUSE:
+ case -ENOTCONN:
+ case -EPIPE:
++ task->tk_action = call_bind;
++ task->tk_status = 0;
+ break;
+ }
+ }
--- /dev/null
+From 7b3fef8e4157ed424bcde039a60a730aa0dfb0eb Mon Sep 17 00:00:00 2001
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+Date: Thu, 7 Mar 2019 14:10:32 -0500
+Subject: SUNRPC: Respect RPC call timeouts when retrying transmission
+
+From: Trond Myklebust <trond.myklebust@hammerspace.com>
+
+commit 7b3fef8e4157ed424bcde039a60a730aa0dfb0eb upstream.
+
+Fix a regression where soft and softconn requests are not timing out
+as expected.
+
+Fixes: 89f90fe1ad8b ("SUNRPC: Allow calls to xprt_transmit() to drain...")
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/clnt.c | 42 ++++++++++++++++++++++++------------------
+ 1 file changed, 24 insertions(+), 18 deletions(-)
+
+--- a/net/sunrpc/clnt.c
++++ b/net/sunrpc/clnt.c
+@@ -77,6 +77,7 @@ static void call_connect_status(struct r
+ static __be32 *rpc_encode_header(struct rpc_task *task);
+ static __be32 *rpc_verify_header(struct rpc_task *task);
+ static int rpc_ping(struct rpc_clnt *clnt);
++static void rpc_check_timeout(struct rpc_task *task);
+
+ static void rpc_register_client(struct rpc_clnt *clnt)
+ {
+@@ -1941,8 +1942,7 @@ call_connect_status(struct rpc_task *tas
+ break;
+ if (clnt->cl_autobind) {
+ rpc_force_rebind(clnt);
+- task->tk_action = call_bind;
+- return;
++ goto out_retry;
+ }
+ /* fall through */
+ case -ECONNRESET:
+@@ -1962,16 +1962,19 @@ call_connect_status(struct rpc_task *tas
+ /* fall through */
+ case -ENOTCONN:
+ case -EAGAIN:
+- /* Check for timeouts before looping back to call_bind */
+ case -ETIMEDOUT:
+- task->tk_action = call_timeout;
+- return;
++ goto out_retry;
+ case 0:
+ clnt->cl_stats->netreconn++;
+ task->tk_action = call_transmit;
+ return;
+ }
+ rpc_exit(task, status);
++ return;
++out_retry:
++ /* Check for timeouts before looping back to call_bind */
++ task->tk_action = call_bind;
++ rpc_check_timeout(task);
+ }
+
+ /*
+@@ -2048,7 +2051,7 @@ call_transmit_status(struct rpc_task *ta
+ trace_xprt_ping(task->tk_xprt,
+ task->tk_status);
+ rpc_exit(task, task->tk_status);
+- break;
++ return;
+ }
+ /* fall through */
+ case -ECONNRESET:
+@@ -2060,6 +2063,7 @@ call_transmit_status(struct rpc_task *ta
+ task->tk_status = 0;
+ break;
+ }
++ rpc_check_timeout(task);
+ }
+
+ #if defined(CONFIG_SUNRPC_BACKCHANNEL)
+@@ -2196,7 +2200,7 @@ call_status(struct rpc_task *task)
+ case -EPIPE:
+ case -ENOTCONN:
+ case -EAGAIN:
+- task->tk_action = call_encode;
++ task->tk_action = call_timeout;
+ break;
+ case -EIO:
+ /* shutdown or soft timeout */
+@@ -2210,20 +2214,13 @@ call_status(struct rpc_task *task)
+ }
+ }
+
+-/*
+- * 6a. Handle RPC timeout
+- * We do not release the request slot, so we keep using the
+- * same XID for all retransmits.
+- */
+ static void
+-call_timeout(struct rpc_task *task)
++rpc_check_timeout(struct rpc_task *task)
+ {
+ struct rpc_clnt *clnt = task->tk_client;
+
+- if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
+- dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
+- goto retry;
+- }
++ if (xprt_adjust_timeout(task->tk_rqstp) == 0)
++ return;
+
+ dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
+ task->tk_timeouts++;
+@@ -2259,10 +2256,19 @@ call_timeout(struct rpc_task *task)
+ * event? RFC2203 requires the server to drop all such requests.
+ */
+ rpcauth_invalcred(task);
++}
+
+-retry:
++/*
++ * 6a. Handle RPC timeout
++ * We do not release the request slot, so we keep using the
++ * same XID for all retransmits.
++ */
++static void
++call_timeout(struct rpc_task *task)
++{
+ task->tk_action = call_encode;
+ task->tk_status = 0;
++ rpc_check_timeout(task);
+ }
+
+ /*