From: Greg Kroah-Hartman
Date: Mon, 10 Dec 2018 14:28:25 +0000 (+0100)
Subject: 4.14-stable patches
X-Git-Tag: v4.19.9~20
X-Git-Url: http://git.ipfire.org/?a=commitdiff_plain;h=13f6de4403d871354b1bf1783c1b947643697d20;p=thirdparty%2Fkernel%2Fstable-queue.git

4.14-stable patches

added patches:
	arm-8806-1-kprobes-fix-false-positive-with-fortify_source.patch
	dmaengine-cppi41-delete-channel-from-pending-list-when-stop-channel.patch
	dmaengine-dw-fix-fifo-size-for-intel-merrifield.patch
	sunrpc-fix-leak-of-krb5p-encode-pages.patch
	vhost-vsock-fix-use-after-free-in-network-stack-callers.patch
	virtio-s390-avoid-race-on-vcdev-config.patch
	virtio-s390-fix-race-in-ccw_io_helper.patch
	xhci-prevent-u1-u2-link-pm-states-if-exit-latency-is-too-long.patch
	xhci-workaround-css-timeout-on-amd-snps-3.0-xhc.patch
---

diff --git a/queue-4.14/arm-8806-1-kprobes-fix-false-positive-with-fortify_source.patch b/queue-4.14/arm-8806-1-kprobes-fix-false-positive-with-fortify_source.patch
new file mode 100644
index 00000000000..72ed04da6a7
--- /dev/null
+++ b/queue-4.14/arm-8806-1-kprobes-fix-false-positive-with-fortify_source.patch
@@ -0,0 +1,42 @@
+From e46daee53bb50bde38805f1823a182979724c229 Mon Sep 17 00:00:00 2001
+From: Kees Cook
+Date: Tue, 30 Oct 2018 22:12:56 +0100
+Subject: ARM: 8806/1: kprobes: Fix false positive with FORTIFY_SOURCE
+
+From: Kees Cook
+
+commit e46daee53bb50bde38805f1823a182979724c229 upstream.
+
+The arm compiler internally interprets an inline assembly label
+as an unsigned long value, not a pointer. As a result, under
+CONFIG_FORTIFY_SOURCE, the address of a label has a size of 4 bytes,
+which was tripping the runtime checks. Instead, we can just cast the label
+(as done with the size calculations earlier).
+
+Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1639397
+
+Reported-by: William Cohen
+Fixes: 6974f0c4555e ("include/linux/string.h: add the option of fortified string.h functions")
+Cc: stable@vger.kernel.org
+Acked-by: Laura Abbott
+Acked-by: Masami Hiramatsu
+Tested-by: William Cohen
+Signed-off-by: Kees Cook
+Signed-off-by: Russell King
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ arch/arm/probes/kprobes/opt-arm.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/arm/probes/kprobes/opt-arm.c
++++ b/arch/arm/probes/kprobes/opt-arm.c
+@@ -247,7 +247,7 @@ int arch_prepare_optimized_kprobe(struct
+ 	}
+
+ 	/* Copy arch-dep-instance from template. */
+-	memcpy(code, &optprobe_template_entry,
++	memcpy(code, (unsigned char *)optprobe_template_entry,
+ 	       TMPL_END_IDX * sizeof(kprobe_opcode_t));
+
+ 	/* Adjust buffer according to instruction. */
diff --git a/queue-4.14/dmaengine-cppi41-delete-channel-from-pending-list-when-stop-channel.patch b/queue-4.14/dmaengine-cppi41-delete-channel-from-pending-list-when-stop-channel.patch
new file mode 100644
index 00000000000..0f9cb2e1558
--- /dev/null
+++ b/queue-4.14/dmaengine-cppi41-delete-channel-from-pending-list-when-stop-channel.patch
@@ -0,0 +1,62 @@
+From 59861547ec9a9736e7882f6fb0c096a720ff811a Mon Sep 17 00:00:00 2001
+From: Bin Liu
+Date: Mon, 12 Nov 2018 09:43:22 -0600
+Subject: dmaengine: cppi41: delete channel from pending list when stop channel
+
+From: Bin Liu
+
+commit 59861547ec9a9736e7882f6fb0c096a720ff811a upstream.
+
+The driver defines three states for a cppi channel.
+
+- idle: .chan_busy == 0 && not in .pending list
+- pending: .chan_busy == 0 && in .pending list
+- busy: .chan_busy == 1 && not in .pending list
+
+There are cases in which the cppi channel could be in the pending state
+when cppi41_dma_issue_pending() is called after cppi41_runtime_suspend()
+is called.
+
+cppi41_stop_chan() has a bug in these cases when setting channels to the
+idle state: it only checks the .chan_busy flag, but not the .pending
+list, so later, when cppi41_runtime_resume() is called, the channels in
+the .pending list will be transitioned to the busy state.
+
+Removing channels from the .pending list solves the problem.
+
+Fixes: 975faaeb9985 ("dma: cppi41: start tear down only if channel is busy")
+Cc: stable@vger.kernel.org # v3.15+
+Signed-off-by: Bin Liu
+Reviewed-by: Peter Ujfalusi
+Signed-off-by: Vinod Koul
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/dma/cppi41.c | 16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+--- a/drivers/dma/cppi41.c
++++ b/drivers/dma/cppi41.c
+@@ -723,8 +723,22 @@ static int cppi41_stop_chan(struct dma_c
+
+ 	desc_phys = lower_32_bits(c->desc_phys);
+ 	desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+-	if (!cdd->chan_busy[desc_num])
++	if (!cdd->chan_busy[desc_num]) {
++		struct cppi41_channel *cc, *_ct;
++
++		/*
++		 * channels might still be in the pending list if
++		 * cppi41_dma_issue_pending() is called after
++		 * cppi41_runtime_suspend() is called
++		 */
++		list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
++			if (cc != c)
++				continue;
++			list_del(&cc->node);
++			break;
++		}
+ 		return 0;
++	}
+
+ 	ret = cppi41_tear_down_chan(c);
+ 	if (ret)
diff --git a/queue-4.14/dmaengine-dw-fix-fifo-size-for-intel-merrifield.patch b/queue-4.14/dmaengine-dw-fix-fifo-size-for-intel-merrifield.patch
new file mode 100644
index 00000000000..d6e7f37e937
--- /dev/null
+++ b/queue-4.14/dmaengine-dw-fix-fifo-size-for-intel-merrifield.patch
@@ -0,0 +1,53 @@
+From ffe843b18211301ad25893eba09f402c19d12304 Mon Sep 17 00:00:00 2001
+From: Andy Shevchenko
+Date: Wed, 5 Dec 2018 18:33:59 +0200
+Subject: dmaengine: dw: Fix FIFO size for Intel Merrifield
+
+From: Andy Shevchenko
+
+commit ffe843b18211301ad25893eba09f402c19d12304 upstream.
+
+Intel Merrifield has a reduced FIFO size in its iDMA 32-bit controller,
+i.e. 512 bytes instead of 1024.
+
+Fix this by partitioning it as 64 bytes per channel.
+
+Note, in the future we might switch to a 'fifo-size' property instead of
+a hard-coded value.
+
+Fixes: 199244d69458 ("dmaengine: dw: add support of iDMA 32-bit hardware")
+Signed-off-by: Andy Shevchenko
+Cc: stable@vger.kernel.org
+Signed-off-by: Vinod Koul
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/dma/dw/core.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/drivers/dma/dw/core.c
++++ b/drivers/dma/dw/core.c
+@@ -1064,12 +1064,12 @@ static void dwc_issue_pending(struct dma
+ /*
+  * Program FIFO size of channels.
+  *
+- * By default full FIFO (1024 bytes) is assigned to channel 0. Here we
++ * By default full FIFO (512 bytes) is assigned to channel 0. Here we
+  * slice FIFO on equal parts between channels.
+  */
+ static void idma32_fifo_partition(struct dw_dma *dw)
+ {
+-	u64 value = IDMA32C_FP_PSIZE_CH0(128) | IDMA32C_FP_PSIZE_CH1(128) |
++	u64 value = IDMA32C_FP_PSIZE_CH0(64) | IDMA32C_FP_PSIZE_CH1(64) |
+ 		    IDMA32C_FP_UPDATE;
+ 	u64 fifo_partition = 0;
+
+@@ -1082,7 +1082,7 @@ static void idma32_fifo_partition(struct
+ 	/* Fill FIFO_PARTITION high bits (Channels 2..3, 6..7) */
+ 	fifo_partition |= value << 32;
+
+-	/* Program FIFO Partition registers - 128 bytes for each channel */
++	/* Program FIFO Partition registers - 64 bytes per channel */
+ 	idma32_writeq(dw, FIFO_PARTITION1, fifo_partition);
+ 	idma32_writeq(dw, FIFO_PARTITION0, fifo_partition);
+ }
diff --git a/queue-4.14/series b/queue-4.14/series
index bd085140220..b22c7a3e8b6 100644
--- a/queue-4.14/series
+++ b/queue-4.14/series
@@ -37,3 +37,12 @@ alsa-pcm-fix-starvation-on-down_write_nonblock.patch
 alsa-pcm-call-snd_pcm_unlink-conditionally-at-closing.patch
 alsa-pcm-fix-interval-evaluation-with-openmin-max.patch
 alsa-hda-realtek-fix-speaker-output-regression-on-thinkpad-t570.patch
+virtio-s390-avoid-race-on-vcdev-config.patch
+virtio-s390-fix-race-in-ccw_io_helper.patch
+vhost-vsock-fix-use-after-free-in-network-stack-callers.patch
+sunrpc-fix-leak-of-krb5p-encode-pages.patch
+dmaengine-dw-fix-fifo-size-for-intel-merrifield.patch
+dmaengine-cppi41-delete-channel-from-pending-list-when-stop-channel.patch
+arm-8806-1-kprobes-fix-false-positive-with-fortify_source.patch
+xhci-workaround-css-timeout-on-amd-snps-3.0-xhc.patch
+xhci-prevent-u1-u2-link-pm-states-if-exit-latency-is-too-long.patch
diff --git a/queue-4.14/sunrpc-fix-leak-of-krb5p-encode-pages.patch b/queue-4.14/sunrpc-fix-leak-of-krb5p-encode-pages.patch
new file mode 100644
index 00000000000..efe5d9aecf0
--- /dev/null
+++ b/queue-4.14/sunrpc-fix-leak-of-krb5p-encode-pages.patch
@@ -0,0 +1,42 @@
+From 8dae5398ab1ac107b1517e8195ed043d5f422bd0 Mon Sep 17 00:00:00 2001
+From: Chuck Lever
+Date: Fri, 30 Nov 2018 15:39:57 -0500
+Subject: SUNRPC: Fix leak of krb5p encode pages
+
+From: Chuck Lever
+
+commit 8dae5398ab1ac107b1517e8195ed043d5f422bd0 upstream.
+
+call_encode can be invoked more than once per RPC call. Ensure that
+each call to gss_wrap_req_priv does not overwrite pointers to
+previously allocated memory.
+
+Signed-off-by: Chuck Lever
+Cc: stable@kernel.org
+Signed-off-by: Trond Myklebust
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ net/sunrpc/auth_gss/auth_gss.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -1736,6 +1736,7 @@ priv_release_snd_buf(struct rpc_rqst *rq
+ 	for (i=0; i < rqstp->rq_enc_pages_num; i++)
+ 		__free_page(rqstp->rq_enc_pages[i]);
+ 	kfree(rqstp->rq_enc_pages);
++	rqstp->rq_release_snd_buf = NULL;
+ }
+
+ static int
+@@ -1744,6 +1745,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
+ 	struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
+ 	int first, last, i;
+
++	if (rqstp->rq_release_snd_buf)
++		rqstp->rq_release_snd_buf(rqstp);
++
+ 	if (snd_buf->page_len == 0) {
+ 		rqstp->rq_enc_pages_num = 0;
+ 		return 0;
diff --git a/queue-4.14/vhost-vsock-fix-use-after-free-in-network-stack-callers.patch b/queue-4.14/vhost-vsock-fix-use-after-free-in-network-stack-callers.patch
new file mode 100644
index 00000000000..ba87227c552
--- /dev/null
+++ b/queue-4.14/vhost-vsock-fix-use-after-free-in-network-stack-callers.patch
@@ -0,0 +1,193 @@
+From 834e772c8db0c6a275d75315d90aba4ebbb1e249 Mon Sep 17 00:00:00 2001
+From: Stefan Hajnoczi
+Date: Mon, 5 Nov 2018 10:35:47 +0000
+Subject: vhost/vsock: fix use-after-free in network stack callers
+
+From: Stefan Hajnoczi
+
+commit 834e772c8db0c6a275d75315d90aba4ebbb1e249 upstream.
+
+If the network stack calls .send_pkt()/.cancel_pkt() during .release(),
+a struct vhost_vsock use-after-free is possible. This occurs because
+.release() does not wait for other CPUs to stop using struct
+vhost_vsock.
+
+Switch to an RCU-enabled hashtable (indexed by guest CID) so that
+.release() can wait for other CPUs by calling synchronize_rcu(). This
+also eliminates vhost_vsock_lock acquisition in the data path, so it
+could have a positive effect on performance.
+
+This is CVE-2018-14625 "kernel: use-after-free Read in vhost_transport_send_pkt".
+
+Cc: stable@vger.kernel.org
+Reported-and-tested-by: syzbot+bd391451452fb0b93039@syzkaller.appspotmail.com
+Reported-by: syzbot+e3e074963495f92a89ed@syzkaller.appspotmail.com
+Reported-by: syzbot+d5a0a170c5069658b141@syzkaller.appspotmail.com
+Signed-off-by: Stefan Hajnoczi
+Signed-off-by: Michael S. Tsirkin
+Acked-by: Jason Wang
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/vhost/vsock.c | 57 ++++++++++++++++++++++++++++----------------------
+ 1 file changed, 33 insertions(+), 24 deletions(-)
+
+--- a/drivers/vhost/vsock.c
++++ b/drivers/vhost/vsock.c
+@@ -15,6 +15,7 @@
+ #include <net/sock.h>
+ #include <linux/virtio_vsock.h>
+ #include <linux/vhost.h>
++#include <linux/hashtable.h>
+
+ #include <net/af_vsock.h>
+ #include "vhost.h"
+@@ -27,14 +28,14 @@ enum {
+
+ /* Used to track all the vhost_vsock instances on the system. */
+ static DEFINE_SPINLOCK(vhost_vsock_lock);
+-static LIST_HEAD(vhost_vsock_list);
++static DEFINE_READ_MOSTLY_HASHTABLE(vhost_vsock_hash, 8);
+
+ struct vhost_vsock {
+ 	struct vhost_dev dev;
+ 	struct vhost_virtqueue vqs[2];
+
+-	/* Link to global vhost_vsock_list, protected by vhost_vsock_lock */
+-	struct list_head list;
++	/* Link to global vhost_vsock_hash, writes use vhost_vsock_lock */
++	struct hlist_node hash;
+
+ 	struct vhost_work send_pkt_work;
+ 	spinlock_t send_pkt_list_lock;
+@@ -50,11 +51,14 @@ static u32 vhost_transport_get_local_cid
+ 	return VHOST_VSOCK_DEFAULT_HOST_CID;
+ }
+
+-static struct vhost_vsock *__vhost_vsock_get(u32 guest_cid)
++/* Callers that dereference the return value must hold vhost_vsock_lock or the
++ * RCU read lock.
++ */
++static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+ {
+ 	struct vhost_vsock *vsock;
+
+-	list_for_each_entry(vsock, &vhost_vsock_list, list) {
++	hash_for_each_possible_rcu(vhost_vsock_hash, vsock, hash, guest_cid) {
+ 		u32 other_cid = vsock->guest_cid;
+
+ 		/* Skip instances that have no CID yet */
+@@ -69,17 +73,6 @@ static struct vhost_vsock *__vhost_vsock
+ 	return NULL;
+ }
+
+-static struct vhost_vsock *vhost_vsock_get(u32 guest_cid)
+-{
+-	struct vhost_vsock *vsock;
+-
+-	spin_lock_bh(&vhost_vsock_lock);
+-	vsock = __vhost_vsock_get(guest_cid);
+-	spin_unlock_bh(&vhost_vsock_lock);
+-
+-	return vsock;
+-}
+-
+ static void
+ vhost_transport_do_send_pkt(struct vhost_vsock *vsock,
+ 			    struct vhost_virtqueue *vq)
+@@ -210,9 +203,12 @@ vhost_transport_send_pkt(struct virtio_v
+ 	struct vhost_vsock *vsock;
+ 	int len = pkt->len;
+
++	rcu_read_lock();
++
+ 	/* Find the vhost_vsock according to guest context id */
+ 	vsock = vhost_vsock_get(le64_to_cpu(pkt->hdr.dst_cid));
+ 	if (!vsock) {
++		rcu_read_unlock();
+ 		virtio_transport_free_pkt(pkt);
+ 		return -ENODEV;
+ 	}
+@@ -225,6 +221,8 @@ vhost_transport_send_pkt(struct virtio_v
+ 	spin_unlock_bh(&vsock->send_pkt_list_lock);
+
+ 	vhost_work_queue(&vsock->dev, &vsock->send_pkt_work);
++
++	rcu_read_unlock();
+ 	return len;
+ }
+
+@@ -234,12 +232,15 @@ vhost_transport_cancel_pkt(struct vsock_
+ 	struct vhost_vsock *vsock;
+ 	struct virtio_vsock_pkt *pkt, *n;
+ 	int cnt = 0;
++	int ret = -ENODEV;
+ 	LIST_HEAD(freeme);
+
++	rcu_read_lock();
++
+ 	/* Find the vhost_vsock according to guest context id */
+ 	vsock = vhost_vsock_get(vsk->remote_addr.svm_cid);
+ 	if (!vsock)
+-		return -ENODEV;
++		goto out;
+
+ 	spin_lock_bh(&vsock->send_pkt_list_lock);
+ 	list_for_each_entry_safe(pkt, n, &vsock->send_pkt_list, list) {
+@@ -265,7 +266,10 @@ vhost_transport_cancel_pkt(struct vsock_
+ 		vhost_poll_queue(&tx_vq->poll);
+ 	}
+
+-	return 0;
++	ret = 0;
++out:
++	rcu_read_unlock();
++	return ret;
+ }
+
+ static struct virtio_vsock_pkt *
+@@ -531,10 +535,6 @@ static int vhost_vsock_dev_open(struct i
+ 	spin_lock_init(&vsock->send_pkt_list_lock);
+ 	INIT_LIST_HEAD(&vsock->send_pkt_list);
+ 	vhost_work_init(&vsock->send_pkt_work, vhost_transport_send_pkt_work);
+-
+-	spin_lock_bh(&vhost_vsock_lock);
+-	list_add_tail(&vsock->list, &vhost_vsock_list);
+-	spin_unlock_bh(&vhost_vsock_lock);
+ 	return 0;
+
+ out:
+@@ -575,9 +575,13 @@ static int vhost_vsock_dev_release(struc
+ 	struct vhost_vsock *vsock = file->private_data;
+
+ 	spin_lock_bh(&vhost_vsock_lock);
+-	list_del(&vsock->list);
++	if (vsock->guest_cid)
++		hash_del_rcu(&vsock->hash);
+ 	spin_unlock_bh(&vhost_vsock_lock);
+
++	/* Wait for other CPUs to finish using vsock */
++	synchronize_rcu();
++
+ 	/* Iterating over all connections for all CIDs to find orphans is
+ 	 * inefficient. Room for improvement here. */
+ 	vsock_for_each_connected_socket(vhost_vsock_reset_orphans);
+@@ -618,12 +622,17 @@ static int vhost_vsock_set_cid(struct vh
+
+ 	/* Refuse if CID is already in use */
+ 	spin_lock_bh(&vhost_vsock_lock);
+-	other = __vhost_vsock_get(guest_cid);
++	other = vhost_vsock_get(guest_cid);
+ 	if (other && other != vsock) {
+ 		spin_unlock_bh(&vhost_vsock_lock);
+ 		return -EADDRINUSE;
+ 	}
++
++	if (vsock->guest_cid)
++		hash_del_rcu(&vsock->hash);
++
+ 	vsock->guest_cid = guest_cid;
++	hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
+ 	spin_unlock_bh(&vhost_vsock_lock);
+
+ 	return 0;
diff --git a/queue-4.14/virtio-s390-avoid-race-on-vcdev-config.patch b/queue-4.14/virtio-s390-avoid-race-on-vcdev-config.patch
new file mode 100644
index 00000000000..b88619089e0
--- /dev/null
+++ b/queue-4.14/virtio-s390-avoid-race-on-vcdev-config.patch
@@ -0,0 +1,74 @@
+From 2448a299ec416a80f699940a86f4a6d9a4f643b1 Mon Sep 17 00:00:00 2001
+From: Halil Pasic
+Date: Wed, 26 Sep 2018 18:48:29 +0200
+Subject: virtio/s390: avoid race on vcdev->config
+
+From: Halil Pasic
+
+commit 2448a299ec416a80f699940a86f4a6d9a4f643b1 upstream.
+
+Currently we have a race on vcdev->config in virtio_ccw_get_config() and
+in virtio_ccw_set_config().
+
+This normally does not cause problems, as these are usually infrequent
+operations. However, for some devices, writing to/reading from the config
+space can be triggered through sysfs attributes. For these, userspace can
+force the race by increasing the frequency.
+
+Signed-off-by: Halil Pasic
+Cc: stable@vger.kernel.org
+Message-Id: <20180925121309.58524-2-pasic@linux.ibm.com>
+Signed-off-by: Cornelia Huck
+Signed-off-by: Michael S. Tsirkin
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/s390/virtio/virtio_ccw.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -831,6 +831,7 @@ static void virtio_ccw_get_config(struct
+ 	int ret;
+ 	struct ccw1 *ccw;
+ 	void *config_area;
++	unsigned long flags;
+
+ 	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ 	if (!ccw)
+@@ -849,11 +850,13 @@ static void virtio_ccw_get_config(struct
+ 	if (ret)
+ 		goto out_free;
+
++	spin_lock_irqsave(&vcdev->lock, flags);
+ 	memcpy(vcdev->config, config_area, offset + len);
+-	if (buf)
+-		memcpy(buf, &vcdev->config[offset], len);
+ 	if (vcdev->config_ready < offset + len)
+ 		vcdev->config_ready = offset + len;
++	spin_unlock_irqrestore(&vcdev->lock, flags);
++	if (buf)
++		memcpy(buf, config_area + offset, len);
+
+ out_free:
+ 	kfree(config_area);
+@@ -867,6 +870,7 @@ static void virtio_ccw_set_config(struct
+ 	struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+ 	struct ccw1 *ccw;
+ 	void *config_area;
++	unsigned long flags;
+
+ 	ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+ 	if (!ccw)
+@@ -879,9 +883,11 @@ static void virtio_ccw_set_config(struct
+ 	/* Make sure we don't overwrite fields. */
+ 	if (vcdev->config_ready < offset)
+ 		virtio_ccw_get_config(vdev, 0, NULL, offset);
++	spin_lock_irqsave(&vcdev->lock, flags);
+ 	memcpy(&vcdev->config[offset], buf, len);
+ 	/* Write the config area to the host. */
+ 	memcpy(config_area, vcdev->config, sizeof(vcdev->config));
++	spin_unlock_irqrestore(&vcdev->lock, flags);
+ 	ccw->cmd_code = CCW_CMD_WRITE_CONF;
+ 	ccw->flags = 0;
+ 	ccw->count = offset + len;
diff --git a/queue-4.14/virtio-s390-fix-race-in-ccw_io_helper.patch b/queue-4.14/virtio-s390-fix-race-in-ccw_io_helper.patch
new file mode 100644
index 00000000000..e27c1ccb7bc
--- /dev/null
+++ b/queue-4.14/virtio-s390-fix-race-in-ccw_io_helper.patch
@@ -0,0 +1,79 @@
+From 78b1a52e05c9db11d293342e8d6d8a230a04b4e7 Mon Sep 17 00:00:00 2001
+From: Halil Pasic
+Date: Wed, 26 Sep 2018 18:48:30 +0200
+Subject: virtio/s390: fix race in ccw_io_helper()
+
+From: Halil Pasic
+
+commit 78b1a52e05c9db11d293342e8d6d8a230a04b4e7 upstream.
+
+While ccw_io_helper() is seemingly intended to be exclusive, in the
+sense that it is supposed to facilitate I/O for at most one thread at
+any given time, there is actually nothing ensuring that threads won't
+pile up at vcdev->wait_q. If they do, all threads get woken up and see
+the status that belongs to some other request than their own. This can
+lead to bugs. For an example, see:
+https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1788432
+
+This race normally does not cause any problems. The operations provided
+by struct virtio_config_ops are usually invoked in a well-defined
+sequence, normally don't fail, and are normally used quite infrequently,
+too.
+
+Yet, if some of these operations are directly triggered via sysfs
+attributes, like in the case described by the referenced bug, userspace
+is given an opportunity to force races by increasing the frequency of the
+given operations.
+
+Let us fix the problem by ensuring that, for each device, we finish
+processing the previous request before starting a new one.
+
+Signed-off-by: Halil Pasic
+Reported-by: Colin Ian King
+Cc: stable@vger.kernel.org
+Message-Id: <20180925121309.58524-3-pasic@linux.ibm.com>
+Signed-off-by: Cornelia Huck
+Signed-off-by: Michael S. Tsirkin
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/s390/virtio/virtio_ccw.c | 7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -59,6 +59,7 @@ struct virtio_ccw_device {
+ 	unsigned int revision; /* Transport revision */
+ 	wait_queue_head_t wait_q;
+ 	spinlock_t lock;
++	struct mutex io_lock; /* Serializes I/O requests */
+ 	struct list_head virtqueues;
+ 	unsigned long indicators;
+ 	unsigned long indicators2;
+@@ -299,6 +300,7 @@ static int ccw_io_helper(struct virtio_c
+ 	unsigned long flags;
+ 	int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
+
++	mutex_lock(&vcdev->io_lock);
+ 	do {
+ 		spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
+ 		ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
+@@ -311,7 +313,9 @@ static int ccw_io_helper(struct virtio_c
+ 		cpu_relax();
+ 	} while (ret == -EBUSY);
+ 	wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
+-	return ret ? ret : vcdev->err;
++	ret = ret ? ret : vcdev->err;
++	mutex_unlock(&vcdev->io_lock);
++	return ret;
+ }
+
+ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
+@@ -1256,6 +1260,7 @@ static int virtio_ccw_online(struct ccw_
+ 	init_waitqueue_head(&vcdev->wait_q);
+ 	INIT_LIST_HEAD(&vcdev->virtqueues);
+ 	spin_lock_init(&vcdev->lock);
++	mutex_init(&vcdev->io_lock);
+
+ 	spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+ 	dev_set_drvdata(&cdev->dev, vcdev);
diff --git a/queue-4.14/xhci-prevent-u1-u2-link-pm-states-if-exit-latency-is-too-long.patch b/queue-4.14/xhci-prevent-u1-u2-link-pm-states-if-exit-latency-is-too-long.patch
new file mode 100644
index 00000000000..fdb9ebbef27
--- /dev/null
+++ b/queue-4.14/xhci-prevent-u1-u2-link-pm-states-if-exit-latency-is-too-long.patch
@@ -0,0 +1,61 @@
+From 0472bf06c6fd33c1a18aaead4c8f91e5a03d8d7b Mon Sep 17 00:00:00 2001
+From: Mathias Nyman
+Date: Wed, 5 Dec 2018 14:22:39 +0200
+Subject: xhci: Prevent U1/U2 link pm states if exit latency is too long
+
+From: Mathias Nyman
+
+commit 0472bf06c6fd33c1a18aaead4c8f91e5a03d8d7b upstream.
+
+Don't allow USB3 U1 or U2 if the latency to wake up from the U-state
+reaches the service interval for a periodic endpoint.
+
+This is according to xhci 1.1 specification section 4.23.5.2 extra note:
+
+"Software shall ensure that a device is prevented from entering a U-state
+ where its worst case exit latency approaches the ESIT."
+
+Allowing too long exit latencies for a periodic endpoint confuses the
+xHC's internal scheduling, and new devices may fail to enumerate with a
+"Not enough bandwidth for new device state" error from the host.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Mathias Nyman
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/usb/host/xhci.c | 16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4381,6 +4381,14 @@ static u16 xhci_calculate_u1_timeout(str
+ {
+ 	unsigned long long timeout_ns;
+
++	/* Prevent U1 if service interval is shorter than U1 exit latency */
++	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
++		if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
++			dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
++			return USB3_LPM_DISABLED;
++		}
++	}
++
+ 	if (xhci->quirks & XHCI_INTEL_HOST)
+ 		timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+ 	else
+@@ -4437,6 +4445,14 @@ static u16 xhci_calculate_u2_timeout(str
+ {
+ 	unsigned long long timeout_ns;
+
++	/* Prevent U2 if service interval is shorter than U2 exit latency */
++	if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
++		if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
++			dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
++			return USB3_LPM_DISABLED;
++		}
++	}
++
+ 	if (xhci->quirks & XHCI_INTEL_HOST)
+ 		timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+ 	else
diff --git a/queue-4.14/xhci-workaround-css-timeout-on-amd-snps-3.0-xhc.patch b/queue-4.14/xhci-workaround-css-timeout-on-amd-snps-3.0-xhc.patch
new file mode 100644
index 00000000000..0ae94fd5df9
--- /dev/null
+++ b/queue-4.14/xhci-workaround-css-timeout-on-amd-snps-3.0-xhc.patch
@@ -0,0 +1,118 @@
+From a7d57abcc8a5bdeb53bbf8e87558e8e0a2c2a29d Mon Sep 17 00:00:00 2001
+From: Sandeep Singh
+Date: Wed, 5 Dec 2018 14:22:38 +0200
+Subject: xhci: workaround CSS timeout on AMD SNPS 3.0 xHC
+
+From: Sandeep Singh
+
+commit a7d57abcc8a5bdeb53bbf8e87558e8e0a2c2a29d upstream.
+
+Occasionally the AMD SNPS 3.0 xHC does not respond to
+CSS when set; it also does not flag anything on SRE and HCE
+to indicate internal xHC errors in the USBSTS register. This stalls
+the entire system-wide suspend, and there is no point in stalling
+just because the xHC's CSS is not responding.
+
+To work around this problem, if the xHC does not flag
+anything on SRE and HCE, we can skip the CSS
+timeout and allow the system to continue the suspend. Once the
+system resume happens, we can internally reset the controller
+using the XHCI_RESET_ON_RESUME quirk.
+
+Signed-off-by: Shyam Sundar S K
+Signed-off-by: Sandeep Singh
+cc: Nehal Shah
+Cc: <stable@vger.kernel.org>
+Tested-by: Kai-Heng Feng
+Signed-off-by: Mathias Nyman
+Signed-off-by: Greg Kroah-Hartman
+
+---
+ drivers/usb/host/xhci-pci.c | 4 ++++
+ drivers/usb/host/xhci.c | 26 ++++++++++++++++++++++----
+ drivers/usb/host/xhci.h | 3 +++
+ 3 files changed, 29 insertions(+), 4 deletions(-)
+
+--- a/drivers/usb/host/xhci-pci.c
++++ b/drivers/usb/host/xhci-pci.c
+@@ -144,6 +144,10 @@ static void xhci_pci_quirks(struct devic
+ 	     pdev->device == 0x43bb))
+ 		xhci->quirks |= XHCI_SUSPEND_DELAY;
+
++	if (pdev->vendor == PCI_VENDOR_ID_AMD &&
++	    (pdev->device == 0x15e0 || pdev->device == 0x15e1))
++		xhci->quirks |= XHCI_SNPS_BROKEN_SUSPEND;
++
+ 	if (pdev->vendor == PCI_VENDOR_ID_AMD)
+ 		xhci->quirks |= XHCI_TRUST_TX_LENGTH;
+
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -918,6 +918,7 @@ int xhci_suspend(struct xhci_hcd *xhci,
+ 	unsigned int delay = XHCI_MAX_HALT_USEC;
+ 	struct usb_hcd *hcd = xhci_to_hcd(xhci);
+ 	u32 command;
++	u32 res;
+
+ 	if (!hcd->state)
+ 		return 0;
+@@ -969,11 +970,28 @@ int xhci_suspend(struct xhci_hcd *xhci,
+ 	command = readl(&xhci->op_regs->command);
+ 	command |= CMD_CSS;
+ 	writel(command, &xhci->op_regs->command);
++	xhci->broken_suspend = 0;
+ 	if (xhci_handshake(&xhci->op_regs->status,
+ 				STS_SAVE, 0, 10 * 1000)) {
+-		xhci_warn(xhci, "WARN: xHC save state timeout\n");
+-		spin_unlock_irq(&xhci->lock);
+-		return -ETIMEDOUT;
++		/*
++		 * AMD SNPS xHC 3.0 occasionally does not clear the
++		 * SSS bit of USBSTS and when driver tries to poll
++		 * to see if the xHC clears BIT(8) which never happens
++		 * and driver assumes that controller is not responding
++		 * and times out. To work around this, it's good to check
++		 * if SRE and HCE bits are not set (as per xhci
++		 * Section 5.4.2) and bypass the timeout.
++		 */
++		res = readl(&xhci->op_regs->status);
++		if ((xhci->quirks & XHCI_SNPS_BROKEN_SUSPEND) &&
++		    (((res & STS_SRE) == 0) &&
++		     ((res & STS_HCE) == 0))) {
++			xhci->broken_suspend = 1;
++		} else {
++			xhci_warn(xhci, "WARN: xHC save state timeout\n");
++			spin_unlock_irq(&xhci->lock);
++			return -ETIMEDOUT;
++		}
+ 	}
+ 	spin_unlock_irq(&xhci->lock);
+
+@@ -1026,7 +1044,7 @@ int xhci_resume(struct xhci_hcd *xhci, b
+ 		set_bit(HCD_FLAG_HW_ACCESSIBLE, &xhci->shared_hcd->flags);
+
+ 	spin_lock_irq(&xhci->lock);
+-	if (xhci->quirks & XHCI_RESET_ON_RESUME)
++	if ((xhci->quirks & XHCI_RESET_ON_RESUME) || xhci->broken_suspend)
+ 		hibernated = true;
+
+ 	if (!hibernated) {
+--- a/drivers/usb/host/xhci.h
++++ b/drivers/usb/host/xhci.h
+@@ -1839,6 +1839,7 @@ struct xhci_hcd {
+ #define XHCI_SUSPEND_DELAY	BIT_ULL(30)
+ #define XHCI_INTEL_USB_ROLE_SW	BIT_ULL(31)
+ #define XHCI_RESET_PLL_ON_DISCONNECT	BIT_ULL(34)
++#define XHCI_SNPS_BROKEN_SUSPEND	BIT_ULL(35)
+
+ 	unsigned int		num_active_eps;
+ 	unsigned int		limit_active_eps;
+@@ -1870,6 +1871,8 @@ struct xhci_hcd {
+
+ 	/* platform-specific data -- must come last */
+ 	unsigned long		priv[0] __aligned(sizeof(s64));
++	/* Broken Suspend flag for SNPS Suspend resume issue */
++	u8			broken_suspend;
+ };
+
+ /* Platform specific overrides to generic XHCI hc_driver ops */