4.4-stable patches
author    Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 10 Dec 2018 14:28:41 +0000 (15:28 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
          Mon, 10 Dec 2018 14:28:41 +0000 (15:28 +0100)
added patches:
dmaengine-cppi41-delete-channel-from-pending-list-when-stop-channel.patch
sunrpc-fix-leak-of-krb5p-encode-pages.patch
virtio-s390-avoid-race-on-vcdev-config.patch
virtio-s390-fix-race-in-ccw_io_helper.patch
xhci-prevent-u1-u2-link-pm-states-if-exit-latency-is-too-long.patch

queue-4.4/dmaengine-cppi41-delete-channel-from-pending-list-when-stop-channel.patch [new file with mode: 0644]
queue-4.4/series
queue-4.4/sunrpc-fix-leak-of-krb5p-encode-pages.patch [new file with mode: 0644]
queue-4.4/virtio-s390-avoid-race-on-vcdev-config.patch [new file with mode: 0644]
queue-4.4/virtio-s390-fix-race-in-ccw_io_helper.patch [new file with mode: 0644]
queue-4.4/xhci-prevent-u1-u2-link-pm-states-if-exit-latency-is-too-long.patch [new file with mode: 0644]

diff --git a/queue-4.4/dmaengine-cppi41-delete-channel-from-pending-list-when-stop-channel.patch b/queue-4.4/dmaengine-cppi41-delete-channel-from-pending-list-when-stop-channel.patch
new file mode 100644 (file)
index 0000000..dccb960
--- /dev/null
@@ -0,0 +1,62 @@
+From 59861547ec9a9736e7882f6fb0c096a720ff811a Mon Sep 17 00:00:00 2001
+From: Bin Liu <b-liu@ti.com>
+Date: Mon, 12 Nov 2018 09:43:22 -0600
+Subject: dmaengine: cppi41: delete channel from pending list when stop channel
+
+From: Bin Liu <b-liu@ti.com>
+
+commit 59861547ec9a9736e7882f6fb0c096a720ff811a upstream.
+
+The driver defines three states for a cppi channel.
+- idle: .chan_busy == 0 && not in .pending list
+- pending: .chan_busy == 0 && in .pending list
+- busy: .chan_busy == 1 && not in .pending list
+
+There are cases in which the cppi channel could be in the pending state
+when cppi41_dma_issue_pending() is called after cppi41_runtime_suspend()
+is called.
+
+cppi41_stop_chan() has a bug in these cases: it fails to return such
+channels to the idle state. It only checks the .chan_busy flag, not the
+.pending list, so when cppi41_runtime_resume() is called later, the
+channels still on the .pending list are transitioned to the busy state.
+
+Removing channels from the .pending list solves the problem.
+
+Fixes: 975faaeb9985 ("dma: cppi41: start tear down only if channel is busy")
+Cc: stable@vger.kernel.org # v3.15+
+Signed-off-by: Bin Liu <b-liu@ti.com>
+Reviewed-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Signed-off-by: Vinod Koul <vkoul@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/dma/cppi41.c |   16 +++++++++++++++-
+ 1 file changed, 15 insertions(+), 1 deletion(-)
+
+--- a/drivers/dma/cppi41.c
++++ b/drivers/dma/cppi41.c
+@@ -628,8 +628,22 @@ static int cppi41_stop_chan(struct dma_c
+       desc_phys = lower_32_bits(c->desc_phys);
+       desc_num = (desc_phys - cdd->descs_phys) / sizeof(struct cppi41_desc);
+-      if (!cdd->chan_busy[desc_num])
++      if (!cdd->chan_busy[desc_num]) {
++              struct cppi41_channel *cc, *_ct;
++
++              /*
++               * channels might still be in the pending list if
++               * cppi41_dma_issue_pending() is called after
++               * cppi41_runtime_suspend() is called
++               */
++              list_for_each_entry_safe(cc, _ct, &cdd->pending, node) {
++                      if (cc != c)
++                              continue;
++                      list_del(&cc->node);
++                      break;
++              }
+               return 0;
++      }
+       ret = cppi41_tear_down_chan(c);
+       if (ret)
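
As an illustration of the state model described in the commit message, the
following is a minimal, self-contained userspace C sketch of the bug and the
fix. The structure and function names are invented for the example and are
not the actual cppi41 driver code.

#include <stdbool.h>
#include <stdio.h>

struct channel {
	bool chan_busy;		/* busy:    chan_busy == 1, not on pending list */
	bool on_pending_list;	/* pending: chan_busy == 0, on pending list     */
				/* idle:    chan_busy == 0, not on pending list */
};

/* Buggy variant: checks only chan_busy and leaves a pending channel queued. */
static void stop_chan_buggy(struct channel *c)
{
	if (!c->chan_busy)
		return;			/* channel may still be pending! */
	c->chan_busy = false;		/* tear down the busy channel */
}

/* Fixed variant: a stopped channel is also dropped from the pending list. */
static void stop_chan_fixed(struct channel *c)
{
	if (!c->chan_busy) {
		c->on_pending_list = false;	/* now truly idle */
		return;
	}
	c->chan_busy = false;
}

int main(void)
{
	struct channel c = { .chan_busy = false, .on_pending_list = true };

	stop_chan_buggy(&c);
	printf("buggy: still pending = %d\n", c.on_pending_list);	/* 1 */

	c.on_pending_list = true;
	stop_chan_fixed(&c);
	printf("fixed: still pending = %d\n", c.on_pending_list);	/* 0 */
	return 0;
}
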
diff --git a/queue-4.4/series b/queue-4.4/series
index fb1d79593e7bee2848981b8cbfb67bd8f6e2bfed..91bd78ef9407c48ef58ca955adb8398d08f97654 100644 (file)
@@ -75,3 +75,8 @@ alsa-hda-add-support-for-amd-stoney-ridge.patch
 alsa-pcm-fix-starvation-on-down_write_nonblock.patch
 alsa-pcm-call-snd_pcm_unlink-conditionally-at-closing.patch
 alsa-pcm-fix-interval-evaluation-with-openmin-max.patch
+virtio-s390-avoid-race-on-vcdev-config.patch
+virtio-s390-fix-race-in-ccw_io_helper.patch
+sunrpc-fix-leak-of-krb5p-encode-pages.patch
+dmaengine-cppi41-delete-channel-from-pending-list-when-stop-channel.patch
+xhci-prevent-u1-u2-link-pm-states-if-exit-latency-is-too-long.patch
diff --git a/queue-4.4/sunrpc-fix-leak-of-krb5p-encode-pages.patch b/queue-4.4/sunrpc-fix-leak-of-krb5p-encode-pages.patch
new file mode 100644 (file)
index 0000000..159f02c
--- /dev/null
@@ -0,0 +1,42 @@
+From 8dae5398ab1ac107b1517e8195ed043d5f422bd0 Mon Sep 17 00:00:00 2001
+From: Chuck Lever <chuck.lever@oracle.com>
+Date: Fri, 30 Nov 2018 15:39:57 -0500
+Subject: SUNRPC: Fix leak of krb5p encode pages
+
+From: Chuck Lever <chuck.lever@oracle.com>
+
+commit 8dae5398ab1ac107b1517e8195ed043d5f422bd0 upstream.
+
+call_encode can be invoked more than once per RPC call. Ensure that
+each call to gss_wrap_req_priv does not overwrite pointers to
+previously allocated memory.
+
+Signed-off-by: Chuck Lever <chuck.lever@oracle.com>
+Cc: stable@kernel.org
+Signed-off-by: Trond Myklebust <trond.myklebust@hammerspace.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/sunrpc/auth_gss/auth_gss.c |    4 ++++
+ 1 file changed, 4 insertions(+)
+
+--- a/net/sunrpc/auth_gss/auth_gss.c
++++ b/net/sunrpc/auth_gss/auth_gss.c
+@@ -1722,6 +1722,7 @@ priv_release_snd_buf(struct rpc_rqst *rq
+       for (i=0; i < rqstp->rq_enc_pages_num; i++)
+               __free_page(rqstp->rq_enc_pages[i]);
+       kfree(rqstp->rq_enc_pages);
++      rqstp->rq_release_snd_buf = NULL;
+ }
+ static int
+@@ -1730,6 +1731,9 @@ alloc_enc_pages(struct rpc_rqst *rqstp)
+       struct xdr_buf *snd_buf = &rqstp->rq_snd_buf;
+       int first, last, i;
++      if (rqstp->rq_release_snd_buf)
++              rqstp->rq_release_snd_buf(rqstp);
++
+       if (snd_buf->page_len == 0) {
+               rqstp->rq_enc_pages_num = 0;
+               return 0;
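
The pattern behind this fix -- an encode step that may run more than once
must release what it allocated on the previous pass -- can be sketched in
standalone C. The rqst structure and helpers below are simplified stand-ins,
not the SUNRPC API.

#include <stdlib.h>

struct rqst {
	void **enc_pages;
	int enc_pages_num;
	void (*release_snd_buf)(struct rqst *);
};

static void release_pages(struct rqst *r)
{
	for (int i = 0; i < r->enc_pages_num; i++)
		free(r->enc_pages[i]);
	free(r->enc_pages);
	r->enc_pages = NULL;
	r->enc_pages_num = 0;
	r->release_snd_buf = NULL;	/* mirrors the fix: clear the hook */
}

static int alloc_pages(struct rqst *r, int n)
{
	/* mirrors the fix: drop pages left over from an earlier encode pass */
	if (r->release_snd_buf)
		r->release_snd_buf(r);

	r->enc_pages = calloc(n, sizeof(*r->enc_pages));
	if (!r->enc_pages)
		return -1;
	for (int i = 0; i < n; i++)
		r->enc_pages[i] = malloc(4096);
	r->enc_pages_num = n;
	r->release_snd_buf = release_pages;
	return 0;
}

int main(void)
{
	struct rqst r = { 0 };

	alloc_pages(&r, 4);	/* first encode pass */
	alloc_pages(&r, 4);	/* retransmission: without the check, 4 pages leak */
	release_pages(&r);
	return 0;
}
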
diff --git a/queue-4.4/virtio-s390-avoid-race-on-vcdev-config.patch b/queue-4.4/virtio-s390-avoid-race-on-vcdev-config.patch
new file mode 100644 (file)
index 0000000..1acd0ac
--- /dev/null
@@ -0,0 +1,74 @@
+From 2448a299ec416a80f699940a86f4a6d9a4f643b1 Mon Sep 17 00:00:00 2001
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Wed, 26 Sep 2018 18:48:29 +0200
+Subject: virtio/s390: avoid race on vcdev->config
+
+From: Halil Pasic <pasic@linux.ibm.com>
+
+commit 2448a299ec416a80f699940a86f4a6d9a4f643b1 upstream.
+
+Currently we have a race on vcdev->config in virtio_ccw_get_config() and
+in virtio_ccw_set_config().
+
+This normally does not cause problems, as these are usually infrequent
+operations. However, for some devices, writing to/reading from the config
+space can be triggered through sysfs attributes. For these, userspace can
+force the race by increasing the frequency of those operations.
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Cc: stable@vger.kernel.org
+Message-Id: <20180925121309.58524-2-pasic@linux.ibm.com>
+Signed-off-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/virtio/virtio_ccw.c |   10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -833,6 +833,7 @@ static void virtio_ccw_get_config(struct
+       int ret;
+       struct ccw1 *ccw;
+       void *config_area;
++      unsigned long flags;
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+@@ -851,11 +852,13 @@ static void virtio_ccw_get_config(struct
+       if (ret)
+               goto out_free;
++      spin_lock_irqsave(&vcdev->lock, flags);
+       memcpy(vcdev->config, config_area, offset + len);
+-      if (buf)
+-              memcpy(buf, &vcdev->config[offset], len);
+       if (vcdev->config_ready < offset + len)
+               vcdev->config_ready = offset + len;
++      spin_unlock_irqrestore(&vcdev->lock, flags);
++      if (buf)
++              memcpy(buf, config_area + offset, len);
+ out_free:
+       kfree(config_area);
+@@ -869,6 +872,7 @@ static void virtio_ccw_set_config(struct
+       struct virtio_ccw_device *vcdev = to_vc_device(vdev);
+       struct ccw1 *ccw;
+       void *config_area;
++      unsigned long flags;
+       ccw = kzalloc(sizeof(*ccw), GFP_DMA | GFP_KERNEL);
+       if (!ccw)
+@@ -881,9 +885,11 @@ static void virtio_ccw_set_config(struct
+       /* Make sure we don't overwrite fields. */
+       if (vcdev->config_ready < offset)
+               virtio_ccw_get_config(vdev, 0, NULL, offset);
++      spin_lock_irqsave(&vcdev->lock, flags);
+       memcpy(&vcdev->config[offset], buf, len);
+       /* Write the config area to the host. */
+       memcpy(config_area, vcdev->config, sizeof(vcdev->config));
++      spin_unlock_irqrestore(&vcdev->lock, flags);
+       ccw->cmd_code = CCW_CMD_WRITE_CONF;
+       ccw->flags = 0;
+       ccw->count = offset + len;
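
The locking scheme above can be sketched with pthreads standing in for
spin_lock_irqsave(): the shared config cache is only touched under the lock,
while the copy to the caller's buffer is served from the private bounce
buffer and needs no lock. All names below are illustrative, not the driver's.

#include <pthread.h>
#include <string.h>

#define CFG_SIZE 256

struct vcdev {
	pthread_mutex_t lock;
	unsigned char config[CFG_SIZE];	/* shared cache: touch only under lock */
	unsigned int config_ready;
};

static void get_config(struct vcdev *v, unsigned int offset,
		       void *buf, unsigned int len)
{
	unsigned char config_area[CFG_SIZE] = { 0 };

	/* ... channel I/O would fill config_area from the device here ... */

	pthread_mutex_lock(&v->lock);
	memcpy(v->config, config_area, offset + len);
	if (v->config_ready < offset + len)
		v->config_ready = offset + len;
	pthread_mutex_unlock(&v->lock);

	/* copy out from the private bounce buffer, outside the lock */
	if (buf)
		memcpy(buf, config_area + offset, len);
}

int main(void)
{
	struct vcdev v = { .lock = PTHREAD_MUTEX_INITIALIZER };
	unsigned char buf[8];

	get_config(&v, 0, buf, sizeof(buf));
	return 0;
}
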
diff --git a/queue-4.4/virtio-s390-fix-race-in-ccw_io_helper.patch b/queue-4.4/virtio-s390-fix-race-in-ccw_io_helper.patch
new file mode 100644 (file)
index 0000000..1b6d1c0
--- /dev/null
@@ -0,0 +1,79 @@
+From 78b1a52e05c9db11d293342e8d6d8a230a04b4e7 Mon Sep 17 00:00:00 2001
+From: Halil Pasic <pasic@linux.ibm.com>
+Date: Wed, 26 Sep 2018 18:48:30 +0200
+Subject: virtio/s390: fix race in ccw_io_helper()
+
+From: Halil Pasic <pasic@linux.ibm.com>
+
+commit 78b1a52e05c9db11d293342e8d6d8a230a04b4e7 upstream.
+
+While ccw_io_helper() seems to be intended to be exclusive, in the sense
+that it is supposed to facilitate I/O for at most one thread at any given
+time, there is actually nothing ensuring that threads won't pile up at
+vcdev->wait_q. If they do, all threads get woken up and may see a status
+that belongs to some request other than their own. This can lead to bugs.
+For an example, see:
+https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1788432
+
+This race normally does not cause any problems. The operations provided
+by struct virtio_config_ops are usually invoked in a well-defined
+sequence, normally don't fail, and are normally used quite infrequently
+too.
+
+Yet, if some of these operations are directly triggered via sysfs
+attributes, like in the case described by the referenced bug, userspace
+is given an opportunity to force races by increasing the frequency of the
+given operations.
+
+Let us fix the problem by ensuring that, for each device, we finish
+processing the previous request before starting with a new one.
+
+Signed-off-by: Halil Pasic <pasic@linux.ibm.com>
+Reported-by: Colin Ian King <colin.king@canonical.com>
+Cc: stable@vger.kernel.org
+Message-Id: <20180925121309.58524-3-pasic@linux.ibm.com>
+Signed-off-by: Cornelia Huck <cohuck@redhat.com>
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/s390/virtio/virtio_ccw.c |    7 ++++++-
+ 1 file changed, 6 insertions(+), 1 deletion(-)
+
+--- a/drivers/s390/virtio/virtio_ccw.c
++++ b/drivers/s390/virtio/virtio_ccw.c
+@@ -59,6 +59,7 @@ struct virtio_ccw_device {
+       unsigned int revision; /* Transport revision */
+       wait_queue_head_t wait_q;
+       spinlock_t lock;
++      struct mutex io_lock; /* Serializes I/O requests */
+       struct list_head virtqueues;
+       unsigned long indicators;
+       unsigned long indicators2;
+@@ -307,6 +308,7 @@ static int ccw_io_helper(struct virtio_c
+       unsigned long flags;
+       int flag = intparm & VIRTIO_CCW_INTPARM_MASK;
++      mutex_lock(&vcdev->io_lock);
+       do {
+               spin_lock_irqsave(get_ccwdev_lock(vcdev->cdev), flags);
+               ret = ccw_device_start(vcdev->cdev, ccw, intparm, 0, 0);
+@@ -319,7 +321,9 @@ static int ccw_io_helper(struct virtio_c
+               cpu_relax();
+       } while (ret == -EBUSY);
+       wait_event(vcdev->wait_q, doing_io(vcdev, flag) == 0);
+-      return ret ? ret : vcdev->err;
++      ret = ret ? ret : vcdev->err;
++      mutex_unlock(&vcdev->io_lock);
++      return ret;
+ }
+ static void virtio_ccw_drop_indicator(struct virtio_ccw_device *vcdev,
+@@ -1236,6 +1240,7 @@ static int virtio_ccw_online(struct ccw_
+       init_waitqueue_head(&vcdev->wait_q);
+       INIT_LIST_HEAD(&vcdev->virtqueues);
+       spin_lock_init(&vcdev->lock);
++      mutex_init(&vcdev->io_lock);
+       spin_lock_irqsave(get_ccwdev_lock(cdev), flags);
+       dev_set_drvdata(&cdev->dev, vcdev);
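
A compact pthread model of the fix is sketched below: the whole
submit-and-wait sequence runs under a per-device mutex, so a second caller
can never observe the completion status of the first caller's request. The
primitives and names are stand-ins for the kernel's mutex and wait queue,
not the virtio_ccw code.

#include <pthread.h>
#include <stdio.h>

struct vcdev {
	pthread_mutex_t io_lock;	/* the fix: serializes whole requests */
	pthread_mutex_t lock;		/* protects curr_io and err */
	pthread_cond_t wait_q;
	unsigned long curr_io;		/* flag of the request in flight */
	int err;			/* status of the last completion */
};

static struct vcdev dev = {
	.io_lock = PTHREAD_MUTEX_INITIALIZER,
	.lock = PTHREAD_MUTEX_INITIALIZER,
	.wait_q = PTHREAD_COND_INITIALIZER,
};

/* What the interrupt-handler side does: complete the in-flight request. */
static void complete_io(struct vcdev *v, unsigned long flag, int err)
{
	pthread_mutex_lock(&v->lock);
	v->curr_io &= ~flag;
	v->err = err;
	pthread_cond_broadcast(&v->wait_q);
	pthread_mutex_unlock(&v->lock);
}

static int io_helper(struct vcdev *v, unsigned long flag)
{
	int ret;

	pthread_mutex_lock(&v->io_lock);	/* one request at a time */

	pthread_mutex_lock(&v->lock);
	v->curr_io |= flag;			/* "start the channel program" */
	pthread_mutex_unlock(&v->lock);

	pthread_mutex_lock(&v->lock);
	while (v->curr_io & flag)		/* wait_event(vcdev->wait_q, ...) */
		pthread_cond_wait(&v->wait_q, &v->lock);
	ret = v->err;				/* status is ours: still serialized */
	pthread_mutex_unlock(&v->lock);

	pthread_mutex_unlock(&v->io_lock);	/* next request may start now */
	return ret;
}

static void *device_side(void *arg)
{
	(void)arg;
	/* wait (by polling, for brevity) until the request was started */
	for (;;) {
		pthread_mutex_lock(&dev.lock);
		int started = (dev.curr_io & 1UL) != 0;
		pthread_mutex_unlock(&dev.lock);
		if (started)
			break;
	}
	complete_io(&dev, 1UL, 0);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, device_side, NULL);
	printf("io status: %d\n", io_helper(&dev, 1UL));
	pthread_join(t, NULL);
	return 0;
}
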
diff --git a/queue-4.4/xhci-prevent-u1-u2-link-pm-states-if-exit-latency-is-too-long.patch b/queue-4.4/xhci-prevent-u1-u2-link-pm-states-if-exit-latency-is-too-long.patch
new file mode 100644 (file)
index 0000000..668c1cb
--- /dev/null
@@ -0,0 +1,61 @@
+From 0472bf06c6fd33c1a18aaead4c8f91e5a03d8d7b Mon Sep 17 00:00:00 2001
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+Date: Wed, 5 Dec 2018 14:22:39 +0200
+Subject: xhci: Prevent U1/U2 link pm states if exit latency is too long
+
+From: Mathias Nyman <mathias.nyman@linux.intel.com>
+
+commit 0472bf06c6fd33c1a18aaead4c8f91e5a03d8d7b upstream.
+
+Don't allow USB3 U1 or U2 if the latency to wake up from the U-state
+reaches the service interval for a periodic endpoint.
+
+This is according to xhci 1.1 specification section 4.23.5.2 extra note:
+
+"Software shall ensure that a device is prevented from entering a U-state
+ where its worst case exit latency approaches the ESIT."
+
+Allowing too-long exit latencies for a periodic endpoint confuses the
+xHC's internal scheduling, and new devices may fail to enumerate with a
+"Not enough bandwidth for new device state" error from the host.
+
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Mathias Nyman <mathias.nyman@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/host/xhci.c |   16 ++++++++++++++++
+ 1 file changed, 16 insertions(+)
+
+--- a/drivers/usb/host/xhci.c
++++ b/drivers/usb/host/xhci.c
+@@ -4417,6 +4417,14 @@ static u16 xhci_calculate_u1_timeout(str
+ {
+       unsigned long long timeout_ns;
++      /* Prevent U1 if service interval is shorter than U1 exit latency */
++      if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
++              if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
++                      dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
++                      return USB3_LPM_DISABLED;
++              }
++      }
++
+       if (xhci->quirks & XHCI_INTEL_HOST)
+               timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+       else
+@@ -4473,6 +4481,14 @@ static u16 xhci_calculate_u2_timeout(str
+ {
+       unsigned long long timeout_ns;
++      /* Prevent U2 if service interval is shorter than U2 exit latency */
++      if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
++              if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
++                      dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
++                      return USB3_LPM_DISABLED;
++              }
++      }
++
+       if (xhci->quirks & XHCI_INTEL_HOST)
+               timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+       else
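
The check added by this patch boils down to a unit-consistent comparison
between the endpoint's service interval and the U-state exit latency. The
standalone sketch below mirrors that comparison; the interval formula
follows the USB3 definition of 125 us * 2^(bInterval - 1), and the helper
names are invented for the example, not taken from the xhci driver.

#include <stdbool.h>
#include <stdio.h>

/* SuperSpeed periodic service interval in nanoseconds. This mirrors the
 * idea of xhci_service_interval_to_ns(), simplified for the example. */
static unsigned long long service_interval_ns(unsigned int bInterval)
{
	return 125000ULL << (bInterval - 1);
}

/* Returns true when the U-state must be disabled for a periodic endpoint:
 * the worst-case exit latency reaches (or exceeds) the service interval. */
static bool forbid_u_state(unsigned int bInterval,
			   unsigned long long exit_latency_ns)
{
	return service_interval_ns(bInterval) <= exit_latency_ns;
}

int main(void)
{
	/* bInterval 1 => 125 us interval; a 200 us exit latency is too long */
	printf("disable U-state: %d\n", forbid_u_state(1, 200000ULL));
	/* bInterval 4 => 1 ms interval; a 200 us exit latency is acceptable */
	printf("disable U-state: %d\n", forbid_u_state(4, 200000ULL));
	return 0;
}
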