git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
3.18-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 8 Nov 2018 17:35:32 +0000 (09:35 -0800)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 8 Nov 2018 17:35:32 +0000 (09:35 -0800)
added patches:
cachefiles-fix-the-race-between-cachefiles_bury_object-and-rmdir-2.patch
cdc-acm-correct-counting-of-uart-states-in-serial-state-notification.patch
sched-fair-fix-throttle_list-starvation-with-low-cfs-quota.patch
usb-fix-the-usbfs-flag-sanitization-for-control-transfers.patch

queue-3.18/cachefiles-fix-the-race-between-cachefiles_bury_object-and-rmdir-2.patch [new file with mode: 0644]
queue-3.18/cdc-acm-correct-counting-of-uart-states-in-serial-state-notification.patch [new file with mode: 0644]
queue-3.18/sched-fair-fix-throttle_list-starvation-with-low-cfs-quota.patch [new file with mode: 0644]
queue-3.18/series
queue-3.18/usb-fix-the-usbfs-flag-sanitization-for-control-transfers.patch [new file with mode: 0644]

diff --git a/queue-3.18/cachefiles-fix-the-race-between-cachefiles_bury_object-and-rmdir-2.patch b/queue-3.18/cachefiles-fix-the-race-between-cachefiles_bury_object-and-rmdir-2.patch
new file mode 100644 (file)
index 0000000..d5f0b23
--- /dev/null
+++ b/queue-3.18/cachefiles-fix-the-race-between-cachefiles_bury_object-and-rmdir-2.patch
@@ -0,0 +1,42 @@
+From 169b803397499be85bdd1e3d07d6f5e3d4bd669e Mon Sep 17 00:00:00 2001
+From: Al Viro <viro@zeniv.linux.org.uk>
+Date: Wed, 17 Oct 2018 15:23:26 +0100
+Subject: cachefiles: fix the race between cachefiles_bury_object() and rmdir(2)
+
+From: Al Viro <viro@zeniv.linux.org.uk>
+
+commit 169b803397499be85bdd1e3d07d6f5e3d4bd669e upstream.
+
+The victim might've been rmdir'ed just before the lock_rename();
+unlike the normal callers, we do not look the source up after the
+parents are locked - we know it beforehand and just recheck that it's
+still the child of what used to be its parent.  Unfortunately, the
+check is too weak - we don't spot a dead directory, since its
+->d_parent is unchanged, the dentry is positive, etc.  So we sail all
+the way to ->rename(), with hosting filesystems _not_ expecting
+to be asked to rename an rmdir'ed subdirectory.
+
+The fix is easy, fortunately - the lock on the parent is sufficient
+for making IS_DEADDIR() on the child safe.
+
+Cc: stable@vger.kernel.org
+Fixes: 9ae326a69004 (CacheFiles: A cache that backs onto a mounted filesystem)
+Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
+Signed-off-by: David Howells <dhowells@redhat.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ fs/cachefiles/namei.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/cachefiles/namei.c
++++ b/fs/cachefiles/namei.c
+@@ -317,7 +317,7 @@ try_again:
+       trap = lock_rename(cache->graveyard, dir);
+       /* do some checks before getting the grave dentry */
+-      if (rep->d_parent != dir) {
++      if (rep->d_parent != dir || IS_DEADDIR(d_inode(rep))) {
+               /* the entry was probably culled when we dropped the parent dir
+                * lock */
+               unlock_rename(cache->graveyard, dir);
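A rough user-space C sketch of the recheck pattern above (the struct and
names are invented for illustration; in the kernel the liveness test is
IS_DEADDIR() on the child's inode, which the parent's lock makes safe):

  #include <stdbool.h>

  struct node {
          struct node *parent;
          bool dead;                      /* stands in for IS_DEADDIR() */
  };

  /* After re-taking the locks, the victim is only usable if it is still a
   * live child of the expected parent; checking the parent pointer alone
   * misses a directory that was removed while the locks were dropped. */
  static bool victim_still_valid(const struct node *victim,
                                 const struct node *dir)
  {
          return victim->parent == dir && !victim->dead;
  }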
diff --git a/queue-3.18/cdc-acm-correct-counting-of-uart-states-in-serial-state-notification.patch b/queue-3.18/cdc-acm-correct-counting-of-uart-states-in-serial-state-notification.patch
new file mode 100644 (file)
index 0000000..e187946
--- /dev/null
+++ b/queue-3.18/cdc-acm-correct-counting-of-uart-states-in-serial-state-notification.patch
@@ -0,0 +1,55 @@
+From f976d0e5747ca65ccd0fb2a4118b193d70aa1836 Mon Sep 17 00:00:00 2001
+From: Tobias Herzog <t-herzog@gmx.de>
+Date: Sat, 22 Sep 2018 22:11:11 +0200
+Subject: cdc-acm: correct counting of UART states in serial state notification
+
+From: Tobias Herzog <t-herzog@gmx.de>
+
+commit f976d0e5747ca65ccd0fb2a4118b193d70aa1836 upstream.
+
+The USB standard ("Universal Serial Bus Class Definitions for Communication
+Devices") distinguishes between "consistent signals" (DSR, DCD) and
+"irregular signals" (break, ring, parity error, framing error, overrun).
+The bits for "irregular signals" are set if the error/event occurred on
+the device side, and are cleared again immediately after the serial state
+notification has been sent.
+As drivers for real serial ports do, only the occurrence of those
+events should be counted in serial_icounter_struct (not 1->0
+transitions).
+
+Signed-off-by: Tobias Herzog <t-herzog@gmx.de>
+Acked-by: Oliver Neukum <oneukum@suse.com>
+Cc: stable <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/class/cdc-acm.c |   14 +++++++-------
+ 1 file changed, 7 insertions(+), 7 deletions(-)
+
+--- a/drivers/usb/class/cdc-acm.c
++++ b/drivers/usb/class/cdc-acm.c
+@@ -336,17 +336,17 @@ static void acm_ctrl_irq(struct urb *urb
+               if (difference & ACM_CTRL_DSR)
+                       acm->iocount.dsr++;
+-              if (difference & ACM_CTRL_BRK)
+-                      acm->iocount.brk++;
+-              if (difference & ACM_CTRL_RI)
+-                      acm->iocount.rng++;
+               if (difference & ACM_CTRL_DCD)
+                       acm->iocount.dcd++;
+-              if (difference & ACM_CTRL_FRAMING)
++              if (newctrl & ACM_CTRL_BRK)
++                      acm->iocount.brk++;
++              if (newctrl & ACM_CTRL_RI)
++                      acm->iocount.rng++;
++              if (newctrl & ACM_CTRL_FRAMING)
+                       acm->iocount.frame++;
+-              if (difference & ACM_CTRL_PARITY)
++              if (newctrl & ACM_CTRL_PARITY)
+                       acm->iocount.parity++;
+-              if (difference & ACM_CTRL_OVERRUN)
++              if (newctrl & ACM_CTRL_OVERRUN)
+                       acm->iocount.overrun++;
+               spin_unlock(&acm->read_lock);
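The effect of the change can be sketched in plain user-space C.  The
ACM_CTRL_* values below follow the CDC serial-state bitmap and are assumed
here for illustration rather than copied from the driver header:

  #include <stdint.h>

  #define ACM_CTRL_DSR  0x02              /* "consistent" modem signal */
  #define ACM_CTRL_BRK  0x04              /* "irregular" event bit */

  struct counters {
          unsigned int dsr;
          unsigned int brk;
  };

  static void account(struct counters *c, uint16_t prev, uint16_t newctrl)
  {
          uint16_t difference = prev ^ newctrl;

          if (difference & ACM_CTRL_DSR)  /* consistent: count transitions */
                  c->dsr++;
          if (newctrl & ACM_CTRL_BRK)     /* irregular: count each report */
                  c->brk++;
  }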
diff --git a/queue-3.18/sched-fair-fix-throttle_list-starvation-with-low-cfs-quota.patch b/queue-3.18/sched-fair-fix-throttle_list-starvation-with-low-cfs-quota.patch
new file mode 100644 (file)
index 0000000..33e7e5f
--- /dev/null
+++ b/queue-3.18/sched-fair-fix-throttle_list-starvation-with-low-cfs-quota.patch
@@ -0,0 +1,176 @@
+From baa9be4ffb55876923dc9716abc0a448e510ba30 Mon Sep 17 00:00:00 2001
+From: Phil Auld <pauld@redhat.com>
+Date: Mon, 8 Oct 2018 10:36:40 -0400
+Subject: sched/fair: Fix throttle_list starvation with low CFS quota
+
+From: Phil Auld <pauld@redhat.com>
+
+commit baa9be4ffb55876923dc9716abc0a448e510ba30 upstream.
+
+With a very low cpu.cfs_quota_us setting, such as the minimum of 1000,
+distribute_cfs_runtime may not empty the throttled_list before it runs
+out of runtime to distribute. In that case, due to the change in commit
+c06f04c7048 to put throttled entries at the head of the list, later entries
+on the list will starve.  Essentially, the same X processes get pulled
+off the list, given CPU time and then, when expired, put back at the
+head of the list, where distribute_cfs_runtime gives runtime to the same
+set of processes and leaves the rest starved.
+
+Fix the issue by setting a bit in struct cfs_bandwidth when
+distribute_cfs_runtime is running, so that the code in throttle_cfs_rq can
+decide to put the throttled entry on the tail or the head of the list.  The
+bit is set/cleared by the callers of distribute_cfs_runtime while they hold
+cfs_bandwidth->lock.
+
+This is easy to reproduce with a handful of CPU consumers. I use 'crash' on
+the live system. In some cases you can simply look at the throttled list and
+see the later entries are not changing:
+
+  crash> list cfs_rq.throttled_list -H 0xffff90b54f6ade40 -s cfs_rq.runtime_remaining | paste - - | awk '{print $1"  "$4}' | pr -t -n3
+    1     ffff90b56cb2d200  -976050
+    2     ffff90b56cb2cc00  -484925
+    3     ffff90b56cb2bc00  -658814
+    4     ffff90b56cb2ba00  -275365
+    5     ffff90b166a45600  -135138
+    6     ffff90b56cb2da00  -282505
+    7     ffff90b56cb2e000  -148065
+    8     ffff90b56cb2fa00  -872591
+    9     ffff90b56cb2c000  -84687
+   10     ffff90b56cb2f000  -87237
+   11     ffff90b166a40a00  -164582
+
+  crash> list cfs_rq.throttled_list -H 0xffff90b54f6ade40 -s cfs_rq.runtime_remaining | paste - - | awk '{print $1"  "$4}' | pr -t -n3
+    1     ffff90b56cb2d200  -994147
+    2     ffff90b56cb2cc00  -306051
+    3     ffff90b56cb2bc00  -961321
+    4     ffff90b56cb2ba00  -24490
+    5     ffff90b166a45600  -135138
+    6     ffff90b56cb2da00  -282505
+    7     ffff90b56cb2e000  -148065
+    8     ffff90b56cb2fa00  -872591
+    9     ffff90b56cb2c000  -84687
+   10     ffff90b56cb2f000  -87237
+   11     ffff90b166a40a00  -164582
+
+Sometimes it is easier to see by finding a process getting starved and looking
+at the sched_info:
+
+  crash> task ffff8eb765994500 sched_info
+  PID: 7800   TASK: ffff8eb765994500  CPU: 16  COMMAND: "cputest"
+    sched_info = {
+      pcount = 8,
+      run_delay = 697094208,
+      last_arrival = 240260125039,
+      last_queued = 240260327513
+    },
+  crash> task ffff8eb765994500 sched_info
+  PID: 7800   TASK: ffff8eb765994500  CPU: 16  COMMAND: "cputest"
+    sched_info = {
+      pcount = 8,
+      run_delay = 697094208,
+      last_arrival = 240260125039,
+      last_queued = 240260327513
+    },
+
+Signed-off-by: Phil Auld <pauld@redhat.com>
+Reviewed-by: Ben Segall <bsegall@google.com>
+Cc: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Thomas Gleixner <tglx@linutronix.de>
+Cc: stable@vger.kernel.org
+Fixes: c06f04c70489 ("sched: Fix potential near-infinite distribute_cfs_runtime() loop")
+Link: http://lkml.kernel.org/r/20181008143639.GA4019@pauld.bos.csb
+Signed-off-by: Ingo Molnar <mingo@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/sched/fair.c  |   23 ++++++++++++++++++++---
+ kernel/sched/sched.h |    2 ++
+ 2 files changed, 22 insertions(+), 3 deletions(-)
+
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3416,9 +3416,14 @@ static void throttle_cfs_rq(struct cfs_r
+       raw_spin_lock(&cfs_b->lock);
+       /*
+        * Add to the _head_ of the list, so that an already-started
+-       * distribute_cfs_runtime will not see us
++       * distribute_cfs_runtime will not see us. If disribute_cfs_runtime is
++       * not running add to the tail so that later runqueues don't get starved.
+        */
+-      list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
++      if (cfs_b->distribute_running)
++              list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
++      else
++              list_add_tail_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
++
+       if (!cfs_b->timer_active)
+               __start_cfs_bandwidth(cfs_b, false);
+       raw_spin_unlock(&cfs_b->lock);
+@@ -3562,14 +3567,16 @@ static int do_sched_cfs_period_timer(str
+        * in us over-using our runtime if it is all used during this loop, but
+        * only by limited amounts in that extreme case.
+        */
+-      while (throttled && cfs_b->runtime > 0) {
++      while (throttled && cfs_b->runtime > 0 && !cfs_b->distribute_running) {
+               runtime = cfs_b->runtime;
++              cfs_b->distribute_running = 1;
+               raw_spin_unlock(&cfs_b->lock);
+               /* we can't nest cfs_b->lock while distributing bandwidth */
+               runtime = distribute_cfs_runtime(cfs_b, runtime,
+                                                runtime_expires);
+               raw_spin_lock(&cfs_b->lock);
++              cfs_b->distribute_running = 0;
+               throttled = !list_empty(&cfs_b->throttled_cfs_rq);
+               cfs_b->runtime -= min(runtime, cfs_b->runtime);
+@@ -3680,6 +3687,11 @@ static void do_sched_cfs_slack_timer(str
+       /* confirm we're still not at a refresh boundary */
+       raw_spin_lock(&cfs_b->lock);
++      if (cfs_b->distribute_running) {
++              raw_spin_unlock(&cfs_b->lock);
++              return;
++      }
++
+       if (runtime_refresh_within(cfs_b, min_bandwidth_expiration)) {
+               raw_spin_unlock(&cfs_b->lock);
+               return;
+@@ -3689,6 +3701,9 @@ static void do_sched_cfs_slack_timer(str
+               runtime = cfs_b->runtime;
+       expires = cfs_b->runtime_expires;
++      if (runtime)
++              cfs_b->distribute_running = 1;
++
+       raw_spin_unlock(&cfs_b->lock);
+       if (!runtime)
+@@ -3699,6 +3714,7 @@ static void do_sched_cfs_slack_timer(str
+       raw_spin_lock(&cfs_b->lock);
+       if (expires == cfs_b->runtime_expires)
+               cfs_b->runtime -= min(runtime, cfs_b->runtime);
++      cfs_b->distribute_running = 0;
+       raw_spin_unlock(&cfs_b->lock);
+ }
+@@ -3790,6 +3806,7 @@ void init_cfs_bandwidth(struct cfs_bandw
+       cfs_b->period_timer.function = sched_cfs_period_timer;
+       hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       cfs_b->slack_timer.function = sched_cfs_slack_timer;
++      cfs_b->distribute_running = 0;
+ }
+ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
+--- a/kernel/sched/sched.h
++++ b/kernel/sched/sched.h
+@@ -202,6 +202,8 @@ struct cfs_bandwidth {
+       /* statistics */
+       int nr_periods, nr_throttled;
+       u64 throttled_time;
++
++      bool distribute_running;
+ #endif
+ };
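The heart of the fix is a claim/release handshake on the new
distribute_running flag while cfs_b->lock is dropped for the actual
distribution.  A minimal user-space sketch of that pattern, assuming
pthreads (struct and function names are illustrative, not the kernel's):

  #include <pthread.h>
  #include <stdbool.h>

  struct bandwidth {
          pthread_mutex_t lock;
          bool distribute_running;
  };

  static struct bandwidth bw = { .lock = PTHREAD_MUTEX_INITIALIZER };

  static void distribute_pass(struct bandwidth *b)
  {
          pthread_mutex_lock(&b->lock);
          if (b->distribute_running) {    /* another pass is in flight */
                  pthread_mutex_unlock(&b->lock);
                  return;
          }
          b->distribute_running = true;   /* claim the pass under the lock */
          pthread_mutex_unlock(&b->lock);

          /* ... walk the throttled list without holding the lock;
           * concurrent throttlers see distribute_running and can decide
           * whether to queue at the head or the tail ... */

          pthread_mutex_lock(&b->lock);
          b->distribute_running = false;  /* release the claim */
          pthread_mutex_unlock(&b->lock);
  }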
diff --git a/queue-3.18/series b/queue-3.18/series
index 02975af164a8e6a6173c40931aa250bf97fa74d0..68d0cae51047a8ca7b2c14e3a335fa1f8b4a820f 100644 (file)
--- a/queue-3.18/series
+++ b/queue-3.18/series
@@ -139,3 +139,7 @@ sctp-fix-race-on-sctp_id2asoc.patch
 net-drop-skb-on-failure-in-ip_check_defrag.patch
 rtnetlink-disallow-fdb-configuration-for-non-ethernet-device.patch
 net-sched-gred-pass-the-right-attribute-to-gred_change_table_def.patch
+cachefiles-fix-the-race-between-cachefiles_bury_object-and-rmdir-2.patch
+cdc-acm-correct-counting-of-uart-states-in-serial-state-notification.patch
+usb-fix-the-usbfs-flag-sanitization-for-control-transfers.patch
+sched-fair-fix-throttle_list-starvation-with-low-cfs-quota.patch
diff --git a/queue-3.18/usb-fix-the-usbfs-flag-sanitization-for-control-transfers.patch b/queue-3.18/usb-fix-the-usbfs-flag-sanitization-for-control-transfers.patch
new file mode 100644 (file)
index 0000000..15c2f54
--- /dev/null
+++ b/queue-3.18/usb-fix-the-usbfs-flag-sanitization-for-control-transfers.patch
@@ -0,0 +1,52 @@
+From 665c365a77fbfeabe52694aedf3446d5f2f1ce42 Mon Sep 17 00:00:00 2001
+From: Alan Stern <stern@rowland.harvard.edu>
+Date: Mon, 15 Oct 2018 16:55:04 -0400
+Subject: USB: fix the usbfs flag sanitization for control transfers
+
+From: Alan Stern <stern@rowland.harvard.edu>
+
+commit 665c365a77fbfeabe52694aedf3446d5f2f1ce42 upstream.
+
+Commit 7a68d9fb8510 ("USB: usbdevfs: sanitize flags more") checks the
+transfer flags for URBs submitted from userspace via usbfs.  However,
+the check for whether the USBDEVFS_URB_SHORT_NOT_OK flag should be
+allowed for a control transfer was added in the wrong place, before
+the code has properly determined the direction of the control
+transfer.  (Control transfers are special because for them, the
+direction is set by the bRequestType byte of the Setup packet rather
+than by the direction bit of the endpoint address.)
+
+This patch moves the code that sets up the allow_short flag for control
+transfers down to after is_in has been set to the correct value.
+
+Signed-off-by: Alan Stern <stern@rowland.harvard.edu>
+Reported-and-tested-by: syzbot+24a30223a4b609bb802e@syzkaller.appspotmail.com
+Fixes: 7a68d9fb8510 ("USB: usbdevfs: sanitize flags more")
+CC: Oliver Neukum <oneukum@suse.com>
+CC: <stable@vger.kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ drivers/usb/core/devio.c |    4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1326,8 +1326,6 @@ static int proc_do_submiturb(struct usb_
+       u = 0;
+       switch(uurb->type) {
+       case USBDEVFS_URB_TYPE_CONTROL:
+-              if (is_in)
+-                      allow_short = true;
+               if (!usb_endpoint_xfer_control(&ep->desc))
+                       return -EINVAL;
+               /* min 8 byte setup packet */
+@@ -1357,6 +1355,8 @@ static int proc_do_submiturb(struct usb_
+                       is_in = 0;
+                       uurb->endpoint &= ~USB_DIR_IN;
+               }
++              if (is_in)
++                      allow_short = true;
+               snoop(&ps->dev->dev, "control urb: bRequestType=%02x "
+                       "bRequest=%02x wValue=%04x "
+                       "wIndex=%04x wLength=%04x\n",
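The ordering constraint can be shown with a small hedged C sketch: for a
control transfer the direction comes from bit 7 of bRequestType in the
Setup packet, so a direction-dependent flag may only be derived after that
byte (and the length of the data stage) has been examined.  USB_DIR_IN is
the standard ch9 value; the helper name and exact condition are invented
for illustration:

  #include <stdbool.h>
  #include <stdint.h>

  #define USB_DIR_IN  0x80                /* bit 7: device-to-host */

  /* SHORT_NOT_OK only makes sense for IN transfers, and for control URBs
   * "IN" is decided by the Setup packet, not by the endpoint address. */
  static bool control_allows_short(uint8_t bRequestType, uint16_t wLength)
  {
          return (bRequestType & USB_DIR_IN) && wLength > 0;
  }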