git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
4.14-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 Feb 2020 12:25:31 +0000 (13:25 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 Feb 2020 12:25:31 +0000 (13:25 +0100)
added patches:
alsa-rawmidi-avoid-bit-fields-for-state-flags.patch
alsa-seq-avoid-concurrent-access-to-queue-flags.patch
alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch
netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch
netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch

queue-4.14/alsa-rawmidi-avoid-bit-fields-for-state-flags.patch [new file with mode: 0644]
queue-4.14/alsa-seq-avoid-concurrent-access-to-queue-flags.patch [new file with mode: 0644]
queue-4.14/alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch [new file with mode: 0644]
queue-4.14/netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch [new file with mode: 0644]
queue-4.14/netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch [new file with mode: 0644]
queue-4.14/series

diff --git a/queue-4.14/alsa-rawmidi-avoid-bit-fields-for-state-flags.patch b/queue-4.14/alsa-rawmidi-avoid-bit-fields-for-state-flags.patch
new file mode 100644 (file)
index 0000000..32d0e00
--- /dev/null
@@ -0,0 +1,43 @@
+From dfa9a5efe8b932a84b3b319250aa3ac60c20f876 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 14 Feb 2020 12:13:16 +0100
+Subject: ALSA: rawmidi: Avoid bit fields for state flags
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit dfa9a5efe8b932a84b3b319250aa3ac60c20f876 upstream.
+
+The rawmidi state flags (opened, append, active_sensing) are stored in
+bit fields that can be racy when accessed concurrently without any
+locks.  Although the current code should be fine, there is also no real
+benefit in keeping bit fields for such a small number of members.
+
+This patch changes those bit field flags to simple bool fields.  The
+change should not increase the size of snd_rawmidi_substream.
+
+Reported-by: syzbot+576cc007eb9f2c968200@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/r/20200214111316.26939-4-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/sound/rawmidi.h |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/sound/rawmidi.h
++++ b/include/sound/rawmidi.h
+@@ -92,9 +92,9 @@ struct snd_rawmidi_substream {
+       struct list_head list;          /* list of all substream for given stream */
+       int stream;                     /* direction */
+       int number;                     /* substream number */
+-      unsigned int opened: 1,         /* open flag */
+-                   append: 1,         /* append flag (merge more streams) */
+-                   active_sensing: 1; /* send active sensing when close */
++      bool opened;                    /* open flag */
++      bool append;                    /* append flag (merge more streams) */
++      bool active_sensing;            /* send active sensing when close */
+       int use_count;                  /* use counter (for output) */
+       size_t bytes;
+       struct snd_rawmidi *rmidi;
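
A minimal standalone sketch (not taken from the patch; the struct names are illustrative) of why the two layouts behave differently under concurrency: adjacent bit fields share one storage unit, so setting either flag is a read-modify-write of the whole word and concurrent writers can clobber each other, whereas separate bool members are distinct memory locations under C11.

#include <stdbool.h>

struct state_bitfield {
    unsigned int opened:1,          /* packed into the same word...      */
                 append:1,          /* ...as these, so storing any flag  */
                 active_sensing:1;  /* rewrites all three                */
};

struct state_bool {
    bool opened;                    /* each flag is its own byte, so     */
    bool append;                    /* concurrent stores to different    */
    bool active_sensing;            /* flags cannot clobber one another  */
};

On common ABIs, three one-byte bools plus padding before the following int occupy the same four bytes the old unsigned int bit-field word did, which matches the commit message's expectation of no size change.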
diff --git a/queue-4.14/alsa-seq-avoid-concurrent-access-to-queue-flags.patch b/queue-4.14/alsa-seq-avoid-concurrent-access-to-queue-flags.patch
new file mode 100644 (file)
index 0000000..21cab97
--- /dev/null
@@ -0,0 +1,96 @@
+From bb51e669fa49feb5904f452b2991b240ef31bc97 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 14 Feb 2020 12:13:14 +0100
+Subject: ALSA: seq: Avoid concurrent access to queue flags
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit bb51e669fa49feb5904f452b2991b240ef31bc97 upstream.
+
+The queue flags are represented as bit fields, and concurrent access
+may lead to unexpected results.  Although the current code should be
+mostly OK (KCSAN reported it only reads one field while other fields
+are being written), it's safer to cover both with proper spinlock
+protection.
+
+This patch fixes the possible concurrent read by protecting it with
+q->owner_lock.  The queue owner field is protected as well, since it's
+the very field the lock is meant to protect.
+
+Reported-by: syzbot+65c6c92d04304d0a8efc@syzkaller.appspotmail.com
+Reported-by: syzbot+e60ddfa48717579799dd@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/r/20200214111316.26939-2-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/seq/seq_queue.c |   20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -415,6 +415,7 @@ int snd_seq_queue_check_access(int queue
+ int snd_seq_queue_set_owner(int queueid, int client, int locked)
+ {
+       struct snd_seq_queue *q = queueptr(queueid);
++      unsigned long flags;
+       if (q == NULL)
+               return -EINVAL;
+@@ -424,8 +425,10 @@ int snd_seq_queue_set_owner(int queueid,
+               return -EPERM;
+       }
++      spin_lock_irqsave(&q->owner_lock, flags);
+       q->locked = locked ? 1 : 0;
+       q->owner = client;
++      spin_unlock_irqrestore(&q->owner_lock, flags);
+       queue_access_unlock(q);
+       queuefree(q);
+@@ -564,15 +567,17 @@ void snd_seq_queue_client_termination(in
+       unsigned long flags;
+       int i;
+       struct snd_seq_queue *q;
++      bool matched;
+       for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
+               if ((q = queueptr(i)) == NULL)
+                       continue;
+               spin_lock_irqsave(&q->owner_lock, flags);
+-              if (q->owner == client)
++              matched = (q->owner == client);
++              if (matched)
+                       q->klocked = 1;
+               spin_unlock_irqrestore(&q->owner_lock, flags);
+-              if (q->owner == client) {
++              if (matched) {
+                       if (q->timer->running)
+                               snd_seq_timer_stop(q->timer);
+                       snd_seq_timer_reset(q->timer);
+@@ -764,6 +769,8 @@ void snd_seq_info_queues_read(struct snd
+       int i, bpm;
+       struct snd_seq_queue *q;
+       struct snd_seq_timer *tmr;
++      bool locked;
++      int owner;
+       for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
+               if ((q = queueptr(i)) == NULL)
+@@ -775,9 +782,14 @@ void snd_seq_info_queues_read(struct snd
+               else
+                       bpm = 0;
++              spin_lock_irq(&q->owner_lock);
++              locked = q->locked;
++              owner = q->owner;
++              spin_unlock_irq(&q->owner_lock);
++
+               snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
+-              snd_iprintf(buffer, "owned by client    : %d\n", q->owner);
+-              snd_iprintf(buffer, "lock status        : %s\n", q->locked ? "Locked" : "Free");
++              snd_iprintf(buffer, "owned by client    : %d\n", owner);
++              snd_iprintf(buffer, "lock status        : %s\n", locked ? "Locked" : "Free");
+               snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
+               snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
+               snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
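
The pattern the patch applies in snd_seq_info_queues_read(), copying the lock-protected fields into locals and using that snapshot after dropping the lock, can be sketched in standalone C (assumptions: pthreads stand in for the kernel spinlock API, and the struct and function names are invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue_sketch {
    pthread_mutex_t owner_lock;
    bool locked;                    /* both fields are only written...   */
    int owner;                      /* ...with owner_lock held           */
};

static void print_queue_owner(struct queue_sketch *q)
{
    bool locked;
    int owner;

    pthread_mutex_lock(&q->owner_lock);
    locked = q->locked;             /* snapshot both fields in a single  */
    owner = q->owner;               /* critical section                  */
    pthread_mutex_unlock(&q->owner_lock);

    printf("owned by client : %d\n", owner);
    printf("lock status     : %s\n", locked ? "Locked" : "Free");
}

The reader never holds the lock while printing, yet it cannot observe a half-updated owner/locked pair; the set_owner path now guarantees that by updating both fields under the same lock.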
diff --git a/queue-4.14/alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch b/queue-4.14/alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch
new file mode 100644 (file)
index 0000000..f0c81be
--- /dev/null
@@ -0,0 +1,137 @@
+From dc7497795e014d84699c3b8809ed6df35352dd74 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 14 Feb 2020 12:13:15 +0100
+Subject: ALSA: seq: Fix concurrent access to queue current tick/time
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit dc7497795e014d84699c3b8809ed6df35352dd74 upstream.
+
+snd_seq_check_queue() passes the current tick and time of the given
+queue as a pointer to snd_seq_prioq_cell_out(), but those values might
+be updated concurrently by the seq timer update.
+
+Fix it by first retrieving the current tick and time via the proper
+helper functions, and passing those values to snd_seq_prioq_cell_out()
+later in the loops.
+
+snd_seq_timer_get_cur_time() takes a new argument and adjusts with the
+current system time only when requested; this adjustment isn't needed
+for snd_seq_check_queue(), as it's called either from the interrupt
+handler or right after queuing.
+
+Also, snd_seq_timer_get_cur_tick() is changed to read the value under
+the spinlock, for the same concurrency reason.
+
+Reported-by: syzbot+fd5e0eaa1a32999173b2@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/r/20200214111316.26939-3-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/seq/seq_clientmgr.c |    4 ++--
+ sound/core/seq/seq_queue.c     |    9 ++++++---
+ sound/core/seq/seq_timer.c     |   13 ++++++++++---
+ sound/core/seq/seq_timer.h     |    3 ++-
+ 4 files changed, 20 insertions(+), 9 deletions(-)
+
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -564,7 +564,7 @@ static int update_timestamp_of_queue(str
+       event->queue = queue;
+       event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
+       if (real_time) {
+-              event->time.time = snd_seq_timer_get_cur_time(q->timer);
++              event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
+               event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
+       } else {
+               event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
+@@ -1639,7 +1639,7 @@ static int snd_seq_ioctl_get_queue_statu
+       tmr = queue->timer;
+       status->events = queue->tickq->cells + queue->timeq->cells;
+-      status->time = snd_seq_timer_get_cur_time(tmr);
++      status->time = snd_seq_timer_get_cur_time(tmr, true);
+       status->tick = snd_seq_timer_get_cur_tick(tmr);
+       status->running = tmr->running;
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -261,6 +261,8 @@ void snd_seq_check_queue(struct snd_seq_
+ {
+       unsigned long flags;
+       struct snd_seq_event_cell *cell;
++      snd_seq_tick_time_t cur_tick;
++      snd_seq_real_time_t cur_time;
+       if (q == NULL)
+               return;
+@@ -277,17 +279,18 @@ void snd_seq_check_queue(struct snd_seq_
+       __again:
+       /* Process tick queue... */
++      cur_tick = snd_seq_timer_get_cur_tick(q->timer);
+       for (;;) {
+-              cell = snd_seq_prioq_cell_out(q->tickq,
+-                                            &q->timer->tick.cur_tick);
++              cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
+               if (!cell)
+                       break;
+               snd_seq_dispatch_event(cell, atomic, hop);
+       }
+       /* Process time queue... */
++      cur_time = snd_seq_timer_get_cur_time(q->timer, false);
+       for (;;) {
+-              cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
++              cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
+               if (!cell)
+                       break;
+               snd_seq_dispatch_event(cell, atomic, hop);
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -436,14 +436,15 @@ int snd_seq_timer_continue(struct snd_se
+ }
+ /* return current 'real' time. use timeofday() to get better granularity. */
+-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
++                                             bool adjust_ktime)
+ {
+       snd_seq_real_time_t cur_time;
+       unsigned long flags;
+       spin_lock_irqsave(&tmr->lock, flags);
+       cur_time = tmr->cur_time;
+-      if (tmr->running) { 
++      if (adjust_ktime && tmr->running) {
+               struct timespec64 tm;
+               ktime_get_ts64(&tm);
+@@ -460,7 +461,13 @@ snd_seq_real_time_t snd_seq_timer_get_cu
+  high PPQ values) */
+ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
+ {
+-      return tmr->tick.cur_tick;
++      snd_seq_tick_time_t cur_tick;
++      unsigned long flags;
++
++      spin_lock_irqsave(&tmr->lock, flags);
++      cur_tick = tmr->tick.cur_tick;
++      spin_unlock_irqrestore(&tmr->lock, flags);
++      return cur_tick;
+ }
+--- a/sound/core/seq/seq_timer.h
++++ b/sound/core/seq/seq_timer.h
+@@ -135,7 +135,8 @@ int snd_seq_timer_set_ppq(struct snd_seq
+ int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
+ int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
+ int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
+-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
++                                             bool adjust_ktime);
+ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
+ extern int seq_default_timer_class;
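
The core of the tick/time fix can be sketched standalone (assumptions: pthreads replace the kernel spinlock, a plain array replaces the priority queue, and all names are invented): read the current tick once through a locked getter, then dispatch against that local snapshot instead of dereferencing the live timer field on every loop iteration.

#include <pthread.h>
#include <stdio.h>

struct timer_sketch {
    pthread_mutex_t lock;
    unsigned long cur_tick;         /* advanced from a timer callback */
};

static unsigned long get_cur_tick(struct timer_sketch *tmr)
{
    unsigned long tick;

    pthread_mutex_lock(&tmr->lock);
    tick = tmr->cur_tick;           /* read under the lock, return by value */
    pthread_mutex_unlock(&tmr->lock);
    return tick;
}

static void drain_due_events(struct timer_sketch *tmr,
                             const unsigned long *event_tick, int n)
{
    unsigned long cur_tick = get_cur_tick(tmr);   /* one snapshot */
    int i;

    for (i = 0; i < n; i++)
        if (event_tick[i] <= cur_tick)            /* compare against the */
            printf("dispatch event %d\n", i);     /* snapshot only       */
}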
diff --git a/queue-4.14/netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch b/queue-4.14/netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch
new file mode 100644 (file)
index 0000000..fe28578
--- /dev/null
@@ -0,0 +1,52 @@
+From 8d0015a7ab76b8b1e89a3e5f5710a6e5103f2dd5 Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Sun, 2 Feb 2020 20:30:53 -0800
+Subject: netfilter: xt_hashlimit: limit the max size of hashtable
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+commit 8d0015a7ab76b8b1e89a3e5f5710a6e5103f2dd5 upstream.
+
+The user-specified hashtable size is unbounded; this could
+easily lead to an OOM or a hung task, as we hold the global
+mutex while allocating and initializing the new hashtable.
+
+Add a max value to cap both cfg->size and cfg->max, as
+suggested by Florian.
+
+Reported-and-tested-by: syzbot+adf6c6c2be1c3a718121@syzkaller.appspotmail.com
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/xt_hashlimit.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -845,6 +845,8 @@ hashlimit_mt(const struct sk_buff *skb,
+       return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
+ }
++#define HASHLIMIT_MAX_SIZE 1048576
++
+ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
+                                    struct xt_hashlimit_htable **hinfo,
+                                    struct hashlimit_cfg3 *cfg,
+@@ -855,6 +857,14 @@ static int hashlimit_mt_check_common(con
+       if (cfg->gc_interval == 0 || cfg->expire == 0)
+               return -EINVAL;
++      if (cfg->size > HASHLIMIT_MAX_SIZE) {
++              cfg->size = HASHLIMIT_MAX_SIZE;
++              pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
++      }
++      if (cfg->max > HASHLIMIT_MAX_SIZE) {
++              cfg->max = HASHLIMIT_MAX_SIZE;
++              pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
++      }
+       if (par->family == NFPROTO_IPV4) {
+               if (cfg->srcmask > 32 || cfg->dstmask > 32)
+                       return -EINVAL;
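
The fix reduces to clamping a user-controlled size before it reaches the allocator. A hypothetical standalone helper (the function name and message are illustrative; only the 1048576 ceiling comes from the patch):

#include <stdint.h>
#include <stdio.h>

#define SKETCH_MAX_SIZE 1048576u    /* the cap the patch picks: 2^20 buckets */

static uint32_t clamp_table_size(uint32_t requested, const char *name)
{
    if (requested > SKETCH_MAX_SIZE) {
        fprintf(stderr, "%s too large, truncated to %u\n",
                name, SKETCH_MAX_SIZE);
        return SKETCH_MAX_SIZE;     /* cap instead of failing outright */
    }
    return requested;
}

Note the patch truncates and logs rather than rejecting the rule, so existing (if excessive) configurations keep loading with a bounded table.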
diff --git a/queue-4.14/netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch b/queue-4.14/netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch
new file mode 100644 (file)
index 0000000..d504920
--- /dev/null
@@ -0,0 +1,78 @@
+From c4a3922d2d20c710f827d3a115ee338e8d0467df Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Sun, 2 Feb 2020 20:30:52 -0800
+Subject: netfilter: xt_hashlimit: reduce hashlimit_mutex scope for htable_put()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+commit c4a3922d2d20c710f827d3a115ee338e8d0467df upstream.
+
+It is unnecessary to hold hashlimit_mutex for htable_destroy()
+as it is already removed from the global hashtable and its
+refcount is already zero.
+
+Also, switch hinfo->use to refcount_t so that we don't have
+to hold the mutex until it reaches zero in htable_put().
+
+Reported-and-tested-by: syzbot+adf6c6c2be1c3a718121@syzkaller.appspotmail.com
+Acked-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/xt_hashlimit.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -36,6 +36,7 @@
+ #include <linux/netfilter/xt_hashlimit.h>
+ #include <linux/mutex.h>
+ #include <linux/kernel.h>
++#include <linux/refcount.h>
+ MODULE_LICENSE("GPL");
+ MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
+@@ -108,7 +109,7 @@ struct dsthash_ent {
+ struct xt_hashlimit_htable {
+       struct hlist_node node;         /* global list of all htables */
+-      int use;
++      refcount_t use;
+       u_int8_t family;
+       bool rnd_initialized;
+@@ -309,7 +310,7 @@ static int htable_create(struct net *net
+       for (i = 0; i < hinfo->cfg.size; i++)
+               INIT_HLIST_HEAD(&hinfo->hash[i]);
+-      hinfo->use = 1;
++      refcount_set(&hinfo->use, 1);
+       hinfo->count = 0;
+       hinfo->family = family;
+       hinfo->rnd_initialized = false;
+@@ -428,7 +429,7 @@ static struct xt_hashlimit_htable *htabl
+       hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
+               if (!strcmp(name, hinfo->name) &&
+                   hinfo->family == family) {
+-                      hinfo->use++;
++                      refcount_inc(&hinfo->use);
+                       return hinfo;
+               }
+       }
+@@ -437,12 +438,11 @@ static struct xt_hashlimit_htable *htabl
+ static void htable_put(struct xt_hashlimit_htable *hinfo)
+ {
+-      mutex_lock(&hashlimit_mutex);
+-      if (--hinfo->use == 0) {
++      if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
+               hlist_del(&hinfo->node);
++              mutex_unlock(&hashlimit_mutex);
+               htable_destroy(hinfo);
+       }
+-      mutex_unlock(&hashlimit_mutex);
+ }
+ /* The algorithm used is the Simple Token Bucket Filter (TBF)
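
A userspace sketch of the refcount_dec_and_mutex_lock() idiom the patch adopts (assumptions: C11 atomics and a pthread mutex stand in for the kernel primitives). The common case drops a reference without touching the mutex; only the thread that drops the last reference takes the lock.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static bool dec_and_mutex_lock(atomic_int *ref, pthread_mutex_t *lock)
{
    int old = atomic_load(ref);

    while (old > 1)                         /* fast path: not the last reference */
        if (atomic_compare_exchange_weak(ref, &old, old - 1))
            return false;                   /* dropped without taking the mutex */

    pthread_mutex_lock(lock);               /* possibly the last reference */
    if (atomic_fetch_sub(ref, 1) != 1) {
        pthread_mutex_unlock(lock);         /* someone else still holds a ref */
        return false;
    }
    return true;                            /* count hit zero, mutex held */
}

A caller mirrors the patched htable_put(): on a true return it unlinks the object from the global list, releases the mutex, and only then calls the destructor, so the heavy teardown no longer runs under hashlimit_mutex.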
diff --git a/queue-4.14/series b/queue-4.14/series
index f7d46e8731a99164131b218b493ab33c35ba94a7..11b5b2dd744f6c811a7e14ae7a5aea3af206c5c6 100644 (file)
@@ -228,3 +228,8 @@ staging-greybus-use-after-free-in-gb_audio_manager_remove_all.patch
 ecryptfs-replace-bug_on-with-error-handling-code.patch
 iommu-vt-d-fix-compile-warning-from-intel-svm.h.patch
 genirq-proc-reject-invalid-affinity-masks-again.patch
+alsa-rawmidi-avoid-bit-fields-for-state-flags.patch
+alsa-seq-avoid-concurrent-access-to-queue-flags.patch
+alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch
+netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch
+netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch