git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.5-stable patches
author Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 Feb 2020 12:21:16 +0000 (13:21 +0100)
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Thu, 27 Feb 2020 12:21:16 +0000 (13:21 +0100)
added patches:
alsa-rawmidi-avoid-bit-fields-for-state-flags.patch
alsa-seq-avoid-concurrent-access-to-queue-flags.patch
alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch
dma-direct-relax-addressability-checks-in-dma_direct_supported.patch
netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch
netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch
rxrpc-fix-call-rcu-cleanup-using-non-bh-safe-locks.patch

queue-5.5/alsa-rawmidi-avoid-bit-fields-for-state-flags.patch [new file with mode: 0644]
queue-5.5/alsa-seq-avoid-concurrent-access-to-queue-flags.patch [new file with mode: 0644]
queue-5.5/alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch [new file with mode: 0644]
queue-5.5/dma-direct-relax-addressability-checks-in-dma_direct_supported.patch [new file with mode: 0644]
queue-5.5/netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch [new file with mode: 0644]
queue-5.5/netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch [new file with mode: 0644]
queue-5.5/rxrpc-fix-call-rcu-cleanup-using-non-bh-safe-locks.patch [new file with mode: 0644]
queue-5.5/series

diff --git a/queue-5.5/alsa-rawmidi-avoid-bit-fields-for-state-flags.patch b/queue-5.5/alsa-rawmidi-avoid-bit-fields-for-state-flags.patch
new file mode 100644
index 0000000..bfabe32
--- /dev/null
@@ -0,0 +1,43 @@
+From dfa9a5efe8b932a84b3b319250aa3ac60c20f876 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 14 Feb 2020 12:13:16 +0100
+Subject: ALSA: rawmidi: Avoid bit fields for state flags
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit dfa9a5efe8b932a84b3b319250aa3ac60c20f876 upstream.
+
+The rawmidi state flags (opened, append, active_sensing) are stored in
+bit fields that can be potentially racy when concurrently accessed
+without any locks.  Although the current code should be fine, there is
+also no real benefit in keeping bit fields for such a small number
+of members.
+
+This patch changes those bit-field flags to simple bool fields.
+There should be no size increase of the snd_rawmidi_substream by this
+change.
+
+Reported-by: syzbot+576cc007eb9f2c968200@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/r/20200214111316.26939-4-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ include/sound/rawmidi.h |    6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+--- a/include/sound/rawmidi.h
++++ b/include/sound/rawmidi.h
+@@ -77,9 +77,9 @@ struct snd_rawmidi_substream {
+       struct list_head list;          /* list of all substream for given stream */
+       int stream;                     /* direction */
+       int number;                     /* substream number */
+-      unsigned int opened: 1,         /* open flag */
+-                   append: 1,         /* append flag (merge more streams) */
+-                   active_sensing: 1; /* send active sensing when close */
++      bool opened;                    /* open flag */
++      bool append;                    /* append flag (merge more streams) */
++      bool active_sensing;            /* send active sensing when close */
+       int use_count;                  /* use counter (for output) */
+       size_t bytes;
+       struct snd_rawmidi *rmidi;
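
For context on the patch above: the three flags previously shared a single
word, so an unlocked writer had to read-modify-write that whole word, while
plain bools occupy separate bytes.  A minimal userspace sketch (struct names
invented for illustration, not taken from the kernel headers) that also
checks the "no size increase" claim on common ABIs:

#include <stdbool.h>
#include <stdio.h>

struct substream_bitfield {		/* hypothetical old layout */
	int stream;
	int number;
	unsigned int opened: 1,
		     append: 1,
		     active_sensing: 1;
	int use_count;
};

struct substream_bool {		/* layout after the change */
	int stream;
	int number;
	bool opened;
	bool append;
	bool active_sensing;
	int use_count;
};

int main(void)
{
	/* The three bit fields share one unsigned int; the three bools fit
	 * in the padding before use_count, so the sizes typically match. */
	printf("bit fields: %zu bytes, bools: %zu bytes\n",
	       sizeof(struct substream_bitfield),
	       sizeof(struct substream_bool));
	return 0;
}

With the usual x86-64 ABI both structures come out at 16 bytes.
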
diff --git a/queue-5.5/alsa-seq-avoid-concurrent-access-to-queue-flags.patch b/queue-5.5/alsa-seq-avoid-concurrent-access-to-queue-flags.patch
new file mode 100644
index 0000000..b58e3d0
--- /dev/null
@@ -0,0 +1,96 @@
+From bb51e669fa49feb5904f452b2991b240ef31bc97 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 14 Feb 2020 12:13:14 +0100
+Subject: ALSA: seq: Avoid concurrent access to queue flags
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit bb51e669fa49feb5904f452b2991b240ef31bc97 upstream.
+
+The queue flags are represented in bit fields, and concurrent
+access may lead to unexpected results.  Although the current code
+should be mostly OK (as KCSAN reported, it only reads one field while
+other fields are being written), it's safer to cover both with proper
+spinlock protection.
+
+This patch fixes the possible concurrent read by protecting it with
+q->owner_lock.  The queue owner field is protected as well, since
+it is the field the lock itself is meant to protect.
+
+Reported-by: syzbot+65c6c92d04304d0a8efc@syzkaller.appspotmail.com
+Reported-by: syzbot+e60ddfa48717579799dd@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/r/20200214111316.26939-2-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/seq/seq_queue.c |   20 ++++++++++++++++----
+ 1 file changed, 16 insertions(+), 4 deletions(-)
+
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -392,6 +392,7 @@ int snd_seq_queue_check_access(int queue
+ int snd_seq_queue_set_owner(int queueid, int client, int locked)
+ {
+       struct snd_seq_queue *q = queueptr(queueid);
++      unsigned long flags;
+       if (q == NULL)
+               return -EINVAL;
+@@ -401,8 +402,10 @@ int snd_seq_queue_set_owner(int queueid,
+               return -EPERM;
+       }
++      spin_lock_irqsave(&q->owner_lock, flags);
+       q->locked = locked ? 1 : 0;
+       q->owner = client;
++      spin_unlock_irqrestore(&q->owner_lock, flags);
+       queue_access_unlock(q);
+       queuefree(q);
+@@ -539,15 +542,17 @@ void snd_seq_queue_client_termination(in
+       unsigned long flags;
+       int i;
+       struct snd_seq_queue *q;
++      bool matched;
+       for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
+               if ((q = queueptr(i)) == NULL)
+                       continue;
+               spin_lock_irqsave(&q->owner_lock, flags);
+-              if (q->owner == client)
++              matched = (q->owner == client);
++              if (matched)
+                       q->klocked = 1;
+               spin_unlock_irqrestore(&q->owner_lock, flags);
+-              if (q->owner == client) {
++              if (matched) {
+                       if (q->timer->running)
+                               snd_seq_timer_stop(q->timer);
+                       snd_seq_timer_reset(q->timer);
+@@ -739,6 +744,8 @@ void snd_seq_info_queues_read(struct snd
+       int i, bpm;
+       struct snd_seq_queue *q;
+       struct snd_seq_timer *tmr;
++      bool locked;
++      int owner;
+       for (i = 0; i < SNDRV_SEQ_MAX_QUEUES; i++) {
+               if ((q = queueptr(i)) == NULL)
+@@ -750,9 +757,14 @@ void snd_seq_info_queues_read(struct snd
+               else
+                       bpm = 0;
++              spin_lock_irq(&q->owner_lock);
++              locked = q->locked;
++              owner = q->owner;
++              spin_unlock_irq(&q->owner_lock);
++
+               snd_iprintf(buffer, "queue %d: [%s]\n", q->queue, q->name);
+-              snd_iprintf(buffer, "owned by client    : %d\n", q->owner);
+-              snd_iprintf(buffer, "lock status        : %s\n", q->locked ? "Locked" : "Free");
++              snd_iprintf(buffer, "owned by client    : %d\n", owner);
++              snd_iprintf(buffer, "lock status        : %s\n", locked ? "Locked" : "Free");
+               snd_iprintf(buffer, "queued time events : %d\n", snd_seq_prioq_avail(q->timeq));
+               snd_iprintf(buffer, "queued tick events : %d\n", snd_seq_prioq_avail(q->tickq));
+               snd_iprintf(buffer, "timer state        : %s\n", tmr->running ? "Running" : "Stopped");
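
The change to snd_seq_info_queues_read() above shows a common pattern: hold
the lock only long enough to copy the shared fields into locals, then do the
slow formatting from the copies.  A minimal userspace analog with a pthread
mutex (hypothetical names, not the kernel API) could look like this:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct queue_info {
	pthread_mutex_t owner_lock;
	int owner;
	bool locked;
};

static void print_queue_info(struct queue_info *q)
{
	int owner;
	bool locked;

	/* Snapshot both fields under the lock so they are consistent with
	 * each other, then drop the lock before the printf calls. */
	pthread_mutex_lock(&q->owner_lock);
	owner = q->owner;
	locked = q->locked;
	pthread_mutex_unlock(&q->owner_lock);

	printf("owned by client    : %d\n", owner);
	printf("lock status        : %s\n", locked ? "Locked" : "Free");
}

int main(void)
{
	struct queue_info q = {
		.owner_lock = PTHREAD_MUTEX_INITIALIZER,
		.owner = 128,
		.locked = true,
	};

	print_queue_info(&q);
	return 0;
}

Build with -pthread; the point is simply that nothing dereferences q->owner
or q->locked outside the critical section.
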
diff --git a/queue-5.5/alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch b/queue-5.5/alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch
new file mode 100644
index 0000000..e52fd7a
--- /dev/null
@@ -0,0 +1,137 @@
+From dc7497795e014d84699c3b8809ed6df35352dd74 Mon Sep 17 00:00:00 2001
+From: Takashi Iwai <tiwai@suse.de>
+Date: Fri, 14 Feb 2020 12:13:15 +0100
+Subject: ALSA: seq: Fix concurrent access to queue current tick/time
+
+From: Takashi Iwai <tiwai@suse.de>
+
+commit dc7497795e014d84699c3b8809ed6df35352dd74 upstream.
+
+snd_seq_check_queue() passes the current tick and time of the given
+queue as a pointer to snd_seq_prioq_cell_out(), but those might be
+updated concurrently by the seq timer update.
+
+Fix it by retrieving the current tick and time via the proper helper
+functions first, and passing those values to snd_seq_prioq_cell_out()
+later in the loops.
+
+snd_seq_timer_get_cur_time() takes a new argument and adjusts against
+the current system time only when requested; this adjustment isn't
+needed for snd_seq_check_queue(), as it's called either from the
+interrupt handler or right after queuing.
+
+Also, snd_seq_timer_get_cur_tick() is changed to read the value under
+the spinlock for concurrency.
+
+Reported-by: syzbot+fd5e0eaa1a32999173b2@syzkaller.appspotmail.com
+Link: https://lore.kernel.org/r/20200214111316.26939-3-tiwai@suse.de
+Signed-off-by: Takashi Iwai <tiwai@suse.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ sound/core/seq/seq_clientmgr.c |    4 ++--
+ sound/core/seq/seq_queue.c     |    9 ++++++---
+ sound/core/seq/seq_timer.c     |   13 ++++++++++---
+ sound/core/seq/seq_timer.h     |    3 ++-
+ 4 files changed, 20 insertions(+), 9 deletions(-)
+
+--- a/sound/core/seq/seq_clientmgr.c
++++ b/sound/core/seq/seq_clientmgr.c
+@@ -580,7 +580,7 @@ static int update_timestamp_of_queue(str
+       event->queue = queue;
+       event->flags &= ~SNDRV_SEQ_TIME_STAMP_MASK;
+       if (real_time) {
+-              event->time.time = snd_seq_timer_get_cur_time(q->timer);
++              event->time.time = snd_seq_timer_get_cur_time(q->timer, true);
+               event->flags |= SNDRV_SEQ_TIME_STAMP_REAL;
+       } else {
+               event->time.tick = snd_seq_timer_get_cur_tick(q->timer);
+@@ -1659,7 +1659,7 @@ static int snd_seq_ioctl_get_queue_statu
+       tmr = queue->timer;
+       status->events = queue->tickq->cells + queue->timeq->cells;
+-      status->time = snd_seq_timer_get_cur_time(tmr);
++      status->time = snd_seq_timer_get_cur_time(tmr, true);
+       status->tick = snd_seq_timer_get_cur_tick(tmr);
+       status->running = tmr->running;
+--- a/sound/core/seq/seq_queue.c
++++ b/sound/core/seq/seq_queue.c
+@@ -238,6 +238,8 @@ void snd_seq_check_queue(struct snd_seq_
+ {
+       unsigned long flags;
+       struct snd_seq_event_cell *cell;
++      snd_seq_tick_time_t cur_tick;
++      snd_seq_real_time_t cur_time;
+       if (q == NULL)
+               return;
+@@ -254,17 +256,18 @@ void snd_seq_check_queue(struct snd_seq_
+       __again:
+       /* Process tick queue... */
++      cur_tick = snd_seq_timer_get_cur_tick(q->timer);
+       for (;;) {
+-              cell = snd_seq_prioq_cell_out(q->tickq,
+-                                            &q->timer->tick.cur_tick);
++              cell = snd_seq_prioq_cell_out(q->tickq, &cur_tick);
+               if (!cell)
+                       break;
+               snd_seq_dispatch_event(cell, atomic, hop);
+       }
+       /* Process time queue... */
++      cur_time = snd_seq_timer_get_cur_time(q->timer, false);
+       for (;;) {
+-              cell = snd_seq_prioq_cell_out(q->timeq, &q->timer->cur_time);
++              cell = snd_seq_prioq_cell_out(q->timeq, &cur_time);
+               if (!cell)
+                       break;
+               snd_seq_dispatch_event(cell, atomic, hop);
+--- a/sound/core/seq/seq_timer.c
++++ b/sound/core/seq/seq_timer.c
+@@ -428,14 +428,15 @@ int snd_seq_timer_continue(struct snd_se
+ }
+ /* return current 'real' time. use timeofday() to get better granularity. */
+-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr)
++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
++                                             bool adjust_ktime)
+ {
+       snd_seq_real_time_t cur_time;
+       unsigned long flags;
+       spin_lock_irqsave(&tmr->lock, flags);
+       cur_time = tmr->cur_time;
+-      if (tmr->running) { 
++      if (adjust_ktime && tmr->running) {
+               struct timespec64 tm;
+               ktime_get_ts64(&tm);
+@@ -452,7 +453,13 @@ snd_seq_real_time_t snd_seq_timer_get_cu
+  high PPQ values) */
+ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr)
+ {
+-      return tmr->tick.cur_tick;
++      snd_seq_tick_time_t cur_tick;
++      unsigned long flags;
++
++      spin_lock_irqsave(&tmr->lock, flags);
++      cur_tick = tmr->tick.cur_tick;
++      spin_unlock_irqrestore(&tmr->lock, flags);
++      return cur_tick;
+ }
+--- a/sound/core/seq/seq_timer.h
++++ b/sound/core/seq/seq_timer.h
+@@ -120,7 +120,8 @@ int snd_seq_timer_set_tempo_ppq(struct s
+ int snd_seq_timer_set_position_tick(struct snd_seq_timer *tmr, snd_seq_tick_time_t position);
+ int snd_seq_timer_set_position_time(struct snd_seq_timer *tmr, snd_seq_real_time_t position);
+ int snd_seq_timer_set_skew(struct snd_seq_timer *tmr, unsigned int skew, unsigned int base);
+-snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr);
++snd_seq_real_time_t snd_seq_timer_get_cur_time(struct snd_seq_timer *tmr,
++                                             bool adjust_ktime);
+ snd_seq_tick_time_t snd_seq_timer_get_cur_tick(struct snd_seq_timer *tmr);
+ extern int seq_default_timer_class;
diff --git a/queue-5.5/dma-direct-relax-addressability-checks-in-dma_direct_supported.patch b/queue-5.5/dma-direct-relax-addressability-checks-in-dma_direct_supported.patch
new file mode 100644
index 0000000..ba56bb1
--- /dev/null
@@ -0,0 +1,77 @@
+From 91ef26f914171cf753330f13724fd9142b5b1640 Mon Sep 17 00:00:00 2001
+From: Christoph Hellwig <hch@lst.de>
+Date: Mon, 3 Feb 2020 18:11:10 +0100
+Subject: dma-direct: relax addressability checks in dma_direct_supported
+
+From: Christoph Hellwig <hch@lst.de>
+
+commit 91ef26f914171cf753330f13724fd9142b5b1640 upstream.
+
+dma_direct_supported tries to find the minimum addressable bitmask
+based on the end pfn and optional magic that architectures can use
+to communicate the size of the magic ZONE_DMA that can be used
+for bounce buffering.  But between the DMA offsets that can change per
+device (or sometimes even per region), the fact that ZONE_DMA isn't
+even guaranteed to cover the lowest addresses, and the lack of proper
+interfaces to the MM code, this check fails for at least one arm
+subarchitecture.
+
+As all the legacy DMA implementations have supported 32-bit DMA
+masks, and 32-bit masks are guaranteed to always work by the API
+contract (using bounce buffers if needed), we can shortcut the
+complicated check and always return true without breaking existing
+assumptions.  Hopefully we can properly clean up the interaction
+with the arch defined zones and the bootmem allocator eventually.
+
+Fixes: ad3c7b18c5b3 ("arm: use swiotlb for bounce buffering on LPAE configs")
+Reported-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Tested-by: Peter Ujfalusi <peter.ujfalusi@ti.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ kernel/dma/direct.c |   24 +++++++++++-------------
+ 1 file changed, 11 insertions(+), 13 deletions(-)
+
+--- a/kernel/dma/direct.c
++++ b/kernel/dma/direct.c
+@@ -472,28 +472,26 @@ int dma_direct_mmap(struct device *dev,
+ }
+ #endif /* CONFIG_MMU */
+-/*
+- * Because 32-bit DMA masks are so common we expect every architecture to be
+- * able to satisfy them - either by not supporting more physical memory, or by
+- * providing a ZONE_DMA32.  If neither is the case, the architecture needs to
+- * use an IOMMU instead of the direct mapping.
+- */
+ int dma_direct_supported(struct device *dev, u64 mask)
+ {
+-      u64 min_mask;
++      u64 min_mask = (max_pfn - 1) << PAGE_SHIFT;
+-      if (IS_ENABLED(CONFIG_ZONE_DMA))
+-              min_mask = DMA_BIT_MASK(zone_dma_bits);
+-      else
+-              min_mask = DMA_BIT_MASK(32);
+-
+-      min_mask = min_t(u64, min_mask, (max_pfn - 1) << PAGE_SHIFT);
++      /*
++       * Because 32-bit DMA masks are so common we expect every architecture
++       * to be able to satisfy them - either by not supporting more physical
++       * memory, or by providing a ZONE_DMA32.  If neither is the case, the
++       * architecture needs to use an IOMMU instead of the direct mapping.
++       */
++      if (mask >= DMA_BIT_MASK(32))
++              return 1;
+       /*
+        * This check needs to be against the actual bit mask value, so
+        * use __phys_to_dma() here so that the SME encryption mask isn't
+        * part of the check.
+        */
++      if (IS_ENABLED(CONFIG_ZONE_DMA))
++              min_mask = min_t(u64, min_mask, DMA_BIT_MASK(zone_dma_bits));
+       return mask >= __phys_to_dma(dev, min_mask);
+ }
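
The rewritten check above boils down to two rules: a mask of 32 bits or more
is accepted unconditionally, and a narrower mask is accepted only if it still
reaches the highest RAM address, lowered to the ZONE_DMA limit when such a
zone exists.  A rough userspace model of that decision (invented constants;
the real code additionally routes min_mask through __phys_to_dma() so the
SME encryption bit is not part of the comparison):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* DMA_BIT_MASK(n)-style helper for the sketch. */
static uint64_t bit_mask(unsigned int n)
{
	return (n >= 64) ? ~0ULL : ((1ULL << n) - 1);
}

static bool direct_supported(uint64_t mask, uint64_t highest_ram_addr,
			     unsigned int zone_dma_bits)
{
	uint64_t min_mask = highest_ram_addr;

	/* 32-bit masks always work by API contract (bounce buffers if
	 * needed), so accept them without looking at the memory layout. */
	if (mask >= bit_mask(32))
		return true;

	/* A smaller mask must still reach the ZONE_DMA limit (if any),
	 * or the top of RAM when that is lower. */
	if (zone_dma_bits && bit_mask(zone_dma_bits) < min_mask)
		min_mask = bit_mask(zone_dma_bits);

	return mask >= min_mask;
}

int main(void)
{
	uint64_t ram_top = (8ULL << 30) - 1;	/* 8 GiB of RAM */

	/* A 24-bit mask cannot reach a 30-bit ZONE_DMA: rejected (0). */
	printf("%d\n", direct_supported(bit_mask(24), ram_top, 30));
	/* A full 32-bit mask is always accepted (1). */
	printf("%d\n", direct_supported(bit_mask(32), ram_top, 30));
	return 0;
}
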
diff --git a/queue-5.5/netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch b/queue-5.5/netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch
new file mode 100644
index 0000000..90964c6
--- /dev/null
@@ -0,0 +1,52 @@
+From 8d0015a7ab76b8b1e89a3e5f5710a6e5103f2dd5 Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Sun, 2 Feb 2020 20:30:53 -0800
+Subject: netfilter: xt_hashlimit: limit the max size of hashtable
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+commit 8d0015a7ab76b8b1e89a3e5f5710a6e5103f2dd5 upstream.
+
+The user-specified hashtable size is unbounded; this could
+easily lead to an OOM or a hung task as we hold the global
+mutex while allocating and initializing the new hashtable.
+
+Add a max value to cap both cfg->size and cfg->max, as
+suggested by Florian.
+
+Reported-and-tested-by: syzbot+adf6c6c2be1c3a718121@syzkaller.appspotmail.com
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Reviewed-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/xt_hashlimit.c |   10 ++++++++++
+ 1 file changed, 10 insertions(+)
+
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -851,6 +851,8 @@ hashlimit_mt(const struct sk_buff *skb,
+       return hashlimit_mt_common(skb, par, hinfo, &info->cfg, 3);
+ }
++#define HASHLIMIT_MAX_SIZE 1048576
++
+ static int hashlimit_mt_check_common(const struct xt_mtchk_param *par,
+                                    struct xt_hashlimit_htable **hinfo,
+                                    struct hashlimit_cfg3 *cfg,
+@@ -861,6 +863,14 @@ static int hashlimit_mt_check_common(con
+       if (cfg->gc_interval == 0 || cfg->expire == 0)
+               return -EINVAL;
++      if (cfg->size > HASHLIMIT_MAX_SIZE) {
++              cfg->size = HASHLIMIT_MAX_SIZE;
++              pr_info_ratelimited("size too large, truncated to %u\n", cfg->size);
++      }
++      if (cfg->max > HASHLIMIT_MAX_SIZE) {
++              cfg->max = HASHLIMIT_MAX_SIZE;
++              pr_info_ratelimited("max too large, truncated to %u\n", cfg->max);
++      }
+       if (par->family == NFPROTO_IPV4) {
+               if (cfg->srcmask > 32 || cfg->dstmask > 32)
+                       return -EINVAL;
diff --git a/queue-5.5/netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch b/queue-5.5/netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch
new file mode 100644
index 0000000..4babd89
--- /dev/null
@@ -0,0 +1,78 @@
+From c4a3922d2d20c710f827d3a115ee338e8d0467df Mon Sep 17 00:00:00 2001
+From: Cong Wang <xiyou.wangcong@gmail.com>
+Date: Sun, 2 Feb 2020 20:30:52 -0800
+Subject: netfilter: xt_hashlimit: reduce hashlimit_mutex scope for htable_put()
+
+From: Cong Wang <xiyou.wangcong@gmail.com>
+
+commit c4a3922d2d20c710f827d3a115ee338e8d0467df upstream.
+
+It is unnecessary to hold hashlimit_mutex for htable_destroy(),
+as the htable is already removed from the global list and its
+refcount is already zero.
+
+Also, switch hinfo->use to refcount_t so that we don't have
+to hold the mutex until it reaches zero in htable_put().
+
+Reported-and-tested-by: syzbot+adf6c6c2be1c3a718121@syzkaller.appspotmail.com
+Acked-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Cong Wang <xiyou.wangcong@gmail.com>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/netfilter/xt_hashlimit.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/net/netfilter/xt_hashlimit.c
++++ b/net/netfilter/xt_hashlimit.c
+@@ -36,6 +36,7 @@
+ #include <linux/netfilter_ipv6/ip6_tables.h>
+ #include <linux/mutex.h>
+ #include <linux/kernel.h>
++#include <linux/refcount.h>
+ #include <uapi/linux/netfilter/xt_hashlimit.h>
+ #define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
+@@ -114,7 +115,7 @@ struct dsthash_ent {
+ struct xt_hashlimit_htable {
+       struct hlist_node node;         /* global list of all htables */
+-      int use;
++      refcount_t use;
+       u_int8_t family;
+       bool rnd_initialized;
+@@ -315,7 +316,7 @@ static int htable_create(struct net *net
+       for (i = 0; i < hinfo->cfg.size; i++)
+               INIT_HLIST_HEAD(&hinfo->hash[i]);
+-      hinfo->use = 1;
++      refcount_set(&hinfo->use, 1);
+       hinfo->count = 0;
+       hinfo->family = family;
+       hinfo->rnd_initialized = false;
+@@ -434,7 +435,7 @@ static struct xt_hashlimit_htable *htabl
+       hlist_for_each_entry(hinfo, &hashlimit_net->htables, node) {
+               if (!strcmp(name, hinfo->name) &&
+                   hinfo->family == family) {
+-                      hinfo->use++;
++                      refcount_inc(&hinfo->use);
+                       return hinfo;
+               }
+       }
+@@ -443,12 +444,11 @@ static struct xt_hashlimit_htable *htabl
+ static void htable_put(struct xt_hashlimit_htable *hinfo)
+ {
+-      mutex_lock(&hashlimit_mutex);
+-      if (--hinfo->use == 0) {
++      if (refcount_dec_and_mutex_lock(&hinfo->use, &hashlimit_mutex)) {
+               hlist_del(&hinfo->node);
++              mutex_unlock(&hashlimit_mutex);
+               htable_destroy(hinfo);
+       }
+-      mutex_unlock(&hashlimit_mutex);
+ }
+ /* The algorithm used is the Simple Token Bucket Filter (TBF)
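
The htable_put() change above leans on the refcount_dec_and_mutex_lock()
idiom: the mutex is taken only when the count may drop to zero, it is held
across the unlink so a lookup taking a new reference under the same mutex
cannot revive a dying table, and it is released before the actual
destruction.  A simplified userspace analog using C11 atomics and a pthread
mutex (names invented for the sketch):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t table_mutex = PTHREAD_MUTEX_INITIALIZER;

struct table {
	atomic_int use;
	/* hash buckets, name, etc. would live here */
};

/* Drop a reference; only when it may be the last one, decide under the
 * mutex, and return true with the mutex held so the caller can unlink. */
static bool dec_and_mutex_lock(atomic_int *use, pthread_mutex_t *mutex)
{
	int old = atomic_load(use);

	while (old > 1) {	/* fast path: clearly not the last reference */
		if (atomic_compare_exchange_weak(use, &old, old - 1))
			return false;
	}

	pthread_mutex_lock(mutex);
	if (atomic_fetch_sub(use, 1) != 1) {
		pthread_mutex_unlock(mutex);
		return false;
	}
	return true;		/* last reference: mutex held for the unlink */
}

static void table_put(struct table *t)
{
	if (dec_and_mutex_lock(&t->use, &table_mutex)) {
		/* unlink from the global list here ... */
		pthread_mutex_unlock(&table_mutex);
		free(t);	/* destroy without holding the mutex */
	}
}

int main(void)
{
	struct table *t = calloc(1, sizeof(*t));

	if (!t)
		return 1;
	atomic_init(&t->use, 1);
	table_put(t);
	puts("table destroyed");
	return 0;
}
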
diff --git a/queue-5.5/rxrpc-fix-call-rcu-cleanup-using-non-bh-safe-locks.patch b/queue-5.5/rxrpc-fix-call-rcu-cleanup-using-non-bh-safe-locks.patch
new file mode 100644
index 0000000..38a9816
--- /dev/null
@@ -0,0 +1,85 @@
+From 963485d436ccc2810177a7b08af22336ec2af67b Mon Sep 17 00:00:00 2001
+From: David Howells <dhowells@redhat.com>
+Date: Thu, 6 Feb 2020 13:57:40 +0000
+Subject: rxrpc: Fix call RCU cleanup using non-bh-safe locks
+
+From: David Howells <dhowells@redhat.com>
+
+commit 963485d436ccc2810177a7b08af22336ec2af67b upstream.
+
+rxrpc_rcu_destroy_call(), which is called as an RCU callback to clean up a
+put call, calls rxrpc_put_connection() which, deep in its bowels, takes a
+number of spinlocks in a non-BH-safe way, including rxrpc_conn_id_lock and
+local->client_conns_lock.  RCU callbacks, however, are normally called from
+softirq context, which can cause lockdep to notice the locking
+inconsistency.
+
+To get lockdep to detect this, it's necessary to have the connection
+cleaned up on the put at the end of the last of its calls, though normally
+the cleanup is deferred.  This can be induced, however, by starting a call
+on an AF_RXRPC socket and then closing the socket without reading the
+reply.
+
+Fix this by having rxrpc_rcu_destroy_call() punt the destruction to a
+workqueue if it runs in softirq mode, deferring it to process context.
+
+Note that another way to fix this could be to add a bunch of bh-disable
+annotations to the spinlocks concerned - and there might be more than just
+those two - but that means spending more time with BHs disabled.
+
+Note also that some of these places were covered by bh-disable spinlocks
+belonging to the rxrpc_transport object, but these got removed without the
+_bh annotation being retained on the next lock in.
+
+Fixes: 999b69f89241 ("rxrpc: Kill the client connection bundle concept")
+Reported-by: syzbot+d82f3ac8d87e7ccbb2c9@syzkaller.appspotmail.com
+Reported-by: syzbot+3f1fd6b8cbf8702d134e@syzkaller.appspotmail.com
+Signed-off-by: David Howells <dhowells@redhat.com>
+cc: Hillf Danton <hdanton@sina.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+---
+ net/rxrpc/call_object.c |   22 +++++++++++++++++++---
+ 1 file changed, 19 insertions(+), 3 deletions(-)
+
+--- a/net/rxrpc/call_object.c
++++ b/net/rxrpc/call_object.c
+@@ -562,11 +562,11 @@ void rxrpc_put_call(struct rxrpc_call *c
+ }
+ /*
+- * Final call destruction under RCU.
++ * Final call destruction - but must be done in process context.
+  */
+-static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
++static void rxrpc_destroy_call(struct work_struct *work)
+ {
+-      struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
++      struct rxrpc_call *call = container_of(work, struct rxrpc_call, processor);
+       struct rxrpc_net *rxnet = call->rxnet;
+       rxrpc_put_connection(call->conn);
+@@ -579,6 +579,22 @@ static void rxrpc_rcu_destroy_call(struc
+ }
+ /*
++ * Final call destruction under RCU.
++ */
++static void rxrpc_rcu_destroy_call(struct rcu_head *rcu)
++{
++      struct rxrpc_call *call = container_of(rcu, struct rxrpc_call, rcu);
++
++      if (in_softirq()) {
++              INIT_WORK(&call->processor, rxrpc_destroy_call);
++              if (!rxrpc_queue_work(&call->processor))
++                      BUG();
++      } else {
++              rxrpc_destroy_call(&call->processor);
++      }
++}
++
++/*
+  * clean up a call
+  */
+ void rxrpc_cleanup_call(struct rxrpc_call *call)
diff --git a/queue-5.5/series b/queue-5.5/series
index 240afe8c7a1e49d4c8d529a03452f993daafe084..01374d4cc080b57bb93231adc5f33e78a3f129c4 100644
@@ -115,7 +115,6 @@ drm-bridge-tc358767-fix-poll-timeouts.patch
 drm-i915-gem-require-per-engine-reset-support-for-non-persistent-contexts.patch
 drm-i915-gt-protect-defer_request-from-new-waiters.patch
 drm-i915-ehl-update-port-clock-voltage-level-requirements.patch
-drm-amd-display-fix-dtm-unloading.patch
 drm-msm-dpu-fix-bgr565-vs-rgb565-confusion.patch
 scsi-revert-rdma-isert-fix-a-recently-introduced-regression-related-to-logout.patch
 scsi-revert-target-iscsi-wait-for-all-commands-to-finish-before-freeing-a-session.patch
@@ -134,3 +133,10 @@ arm64-lse-fix-lse-atomics-with-llvm.patch
 scripts-get_maintainer.pl-deprioritize-old-fixes-addresses.patch
 io_uring-prevent-sq_thread-from-spinning-when-it-should-stop.patch
 io_uring-fix-__io_iopoll_check-deadlock-in-io_sq_thread.patch
+dma-direct-relax-addressability-checks-in-dma_direct_supported.patch
+alsa-rawmidi-avoid-bit-fields-for-state-flags.patch
+alsa-seq-avoid-concurrent-access-to-queue-flags.patch
+alsa-seq-fix-concurrent-access-to-queue-current-tick-time.patch
+netfilter-xt_hashlimit-reduce-hashlimit_mutex-scope-for-htable_put.patch
+netfilter-xt_hashlimit-limit-the-max-size-of-hashtable.patch
+rxrpc-fix-call-rcu-cleanup-using-non-bh-safe-locks.patch