]> git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.18-stable patches
authorGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 6 Jun 2022 15:20:22 +0000 (17:20 +0200)
committerGreg Kroah-Hartman <gregkh@linuxfoundation.org>
Mon, 6 Jun 2022 15:20:22 +0000 (17:20 +0200)
added patches:
asoc-rt5514-fix-event-generation-for-dsp-voice-wake-up-control.patch
bcache-avoid-journal-no-space-deadlock-by-reserving-1-journal-bucket.patch
bcache-improve-multithreaded-bch_btree_check.patch
bcache-improve-multithreaded-bch_sectors_dirty_init.patch
bcache-remove-incremental-dirty-sector-counting-for-bch_sectors_dirty_init.patch
carl9170-tx-fix-an-incorrect-use-of-list-iterator.patch
rtl818x-prevent-using-not-initialized-queues.patch
serial-pch-don-t-overwrite-xmit-buf-by-x_char.patch
stm-ltdc-fix-two-incorrect-null-checks-on-list-iterator.patch
tilcdc-tilcdc_external-fix-an-incorrect-null-check-on-list-iterator.patch

queue-5.18/asoc-rt5514-fix-event-generation-for-dsp-voice-wake-up-control.patch [new file with mode: 0644]
queue-5.18/bcache-avoid-journal-no-space-deadlock-by-reserving-1-journal-bucket.patch [new file with mode: 0644]
queue-5.18/bcache-improve-multithreaded-bch_btree_check.patch [new file with mode: 0644]
queue-5.18/bcache-improve-multithreaded-bch_sectors_dirty_init.patch [new file with mode: 0644]
queue-5.18/bcache-remove-incremental-dirty-sector-counting-for-bch_sectors_dirty_init.patch [new file with mode: 0644]
queue-5.18/carl9170-tx-fix-an-incorrect-use-of-list-iterator.patch [new file with mode: 0644]
queue-5.18/rtl818x-prevent-using-not-initialized-queues.patch [new file with mode: 0644]
queue-5.18/serial-pch-don-t-overwrite-xmit-buf-by-x_char.patch [new file with mode: 0644]
queue-5.18/series
queue-5.18/stm-ltdc-fix-two-incorrect-null-checks-on-list-iterator.patch [new file with mode: 0644]
queue-5.18/tilcdc-tilcdc_external-fix-an-incorrect-null-check-on-list-iterator.patch [new file with mode: 0644]

diff --git a/queue-5.18/asoc-rt5514-fix-event-generation-for-dsp-voice-wake-up-control.patch b/queue-5.18/asoc-rt5514-fix-event-generation-for-dsp-voice-wake-up-control.patch
new file mode 100644 (file)
index 0000000..066f686
--- /dev/null
@@ -0,0 +1,34 @@
+From 4213ff556740bb45e2d9ff0f50d056c4e7dd0921 Mon Sep 17 00:00:00 2001
+From: Mark Brown <broonie@kernel.org>
+Date: Thu, 28 Apr 2022 17:24:44 +0100
+Subject: ASoC: rt5514: Fix event generation for "DSP Voice Wake Up" control
+
+From: Mark Brown <broonie@kernel.org>
+
+commit 4213ff556740bb45e2d9ff0f50d056c4e7dd0921 upstream.
+
+The driver has a custom put function for "DSP Voice Wake Up" which does
+not generate event notifications on change, instead returning 0. Since we
+already exit early in the case that there is no change this can be fixed
+by unconditionally returning 1 at the end of the function.
+
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220428162444.3883147-1-broonie@kernel.org
+Signed-off-by: Mark Brown <broonie@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ sound/soc/codecs/rt5514.c |    2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/sound/soc/codecs/rt5514.c
++++ b/sound/soc/codecs/rt5514.c
+@@ -419,7 +419,7 @@ static int rt5514_dsp_voice_wake_up_put(
+               }
+       }
+-      return 0;
++      return 1;
+ }
+ static const struct snd_kcontrol_new rt5514_snd_controls[] = {
diff --git a/queue-5.18/bcache-avoid-journal-no-space-deadlock-by-reserving-1-journal-bucket.patch b/queue-5.18/bcache-avoid-journal-no-space-deadlock-by-reserving-1-journal-bucket.patch
new file mode 100644 (file)
index 0000000..8dc6dae
--- /dev/null
@@ -0,0 +1,145 @@
+From 32feee36c30ea06e38ccb8ae6e5c44c6eec790a6 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Tue, 24 May 2022 18:23:36 +0800
+Subject: bcache: avoid journal no-space deadlock by reserving 1 journal bucket
+
+From: Coly Li <colyli@suse.de>
+
+commit 32feee36c30ea06e38ccb8ae6e5c44c6eec790a6 upstream.
+
+The journal no-space deadlock was reported time to time. Such deadlock
+can happen in the following situation.
+
+When all journal buckets are fully filled by active jset with heavy
+write I/O load, the cache set registration (after a reboot) will load
+all active jsets and inserting them into the btree again (which is
+called journal replay). If a journaled bkey is inserted into a btree
+node and results in a btree node split, a new journal request might be
+triggered. For example, the btree grows one more level after the node
+split, then the root node record in cache device super block will be
+upgraded by bch_journal_meta() from bch_btree_set_root(). But there is no
+space in journal buckets, the journal replay has to wait for new journal
+bucket to be reclaimed after at least one journal bucket replayed. This
+is one example that how the journal no-space deadlock happens.
+
+The solution to avoid the deadlock is to reserve 1 journal bucket in
+run time, and only permit the reserved journal bucket to be used during
+cache set registration procedure for things like journal replay. Then
+the journal space will never be fully filled, there is no chance for
+journal no-space deadlock to happen anymore.
+
+This patch adds a new member "bool do_reserve" in struct journal, it is
+initialized to 0 (false) when struct journal is allocated, and set to
+1 (true) by bch_journal_space_reserve() when all initialization done in
+run_cache_set(). In the run time when journal_reclaim() tries to
+allocate a new journal bucket, free_journal_buckets() is called to check
+whether there are enough free journal buckets to use. If there is only
+1 free journal bucket and journal->do_reserve is 1 (true), the last
+bucket is reserved and free_journal_buckets() will return 0 to indicate
+no free journal bucket. Then journal_reclaim() will give up, and try
+next time to see whether there is free journal bucket to allocate. By
+this method, there is always 1 journal bucket reserved in run time.
+
+During the cache set registration, journal->do_reserve is 0 (false), so
+the reserved journal bucket can be used to avoid the no-space deadlock.
+
+Reported-by: Nikhil Kshirsagar <nkshirsagar@gmail.com>
+Signed-off-by: Coly Li <colyli@suse.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220524102336.10684-5-colyli@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/bcache/journal.c |   31 ++++++++++++++++++++++++++-----
+ drivers/md/bcache/journal.h |    2 ++
+ drivers/md/bcache/super.c   |    1 +
+ 3 files changed, 29 insertions(+), 5 deletions(-)
+
+--- a/drivers/md/bcache/journal.c
++++ b/drivers/md/bcache/journal.c
+@@ -405,6 +405,11 @@ err:
+       return ret;
+ }
++void bch_journal_space_reserve(struct journal *j)
++{
++      j->do_reserve = true;
++}
++
+ /* Journalling */
+ static void btree_flush_write(struct cache_set *c)
+@@ -621,12 +626,30 @@ static void do_journal_discard(struct ca
+       }
+ }
++static unsigned int free_journal_buckets(struct cache_set *c)
++{
++      struct journal *j = &c->journal;
++      struct cache *ca = c->cache;
++      struct journal_device *ja = &c->cache->journal;
++      unsigned int n;
++
++      /* In case njournal_buckets is not power of 2 */
++      if (ja->cur_idx >= ja->discard_idx)
++              n = ca->sb.njournal_buckets +  ja->discard_idx - ja->cur_idx;
++      else
++              n = ja->discard_idx - ja->cur_idx;
++
++      if (n > (1 + j->do_reserve))
++              return n - (1 + j->do_reserve);
++
++      return 0;
++}
++
+ static void journal_reclaim(struct cache_set *c)
+ {
+       struct bkey *k = &c->journal.key;
+       struct cache *ca = c->cache;
+       uint64_t last_seq;
+-      unsigned int next;
+       struct journal_device *ja = &ca->journal;
+       atomic_t p __maybe_unused;
+@@ -649,12 +672,10 @@ static void journal_reclaim(struct cache
+       if (c->journal.blocks_free)
+               goto out;
+-      next = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+-      /* No space available on this device */
+-      if (next == ja->discard_idx)
++      if (!free_journal_buckets(c))
+               goto out;
+-      ja->cur_idx = next;
++      ja->cur_idx = (ja->cur_idx + 1) % ca->sb.njournal_buckets;
+       k->ptr[0] = MAKE_PTR(0,
+                            bucket_to_sector(c, ca->sb.d[ja->cur_idx]),
+                            ca->sb.nr_this_dev);
+--- a/drivers/md/bcache/journal.h
++++ b/drivers/md/bcache/journal.h
+@@ -105,6 +105,7 @@ struct journal {
+       spinlock_t              lock;
+       spinlock_t              flush_write_lock;
+       bool                    btree_flushing;
++      bool                    do_reserve;
+       /* used when waiting because the journal was full */
+       struct closure_waitlist wait;
+       struct closure          io;
+@@ -182,5 +183,6 @@ int bch_journal_replay(struct cache_set
+ void bch_journal_free(struct cache_set *c);
+ int bch_journal_alloc(struct cache_set *c);
++void bch_journal_space_reserve(struct journal *j);
+ #endif /* _BCACHE_JOURNAL_H */
+--- a/drivers/md/bcache/super.c
++++ b/drivers/md/bcache/super.c
+@@ -2128,6 +2128,7 @@ static int run_cache_set(struct cache_se
+       flash_devs_run(c);
++      bch_journal_space_reserve(&c->journal);
+       set_bit(CACHE_SET_RUNNING, &c->flags);
+       return 0;
+ err:
diff --git a/queue-5.18/bcache-improve-multithreaded-bch_btree_check.patch b/queue-5.18/bcache-improve-multithreaded-bch_btree_check.patch
new file mode 100644 (file)
index 0000000..76d4994
--- /dev/null
@@ -0,0 +1,155 @@
+From 622536443b6731ec82c563aae7807165adbe9178 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Tue, 24 May 2022 18:23:33 +0800
+Subject: bcache: improve multithreaded bch_btree_check()
+
+From: Coly Li <colyli@suse.de>
+
+commit 622536443b6731ec82c563aae7807165adbe9178 upstream.
+
+Commit 8e7102273f59 ("bcache: make bch_btree_check() to be
+multithreaded") makes bch_btree_check() to be much faster when checking
+all btree nodes during cache device registration. But it isn't in ideal
+shape yet, still can be improved.
+
+This patch does the following thing to improve current parallel btree
+nodes check by multiple threads in bch_btree_check(),
+- Add read lock to root node while checking all the btree nodes with
+  multiple threads. Although currently it is not mandatory but it is
+  good to have a read lock in code logic.
+- Remove local variable 'char name[32]', and generate kernel thread name
+  string directly when calling kthread_run().
+- Allocate local variable "struct btree_check_state check_state" on the
+  stack and avoid unnecessary dynamic memory allocation for it.
+- Reduce BCH_BTR_CHKTHREAD_MAX from 64 to 12 which is enough indeed.
+- Increase check_state->started to count created kernel thread after it
+  succeeds to create.
+- When wait for all checking kernel threads to finish, use wait_event()
+  to replace wait_event_interruptible().
+
+With this change, the code is more clear, and some potential error
+conditions are avoided.
+
+Fixes: 8e7102273f59 ("bcache: make bch_btree_check() to be multithreaded")
+Signed-off-by: Coly Li <colyli@suse.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220524102336.10684-2-colyli@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/bcache/btree.c |   58 ++++++++++++++++++++--------------------------
+ drivers/md/bcache/btree.h |    2 -
+ 2 files changed, 27 insertions(+), 33 deletions(-)
+
+--- a/drivers/md/bcache/btree.c
++++ b/drivers/md/bcache/btree.c
+@@ -2006,8 +2006,7 @@ int bch_btree_check(struct cache_set *c)
+       int i;
+       struct bkey *k = NULL;
+       struct btree_iter iter;
+-      struct btree_check_state *check_state;
+-      char name[32];
++      struct btree_check_state check_state;
+       /* check and mark root node keys */
+       for_each_key_filter(&c->root->keys, k, &iter, bch_ptr_invalid)
+@@ -2018,63 +2017,58 @@ int bch_btree_check(struct cache_set *c)
+       if (c->root->level == 0)
+               return 0;
+-      check_state = kzalloc(sizeof(struct btree_check_state), GFP_KERNEL);
+-      if (!check_state)
+-              return -ENOMEM;
+-
+-      check_state->c = c;
+-      check_state->total_threads = bch_btree_chkthread_nr();
+-      check_state->key_idx = 0;
+-      spin_lock_init(&check_state->idx_lock);
+-      atomic_set(&check_state->started, 0);
+-      atomic_set(&check_state->enough, 0);
+-      init_waitqueue_head(&check_state->wait);
++      check_state.c = c;
++      check_state.total_threads = bch_btree_chkthread_nr();
++      check_state.key_idx = 0;
++      spin_lock_init(&check_state.idx_lock);
++      atomic_set(&check_state.started, 0);
++      atomic_set(&check_state.enough, 0);
++      init_waitqueue_head(&check_state.wait);
++      rw_lock(0, c->root, c->root->level);
+       /*
+        * Run multiple threads to check btree nodes in parallel,
+-       * if check_state->enough is non-zero, it means current
++       * if check_state.enough is non-zero, it means current
+        * running check threads are enough, unncessary to create
+        * more.
+        */
+-      for (i = 0; i < check_state->total_threads; i++) {
+-              /* fetch latest check_state->enough earlier */
++      for (i = 0; i < check_state.total_threads; i++) {
++              /* fetch latest check_state.enough earlier */
+               smp_mb__before_atomic();
+-              if (atomic_read(&check_state->enough))
++              if (atomic_read(&check_state.enough))
+                       break;
+-              check_state->infos[i].result = 0;
+-              check_state->infos[i].state = check_state;
+-              snprintf(name, sizeof(name), "bch_btrchk[%u]", i);
+-              atomic_inc(&check_state->started);
++              check_state.infos[i].result = 0;
++              check_state.infos[i].state = &check_state;
+-              check_state->infos[i].thread =
++              check_state.infos[i].thread =
+                       kthread_run(bch_btree_check_thread,
+-                                  &check_state->infos[i],
+-                                  name);
+-              if (IS_ERR(check_state->infos[i].thread)) {
++                                  &check_state.infos[i],
++                                  "bch_btrchk[%d]", i);
++              if (IS_ERR(check_state.infos[i].thread)) {
+                       pr_err("fails to run thread bch_btrchk[%d]\n", i);
+                       for (--i; i >= 0; i--)
+-                              kthread_stop(check_state->infos[i].thread);
++                              kthread_stop(check_state.infos[i].thread);
+                       ret = -ENOMEM;
+                       goto out;
+               }
++              atomic_inc(&check_state.started);
+       }
+       /*
+        * Must wait for all threads to stop.
+        */
+-      wait_event_interruptible(check_state->wait,
+-                               atomic_read(&check_state->started) == 0);
++      wait_event(check_state.wait, atomic_read(&check_state.started) == 0);
+-      for (i = 0; i < check_state->total_threads; i++) {
+-              if (check_state->infos[i].result) {
+-                      ret = check_state->infos[i].result;
++      for (i = 0; i < check_state.total_threads; i++) {
++              if (check_state.infos[i].result) {
++                      ret = check_state.infos[i].result;
+                       goto out;
+               }
+       }
+ out:
+-      kfree(check_state);
++      rw_unlock(0, c->root);
+       return ret;
+ }
+--- a/drivers/md/bcache/btree.h
++++ b/drivers/md/bcache/btree.h
+@@ -226,7 +226,7 @@ struct btree_check_info {
+       int                             result;
+ };
+-#define BCH_BTR_CHKTHREAD_MAX 64
++#define BCH_BTR_CHKTHREAD_MAX 12
+ struct btree_check_state {
+       struct cache_set                *c;
+       int                             total_threads;
diff --git a/queue-5.18/bcache-improve-multithreaded-bch_sectors_dirty_init.patch b/queue-5.18/bcache-improve-multithreaded-bch_sectors_dirty_init.patch
new file mode 100644 (file)
index 0000000..1dfb44d
--- /dev/null
@@ -0,0 +1,146 @@
+From 4dc34ae1b45fe26e772a44379f936c72623dd407 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Tue, 24 May 2022 18:23:34 +0800
+Subject: bcache: improve multithreaded bch_sectors_dirty_init()
+
+From: Coly Li <colyli@suse.de>
+
+commit 4dc34ae1b45fe26e772a44379f936c72623dd407 upstream.
+
+Commit b144e45fc576 ("bcache: make bch_sectors_dirty_init() to be
+multithreaded") makes bch_sectors_dirty_init() to be much faster
+when counting dirty sectors by iterating all dirty keys in the btree.
+But it isn't in ideal shape yet, still can be improved.
+
+This patch does the following changes to improve current parallel dirty
+keys iteration on the btree,
+- Add read lock to root node when multiple threads iterating the btree,
+  to prevent the root node gets split by I/Os from other registered
+  bcache devices.
+- Remove local variable "char name[32]" and generate kernel thread name
+  string directly when calling kthread_run().
+- Allocate "struct bch_dirty_init_state state" directly on stack and
+  avoid the unnecessary dynamic memory allocation for it.
+- Decrease BCH_DIRTY_INIT_THRD_MAX from 64 to 12 which is enough indeed.
+- Increase &state->started to count created kernel thread after it
+  succeeds to create.
+- When wait for all dirty key counting threads to finish, use
+  wait_event() to replace wait_event_interruptible().
+
+With the above changes, the code is more clear, and some potential error
+conditions are avoided.
+
+Fixes: b144e45fc576 ("bcache: make bch_sectors_dirty_init() to be multithreaded")
+Signed-off-by: Coly Li <colyli@suse.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220524102336.10684-3-colyli@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/bcache/writeback.c |   60 ++++++++++++++++--------------------------
+ drivers/md/bcache/writeback.h |    2 -
+ 2 files changed, 25 insertions(+), 37 deletions(-)
+
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -948,10 +948,10 @@ void bch_sectors_dirty_init(struct bcach
+       struct btree_iter iter;
+       struct sectors_dirty_init op;
+       struct cache_set *c = d->c;
+-      struct bch_dirty_init_state *state;
+-      char name[32];
++      struct bch_dirty_init_state state;
+       /* Just count root keys if no leaf node */
++      rw_lock(0, c->root, c->root->level);
+       if (c->root->level == 0) {
+               bch_btree_op_init(&op.op, -1);
+               op.inode = d->id;
+@@ -961,54 +961,42 @@ void bch_sectors_dirty_init(struct bcach
+               for_each_key_filter(&c->root->keys,
+                                   k, &iter, bch_ptr_invalid)
+                       sectors_dirty_init_fn(&op.op, c->root, k);
++              rw_unlock(0, c->root);
+               return;
+       }
+-      state = kzalloc(sizeof(struct bch_dirty_init_state), GFP_KERNEL);
+-      if (!state) {
+-              pr_warn("sectors dirty init failed: cannot allocate memory\n");
+-              return;
+-      }
++      state.c = c;
++      state.d = d;
++      state.total_threads = bch_btre_dirty_init_thread_nr();
++      state.key_idx = 0;
++      spin_lock_init(&state.idx_lock);
++      atomic_set(&state.started, 0);
++      atomic_set(&state.enough, 0);
++      init_waitqueue_head(&state.wait);
+-      state->c = c;
+-      state->d = d;
+-      state->total_threads = bch_btre_dirty_init_thread_nr();
+-      state->key_idx = 0;
+-      spin_lock_init(&state->idx_lock);
+-      atomic_set(&state->started, 0);
+-      atomic_set(&state->enough, 0);
+-      init_waitqueue_head(&state->wait);
+-
+-      for (i = 0; i < state->total_threads; i++) {
+-              /* Fetch latest state->enough earlier */
++      for (i = 0; i < state.total_threads; i++) {
++              /* Fetch latest state.enough earlier */
+               smp_mb__before_atomic();
+-              if (atomic_read(&state->enough))
++              if (atomic_read(&state.enough))
+                       break;
+-              state->infos[i].state = state;
+-              atomic_inc(&state->started);
+-              snprintf(name, sizeof(name), "bch_dirty_init[%d]", i);
+-
+-              state->infos[i].thread =
+-                      kthread_run(bch_dirty_init_thread,
+-                                  &state->infos[i],
+-                                  name);
+-              if (IS_ERR(state->infos[i].thread)) {
++              state.infos[i].state = &state;
++              state.infos[i].thread =
++                      kthread_run(bch_dirty_init_thread, &state.infos[i],
++                                  "bch_dirtcnt[%d]", i);
++              if (IS_ERR(state.infos[i].thread)) {
+                       pr_err("fails to run thread bch_dirty_init[%d]\n", i);
+                       for (--i; i >= 0; i--)
+-                              kthread_stop(state->infos[i].thread);
++                              kthread_stop(state.infos[i].thread);
+                       goto out;
+               }
++              atomic_inc(&state.started);
+       }
+-      /*
+-       * Must wait for all threads to stop.
+-       */
+-      wait_event_interruptible(state->wait,
+-               atomic_read(&state->started) == 0);
+-
+ out:
+-      kfree(state);
++      /* Must wait for all threads to stop. */
++      wait_event(state.wait, atomic_read(&state.started) == 0);
++      rw_unlock(0, c->root);
+ }
+ void bch_cached_dev_writeback_init(struct cached_dev *dc)
+--- a/drivers/md/bcache/writeback.h
++++ b/drivers/md/bcache/writeback.h
+@@ -20,7 +20,7 @@
+ #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_MID 57
+ #define BCH_WRITEBACK_FRAGMENT_THRESHOLD_HIGH 64
+-#define BCH_DIRTY_INIT_THRD_MAX       64
++#define BCH_DIRTY_INIT_THRD_MAX       12
+ /*
+  * 14 (16384ths) is chosen here as something that each backing device
+  * should be a reasonable fraction of the share, and not to blow up
diff --git a/queue-5.18/bcache-remove-incremental-dirty-sector-counting-for-bch_sectors_dirty_init.patch b/queue-5.18/bcache-remove-incremental-dirty-sector-counting-for-bch_sectors_dirty_init.patch
new file mode 100644 (file)
index 0000000..282e861
--- /dev/null
@@ -0,0 +1,138 @@
+From 80db4e4707e78cb22287da7d058d7274bd4cb370 Mon Sep 17 00:00:00 2001
+From: Coly Li <colyli@suse.de>
+Date: Tue, 24 May 2022 18:23:35 +0800
+Subject: bcache: remove incremental dirty sector counting for bch_sectors_dirty_init()
+
+From: Coly Li <colyli@suse.de>
+
+commit 80db4e4707e78cb22287da7d058d7274bd4cb370 upstream.
+
+After making bch_sectors_dirty_init() being multithreaded, the existing
+incremental dirty sector counting in bch_root_node_dirty_init() doesn't
+release btree occupation after iterating 500000 (INIT_KEYS_EACH_TIME)
+bkeys. Because a read lock is added on btree root node to prevent the
+btree to be split during the dirty sectors counting, other I/O requester
+has no chance to gain the write lock even restart bcache_btree().
+
+That is to say, the incremental dirty sectors counting is incompatible
+to the multithreaded bch_sectors_dirty_init(). We have to choose one and
+drop another one.
+
+In my testing, with 512 bytes random writes, I generate 1.2T dirty data
+and a btree with 400K nodes. With single thread and incremental dirty
+sectors counting, it takes 30+ minutes to register the backing device.
+And with multithreaded dirty sectors counting, the backing device
+registration can be accomplished within 2 minutes.
+
+The 30+ minutes vs. 2 minutes difference makes me decide to keep
+multithreaded bch_sectors_dirty_init() and drop the incremental dirty
+sectors counting. This is what this patch does.
+
+But INIT_KEYS_EACH_TIME is kept, in sectors_dirty_init_fn() the CPU
+will be released by cond_resched() after every INIT_KEYS_EACH_TIME keys
+iterated. This is to avoid the watchdog reporting a bogus soft lockup
+warning.
+
+Fixes: b144e45fc576 ("bcache: make bch_sectors_dirty_init() to be multithreaded")
+Signed-off-by: Coly Li <colyli@suse.de>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220524102336.10684-4-colyli@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/bcache/writeback.c |   39 ++++++++++++---------------------------
+ 1 file changed, 12 insertions(+), 27 deletions(-)
+
+--- a/drivers/md/bcache/writeback.c
++++ b/drivers/md/bcache/writeback.c
+@@ -805,13 +805,11 @@ static int bch_writeback_thread(void *ar
+ /* Init */
+ #define INIT_KEYS_EACH_TIME   500000
+-#define INIT_KEYS_SLEEP_MS    100
+ struct sectors_dirty_init {
+       struct btree_op op;
+       unsigned int    inode;
+       size_t          count;
+-      struct bkey     start;
+ };
+ static int sectors_dirty_init_fn(struct btree_op *_op, struct btree *b,
+@@ -827,11 +825,8 @@ static int sectors_dirty_init_fn(struct
+                                            KEY_START(k), KEY_SIZE(k));
+       op->count++;
+-      if (atomic_read(&b->c->search_inflight) &&
+-          !(op->count % INIT_KEYS_EACH_TIME)) {
+-              bkey_copy_key(&op->start, k);
+-              return -EAGAIN;
+-      }
++      if (!(op->count % INIT_KEYS_EACH_TIME))
++              cond_resched();
+       return MAP_CONTINUE;
+ }
+@@ -846,24 +841,16 @@ static int bch_root_node_dirty_init(stru
+       bch_btree_op_init(&op.op, -1);
+       op.inode = d->id;
+       op.count = 0;
+-      op.start = KEY(op.inode, 0, 0);
+-      do {
+-              ret = bcache_btree(map_keys_recurse,
+-                                 k,
+-                                 c->root,
+-                                 &op.op,
+-                                 &op.start,
+-                                 sectors_dirty_init_fn,
+-                                 0);
+-              if (ret == -EAGAIN)
+-                      schedule_timeout_interruptible(
+-                              msecs_to_jiffies(INIT_KEYS_SLEEP_MS));
+-              else if (ret < 0) {
+-                      pr_warn("sectors dirty init failed, ret=%d!\n", ret);
+-                      break;
+-              }
+-      } while (ret == -EAGAIN);
++      ret = bcache_btree(map_keys_recurse,
++                         k,
++                         c->root,
++                         &op.op,
++                         &KEY(op.inode, 0, 0),
++                         sectors_dirty_init_fn,
++                         0);
++      if (ret < 0)
++              pr_warn("sectors dirty init failed, ret=%d!\n", ret);
+       return ret;
+ }
+@@ -907,7 +894,6 @@ static int bch_dirty_init_thread(void *a
+                               goto out;
+                       }
+                       skip_nr--;
+-                      cond_resched();
+               }
+               if (p) {
+@@ -917,7 +903,6 @@ static int bch_dirty_init_thread(void *a
+               p = NULL;
+               prev_idx = cur_idx;
+-              cond_resched();
+       }
+ out:
+@@ -956,11 +941,11 @@ void bch_sectors_dirty_init(struct bcach
+               bch_btree_op_init(&op.op, -1);
+               op.inode = d->id;
+               op.count = 0;
+-              op.start = KEY(op.inode, 0, 0);
+               for_each_key_filter(&c->root->keys,
+                                   k, &iter, bch_ptr_invalid)
+                       sectors_dirty_init_fn(&op.op, c->root, k);
++
+               rw_unlock(0, c->root);
+               return;
+       }
diff --git a/queue-5.18/carl9170-tx-fix-an-incorrect-use-of-list-iterator.patch b/queue-5.18/carl9170-tx-fix-an-incorrect-use-of-list-iterator.patch
new file mode 100644 (file)
index 0000000..58fba91
--- /dev/null
@@ -0,0 +1,44 @@
+From 54a6f29522da3c914da30e50721dedf51046449a Mon Sep 17 00:00:00 2001
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Date: Mon, 28 Mar 2022 20:28:20 +0800
+Subject: carl9170: tx: fix an incorrect use of list iterator
+
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+
+commit 54a6f29522da3c914da30e50721dedf51046449a upstream.
+
+If the previous list_for_each_entry_continue_rcu() don't exit early
+(no goto hit inside the loop), the iterator 'cvif' after the loop
+will be a bogus pointer to an invalid structure object containing
+the HEAD (&ar->vif_list). As a result, the use of 'cvif' after that
+will lead to an invalid memory access (i.e., 'cvif->id': the invalid
+pointer dereference when return back to/after the callsite in the
+carl9170_update_beacon()).
+
+The original intention should have been to return the valid 'cvif'
+when found in list, NULL otherwise. So just return NULL when no
+entry found, to fix this bug.
+
+Cc: stable@vger.kernel.org
+Fixes: 1f1d9654e183c ("carl9170: refactor carl9170_update_beacon")
+Signed-off-by: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Acked-by: Christian Lamparter <chunkeey@gmail.com>
+Signed-off-by: Kalle Valo <quic_kvalo@quicinc.com>
+Link: https://lore.kernel.org/r/20220328122820.1004-1-xiam0nd.tong@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/ath/carl9170/tx.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/wireless/ath/carl9170/tx.c
++++ b/drivers/net/wireless/ath/carl9170/tx.c
+@@ -1558,6 +1558,9 @@ static struct carl9170_vif_info *carl917
+                                       goto out;
+                       }
+               } while (ar->beacon_enabled && i--);
++
++              /* no entry found in list */
++              return NULL;
+       }
+ out:
diff --git a/queue-5.18/rtl818x-prevent-using-not-initialized-queues.patch b/queue-5.18/rtl818x-prevent-using-not-initialized-queues.patch
new file mode 100644 (file)
index 0000000..84507e7
--- /dev/null
@@ -0,0 +1,70 @@
+From 746285cf81dc19502ab238249d75f5990bd2d231 Mon Sep 17 00:00:00 2001
+From: Alexander Wetzel <alexander@wetzel-home.de>
+Date: Fri, 22 Apr 2022 16:52:28 +0200
+Subject: rtl818x: Prevent using not initialized queues
+
+From: Alexander Wetzel <alexander@wetzel-home.de>
+
+commit 746285cf81dc19502ab238249d75f5990bd2d231 upstream.
+
+Using not existing queues can panic the kernel with rtl8180/rtl8185 cards.
+Ignore the skb priority for those cards, they only have one tx queue. Pierre
+Asselin (pa@panix.com) reported the kernel crash in the Gentoo forum:
+
+https://forums.gentoo.org/viewtopic-t-1147832-postdays-0-postorder-asc-start-25.html
+
+He also confirmed that this patch fixes the issue. In summary this happened:
+
+After updating wpa_supplicant from 2.9 to 2.10 the kernel crashed with a
+"divide error: 0000" when connecting to an AP. Control port tx now tries to
+use IEEE80211_AC_VO for the priority, which wpa_supplicants starts to use in
+2.10.
+
+Since only the rtl8187se part of the driver supports QoS, the priority
+of the skb is set to IEEE80211_AC_BE (2) by mac80211 for rtl8180/rtl8185
+cards.
+
+rtl8180 is then unconditionally reading out the priority and finally crashes on
+drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c line 544 without this
+patch:
+       idx = (ring->idx + skb_queue_len(&ring->queue)) % ring->entries
+
+"ring->entries" is zero for rtl8180/rtl8185 cards, tx_ring[2] never got
+initialized.
+
+Cc: stable@vger.kernel.org
+Reported-by: pa@panix.com
+Tested-by: pa@panix.com
+Signed-off-by: Alexander Wetzel <alexander@wetzel-home.de>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/20220422145228.7567-1-alexander@wetzel-home.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c |    8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+--- a/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
++++ b/drivers/net/wireless/realtek/rtl818x/rtl8180/dev.c
+@@ -460,8 +460,10 @@ static void rtl8180_tx(struct ieee80211_
+       struct rtl8180_priv *priv = dev->priv;
+       struct rtl8180_tx_ring *ring;
+       struct rtl8180_tx_desc *entry;
++      unsigned int prio = 0;
+       unsigned long flags;
+-      unsigned int idx, prio, hw_prio;
++      unsigned int idx, hw_prio;
++
+       dma_addr_t mapping;
+       u32 tx_flags;
+       u8 rc_flags;
+@@ -470,7 +472,9 @@ static void rtl8180_tx(struct ieee80211_
+       /* do arithmetic and then convert to le16 */
+       u16 frame_duration = 0;
+-      prio = skb_get_queue_mapping(skb);
++      /* rtl8180/rtl8185 only has one useable tx queue */
++      if (dev->queues > IEEE80211_AC_BK)
++              prio = skb_get_queue_mapping(skb);
+       ring = &priv->tx_ring[prio];
+       mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
diff --git a/queue-5.18/serial-pch-don-t-overwrite-xmit-buf-by-x_char.patch b/queue-5.18/serial-pch-don-t-overwrite-xmit-buf-by-x_char.patch
new file mode 100644 (file)
index 0000000..2b48646
--- /dev/null
@@ -0,0 +1,79 @@
+From d9f3af4fbb1d955bbaf872d9e76502f6e3e803cb Mon Sep 17 00:00:00 2001
+From: Jiri Slaby <jslaby@suse.cz>
+Date: Tue, 3 May 2022 10:08:03 +0200
+Subject: serial: pch: don't overwrite xmit->buf[0] by x_char
+
+From: Jiri Slaby <jslaby@suse.cz>
+
+commit d9f3af4fbb1d955bbaf872d9e76502f6e3e803cb upstream.
+
+When x_char is to be sent, the TX path overwrites whatever is in the
+circular buffer at offset 0 with x_char and sends it using
+pch_uart_hal_write(). I don't understand how this was supposed to work
+if xmit->buf[0] already contained some character. It must have been
+lost.
+
+Remove this whole pop_tx_x() concept and do the work directly in the
+callers. (Without printing anything using dev_dbg().)
+
+Cc: <stable@vger.kernel.org>
+Fixes: 3c6a483275f4 (Serial: EG20T: add PCH_UART driver)
+Signed-off-by: Jiri Slaby <jslaby@suse.cz>
+Link: https://lore.kernel.org/r/20220503080808.28332-1-jslaby@suse.cz
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/tty/serial/pch_uart.c |   27 +++++++--------------------
+ 1 file changed, 7 insertions(+), 20 deletions(-)
+
+--- a/drivers/tty/serial/pch_uart.c
++++ b/drivers/tty/serial/pch_uart.c
+@@ -624,22 +624,6 @@ static int push_rx(struct eg20t_port *pr
+       return 0;
+ }
+-static int pop_tx_x(struct eg20t_port *priv, unsigned char *buf)
+-{
+-      int ret = 0;
+-      struct uart_port *port = &priv->port;
+-
+-      if (port->x_char) {
+-              dev_dbg(priv->port.dev, "%s:X character send %02x (%lu)\n",
+-                      __func__, port->x_char, jiffies);
+-              buf[0] = port->x_char;
+-              port->x_char = 0;
+-              ret = 1;
+-      }
+-
+-      return ret;
+-}
+-
+ static int dma_push_rx(struct eg20t_port *priv, int size)
+ {
+       int room;
+@@ -889,9 +873,10 @@ static unsigned int handle_tx(struct eg2
+       fifo_size = max(priv->fifo_size, 1);
+       tx_empty = 1;
+-      if (pop_tx_x(priv, xmit->buf)) {
+-              pch_uart_hal_write(priv, xmit->buf, 1);
++      if (port->x_char) {
++              pch_uart_hal_write(priv, &port->x_char, 1);
+               port->icount.tx++;
++              port->x_char = 0;
+               tx_empty = 0;
+               fifo_size--;
+       }
+@@ -946,9 +931,11 @@ static unsigned int dma_handle_tx(struct
+       }
+       fifo_size = max(priv->fifo_size, 1);
+-      if (pop_tx_x(priv, xmit->buf)) {
+-              pch_uart_hal_write(priv, xmit->buf, 1);
++
++      if (port->x_char) {
++              pch_uart_hal_write(priv, &port->x_char, 1);
+               port->icount.tx++;
++              port->x_char = 0;
+               fifo_size--;
+       }
index e4cf7b482d6f696ae98308ac6df29fcc4555d044..3b0a808d52660f7e70257fef09bff7fc45ed734f 100644 (file)
@@ -833,3 +833,13 @@ nodemask.h-fix-compilation-error-with-gcc12.patch
 hugetlb-fix-huge_pmd_unshare-address-update.patch
 mm-memremap-fix-missing-call-to-untrack_pfn-in-pagemap_range.patch
 xtensa-simdisk-fix-proc_read_simdisk.patch
+rtl818x-prevent-using-not-initialized-queues.patch
+asoc-rt5514-fix-event-generation-for-dsp-voice-wake-up-control.patch
+carl9170-tx-fix-an-incorrect-use-of-list-iterator.patch
+stm-ltdc-fix-two-incorrect-null-checks-on-list-iterator.patch
+bcache-improve-multithreaded-bch_btree_check.patch
+bcache-improve-multithreaded-bch_sectors_dirty_init.patch
+bcache-remove-incremental-dirty-sector-counting-for-bch_sectors_dirty_init.patch
+bcache-avoid-journal-no-space-deadlock-by-reserving-1-journal-bucket.patch
+serial-pch-don-t-overwrite-xmit-buf-by-x_char.patch
+tilcdc-tilcdc_external-fix-an-incorrect-null-check-on-list-iterator.patch
diff --git a/queue-5.18/stm-ltdc-fix-two-incorrect-null-checks-on-list-iterator.patch b/queue-5.18/stm-ltdc-fix-two-incorrect-null-checks-on-list-iterator.patch
new file mode 100644 (file)
index 0000000..fdbb46b
--- /dev/null
@@ -0,0 +1,70 @@
+From 2e6c86be0e57079d1fb6c7c7e5423db096d0548a Mon Sep 17 00:00:00 2001
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Date: Sun, 27 Mar 2022 13:53:55 +0800
+Subject: stm: ltdc: fix two incorrect NULL checks on list iterator
+
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+
+commit 2e6c86be0e57079d1fb6c7c7e5423db096d0548a upstream.
+
+The two bugs are here:
+       if (encoder) {
+       if (bridge && bridge->timings)
+
+The list iterator value 'encoder/bridge' will *always* be set and
+non-NULL by drm_for_each_encoder()/list_for_each_entry(), so it is
+incorrect to assume that the iterator value will be NULL if the
+list is empty or no element is found.
+
+To fix the bug, use a new variable '*_iter' as the list iterator,
+while use the old variable 'encoder/bridge' as a dedicated pointer
+to point to the found element.
+
+Cc: stable@vger.kernel.org
+Fixes: 99e360442f223 ("drm/stm: Fix bus_flags handling")
+Signed-off-by: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Acked-by: Raphael Gallais-Pou <raphael.gallais-pou@foss.st.com>
+Signed-off-by: Philippe Cornu <philippe.cornu@foss.st.com>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220327055355.3808-1-xiam0nd.tong@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/stm/ltdc.c |   16 ++++++++++------
+ 1 file changed, 10 insertions(+), 6 deletions(-)
+
+--- a/drivers/gpu/drm/stm/ltdc.c
++++ b/drivers/gpu/drm/stm/ltdc.c
+@@ -869,8 +869,8 @@ static void ltdc_crtc_mode_set_nofb(stru
+       struct drm_device *ddev = crtc->dev;
+       struct drm_connector_list_iter iter;
+       struct drm_connector *connector = NULL;
+-      struct drm_encoder *encoder = NULL;
+-      struct drm_bridge *bridge = NULL;
++      struct drm_encoder *encoder = NULL, *en_iter;
++      struct drm_bridge *bridge = NULL, *br_iter;
+       struct drm_display_mode *mode = &crtc->state->adjusted_mode;
+       u32 hsync, vsync, accum_hbp, accum_vbp, accum_act_w, accum_act_h;
+       u32 total_width, total_height;
+@@ -880,15 +880,19 @@ static void ltdc_crtc_mode_set_nofb(stru
+       int ret;
+       /* get encoder from crtc */
+-      drm_for_each_encoder(encoder, ddev)
+-              if (encoder->crtc == crtc)
++      drm_for_each_encoder(en_iter, ddev)
++              if (en_iter->crtc == crtc) {
++                      encoder = en_iter;
+                       break;
++              }
+       if (encoder) {
+               /* get bridge from encoder */
+-              list_for_each_entry(bridge, &encoder->bridge_chain, chain_node)
+-                      if (bridge->encoder == encoder)
++              list_for_each_entry(br_iter, &encoder->bridge_chain, chain_node)
++                      if (br_iter->encoder == encoder) {
++                              bridge = br_iter;
+                               break;
++                      }
+               /* Get the connector from encoder */
+               drm_connector_list_iter_begin(ddev, &iter);
diff --git a/queue-5.18/tilcdc-tilcdc_external-fix-an-incorrect-null-check-on-list-iterator.patch b/queue-5.18/tilcdc-tilcdc_external-fix-an-incorrect-null-check-on-list-iterator.patch
new file mode 100644 (file)
index 0000000..29889d7
--- /dev/null
@@ -0,0 +1,52 @@
+From 8b917cbe38e9b0d002492477a9fc2bfee2412ce4 Mon Sep 17 00:00:00 2001
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Date: Sun, 27 Mar 2022 14:15:16 +0800
+Subject: tilcdc: tilcdc_external: fix an incorrect NULL check on list iterator
+
+From: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+
+commit 8b917cbe38e9b0d002492477a9fc2bfee2412ce4 upstream.
+
+The bug is here:
+       if (!encoder) {
+
+The list iterator value 'encoder' will *always* be set and non-NULL
+by list_for_each_entry(), so it is incorrect to assume that the
+iterator value will be NULL if the list is empty or no element
+is found.
+
+To fix the bug, use a new variable 'iter' as the list iterator,
+while use the original variable 'encoder' as a dedicated pointer
+to point to the found element.
+
+Cc: stable@vger.kernel.org
+Fixes: ec9eab097a500 ("drm/tilcdc: Add drm bridge support for attaching drm bridge drivers")
+Signed-off-by: Xiaomeng Tong <xiam0nd.tong@gmail.com>
+Reviewed-by: Jyri Sarha <jyri.sarha@iki.fi>
+Tested-by: Jyri Sarha <jyri.sarha@iki.fi>
+Signed-off-by: Jyri Sarha <jyri.sarha@iki.fi>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220327061516.5076-1-xiam0nd.tong@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/tilcdc/tilcdc_external.c |    8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+--- a/drivers/gpu/drm/tilcdc/tilcdc_external.c
++++ b/drivers/gpu/drm/tilcdc/tilcdc_external.c
+@@ -60,11 +60,13 @@ struct drm_connector *tilcdc_encoder_fin
+ int tilcdc_add_component_encoder(struct drm_device *ddev)
+ {
+       struct tilcdc_drm_private *priv = ddev->dev_private;
+-      struct drm_encoder *encoder;
++      struct drm_encoder *encoder = NULL, *iter;
+-      list_for_each_entry(encoder, &ddev->mode_config.encoder_list, head)
+-              if (encoder->possible_crtcs & (1 << priv->crtc->index))
++      list_for_each_entry(iter, &ddev->mode_config.encoder_list, head)
++              if (iter->possible_crtcs & (1 << priv->crtc->index)) {
++                      encoder = iter;
+                       break;
++              }
+       if (!encoder) {
+               dev_err(ddev->dev, "%s: No suitable encoder found\n", __func__);