--- /dev/null
+From 976570b4ecd30d3ec6e1b0910da8e5edc591f2b6 Mon Sep 17 00:00:00 2001
+From: Gabriel Krisman Bertazi <krisman@suse.de>
+Date: Tue, 15 Nov 2022 17:45:51 -0500
+Subject: sbitmap: Advance the queue index before waking up a queue
+
+From: Gabriel Krisman Bertazi <krisman@suse.de>
+
+commit 976570b4ecd30d3ec6e1b0910da8e5edc591f2b6 upstream.
+
+When a queue is woken up, the wake_index written by sbq_wake_ptr currently
+keeps pointing to the same queue. On the next wake up, it will thus
+retry the same queue, which is unfair to other queues and can lead to
+starvation. This patch moves the index update to happen before the
+queue is returned, such that it will now try a different queue first on
+the next wake up, improving fairness.
+
+Fixes: 4f8126bb2308 ("sbitmap: Use single per-bitmap counting to wake up queued tags")
+Reported-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
+Link: https://lore.kernel.org/r/20221115224553.23594-2-krisman@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/sbitmap.c | 10 ++++++++--
+ 1 file changed, 8 insertions(+), 2 deletions(-)
+
+--- a/lib/sbitmap.c
++++ b/lib/sbitmap.c
+@@ -566,13 +566,19 @@ static struct sbq_wait_state *sbq_wake_p
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+ struct sbq_wait_state *ws = &sbq->ws[wake_index];
+
++ /*
++ * Advance the index before checking the current queue.
++ * It improves fairness, by ensuring the queue doesn't
++ * need to be fully emptied before trying to wake up
++ * from the next one.
++ */
++ wake_index = sbq_index_inc(wake_index);
++
+ if (waitqueue_active(&ws->wait)) {
+ if (wake_index != atomic_read(&sbq->wake_index))
+ atomic_set(&sbq->wake_index, wake_index);
+ return ws;
+ }
+-
+- wake_index = sbq_index_inc(wake_index);
+ }
+
+ return NULL;
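
A minimal userspace sketch of the ordering the patch above establishes: the
queue is picked at the current index, the index is then advanced, and only
afterwards is the queue tested, so the index that gets published already
points past the queue being woken. NUM_QUEUES, queue_active[] and
pick_queue() are illustrative stand-ins for SBQ_WAIT_QUEUES,
waitqueue_active() and sbq_wake_ptr(); only the increment-before-check
ordering mirrors the kernel change.

    #include <stdbool.h>
    #include <stdio.h>

    #define NUM_QUEUES 8

    static bool queue_active[NUM_QUEUES];
    static int wake_index;

    /* Return the index of an active queue, or -1 if none is active. */
    static int pick_queue(void)
    {
            int i, idx = wake_index;

            for (i = 0; i < NUM_QUEUES; i++) {
                    int cur = idx;

                    /*
                     * Advance before checking, as in the patch: the next
                     * call starts from the queue after the one returned,
                     * so one busy queue cannot monopolize the wake ups.
                     */
                    idx = (idx + 1) % NUM_QUEUES;

                    if (queue_active[cur]) {
                            wake_index = idx;
                            return cur;
                    }
            }
            return -1;
    }

    int main(void)
    {
            int n;

            queue_active[2] = queue_active[5] = true;

            /* With two active queues, successive calls alternate 2, 5, 2, 5. */
            for (n = 0; n < 4; n++)
                    printf("picked queue %d\n", pick_queue());
            return 0;
    }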
--- /dev/null
+From 26edb30dd1c0c9be11fa676b4f330ada7b794ba6 Mon Sep 17 00:00:00 2001
+From: Gabriel Krisman Bertazi <krisman@suse.de>
+Date: Tue, 15 Nov 2022 17:45:53 -0500
+Subject: sbitmap: Try each queue to wake up at least one waiter
+
+From: Gabriel Krisman Bertazi <krisman@suse.de>
+
+commit 26edb30dd1c0c9be11fa676b4f330ada7b794ba6 upstream.
+
+Jan reported that the new algorithm, as merged, might be problematic if
+the queue being woken up becomes empty between the waitqueue_active
+check inside sbq_wake_ptr and the wake up. If that happens, wake_up_nr
+will not wake up any waiter and we lose too many wake ups. In order to
+guarantee progress, we need to wake up at least one waiter here, if
+there are any. This now requires trying to wake up from every queue.
+
+Instead of walking through all the queues with sbq_wake_ptr, this patch
+moves the wake up inside that function. In a previous version of the
+patch, I found that updating wake_index several times when walking
+through queues had a measurable overhead. This ensures we only update
+it once, at the end.
+
+Fixes: 4f8126bb2308 ("sbitmap: Use single per-bitmap counting to wake up queued tags")
+Reported-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20221115224553.23594-4-krisman@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ lib/sbitmap.c | 28 ++++++++++++----------------
+ 1 file changed, 12 insertions(+), 16 deletions(-)
+
+--- a/lib/sbitmap.c
++++ b/lib/sbitmap.c
+@@ -555,12 +555,12 @@ void sbitmap_queue_min_shallow_depth(str
+ }
+ EXPORT_SYMBOL_GPL(sbitmap_queue_min_shallow_depth);
+
+-static struct sbq_wait_state *sbq_wake_ptr(struct sbitmap_queue *sbq)
++static void __sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
+ {
+ int i, wake_index;
+
+ if (!atomic_read(&sbq->ws_active))
+- return NULL;
++ return;
+
+ wake_index = atomic_read(&sbq->wake_index);
+ for (i = 0; i < SBQ_WAIT_QUEUES; i++) {
+@@ -574,20 +574,22 @@ static struct sbq_wait_state *sbq_wake_p
+ */
+ wake_index = sbq_index_inc(wake_index);
+
+- if (waitqueue_active(&ws->wait)) {
+- if (wake_index != atomic_read(&sbq->wake_index))
+- atomic_set(&sbq->wake_index, wake_index);
+- return ws;
+- }
++ /*
++ * It is sufficient to wake up at least one waiter to
++ * guarantee forward progress.
++ */
++ if (waitqueue_active(&ws->wait) &&
++ wake_up_nr(&ws->wait, nr))
++ break;
+ }
+
+- return NULL;
++ if (wake_index != atomic_read(&sbq->wake_index))
++ atomic_set(&sbq->wake_index, wake_index);
+ }
+
+ void sbitmap_queue_wake_up(struct sbitmap_queue *sbq, int nr)
+ {
+ unsigned int wake_batch = READ_ONCE(sbq->wake_batch);
+- struct sbq_wait_state *ws = NULL;
+ unsigned int wakeups;
+
+ if (!atomic_read(&sbq->ws_active))
+@@ -599,16 +601,10 @@ void sbitmap_queue_wake_up(struct sbitma
+ do {
+ if (atomic_read(&sbq->completion_cnt) - wakeups < wake_batch)
+ return;
+-
+- if (!ws) {
+- ws = sbq_wake_ptr(sbq);
+- if (!ws)
+- return;
+- }
+ } while (!atomic_try_cmpxchg(&sbq->wakeup_cnt,
+ &wakeups, wakeups + wake_batch));
+
+- wake_up_nr(&ws->wait, wake_batch);
++ __sbitmap_queue_wake_up(sbq, wake_batch);
+ }
+ EXPORT_SYMBOL_GPL(sbitmap_queue_wake_up);
+
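
A userspace sketch of the loop the patch above introduces: the walk keeps
trying queues until at least one waiter is actually woken, and the wake
index is published once, after the walk. try_wake() stands in for
wake_up_nr() and is assumed, as in this series, to report how many waiters
it woke; waiters[], wake_batch() and NUM_QUEUES are illustrative rather
than kernel API.

    #include <stdio.h>

    #define NUM_QUEUES 8

    static int waiters[NUM_QUEUES];     /* pretend per-queue waiter counts */
    static int wake_index;

    /* Wake up to nr waiters on queue q; return how many were woken. */
    static int try_wake(int q, int nr)
    {
            int woken = waiters[q] < nr ? waiters[q] : nr;

            waiters[q] -= woken;
            return woken;
    }

    static void wake_batch(int nr)
    {
            int i, idx = wake_index;

            for (i = 0; i < NUM_QUEUES; i++) {
                    int cur = idx;

                    idx = (idx + 1) % NUM_QUEUES;

                    /*
                     * Keep trying queues until at least one waiter is
                     * woken, so a queue that emptied between the check
                     * and the wake up cannot swallow the whole batch.
                     */
                    if (try_wake(cur, nr))
                            break;
            }

            /* Publish the index once, at the end of the walk. */
            wake_index = idx;
    }

    int main(void)
    {
            waiters[3] = 2;     /* queues 0-2 are empty, queue 3 is not */

            wake_batch(4);
            printf("wake_index is now %d, queue 3 has %d waiter(s) left\n",
                   wake_index, waiters[3]);
            return 0;
    }

Note that the kernel version only writes sbq->wake_index back when it has
actually changed, to avoid dirtying the cache line needlessly; the sketch
skips that detail.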
drm-edid-fix-parsing-of-3d-modes-from-hdmi-vsdb.patch
qede-avoid-uninitialized-entries-in-coal_entry-array.patch
brd-use-radix_tree_maybe_preload-instead-of-radix_tree_preload.patch
+sbitmap-advance-the-queue-index-before-waking-up-a-queue.patch
+wait-return-number-of-exclusive-waiters-awaken.patch
+sbitmap-try-each-queue-to-wake-up-at-least-one-waiter.patch
--- /dev/null
+From ee7dc86b6d3e3b86c2c487f713eda657850de238 Mon Sep 17 00:00:00 2001
+From: Gabriel Krisman Bertazi <krisman@suse.de>
+Date: Tue, 15 Nov 2022 17:45:52 -0500
+Subject: wait: Return number of exclusive waiters awaken
+
+From: Gabriel Krisman Bertazi <krisman@suse.de>
+
+commit ee7dc86b6d3e3b86c2c487f713eda657850de238 upstream.
+
+Sbitmap code will need to know how many waiters were actually woken for
+its batched wakeups implementation. Return the number of woken
+exclusive waiters from __wake_up() to facilitate that.
+
+Suggested-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Gabriel Krisman Bertazi <krisman@suse.de>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://lore.kernel.org/r/20221115224553.23594-3-krisman@suse.de
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/wait.h | 2 +-
+ kernel/sched/wait.c | 18 +++++++++++-------
+ 2 files changed, 12 insertions(+), 8 deletions(-)
+
+--- a/include/linux/wait.h
++++ b/include/linux/wait.h
+@@ -209,7 +209,7 @@ __remove_wait_queue(struct wait_queue_he
+ list_del(&wq_entry->entry);
+ }
+
+-void __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
++int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
+ void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
+ void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
+ unsigned int mode, void *key, wait_queue_entry_t *bookmark);
+--- a/kernel/sched/wait.c
++++ b/kernel/sched/wait.c
+@@ -121,11 +121,12 @@ static int __wake_up_common(struct wait_
+ return nr_exclusive;
+ }
+
+-static void __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
++static int __wake_up_common_lock(struct wait_queue_head *wq_head, unsigned int mode,
+ int nr_exclusive, int wake_flags, void *key)
+ {
+ unsigned long flags;
+ wait_queue_entry_t bookmark;
++ int remaining = nr_exclusive;
+
+ bookmark.flags = 0;
+ bookmark.private = NULL;
+@@ -134,10 +135,12 @@ static void __wake_up_common_lock(struct
+
+ do {
+ spin_lock_irqsave(&wq_head->lock, flags);
+- nr_exclusive = __wake_up_common(wq_head, mode, nr_exclusive,
++ remaining = __wake_up_common(wq_head, mode, remaining,
+ wake_flags, key, &bookmark);
+ spin_unlock_irqrestore(&wq_head->lock, flags);
+ } while (bookmark.flags & WQ_FLAG_BOOKMARK);
++
++ return nr_exclusive - remaining;
+ }
+
+ /**
+@@ -147,13 +150,14 @@ static void __wake_up_common_lock(struct
+ * @nr_exclusive: how many wake-one or wake-many threads to wake up
+ * @key: is directly passed to the wakeup function
+ *
+- * If this function wakes up a task, it executes a full memory barrier before
+- * accessing the task state.
++ * If this function wakes up a task, it executes a full memory barrier
++ * before accessing the task state. Returns the number of exclusive
++ * tasks that were awaken.
+ */
+-void __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
+- int nr_exclusive, void *key)
++int __wake_up(struct wait_queue_head *wq_head, unsigned int mode,
++ int nr_exclusive, void *key)
+ {
+- __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
++ return __wake_up_common_lock(wq_head, mode, nr_exclusive, 0, key);
+ }
+ EXPORT_SYMBOL(__wake_up);
+
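
An illustrative userspace sketch of why the return value added above
matters to a caller such as sbitmap: when the wake up reports that nothing
was woken (the queue emptied in the meantime), the caller can move on to
another queue instead of losing the batch. wake_exclusive() and pending[]
are hypothetical stand-ins, not __wake_up() or any kernel data structure.

    #include <stdio.h>

    static int pending[2] = { 0, 3 };   /* pretend waiters per queue */

    /* Wake at most nr waiters on queue q; report how many were woken. */
    static int wake_exclusive(int q, int nr)
    {
            int woken = pending[q] < nr ? pending[q] : nr;

            pending[q] -= woken;
            return woken;
    }

    int main(void)
    {
            int batch = 2, q;

            /*
             * Queue 0 is already empty. With a void return the caller
             * could not tell and the whole batch would be lost; with
             * the count it simply tries the next queue.
             */
            for (q = 0; q < 2; q++) {
                    int woken = wake_exclusive(q, batch);

                    if (woken) {
                            printf("woke %d waiter(s) on queue %d\n",
                                   woken, q);
                            break;
                    }
            }
            return 0;
    }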