git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
5.15-stable patches
author		Greg Kroah-Hartman <gregkh@linuxfoundation.org>	Mon, 6 Jun 2022 11:59:22 +0000 (13:59 +0200)
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	Mon, 6 Jun 2022 11:59:22 +0000 (13:59 +0200)
added patches:
bfq-avoid-false-marking-of-bic-as-stably-merged.patch
bfq-avoid-merging-queues-with-different-parents.patch
bfq-drop-pointless-unlock-lock-pair.patch
bfq-get-rid-of-__bio_blkcg-usage.patch
bfq-make-sure-bfqg-for-which-we-are-queueing-requests-is-online.patch
bfq-remove-pointless-bfq_init_rq-calls.patch
bfq-split-shared-queues-on-move-between-cgroups.patch
bfq-track-whether-bfq_group-is-still-online.patch
bfq-update-cgroup-information-before-merging-bio.patch
efi-do-not-import-certificates-from-uefi-secure-boot-for-t2-macs.patch
fs-writeback-writeback_sb_inodes-recalculate-wrote-according-skipped-pages.patch
iwlwifi-mvm-fix-assert-1f04-upon-reconfig.patch
objtool-fix-objtool-regression-on-x32-systems.patch
objtool-fix-symbol-creation.patch
wifi-mac80211-fix-use-after-free-in-chanctx-code.patch

16 files changed:
queue-5.15/bfq-avoid-false-marking-of-bic-as-stably-merged.patch [new file with mode: 0644]
queue-5.15/bfq-avoid-merging-queues-with-different-parents.patch [new file with mode: 0644]
queue-5.15/bfq-drop-pointless-unlock-lock-pair.patch [new file with mode: 0644]
queue-5.15/bfq-get-rid-of-__bio_blkcg-usage.patch [new file with mode: 0644]
queue-5.15/bfq-make-sure-bfqg-for-which-we-are-queueing-requests-is-online.patch [new file with mode: 0644]
queue-5.15/bfq-remove-pointless-bfq_init_rq-calls.patch [new file with mode: 0644]
queue-5.15/bfq-split-shared-queues-on-move-between-cgroups.patch [new file with mode: 0644]
queue-5.15/bfq-track-whether-bfq_group-is-still-online.patch [new file with mode: 0644]
queue-5.15/bfq-update-cgroup-information-before-merging-bio.patch [new file with mode: 0644]
queue-5.15/efi-do-not-import-certificates-from-uefi-secure-boot-for-t2-macs.patch [new file with mode: 0644]
queue-5.15/fs-writeback-writeback_sb_inodes-recalculate-wrote-according-skipped-pages.patch [new file with mode: 0644]
queue-5.15/iwlwifi-mvm-fix-assert-1f04-upon-reconfig.patch [new file with mode: 0644]
queue-5.15/objtool-fix-objtool-regression-on-x32-systems.patch [new file with mode: 0644]
queue-5.15/objtool-fix-symbol-creation.patch [new file with mode: 0644]
queue-5.15/series
queue-5.15/wifi-mac80211-fix-use-after-free-in-chanctx-code.patch [new file with mode: 0644]

diff --git a/queue-5.15/bfq-avoid-false-marking-of-bic-as-stably-merged.patch b/queue-5.15/bfq-avoid-false-marking-of-bic-as-stably-merged.patch
new file mode 100644 (file)
index 0000000..4496b28
--- /dev/null
@@ -0,0 +1,44 @@
+From 70456e5210f40ffdb8f6d905acfdcec5bd5fad9e Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 1 Apr 2022 12:27:42 +0200
+Subject: bfq: Avoid false marking of bic as stably merged
+
+From: Jan Kara <jack@suse.cz>
+
+commit 70456e5210f40ffdb8f6d905acfdcec5bd5fad9e upstream.
+
+bfq_setup_cooperator() can mark bic as stably merged even though it
+decides to not merge its bfqqs (when bfq_setup_merge() returns NULL).
+Make sure to mark bic as stably merged only if we are really going to
+merge bfqqs.
+
+CC: stable@vger.kernel.org
+Tested-by: "yukuai (C)" <yukuai3@huawei.com>
+Fixes: 430a67f9d616 ("block, bfq: merge bursts of newly-created queues")
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20220401102752.8599-1-jack@suse.cz
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bfq-iosched.c |    9 ++++++---
+ 1 file changed, 6 insertions(+), 3 deletions(-)
+
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2773,9 +2773,12 @@ bfq_setup_cooperator(struct bfq_data *bf
+                               struct bfq_queue *new_bfqq =
+                                       bfq_setup_merge(bfqq, stable_merge_bfqq);
+-                              bic->stably_merged = true;
+-                              if (new_bfqq && new_bfqq->bic)
+-                                      new_bfqq->bic->stably_merged = true;
++                              if (new_bfqq) {
++                                      bic->stably_merged = true;
++                                      if (new_bfqq->bic)
++                                              new_bfqq->bic->stably_merged =
++                                                                      true;
++                              }
+                               return new_bfqq;
+                       } else
+                               return NULL;
diff --git a/queue-5.15/bfq-avoid-merging-queues-with-different-parents.patch b/queue-5.15/bfq-avoid-merging-queues-with-different-parents.patch
new file mode 100644 (file)
index 0000000..ea522ba
--- /dev/null
@@ -0,0 +1,83 @@
+From c1cee4ab36acef271be9101590756ed0c0c374d9 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 1 Apr 2022 12:27:43 +0200
+Subject: bfq: Avoid merging queues with different parents
+
+From: Jan Kara <jack@suse.cz>
+
+commit c1cee4ab36acef271be9101590756ed0c0c374d9 upstream.
+
+It can happen that the parent of a bfqq changes between the moment we
+decide two queues are worth to merge (and set bic->stable_merge_bfqq)
+and the moment bfq_setup_merge() is called. This can happen e.g. because
+the process submitted IO for a different cgroup and thus bfqq got
+reparented. It can even happen that the bfqq we are merging with has
+parent cgroup that is already offline and going to be destroyed in which
+case the merge can lead to use-after-free issues such as:
+
+BUG: KASAN: use-after-free in __bfq_deactivate_entity+0x9cb/0xa50
+Read of size 8 at addr ffff88800693c0c0 by task runc:[2:INIT]/10544
+
+CPU: 0 PID: 10544 Comm: runc:[2:INIT] Tainted: G            E     5.15.2-0.g5fb85fd-default #1 openSUSE Tumbleweed (unreleased) f1f3b891c72369aebecd2e43e4641a6358867c70
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a-rebuilt.opensuse.org 04/01/2014
+Call Trace:
+ <IRQ>
+ dump_stack_lvl+0x46/0x5a
+ print_address_description.constprop.0+0x1f/0x140
+ ? __bfq_deactivate_entity+0x9cb/0xa50
+ kasan_report.cold+0x7f/0x11b
+ ? __bfq_deactivate_entity+0x9cb/0xa50
+ __bfq_deactivate_entity+0x9cb/0xa50
+ ? update_curr+0x32f/0x5d0
+ bfq_deactivate_entity+0xa0/0x1d0
+ bfq_del_bfqq_busy+0x28a/0x420
+ ? resched_curr+0x116/0x1d0
+ ? bfq_requeue_bfqq+0x70/0x70
+ ? check_preempt_wakeup+0x52b/0xbc0
+ __bfq_bfqq_expire+0x1a2/0x270
+ bfq_bfqq_expire+0xd16/0x2160
+ ? try_to_wake_up+0x4ee/0x1260
+ ? bfq_end_wr_async_queues+0xe0/0xe0
+ ? _raw_write_unlock_bh+0x60/0x60
+ ? _raw_spin_lock_irq+0x81/0xe0
+ bfq_idle_slice_timer+0x109/0x280
+ ? bfq_dispatch_request+0x4870/0x4870
+ __hrtimer_run_queues+0x37d/0x700
+ ? enqueue_hrtimer+0x1b0/0x1b0
+ ? kvm_clock_get_cycles+0xd/0x10
+ ? ktime_get_update_offsets_now+0x6f/0x280
+ hrtimer_interrupt+0x2c8/0x740
+
+Fix the problem by checking that the parent of the two bfqqs we are
+merging in bfq_setup_merge() is the same.
+
+Link: https://lore.kernel.org/linux-block/20211125172809.GC19572@quack2.suse.cz/
+CC: stable@vger.kernel.org
+Fixes: 430a67f9d616 ("block, bfq: merge bursts of newly-created queues")
+Tested-by: "yukuai (C)" <yukuai3@huawei.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20220401102752.8599-2-jack@suse.cz
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bfq-iosched.c |    8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2636,6 +2636,14 @@ bfq_setup_merge(struct bfq_queue *bfqq,
+       if (process_refs == 0 || new_process_refs == 0)
+               return NULL;
++      /*
++       * Make sure merged queues belong to the same parent. Parents could
++       * have changed since the time we decided the two queues are suitable
++       * for merging.
++       */
++      if (new_bfqq->entity.parent != bfqq->entity.parent)
++              return NULL;
++
+       bfq_log_bfqq(bfqq->bfqd, bfqq, "scheduling merge with queue %d",
+               new_bfqq->pid);
diff --git a/queue-5.15/bfq-drop-pointless-unlock-lock-pair.patch b/queue-5.15/bfq-drop-pointless-unlock-lock-pair.patch
new file mode 100644 (file)
index 0000000..b25f72b
--- /dev/null
@@ -0,0 +1,39 @@
+From fc84e1f941b91221092da5b3102ec82da24c5673 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 1 Apr 2022 12:27:46 +0200
+Subject: bfq: Drop pointless unlock-lock pair
+
+From: Jan Kara <jack@suse.cz>
+
+commit fc84e1f941b91221092da5b3102ec82da24c5673 upstream.
+
+In bfq_insert_request() we unlock bfqd->lock only to call
+trace_block_rq_insert() and then lock bfqd->lock again. This is really
+pointless since tracing is disabled if we really care about performance
+and even if the tracepoint is enabled, it is a quick call.
+
+CC: stable@vger.kernel.org
+Tested-by: "yukuai (C)" <yukuai3@huawei.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20220401102752.8599-5-jack@suse.cz
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bfq-iosched.c |    3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -6012,11 +6012,8 @@ static void bfq_insert_request(struct bl
+               return;
+       }
+-      spin_unlock_irq(&bfqd->lock);
+-
+       trace_block_rq_insert(rq);
+-      spin_lock_irq(&bfqd->lock);
+       bfqq = bfq_init_rq(rq);
+       if (!bfqq || at_head) {
+               if (at_head)
diff --git a/queue-5.15/bfq-get-rid-of-__bio_blkcg-usage.patch b/queue-5.15/bfq-get-rid-of-__bio_blkcg-usage.patch
new file mode 100644 (file)
index 0000000..5ec757a
--- /dev/null
@@ -0,0 +1,197 @@
+From 4e54a2493e582361adc3bfbf06c7d50d19d18837 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 1 Apr 2022 12:27:49 +0200
+Subject: bfq: Get rid of __bio_blkcg() usage
+
+From: Jan Kara <jack@suse.cz>
+
+commit 4e54a2493e582361adc3bfbf06c7d50d19d18837 upstream.
+
+BFQ usage of __bio_blkcg() is a relict from the past. Furthermore if bio
+would not be associated with any blkcg, the usage of __bio_blkcg() in
+BFQ is prone to races with the task being migrated between cgroups as
+__bio_blkcg() calls at different places could return different blkcgs.
+
+Convert BFQ to the new situation where bio->bi_blkg is initialized in
+bio_set_dev() and thus practically always valid. This allows us to save
+blkcg_gq lookup and noticeably simplify the code.
+
+CC: stable@vger.kernel.org
+Fixes: 0fe061b9f03c ("blkcg: fix ref count issue with bio_blkcg() using task_css")
+Tested-by: "yukuai (C)" <yukuai3@huawei.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20220401102752.8599-8-jack@suse.cz
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bfq-cgroup.c  |   63 ++++++++++++++++++----------------------------------
+ block/bfq-iosched.c |   11 ---------
+ block/bfq-iosched.h |    3 --
+ 3 files changed, 25 insertions(+), 52 deletions(-)
+
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -584,27 +584,11 @@ static void bfq_group_set_parent(struct
+       entity->sched_data = &parent->sched_data;
+ }
+-static struct bfq_group *bfq_lookup_bfqg(struct bfq_data *bfqd,
+-                                       struct blkcg *blkcg)
++static void bfq_link_bfqg(struct bfq_data *bfqd, struct bfq_group *bfqg)
+ {
+-      struct blkcg_gq *blkg;
+-
+-      blkg = blkg_lookup(blkcg, bfqd->queue);
+-      if (likely(blkg))
+-              return blkg_to_bfqg(blkg);
+-      return NULL;
+-}
+-
+-struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
+-                                   struct blkcg *blkcg)
+-{
+-      struct bfq_group *bfqg, *parent;
++      struct bfq_group *parent;
+       struct bfq_entity *entity;
+-      bfqg = bfq_lookup_bfqg(bfqd, blkcg);
+-      if (unlikely(!bfqg))
+-              return NULL;
+-
+       /*
+        * Update chain of bfq_groups as we might be handling a leaf group
+        * which, along with some of its relatives, has not been hooked yet
+@@ -621,8 +605,15 @@ struct bfq_group *bfq_find_set_group(str
+                       bfq_group_set_parent(curr_bfqg, parent);
+               }
+       }
++}
+-      return bfqg;
++struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
++{
++      struct blkcg_gq *blkg = bio->bi_blkg;
++
++      if (!blkg)
++              return bfqd->root_group;
++      return blkg_to_bfqg(blkg);
+ }
+ /**
+@@ -704,25 +695,15 @@ void bfq_bfqq_move(struct bfq_data *bfqd
+  * Move bic to blkcg, assuming that bfqd->lock is held; which makes
+  * sure that the reference to cgroup is valid across the call (see
+  * comments in bfq_bic_update_cgroup on this issue)
+- *
+- * NOTE: an alternative approach might have been to store the current
+- * cgroup in bfqq and getting a reference to it, reducing the lookup
+- * time here, at the price of slightly more complex code.
+  */
+-static struct bfq_group *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
+-                                              struct bfq_io_cq *bic,
+-                                              struct blkcg *blkcg)
++static void *__bfq_bic_change_cgroup(struct bfq_data *bfqd,
++                                   struct bfq_io_cq *bic,
++                                   struct bfq_group *bfqg)
+ {
+       struct bfq_queue *async_bfqq = bic_to_bfqq(bic, 0);
+       struct bfq_queue *sync_bfqq = bic_to_bfqq(bic, 1);
+-      struct bfq_group *bfqg;
+       struct bfq_entity *entity;
+-      bfqg = bfq_find_set_group(bfqd, blkcg);
+-
+-      if (unlikely(!bfqg))
+-              bfqg = bfqd->root_group;
+-
+       if (async_bfqq) {
+               entity = &async_bfqq->entity;
+@@ -774,20 +755,24 @@ static struct bfq_group *__bfq_bic_chang
+ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio)
+ {
+       struct bfq_data *bfqd = bic_to_bfqd(bic);
+-      struct bfq_group *bfqg = NULL;
++      struct bfq_group *bfqg = bfq_bio_bfqg(bfqd, bio);
+       uint64_t serial_nr;
+-      rcu_read_lock();
+-      serial_nr = __bio_blkcg(bio)->css.serial_nr;
++      serial_nr = bfqg_to_blkg(bfqg)->blkcg->css.serial_nr;
+       /*
+        * Check whether blkcg has changed.  The condition may trigger
+        * spuriously on a newly created cic but there's no harm.
+        */
+       if (unlikely(!bfqd) || likely(bic->blkcg_serial_nr == serial_nr))
+-              goto out;
++              return;
+-      bfqg = __bfq_bic_change_cgroup(bfqd, bic, __bio_blkcg(bio));
++      /*
++       * New cgroup for this process. Make sure it is linked to bfq internal
++       * cgroup hierarchy.
++       */
++      bfq_link_bfqg(bfqd, bfqg);
++      __bfq_bic_change_cgroup(bfqd, bic, bfqg);
+       /*
+        * Update blkg_path for bfq_log_* functions. We cache this
+        * path, and update it here, for the following
+@@ -840,8 +825,6 @@ void bfq_bic_update_cgroup(struct bfq_io
+        */
+       blkg_path(bfqg_to_blkg(bfqg), bfqg->blkg_path, sizeof(bfqg->blkg_path));
+       bic->blkcg_serial_nr = serial_nr;
+-out:
+-      rcu_read_unlock();
+ }
+ /**
+@@ -1459,7 +1442,7 @@ void bfq_end_wr_async(struct bfq_data *b
+       bfq_end_wr_async_queues(bfqd, bfqd->root_group);
+ }
+-struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd, struct blkcg *blkcg)
++struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
+ {
+       return bfqd->root_group;
+ }
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -5604,14 +5604,7 @@ static struct bfq_queue *bfq_get_queue(s
+       struct bfq_queue *bfqq;
+       struct bfq_group *bfqg;
+-      rcu_read_lock();
+-
+-      bfqg = bfq_find_set_group(bfqd, __bio_blkcg(bio));
+-      if (!bfqg) {
+-              bfqq = &bfqd->oom_bfqq;
+-              goto out;
+-      }
+-
++      bfqg = bfq_bio_bfqg(bfqd, bio);
+       if (!is_sync) {
+               async_bfqq = bfq_async_queue_prio(bfqd, bfqg, ioprio_class,
+                                                 ioprio);
+@@ -5657,8 +5650,6 @@ out:
+       if (bfqq != &bfqd->oom_bfqq && is_sync && !respawn)
+               bfqq = bfq_do_or_sched_stable_merge(bfqd, bfqq, bic);
+-
+-      rcu_read_unlock();
+       return bfqq;
+ }
+--- a/block/bfq-iosched.h
++++ b/block/bfq-iosched.h
+@@ -1007,8 +1007,7 @@ void bfq_bfqq_move(struct bfq_data *bfqd
+ void bfq_init_entity(struct bfq_entity *entity, struct bfq_group *bfqg);
+ void bfq_bic_update_cgroup(struct bfq_io_cq *bic, struct bio *bio);
+ void bfq_end_wr_async(struct bfq_data *bfqd);
+-struct bfq_group *bfq_find_set_group(struct bfq_data *bfqd,
+-                                   struct blkcg *blkcg);
++struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio);
+ struct blkcg_gq *bfqg_to_blkg(struct bfq_group *bfqg);
+ struct bfq_group *bfqq_group(struct bfq_queue *bfqq);
+ struct bfq_group *bfq_create_group_hierarchy(struct bfq_data *bfqd, int node);
diff --git a/queue-5.15/bfq-make-sure-bfqg-for-which-we-are-queueing-requests-is-online.patch b/queue-5.15/bfq-make-sure-bfqg-for-which-we-are-queueing-requests-is-online.patch
new file mode 100644 (file)
index 0000000..dd64602
--- /dev/null
@@ -0,0 +1,54 @@
+From 075a53b78b815301f8d3dd1ee2cd99554e34f0dd Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 1 Apr 2022 12:27:50 +0200
+Subject: bfq: Make sure bfqg for which we are queueing requests is online
+
+From: Jan Kara <jack@suse.cz>
+
+commit 075a53b78b815301f8d3dd1ee2cd99554e34f0dd upstream.
+
+Bios queued into BFQ IO scheduler can be associated with a cgroup that
+was already offlined. This may then cause insertion of this bfq_group
+into a service tree. But this bfq_group will get freed as soon as last
+bio associated with it is completed leading to use after free issues for
+service tree users. Fix the problem by making sure we always operate on
+online bfq_group. If the bfq_group associated with the bio is not
+online, we pick the first online parent.
+
+CC: stable@vger.kernel.org
+Fixes: e21b7a0b9887 ("block, bfq: add full hierarchical scheduling and cgroups support")
+Tested-by: "yukuai (C)" <yukuai3@huawei.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20220401102752.8599-9-jack@suse.cz
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bfq-cgroup.c |   15 ++++++++++++---
+ 1 file changed, 12 insertions(+), 3 deletions(-)
+
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -610,10 +610,19 @@ static void bfq_link_bfqg(struct bfq_dat
+ struct bfq_group *bfq_bio_bfqg(struct bfq_data *bfqd, struct bio *bio)
+ {
+       struct blkcg_gq *blkg = bio->bi_blkg;
++      struct bfq_group *bfqg;
+-      if (!blkg)
+-              return bfqd->root_group;
+-      return blkg_to_bfqg(blkg);
++      while (blkg) {
++              bfqg = blkg_to_bfqg(blkg);
++              if (bfqg->online) {
++                      bio_associate_blkg_from_css(bio, &blkg->blkcg->css);
++                      return bfqg;
++              }
++              blkg = blkg->parent;
++      }
++      bio_associate_blkg_from_css(bio,
++                              &bfqg_to_blkg(bfqd->root_group)->blkcg->css);
++      return bfqd->root_group;
+ }
+ /**
diff --git a/queue-5.15/bfq-remove-pointless-bfq_init_rq-calls.patch b/queue-5.15/bfq-remove-pointless-bfq_init_rq-calls.patch
new file mode 100644 (file)
index 0000000..7b13250
--- /dev/null
@@ -0,0 +1,84 @@
+From 5f550ede5edf846ecc0067be1ba80514e6fe7f8e Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 1 Apr 2022 12:27:47 +0200
+Subject: bfq: Remove pointless bfq_init_rq() calls
+
+From: Jan Kara <jack@suse.cz>
+
+commit 5f550ede5edf846ecc0067be1ba80514e6fe7f8e upstream.
+
+We call bfq_init_rq() from request merging functions where requests we
+get should have already gone through bfq_init_rq() during insert and
+anyway we want to do anything only if the request is already tracked by
+BFQ. So replace calls to bfq_init_rq() with RQ_BFQQ() instead to simply
+skip requests untracked by BFQ. We move bfq_init_rq() call in
+bfq_insert_request() a bit earlier to cover request merging and thus
+can transfer FIFO position in case of a merge.
+
+CC: stable@vger.kernel.org
+Tested-by: "yukuai (C)" <yukuai3@huawei.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20220401102752.8599-6-jack@suse.cz
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bfq-iosched.c |   12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2375,8 +2375,6 @@ static int bfq_request_merge(struct requ
+       return ELEVATOR_NO_MERGE;
+ }
+-static struct bfq_queue *bfq_init_rq(struct request *rq);
+-
+ static void bfq_request_merged(struct request_queue *q, struct request *req,
+                              enum elv_merge type)
+ {
+@@ -2385,7 +2383,7 @@ static void bfq_request_merged(struct re
+           blk_rq_pos(req) <
+           blk_rq_pos(container_of(rb_prev(&req->rb_node),
+                                   struct request, rb_node))) {
+-              struct bfq_queue *bfqq = bfq_init_rq(req);
++              struct bfq_queue *bfqq = RQ_BFQQ(req);
+               struct bfq_data *bfqd;
+               struct request *prev, *next_rq;
+@@ -2437,8 +2435,8 @@ static void bfq_request_merged(struct re
+ static void bfq_requests_merged(struct request_queue *q, struct request *rq,
+                               struct request *next)
+ {
+-      struct bfq_queue *bfqq = bfq_init_rq(rq),
+-              *next_bfqq = bfq_init_rq(next);
++      struct bfq_queue *bfqq = RQ_BFQQ(rq),
++              *next_bfqq = RQ_BFQQ(next);
+       if (!bfqq)
+               goto remove;
+@@ -5991,6 +5989,8 @@ static inline void bfq_update_insert_sta
+                                          unsigned int cmd_flags) {}
+ #endif /* CONFIG_BFQ_CGROUP_DEBUG */
++static struct bfq_queue *bfq_init_rq(struct request *rq);
++
+ static void bfq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
+                              bool at_head)
+ {
+@@ -6006,6 +6006,7 @@ static void bfq_insert_request(struct bl
+               bfqg_stats_update_legacy_io(q, rq);
+ #endif
+       spin_lock_irq(&bfqd->lock);
++      bfqq = bfq_init_rq(rq);
+       if (blk_mq_sched_try_insert_merge(q, rq, &free)) {
+               spin_unlock_irq(&bfqd->lock);
+               blk_mq_free_requests(&free);
+@@ -6014,7 +6015,6 @@ static void bfq_insert_request(struct bl
+       trace_block_rq_insert(rq);
+-      bfqq = bfq_init_rq(rq);
+       if (!bfqq || at_head) {
+               if (at_head)
+                       list_add(&rq->queuelist, &bfqd->dispatch);
diff --git a/queue-5.15/bfq-split-shared-queues-on-move-between-cgroups.patch b/queue-5.15/bfq-split-shared-queues-on-move-between-cgroups.patch
new file mode 100644 (file)
index 0000000..6c3d666
--- /dev/null
@@ -0,0 +1,99 @@
+From 3bc5e683c67d94bd839a1da2e796c15847b51b69 Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 1 Apr 2022 12:27:44 +0200
+Subject: bfq: Split shared queues on move between cgroups
+
+From: Jan Kara <jack@suse.cz>
+
+commit 3bc5e683c67d94bd839a1da2e796c15847b51b69 upstream.
+
+When bfqq is shared by multiple processes it can happen that one of the
+processes gets moved to a different cgroup (or just starts submitting IO
+for different cgroup). In case that happens we need to split the merged
+bfqq as otherwise we will have IO for multiple cgroups in one bfqq and
+we will just account IO time to wrong entities etc.
+
+Similarly if the bfqq is scheduled to merge with another bfqq but the
+merge didn't happen yet, cancel the merge as it need not be valid
+anymore.
+
+CC: stable@vger.kernel.org
+Fixes: e21b7a0b9887 ("block, bfq: add full hierarchical scheduling and cgroups support")
+Tested-by: "yukuai (C)" <yukuai3@huawei.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20220401102752.8599-3-jack@suse.cz
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bfq-cgroup.c  |   36 +++++++++++++++++++++++++++++++++---
+ block/bfq-iosched.c |    2 +-
+ block/bfq-iosched.h |    1 +
+ 3 files changed, 35 insertions(+), 4 deletions(-)
+
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -733,9 +733,39 @@ static struct bfq_group *__bfq_bic_chang
+       }
+       if (sync_bfqq) {
+-              entity = &sync_bfqq->entity;
+-              if (entity->sched_data != &bfqg->sched_data)
+-                      bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
++              if (!sync_bfqq->new_bfqq && !bfq_bfqq_coop(sync_bfqq)) {
++                      /* We are the only user of this bfqq, just move it */
++                      if (sync_bfqq->entity.sched_data != &bfqg->sched_data)
++                              bfq_bfqq_move(bfqd, sync_bfqq, bfqg);
++              } else {
++                      struct bfq_queue *bfqq;
++
++                      /*
++                       * The queue was merged to a different queue. Check
++                       * that the merge chain still belongs to the same
++                       * cgroup.
++                       */
++                      for (bfqq = sync_bfqq; bfqq; bfqq = bfqq->new_bfqq)
++                              if (bfqq->entity.sched_data !=
++                                  &bfqg->sched_data)
++                                      break;
++                      if (bfqq) {
++                              /*
++                               * Some queue changed cgroup so the merge is
++                               * not valid anymore. We cannot easily just
++                               * cancel the merge (by clearing new_bfqq) as
++                               * there may be other processes using this
++                               * queue and holding refs to all queues below
++                               * sync_bfqq->new_bfqq. Similarly if the merge
++                               * already happened, we need to detach from
++                               * bfqq now so that we cannot merge bio to a
++                               * request from the old cgroup.
++                               */
++                              bfq_put_cooperator(sync_bfqq);
++                              bfq_release_process_ref(bfqd, sync_bfqq);
++                              bic_set_bfqq(bic, NULL, 1);
++                      }
++              }
+       }
+       return bfqg;
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -5193,7 +5193,7 @@ static void bfq_put_stable_ref(struct bf
+       bfq_put_queue(bfqq);
+ }
+-static void bfq_put_cooperator(struct bfq_queue *bfqq)
++void bfq_put_cooperator(struct bfq_queue *bfqq)
+ {
+       struct bfq_queue *__bfqq, *next;
+--- a/block/bfq-iosched.h
++++ b/block/bfq-iosched.h
+@@ -977,6 +977,7 @@ void bfq_weights_tree_remove(struct bfq_
+ void bfq_bfqq_expire(struct bfq_data *bfqd, struct bfq_queue *bfqq,
+                    bool compensate, enum bfqq_expiration reason);
+ void bfq_put_queue(struct bfq_queue *bfqq);
++void bfq_put_cooperator(struct bfq_queue *bfqq);
+ void bfq_end_wr_async_queues(struct bfq_data *bfqd, struct bfq_group *bfqg);
+ void bfq_release_process_ref(struct bfq_data *bfqd, struct bfq_queue *bfqq);
+ void bfq_schedule_dispatch(struct bfq_data *bfqd);
diff --git a/queue-5.15/bfq-track-whether-bfq_group-is-still-online.patch b/queue-5.15/bfq-track-whether-bfq_group-is-still-online.patch
new file mode 100644 (file)
index 0000000..0287598
--- /dev/null
@@ -0,0 +1,65 @@
+From 09f871868080c33992cd6a9b72a5ca49582578fa Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 1 Apr 2022 12:27:48 +0200
+Subject: bfq: Track whether bfq_group is still online
+
+From: Jan Kara <jack@suse.cz>
+
+commit 09f871868080c33992cd6a9b72a5ca49582578fa upstream.
+
+Track whether bfq_group is still online. We cannot rely on
+blkcg_gq->online because that gets cleared only after all policies are
+offlined and we need something that gets updated already under
+bfqd->lock when we are cleaning up our bfq_group to be able to guarantee
+that when we see online bfq_group, it will stay online while we are
+holding bfqd->lock lock.
+
+CC: stable@vger.kernel.org
+Tested-by: "yukuai (C)" <yukuai3@huawei.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20220401102752.8599-7-jack@suse.cz
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bfq-cgroup.c  |    3 ++-
+ block/bfq-iosched.h |    2 ++
+ 2 files changed, 4 insertions(+), 1 deletion(-)
+
+--- a/block/bfq-cgroup.c
++++ b/block/bfq-cgroup.c
+@@ -555,6 +555,7 @@ static void bfq_pd_init(struct blkg_poli
+                                  */
+       bfqg->bfqd = bfqd;
+       bfqg->active_entities = 0;
++      bfqg->online = true;
+       bfqg->rq_pos_tree = RB_ROOT;
+ }
+@@ -601,7 +602,6 @@ struct bfq_group *bfq_find_set_group(str
+       struct bfq_entity *entity;
+       bfqg = bfq_lookup_bfqg(bfqd, blkcg);
+-
+       if (unlikely(!bfqg))
+               return NULL;
+@@ -969,6 +969,7 @@ static void bfq_pd_offline(struct blkg_p
+ put_async_queues:
+       bfq_put_async_queues(bfqd, bfqg);
++      bfqg->online = false;
+       spin_unlock_irqrestore(&bfqd->lock, flags);
+       /*
+--- a/block/bfq-iosched.h
++++ b/block/bfq-iosched.h
+@@ -926,6 +926,8 @@ struct bfq_group {
+       /* reference counter (see comments in bfq_bic_update_cgroup) */
+       int ref;
++      /* Is bfq_group still online? */
++      bool online;
+       struct bfq_entity entity;
+       struct bfq_sched_data sched_data;
diff --git a/queue-5.15/bfq-update-cgroup-information-before-merging-bio.patch b/queue-5.15/bfq-update-cgroup-information-before-merging-bio.patch
new file mode 100644 (file)
index 0000000..1660692
--- /dev/null
@@ -0,0 +1,51 @@
+From ea591cd4eb270393810e7be01feb8fde6a34fbbe Mon Sep 17 00:00:00 2001
+From: Jan Kara <jack@suse.cz>
+Date: Fri, 1 Apr 2022 12:27:45 +0200
+Subject: bfq: Update cgroup information before merging bio
+
+From: Jan Kara <jack@suse.cz>
+
+commit ea591cd4eb270393810e7be01feb8fde6a34fbbe upstream.
+
+When the process is migrated to a different cgroup (or in case of
+writeback just starts submitting bios associated with a different
+cgroup) bfq_merge_bio() can operate with stale cgroup information in
+bic. Thus the bio can be merged to a request from a different cgroup or
+it can result in merging of bfqqs for different cgroups or bfqqs of
+already dead cgroups and causing possible use-after-free issues. Fix the
+problem by updating cgroup information in bfq_merge_bio().
+
+CC: stable@vger.kernel.org
+Fixes: e21b7a0b9887 ("block, bfq: add full hierarchical scheduling and cgroups support")
+Tested-by: "yukuai (C)" <yukuai3@huawei.com>
+Signed-off-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20220401102752.8599-4-jack@suse.cz
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/bfq-iosched.c |   11 +++++++++--
+ 1 file changed, 9 insertions(+), 2 deletions(-)
+
+--- a/block/bfq-iosched.c
++++ b/block/bfq-iosched.c
+@@ -2335,10 +2335,17 @@ static bool bfq_bio_merge(struct request
+       spin_lock_irq(&bfqd->lock);
+-      if (bic)
++      if (bic) {
++              /*
++               * Make sure cgroup info is uptodate for current process before
++               * considering the merge.
++               */
++              bfq_bic_update_cgroup(bic, bio);
++
+               bfqd->bio_bfqq = bic_to_bfqq(bic, op_is_sync(bio->bi_opf));
+-      else
++      } else {
+               bfqd->bio_bfqq = NULL;
++      }
+       bfqd->bio_bic = bic;
+       ret = blk_mq_sched_try_merge(q, bio, nr_segs, &free);
diff --git a/queue-5.15/efi-do-not-import-certificates-from-uefi-secure-boot-for-t2-macs.patch b/queue-5.15/efi-do-not-import-certificates-from-uefi-secure-boot-for-t2-macs.patch
new file mode 100644 (file)
index 0000000..aec0ffc
--- /dev/null
@@ -0,0 +1,133 @@
+From 155ca952c7ca19aa32ecfb7373a32bbc2e1ec6eb Mon Sep 17 00:00:00 2001
+From: Aditya Garg <gargaditya08@live.com>
+Date: Fri, 15 Apr 2022 17:02:46 +0000
+Subject: efi: Do not import certificates from UEFI Secure Boot for T2 Macs
+
+From: Aditya Garg <gargaditya08@live.com>
+
+commit 155ca952c7ca19aa32ecfb7373a32bbc2e1ec6eb upstream.
+
+On Apple T2 Macs, when Linux attempts to read the db and dbx efi variables
+at early boot to load UEFI Secure Boot certificates, a page fault occurs
+in Apple firmware code and EFI runtime services are disabled with the
+following logs:
+
+[Firmware Bug]: Page fault caused by firmware at PA: 0xffffb1edc0068000
+WARNING: CPU: 3 PID: 104 at arch/x86/platform/efi/quirks.c:735 efi_crash_gracefully_on_page_fault+0x50/0xf0
+(Removed some logs from here)
+Call Trace:
+ <TASK>
+ page_fault_oops+0x4f/0x2c0
+ ? search_bpf_extables+0x6b/0x80
+ ? search_module_extables+0x50/0x80
+ ? search_exception_tables+0x5b/0x60
+ kernelmode_fixup_or_oops+0x9e/0x110
+ __bad_area_nosemaphore+0x155/0x190
+ bad_area_nosemaphore+0x16/0x20
+ do_kern_addr_fault+0x8c/0xa0
+ exc_page_fault+0xd8/0x180
+ asm_exc_page_fault+0x1e/0x30
+(Removed some logs from here)
+ ? __efi_call+0x28/0x30
+ ? switch_mm+0x20/0x30
+ ? efi_call_rts+0x19a/0x8e0
+ ? process_one_work+0x222/0x3f0
+ ? worker_thread+0x4a/0x3d0
+ ? kthread+0x17a/0x1a0
+ ? process_one_work+0x3f0/0x3f0
+ ? set_kthread_struct+0x40/0x40
+ ? ret_from_fork+0x22/0x30
+ </TASK>
+---[ end trace 1f82023595a5927f ]---
+efi: Froze efi_rts_wq and disabled EFI Runtime Services
+integrity: Couldn't get size: 0x8000000000000015
+integrity: MODSIGN: Couldn't get UEFI db list
+efi: EFI Runtime Services are disabled!
+integrity: Couldn't get size: 0x8000000000000015
+integrity: Couldn't get UEFI dbx list
+integrity: Couldn't get size: 0x8000000000000015
+integrity: Couldn't get mokx list
+integrity: Couldn't get size: 0x80000000
+
+So we avoid reading these UEFI variables and thus prevent the crash.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Aditya Garg <gargaditya08@live.com>
+Reviewed-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Mimi Zohar <zohar@linux.ibm.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ security/integrity/platform_certs/keyring_handler.h |    8 ++++
+ security/integrity/platform_certs/load_uefi.c       |   33 ++++++++++++++++++++
+ 2 files changed, 41 insertions(+)
+
+--- a/security/integrity/platform_certs/keyring_handler.h
++++ b/security/integrity/platform_certs/keyring_handler.h
+@@ -30,3 +30,11 @@ efi_element_handler_t get_handler_for_db
+ efi_element_handler_t get_handler_for_dbx(const efi_guid_t *sig_type);
+ #endif
++
++#ifndef UEFI_QUIRK_SKIP_CERT
++#define UEFI_QUIRK_SKIP_CERT(vendor, product) \
++               .matches = { \
++                      DMI_MATCH(DMI_BOARD_VENDOR, vendor), \
++                      DMI_MATCH(DMI_PRODUCT_NAME, product), \
++              },
++#endif
+--- a/security/integrity/platform_certs/load_uefi.c
++++ b/security/integrity/platform_certs/load_uefi.c
+@@ -3,6 +3,7 @@
+ #include <linux/kernel.h>
+ #include <linux/sched.h>
+ #include <linux/cred.h>
++#include <linux/dmi.h>
+ #include <linux/err.h>
+ #include <linux/efi.h>
+ #include <linux/slab.h>
+@@ -12,6 +13,31 @@
+ #include "keyring_handler.h"
+ /*
++ * On T2 Macs reading the db and dbx efi variables to load UEFI Secure Boot
++ * certificates causes occurrence of a page fault in Apple's firmware and
++ * a crash disabling EFI runtime services. The following quirk skips reading
++ * these variables.
++ */
++static const struct dmi_system_id uefi_skip_cert[] = {
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,1") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,2") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,3") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro15,4") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,1") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,2") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,3") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookPro16,4") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir8,1") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir8,2") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacBookAir9,1") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacMini8,1") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "MacPro7,1") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,1") },
++      { UEFI_QUIRK_SKIP_CERT("Apple Inc.", "iMac20,2") },
++      { }
++};
++
++/*
+  * Look to see if a UEFI variable called MokIgnoreDB exists and return true if
+  * it does.
+  *
+@@ -137,6 +163,13 @@ static int __init load_uefi_certs(void)
+       unsigned long dbsize = 0, dbxsize = 0, mokxsize = 0;
+       efi_status_t status;
+       int rc = 0;
++      const struct dmi_system_id *dmi_id;
++
++      dmi_id = dmi_first_match(uefi_skip_cert);
++      if (dmi_id) {
++              pr_err("Reading UEFI Secure Boot Certs is not supported on T2 Macs.\n");
++              return false;
++      }
+       if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE))
+               return false;
diff --git a/queue-5.15/fs-writeback-writeback_sb_inodes-recalculate-wrote-according-skipped-pages.patch b/queue-5.15/fs-writeback-writeback_sb_inodes-recalculate-wrote-according-skipped-pages.patch
new file mode 100644 (file)
index 0000000..c7a1c9a
--- /dev/null
@@ -0,0 +1,154 @@
+From 68f4c6eba70df70a720188bce95c85570ddfcc87 Mon Sep 17 00:00:00 2001
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+Date: Tue, 10 May 2022 21:38:05 +0800
+Subject: fs-writeback: writeback_sb_inodes:Recalculate 'wrote' according skipped pages
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Zhihao Cheng <chengzhihao1@huawei.com>
+
+commit 68f4c6eba70df70a720188bce95c85570ddfcc87 upstream.
+
+Commit 505a666ee3fc ("writeback: plug writeback in wb_writeback() and
+writeback_inodes_wb()") has us holding a plug during wb_writeback, which
+may cause a potential ABBA dead lock:
+
+    wb_writeback               fat_file_fsync
+blk_start_plug(&plug)
+for (;;) {
+  iter i-1: some reqs have been added into plug->mq_list  // LOCK A
+  iter i:
+    progress = __writeback_inodes_wb(wb, work)
+    . writeback_sb_inodes // fat's bdev
+    .   __writeback_single_inode
+    .   . generic_writepages
+    .   .   __block_write_full_page
+    .   .   . .            __generic_file_fsync
+    .   .   . .              sync_inode_metadata
+    .   .   . .                writeback_single_inode
+    .   .   . .                  __writeback_single_inode
+    .   .   . .                    fat_write_inode
+    .   .   . .                      __fat_write_inode
+    .   .   . .                        sync_dirty_buffer       // fat's bdev
+    .   .   . .                          lock_buffer(bh)       // LOCK B
+    .   .   . .                            submit_bh
+    .   .   . .                              blk_mq_get_tag    // LOCK A
+    .   .   . trylock_buffer(bh)  // LOCK B
+    .   .   .   redirty_page_for_writepage
+    .   .   .     wbc->pages_skipped++
+    .   .   --wbc->nr_to_write
+    .   wrote += write_chunk - wbc.nr_to_write  // wrote > 0
+    .   requeue_inode
+    .     redirty_tail_locked
+    if (progress)    // progress > 0
+      continue;
+  iter i+1:
+      queue_io
+      // similar process with iter i, infinite for-loop !
+}
+blk_finish_plug(&plug)   // flush plug won't be called
+
+Above process triggers a hungtask like:
+[  399.044861] INFO: task bb:2607 blocked for more than 30 seconds.
+[  399.046824]       Not tainted 5.18.0-rc1-00005-gefae4d9eb6a2-dirty
+[  399.051539] task:bb              state:D stack:    0 pid: 2607 ppid:
+2426 flags:0x00004000
+[  399.051556] Call Trace:
+[  399.051570]  __schedule+0x480/0x1050
+[  399.051592]  schedule+0x92/0x1a0
+[  399.051602]  io_schedule+0x22/0x50
+[  399.051613]  blk_mq_get_tag+0x1d3/0x3c0
+[  399.051640]  __blk_mq_alloc_requests+0x21d/0x3f0
+[  399.051657]  blk_mq_submit_bio+0x68d/0xca0
+[  399.051674]  __submit_bio+0x1b5/0x2d0
+[  399.051708]  submit_bio_noacct+0x34e/0x720
+[  399.051718]  submit_bio+0x3b/0x150
+[  399.051725]  submit_bh_wbc+0x161/0x230
+[  399.051734]  __sync_dirty_buffer+0xd1/0x420
+[  399.051744]  sync_dirty_buffer+0x17/0x20
+[  399.051750]  __fat_write_inode+0x289/0x310
+[  399.051766]  fat_write_inode+0x2a/0xa0
+[  399.051783]  __writeback_single_inode+0x53c/0x6f0
+[  399.051795]  writeback_single_inode+0x145/0x200
+[  399.051803]  sync_inode_metadata+0x45/0x70
+[  399.051856]  __generic_file_fsync+0xa3/0x150
+[  399.051880]  fat_file_fsync+0x1d/0x80
+[  399.051895]  vfs_fsync_range+0x40/0xb0
+[  399.051929]  __x64_sys_fsync+0x18/0x30
+
+In my test, 'need_resched()' (which is imported by 590dca3a71 "fs-writeback:
+unplug before cond_resched in writeback_sb_inodes") in function
+'writeback_sb_inodes()' seldom comes true, unless cond_resched() is deleted
+from write_cache_pages().
+
+Fix it by correcting wrote number according number of skipped pages
+in writeback_sb_inodes().
+
+Goto Link to find a reproducer.
+
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=215837
+Cc: stable@vger.kernel.org # v4.3
+Signed-off-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20220510133805.1988292-1-chengzhihao1@huawei.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/fs-writeback.c |   13 ++++++++-----
+ 1 file changed, 8 insertions(+), 5 deletions(-)
+
+--- a/fs/fs-writeback.c
++++ b/fs/fs-writeback.c
+@@ -1806,11 +1806,12 @@ static long writeback_sb_inodes(struct s
+       };
+       unsigned long start_time = jiffies;
+       long write_chunk;
+-      long wrote = 0;  /* count both pages and inodes */
++      long total_wrote = 0;  /* count both pages and inodes */
+       while (!list_empty(&wb->b_io)) {
+               struct inode *inode = wb_inode(wb->b_io.prev);
+               struct bdi_writeback *tmp_wb;
++              long wrote;
+               if (inode->i_sb != sb) {
+                       if (work->sb) {
+@@ -1886,7 +1887,9 @@ static long writeback_sb_inodes(struct s
+               wbc_detach_inode(&wbc);
+               work->nr_pages -= write_chunk - wbc.nr_to_write;
+-              wrote += write_chunk - wbc.nr_to_write;
++              wrote = write_chunk - wbc.nr_to_write - wbc.pages_skipped;
++              wrote = wrote < 0 ? 0 : wrote;
++              total_wrote += wrote;
+               if (need_resched()) {
+                       /*
+@@ -1908,7 +1911,7 @@ static long writeback_sb_inodes(struct s
+               tmp_wb = inode_to_wb_and_lock_list(inode);
+               spin_lock(&inode->i_lock);
+               if (!(inode->i_state & I_DIRTY_ALL))
+-                      wrote++;
++                      total_wrote++;
+               requeue_inode(inode, tmp_wb, &wbc);
+               inode_sync_complete(inode);
+               spin_unlock(&inode->i_lock);
+@@ -1922,14 +1925,14 @@ static long writeback_sb_inodes(struct s
+                * bail out to wb_writeback() often enough to check
+                * background threshold and other termination conditions.
+                */
+-              if (wrote) {
++              if (total_wrote) {
+                       if (time_is_before_jiffies(start_time + HZ / 10UL))
+                               break;
+                       if (work->nr_pages <= 0)
+                               break;
+               }
+       }
+-      return wrote;
++      return total_wrote;
+ }
+ static long __writeback_inodes_wb(struct bdi_writeback *wb,
diff --git a/queue-5.15/iwlwifi-mvm-fix-assert-1f04-upon-reconfig.patch b/queue-5.15/iwlwifi-mvm-fix-assert-1f04-upon-reconfig.patch
new file mode 100644 (file)
index 0000000..abbe532
--- /dev/null
@@ -0,0 +1,36 @@
+From 9d096e3d3061dbf4ee10e2b59fc2c06e05bdb997 Mon Sep 17 00:00:00 2001
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Date: Tue, 17 May 2022 12:05:09 +0300
+Subject: iwlwifi: mvm: fix assert 1F04 upon reconfig
+
+From: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+
+commit 9d096e3d3061dbf4ee10e2b59fc2c06e05bdb997 upstream.
+
+When we reconfig we must not send the MAC_POWER command that relates to
+a MAC that was not yet added to the firmware.
+
+Ignore those in the iterator.
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
+Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
+Link: https://lore.kernel.org/r/20220517120044.ed2ffc8ce732.If786e19512d0da4334a6382ea6148703422c7d7b@changeid
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/intel/iwlwifi/mvm/power.c |    3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/net/wireless/intel/iwlwifi/mvm/power.c
++++ b/drivers/net/wireless/intel/iwlwifi/mvm/power.c
+@@ -563,6 +563,9 @@ static void iwl_mvm_power_get_vifs_itera
+       struct iwl_power_vifs *power_iterator = _data;
+       bool active = mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < NUM_PHY_CTX;
++      if (!mvmvif->uploaded)
++              return;
++
+       switch (ieee80211_vif_type_p2p(vif)) {
+       case NL80211_IFTYPE_P2P_DEVICE:
+               break;
diff --git a/queue-5.15/objtool-fix-objtool-regression-on-x32-systems.patch b/queue-5.15/objtool-fix-objtool-regression-on-x32-systems.patch
new file mode 100644 (file)
index 0000000..b734f94
--- /dev/null
@@ -0,0 +1,101 @@
+From 22682a07acc308ef78681572e19502ce8893c4d4 Mon Sep 17 00:00:00 2001
+From: Mikulas Patocka <mpatocka@redhat.com>
+Date: Mon, 16 May 2022 11:06:36 -0400
+Subject: objtool: Fix objtool regression on x32 systems
+
+From: Mikulas Patocka <mpatocka@redhat.com>
+
+commit 22682a07acc308ef78681572e19502ce8893c4d4 upstream.
+
+Commit c087c6e7b551 ("objtool: Fix type of reloc::addend") failed to
+appreciate cross building from ILP32 hosts, where 'int' == 'long' and
+the issue persists.
+
+As such, use s64/int64_t/Elf64_Sxword for this field and suffer the
+pain that is ISO C99 printf formats for it.
+
+Fixes: c087c6e7b551 ("objtool: Fix type of reloc::addend")
+Signed-off-by: Mikulas Patocka <mpatocka@redhat.com>
+[peterz: reword changelog, s/long long/s64/]
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/alpine.LRH.2.02.2205161041260.11556@file01.intranet.prod.int.rdu2.redhat.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/objtool/check.c               |    9 +++++----
+ tools/objtool/elf.c                 |    2 +-
+ tools/objtool/include/objtool/elf.h |    4 ++--
+ 3 files changed, 8 insertions(+), 7 deletions(-)
+
+--- a/tools/objtool/check.c
++++ b/tools/objtool/check.c
+@@ -5,6 +5,7 @@
+ #include <string.h>
+ #include <stdlib.h>
++#include <inttypes.h>
+ #include <arch/elf.h>
+ #include <objtool/builtin.h>
+@@ -393,12 +394,12 @@ static int add_dead_ends(struct objtool_
+               else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
+                       insn = find_last_insn(file, reloc->sym->sec);
+                       if (!insn) {
+-                              WARN("can't find unreachable insn at %s+0x%lx",
++                              WARN("can't find unreachable insn at %s+0x%" PRIx64,
+                                    reloc->sym->sec->name, reloc->addend);
+                               return -1;
+                       }
+               } else {
+-                      WARN("can't find unreachable insn at %s+0x%lx",
++                      WARN("can't find unreachable insn at %s+0x%" PRIx64,
+                            reloc->sym->sec->name, reloc->addend);
+                       return -1;
+               }
+@@ -428,12 +429,12 @@ reachable:
+               else if (reloc->addend == reloc->sym->sec->sh.sh_size) {
+                       insn = find_last_insn(file, reloc->sym->sec);
+                       if (!insn) {
+-                              WARN("can't find reachable insn at %s+0x%lx",
++                              WARN("can't find reachable insn at %s+0x%" PRIx64,
+                                    reloc->sym->sec->name, reloc->addend);
+                               return -1;
+                       }
+               } else {
+-                      WARN("can't find reachable insn at %s+0x%lx",
++                      WARN("can't find reachable insn at %s+0x%" PRIx64,
+                            reloc->sym->sec->name, reloc->addend);
+                       return -1;
+               }
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -485,7 +485,7 @@ static struct section *elf_create_reloc_
+                                               int reltype);
+ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
+-                unsigned int type, struct symbol *sym, long addend)
++                unsigned int type, struct symbol *sym, s64 addend)
+ {
+       struct reloc *reloc;
+--- a/tools/objtool/include/objtool/elf.h
++++ b/tools/objtool/include/objtool/elf.h
+@@ -69,7 +69,7 @@ struct reloc {
+       struct symbol *sym;
+       unsigned long offset;
+       unsigned int type;
+-      long addend;
++      s64 addend;
+       int idx;
+       bool jump_table_start;
+ };
+@@ -131,7 +131,7 @@ struct elf *elf_open_read(const char *na
+ struct section *elf_create_section(struct elf *elf, const char *name, unsigned int sh_flags, size_t entsize, int nr);
+ int elf_add_reloc(struct elf *elf, struct section *sec, unsigned long offset,
+-                unsigned int type, struct symbol *sym, long addend);
++                unsigned int type, struct symbol *sym, s64 addend);
+ int elf_add_reloc_to_insn(struct elf *elf, struct section *sec,
+                         unsigned long offset, unsigned int type,
+                         struct section *insn_sec, unsigned long insn_off);
diff --git a/queue-5.15/objtool-fix-symbol-creation.patch b/queue-5.15/objtool-fix-symbol-creation.patch
new file mode 100644 (file)
index 0000000..dd02ce4
--- /dev/null
@@ -0,0 +1,346 @@
+From ead165fa1042247b033afad7be4be9b815d04ade Mon Sep 17 00:00:00 2001
+From: Peter Zijlstra <peterz@infradead.org>
+Date: Tue, 17 May 2022 17:42:04 +0200
+Subject: objtool: Fix symbol creation
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit ead165fa1042247b033afad7be4be9b815d04ade upstream.
+
+Nathan reported objtool failing with the following messages:
+
+  warning: objtool: no non-local symbols !?
+  warning: objtool: gelf_update_symshndx: invalid section index
+
+The problem is due to commit 4abff6d48dbc ("objtool: Fix code relocs
+vs weak symbols") failing to consider the case where an object would
+have no non-local symbols.
+
+The problem that commit tries to address is adding a STB_LOCAL symbol
+to the symbol table in light of the ELF spec's requirement that:
+
+  In each symbol table, all symbols with STB_LOCAL binding preced the
+  weak and global symbols.  As ``Sections'' above describes, a symbol
+  table section's sh_info section header member holds the symbol table
+  index for the first non-local symbol.
+
+The approach taken is to find this first non-local symbol, move that
+to the end and then re-use the freed spot to insert a new local symbol
+and increment sh_info.
+
+Except it never considered the case of object files without global
+symbols and got a whole bunch of details wrong -- so many in fact that
+it is a wonder it ever worked :/
+
+Specifically:
+
+ - It failed to re-hash the symbol on the new index, so a subsequent
+   find_symbol_by_index() would not find it at the new location and a
+   query for the old location would now return a non-deterministic
+   choice between the old and new symbol.
+
+ - It failed to appreciate that the GElf wrappers are not a valid disk
+   format (it works because GElf is basically Elf64 and we only
+   support x86_64 atm.)
+
+ - It failed to fully appreciate how horrible the libelf API really is
+   and got the gelf_update_symshndx() call pretty much completely
+   wrong; with the direct consequence that if inserting a second
+   STB_LOCAL symbol would require moving the same STB_GLOBAL symbol
+   again it would completely come unstuck.
+
+Write a new elf_update_symbol() function that wraps all the magic
+required to update or create a new symbol at a given index.
+
+Specifically, gelf_update_sym*() require an @ndx argument that is
+relative to the @data argument; this means you have to manually
+iterate the section data descriptor list and update @ndx.
+
+Fixes: 4abff6d48dbc ("objtool: Fix code relocs vs weak symbols")
+Reported-by: Nathan Chancellor <nathan@kernel.org>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Acked-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Tested-by: Nathan Chancellor <nathan@kernel.org>
+Cc: <stable@vger.kernel.org>
+Link: https://lkml.kernel.org/r/YoPCTEYjoPqE4ZxB@hirez.programming.kicks-ass.net
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/objtool/elf.c |  196 +++++++++++++++++++++++++++++++++-------------------
+ 1 file changed, 128 insertions(+), 68 deletions(-)
+
+--- a/tools/objtool/elf.c
++++ b/tools/objtool/elf.c
+@@ -314,6 +314,8 @@ static void elf_add_symbol(struct elf *e
+       struct list_head *entry;
+       struct rb_node *pnode;
++      sym->alias = sym;
++
+       sym->type = GELF_ST_TYPE(sym->sym.st_info);
+       sym->bind = GELF_ST_BIND(sym->sym.st_info);
+@@ -375,7 +377,6 @@ static int read_symbols(struct elf *elf)
+                       return -1;
+               }
+               memset(sym, 0, sizeof(*sym));
+-              sym->alias = sym;
+               sym->idx = i;
+@@ -539,24 +540,21 @@ static void elf_dirty_reloc_sym(struct e
+ }
+ /*
+- * Move the first global symbol, as per sh_info, into a new, higher symbol
+- * index. This fees up the shndx for a new local symbol.
++ * The libelf API is terrible; gelf_update_sym*() takes a data block relative
++ * index value, *NOT* the symbol index. As such, iterate the data blocks and
++ * adjust index until it fits.
++ *
++ * If no data block is found, allow adding a new data block provided the index
++ * is only one past the end.
+  */
+-static int elf_move_global_symbol(struct elf *elf, struct section *symtab,
+-                                struct section *symtab_shndx)
++static int elf_update_symbol(struct elf *elf, struct section *symtab,
++                           struct section *symtab_shndx, struct symbol *sym)
+ {
+-      Elf_Data *data, *shndx_data = NULL;
+-      Elf32_Word first_non_local;
+-      struct symbol *sym;
+-      Elf_Scn *s;
+-
+-      first_non_local = symtab->sh.sh_info;
+-
+-      sym = find_symbol_by_index(elf, first_non_local);
+-      if (!sym) {
+-              WARN("no non-local symbols !?");
+-              return first_non_local;
+-      }
++      Elf32_Word shndx = sym->sec ? sym->sec->idx : SHN_UNDEF;
++      Elf_Data *symtab_data = NULL, *shndx_data = NULL;
++      Elf64_Xword entsize = symtab->sh.sh_entsize;
++      int max_idx, idx = sym->idx;
++      Elf_Scn *s, *t = NULL;
+       s = elf_getscn(elf->elf, symtab->idx);
+       if (!s) {
+@@ -564,79 +562,124 @@ static int elf_move_global_symbol(struct
+               return -1;
+       }
+-      data = elf_newdata(s);
+-      if (!data) {
+-              WARN_ELF("elf_newdata");
+-              return -1;
++      if (symtab_shndx) {
++              t = elf_getscn(elf->elf, symtab_shndx->idx);
++              if (!t) {
++                      WARN_ELF("elf_getscn");
++                      return -1;
++              }
+       }
+-      data->d_buf = &sym->sym;
+-      data->d_size = sizeof(sym->sym);
+-      data->d_align = 1;
+-      data->d_type = ELF_T_SYM;
++      for (;;) {
++              /* get next data descriptor for the relevant sections */
++              symtab_data = elf_getdata(s, symtab_data);
++              if (t)
++                      shndx_data = elf_getdata(t, shndx_data);
++
++              /* end-of-list */
++              if (!symtab_data) {
++                      void *buf;
++
++                      if (idx) {
++                              /* we don't do holes in symbol tables */
++                              WARN("index out of range");
++                              return -1;
++                      }
+-      sym->idx = symtab->sh.sh_size / sizeof(sym->sym);
+-      elf_dirty_reloc_sym(elf, sym);
++                      /* if @idx == 0, it's the next contiguous entry, create it */
++                      symtab_data = elf_newdata(s);
++                      if (t)
++                              shndx_data = elf_newdata(t);
++
++                      buf = calloc(1, entsize);
++                      if (!buf) {
++                              WARN("malloc");
++                              return -1;
++                      }
+-      symtab->sh.sh_info += 1;
+-      symtab->sh.sh_size += data->d_size;
+-      symtab->changed = true;
++                      symtab_data->d_buf = buf;
++                      symtab_data->d_size = entsize;
++                      symtab_data->d_align = 1;
++                      symtab_data->d_type = ELF_T_SYM;
++
++                      symtab->sh.sh_size += entsize;
++                      symtab->changed = true;
++
++                      if (t) {
++                              shndx_data->d_buf = &sym->sec->idx;
++                              shndx_data->d_size = sizeof(Elf32_Word);
++                              shndx_data->d_align = sizeof(Elf32_Word);
++                              shndx_data->d_type = ELF_T_WORD;
+-      if (symtab_shndx) {
+-              s = elf_getscn(elf->elf, symtab_shndx->idx);
+-              if (!s) {
+-                      WARN_ELF("elf_getscn");
++                              symtab_shndx->sh.sh_size += sizeof(Elf32_Word);
++                              symtab_shndx->changed = true;
++                      }
++
++                      break;
++              }
++
++              /* empty blocks should not happen */
++              if (!symtab_data->d_size) {
++                      WARN("zero size data");
+                       return -1;
+               }
+-              shndx_data = elf_newdata(s);
++              /* is this the right block? */
++              max_idx = symtab_data->d_size / entsize;
++              if (idx < max_idx)
++                      break;
++
++              /* adjust index and try again */
++              idx -= max_idx;
++      }
++
++      /* something went side-ways */
++      if (idx < 0) {
++              WARN("negative index");
++              return -1;
++      }
++
++      /* setup extended section index magic and write the symbol */
++      if (shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) {
++              sym->sym.st_shndx = shndx;
++              if (!shndx_data)
++                      shndx = 0;
++      } else {
++              sym->sym.st_shndx = SHN_XINDEX;
+               if (!shndx_data) {
+-                      WARN_ELF("elf_newshndx_data");
++                      WARN("no .symtab_shndx");
+                       return -1;
+               }
++      }
+-              shndx_data->d_buf = &sym->sec->idx;
+-              shndx_data->d_size = sizeof(Elf32_Word);
+-              shndx_data->d_align = 4;
+-              shndx_data->d_type = ELF_T_WORD;
+-
+-              symtab_shndx->sh.sh_size += 4;
+-              symtab_shndx->changed = true;
++      if (!gelf_update_symshndx(symtab_data, shndx_data, idx, &sym->sym, shndx)) {
++              WARN_ELF("gelf_update_symshndx");
++              return -1;
+       }
+-      return first_non_local;
++      return 0;
+ }
+ static struct symbol *
+ elf_create_section_symbol(struct elf *elf, struct section *sec)
+ {
+       struct section *symtab, *symtab_shndx;
+-      Elf_Data *shndx_data = NULL;
+-      struct symbol *sym;
+-      Elf32_Word shndx;
++      Elf32_Word first_non_local, new_idx;
++      struct symbol *sym, *old;
+       symtab = find_section_by_name(elf, ".symtab");
+       if (symtab) {
+               symtab_shndx = find_section_by_name(elf, ".symtab_shndx");
+-              if (symtab_shndx)
+-                      shndx_data = symtab_shndx->data;
+       } else {
+               WARN("no .symtab");
+               return NULL;
+       }
+-      sym = malloc(sizeof(*sym));
++      sym = calloc(1, sizeof(*sym));
+       if (!sym) {
+               perror("malloc");
+               return NULL;
+       }
+-      memset(sym, 0, sizeof(*sym));
+-
+-      sym->idx = elf_move_global_symbol(elf, symtab, symtab_shndx);
+-      if (sym->idx < 0) {
+-              WARN("elf_move_global_symbol");
+-              return NULL;
+-      }
+       sym->name = sec->name;
+       sym->sec = sec;
+@@ -646,24 +689,41 @@ elf_create_section_symbol(struct elf *el
+       // st_other 0
+       // st_value 0
+       // st_size 0
+-      shndx = sec->idx;
+-      if (shndx >= SHN_UNDEF && shndx < SHN_LORESERVE) {
+-              sym->sym.st_shndx = shndx;
+-              if (!shndx_data)
+-                      shndx = 0;
+-      } else {
+-              sym->sym.st_shndx = SHN_XINDEX;
+-              if (!shndx_data) {
+-                      WARN("no .symtab_shndx");
++
++      /*
++       * Move the first global symbol, as per sh_info, into a new, higher
++       * symbol index. This fees up a spot for a new local symbol.
++       */
++      first_non_local = symtab->sh.sh_info;
++      new_idx = symtab->sh.sh_size / symtab->sh.sh_entsize;
++      old = find_symbol_by_index(elf, first_non_local);
++      if (old) {
++              old->idx = new_idx;
++
++              hlist_del(&old->hash);
++              elf_hash_add(symbol, &old->hash, old->idx);
++
++              elf_dirty_reloc_sym(elf, old);
++
++              if (elf_update_symbol(elf, symtab, symtab_shndx, old)) {
++                      WARN("elf_update_symbol move");
+                       return NULL;
+               }
++
++              new_idx = first_non_local;
+       }
+-      if (!gelf_update_symshndx(symtab->data, shndx_data, sym->idx, &sym->sym, shndx)) {
+-              WARN_ELF("gelf_update_symshndx");
++      sym->idx = new_idx;
++      if (elf_update_symbol(elf, symtab, symtab_shndx, sym)) {
++              WARN("elf_update_symbol");
+               return NULL;
+       }
++      /*
++       * Either way, we added a LOCAL symbol.
++       */
++      symtab->sh.sh_info += 1;
++
+       elf_add_symbol(elf, sym);
+       return sym;
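
The commit message above notes that gelf_update_sym*() takes an index relative to a single Elf_Data block rather than the table-wide symbol index, which is why elf_update_symbol() walks the section's data descriptors and shrinks the index until it fits. As a standalone illustration of that adjust-and-retry walk (not part of the queued patch; the helper name, error handling and surrounding setup are assumptions made for this sketch), a minimal libelf example might look like:

    /*
     * Sketch: translate a table-wide symbol index into a block-relative
     * one by walking the section's Elf_Data descriptors, then update the
     * symbol in place.  Illustrative only; link with -lelf.
     */
    #include <gelf.h>
    #include <libelf.h>
    #include <stddef.h>

    static int update_sym_by_index(Elf *e, size_t symtab_scn_idx,
                                   size_t sym_idx, GElf_Sym *sym)
    {
            Elf_Scn *scn = elf_getscn(e, symtab_scn_idx);
            Elf_Data *data = NULL;
            GElf_Shdr shdr;

            if (!scn || !gelf_getshdr(scn, &shdr) || !shdr.sh_entsize)
                    return -1;

            while ((data = elf_getdata(scn, data))) {
                    size_t nsyms = data->d_size / shdr.sh_entsize;

                    /* is the symbol in this data block? */
                    if (sym_idx < nsyms)
                            return gelf_update_sym(data, (int)sym_idx, sym) ? 0 : -1;

                    /* no: it lives in a later block, make the index relative */
                    sym_idx -= nsyms;
            }

            return -1;      /* index past the end of the symbol table */
    }

Unlike elf_update_symbol() in the patch, this sketch does not append a new Elf_Data block when the index is exactly one past the end, nor does it handle the .symtab_shndx extended-index case.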
index f03e5a93707b25c92cf45c595b731dd7d8f23a40..88318eb8a12ec35ff1f7d2c601809753a1cccd2d 100644
@@ -522,3 +522,18 @@ f2fs-fix-to-do-sanity-check-on-total_data_blocks.patch
 f2fs-don-t-use-casefolded-comparison-for-.-and.patch
 f2fs-fix-fallocate-to-use-file_modified-to-update-permissions-consistently.patch
 f2fs-fix-to-do-sanity-check-for-inline-inode.patch
+objtool-fix-objtool-regression-on-x32-systems.patch
+objtool-fix-symbol-creation.patch
+wifi-mac80211-fix-use-after-free-in-chanctx-code.patch
+iwlwifi-mvm-fix-assert-1f04-upon-reconfig.patch
+fs-writeback-writeback_sb_inodes-recalculate-wrote-according-skipped-pages.patch
+efi-do-not-import-certificates-from-uefi-secure-boot-for-t2-macs.patch
+bfq-avoid-false-marking-of-bic-as-stably-merged.patch
+bfq-avoid-merging-queues-with-different-parents.patch
+bfq-split-shared-queues-on-move-between-cgroups.patch
+bfq-update-cgroup-information-before-merging-bio.patch
+bfq-drop-pointless-unlock-lock-pair.patch
+bfq-remove-pointless-bfq_init_rq-calls.patch
+bfq-track-whether-bfq_group-is-still-online.patch
+bfq-get-rid-of-__bio_blkcg-usage.patch
+bfq-make-sure-bfqg-for-which-we-are-queueing-requests-is-online.patch
diff --git a/queue-5.15/wifi-mac80211-fix-use-after-free-in-chanctx-code.patch b/queue-5.15/wifi-mac80211-fix-use-after-free-in-chanctx-code.patch
new file mode 100644
index 0000000..1cc4935
--- /dev/null
@@ -0,0 +1,48 @@
+From 2965c4cdf7ad9ce0796fac5e57debb9519ea721e Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Wed, 1 Jun 2022 09:19:36 +0200
+Subject: wifi: mac80211: fix use-after-free in chanctx code
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit 2965c4cdf7ad9ce0796fac5e57debb9519ea721e upstream.
+
+In ieee80211_vif_use_reserved_context(), when we have an
+old context and the new context's replace_state is set to
+IEEE80211_CHANCTX_REPLACE_NONE, we free the old context
+in ieee80211_vif_use_reserved_reassign(). Therefore, we
+cannot check the old_ctx anymore, so we should set it to
+NULL after this point.
+
+However, since the new_ctx replace state is clearly not
+IEEE80211_CHANCTX_REPLACES_OTHER, we're not going to do
+anything else in this function and can just return to
+avoid accessing the freed old_ctx.
+
+Cc: stable@vger.kernel.org
+Fixes: 5bcae31d9cb1 ("mac80211: implement multi-vif in-place reservations")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/20220601091926.df419d91b165.I17a9b3894ff0b8323ce2afdb153b101124c821e5@changeid
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/mac80211/chan.c |    7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/net/mac80211/chan.c
++++ b/net/mac80211/chan.c
+@@ -1746,12 +1746,9 @@ int ieee80211_vif_use_reserved_context(s
+       if (new_ctx->replace_state == IEEE80211_CHANCTX_REPLACE_NONE) {
+               if (old_ctx)
+-                      err = ieee80211_vif_use_reserved_reassign(sdata);
+-              else
+-                      err = ieee80211_vif_use_reserved_assign(sdata);
++                      return ieee80211_vif_use_reserved_reassign(sdata);
+-              if (err)
+-                      return err;
++              return ieee80211_vif_use_reserved_assign(sdata);
+       }
+       /*