--- /dev/null
+From a53f77eb6c2d0b1befbcecbb70ff6f94ce6d2817 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Sat, 11 Mar 2023 10:34:39 +0100
+Subject: Revert "blk-cgroup: dropping parent refcount after pd_free_fn() is done"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit 029f1f1efa84387474b445dac4281cf95a398db8 which is
+commit c7241babf0855d8a6180cd1743ff0ec34de40b4e upstream.
+
+It is reported to cause problems, as only 2 of the 3 patches in the
+series were applied to the stable branches.
+
+Reported-by: Mike Cloaked <mike.cloaked@gmail.com>
+Reported-by: Eric Biggers <ebiggers@kernel.org>
+Cc: Yu Kuai <yukuai3@huawei.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Sasha Levin <sashal@kernel.org>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217174
+Link: https://lore.kernel.org/r/ZAuPkCn49urWBN5P@sol.localdomain
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-cgroup.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -93,8 +93,6 @@ static void blkg_free_workfn(struct work
+ if (blkg->pd[i])
+ blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
+
+- if (blkg->parent)
+- blkg_put(blkg->parent);
+ if (blkg->q)
+ blk_put_queue(blkg->q);
+ free_percpu(blkg->iostat_cpu);
+@@ -129,6 +127,8 @@ static void __blkg_release(struct rcu_he
+
+ /* release the blkcg and parent blkg refs this blkg has been holding */
+ css_put(&blkg->blkcg->css);
++ if (blkg->parent)
++ blkg_put(blkg->parent);
+ blkg_free(blkg);
+ }
+
--- /dev/null
+From cdd86c17d2e3bb1b2f119ee722c632e2f7ef3d30 Mon Sep 17 00:00:00 2001
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Date: Sat, 11 Mar 2023 10:34:32 +0100
+Subject: Revert "blk-cgroup: synchronize pd_free_fn() from blkg_free_workfn() and blkcg_deactivate_policy()"
+
+From: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+
+This reverts commit 81c1188905f88b77743d1fdeeedfc8cb7b67787d which is
+commit f1c006f1c6850c14040f8337753a63119bba39b9 upstream.
+
+It is reported to cause problems, as only 2 of the 3 patches in the
+series were applied to the stable branches.
+
+Reported-by: Mike Cloaked <mike.cloaked@gmail.com>
+Reported-by: Eric Biggers <ebiggers@kernel.org>
+Cc: Yu Kuai <yukuai3@huawei.com>
+Cc: Tejun Heo <tj@kernel.org>
+Cc: Christoph Hellwig <hch@lst.de>
+Cc: Jens Axboe <axboe@kernel.dk>
+Cc: Sasha Levin <sashal@kernel.org>
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=217174
+Link: https://lore.kernel.org/r/ZAuPkCn49urWBN5P@sol.localdomain
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-cgroup.c | 35 ++++++-----------------------------
+ include/linux/blkdev.h | 1 -
+ 2 files changed, 6 insertions(+), 30 deletions(-)
+
+--- a/block/blk-cgroup.c
++++ b/block/blk-cgroup.c
+@@ -87,32 +87,16 @@ static void blkg_free_workfn(struct work
+ {
+ struct blkcg_gq *blkg = container_of(work, struct blkcg_gq,
+ free_work);
+- struct request_queue *q = blkg->q;
+ int i;
+
+- /*
+- * pd_free_fn() can also be called from blkcg_deactivate_policy(),
+- * in order to make sure pd_free_fn() is called in order, the deletion
+- * of the list blkg->q_node is delayed to here from blkg_destroy(), and
+- * blkcg_mutex is used to synchronize blkg_free_workfn() and
+- * blkcg_deactivate_policy().
+- */
+- if (q)
+- mutex_lock(&q->blkcg_mutex);
+-
+ for (i = 0; i < BLKCG_MAX_POLS; i++)
+ if (blkg->pd[i])
+ blkcg_policy[i]->pd_free_fn(blkg->pd[i]);
+
+ if (blkg->parent)
+ blkg_put(blkg->parent);
+-
+- if (q) {
+- list_del_init(&blkg->q_node);
+- mutex_unlock(&q->blkcg_mutex);
+- blk_put_queue(q);
+- }
+-
++ if (blkg->q)
++ blk_put_queue(blkg->q);
+ free_percpu(blkg->iostat_cpu);
+ percpu_ref_exit(&blkg->refcnt);
+ kfree(blkg);
+@@ -441,14 +425,9 @@ static void blkg_destroy(struct blkcg_gq
+ lockdep_assert_held(&blkg->q->queue_lock);
+ lockdep_assert_held(&blkcg->lock);
+
+- /*
+- * blkg stays on the queue list until blkg_free_workfn(), see details in
+- * blkg_free_workfn(), hence this function can be called from
+- * blkcg_destroy_blkgs() first and again from blkg_destroy_all() before
+- * blkg_free_workfn().
+- */
+- if (hlist_unhashed(&blkg->blkcg_node))
+- return;
++ /* Something wrong if we are trying to remove same group twice */
++ WARN_ON_ONCE(list_empty(&blkg->q_node));
++ WARN_ON_ONCE(hlist_unhashed(&blkg->blkcg_node));
+
+ for (i = 0; i < BLKCG_MAX_POLS; i++) {
+ struct blkcg_policy *pol = blkcg_policy[i];
+@@ -460,6 +439,7 @@ static void blkg_destroy(struct blkcg_gq
+ blkg->online = false;
+
+ radix_tree_delete(&blkcg->blkg_tree, blkg->q->id);
++ list_del_init(&blkg->q_node);
+ hlist_del_init_rcu(&blkg->blkcg_node);
+
+ /*
+@@ -1246,7 +1226,6 @@ int blkcg_init_disk(struct gendisk *disk
+ int ret;
+
+ INIT_LIST_HEAD(&q->blkg_list);
+- mutex_init(&q->blkcg_mutex);
+
+ new_blkg = blkg_alloc(&blkcg_root, disk, GFP_KERNEL);
+ if (!new_blkg)
+@@ -1484,7 +1463,6 @@ void blkcg_deactivate_policy(struct requ
+ if (queue_is_mq(q))
+ blk_mq_freeze_queue(q);
+
+- mutex_lock(&q->blkcg_mutex);
+ spin_lock_irq(&q->queue_lock);
+
+ __clear_bit(pol->plid, q->blkcg_pols);
+@@ -1503,7 +1481,6 @@ void blkcg_deactivate_policy(struct requ
+ }
+
+ spin_unlock_irq(&q->queue_lock);
+- mutex_unlock(&q->blkcg_mutex);
+
+ if (queue_is_mq(q))
+ blk_mq_unfreeze_queue(q);
+--- a/include/linux/blkdev.h
++++ b/include/linux/blkdev.h
+@@ -487,7 +487,6 @@ struct request_queue {
+ DECLARE_BITMAP (blkcg_pols, BLKCG_MAX_POLS);
+ struct blkcg_gq *root_blkg;
+ struct list_head blkg_list;
+- struct mutex blkcg_mutex;
+ #endif
+
+ struct queue_limits limits;