+++ /dev/null
-From 8e5573c40b2d75dcf27ffa8ec3ffda9543f948df Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 14 Nov 2022 05:26:33 +0100
-Subject: blk-crypto: pass a gendisk to blk_crypto_sysfs_{,un}register
-
-From: Christoph Hellwig <hch@lst.de>
-
-[ Upstream commit 450deb93df7d457cdd93594a1987f9650c749b96 ]
-
-Prepare for changes to the block layer sysfs handling by passing the
-readily available gendisk to blk_crypto_sysfs_{,un}register.
-
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Reviewed-by: Eric Biggers <ebiggers@google.com>
-Link: https://lore.kernel.org/r/20221114042637.1009333-2-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: 49e4d04f0486 ("block: Drop spurious might_sleep() from blk_put_queue()")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-crypto-internal.h | 10 ++++++----
- block/blk-crypto-sysfs.c | 7 ++++---
- block/blk-sysfs.c | 4 ++--
- 3 files changed, 12 insertions(+), 9 deletions(-)
-
-diff --git a/block/blk-crypto-internal.h b/block/blk-crypto-internal.h
-index e6818ffaddbf..b8a00847171f 100644
---- a/block/blk-crypto-internal.h
-+++ b/block/blk-crypto-internal.h
-@@ -21,9 +21,9 @@ extern const struct blk_crypto_mode blk_crypto_modes[];
-
- #ifdef CONFIG_BLK_INLINE_ENCRYPTION
-
--int blk_crypto_sysfs_register(struct request_queue *q);
-+int blk_crypto_sysfs_register(struct gendisk *disk);
-
--void blk_crypto_sysfs_unregister(struct request_queue *q);
-+void blk_crypto_sysfs_unregister(struct gendisk *disk);
-
- void bio_crypt_dun_increment(u64 dun[BLK_CRYPTO_DUN_ARRAY_SIZE],
- unsigned int inc);
-@@ -67,12 +67,14 @@ static inline bool blk_crypto_rq_is_encrypted(struct request *rq)
-
- #else /* CONFIG_BLK_INLINE_ENCRYPTION */
-
--static inline int blk_crypto_sysfs_register(struct request_queue *q)
-+static inline int blk_crypto_sysfs_register(struct gendisk *disk)
- {
- return 0;
- }
-
--static inline void blk_crypto_sysfs_unregister(struct request_queue *q) { }
-+static inline void blk_crypto_sysfs_unregister(struct gendisk *disk)
-+{
-+}
-
- static inline bool bio_crypt_rq_ctx_compatible(struct request *rq,
- struct bio *bio)
-diff --git a/block/blk-crypto-sysfs.c b/block/blk-crypto-sysfs.c
-index fd93bd2f33b7..e05f145cd797 100644
---- a/block/blk-crypto-sysfs.c
-+++ b/block/blk-crypto-sysfs.c
-@@ -126,8 +126,9 @@ static struct kobj_type blk_crypto_ktype = {
- * If the request_queue has a blk_crypto_profile, create the "crypto"
- * subdirectory in sysfs (/sys/block/$disk/queue/crypto/).
- */
--int blk_crypto_sysfs_register(struct request_queue *q)
-+int blk_crypto_sysfs_register(struct gendisk *disk)
- {
-+ struct request_queue *q = disk->queue;
- struct blk_crypto_kobj *obj;
- int err;
-
-@@ -149,9 +150,9 @@ int blk_crypto_sysfs_register(struct request_queue *q)
- return 0;
- }
-
--void blk_crypto_sysfs_unregister(struct request_queue *q)
-+void blk_crypto_sysfs_unregister(struct gendisk *disk)
- {
-- kobject_put(q->crypto_kobject);
-+ kobject_put(disk->queue->crypto_kobject);
- }
-
- static int __init blk_crypto_sysfs_init(void)
-diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
-index e7871665825a..2b1cf0b2a5c7 100644
---- a/block/blk-sysfs.c
-+++ b/block/blk-sysfs.c
-@@ -833,7 +833,7 @@ int blk_register_queue(struct gendisk *disk)
- goto put_dev;
- }
-
-- ret = blk_crypto_sysfs_register(q);
-+ ret = blk_crypto_sysfs_register(disk);
- if (ret)
- goto put_dev;
-
-@@ -910,7 +910,7 @@ void blk_unregister_queue(struct gendisk *disk)
- */
- if (queue_is_mq(q))
- blk_mq_sysfs_unregister(disk);
-- blk_crypto_sysfs_unregister(q);
-+ blk_crypto_sysfs_unregister(disk);
-
- mutex_lock(&q->sysfs_lock);
- elv_unregister_queue(q);
---
-2.39.0
-
+++ /dev/null
-From 5652ba6946df40af95d99e88d2c85700905e41a6 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Tue, 1 Nov 2022 16:00:47 +0100
-Subject: blk-mq: move the srcu_struct used for quiescing to the tagset
-
-From: Christoph Hellwig <hch@lst.de>
-
-[ Upstream commit 80bd4a7aab4c9ce59bf5e35fdf52aa23d8a3c9f5 ]
-
-All I/O submissions have fairly similar latencies, and a tagset-wide
-quiesce is a fairly common operation.
-
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Reviewed-by: Keith Busch <kbusch@kernel.org>
-Reviewed-by: Ming Lei <ming.lei@redhat.com>
-Reviewed-by: Chao Leng <lengchao@huawei.com>
-Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
-Reviewed-by: Hannes Reinecke <hare@suse.de>
-Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
-Link: https://lore.kernel.org/r/20221101150050.3510-12-hch@lst.de
-[axboe: fix whitespace]
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: 49e4d04f0486 ("block: Drop spurious might_sleep() from blk_put_queue()")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-core.c | 27 +++++----------------------
- block/blk-mq.c | 33 +++++++++++++++++++++++++--------
- block/blk-mq.h | 14 +++++++-------
- block/blk-sysfs.c | 9 ++-------
- block/blk.h | 9 +--------
- block/genhd.c | 2 +-
- include/linux/blk-mq.h | 4 ++++
- include/linux/blkdev.h | 9 ---------
- 8 files changed, 45 insertions(+), 62 deletions(-)
-
-diff --git a/block/blk-core.c b/block/blk-core.c
-index 5487912befe8..9d6a947024ea 100644
---- a/block/blk-core.c
-+++ b/block/blk-core.c
-@@ -65,7 +65,6 @@ DEFINE_IDA(blk_queue_ida);
- * For queue allocation
- */
- struct kmem_cache *blk_requestq_cachep;
--struct kmem_cache *blk_requestq_srcu_cachep;
-
- /*
- * Controlling structure to kblockd
-@@ -373,26 +372,20 @@ static void blk_timeout_work(struct work_struct *work)
- {
- }
-
--struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
-+struct request_queue *blk_alloc_queue(int node_id)
- {
- struct request_queue *q;
-
-- q = kmem_cache_alloc_node(blk_get_queue_kmem_cache(alloc_srcu),
-- GFP_KERNEL | __GFP_ZERO, node_id);
-+ q = kmem_cache_alloc_node(blk_requestq_cachep, GFP_KERNEL | __GFP_ZERO,
-+ node_id);
- if (!q)
- return NULL;
-
-- if (alloc_srcu) {
-- blk_queue_flag_set(QUEUE_FLAG_HAS_SRCU, q);
-- if (init_srcu_struct(q->srcu) != 0)
-- goto fail_q;
-- }
--
- q->last_merge = NULL;
-
- q->id = ida_alloc(&blk_queue_ida, GFP_KERNEL);
- if (q->id < 0)
-- goto fail_srcu;
-+ goto fail_q;
-
- q->stats = blk_alloc_queue_stats();
- if (!q->stats)
-@@ -434,11 +427,8 @@ struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu)
- blk_free_queue_stats(q->stats);
- fail_id:
- ida_free(&blk_queue_ida, q->id);
--fail_srcu:
-- if (alloc_srcu)
-- cleanup_srcu_struct(q->srcu);
- fail_q:
-- kmem_cache_free(blk_get_queue_kmem_cache(alloc_srcu), q);
-+ kmem_cache_free(blk_requestq_cachep, q);
- return NULL;
- }
-
-@@ -1183,9 +1173,6 @@ int __init blk_dev_init(void)
- sizeof_field(struct request, cmd_flags));
- BUILD_BUG_ON(REQ_OP_BITS + REQ_FLAG_BITS > 8 *
- sizeof_field(struct bio, bi_opf));
-- BUILD_BUG_ON(ALIGN(offsetof(struct request_queue, srcu),
-- __alignof__(struct request_queue)) !=
-- sizeof(struct request_queue));
-
- /* used for unplugging and affects IO latency/throughput - HIGHPRI */
- kblockd_workqueue = alloc_workqueue("kblockd",
-@@ -1196,10 +1183,6 @@ int __init blk_dev_init(void)
- blk_requestq_cachep = kmem_cache_create("request_queue",
- sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
-
-- blk_requestq_srcu_cachep = kmem_cache_create("request_queue_srcu",
-- sizeof(struct request_queue) +
-- sizeof(struct srcu_struct), 0, SLAB_PANIC, NULL);
--
- blk_debugfs_root = debugfs_create_dir("block", NULL);
-
- return 0;
-diff --git a/block/blk-mq.c b/block/blk-mq.c
-index 63abbe342b28..bbf500537f75 100644
---- a/block/blk-mq.c
-+++ b/block/blk-mq.c
-@@ -261,8 +261,8 @@ EXPORT_SYMBOL_GPL(blk_mq_quiesce_queue_nowait);
- */
- void blk_mq_wait_quiesce_done(struct request_queue *q)
- {
-- if (blk_queue_has_srcu(q))
-- synchronize_srcu(q->srcu);
-+ if (q->tag_set->flags & BLK_MQ_F_BLOCKING)
-+ synchronize_srcu(q->tag_set->srcu);
- else
- synchronize_rcu();
- }
-@@ -4010,7 +4010,7 @@ static struct request_queue *blk_mq_init_queue_data(struct blk_mq_tag_set *set,
- struct request_queue *q;
- int ret;
-
-- q = blk_alloc_queue(set->numa_node, set->flags & BLK_MQ_F_BLOCKING);
-+ q = blk_alloc_queue(set->numa_node);
- if (!q)
- return ERR_PTR(-ENOMEM);
- q->queuedata = queuedata;
-@@ -4182,9 +4182,6 @@ static void blk_mq_update_poll_flag(struct request_queue *q)
- int blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
- struct request_queue *q)
- {
-- WARN_ON_ONCE(blk_queue_has_srcu(q) !=
-- !!(set->flags & BLK_MQ_F_BLOCKING));
--
- /* mark the queue as mq asap */
- q->mq_ops = set->ops;
-
-@@ -4441,8 +4438,18 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
- if (set->nr_maps == 1 && set->nr_hw_queues > nr_cpu_ids)
- set->nr_hw_queues = nr_cpu_ids;
-
-- if (blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues) < 0)
-- return -ENOMEM;
-+ if (set->flags & BLK_MQ_F_BLOCKING) {
-+ set->srcu = kmalloc(sizeof(*set->srcu), GFP_KERNEL);
-+ if (!set->srcu)
-+ return -ENOMEM;
-+ ret = init_srcu_struct(set->srcu);
-+ if (ret)
-+ goto out_free_srcu;
-+ }
-+
-+ ret = blk_mq_alloc_tag_set_tags(set, set->nr_hw_queues);
-+ if (ret)
-+ goto out_cleanup_srcu;
-
- ret = -ENOMEM;
- for (i = 0; i < set->nr_maps; i++) {
-@@ -4472,6 +4479,12 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
- }
- kfree(set->tags);
- set->tags = NULL;
-+out_cleanup_srcu:
-+ if (set->flags & BLK_MQ_F_BLOCKING)
-+ cleanup_srcu_struct(set->srcu);
-+out_free_srcu:
-+ if (set->flags & BLK_MQ_F_BLOCKING)
-+ kfree(set->srcu);
- return ret;
- }
- EXPORT_SYMBOL(blk_mq_alloc_tag_set);
-@@ -4511,6 +4524,10 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
-
- kfree(set->tags);
- set->tags = NULL;
-+ if (set->flags & BLK_MQ_F_BLOCKING) {
-+ cleanup_srcu_struct(set->srcu);
-+ kfree(set->srcu);
-+ }
- }
- EXPORT_SYMBOL(blk_mq_free_tag_set);
-
-diff --git a/block/blk-mq.h b/block/blk-mq.h
-index 0b2870839cdd..ef59fee62780 100644
---- a/block/blk-mq.h
-+++ b/block/blk-mq.h
-@@ -377,17 +377,17 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
- /* run the code block in @dispatch_ops with rcu/srcu read lock held */
- #define __blk_mq_run_dispatch_ops(q, check_sleep, dispatch_ops) \
- do { \
-- if (!blk_queue_has_srcu(q)) { \
-- rcu_read_lock(); \
-- (dispatch_ops); \
-- rcu_read_unlock(); \
-- } else { \
-+ if ((q)->tag_set->flags & BLK_MQ_F_BLOCKING) { \
- int srcu_idx; \
- \
- might_sleep_if(check_sleep); \
-- srcu_idx = srcu_read_lock((q)->srcu); \
-+ srcu_idx = srcu_read_lock((q)->tag_set->srcu); \
- (dispatch_ops); \
-- srcu_read_unlock((q)->srcu, srcu_idx); \
-+ srcu_read_unlock((q)->tag_set->srcu, srcu_idx); \
-+ } else { \
-+ rcu_read_lock(); \
-+ (dispatch_ops); \
-+ rcu_read_unlock(); \
- } \
- } while (0)
-
-diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
-index e71b3b43927c..e7871665825a 100644
---- a/block/blk-sysfs.c
-+++ b/block/blk-sysfs.c
-@@ -739,10 +739,8 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
-
- static void blk_free_queue_rcu(struct rcu_head *rcu_head)
- {
-- struct request_queue *q = container_of(rcu_head, struct request_queue,
-- rcu_head);
--
-- kmem_cache_free(blk_get_queue_kmem_cache(blk_queue_has_srcu(q)), q);
-+ kmem_cache_free(blk_requestq_cachep,
-+ container_of(rcu_head, struct request_queue, rcu_head));
- }
-
- /**
-@@ -779,9 +777,6 @@ static void blk_release_queue(struct kobject *kobj)
- if (queue_is_mq(q))
- blk_mq_release(q);
-
-- if (blk_queue_has_srcu(q))
-- cleanup_srcu_struct(q->srcu);
--
- ida_free(&blk_queue_ida, q->id);
- call_rcu(&q->rcu_head, blk_free_queue_rcu);
- }
-diff --git a/block/blk.h b/block/blk.h
-index 8b75a95b28d6..0661fa4b3a4d 100644
---- a/block/blk.h
-+++ b/block/blk.h
-@@ -27,7 +27,6 @@ struct blk_flush_queue {
- };
-
- extern struct kmem_cache *blk_requestq_cachep;
--extern struct kmem_cache *blk_requestq_srcu_cachep;
- extern struct kobj_type blk_queue_ktype;
- extern struct ida blk_queue_ida;
-
-@@ -428,13 +427,7 @@ int bio_add_hw_page(struct request_queue *q, struct bio *bio,
- struct page *page, unsigned int len, unsigned int offset,
- unsigned int max_sectors, bool *same_page);
-
--static inline struct kmem_cache *blk_get_queue_kmem_cache(bool srcu)
--{
-- if (srcu)
-- return blk_requestq_srcu_cachep;
-- return blk_requestq_cachep;
--}
--struct request_queue *blk_alloc_queue(int node_id, bool alloc_srcu);
-+struct request_queue *blk_alloc_queue(int node_id);
-
- int disk_scan_partitions(struct gendisk *disk, fmode_t mode, void *owner);
-
-diff --git a/block/genhd.c b/block/genhd.c
-index c4765681a8b4..f4f3f3b55634 100644
---- a/block/genhd.c
-+++ b/block/genhd.c
-@@ -1417,7 +1417,7 @@ struct gendisk *__blk_alloc_disk(int node, struct lock_class_key *lkclass)
- struct request_queue *q;
- struct gendisk *disk;
-
-- q = blk_alloc_queue(node, false);
-+ q = blk_alloc_queue(node);
- if (!q)
- return NULL;
-
-diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
-index d6119c5d1069..2952c28410e3 100644
---- a/include/linux/blk-mq.h
-+++ b/include/linux/blk-mq.h
-@@ -7,6 +7,7 @@
- #include <linux/lockdep.h>
- #include <linux/scatterlist.h>
- #include <linux/prefetch.h>
-+#include <linux/srcu.h>
-
- struct blk_mq_tags;
- struct blk_flush_queue;
-@@ -501,6 +502,8 @@ enum hctx_type {
- * @tag_list_lock: Serializes tag_list accesses.
- * @tag_list: List of the request queues that use this tag set. See also
- * request_queue.tag_set_list.
-+ * @srcu: Use as lock when type of the request queue is blocking
-+ * (BLK_MQ_F_BLOCKING).
- */
- struct blk_mq_tag_set {
- struct blk_mq_queue_map map[HCTX_MAX_TYPES];
-@@ -521,6 +524,7 @@ struct blk_mq_tag_set {
-
- struct mutex tag_list_lock;
- struct list_head tag_list;
-+ struct srcu_struct *srcu;
- };
-
- /**
-diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index 891f8cbcd043..36c286d22fb2 100644
---- a/include/linux/blkdev.h
-+++ b/include/linux/blkdev.h
-@@ -22,7 +22,6 @@
- #include <linux/blkzoned.h>
- #include <linux/sched.h>
- #include <linux/sbitmap.h>
--#include <linux/srcu.h>
- #include <linux/uuid.h>
- #include <linux/xarray.h>
-
-@@ -544,18 +543,11 @@ struct request_queue {
- struct mutex debugfs_mutex;
-
- bool mq_sysfs_init_done;
--
-- /**
-- * @srcu: Sleepable RCU. Use as lock when type of the request queue
-- * is blocking (BLK_MQ_F_BLOCKING). Must be the last member
-- */
-- struct srcu_struct srcu[];
- };
-
- /* Keep blk_queue_flag_name[] in sync with the definitions below */
- #define QUEUE_FLAG_STOPPED 0 /* queue is stopped */
- #define QUEUE_FLAG_DYING 1 /* queue being torn down */
--#define QUEUE_FLAG_HAS_SRCU 2 /* SRCU is allocated */
- #define QUEUE_FLAG_NOMERGES 3 /* disable merge attempts */
- #define QUEUE_FLAG_SAME_COMP 4 /* complete on same CPU-group */
- #define QUEUE_FLAG_FAIL_IO 5 /* fake timeout */
-@@ -591,7 +583,6 @@ bool blk_queue_flag_test_and_set(unsigned int flag, struct request_queue *q);
-
- #define blk_queue_stopped(q) test_bit(QUEUE_FLAG_STOPPED, &(q)->queue_flags)
- #define blk_queue_dying(q) test_bit(QUEUE_FLAG_DYING, &(q)->queue_flags)
--#define blk_queue_has_srcu(q) test_bit(QUEUE_FLAG_HAS_SRCU, &(q)->queue_flags)
- #define blk_queue_init_done(q) test_bit(QUEUE_FLAG_INIT_DONE, &(q)->queue_flags)
- #define blk_queue_nomerges(q) test_bit(QUEUE_FLAG_NOMERGES, &(q)->queue_flags)
- #define blk_queue_noxmerges(q) \
---
-2.39.0
-
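The point of the patch above is that a tag_set marked BLK_MQ_F_BLOCKING now owns the
srcu_struct used for quiescing, instead of every request_queue carrying one. A minimal
sketch of how a driver ends up with that SRCU after the change (illustrative only: the
example_* names and the queue depth are made up, while the blk-mq fields and calls follow
the API as modified above):

  #include <linux/blk-mq.h>
  #include <linux/numa.h>
  #include <linux/string.h>

  static int example_init_tag_set(struct blk_mq_tag_set *set,
                                  const struct blk_mq_ops *ops)
  {
          memset(set, 0, sizeof(*set));
          set->ops = ops;
          set->nr_hw_queues = 1;
          set->queue_depth = 64;
          set->numa_node = NUMA_NO_NODE;
          /* ->queue_rq may sleep, so ask for SRCU-protected dispatch */
          set->flags = BLK_MQ_F_BLOCKING;

          /*
           * With BLK_MQ_F_BLOCKING set, this now also allocates and
           * initializes the tagset-wide set->srcu that
           * blk_mq_wait_quiesce_done() synchronizes against.
           */
          return blk_mq_alloc_tag_set(set);
  }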
+++ /dev/null
-From 9ac03e3fd8e1bdbf1bca0fa281849d2257ee8329 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Fri, 6 Jan 2023 10:34:10 -1000
-Subject: block: Drop spurious might_sleep() from blk_put_queue()
-
-From: Tejun Heo <tj@kernel.org>
-
-[ Upstream commit 49e4d04f0486117ac57a97890eb1db6d52bf82b3 ]
-
-Dan reports that smatch detected the following:
-
- block/blk-cgroup.c:1863 blkcg_schedule_throttle() warn: sleeping in atomic context
-
-caused by blkcg_schedule_throttle() calling blk_put_queue() in a
-non-sleepable context.
-
-blk_put_queue() acquired might_sleep() in 63f93fd6fa57 ("block: mark
-blk_put_queue as potentially blocking") which transferred the might_sleep()
-from blk_free_queue().
-
-blk_free_queue() acquired might_sleep() in e8c7d14ac6c3 ("block: revert back
-to synchronous request_queue removal") while turning request_queue removal
-synchronous. However, this isn't necessary as nothing in the free path
-actually requires sleeping.
-
-It's pretty unusual to require a sleeping context in a put operation and
-it's not needed in the first place. Let's drop it.
-
-Signed-off-by: Tejun Heo <tj@kernel.org>
-Reported-by: Dan Carpenter <error27@gmail.com>
-Link: https://lkml.kernel.org/r/Y7g3L6fntnTtOm63@kili
-Cc: Christoph Hellwig <hch@lst.de>
-Cc: Luis Chamberlain <mcgrof@kernel.org>
-Fixes: e8c7d14ac6c3 ("block: revert back to synchronous request_queue removal") # v5.9+
-Reviewed-by: Christoph Hellwig <hch@lst.de>
-Link: https://lore.kernel.org/r/Y7iFwjN+XzWvLv3y@slm.duckdns.org
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-core.c | 3 ---
- 1 file changed, 3 deletions(-)
-
-diff --git a/block/blk-core.c b/block/blk-core.c
-index 815ffce6b988..f5ae527fb0c3 100644
---- a/block/blk-core.c
-+++ b/block/blk-core.c
-@@ -282,12 +282,9 @@ static void blk_free_queue(struct request_queue *q)
- *
- * Decrements the refcount of the request_queue and free it when the refcount
- * reaches 0.
-- *
-- * Context: Can sleep.
- */
- void blk_put_queue(struct request_queue *q)
- {
-- might_sleep();
- if (refcount_dec_and_test(&q->refs))
- blk_free_queue(q);
- }
---
-2.39.0
-
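The reasoning above is that nothing in blk_free_queue() sleeps: the final
kmem_cache_free() is deferred through call_rcu(), so even the last reference may be
dropped from a non-sleepable context, which is what blkcg_schedule_throttle() does. A put
operation of that shape, sketched with made-up example_obj names but real refcount_t/RCU
primitives, is safe under a spinlock or inside rcu_read_lock():

  #include <linux/container_of.h>
  #include <linux/rcupdate.h>
  #include <linux/refcount.h>
  #include <linux/slab.h>

  struct example_obj {
          refcount_t refs;
          struct rcu_head rcu_head;
  };

  static void example_obj_free_rcu(struct rcu_head *rcu_head)
  {
          kfree(container_of(rcu_head, struct example_obj, rcu_head));
  }

  static void example_obj_put(struct example_obj *obj)
  {
          /* no might_sleep(): freeing is deferred to an RCU callback */
          if (refcount_dec_and_test(&obj->refs))
                  call_rcu(&obj->rcu_head, example_obj_free_rcu);
  }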
+++ /dev/null
-From 9683dfad8af965906565c987bf3d6edc3ed2f24a Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 14 Nov 2022 05:26:34 +0100
-Subject: block: factor out a blk_debugfs_remove helper
-
-From: Christoph Hellwig <hch@lst.de>
-
-[ Upstream commit 6fc75f309d291d328b4ea2f91bef0ff56e4bc7c2 ]
-
-Split the debugfs removal from blk_unregister_queue into a helper so that
-it can be reused for blk_register_queue error handling.
-
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Link: https://lore.kernel.org/r/20221114042637.1009333-3-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: 49e4d04f0486 ("block: Drop spurious might_sleep() from blk_put_queue()")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-sysfs.c | 21 ++++++++++++++-------
- 1 file changed, 14 insertions(+), 7 deletions(-)
-
-diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
-index 2b1cf0b2a5c7..3d6951a0b4e7 100644
---- a/block/blk-sysfs.c
-+++ b/block/blk-sysfs.c
-@@ -797,6 +797,19 @@ struct kobj_type blk_queue_ktype = {
- .release = blk_release_queue,
- };
-
-+static void blk_debugfs_remove(struct gendisk *disk)
-+{
-+ struct request_queue *q = disk->queue;
-+
-+ mutex_lock(&q->debugfs_mutex);
-+ blk_trace_shutdown(q);
-+ debugfs_remove_recursive(q->debugfs_dir);
-+ q->debugfs_dir = NULL;
-+ q->sched_debugfs_dir = NULL;
-+ q->rqos_debugfs_dir = NULL;
-+ mutex_unlock(&q->debugfs_mutex);
-+}
-+
- /**
- * blk_register_queue - register a block layer queue with sysfs
- * @disk: Disk of which the request queue should be registered with sysfs.
-@@ -922,11 +935,5 @@ void blk_unregister_queue(struct gendisk *disk)
- kobject_del(&q->kobj);
- mutex_unlock(&q->sysfs_dir_lock);
-
-- mutex_lock(&q->debugfs_mutex);
-- blk_trace_shutdown(q);
-- debugfs_remove_recursive(q->debugfs_dir);
-- q->debugfs_dir = NULL;
-- q->sched_debugfs_dir = NULL;
-- q->rqos_debugfs_dir = NULL;
-- mutex_unlock(&q->debugfs_mutex);
-+ blk_debugfs_remove(disk);
- }
---
-2.39.0
-
+++ /dev/null
-From 520fc407ac237ecc98786169fb75fa8e35ea7d53 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 14 Nov 2022 05:26:35 +0100
-Subject: block: fix error unwinding in blk_register_queue
-
-From: Christoph Hellwig <hch@lst.de>
-
-[ Upstream commit 40602997be26887bdfa3d58659c3acb4579099e9 ]
-
-blk_register_queue fails to handle errors from blk_mq_sysfs_register,
-leaks various resources on errors and accidentally sets queue refs percpu
-refcount to percpu mode on kobject_add failure. Fix all that by
-properly unwinding on errors.
-
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Link: https://lore.kernel.org/r/20221114042637.1009333-4-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: 49e4d04f0486 ("block: Drop spurious might_sleep() from blk_put_queue()")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-sysfs.c | 28 ++++++++++++++++------------
- 1 file changed, 16 insertions(+), 12 deletions(-)
-
-diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
-index 3d6951a0b4e7..1631ba2f7259 100644
---- a/block/blk-sysfs.c
-+++ b/block/blk-sysfs.c
-@@ -820,13 +820,15 @@ int blk_register_queue(struct gendisk *disk)
- int ret;
-
- mutex_lock(&q->sysfs_dir_lock);
--
- ret = kobject_add(&q->kobj, &disk_to_dev(disk)->kobj, "queue");
- if (ret < 0)
-- goto unlock;
-+ goto out_unlock_dir;
-
-- if (queue_is_mq(q))
-- blk_mq_sysfs_register(disk);
-+ if (queue_is_mq(q)) {
-+ ret = blk_mq_sysfs_register(disk);
-+ if (ret)
-+ goto out_del_queue_kobj;
-+ }
- mutex_lock(&q->sysfs_lock);
-
- mutex_lock(&q->debugfs_mutex);
-@@ -838,17 +840,17 @@ int blk_register_queue(struct gendisk *disk)
-
- ret = disk_register_independent_access_ranges(disk);
- if (ret)
-- goto put_dev;
-+ goto out_debugfs_remove;
-
- if (q->elevator) {
- ret = elv_register_queue(q, false);
- if (ret)
-- goto put_dev;
-+ goto out_unregister_ia_ranges;
- }
-
- ret = blk_crypto_sysfs_register(disk);
- if (ret)
-- goto put_dev;
-+ goto out_elv_unregister;
-
- blk_queue_flag_set(QUEUE_FLAG_REGISTERED, q);
- wbt_enable_default(q);
-@@ -859,8 +861,6 @@ int blk_register_queue(struct gendisk *disk)
- if (q->elevator)
- kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
- mutex_unlock(&q->sysfs_lock);
--
--unlock:
- mutex_unlock(&q->sysfs_dir_lock);
-
- /*
-@@ -879,13 +879,17 @@ int blk_register_queue(struct gendisk *disk)
-
- return ret;
-
--put_dev:
-+out_elv_unregister:
- elv_unregister_queue(q);
-+out_unregister_ia_ranges:
- disk_unregister_independent_access_ranges(disk);
-+out_debugfs_remove:
-+ blk_debugfs_remove(disk);
- mutex_unlock(&q->sysfs_lock);
-- mutex_unlock(&q->sysfs_dir_lock);
-+out_del_queue_kobj:
- kobject_del(&q->kobj);
--
-+out_unlock_dir:
-+ mutex_unlock(&q->sysfs_dir_lock);
- return ret;
- }
-
---
-2.39.0
-
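The unwinding added above is the standard goto-ladder shape: each setup step that
succeeds gets a label that undoes it, and a later failure jumps to the label that rolls
back everything done so far, in reverse order. A generic sketch of that shape (every name
here is hypothetical, standing in for the real calls in the hunk above):

  int step_a(void);   /* e.g. add the queue kobject      */
  int step_b(void);   /* e.g. register the mq sysfs tree */
  int step_c(void);   /* e.g. register the elevator      */
  void undo_a(void);
  void undo_b(void);

  static int example_register(void)
  {
          int ret;

          ret = step_a();
          if (ret)
                  return ret;

          ret = step_b();
          if (ret)
                  goto out_undo_a;

          ret = step_c();
          if (ret)
                  goto out_undo_b;

          return 0;

  out_undo_b:
          undo_b();
  out_undo_a:
          undo_a();
          return ret;
  }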
+++ /dev/null
-From 6a52e5b9fb29a5ffd3d5bfc87155b10789be85c6 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 14 Nov 2022 05:26:37 +0100
-Subject: block: mark blk_put_queue as potentially blocking
-
-From: Christoph Hellwig <hch@lst.de>
-
-[ Upstream commit 63f93fd6fa5717769a78d6d7bea6f7f9a1ccca8e ]
-
-We can't just say that the last reference release may block, as any
-reference dropped could be the last one. So move the might_sleep() from
-blk_free_queue to blk_put_queue and update the documentation.
-
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Link: https://lore.kernel.org/r/20221114042637.1009333-6-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: 49e4d04f0486 ("block: Drop spurious might_sleep() from blk_put_queue()")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-core.c | 6 ++----
- 1 file changed, 2 insertions(+), 4 deletions(-)
-
-diff --git a/block/blk-core.c b/block/blk-core.c
-index 7de1bb16e9a7..815ffce6b988 100644
---- a/block/blk-core.c
-+++ b/block/blk-core.c
-@@ -260,8 +260,6 @@ static void blk_free_queue_rcu(struct rcu_head *rcu_head)
-
- static void blk_free_queue(struct request_queue *q)
- {
-- might_sleep();
--
- percpu_ref_exit(&q->q_usage_counter);
-
- if (q->poll_stat)
-@@ -285,11 +283,11 @@ static void blk_free_queue(struct request_queue *q)
- * Decrements the refcount of the request_queue and free it when the refcount
- * reaches 0.
- *
-- * Context: Any context, but the last reference must not be dropped from
-- * atomic context.
-+ * Context: Can sleep.
- */
- void blk_put_queue(struct request_queue *q)
- {
-+ might_sleep();
- if (refcount_dec_and_test(&q->refs))
- blk_free_queue(q);
- }
---
-2.39.0
-
+++ /dev/null
-From 474aee255bb1335ccd2f17e880d2e25b49db4a5a Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Mon, 14 Nov 2022 05:26:36 +0100
-Subject: block: untangle request_queue refcounting from sysfs
-
-From: Christoph Hellwig <hch@lst.de>
-
-[ Upstream commit 2bd85221a625b316114bafaab527770b607095d3 ]
-
-The kobject embedded into the request_queue is used for the queue
-directory in sysfs, but that is a child of the gendisk's directory and is
-intimately tied to it. Move this kobject to the gendisk and use a
-refcount_t in the request_queue for the actual request_queue refcounting
-that is completely unrelated to the device model.
-
-Signed-off-by: Christoph Hellwig <hch@lst.de>
-Link: https://lore.kernel.org/r/20221114042637.1009333-5-hch@lst.de
-Signed-off-by: Jens Axboe <axboe@kernel.dk>
-Stable-dep-of: 49e4d04f0486 ("block: Drop spurious might_sleep() from blk_put_queue()")
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- block/blk-core.c | 42 ++++++++++++++++----
- block/blk-crypto-sysfs.c | 4 +-
- block/blk-ia-ranges.c | 3 +-
- block/blk-sysfs.c | 86 +++++++++++-----------------------------
- block/blk.h | 4 --
- block/bsg.c | 11 +++--
- block/elevator.c | 2 +-
- include/linux/blkdev.h | 6 +--
- 8 files changed, 71 insertions(+), 87 deletions(-)
-
-diff --git a/block/blk-core.c b/block/blk-core.c
-index 9d6a947024ea..7de1bb16e9a7 100644
---- a/block/blk-core.c
-+++ b/block/blk-core.c
-@@ -59,12 +59,12 @@ EXPORT_TRACEPOINT_SYMBOL_GPL(block_split);
- EXPORT_TRACEPOINT_SYMBOL_GPL(block_unplug);
- EXPORT_TRACEPOINT_SYMBOL_GPL(block_rq_insert);
-
--DEFINE_IDA(blk_queue_ida);
-+static DEFINE_IDA(blk_queue_ida);
-
- /*
- * For queue allocation
- */
--struct kmem_cache *blk_requestq_cachep;
-+static struct kmem_cache *blk_requestq_cachep;
-
- /*
- * Controlling structure to kblockd
-@@ -252,19 +252,46 @@ void blk_clear_pm_only(struct request_queue *q)
- }
- EXPORT_SYMBOL_GPL(blk_clear_pm_only);
-
-+static void blk_free_queue_rcu(struct rcu_head *rcu_head)
-+{
-+ kmem_cache_free(blk_requestq_cachep,
-+ container_of(rcu_head, struct request_queue, rcu_head));
-+}
-+
-+static void blk_free_queue(struct request_queue *q)
-+{
-+ might_sleep();
-+
-+ percpu_ref_exit(&q->q_usage_counter);
-+
-+ if (q->poll_stat)
-+ blk_stat_remove_callback(q, q->poll_cb);
-+ blk_stat_free_callback(q->poll_cb);
-+
-+ blk_free_queue_stats(q->stats);
-+ kfree(q->poll_stat);
-+
-+ if (queue_is_mq(q))
-+ blk_mq_release(q);
-+
-+ ida_free(&blk_queue_ida, q->id);
-+ call_rcu(&q->rcu_head, blk_free_queue_rcu);
-+}
-+
- /**
- * blk_put_queue - decrement the request_queue refcount
- * @q: the request_queue structure to decrement the refcount for
- *
-- * Decrements the refcount of the request_queue kobject. When this reaches 0
-- * we'll have blk_release_queue() called.
-+ * Decrements the refcount of the request_queue and free it when the refcount
-+ * reaches 0.
- *
- * Context: Any context, but the last reference must not be dropped from
- * atomic context.
- */
- void blk_put_queue(struct request_queue *q)
- {
-- kobject_put(&q->kobj);
-+ if (refcount_dec_and_test(&q->refs))
-+ blk_free_queue(q);
- }
- EXPORT_SYMBOL(blk_put_queue);
-
-@@ -399,8 +426,7 @@ struct request_queue *blk_alloc_queue(int node_id)
- INIT_WORK(&q->timeout_work, blk_timeout_work);
- INIT_LIST_HEAD(&q->icq_list);
-
-- kobject_init(&q->kobj, &blk_queue_ktype);
--
-+ refcount_set(&q->refs, 1);
- mutex_init(&q->debugfs_mutex);
- mutex_init(&q->sysfs_lock);
- mutex_init(&q->sysfs_dir_lock);
-@@ -444,7 +470,7 @@ bool blk_get_queue(struct request_queue *q)
- {
- if (unlikely(blk_queue_dying(q)))
- return false;
-- kobject_get(&q->kobj);
-+ refcount_inc(&q->refs);
- return true;
- }
- EXPORT_SYMBOL(blk_get_queue);
-diff --git a/block/blk-crypto-sysfs.c b/block/blk-crypto-sysfs.c
-index e05f145cd797..55268edc0625 100644
---- a/block/blk-crypto-sysfs.c
-+++ b/block/blk-crypto-sysfs.c
-@@ -140,8 +140,8 @@ int blk_crypto_sysfs_register(struct gendisk *disk)
- return -ENOMEM;
- obj->profile = q->crypto_profile;
-
-- err = kobject_init_and_add(&obj->kobj, &blk_crypto_ktype, &q->kobj,
-- "crypto");
-+ err = kobject_init_and_add(&obj->kobj, &blk_crypto_ktype,
-+ &disk->queue_kobj, "crypto");
- if (err) {
- kobject_put(&obj->kobj);
- return err;
-diff --git a/block/blk-ia-ranges.c b/block/blk-ia-ranges.c
-index 2bd1d311033b..2141931ddd37 100644
---- a/block/blk-ia-ranges.c
-+++ b/block/blk-ia-ranges.c
-@@ -123,7 +123,8 @@ int disk_register_independent_access_ranges(struct gendisk *disk)
- */
- WARN_ON(iars->sysfs_registered);
- ret = kobject_init_and_add(&iars->kobj, &blk_ia_ranges_ktype,
-- &q->kobj, "%s", "independent_access_ranges");
-+ &disk->queue_kobj, "%s",
-+ "independent_access_ranges");
- if (ret) {
- disk->ia_ranges = NULL;
- kobject_put(&iars->kobj);
-diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
-index 1631ba2f7259..35e854bb6e0c 100644
---- a/block/blk-sysfs.c
-+++ b/block/blk-sysfs.c
-@@ -680,8 +680,8 @@ static struct attribute *queue_attrs[] = {
- static umode_t queue_attr_visible(struct kobject *kobj, struct attribute *attr,
- int n)
- {
-- struct request_queue *q =
-- container_of(kobj, struct request_queue, kobj);
-+ struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
-+ struct request_queue *q = disk->queue;
-
- if (attr == &queue_io_timeout_entry.attr &&
- (!q->mq_ops || !q->mq_ops->timeout))
-@@ -707,8 +707,8 @@ static ssize_t
- queue_attr_show(struct kobject *kobj, struct attribute *attr, char *page)
- {
- struct queue_sysfs_entry *entry = to_queue(attr);
-- struct request_queue *q =
-- container_of(kobj, struct request_queue, kobj);
-+ struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
-+ struct request_queue *q = disk->queue;
- ssize_t res;
-
- if (!entry->show)
-@@ -724,63 +724,19 @@ queue_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *page, size_t length)
- {
- struct queue_sysfs_entry *entry = to_queue(attr);
-- struct request_queue *q;
-+ struct gendisk *disk = container_of(kobj, struct gendisk, queue_kobj);
-+ struct request_queue *q = disk->queue;
- ssize_t res;
-
- if (!entry->store)
- return -EIO;
-
-- q = container_of(kobj, struct request_queue, kobj);
- mutex_lock(&q->sysfs_lock);
- res = entry->store(q, page, length);
- mutex_unlock(&q->sysfs_lock);
- return res;
- }
-
--static void blk_free_queue_rcu(struct rcu_head *rcu_head)
--{
-- kmem_cache_free(blk_requestq_cachep,
-- container_of(rcu_head, struct request_queue, rcu_head));
--}
--
--/**
-- * blk_release_queue - releases all allocated resources of the request_queue
-- * @kobj: pointer to a kobject, whose container is a request_queue
-- *
-- * This function releases all allocated resources of the request queue.
-- *
-- * The struct request_queue refcount is incremented with blk_get_queue() and
-- * decremented with blk_put_queue(). Once the refcount reaches 0 this function
-- * is called.
-- *
-- * Drivers exist which depend on the release of the request_queue to be
-- * synchronous, it should not be deferred.
-- *
-- * Context: can sleep
-- */
--static void blk_release_queue(struct kobject *kobj)
--{
-- struct request_queue *q =
-- container_of(kobj, struct request_queue, kobj);
--
-- might_sleep();
--
-- percpu_ref_exit(&q->q_usage_counter);
--
-- if (q->poll_stat)
-- blk_stat_remove_callback(q, q->poll_cb);
-- blk_stat_free_callback(q->poll_cb);
--
-- blk_free_queue_stats(q->stats);
-- kfree(q->poll_stat);
--
-- if (queue_is_mq(q))
-- blk_mq_release(q);
--
-- ida_free(&blk_queue_ida, q->id);
-- call_rcu(&q->rcu_head, blk_free_queue_rcu);
--}
--
- static const struct sysfs_ops queue_sysfs_ops = {
- .show = queue_attr_show,
- .store = queue_attr_store,
-@@ -791,10 +747,15 @@ static const struct attribute_group *blk_queue_attr_groups[] = {
- NULL
- };
-
--struct kobj_type blk_queue_ktype = {
-+static void blk_queue_release(struct kobject *kobj)
-+{
-+ /* nothing to do here, all data is associated with the parent gendisk */
-+}
-+
-+static struct kobj_type blk_queue_ktype = {
- .default_groups = blk_queue_attr_groups,
- .sysfs_ops = &queue_sysfs_ops,
-- .release = blk_release_queue,
-+ .release = blk_queue_release,
- };
-
- static void blk_debugfs_remove(struct gendisk *disk)
-@@ -820,20 +781,20 @@ int blk_register_queue(struct gendisk *disk)
- int ret;
-
- mutex_lock(&q->sysfs_dir_lock);
-- ret = kobject_add(&q->kobj, &disk_to_dev(disk)->kobj, "queue");
-+ kobject_init(&disk->queue_kobj, &blk_queue_ktype);
-+ ret = kobject_add(&disk->queue_kobj, &disk_to_dev(disk)->kobj, "queue");
- if (ret < 0)
-- goto out_unlock_dir;
-+ goto out_put_queue_kobj;
-
- if (queue_is_mq(q)) {
- ret = blk_mq_sysfs_register(disk);
- if (ret)
-- goto out_del_queue_kobj;
-+ goto out_put_queue_kobj;
- }
- mutex_lock(&q->sysfs_lock);
-
- mutex_lock(&q->debugfs_mutex);
-- q->debugfs_dir = debugfs_create_dir(kobject_name(q->kobj.parent),
-- blk_debugfs_root);
-+ q->debugfs_dir = debugfs_create_dir(disk->disk_name, blk_debugfs_root);
- if (queue_is_mq(q))
- blk_mq_debugfs_register(q);
- mutex_unlock(&q->debugfs_mutex);
-@@ -857,7 +818,7 @@ int blk_register_queue(struct gendisk *disk)
- blk_throtl_register(disk);
-
- /* Now everything is ready and send out KOBJ_ADD uevent */
-- kobject_uevent(&q->kobj, KOBJ_ADD);
-+ kobject_uevent(&disk->queue_kobj, KOBJ_ADD);
- if (q->elevator)
- kobject_uevent(&q->elevator->kobj, KOBJ_ADD);
- mutex_unlock(&q->sysfs_lock);
-@@ -886,9 +847,8 @@ int blk_register_queue(struct gendisk *disk)
- out_debugfs_remove:
- blk_debugfs_remove(disk);
- mutex_unlock(&q->sysfs_lock);
--out_del_queue_kobj:
-- kobject_del(&q->kobj);
--out_unlock_dir:
-+out_put_queue_kobj:
-+ kobject_put(&disk->queue_kobj);
- mutex_unlock(&q->sysfs_dir_lock);
- return ret;
- }
-@@ -935,8 +895,8 @@ void blk_unregister_queue(struct gendisk *disk)
- mutex_unlock(&q->sysfs_lock);
-
- /* Now that we've deleted all child objects, we can delete the queue. */
-- kobject_uevent(&q->kobj, KOBJ_REMOVE);
-- kobject_del(&q->kobj);
-+ kobject_uevent(&disk->queue_kobj, KOBJ_REMOVE);
-+ kobject_del(&disk->queue_kobj);
- mutex_unlock(&q->sysfs_dir_lock);
-
- blk_debugfs_remove(disk);
-diff --git a/block/blk.h b/block/blk.h
-index 0661fa4b3a4d..6fe583dd6e3b 100644
---- a/block/blk.h
-+++ b/block/blk.h
-@@ -26,10 +26,6 @@ struct blk_flush_queue {
- spinlock_t mq_flush_lock;
- };
-
--extern struct kmem_cache *blk_requestq_cachep;
--extern struct kobj_type blk_queue_ktype;
--extern struct ida blk_queue_ida;
--
- bool is_flush_rq(struct request *req);
-
- struct blk_flush_queue *blk_alloc_flush_queue(int node, int cmd_size,
-diff --git a/block/bsg.c b/block/bsg.c
-index 2ab1351eb082..8eba57b9bb46 100644
---- a/block/bsg.c
-+++ b/block/bsg.c
-@@ -175,8 +175,10 @@ static void bsg_device_release(struct device *dev)
-
- void bsg_unregister_queue(struct bsg_device *bd)
- {
-- if (bd->queue->kobj.sd)
-- sysfs_remove_link(&bd->queue->kobj, "bsg");
-+ struct gendisk *disk = bd->queue->disk;
-+
-+ if (disk && disk->queue_kobj.sd)
-+ sysfs_remove_link(&disk->queue_kobj, "bsg");
- cdev_device_del(&bd->cdev, &bd->device);
- put_device(&bd->device);
- }
-@@ -216,8 +218,9 @@ struct bsg_device *bsg_register_queue(struct request_queue *q,
- if (ret)
- goto out_put_device;
-
-- if (q->kobj.sd) {
-- ret = sysfs_create_link(&q->kobj, &bd->device.kobj, "bsg");
-+ if (q->disk && q->disk->queue_kobj.sd) {
-+ ret = sysfs_create_link(&q->disk->queue_kobj, &bd->device.kobj,
-+ "bsg");
- if (ret)
- goto out_device_del;
- }
-diff --git a/block/elevator.c b/block/elevator.c
-index bd71f0fc4e4b..ac096f494911 100644
---- a/block/elevator.c
-+++ b/block/elevator.c
-@@ -499,7 +499,7 @@ int elv_register_queue(struct request_queue *q, bool uevent)
-
- lockdep_assert_held(&q->sysfs_lock);
-
-- error = kobject_add(&e->kobj, &q->kobj, "%s", "iosched");
-+ error = kobject_add(&e->kobj, &q->disk->queue_kobj, "iosched");
- if (!error) {
- struct elv_fs_entry *attr = e->type->elevator_attrs;
- if (attr) {
-diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
-index 36c286d22fb2..17dc59c5aa90 100644
---- a/include/linux/blkdev.h
-+++ b/include/linux/blkdev.h
-@@ -155,6 +155,7 @@ struct gendisk {
- unsigned open_partitions; /* number of open partitions */
-
- struct backing_dev_info *bdi;
-+ struct kobject queue_kobj; /* the queue/ directory */
- struct kobject *slave_dir;
- #ifdef CONFIG_BLOCK_HOLDER_DEPRECATED
- struct list_head slave_bdevs;
-@@ -437,10 +438,7 @@ struct request_queue {
-
- struct gendisk *disk;
-
-- /*
-- * queue kobject
-- */
-- struct kobject kobj;
-+ refcount_t refs;
-
- /*
- * mq queue kobject
---
-2.39.0
-
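Two mechanisms carry the patch above: a plain refcount_t for the request_queue lifetime,
and the usual embedded-kobject pattern for sysfs, where show/store callbacks recover the
owning gendisk from &disk->queue_kobj with container_of(). A minimal sketch of that
second pattern, using a hypothetical kobj_attribute-based attribute (example_show) rather
than the block layer's own queue_sysfs_entry machinery; gendisk->queue_kobj and
disk->queue are as introduced above:

  #include <linux/blkdev.h>
  #include <linux/kobject.h>

  static ssize_t example_show(struct kobject *kobj,
                              struct kobj_attribute *attr, char *page)
  {
          /* kobj is &disk->queue_kobj, so step back to the gendisk ... */
          struct gendisk *disk = container_of(kobj, struct gendisk,
                                              queue_kobj);
          /* ... and from there to the request_queue it exposes */
          struct request_queue *q = disk->queue;

          return sysfs_emit(page, "%d\n", blk_queue_dying(q) ? 1 : 0);
  }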
interconnect-qcom-msm8996-fix-regmap-max_register-va.patch
hid-amd_sfh-fix-warning-unwind-goto.patch
tomoyo-fix-broken-dependency-on-.conf.default.patch
-blk-mq-move-the-srcu_struct-used-for-quiescing-to-th.patch
-blk-crypto-pass-a-gendisk-to-blk_crypto_sysfs_-un-re.patch
-block-factor-out-a-blk_debugfs_remove-helper.patch
-block-fix-error-unwinding-in-blk_register_queue.patch
-block-untangle-request_queue-refcounting-from-sysfs.patch
-block-mark-blk_put_queue-as-potentially-blocking.patch
-block-drop-spurious-might_sleep-from-blk_put_queue.patch
rdma-rxe-fix-inaccurate-constants-in-rxe_type_info.patch
rdma-rxe-prevent-faulty-rkey-generation.patch
erofs-fix-kvcalloc-misuse-with-__gfp_nofail.patch