--- /dev/null
+From 0bab7f034d227d42cb89a91a060f18ecc3f80390 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 8 Aug 2023 01:12:38 +0900
+Subject: btrfs: zoned: no longer count fresh BG region as zone unusable
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+[ Upstream commit 6a8ebc773ef64c8f12d6d60fd6e53d5ccc81314b ]
+
+Now that we have switched to write-time activation, we no longer need to
+(and must not) count the fresh region as zone unusable. This commit is
+similar to a revert of commit fa2068d7e922b434eb ("btrfs: zoned: count
+fresh BG region as zone unusable").
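+
+For a fresh zoned block group this means btrfs_calc_zone_unusable() now
+always uses the plain formula below. As a hypothetical example (numbers
+not from this commit), a 256M block group with a 192M zone capacity and
+nothing allocated yet gives:
+
+    unusable = (alloc_offset - used) + (length - zone_capacity)
+             = (0 - 0) + (256M - 192M) = 64M
+    free     = zone_capacity - alloc_offset = 192M - 0 = 192M
+
+so only the capacity gap is unusable and the fresh region counts as free.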
+
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/free-space-cache.c | 8 +-------
+ fs/btrfs/zoned.c | 26 +++-----------------------
+ 2 files changed, 4 insertions(+), 30 deletions(-)
+
+diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
+index 4cd8e44cba4c5..b27795e13ff31 100644
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2685,13 +2685,8 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ bg_reclaim_threshold = READ_ONCE(sinfo->bg_reclaim_threshold);
+
+ spin_lock(&ctl->tree_lock);
+- /* Count initial region as zone_unusable until it gets activated. */
+ if (!used)
+ to_free = size;
+- else if (initial &&
+- test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &block_group->fs_info->flags) &&
+- (block_group->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM)))
+- to_free = 0;
+ else if (initial)
+ to_free = block_group->zone_capacity;
+ else if (offset >= block_group->alloc_offset)
+@@ -2719,8 +2714,7 @@ static int __btrfs_add_free_space_zoned(struct btrfs_block_group *block_group,
+ reclaimable_unusable = block_group->zone_unusable -
+ (block_group->length - block_group->zone_capacity);
+ /* All the region is now unusable. Mark it as unused and reclaim */
+- if (block_group->zone_unusable == block_group->length &&
+- block_group->alloc_offset) {
++ if (block_group->zone_unusable == block_group->length) {
+ btrfs_mark_bg_unused(block_group);
+ } else if (bg_reclaim_threshold &&
+ reclaimable_unusable >=
+diff --git a/fs/btrfs/zoned.c b/fs/btrfs/zoned.c
+index 675dbed075d8e..99cb690da9893 100644
+--- a/fs/btrfs/zoned.c
++++ b/fs/btrfs/zoned.c
+@@ -1574,19 +1574,9 @@ void btrfs_calc_zone_unusable(struct btrfs_block_group *cache)
+ return;
+
+ WARN_ON(cache->bytes_super != 0);
+-
+- /* Check for block groups never get activated */
+- if (test_bit(BTRFS_FS_ACTIVE_ZONE_TRACKING, &cache->fs_info->flags) &&
+- cache->flags & (BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_SYSTEM) &&
+- !test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &cache->runtime_flags) &&
+- cache->alloc_offset == 0) {
+- unusable = cache->length;
+- free = 0;
+- } else {
+- unusable = (cache->alloc_offset - cache->used) +
+- (cache->length - cache->zone_capacity);
+- free = cache->zone_capacity - cache->alloc_offset;
+- }
++ unusable = (cache->alloc_offset - cache->used) +
++ (cache->length - cache->zone_capacity);
++ free = cache->zone_capacity - cache->alloc_offset;
+
+ /* We only need ->free_space in ALLOC_SEQ block groups */
+ cache->cached = BTRFS_CACHE_FINISHED;
+@@ -1882,7 +1872,6 @@ struct btrfs_device *btrfs_zoned_get_device(struct btrfs_fs_info *fs_info,
+ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+ {
+ struct btrfs_fs_info *fs_info = block_group->fs_info;
+- struct btrfs_space_info *space_info = block_group->space_info;
+ struct map_lookup *map;
+ struct btrfs_device *device;
+ u64 physical;
+@@ -1894,7 +1883,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+
+ map = block_group->physical_map;
+
+- spin_lock(&space_info->lock);
+ spin_lock(&block_group->lock);
+ if (test_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags)) {
+ ret = true;
+@@ -1923,14 +1911,7 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+
+ /* Successfully activated all the zones */
+ set_bit(BLOCK_GROUP_FLAG_ZONE_IS_ACTIVE, &block_group->runtime_flags);
+- WARN_ON(block_group->alloc_offset != 0);
+- if (block_group->zone_unusable == block_group->length) {
+- block_group->zone_unusable = block_group->length - block_group->zone_capacity;
+- space_info->bytes_zone_unusable -= block_group->zone_capacity;
+- }
+ spin_unlock(&block_group->lock);
+- btrfs_try_granting_tickets(fs_info, space_info);
+- spin_unlock(&space_info->lock);
+
+ /* For the active block group list */
+ btrfs_get_block_group(block_group);
+@@ -1943,7 +1924,6 @@ bool btrfs_zone_activate(struct btrfs_block_group *block_group)
+
+ out_unlock:
+ spin_unlock(&block_group->lock);
+- spin_unlock(&space_info->lock);
+ return ret;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From b521537e2b33db4a11fb2f50277c92c280b67b4a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 5 Jun 2023 15:03:16 +0800
+Subject: dm thin metadata: Fix ABBA deadlock by resetting dm_bufio_client
+
+From: Li Lingfeng <lilingfeng3@huawei.com>
+
+[ Upstream commit d48300120627a1cb98914738fff38b424625b8ad ]
+
+As described in commit 8111964f1b85 ("dm thin: Fix ABBA deadlock between
+shrink_slab and dm_pool_abort_metadata"), ABBA deadlocks will be
+triggered because shrinker_rwsem currently needs to be held by
+dm_pool_abort_metadata() as a side-effect of thin-pool metadata
+operation failure.
+
+The following three problem scenarios have been noticed:
+
+1) Described by commit 8111964f1b85 ("dm thin: Fix ABBA deadlock between
+ shrink_slab and dm_pool_abort_metadata")
+
+2) shrinker_rwsem and throttle->lock
+ P1(drop cache) P2(kworker)
+drop_caches_sysctl_handler
+ drop_slab
+ shrink_slab
+ down_read(&shrinker_rwsem) - LOCK A
+ do_shrink_slab
+ super_cache_scan
+ prune_icache_sb
+ dispose_list
+ evict
+ ext4_evict_inode
+ ext4_clear_inode
+ ext4_discard_preallocations
+ ext4_mb_load_buddy_gfp
+ ext4_mb_init_cache
+ ext4_wait_block_bitmap
+ __ext4_error
+ ext4_handle_error
+ ext4_commit_super
+ ...
+ dm_submit_bio
+ do_worker
+ throttle_work_update
+ down_write(&t->lock) -- LOCK B
+ process_deferred_bios
+ commit
+ metadata_operation_failed
+ dm_pool_abort_metadata
+ dm_block_manager_create
+ dm_bufio_client_create
+ register_shrinker
+ down_write(&shrinker_rwsem)
+ -- LOCK A
+ thin_map
+ thin_bio_map
+ thin_defer_bio_with_throttle
+ throttle_lock
+ down_read(&t->lock) - LOCK B
+
+3) shrinker_rwsem and wait_on_buffer
+ P1(drop cache) P2(kworker)
+drop_caches_sysctl_handler
+ drop_slab
+ shrink_slab
+ down_read(&shrinker_rwsem) - LOCK A
+ do_shrink_slab
+ ...
+ ext4_wait_block_bitmap
+ __ext4_error
+ ext4_handle_error
+ jbd2_journal_abort
+ jbd2_journal_update_sb_errno
+ jbd2_write_superblock
+ submit_bh
+ // LOCK B
+ // RELEASE B
+ do_worker
+ throttle_work_update
+ down_write(&t->lock) - LOCK B
+ process_deferred_bios
+ process_bio
+ commit
+ metadata_operation_failed
+ dm_pool_abort_metadata
+ dm_block_manager_create
+ dm_bufio_client_create
+ register_shrinker
+ register_shrinker_prepared
+ down_write(&shrinker_rwsem) - LOCK A
+ bio_endio
+ wait_on_buffer
+ __wait_on_buffer
+
+Fix these by resetting dm_bufio_client without holding shrinker_rwsem.
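+
+With this change, dm_pool_abort_metadata() no longer creates a
+replacement block manager, and so never takes shrinker_rwsem via
+register_shrinker() while the pool is locked. A simplified sketch of
+the new abort path, condensed from the diff below (error handling
+omitted):
+
+	pmd_write_lock(pmd);
+	__set_abort_with_changes_flags(pmd);
+	/* destroy data_sm/metadata_sm/nb_tm/tm, but keep pmd->bm */
+	__destroy_persistent_data_objects(pmd, false);
+	/* reset bm: drop_buffers() + flush shrink work, no shrinker churn */
+	dm_block_manager_reset(pmd->bm);
+	/* rebuild data_sm/metadata_sm/nb_tm/tm on the same bm */
+	r = __open_or_format_metadata(pmd, false);
+	pmd_write_unlock(pmd);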
+
+Fixes: 8111964f1b85 ("dm thin: Fix ABBA deadlock between shrink_slab and dm_pool_abort_metadata")
+Cc: stable@vger.kernel.org
+Signed-off-by: Li Lingfeng <lilingfeng3@huawei.com>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm-bufio.c | 7 +++
+ drivers/md/dm-thin-metadata.c | 58 ++++++++-----------
+ drivers/md/persistent-data/dm-block-manager.c | 6 ++
+ drivers/md/persistent-data/dm-block-manager.h | 1 +
+ drivers/md/persistent-data/dm-space-map.h | 3 +-
+ .../persistent-data/dm-transaction-manager.c | 3 +
+ include/linux/dm-bufio.h | 2 +
+ 7 files changed, 46 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
+index 382c5cc471952..100a6a236d92a 100644
+--- a/drivers/md/dm-bufio.c
++++ b/drivers/md/dm-bufio.c
+@@ -1914,6 +1914,13 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
+ }
+ EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
+
++void dm_bufio_client_reset(struct dm_bufio_client *c)
++{
++ drop_buffers(c);
++ flush_work(&c->shrink_work);
++}
++EXPORT_SYMBOL_GPL(dm_bufio_client_reset);
++
+ void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
+ {
+ c->start = start;
+diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
+index 4a0e15109997b..bb0e0a270f62a 100644
+--- a/drivers/md/dm-thin-metadata.c
++++ b/drivers/md/dm-thin-metadata.c
+@@ -597,6 +597,8 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
+ r = dm_tm_create_with_sm(pmd->bm, THIN_SUPERBLOCK_LOCATION,
+ &pmd->tm, &pmd->metadata_sm);
+ if (r < 0) {
++ pmd->tm = NULL;
++ pmd->metadata_sm = NULL;
+ DMERR("tm_create_with_sm failed");
+ return r;
+ }
+@@ -605,6 +607,7 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
+ if (IS_ERR(pmd->data_sm)) {
+ DMERR("sm_disk_create failed");
+ r = PTR_ERR(pmd->data_sm);
++ pmd->data_sm = NULL;
+ goto bad_cleanup_tm;
+ }
+
+@@ -635,11 +638,15 @@ static int __format_metadata(struct dm_pool_metadata *pmd)
+
+ bad_cleanup_nb_tm:
+ dm_tm_destroy(pmd->nb_tm);
++ pmd->nb_tm = NULL;
+ bad_cleanup_data_sm:
+ dm_sm_destroy(pmd->data_sm);
++ pmd->data_sm = NULL;
+ bad_cleanup_tm:
+ dm_tm_destroy(pmd->tm);
++ pmd->tm = NULL;
+ dm_sm_destroy(pmd->metadata_sm);
++ pmd->metadata_sm = NULL;
+
+ return r;
+ }
+@@ -705,6 +712,8 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+ sizeof(disk_super->metadata_space_map_root),
+ &pmd->tm, &pmd->metadata_sm);
+ if (r < 0) {
++ pmd->tm = NULL;
++ pmd->metadata_sm = NULL;
+ DMERR("tm_open_with_sm failed");
+ goto bad_unlock_sblock;
+ }
+@@ -714,6 +723,7 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+ if (IS_ERR(pmd->data_sm)) {
+ DMERR("sm_disk_open failed");
+ r = PTR_ERR(pmd->data_sm);
++ pmd->data_sm = NULL;
+ goto bad_cleanup_tm;
+ }
+
+@@ -740,9 +750,12 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
+
+ bad_cleanup_data_sm:
+ dm_sm_destroy(pmd->data_sm);
++ pmd->data_sm = NULL;
+ bad_cleanup_tm:
+ dm_tm_destroy(pmd->tm);
++ pmd->tm = NULL;
+ dm_sm_destroy(pmd->metadata_sm);
++ pmd->metadata_sm = NULL;
+ bad_unlock_sblock:
+ dm_bm_unlock(sblock);
+
+@@ -789,9 +802,13 @@ static void __destroy_persistent_data_objects(struct dm_pool_metadata *pmd,
+ bool destroy_bm)
+ {
+ dm_sm_destroy(pmd->data_sm);
++ pmd->data_sm = NULL;
+ dm_sm_destroy(pmd->metadata_sm);
++ pmd->metadata_sm = NULL;
+ dm_tm_destroy(pmd->nb_tm);
++ pmd->nb_tm = NULL;
+ dm_tm_destroy(pmd->tm);
++ pmd->tm = NULL;
+ if (destroy_bm)
+ dm_block_manager_destroy(pmd->bm);
+ }
+@@ -999,8 +1016,7 @@ int dm_pool_metadata_close(struct dm_pool_metadata *pmd)
+ __func__, r);
+ }
+ pmd_write_unlock(pmd);
+- if (!pmd->fail_io)
+- __destroy_persistent_data_objects(pmd, true);
++ __destroy_persistent_data_objects(pmd, true);
+
+ kfree(pmd);
+ return 0;
+@@ -1875,53 +1891,29 @@ static void __set_abort_with_changes_flags(struct dm_pool_metadata *pmd)
+ int dm_pool_abort_metadata(struct dm_pool_metadata *pmd)
+ {
+ int r = -EINVAL;
+- struct dm_block_manager *old_bm = NULL, *new_bm = NULL;
+
+ /* fail_io is double-checked with pmd->root_lock held below */
+ if (unlikely(pmd->fail_io))
+ return r;
+
+- /*
+- * Replacement block manager (new_bm) is created and old_bm destroyed outside of
+- * pmd root_lock to avoid ABBA deadlock that would result (due to life-cycle of
+- * shrinker associated with the block manager's bufio client vs pmd root_lock).
+- * - must take shrinker_rwsem without holding pmd->root_lock
+- */
+- new_bm = dm_block_manager_create(pmd->bdev, THIN_METADATA_BLOCK_SIZE << SECTOR_SHIFT,
+- THIN_MAX_CONCURRENT_LOCKS);
+-
+ pmd_write_lock(pmd);
+ if (pmd->fail_io) {
+ pmd_write_unlock(pmd);
+- goto out;
++ return r;
+ }
+-
+ __set_abort_with_changes_flags(pmd);
++
++ /* destroy data_sm/metadata_sm/nb_tm/tm */
+ __destroy_persistent_data_objects(pmd, false);
+- old_bm = pmd->bm;
+- if (IS_ERR(new_bm)) {
+- DMERR("could not create block manager during abort");
+- pmd->bm = NULL;
+- r = PTR_ERR(new_bm);
+- goto out_unlock;
+- }
+
+- pmd->bm = new_bm;
++ /* reset bm */
++ dm_block_manager_reset(pmd->bm);
++
++ /* rebuild data_sm/metadata_sm/nb_tm/tm */
+ r = __open_or_format_metadata(pmd, false);
+- if (r) {
+- pmd->bm = NULL;
+- goto out_unlock;
+- }
+- new_bm = NULL;
+-out_unlock:
+ if (r)
+ pmd->fail_io = true;
+ pmd_write_unlock(pmd);
+- dm_block_manager_destroy(old_bm);
+-out:
+- if (new_bm && !IS_ERR(new_bm))
+- dm_block_manager_destroy(new_bm);
+-
+ return r;
+ }
+
+diff --git a/drivers/md/persistent-data/dm-block-manager.c b/drivers/md/persistent-data/dm-block-manager.c
+index 1f40100908d7c..2bbfbb704c751 100644
+--- a/drivers/md/persistent-data/dm-block-manager.c
++++ b/drivers/md/persistent-data/dm-block-manager.c
+@@ -415,6 +415,12 @@ void dm_block_manager_destroy(struct dm_block_manager *bm)
+ }
+ EXPORT_SYMBOL_GPL(dm_block_manager_destroy);
+
++void dm_block_manager_reset(struct dm_block_manager *bm)
++{
++ dm_bufio_client_reset(bm->bufio);
++}
++EXPORT_SYMBOL_GPL(dm_block_manager_reset);
++
+ unsigned int dm_bm_block_size(struct dm_block_manager *bm)
+ {
+ return dm_bufio_get_block_size(bm->bufio);
+diff --git a/drivers/md/persistent-data/dm-block-manager.h b/drivers/md/persistent-data/dm-block-manager.h
+index 58a23b8ec1902..4371d85d3c258 100644
+--- a/drivers/md/persistent-data/dm-block-manager.h
++++ b/drivers/md/persistent-data/dm-block-manager.h
+@@ -35,6 +35,7 @@ struct dm_block_manager *dm_block_manager_create(
+ struct block_device *bdev, unsigned int block_size,
+ unsigned int max_held_per_thread);
+ void dm_block_manager_destroy(struct dm_block_manager *bm);
++void dm_block_manager_reset(struct dm_block_manager *bm);
+
+ unsigned int dm_bm_block_size(struct dm_block_manager *bm);
+ dm_block_t dm_bm_nr_blocks(struct dm_block_manager *bm);
+diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
+index a015cd11f6e97..85aa0a3974fe0 100644
+--- a/drivers/md/persistent-data/dm-space-map.h
++++ b/drivers/md/persistent-data/dm-space-map.h
+@@ -76,7 +76,8 @@ struct dm_space_map {
+
+ static inline void dm_sm_destroy(struct dm_space_map *sm)
+ {
+- sm->destroy(sm);
++ if (sm)
++ sm->destroy(sm);
+ }
+
+ static inline int dm_sm_extend(struct dm_space_map *sm, dm_block_t extra_blocks)
+diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
+index 39885f8355847..557a3ecfe75a0 100644
+--- a/drivers/md/persistent-data/dm-transaction-manager.c
++++ b/drivers/md/persistent-data/dm-transaction-manager.c
+@@ -197,6 +197,9 @@ EXPORT_SYMBOL_GPL(dm_tm_create_non_blocking_clone);
+
+ void dm_tm_destroy(struct dm_transaction_manager *tm)
+ {
++ if (!tm)
++ return;
++
+ if (!tm->is_clone)
+ wipe_shadow_table(tm);
+
+diff --git a/include/linux/dm-bufio.h b/include/linux/dm-bufio.h
+index 1262d92ab88fc..2e71ca35942e9 100644
+--- a/include/linux/dm-bufio.h
++++ b/include/linux/dm-bufio.h
+@@ -37,6 +37,8 @@ dm_bufio_client_create(struct block_device *bdev, unsigned int block_size,
+ */
+ void dm_bufio_client_destroy(struct dm_bufio_client *c);
+
++void dm_bufio_client_reset(struct dm_bufio_client *c);
++
+ /*
+ * Set the sector range.
+ * When this function is called, there must be no I/O in progress on the bufio
+--
+2.43.0
+
--- /dev/null
+From ac2445a60bff73bed71b346714f566862389bbfa Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jul 2023 11:30:33 -0300
+Subject: loop: do not enforce max_loop hard limit by (new) default
+
+From: Mauricio Faria de Oliveira <mfo@canonical.com>
+
+[ Upstream commit bb5faa99f0ce40756ab7bbbce4f16c01ca5ebd5a ]
+
+Problem:
+
+The max_loop parameter is used for 2 different purposes:
+
+1) initial number of loop devices to pre-create on init
+2) maximum number of loop devices to add on access/open()
+
+Historically, its default value (zero) caused 1) to create non-zero
+number of devices (CONFIG_BLK_DEV_LOOP_MIN_COUNT), and no hard limit on
+2) to add devices with autoloading.
+
+However, the default value changed in commit 85c50197716c ("loop: Fix
+the max_loop commandline argument treatment when it is set to 0") to
+CONFIG_BLK_DEV_LOOP_MIN_COUNT, for max_loop=0 not to pre-create devices.
+
+That does improve 1), but unfortunately it breaks 2), as the default
+behavior changed from no-limit to hard-limit.
+
+Example:
+
+For example, this userspace code broke for N >= CONFIG, if the user
+relied on the default value 0 for max_loop:
+
+ mknod("/dev/loopN");
+ open("/dev/loopN"); // now fails with ENXIO
+
+Though affected users may "fix" it with (loop.)max_loop=0, this would
+require a kernel parameter change on a stable kernel update (that commit
+Fixes: an old commit in stable).
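+
+(The ./test-loop program used in the Tests section below is not part of
+this commit; a minimal C equivalent, assuming N=8, might look like:)
+
+	/* test-loop.c: try to autoload a not-yet-created loop device */
+	#include <fcntl.h>
+	#include <stdio.h>
+	#include <sys/stat.h>
+	#include <sys/sysmacros.h>
+
+	int main(void)
+	{
+		/* loop devices are block major 7, so minor 8 is /dev/loop8 */
+		mknod("/dev/loop8", S_IFBLK | 0660, makedev(7, 8));
+		if (open("/dev/loop8", O_RDWR) < 0) {
+			perror("open: /dev/loop8");
+			return 1;
+		}
+		return 0;
+	}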
+
+Solution:
+
+The original semantics for the default value in 2) can be applied if the
+parameter is not set (i.e., the default behavior).
+
+This still keeps the intended function in 1) and 2) if set, and that
+commit's intended improvement in 1) if max_loop=0.
+
+Before 85c50197716c:
+ - default: 1) CONFIG devices 2) no limit
+ - max_loop=0: 1) CONFIG devices 2) no limit
+ - max_loop=X: 1) X devices 2) X limit
+
+After 85c50197716c:
+ - default: 1) CONFIG devices 2) CONFIG limit (*)
+ - max_loop=0: 1) 0 devices (*) 2) no limit
+ - max_loop=X: 1) X devices 2) X limit
+
+This commit:
+ - default: 1) CONFIG devices 2) no limit (*)
+ - max_loop=0: 1) 0 devices 2) no limit
+ - max_loop=X: 1) X devices 2) X limit
+
+Future:
+
+The issue/regression from that commit only affects code under the
+CONFIG_BLOCK_LEGACY_AUTOLOAD deprecation guard, thus the fix too is
+contained under it.
+
+Once that deprecated functionality/code is removed, the purpose 2) of
+max_loop (hard limit) is no longer in use, so the module parameter
+description can be changed then.
+
+Tests:
+
+Linux 6.4-rc7
+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8
+CONFIG_BLOCK_LEGACY_AUTOLOAD=y
+
+- default (original)
+
+ # ls -1 /dev/loop*
+ /dev/loop-control
+ /dev/loop0
+ ...
+ /dev/loop7
+
+ # ./test-loop
+ open: /dev/loop8: No such device or address
+
+- default (patched)
+
+ # ls -1 /dev/loop*
+ /dev/loop-control
+ /dev/loop0
+ ...
+ /dev/loop7
+
+ # ./test-loop
+ #
+
+- max_loop=0 (original & patched):
+
+ # ls -1 /dev/loop*
+ /dev/loop-control
+
+ # ./test-loop
+ #
+
+- max_loop=8 (original & patched):
+
+ # ls -1 /dev/loop*
+ /dev/loop-control
+ /dev/loop0
+ ...
+ /dev/loop7
+
+ # ./test-loop
+ open: /dev/loop8: No such device or address
+
+- max_loop=0 (patched; CONFIG_BLOCK_LEGACY_AUTOLOAD is not set)
+
+ # ls -1 /dev/loop*
+ /dev/loop-control
+
+ # ./test-loop
+ open: /dev/loop8: No such device or address
+
+Fixes: 85c50197716c ("loop: Fix the max_loop commandline argument treatment when it is set to 0")
+Signed-off-by: Mauricio Faria de Oliveira <mfo@canonical.com>
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Link: https://lore.kernel.org/r/20230720143033.841001-3-mfo@canonical.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/loop.c | 36 ++++++++++++++++++++++++++++++++++--
+ 1 file changed, 34 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/block/loop.c b/drivers/block/loop.c
+index 426d0b42685a0..d74f8eb7f5293 100644
+--- a/drivers/block/loop.c
++++ b/drivers/block/loop.c
+@@ -1777,14 +1777,43 @@ static const struct block_device_operations lo_fops = {
+ /*
+ * If max_loop is specified, create that many devices upfront.
+ * This also becomes a hard limit. If max_loop is not specified,
++ * the default isn't a hard limit (as before commit 85c50197716c
++ * changed the default value from 0 for max_loop=0 reasons), just
+ * create CONFIG_BLK_DEV_LOOP_MIN_COUNT loop devices at module
+ * init time. Loop devices can be requested on-demand with the
+ * /dev/loop-control interface, or be instantiated by accessing
+ * a 'dead' device node.
+ */
+ static int max_loop = CONFIG_BLK_DEV_LOOP_MIN_COUNT;
+-module_param(max_loop, int, 0444);
++
++#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
++static bool max_loop_specified;
++
++static int max_loop_param_set_int(const char *val,
++ const struct kernel_param *kp)
++{
++ int ret;
++
++ ret = param_set_int(val, kp);
++ if (ret < 0)
++ return ret;
++
++ max_loop_specified = true;
++ return 0;
++}
++
++static const struct kernel_param_ops max_loop_param_ops = {
++ .set = max_loop_param_set_int,
++ .get = param_get_int,
++};
++
++module_param_cb(max_loop, &max_loop_param_ops, &max_loop, 0444);
+ MODULE_PARM_DESC(max_loop, "Maximum number of loop devices");
++#else
++module_param(max_loop, int, 0444);
++MODULE_PARM_DESC(max_loop, "Initial number of loop devices");
++#endif
++
+ module_param(max_part, int, 0444);
+ MODULE_PARM_DESC(max_part, "Maximum number of partitions per loop device");
+
+@@ -2093,7 +2122,7 @@ static void loop_probe(dev_t dev)
+ {
+ int idx = MINOR(dev) >> part_shift;
+
+- if (max_loop && idx >= max_loop)
++ if (max_loop_specified && max_loop && idx >= max_loop)
+ return;
+ loop_add(idx);
+ }
+@@ -2277,6 +2306,9 @@ module_exit(loop_exit);
+ static int __init max_loop_setup(char *str)
+ {
+ max_loop = simple_strtol(str, NULL, 0);
++#ifdef CONFIG_BLOCK_LEGACY_AUTOLOAD
++ max_loop_specified = true;
++#endif
+ return 1;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From dd74579a47bb1bbc1ec3e8c22f96031b4a2a8d21 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 9 Feb 2023 20:03:33 -0500
+Subject: Revert "drm/amd/display: Do not set DRR on pipe commit"
+
+From: Aric Cyr <aric.cyr@amd.com>
+
+[ Upstream commit 36951fc9460fce96bafd131ceb0f343cae6d3cb9 ]
+
+This reverts commit 4f1b5e739dfd1edde33329e3f376733a131fb1ff.
+
+[Why & How]
+The original change causes a regression. Revert it
+until a fix is available.
+
+Reviewed-by: Aric Cyr <aric.cyr@amd.com>
+Acked-by: Qingqing Zhuo <qingqing.zhuo@amd.com>
+Signed-off-by: Aric Cyr <aric.cyr@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+index 53262f6bc40b0..72bec33e371f3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c
+@@ -994,5 +994,8 @@ void dcn30_prepare_bandwidth(struct dc *dc,
+ dc->clk_mgr->funcs->set_max_memclk(dc->clk_mgr, dc->clk_mgr->bw_params->clk_table.entries[dc->clk_mgr->bw_params->clk_table.num_entries - 1].memclk_mhz);
+
+ dcn20_prepare_bandwidth(dc, context);
++
++ dc_dmub_srv_p_state_delegate(dc,
++ context->bw_ctx.bw.dcn.clk.fw_based_mclk_switching, context);
+ }
+
+--
+2.43.0
+
--- /dev/null
+From 6005e1dc3205ae740de40773f5d8b3529f7017d3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 18 Jan 2023 15:22:52 +0100
+Subject: RISC-V: Fix do_notify_resume / do_work_pending prototype
+
+From: Heiko Stuebner <heiko.stuebner@vrull.eu>
+
+[ Upstream commit 285b6a18daf1358e70a4c842884d9ff2d2fe53e2 ]
+
+Commit b0f4c74eadbf ("RISC-V: Fix unannoted hardirqs-on in return to
+userspace slow-path") renamed the do_notify_resume function to
+do_work_pending but did not change the prototype in signal.h.
+Do that now, as the original function does not exist anymore.
+
+Fixes: b0f4c74eadbf ("RISC-V: Fix unannoted hardirqs-on in return to userspace slow-path")
+Signed-off-by: Heiko Stuebner <heiko.stuebner@vrull.eu>
+Reviewed-by: Andrew Jones <ajones@ventanamicro.com>
+Reviewed-by: Conor Dooley <conor.dooley@microchip.com>
+Link: https://lore.kernel.org/r/20230118142252.337103-1-heiko@sntech.de
+Signed-off-by: Palmer Dabbelt <palmer@rivosinc.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/riscv/include/asm/signal.h | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h
+index 532c29ef03769..956ae0a01bad1 100644
+--- a/arch/riscv/include/asm/signal.h
++++ b/arch/riscv/include/asm/signal.h
+@@ -7,6 +7,6 @@
+ #include <uapi/asm/ptrace.h>
+
+ asmlinkage __visible
+-void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
++void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags);
+
+ #endif
+--
+2.43.0
+
--- /dev/null
+From 6aa537c3d6b65f8667a36c97a02e41b4dac5b7c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Dec 2023 17:04:24 +0100
+Subject: selftests: mptcp: join: fix subflow_send_ack lookup
+
+From: Geliang Tang <geliang.tang@linux.dev>
+
+[ Upstream commit c8f021eec5817601dbd25ab7e3ad5c720965c688 ]
+
+MPC backups tests will sometimes be skipped unexpectedly (for example,
+when compiling the kernel with an older version of gcc, such as gcc-8),
+since static functions like mptcp_subflow_send_ack can also be listed in
+/proc/kallsyms, with a 't' in front of them rather than a 'T' ('T' marks
+a global function):
+
+ > grep "mptcp_subflow_send_ack" /proc/kallsyms
+
+ 0000000000000000 T __pfx___mptcp_subflow_send_ack
+ 0000000000000000 T __mptcp_subflow_send_ack
+ 0000000000000000 t __pfx_mptcp_subflow_send_ack
+ 0000000000000000 t mptcp_subflow_send_ack
+
+In this case, mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"
+will be false, and the MPC backups tests will be skipped. This is not
+what we expected.
+
+The correct logic here should be: if mptcp_subflow_send_ack is not a
+global function in /proc/kallsyms, run these MPC backups tests. So a 'T'
+must be added in front of mptcp_subflow_send_ack.
+
+Fixes: 632978f0a961 ("selftests: mptcp: join: skip MPC backups tests if not supported")
+Cc: stable@vger.kernel.org
+Signed-off-by: Geliang Tang <geliang.tang@linux.dev>
+Reviewed-by: Mat Martineau <martineau@kernel.org>
+Signed-off-by: Matthieu Baerts <matttbe@kernel.org>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 8 ++++----
+ 1 file changed, 4 insertions(+), 4 deletions(-)
+
+diff --git a/tools/testing/selftests/net/mptcp/mptcp_join.sh b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+index ea6fc59e9f62f..e52d513009fb0 100755
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -2652,7 +2652,7 @@ backup_tests()
+ fi
+
+ if reset "mpc backup" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+ chk_join_nr 0 0 0
+@@ -2660,7 +2660,7 @@ backup_tests()
+ fi
+
+ if reset "mpc backup both sides" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow,backup
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,backup
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow
+@@ -2669,7 +2669,7 @@ backup_tests()
+ fi
+
+ if reset "mpc switch to backup" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+ chk_join_nr 0 0 0
+@@ -2677,7 +2677,7 @@ backup_tests()
+ fi
+
+ if reset "mpc switch to backup both sides" &&
+- continue_if mptcp_lib_kallsyms_doesnt_have "mptcp_subflow_send_ack$"; then
++ continue_if mptcp_lib_kallsyms_doesnt_have "T mptcp_subflow_send_ack$"; then
+ pm_nl_add_endpoint $ns1 10.0.1.1 flags subflow
+ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow
+ run_tests $ns1 $ns2 10.0.1.1 0 0 0 slow backup
+--
+2.43.0
+
smb-client-fix-oob-in-smbcalcsize.patch
drm-i915-reject-async-flips-with-bigjoiner.patch
9p-prevent-read-overrun-in-protocol-dump-tracepoint.patch
+risc-v-fix-do_notify_resume-do_work_pending-prototyp.patch
+loop-do-not-enforce-max_loop-hard-limit-by-new-defau.patch
+dm-thin-metadata-fix-abba-deadlock-by-resetting-dm_b.patch
+revert-drm-amd-display-do-not-set-drr-on-pipe-commit.patch
+btrfs-zoned-no-longer-count-fresh-bg-region-as-zone-.patch
+ubifs-fix-possible-dereference-after-free.patch
+ublk-move-ublk_cancel_dev-out-of-ub-mutex.patch
+selftests-mptcp-join-fix-subflow_send_ack-lookup.patch
--- /dev/null
+From 0b903ca54e6db911ec065d43970a50637d80e6eb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 5 Sep 2023 18:12:22 +0800
+Subject: ubifs: fix possible dereference after free
+
+From: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
+
+[ Upstream commit d81efd66106c03771ffc8637855a6ec24caa6350 ]
+
+'old_idx' could be dereferenced after free via the 'rb_link_node' call:
+in the "old idx added twice!" error path, do_insert_old_idx() frees
+'old_idx' but then falls through to rb_link_node(&old_idx->rb, ...),
+touching the freed node. Return early instead.
+
+Fixes: b5fda08ef213 ("ubifs: Fix memleak when insert_old_idx() failed")
+Co-developed-by: Ivanov Mikhail <ivanov.mikhail1@huawei-partners.com>
+Signed-off-by: Konstantin Meskhidze <konstantin.meskhidze@huawei.com>
+Reviewed-by: Zhihao Cheng <chengzhihao1@huawei.com>
+Signed-off-by: Richard Weinberger <richard@nod.at>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ubifs/tnc.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
+index 6b7d95b65f4b6..f4728e65d1bda 100644
+--- a/fs/ubifs/tnc.c
++++ b/fs/ubifs/tnc.c
+@@ -65,6 +65,7 @@ static void do_insert_old_idx(struct ubifs_info *c,
+ else {
+ ubifs_err(c, "old idx added twice!");
+ kfree(old_idx);
++ return;
+ }
+ }
+ rb_link_node(&old_idx->rb, parent, p);
+--
+2.43.0
+
--- /dev/null
+From eeb76dc852a79829125d744b0ca2ae446dd1cd1a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 9 Oct 2023 17:33:18 +0800
+Subject: ublk: move ublk_cancel_dev() out of ub->mutex
+
+From: Ming Lei <ming.lei@redhat.com>
+
+[ Upstream commit 85248d670b71d9edda9459ee14fdc85c8e9632c0 ]
+
+ublk_cancel_dev() just calls ublk_cancel_queue() to cancel all pending
+io commands after the ublk request queue is idle. The only protection
+needed is for the read & write of ubq->nr_io_ready and to avoid
+duplicated command cancellation, so add a per-queue lock with a cancel
+flag to provide this protection, and meanwhile move ublk_cancel_dev()
+out of ub->mutex.
+
+Then we no longer need to call io_uring_cmd_complete_in_task() to cancel
+a pending command. And the same cancel logic will be re-used for
+cancelable uring commands.
+
+This patch basically reverts commit ac5902f84bb5 ("ublk: fix AB-BA lockdep warning").
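+
+The protection itself is a small test-and-set under the new per-queue
+spinlock, roughly (as ublk_cancel_queue() does in the diff below):
+
+	/* mark each inflight io canceled exactly once */
+	spin_lock(&ubq->cancel_lock);
+	done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
+	if (!done)
+		io->flags |= UBLK_IO_FLAG_CANCELED;
+	spin_unlock(&ubq->cancel_lock);
+
+	/* complete the uring cmd outside the spinlock */
+	if (!done)
+		io_uring_cmd_done(io->cmd, UBLK_IO_RES_ABORT, 0,
+				  IO_URING_F_UNLOCKED);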
+
+Signed-off-by: Ming Lei <ming.lei@redhat.com>
+Link: https://lore.kernel.org/r/20231009093324.957829-4-ming.lei@redhat.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/block/ublk_drv.c | 40 +++++++++++++++++++++++-----------------
+ 1 file changed, 23 insertions(+), 17 deletions(-)
+
+diff --git a/drivers/block/ublk_drv.c b/drivers/block/ublk_drv.c
+index c2f0f74193f0e..3fa74051f31b4 100644
+--- a/drivers/block/ublk_drv.c
++++ b/drivers/block/ublk_drv.c
+@@ -103,6 +103,9 @@ struct ublk_uring_cmd_pdu {
+ */
+ #define UBLK_IO_FLAG_NEED_GET_DATA 0x08
+
++/* atomic RW with ubq->cancel_lock */
++#define UBLK_IO_FLAG_CANCELED 0x80000000
++
+ struct ublk_io {
+ /* userspace buffer address from io cmd */
+ __u64 addr;
+@@ -126,6 +129,7 @@ struct ublk_queue {
+ unsigned int max_io_sz;
+ bool force_abort;
+ unsigned short nr_io_ready; /* how many ios setup */
++ spinlock_t cancel_lock;
+ struct ublk_device *dev;
+ struct ublk_io ios[];
+ };
+@@ -1045,28 +1049,28 @@ static inline bool ublk_queue_ready(struct ublk_queue *ubq)
+ return ubq->nr_io_ready == ubq->q_depth;
+ }
+
+-static void ublk_cmd_cancel_cb(struct io_uring_cmd *cmd, unsigned issue_flags)
+-{
+- io_uring_cmd_done(cmd, UBLK_IO_RES_ABORT, 0, issue_flags);
+-}
+-
+ static void ublk_cancel_queue(struct ublk_queue *ubq)
+ {
+ int i;
+
+- if (!ublk_queue_ready(ubq))
+- return;
+-
+ for (i = 0; i < ubq->q_depth; i++) {
+ struct ublk_io *io = &ubq->ios[i];
+
+- if (io->flags & UBLK_IO_FLAG_ACTIVE)
+- io_uring_cmd_complete_in_task(io->cmd,
+- ublk_cmd_cancel_cb);
+- }
++ if (io->flags & UBLK_IO_FLAG_ACTIVE) {
++ bool done;
+
+- /* all io commands are canceled */
+- ubq->nr_io_ready = 0;
++ spin_lock(&ubq->cancel_lock);
++ done = !!(io->flags & UBLK_IO_FLAG_CANCELED);
++ if (!done)
++ io->flags |= UBLK_IO_FLAG_CANCELED;
++ spin_unlock(&ubq->cancel_lock);
++
++ if (!done)
++ io_uring_cmd_done(io->cmd,
++ UBLK_IO_RES_ABORT, 0,
++ IO_URING_F_UNLOCKED);
++ }
++ }
+ }
+
+ /* Cancel all pending commands, must be called after del_gendisk() returns */
+@@ -1113,7 +1117,6 @@ static void __ublk_quiesce_dev(struct ublk_device *ub)
+ blk_mq_quiesce_queue(ub->ub_disk->queue);
+ ublk_wait_tagset_rqs_idle(ub);
+ ub->dev_info.state = UBLK_S_DEV_QUIESCED;
+- ublk_cancel_dev(ub);
+ /* we are going to release task_struct of ubq_daemon and resets
+ * ->ubq_daemon to NULL. So in monitor_work, check on ubq_daemon causes UAF.
+ * Besides, monitor_work is not necessary in QUIESCED state since we have
+@@ -1136,6 +1139,7 @@ static void ublk_quiesce_work_fn(struct work_struct *work)
+ __ublk_quiesce_dev(ub);
+ unlock:
+ mutex_unlock(&ub->mutex);
++ ublk_cancel_dev(ub);
+ }
+
+ static void ublk_unquiesce_dev(struct ublk_device *ub)
+@@ -1175,8 +1179,8 @@ static void ublk_stop_dev(struct ublk_device *ub)
+ put_disk(ub->ub_disk);
+ ub->ub_disk = NULL;
+ unlock:
+- ublk_cancel_dev(ub);
+ mutex_unlock(&ub->mutex);
++ ublk_cancel_dev(ub);
+ cancel_delayed_work_sync(&ub->monitor_work);
+ }
+
+@@ -1353,6 +1357,7 @@ static int ublk_init_queue(struct ublk_device *ub, int q_id)
+ void *ptr;
+ int size;
+
++ spin_lock_init(&ubq->cancel_lock);
+ ubq->flags = ub->dev_info.flags;
+ ubq->q_id = q_id;
+ ubq->q_depth = ub->dev_info.queue_depth;
+@@ -1882,8 +1887,9 @@ static void ublk_queue_reinit(struct ublk_device *ub, struct ublk_queue *ubq)
+ int i;
+
+ WARN_ON_ONCE(!(ubq->ubq_daemon && ubq_daemon_is_dying(ubq)));
++
+ /* All old ioucmds have to be completed */
+- WARN_ON_ONCE(ubq->nr_io_ready);
++ ubq->nr_io_ready = 0;
+ /* old daemon is PF_EXITING, put it now */
+ put_task_struct(ubq->ubq_daemon);
+ /* We have to reset it to NULL, otherwise ub won't accept new FETCH_REQ */
+--
+2.43.0
+