--- /dev/null
+From 8dc4bb58a146655eb057247d7c9d19e73928715b Mon Sep 17 00:00:00 2001
+From: David Hildenbrand <david@redhat.com>
+Date: Thu, 12 Nov 2020 14:38:13 +0100
+Subject: mm/memory_hotplug: extend offline_and_remove_memory() to handle more than one memory block
+
+From: David Hildenbrand <david@redhat.com>
+
+commit 8dc4bb58a146655eb057247d7c9d19e73928715b upstream.
+
+virtio-mem soon wants to use offline_and_remove_memory() on memory that
+exceeds a single Linux memory block (memory_block_size_bytes()). Let's
+remove that restriction.
+
+Let's remember the old state and try to restore that if anything goes
+wrong. While re-onlining can, in general, fail, it's highly unlikely to
+happen (usually only when a notifier fails to allocate memory, and these
+are rather rare).
+
+This will be used by virtio-mem to offline+remove memory ranges that are
+bigger than a single memory block - for example, with a device block
+size of 1 GiB (e.g., gigantic pages in the hypervisor) and a Linux memory
+block size of 128 MiB.
+
+While we could compress the state into 2 bits, using 8 bits is much
+easier.
+
+This handling is similar to, but different from, acpi_scan_try_to_offline():
+
+a) We don't try to offline twice. I am not sure if this CONFIG_MEMCG
+optimization is still relevant - it should only apply to ZONE_NORMAL
+(where we have no guarantees). If relevant, we can always add it.
+
+b) acpi_scan_try_to_offline() simply onlines all memory in case
+something goes wrong. It doesn't restore the previous online type. Let's
+do that, so we won't overwrite what, e.g., user space configured.
+
+Reviewed-by: Wei Yang <richard.weiyang@linux.alibaba.com>
+Cc: "Michael S. Tsirkin" <mst@redhat.com>
+Cc: Jason Wang <jasowang@redhat.com>
+Cc: Pankaj Gupta <pankaj.gupta.linux@gmail.com>
+Cc: Michal Hocko <mhocko@kernel.org>
+Cc: Oscar Salvador <osalvador@suse.de>
+Cc: Wei Yang <richard.weiyang@linux.alibaba.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Link: https://lore.kernel.org/r/20201112133815.13332-28-david@redhat.com
+Signed-off-by: Michael S. Tsirkin <mst@redhat.com>
+Acked-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Ma Wupeng <mawupeng1@huawei.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/memory_hotplug.c | 105 ++++++++++++++++++++++++++++++++++++++++++++--------
+ 1 file changed, 89 insertions(+), 16 deletions(-)
+
+--- a/mm/memory_hotplug.c
++++ b/mm/memory_hotplug.c
+@@ -1788,39 +1788,112 @@ int remove_memory(int nid, u64 start, u6
+ }
+ EXPORT_SYMBOL_GPL(remove_memory);
+
++static int try_offline_memory_block(struct memory_block *mem, void *arg)
++{
++ uint8_t online_type = MMOP_ONLINE_KERNEL;
++ uint8_t **online_types = arg;
++ struct page *page;
++ int rc;
++
++ /*
++ * Sense the online_type via the zone of the memory block. Offlining
++ * with multiple zones within one memory block will be rejected
++ * by offlining code ... so we don't care about that.
++ */
++ page = pfn_to_online_page(section_nr_to_pfn(mem->start_section_nr));
++ if (page && zone_idx(page_zone(page)) == ZONE_MOVABLE)
++ online_type = MMOP_ONLINE_MOVABLE;
++
++ rc = device_offline(&mem->dev);
++ /*
++ * Default is MMOP_OFFLINE - change it only if offlining succeeded,
++ * so try_reonline_memory_block() can do the right thing.
++ */
++ if (!rc)
++ **online_types = online_type;
++
++ (*online_types)++;
++ /* Ignore if already offline. */
++ return rc < 0 ? rc : 0;
++}
++
++static int try_reonline_memory_block(struct memory_block *mem, void *arg)
++{
++ uint8_t **online_types = arg;
++ int rc;
++
++ if (**online_types != MMOP_OFFLINE) {
++ mem->online_type = **online_types;
++ rc = device_online(&mem->dev);
++ if (rc < 0)
++ pr_warn("%s: Failed to re-online memory: %d",
++ __func__, rc);
++ }
++
++ /* Continue processing all remaining memory blocks. */
++ (*online_types)++;
++ return 0;
++}
++
+ /*
+- * Try to offline and remove a memory block. Might take a long time to
+- * finish in case memory is still in use. Primarily useful for memory devices
+- * that logically unplugged all memory (so it's no longer in use) and want to
+- * offline + remove the memory block.
++ * Try to offline and remove memory. Might take a long time to finish in case
++ * memory is still in use. Primarily useful for memory devices that logically
++ * unplugged all memory (so it's no longer in use) and want to offline + remove
++ * that memory.
+ */
+ int offline_and_remove_memory(int nid, u64 start, u64 size)
+ {
+- struct memory_block *mem;
+- int rc = -EINVAL;
++ const unsigned long mb_count = size / memory_block_size_bytes();
++ uint8_t *online_types, *tmp;
++ int rc;
+
+ if (!IS_ALIGNED(start, memory_block_size_bytes()) ||
+- size != memory_block_size_bytes())
+- return rc;
++ !IS_ALIGNED(size, memory_block_size_bytes()) || !size)
++ return -EINVAL;
++
++ /*
++ * We'll remember the old online type of each memory block, so we can
++ * try to revert whatever we did when offlining one memory block fails
++ * after offlining some others succeeded.
++ */
++ online_types = kmalloc_array(mb_count, sizeof(*online_types),
++ GFP_KERNEL);
++ if (!online_types)
++ return -ENOMEM;
++ /*
++ * Initialize all states to MMOP_OFFLINE, so when we abort processing in
++ * try_offline_memory_block(), we'll skip all unprocessed blocks in
++ * try_reonline_memory_block().
++ */
++ memset(online_types, MMOP_OFFLINE, mb_count);
+
+ lock_device_hotplug();
+- mem = find_memory_block(__pfn_to_section(PFN_DOWN(start)));
+- if (mem)
+- rc = device_offline(&mem->dev);
+- /* Ignore if the device is already offline. */
+- if (rc > 0)
+- rc = 0;
++
++ tmp = online_types;
++ rc = walk_memory_blocks(start, size, &tmp, try_offline_memory_block);
+
+ /*
+- * In case we succeeded to offline the memory block, remove it.
++ * In case we succeeded to offline all memory, remove it.
+ * This cannot fail as it cannot get onlined in the meantime.
+ */
+ if (!rc) {
+ rc = try_remove_memory(nid, start, size);
+- WARN_ON_ONCE(rc);
++ if (rc)
++ pr_err("%s: Failed to remove memory: %d", __func__, rc);
++ }
++
++ /*
++ * Rollback what we did. While memory onlining might theoretically fail
++ * (nacked by a notifier), it barely ever happens.
++ */
++ if (rc) {
++ tmp = online_types;
++ walk_memory_blocks(start, size, &tmp,
++ try_reonline_memory_block);
+ }
+ unlock_device_hotplug();
+
++ kfree(online_types);
+ return rc;
+ }
+ EXPORT_SYMBOL_GPL(offline_and_remove_memory);
--- /dev/null
+From 003fb0a51162d940f25fc35e70b0996a12c9e08a Mon Sep 17 00:00:00 2001
+From: Christian Loehle <CLoehle@hyperstone.com>
+Date: Wed, 26 Apr 2023 16:59:39 +0000
+Subject: mmc: block: ensure error propagation for non-blk
+
+From: Christian Loehle <CLoehle@hyperstone.com>
+
+commit 003fb0a51162d940f25fc35e70b0996a12c9e08a upstream.
+
+Requests to the mmc layer usually come through block device I/O.
+The exceptions are the ioctl interface, the RPMB chardev ioctl
+and debugfs, which issue their own blk_mq requests through
+blk_execute_rq and do not query the BLK_STS error but the
+mmcblk-internal drv_op_result. This patch ensures that drv_op_result
+defaults to an error and has to be overwritten by the operation
+to be considered successful.
+
+The old behavior leads to a bug where the request never propagates
+the error, e.g. when it errors out directly at mmc_blk_mq_issue_rq
+because mmc_blk_part_switch fails. The ioctl caller of the RPMB chardev
+then can never see an error (BLK_STS_IOERR is set, but drv_op_result is
+unchanged) and thus may assume that its call executed successfully when
+it did not.
+
+While always checking the blk_execute_rq return value would be
+advisable, let's eliminate the error by always initializing
+drv_op_result to -EIO, to be overwritten on success (or with another
+error).
+
+Fixes: 614f0388f580 ("mmc: block: move single ioctl() commands to block requests")
+Signed-off-by: Christian Loehle <cloehle@hyperstone.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/59c17ada35664b818b7bd83752119b2d@hyperstone.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Christian Loehle <cloehle@hyperstone.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/mmc/core/block.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -253,6 +253,7 @@ static ssize_t power_ro_lock_store(struc
+ goto out_put;
+ }
+ req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_BOOT_WP;
++ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ blk_execute_rq(mq->queue, NULL, req, 0);
+ ret = req_to_mmc_queue_req(req)->drv_op_result;
+ blk_put_request(req);
+@@ -638,6 +639,7 @@ static int mmc_blk_ioctl_cmd(struct mmc_
+ idatas[0] = idata;
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
++ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ req_to_mmc_queue_req(req)->drv_op_data = idatas;
+ req_to_mmc_queue_req(req)->ioc_count = 1;
+ blk_execute_rq(mq->queue, NULL, req, 0);
+@@ -707,6 +709,7 @@ static int mmc_blk_ioctl_multi_cmd(struc
+ }
+ req_to_mmc_queue_req(req)->drv_op =
+ rpmb ? MMC_DRV_OP_IOCTL_RPMB : MMC_DRV_OP_IOCTL;
++ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ req_to_mmc_queue_req(req)->drv_op_data = idata;
+ req_to_mmc_queue_req(req)->ioc_count = num_of_cmds;
+ blk_execute_rq(mq->queue, NULL, req, 0);
+@@ -2749,6 +2752,7 @@ static int mmc_dbg_card_status_get(void
+ if (IS_ERR(req))
+ return PTR_ERR(req);
+ req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_CARD_STATUS;
++ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ blk_execute_rq(mq->queue, NULL, req, 0);
+ ret = req_to_mmc_queue_req(req)->drv_op_result;
+ if (ret >= 0) {
+@@ -2787,6 +2791,7 @@ static int mmc_ext_csd_open(struct inode
+ goto out_free;
+ }
+ req_to_mmc_queue_req(req)->drv_op = MMC_DRV_OP_GET_EXT_CSD;
++ req_to_mmc_queue_req(req)->drv_op_result = -EIO;
+ req_to_mmc_queue_req(req)->drv_op_data = &ext_csd;
+ blk_execute_rq(mq->queue, NULL, req, 0);
+ err = req_to_mmc_queue_req(req)->drv_op_result;
--- /dev/null
+From 92c5d1b860e9581d64baca76779576c0ab0d943d Mon Sep 17 00:00:00 2001
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Fri, 26 May 2023 11:13:32 +0900
+Subject: nilfs2: reject devices with insufficient block count
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 92c5d1b860e9581d64baca76779576c0ab0d943d upstream.
+
+The current sanity check for nilfs2 geometry information lacks checks for
+the number of segments stored in superblocks, so even for device images
+that have been destructively truncated or have an unusually high number of
+segments, the mount operation may succeed.
+
+This causes out-of-bounds block I/O on file system block reads or log
+writes to the segments, the latter in particular causing
+"a_ops->writepages" to repeatedly fail, which results in
+sync_inodes_sb() hanging.
+
+Fix this issue by checking the number of segments stored in the superblock
+and avoiding mounting devices that can cause out-of-bounds accesses. To
+eliminate the possibility of overflow when calculating the number of
+blocks required for the device from the number of segments, this also adds
+a helper function to calculate the upper bound on the number of segments
+and inserts a check using it.
+
+Link: https://lkml.kernel.org/r/20230526021332.3431-1-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: syzbot+7d50f1e54a12ba3aeae2@syzkaller.appspotmail.com
+ Link: https://syzkaller.appspot.com/bug?extid=7d50f1e54a12ba3aeae2
+Tested-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/the_nilfs.c | 44 +++++++++++++++++++++++++++++++++++++++++++-
+ 1 file changed, 43 insertions(+), 1 deletion(-)
+
+--- a/fs/nilfs2/the_nilfs.c
++++ b/fs/nilfs2/the_nilfs.c
+@@ -405,6 +405,18 @@ unsigned long nilfs_nrsvsegs(struct the_
+ 100));
+ }
+
++/**
++ * nilfs_max_segment_count - calculate the maximum number of segments
++ * @nilfs: nilfs object
++ */
++static u64 nilfs_max_segment_count(struct the_nilfs *nilfs)
++{
++ u64 max_count = U64_MAX;
++
++ do_div(max_count, nilfs->ns_blocks_per_segment);
++ return min_t(u64, max_count, ULONG_MAX);
++}
++
+ void nilfs_set_nsegments(struct the_nilfs *nilfs, unsigned long nsegs)
+ {
+ nilfs->ns_nsegments = nsegs;
+@@ -414,6 +426,8 @@ void nilfs_set_nsegments(struct the_nilf
+ static int nilfs_store_disk_layout(struct the_nilfs *nilfs,
+ struct nilfs_super_block *sbp)
+ {
++ u64 nsegments, nblocks;
++
+ if (le32_to_cpu(sbp->s_rev_level) < NILFS_MIN_SUPP_REV) {
+ nilfs_err(nilfs->ns_sb,
+ "unsupported revision (superblock rev.=%d.%d, current rev.=%d.%d). Please check the version of mkfs.nilfs(2).",
+@@ -457,7 +471,35 @@ static int nilfs_store_disk_layout(struc
+ return -EINVAL;
+ }
+
+- nilfs_set_nsegments(nilfs, le64_to_cpu(sbp->s_nsegments));
++ nsegments = le64_to_cpu(sbp->s_nsegments);
++ if (nsegments > nilfs_max_segment_count(nilfs)) {
++ nilfs_err(nilfs->ns_sb,
++ "segment count %llu exceeds upper limit (%llu segments)",
++ (unsigned long long)nsegments,
++ (unsigned long long)nilfs_max_segment_count(nilfs));
++ return -EINVAL;
++ }
++
++ nblocks = (u64)i_size_read(nilfs->ns_sb->s_bdev->bd_inode) >>
++ nilfs->ns_sb->s_blocksize_bits;
++ if (nblocks) {
++ u64 min_block_count = nsegments * nilfs->ns_blocks_per_segment;
++ /*
++ * To avoid failing to mount early device images without a
++ * second superblock, exclude that block count from the
++ * "min_block_count" calculation.
++ */
++
++ if (nblocks < min_block_count) {
++ nilfs_err(nilfs->ns_sb,
++ "total number of segment blocks %llu exceeds device size (%llu blocks)",
++ (unsigned long long)min_block_count,
++ (unsigned long long)nblocks);
++ return -EINVAL;
++ }
++ }
++
++ nilfs_set_nsegments(nilfs, nsegments);
+ nilfs->ns_crc_seed = le32_to_cpu(sbp->s_crc_seed);
+ return 0;
+ }
net-remove-decnet-leftovers-from-flow.h.patch
neighbour-delete-neigh_lookup_nodev-as-not-used.patch
batman-adv-switch-to-kstrtox.h-for-kstrtou64.patch
+mmc-block-ensure-error-propagation-for-non-blk.patch
+mm-memory_hotplug-extend-offline_and_remove_memory-to-handle-more-than-one-memory-block.patch
+nilfs2-reject-devices-with-insufficient-block-count.patch