--- /dev/null
+From 732b33d0dbf17e9483f0b50385bf606f724f50a2 Mon Sep 17 00:00:00 2001
+From: Harshvardhan Jha <harshvardhan.jha@oracle.com>
+Date: Tue, 27 Jul 2021 05:37:10 +0530
+Subject: 9p/xen: Fix end of loop tests for list_for_each_entry
+
+From: Harshvardhan Jha <harshvardhan.jha@oracle.com>
+
+commit 732b33d0dbf17e9483f0b50385bf606f724f50a2 upstream.
+
+This patch addresses the following problems:
+ - priv can never be NULL, so this part of the check is useless
+ - if the loop ran through the whole list, priv->client is invalid and
+   it is more appropriate and sufficient to check the
+   list_for_each_entry() end-of-loop condition.
+
+Link: http://lkml.kernel.org/r/20210727000709.225032-1-harshvardhan.jha@oracle.com
+Signed-off-by: Harshvardhan Jha <harshvardhan.jha@oracle.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Tested-by: Stefano Stabellini <sstabellini@kernel.org>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Dominique Martinet <asmadeus@codewreck.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ net/9p/trans_xen.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/net/9p/trans_xen.c
++++ b/net/9p/trans_xen.c
+@@ -138,7 +138,7 @@ static bool p9_xen_write_todo(struct xen
+
+ static int p9_xen_request(struct p9_client *client, struct p9_req_t *p9_req)
+ {
+- struct xen_9pfs_front_priv *priv = NULL;
++ struct xen_9pfs_front_priv *priv;
+ RING_IDX cons, prod, masked_cons, masked_prod;
+ unsigned long flags;
+ u32 size = p9_req->tc.size;
+@@ -151,7 +151,7 @@ static int p9_xen_request(struct p9_clie
+ break;
+ }
+ read_unlock(&xen_9pfs_lock);
+- if (!priv || priv->client != client)
++ if (list_entry_is_head(priv, &xen_9pfs_devs, list))
+ return -EINVAL;
+
+ num = p9_req->tc.tag % priv->num_rings;
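
A minimal user-space sketch (not part of the patch) of the idiom the fix relies
on: after a list_for_each_entry() loop that finds nothing, the iterator is a
bogus but non-NULL pointer derived from the list head, so the "not found" test
must compare against the head rather than against NULL. The macros below only
mimic <linux/list.h>.

  #include <stdio.h>
  #include <stddef.h>

  struct list_head { struct list_head *next, *prev; };

  #define container_of(ptr, type, member) \
          ((type *)((char *)(ptr) - offsetof(type, member)))
  #define list_entry(ptr, type, member) container_of(ptr, type, member)
  #define list_for_each_entry(pos, head, type, member)               \
          for (pos = list_entry((head)->next, type, member);         \
               &pos->member != (head);                               \
               pos = list_entry(pos->member.next, type, member))
  #define list_entry_is_head(pos, head, member) (&(pos)->member == (head))

  struct item { int id; struct list_head list; };

  int main(void)
  {
          struct list_head head = { &head, &head };  /* empty list */
          struct item *it;

          list_for_each_entry(it, &head, struct item, list)
                  if (it->id == 42)
                          break;

          /* 'it' is not NULL here even though nothing matched */
          if (list_entry_is_head(it, &head, list))
                  printf("not found: iterator points back at the head\n");
          return 0;
  }
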
--- /dev/null
+From 4d643b66089591b4769bcdb6fd1bfeff2fe301b8 Mon Sep 17 00:00:00 2001
+From: Niklas Cassel <niklas.cassel@wdc.com>
+Date: Wed, 11 Aug 2021 11:05:19 +0000
+Subject: blk-zoned: allow BLKREPORTZONE without CAP_SYS_ADMIN
+
+From: Niklas Cassel <niklas.cassel@wdc.com>
+
+commit 4d643b66089591b4769bcdb6fd1bfeff2fe301b8 upstream.
+
+A user space process should not need the CAP_SYS_ADMIN capability set
+in order to perform a BLKREPORTZONE ioctl.
+
+Getting the zone report is required in order to get the write pointer.
+Neither read() nor write() requires CAP_SYS_ADMIN, so it is reasonable
+that a user space process that can read/write from/to the device, also
+can get the write pointer. (Since e.g. writes have to be at the write
+pointer.)
+
+Fixes: 3ed05a987e0f ("blk-zoned: implement ioctls")
+Signed-off-by: Niklas Cassel <niklas.cassel@wdc.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
+Reviewed-by: Aravind Ramesh <aravind.ramesh@wdc.com>
+Reviewed-by: Adam Manzanares <a.manzanares@samsung.com>
+Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Cc: stable@vger.kernel.org # v4.10+
+Link: https://lore.kernel.org/r/20210811110505.29649-3-Niklas.Cassel@wdc.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-zoned.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -288,9 +288,6 @@ int blkdev_report_zones_ioctl(struct blo
+ if (!blk_queue_is_zoned(q))
+ return -ENOTTY;
+
+- if (!capable(CAP_SYS_ADMIN))
+- return -EACCES;
+-
+ if (copy_from_user(&rep, argp, sizeof(struct blk_zone_report)))
+ return -EFAULT;
+
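
A short user-space sketch of what this change enables: querying the write
pointer with BLKREPORTZONE from an unprivileged process that can merely open
the device. The device path and the minimal error handling are assumptions,
not part of the patch.

  #include <stdio.h>
  #include <stdlib.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/blkzoned.h>

  int main(int argc, char **argv)
  {
          const char *dev = argc > 1 ? argv[1] : "/dev/nullb0"; /* assumed zoned device */
          int fd = open(dev, O_RDONLY);
          if (fd < 0) { perror("open"); return 1; }

          /* room for the report header plus a single zone descriptor */
          size_t sz = sizeof(struct blk_zone_report) + sizeof(struct blk_zone);
          struct blk_zone_report *rep = calloc(1, sz);
          if (!rep) return 1;
          rep->sector = 0;      /* start reporting from the first zone */
          rep->nr_zones = 1;

          if (ioctl(fd, BLKREPORTZONE, rep) < 0) { perror("BLKREPORTZONE"); return 1; }
          if (rep->nr_zones)
                  printf("zone 0: start=%llu wp=%llu\n",
                         (unsigned long long)rep->zones[0].start,
                         (unsigned long long)rep->zones[0].wp);
          free(rep);
          close(fd);
          return 0;
  }
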
--- /dev/null
+From ead3b768bb51259e3a5f2287ff5fc9041eb6f450 Mon Sep 17 00:00:00 2001
+From: Niklas Cassel <niklas.cassel@wdc.com>
+Date: Wed, 11 Aug 2021 11:05:18 +0000
+Subject: blk-zoned: allow zone management send operations without CAP_SYS_ADMIN
+
+From: Niklas Cassel <niklas.cassel@wdc.com>
+
+commit ead3b768bb51259e3a5f2287ff5fc9041eb6f450 upstream.
+
+Zone management send operations (BLKRESETZONE, BLKOPENZONE, BLKCLOSEZONE
+and BLKFINISHZONE) should be allowed under the same permissions as write().
+(write() does not require CAP_SYS_ADMIN).
+
+Additionally, other ioctls like BLKSECDISCARD and BLKZEROOUT only check if
+the fd was successfully opened with FMODE_WRITE.
+(They do not require CAP_SYS_ADMIN).
+
+Currently, zone management send operations require both CAP_SYS_ADMIN
+and that the fd was successfully opened with FMODE_WRITE.
+
+Remove the CAP_SYS_ADMIN requirement, so that zone management send
+operations match the access control requirement of write(), BLKSECDISCARD
+and BLKZEROOUT.
+
+Fixes: 3ed05a987e0f ("blk-zoned: implement ioctls")
+Signed-off-by: Niklas Cassel <niklas.cassel@wdc.com>
+Reviewed-by: Damien Le Moal <damien.lemoal@wdc.com>
+Reviewed-by: Aravind Ramesh <aravind.ramesh@wdc.com>
+Reviewed-by: Adam Manzanares <a.manzanares@samsung.com>
+Reviewed-by: Himanshu Madhani <himanshu.madhani@oracle.com>
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Cc: stable@vger.kernel.org # v4.10+
+Link: https://lore.kernel.org/r/20210811110505.29649-2-Niklas.Cassel@wdc.com
+Signed-off-by: Jens Axboe <axboe@kernel.dk>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ block/blk-zoned.c | 3 ---
+ 1 file changed, 3 deletions(-)
+
+--- a/block/blk-zoned.c
++++ b/block/blk-zoned.c
+@@ -349,9 +349,6 @@ int blkdev_zone_mgmt_ioctl(struct block_
+ if (!blk_queue_is_zoned(q))
+ return -ENOTTY;
+
+- if (!capable(CAP_SYS_ADMIN))
+- return -EACCES;
+-
+ if (!(mode & FMODE_WRITE))
+ return -EBADF;
+
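
A user-space sketch of a zone management send operation under the relaxed
permission model: a writable file descriptor is now sufficient. The device
path and zone size are assumptions for illustration only.

  #include <stdio.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/blkzoned.h>

  int main(void)
  {
          int fd = open("/dev/nullb0", O_WRONLY);  /* assumed zoned device */
          if (fd < 0) { perror("open"); return 1; }

          struct blk_zone_range range = {
                  .sector = 0,           /* first zone */
                  .nr_sectors = 524288,  /* assumed 256 MiB zone, in 512-byte sectors */
          };
          if (ioctl(fd, BLKRESETZONE, &range) < 0) { perror("BLKRESETZONE"); return 1; }
          close(fd);
          return 0;
  }
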
--- /dev/null
+From 6f93e834fa7c5faa0372e46828b4b2a966ac61d7 Mon Sep 17 00:00:00 2001
+From: Anand Jain <anand.jain@oracle.com>
+Date: Tue, 10 Aug 2021 23:23:44 +0800
+Subject: btrfs: fix upper limit for max_inline for page size 64K
+
+From: Anand Jain <anand.jain@oracle.com>
+
+commit 6f93e834fa7c5faa0372e46828b4b2a966ac61d7 upstream.
+
+The mount option max_inline ranges from 0 to the sectorsize (which is
+now equal to page size). But we parse the mount options too early and
+before the actual sectorsize is read from the superblock. So the upper
+limit of max_inline is unaware of the actual sectorsize and is limited
+by the temporary sectorsize 4096, even on a system where the default
+sectorsize is 64K.
+
+Fix this by reading the superblock sectorsize before the mount option
+parse.
+
+Reported-by: Alexander Tsvetkov <alexander.tsvetkov@oracle.com>
+CC: stable@vger.kernel.org # 5.4+
+Signed-off-by: Anand Jain <anand.jain@oracle.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/disk-io.c | 48 ++++++++++++++++++++++++------------------------
+ 1 file changed, 24 insertions(+), 24 deletions(-)
+
+--- a/fs/btrfs/disk-io.c
++++ b/fs/btrfs/disk-io.c
+@@ -3329,6 +3329,30 @@ int __cold open_ctree(struct super_block
+ */
+ fs_info->compress_type = BTRFS_COMPRESS_ZLIB;
+
++ /*
++ * Flag our filesystem as having big metadata blocks if they are bigger
++ * than the page size.
++ */
++ if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
++ if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
++ btrfs_info(fs_info,
++ "flagging fs with big metadata feature");
++ features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
++ }
++
++ /* Set up fs_info before parsing mount options */
++ nodesize = btrfs_super_nodesize(disk_super);
++ sectorsize = btrfs_super_sectorsize(disk_super);
++ stripesize = sectorsize;
++ fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
++ fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
++
++ fs_info->nodesize = nodesize;
++ fs_info->sectorsize = sectorsize;
++ fs_info->sectorsize_bits = ilog2(sectorsize);
++ fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
++ fs_info->stripesize = stripesize;
++
+ ret = btrfs_parse_options(fs_info, options, sb->s_flags);
+ if (ret) {
+ err = ret;
+@@ -3356,30 +3380,6 @@ int __cold open_ctree(struct super_block
+ btrfs_info(fs_info, "has skinny extents");
+
+ /*
+- * flag our filesystem as having big metadata blocks if
+- * they are bigger than the page size
+- */
+- if (btrfs_super_nodesize(disk_super) > PAGE_SIZE) {
+- if (!(features & BTRFS_FEATURE_INCOMPAT_BIG_METADATA))
+- btrfs_info(fs_info,
+- "flagging fs with big metadata feature");
+- features |= BTRFS_FEATURE_INCOMPAT_BIG_METADATA;
+- }
+-
+- nodesize = btrfs_super_nodesize(disk_super);
+- sectorsize = btrfs_super_sectorsize(disk_super);
+- stripesize = sectorsize;
+- fs_info->dirty_metadata_batch = nodesize * (1 + ilog2(nr_cpu_ids));
+- fs_info->delalloc_batch = sectorsize * 512 * (1 + ilog2(nr_cpu_ids));
+-
+- /* Cache block sizes */
+- fs_info->nodesize = nodesize;
+- fs_info->sectorsize = sectorsize;
+- fs_info->sectorsize_bits = ilog2(sectorsize);
+- fs_info->csums_per_leaf = BTRFS_MAX_ITEM_SIZE(fs_info) / fs_info->csum_size;
+- fs_info->stripesize = stripesize;
+-
+- /*
+ * mixed block groups end up with duplicate but slightly offset
+ * extent buffers for the same range. It leads to corruptions
+ */
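
A hedged illustration of the user-visible effect: on a filesystem with 64K
sectorsize, max_inline values above 4096 were previously clamped because the
mount options were parsed before the superblock was read. The device and
mount point below are assumptions.

  #include <stdio.h>
  #include <sys/mount.h>

  int main(void)
  {
          /* With the fix, the real sectorsize is known before option parsing,
           * so max_inline=65536 is honoured instead of being capped at 4096. */
          if (mount("/dev/sdb", "/mnt", "btrfs", 0, "max_inline=65536") < 0) {
                  perror("mount");
                  return 1;
          }
          return 0;
  }
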
--- /dev/null
+From 93c60b17f2b5fca2c5931d7944788d1ef5f25528 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Wed, 11 Aug 2021 14:37:15 -0400
+Subject: btrfs: reduce the preemptive flushing threshold to 90%
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit 93c60b17f2b5fca2c5931d7944788d1ef5f25528 upstream.
+
+The preemptive flushing code was added in order to avoid needing to
+synchronously wait for ENOSPC flushing to recover space. Once we're
+almost full however we can essentially flush constantly. We were using
+98% as a threshold to determine if we were simply full, however in
+practice this is a really high bar to hit. For example reports of
+systems running into this problem had around 94% usage and thus
+continued to flush. Fix this by lowering the threshold to 90%, which is
+a more sane value, especially for smaller file systems.
+
+Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=212185
+CC: stable@vger.kernel.org # 5.12+
+Fixes: 576fa34830af ("btrfs: improve preemptive background space flushing")
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/space-info.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -833,7 +833,7 @@ static bool need_preemptive_reclaim(stru
+ struct btrfs_space_info *space_info)
+ {
+ u64 ordered, delalloc;
+- u64 thresh = div_factor_fine(space_info->total_bytes, 98);
++ u64 thresh = div_factor_fine(space_info->total_bytes, 90);
+ u64 used;
+
+ /* If we're just plain full then async reclaim just slows us down. */
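
A small sketch of what the new threshold means in absolute terms;
div_factor_fine() in btrfs computes num * factor / 100, and the 10 GiB total
used below is only an example value.

  #include <stdio.h>

  static unsigned long long div_factor_fine(unsigned long long num, unsigned factor)
  {
          return num * factor / 100;
  }

  int main(void)
  {
          unsigned long long total = 10ULL << 30; /* example space_info->total_bytes */

          printf("old threshold (98%%): %llu MiB\n", div_factor_fine(total, 98) >> 20);
          printf("new threshold (90%%): %llu MiB\n", div_factor_fine(total, 90) >> 20);
          return 0;
  }
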
--- /dev/null
+From 0d977e0eba234e01a60bdde27314dc21374201b3 Mon Sep 17 00:00:00 2001
+From: Desmond Cheong Zhi Xi <desmondcheongzx@gmail.com>
+Date: Sat, 21 Aug 2021 01:50:40 +0800
+Subject: btrfs: reset replace target device to allocation state on close
+
+From: Desmond Cheong Zhi Xi <desmondcheongzx@gmail.com>
+
+commit 0d977e0eba234e01a60bdde27314dc21374201b3 upstream.
+
+This crash was observed with a failed assertion on device close:
+
+ BTRFS: Transaction aborted (error -28)
+ WARNING: CPU: 1 PID: 3902 at fs/btrfs/extent-tree.c:2150 btrfs_run_delayed_refs+0x1d2/0x1e0 [btrfs]
+ Modules linked in: btrfs blake2b_generic libcrc32c crc32c_intel xor zstd_decompress zstd_compress xxhash lzo_compress lzo_decompress raid6_pq loop
+ CPU: 1 PID: 3902 Comm: kworker/u8:4 Not tainted 5.14.0-rc5-default+ #1532
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba527-rebuilt.opensuse.org 04/01/2014
+ Workqueue: events_unbound btrfs_async_reclaim_metadata_space [btrfs]
+ RIP: 0010:btrfs_run_delayed_refs+0x1d2/0x1e0 [btrfs]
+ RSP: 0018:ffffb7a5452d7d80 EFLAGS: 00010282
+ RAX: 0000000000000000 RBX: 0000000000000003 RCX: 0000000000000000
+ RDX: 0000000000000001 RSI: ffffffffabee13c4 RDI: 00000000ffffffff
+ RBP: ffff97834176a378 R08: 0000000000000001 R09: 0000000000000001
+ R10: 0000000000000000 R11: 0000000000000001 R12: ffff97835195d388
+ R13: 0000000005b08000 R14: ffff978385484000 R15: 000000000000016c
+ FS: 0000000000000000(0000) GS:ffff9783bd800000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 000056190d003fe8 CR3: 000000002a81e005 CR4: 0000000000170ea0
+ Call Trace:
+ flush_space+0x197/0x2f0 [btrfs]
+ btrfs_async_reclaim_metadata_space+0x139/0x300 [btrfs]
+ process_one_work+0x262/0x5e0
+ worker_thread+0x4c/0x320
+ ? process_one_work+0x5e0/0x5e0
+ kthread+0x144/0x170
+ ? set_kthread_struct+0x40/0x40
+ ret_from_fork+0x1f/0x30
+ irq event stamp: 19334989
+ hardirqs last enabled at (19334997): [<ffffffffab0e0c87>] console_unlock+0x2b7/0x400
+ hardirqs last disabled at (19335006): [<ffffffffab0e0d0d>] console_unlock+0x33d/0x400
+ softirqs last enabled at (19334900): [<ffffffffaba0030d>] __do_softirq+0x30d/0x574
+ softirqs last disabled at (19334893): [<ffffffffab0721ec>] irq_exit_rcu+0x12c/0x140
+ ---[ end trace 45939e308e0dd3c7 ]---
+ BTRFS: error (device vdd) in btrfs_run_delayed_refs:2150: errno=-28 No space left
+ BTRFS info (device vdd): forced readonly
+ BTRFS warning (device vdd): failed setting block group ro: -30
+ BTRFS info (device vdd): suspending dev_replace for unmount
+ assertion failed: !test_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state), in fs/btrfs/volumes.c:1150
+ ------------[ cut here ]------------
+ kernel BUG at fs/btrfs/ctree.h:3431!
+ invalid opcode: 0000 [#1] PREEMPT SMP
+ CPU: 1 PID: 3982 Comm: umount Tainted: G W 5.14.0-rc5-default+ #1532
+ Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.12.0-59-gc9ba527-rebuilt.opensuse.org 04/01/2014
+ RIP: 0010:assertfail.constprop.0+0x18/0x1a [btrfs]
+ RSP: 0018:ffffb7a5454c7db8 EFLAGS: 00010246
+ RAX: 0000000000000068 RBX: ffff978364b91c00 RCX: 0000000000000000
+ RDX: 0000000000000000 RSI: ffffffffabee13c4 RDI: 00000000ffffffff
+ RBP: ffff9783523a4c00 R08: 0000000000000001 R09: 0000000000000001
+ R10: 0000000000000000 R11: 0000000000000001 R12: ffff9783523a4d18
+ R13: 0000000000000000 R14: 0000000000000004 R15: 0000000000000003
+ FS: 00007f61c8f42800(0000) GS:ffff9783bd800000(0000) knlGS:0000000000000000
+ CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+ CR2: 000056190cffa810 CR3: 0000000030b96002 CR4: 0000000000170ea0
+ Call Trace:
+ btrfs_close_one_device.cold+0x11/0x55 [btrfs]
+ close_fs_devices+0x44/0xb0 [btrfs]
+ btrfs_close_devices+0x48/0x160 [btrfs]
+ generic_shutdown_super+0x69/0x100
+ kill_anon_super+0x14/0x30
+ btrfs_kill_super+0x12/0x20 [btrfs]
+ deactivate_locked_super+0x2c/0xa0
+ cleanup_mnt+0x144/0x1b0
+ task_work_run+0x59/0xa0
+ exit_to_user_mode_loop+0xe7/0xf0
+ exit_to_user_mode_prepare+0xaf/0xf0
+ syscall_exit_to_user_mode+0x19/0x50
+ do_syscall_64+0x4a/0x90
+ entry_SYSCALL_64_after_hwframe+0x44/0xae
+
+This happens when close_ctree is called while a dev_replace hasn't
+completed. In close_ctree, we suspend the dev_replace, but keep the
+replace target around so that we can resume the dev_replace procedure
+when we mount the root again. This is the call trace:
+
+ close_ctree():
+ btrfs_dev_replace_suspend_for_unmount();
+ btrfs_close_devices():
+ btrfs_close_fs_devices():
+ btrfs_close_one_device():
+ ASSERT(!test_bit(BTRFS_DEV_STATE_REPLACE_TGT,
+ &device->dev_state));
+
+However, since the replace target sticks around, there is a device
+with BTRFS_DEV_STATE_REPLACE_TGT set on close, and we fail the
+assertion in btrfs_close_one_device.
+
+To fix this, if we come across the replace target device when
+closing, we should properly reset it back to allocation state. This
+fix also ensures that if a non-target device has a corrupted state and
+has the BTRFS_DEV_STATE_REPLACE_TGT bit set, the assertion will still
+catch the error.
+
+Reported-by: David Sterba <dsterba@suse.com>
+Fixes: b2a616676839 ("btrfs: fix rw device counting in __btrfs_free_extra_devids")
+CC: stable@vger.kernel.org # 4.19+
+Reviewed-by: Anand Jain <anand.jain@oracle.com>
+Signed-off-by: Desmond Cheong Zhi Xi <desmondcheongzx@gmail.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/volumes.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/btrfs/volumes.c
++++ b/fs/btrfs/volumes.c
+@@ -1130,6 +1130,9 @@ static void btrfs_close_one_device(struc
+ fs_devices->rw_devices--;
+ }
+
++ if (device->devid == BTRFS_DEV_REPLACE_DEVID)
++ clear_bit(BTRFS_DEV_STATE_REPLACE_TGT, &device->dev_state);
++
+ if (test_bit(BTRFS_DEV_STATE_MISSING, &device->dev_state))
+ fs_devices->missing_devices--;
+
--- /dev/null
+From e16460707e94c3d4c1b5418cb68b28b8efa903b2 Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Wed, 14 Jul 2021 14:47:21 -0400
+Subject: btrfs: wait on async extents when flushing delalloc
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit e16460707e94c3d4c1b5418cb68b28b8efa903b2 upstream.
+
+I've been debugging an early ENOSPC problem in production and finally
+root caused it to this problem. When we switched to per-inode flushing in
+38d715f494f2 ("btrfs: use btrfs_start_delalloc_roots in
+shrink_delalloc") I pulled out the async extent handling, because we
+were doing the correct thing by calling filemap_flush() if we had async
+extents set. This would properly wait on any async extents by locking
+the page in the second flush, thus making sure our ordered extents were
+properly set up.
+
+However when I switched us back to page based flushing, I used
+sync_inode(), which allows us to pass in our own wbc. The problem here
+is that sync_inode() is smarter than the filemap_* helpers, it tries to
+avoid calling writepages at all. This means that our second call could
+skip calling do_writepages altogether, and thus not wait on the pagelock
+for the async helpers. This means we could come back before any ordered
+extents were created and then simply continue on in our flushing
+mechanisms and ENOSPC out when we have plenty of space to use.
+
+Fix this by putting back the async pages logic in shrink_delalloc. This
+allows us to bulk write out everything that we need to, and then we can
+wait in one place for the async helpers to catch up, and then wait on
+any ordered extents that are created.
+
+Fixes: e076ab2a2ca7 ("btrfs: shrink delalloc pages instead of full inodes")
+CC: stable@vger.kernel.org # 5.10+
+Reviewed-by: Nikolay Borisov <nborisov@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c | 4 ----
+ fs/btrfs/space-info.c | 40 ++++++++++++++++++++++++++++++++++++++++
+ 2 files changed, 40 insertions(+), 4 deletions(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -9774,10 +9774,6 @@ static int start_delalloc_inodes(struct
+ &work->work);
+ } else {
+ ret = sync_inode(inode, wbc);
+- if (!ret &&
+- test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
+- &BTRFS_I(inode)->runtime_flags))
+- ret = sync_inode(inode, wbc);
+ btrfs_add_delayed_iput(inode);
+ if (ret || wbc->nr_to_write <= 0)
+ goto out;
+--- a/fs/btrfs/space-info.c
++++ b/fs/btrfs/space-info.c
+@@ -539,9 +539,49 @@ static void shrink_delalloc(struct btrfs
+ while ((delalloc_bytes || ordered_bytes) && loops < 3) {
+ u64 temp = min(delalloc_bytes, to_reclaim) >> PAGE_SHIFT;
+ long nr_pages = min_t(u64, temp, LONG_MAX);
++ int async_pages;
+
+ btrfs_start_delalloc_roots(fs_info, nr_pages, true);
+
++ /*
++ * We need to make sure any outstanding async pages are now
++ * processed before we continue. This is because things like
++ * sync_inode() try to be smart and skip writing if the inode is
++ * marked clean. We don't use filemap_fwrite for flushing
++ * because we want to control how many pages we write out at a
++ * time, thus this is the only safe way to make sure we've
++ * waited for outstanding compressed workers to have started
++ * their jobs and thus have ordered extents set up properly.
++ *
++ * This exists because we do not want to wait for each
++ * individual inode to finish its async work, we simply want to
++ * start the IO on everybody, and then come back here and wait
++ * for all of the async work to catch up. Once we're done with
++ * that we know we'll have ordered extents for everything and we
++ * can decide if we wait for that or not.
++ *
++ * If we choose to replace this in the future, make absolutely
++ * sure that the proper waiting is being done in the async case,
++ * as there have been bugs in that area before.
++ */
++ async_pages = atomic_read(&fs_info->async_delalloc_pages);
++ if (!async_pages)
++ goto skip_async;
++
++ /*
++ * We don't want to wait forever, if we wrote less pages in this
++ * loop than we have outstanding, only wait for that number of
++ * pages, otherwise we can wait for all async pages to finish
++ * before continuing.
++ */
++ if (async_pages > nr_pages)
++ async_pages -= nr_pages;
++ else
++ async_pages = 0;
++ wait_event(fs_info->async_submit_wait,
++ atomic_read(&fs_info->async_delalloc_pages) <=
++ async_pages);
++skip_async:
+ loops++;
+ if (wait_ordered && !trans) {
+ btrfs_wait_ordered_roots(fs_info, items, 0, (u64)-1);
--- /dev/null
+From ac98141d140444fe93e26471d3074c603b70e2ca Mon Sep 17 00:00:00 2001
+From: Josef Bacik <josef@toxicpanda.com>
+Date: Wed, 14 Jul 2021 14:47:17 -0400
+Subject: btrfs: wake up async_delalloc_pages waiters after submit
+
+From: Josef Bacik <josef@toxicpanda.com>
+
+commit ac98141d140444fe93e26471d3074c603b70e2ca upstream.
+
+We use the async_delalloc_pages mechanism to make sure that we've
+completed our async work before trying to continue our delalloc
+flushing. The reason for this is we need to see any ordered extents
+that were created by our delalloc flushing. However we're waking up
+before we do the submit work, which is before we create the ordered
+extents. This is a pretty wide race window where we could potentially
+think there are no ordered extents and thus exit shrink_delalloc
+prematurely. Fix this by waking us up after we've done the work to
+create ordered extents.
+
+CC: stable@vger.kernel.org # 5.4+
+Reviewed-by: Nikolay Borisov <nborisov@suse.com>
+Signed-off-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/inode.c | 10 +++++-----
+ 1 file changed, 5 insertions(+), 5 deletions(-)
+
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -1248,11 +1248,6 @@ static noinline void async_cow_submit(st
+ nr_pages = (async_chunk->end - async_chunk->start + PAGE_SIZE) >>
+ PAGE_SHIFT;
+
+- /* atomic_sub_return implies a barrier */
+- if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
+- 5 * SZ_1M)
+- cond_wake_up_nomb(&fs_info->async_submit_wait);
+-
+ /*
+ * ->inode could be NULL if async_chunk_start has failed to compress,
+ * in which case we don't have anything to submit, yet we need to
+@@ -1261,6 +1256,11 @@ static noinline void async_cow_submit(st
+ */
+ if (async_chunk->inode)
+ submit_compressed_extents(async_chunk);
++
++ /* atomic_sub_return implies a barrier */
++ if (atomic_sub_return(nr_pages, &fs_info->async_delalloc_pages) <
++ 5 * SZ_1M)
++ cond_wake_up_nomb(&fs_info->async_submit_wait);
+ }
+
+ static noinline void async_cow_free(struct btrfs_work *work)
--- /dev/null
+From 0ae79c6fe70d5c5c645733b7ed39d5e6021d8c9a Mon Sep 17 00:00:00 2001
+From: Naohiro Aota <naohiro.aota@wdc.com>
+Date: Mon, 9 Aug 2021 13:13:44 +0900
+Subject: btrfs: zoned: fix block group alloc_offset calculation
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+commit 0ae79c6fe70d5c5c645733b7ed39d5e6021d8c9a upstream.
+
+alloc_offset is an offset from the start of a block group and @offset is
+actually an address in logical space. Thus, we need to consider
+block_group->start when calculating them.
+
+Fixes: 011b41bffa3d ("btrfs: zoned: advance allocation pointer after tree log node")
+CC: stable@vger.kernel.org # 5.12+
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/free-space-cache.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+--- a/fs/btrfs/free-space-cache.c
++++ b/fs/btrfs/free-space-cache.c
+@@ -2652,8 +2652,11 @@ int btrfs_remove_free_space(struct btrfs
+ * btrfs_pin_extent_for_log_replay() when replaying the log.
+ * Advance the pointer not to overwrite the tree-log nodes.
+ */
+- if (block_group->alloc_offset < offset + bytes)
+- block_group->alloc_offset = offset + bytes;
++ if (block_group->start + block_group->alloc_offset <
++ offset + bytes) {
++ block_group->alloc_offset =
++ offset + bytes - block_group->start;
++ }
+ return 0;
+ }
+
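
A worked example (numbers invented) of why the old formula over-advanced
alloc_offset: it compared and assigned a logical address where a block group
relative offset was expected.

  #include <stdio.h>

  int main(void)
  {
          unsigned long long bg_start = 1ULL << 30;               /* block group logical start: 1 GiB */
          unsigned long long offset   = bg_start + (16ULL << 20); /* tree-log node logical address */
          unsigned long long bytes    = 4ULL << 20;

          /* old (buggy): a logical address stored as a group-relative offset */
          unsigned long long old_alloc_offset = offset + bytes;
          /* fixed: translate back into the block group before storing */
          unsigned long long new_alloc_offset = offset + bytes - bg_start;

          printf("old alloc_offset = %llu MiB\n", old_alloc_offset >> 20); /* 1044 MiB */
          printf("new alloc_offset = %llu MiB\n", new_alloc_offset >> 20); /*   20 MiB */
          return 0;
  }
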
--- /dev/null
+From f79645df806565a03abb2847a1d20e6930b25e7e Mon Sep 17 00:00:00 2001
+From: Naohiro Aota <naohiro.aota@wdc.com>
+Date: Tue, 7 Sep 2021 00:04:28 +0900
+Subject: btrfs: zoned: fix double counting of split ordered extent
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+commit f79645df806565a03abb2847a1d20e6930b25e7e upstream.
+
+btrfs_add_ordered_extent_*() add num_bytes to fs_info->ordered_bytes.
+Then, splitting an ordered extent will call btrfs_add_ordered_extent_*()
+again for split extents, leading to double counting of the region of
+a split extent. These leaked bytes are finally reported at unmount time
+as follow:
+
+ BTRFS info (device dm-1): at unmount dio bytes count 364544
+
+Fix the double counting by subtracting split extent's size from
+fs_info->ordered_bytes.
+
+Fixes: d22002fd37bd ("btrfs: zoned: split ordered extent when bio is sent")
+CC: stable@vger.kernel.org # 5.12+
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/ordered-data.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+--- a/fs/btrfs/ordered-data.c
++++ b/fs/btrfs/ordered-data.c
+@@ -917,6 +917,7 @@ static int clone_ordered_extent(struct b
+ u64 len)
+ {
+ struct inode *inode = ordered->inode;
++ struct btrfs_fs_info *fs_info = BTRFS_I(inode)->root->fs_info;
+ u64 file_offset = ordered->file_offset + pos;
+ u64 disk_bytenr = ordered->disk_bytenr + pos;
+ u64 num_bytes = len;
+@@ -934,6 +935,13 @@ static int clone_ordered_extent(struct b
+ else
+ type = __ffs(flags_masked);
+
++ /*
++ * The splitting extent is already counted and will be added again
++ * in btrfs_add_ordered_extent_*(). Subtract num_bytes to avoid
++ * double counting.
++ */
++ percpu_counter_add_batch(&fs_info->ordered_bytes, -num_bytes,
++ fs_info->delalloc_batch);
+ if (test_bit(BTRFS_ORDERED_COMPRESSED, &ordered->flags)) {
+ WARN_ON_ONCE(1);
+ ret = btrfs_add_ordered_extent_compress(BTRFS_I(inode),
--- /dev/null
+From ba86dd9fe60e5853fbff96f2658212908b83f271 Mon Sep 17 00:00:00 2001
+From: Naohiro Aota <naohiro.aota@wdc.com>
+Date: Mon, 9 Aug 2021 13:32:30 +0900
+Subject: btrfs: zoned: suppress reclaim error message on EAGAIN
+
+From: Naohiro Aota <naohiro.aota@wdc.com>
+
+commit ba86dd9fe60e5853fbff96f2658212908b83f271 upstream.
+
+btrfs_relocate_chunk() can fail with -EAGAIN when e.g. send operations are
+running. The error message can make test btrfs/187 fail and is unnecessary
+because the block group is added back to the reclaim list anyway.
+
+btrfs_reclaim_bgs_work()
+`-> btrfs_relocate_chunk()
+ `-> btrfs_relocate_block_group()
+ `-> reloc_chunk_start()
+ `-> if (fs_info->send_in_progress)
+ `-> return -EAGAIN
+
+CC: stable@vger.kernel.org # 5.13+
+Fixes: 18bb8bbf13c1 ("btrfs: zoned: automatically reclaim zones")
+Reviewed-by: Johannes Thumshirn <johannes.thumshirn@wdc.com>
+Signed-off-by: Naohiro Aota <naohiro.aota@wdc.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/block-group.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/fs/btrfs/block-group.c
++++ b/fs/btrfs/block-group.c
+@@ -1550,7 +1550,7 @@ void btrfs_reclaim_bgs_work(struct work_
+ bg->start, div64_u64(bg->used * 100, bg->length));
+ trace_btrfs_reclaim_block_group(bg);
+ ret = btrfs_relocate_chunk(fs_info, bg->start);
+- if (ret)
++ if (ret && ret != -EAGAIN)
+ btrfs_err(fs_info, "error relocating chunk %llu",
+ bg->start);
+
--- /dev/null
+From 05a444d3f90a3c3e6362e88a1bf13e1a60f8cace Mon Sep 17 00:00:00 2001
+From: Colin Ian King <colin.king@canonical.com>
+Date: Sun, 29 Aug 2021 19:18:24 +0100
+Subject: ceph: fix dereference of null pointer cf
+
+From: Colin Ian King <colin.king@canonical.com>
+
+commit 05a444d3f90a3c3e6362e88a1bf13e1a60f8cace upstream.
+
+Currently, in the case where kmem_cache_alloc fails, the null pointer
+cf is dereferenced when assigning cf->is_capsnap = false. Fix this
+by adding a null pointer check and return path.
+
+Cc: stable@vger.kernel.org
+Addresses-Coverity: ("Dereference null return")
+Fixes: b2f9fa1f3bd8 ("ceph: correctly handle releasing an embedded cap flush")
+Signed-off-by: Colin Ian King <colin.king@canonical.com>
+Reviewed-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ceph/caps.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/fs/ceph/caps.c
++++ b/fs/ceph/caps.c
+@@ -1756,6 +1756,9 @@ struct ceph_cap_flush *ceph_alloc_cap_fl
+ struct ceph_cap_flush *cf;
+
+ cf = kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
++ if (!cf)
++ return NULL;
++
+ cf->is_capsnap = false;
+ return cf;
+ }
--- /dev/null
+From d198b8273e3006818ea287a93eb4d8fd2543e512 Mon Sep 17 00:00:00 2001
+From: "jingle.wu" <jingle.wu@emc.com.tw>
+Date: Mon, 6 Sep 2021 21:52:05 -0700
+Subject: Input: elan_i2c - reduce the resume time for controller in Whitebox
+
+From: jingle.wu <jingle.wu@emc.com.tw>
+
+commit d198b8273e3006818ea287a93eb4d8fd2543e512 upstream.
+
+Similar to controllers found in Voxel, Delbin, Magpie and Bobba, the one found
+in Whitebox does not need to be reset after issuing the power-on command, and
+skipping reset saves resume time.
+
+Signed-off-by: Jingle Wu <jingle.wu@emc.com.tw>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210907012924.11391-1-jingle.wu@emc.com.tw
+Signed-off-by: Dmitry Torokhov <dmitry.torokhov@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/input/mouse/elan_i2c.h | 3 ++-
+ drivers/input/mouse/elan_i2c_core.c | 1 +
+ 2 files changed, 3 insertions(+), 1 deletion(-)
+
+--- a/drivers/input/mouse/elan_i2c.h
++++ b/drivers/input/mouse/elan_i2c.h
+@@ -55,8 +55,9 @@
+ #define ETP_FW_PAGE_SIZE_512 512
+ #define ETP_FW_SIGNATURE_SIZE 6
+
+-#define ETP_PRODUCT_ID_DELBIN 0x00C2
++#define ETP_PRODUCT_ID_WHITEBOX 0x00B8
+ #define ETP_PRODUCT_ID_VOXEL 0x00BF
++#define ETP_PRODUCT_ID_DELBIN 0x00C2
+ #define ETP_PRODUCT_ID_MAGPIE 0x0120
+ #define ETP_PRODUCT_ID_BOBBA 0x0121
+
+--- a/drivers/input/mouse/elan_i2c_core.c
++++ b/drivers/input/mouse/elan_i2c_core.c
+@@ -105,6 +105,7 @@ static u32 elan_i2c_lookup_quirks(u16 ic
+ u32 quirks;
+ } elan_i2c_quirks[] = {
+ { 0x0D, ETP_PRODUCT_ID_DELBIN, ETP_QUIRK_QUICK_WAKEUP },
++ { 0x0D, ETP_PRODUCT_ID_WHITEBOX, ETP_QUIRK_QUICK_WAKEUP },
+ { 0x10, ETP_PRODUCT_ID_VOXEL, ETP_QUIRK_QUICK_WAKEUP },
+ { 0x14, ETP_PRODUCT_ID_MAGPIE, ETP_QUIRK_QUICK_WAKEUP },
+ { 0x14, ETP_PRODUCT_ID_BOBBA, ETP_QUIRK_QUICK_WAKEUP },
--- /dev/null
+From 1a519dc7a73c977547d8b5108d98c6e769c89f4b Mon Sep 17 00:00:00 2001
+From: =?UTF-8?q?Marek=20Marczykowski-G=C3=B3recki?=
+ <marmarek@invisiblethingslab.com>
+Date: Thu, 26 Aug 2021 19:03:42 +0200
+Subject: PCI/MSI: Skip masking MSI-X on Xen PV
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+
+commit 1a519dc7a73c977547d8b5108d98c6e769c89f4b upstream.
+
+When running as Xen PV guest, masking MSI-X is a responsibility of the
+hypervisor. The guest has no write access to the relevant BAR at all - when
+it tries to, it results in a crash like this:
+
+ BUG: unable to handle page fault for address: ffffc9004069100c
+ #PF: supervisor write access in kernel mode
+ #PF: error_code(0x0003) - permissions violation
+ RIP: e030:__pci_enable_msix_range.part.0+0x26b/0x5f0
+ e1000e_set_interrupt_capability+0xbf/0xd0 [e1000e]
+ e1000_probe+0x41f/0xdb0 [e1000e]
+ local_pci_probe+0x42/0x80
+ (...)
+
+The recently introduced function msix_mask_all() does not check the global
+variable pci_msi_ignore_mask which is set by XEN PV to bypass the masking
+of MSI[-X] interrupts.
+
+Add the check to make this function XEN PV compatible.
+
+Fixes: 7d5ec3d36123 ("PCI/MSI: Mask all unused MSI-X entries")
+Signed-off-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
+Acked-by: Bjorn Helgaas <bhelgaas@google.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20210826170342.135172-1-marmarek@invisiblethingslab.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/pci/msi.c | 3 +++
+ 1 file changed, 3 insertions(+)
+
+--- a/drivers/pci/msi.c
++++ b/drivers/pci/msi.c
+@@ -776,6 +776,9 @@ static void msix_mask_all(void __iomem *
+ u32 ctrl = PCI_MSIX_ENTRY_CTRL_MASKBIT;
+ int i;
+
++ if (pci_msi_ignore_mask)
++ return;
++
+ for (i = 0; i < tsize; i++, base += PCI_MSIX_ENTRY_SIZE)
+ writel(ctrl, base + PCI_MSIX_ENTRY_VECTOR_CTRL);
+ }
--- /dev/null
+From f9addd85fbfacf0d155e83dbee8696d6df5ed0c7 Mon Sep 17 00:00:00 2001
+From: Kajol Jain <kjain@linux.ibm.com>
+Date: Fri, 13 Aug 2021 13:51:58 +0530
+Subject: powerpc/perf/hv-gpci: Fix counter value parsing
+
+From: Kajol Jain <kjain@linux.ibm.com>
+
+commit f9addd85fbfacf0d155e83dbee8696d6df5ed0c7 upstream.
+
+H_GetPerformanceCounterInfo (0xF080) hcall returns the counter data in
+the result buffer. Result buffer has specific format defined in the PAPR
+specification. One of the fields is counter offset and width of the
+counter data returned.
+
+Counter data are returned in a unsigned char array in big endian byte
+order. To get the final counter data, the values must be left shifted
+byte at a time. But commit 220a0c609ad17 ("powerpc/perf: Add support for
+the hv gpci (get performance counter info) interface") made the shifting
+bitwise and also assumed little endian order. Because of that, hcall
+counters values are reported incorrectly.
+
+In particular this can lead to counters go backwards which messes up the
+counter prev vs now calculation and leads to huge counter value
+reporting:
+
+ #: perf stat -e hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+ -C 0 -I 1000
+ time counts unit events
+ 1.000078854 18,446,744,073,709,535,232 hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+ 2.000213293 0 hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+ 3.000320107 0 hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+ 4.000428392 0 hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+ 5.000537864 0 hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+ 6.000649087 0 hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+ 7.000760312 0 hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+ 8.000865218 16,448 hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+ 9.000978985 18,446,744,073,709,535,232 hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+ 10.001088891 16,384 hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+ 11.001201435 0 hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+ 12.001307937 18,446,744,073,709,535,232 hv_gpci/system_tlbie_count_and_time_tlbie_instructions_issued/
+
+Fix the shifting logic to correctly match the format, i.e. read bytes in
+big endian order.
+
+Fixes: e4f226b1580b ("powerpc/perf/hv-gpci: Increase request buffer size")
+Cc: stable@vger.kernel.org # v4.6+
+Reported-by: Nageswara R Sastry <rnsastry@linux.ibm.com>
+Signed-off-by: Kajol Jain <kjain@linux.ibm.com>
+Tested-by: Nageswara R Sastry <rnsastry@linux.ibm.com>
+Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
+Link: https://lore.kernel.org/r/20210813082158.429023-1-kjain@linux.ibm.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/powerpc/perf/hv-gpci.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/arch/powerpc/perf/hv-gpci.c
++++ b/arch/powerpc/perf/hv-gpci.c
+@@ -175,7 +175,7 @@ static unsigned long single_gpci_request
+ */
+ count = 0;
+ for (i = offset; i < offset + length; i++)
+- count |= arg->bytes[i] << (i - offset);
++ count |= (u64)(arg->bytes[i]) << ((length - 1 - (i - offset)) * 8);
+
+ *value = count;
+ out:
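
A standalone sketch of the corrected byte assembly (the offsets and sample
buffer are made up): bytes are read in big endian order, most significant
byte first.

  #include <stdio.h>
  #include <stdint.h>

  static uint64_t be_bytes_to_u64(const unsigned char *bytes, int offset, int length)
  {
          uint64_t count = 0;
          int i;

          for (i = offset; i < offset + length; i++)
                  count |= (uint64_t)bytes[i] << ((length - 1 - (i - offset)) * 8);
          return count;
  }

  int main(void)
  {
          unsigned char buf[] = { 0, 0, 0, 0, 0, 0, 0x40, 0x40 }; /* 16448 in big endian */

          printf("%llu\n", (unsigned long long)be_bytes_to_u64(buf, 0, 8)); /* prints 16448 */
          return 0;
  }
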
--- /dev/null
+From 8d448fa0a8bb1c8d94eef7647edffe9ac81a281e Mon Sep 17 00:00:00 2001
+From: Dmitry Osipenko <digetx@gmail.com>
+Date: Sun, 8 Aug 2021 19:00:30 +0300
+Subject: rtc: tps65910: Correct driver module alias
+
+From: Dmitry Osipenko <digetx@gmail.com>
+
+commit 8d448fa0a8bb1c8d94eef7647edffe9ac81a281e upstream.
+
+The TPS65910 RTC driver module doesn't auto-load because of the wrong
+module alias that doesn't match the device name. Fix it.
+
+Cc: stable@vger.kernel.org
+Reported-by: Anton Bambura <jenneron@protonmail.com>
+Tested-by: Anton Bambura <jenneron@protonmail.com>
+Signed-off-by: Dmitry Osipenko <digetx@gmail.com>
+Signed-off-by: Alexandre Belloni <alexandre.belloni@bootlin.com>
+Link: https://lore.kernel.org/r/20210808160030.8556-1-digetx@gmail.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/rtc/rtc-tps65910.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/drivers/rtc/rtc-tps65910.c
++++ b/drivers/rtc/rtc-tps65910.c
+@@ -467,6 +467,6 @@ static struct platform_driver tps65910_r
+ };
+
+ module_platform_driver(tps65910_rtc_driver);
+-MODULE_ALIAS("platform:rtc-tps65910");
++MODULE_ALIAS("platform:tps65910-rtc");
+ MODULE_AUTHOR("Venu Byravarasu <vbyravarasu@nvidia.com>");
+ MODULE_LICENSE("GPL");
+rtc-tps65910-correct-driver-module-alias.patch
+btrfs-wake-up-async_delalloc_pages-waiters-after-submit.patch
+btrfs-wait-on-async-extents-when-flushing-delalloc.patch
+btrfs-reduce-the-preemptive-flushing-threshold-to-90.patch
+btrfs-zoned-fix-block-group-alloc_offset-calculation.patch
+btrfs-zoned-suppress-reclaim-error-message-on-eagain.patch
+btrfs-fix-upper-limit-for-max_inline-for-page-size-64k.patch
+btrfs-reset-replace-target-device-to-allocation-state-on-close.patch
+btrfs-zoned-fix-double-counting-of-split-ordered-extent.patch
+blk-zoned-allow-zone-management-send-operations-without-cap_sys_admin.patch
+blk-zoned-allow-blkreportzone-without-cap_sys_admin.patch
+pci-msi-skip-masking-msi-x-on-xen-pv.patch
+powerpc-perf-hv-gpci-fix-counter-value-parsing.patch
+xen-fix-setting-of-max_pfn-in-shared_info.patch
+9p-xen-fix-end-of-loop-tests-for-list_for_each_entry.patch
+ceph-fix-dereference-of-null-pointer-cf.patch
+input-elan_i2c-reduce-the-resume-time-for-controller-in-whitebox.patch
--- /dev/null
+From 4b511d5bfa74b1926daefd1694205c7f1bcf677f Mon Sep 17 00:00:00 2001
+From: Juergen Gross <jgross@suse.com>
+Date: Fri, 30 Jul 2021 11:26:21 +0200
+Subject: xen: fix setting of max_pfn in shared_info
+
+From: Juergen Gross <jgross@suse.com>
+
+commit 4b511d5bfa74b1926daefd1694205c7f1bcf677f upstream.
+
+Xen PV guests are specifying the highest used PFN via the max_pfn
+field in shared_info. This value is used by the Xen tools when saving
+or migrating the guest.
+
+Unfortunately this field is misnamed, as in reality it is specifying
+the number of pages (including any memory holes) of the guest, so it
+is the highest used PFN + 1. Renaming isn't possible, as this is a
+public Xen hypervisor interface which needs to be kept stable.
+
+The kernel will set the value correctly initially at boot time, but
+when adding more pages (e.g. due to memory hotplug or ballooning) a
+real PFN number is stored in max_pfn. This is done when expanding the
+p2m array, and the PFN stored there is even possibly wrong, as it
+should be the last possible PFN of the just added P2M frame, and not
+one which led to the P2M expansion.
+
+Fix that by setting shared_info->max_pfn to the last possible PFN + 1.
+
+Fixes: 98dd166ea3a3c3 ("x86/xen/p2m: hint at the last populated P2M entry")
+Cc: stable@vger.kernel.org
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Link: https://lore.kernel.org/r/20210730092622.9973-2-jgross@suse.com
+Signed-off-by: Juergen Gross <jgross@suse.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/xen/p2m.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/xen/p2m.c
++++ b/arch/x86/xen/p2m.c
+@@ -618,8 +618,8 @@ int xen_alloc_p2m_entry(unsigned long pf
+ }
+
+ /* Expanded the p2m? */
+- if (pfn > xen_p2m_last_pfn) {
+- xen_p2m_last_pfn = pfn;
++ if (pfn >= xen_p2m_last_pfn) {
++ xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
+ HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
+ }
+
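
A small sketch of the rounding the fix introduces (the PFN value is invented;
P2M_PER_PAGE is PAGE_SIZE / sizeof(unsigned long), i.e. 512 on 64-bit x86):

  #include <stdio.h>

  #define P2M_PER_PAGE 512UL
  #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

  int main(void)
  {
          unsigned long pfn = 0x12345; /* PFN that triggered the P2M expansion */

          /* old: the triggering PFN itself ended up in max_pfn */
          printf("old last_pfn = 0x%lx\n", pfn);
          /* new: last possible PFN of the just-added P2M frame, plus one */
          printf("new last_pfn = 0x%lx\n", ALIGN(pfn + 1, P2M_PER_PAGE));
          return 0;
  }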