--- /dev/null
+From a6a7cba17c544fb95d5a29ab9d9ed4503029cb29 Mon Sep 17 00:00:00 2001
+From: Tianling Shen <cnsztl@gmail.com>
+Date: Sun, 19 Jan 2025 17:11:54 +0800
+Subject: arm64: dts: rockchip: change eth phy mode to rgmii-id for orangepi r1 plus lts
+
+From: Tianling Shen <cnsztl@gmail.com>
+
+commit a6a7cba17c544fb95d5a29ab9d9ed4503029cb29 upstream.
+
+In general the delay should be added by the PHY instead of the MAC,
+and this improves network stability on some boards which seem to
+need different delays.
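+
+As a rough sketch (the phylib symbols are real; the helper names are
+hypothetical), a PHY driver reacts to "rgmii-id" by enabling its own
+RX and TX delays, so the MAC no longer has to add them:
+
+	static int example_config_delays(struct phy_device *phydev)
+	{
+		bool rx = false, tx = false;
+
+		switch (phydev->interface) {
+		case PHY_INTERFACE_MODE_RGMII_ID:
+			rx = tx = true;		/* PHY inserts both delays */
+			break;
+		case PHY_INTERFACE_MODE_RGMII_RXID:
+			rx = true;
+			break;
+		case PHY_INTERFACE_MODE_RGMII_TXID:
+			tx = true;
+			break;
+		default:			/* plain "rgmii": MAC adds them */
+			break;
+		}
+		return example_set_delay_regs(phydev, rx, tx);	/* hypothetical */
+	}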
+
+Fixes: 387b3bbac5ea ("arm64: dts: rockchip: Add Xunlong OrangePi R1 Plus LTS")
+Cc: stable@vger.kernel.org # 6.6+
+Signed-off-by: Tianling Shen <cnsztl@gmail.com>
+Link: https://lore.kernel.org/r/20250119091154.1110762-1-cnsztl@gmail.com
+Signed-off-by: Heiko Stuebner <heiko@sntech.de>
+[Fix conflicts due to missing dtsi conversion]
+Signed-off-by: Tianling Shen <cnsztl@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+--- a/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
++++ b/arch/arm64/boot/dts/rockchip/rk3328-orangepi-r1-plus-lts.dts
+@@ -15,9 +15,11 @@
+ };
+
+ &gmac2io {
++ /delete-property/ tx_delay;
++ /delete-property/ rx_delay;
++
+ phy-handle = <&yt8531c>;
+- tx_delay = <0x19>;
+- rx_delay = <0x05>;
++ phy-mode = "rgmii-id";
+
+ mdio {
+ /delete-node/ ethernet-phy@1;
--- /dev/null
+From f9cfe7e7f96a9414a17d596e288693c4f2325d49 Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Tue, 9 Jan 2024 21:39:57 +0800
+Subject: md: Fix md_seq_ops() regressions
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit f9cfe7e7f96a9414a17d596e288693c4f2325d49 upstream.
+
+Commit cf1b6d4441ff ("md: simplify md_seq_ops") introduced the following
+regressions:
+
+1) If the list all_mddevs is empty, personalities and unused devices are
+   no longer shown to the user.
+2) If the seq_file buffer overflows from md_seq_show(), then md_seq_start()
+   will be called again, hence personalities will be shown to the user
+   again.
+3) If the seq_file buffer overflows from md_seq_stop(), seq_read_iter()
+   doesn't handle this, hence unused devices won't be shown to the user.
+
+Fix the above problems by printing personalities and unused devices in
+md_seq_show().
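+
+The fix relies on a standard seq_file idiom: seq_list_start_head() hands
+the list head itself to ->show() as the first element, so header text can
+be printed exactly once even when a buffer overflow forces ->start() to
+be re-invoked. A minimal sketch of the pattern (names hypothetical):
+
+	static int example_seq_show(struct seq_file *seq, void *v)
+	{
+		if (v == &example_list) {
+			/* sentinel from seq_list_start_head(): header only */
+			seq_puts(seq, "Header\n");
+			return 0;
+		}
+		/* v is a real list entry from here on */
+		return 0;
+	}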
+
+Fixes: cf1b6d4441ff ("md: simplify md_seq_ops")
+Cc: stable@vger.kernel.org # v6.7+
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20240109133957.2975272-1-yukuai1@huaweicloud.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/md.c | 40 +++++++++++++++++++++++++++-------------
+ 1 file changed, 27 insertions(+), 13 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -8121,6 +8121,19 @@ static void status_unused(struct seq_fil
+ seq_printf(seq, "\n");
+ }
+
++static void status_personalities(struct seq_file *seq)
++{
++ struct md_personality *pers;
++
++ seq_puts(seq, "Personalities : ");
++ spin_lock(&pers_lock);
++ list_for_each_entry(pers, &pers_list, list)
++ seq_printf(seq, "[%s] ", pers->name);
++
++ spin_unlock(&pers_lock);
++ seq_puts(seq, "\n");
++}
++
+ static int status_resync(struct seq_file *seq, struct mddev *mddev)
+ {
+ sector_t max_sectors, resync, res;
+@@ -8262,20 +8275,10 @@ static int status_resync(struct seq_file
+ static void *md_seq_start(struct seq_file *seq, loff_t *pos)
+ __acquires(&all_mddevs_lock)
+ {
+- struct md_personality *pers;
+-
+- seq_puts(seq, "Personalities : ");
+- spin_lock(&pers_lock);
+- list_for_each_entry(pers, &pers_list, list)
+- seq_printf(seq, "[%s] ", pers->name);
+-
+- spin_unlock(&pers_lock);
+- seq_puts(seq, "\n");
+ seq->poll_event = atomic_read(&md_event_count);
+-
+ spin_lock(&all_mddevs_lock);
+
+- return seq_list_start(&all_mddevs, *pos);
++ return seq_list_start_head(&all_mddevs, *pos);
+ }
+
+ static void *md_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+@@ -8286,7 +8289,6 @@ static void *md_seq_next(struct seq_file
+ static void md_seq_stop(struct seq_file *seq, void *v)
+ __releases(&all_mddevs_lock)
+ {
+- status_unused(seq);
+ spin_unlock(&all_mddevs_lock);
+ }
+
+@@ -8319,10 +8321,18 @@ static void md_bitmap_status(struct seq_
+
+ static int md_seq_show(struct seq_file *seq, void *v)
+ {
+- struct mddev *mddev = list_entry(v, struct mddev, all_mddevs);
++ struct mddev *mddev;
+ sector_t sectors;
+ struct md_rdev *rdev;
+
++ if (v == &all_mddevs) {
++ status_personalities(seq);
++ if (list_empty(&all_mddevs))
++ status_unused(seq);
++ return 0;
++ }
++
++ mddev = list_entry(v, struct mddev, all_mddevs);
+ if (!mddev_get(mddev))
+ return 0;
+
+@@ -8403,6 +8413,10 @@ static int md_seq_show(struct seq_file *
+ spin_unlock(&mddev->lock);
+ mutex_unlock(&mddev->bitmap_info.mutex);
+ spin_lock(&all_mddevs_lock);
++
++ if (mddev == list_last_entry(&all_mddevs, struct mddev, all_mddevs))
++ status_unused(seq);
++
+ if (atomic_dec_and_test(&mddev->active))
+ __mddev_put(mddev);
+
--- /dev/null
+From f2d87a759f6841a132e845e2fafdad37385ddd30 Mon Sep 17 00:00:00 2001
+From: Yu Kuai <yukuai3@huawei.com>
+Date: Tue, 5 Dec 2023 17:42:13 +0800
+Subject: md: fix missing flush of sync_work
+
+From: Yu Kuai <yukuai3@huawei.com>
+
+commit f2d87a759f6841a132e845e2fafdad37385ddd30 upstream.
+
+Commit ac619781967b ("md: use separate work_struct for md_start_sync()")
+uses a new sync_work to replace del_work; however, stop_sync_thread() and
+__md_stop_writes() were still trying to wait for sync_thread to be done
+by checking del_work, hence they should switch to sync_work as well.
+
+Note that md_start_sync() from sync_work will grab 'reconfig_mutex',
+hence other contexts can't hold the same lock to flush the work; this
+will be fixed in later patches.
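+
+In sketch form, the problem is that gating a workqueue flush on the
+wrong work item skips the flush exactly when it is needed:
+
+	/* before: checks del_work, so a queued sync_work is not flushed */
+	if (work_pending(&mddev->del_work))
+		flush_workqueue(md_misc_wq);
+
+	/* after: checks the work item md_start_sync() actually runs from */
+	if (work_pending(&mddev->sync_work))
+		flush_workqueue(md_misc_wq);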
+
+Fixes: ac619781967b ("md: use separate work_struct for md_start_sync()")
+Signed-off-by: Yu Kuai <yukuai3@huawei.com>
+Acked-by: Xiao Ni <xni@redhat.com>
+Signed-off-by: Song Liu <song@kernel.org>
+Link: https://lore.kernel.org/r/20231205094215.1824240-2-yukuai1@huaweicloud.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/md/md.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+--- a/drivers/md/md.c
++++ b/drivers/md/md.c
+@@ -4836,7 +4836,7 @@ static void stop_sync_thread(struct mdde
+ return;
+ }
+
+- if (work_pending(&mddev->del_work))
++ if (work_pending(&mddev->sync_work))
+ flush_workqueue(md_misc_wq);
+
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
+@@ -6293,7 +6293,7 @@ static void md_clean(struct mddev *mddev
+ static void __md_stop_writes(struct mddev *mddev)
+ {
+ set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+- if (work_pending(&mddev->del_work))
++ if (work_pending(&mddev->sync_work))
+ flush_workqueue(md_misc_wq);
+ if (mddev->sync_thread) {
+ set_bit(MD_RECOVERY_INTR, &mddev->recovery);
--- /dev/null
+From stable+bounces-118590-greg=kroah.com@vger.kernel.org Fri Feb 21 14:39:52 2025
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Fri, 21 Feb 2025 22:37:54 +0900
+Subject: nilfs2: eliminate staggered calls to kunmap in nilfs_rename
+To: stable@vger.kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Message-ID: <20250221133848.4335-3-konishi.ryusuke@gmail.com>
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 8cf57c6df818f58fdad16a909506be213623a88e upstream.
+
+In nilfs_rename(), calls to nilfs_put_page() to release pages obtained
+with nilfs_find_entry() or nilfs_dotdot() are staggered rather than
+nested in the normal path.
+
+When replacing the kernel memory mapping method from kmap to
+kmap_local_{page,folio}, this violates the constraint on the calling order
+of kunmap_local().
+
+Swap the order of nilfs_put_page calls where the kmap sections of multiple
+pages overlap so that they are nested, allowing direct replacement of
+nilfs_put_page() -> unmap_and_put_page().
+
+Without this reordering, that replacement will cause a kernel WARNING in
+kunmap_local_indexed() on architectures with high memory mapping.
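+
+For reference, kmap_local mappings must be released in reverse (LIFO)
+order; a minimal sketch of the constraint this reordering satisfies:
+
+	void *a = kmap_local_page(page_a);
+	void *b = kmap_local_page(page_b);
+	/* ... use both mappings ... */
+	kunmap_local(b);	/* most recent mapping is unmapped first */
+	kunmap_local(a);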
+
+Link: https://lkml.kernel.org/r/20231127143036.2425-3-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: ee70999a988b ("nilfs2: handle errors that nilfs_prepare_chunk() may return")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/namei.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -431,13 +431,14 @@ static int nilfs_rename(struct mnt_idmap
+ inode_set_ctime_current(old_inode);
+
+ nilfs_delete_entry(old_de, old_page);
+- nilfs_put_page(old_page);
+
+ if (dir_de) {
+ nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
+ nilfs_put_page(dir_page);
+ drop_nlink(old_dir);
+ }
++ nilfs_put_page(old_page);
++
+ nilfs_mark_inode_dirty(old_dir);
+ nilfs_mark_inode_dirty(old_inode);
+
--- /dev/null
+From stable+bounces-118591-greg=kroah.com@vger.kernel.org Fri Feb 21 14:40:19 2025
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Fri, 21 Feb 2025 22:37:55 +0900
+Subject: nilfs2: handle errors that nilfs_prepare_chunk() may return
+To: stable@vger.kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Message-ID: <20250221133848.4335-4-konishi.ryusuke@gmail.com>
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit ee70999a988b8abc3490609142f50ebaa8344432 upstream.
+
+Patch series "nilfs2: fix issues with rename operations".
+
+This series fixes BUG_ON check failures reported by syzbot around rename
+operations, and a minor behavioral issue where the mtime of a child
+directory changes when it is renamed instead of moved.
+
+This patch (of 2):
+
+The directory manipulation routines nilfs_set_link() and
+nilfs_delete_entry() rewrite the directory entry in the folio/page
+previously read by nilfs_find_entry(), so error handling is omitted on the
+assumption that nilfs_prepare_chunk(), which prepares the buffer for
+rewriting, will always succeed for these. And if an error is returned, it
+triggers the legacy BUG_ON() checks in each routine.
+
+This assumption is wrong, as proven by syzbot: the buffer layer called by
+nilfs_prepare_chunk() may call nilfs_get_block() if necessary, which may
+fail due to metadata corruption or other reasons. This has been there all
+along, but improved sanity checks and error handling may have made it more
+reproducible in fuzzing tests.
+
+Fix this issue by adding missing error paths in nilfs_set_link(),
+nilfs_delete_entry(), and their caller nilfs_rename().
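+
+The conversion pattern, in sketch form, replaces the legacy BUG_ON()
+with an unlock-and-return error path:
+
+	err = nilfs_prepare_chunk(page, from, to);
+	if (unlikely(err)) {		/* was: BUG_ON(err); */
+		unlock_page(page);
+		return err;
+	}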
+
+[konishi.ryusuke@gmail.com: adjusted for page/folio conversion]
+Link: https://lkml.kernel.org/r/20250111143518.7901-1-konishi.ryusuke@gmail.com
+Link: https://lkml.kernel.org/r/20250111143518.7901-2-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reported-by: syzbot+32c3706ebf5d95046ea1@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=32c3706ebf5d95046ea1
+Reported-by: syzbot+1097e95f134f37d9395c@syzkaller.appspotmail.com
+Closes: https://syzkaller.appspot.com/bug?extid=1097e95f134f37d9395c
+Fixes: 2ba466d74ed7 ("nilfs2: directory entry operations")
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/dir.c | 13 ++++++++++---
+ fs/nilfs2/namei.c | 29 +++++++++++++++--------------
+ fs/nilfs2/nilfs.h | 4 ++--
+ 3 files changed, 27 insertions(+), 19 deletions(-)
+
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -444,7 +444,7 @@ int nilfs_inode_by_name(struct inode *di
+ return 0;
+ }
+
+-void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
++int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+ struct page *page, struct inode *inode)
+ {
+ unsigned int from = (char *)de - (char *)page_address(page);
+@@ -454,11 +454,15 @@ void nilfs_set_link(struct inode *dir, s
+
+ lock_page(page);
+ err = nilfs_prepare_chunk(page, from, to);
+- BUG_ON(err);
++ if (unlikely(err)) {
++ unlock_page(page);
++ return err;
++ }
+ de->inode = cpu_to_le64(inode->i_ino);
+ nilfs_set_de_type(de, inode);
+ nilfs_commit_chunk(page, mapping, from, to);
+ dir->i_mtime = inode_set_ctime_current(dir);
++ return 0;
+ }
+
+ /*
+@@ -590,7 +594,10 @@ int nilfs_delete_entry(struct nilfs_dir_
+ from = (char *)pde - (char *)page_address(page);
+ lock_page(page);
+ err = nilfs_prepare_chunk(page, from, to);
+- BUG_ON(err);
++ if (unlikely(err)) {
++ unlock_page(page);
++ goto out;
++ }
+ if (pde)
+ pde->rec_len = nilfs_rec_len_to_disk(to - from);
+ dir->inode = 0;
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -406,8 +406,10 @@ static int nilfs_rename(struct mnt_idmap
+ err = PTR_ERR(new_de);
+ goto out_dir;
+ }
+- nilfs_set_link(new_dir, new_de, new_page, old_inode);
++ err = nilfs_set_link(new_dir, new_de, new_page, old_inode);
+ nilfs_put_page(new_page);
++ if (unlikely(err))
++ goto out_dir;
+ nilfs_mark_inode_dirty(new_dir);
+ inode_set_ctime_current(new_inode);
+ if (dir_de)
+@@ -430,28 +432,27 @@ static int nilfs_rename(struct mnt_idmap
+ */
+ inode_set_ctime_current(old_inode);
+
+- nilfs_delete_entry(old_de, old_page);
+-
+- if (dir_de) {
+- nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
+- nilfs_put_page(dir_page);
+- drop_nlink(old_dir);
++ err = nilfs_delete_entry(old_de, old_page);
++ if (likely(!err)) {
++ if (dir_de) {
++ err = nilfs_set_link(old_inode, dir_de, dir_page,
++ new_dir);
++ drop_nlink(old_dir);
++ }
++ nilfs_mark_inode_dirty(old_dir);
+ }
+- nilfs_put_page(old_page);
+-
+- nilfs_mark_inode_dirty(old_dir);
+ nilfs_mark_inode_dirty(old_inode);
+
+- err = nilfs_transaction_commit(old_dir->i_sb);
+- return err;
+-
+ out_dir:
+ if (dir_de)
+ nilfs_put_page(dir_page);
+ out_old:
+ nilfs_put_page(old_page);
+ out:
+- nilfs_transaction_abort(old_dir->i_sb);
++ if (likely(!err))
++ err = nilfs_transaction_commit(old_dir->i_sb);
++ else
++ nilfs_transaction_abort(old_dir->i_sb);
+ return err;
+ }
+
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -240,8 +240,8 @@ nilfs_find_entry(struct inode *, const s
+ extern int nilfs_delete_entry(struct nilfs_dir_entry *, struct page *);
+ extern int nilfs_empty_dir(struct inode *);
+ extern struct nilfs_dir_entry *nilfs_dotdot(struct inode *, struct page **);
+-extern void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
+- struct page *, struct inode *);
++int nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
++ struct page *page, struct inode *inode);
+
+ static inline void nilfs_put_page(struct page *page)
+ {
--- /dev/null
+From konishi.ryusuke@gmail.com Fri Feb 21 14:38:57 2025
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Date: Fri, 21 Feb 2025 22:37:53 +0900
+Subject: nilfs2: move page release outside of nilfs_delete_entry and nilfs_set_link
+To: stable@vger.kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Message-ID: <20250221133848.4335-2-konishi.ryusuke@gmail.com>
+
+From: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+
+commit 584db20c181f5e28c0386d7987406ace7fbd3e49 upstream.
+
+Patch series "nilfs2: Folio conversions for directory paths".
+
+This series applies page->folio conversions to nilfs2 directory
+operations. This reduces hidden compound_head() calls and also converts
+deprecated kmap calls to kmap_local in the directory code.
+
+Although nilfs2 does not yet support large folios, Matthew has done his
+best here to include support for large folios, which will be needed for
+devices with large block sizes.
+
+This series corresponds to the second half of the original post [1], but
+with two complementary patches inserted at the beginning and some
+adjustments, to prevent a kmap_local constraint violation found during
+testing with highmem mapping.
+
+[1] https://lkml.kernel.org/r/20231106173903.1734114-1-willy@infradead.org
+
+I have reviewed all changes and tested this for regular and small block
+sizes, both on machines with and without highmem mapping. No issues
+found.
+
+This patch (of 17):
+
+In a few directory operations, the call to nilfs_put_page() for a page
+obtained using nilfs_find_entry() or nilfs_dotdot() is hidden in
+nilfs_set_link() and nilfs_delete_entry(), making it difficult to track
+page release and preventing change of its call position.
+
+By moving nilfs_put_page() out of these functions, this makes the page
+get/put correspondence clearer and makes it easier to swap
+nilfs_put_page() calls (and kunmap calls within them) when modifying
+multiple directory entries simultaneously in nilfs_rename().
+
+Also, update comments for nilfs_set_link() and nilfs_delete_entry() to
+reflect changes in their behavior.
+
+To make nilfs_put_page() visible from namei.c, this moves its definition
+to nilfs.h and replaces existing equivalents to use it, but the exposure
+of that definition is temporary and will be removed on a later kmap ->
+kmap_local conversion.
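+
+After the move, the map/pin and release pairing is visible at the call
+site; in sketch form:
+
+	de = nilfs_find_entry(dir, &dentry->d_name, &page);	/* kmap + get */
+	/* ... rewrite or merge the entry ... */
+	err = nilfs_delete_entry(de, page);	/* no longer releases the page */
+	nilfs_put_page(page);			/* kunmap + put_page, now by the caller */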
+
+Link: https://lkml.kernel.org/r/20231127143036.2425-1-konishi.ryusuke@gmail.com
+Link: https://lkml.kernel.org/r/20231127143036.2425-2-konishi.ryusuke@gmail.com
+Signed-off-by: Ryusuke Konishi <konishi.ryusuke@gmail.com>
+Reviewed-by: Matthew Wilcox (Oracle) <willy@infradead.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: ee70999a988b ("nilfs2: handle errors that nilfs_prepare_chunk() may return")
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/nilfs2/dir.c | 11 +----------
+ fs/nilfs2/namei.c | 13 +++++++------
+ fs/nilfs2/nilfs.h | 6 ++++++
+ 3 files changed, 14 insertions(+), 16 deletions(-)
+
+--- a/fs/nilfs2/dir.c
++++ b/fs/nilfs2/dir.c
+@@ -64,12 +64,6 @@ static inline unsigned int nilfs_chunk_s
+ return inode->i_sb->s_blocksize;
+ }
+
+-static inline void nilfs_put_page(struct page *page)
+-{
+- kunmap(page);
+- put_page(page);
+-}
+-
+ /*
+ * Return the offset into page `page_nr' of the last valid
+ * byte in that page, plus one.
+@@ -450,7 +444,6 @@ int nilfs_inode_by_name(struct inode *di
+ return 0;
+ }
+
+-/* Releases the page */
+ void nilfs_set_link(struct inode *dir, struct nilfs_dir_entry *de,
+ struct page *page, struct inode *inode)
+ {
+@@ -465,7 +458,6 @@ void nilfs_set_link(struct inode *dir, s
+ de->inode = cpu_to_le64(inode->i_ino);
+ nilfs_set_de_type(de, inode);
+ nilfs_commit_chunk(page, mapping, from, to);
+- nilfs_put_page(page);
+ dir->i_mtime = inode_set_ctime_current(dir);
+ }
+
+@@ -569,7 +561,7 @@ out_unlock:
+
+ /*
+ * nilfs_delete_entry deletes a directory entry by merging it with the
+- * previous entry. Page is up-to-date. Releases the page.
++ * previous entry. Page is up-to-date.
+ */
+ int nilfs_delete_entry(struct nilfs_dir_entry *dir, struct page *page)
+ {
+@@ -605,7 +597,6 @@ int nilfs_delete_entry(struct nilfs_dir_
+ nilfs_commit_chunk(page, mapping, from, to);
+ inode->i_mtime = inode_set_ctime_current(inode);
+ out:
+- nilfs_put_page(page);
+ return err;
+ }
+
+--- a/fs/nilfs2/namei.c
++++ b/fs/nilfs2/namei.c
+@@ -297,6 +297,7 @@ static int nilfs_do_unlink(struct inode
+ set_nlink(inode, 1);
+ }
+ err = nilfs_delete_entry(de, page);
++ nilfs_put_page(page);
+ if (err)
+ goto out;
+
+@@ -406,6 +407,7 @@ static int nilfs_rename(struct mnt_idmap
+ goto out_dir;
+ }
+ nilfs_set_link(new_dir, new_de, new_page, old_inode);
++ nilfs_put_page(new_page);
+ nilfs_mark_inode_dirty(new_dir);
+ inode_set_ctime_current(new_inode);
+ if (dir_de)
+@@ -429,9 +431,11 @@ static int nilfs_rename(struct mnt_idmap
+ inode_set_ctime_current(old_inode);
+
+ nilfs_delete_entry(old_de, old_page);
++ nilfs_put_page(old_page);
+
+ if (dir_de) {
+ nilfs_set_link(old_inode, dir_de, dir_page, new_dir);
++ nilfs_put_page(dir_page);
+ drop_nlink(old_dir);
+ }
+ nilfs_mark_inode_dirty(old_dir);
+@@ -441,13 +445,10 @@ static int nilfs_rename(struct mnt_idmap
+ return err;
+
+ out_dir:
+- if (dir_de) {
+- kunmap(dir_page);
+- put_page(dir_page);
+- }
++ if (dir_de)
++ nilfs_put_page(dir_page);
+ out_old:
+- kunmap(old_page);
+- put_page(old_page);
++ nilfs_put_page(old_page);
+ out:
+ nilfs_transaction_abort(old_dir->i_sb);
+ return err;
+--- a/fs/nilfs2/nilfs.h
++++ b/fs/nilfs2/nilfs.h
+@@ -243,6 +243,12 @@ extern struct nilfs_dir_entry *nilfs_dot
+ extern void nilfs_set_link(struct inode *, struct nilfs_dir_entry *,
+ struct page *, struct inode *);
+
++static inline void nilfs_put_page(struct page *page)
++{
++ kunmap(page);
++ put_page(page);
++}
++
+ /* file.c */
+ extern int nilfs_sync_file(struct file *, loff_t, loff_t, int);
+
--- /dev/null
+From 47a973fd75639fe80d59f9e1860113bb2a0b112b Mon Sep 17 00:00:00 2001
+From: Kan Liang <kan.liang@linux.intel.com>
+Date: Wed, 29 Jan 2025 07:48:19 -0800
+Subject: perf/x86/intel: Fix ARCH_PERFMON_NUM_COUNTER_LEAF
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+commit 47a973fd75639fe80d59f9e1860113bb2a0b112b upstream.
+
+The EAX of the CPUID Leaf 023H enumerates the mask of valid sub-leaves.
+To tell the availability of the sub-leaf 1 (which enumerates the counter
+mask), perf should check bit 1 (0x2) of EAX, rather than bit 0 (0x1).
+
+The error is not user-visible on bare metal, because the sub-leaf 0 and
+the sub-leaf 1 are always available there. However, it may bring issues
+in a virtualization environment when a VMM only enumerates the sub-leaf 0.
+
+Introduce the cpuid35_e?x to replace the macros, which makes the
+implementation style consistent.
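+
+In sketch form, the corrected probe reads the leaf through the new
+union and tests the right bit:
+
+	union cpuid35_eax eax;
+
+	eax.full = cpuid_eax(ARCH_PERFMON_EXT_LEAF);
+	if (eax.split.cntr_subleaf) {
+		/* bit 1 (0x2) is set: sub-leaf 1 (counters) is enumerated */
+	}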
+
+Fixes: eb467aaac21e ("perf/x86/intel: Support Architectural PerfMon Extension leaf")
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20250129154820.3755948-3-kan.liang@linux.intel.com
+[ The patch is not exactly the same as the upstream patch, because the
+  6.6 stable kernel does not support the umask2/eq enumeration and uses
+  the number of counters rather than the counter mask. The change is
+  still straightforward: it utilizes the structured unions to replace the
+  macros when parsing the CPUID enumeration, and it also fixes a wrong
+  macro. ]
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/events/intel/core.c | 17 ++++++++++-------
+ arch/x86/include/asm/perf_event.h | 26 +++++++++++++++++++++++++-
+ 2 files changed, 35 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4643,16 +4643,19 @@ static void intel_pmu_check_num_counters
+
+ static void update_pmu_cap(struct x86_hybrid_pmu *pmu)
+ {
+- unsigned int sub_bitmaps = cpuid_eax(ARCH_PERFMON_EXT_LEAF);
+- unsigned int eax, ebx, ecx, edx;
++ unsigned int cntr, fixed_cntr, ecx, edx;
++ union cpuid35_eax eax;
++ union cpuid35_ebx ebx;
+
+- if (sub_bitmaps & ARCH_PERFMON_NUM_COUNTER_LEAF_BIT) {
++ cpuid(ARCH_PERFMON_EXT_LEAF, &eax.full, &ebx.full, &ecx, &edx);
++
++ if (eax.split.cntr_subleaf) {
+ cpuid_count(ARCH_PERFMON_EXT_LEAF, ARCH_PERFMON_NUM_COUNTER_LEAF,
+- &eax, &ebx, &ecx, &edx);
+- pmu->num_counters = fls(eax);
+- pmu->num_counters_fixed = fls(ebx);
++ &cntr, &fixed_cntr, &ecx, &edx);
++ pmu->num_counters = fls(cntr);
++ pmu->num_counters_fixed = fls(fixed_cntr);
+ intel_pmu_check_num_counters(&pmu->num_counters, &pmu->num_counters_fixed,
+- &pmu->intel_ctrl, ebx);
++ &pmu->intel_ctrl, fixed_cntr);
+ }
+ }
+
+--- a/arch/x86/include/asm/perf_event.h
++++ b/arch/x86/include/asm/perf_event.h
+@@ -177,9 +177,33 @@ union cpuid10_edx {
+ * detection/enumeration details:
+ */
+ #define ARCH_PERFMON_EXT_LEAF 0x00000023
+-#define ARCH_PERFMON_NUM_COUNTER_LEAF_BIT 0x1
+ #define ARCH_PERFMON_NUM_COUNTER_LEAF 0x1
+
++union cpuid35_eax {
++ struct {
++ unsigned int leaf0:1;
++ /* Counters Sub-Leaf */
++ unsigned int cntr_subleaf:1;
++ /* Auto Counter Reload Sub-Leaf */
++ unsigned int acr_subleaf:1;
++ /* Events Sub-Leaf */
++ unsigned int events_subleaf:1;
++ unsigned int reserved:28;
++ } split;
++ unsigned int full;
++};
++
++union cpuid35_ebx {
++ struct {
++ /* UnitMask2 Supported */
++ unsigned int umask2:1;
++ /* EQ-bit Supported */
++ unsigned int eq:1;
++ unsigned int reserved:30;
++ } split;
++ unsigned int full;
++};
++
+ /*
+ * Intel Architectural LBR CPUID detection/enumeration details:
+ */
ftrace-correct-preemption-accounting-for-function-tracing.patch
ftrace-do-not-add-duplicate-entries-in-subops-manager-ops.patch
net-mlx5e-don-t-call-cleanup-on-profile-rollback-failure.patch
+md-fix-missing-flush-of-sync_work.patch
+md-fix-md_seq_ops-regressions.patch
+arm64-dts-rockchip-change-eth-phy-mode-to-rgmii-id-for-orangepi-r1-plus-lts.patch
+perf-x86-intel-fix-arch_perfmon_num_counter_leaf.patch
+nilfs2-move-page-release-outside-of-nilfs_delete_entry-and-nilfs_set_link.patch
+nilfs2-eliminate-staggered-calls-to-kunmap-in-nilfs_rename.patch
+nilfs2-handle-errors-that-nilfs_prepare_chunk-may-return.patch
+x86-cpu-kvm-srso-fix-possible-missing-ibpb-on-vm-exit.patch
--- /dev/null
+From 318e8c339c9a0891c389298bb328ed0762a9935e Mon Sep 17 00:00:00 2001
+From: Patrick Bellasi <derkling@google.com>
+Date: Wed, 5 Feb 2025 14:04:41 +0000
+Subject: x86/cpu/kvm: SRSO: Fix possible missing IBPB on VM-Exit
+
+From: Patrick Bellasi <derkling@google.com>
+
+commit 318e8c339c9a0891c389298bb328ed0762a9935e upstream.
+
+In [1] the meaning of the synthetic IBPB flags has been redefined for a
+better separation of concerns:
+ - ENTRY_IBPB -- issue IBPB on entry only
+ - IBPB_ON_VMEXIT -- issue IBPB on VM-Exit only
+and the Retbleed mitigations have been updated to match this new
+semantics.
+
+Commit [2] was merged shortly before [1], and their interaction was not
+handled properly. This resulted in IBPB not being triggered on VM-Exit
+in all SRSO mitigation configs requesting an IBPB there.
+
+Specifically, an IBPB on VM-Exit is triggered only when
+X86_FEATURE_IBPB_ON_VMEXIT is set. However:
+
+ - X86_FEATURE_IBPB_ON_VMEXIT is not set for "spec_rstack_overflow=ibpb",
+ because before [1] having X86_FEATURE_ENTRY_IBPB was enough. Hence,
+ an IBPB is triggered on entry but the expected IBPB on VM-exit is
+ not.
+
+ - X86_FEATURE_IBPB_ON_VMEXIT is not set also when
+ "spec_rstack_overflow=ibpb-vmexit" if X86_FEATURE_ENTRY_IBPB is
+ already set.
+
+ That's because before [1] this was effectively redundant. Hence, e.g.
+ a "retbleed=ibpb spec_rstack_overflow=ibpb-vmexit" config mistakenly
+ reports the machine still vulnerable to SRSO, despite an IBPB being
+ triggered both on entry and VM-Exit, because of the Retbleed selected
+ mitigation config.
+
+ - UNTRAIN_RET_VM still won't actually do anything unless
+ CONFIG_MITIGATION_IBPB_ENTRY is set.
+
+For "spec_rstack_overflow=ibpb", enable IBPB on both entry and VM-Exit
+and clear X86_FEATURE_RSB_VMEXIT which is made superfluous by
+X86_FEATURE_IBPB_ON_VMEXIT. This effectively makes this mitigation
+option similar to the one for 'retbleed=ibpb'; thus, re-order the code
+for the RETBLEED_MITIGATION_IBPB option to be less confusing, enabling
+all needed features before disabling the ones that are not needed.
+
+For "spec_rstack_overflow=ibpb-vmexit", guard this mitigation setting
+with CONFIG_MITIGATION_IBPB_ENTRY to ensure UNTRAIN_RET_VM sequence is
+effectively compiled in. Drop instead the CONFIG_MITIGATION_SRSO guard,
+since none of the SRSO compile cruft is required in this configuration.
+Also, check only that the required microcode is present to effectively
+enable IBPB on VM-Exit.
+
+Finally, update the KConfig description for CONFIG_MITIGATION_IBPB_ENTRY
+to list also all SRSO config settings enabled by this guard.
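+
+In sketch form, the "spec_rstack_overflow=ibpb" leg now mirrors
+'retbleed=ibpb':
+
+	setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
+	setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+	/* entry_ibpb() invalidates all predictions, so these are superfluous */
+	setup_clear_cpu_cap(X86_FEATURE_UNRET);
+	setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+	setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);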
+
+Fixes: 864bcaa38ee4 ("x86/cpu/kvm: Provide UNTRAIN_RET_VM") [1]
+Fixes: d893832d0e1e ("x86/srso: Add IBPB on VMEXIT") [2]
+Reported-by: Yosry Ahmed <yosryahmed@google.com>
+Signed-off-by: Patrick Bellasi <derkling@google.com>
+Reviewed-by: Borislav Petkov (AMD) <bp@alien8.de>
+Cc: stable@kernel.org
+Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/Kconfig | 3 ++-
+ arch/x86/kernel/cpu/bugs.c | 21 ++++++++++++++-------
+ 2 files changed, 16 insertions(+), 8 deletions(-)
+
+--- a/arch/x86/Kconfig
++++ b/arch/x86/Kconfig
+@@ -2514,7 +2514,8 @@ config CPU_IBPB_ENTRY
+ depends on CPU_SUP_AMD && X86_64
+ default y
+ help
+- Compile the kernel with support for the retbleed=ibpb mitigation.
++ Compile the kernel with support for the retbleed=ibpb and
++ spec_rstack_overflow={ibpb,ibpb-vmexit} mitigations.
+
+ config CPU_IBRS_ENTRY
+ bool "Enable IBRS on kernel entry"
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -1113,6 +1113,8 @@ do_cmd_auto:
+
+ case RETBLEED_MITIGATION_IBPB:
+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
++ mitigate_smt = true;
+
+ /*
+ * IBPB on entry already obviates the need for
+@@ -1122,9 +1124,6 @@ do_cmd_auto:
+ setup_clear_cpu_cap(X86_FEATURE_UNRET);
+ setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
+
+- setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+- mitigate_smt = true;
+-
+ /*
+ * There is no need for RSB filling: entry_ibpb() ensures
+ * all predictions, including the RSB, are invalidated,
+@@ -2626,6 +2625,7 @@ static void __init srso_select_mitigatio
+ if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
+ if (has_microcode) {
+ setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
++ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ srso_mitigation = SRSO_MITIGATION_IBPB;
+
+ /*
+@@ -2635,6 +2635,13 @@ static void __init srso_select_mitigatio
+ */
+ setup_clear_cpu_cap(X86_FEATURE_UNRET);
+ setup_clear_cpu_cap(X86_FEATURE_RETHUNK);
++
++ /*
++ * There is no need for RSB filling: entry_ibpb() ensures
++ * all predictions, including the RSB, are invalidated,
++ * regardless of IBPB implementation.
++ */
++ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ }
+ } else {
+ pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
+@@ -2643,8 +2650,8 @@ static void __init srso_select_mitigatio
+ break;
+
+ case SRSO_CMD_IBPB_ON_VMEXIT:
+- if (IS_ENABLED(CONFIG_CPU_SRSO)) {
+- if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
++ if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
++ if (has_microcode) {
+ setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
+ srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;
+
+@@ -2656,9 +2663,9 @@ static void __init srso_select_mitigatio
+ setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
+ }
+ } else {
+- pr_err("WARNING: kernel not compiled with CPU_SRSO.\n");
++ pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
+ goto pred_cmd;
+- }
++ }
+ break;
+
+ default: