--- /dev/null
+From 9e66e26876431a0793c3188fa7cd09ba99e4a0a0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 May 2024 20:39:57 +0800
+Subject: ext4: check the extent status again before inserting delalloc block
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 0ea6560abb3bac1ffcfa4bf6b2c4d344fdc27b3c ]
+
+ext4_da_map_blocks() looks up any extent entry in the extent status
+tree (w/o i_data_sem) and then looks up any on-disk extent mapping
+(with i_data_sem held in read mode).
+
+If it finds a hole in the extent status tree, or can't find any entry
+at all, it then takes i_data_sem in write mode to add a da entry into
+the extent status tree. This can race with the page-mkwrite and
+fallocate paths.
+
+Note that this is ok between
+1. the ext4 buffered-write path vs. ext4_page_mkwrite(), because of
+   the folio lock, and
+2. the ext4 buffered-write path vs. ext4 fallocate, because of the
+   inode lock.
+
+But ext4_page_mkwrite() and the ext4 fallocate path can race with
+each other:
+
+ext4_page_mkwrite() ext4_fallocate()
+ block_page_mkwrite()
+ ext4_da_map_blocks()
+ //find hole in extent status tree
+ ext4_alloc_file_blocks()
+ ext4_map_blocks()
+ //allocate block and unwritten extent
+ ext4_insert_delayed_block()
+ ext4_da_reserve_space()
+ //reserve one more block
+ ext4_es_insert_delayed_block()
+ //drop unwritten extent and add delayed extent by mistake
+
+Then the delalloc extent is wrong until writeback, the extra reserved
+block can never be released, and it triggers the warning below:
+
+ EXT4-fs (pmem2): Inode 13 (00000000bbbd4d23): i_reserved_data_blocks(1) not cleared!
+
+Fix the problem by looking up the extent status tree again while
+i_data_sem is held in write mode. If there is still no entry, insert
+a new da entry into the extent status tree.
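+
+In pseudocode, the resulting order of operations is (a simplified
+sketch of the fix in the diff below; the on-disk re-query and error
+handling are trimmed):
+
+    down_write(&EXT4_I(inode)->i_data_sem);
+    /* recheck: mkwrite/fallocate may have raced in while unlocked */
+    if (ext4_es_lookup_extent(inode, iblock, NULL, &es) &&
+        !ext4_es_is_hole(&es)) {
+            up_write(&EXT4_I(inode)->i_data_sem);
+            goto found;     /* use the extent the racer inserted */
+    }
+    retval = ext4_insert_delayed_block(inode, map->m_lblk);
+    up_write(&EXT4_I(inode)->i_data_sem);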
+
+Cc: stable@vger.kernel.org
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Link: https://patch.msgid.link/20240517124005.347221-3-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/inode.c | 21 +++++++++++++++++++++
+ 1 file changed, 21 insertions(+)
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 168819b4db019..4b0d64a76e88e 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -1737,6 +1737,7 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
+ if (ext4_es_is_hole(&es))
+ goto add_delayed;
+
++found:
+ /*
+ * Delayed extent could be allocated by fallocate.
+ * So we need to check it.
+@@ -1781,6 +1782,26 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
+
+ add_delayed:
+ down_write(&EXT4_I(inode)->i_data_sem);
++ /*
++ * Page fault path (ext4_page_mkwrite does not take i_rwsem)
++ * and fallocate path (no folio lock) can race. Make sure we
++ * lookup the extent status tree here again while i_data_sem
++ * is held in write mode, before inserting a new da entry in
++ * the extent status tree.
++ */
++ if (ext4_es_lookup_extent(inode, iblock, NULL, &es)) {
++ if (!ext4_es_is_hole(&es)) {
++ up_write(&EXT4_I(inode)->i_data_sem);
++ goto found;
++ }
++ } else if (!ext4_has_inline_data(inode)) {
++ retval = ext4_map_query_blocks(NULL, inode, map);
++ if (retval) {
++ up_write(&EXT4_I(inode)->i_data_sem);
++ return retval;
++ }
++ }
++
+ retval = ext4_insert_delayed_block(inode, map->m_lblk);
+ up_write(&EXT4_I(inode)->i_data_sem);
+ if (retval)
+--
+2.43.0
+
--- /dev/null
+From f0e3449e9bd17e972f0fec7fdd4c19e83668efca Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 17 May 2024 20:39:56 +0800
+Subject: ext4: factor out a common helper to query extent map
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+[ Upstream commit 8e4e5cdf2fdeb99445a468b6b6436ad79b9ecb30 ]
+
+Factor out a new common helper, ext4_map_query_blocks(), from
+ext4_da_map_blocks(); it queries and returns the extent map status on
+the inode's extent path. No logic changes.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Jan Kara <jack@suse.cz>
+Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com>
+Link: https://patch.msgid.link/20240517124005.347221-2-yi.zhang@huaweicloud.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Stable-dep-of: 0ea6560abb3b ("ext4: check the extent status again before inserting delalloc block")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ext4/inode.c | 57 +++++++++++++++++++++++++++----------------------
+ 1 file changed, 32 insertions(+), 25 deletions(-)
+
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 4bae9ccf5fe01..168819b4db019 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -453,6 +453,35 @@ static void ext4_map_blocks_es_recheck(handle_t *handle,
+ }
+ #endif /* ES_AGGRESSIVE_TEST */
+
++static int ext4_map_query_blocks(handle_t *handle, struct inode *inode,
++ struct ext4_map_blocks *map)
++{
++ unsigned int status;
++ int retval;
++
++ if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
++ retval = ext4_ext_map_blocks(handle, inode, map, 0);
++ else
++ retval = ext4_ind_map_blocks(handle, inode, map, 0);
++
++ if (retval <= 0)
++ return retval;
++
++ if (unlikely(retval != map->m_len)) {
++ ext4_warning(inode->i_sb,
++ "ES len assertion failed for inode "
++ "%lu: retval %d != map->m_len %d",
++ inode->i_ino, retval, map->m_len);
++ WARN_ON(1);
++ }
++
++ status = map->m_flags & EXT4_MAP_UNWRITTEN ?
++ EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
++ ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
++ map->m_pblk, status);
++ return retval;
++}
++
+ /*
+ * The ext4_map_blocks() function tries to look up the requested blocks,
+ * and returns if the blocks are already mapped.
+@@ -1744,33 +1773,11 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
+ down_read(&EXT4_I(inode)->i_data_sem);
+ if (ext4_has_inline_data(inode))
+ retval = 0;
+- else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+- retval = ext4_ext_map_blocks(NULL, inode, map, 0);
+ else
+- retval = ext4_ind_map_blocks(NULL, inode, map, 0);
+- if (retval < 0) {
+- up_read(&EXT4_I(inode)->i_data_sem);
+- return retval;
+- }
+- if (retval > 0) {
+- unsigned int status;
+-
+- if (unlikely(retval != map->m_len)) {
+- ext4_warning(inode->i_sb,
+- "ES len assertion failed for inode "
+- "%lu: retval %d != map->m_len %d",
+- inode->i_ino, retval, map->m_len);
+- WARN_ON(1);
+- }
+-
+- status = map->m_flags & EXT4_MAP_UNWRITTEN ?
+- EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
+- ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
+- map->m_pblk, status);
+- up_read(&EXT4_I(inode)->i_data_sem);
+- return retval;
+- }
++ retval = ext4_map_query_blocks(NULL, inode, map);
+ up_read(&EXT4_I(inode)->i_data_sem);
++ if (retval)
++ return retval;
+
+ add_delayed:
+ down_write(&EXT4_I(inode)->i_data_sem);
+--
+2.43.0
+
--- /dev/null
+From 60c4e3763d6f37d5cfe68eaf7568b681ad3701cb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Jun 2024 02:15:38 +0000
+Subject: f2fs: assign CURSEG_ALL_DATA_ATGC if blkaddr is valid
+
+From: Jaegeuk Kim <jaegeuk@kernel.org>
+
+[ Upstream commit 8cb1f4080dd91c6e6b01dbea013a3f42341cb6a1 ]
+
+mkdir /mnt/test/comp
+f2fs_io setflags compression /mnt/test/comp
+dd if=/dev/zero of=/mnt/test/comp/testfile bs=16k count=1
+truncate --size 13 /mnt/test/comp/testfile
+
+In the above scenario, we can hit a BUG_ON():
+ kernel BUG at fs/f2fs/segment.c:3589!
+ Call Trace:
+ do_write_page+0x78/0x390 [f2fs]
+ f2fs_outplace_write_data+0x62/0xb0 [f2fs]
+ f2fs_do_write_data_page+0x275/0x740 [f2fs]
+ f2fs_write_single_data_page+0x1dc/0x8f0 [f2fs]
+ f2fs_write_multi_pages+0x1e5/0xae0 [f2fs]
+ f2fs_write_cache_pages+0xab1/0xc60 [f2fs]
+ f2fs_write_data_pages+0x2d8/0x330 [f2fs]
+ do_writepages+0xcf/0x270
+ __writeback_single_inode+0x44/0x350
+ writeback_sb_inodes+0x242/0x530
+ __writeback_inodes_wb+0x54/0xf0
+ wb_writeback+0x192/0x310
+ wb_workfn+0x30d/0x400
+
+The reason is that we assigned CURSEG_ALL_DATA_ATGC to a COMPR_ADDR
+block whose page had the gcing flag set by set_cluster_dirty().
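+
+With the fix (see the diff below), ATGC is only chosen when the old
+block address is a valid data blkaddr; the resulting condition in
+__get_segment_type_6() is roughly:
+
+    if (fio->sbi->am.atgc_enabled &&
+        (fio->io_type == FS_DATA_IO) &&
+        (fio->sbi->gc_mode != GC_URGENT_HIGH) &&
+        __is_valid_data_blkaddr(fio->old_blkaddr) &&
+        !is_inode_flag_set(inode, FI_OPU_WRITE))
+            return CURSEG_ALL_DATA_ATGC;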
+
+Cc: stable@vger.kernel.org
+Fixes: 4961acdd65c9 ("f2fs: fix to tag gcing flag on page during block migration")
+Reviewed-by: Chao Yu <chao@kernel.org>
+Tested-by: Will McVicker <willmcvicker@google.com>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/segment.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 92331f8dbe4e3..601825785226d 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -3484,6 +3484,7 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
+ if (fio->sbi->am.atgc_enabled &&
+ (fio->io_type == FS_DATA_IO) &&
+ (fio->sbi->gc_mode != GC_URGENT_HIGH) &&
++ __is_valid_data_blkaddr(fio->old_blkaddr) &&
+ !is_inode_flag_set(inode, FI_OPU_WRITE))
+ return CURSEG_ALL_DATA_ATGC;
+ else
+--
+2.43.0
+
--- /dev/null
+From c2cf13f9fc70adac338a0ff621db1c749de917e3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 17:47:00 +0800
+Subject: f2fs: fix to avoid use SSR allocate when do defragment
+
+From: Zhiguo Niu <zhiguo.niu@unisoc.com>
+
+[ Upstream commit 21327a042dd94bc73181d7300e688699cb1f467e ]
+
+SSR allocation mode will be used when doing file defragmentation if
+ATGC is working at the same time. That is because
+set_page_private_gcing() may cause f2fs_allocate_data_block() to pick
+the CURSEG_ALL_DATA_ATGC segment type when a defragmented page is
+written back, which can make file fragmentation worse.
+
+A file with 2 fragments ended up as follows after defragmentation:
+
+----------------file info-------------------
+sensorsdata :
+--------------------------------------------
+dev [254:48]
+ino [0x 3029 : 12329]
+mode [0x 81b0 : 33200]
+nlink [0x 1 : 1]
+uid [0x 27e6 : 10214]
+gid [0x 27e6 : 10214]
+size [0x 242000 : 2367488]
+blksize [0x 1000 : 4096]
+blocks [0x 1210 : 4624]
+--------------------------------------------
+
+file_pos start_blk end_blk blks
+ 0 11361121 11361207 87
+ 356352 11361215 11361216 2
+ 364544 11361218 11361218 1
+ 368640 11361220 11361221 2
+ 376832 11361224 11361225 2
+ 385024 11361227 11361238 12
+ 434176 11361240 11361252 13
+ 487424 11361254 11361254 1
+ 491520 11361271 11361279 9
+ 528384 3681794 3681795 2
+ 536576 3681797 3681797 1
+ 540672 3681799 3681799 1
+ 544768 3681803 3681803 1
+ 548864 3681805 3681805 1
+ 552960 3681807 3681807 1
+ 557056 3681809 3681809 1
+
+Signed-off-by: Zhiguo Niu <zhiguo.niu@unisoc.com>
+Reviewed-by: Chao Yu <chao@kernel.org>
+Signed-off-by: Jaegeuk Kim <jaegeuk@kernel.org>
+Stable-dep-of: 8cb1f4080dd9 ("f2fs: assign CURSEG_ALL_DATA_ATGC if blkaddr is valid")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/f2fs/segment.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
+index 259e235becc59..92331f8dbe4e3 100644
+--- a/fs/f2fs/segment.c
++++ b/fs/f2fs/segment.c
+@@ -3483,7 +3483,8 @@ static int __get_segment_type_6(struct f2fs_io_info *fio)
+ if (page_private_gcing(fio->page)) {
+ if (fio->sbi->am.atgc_enabled &&
+ (fio->io_type == FS_DATA_IO) &&
+- (fio->sbi->gc_mode != GC_URGENT_HIGH))
++ (fio->sbi->gc_mode != GC_URGENT_HIGH) &&
++ !is_inode_flag_set(inode, FI_OPU_WRITE))
+ return CURSEG_ALL_DATA_ATGC;
+ else
+ return CURSEG_COLD_DATA;
+--
+2.43.0
+
--- /dev/null
+From 018c857e26faef3f160c02b21f8f701f012b1762 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Jun 2024 16:40:10 +0100
+Subject: MIPS: dts: loongson: Fix liointc IRQ polarity
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+[ Upstream commit dbb69b9d6234aad23b3ecd33e5bc8a8ae1485b7d ]
+
+All internal liointc interrupts are high-level triggered.
+
+Fixes: b1a792601f26 ("MIPS: Loongson64: DeviceTree for Loongson-2K1000")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../boot/dts/loongson/loongson64-2k1000.dtsi | 42 +++++++++----------
+ 1 file changed, 21 insertions(+), 21 deletions(-)
+
+diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+index c1d3092fdd870..eec8243be6499 100644
+--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
++++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+@@ -100,7 +100,7 @@ rtc0: rtc@1fe07800 {
+ compatible = "loongson,ls2k1000-rtc";
+ reg = <0 0x1fe07800 0 0x78>;
+ interrupt-parent = <&liointc0>;
+- interrupts = <60 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <60 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ uart0: serial@1fe00000 {
+@@ -108,7 +108,7 @@ uart0: serial@1fe00000 {
+ reg = <0 0x1fe00000 0 0x8>;
+ clock-frequency = <125000000>;
+ interrupt-parent = <&liointc0>;
+- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ no-loopback-test;
+ };
+
+@@ -131,8 +131,8 @@ gmac@3,0 {
+ "pciclass0c03";
+
+ reg = <0x1800 0x0 0x0 0x0 0x0>;
+- interrupts = <12 IRQ_TYPE_LEVEL_LOW>,
+- <13 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <12 IRQ_TYPE_LEVEL_HIGH>,
++ <13 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
+ interrupt-parent = <&liointc0>;
+ phy-mode = "rgmii-id";
+@@ -155,8 +155,8 @@ gmac@3,1 {
+ "loongson, pci-gmac";
+
+ reg = <0x1900 0x0 0x0 0x0 0x0>;
+- interrupts = <14 IRQ_TYPE_LEVEL_LOW>,
+- <15 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <14 IRQ_TYPE_LEVEL_HIGH>,
++ <15 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-names = "macirq", "eth_lpi";
+ interrupt-parent = <&liointc0>;
+ phy-mode = "rgmii-id";
+@@ -178,7 +178,7 @@ ehci@4,1 {
+ "pciclass0c03";
+
+ reg = <0x2100 0x0 0x0 0x0 0x0>;
+- interrupts = <18 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <18 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ };
+
+@@ -189,7 +189,7 @@ ohci@4,2 {
+ "pciclass0c03";
+
+ reg = <0x2200 0x0 0x0 0x0 0x0>;
+- interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ };
+
+@@ -200,7 +200,7 @@ sata@8,0 {
+ "pciclass0106";
+
+ reg = <0x4000 0x0 0x0 0x0 0x0>;
+- interrupts = <19 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <19 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc0>;
+ };
+
+@@ -215,10 +215,10 @@ pcie@9,0 {
+ #size-cells = <2>;
+ device_type = "pci";
+ #interrupt-cells = <1>;
+- interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <0 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ external-facing;
+ };
+@@ -234,10 +234,10 @@ pcie@a,0 {
+ #size-cells = <2>;
+ device_type = "pci";
+ #interrupt-cells = <1>;
+- interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <1 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ external-facing;
+ };
+@@ -253,10 +253,10 @@ pcie@b,0 {
+ #size-cells = <2>;
+ device_type = "pci";
+ #interrupt-cells = <1>;
+- interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <2 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ external-facing;
+ };
+@@ -272,10 +272,10 @@ pcie@c,0 {
+ #size-cells = <2>;
+ device_type = "pci";
+ #interrupt-cells = <1>;
+- interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <3 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ external-facing;
+ };
+@@ -291,10 +291,10 @@ pcie@d,0 {
+ #size-cells = <2>;
+ device_type = "pci";
+ #interrupt-cells = <1>;
+- interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <4 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ external-facing;
+ };
+@@ -310,10 +310,10 @@ pcie@e,0 {
+ #size-cells = <2>;
+ device_type = "pci";
+ #interrupt-cells = <1>;
+- interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
++ interrupts = <5 IRQ_TYPE_LEVEL_HIGH>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+- interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_LOW>;
++ interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_HIGH>;
+ ranges;
+ external-facing;
+ };
+--
+2.43.0
+
--- /dev/null
+From 198e0b212349500a15aff38a72ea02f8559618e7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Jun 2024 16:40:11 +0100
+Subject: MIPS: dts: loongson: Fix ls2k1000-rtc interrupt
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+[ Upstream commit f70fd92df7529e7283e02a6c3a2510075f13ba30 ]
+
+The correct interrupt line for RTC is line 8 on liointc1.
+
+Fixes: e47084e116fc ("MIPS: Loongson64: DTS: Add RTC support to Loongson-2K1000")
+Cc: stable@vger.kernel.org
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+index eec8243be6499..cc7747c5f21f3 100644
+--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
++++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+@@ -99,8 +99,8 @@ liointc1: interrupt-controller@1fe11440 {
+ rtc0: rtc@1fe07800 {
+ compatible = "loongson,ls2k1000-rtc";
+ reg = <0 0x1fe07800 0 0x78>;
+- interrupt-parent = <&liointc0>;
+- interrupts = <60 IRQ_TYPE_LEVEL_HIGH>;
++ interrupt-parent = <&liointc1>;
++ interrupts = <8 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ uart0: serial@1fe00000 {
+--
+2.43.0
+
--- /dev/null
+From fc7566b874ddc60bc8cf6f16618acf6a7f783a46 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 7 May 2024 19:51:22 +0100
+Subject: MIPS: Loongson64: DTS: Fix PCIe port nodes for ls7a
+
+From: Jiaxun Yang <jiaxun.yang@flygoat.com>
+
+[ Upstream commit d89a415ff8d5e0aad4963f2d8ebb0f9e8110b7fa ]
+
+Add various required properties to silence these warnings:
+
+arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi:116.16-297.5: Warning (interrupt_provider): /bus@10000000/pci@1a000000: '#interrupt-cells' found, but node is not an interrupt provider
+arch/mips/boot/dts/loongson/loongson64_2core_2k1000.dtb: Warning (interrupt_map): Failed prerequisite 'interrupt_provider'
+
+Signed-off-by: Jiaxun Yang <jiaxun.yang@flygoat.com>
+Signed-off-by: Thomas Bogendoerfer <tsbogend@alpha.franken.de>
+Stable-dep-of: dbb69b9d6234 ("MIPS: dts: loongson: Fix liointc IRQ polarity")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../boot/dts/loongson/loongson64-2k1000.dtsi | 37 +++++++++++++++----
+ 1 file changed, 30 insertions(+), 7 deletions(-)
+
+diff --git a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+index c0be84a6e81fd..c1d3092fdd870 100644
+--- a/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
++++ b/arch/mips/boot/dts/loongson/loongson64-2k1000.dtsi
+@@ -117,7 +117,6 @@ pci@1a000000 {
+ device_type = "pci";
+ #address-cells = <3>;
+ #size-cells = <2>;
+- #interrupt-cells = <2>;
+
+ reg = <0 0x1a000000 0 0x02000000>,
+ <0xfe 0x00000000 0 0x20000000>;
+@@ -205,93 +204,117 @@ sata@8,0 {
+ interrupt-parent = <&liointc0>;
+ };
+
+- pci_bridge@9,0 {
++ pcie@9,0 {
+ compatible = "pci0014,7a19.0",
+ "pci0014,7a19",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x4800 0x0 0x0 0x0 0x0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
+ #interrupt-cells = <1>;
+ interrupts = <0 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &liointc1 0 IRQ_TYPE_LEVEL_LOW>;
++ ranges;
+ external-facing;
+ };
+
+- pci_bridge@a,0 {
++ pcie@a,0 {
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x5000 0x0 0x0 0x0 0x0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
+ #interrupt-cells = <1>;
+ interrupts = <1 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &liointc1 1 IRQ_TYPE_LEVEL_LOW>;
++ ranges;
+ external-facing;
+ };
+
+- pci_bridge@b,0 {
++ pcie@b,0 {
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x5800 0x0 0x0 0x0 0x0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
+ #interrupt-cells = <1>;
+ interrupts = <2 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &liointc1 2 IRQ_TYPE_LEVEL_LOW>;
++ ranges;
+ external-facing;
+ };
+
+- pci_bridge@c,0 {
++ pcie@c,0 {
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x6000 0x0 0x0 0x0 0x0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
+ #interrupt-cells = <1>;
+ interrupts = <3 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &liointc1 3 IRQ_TYPE_LEVEL_LOW>;
++ ranges;
+ external-facing;
+ };
+
+- pci_bridge@d,0 {
++ pcie@d,0 {
+ compatible = "pci0014,7a19.0",
+ "pci0014,7a19",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x6800 0x0 0x0 0x0 0x0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
+ #interrupt-cells = <1>;
+ interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &liointc1 4 IRQ_TYPE_LEVEL_LOW>;
++ ranges;
+ external-facing;
+ };
+
+- pci_bridge@e,0 {
++ pcie@e,0 {
+ compatible = "pci0014,7a09.0",
+ "pci0014,7a09",
+ "pciclass060400",
+ "pciclass0604";
+
+ reg = <0x7000 0x0 0x0 0x0 0x0>;
++ #address-cells = <3>;
++ #size-cells = <2>;
++ device_type = "pci";
+ #interrupt-cells = <1>;
+ interrupts = <5 IRQ_TYPE_LEVEL_LOW>;
+ interrupt-parent = <&liointc1>;
+ interrupt-map-mask = <0 0 0 0>;
+ interrupt-map = <0 0 0 0 &liointc1 5 IRQ_TYPE_LEVEL_LOW>;
++ ranges;
+ external-facing;
+ };
+
+--
+2.43.0
+
--- /dev/null
+From 051859b28bada979e25472672c9f5c64348af74c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 4 Jul 2024 10:10:50 +0100
+Subject: mm: fix khugepaged activation policy
+
+From: Ryan Roberts <ryan.roberts@arm.com>
+
+[ Upstream commit 00f58104202c472e487f0866fbd38832523fd4f9 ]
+
+Since the introduction of mTHP, the documentation has stated that
+khugepaged would be enabled when any mTHP size is enabled, and disabled
+when all mTHP sizes are disabled. There are two problems with this:
+1. this is not what the code implemented, and 2. this is not the
+desirable behavior.
+
+Desirable behavior is for khugepaged to be enabled when any PMD-sized THP
+is enabled, anon or file. (Note that file THP is still controlled by the
+top-level control so we must always consider that, as well as the PMD-size
+mTHP control for anon). khugepaged only supports collapsing to PMD-sized
+THP so there is no value in enabling it when PMD-sized THP is disabled.
+So let's change the code and documentation to reflect this policy.
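+
+Expressed as a condition (a sketch of the new hugepage_pmd_enabled()
+helper added in the diff below):
+
+    enabled = (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
+               hugepage_global_enabled()) ||       /* file THP */
+              test_bit(PMD_ORDER, &huge_anon_orders_always) ||
+              test_bit(PMD_ORDER, &huge_anon_orders_madvise) ||
+              (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
+               hugepage_global_enabled());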
+
+Further, per-size enabled control modification events were not
+previously forwarded to khugepaged to give it an opportunity to start
+or stop. Consequently, the following resulted in khugepaged erroneously
+not being activated:
+
+ echo never > /sys/kernel/mm/transparent_hugepage/enabled
+ echo always > /sys/kernel/mm/transparent_hugepage/hugepages-2048kB/enabled
+
+[ryan.roberts@arm.com: v3]
+ Link: https://lkml.kernel.org/r/20240705102849.2479686-1-ryan.roberts@arm.com
+Link: https://lkml.kernel.org/r/20240705102849.2479686-1-ryan.roberts@arm.com
+Link: https://lkml.kernel.org/r/20240704091051.2411934-1-ryan.roberts@arm.com
+Signed-off-by: Ryan Roberts <ryan.roberts@arm.com>
+Fixes: 3485b88390b0 ("mm: thp: introduce multi-size THP sysfs interface")
+Closes: https://lore.kernel.org/linux-mm/7a0bbe69-1e3d-4263-b206-da007791a5c4@redhat.com/
+Acked-by: David Hildenbrand <david@redhat.com>
+Cc: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Barry Song <baohua@kernel.org>
+Cc: Jonathan Corbet <corbet@lwn.net>
+Cc: Lance Yang <ioworker0@gmail.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ Documentation/admin-guide/mm/transhuge.rst | 11 ++++----
+ include/linux/huge_mm.h | 12 --------
+ mm/huge_memory.c | 7 +++++
+ mm/khugepaged.c | 33 +++++++++++++++++-----
+ 4 files changed, 38 insertions(+), 25 deletions(-)
+
+diff --git a/Documentation/admin-guide/mm/transhuge.rst b/Documentation/admin-guide/mm/transhuge.rst
+index d414d3f5592a8..1f901de208bca 100644
+--- a/Documentation/admin-guide/mm/transhuge.rst
++++ b/Documentation/admin-guide/mm/transhuge.rst
+@@ -202,12 +202,11 @@ PMD-mappable transparent hugepage::
+
+ cat /sys/kernel/mm/transparent_hugepage/hpage_pmd_size
+
+-khugepaged will be automatically started when one or more hugepage
+-sizes are enabled (either by directly setting "always" or "madvise",
+-or by setting "inherit" while the top-level enabled is set to "always"
+-or "madvise"), and it'll be automatically shutdown when the last
+-hugepage size is disabled (either by directly setting "never", or by
+-setting "inherit" while the top-level enabled is set to "never").
++khugepaged will be automatically started when PMD-sized THP is enabled
++(either of the per-size anon control or the top-level control are set
++to "always" or "madvise"), and it'll be automatically shutdown when
++PMD-sized THP is disabled (when both the per-size anon control and the
++top-level control are "never")
+
+ Khugepaged controls
+ -------------------
+diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
+index 71945cf4c7a8d..d73c7d89d27b9 100644
+--- a/include/linux/huge_mm.h
++++ b/include/linux/huge_mm.h
+@@ -132,18 +132,6 @@ static inline bool hugepage_global_always(void)
+ (1<<TRANSPARENT_HUGEPAGE_FLAG);
+ }
+
+-static inline bool hugepage_flags_enabled(void)
+-{
+- /*
+- * We cover both the anon and the file-backed case here; we must return
+- * true if globally enabled, even when all anon sizes are set to never.
+- * So we don't need to look at huge_anon_orders_inherit.
+- */
+- return hugepage_global_enabled() ||
+- READ_ONCE(huge_anon_orders_always) ||
+- READ_ONCE(huge_anon_orders_madvise);
+-}
+-
+ static inline int highest_order(unsigned long orders)
+ {
+ return fls_long(orders) - 1;
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 374a0d54b08df..e234954cf5067 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -517,6 +517,13 @@ static ssize_t thpsize_enabled_store(struct kobject *kobj,
+ } else
+ ret = -EINVAL;
+
++ if (ret > 0) {
++ int err;
++
++ err = start_stop_khugepaged();
++ if (err)
++ ret = err;
++ }
+ return ret;
+ }
+
+diff --git a/mm/khugepaged.c b/mm/khugepaged.c
+index 774a97e6e2da3..92ecd59fffd41 100644
+--- a/mm/khugepaged.c
++++ b/mm/khugepaged.c
+@@ -416,6 +416,26 @@ static inline int hpage_collapse_test_exit_or_disable(struct mm_struct *mm)
+ test_bit(MMF_DISABLE_THP, &mm->flags);
+ }
+
++static bool hugepage_pmd_enabled(void)
++{
++ /*
++ * We cover both the anon and the file-backed case here; file-backed
++ * hugepages, when configured in, are determined by the global control.
++ * Anon pmd-sized hugepages are determined by the pmd-size control.
++ */
++ if (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS) &&
++ hugepage_global_enabled())
++ return true;
++ if (test_bit(PMD_ORDER, &huge_anon_orders_always))
++ return true;
++ if (test_bit(PMD_ORDER, &huge_anon_orders_madvise))
++ return true;
++ if (test_bit(PMD_ORDER, &huge_anon_orders_inherit) &&
++ hugepage_global_enabled())
++ return true;
++ return false;
++}
++
+ void __khugepaged_enter(struct mm_struct *mm)
+ {
+ struct khugepaged_mm_slot *mm_slot;
+@@ -452,7 +472,7 @@ void khugepaged_enter_vma(struct vm_area_struct *vma,
+ unsigned long vm_flags)
+ {
+ if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags) &&
+- hugepage_flags_enabled()) {
++ hugepage_pmd_enabled()) {
+ if (thp_vma_allowable_order(vma, vm_flags, TVA_ENFORCE_SYSFS,
+ PMD_ORDER))
+ __khugepaged_enter(vma->vm_mm);
+@@ -2465,8 +2485,7 @@ static unsigned int khugepaged_scan_mm_slot(unsigned int pages, int *result,
+
+ static int khugepaged_has_work(void)
+ {
+- return !list_empty(&khugepaged_scan.mm_head) &&
+- hugepage_flags_enabled();
++ return !list_empty(&khugepaged_scan.mm_head) && hugepage_pmd_enabled();
+ }
+
+ static int khugepaged_wait_event(void)
+@@ -2539,7 +2558,7 @@ static void khugepaged_wait_work(void)
+ return;
+ }
+
+- if (hugepage_flags_enabled())
++ if (hugepage_pmd_enabled())
+ wait_event_freezable(khugepaged_wait, khugepaged_wait_event());
+ }
+
+@@ -2570,7 +2589,7 @@ static void set_recommended_min_free_kbytes(void)
+ int nr_zones = 0;
+ unsigned long recommended_min;
+
+- if (!hugepage_flags_enabled()) {
++ if (!hugepage_pmd_enabled()) {
+ calculate_min_free_kbytes();
+ goto update_wmarks;
+ }
+@@ -2620,7 +2639,7 @@ int start_stop_khugepaged(void)
+ int err = 0;
+
+ mutex_lock(&khugepaged_mutex);
+- if (hugepage_flags_enabled()) {
++ if (hugepage_pmd_enabled()) {
+ if (!khugepaged_thread)
+ khugepaged_thread = kthread_run(khugepaged, NULL,
+ "khugepaged");
+@@ -2646,7 +2665,7 @@ int start_stop_khugepaged(void)
+ void khugepaged_min_free_kbytes_update(void)
+ {
+ mutex_lock(&khugepaged_mutex);
+- if (hugepage_flags_enabled() && khugepaged_thread)
++ if (hugepage_pmd_enabled() && khugepaged_thread)
+ set_recommended_min_free_kbytes();
+ mutex_unlock(&khugepaged_mutex);
+ }
+--
+2.43.0
+
--- /dev/null
+From 4b1b231f4ddf05b0781efedcb92b1ec57a095258 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 15 May 2024 10:47:54 +0800
+Subject: mm/huge_memory: mark racy access on huge_anon_orders_always
+
+From: Ran Xiaokai <ran.xiaokai@zte.com.cn>
+
+[ Upstream commit 7f83bf14603ef41a44dc907594d749a283e22c37 ]
+
+huge_anon_orders_always is accessed locklessly, so it is better to use
+the READ_ONCE() wrapper. This does not fix any visible bug, but it
+should quiet some KCSAN complaints in the future. Also do that for
+huge_anon_orders_madvise.
+
+Link: https://lkml.kernel.org/r/20240515104754889HqrahFPePOIE1UlANHVAh@zte.com.cn
+Signed-off-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
+Acked-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Lu Zhongjun <lu.zhongjun@zte.com.cn>
+Reviewed-by: xu xin <xu.xin16@zte.com.cn>
+Cc: Yang Yang <yang.yang29@zte.com.cn>
+Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Zi Yan <ziy@nvidia.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 00f58104202c ("mm: fix khugepaged activation policy")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/huge_mm.h | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
+index c73ad77fa33d3..71945cf4c7a8d 100644
+--- a/include/linux/huge_mm.h
++++ b/include/linux/huge_mm.h
+@@ -140,8 +140,8 @@ static inline bool hugepage_flags_enabled(void)
+ * So we don't need to look at huge_anon_orders_inherit.
+ */
+ return hugepage_global_enabled() ||
+- huge_anon_orders_always ||
+- huge_anon_orders_madvise;
++ READ_ONCE(huge_anon_orders_always) ||
++ READ_ONCE(huge_anon_orders_madvise);
+ }
+
+ static inline int highest_order(unsigned long orders)
+--
+2.43.0
+
--- /dev/null
+From fe35c28699f58b6dca992f18c7556c10002ec329 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jun 2024 23:29:34 +0200
+Subject: mm/migrate: make migrate_misplaced_folio() return 0 on success
+
+From: David Hildenbrand <david@redhat.com>
+
+[ Upstream commit 4b88c23ab8c9bc3857f7c8847e2c6bed95185530 ]
+
+Patch series "mm/migrate: move NUMA hinting fault folio isolation + checks
+under PTL".
+
+Let's just return 0 on success, which is less confusing.
+
+... especially because we got it wrong in the migrate.h stub where we
+have "return -EAGAIN; /* can't migrate now */" instead of "return 0;".
+Likely this wrong return value doesn't currently matter, but it certainly
+adds confusion.
+
+We'll add migrate_misplaced_folio_prepare() next, where we want to use the
+same "return 0 on success" approach, so let's just clean this up.
+
+Link: https://lkml.kernel.org/r/20240620212935.656243-1-david@redhat.com
+Link: https://lkml.kernel.org/r/20240620212935.656243-2-david@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Donet Tom <donettom@linux.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 6e49019db5f7 ("mm/migrate: putback split folios when numa hint migration fails")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/huge_memory.c | 5 ++---
+ mm/memory.c | 2 +-
+ mm/migrate.c | 4 ++--
+ 3 files changed, 5 insertions(+), 6 deletions(-)
+
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index e234954cf5067..5ca9d45e6742c 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1666,7 +1666,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
+ unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+ int nid = NUMA_NO_NODE;
+ int target_nid, last_cpupid = (-1 & LAST_CPUPID_MASK);
+- bool migrated = false, writable = false;
++ bool writable = false;
+ int flags = 0;
+
+ vmf->ptl = pmd_lock(vma->vm_mm, vmf->pmd);
+@@ -1710,8 +1710,7 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
+ spin_unlock(vmf->ptl);
+ writable = false;
+
+- migrated = migrate_misplaced_folio(folio, vma, target_nid);
+- if (migrated) {
++ if (!migrate_misplaced_folio(folio, vma, target_nid)) {
+ flags |= TNF_MIGRATED;
+ nid = target_nid;
+ } else {
+diff --git a/mm/memory.c b/mm/memory.c
+index f81760c93801f..b1e77b9d17e75 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -5214,7 +5214,7 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
+ ignore_writable = true;
+
+ /* Migrate to the requested node */
+- if (migrate_misplaced_folio(folio, vma, target_nid)) {
++ if (!migrate_misplaced_folio(folio, vma, target_nid)) {
+ nid = target_nid;
+ flags |= TNF_MIGRATED;
+ } else {
+diff --git a/mm/migrate.c b/mm/migrate.c
+index a8c6f466e33ac..83e0e1aa21c7e 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -2656,11 +2656,11 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
+ nr_succeeded);
+ }
+ BUG_ON(!list_empty(&migratepages));
+- return isolated;
++ return isolated ? 0 : -EAGAIN;
+
+ out:
+ folio_put(folio);
+- return 0;
++ return -EAGAIN;
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
+ #endif /* CONFIG_NUMA */
+--
+2.43.0
+
--- /dev/null
+From bb61c01425264b95d276722d7c24f416403bed0c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 20 Jun 2024 23:29:35 +0200
+Subject: mm/migrate: move NUMA hinting fault folio isolation + checks under
+ PTL
+
+From: David Hildenbrand <david@redhat.com>
+
+[ Upstream commit ee86814b0562f18255b55c5e6a01a022895994cf ]
+
+Currently we always take a folio reference, even if migration will not
+be tried or isolation fails, requiring us to grab+drop an additional
+reference.
+
+Further, we end up calling folio_likely_mapped_shared() while the folio
+might already have been unmapped, which can easily happen once we have
+dropped the PTL. We want to stop touching mapcounts and friends from
+such context, and only call folio_likely_mapped_shared() while the folio
+is still mapped: mapcount information is pretty much stale and unreliable
+otherwise.
+
+So let's move checks into numamigrate_isolate_folio(), rename that
+function to migrate_misplaced_folio_prepare(), and call that function from
+callsites where we call migrate_misplaced_folio(), but still with the PTL
+held.
+
+We can now stop taking temporary folio references, and really only take a
+reference if folio isolation succeeded. Doing the
+folio_likely_mapped_shared() + folio isolation under PT lock is now
+similar to how we handle MADV_PAGEOUT.
+
+While at it, combine the folio_is_file_lru() checks.
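+
+The resulting caller pattern looks roughly like this (a sketch based
+on the do_numa_page()/do_huge_pmd_numa_page() hunks below):
+
+    /* still holding the PTL */
+    if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
+        flags |= TNF_MIGRATE_FAIL;
+        goto out_map;   /* isolation refused; map the folio back */
+    }
+    /* the folio is isolated; isolation code holds a folio reference */
+    pte_unmap_unlock(vmf->pte, vmf->ptl);
+    migrate_misplaced_folio(folio, vma, target_nid);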
+
+[david@redhat.com: fix list_del() corruption]
+ Link: https://lkml.kernel.org/r/8f85c31a-e603-4578-bf49-136dae0d4b69@redhat.com
+ Link: https://lkml.kernel.org/r/20240626191129.658CFC32782@smtp.kernel.org
+Link: https://lkml.kernel.org/r/20240620212935.656243-3-david@redhat.com
+Signed-off-by: David Hildenbrand <david@redhat.com>
+Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Tested-by: Donet Tom <donettom@linux.ibm.com>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Stable-dep-of: 6e49019db5f7 ("mm/migrate: putback split folios when numa hint migration fails")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/migrate.h | 7 ++++
+ mm/huge_memory.c | 8 ++--
+ mm/memory.c | 9 +++--
+ mm/migrate.c | 83 +++++++++++++++++++----------------------
+ 4 files changed, 56 insertions(+), 51 deletions(-)
+
+diff --git a/include/linux/migrate.h b/include/linux/migrate.h
+index 2ce13e8a309bd..9438cc7c2aeb5 100644
+--- a/include/linux/migrate.h
++++ b/include/linux/migrate.h
+@@ -142,9 +142,16 @@ const struct movable_operations *page_movable_ops(struct page *page)
+ }
+
+ #ifdef CONFIG_NUMA_BALANCING
++int migrate_misplaced_folio_prepare(struct folio *folio,
++ struct vm_area_struct *vma, int node);
+ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
+ int node);
+ #else
++static inline int migrate_misplaced_folio_prepare(struct folio *folio,
++ struct vm_area_struct *vma, int node)
++{
++ return -EAGAIN; /* can't migrate now */
++}
+ static inline int migrate_misplaced_folio(struct folio *folio,
+ struct vm_area_struct *vma, int node)
+ {
+diff --git a/mm/huge_memory.c b/mm/huge_memory.c
+index 5ca9d45e6742c..5f32a196a612e 100644
+--- a/mm/huge_memory.c
++++ b/mm/huge_memory.c
+@@ -1702,11 +1702,13 @@ vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
+ if (node_is_toptier(nid))
+ last_cpupid = folio_last_cpupid(folio);
+ target_nid = numa_migrate_prep(folio, vmf, haddr, nid, &flags);
+- if (target_nid == NUMA_NO_NODE) {
+- folio_put(folio);
++ if (target_nid == NUMA_NO_NODE)
++ goto out_map;
++ if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
++ flags |= TNF_MIGRATE_FAIL;
+ goto out_map;
+ }
+-
++ /* The folio is isolated and isolation code holds a folio reference. */
+ spin_unlock(vmf->ptl);
+ writable = false;
+
+diff --git a/mm/memory.c b/mm/memory.c
+index b1e77b9d17e75..755ffe082e217 100644
+--- a/mm/memory.c
++++ b/mm/memory.c
+@@ -5067,8 +5067,6 @@ int numa_migrate_prep(struct folio *folio, struct vm_fault *vmf,
+ {
+ struct vm_area_struct *vma = vmf->vma;
+
+- folio_get(folio);
+-
+ /* Record the current PID acceesing VMA */
+ vma_set_access_pid_bit(vma);
+
+@@ -5205,10 +5203,13 @@ static vm_fault_t do_numa_page(struct vm_fault *vmf)
+ else
+ last_cpupid = folio_last_cpupid(folio);
+ target_nid = numa_migrate_prep(folio, vmf, vmf->address, nid, &flags);
+- if (target_nid == NUMA_NO_NODE) {
+- folio_put(folio);
++ if (target_nid == NUMA_NO_NODE)
++ goto out_map;
++ if (migrate_misplaced_folio_prepare(folio, vma, target_nid)) {
++ flags |= TNF_MIGRATE_FAIL;
+ goto out_map;
+ }
++ /* The folio is isolated and isolation code holds a folio reference. */
+ pte_unmap_unlock(vmf->pte, vmf->ptl);
+ writable = false;
+ ignore_writable = true;
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 83e0e1aa21c7e..6b5affe49cf91 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -2557,16 +2557,44 @@ static struct folio *alloc_misplaced_dst_folio(struct folio *src,
+ return __folio_alloc_node(gfp, order, nid);
+ }
+
+-static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
++/*
++ * Prepare for calling migrate_misplaced_folio() by isolating the folio if
++ * permitted. Must be called with the PTL still held.
++ */
++int migrate_misplaced_folio_prepare(struct folio *folio,
++ struct vm_area_struct *vma, int node)
+ {
+ int nr_pages = folio_nr_pages(folio);
++ pg_data_t *pgdat = NODE_DATA(node);
++
++ if (folio_is_file_lru(folio)) {
++ /*
++ * Do not migrate file folios that are mapped in multiple
++ * processes with execute permissions as they are probably
++ * shared libraries.
++ *
++ * See folio_likely_mapped_shared() on possible imprecision
++ * when we cannot easily detect if a folio is shared.
++ */
++ if ((vma->vm_flags & VM_EXEC) &&
++ folio_likely_mapped_shared(folio))
++ return -EACCES;
++
++ /*
++ * Do not migrate dirty folios as not all filesystems can move
++ * dirty folios in MIGRATE_ASYNC mode which is a waste of
++ * cycles.
++ */
++ if (folio_test_dirty(folio))
++ return -EAGAIN;
++ }
+
+ /* Avoid migrating to a node that is nearly full */
+ if (!migrate_balanced_pgdat(pgdat, nr_pages)) {
+ int z;
+
+ if (!(sysctl_numa_balancing_mode & NUMA_BALANCING_MEMORY_TIERING))
+- return 0;
++ return -EAGAIN;
+ for (z = pgdat->nr_zones - 1; z >= 0; z--) {
+ if (managed_zone(pgdat->node_zones + z))
+ break;
+@@ -2577,65 +2605,37 @@ static int numamigrate_isolate_folio(pg_data_t *pgdat, struct folio *folio)
+ * further.
+ */
+ if (z < 0)
+- return 0;
++ return -EAGAIN;
+
+ wakeup_kswapd(pgdat->node_zones + z, 0,
+ folio_order(folio), ZONE_MOVABLE);
+- return 0;
++ return -EAGAIN;
+ }
+
+ if (!folio_isolate_lru(folio))
+- return 0;
++ return -EAGAIN;
+
+ node_stat_mod_folio(folio, NR_ISOLATED_ANON + folio_is_file_lru(folio),
+ nr_pages);
+-
+- /*
+- * Isolating the folio has taken another reference, so the
+- * caller's reference can be safely dropped without the folio
+- * disappearing underneath us during migration.
+- */
+- folio_put(folio);
+- return 1;
++ return 0;
+ }
+
+ /*
+ * Attempt to migrate a misplaced folio to the specified destination
+- * node. Caller is expected to have an elevated reference count on
+- * the folio that will be dropped by this function before returning.
++ * node. Caller is expected to have isolated the folio by calling
++ * migrate_misplaced_folio_prepare(), which will result in an
++ * elevated reference count on the folio. This function will un-isolate the
++ * folio, dereferencing the folio before returning.
+ */
+ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
+ int node)
+ {
+ pg_data_t *pgdat = NODE_DATA(node);
+- int isolated;
+ int nr_remaining;
+ unsigned int nr_succeeded;
+ LIST_HEAD(migratepages);
+ int nr_pages = folio_nr_pages(folio);
+
+- /*
+- * Don't migrate file folios that are mapped in multiple processes
+- * with execute permissions as they are probably shared libraries.
+- *
+- * See folio_likely_mapped_shared() on possible imprecision when we
+- * cannot easily detect if a folio is shared.
+- */
+- if (folio_likely_mapped_shared(folio) && folio_is_file_lru(folio) &&
+- (vma->vm_flags & VM_EXEC))
+- goto out;
+-
+- /*
+- * Also do not migrate dirty folios as not all filesystems can move
+- * dirty folios in MIGRATE_ASYNC mode which is a waste of cycles.
+- */
+- if (folio_is_file_lru(folio) && folio_test_dirty(folio))
+- goto out;
+-
+- isolated = numamigrate_isolate_folio(pgdat, folio);
+- if (!isolated)
+- goto out;
+-
+ list_add(&folio->lru, &migratepages);
+ nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
+ NULL, node, MIGRATE_ASYNC,
+@@ -2647,7 +2647,6 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
+ folio_is_file_lru(folio), -nr_pages);
+ folio_putback_lru(folio);
+ }
+- isolated = 0;
+ }
+ if (nr_succeeded) {
+ count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
+@@ -2656,11 +2655,7 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
+ nr_succeeded);
+ }
+ BUG_ON(!list_empty(&migratepages));
+- return isolated ? 0 : -EAGAIN;
+-
+-out:
+- folio_put(folio);
+- return -EAGAIN;
++ return nr_remaining ? -EAGAIN : 0;
+ }
+ #endif /* CONFIG_NUMA_BALANCING */
+ #endif /* CONFIG_NUMA */
+--
+2.43.0
+
--- /dev/null
+From 491a3579cdad8a4feb8b5f5fcbfc1f1c0a6e25cd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Jul 2024 17:55:37 -0400
+Subject: mm/migrate: putback split folios when numa hint migration fails
+
+From: Peter Xu <peterx@redhat.com>
+
+[ Upstream commit 6e49019db5f7a09a9c0e8ac4d108e656c3f8e583 ]
+
+This issue has not shown up in any report yet; it was found by code
+observation only.
+
+This is yet another fix besides Hugh's patch [1] but on relevant code
+path, where eager split of folio can happen if the folio is already on
+deferred list during a folio migration.
+
+Here the issue is that the NUMA path (migrate_misplaced_folio()) may
+now start to encounter such folio splits even with the
+MR_NUMA_MISPLACED hint applied. Then, when migrate_pages() didn't
+migrate all the folios, the split small folios may be put onto the
+list instead of the original folio, so putting back only the head
+page won't be enough.
+
+Fix it by putting back all the folios on the list.
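+
+Concretely (sketch matching the diff below): instead of deleting and
+putting back only the original folio, put back whatever folios
+migrate_pages() left on the list:
+
+    if (nr_remaining && !list_empty(&migratepages))
+        putback_movable_pages(&migratepages);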
+
+[1] https://lore.kernel.org/all/46c948b4-4dd8-6e03-4c7b-ce4e81cfa536@google.com/
+
+[akpm@linux-foundation.org: remove now unused local `nr_pages']
+Link: https://lkml.kernel.org/r/20240708215537.2630610-1-peterx@redhat.com
+Fixes: 7262f208ca68 ("mm/migrate: split source folio if it is on deferred split list")
+Signed-off-by: Peter Xu <peterx@redhat.com>
+Reviewed-by: Zi Yan <ziy@nvidia.com>
+Reviewed-by: Baolin Wang <baolin.wang@linux.alibaba.com>
+Cc: Yang Shi <shy828301@gmail.com>
+Cc: Hugh Dickins <hughd@google.com>
+Cc: Huang Ying <ying.huang@intel.com>
+Cc: David Hildenbrand <david@redhat.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ mm/migrate.c | 11 ++---------
+ 1 file changed, 2 insertions(+), 9 deletions(-)
+
+diff --git a/mm/migrate.c b/mm/migrate.c
+index 6b5affe49cf91..9dabeb90f772d 100644
+--- a/mm/migrate.c
++++ b/mm/migrate.c
+@@ -2634,20 +2634,13 @@ int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
+ int nr_remaining;
+ unsigned int nr_succeeded;
+ LIST_HEAD(migratepages);
+- int nr_pages = folio_nr_pages(folio);
+
+ list_add(&folio->lru, &migratepages);
+ nr_remaining = migrate_pages(&migratepages, alloc_misplaced_dst_folio,
+ NULL, node, MIGRATE_ASYNC,
+ MR_NUMA_MISPLACED, &nr_succeeded);
+- if (nr_remaining) {
+- if (!list_empty(&migratepages)) {
+- list_del(&folio->lru);
+- node_stat_mod_folio(folio, NR_ISOLATED_ANON +
+- folio_is_file_lru(folio), -nr_pages);
+- folio_putback_lru(folio);
+- }
+- }
++ if (nr_remaining && !list_empty(&migratepages))
++ putback_movable_pages(&migratepages);
+ if (nr_succeeded) {
+ count_vm_numa_events(NUMA_PAGE_MIGRATE, nr_succeeded);
+ if (!node_is_toptier(folio_nid(folio)) && node_is_toptier(node))
+--
+2.43.0
+
--- /dev/null
+From e4de46c6cdc3c624df1651e6856f4ee8ff65b74e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 29 May 2024 16:03:55 +0800
+Subject: perf: imx_perf: fix counter start and config sequence
+
+From: Xu Yang <xu.yang_2@nxp.com>
+
+[ Upstream commit ac9aa295f7a89d38656739628796f086f0b160e2 ]
+
+In the current driver, the counter is started first and then
+configured. This sequence is not correct for AXI filter events, since
+the correct AXI_MASK and AXI_ID are not set yet, so the results may be
+inaccurate.
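+
+The fix (diff below) simply swaps the two steps in ddr_perf_event_add():
+
+    /* program AXI_MASK/AXI_ID and the event config first ... */
+    ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2);
+
+    /* ... and only then start the counter */
+    if (flags & PERF_EF_START)
+        ddr_perf_event_start(event, flags);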
+
+Reviewed-by: Frank Li <Frank.Li@nxp.com>
+Fixes: 55691f99d417 ("drivers/perf: imx_ddr: Add support for NXP i.MX9 SoC DDRC PMU driver")
+cc: stable@vger.kernel.org
+Signed-off-by: Xu Yang <xu.yang_2@nxp.com>
+Link: https://lore.kernel.org/r/20240529080358.703784-5-xu.yang_2@nxp.com
+Signed-off-by: Will Deacon <will@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/perf/fsl_imx9_ddr_perf.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/perf/fsl_imx9_ddr_perf.c b/drivers/perf/fsl_imx9_ddr_perf.c
+index 72c2d3074cded..98af97750a6e3 100644
+--- a/drivers/perf/fsl_imx9_ddr_perf.c
++++ b/drivers/perf/fsl_imx9_ddr_perf.c
+@@ -476,12 +476,12 @@ static int ddr_perf_event_add(struct perf_event *event, int flags)
+ hwc->idx = counter;
+ hwc->state |= PERF_HES_STOPPED;
+
+- if (flags & PERF_EF_START)
+- ddr_perf_event_start(event, flags);
+-
+ /* read trans, write trans, read beat */
+ ddr_perf_monitor_config(pmu, cfg, cfg1, cfg2);
+
++ if (flags & PERF_EF_START)
++ ddr_perf_event_start(event, flags);
++
+ return 0;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From b551134d601c1b80303cd8d16931225ae2ed2b7c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 8 Jul 2024 12:33:35 -0700
+Subject: perf/x86/intel: Add a distinct name for Granite Rapids
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Kan Liang <kan.liang@linux.intel.com>
+
+[ Upstream commit fa0c1c9d283b37fdb7fc1dcccbb88fc8f48a4aa4 ]
+
+Currently, Sapphire Rapids and Granite Rapids share the same PMU name,
+sapphire_rapids, because from the kernel's perspective GNR is similar
+to SPR. The only key difference is that they support different extra
+MSRs. The code path and the PMU name are shared.
+
+However, from the end user's perspective, they are quite different.
+Besides the extra MSRs, GNR has a newer PEBS format, supports Retire
+Latency, supports the new CPUID enumeration architecture, doesn't
+require the load-latency AUX event, has additional TMA Level 1
+Architectural Events, etc. The differences can be enumerated by CPUID
+or the PERF_CAPABILITIES MSR. They weren't reflected in the
+model-specific kernel setup, but it is worth having a distinct PMU
+name for GNR.
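+
+Structurally (a sketch of the diff below), the shared GLC setup moves
+under a common label so each model can set its own name and extra_regs
+first:
+
+    case INTEL_SAPPHIRERAPIDS_X:
+    case INTEL_EMERALDRAPIDS_X:
+        x86_pmu.extra_regs = intel_glc_extra_regs;
+        name = "sapphire_rapids";
+        goto glc_common;
+
+    case INTEL_GRANITERAPIDS_X:
+    case INTEL_GRANITERAPIDS_D:
+        x86_pmu.extra_regs = intel_rwc_extra_regs;
+        name = "granite_rapids";
+
+    glc_common:
+        intel_pmu_init_glc(NULL);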
+
+Fixes: a6742cb90b56 ("perf/x86/intel: Fix the FRONTEND encoding on GNR and MTL")
+Suggested-by: Ahmad Yasin <ahmad.yasin@intel.com>
+Signed-off-by: Kan Liang <kan.liang@linux.intel.com>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lkml.kernel.org/r/20240708193336.1192217-3-kan.liang@linux.intel.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/intel/core.c | 14 +++++++++-----
+ 1 file changed, 9 insertions(+), 5 deletions(-)
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 7f7f1c3bb1881..101a21fe9c213 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -6756,12 +6756,18 @@ __init int intel_pmu_init(void)
+ case INTEL_EMERALDRAPIDS_X:
+ x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
+ x86_pmu.extra_regs = intel_glc_extra_regs;
+- fallthrough;
++ pr_cont("Sapphire Rapids events, ");
++ name = "sapphire_rapids";
++ goto glc_common;
++
+ case INTEL_GRANITERAPIDS_X:
+ case INTEL_GRANITERAPIDS_D:
++ x86_pmu.extra_regs = intel_rwc_extra_regs;
++ pr_cont("Granite Rapids events, ");
++ name = "granite_rapids";
++
++ glc_common:
+ intel_pmu_init_glc(NULL);
+- if (!x86_pmu.extra_regs)
+- x86_pmu.extra_regs = intel_rwc_extra_regs;
+ x86_pmu.pebs_ept = 1;
+ x86_pmu.hw_config = hsw_hw_config;
+ x86_pmu.get_event_constraints = glc_get_event_constraints;
+@@ -6772,8 +6778,6 @@ __init int intel_pmu_init(void)
+ td_attr = glc_td_events_attrs;
+ tsx_attr = glc_tsx_events_attrs;
+ intel_pmu_pebs_data_source_skl(true);
+- pr_cont("Sapphire Rapids events, ");
+- name = "sapphire_rapids";
+ break;
+
+ case INTEL_ALDERLAKE:
+--
+2.43.0
+
--- /dev/null
+From bb96a04e6a87c41cf8d915ed508995f4e0a3b6ab Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 20 May 2024 15:46:02 -0700
+Subject: perf/x86/intel: Switch to new Intel CPU model defines
+
+From: Tony Luck <tony.luck@intel.com>
+
+[ Upstream commit d142df13f3574237688c7a20e0019cccc7ae39eb ]
+
+New CPU #defines encode vendor and family as well as model.
+
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Link: https://lore.kernel.org/all/20240520224620.9480-32-tony.luck%40intel.com
+Stable-dep-of: fa0c1c9d283b ("perf/x86/intel: Add a distinct name for Granite Rapids")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/events/intel/core.c | 148 +++++++++++++++++------------------
+ 1 file changed, 74 insertions(+), 74 deletions(-)
+
+diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
+index 38c1b1f1deaad..7f7f1c3bb1881 100644
+--- a/arch/x86/events/intel/core.c
++++ b/arch/x86/events/intel/core.c
+@@ -4698,8 +4698,8 @@ static void intel_pmu_check_extra_regs(struct extra_reg *extra_regs);
+ static inline bool intel_pmu_broken_perf_cap(void)
+ {
+ /* The Perf Metric (Bit 15) is always cleared */
+- if ((boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE) ||
+- (boot_cpu_data.x86_model == INTEL_FAM6_METEORLAKE_L))
++ if (boot_cpu_data.x86_vfm == INTEL_METEORLAKE ||
++ boot_cpu_data.x86_vfm == INTEL_METEORLAKE_L)
+ return true;
+
+ return false;
+@@ -6238,19 +6238,19 @@ __init int intel_pmu_init(void)
+ /*
+ * Install the hw-cache-events table:
+ */
+- switch (boot_cpu_data.x86_model) {
+- case INTEL_FAM6_CORE_YONAH:
++ switch (boot_cpu_data.x86_vfm) {
++ case INTEL_CORE_YONAH:
+ pr_cont("Core events, ");
+ name = "core";
+ break;
+
+- case INTEL_FAM6_CORE2_MEROM:
++ case INTEL_CORE2_MEROM:
+ x86_add_quirk(intel_clovertown_quirk);
+ fallthrough;
+
+- case INTEL_FAM6_CORE2_MEROM_L:
+- case INTEL_FAM6_CORE2_PENRYN:
+- case INTEL_FAM6_CORE2_DUNNINGTON:
++ case INTEL_CORE2_MEROM_L:
++ case INTEL_CORE2_PENRYN:
++ case INTEL_CORE2_DUNNINGTON:
+ memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+@@ -6262,9 +6262,9 @@ __init int intel_pmu_init(void)
+ name = "core2";
+ break;
+
+- case INTEL_FAM6_NEHALEM:
+- case INTEL_FAM6_NEHALEM_EP:
+- case INTEL_FAM6_NEHALEM_EX:
++ case INTEL_NEHALEM:
++ case INTEL_NEHALEM_EP:
++ case INTEL_NEHALEM_EX:
+ memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
+@@ -6296,11 +6296,11 @@ __init int intel_pmu_init(void)
+ name = "nehalem";
+ break;
+
+- case INTEL_FAM6_ATOM_BONNELL:
+- case INTEL_FAM6_ATOM_BONNELL_MID:
+- case INTEL_FAM6_ATOM_SALTWELL:
+- case INTEL_FAM6_ATOM_SALTWELL_MID:
+- case INTEL_FAM6_ATOM_SALTWELL_TABLET:
++ case INTEL_ATOM_BONNELL:
++ case INTEL_ATOM_BONNELL_MID:
++ case INTEL_ATOM_SALTWELL:
++ case INTEL_ATOM_SALTWELL_MID:
++ case INTEL_ATOM_SALTWELL_TABLET:
+ memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+
+@@ -6313,11 +6313,11 @@ __init int intel_pmu_init(void)
+ name = "bonnell";
+ break;
+
+- case INTEL_FAM6_ATOM_SILVERMONT:
+- case INTEL_FAM6_ATOM_SILVERMONT_D:
+- case INTEL_FAM6_ATOM_SILVERMONT_MID:
+- case INTEL_FAM6_ATOM_AIRMONT:
+- case INTEL_FAM6_ATOM_AIRMONT_MID:
++ case INTEL_ATOM_SILVERMONT:
++ case INTEL_ATOM_SILVERMONT_D:
++ case INTEL_ATOM_SILVERMONT_MID:
++ case INTEL_ATOM_AIRMONT:
++ case INTEL_ATOM_AIRMONT_MID:
+ memcpy(hw_cache_event_ids, slm_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, slm_hw_cache_extra_regs,
+@@ -6335,8 +6335,8 @@ __init int intel_pmu_init(void)
+ name = "silvermont";
+ break;
+
+- case INTEL_FAM6_ATOM_GOLDMONT:
+- case INTEL_FAM6_ATOM_GOLDMONT_D:
++ case INTEL_ATOM_GOLDMONT:
++ case INTEL_ATOM_GOLDMONT_D:
+ memcpy(hw_cache_event_ids, glm_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, glm_hw_cache_extra_regs,
+@@ -6362,7 +6362,7 @@ __init int intel_pmu_init(void)
+ name = "goldmont";
+ break;
+
+- case INTEL_FAM6_ATOM_GOLDMONT_PLUS:
++ case INTEL_ATOM_GOLDMONT_PLUS:
+ memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, glp_hw_cache_extra_regs,
+@@ -6391,9 +6391,9 @@ __init int intel_pmu_init(void)
+ name = "goldmont_plus";
+ break;
+
+- case INTEL_FAM6_ATOM_TREMONT_D:
+- case INTEL_FAM6_ATOM_TREMONT:
+- case INTEL_FAM6_ATOM_TREMONT_L:
++ case INTEL_ATOM_TREMONT_D:
++ case INTEL_ATOM_TREMONT:
++ case INTEL_ATOM_TREMONT_L:
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, glp_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+@@ -6420,7 +6420,7 @@ __init int intel_pmu_init(void)
+ name = "Tremont";
+ break;
+
+- case INTEL_FAM6_ATOM_GRACEMONT:
++ case INTEL_ATOM_GRACEMONT:
+ intel_pmu_init_grt(NULL);
+ intel_pmu_pebs_data_source_grt();
+ x86_pmu.pebs_latency_data = adl_latency_data_small;
+@@ -6432,8 +6432,8 @@ __init int intel_pmu_init(void)
+ name = "gracemont";
+ break;
+
+- case INTEL_FAM6_ATOM_CRESTMONT:
+- case INTEL_FAM6_ATOM_CRESTMONT_X:
++ case INTEL_ATOM_CRESTMONT:
++ case INTEL_ATOM_CRESTMONT_X:
+ intel_pmu_init_grt(NULL);
+ x86_pmu.extra_regs = intel_cmt_extra_regs;
+ intel_pmu_pebs_data_source_cmt();
+@@ -6446,9 +6446,9 @@ __init int intel_pmu_init(void)
+ name = "crestmont";
+ break;
+
+- case INTEL_FAM6_WESTMERE:
+- case INTEL_FAM6_WESTMERE_EP:
+- case INTEL_FAM6_WESTMERE_EX:
++ case INTEL_WESTMERE:
++ case INTEL_WESTMERE_EP:
++ case INTEL_WESTMERE_EX:
+ memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, nehalem_hw_cache_extra_regs,
+@@ -6477,8 +6477,8 @@ __init int intel_pmu_init(void)
+ name = "westmere";
+ break;
+
+- case INTEL_FAM6_SANDYBRIDGE:
+- case INTEL_FAM6_SANDYBRIDGE_X:
++ case INTEL_SANDYBRIDGE:
++ case INTEL_SANDYBRIDGE_X:
+ x86_add_quirk(intel_sandybridge_quirk);
+ x86_add_quirk(intel_ht_bug);
+ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
+@@ -6491,7 +6491,7 @@ __init int intel_pmu_init(void)
+ x86_pmu.event_constraints = intel_snb_event_constraints;
+ x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
+ x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
+- if (boot_cpu_data.x86_model == INTEL_FAM6_SANDYBRIDGE_X)
++ if (boot_cpu_data.x86_vfm == INTEL_SANDYBRIDGE_X)
+ x86_pmu.extra_regs = intel_snbep_extra_regs;
+ else
+ x86_pmu.extra_regs = intel_snb_extra_regs;
+@@ -6517,8 +6517,8 @@ __init int intel_pmu_init(void)
+ name = "sandybridge";
+ break;
+
+- case INTEL_FAM6_IVYBRIDGE:
+- case INTEL_FAM6_IVYBRIDGE_X:
++ case INTEL_IVYBRIDGE:
++ case INTEL_IVYBRIDGE_X:
+ x86_add_quirk(intel_ht_bug);
+ memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
+ sizeof(hw_cache_event_ids));
+@@ -6534,7 +6534,7 @@ __init int intel_pmu_init(void)
+ x86_pmu.pebs_constraints = intel_ivb_pebs_event_constraints;
+ x86_pmu.pebs_aliases = intel_pebs_aliases_ivb;
+ x86_pmu.pebs_prec_dist = true;
+- if (boot_cpu_data.x86_model == INTEL_FAM6_IVYBRIDGE_X)
++ if (boot_cpu_data.x86_vfm == INTEL_IVYBRIDGE_X)
+ x86_pmu.extra_regs = intel_snbep_extra_regs;
+ else
+ x86_pmu.extra_regs = intel_snb_extra_regs;
+@@ -6556,10 +6556,10 @@ __init int intel_pmu_init(void)
+ break;
+
+
+- case INTEL_FAM6_HASWELL:
+- case INTEL_FAM6_HASWELL_X:
+- case INTEL_FAM6_HASWELL_L:
+- case INTEL_FAM6_HASWELL_G:
++ case INTEL_HASWELL:
++ case INTEL_HASWELL_X:
++ case INTEL_HASWELL_L:
++ case INTEL_HASWELL_G:
+ x86_add_quirk(intel_ht_bug);
+ x86_add_quirk(intel_pebs_isolation_quirk);
+ x86_pmu.late_ack = true;
+@@ -6589,10 +6589,10 @@ __init int intel_pmu_init(void)
+ name = "haswell";
+ break;
+
+- case INTEL_FAM6_BROADWELL:
+- case INTEL_FAM6_BROADWELL_D:
+- case INTEL_FAM6_BROADWELL_G:
+- case INTEL_FAM6_BROADWELL_X:
++ case INTEL_BROADWELL:
++ case INTEL_BROADWELL_D:
++ case INTEL_BROADWELL_G:
++ case INTEL_BROADWELL_X:
+ x86_add_quirk(intel_pebs_isolation_quirk);
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+@@ -6631,8 +6631,8 @@ __init int intel_pmu_init(void)
+ name = "broadwell";
+ break;
+
+- case INTEL_FAM6_XEON_PHI_KNL:
+- case INTEL_FAM6_XEON_PHI_KNM:
++ case INTEL_XEON_PHI_KNL:
++ case INTEL_XEON_PHI_KNM:
+ memcpy(hw_cache_event_ids,
+ slm_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs,
+@@ -6651,15 +6651,15 @@ __init int intel_pmu_init(void)
+ name = "knights-landing";
+ break;
+
+- case INTEL_FAM6_SKYLAKE_X:
++ case INTEL_SKYLAKE_X:
+ pmem = true;
+ fallthrough;
+- case INTEL_FAM6_SKYLAKE_L:
+- case INTEL_FAM6_SKYLAKE:
+- case INTEL_FAM6_KABYLAKE_L:
+- case INTEL_FAM6_KABYLAKE:
+- case INTEL_FAM6_COMETLAKE_L:
+- case INTEL_FAM6_COMETLAKE:
++ case INTEL_SKYLAKE_L:
++ case INTEL_SKYLAKE:
++ case INTEL_KABYLAKE_L:
++ case INTEL_KABYLAKE:
++ case INTEL_COMETLAKE_L:
++ case INTEL_COMETLAKE:
+ x86_add_quirk(intel_pebs_isolation_quirk);
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+@@ -6708,16 +6708,16 @@ __init int intel_pmu_init(void)
+ name = "skylake";
+ break;
+
+- case INTEL_FAM6_ICELAKE_X:
+- case INTEL_FAM6_ICELAKE_D:
++ case INTEL_ICELAKE_X:
++ case INTEL_ICELAKE_D:
+ x86_pmu.pebs_ept = 1;
+ pmem = true;
+ fallthrough;
+- case INTEL_FAM6_ICELAKE_L:
+- case INTEL_FAM6_ICELAKE:
+- case INTEL_FAM6_TIGERLAKE_L:
+- case INTEL_FAM6_TIGERLAKE:
+- case INTEL_FAM6_ROCKETLAKE:
++ case INTEL_ICELAKE_L:
++ case INTEL_ICELAKE:
++ case INTEL_TIGERLAKE_L:
++ case INTEL_TIGERLAKE:
++ case INTEL_ROCKETLAKE:
+ x86_pmu.late_ack = true;
+ memcpy(hw_cache_event_ids, skl_hw_cache_event_ids, sizeof(hw_cache_event_ids));
+ memcpy(hw_cache_extra_regs, skl_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
+@@ -6752,13 +6752,13 @@ __init int intel_pmu_init(void)
+ name = "icelake";
+ break;
+
+- case INTEL_FAM6_SAPPHIRERAPIDS_X:
+- case INTEL_FAM6_EMERALDRAPIDS_X:
++ case INTEL_SAPPHIRERAPIDS_X:
++ case INTEL_EMERALDRAPIDS_X:
+ x86_pmu.flags |= PMU_FL_MEM_LOADS_AUX;
+ x86_pmu.extra_regs = intel_glc_extra_regs;
+ fallthrough;
+- case INTEL_FAM6_GRANITERAPIDS_X:
+- case INTEL_FAM6_GRANITERAPIDS_D:
++ case INTEL_GRANITERAPIDS_X:
++ case INTEL_GRANITERAPIDS_D:
+ intel_pmu_init_glc(NULL);
+ if (!x86_pmu.extra_regs)
+ x86_pmu.extra_regs = intel_rwc_extra_regs;
+@@ -6776,11 +6776,11 @@ __init int intel_pmu_init(void)
+ name = "sapphire_rapids";
+ break;
+
+- case INTEL_FAM6_ALDERLAKE:
+- case INTEL_FAM6_ALDERLAKE_L:
+- case INTEL_FAM6_RAPTORLAKE:
+- case INTEL_FAM6_RAPTORLAKE_P:
+- case INTEL_FAM6_RAPTORLAKE_S:
++ case INTEL_ALDERLAKE:
++ case INTEL_ALDERLAKE_L:
++ case INTEL_RAPTORLAKE:
++ case INTEL_RAPTORLAKE_P:
++ case INTEL_RAPTORLAKE_S:
+ /*
+ * Alder Lake has 2 types of CPU, core and atom.
+ *
+@@ -6838,8 +6838,8 @@ __init int intel_pmu_init(void)
+ name = "alderlake_hybrid";
+ break;
+
+- case INTEL_FAM6_METEORLAKE:
+- case INTEL_FAM6_METEORLAKE_L:
++ case INTEL_METEORLAKE:
++ case INTEL_METEORLAKE_L:
+ intel_pmu_init_hybrid(hybrid_big_small);
+
+ x86_pmu.pebs_latency_data = mtl_latency_data_small;
+--
+2.43.0
+
--- /dev/null
+mm-huge_memory-mark-racy-access-onhuge_anon_orders_a.patch
+mm-fix-khugepaged-activation-policy.patch
+mm-migrate-make-migrate_misplaced_folio-return-0-on-.patch
+mm-migrate-move-numa-hinting-fault-folio-isolation-c.patch
+mm-migrate-putback-split-folios-when-numa-hint-migra.patch
+ext4-factor-out-a-common-helper-to-query-extent-map.patch
+ext4-check-the-extent-status-again-before-inserting-.patch
+f2fs-fix-to-avoid-use-ssr-allocate-when-do-defragmen.patch
+f2fs-assign-curseg_all_data_atgc-if-blkaddr-is-valid.patch
+perf-imx_perf-fix-counter-start-and-config-sequence.patch
+perf-x86-intel-switch-to-new-intel-cpu-model-defines.patch
+perf-x86-intel-add-a-distinct-name-for-granite-rapid.patch
+mips-loongson64-dts-fix-pcie-port-nodes-for-ls7a.patch
+mips-dts-loongson-fix-liointc-irq-polarity.patch
+mips-dts-loongson-fix-ls2k1000-rtc-interrupt.patch