--- /dev/null
+From f8f210dc84709804c9f952297f2bfafa6ea6b4bd Mon Sep 17 00:00:00 2001
+From: Filipe Manana <fdmanana@suse.com>
+Date: Tue, 21 Mar 2023 11:13:59 +0000
+Subject: btrfs: calculate the right space for delayed refs when updating global reserve
+
+From: Filipe Manana <fdmanana@suse.com>
+
+commit f8f210dc84709804c9f952297f2bfafa6ea6b4bd upstream.
+
+When updating the global block reserve, we account for the 6 items needed
+by an unlink operation and the 6 delayed references, one for each of those
+items. However, the calculation for the delayed references is not correct
+when the free space tree is enabled, as in that case we need to touch the
+free space tree as well and therefore need twice the number of bytes. So
+use the btrfs_calc_delayed_ref_bytes() helper to calculate the number of
+bytes needed for the delayed references at
+btrfs_update_global_block_rsv().
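+
+As a rough illustration (a standalone userspace sketch, assuming the usual
+btrfs_calc_insert_metadata_size() formula of
+nodesize * BTRFS_MAX_LEVEL * 2 * num_items and the default 16KiB nodesize):
+
+	#include <stdio.h>
+	#include <stdint.h>
+
+	int main(void)
+	{
+		const uint64_t nodesize = 16384;  /* default nodesize            */
+		const uint64_t max_level = 8;     /* BTRFS_MAX_LEVEL             */
+		const uint64_t units = 6;         /* BTRFS_UNLINK_METADATA_UNITS */
+		uint64_t insert = nodesize * max_level * 2 * units;
+
+		/* Old calculation: delayed refs cost the same as the items. */
+		printf("delayed refs, no free space tree:   %llu bytes\n",
+		       (unsigned long long)insert);
+		/* With the free space tree we also modify that tree, so 2x. */
+		printf("delayed refs, with free space tree: %llu bytes\n",
+		       (unsigned long long)(insert * 2));
+		return 0;
+	}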
+
+Reviewed-by: Josef Bacik <josef@toxicpanda.com>
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+[Diogo: this patch has been cherry-picked from the original commit;
+conflicts included lack of a define (picked from commit 5630e2bcfe223)
+and lack of btrfs_calc_delayed_ref_bytes (picked from commit 0e55a54502b97)
+- changed const struct -> struct for compatibility.]
+Signed-off-by: Diogo Jahchan Koike <djahchankoike@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/btrfs/block-rsv.c | 14 ++++++++------
+ fs/btrfs/block-rsv.h | 12 ++++++++++++
+ fs/btrfs/delayed-ref.h | 21 +++++++++++++++++++++
+ 3 files changed, 41 insertions(+), 6 deletions(-)
+
+--- a/fs/btrfs/block-rsv.c
++++ b/fs/btrfs/block-rsv.c
+@@ -384,17 +384,19 @@ void btrfs_update_global_block_rsv(struc
+
+ /*
+ * But we also want to reserve enough space so we can do the fallback
+- * global reserve for an unlink, which is an additional 5 items (see the
+- * comment in __unlink_start_trans for what we're modifying.)
++ * global reserve for an unlink, which is an additional
++ * BTRFS_UNLINK_METADATA_UNITS items.
+ *
+ * But we also need space for the delayed ref updates from the unlink,
+- * so its 10, 5 for the actual operation, and 5 for the delayed ref
+- * updates.
++ * so add BTRFS_UNLINK_METADATA_UNITS units for delayed refs, one for
++ * each unlink metadata item.
+ */
+- min_items += 10;
++ min_items += BTRFS_UNLINK_METADATA_UNITS;
+
+ num_bytes = max_t(u64, num_bytes,
+- btrfs_calc_insert_metadata_size(fs_info, min_items));
++ btrfs_calc_insert_metadata_size(fs_info, min_items) +
++ btrfs_calc_delayed_ref_bytes(fs_info,
++ BTRFS_UNLINK_METADATA_UNITS));
+
+ spin_lock(&sinfo->lock);
+ spin_lock(&block_rsv->lock);
+--- a/fs/btrfs/block-rsv.h
++++ b/fs/btrfs/block-rsv.h
+@@ -50,6 +50,18 @@ struct btrfs_block_rsv {
+ u64 qgroup_rsv_reserved;
+ };
+
++/*
++ * Number of metadata items necessary for an unlink operation:
++ *
++ * 1 for the possible orphan item
++ * 1 for the dir item
++ * 1 for the dir index
++ * 1 for the inode ref
++ * 1 for the inode
++ * 1 for the parent inode
++ */
++#define BTRFS_UNLINK_METADATA_UNITS 6
++
+ void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, enum btrfs_rsv_type type);
+ void btrfs_init_root_block_rsv(struct btrfs_root *root);
+ struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_fs_info *fs_info,
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -253,6 +253,27 @@ extern struct kmem_cache *btrfs_delayed_
+ int __init btrfs_delayed_ref_init(void);
+ void __cold btrfs_delayed_ref_exit(void);
+
++static inline u64 btrfs_calc_delayed_ref_bytes(struct btrfs_fs_info *fs_info,
++ int num_delayed_refs)
++{
++ u64 num_bytes;
++
++ num_bytes = btrfs_calc_insert_metadata_size(fs_info, num_delayed_refs);
++
++ /*
++ * We have to check the mount option here because we could be enabling
++ * the free space tree for the first time and don't have the compat_ro
++ * option set yet.
++ *
++ * We need extra reservations if we have the free space tree because
++ * we'll have to modify that tree as well.
++ */
++ if (btrfs_test_opt(fs_info, FREE_SPACE_TREE))
++ num_bytes *= 2;
++
++ return num_bytes;
++}
++
+ static inline void btrfs_init_generic_ref(struct btrfs_ref *generic_ref,
+ int action, u64 bytenr, u64 len, u64 parent)
+ {
--- /dev/null
+From d05b5e0baf424c8c4b4709ac11f66ab726c8deaf Mon Sep 17 00:00:00 2001
+From: Sumeet Pawnikar <sumeet.r.pawnikar@intel.com>
+Date: Thu, 8 Jun 2023 08:00:06 +0530
+Subject: powercap: RAPL: fix invalid initialization for pl4_supported field
+
+From: Sumeet Pawnikar <sumeet.r.pawnikar@intel.com>
+
+commit d05b5e0baf424c8c4b4709ac11f66ab726c8deaf upstream.
+
+The current initialization of struct x86_cpu_id entries via
+pl4_support_ids[] is partial and wrong: it initializes the
+"steppings" field with "X86_FEATURE_ANY" instead of the "feature" field.
+
+Use the X86_MATCH_INTEL_FAM6_MODEL() macro instead of initializing
+each field of struct x86_cpu_id by hand for the pl4_support_ids[] list
+of CPUs. This macro internally uses the X86_MATCH_VENDOR_FAM_MODEL_FEATURE()
+macro, which initializes every field needed for x86 CPU matching with
+appropriate values.
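+
+For reference, a rough sketch of the field order involved (as declared in
+include/linux/mod_devicetable.h on the kernels targeted here):
+
+	struct x86_cpu_id {
+		__u16 vendor;
+		__u16 family;
+		__u16 model;
+		__u16 steppings;   /* 4th member: the positional initializer
+				      puts X86_FEATURE_ANY here...           */
+		__u16 feature;     /* ...and leaves this implicitly zero     */
+		kernel_ulong_t driver_data;
+	};
+
+The designated initializers generated by X86_MATCH_INTEL_FAM6_MODEL() set
+.feature = X86_FEATURE_ANY explicitly, so the table no longer depends on
+the member order.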
+
+Reported-by: Dave Hansen <dave.hansen@intel.com>
+Link: https://lore.kernel.org/lkml/28ead36b-2d9e-1a36-6f4e-04684e420260@intel.com
+Fixes: eb52bc2ae5b8 ("powercap: RAPL: Add Power Limit4 support for Meteor Lake SoC")
+Fixes: b08b95cf30f5 ("powercap: RAPL: Add Power Limit4 support for Alder Lake-N and Raptor Lake-P")
+Fixes: 515755906921 ("powercap: RAPL: Add Power Limit4 support for RaptorLake")
+Fixes: 1cc5b9a411e4 ("powercap: Add Power Limit4 support for Alder Lake SoC")
+Fixes: 8365a898fe53 ("powercap: Add Power Limit4 support")
+Signed-off-by: Sumeet Pawnikar <sumeet.r.pawnikar@intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+[ Ricardo: I removed METEORLAKE and METEORLAKE_L from pl4_support_ids as
+ they are not included in v6.1. ]
+Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/powercap/intel_rapl_msr.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+--- a/drivers/powercap/intel_rapl_msr.c
++++ b/drivers/powercap/intel_rapl_msr.c
+@@ -136,12 +136,12 @@ static int rapl_msr_write_raw(int cpu, s
+
+ /* List of verified CPUs. */
+ static const struct x86_cpu_id pl4_support_ids[] = {
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_TIGERLAKE_L, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_L, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_ALDERLAKE_N, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE, X86_FEATURE_ANY },
+- { X86_VENDOR_INTEL, 6, INTEL_FAM6_RAPTORLAKE_P, X86_FEATURE_ANY },
++ X86_MATCH_INTEL_FAM6_MODEL(TIGERLAKE_L, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, NULL),
++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, NULL),
+ {}
+ };
+
--- /dev/null
+From 49ac6f05ace5bb0070c68a0193aa05d3c25d4c83 Mon Sep 17 00:00:00 2001
+From: "Matthieu Baerts (NGI0)" <matttbe@kernel.org>
+Date: Tue, 10 Sep 2024 21:06:36 +0200
+Subject: selftests: mptcp: join: restrict fullmesh endp on 1st sf
+
+From: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+
+commit 49ac6f05ace5bb0070c68a0193aa05d3c25d4c83 upstream.
+
+A new endpoint using the IP of the initial subflow has been recently
+added to increase the code coverage. But it breaks the test when using
+old kernels not having commit 86e39e04482b ("mptcp: keep track of local
+endpoint still available for each msk"), e.g. on v5.15.
+
+Similar to commit d4c81bbb8600 ("selftests: mptcp: join: support local
+endpoint being tracked or not"), it is possible to add the new endpoint
+conditionally, by checking if "mptcp_pm_subflow_check_next" is present
+in kallsyms: this check is not directly linked to the commit introducing
+this symbol but to its parent one, which is linked anyway. So we can know
+in advance what the expected behaviour will be, and add the new endpoint
+only when it makes sense to do so.
+
+Fixes: 4878f9f8421f ("selftests: mptcp: join: validate fullmesh endp on 1st sf")
+Cc: stable@vger.kernel.org
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Link: https://patch.msgid.link/20240910-net-selftests-mptcp-fix-install-v1-1-8f124aa9156d@kernel.org
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Conflicts in mptcp_join.sh, because the 'run_tests' helper has been
+ modified in multiple commits that are not in this version, e.g. commit
+  e571fb09c893 ("selftests: mptcp: add speed env var"). The conflict was
+  only in the context; the new lines can still be added at the same place. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ tools/testing/selftests/net/mptcp/mptcp_join.sh | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+--- a/tools/testing/selftests/net/mptcp/mptcp_join.sh
++++ b/tools/testing/selftests/net/mptcp/mptcp_join.sh
+@@ -3048,7 +3048,9 @@ fullmesh_tests()
+ pm_nl_set_limits $ns1 1 3
+ pm_nl_set_limits $ns2 1 3
+ pm_nl_add_endpoint $ns1 10.0.2.1 flags signal
+- pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh
++ if mptcp_lib_kallsyms_has "mptcp_pm_subflow_check_next$"; then
++ pm_nl_add_endpoint $ns2 10.0.1.2 flags subflow,fullmesh
++ fi
+ run_tests $ns1 $ns2 10.0.1.1 0 0 fullmesh_1 slow
+ chk_join_nr 3 3 3
+ chk_add_nr 1 1
gpio-prevent-potential-speculation-leaks-in-gpio_device_get_desc.patch
can-mcp251xfd-properly-indent-labels.patch
can-mcp251xfd-move-mcp251xfd_timestamp_start-stop-into-mcp251xfd_chip_start-stop.patch
+selftests-mptcp-join-restrict-fullmesh-endp-on-1st-sf.patch
+btrfs-calculate-the-right-space-for-delayed-refs-when-updating-global-reserve.patch
+powercap-rapl-fix-invalid-initialization-for-pl4_supported-field.patch
+x86-mm-switch-to-new-intel-cpu-model-defines.patch
--- /dev/null
+From 2eda374e883ad297bd9fe575a16c1dc850346075 Mon Sep 17 00:00:00 2001
+From: Tony Luck <tony.luck@intel.com>
+Date: Wed, 24 Apr 2024 11:15:18 -0700
+Subject: x86/mm: Switch to new Intel CPU model defines
+
+From: Tony Luck <tony.luck@intel.com>
+
+commit 2eda374e883ad297bd9fe575a16c1dc850346075 upstream.
+
+New CPU #defines encode vendor and family as well as model.
+
+[ dhansen: vertically align 0's in invlpg_miss_ids[] ]
+
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Dave Hansen <dave.hansen@linux.intel.com>
+Signed-off-by: Borislav Petkov (AMD) <bp@alien8.de>
+Link: https://lore.kernel.org/all/20240424181518.41946-1-tony.luck%40intel.com
+[ Ricardo: I used the old match macro X86_MATCH_INTEL_FAM6_MODEL()
+ instead of X86_MATCH_VFM() as in the upstream commit.
+ I also kept the ALDERLAKE_N name instead of ATOM_GRACEMONT. Both refer
+ to the same CPU model. ]
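+
+For comparison, the upstream hunk expresses the same table with the new
+combined vendor/family/model defines, roughly (shown for illustration only,
+not part of this backport):
+
+	X86_MATCH_VFM(INTEL_ALDERLAKE,      0),
+	X86_MATCH_VFM(INTEL_ALDERLAKE_L,    0),
+	X86_MATCH_VFM(INTEL_ATOM_GRACEMONT, 0),
+	X86_MATCH_VFM(INTEL_RAPTORLAKE,     0),
+	X86_MATCH_VFM(INTEL_RAPTORLAKE_P,   0),
+	X86_MATCH_VFM(INTEL_RAPTORLAKE_S,   0),
+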
+Signed-off-by: Ricardo Neri <ricardo.neri-calderon@linux.intel.com>
+Reviewed-by: Pawan Gupta <pawan.kumar.gupta@linux.intel.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/mm/init.c | 16 ++++++----------
+ 1 file changed, 6 insertions(+), 10 deletions(-)
+
+--- a/arch/x86/mm/init.c
++++ b/arch/x86/mm/init.c
+@@ -262,21 +262,17 @@ static void __init probe_page_size_mask(
+ }
+ }
+
+-#define INTEL_MATCH(_model) { .vendor = X86_VENDOR_INTEL, \
+- .family = 6, \
+- .model = _model, \
+- }
+ /*
+ * INVLPG may not properly flush Global entries
+ * on these CPUs when PCIDs are enabled.
+ */
+ static const struct x86_cpu_id invlpg_miss_ids[] = {
+- INTEL_MATCH(INTEL_FAM6_ALDERLAKE ),
+- INTEL_MATCH(INTEL_FAM6_ALDERLAKE_L ),
+- INTEL_MATCH(INTEL_FAM6_ALDERLAKE_N ),
+- INTEL_MATCH(INTEL_FAM6_RAPTORLAKE ),
+- INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_P),
+- INTEL_MATCH(INTEL_FAM6_RAPTORLAKE_S),
++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE, 0),
++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_L, 0),
++ X86_MATCH_INTEL_FAM6_MODEL(ALDERLAKE_N, 0),
++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE, 0),
++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_P, 0),
++ X86_MATCH_INTEL_FAM6_MODEL(RAPTORLAKE_S, 0),
+ {}
+ };
+