--- /dev/null
+From 25057f4013d40baff46bd8d3005dc285eca5d420 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Oct 2022 13:34:23 -0700
+Subject: ACPI: extlog: Handle multiple records
+
+From: Tony Luck <tony.luck@intel.com>
+
+[ Upstream commit f6ec01da40e4139b41179f046044ee7c4f6370dc ]
+
+If there is no user space consumer of extlog_mem trace records, then
+Linux properly handles multiple error records in an ELOG block:
+
+ extlog_print()
+ print_extlog_rcd()
+ __print_extlog_rcd()
+ cper_estatus_print()
+ apei_estatus_for_each_section()
+
+But the other code path, which outputs a trace record, hard-codes the
+assumption that the block contains a single record.
+
+Fix by using the same apei_estatus_for_each_section() iterator
+to step over all records.
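+
+A condensed sketch of the fixed path (mirroring the diff below;
+declarations and the enclosing function are omitted):
+
+ apei_estatus_for_each_section(tmp, gdata) {
+ 	/* FRU id/text are per-section; fall back to defaults */
+ 	fru_id = (gdata->validation_bits & CPER_SEC_VALID_FRU_ID) ?
+ 		 (guid_t *)gdata->fru_id : &guid_null;
+ 	fru_text = (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT) ?
+ 		   gdata->fru_text : "";
+ 	sec_type = (guid_t *)gdata->section_type;
+ 	if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+ 		struct cper_sec_mem_err *mem = (void *)(gdata + 1);
+
+ 		if (gdata->error_data_length >= sizeof(*mem))
+ 			trace_extlog_mem_event(mem, err_seq, fru_id,
+ 					fru_text,
+ 					(u8)gdata->error_severity);
+ 	}
+ }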
+
+Fixes: 2dfb7d51a61d ("trace, RAS: Add eMCA trace event interface")
+Signed-off-by: Tony Luck <tony.luck@intel.com>
+Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/acpi/acpi_extlog.c | 33 ++++++++++++++++++++-------------
+ 1 file changed, 20 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/acpi/acpi_extlog.c b/drivers/acpi/acpi_extlog.c
+index 72f1fb77abcd..e648158368a7 100644
+--- a/drivers/acpi/acpi_extlog.c
++++ b/drivers/acpi/acpi_extlog.c
+@@ -12,6 +12,7 @@
+ #include <linux/ratelimit.h>
+ #include <linux/edac.h>
+ #include <linux/ras.h>
++#include <acpi/ghes.h>
+ #include <asm/cpu.h>
+ #include <asm/mce.h>
+
+@@ -138,8 +139,8 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
+ int cpu = mce->extcpu;
+ struct acpi_hest_generic_status *estatus, *tmp;
+ struct acpi_hest_generic_data *gdata;
+- const guid_t *fru_id = &guid_null;
+- char *fru_text = "";
++ const guid_t *fru_id;
++ char *fru_text;
+ guid_t *sec_type;
+ static u32 err_seq;
+
+@@ -160,17 +161,23 @@ static int extlog_print(struct notifier_block *nb, unsigned long val,
+
+ /* log event via trace */
+ err_seq++;
+- gdata = (struct acpi_hest_generic_data *)(tmp + 1);
+- if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
+- fru_id = (guid_t *)gdata->fru_id;
+- if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
+- fru_text = gdata->fru_text;
+- sec_type = (guid_t *)gdata->section_type;
+- if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
+- struct cper_sec_mem_err *mem = (void *)(gdata + 1);
+- if (gdata->error_data_length >= sizeof(*mem))
+- trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
+- (u8)gdata->error_severity);
++ apei_estatus_for_each_section(tmp, gdata) {
++ if (gdata->validation_bits & CPER_SEC_VALID_FRU_ID)
++ fru_id = (guid_t *)gdata->fru_id;
++ else
++ fru_id = &guid_null;
++ if (gdata->validation_bits & CPER_SEC_VALID_FRU_TEXT)
++ fru_text = gdata->fru_text;
++ else
++ fru_text = "";
++ sec_type = (guid_t *)gdata->section_type;
++ if (guid_equal(sec_type, &CPER_SEC_PLATFORM_MEM)) {
++ struct cper_sec_mem_err *mem = (void *)(gdata + 1);
++
++ if (gdata->error_data_length >= sizeof(*mem))
++ trace_extlog_mem_event(mem, err_seq, fru_id, fru_text,
++ (u8)gdata->error_severity);
++ }
+ }
+
+ out:
+--
+2.35.1
+
--- /dev/null
+From 62e31680681ecd189d282e28d8cd5d097430a751 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 15 Sep 2022 15:20:53 -0700
+Subject: arm64: mte: move register initialization to C
+
+From: Peter Collingbourne <pcc@google.com>
+
+[ Upstream commit 973b9e37330656dec719ede508e4dc40e5c2d80c ]
+
+If FEAT_MTE2 is disabled via the arm64.nomte command line argument on a
+CPU that claims to support FEAT_MTE2, the kernel will use Tagged Normal
+in the MAIR. If we interpret arm64.nomte to mean that the CPU does not
+in fact implement FEAT_MTE2, setting the system register like this may
+lead to UNSPECIFIED behavior. Fix it by arranging for MAIR to be set
+in the C function cpu_enable_mte(), which is called based on the sanitized
+version of the system register.
+
+There is no need for the rest of the MTE-related system register
+initialization to happen from assembly, with the exception of TCR_EL1,
+which must be set to include at least TBI1 because the secondary CPUs
+access KASan-allocated data structures early. Therefore, make the TCR_EL1
+initialization unconditional and move the rest of the initialization to
+cpu_enable_mte so that we no longer have a dependency on the unsanitized
+ID register value.
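+
+A condensed sketch of the resulting flow (based on the diff below; the
+full register programming lives in mte_cpu_setup()):
+
+ /* cpufeature.c: runs only when the *sanitized* ID registers report
+  * MTE, so arm64.nomte prevents any of this from happening */
+ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+ {
+ 	sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
+
+ 	mte_cpu_setup();	/* MAIR, GCR_EL1, RGSR_EL1, TFSR*_EL1 */
+ 	...
+ }
+
+ /* suspend.c: the resume path re-runs the same setup */
+ void notrace __cpu_suspend_exit(void)
+ {
+ 	...
+ 	mte_suspend_exit();	/* calls mte_cpu_setup() if MTE is on */
+ 	...
+ }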
+
+Co-developed-by: Evgenii Stepanov <eugenis@google.com>
+Signed-off-by: Peter Collingbourne <pcc@google.com>
+Signed-off-by: Evgenii Stepanov <eugenis@google.com>
+Suggested-by: Catalin Marinas <catalin.marinas@arm.com>
+Reported-by: kernel test robot <lkp@intel.com>
+Fixes: 3b714d24ef17 ("arm64: mte: CPU feature detection and initial sysreg configuration")
+Cc: <stable@vger.kernel.org> # 5.10.x
+Link: https://lore.kernel.org/r/20220915222053.3484231-1-eugenis@google.com
+Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/arm64/include/asm/mte.h | 5 ++++
+ arch/arm64/kernel/cpufeature.c | 3 +-
+ arch/arm64/kernel/mte.c | 51 ++++++++++++++++++++++++++++++++++
+ arch/arm64/kernel/suspend.c | 2 ++
+ arch/arm64/mm/proc.S | 46 ++++--------------------------
+ 5 files changed, 65 insertions(+), 42 deletions(-)
+
+diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
+index 02511650cffe..3e368ca66623 100644
+--- a/arch/arm64/include/asm/mte.h
++++ b/arch/arm64/include/asm/mte.h
+@@ -40,7 +40,9 @@ void mte_sync_tags(pte_t old_pte, pte_t pte);
+ void mte_copy_page_tags(void *kto, const void *kfrom);
+ void mte_thread_init_user(void);
+ void mte_thread_switch(struct task_struct *next);
++void mte_cpu_setup(void);
+ void mte_suspend_enter(void);
++void mte_suspend_exit(void);
+ long set_mte_ctrl(struct task_struct *task, unsigned long arg);
+ long get_mte_ctrl(struct task_struct *task);
+ int mte_ptrace_copy_tags(struct task_struct *child, long request,
+@@ -69,6 +71,9 @@ static inline void mte_thread_switch(struct task_struct *next)
+ static inline void mte_suspend_enter(void)
+ {
+ }
++static inline void mte_suspend_exit(void)
++{
++}
+ static inline long set_mte_ctrl(struct task_struct *task, unsigned long arg)
+ {
+ return 0;
+diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
+index 13e3cb1acbdf..d4ee345ff429 100644
+--- a/arch/arm64/kernel/cpufeature.c
++++ b/arch/arm64/kernel/cpufeature.c
+@@ -1903,7 +1903,8 @@ static void bti_enable(const struct arm64_cpu_capabilities *__unused)
+ static void cpu_enable_mte(struct arm64_cpu_capabilities const *cap)
+ {
+ sysreg_clear_set(sctlr_el1, 0, SCTLR_ELx_ATA | SCTLR_EL1_ATA0);
+- isb();
++
++ mte_cpu_setup();
+
+ /*
+ * Clear the tags in the zero page. This needs to be done via the
+diff --git a/arch/arm64/kernel/mte.c b/arch/arm64/kernel/mte.c
+index 7c1c82c8115c..dacca0684ea3 100644
+--- a/arch/arm64/kernel/mte.c
++++ b/arch/arm64/kernel/mte.c
+@@ -213,6 +213,49 @@ void mte_thread_switch(struct task_struct *next)
+ mte_check_tfsr_el1();
+ }
+
++void mte_cpu_setup(void)
++{
++ u64 rgsr;
++
++ /*
++ * CnP must be enabled only after the MAIR_EL1 register has been set
++ * up. Inconsistent MAIR_EL1 between CPUs sharing the same TLB may
++ * lead to the wrong memory type being used for a brief window during
++ * CPU power-up.
++ *
++ * CnP is not a boot feature so MTE gets enabled before CnP, but let's
++ * make sure that is the case.
++ */
++ BUG_ON(read_sysreg(ttbr0_el1) & TTBR_CNP_BIT);
++ BUG_ON(read_sysreg(ttbr1_el1) & TTBR_CNP_BIT);
++
++ /* Normal Tagged memory type at the corresponding MAIR index */
++ sysreg_clear_set(mair_el1,
++ MAIR_ATTRIDX(MAIR_ATTR_MASK, MT_NORMAL_TAGGED),
++ MAIR_ATTRIDX(MAIR_ATTR_NORMAL_TAGGED,
++ MT_NORMAL_TAGGED));
++
++ write_sysreg_s(KERNEL_GCR_EL1, SYS_GCR_EL1);
++
++ /*
++ * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
++ * RGSR_EL1.SEED must be non-zero for IRG to produce
++ * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
++ * must initialize it.
++ */
++ rgsr = (read_sysreg(CNTVCT_EL0) & SYS_RGSR_EL1_SEED_MASK) <<
++ SYS_RGSR_EL1_SEED_SHIFT;
++ if (rgsr == 0)
++ rgsr = 1 << SYS_RGSR_EL1_SEED_SHIFT;
++ write_sysreg_s(rgsr, SYS_RGSR_EL1);
++
++ /* clear any pending tag check faults in TFSR*_EL1 */
++ write_sysreg_s(0, SYS_TFSR_EL1);
++ write_sysreg_s(0, SYS_TFSRE0_EL1);
++
++ local_flush_tlb_all();
++}
++
+ void mte_suspend_enter(void)
+ {
+ if (!system_supports_mte())
+@@ -229,6 +272,14 @@ void mte_suspend_enter(void)
+ mte_check_tfsr_el1();
+ }
+
++void mte_suspend_exit(void)
++{
++ if (!system_supports_mte())
++ return;
++
++ mte_cpu_setup();
++}
++
+ long set_mte_ctrl(struct task_struct *task, unsigned long arg)
+ {
+ u64 mte_ctrl = (~((arg & PR_MTE_TAG_MASK) >> PR_MTE_TAG_SHIFT) &
+diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
+index 19ee7c33769d..d473ec204fef 100644
+--- a/arch/arm64/kernel/suspend.c
++++ b/arch/arm64/kernel/suspend.c
+@@ -43,6 +43,8 @@ void notrace __cpu_suspend_exit(void)
+ {
+ unsigned int cpu = smp_processor_id();
+
++ mte_suspend_exit();
++
+ /*
+ * We are resuming from reset with the idmap active in TTBR0_EL1.
+ * We must uninstall the idmap and restore the expected MMU
+diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
+index 50bbed947bec..1a9684b11474 100644
+--- a/arch/arm64/mm/proc.S
++++ b/arch/arm64/mm/proc.S
+@@ -47,17 +47,19 @@
+
+ #ifdef CONFIG_KASAN_HW_TAGS
+ #define TCR_MTE_FLAGS TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
+-#else
++#elif defined(CONFIG_ARM64_MTE)
+ /*
+ * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
+ * TBI being enabled at EL1.
+ */
+ #define TCR_MTE_FLAGS TCR_TBI1 | TCR_TBID1
++#else
++#define TCR_MTE_FLAGS 0
+ #endif
+
+ /*
+ * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
+- * changed during __cpu_setup to Normal Tagged if the system supports MTE.
++ * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
+ */
+ #define MAIR_EL1_SET \
+ (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \
+@@ -421,46 +423,8 @@ SYM_FUNC_START(__cpu_setup)
+ mov_q mair, MAIR_EL1_SET
+ mov_q tcr, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \
+ TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \
+- TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS
+-
+-#ifdef CONFIG_ARM64_MTE
+- /*
+- * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported
+- * (ID_AA64PFR1_EL1[11:8] > 1).
+- */
+- mrs x10, ID_AA64PFR1_EL1
+- ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4
+- cmp x10, #ID_AA64PFR1_MTE
+- b.lt 1f
+-
+- /* Normal Tagged memory type at the corresponding MAIR index */
+- mov x10, #MAIR_ATTR_NORMAL_TAGGED
+- bfi mair, x10, #(8 * MT_NORMAL_TAGGED), #8
++ TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS
+
+- mov x10, #KERNEL_GCR_EL1
+- msr_s SYS_GCR_EL1, x10
+-
+- /*
+- * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then
+- * RGSR_EL1.SEED must be non-zero for IRG to produce
+- * pseudorandom numbers. As RGSR_EL1 is UNKNOWN out of reset, we
+- * must initialize it.
+- */
+- mrs x10, CNTVCT_EL0
+- ands x10, x10, #SYS_RGSR_EL1_SEED_MASK
+- csinc x10, x10, xzr, ne
+- lsl x10, x10, #SYS_RGSR_EL1_SEED_SHIFT
+- msr_s SYS_RGSR_EL1, x10
+-
+- /* clear any pending tag check faults in TFSR*_EL1 */
+- msr_s SYS_TFSR_EL1, xzr
+- msr_s SYS_TFSRE0_EL1, xzr
+-
+- /* set the TCR_EL1 bits */
+- mov_q x10, TCR_MTE_FLAGS
+- orr tcr, tcr, x10
+-1:
+-#endif
+ tcr_clear_errata_bits tcr, x9, x5
+
+ #ifdef CONFIG_ARM64_VA_BITS_52
+--
+2.35.1
+
--- /dev/null
+From 019fc9c7d081504818aad53b437b4f6189919ae3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Oct 2022 13:16:51 +0100
+Subject: btrfs: fix processing of delayed data refs during backref walking
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 4fc7b57228243d09c0d878873bf24fa64a90fa01 ]
+
+When processing delayed data references during backref walking and we are
+using a share context (we are being called through fiemap), whenever we
+find a delayed data reference for an inode different from the one we are
+interested in, we immediately exit and consider the data extent as
+shared. This is wrong, because:
+
+1) This might be a DROP reference that will cancel out a reference in the
+ extent tree;
+
+2) Even if it's an ADD reference, it may be followed by a DROP reference
+ that cancels it out.
+
+In either case we should not exit immediately.
+
+Fix this by never exiting when we find a delayed data reference for
+another inode - instead add the reference and, if it does not cancel
+out another delayed reference, we will exit early when we call
+extent_is_shared() after processing all delayed references. If we find
+a drop reference, signal the code that processes references from the
+extent tree (add_inline_refs() and add_keyed_refs()) not to exit
+immediately if it finds a reference for another inode there, since we
+have delayed drop references that may cancel it out. In this latter
+case we exit once there are no references in the rb trees that cancel
+each other out and we have two references for different inodes.
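+
+The two sides of the fix, condensed from the diff below:
+
+ /* add_delayed_refs(): never exit early here; just record that
+  * delayed DROP references exist */
+ if (sc && count < 0)
+ 	sc->have_delayed_delete_refs = true;
+
+ /* add_inline_refs() and add_keyed_refs(): only report the extent
+  * as shared if no delayed DROP reference may cancel this one out */
+ if (sc && sc->inum && key.objectid != sc->inum &&
+     !sc->have_delayed_delete_refs) {
+ 	ret = BACKREF_FOUND_SHARED;
+ 	break;
+ }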
+
+Example reproducer for case 1):
+
+ $ cat test-1.sh
+ #!/bin/bash
+
+ DEV=/dev/sdj
+ MNT=/mnt/sdj
+
+ mkfs.btrfs -f $DEV
+ mount $DEV $MNT
+
+ xfs_io -f -c "pwrite 0 64K" $MNT/foo
+ cp --reflink=always $MNT/foo $MNT/bar
+
+ echo
+ echo "fiemap after cloning:"
+ xfs_io -c "fiemap -v" $MNT/foo
+
+ rm -f $MNT/bar
+ echo
+ echo "fiemap after removing file bar:"
+ xfs_io -c "fiemap -v" $MNT/foo
+
+ umount $MNT
+
+Running it before this patch, the extent is still listed as shared; it has
+the flag 0x2000 (FIEMAP_EXTENT_SHARED) set:
+
+ $ ./test-1.sh
+ fiemap after cloning:
+ /mnt/sdj/foo:
+ EXT: FILE-OFFSET BLOCK-RANGE TOTAL FLAGS
+ 0: [0..127]: 26624..26751 128 0x2001
+
+ fiemap after removing file bar:
+ /mnt/sdj/foo:
+ EXT: FILE-OFFSET BLOCK-RANGE TOTAL FLAGS
+ 0: [0..127]: 26624..26751 128 0x2001
+
+Example reproducer for case 2):
+
+ $ cat test-2.sh
+ #!/bin/bash
+
+ DEV=/dev/sdj
+ MNT=/mnt/sdj
+
+ mkfs.btrfs -f $DEV
+ mount $DEV $MNT
+
+ xfs_io -f -c "pwrite 0 64K" $MNT/foo
+ cp --reflink=always $MNT/foo $MNT/bar
+
+ # Flush delayed references to the extent tree and commit current
+ # transaction.
+ sync
+
+ echo
+ echo "fiemap after cloning:"
+ xfs_io -c "fiemap -v" $MNT/foo
+
+ rm -f $MNT/bar
+ echo
+ echo "fiemap after removing file bar:"
+ xfs_io -c "fiemap -v" $MNT/foo
+
+ umount $MNT
+
+Running it before this patch, the extent is still listed as shared; it has
+the flag 0x2000 (FIEMAP_EXTENT_SHARED) set:
+
+ $ ./test-2.sh
+ fiemap after cloning:
+ /mnt/sdj/foo:
+ EXT: FILE-OFFSET BLOCK-RANGE TOTAL FLAGS
+ 0: [0..127]: 26624..26751 128 0x2001
+
+ fiemap after removing file bar:
+ /mnt/sdj/foo:
+ EXT: FILE-OFFSET BLOCK-RANGE TOTAL FLAGS
+ 0: [0..127]: 26624..26751 128 0x2001
+
+After this patch, after deleting bar in both tests, the extent is not
+reported with the 0x2000 flag anymore; it gets only the flag 0x1
+(which is FIEMAP_EXTENT_LAST):
+
+ $ ./test-1.sh
+ fiemap after cloning:
+ /mnt/sdj/foo:
+ EXT: FILE-OFFSET BLOCK-RANGE TOTAL FLAGS
+ 0: [0..127]: 26624..26751 128 0x2001
+
+ fiemap after removing file bar:
+ /mnt/sdj/foo:
+ EXT: FILE-OFFSET BLOCK-RANGE TOTAL FLAGS
+ 0: [0..127]: 26624..26751 128 0x1
+
+ $ ./test-2.sh
+ fiemap after cloning:
+ /mnt/sdj/foo:
+ EXT: FILE-OFFSET BLOCK-RANGE TOTAL FLAGS
+ 0: [0..127]: 26624..26751 128 0x2001
+
+ fiemap after removing file bar:
+ /mnt/sdj/foo:
+ EXT: FILE-OFFSET BLOCK-RANGE TOTAL FLAGS
+ 0: [0..127]: 26624..26751 128 0x1
+
+These tests will later be converted to a test case for fstests.
+
+Fixes: dc046b10c8b7d4 ("Btrfs: make fiemap not blow when you have lots of snapshots")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/backref.c | 33 ++++++++++++++++++++++++---------
+ 1 file changed, 24 insertions(+), 9 deletions(-)
+
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index 8b090c40daf7..f33bad9f5e29 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -138,6 +138,7 @@ struct share_check {
+ u64 root_objectid;
+ u64 inum;
+ int share_count;
++ bool have_delayed_delete_refs;
+ };
+
+ static inline int extent_is_shared(struct share_check *sc)
+@@ -882,13 +883,22 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
+ key.offset = ref->offset;
+
+ /*
+- * Found a inum that doesn't match our known inum, we
+- * know it's shared.
++ * If we have a share check context and a reference for
++ * another inode, we can't exit immediately. This is
++ * because even if this is a BTRFS_ADD_DELAYED_REF
++ * reference we may find next a BTRFS_DROP_DELAYED_REF
++ * which cancels out this ADD reference.
++ *
++ * If this is a DROP reference and there was no previous
++ * ADD reference, then we need to signal that when we
++ * process references from the extent tree (through
++ * add_inline_refs() and add_keyed_refs()), we should
++ * not exit early if we find a reference for another
++ * inode, because one of the delayed DROP references
++ * may cancel that reference in the extent tree.
+ */
+- if (sc && sc->inum && ref->objectid != sc->inum) {
+- ret = BACKREF_FOUND_SHARED;
+- goto out;
+- }
++ if (sc && count < 0)
++ sc->have_delayed_delete_refs = true;
+
+ ret = add_indirect_ref(fs_info, preftrees, ref->root,
+ &key, 0, node->bytenr, count, sc,
+@@ -918,7 +928,7 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
+ }
+ if (!ret)
+ ret = extent_is_shared(sc);
+-out:
++
+ spin_unlock(&head->lock);
+ return ret;
+ }
+@@ -1021,7 +1031,8 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = btrfs_extent_data_ref_offset(leaf, dref);
+
+- if (sc && sc->inum && key.objectid != sc->inum) {
++ if (sc && sc->inum && key.objectid != sc->inum &&
++ !sc->have_delayed_delete_refs) {
+ ret = BACKREF_FOUND_SHARED;
+ break;
+ }
+@@ -1031,6 +1042,7 @@ static int add_inline_refs(const struct btrfs_fs_info *fs_info,
+ ret = add_indirect_ref(fs_info, preftrees, root,
+ &key, 0, bytenr, count,
+ sc, GFP_NOFS);
++
+ break;
+ }
+ default:
+@@ -1120,7 +1132,8 @@ static int add_keyed_refs(struct btrfs_fs_info *fs_info,
+ key.type = BTRFS_EXTENT_DATA_KEY;
+ key.offset = btrfs_extent_data_ref_offset(leaf, dref);
+
+- if (sc && sc->inum && key.objectid != sc->inum) {
++ if (sc && sc->inum && key.objectid != sc->inum &&
++ !sc->have_delayed_delete_refs) {
+ ret = BACKREF_FOUND_SHARED;
+ break;
+ }
+@@ -1547,6 +1560,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
+ .root_objectid = root->root_key.objectid,
+ .inum = inum,
+ .share_count = 0,
++ .have_delayed_delete_refs = false,
+ };
+
+ ulist_init(roots);
+@@ -1581,6 +1595,7 @@ int btrfs_check_shared(struct btrfs_root *root, u64 inum, u64 bytenr,
+ break;
+ bytenr = node->val;
+ shared.share_count = 0;
++ shared.have_delayed_delete_refs = false;
+ cond_resched();
+ }
+
+--
+2.35.1
+
--- /dev/null
+From 66ba58eef70415529a640e8ea9a77b34e8de2788 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 11 Oct 2022 13:16:52 +0100
+Subject: btrfs: fix processing of delayed tree block refs during backref
+ walking
+
+From: Filipe Manana <fdmanana@suse.com>
+
+[ Upstream commit 943553ef9b51db303ab2b955c1025261abfdf6fb ]
+
+During backref walking, when processing a delayed reference with a type of
+BTRFS_TREE_BLOCK_REF_KEY, we have two bugs there:
+
+1) We are accessing the delayed references extent_op, and its key, without
+ the protection of the delayed ref head's lock;
+
+2) If there's no extent op for the delayed ref head, we end up with an
+ uninitialized key in the stack, variable 'tmp_op_key', and then pass
+ it to add_indirect_ref(), which adds the reference to the indirect
+ refs rb tree.
+
+ This is wrong, because indirect references should have a NULL key
+ when we don't have access to the key, and in that case they should be
+ added to the indirect_missing_keys rb tree and not to the indirect rb
+ tree.
+
+   This means that if we have a BTRFS_TREE_BLOCK_REF_KEY delayed ref
+   resulting from freeing an extent buffer, therefore with a count of -1,
+   it will not cancel out the corresponding reference we have in the
+   extent tree (with a count of 1), since both references end up in
+   different rb trees.
+
+ When using fiemap, where we often need to check if extents are shared
+ through shared subtrees resulting from snapshots, it means we can
+ incorrectly report an extent as shared when it's no longer shared.
+   However, this is temporary because after the transaction is committed
+ the extent is no longer reported as shared, as running the delayed
+ reference results in deleting the tree block reference from the extent
+ tree.
+
+ Outside the fiemap context, the result is unpredictable, as the key was
+ not initialized but it's used when navigating the rb trees to insert
+ and search for references (prelim_ref_compare()), and we expect all
+ references in the indirect rb tree to have valid keys.
+
+The following reproducer triggers the second bug:
+
+ $ cat test.sh
+ #!/bin/bash
+
+ DEV=/dev/sdj
+ MNT=/mnt/sdj
+
+ mkfs.btrfs -f $DEV
+ mount -o compress $DEV $MNT
+
+ # With a compressed 128M file we get a tree height of 2 (level 1 root).
+ xfs_io -f -c "pwrite -b 1M 0 128M" $MNT/foo
+
+ btrfs subvolume snapshot $MNT $MNT/snap
+
+ # Fiemap should output 0x2008 in the flags column.
+ # 0x2000 means shared extent
+ # 0x8 means encoded extent (because it's compressed)
+ echo
+ echo "fiemap after snapshot, range [120M, 120M + 128K):"
+ xfs_io -c "fiemap -v 120M 128K" $MNT/foo
+ echo
+
+ # Overwrite one extent and fsync to flush delalloc and COW a new path
+ # in the snapshot's tree.
+ #
+ # After this we have a BTRFS_DROP_DELAYED_REF delayed ref of type
+ # BTRFS_TREE_BLOCK_REF_KEY with a count of -1 for every COWed extent
+ # buffer in the path.
+ #
+ # In the extent tree we have inline references of type
+ # BTRFS_TREE_BLOCK_REF_KEY, with a count of 1, for the same extent
+ # buffers, so they should cancel each other, and the extent buffers in
+ # the fs tree should no longer be considered as shared.
+ #
+ echo "Overwriting file range [120M, 120M + 128K)..."
+ xfs_io -c "pwrite -b 128K 120M 128K" $MNT/snap/foo
+ xfs_io -c "fsync" $MNT/snap/foo
+
+ # Fiemap should output 0x8 in the flags column. The extent in the range
+ # [120M, 120M + 128K) is no longer shared, it's now exclusive to the fs
+ # tree.
+ echo
+ echo "fiemap after overwrite range [120M, 120M + 128K):"
+ xfs_io -c "fiemap -v 120M 128K" $MNT/foo
+ echo
+
+ umount $MNT
+
+Running it before this patch:
+
+ $ ./test.sh
+ (...)
+ wrote 134217728/134217728 bytes at offset 0
+ 128 MiB, 128 ops; 0.1152 sec (1.085 GiB/sec and 1110.5809 ops/sec)
+ Create a snapshot of '/mnt/sdj' in '/mnt/sdj/snap'
+
+ fiemap after snapshot, range [120M, 120M + 128K):
+ /mnt/sdj/foo:
+ EXT: FILE-OFFSET BLOCK-RANGE TOTAL FLAGS
+ 0: [245760..246015]: 34304..34559 256 0x2008
+
+ Overwriting file range [120M, 120M + 128K)...
+ wrote 131072/131072 bytes at offset 125829120
+ 128 KiB, 1 ops; 0.0001 sec (683.060 MiB/sec and 5464.4809 ops/sec)
+
+ fiemap after overwrite range [120M, 120M + 128K):
+ /mnt/sdj/foo:
+ EXT: FILE-OFFSET BLOCK-RANGE TOTAL FLAGS
+ 0: [245760..246015]: 34304..34559 256 0x2008
+
+The extent in the range [120M, 120M + 128K) is still reported as shared
+(0x2000 bit set) after overwriting that range and flushing delalloc, which
+is not correct - an entire path was COWed in the snapshot's tree and the
+extent is now only referenced by the original fs tree.
+
+Running it after this patch:
+
+ $ ./test.sh
+ (...)
+ wrote 134217728/134217728 bytes at offset 0
+ 128 MiB, 128 ops; 0.1198 sec (1.043 GiB/sec and 1068.2067 ops/sec)
+ Create a snapshot of '/mnt/sdj' in '/mnt/sdj/snap'
+
+ fiemap after snapshot, range [120M, 120M + 128K):
+ /mnt/sdj/foo:
+ EXT: FILE-OFFSET BLOCK-RANGE TOTAL FLAGS
+ 0: [245760..246015]: 34304..34559 256 0x2008
+
+ Overwriting file range [120M, 120M + 128K)...
+ wrote 131072/131072 bytes at offset 125829120
+ 128 KiB, 1 ops; 0.0001 sec (694.444 MiB/sec and 5555.5556 ops/sec)
+
+ fiemap after overwrite range [120M, 120M + 128K):
+ /mnt/sdj/foo:
+ EXT: FILE-OFFSET BLOCK-RANGE TOTAL FLAGS
+ 0: [245760..246015]: 34304..34559 256 0x8
+
+Now the extent is not reported as shared anymore.
+
+So fix this by passing a NULL key pointer to add_indirect_ref() when
+processing a delayed reference for a tree block if there's no extent op
+for our delayed ref head with a defined key. Also, access the extent op
+only while holding the delayed ref head's lock.
+
+The reproducer will be converted later to a test case for fstests.
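+
+Condensed from the diff below, the extent op is now read under
+head->lock and the key is only passed to add_indirect_ref() when it is
+actually defined:
+
+ struct btrfs_key *key_ptr = NULL;
+
+ if (head->extent_op && head->extent_op->update_key) {
+ 	btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
+ 	key_ptr = &key;
+ }
+
+ ref = btrfs_delayed_node_to_tree_ref(node);
+ ret = add_indirect_ref(fs_info, preftrees, ref->root, key_ptr,
+ 		       ref->level + 1, node->bytenr, count, sc,
+ 		       GFP_ATOMIC);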
+
+Fixes: 86d5f994425252 ("btrfs: convert prelimary reference tracking to use rbtrees")
+Fixes: a6dbceafb915e8 ("btrfs: Remove unused op_key var from add_delayed_refs")
+Signed-off-by: Filipe Manana <fdmanana@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/backref.c | 13 +++++++------
+ 1 file changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
+index f33bad9f5e29..2e7c3e48bc9c 100644
+--- a/fs/btrfs/backref.c
++++ b/fs/btrfs/backref.c
+@@ -819,16 +819,11 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
+ struct preftrees *preftrees, struct share_check *sc)
+ {
+ struct btrfs_delayed_ref_node *node;
+- struct btrfs_delayed_extent_op *extent_op = head->extent_op;
+ struct btrfs_key key;
+- struct btrfs_key tmp_op_key;
+ struct rb_node *n;
+ int count;
+ int ret = 0;
+
+- if (extent_op && extent_op->update_key)
+- btrfs_disk_key_to_cpu(&tmp_op_key, &extent_op->key);
+-
+ spin_lock(&head->lock);
+ for (n = rb_first_cached(&head->ref_tree); n; n = rb_next(n)) {
+ node = rb_entry(n, struct btrfs_delayed_ref_node,
+@@ -854,10 +849,16 @@ static int add_delayed_refs(const struct btrfs_fs_info *fs_info,
+ case BTRFS_TREE_BLOCK_REF_KEY: {
+ /* NORMAL INDIRECT METADATA backref */
+ struct btrfs_delayed_tree_ref *ref;
++ struct btrfs_key *key_ptr = NULL;
++
++ if (head->extent_op && head->extent_op->update_key) {
++ btrfs_disk_key_to_cpu(&key, &head->extent_op->key);
++ key_ptr = &key;
++ }
+
+ ref = btrfs_delayed_node_to_tree_ref(node);
+ ret = add_indirect_ref(fs_info, preftrees, ref->root,
+- &tmp_op_key, ref->level + 1,
++ key_ptr, ref->level + 1,
+ node->bytenr, count, sc,
+ GFP_ATOMIC);
+ break;
+--
+2.35.1
+
--- /dev/null
+From 824dc7ddba91e1469044597d361d97df752b4043 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Oct 2021 11:21:36 +0300
+Subject: btrfs: pull up qgroup checks from delayed-ref core to init time
+
+From: Nikolay Borisov <nborisov@suse.com>
+
+[ Upstream commit 681145d4acf4ecba052432f2e466b120c3739d01 ]
+
+Instead of checking whether qgroup processing for a delayed ref has to
+happen in the core of delayed ref handling, simply pull the check up to
+init time of the respective delayed ref structures. This eliminates the
+final use of real_root in the delayed-ref core, paving the way to making
+this member optional.
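+
+For example, in btrfs_init_tree_ref() the decision is now made once at
+init time (condensed from the diff below; root, mod_root and
+skip_qgroup are the helper's parameters, with the analogous change in
+btrfs_init_data_ref()):
+
+ if (skip_qgroup || !(is_fstree(root) &&
+     (!mod_root || is_fstree(mod_root))))
+ 	generic_ref->skip_qgroup = true;
+ else
+ 	generic_ref->skip_qgroup = false;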
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: cef7820d6abf ("btrfs: fix missed extent on fsync after dropping extent maps")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/delayed-ref.c | 4 ----
+ fs/btrfs/delayed-ref.h | 11 +++++++++++
+ fs/btrfs/extent-tree.c | 5 -----
+ fs/btrfs/inode.c | 1 -
+ fs/btrfs/relocation.c | 7 -------
+ 5 files changed, 11 insertions(+), 17 deletions(-)
+
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index 53a80163c1d7..5f4b58d8d101 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -921,8 +921,6 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ }
+
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+- is_fstree(generic_ref->real_root) &&
+- is_fstree(generic_ref->tree_ref.owning_root) &&
+ !generic_ref->skip_qgroup) {
+ record = kzalloc(sizeof(*record), GFP_NOFS);
+ if (!record) {
+@@ -1027,8 +1025,6 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+ }
+
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+- is_fstree(ref_root) &&
+- is_fstree(generic_ref->real_root) &&
+ !generic_ref->skip_qgroup) {
+ record = kzalloc(sizeof(*record), GFP_NOFS);
+ if (!record) {
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index 8ab79def8e98..7a0a2b21975d 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -279,6 +279,12 @@ static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref,
+ generic_ref->tree_ref.level = level;
+ generic_ref->tree_ref.owning_root = root;
+ generic_ref->type = BTRFS_REF_METADATA;
++ if (skip_qgroup || !(is_fstree(root) &&
++ (!mod_root || is_fstree(mod_root))))
++ generic_ref->skip_qgroup = true;
++ else
++ generic_ref->skip_qgroup = false;
++
+ }
+
+ static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
+@@ -292,6 +298,11 @@ static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
+ generic_ref->data_ref.ino = ino;
+ generic_ref->data_ref.offset = offset;
+ generic_ref->type = BTRFS_REF_DATA;
++ if (skip_qgroup || !(is_fstree(ref_root) &&
++ (!mod_root || is_fstree(mod_root))))
++ generic_ref->skip_qgroup = true;
++ else
++ generic_ref->skip_qgroup = false;
+ }
+
+ static inline struct btrfs_delayed_extent_op *
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index 8419315a59ff..9fdb880f900e 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -2437,11 +2437,9 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ key.offset -= btrfs_file_extent_offset(buf, fi);
+ btrfs_init_generic_ref(&generic_ref, action, bytenr,
+ num_bytes, parent);
+- generic_ref.real_root = root->root_key.objectid;
+ btrfs_init_data_ref(&generic_ref, ref_root, key.objectid,
+ key.offset, root->root_key.objectid,
+ for_reloc);
+- generic_ref.skip_qgroup = for_reloc;
+ if (inc)
+ ret = btrfs_inc_extent_ref(trans, &generic_ref);
+ else
+@@ -2453,10 +2451,8 @@ static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
+ num_bytes = fs_info->nodesize;
+ btrfs_init_generic_ref(&generic_ref, action, bytenr,
+ num_bytes, parent);
+- generic_ref.real_root = root->root_key.objectid;
+ btrfs_init_tree_ref(&generic_ref, level - 1, ref_root,
+ root->root_key.objectid, for_reloc);
+- generic_ref.skip_qgroup = for_reloc;
+ if (inc)
+ ret = btrfs_inc_extent_ref(trans, &generic_ref);
+ else
+@@ -4922,7 +4918,6 @@ struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
+
+ btrfs_init_generic_ref(&generic_ref, BTRFS_ADD_DELAYED_EXTENT,
+ ins.objectid, ins.offset, parent);
+- generic_ref.real_root = root->root_key.objectid;
+ btrfs_init_tree_ref(&generic_ref, level, root_objectid,
+ root->root_key.objectid, false);
+ btrfs_ref_tree_mod(fs_info, &generic_ref);
+diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
+index f8a01964a216..f9df0000cd88 100644
+--- a/fs/btrfs/inode.c
++++ b/fs/btrfs/inode.c
+@@ -4974,7 +4974,6 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
+
+ btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF,
+ extent_start, extent_num_bytes, 0);
+- ref.real_root = root->root_key.objectid;
+ btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
+ ino, extent_offset,
+ root->root_key.objectid, false);
+diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c
+index becf3396d533..374c5353fe65 100644
+--- a/fs/btrfs/relocation.c
++++ b/fs/btrfs/relocation.c
+@@ -1145,7 +1145,6 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
+ key.offset -= btrfs_file_extent_offset(leaf, fi);
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
+ num_bytes, parent);
+- ref.real_root = root->root_key.objectid;
+ btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
+ key.objectid, key.offset,
+ root->root_key.objectid, false);
+@@ -1157,7 +1156,6 @@ int replace_file_extents(struct btrfs_trans_handle *trans,
+
+ btrfs_init_generic_ref(&ref, BTRFS_DROP_DELAYED_REF, bytenr,
+ num_bytes, parent);
+- ref.real_root = root->root_key.objectid;
+ btrfs_init_data_ref(&ref, btrfs_header_owner(leaf),
+ key.objectid, key.offset,
+ root->root_key.objectid, false);
+@@ -1371,7 +1369,6 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
+
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, old_bytenr,
+ blocksize, path->nodes[level]->start);
+- ref.skip_qgroup = true;
+ btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
+ 0, true);
+ ret = btrfs_inc_extent_ref(trans, &ref);
+@@ -1381,7 +1378,6 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
+ }
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF, new_bytenr,
+ blocksize, 0);
+- ref.skip_qgroup = true;
+ btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid, 0,
+ true);
+ ret = btrfs_inc_extent_ref(trans, &ref);
+@@ -1394,7 +1390,6 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
+ blocksize, path->nodes[level]->start);
+ btrfs_init_tree_ref(&ref, level - 1, src->root_key.objectid,
+ 0, true);
+- ref.skip_qgroup = true;
+ ret = btrfs_free_extent(trans, &ref);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+@@ -1405,7 +1400,6 @@ int replace_path(struct btrfs_trans_handle *trans, struct reloc_control *rc,
+ blocksize, 0);
+ btrfs_init_tree_ref(&ref, level - 1, dest->root_key.objectid,
+ 0, true);
+- ref.skip_qgroup = true;
+ ret = btrfs_free_extent(trans, &ref);
+ if (ret) {
+ btrfs_abort_transaction(trans, ret);
+@@ -2481,7 +2475,6 @@ static int do_relocation(struct btrfs_trans_handle *trans,
+ btrfs_init_generic_ref(&ref, BTRFS_ADD_DELAYED_REF,
+ node->eb->start, blocksize,
+ upper->eb->start);
+- ref.real_root = root->root_key.objectid;
+ btrfs_init_tree_ref(&ref, node->level,
+ btrfs_header_owner(upper->eb),
+ root->root_key.objectid, false);
+--
+2.35.1
+
--- /dev/null
+From e17ece4ef86c5ef5191c06eaa8bba72235325f44 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 12 Oct 2021 11:21:33 +0300
+Subject: btrfs: rename root fields in delayed refs structs
+
+From: Nikolay Borisov <nborisov@suse.com>
+
+[ Upstream commit 113479d5b8eb20d685da63b89e97b6ebb4206f15 ]
+
+Both data and metadata delayed ref structures have fields named
+root/ref_root respectively. Those are somewhat cryptic and don't really
+convey the real meaning. In fact those roots are really the original
+owners of the respective block (i.e in case of a snapshot a data delayed
+ref will contain the original root that owns the given block). Rename
+those fields accordingly and adjust comments.
+
+Signed-off-by: Nikolay Borisov <nborisov@suse.com>
+Reviewed-by: David Sterba <dsterba@suse.com>
+Signed-off-by: David Sterba <dsterba@suse.com>
+Stable-dep-of: cef7820d6abf ("btrfs: fix missed extent on fsync after dropping extent maps")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/btrfs/delayed-ref.c | 13 +++++++------
+ fs/btrfs/delayed-ref.h | 12 ++++++------
+ fs/btrfs/extent-tree.c | 10 +++++-----
+ fs/btrfs/ref-verify.c | 4 ++--
+ 4 files changed, 20 insertions(+), 19 deletions(-)
+
+diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
+index ca848b183474..53a80163c1d7 100644
+--- a/fs/btrfs/delayed-ref.c
++++ b/fs/btrfs/delayed-ref.c
+@@ -922,7 +922,7 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+
+ if (test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags) &&
+ is_fstree(generic_ref->real_root) &&
+- is_fstree(generic_ref->tree_ref.root) &&
++ is_fstree(generic_ref->tree_ref.owning_root) &&
+ !generic_ref->skip_qgroup) {
+ record = kzalloc(sizeof(*record), GFP_NOFS);
+ if (!record) {
+@@ -938,14 +938,15 @@ int btrfs_add_delayed_tree_ref(struct btrfs_trans_handle *trans,
+ ref_type = BTRFS_TREE_BLOCK_REF_KEY;
+
+ init_delayed_ref_common(fs_info, &ref->node, bytenr, num_bytes,
+- generic_ref->tree_ref.root, action, ref_type);
+- ref->root = generic_ref->tree_ref.root;
++ generic_ref->tree_ref.owning_root, action,
++ ref_type);
++ ref->root = generic_ref->tree_ref.owning_root;
+ ref->parent = parent;
+ ref->level = level;
+
+ init_delayed_ref_head(head_ref, record, bytenr, num_bytes,
+- generic_ref->tree_ref.root, 0, action, false,
+- is_system);
++ generic_ref->tree_ref.owning_root, 0, action,
++ false, is_system);
+ head_ref->extent_op = extent_op;
+
+ delayed_refs = &trans->transaction->delayed_refs;
+@@ -997,7 +998,7 @@ int btrfs_add_delayed_data_ref(struct btrfs_trans_handle *trans,
+ u64 bytenr = generic_ref->bytenr;
+ u64 num_bytes = generic_ref->len;
+ u64 parent = generic_ref->parent;
+- u64 ref_root = generic_ref->data_ref.ref_root;
++ u64 ref_root = generic_ref->data_ref.owning_root;
+ u64 owner = generic_ref->data_ref.ino;
+ u64 offset = generic_ref->data_ref.offset;
+ u8 ref_type;
+diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
+index 31266ba1d430..8ab79def8e98 100644
+--- a/fs/btrfs/delayed-ref.h
++++ b/fs/btrfs/delayed-ref.h
+@@ -186,8 +186,8 @@ enum btrfs_ref_type {
+ struct btrfs_data_ref {
+ /* For EXTENT_DATA_REF */
+
+- /* Root which refers to this data extent */
+- u64 ref_root;
++ /* Original root this data extent belongs to */
++ u64 owning_root;
+
+ /* Inode which refers to this data extent */
+ u64 ino;
+@@ -210,11 +210,11 @@ struct btrfs_tree_ref {
+ int level;
+
+ /*
+- * Root which refers to this tree block.
++ * Root which owns this tree block.
+ *
+ * For TREE_BLOCK_REF (skinny metadata, either inline or keyed)
+ */
+- u64 root;
++ u64 owning_root;
+
+ /* For non-skinny metadata, no special member needed */
+ };
+@@ -277,7 +277,7 @@ static inline void btrfs_init_tree_ref(struct btrfs_ref *generic_ref,
+ if (!generic_ref->real_root)
+ generic_ref->real_root = root;
+ generic_ref->tree_ref.level = level;
+- generic_ref->tree_ref.root = root;
++ generic_ref->tree_ref.owning_root = root;
+ generic_ref->type = BTRFS_REF_METADATA;
+ }
+
+@@ -288,7 +288,7 @@ static inline void btrfs_init_data_ref(struct btrfs_ref *generic_ref,
+ /* If @real_root not set, use @root as fallback */
+ if (!generic_ref->real_root)
+ generic_ref->real_root = ref_root;
+- generic_ref->data_ref.ref_root = ref_root;
++ generic_ref->data_ref.owning_root = ref_root;
+ generic_ref->data_ref.ino = ino;
+ generic_ref->data_ref.offset = offset;
+ generic_ref->type = BTRFS_REF_DATA;
+diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
+index b90e9aa24005..8419315a59ff 100644
+--- a/fs/btrfs/extent-tree.c
++++ b/fs/btrfs/extent-tree.c
+@@ -1396,7 +1396,7 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
+ ASSERT(generic_ref->type != BTRFS_REF_NOT_SET &&
+ generic_ref->action);
+ BUG_ON(generic_ref->type == BTRFS_REF_METADATA &&
+- generic_ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID);
++ generic_ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID);
+
+ if (generic_ref->type == BTRFS_REF_METADATA)
+ ret = btrfs_add_delayed_tree_ref(trans, generic_ref, NULL);
+@@ -3362,9 +3362,9 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
+ * tree, just update pinning info and exit early.
+ */
+ if ((ref->type == BTRFS_REF_METADATA &&
+- ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
++ ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) ||
+ (ref->type == BTRFS_REF_DATA &&
+- ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)) {
++ ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)) {
+ /* unlocks the pinned mutex */
+ btrfs_pin_extent(trans, ref->bytenr, ref->len, 1);
+ ret = 0;
+@@ -3375,9 +3375,9 @@ int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_ref *ref)
+ }
+
+ if (!((ref->type == BTRFS_REF_METADATA &&
+- ref->tree_ref.root == BTRFS_TREE_LOG_OBJECTID) ||
++ ref->tree_ref.owning_root == BTRFS_TREE_LOG_OBJECTID) ||
+ (ref->type == BTRFS_REF_DATA &&
+- ref->data_ref.ref_root == BTRFS_TREE_LOG_OBJECTID)))
++ ref->data_ref.owning_root == BTRFS_TREE_LOG_OBJECTID)))
+ btrfs_ref_tree_mod(fs_info, ref);
+
+ return ret;
+diff --git a/fs/btrfs/ref-verify.c b/fs/btrfs/ref-verify.c
+index d2062d5f71dd..e2b9f8616501 100644
+--- a/fs/btrfs/ref-verify.c
++++ b/fs/btrfs/ref-verify.c
+@@ -678,10 +678,10 @@ int btrfs_ref_tree_mod(struct btrfs_fs_info *fs_info,
+
+ if (generic_ref->type == BTRFS_REF_METADATA) {
+ if (!parent)
+- ref_root = generic_ref->tree_ref.root;
++ ref_root = generic_ref->tree_ref.owning_root;
+ owner = generic_ref->tree_ref.level;
+ } else if (!parent) {
+- ref_root = generic_ref->data_ref.ref_root;
++ ref_root = generic_ref->data_ref.owning_root;
+ owner = generic_ref->data_ref.ino;
+ offset = generic_ref->data_ref.offset;
+ }
+--
+2.35.1
+
--- /dev/null
+From 317a9f5090fe1046451ab5822c084c62140d4532 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Oct 2022 22:45:22 +0800
+Subject: cifs: Fix xid leak in cifs_copy_file_range()
+
+From: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+
+[ Upstream commit 9a97df404a402fe1174d2d1119f87ff2a0ca2fe9 ]
+
+If the file is used by swap, we should free the xid before returning
+-EOPNOTSUPP; otherwise, the xid is leaked.
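+
+The fix, condensed from the diff below (the xid is obtained via
+get_xid() earlier in cifs_copy_file_range(), so every early return
+needs a matching free_xid()):
+
+ if (cfile->swapfile) {
+ 	rc = -EOPNOTSUPP;
+ 	free_xid(xid);	/* was missing before this patch */
+ 	return rc;
+ }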
+
+Fixes: 4e8aea30f775 ("smb3: enable swap on SMB3 mounts")
+Reviewed-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+Signed-off-by: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/cifsfs.c | 7 +++++--
+ 1 file changed, 5 insertions(+), 2 deletions(-)
+
+diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
+index 8ec55bbd705d..668dd6a86295 100644
+--- a/fs/cifs/cifsfs.c
++++ b/fs/cifs/cifsfs.c
+@@ -1263,8 +1263,11 @@ static ssize_t cifs_copy_file_range(struct file *src_file, loff_t off,
+ ssize_t rc;
+ struct cifsFileInfo *cfile = dst_file->private_data;
+
+- if (cfile->swapfile)
+- return -EOPNOTSUPP;
++ if (cfile->swapfile) {
++ rc = -EOPNOTSUPP;
++ free_xid(xid);
++ return rc;
++ }
+
+ rc = cifs_file_copychunk_range(xid, src_file, off, dst_file, destoff,
+ len, flags);
+--
+2.35.1
+
--- /dev/null
+From 8016230d535b9f2c2a7d648419b641ec96931c89 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Oct 2022 22:45:21 +0800
+Subject: cifs: Fix xid leak in cifs_create()
+
+From: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+
+[ Upstream commit fee0fb1f15054bb6a0ede452acb42da5bef4d587 ]
+
+If cifs has already been shut down, we should free the xid before
+returning; otherwise, the xid is leaked.
+
+Fixes: 087f757b0129 ("cifs: add shutdown support")
+Reviewed-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+Signed-off-by: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/dir.c | 6 ++++--
+ 1 file changed, 4 insertions(+), 2 deletions(-)
+
+diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
+index 6e8e7cc26ae2..83c929dd6ed5 100644
+--- a/fs/cifs/dir.c
++++ b/fs/cifs/dir.c
+@@ -538,8 +538,10 @@ int cifs_create(struct user_namespace *mnt_userns, struct inode *inode,
+ cifs_dbg(FYI, "cifs_create parent inode = 0x%p name is: %pd and dentry = 0x%p\n",
+ inode, direntry, direntry);
+
+- if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb))))
+- return -EIO;
++ if (unlikely(cifs_forced_shutdown(CIFS_SB(inode->i_sb)))) {
++ rc = -EIO;
++ goto out_free_xid;
++ }
+
+ tlink = cifs_sb_tlink(CIFS_SB(inode->i_sb));
+ rc = PTR_ERR(tlink);
+--
+2.35.1
+
--- /dev/null
+From 2a39174cd12cb70f88612f1350cf96275bbb9cfc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Oct 2022 22:45:23 +0800
+Subject: cifs: Fix xid leak in cifs_flock()
+
+From: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+
+[ Upstream commit 575e079c782b9862ec2626403922d041a42e6ed6 ]
+
+If the lock is not a flock, we should free the xid before returning
+-ENOLCK; otherwise, the xid is leaked.
+
+Fixes: d0677992d2af ("cifs: add support for flock")
+Reviewed-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+Signed-off-by: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/file.c | 11 +++++++----
+ 1 file changed, 7 insertions(+), 4 deletions(-)
+
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index ffec3a2f995d..aa422348824a 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -1806,11 +1806,13 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
+ struct cifsFileInfo *cfile;
+ __u32 type;
+
+- rc = -EACCES;
+ xid = get_xid();
+
+- if (!(fl->fl_flags & FL_FLOCK))
+- return -ENOLCK;
++ if (!(fl->fl_flags & FL_FLOCK)) {
++ rc = -ENOLCK;
++ free_xid(xid);
++ return rc;
++ }
+
+ cfile = (struct cifsFileInfo *)file->private_data;
+ tcon = tlink_tcon(cfile->tlink);
+@@ -1829,8 +1831,9 @@ int cifs_flock(struct file *file, int cmd, struct file_lock *fl)
+ * if no lock or unlock then nothing to do since we do not
+ * know what it is
+ */
++ rc = -EOPNOTSUPP;
+ free_xid(xid);
+- return -EOPNOTSUPP;
++ return rc;
+ }
+
+ rc = cifs_setlk(file, fl, type, wait_flag, posix_lck, lock, unlock,
+--
+2.35.1
+
--- /dev/null
+From 4641afff5d883342e12b6a0bf92eeeb48101541c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Oct 2022 22:45:24 +0800
+Subject: cifs: Fix xid leak in cifs_ses_add_channel()
+
+From: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+
+[ Upstream commit e909d054bdea75ef1ec48c18c5936affdaecbb2c ]
+
+We should free the xid before returning; otherwise, the xid is
+leaked.
+
+Fixes: d70e9fa55884 ("cifs: try opening channels after mounting")
+Reviewed-by: Paulo Alcantara (SUSE) <pc@cjr.nz>
+Signed-off-by: Zhang Xiaoxu <zhangxiaoxu5@huawei.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/cifs/sess.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
+index 5500ea783784..0fbd0f78f361 100644
+--- a/fs/cifs/sess.c
++++ b/fs/cifs/sess.c
+@@ -320,6 +320,7 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
+ if (rc && chan->server)
+ cifs_put_tcp_session(chan->server, 0);
+
++ free_xid(xid);
+ return rc;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From b571587e408f188b6c54e08f99cba2642bc5c6df Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 16:06:39 -0400
+Subject: dlm: use __le types for dlm header
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 3428785a65dabf05bc899b6c5334984e98286184 ]
+
+This patch changes the dlm header structure to use __le types directly;
+the structure is cast at the appropriate dlm message buffer positions.
+
+The main goal is to remove sparse warnings about host to little endian
+byte order conversions (and vice versa). Leaving those sparse issues
+ignored and always converting in the out/in functions leaves it unclear
+in which byte order a variable is being handled.
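+
+With the header fields declared as __le32/__le16, every conversion now
+sits exactly where a host-order value enters or leaves the structure,
+e.g. (condensed from the diff below):
+
+ /* filling a header before sending */
+ ms->m_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+ ms->m_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
+ ms->m_header.h_length = cpu_to_le16(mb_len);
+
+ /* reading a received header */
+ lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);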
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 7175e131ebba ("fs: dlm: fix invalid derefence of sb_lvbptr")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/dir.c | 2 +-
+ fs/dlm/dlm_internal.h | 10 ++---
+ fs/dlm/lock.c | 95 +++++++++++++++++++++++--------------------
+ fs/dlm/member.c | 2 +-
+ fs/dlm/midcomms.c | 26 +++++-------
+ fs/dlm/rcom.c | 42 ++++++++++---------
+ fs/dlm/requestqueue.c | 7 ++--
+ fs/dlm/util.c | 26 ------------
+ fs/dlm/util.h | 2 -
+ 9 files changed, 97 insertions(+), 115 deletions(-)
+
+diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
+index 45ebbe602bbf..dd363c39c620 100644
+--- a/fs/dlm/dir.c
++++ b/fs/dlm/dir.c
+@@ -102,7 +102,7 @@ int dlm_recover_directory(struct dlm_ls *ls)
+ */
+
+ b = ls->ls_recover_buf->rc_buf;
+- left = ls->ls_recover_buf->rc_header.h_length;
++ left = le16_to_cpu(ls->ls_recover_buf->rc_header.h_length);
+ left -= sizeof(struct dlm_rcom);
+
+ for (;;) {
+diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
+index 5f57538b5d45..c3cfe4be436a 100644
+--- a/fs/dlm/dlm_internal.h
++++ b/fs/dlm/dlm_internal.h
+@@ -385,15 +385,15 @@ static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
+ #define DLM_FIN 5
+
+ struct dlm_header {
+- uint32_t h_version;
++ __le32 h_version;
+ union {
+ /* for DLM_MSG and DLM_RCOM */
+- uint32_t h_lockspace;
++ __le32 h_lockspace;
+ /* for DLM_ACK and DLM_OPTS */
+- uint32_t h_seq;
++ __le32 h_seq;
+ } u;
+- uint32_t h_nodeid; /* nodeid of sender */
+- uint16_t h_length;
++ __le32 h_nodeid; /* nodeid of sender */
++ __le16 h_length;
+ uint8_t h_cmd; /* DLM_MSG, DLM_RCOM */
+ uint8_t h_pad;
+ };
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index bcc7127c4c0a..859412b7a82c 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -1564,8 +1564,8 @@ static int _remove_from_waiters(struct dlm_lkb *lkb, int mstype,
+ }
+
+ log_error(ls, "remwait error %x remote %d %x msg %d flags %x no wait",
+- lkb->lkb_id, ms ? ms->m_header.h_nodeid : 0, lkb->lkb_remid,
+- mstype, lkb->lkb_flags);
++ lkb->lkb_id, ms ? le32_to_cpu(ms->m_header.h_nodeid) : 0,
++ lkb->lkb_remid, mstype, lkb->lkb_flags);
+ return -1;
+
+ out_del:
+@@ -3545,10 +3545,10 @@ static int _create_message(struct dlm_ls *ls, int mb_len,
+
+ ms = (struct dlm_message *) mb;
+
+- ms->m_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+- ms->m_header.u.h_lockspace = ls->ls_global_id;
+- ms->m_header.h_nodeid = dlm_our_nodeid();
+- ms->m_header.h_length = mb_len;
++ ms->m_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
++ ms->m_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id);
++ ms->m_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
++ ms->m_header.h_length = cpu_to_le16(mb_len);
+ ms->m_header.h_cmd = DLM_MSG;
+
+ ms->m_type = mstype;
+@@ -3842,7 +3842,7 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
+ struct dlm_rsb *r = &ls->ls_stub_rsb;
+ struct dlm_message *ms;
+ struct dlm_mhandle *mh;
+- int error, nodeid = ms_in->m_header.h_nodeid;
++ int error, nodeid = le32_to_cpu(ms_in->m_header.h_nodeid);
+
+ error = create_message(r, NULL, nodeid, DLM_MSG_LOOKUP_REPLY, &ms, &mh);
+ if (error)
+@@ -3881,7 +3881,8 @@ static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+
+ static int receive_extralen(struct dlm_message *ms)
+ {
+- return (ms->m_header.h_length - sizeof(struct dlm_message));
++ return (le16_to_cpu(ms->m_header.h_length) -
++ sizeof(struct dlm_message));
+ }
+
+ static int receive_lvb(struct dlm_ls *ls, struct dlm_lkb *lkb,
+@@ -3915,7 +3916,7 @@ static void fake_astfn(void *astparam)
+ static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ struct dlm_message *ms)
+ {
+- lkb->lkb_nodeid = ms->m_header.h_nodeid;
++ lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+ lkb->lkb_ownpid = ms->m_pid;
+ lkb->lkb_remid = ms->m_lkid;
+ lkb->lkb_grmode = DLM_LOCK_IV;
+@@ -3963,7 +3964,7 @@ static int receive_unlock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
+ {
+ struct dlm_lkb *lkb = &ls->ls_stub_lkb;
+- lkb->lkb_nodeid = ms->m_header.h_nodeid;
++ lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+ lkb->lkb_remid = ms->m_lkid;
+ }
+
+@@ -3972,7 +3973,7 @@ static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
+
+ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
+ {
+- int from = ms->m_header.h_nodeid;
++ int from = le32_to_cpu(ms->m_header.h_nodeid);
+ int error = 0;
+
+ /* currently mixing of user/kernel locks are not supported */
+@@ -4086,7 +4087,7 @@ static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
+ int from_nodeid;
+ int error, namelen = 0;
+
+- from_nodeid = ms->m_header.h_nodeid;
++ from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+
+ error = create_lkb(ls, &lkb);
+ if (error)
+@@ -4186,7 +4187,7 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
+ log_error(ls, "receive_convert %x remid %x recover_seq %llu "
+ "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
+ (unsigned long long)lkb->lkb_recover_seq,
+- ms->m_header.h_nodeid, ms->m_lkid);
++ le32_to_cpu(ms->m_header.h_nodeid), ms->m_lkid);
+ error = -ENOENT;
+ dlm_put_lkb(lkb);
+ goto fail;
+@@ -4240,7 +4241,7 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
+ if (lkb->lkb_remid != ms->m_lkid) {
+ log_error(ls, "receive_unlock %x remid %x remote %d %x",
+ lkb->lkb_id, lkb->lkb_remid,
+- ms->m_header.h_nodeid, ms->m_lkid);
++ le32_to_cpu(ms->m_header.h_nodeid), ms->m_lkid);
+ error = -ENOENT;
+ dlm_put_lkb(lkb);
+ goto fail;
+@@ -4377,7 +4378,7 @@ static void receive_lookup(struct dlm_ls *ls, struct dlm_message *ms)
+ {
+ int len, error, ret_nodeid, from_nodeid, our_nodeid;
+
+- from_nodeid = ms->m_header.h_nodeid;
++ from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+ our_nodeid = dlm_our_nodeid();
+
+ len = receive_extralen(ms);
+@@ -4400,7 +4401,7 @@ static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
+ uint32_t hash, b;
+ int rv, len, dir_nodeid, from_nodeid;
+
+- from_nodeid = ms->m_header.h_nodeid;
++ from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+
+ len = receive_extralen(ms);
+
+@@ -4491,7 +4492,7 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ struct dlm_lkb *lkb;
+ struct dlm_rsb *r;
+ int error, mstype, result;
+- int from_nodeid = ms->m_header.h_nodeid;
++ int from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+
+ error = find_lkb(ls, ms->m_remid, &lkb);
+ if (error)
+@@ -4643,8 +4644,8 @@ static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
+
+ default:
+ log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
+- lkb->lkb_id, ms->m_header.h_nodeid, ms->m_lkid,
+- ms->m_result);
++ lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
++ ms->m_lkid, ms->m_result);
+ dlm_print_rsb(r);
+ dlm_print_lkb(lkb);
+ }
+@@ -4823,8 +4824,8 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ /* This should never happen */
+ log_error(ls, "receive_lookup_reply %x from %d ret %d "
+ "master %d dir %d our %d first %x %s",
+- lkb->lkb_id, ms->m_header.h_nodeid, ret_nodeid,
+- r->res_master_nodeid, r->res_dir_nodeid,
++ lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
++ ret_nodeid, r->res_master_nodeid, r->res_dir_nodeid,
+ dlm_our_nodeid(), r->res_first_lkid, r->res_name);
+ }
+
+@@ -4836,7 +4837,7 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ } else if (ret_nodeid == -1) {
+ /* the remote node doesn't believe it's the dir node */
+ log_error(ls, "receive_lookup_reply %x from %d bad ret_nodeid",
+- lkb->lkb_id, ms->m_header.h_nodeid);
++ lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid));
+ r->res_master_nodeid = 0;
+ r->res_nodeid = -1;
+ lkb->lkb_nodeid = -1;
+@@ -4870,10 +4871,10 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
+ {
+ int error = 0, noent = 0;
+
+- if (!dlm_is_member(ls, ms->m_header.h_nodeid)) {
++ if (!dlm_is_member(ls, le32_to_cpu(ms->m_header.h_nodeid))) {
+ log_limit(ls, "receive %d from non-member %d %x %x %d",
+- ms->m_type, ms->m_header.h_nodeid, ms->m_lkid,
+- ms->m_remid, ms->m_result);
++ ms->m_type, le32_to_cpu(ms->m_header.h_nodeid),
++ ms->m_lkid, ms->m_remid, ms->m_result);
+ return;
+ }
+
+@@ -4967,11 +4968,13 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
+
+ if (error == -ENOENT && noent) {
+ log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
+- ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
++ ms->m_type, ms->m_remid,
++ le32_to_cpu(ms->m_header.h_nodeid),
+ ms->m_lkid, saved_seq);
+ } else if (error == -ENOENT) {
+ log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
+- ms->m_type, ms->m_remid, ms->m_header.h_nodeid,
++ ms->m_type, ms->m_remid,
++ le32_to_cpu(ms->m_header.h_nodeid),
+ ms->m_lkid, saved_seq);
+
+ if (ms->m_type == DLM_MSG_CONVERT)
+@@ -4981,7 +4984,7 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
+ if (error == -EINVAL) {
+ log_error(ls, "receive %d inval from %d lkid %x remid %x "
+ "saved_seq %u",
+- ms->m_type, ms->m_header.h_nodeid,
++ ms->m_type, le32_to_cpu(ms->m_header.h_nodeid),
+ ms->m_lkid, ms->m_remid, saved_seq);
+ }
+ }
+@@ -5048,18 +5051,20 @@ void dlm_receive_buffer(union dlm_packet *p, int nodeid)
+ return;
+ }
+
+- if (hd->h_nodeid != nodeid) {
++ if (le32_to_cpu(hd->h_nodeid) != nodeid) {
+ log_print("invalid h_nodeid %d from %d lockspace %x",
+- hd->h_nodeid, nodeid, hd->u.h_lockspace);
++ le32_to_cpu(hd->h_nodeid), nodeid,
++ le32_to_cpu(hd->u.h_lockspace));
+ return;
+ }
+
+- ls = dlm_find_lockspace_global(hd->u.h_lockspace);
++ ls = dlm_find_lockspace_global(le32_to_cpu(hd->u.h_lockspace));
+ if (!ls) {
+ if (dlm_config.ci_log_debug) {
+ printk_ratelimited(KERN_DEBUG "dlm: invalid lockspace "
+ "%u from %d cmd %d type %d\n",
+- hd->u.h_lockspace, nodeid, hd->h_cmd, type);
++ le32_to_cpu(hd->u.h_lockspace), nodeid,
++ hd->h_cmd, type);
+ }
+
+ if (hd->h_cmd == DLM_RCOM && type == DLM_RCOM_STATUS)
+@@ -5089,7 +5094,7 @@ static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ ms_stub->m_flags = DLM_IFL_STUB_MS;
+ ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
+ ms_stub->m_result = -EINPROGRESS;
+- ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
++ ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
+ _receive_convert_reply(lkb, ms_stub);
+
+ /* Same special case as in receive_rcom_lock_args() */
+@@ -5211,7 +5216,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+ ms_stub->m_flags = DLM_IFL_STUB_MS;
+ ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
+ ms_stub->m_result = stub_unlock_result;
+- ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
++ ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
+ _receive_unlock_reply(lkb, ms_stub);
+ dlm_put_lkb(lkb);
+ break;
+@@ -5222,7 +5227,7 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+ ms_stub->m_flags = DLM_IFL_STUB_MS;
+ ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
+ ms_stub->m_result = stub_cancel_result;
+- ms_stub->m_header.h_nodeid = lkb->lkb_nodeid;
++ ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
+ _receive_cancel_reply(lkb, ms_stub);
+ dlm_put_lkb(lkb);
+ break;
+@@ -5592,7 +5597,7 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ {
+ struct rcom_lock *rl = (struct rcom_lock *) rc->rc_buf;
+
+- lkb->lkb_nodeid = rc->rc_header.h_nodeid;
++ lkb->lkb_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
+ lkb->lkb_ownpid = le32_to_cpu(rl->rl_ownpid);
+ lkb->lkb_remid = le32_to_cpu(rl->rl_lkid);
+ lkb->lkb_exflags = le32_to_cpu(rl->rl_exflags);
+@@ -5607,8 +5612,8 @@ static int receive_rcom_lock_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ lkb->lkb_astfn = (rl->rl_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
+
+ if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
+- int lvblen = rc->rc_header.h_length - sizeof(struct dlm_rcom) -
+- sizeof(struct rcom_lock);
++ int lvblen = le16_to_cpu(rc->rc_header.h_length) -
++ sizeof(struct dlm_rcom) - sizeof(struct rcom_lock);
+ if (lvblen > ls->ls_lvblen)
+ return -EINVAL;
+ lkb->lkb_lvbptr = dlm_allocate_lvb(ls);
+@@ -5644,7 +5649,7 @@ int dlm_recover_master_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
+ struct dlm_rsb *r;
+ struct dlm_lkb *lkb;
+ uint32_t remid = 0;
+- int from_nodeid = rc->rc_header.h_nodeid;
++ int from_nodeid = le32_to_cpu(rc->rc_header.h_nodeid);
+ int error;
+
+ if (rl->rl_parent_lkid) {
+@@ -5734,7 +5739,8 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
+ error = find_lkb(ls, lkid, &lkb);
+ if (error) {
+ log_error(ls, "dlm_recover_process_copy no %x remote %d %x %d",
+- lkid, rc->rc_header.h_nodeid, remid, result);
++ lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
++ result);
+ return error;
+ }
+
+@@ -5744,7 +5750,8 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
+
+ if (!is_process_copy(lkb)) {
+ log_error(ls, "dlm_recover_process_copy bad %x remote %d %x %d",
+- lkid, rc->rc_header.h_nodeid, remid, result);
++ lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
++ result);
+ dlm_dump_rsb(r);
+ unlock_rsb(r);
+ put_rsb(r);
+@@ -5759,7 +5766,8 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
+ a barrier between recover_masters and recover_locks. */
+
+ log_debug(ls, "dlm_recover_process_copy %x remote %d %x %d",
+- lkid, rc->rc_header.h_nodeid, remid, result);
++ lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
++ result);
+
+ dlm_send_rcom_lock(r, lkb);
+ goto out;
+@@ -5769,7 +5777,8 @@ int dlm_recover_process_copy(struct dlm_ls *ls, struct dlm_rcom *rc)
+ break;
+ default:
+ log_error(ls, "dlm_recover_process_copy %x remote %d %x %d unk",
+- lkid, rc->rc_header.h_nodeid, remid, result);
++ lkid, le32_to_cpu(rc->rc_header.h_nodeid), remid,
++ result);
+ }
+
+ /* an ack for dlm_recover_locks() which waits for replies from
+diff --git a/fs/dlm/member.c b/fs/dlm/member.c
+index 731d489aa323..0dacddc63f55 100644
+--- a/fs/dlm/member.c
++++ b/fs/dlm/member.c
+@@ -20,7 +20,7 @@
+
+ int dlm_slots_version(struct dlm_header *h)
+ {
+- if ((h->h_version & 0x0000FFFF) < DLM_HEADER_SLOTS)
++ if ((le32_to_cpu(h->h_version) & 0x0000FFFF) < DLM_HEADER_SLOTS)
+ return 0;
+ return 1;
+ }
+diff --git a/fs/dlm/midcomms.c b/fs/dlm/midcomms.c
+index 7ae39ec8d9b0..a9903cdb1438 100644
+--- a/fs/dlm/midcomms.c
++++ b/fs/dlm/midcomms.c
+@@ -373,13 +373,12 @@ static int dlm_send_ack(int nodeid, uint32_t seq)
+
+ m_header = (struct dlm_header *)ppc;
+
+- m_header->h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+- m_header->h_nodeid = dlm_our_nodeid();
+- m_header->h_length = mb_len;
++ m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
++ m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
++ m_header->h_length = cpu_to_le16(mb_len);
+ m_header->h_cmd = DLM_ACK;
+- m_header->u.h_seq = seq;
++ m_header->u.h_seq = cpu_to_le32(seq);
+
+- header_out(m_header);
+ dlm_lowcomms_commit_msg(msg);
+ dlm_lowcomms_put_msg(msg);
+
+@@ -402,13 +401,11 @@ static int dlm_send_fin(struct midcomms_node *node,
+
+ m_header = (struct dlm_header *)ppc;
+
+- m_header->h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+- m_header->h_nodeid = dlm_our_nodeid();
+- m_header->h_length = mb_len;
++ m_header->h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
++ m_header->h_nodeid = cpu_to_le32(dlm_our_nodeid());
++ m_header->h_length = cpu_to_le16(mb_len);
+ m_header->h_cmd = DLM_FIN;
+
+- header_out(m_header);
+-
+ pr_debug("sending fin msg to node %d\n", node->nodeid);
+ dlm_midcomms_commit_mhandle(mh);
+ set_bit(DLM_NODE_FLAG_STOP_TX, &node->flags);
+@@ -1013,11 +1010,10 @@ static void dlm_fill_opts_header(struct dlm_opts *opts, uint16_t inner_len,
+ uint32_t seq)
+ {
+ opts->o_header.h_cmd = DLM_OPTS;
+- opts->o_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+- opts->o_header.h_nodeid = dlm_our_nodeid();
+- opts->o_header.h_length = DLM_MIDCOMMS_OPT_LEN + inner_len;
+- opts->o_header.u.h_seq = seq;
+- header_out(&opts->o_header);
++ opts->o_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
++ opts->o_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
++ opts->o_header.h_length = cpu_to_le16(DLM_MIDCOMMS_OPT_LEN + inner_len);
++ opts->o_header.u.h_seq = cpu_to_le32(seq);
+ }
+
+ static void midcomms_new_msg_cb(struct dlm_mhandle *mh)
+diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
+index 6cba86470278..b956b7e416c8 100644
+--- a/fs/dlm/rcom.c
++++ b/fs/dlm/rcom.c
+@@ -34,10 +34,10 @@ static void _create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
+
+ rc = (struct dlm_rcom *) mb;
+
+- rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+- rc->rc_header.u.h_lockspace = ls->ls_global_id;
+- rc->rc_header.h_nodeid = dlm_our_nodeid();
+- rc->rc_header.h_length = mb_len;
++ rc->rc_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
++ rc->rc_header.u.h_lockspace = cpu_to_le32(ls->ls_global_id);
++ rc->rc_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
++ rc->rc_header.h_length = cpu_to_le16(mb_len);
+ rc->rc_header.h_cmd = DLM_RCOM;
+
+ rc->rc_type = type;
+@@ -127,10 +127,10 @@ static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
+ {
+ struct rcom_config *rf = (struct rcom_config *) rc->rc_buf;
+
+- if ((rc->rc_header.h_version & 0xFFFF0000) != DLM_HEADER_MAJOR) {
++ if ((le32_to_cpu(rc->rc_header.h_version) & 0xFFFF0000) != DLM_HEADER_MAJOR) {
+ log_error(ls, "version mismatch: %x nodeid %d: %x",
+ DLM_HEADER_MAJOR | DLM_HEADER_MINOR, nodeid,
+- rc->rc_header.h_version);
++ le32_to_cpu(rc->rc_header.h_version));
+ return -EPROTO;
+ }
+
+@@ -227,7 +227,7 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+ struct dlm_rcom *rc;
+ struct rcom_status *rs;
+ uint32_t status;
+- int nodeid = rc_in->rc_header.h_nodeid;
++ int nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid);
+ int len = sizeof(struct rcom_config);
+ struct dlm_msg *msg;
+ int num_slots = 0;
+@@ -289,12 +289,14 @@ static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+ if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
+ rc_in->rc_id != ls->ls_rcom_seq) {
+ log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
+- rc_in->rc_type, rc_in->rc_header.h_nodeid,
++ rc_in->rc_type,
++ le32_to_cpu(rc_in->rc_header.h_nodeid),
+ (unsigned long long)rc_in->rc_id,
+ (unsigned long long)ls->ls_rcom_seq);
+ goto out;
+ }
+- memcpy(ls->ls_recover_buf, rc_in, rc_in->rc_header.h_length);
++ memcpy(ls->ls_recover_buf, rc_in,
++ le16_to_cpu(rc_in->rc_header.h_length));
+ set_bit(LSFL_RCOM_READY, &ls->ls_flags);
+ clear_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
+ wake_up(&ls->ls_wait_general);
+@@ -336,8 +338,9 @@ static void receive_rcom_names(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+ int error, inlen, outlen, nodeid;
+ struct dlm_msg *msg;
+
+- nodeid = rc_in->rc_header.h_nodeid;
+- inlen = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
++ nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid);
++ inlen = le16_to_cpu(rc_in->rc_header.h_length) -
++ sizeof(struct dlm_rcom);
+ outlen = DLM_MAX_APP_BUFSIZE - sizeof(struct dlm_rcom);
+
+ error = create_rcom_stateless(ls, nodeid, DLM_RCOM_NAMES_REPLY, outlen,
+@@ -375,8 +378,9 @@ static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+ {
+ struct dlm_rcom *rc;
+ struct dlm_mhandle *mh;
+- int error, ret_nodeid, nodeid = rc_in->rc_header.h_nodeid;
+- int len = rc_in->rc_header.h_length - sizeof(struct dlm_rcom);
++ int error, ret_nodeid, nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid);
++ int len = le16_to_cpu(rc_in->rc_header.h_length) -
++ sizeof(struct dlm_rcom);
+
+ /* Old code would send this special id to trigger a debug dump. */
+ if (rc_in->rc_id == 0xFFFFFFFF) {
+@@ -464,7 +468,7 @@ static void receive_rcom_lock(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+ {
+ struct dlm_rcom *rc;
+ struct dlm_mhandle *mh;
+- int error, nodeid = rc_in->rc_header.h_nodeid;
++ int error, nodeid = le32_to_cpu(rc_in->rc_header.h_nodeid);
+
+ dlm_recover_master_copy(ls, rc_in);
+
+@@ -500,10 +504,10 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
+
+ rc = (struct dlm_rcom *) mb;
+
+- rc->rc_header.h_version = (DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
++ rc->rc_header.h_version = cpu_to_le32(DLM_HEADER_MAJOR | DLM_HEADER_MINOR);
+ rc->rc_header.u.h_lockspace = rc_in->rc_header.u.h_lockspace;
+- rc->rc_header.h_nodeid = dlm_our_nodeid();
+- rc->rc_header.h_length = mb_len;
++ rc->rc_header.h_nodeid = cpu_to_le32(dlm_our_nodeid());
++ rc->rc_header.h_length = cpu_to_le16(mb_len);
+ rc->rc_header.h_cmd = DLM_RCOM;
+
+ rc->rc_type = DLM_RCOM_STATUS_REPLY;
+@@ -631,7 +635,7 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
+ break;
+
+ case DLM_RCOM_LOCK:
+- if (rc->rc_header.h_length < lock_size)
++ if (le16_to_cpu(rc->rc_header.h_length) < lock_size)
+ goto Eshort;
+ receive_rcom_lock(ls, rc);
+ break;
+@@ -649,7 +653,7 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
+ break;
+
+ case DLM_RCOM_LOCK_REPLY:
+- if (rc->rc_header.h_length < lock_size)
++ if (le16_to_cpu(rc->rc_header.h_length) < lock_size)
+ goto Eshort;
+ dlm_recover_process_copy(ls, rc);
+ break;
+diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
+index e89e0ff8bfa3..3692fbd218d5 100644
+--- a/fs/dlm/requestqueue.c
++++ b/fs/dlm/requestqueue.c
+@@ -32,7 +32,8 @@ struct rq_entry {
+ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
+ {
+ struct rq_entry *e;
+- int length = ms->m_header.h_length - sizeof(struct dlm_message);
++ int length = le16_to_cpu(ms->m_header.h_length) -
++ sizeof(struct dlm_message);
+
+ e = kmalloc(sizeof(struct rq_entry) + length, GFP_NOFS);
+ if (!e) {
+@@ -42,7 +43,7 @@ void dlm_add_requestqueue(struct dlm_ls *ls, int nodeid, struct dlm_message *ms)
+
+ e->recover_seq = ls->ls_recover_seq & 0xFFFFFFFF;
+ e->nodeid = nodeid;
+- memcpy(&e->request, ms, ms->m_header.h_length);
++ memcpy(&e->request, ms, le16_to_cpu(ms->m_header.h_length));
+
+ mutex_lock(&ls->ls_requestqueue_mutex);
+ list_add_tail(&e->list, &ls->ls_requestqueue);
+@@ -81,7 +82,7 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
+
+ log_limit(ls, "dlm_process_requestqueue msg %d from %d "
+ "lkid %x remid %x result %d seq %u",
+- ms->m_type, ms->m_header.h_nodeid,
++ ms->m_type, le32_to_cpu(ms->m_header.h_nodeid),
+ ms->m_lkid, ms->m_remid, ms->m_result,
+ e->recover_seq);
+
+diff --git a/fs/dlm/util.c b/fs/dlm/util.c
+index 58acbcc2081a..66b9a123768d 100644
+--- a/fs/dlm/util.c
++++ b/fs/dlm/util.c
+@@ -20,24 +20,6 @@
+ #define DLM_ERRNO_ETIMEDOUT 110
+ #define DLM_ERRNO_EINPROGRESS 115
+
+-void header_out(struct dlm_header *hd)
+-{
+- hd->h_version = cpu_to_le32(hd->h_version);
+- /* does it for others u32 in union as well */
+- hd->u.h_lockspace = cpu_to_le32(hd->u.h_lockspace);
+- hd->h_nodeid = cpu_to_le32(hd->h_nodeid);
+- hd->h_length = cpu_to_le16(hd->h_length);
+-}
+-
+-void header_in(struct dlm_header *hd)
+-{
+- hd->h_version = le32_to_cpu(hd->h_version);
+- /* does it for others u32 in union as well */
+- hd->u.h_lockspace = le32_to_cpu(hd->u.h_lockspace);
+- hd->h_nodeid = le32_to_cpu(hd->h_nodeid);
+- hd->h_length = le16_to_cpu(hd->h_length);
+-}
+-
+ /* higher errno values are inconsistent across architectures, so select
+ one set of values for on the wire */
+
+@@ -85,8 +67,6 @@ static int from_dlm_errno(int err)
+
+ void dlm_message_out(struct dlm_message *ms)
+ {
+- header_out(&ms->m_header);
+-
+ ms->m_type = cpu_to_le32(ms->m_type);
+ ms->m_nodeid = cpu_to_le32(ms->m_nodeid);
+ ms->m_pid = cpu_to_le32(ms->m_pid);
+@@ -109,8 +89,6 @@ void dlm_message_out(struct dlm_message *ms)
+
+ void dlm_message_in(struct dlm_message *ms)
+ {
+- header_in(&ms->m_header);
+-
+ ms->m_type = le32_to_cpu(ms->m_type);
+ ms->m_nodeid = le32_to_cpu(ms->m_nodeid);
+ ms->m_pid = le32_to_cpu(ms->m_pid);
+@@ -133,8 +111,6 @@ void dlm_message_in(struct dlm_message *ms)
+
+ void dlm_rcom_out(struct dlm_rcom *rc)
+ {
+- header_out(&rc->rc_header);
+-
+ rc->rc_type = cpu_to_le32(rc->rc_type);
+ rc->rc_result = cpu_to_le32(rc->rc_result);
+ rc->rc_id = cpu_to_le64(rc->rc_id);
+@@ -144,8 +120,6 @@ void dlm_rcom_out(struct dlm_rcom *rc)
+
+ void dlm_rcom_in(struct dlm_rcom *rc)
+ {
+- header_in(&rc->rc_header);
+-
+ rc->rc_type = le32_to_cpu(rc->rc_type);
+ rc->rc_result = le32_to_cpu(rc->rc_result);
+ rc->rc_id = le64_to_cpu(rc->rc_id);
+diff --git a/fs/dlm/util.h b/fs/dlm/util.h
+index d46f23c7a6a0..cc719ca9397e 100644
+--- a/fs/dlm/util.h
++++ b/fs/dlm/util.h
+@@ -15,8 +15,6 @@ void dlm_message_out(struct dlm_message *ms);
+ void dlm_message_in(struct dlm_message *ms);
+ void dlm_rcom_out(struct dlm_rcom *rc);
+ void dlm_rcom_in(struct dlm_rcom *rc);
+-void header_out(struct dlm_header *hd);
+-void header_in(struct dlm_header *hd);
+
+ #endif
+
+--
+2.35.1
+
--- /dev/null
+From b59651cc695c5aaaf09223589e7516043843182d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 16:06:41 -0400
+Subject: dlm: use __le types for dlm messages
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 00e99ccde75722599faf089416341bfed7e4edb5 ]
+
+This patch changes the dlm message structure to use __le types directly;
+the structure is cast at the right positions in the dlm message buffer.
+
+The main goal is to remove sparse warnings about host to little endian
+byte order conversion and vice versa. Ignoring those sparse issues and
+doing all conversion in the out/in helpers tends to leave it unclear in
+which byte order a given variable is currently held.
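+
+As an aside for reviewers (a minimal sketch, not code from this series;
+the struct and function names are made up): keeping the wire fields as
+__le32 makes sparse flag every direct assignment from a CPU-order value,
+so each conversion site becomes explicit:
+
+  #include <linux/types.h>
+  #include <asm/byteorder.h>
+
+  struct wire_msg {
+          __le32 w_type;          /* little endian on the wire */
+          __le32 w_nodeid;
+  };
+
+  static void wire_msg_fill(struct wire_msg *m, u32 type, u32 nodeid)
+  {
+          /* sparse warns if either cpu_to_le32() is dropped */
+          m->w_type = cpu_to_le32(type);
+          m->w_nodeid = cpu_to_le32(nodeid);
+  }
+
+  static u32 wire_msg_nodeid(const struct wire_msg *m)
+  {
+          /* and warns on reads that skip le32_to_cpu() */
+          return le32_to_cpu(m->w_nodeid);
+  }
+
+With plain uint32_t fields, both directions type-check silently and the
+byte order of a given value has to be tracked by convention alone.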
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 7175e131ebba ("fs: dlm: fix invalid derefence of sb_lvbptr")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/dlm_internal.h | 36 +++---
+ fs/dlm/lock.c | 276 ++++++++++++++++++++++--------------------
+ fs/dlm/requestqueue.c | 15 ++-
+ fs/dlm/util.c | 48 +-------
+ fs/dlm/util.h | 4 +-
+ 5 files changed, 174 insertions(+), 205 deletions(-)
+
+diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
+index 78704fa31a74..aa5ed57ab1ba 100644
+--- a/fs/dlm/dlm_internal.h
++++ b/fs/dlm/dlm_internal.h
+@@ -415,24 +415,24 @@ struct dlm_header {
+
+ struct dlm_message {
+ struct dlm_header m_header;
+- uint32_t m_type; /* DLM_MSG_ */
+- uint32_t m_nodeid;
+- uint32_t m_pid;
+- uint32_t m_lkid; /* lkid on sender */
+- uint32_t m_remid; /* lkid on receiver */
+- uint32_t m_parent_lkid;
+- uint32_t m_parent_remid;
+- uint32_t m_exflags;
+- uint32_t m_sbflags;
+- uint32_t m_flags;
+- uint32_t m_lvbseq;
+- uint32_t m_hash;
+- int m_status;
+- int m_grmode;
+- int m_rqmode;
+- int m_bastmode;
+- int m_asts;
+- int m_result; /* 0 or -EXXX */
++ __le32 m_type; /* DLM_MSG_ */
++ __le32 m_nodeid;
++ __le32 m_pid;
++ __le32 m_lkid; /* lkid on sender */
++ __le32 m_remid; /* lkid on receiver */
++ __le32 m_parent_lkid;
++ __le32 m_parent_remid;
++ __le32 m_exflags;
++ __le32 m_sbflags;
++ __le32 m_flags;
++ __le32 m_lvbseq;
++ __le32 m_hash;
++ __le32 m_status;
++ __le32 m_grmode;
++ __le32 m_rqmode;
++ __le32 m_bastmode;
++ __le32 m_asts;
++ __le32 m_result; /* 0 or -EXXX */
+ char m_extra[]; /* name or lvb */
+ };
+
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index fea9a7216297..a6a5fbe36ca9 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -1611,10 +1611,10 @@ static int remove_from_waiters_ms(struct dlm_lkb *lkb, struct dlm_message *ms)
+ struct dlm_ls *ls = lkb->lkb_resource->res_ls;
+ int error;
+
+- if (ms->m_flags != DLM_IFL_STUB_MS)
++ if (ms->m_flags != cpu_to_le32(DLM_IFL_STUB_MS))
+ mutex_lock(&ls->ls_waiters_mutex);
+- error = _remove_from_waiters(lkb, ms->m_type, ms);
+- if (ms->m_flags != DLM_IFL_STUB_MS)
++ error = _remove_from_waiters(lkb, le32_to_cpu(ms->m_type), ms);
++ if (ms->m_flags != cpu_to_le32(DLM_IFL_STUB_MS))
+ mutex_unlock(&ls->ls_waiters_mutex);
+ return error;
+ }
+@@ -2041,7 +2041,7 @@ static void set_lvb_lock_pc(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ if (len > r->res_ls->ls_lvblen)
+ len = r->res_ls->ls_lvblen;
+ memcpy(lkb->lkb_lvbptr, ms->m_extra, len);
+- lkb->lkb_lvbseq = ms->m_lvbseq;
++ lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq);
+ }
+ }
+
+@@ -2172,10 +2172,10 @@ static void munge_demoted(struct dlm_lkb *lkb)
+
+ static void munge_altmode(struct dlm_lkb *lkb, struct dlm_message *ms)
+ {
+- if (ms->m_type != DLM_MSG_REQUEST_REPLY &&
+- ms->m_type != DLM_MSG_GRANT) {
++ if (ms->m_type != cpu_to_le32(DLM_MSG_REQUEST_REPLY) &&
++ ms->m_type != cpu_to_le32(DLM_MSG_GRANT)) {
+ log_print("munge_altmode %x invalid reply type %d",
+- lkb->lkb_id, ms->m_type);
++ lkb->lkb_id, le32_to_cpu(ms->m_type));
+ return;
+ }
+
+@@ -3551,7 +3551,7 @@ static int _create_message(struct dlm_ls *ls, int mb_len,
+ ms->m_header.h_length = cpu_to_le16(mb_len);
+ ms->m_header.h_cmd = DLM_MSG;
+
+- ms->m_type = mstype;
++ ms->m_type = cpu_to_le32(mstype);
+
+ *mh_ret = mh;
+ *ms_ret = ms;
+@@ -3590,7 +3590,6 @@ static int create_message(struct dlm_rsb *r, struct dlm_lkb *lkb,
+
+ static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
+ {
+- dlm_message_out(ms);
+ dlm_midcomms_commit_mhandle(mh);
+ return 0;
+ }
+@@ -3598,40 +3597,40 @@ static int send_message(struct dlm_mhandle *mh, struct dlm_message *ms)
+ static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ struct dlm_message *ms)
+ {
+- ms->m_nodeid = lkb->lkb_nodeid;
+- ms->m_pid = lkb->lkb_ownpid;
+- ms->m_lkid = lkb->lkb_id;
+- ms->m_remid = lkb->lkb_remid;
+- ms->m_exflags = lkb->lkb_exflags;
+- ms->m_sbflags = lkb->lkb_sbflags;
+- ms->m_flags = lkb->lkb_flags;
+- ms->m_lvbseq = lkb->lkb_lvbseq;
+- ms->m_status = lkb->lkb_status;
+- ms->m_grmode = lkb->lkb_grmode;
+- ms->m_rqmode = lkb->lkb_rqmode;
+- ms->m_hash = r->res_hash;
++ ms->m_nodeid = cpu_to_le32(lkb->lkb_nodeid);
++ ms->m_pid = cpu_to_le32(lkb->lkb_ownpid);
++ ms->m_lkid = cpu_to_le32(lkb->lkb_id);
++ ms->m_remid = cpu_to_le32(lkb->lkb_remid);
++ ms->m_exflags = cpu_to_le32(lkb->lkb_exflags);
++ ms->m_sbflags = cpu_to_le32(lkb->lkb_sbflags);
++ ms->m_flags = cpu_to_le32(lkb->lkb_flags);
++ ms->m_lvbseq = cpu_to_le32(lkb->lkb_lvbseq);
++ ms->m_status = cpu_to_le32(lkb->lkb_status);
++ ms->m_grmode = cpu_to_le32(lkb->lkb_grmode);
++ ms->m_rqmode = cpu_to_le32(lkb->lkb_rqmode);
++ ms->m_hash = cpu_to_le32(r->res_hash);
+
+ /* m_result and m_bastmode are set from function args,
+ not from lkb fields */
+
+ if (lkb->lkb_bastfn)
+- ms->m_asts |= DLM_CB_BAST;
++ ms->m_asts |= cpu_to_le32(DLM_CB_BAST);
+ if (lkb->lkb_astfn)
+- ms->m_asts |= DLM_CB_CAST;
++ ms->m_asts |= cpu_to_le32(DLM_CB_CAST);
+
+ /* compare with switch in create_message; send_remove() doesn't
+ use send_args() */
+
+ switch (ms->m_type) {
+- case DLM_MSG_REQUEST:
+- case DLM_MSG_LOOKUP:
++ case cpu_to_le32(DLM_MSG_REQUEST):
++ case cpu_to_le32(DLM_MSG_LOOKUP):
+ memcpy(ms->m_extra, r->res_name, r->res_length);
+ break;
+- case DLM_MSG_CONVERT:
+- case DLM_MSG_UNLOCK:
+- case DLM_MSG_REQUEST_REPLY:
+- case DLM_MSG_CONVERT_REPLY:
+- case DLM_MSG_GRANT:
++ case cpu_to_le32(DLM_MSG_CONVERT):
++ case cpu_to_le32(DLM_MSG_UNLOCK):
++ case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
++ case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
++ case cpu_to_le32(DLM_MSG_GRANT):
+ if (!lkb->lkb_lvbptr)
+ break;
+ memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
+@@ -3681,8 +3680,8 @@ static int send_convert(struct dlm_rsb *r, struct dlm_lkb *lkb)
+ /* down conversions go without a reply from the master */
+ if (!error && down_conversion(lkb)) {
+ remove_from_waiters(lkb, DLM_MSG_CONVERT_REPLY);
+- r->res_ls->ls_stub_ms.m_flags = DLM_IFL_STUB_MS;
+- r->res_ls->ls_stub_ms.m_type = DLM_MSG_CONVERT_REPLY;
++ r->res_ls->ls_stub_ms.m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
++ r->res_ls->ls_stub_ms.m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY);
+ r->res_ls->ls_stub_ms.m_result = 0;
+ __receive_convert_reply(r, lkb, &r->res_ls->ls_stub_ms);
+ }
+@@ -3739,7 +3738,7 @@ static int send_bast(struct dlm_rsb *r, struct dlm_lkb *lkb, int mode)
+
+ send_args(r, lkb, ms);
+
+- ms->m_bastmode = mode;
++ ms->m_bastmode = cpu_to_le32(mode);
+
+ error = send_message(mh, ms);
+ out:
+@@ -3787,7 +3786,7 @@ static int send_remove(struct dlm_rsb *r)
+ goto out;
+
+ memcpy(ms->m_extra, r->res_name, r->res_length);
+- ms->m_hash = r->res_hash;
++ ms->m_hash = cpu_to_le32(r->res_hash);
+
+ error = send_message(mh, ms);
+ out:
+@@ -3809,7 +3808,7 @@ static int send_common_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
+
+ send_args(r, lkb, ms);
+
+- ms->m_result = rv;
++ ms->m_result = cpu_to_le32(to_dlm_errno(rv));
+
+ error = send_message(mh, ms);
+ out:
+@@ -3849,8 +3848,8 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
+ goto out;
+
+ ms->m_lkid = ms_in->m_lkid;
+- ms->m_result = rv;
+- ms->m_nodeid = ret_nodeid;
++ ms->m_result = cpu_to_le32(to_dlm_errno(rv));
++ ms->m_nodeid = cpu_to_le32(ret_nodeid);
+
+ error = send_message(mh, ms);
+ out:
+@@ -3863,20 +3862,20 @@ static int send_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms_in,
+
+ static void receive_flags(struct dlm_lkb *lkb, struct dlm_message *ms)
+ {
+- lkb->lkb_exflags = ms->m_exflags;
+- lkb->lkb_sbflags = ms->m_sbflags;
++ lkb->lkb_exflags = le32_to_cpu(ms->m_exflags);
++ lkb->lkb_sbflags = le32_to_cpu(ms->m_sbflags);
+ lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
+- (ms->m_flags & 0x0000FFFF);
++ (le32_to_cpu(ms->m_flags) & 0x0000FFFF);
+ }
+
+ static void receive_flags_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+ {
+- if (ms->m_flags == DLM_IFL_STUB_MS)
++ if (ms->m_flags == cpu_to_le32(DLM_IFL_STUB_MS))
+ return;
+
+- lkb->lkb_sbflags = ms->m_sbflags;
++ lkb->lkb_sbflags = le32_to_cpu(ms->m_sbflags);
+ lkb->lkb_flags = (lkb->lkb_flags & 0xFFFF0000) |
+- (ms->m_flags & 0x0000FFFF);
++ (le32_to_cpu(ms->m_flags) & 0x0000FFFF);
+ }
+
+ static int receive_extralen(struct dlm_message *ms)
+@@ -3917,13 +3916,13 @@ static int receive_request_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ struct dlm_message *ms)
+ {
+ lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+- lkb->lkb_ownpid = ms->m_pid;
+- lkb->lkb_remid = ms->m_lkid;
++ lkb->lkb_ownpid = le32_to_cpu(ms->m_pid);
++ lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
+ lkb->lkb_grmode = DLM_LOCK_IV;
+- lkb->lkb_rqmode = ms->m_rqmode;
++ lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode);
+
+- lkb->lkb_bastfn = (ms->m_asts & DLM_CB_BAST) ? &fake_bastfn : NULL;
+- lkb->lkb_astfn = (ms->m_asts & DLM_CB_CAST) ? &fake_astfn : NULL;
++ lkb->lkb_bastfn = (ms->m_asts & cpu_to_le32(DLM_CB_BAST)) ? &fake_bastfn : NULL;
++ lkb->lkb_astfn = (ms->m_asts & cpu_to_le32(DLM_CB_CAST)) ? &fake_astfn : NULL;
+
+ if (lkb->lkb_exflags & DLM_LKF_VALBLK) {
+ /* lkb was just created so there won't be an lvb yet */
+@@ -3944,8 +3943,8 @@ static int receive_convert_args(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ if (receive_lvb(ls, lkb, ms))
+ return -ENOMEM;
+
+- lkb->lkb_rqmode = ms->m_rqmode;
+- lkb->lkb_lvbseq = ms->m_lvbseq;
++ lkb->lkb_rqmode = le32_to_cpu(ms->m_rqmode);
++ lkb->lkb_lvbseq = le32_to_cpu(ms->m_lvbseq);
+
+ return 0;
+ }
+@@ -3965,7 +3964,7 @@ static void setup_stub_lkb(struct dlm_ls *ls, struct dlm_message *ms)
+ {
+ struct dlm_lkb *lkb = &ls->ls_stub_lkb;
+ lkb->lkb_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+- lkb->lkb_remid = ms->m_lkid;
++ lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
+ }
+
+ /* This is called after the rsb is locked so that we can safely inspect
+@@ -3977,7 +3976,8 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
+ int error = 0;
+
+ /* currently mixing of user/kernel locks are not supported */
+- if (ms->m_flags & DLM_IFL_USER && ~lkb->lkb_flags & DLM_IFL_USER) {
++ if (ms->m_flags & cpu_to_le32(DLM_IFL_USER) &&
++ ~lkb->lkb_flags & DLM_IFL_USER) {
+ log_error(lkb->lkb_resource->res_ls,
+ "got user dlm message for a kernel lock");
+ error = -EINVAL;
+@@ -3985,23 +3985,23 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
+ }
+
+ switch (ms->m_type) {
+- case DLM_MSG_CONVERT:
+- case DLM_MSG_UNLOCK:
+- case DLM_MSG_CANCEL:
++ case cpu_to_le32(DLM_MSG_CONVERT):
++ case cpu_to_le32(DLM_MSG_UNLOCK):
++ case cpu_to_le32(DLM_MSG_CANCEL):
+ if (!is_master_copy(lkb) || lkb->lkb_nodeid != from)
+ error = -EINVAL;
+ break;
+
+- case DLM_MSG_CONVERT_REPLY:
+- case DLM_MSG_UNLOCK_REPLY:
+- case DLM_MSG_CANCEL_REPLY:
+- case DLM_MSG_GRANT:
+- case DLM_MSG_BAST:
++ case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
++ case cpu_to_le32(DLM_MSG_UNLOCK_REPLY):
++ case cpu_to_le32(DLM_MSG_CANCEL_REPLY):
++ case cpu_to_le32(DLM_MSG_GRANT):
++ case cpu_to_le32(DLM_MSG_BAST):
+ if (!is_process_copy(lkb) || lkb->lkb_nodeid != from)
+ error = -EINVAL;
+ break;
+
+- case DLM_MSG_REQUEST_REPLY:
++ case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
+ if (!is_process_copy(lkb))
+ error = -EINVAL;
+ else if (lkb->lkb_nodeid != -1 && lkb->lkb_nodeid != from)
+@@ -4016,8 +4016,8 @@ static int validate_message(struct dlm_lkb *lkb, struct dlm_message *ms)
+ if (error)
+ log_error(lkb->lkb_resource->res_ls,
+ "ignore invalid message %d from %d %x %x %x %d",
+- ms->m_type, from, lkb->lkb_id, lkb->lkb_remid,
+- lkb->lkb_flags, lkb->lkb_nodeid);
++ le32_to_cpu(ms->m_type), from, lkb->lkb_id,
++ lkb->lkb_remid, lkb->lkb_flags, lkb->lkb_nodeid);
+ return error;
+ }
+
+@@ -4069,7 +4069,7 @@ static void send_repeat_remove(struct dlm_ls *ls, char *ms_name, int len)
+ goto out;
+
+ memcpy(ms->m_extra, name, len);
+- ms->m_hash = hash;
++ ms->m_hash = cpu_to_le32(hash);
+
+ send_message(mh, ms);
+
+@@ -4160,7 +4160,7 @@ static int receive_request(struct dlm_ls *ls, struct dlm_message *ms)
+
+ if (error != -ENOTBLK) {
+ log_limit(ls, "receive_request %x from %d %d",
+- ms->m_lkid, from_nodeid, error);
++ le32_to_cpu(ms->m_lkid), from_nodeid, error);
+ }
+
+ if (namelen && error == -EBADR) {
+@@ -4179,15 +4179,16 @@ static int receive_convert(struct dlm_ls *ls, struct dlm_message *ms)
+ struct dlm_rsb *r;
+ int error, reply = 1;
+
+- error = find_lkb(ls, ms->m_remid, &lkb);
++ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
+ if (error)
+ goto fail;
+
+- if (lkb->lkb_remid != ms->m_lkid) {
++ if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) {
+ log_error(ls, "receive_convert %x remid %x recover_seq %llu "
+ "remote %d %x", lkb->lkb_id, lkb->lkb_remid,
+ (unsigned long long)lkb->lkb_recover_seq,
+- le32_to_cpu(ms->m_header.h_nodeid), ms->m_lkid);
++ le32_to_cpu(ms->m_header.h_nodeid),
++ le32_to_cpu(ms->m_lkid));
+ error = -ENOENT;
+ dlm_put_lkb(lkb);
+ goto fail;
+@@ -4234,14 +4235,15 @@ static int receive_unlock(struct dlm_ls *ls, struct dlm_message *ms)
+ struct dlm_rsb *r;
+ int error;
+
+- error = find_lkb(ls, ms->m_remid, &lkb);
++ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
+ if (error)
+ goto fail;
+
+- if (lkb->lkb_remid != ms->m_lkid) {
++ if (lkb->lkb_remid != le32_to_cpu(ms->m_lkid)) {
+ log_error(ls, "receive_unlock %x remid %x remote %d %x",
+ lkb->lkb_id, lkb->lkb_remid,
+- le32_to_cpu(ms->m_header.h_nodeid), ms->m_lkid);
++ le32_to_cpu(ms->m_header.h_nodeid),
++ le32_to_cpu(ms->m_lkid));
+ error = -ENOENT;
+ dlm_put_lkb(lkb);
+ goto fail;
+@@ -4285,7 +4287,7 @@ static int receive_cancel(struct dlm_ls *ls, struct dlm_message *ms)
+ struct dlm_rsb *r;
+ int error;
+
+- error = find_lkb(ls, ms->m_remid, &lkb);
++ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
+ if (error)
+ goto fail;
+
+@@ -4321,7 +4323,7 @@ static int receive_grant(struct dlm_ls *ls, struct dlm_message *ms)
+ struct dlm_rsb *r;
+ int error;
+
+- error = find_lkb(ls, ms->m_remid, &lkb);
++ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
+ if (error)
+ return error;
+
+@@ -4352,7 +4354,7 @@ static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
+ struct dlm_rsb *r;
+ int error;
+
+- error = find_lkb(ls, ms->m_remid, &lkb);
++ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
+ if (error)
+ return error;
+
+@@ -4365,8 +4367,8 @@ static int receive_bast(struct dlm_ls *ls, struct dlm_message *ms)
+ if (error)
+ goto out;
+
+- queue_bast(r, lkb, ms->m_bastmode);
+- lkb->lkb_highbast = ms->m_bastmode;
++ queue_bast(r, lkb, le32_to_cpu(ms->m_bastmode));
++ lkb->lkb_highbast = le32_to_cpu(ms->m_bastmode);
+ out:
+ unlock_rsb(r);
+ put_rsb(r);
+@@ -4411,7 +4413,7 @@ static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
+ return;
+ }
+
+- dir_nodeid = dlm_hash2nodeid(ls, ms->m_hash);
++ dir_nodeid = dlm_hash2nodeid(ls, le32_to_cpu(ms->m_hash));
+ if (dir_nodeid != dlm_our_nodeid()) {
+ log_error(ls, "receive_remove from %d bad nodeid %d",
+ from_nodeid, dir_nodeid);
+@@ -4484,7 +4486,7 @@ static void receive_remove(struct dlm_ls *ls, struct dlm_message *ms)
+
+ static void receive_purge(struct dlm_ls *ls, struct dlm_message *ms)
+ {
+- do_purge(ls, ms->m_nodeid, ms->m_pid);
++ do_purge(ls, le32_to_cpu(ms->m_nodeid), le32_to_cpu(ms->m_pid));
+ }
+
+ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
+@@ -4494,7 +4496,7 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ int error, mstype, result;
+ int from_nodeid = le32_to_cpu(ms->m_header.h_nodeid);
+
+- error = find_lkb(ls, ms->m_remid, &lkb);
++ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
+ if (error)
+ return error;
+
+@@ -4510,7 +4512,8 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ error = remove_from_waiters(lkb, DLM_MSG_REQUEST_REPLY);
+ if (error) {
+ log_error(ls, "receive_request_reply %x remote %d %x result %d",
+- lkb->lkb_id, from_nodeid, ms->m_lkid, ms->m_result);
++ lkb->lkb_id, from_nodeid, le32_to_cpu(ms->m_lkid),
++ from_dlm_errno(le32_to_cpu(ms->m_result)));
+ dlm_dump_rsb(r);
+ goto out;
+ }
+@@ -4524,7 +4527,7 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ }
+
+ /* this is the value returned from do_request() on the master */
+- result = ms->m_result;
++ result = from_dlm_errno(le32_to_cpu(ms->m_result));
+
+ switch (result) {
+ case -EAGAIN:
+@@ -4538,7 +4541,7 @@ static int receive_request_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ case 0:
+ /* request was queued or granted on remote master */
+ receive_flags_reply(lkb, ms);
+- lkb->lkb_remid = ms->m_lkid;
++ lkb->lkb_remid = le32_to_cpu(ms->m_lkid);
+ if (is_altmode(lkb))
+ munge_altmode(lkb, ms);
+ if (result) {
+@@ -4611,7 +4614,7 @@ static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ struct dlm_message *ms)
+ {
+ /* this is the value returned from do_convert() on the master */
+- switch (ms->m_result) {
++ switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
+ case -EAGAIN:
+ /* convert would block (be queued) on remote master */
+ queue_cast(r, lkb, -EAGAIN);
+@@ -4645,7 +4648,8 @@ static void __receive_convert_reply(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ default:
+ log_error(r->res_ls, "receive_convert_reply %x remote %d %x %d",
+ lkb->lkb_id, le32_to_cpu(ms->m_header.h_nodeid),
+- ms->m_lkid, ms->m_result);
++ le32_to_cpu(ms->m_lkid),
++ from_dlm_errno(le32_to_cpu(ms->m_result)));
+ dlm_print_rsb(r);
+ dlm_print_lkb(lkb);
+ }
+@@ -4679,7 +4683,7 @@ static int receive_convert_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ struct dlm_lkb *lkb;
+ int error;
+
+- error = find_lkb(ls, ms->m_remid, &lkb);
++ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
+ if (error)
+ return error;
+
+@@ -4707,7 +4711,7 @@ static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+
+ /* this is the value returned from do_unlock() on the master */
+
+- switch (ms->m_result) {
++ switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
+ case -DLM_EUNLOCK:
+ receive_flags_reply(lkb, ms);
+ remove_lock_pc(r, lkb);
+@@ -4717,7 +4721,7 @@ static void _receive_unlock_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+ break;
+ default:
+ log_error(r->res_ls, "receive_unlock_reply %x error %d",
+- lkb->lkb_id, ms->m_result);
++ lkb->lkb_id, from_dlm_errno(le32_to_cpu(ms->m_result)));
+ }
+ out:
+ unlock_rsb(r);
+@@ -4729,7 +4733,7 @@ static int receive_unlock_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ struct dlm_lkb *lkb;
+ int error;
+
+- error = find_lkb(ls, ms->m_remid, &lkb);
++ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
+ if (error)
+ return error;
+
+@@ -4757,7 +4761,7 @@ static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+
+ /* this is the value returned from do_cancel() on the master */
+
+- switch (ms->m_result) {
++ switch (from_dlm_errno(le32_to_cpu(ms->m_result))) {
+ case -DLM_ECANCEL:
+ receive_flags_reply(lkb, ms);
+ revert_lock_pc(r, lkb);
+@@ -4767,7 +4771,8 @@ static void _receive_cancel_reply(struct dlm_lkb *lkb, struct dlm_message *ms)
+ break;
+ default:
+ log_error(r->res_ls, "receive_cancel_reply %x error %d",
+- lkb->lkb_id, ms->m_result);
++ lkb->lkb_id,
++ from_dlm_errno(le32_to_cpu(ms->m_result)));
+ }
+ out:
+ unlock_rsb(r);
+@@ -4779,7 +4784,7 @@ static int receive_cancel_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ struct dlm_lkb *lkb;
+ int error;
+
+- error = find_lkb(ls, ms->m_remid, &lkb);
++ error = find_lkb(ls, le32_to_cpu(ms->m_remid), &lkb);
+ if (error)
+ return error;
+
+@@ -4795,9 +4800,10 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ int error, ret_nodeid;
+ int do_lookup_list = 0;
+
+- error = find_lkb(ls, ms->m_lkid, &lkb);
++ error = find_lkb(ls, le32_to_cpu(ms->m_lkid), &lkb);
+ if (error) {
+- log_error(ls, "receive_lookup_reply no lkid %x", ms->m_lkid);
++ log_error(ls, "%s no lkid %x", __func__,
++ le32_to_cpu(ms->m_lkid));
+ return;
+ }
+
+@@ -4812,7 +4818,7 @@ static void receive_lookup_reply(struct dlm_ls *ls, struct dlm_message *ms)
+ if (error)
+ goto out;
+
+- ret_nodeid = ms->m_nodeid;
++ ret_nodeid = le32_to_cpu(ms->m_nodeid);
+
+ /* We sometimes receive a request from the dir node for this
+ rsb before we've received the dir node's loookup_reply for it.
+@@ -4873,8 +4879,10 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
+
+ if (!dlm_is_member(ls, le32_to_cpu(ms->m_header.h_nodeid))) {
+ log_limit(ls, "receive %d from non-member %d %x %x %d",
+- ms->m_type, le32_to_cpu(ms->m_header.h_nodeid),
+- ms->m_lkid, ms->m_remid, ms->m_result);
++ le32_to_cpu(ms->m_type),
++ le32_to_cpu(ms->m_header.h_nodeid),
++ le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
++ from_dlm_errno(le32_to_cpu(ms->m_result)));
+ return;
+ }
+
+@@ -4882,77 +4890,78 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
+
+ /* messages sent to a master node */
+
+- case DLM_MSG_REQUEST:
++ case cpu_to_le32(DLM_MSG_REQUEST):
+ error = receive_request(ls, ms);
+ break;
+
+- case DLM_MSG_CONVERT:
++ case cpu_to_le32(DLM_MSG_CONVERT):
+ error = receive_convert(ls, ms);
+ break;
+
+- case DLM_MSG_UNLOCK:
++ case cpu_to_le32(DLM_MSG_UNLOCK):
+ error = receive_unlock(ls, ms);
+ break;
+
+- case DLM_MSG_CANCEL:
++ case cpu_to_le32(DLM_MSG_CANCEL):
+ noent = 1;
+ error = receive_cancel(ls, ms);
+ break;
+
+ /* messages sent from a master node (replies to above) */
+
+- case DLM_MSG_REQUEST_REPLY:
++ case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
+ error = receive_request_reply(ls, ms);
+ break;
+
+- case DLM_MSG_CONVERT_REPLY:
++ case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
+ error = receive_convert_reply(ls, ms);
+ break;
+
+- case DLM_MSG_UNLOCK_REPLY:
++ case cpu_to_le32(DLM_MSG_UNLOCK_REPLY):
+ error = receive_unlock_reply(ls, ms);
+ break;
+
+- case DLM_MSG_CANCEL_REPLY:
++ case cpu_to_le32(DLM_MSG_CANCEL_REPLY):
+ error = receive_cancel_reply(ls, ms);
+ break;
+
+ /* messages sent from a master node (only two types of async msg) */
+
+- case DLM_MSG_GRANT:
++ case cpu_to_le32(DLM_MSG_GRANT):
+ noent = 1;
+ error = receive_grant(ls, ms);
+ break;
+
+- case DLM_MSG_BAST:
++ case cpu_to_le32(DLM_MSG_BAST):
+ noent = 1;
+ error = receive_bast(ls, ms);
+ break;
+
+ /* messages sent to a dir node */
+
+- case DLM_MSG_LOOKUP:
++ case cpu_to_le32(DLM_MSG_LOOKUP):
+ receive_lookup(ls, ms);
+ break;
+
+- case DLM_MSG_REMOVE:
++ case cpu_to_le32(DLM_MSG_REMOVE):
+ receive_remove(ls, ms);
+ break;
+
+ /* messages sent from a dir node (remove has no reply) */
+
+- case DLM_MSG_LOOKUP_REPLY:
++ case cpu_to_le32(DLM_MSG_LOOKUP_REPLY):
+ receive_lookup_reply(ls, ms);
+ break;
+
+ /* other messages */
+
+- case DLM_MSG_PURGE:
++ case cpu_to_le32(DLM_MSG_PURGE):
+ receive_purge(ls, ms);
+ break;
+
+ default:
+- log_error(ls, "unknown message type %d", ms->m_type);
++ log_error(ls, "unknown message type %d",
++ le32_to_cpu(ms->m_type));
+ }
+
+ /*
+@@ -4968,24 +4977,26 @@ static void _receive_message(struct dlm_ls *ls, struct dlm_message *ms,
+
+ if (error == -ENOENT && noent) {
+ log_debug(ls, "receive %d no %x remote %d %x saved_seq %u",
+- ms->m_type, ms->m_remid,
++ le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid),
+ le32_to_cpu(ms->m_header.h_nodeid),
+- ms->m_lkid, saved_seq);
++ le32_to_cpu(ms->m_lkid), saved_seq);
+ } else if (error == -ENOENT) {
+ log_error(ls, "receive %d no %x remote %d %x saved_seq %u",
+- ms->m_type, ms->m_remid,
++ le32_to_cpu(ms->m_type), le32_to_cpu(ms->m_remid),
+ le32_to_cpu(ms->m_header.h_nodeid),
+- ms->m_lkid, saved_seq);
++ le32_to_cpu(ms->m_lkid), saved_seq);
+
+- if (ms->m_type == DLM_MSG_CONVERT)
+- dlm_dump_rsb_hash(ls, ms->m_hash);
++ if (ms->m_type == cpu_to_le32(DLM_MSG_CONVERT))
++ dlm_dump_rsb_hash(ls, le32_to_cpu(ms->m_hash));
+ }
+
+ if (error == -EINVAL) {
+ log_error(ls, "receive %d inval from %d lkid %x remid %x "
+ "saved_seq %u",
+- ms->m_type, le32_to_cpu(ms->m_header.h_nodeid),
+- ms->m_lkid, ms->m_remid, saved_seq);
++ le32_to_cpu(ms->m_type),
++ le32_to_cpu(ms->m_header.h_nodeid),
++ le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
++ saved_seq);
+ }
+ }
+
+@@ -5006,7 +5017,7 @@ static void dlm_receive_message(struct dlm_ls *ls, struct dlm_message *ms,
+ lockspace generation before we left. */
+ if (!ls->ls_generation) {
+ log_limit(ls, "receive %d from %d ignore old gen",
+- ms->m_type, nodeid);
++ le32_to_cpu(ms->m_type), nodeid);
+ return;
+ }
+
+@@ -5039,8 +5050,7 @@ void dlm_receive_buffer(union dlm_packet *p, int nodeid)
+
+ switch (hd->h_cmd) {
+ case DLM_MSG:
+- dlm_message_in(&p->message);
+- type = p->message.m_type;
++ type = le32_to_cpu(p->message.m_type);
+ break;
+ case DLM_RCOM:
+ type = le32_to_cpu(p->rcom.rc_type);
+@@ -5090,9 +5100,9 @@ static void recover_convert_waiter(struct dlm_ls *ls, struct dlm_lkb *lkb,
+ if (middle_conversion(lkb)) {
+ hold_lkb(lkb);
+ memset(ms_stub, 0, sizeof(struct dlm_message));
+- ms_stub->m_flags = DLM_IFL_STUB_MS;
+- ms_stub->m_type = DLM_MSG_CONVERT_REPLY;
+- ms_stub->m_result = -EINPROGRESS;
++ ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
++ ms_stub->m_type = cpu_to_le32(DLM_MSG_CONVERT_REPLY);
++ ms_stub->m_result = cpu_to_le32(to_dlm_errno(-EINPROGRESS));
+ ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
+ _receive_convert_reply(lkb, ms_stub);
+
+@@ -5212,9 +5222,9 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+ case DLM_MSG_UNLOCK:
+ hold_lkb(lkb);
+ memset(ms_stub, 0, sizeof(struct dlm_message));
+- ms_stub->m_flags = DLM_IFL_STUB_MS;
+- ms_stub->m_type = DLM_MSG_UNLOCK_REPLY;
+- ms_stub->m_result = stub_unlock_result;
++ ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
++ ms_stub->m_type = cpu_to_le32(DLM_MSG_UNLOCK_REPLY);
++ ms_stub->m_result = cpu_to_le32(to_dlm_errno(stub_unlock_result));
+ ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
+ _receive_unlock_reply(lkb, ms_stub);
+ dlm_put_lkb(lkb);
+@@ -5223,9 +5233,9 @@ void dlm_recover_waiters_pre(struct dlm_ls *ls)
+ case DLM_MSG_CANCEL:
+ hold_lkb(lkb);
+ memset(ms_stub, 0, sizeof(struct dlm_message));
+- ms_stub->m_flags = DLM_IFL_STUB_MS;
+- ms_stub->m_type = DLM_MSG_CANCEL_REPLY;
+- ms_stub->m_result = stub_cancel_result;
++ ms_stub->m_flags = cpu_to_le32(DLM_IFL_STUB_MS);
++ ms_stub->m_type = cpu_to_le32(DLM_MSG_CANCEL_REPLY);
++ ms_stub->m_result = cpu_to_le32(to_dlm_errno(stub_cancel_result));
+ ms_stub->m_header.h_nodeid = cpu_to_le32(lkb->lkb_nodeid);
+ _receive_cancel_reply(lkb, ms_stub);
+ dlm_put_lkb(lkb);
+@@ -6302,8 +6312,8 @@ static int send_purge(struct dlm_ls *ls, int nodeid, int pid)
+ DLM_MSG_PURGE, &ms, &mh);
+ if (error)
+ return error;
+- ms->m_nodeid = nodeid;
+- ms->m_pid = pid;
++ ms->m_nodeid = cpu_to_le32(nodeid);
++ ms->m_pid = cpu_to_le32(pid);
+
+ return send_message(mh, ms);
+ }
+diff --git a/fs/dlm/requestqueue.c b/fs/dlm/requestqueue.c
+index 3692fbd218d5..ad27d2642c82 100644
+--- a/fs/dlm/requestqueue.c
++++ b/fs/dlm/requestqueue.c
+@@ -14,6 +14,7 @@
+ #include "dir.h"
+ #include "config.h"
+ #include "requestqueue.h"
++#include "util.h"
+
+ struct rq_entry {
+ struct list_head list;
+@@ -82,8 +83,10 @@ int dlm_process_requestqueue(struct dlm_ls *ls)
+
+ log_limit(ls, "dlm_process_requestqueue msg %d from %d "
+ "lkid %x remid %x result %d seq %u",
+- ms->m_type, le32_to_cpu(ms->m_header.h_nodeid),
+- ms->m_lkid, ms->m_remid, ms->m_result,
++ le32_to_cpu(ms->m_type),
++ le32_to_cpu(ms->m_header.h_nodeid),
++ le32_to_cpu(ms->m_lkid), le32_to_cpu(ms->m_remid),
++ from_dlm_errno(le32_to_cpu(ms->m_result)),
+ e->recover_seq);
+
+ dlm_receive_message_saved(ls, &e->request, e->recover_seq);
+@@ -128,7 +131,7 @@ void dlm_wait_requestqueue(struct dlm_ls *ls)
+
+ static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
+ {
+- uint32_t type = ms->m_type;
++ __le32 type = ms->m_type;
+
+ /* the ls is being cleaned up and freed by release_lockspace */
+ if (!ls->ls_count)
+@@ -140,9 +143,9 @@ static int purge_request(struct dlm_ls *ls, struct dlm_message *ms, int nodeid)
+ /* directory operations are always purged because the directory is
+ always rebuilt during recovery and the lookups resent */
+
+- if (type == DLM_MSG_REMOVE ||
+- type == DLM_MSG_LOOKUP ||
+- type == DLM_MSG_LOOKUP_REPLY)
++ if (type == cpu_to_le32(DLM_MSG_REMOVE) ||
++ type == cpu_to_le32(DLM_MSG_LOOKUP) ||
++ type == cpu_to_le32(DLM_MSG_LOOKUP_REPLY))
+ return 1;
+
+ if (!dlm_no_directory(ls))
+diff --git a/fs/dlm/util.c b/fs/dlm/util.c
+index 657dbed1bd60..f2bc401f312f 100644
+--- a/fs/dlm/util.c
++++ b/fs/dlm/util.c
+@@ -23,7 +23,7 @@
+ /* higher errno values are inconsistent across architectures, so select
+ one set of values for on the wire */
+
+-static int to_dlm_errno(int err)
++int to_dlm_errno(int err)
+ {
+ switch (err) {
+ case -EDEADLK:
+@@ -44,7 +44,7 @@ static int to_dlm_errno(int err)
+ return err;
+ }
+
+-static int from_dlm_errno(int err)
++int from_dlm_errno(int err)
+ {
+ switch (err) {
+ case -DLM_ERRNO_EDEADLK:
+@@ -64,47 +64,3 @@ static int from_dlm_errno(int err)
+ }
+ return err;
+ }
+-
+-void dlm_message_out(struct dlm_message *ms)
+-{
+- ms->m_type = cpu_to_le32(ms->m_type);
+- ms->m_nodeid = cpu_to_le32(ms->m_nodeid);
+- ms->m_pid = cpu_to_le32(ms->m_pid);
+- ms->m_lkid = cpu_to_le32(ms->m_lkid);
+- ms->m_remid = cpu_to_le32(ms->m_remid);
+- ms->m_parent_lkid = cpu_to_le32(ms->m_parent_lkid);
+- ms->m_parent_remid = cpu_to_le32(ms->m_parent_remid);
+- ms->m_exflags = cpu_to_le32(ms->m_exflags);
+- ms->m_sbflags = cpu_to_le32(ms->m_sbflags);
+- ms->m_flags = cpu_to_le32(ms->m_flags);
+- ms->m_lvbseq = cpu_to_le32(ms->m_lvbseq);
+- ms->m_hash = cpu_to_le32(ms->m_hash);
+- ms->m_status = cpu_to_le32(ms->m_status);
+- ms->m_grmode = cpu_to_le32(ms->m_grmode);
+- ms->m_rqmode = cpu_to_le32(ms->m_rqmode);
+- ms->m_bastmode = cpu_to_le32(ms->m_bastmode);
+- ms->m_asts = cpu_to_le32(ms->m_asts);
+- ms->m_result = cpu_to_le32(to_dlm_errno(ms->m_result));
+-}
+-
+-void dlm_message_in(struct dlm_message *ms)
+-{
+- ms->m_type = le32_to_cpu(ms->m_type);
+- ms->m_nodeid = le32_to_cpu(ms->m_nodeid);
+- ms->m_pid = le32_to_cpu(ms->m_pid);
+- ms->m_lkid = le32_to_cpu(ms->m_lkid);
+- ms->m_remid = le32_to_cpu(ms->m_remid);
+- ms->m_parent_lkid = le32_to_cpu(ms->m_parent_lkid);
+- ms->m_parent_remid = le32_to_cpu(ms->m_parent_remid);
+- ms->m_exflags = le32_to_cpu(ms->m_exflags);
+- ms->m_sbflags = le32_to_cpu(ms->m_sbflags);
+- ms->m_flags = le32_to_cpu(ms->m_flags);
+- ms->m_lvbseq = le32_to_cpu(ms->m_lvbseq);
+- ms->m_hash = le32_to_cpu(ms->m_hash);
+- ms->m_status = le32_to_cpu(ms->m_status);
+- ms->m_grmode = le32_to_cpu(ms->m_grmode);
+- ms->m_rqmode = le32_to_cpu(ms->m_rqmode);
+- ms->m_bastmode = le32_to_cpu(ms->m_bastmode);
+- ms->m_asts = le32_to_cpu(ms->m_asts);
+- ms->m_result = from_dlm_errno(le32_to_cpu(ms->m_result));
+-}
+diff --git a/fs/dlm/util.h b/fs/dlm/util.h
+index cd099c4f5d6a..b6a4b8adca8d 100644
+--- a/fs/dlm/util.h
++++ b/fs/dlm/util.h
+@@ -11,8 +11,8 @@
+ #ifndef __UTIL_DOT_H__
+ #define __UTIL_DOT_H__
+
+-void dlm_message_out(struct dlm_message *ms);
+-void dlm_message_in(struct dlm_message *ms);
++int to_dlm_errno(int err);
++int from_dlm_errno(int err);
+
+ #endif
+
+--
+2.35.1
+
--- /dev/null
+From b8f2c1902d3baf578d94e7003f691dbcefef4916 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 4 Apr 2022 16:06:40 -0400
+Subject: dlm: use __le types for rcom messages
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 2f9dbeda8dc04b5b754e032000adf6bab03aa9be ]
+
+This patch changes the dlm rcom structure to use __le types directly;
+the structure is cast at the right positions in the dlm message buffer.
+
+The main goal is to remove sparse warnings about host to little endian
+byte order conversion and vice versa. Ignoring those sparse issues and
+doing all conversion in the out/in helpers tends to leave it unclear in
+which byte order a given variable is currently held.
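+
+One pattern worth noting in the hunks below (sketched here with
+hypothetical names, not taken from the patch): because cpu_to_le32() on
+a compile-time constant folds to a constant expression, dispatch can
+stay in wire byte order instead of converting the field up front:
+
+  #include <linux/types.h>
+  #include <asm/byteorder.h>
+
+  #define EX_RCOM_STATUS 1        /* hypothetical type values */
+  #define EX_RCOM_REPLY  2
+
+  static int ex_dispatch(__le32 wire_type)
+  {
+          /* the case labels are constant expressions; any byte swap
+           * happens at compile time on the constant, never at run
+           * time on the field */
+          switch (wire_type) {
+          case cpu_to_le32(EX_RCOM_STATUS):
+                  return 0;
+          case cpu_to_le32(EX_RCOM_REPLY):
+                  return 1;
+          default:
+                  return -1;
+          }
+  }
+
+This is why the switch statements below compare rc->rc_type against
+cpu_to_le32(DLM_RCOM_*) labels rather than converting rc_type once.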
+
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Stable-dep-of: 7175e131ebba ("fs: dlm: fix invalid derefence of sb_lvbptr")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/dlm_internal.h | 10 +++---
+ fs/dlm/lock.c | 3 +-
+ fs/dlm/member.c | 9 ++---
+ fs/dlm/rcom.c | 80 +++++++++++++++++++++----------------------
+ fs/dlm/recover.c | 10 +++---
+ fs/dlm/util.c | 18 ----------
+ fs/dlm/util.h | 2 --
+ 7 files changed, 52 insertions(+), 80 deletions(-)
+
+diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
+index c3cfe4be436a..78704fa31a74 100644
+--- a/fs/dlm/dlm_internal.h
++++ b/fs/dlm/dlm_internal.h
+@@ -457,11 +457,11 @@ struct dlm_message {
+
+ struct dlm_rcom {
+ struct dlm_header rc_header;
+- uint32_t rc_type; /* DLM_RCOM_ */
+- int rc_result; /* multi-purpose */
+- uint64_t rc_id; /* match reply with request */
+- uint64_t rc_seq; /* sender's ls_recover_seq */
+- uint64_t rc_seq_reply; /* remote ls_recover_seq */
++ __le32 rc_type; /* DLM_RCOM_ */
++ __le32 rc_result; /* multi-purpose */
++ __le64 rc_id; /* match reply with request */
++ __le64 rc_seq; /* sender's ls_recover_seq */
++ __le64 rc_seq_reply; /* remote ls_recover_seq */
+ char rc_buf[];
+ };
+
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index 859412b7a82c..fea9a7216297 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -5043,8 +5043,7 @@ void dlm_receive_buffer(union dlm_packet *p, int nodeid)
+ type = p->message.m_type;
+ break;
+ case DLM_RCOM:
+- dlm_rcom_in(&p->rcom);
+- type = p->rcom.rc_type;
++ type = le32_to_cpu(p->rcom.rc_type);
+ break;
+ default:
+ log_print("invalid h_cmd %d from %u", hd->h_cmd, nodeid);
+diff --git a/fs/dlm/member.c b/fs/dlm/member.c
+index 0dacddc63f55..a6ff21430863 100644
+--- a/fs/dlm/member.c
++++ b/fs/dlm/member.c
+@@ -120,18 +120,13 @@ int dlm_slots_copy_in(struct dlm_ls *ls)
+
+ ro0 = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));
+
+- for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
+- ro->ro_nodeid = le32_to_cpu(ro->ro_nodeid);
+- ro->ro_slot = le16_to_cpu(ro->ro_slot);
+- }
+-
+ log_slots(ls, gen, num_slots, ro0, NULL, 0);
+
+ list_for_each_entry(memb, &ls->ls_nodes, list) {
+ for (i = 0, ro = ro0; i < num_slots; i++, ro++) {
+- if (ro->ro_nodeid != memb->nodeid)
++ if (le32_to_cpu(ro->ro_nodeid) != memb->nodeid)
+ continue;
+- memb->slot = ro->ro_slot;
++ memb->slot = le16_to_cpu(ro->ro_slot);
+ memb->slot_prev = memb->slot;
+ break;
+ }
+diff --git a/fs/dlm/rcom.c b/fs/dlm/rcom.c
+index b956b7e416c8..125557e18525 100644
+--- a/fs/dlm/rcom.c
++++ b/fs/dlm/rcom.c
+@@ -40,10 +40,10 @@ static void _create_rcom(struct dlm_ls *ls, int to_nodeid, int type, int len,
+ rc->rc_header.h_length = cpu_to_le16(mb_len);
+ rc->rc_header.h_cmd = DLM_RCOM;
+
+- rc->rc_type = type;
++ rc->rc_type = cpu_to_le32(type);
+
+ spin_lock(&ls->ls_recover_lock);
+- rc->rc_seq = ls->ls_recover_seq;
++ rc->rc_seq = cpu_to_le64(ls->ls_recover_seq);
+ spin_unlock(&ls->ls_recover_lock);
+
+ *rc_ret = rc;
+@@ -91,13 +91,11 @@ static int create_rcom_stateless(struct dlm_ls *ls, int to_nodeid, int type,
+
+ static void send_rcom(struct dlm_mhandle *mh, struct dlm_rcom *rc)
+ {
+- dlm_rcom_out(rc);
+ dlm_midcomms_commit_mhandle(mh);
+ }
+
+ static void send_rcom_stateless(struct dlm_msg *msg, struct dlm_rcom *rc)
+ {
+- dlm_rcom_out(rc);
+ dlm_lowcomms_commit_msg(msg);
+ dlm_lowcomms_put_msg(msg);
+ }
+@@ -145,10 +143,10 @@ static int check_rcom_config(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
+ return 0;
+ }
+
+-static void allow_sync_reply(struct dlm_ls *ls, uint64_t *new_seq)
++static void allow_sync_reply(struct dlm_ls *ls, __le64 *new_seq)
+ {
+ spin_lock(&ls->ls_rcom_spin);
+- *new_seq = ++ls->ls_rcom_seq;
++ *new_seq = cpu_to_le64(++ls->ls_rcom_seq);
+ set_bit(LSFL_RCOM_WAIT, &ls->ls_flags);
+ spin_unlock(&ls->ls_rcom_spin);
+ }
+@@ -182,7 +180,7 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags)
+
+ if (nodeid == dlm_our_nodeid()) {
+ rc = ls->ls_recover_buf;
+- rc->rc_result = dlm_recover_status(ls);
++ rc->rc_result = cpu_to_le32(dlm_recover_status(ls));
+ goto out;
+ }
+
+@@ -208,7 +206,7 @@ int dlm_rcom_status(struct dlm_ls *ls, int nodeid, uint32_t status_flags)
+
+ rc = ls->ls_recover_buf;
+
+- if (rc->rc_result == -ESRCH) {
++ if (rc->rc_result == cpu_to_le32(-ESRCH)) {
+ /* we pretend the remote lockspace exists with 0 status */
+ log_debug(ls, "remote node %d not ready", nodeid);
+ rc->rc_result = 0;
+@@ -259,7 +257,7 @@ static void receive_rcom_status(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+
+ rc->rc_id = rc_in->rc_id;
+ rc->rc_seq_reply = rc_in->rc_seq;
+- rc->rc_result = status;
++ rc->rc_result = cpu_to_le32(status);
+
+ set_rcom_config(ls, (struct rcom_config *)rc->rc_buf, num_slots);
+
+@@ -287,11 +285,11 @@ static void receive_sync_reply(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+ {
+ spin_lock(&ls->ls_rcom_spin);
+ if (!test_bit(LSFL_RCOM_WAIT, &ls->ls_flags) ||
+- rc_in->rc_id != ls->ls_rcom_seq) {
++ le64_to_cpu(rc_in->rc_id) != ls->ls_rcom_seq) {
+ log_debug(ls, "reject reply %d from %d seq %llx expect %llx",
+- rc_in->rc_type,
++ le32_to_cpu(rc_in->rc_type),
+ le32_to_cpu(rc_in->rc_header.h_nodeid),
+- (unsigned long long)rc_in->rc_id,
++ (unsigned long long)le64_to_cpu(rc_in->rc_id),
+ (unsigned long long)ls->ls_rcom_seq);
+ goto out;
+ }
+@@ -367,7 +365,7 @@ int dlm_send_rcom_lookup(struct dlm_rsb *r, int dir_nodeid)
+ if (error)
+ goto out;
+ memcpy(rc->rc_buf, r->res_name, r->res_length);
+- rc->rc_id = (unsigned long) r->res_id;
++ rc->rc_id = cpu_to_le64(r->res_id);
+
+ send_rcom(mh, rc);
+ out:
+@@ -383,7 +381,7 @@ static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+ sizeof(struct dlm_rcom);
+
+ /* Old code would send this special id to trigger a debug dump. */
+- if (rc_in->rc_id == 0xFFFFFFFF) {
++ if (rc_in->rc_id == cpu_to_le64(0xFFFFFFFF)) {
+ log_error(ls, "receive_rcom_lookup dump from %d", nodeid);
+ dlm_dump_rsb_name(ls, rc_in->rc_buf, len);
+ return;
+@@ -397,7 +395,7 @@ static void receive_rcom_lookup(struct dlm_ls *ls, struct dlm_rcom *rc_in)
+ DLM_LU_RECOVER_MASTER, &ret_nodeid, NULL);
+ if (error)
+ ret_nodeid = error;
+- rc->rc_result = ret_nodeid;
++ rc->rc_result = cpu_to_le32(ret_nodeid);
+ rc->rc_id = rc_in->rc_id;
+ rc->rc_seq_reply = rc_in->rc_seq;
+
+@@ -456,7 +454,7 @@ int dlm_send_rcom_lock(struct dlm_rsb *r, struct dlm_lkb *lkb)
+
+ rl = (struct rcom_lock *) rc->rc_buf;
+ pack_rcom_lock(r, lkb, rl);
+- rc->rc_id = (unsigned long) r;
++ rc->rc_id = cpu_to_le64(r);
+
+ send_rcom(mh, rc);
+ out:
+@@ -510,15 +508,14 @@ int dlm_send_ls_not_ready(int nodeid, struct dlm_rcom *rc_in)
+ rc->rc_header.h_length = cpu_to_le16(mb_len);
+ rc->rc_header.h_cmd = DLM_RCOM;
+
+- rc->rc_type = DLM_RCOM_STATUS_REPLY;
++ rc->rc_type = cpu_to_le32(DLM_RCOM_STATUS_REPLY);
+ rc->rc_id = rc_in->rc_id;
+ rc->rc_seq_reply = rc_in->rc_seq;
+- rc->rc_result = -ESRCH;
++ rc->rc_result = cpu_to_le32(-ESRCH);
+
+ rf = (struct rcom_config *) rc->rc_buf;
+ rf->rf_lvblen = cpu_to_le32(~0U);
+
+- dlm_rcom_out(rc);
+ dlm_midcomms_commit_mhandle(mh);
+
+ return 0;
+@@ -577,27 +574,27 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
+ uint64_t seq;
+
+ switch (rc->rc_type) {
+- case DLM_RCOM_STATUS_REPLY:
++ case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
+ reply = 1;
+ break;
+- case DLM_RCOM_NAMES:
++ case cpu_to_le32(DLM_RCOM_NAMES):
+ names = 1;
+ break;
+- case DLM_RCOM_NAMES_REPLY:
++ case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
+ names = 1;
+ reply = 1;
+ break;
+- case DLM_RCOM_LOOKUP:
++ case cpu_to_le32(DLM_RCOM_LOOKUP):
+ lookup = 1;
+ break;
+- case DLM_RCOM_LOOKUP_REPLY:
++ case cpu_to_le32(DLM_RCOM_LOOKUP_REPLY):
+ lookup = 1;
+ reply = 1;
+ break;
+- case DLM_RCOM_LOCK:
++ case cpu_to_le32(DLM_RCOM_LOCK):
+ lock = 1;
+ break;
+- case DLM_RCOM_LOCK_REPLY:
++ case cpu_to_le32(DLM_RCOM_LOCK_REPLY):
+ lock = 1;
+ reply = 1;
+ break;
+@@ -609,10 +606,10 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
+ seq = ls->ls_recover_seq;
+ spin_unlock(&ls->ls_recover_lock);
+
+- if (stop && (rc->rc_type != DLM_RCOM_STATUS))
++ if (stop && (rc->rc_type != cpu_to_le32(DLM_RCOM_STATUS)))
+ goto ignore;
+
+- if (reply && (rc->rc_seq_reply != seq))
++ if (reply && (le64_to_cpu(rc->rc_seq_reply) != seq))
+ goto ignore;
+
+ if (!(status & DLM_RS_NODES) && (names || lookup || lock))
+@@ -622,59 +619,60 @@ void dlm_receive_rcom(struct dlm_ls *ls, struct dlm_rcom *rc, int nodeid)
+ goto ignore;
+
+ switch (rc->rc_type) {
+- case DLM_RCOM_STATUS:
++ case cpu_to_le32(DLM_RCOM_STATUS):
+ receive_rcom_status(ls, rc);
+ break;
+
+- case DLM_RCOM_NAMES:
++ case cpu_to_le32(DLM_RCOM_NAMES):
+ receive_rcom_names(ls, rc);
+ break;
+
+- case DLM_RCOM_LOOKUP:
++ case cpu_to_le32(DLM_RCOM_LOOKUP):
+ receive_rcom_lookup(ls, rc);
+ break;
+
+- case DLM_RCOM_LOCK:
++ case cpu_to_le32(DLM_RCOM_LOCK):
+ if (le16_to_cpu(rc->rc_header.h_length) < lock_size)
+ goto Eshort;
+ receive_rcom_lock(ls, rc);
+ break;
+
+- case DLM_RCOM_STATUS_REPLY:
++ case cpu_to_le32(DLM_RCOM_STATUS_REPLY):
+ receive_sync_reply(ls, rc);
+ break;
+
+- case DLM_RCOM_NAMES_REPLY:
++ case cpu_to_le32(DLM_RCOM_NAMES_REPLY):
+ receive_sync_reply(ls, rc);
+ break;
+
+- case DLM_RCOM_LOOKUP_REPLY:
++ case cpu_to_le32(DLM_RCOM_LOOKUP_REPLY):
+ receive_rcom_lookup_reply(ls, rc);
+ break;
+
+- case DLM_RCOM_LOCK_REPLY:
++ case cpu_to_le32(DLM_RCOM_LOCK_REPLY):
+ if (le16_to_cpu(rc->rc_header.h_length) < lock_size)
+ goto Eshort;
+ dlm_recover_process_copy(ls, rc);
+ break;
+
+ default:
+- log_error(ls, "receive_rcom bad type %d", rc->rc_type);
++ log_error(ls, "receive_rcom bad type %d",
++ le32_to_cpu(rc->rc_type));
+ }
+ return;
+
+ ignore:
+ log_limit(ls, "dlm_receive_rcom ignore msg %d "
+ "from %d %llu %llu recover seq %llu sts %x gen %u",
+- rc->rc_type,
++ le32_to_cpu(rc->rc_type),
+ nodeid,
+- (unsigned long long)rc->rc_seq,
+- (unsigned long long)rc->rc_seq_reply,
++ (unsigned long long)le64_to_cpu(rc->rc_seq),
++ (unsigned long long)le64_to_cpu(rc->rc_seq_reply),
+ (unsigned long long)seq,
+ status, ls->ls_generation);
+ return;
+ Eshort:
+ log_error(ls, "recovery message %d from %d is too short",
+- rc->rc_type, nodeid);
++ le32_to_cpu(rc->rc_type), nodeid);
+ }
+
+diff --git a/fs/dlm/recover.c b/fs/dlm/recover.c
+index 8928e99dfd47..dfd504bf1ecf 100644
+--- a/fs/dlm/recover.c
++++ b/fs/dlm/recover.c
+@@ -114,7 +114,7 @@ static int wait_status_all(struct dlm_ls *ls, uint32_t wait_status,
+ if (save_slots)
+ dlm_slot_save(ls, rc, memb);
+
+- if (rc->rc_result & wait_status)
++ if (le32_to_cpu(rc->rc_result) & wait_status)
+ break;
+ if (delay < 1000)
+ delay += 20;
+@@ -141,7 +141,7 @@ static int wait_status_low(struct dlm_ls *ls, uint32_t wait_status,
+ if (error)
+ break;
+
+- if (rc->rc_result & wait_status)
++ if (le32_to_cpu(rc->rc_result) & wait_status)
+ break;
+ if (delay < 1000)
+ delay += 20;
+@@ -568,14 +568,14 @@ int dlm_recover_master_reply(struct dlm_ls *ls, struct dlm_rcom *rc)
+ struct dlm_rsb *r;
+ int ret_nodeid, new_master;
+
+- r = recover_idr_find(ls, rc->rc_id);
++ r = recover_idr_find(ls, le64_to_cpu(rc->rc_id));
+ if (!r) {
+ log_error(ls, "dlm_recover_master_reply no id %llx",
+- (unsigned long long)rc->rc_id);
++ (unsigned long long)le64_to_cpu(rc->rc_id));
+ goto out;
+ }
+
+- ret_nodeid = rc->rc_result;
++ ret_nodeid = le32_to_cpu(rc->rc_result);
+
+ if (ret_nodeid == dlm_our_nodeid())
+ new_master = 0;
+diff --git a/fs/dlm/util.c b/fs/dlm/util.c
+index 66b9a123768d..657dbed1bd60 100644
+--- a/fs/dlm/util.c
++++ b/fs/dlm/util.c
+@@ -108,21 +108,3 @@ void dlm_message_in(struct dlm_message *ms)
+ ms->m_asts = le32_to_cpu(ms->m_asts);
+ ms->m_result = from_dlm_errno(le32_to_cpu(ms->m_result));
+ }
+-
+-void dlm_rcom_out(struct dlm_rcom *rc)
+-{
+- rc->rc_type = cpu_to_le32(rc->rc_type);
+- rc->rc_result = cpu_to_le32(rc->rc_result);
+- rc->rc_id = cpu_to_le64(rc->rc_id);
+- rc->rc_seq = cpu_to_le64(rc->rc_seq);
+- rc->rc_seq_reply = cpu_to_le64(rc->rc_seq_reply);
+-}
+-
+-void dlm_rcom_in(struct dlm_rcom *rc)
+-{
+- rc->rc_type = le32_to_cpu(rc->rc_type);
+- rc->rc_result = le32_to_cpu(rc->rc_result);
+- rc->rc_id = le64_to_cpu(rc->rc_id);
+- rc->rc_seq = le64_to_cpu(rc->rc_seq);
+- rc->rc_seq_reply = le64_to_cpu(rc->rc_seq_reply);
+-}
+diff --git a/fs/dlm/util.h b/fs/dlm/util.h
+index cc719ca9397e..cd099c4f5d6a 100644
+--- a/fs/dlm/util.h
++++ b/fs/dlm/util.h
+@@ -13,8 +13,6 @@
+
+ void dlm_message_out(struct dlm_message *ms);
+ void dlm_message_in(struct dlm_message *ms);
+-void dlm_rcom_out(struct dlm_rcom *rc);
+-void dlm_rcom_in(struct dlm_rcom *rc);
+
+ #endif
+
+--
+2.35.1
+
--- /dev/null
+From e4142b2345f60860028b94e8930df3656f8e6acf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 29 Sep 2022 16:20:36 +0800
+Subject: dm: remove unnecessary assignment statement in alloc_dev()
+
+From: Genjian Zhang <zhanggenjian@kylinos.cn>
+
+[ Upstream commit 99f4f5bcb975527508eb7a5e3e34bdb91d576746 ]
+
+Fixes: 74fe6ba923949 ("dm: convert to blk_alloc_disk/blk_cleanup_disk")
+Signed-off-by: Genjian Zhang <zhanggenjian@kylinos.cn>
+Signed-off-by: Mike Snitzer <snitzer@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/md/dm.c | 1 -
+ 1 file changed, 1 deletion(-)
+
+diff --git a/drivers/md/dm.c b/drivers/md/dm.c
+index 41d2e1285c07..9dd2c2da075d 100644
+--- a/drivers/md/dm.c
++++ b/drivers/md/dm.c
+@@ -1797,7 +1797,6 @@ static struct mapped_device *alloc_dev(int minor)
+ md->disk->first_minor = minor;
+ md->disk->minors = 1;
+ md->disk->fops = &dm_blk_dops;
+- md->disk->queue = md->queue;
+ md->disk->private_data = md;
+ sprintf(md->disk->disk_name, "dm-%d", minor);
+
+--
+2.35.1
+
--- /dev/null
+From c05a282e8fd0cf9ee7f350e8f6c6debb1ee45160 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 7 Oct 2022 20:31:49 +0530
+Subject: drm/amd/display: explicitly disable psr_feature_enable appropriately
+
+From: Shirish S <shirish.s@amd.com>
+
+[ Upstream commit 6094b9136ca9038b61e9c4b5d25cd5512ce50b34 ]
+
+[Why]
+If psr_feature_enable is set to true by default, it remains enabled
+for non-capable links.
+
+[How]
+Explicitly disable the feature on links that do not support it.
+
+Fixes: 8c322309e48e9 ("drm/amd/display: Enable PSR")
+Signed-off-by: Shirish S <shirish.s@amd.com>
+Reviewed-by: Leo Li <sunpeng.li@amd.com>
+Reviewed-by: Mario Limonciello <mario.limonciello@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org # 5.15+
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 8 ++++++--
+ 1 file changed, 6 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+index 39eb6452b986..393369aa3d03 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+@@ -101,11 +101,15 @@ static bool link_supports_psrsu(struct dc_link *link)
+ */
+ void amdgpu_dm_set_psr_caps(struct dc_link *link)
+ {
+- if (!(link->connector_signal & SIGNAL_TYPE_EDP))
++ if (!(link->connector_signal & SIGNAL_TYPE_EDP)) {
++ link->psr_settings.psr_feature_enabled = false;
+ return;
++ }
+
+- if (link->type == dc_connection_none)
++ if (link->type == dc_connection_none) {
++ link->psr_settings.psr_feature_enabled = false;
+ return;
++ }
+
+ if (!link_get_psr_caps(link)) {
+ DRM_ERROR("amdgpu: Failed to read PSR Caps!\n");
+--
+2.35.1
+
--- /dev/null
+From 5e41019c79e6b8d506c859ca4c15c3fba06d5a29 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Dec 2021 15:04:02 -0800
+Subject: drm/amd/display: parse and check PSR SU caps
+
+From: Mikita Lipski <mikita.lipski@amd.com>
+
+[ Upstream commit cd9a0d026baa10c75688908556b3af218bc4ddad ]
+
+[why]
+Add a function to read PSR capabilities
+and ALPM capabilities.
+Also add a helper function to validate whether
+the sink and the driver support PSR SU.
+[how]
+- isolated all PSR and ALPM reading calls into a separate function
+- set all required PSR caps
+- added a helper function to check if PSR SU is supported by the sink
+and the driver
+
+Reviewed-by: Roman Li <Roman.Li@amd.com>
+Reviewed-by: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Acked-by: Pavle Kotarac <Pavle.Kotarac@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Mikita Lipski <mikita.lipski@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Stable-dep-of: 6094b9136ca9 ("drm/amd/display: explicitly disable psr_feature_enable appropriately")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 101 +++++++++++++++---
+ drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 9 ++
+ drivers/gpu/drm/amd/display/dc/dc_types.h | 1 +
+ 3 files changed, 98 insertions(+), 13 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+index 7072fb2ec07f..39eb6452b986 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+@@ -26,6 +26,73 @@
+ #include "amdgpu_dm_psr.h"
+ #include "dc.h"
+ #include "dm_helpers.h"
++#include "amdgpu_dm.h"
++
++static bool link_get_psr_caps(struct dc_link *link)
++{
++ uint8_t psr_dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
++ uint8_t edp_rev_dpcd_data;
++
++
++
++ if (!dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
++ psr_dpcd_data, sizeof(psr_dpcd_data)))
++ return false;
++
++ if (!dm_helpers_dp_read_dpcd(NULL, link, DP_EDP_DPCD_REV,
++ &edp_rev_dpcd_data, sizeof(edp_rev_dpcd_data)))
++ return false;
++
++ link->dpcd_caps.psr_caps.psr_version = psr_dpcd_data[0];
++ link->dpcd_caps.psr_caps.edp_revision = edp_rev_dpcd_data;
++
++#ifdef CONFIG_DRM_AMD_DC_DCN
++ if (link->dpcd_caps.psr_caps.psr_version > 0x1) {
++ uint8_t alpm_dpcd_data;
++ uint8_t su_granularity_dpcd_data;
++
++ if (!dm_helpers_dp_read_dpcd(NULL, link, DP_RECEIVER_ALPM_CAP,
++ &alpm_dpcd_data, sizeof(alpm_dpcd_data)))
++ return false;
++
++ if (!dm_helpers_dp_read_dpcd(NULL, link, DP_PSR2_SU_Y_GRANULARITY,
++ &su_granularity_dpcd_data, sizeof(su_granularity_dpcd_data)))
++ return false;
++
++ link->dpcd_caps.psr_caps.y_coordinate_required = psr_dpcd_data[1] & DP_PSR2_SU_Y_COORDINATE_REQUIRED;
++ link->dpcd_caps.psr_caps.su_granularity_required = psr_dpcd_data[1] & DP_PSR2_SU_GRANULARITY_REQUIRED;
++
++ link->dpcd_caps.psr_caps.alpm_cap = alpm_dpcd_data & DP_ALPM_CAP;
++ link->dpcd_caps.psr_caps.standby_support = alpm_dpcd_data & (1 << 1);
++
++ link->dpcd_caps.psr_caps.su_y_granularity = su_granularity_dpcd_data;
++ }
++#endif
++ return true;
++}
++
++#ifdef CONFIG_DRM_AMD_DC_DCN
++static bool link_supports_psrsu(struct dc_link *link)
++{
++ struct dc *dc = link->ctx->dc;
++
++ if (!dc->caps.dmcub_support)
++ return false;
++
++ if (dc->ctx->dce_version < DCN_VERSION_3_1)
++ return false;
++
++ if (!link->dpcd_caps.psr_caps.alpm_cap ||
++ !link->dpcd_caps.psr_caps.y_coordinate_required)
++ return false;
++
++ if (link->dpcd_caps.psr_caps.su_granularity_required &&
++ !link->dpcd_caps.psr_caps.su_y_granularity)
++ return false;
++
++ return true;
++}
++#endif
+
+ /*
+ * amdgpu_dm_set_psr_caps() - set link psr capabilities
+@@ -34,26 +101,34 @@
+ */
+ void amdgpu_dm_set_psr_caps(struct dc_link *link)
+ {
+- uint8_t dpcd_data[EDP_PSR_RECEIVER_CAP_SIZE];
+-
+ if (!(link->connector_signal & SIGNAL_TYPE_EDP))
+ return;
++
+ if (link->type == dc_connection_none)
+ return;
+- if (dm_helpers_dp_read_dpcd(NULL, link, DP_PSR_SUPPORT,
+- dpcd_data, sizeof(dpcd_data))) {
+- link->dpcd_caps.psr_caps.psr_version = dpcd_data[0];
+-
+- if (dpcd_data[0] == 0) {
+- link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
+- link->psr_settings.psr_feature_enabled = false;
+- } else {
++
++ if (!link_get_psr_caps(link)) {
++ DRM_ERROR("amdgpu: Failed to read PSR Caps!\n");
++ return;
++ }
++
++ if (link->dpcd_caps.psr_caps.psr_version == 0) {
++ link->psr_settings.psr_version = DC_PSR_VERSION_UNSUPPORTED;
++ link->psr_settings.psr_feature_enabled = false;
++
++ } else {
++#ifdef CONFIG_DRM_AMD_DC_DCN
++ if (link_supports_psrsu(link))
++ link->psr_settings.psr_version = DC_PSR_VERSION_SU_1;
++ else
++#endif
+ link->psr_settings.psr_version = DC_PSR_VERSION_1;
+- link->psr_settings.psr_feature_enabled = true;
+- }
+
+- DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
++ link->psr_settings.psr_feature_enabled = true;
+ }
++
++ DRM_INFO("PSR support:%d\n", link->psr_settings.psr_feature_enabled);
++
+ }
+
+ /*
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+index 4f54bde1bb1c..6d0dc178fe53 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h
+@@ -756,6 +756,15 @@ struct psr_caps {
+ unsigned char psr_version;
+ unsigned int psr_rfb_setup_time;
+ bool psr_exit_link_training_required;
++ unsigned char edp_revision;
++ unsigned char support_ver;
++ bool su_granularity_required;
++ bool y_coordinate_required;
++ uint8_t su_y_granularity;
++ bool alpm_cap;
++ bool standby_support;
++ uint8_t rate_control_caps;
++ unsigned int psr_power_opt_flag;
+ };
+
+ #endif /* DC_DP_TYPES_H */
+diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
+index c1532930169b..018e132dbedf 100644
+--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
++++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
+@@ -928,6 +928,7 @@ enum dc_gpu_mem_alloc_type {
+
+ enum dc_psr_version {
+ DC_PSR_VERSION_1 = 0,
++ DC_PSR_VERSION_SU_1 = 1,
+ DC_PSR_VERSION_UNSUPPORTED = 0xFFFFFFFF,
+ };
+
+--
+2.35.1
+
--- /dev/null
+From 749408b6713f5525b72ed4e9763176da39cb4a05 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 2 Sep 2022 16:41:11 +0200
+Subject: drm/vc4: Add module dependency on hdmi-codec
+
+From: Maxime Ripard <maxime@cerno.tech>
+
+[ Upstream commit d1c0b7de4dfa5505cf7a1d6220aa72aace4435d0 ]
+
+The VC4 HDMI controller driver relies on the HDMI codec ASoC driver. In
+order to set it up properly, in vc4_hdmi_audio_init(), our HDMI driver
+will register a device matching the HDMI codec driver, and then register
+an ASoC card using that codec.
+
+However, if vc4 is compiled as a module, chances are that the hdmi-codec
+driver will be too. In such a case, the module loader will have a very
+narrow window to load the module between the device registration and the
+card registration.
+
+If it fails to load the module in time, the card registration will fail
+with EPROBE_DEFER, and we'll abort the audio initialisation,
+unregistering the HDMI codec device in the process.
+
+The next time the bind callback is run, it's likely that we will end up
+missing that window again, effectively preventing vc4 from probing entirely.
+
+In order to prevent this, we can create a soft dependency of the vc4
+driver on the HDMI codec one so that we're sure the HDMI codec will be
+loaded before the VC4 module is, and thus we'll never end up in the
+previous situation.
+
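+As a rough sketch of the ordering that races with module loading (the
+snippet below is simplified from vc4_hdmi_audio_init(); treat the exact
+arguments as approximations):
+
+	/* registers the device that the hdmi-codec driver must bind to */
+	codec_pdev = platform_device_register_data(dev, HDMI_CODEC_DRV_NAME,
+						   PLATFORM_DEVID_AUTO,
+						   &codec_data,
+						   sizeof(codec_data));
+
+	/* fails with -EPROBE_DEFER if snd-soc-hdmi-codec is not loaded yet */
+	ret = devm_snd_soc_register_card(dev, card);
+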
+Fixes: 91e99e113929 ("drm/vc4: hdmi: Register HDMI codec")
+Reviewed-by: Javier Martinez Canillas <javierm@redhat.com>
+Signed-off-by: Maxime Ripard <maxime@cerno.tech>
+Link: https://patchwork.freedesktop.org/patch/msgid/20220902144111.3424560-1-maxime@cerno.tech
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/vc4/vc4_drv.c | 1 +
+ 1 file changed, 1 insertion(+)
+
+diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
+index ef8fa2850ed6..d216a1fd057c 100644
+--- a/drivers/gpu/drm/vc4/vc4_drv.c
++++ b/drivers/gpu/drm/vc4/vc4_drv.c
+@@ -397,6 +397,7 @@ module_init(vc4_drm_register);
+ module_exit(vc4_drm_unregister);
+
+ MODULE_ALIAS("platform:vc4-drm");
++MODULE_SOFTDEP("pre: snd-soc-hdmi-codec");
+ MODULE_DESCRIPTION("Broadcom VC4 DRM Driver");
+ MODULE_AUTHOR("Eric Anholt <eric@anholt.net>");
+ MODULE_LICENSE("GPL v2");
+--
+2.35.1
+
--- /dev/null
+From 5506d417e542081ed6becb4f7c9559f0609c3fbd Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 15 Aug 2022 15:43:19 -0400
+Subject: fs: dlm: fix invalid dereference of sb_lvbptr
+
+From: Alexander Aring <aahringo@redhat.com>
+
+[ Upstream commit 7175e131ebba47afef47e6ac4d5bab474d1e6e49 ]
+
+I experienced issues when putting an lkbsb on the stack and leaving the
+sb_lvbptr field as a dangling pointer while not using DLM_LKF_VALBLK. It
+crashes with the following kernel message; the dangling pointer is
+0xdeadbeef here as an example:
+
+[ 102.749317] BUG: unable to handle page fault for address: 00000000deadbeef
+[ 102.749320] #PF: supervisor read access in kernel mode
+[ 102.749323] #PF: error_code(0x0000) - not-present page
+[ 102.749325] PGD 0 P4D 0
+[ 102.749332] Oops: 0000 [#1] PREEMPT SMP PTI
+[ 102.749336] CPU: 0 PID: 1567 Comm: lock_torture_wr Tainted: G W 5.19.0-rc3+ #1565
+[ 102.749343] Hardware name: Red Hat KVM/RHEL-AV, BIOS 1.16.0-2.module+el8.7.0+15506+033991b0 04/01/2014
+[ 102.749344] RIP: 0010:memcpy_erms+0x6/0x10
+[ 102.749353] Code: cc cc cc cc eb 1e 0f 1f 00 48 89 f8 48 89 d1 48 c1 e9 03 83 e2 07 f3 48 a5 89 d1 f3 a4 c3 66 0f 1f 44 00 00 48 89 f8 48 89 d1 <f3> a4 c3 0f 1f 80 00 00 00 00 48 89 f8 48 83 fa 20 72 7e 40 38 fe
+[ 102.749355] RSP: 0018:ffff97a58145fd08 EFLAGS: 00010202
+[ 102.749358] RAX: ffff901778b77070 RBX: 0000000000000000 RCX: 0000000000000040
+[ 102.749360] RDX: 0000000000000040 RSI: 00000000deadbeef RDI: ffff901778b77070
+[ 102.749362] RBP: ffff97a58145fd10 R08: ffff901760b67a70 R09: 0000000000000001
+[ 102.749364] R10: ffff9017008e2cb8 R11: 0000000000000001 R12: ffff901760b67a70
+[ 102.749366] R13: ffff901760b78f00 R14: 0000000000000003 R15: 0000000000000001
+[ 102.749368] FS: 0000000000000000(0000) GS:ffff901876e00000(0000) knlGS:0000000000000000
+[ 102.749372] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+[ 102.749374] CR2: 00000000deadbeef CR3: 000000017c49a004 CR4: 0000000000770ef0
+[ 102.749376] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+[ 102.749378] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+[ 102.749379] PKRU: 55555554
+[ 102.749381] Call Trace:
+[ 102.749382] <TASK>
+[ 102.749383] ? send_args+0xb2/0xd0
+[ 102.749389] send_common+0xb7/0xd0
+[ 102.749395] _unlock_lock+0x2c/0x90
+[ 102.749400] unlock_lock.isra.56+0x62/0xa0
+[ 102.749405] dlm_unlock+0x21e/0x330
+[ 102.749411] ? lock_torture_stats+0x80/0x80 [dlm_locktorture]
+[ 102.749416] torture_unlock+0x5a/0x90 [dlm_locktorture]
+[ 102.749419] ? preempt_count_sub+0xba/0x100
+[ 102.749427] lock_torture_writer+0xbd/0x150 [dlm_locktorture]
+[ 102.786186] kthread+0x10a/0x130
+[ 102.786581] ? kthread_complete_and_exit+0x20/0x20
+[ 102.787156] ret_from_fork+0x22/0x30
+[ 102.787588] </TASK>
+[ 102.787855] Modules linked in: dlm_locktorture torture rpcsec_gss_krb5 intel_rapl_msr intel_rapl_common kvm_intel iTCO_wdt iTCO_vendor_support kvm vmw_vsock_virtio_transport qxl irqbypass vmw_vsock_virtio_transport_common drm_ttm_helper crc32_pclmul joydev crc32c_intel ttm vsock virtio_scsi virtio_balloon snd_pcm drm_kms_helper virtio_console snd_timer snd drm soundcore syscopyarea i2c_i801 sysfillrect sysimgblt i2c_smbus pcspkr fb_sys_fops lpc_ich serio_raw
+[ 102.792536] CR2: 00000000deadbeef
+[ 102.792930] ---[ end trace 0000000000000000 ]---
+
+This patch fixes the issue by also checking that DLM_LKF_VALBLK is set in
+exflags when copying the lvbptr array, instead of only checking whether
+lvbptr is non-NULL.
+
+This patch may fix other dlm users as well, depending on how they handle
+the initialization and freeing of sb_lvbptr and whether they leave
+DLM_LKF_VALBLK unset for some dlm_lock() calls; there may have been a
+hidden issue all along. Note that with the DLM_LKF_VALBLK check, a user
+who sets the flag always needs to provide a non-NULL sb_lvbptr. There
+might be more intelligent handling of per-lockspace lvblen,
+DLM_LKF_VALBLK and the non-NULL requirement to report incorrect use of
+the DLM API to the user, but that can be added later; this only fixes
+the current behaviour.
+
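+A minimal sketch of the problematic usage pattern (illustrative only,
+not taken from a real caller; the callback names are placeholders):
+
+	struct dlm_lksb lksb;	/* on the stack, fields uninitialized */
+
+	lksb.sb_lvbptr = (char *)0xdeadbeef;	/* dangling, never valid */
+	/* DLM_LKF_VALBLK is not set, so the caller assumes the LVB is
+	 * unused, yet send_args() still copied from sb_lvbptr before
+	 * this fix because the pointer is non-NULL.
+	 */
+	dlm_lock(ls, mode, &lksb, flags & ~DLM_LKF_VALBLK, name, namelen,
+		 0, ast_fn, ast_arg, bast_fn);
+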
+Cc: stable@vger.kernel.org
+Signed-off-by: Alexander Aring <aahringo@redhat.com>
+Signed-off-by: David Teigland <teigland@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/dlm/lock.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
+index a6a5fbe36ca9..1ce6d988146f 100644
+--- a/fs/dlm/lock.c
++++ b/fs/dlm/lock.c
+@@ -3631,7 +3631,7 @@ static void send_args(struct dlm_rsb *r, struct dlm_lkb *lkb,
+ case cpu_to_le32(DLM_MSG_REQUEST_REPLY):
+ case cpu_to_le32(DLM_MSG_CONVERT_REPLY):
+ case cpu_to_le32(DLM_MSG_GRANT):
+- if (!lkb->lkb_lvbptr)
++ if (!lkb->lkb_lvbptr || !(lkb->lkb_exflags & DLM_LKF_VALBLK))
+ break;
+ memcpy(ms->m_extra, lkb->lkb_lvbptr, r->res_ls->ls_lvblen);
+ break;
+--
+2.35.1
+
--- /dev/null
+From be8c6f0884b79e1f21794af4d4c9dfa66a510ed4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 9 Oct 2022 20:27:47 +0200
+Subject: HID: magicmouse: Do not set BTN_MOUSE on double report
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: José Expósito <jose.exposito89@gmail.com>
+
+[ Upstream commit bb5f0c855dcfc893ae5ed90e4c646bde9e4498bf ]
+
+Under certain conditions the Magic Trackpad can group 2 reports in a
+single packet. The packet is split and the raw event function is
+invoked recursively for each part.
+
+However, after processing each part, the BTN_MOUSE status is updated,
+sending multiple click events. [1]
+
+Return after processing double reports to avoid this issue.
+
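+A condensed, illustrative view of the control flow (simplified from
+magicmouse_raw_event(); identifiers and the code after the switch are
+approximations, not verbatim kernel code):
+
+	switch (data[0]) {
+	case DOUBLE_REPORT_ID:
+		/* split the packet and recurse once per embedded report */
+		magicmouse_raw_event(hdev, report, data + 2, data[1]);
+		magicmouse_raw_event(hdev, report, data + 2 + data[1],
+				     size - 2 - data[1]);
+		return 0;	/* was "break", which fell through below */
+	default:
+		return 0;
+	}
+
+	/* with "break", this ran once more for the combined packet and
+	 * re-reported the button state on top of the recursive calls
+	 */
+	input_report_key(input, BTN_MOUSE, clicks & 1);
+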
+Link: https://gitlab.freedesktop.org/libinput/libinput/-/issues/811 # [1]
+Fixes: a462230e16ac ("HID: magicmouse: enable Magic Trackpad support")
+Reported-by: Nulo <git@nulo.in>
+Signed-off-by: José Expósito <jose.exposito89@gmail.com>
+Signed-off-by: Benjamin Tissoires <benjamin.tissoires@redhat.com>
+Link: https://lore.kernel.org/r/20221009182747.90730-1-jose.exposito89@gmail.com
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/hid/hid-magicmouse.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
+index b8b08f0a8c54..c6b8da716002 100644
+--- a/drivers/hid/hid-magicmouse.c
++++ b/drivers/hid/hid-magicmouse.c
+@@ -478,7 +478,7 @@ static int magicmouse_raw_event(struct hid_device *hdev,
+ magicmouse_raw_event(hdev, report, data + 2, data[1]);
+ magicmouse_raw_event(hdev, report, data + 2 + data[1],
+ size - 2 - data[1]);
+- break;
++ return 0;
+ default:
+ return 0;
+ }
+--
+2.35.1
+
--- /dev/null
+From f3457f2ccf42269e1de43e17c3fdc6a79ecbb82b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Oct 2022 13:54:40 -0700
+Subject: i40e: Fix DMA mappings leak
+
+From: Jan Sokolowski <jan.sokolowski@intel.com>
+
+[ Upstream commit aae425efdfd1b1d8452260a3cb49344ebf20b1f5 ]
+
+During reallocation of RX buffers, new DMA mappings are created for
+those buffers.
+
+steps for reproduction:
+while :
+do
+	for ((i=0; i<=8160; i=i+32))
+	do
+		ethtool -G enp130s0f0 rx $i tx $i
+		sleep 0.5
+		ethtool -g enp130s0f0
+	done
+done
+
+This resulted in crash:
+i40e 0000:01:00.1: Unable to allocate memory for the Rx descriptor ring, size=65536
+Driver BUG
+WARNING: CPU: 0 PID: 4300 at net/core/xdp.c:141 xdp_rxq_info_unreg+0x43/0x50
+Call Trace:
+i40e_free_rx_resources+0x70/0x80 [i40e]
+i40e_set_ringparam+0x27c/0x800 [i40e]
+ethnl_set_rings+0x1b2/0x290
+genl_family_rcv_msg_doit.isra.15+0x10f/0x150
+genl_family_rcv_msg+0xb3/0x160
+? rings_fill_reply+0x1a0/0x1a0
+genl_rcv_msg+0x47/0x90
+? genl_family_rcv_msg+0x160/0x160
+netlink_rcv_skb+0x4c/0x120
+genl_rcv+0x24/0x40
+netlink_unicast+0x196/0x230
+netlink_sendmsg+0x204/0x3d0
+sock_sendmsg+0x4c/0x50
+__sys_sendto+0xee/0x160
+? handle_mm_fault+0xbe/0x1e0
+? syscall_trace_enter+0x1d3/0x2c0
+__x64_sys_sendto+0x24/0x30
+do_syscall_64+0x5b/0x1a0
+entry_SYSCALL_64_after_hwframe+0x65/0xca
+RIP: 0033:0x7f5eac8b035b
+Missing register, driver bug
+WARNING: CPU: 0 PID: 4300 at net/core/xdp.c:119 xdp_rxq_info_unreg_mem_model+0x69/0x140
+Call Trace:
+xdp_rxq_info_unreg+0x1e/0x50
+i40e_free_rx_resources+0x70/0x80 [i40e]
+i40e_set_ringparam+0x27c/0x800 [i40e]
+ethnl_set_rings+0x1b2/0x290
+genl_family_rcv_msg_doit.isra.15+0x10f/0x150
+genl_family_rcv_msg+0xb3/0x160
+? rings_fill_reply+0x1a0/0x1a0
+genl_rcv_msg+0x47/0x90
+? genl_family_rcv_msg+0x160/0x160
+netlink_rcv_skb+0x4c/0x120
+genl_rcv+0x24/0x40
+netlink_unicast+0x196/0x230
+netlink_sendmsg+0x204/0x3d0
+sock_sendmsg+0x4c/0x50
+__sys_sendto+0xee/0x160
+? handle_mm_fault+0xbe/0x1e0
+? syscall_trace_enter+0x1d3/0x2c0
+__x64_sys_sendto+0x24/0x30
+do_syscall_64+0x5b/0x1a0
+entry_SYSCALL_64_after_hwframe+0x65/0xca
+RIP: 0033:0x7f5eac8b035b
+
+This was caused because new buffers with a different RX ring count were
+supposed to substitute the older ones, but those buffers were freed in
+i40e_configure_rx_ring and reallocated again with i40e_alloc_rx_bi,
+so the kfree on rx_bi leaked the already mapped DMA.
+
+Fix this by reallocating with the rx_bi_zc struct when a BPF program is
+loaded, and reallocating back to rx_bi when the BPF program is unloaded.
+
+If a BPF program is loaded/unloaded and XSK pools are created, reallocate
+the RX queues accordingly in the XDP_SETUP_XSK_POOL handler.
+
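+For context, the XDP_SETUP_XSK_POOL command reaches the pool setup code
+roughly like this (simplified sketch of the i40e ndo_bpf handler):
+
+	case XDP_SETUP_XSK_POOL:
+		return i40e_xsk_pool_setup(vsi, xdp->xsk.pool,
+					   xdp->xsk.queue_id);
+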
+Fixes: be1222b585fd ("i40e: Separate kernel allocated rx_bi rings from AF_XDP rings")
+Signed-off-by: Jan Sokolowski <jan.sokolowski@intel.com>
+Signed-off-by: Mateusz Palczewski <mateusz.palczewski@intel.com>
+Signed-off-by: Jacob Keller <jacob.e.keller@intel.com>
+Tested-by: Chandan <chandanx.rout@intel.com> (A Contingent Worker at Intel)
+Tested-by: Gurucharan <gurucharanx.g@intel.com> (A Contingent worker at Intel)
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../net/ethernet/intel/i40e/i40e_ethtool.c | 3 -
+ drivers/net/ethernet/intel/i40e/i40e_main.c | 16 +++--
+ drivers/net/ethernet/intel/i40e/i40e_txrx.c | 13 ++--
+ drivers/net/ethernet/intel/i40e/i40e_txrx.h | 1 -
+ drivers/net/ethernet/intel/i40e/i40e_xsk.c | 67 ++++++++++++++++---
+ drivers/net/ethernet/intel/i40e/i40e_xsk.h | 2 +-
+ 6 files changed, 74 insertions(+), 28 deletions(-)
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+index 8e770c5e181e..11a17ebfceef 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_ethtool.c
+@@ -2081,9 +2081,6 @@ static int i40e_set_ringparam(struct net_device *netdev,
+ */
+ rx_rings[i].tail = hw->hw_addr + I40E_PRTGEN_STATUS;
+ err = i40e_setup_rx_descriptors(&rx_rings[i]);
+- if (err)
+- goto rx_unwind;
+- err = i40e_alloc_rx_bi(&rx_rings[i]);
+ if (err)
+ goto rx_unwind;
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
+index 5922520fdb01..ad6f6fe25057 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
+@@ -3421,12 +3421,8 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ if (ring->vsi->type == I40E_VSI_MAIN)
+ xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);
+
+- kfree(ring->rx_bi);
+ ring->xsk_pool = i40e_xsk_pool(ring);
+ if (ring->xsk_pool) {
+- ret = i40e_alloc_rx_bi_zc(ring);
+- if (ret)
+- return ret;
+ ring->rx_buf_len =
+ xsk_pool_get_rx_frame_size(ring->xsk_pool);
+ /* For AF_XDP ZC, we disallow packets to span on
+@@ -3444,9 +3440,6 @@ static int i40e_configure_rx_ring(struct i40e_ring *ring)
+ ring->queue_index);
+
+ } else {
+- ret = i40e_alloc_rx_bi(ring);
+- if (ret)
+- return ret;
+ ring->rx_buf_len = vsi->rx_buf_len;
+ if (ring->vsi->type == I40E_VSI_MAIN) {
+ ret = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
+@@ -13161,6 +13154,14 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog,
+ i40e_reset_and_rebuild(pf, true, true);
+ }
+
++ if (!i40e_enabled_xdp_vsi(vsi) && prog) {
++ if (i40e_realloc_rx_bi_zc(vsi, true))
++ return -ENOMEM;
++ } else if (i40e_enabled_xdp_vsi(vsi) && !prog) {
++ if (i40e_realloc_rx_bi_zc(vsi, false))
++ return -ENOMEM;
++ }
++
+ for (i = 0; i < vsi->num_queue_pairs; i++)
+ WRITE_ONCE(vsi->rx_rings[i]->xdp_prog, vsi->xdp_prog);
+
+@@ -13393,6 +13394,7 @@ int i40e_queue_pair_disable(struct i40e_vsi *vsi, int queue_pair)
+
+ i40e_queue_pair_disable_irq(vsi, queue_pair);
+ err = i40e_queue_pair_toggle_rings(vsi, queue_pair, false /* off */);
++ i40e_clean_rx_ring(vsi->rx_rings[queue_pair]);
+ i40e_queue_pair_toggle_napi(vsi, queue_pair, false /* off */);
+ i40e_queue_pair_clean_rings(vsi, queue_pair);
+ i40e_queue_pair_reset_stats(vsi, queue_pair);
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+index 326fd25d055f..8f5aad9bbba3 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c
+@@ -1459,14 +1459,6 @@ int i40e_setup_tx_descriptors(struct i40e_ring *tx_ring)
+ return -ENOMEM;
+ }
+
+-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring)
+-{
+- unsigned long sz = sizeof(*rx_ring->rx_bi) * rx_ring->count;
+-
+- rx_ring->rx_bi = kzalloc(sz, GFP_KERNEL);
+- return rx_ring->rx_bi ? 0 : -ENOMEM;
+-}
+-
+ static void i40e_clear_rx_bi(struct i40e_ring *rx_ring)
+ {
+ memset(rx_ring->rx_bi, 0, sizeof(*rx_ring->rx_bi) * rx_ring->count);
+@@ -1597,6 +1589,11 @@ int i40e_setup_rx_descriptors(struct i40e_ring *rx_ring)
+
+ rx_ring->xdp_prog = rx_ring->vsi->xdp_prog;
+
++ rx_ring->rx_bi =
++ kcalloc(rx_ring->count, sizeof(*rx_ring->rx_bi), GFP_KERNEL);
++ if (!rx_ring->rx_bi)
++ return -ENOMEM;
++
+ return 0;
+ }
+
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.h b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+index f6d91fa1562e..f3b0b8151709 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_txrx.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.h
+@@ -466,7 +466,6 @@ int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
+ bool __i40e_chk_linearize(struct sk_buff *skb);
+ int i40e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
+ u32 flags);
+-int i40e_alloc_rx_bi(struct i40e_ring *rx_ring);
+
+ /**
+ * i40e_get_head - Retrieve head from head writeback
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+index 54c91dc459dd..7e50b8fff9b5 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+@@ -10,14 +10,6 @@
+ #include "i40e_txrx_common.h"
+ #include "i40e_xsk.h"
+
+-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring)
+-{
+- unsigned long sz = sizeof(*rx_ring->rx_bi_zc) * rx_ring->count;
+-
+- rx_ring->rx_bi_zc = kzalloc(sz, GFP_KERNEL);
+- return rx_ring->rx_bi_zc ? 0 : -ENOMEM;
+-}
+-
+ void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring)
+ {
+ memset(rx_ring->rx_bi_zc, 0,
+@@ -29,6 +21,58 @@ static struct xdp_buff **i40e_rx_bi(struct i40e_ring *rx_ring, u32 idx)
+ return &rx_ring->rx_bi_zc[idx];
+ }
+
++/**
++ * i40e_realloc_rx_xdp_bi - reallocate SW ring for either XSK or normal buffer
++ * @rx_ring: Current rx ring
++ * @pool_present: is pool for XSK present
++ *
++ * Try allocating memory and return ENOMEM, if failed to allocate.
++ * If allocation was successful, substitute buffer with allocated one.
++ * Returns 0 on success, negative on failure
++ */
++static int i40e_realloc_rx_xdp_bi(struct i40e_ring *rx_ring, bool pool_present)
++{
++ size_t elem_size = pool_present ? sizeof(*rx_ring->rx_bi_zc) :
++ sizeof(*rx_ring->rx_bi);
++ void *sw_ring = kcalloc(rx_ring->count, elem_size, GFP_KERNEL);
++
++ if (!sw_ring)
++ return -ENOMEM;
++
++ if (pool_present) {
++ kfree(rx_ring->rx_bi);
++ rx_ring->rx_bi = NULL;
++ rx_ring->rx_bi_zc = sw_ring;
++ } else {
++ kfree(rx_ring->rx_bi_zc);
++ rx_ring->rx_bi_zc = NULL;
++ rx_ring->rx_bi = sw_ring;
++ }
++ return 0;
++}
++
++/**
++ * i40e_realloc_rx_bi_zc - reallocate rx SW rings
++ * @vsi: Current VSI
++ * @zc: is zero copy set
++ *
++ * Reallocate buffer for rx_rings that might be used by XSK.
++ * XDP requires more memory, than rx_buf provides.
++ * Returns 0 on success, negative on failure
++ */
++int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc)
++{
++ struct i40e_ring *rx_ring;
++ unsigned long q;
++
++ for_each_set_bit(q, vsi->af_xdp_zc_qps, vsi->alloc_queue_pairs) {
++ rx_ring = vsi->rx_rings[q];
++ if (i40e_realloc_rx_xdp_bi(rx_ring, zc))
++ return -ENOMEM;
++ }
++ return 0;
++}
++
+ /**
+ * i40e_xsk_pool_enable - Enable/associate an AF_XDP buffer pool to a
+ * certain ring/qid
+@@ -69,6 +113,10 @@ static int i40e_xsk_pool_enable(struct i40e_vsi *vsi,
+ if (err)
+ return err;
+
++ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], true);
++ if (err)
++ return err;
++
+ err = i40e_queue_pair_enable(vsi, qid);
+ if (err)
+ return err;
+@@ -113,6 +161,9 @@ static int i40e_xsk_pool_disable(struct i40e_vsi *vsi, u16 qid)
+ xsk_pool_dma_unmap(pool, I40E_RX_DMA_ATTR);
+
+ if (if_running) {
++ err = i40e_realloc_rx_xdp_bi(vsi->rx_rings[qid], false);
++ if (err)
++ return err;
+ err = i40e_queue_pair_enable(vsi, qid);
+ if (err)
+ return err;
+diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.h b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+index ea88f4597a07..75103c992269 100644
+--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.h
++++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.h
+@@ -33,7 +33,7 @@ int i40e_clean_rx_irq_zc(struct i40e_ring *rx_ring, int budget);
+
+ bool i40e_clean_xdp_tx_irq(struct i40e_vsi *vsi, struct i40e_ring *tx_ring);
+ int i40e_xsk_wakeup(struct net_device *dev, u32 queue_id, u32 flags);
+-int i40e_alloc_rx_bi_zc(struct i40e_ring *rx_ring);
++int i40e_realloc_rx_bi_zc(struct i40e_vsi *vsi, bool zc);
+ void i40e_clear_rx_bi_zc(struct i40e_ring *rx_ring);
+
+ #endif /* _I40E_XSK_H_ */
+--
+2.35.1
+
--- /dev/null
+From 5f30223ca9cad85da94b6a369ce574ae98fcba78 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Oct 2022 08:44:46 +0800
+Subject: iommu/vt-d: Allow NVS regions in arch_rmrr_sanity_check()
+
+From: Charlotte Tan <charlotte@extrahop.com>
+
+[ Upstream commit 5566e68d829f5d87670d5984c1c2ccb4c518405f ]
+
+arch_rmrr_sanity_check() warns if the RMRR is not covered by an ACPI
+Reserved region, but it seems like it should accept an NVS region as
+well. The ACPI spec
+https://uefi.org/specs/ACPI/6.5/15_System_Address_Map_Interfaces.html
+uses similar wording for "Reserved" and "NVS" region types; for NVS
+regions it says "This range of addresses is in use or reserved by the
+system and must not be used by the operating system."
+
+There is an old comment on this mailing list that also suggests NVS
+regions should pass the arch_rmrr_sanity_check() test:
+
+ The warnings come from arch_rmrr_sanity_check() since it checks whether
+ the region is E820_TYPE_RESERVED. However, if the purpose of the check
+ is to detect RMRR has regions that may be used by OS as free memory,
+ isn't E820_TYPE_NVS safe, too?
+
+This patch overlaps with another proposed patch that would add the region
+type to the log since sometimes the bug reporter sees this log on the
+console but doesn't know to include the kernel log:
+
+https://lore.kernel.org/lkml/20220611204859.234975-3-atomlin@redhat.com/
+
+Here's an example of the "Firmware Bug" apparent false positive (wrapped
+for line length):
+
+ DMAR: [Firmware Bug]: No firmware reserved region can cover this RMRR
+ [0x000000006f760000-0x000000006f762fff], contact BIOS vendor for
+ fixes
+ DMAR: [Firmware Bug]: Your BIOS is broken; bad RMRR
+ [0x000000006f760000-0x000000006f762fff]
+
+This is the snippet from the e820 table:
+
+ BIOS-e820: [mem 0x0000000068bff000-0x000000006ebfefff] reserved
+ BIOS-e820: [mem 0x000000006ebff000-0x000000006f9fefff] ACPI NVS
+ BIOS-e820: [mem 0x000000006f9ff000-0x000000006fffefff] ACPI data
+
+Note that the reported RMRR range falls entirely inside the ACPI NVS
+region above, so the warning was a false positive.
+
+Fixes: f036c7fa0ab6 ("iommu/vt-d: Check VT-d RMRR region in BIOS is reported as reserved")
+Cc: Will Mortensen <will@extrahop.com>
+Link: https://lore.kernel.org/linux-iommu/64a5843d-850d-e58c-4fc2-0a0eeeb656dc@nec.com/
+Link: https://bugzilla.kernel.org/show_bug.cgi?id=216443
+Signed-off-by: Charlotte Tan <charlotte@extrahop.com>
+Reviewed-by: Aaron Tomlin <atomlin@redhat.com>
+Link: https://lore.kernel.org/r/20220929044449.32515-1-charlotte@extrahop.com
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ arch/x86/include/asm/iommu.h | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/arch/x86/include/asm/iommu.h b/arch/x86/include/asm/iommu.h
+index bf1ed2ddc74b..7a983119bc40 100644
+--- a/arch/x86/include/asm/iommu.h
++++ b/arch/x86/include/asm/iommu.h
+@@ -17,8 +17,10 @@ arch_rmrr_sanity_check(struct acpi_dmar_reserved_memory *rmrr)
+ {
+ u64 start = rmrr->base_address;
+ u64 end = rmrr->end_address + 1;
++ int entry_type;
+
+- if (e820__mapped_all(start, end, E820_TYPE_RESERVED))
++ entry_type = e820__get_entry_type(start, end);
++ if (entry_type == E820_TYPE_RESERVED || entry_type == E820_TYPE_NVS)
+ return 0;
+
+ pr_err(FW_BUG "No firmware reserved region can cover this RMRR [%#018Lx-%#018Lx], contact BIOS vendor for fixes\n",
+--
+2.35.1
+
--- /dev/null
+From 1f81140f88f47b97015f2908659ebe9700147f5d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 19 Oct 2022 08:44:47 +0800
+Subject: iommu/vt-d: Clean up si_domain in the init_dmars() error path
+
+From: Jerry Snitselaar <jsnitsel@redhat.com>
+
+[ Upstream commit 620bf9f981365c18cc2766c53d92bf8131c63f32 ]
+
+A splat from kmem_cache_destroy() was seen with a kernel prior to
+commit ee2653bbe89d ("iommu/vt-d: Remove domain and devinfo mempool")
+when there was a failure in init_dmars(), because the iommu_domain
+cache still had objects. While the mempool code is now gone, there
+still is a leak of the si_domain memory if init_dmars() fails. So
+clean up si_domain in the init_dmars() error path.
+
+Cc: Lu Baolu <baolu.lu@linux.intel.com>
+Cc: Joerg Roedel <joro@8bytes.org>
+Cc: Will Deacon <will@kernel.org>
+Cc: Robin Murphy <robin.murphy@arm.com>
+Fixes: 86080ccc223a ("iommu/vt-d: Allocate si_domain in init_dmars()")
+Signed-off-by: Jerry Snitselaar <jsnitsel@redhat.com>
+Link: https://lore.kernel.org/r/20221010144842.308890-1-jsnitsel@redhat.com
+Signed-off-by: Lu Baolu <baolu.lu@linux.intel.com>
+Signed-off-by: Joerg Roedel <jroedel@suse.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/iommu/intel/iommu.c | 5 +++++
+ 1 file changed, 5 insertions(+)
+
+diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c
+index 71a932017772..b0a975e0a8cb 100644
+--- a/drivers/iommu/intel/iommu.c
++++ b/drivers/iommu/intel/iommu.c
+@@ -2761,6 +2761,7 @@ static int __init si_domain_init(int hw)
+
+ if (md_domain_init(si_domain, DEFAULT_DOMAIN_ADDRESS_WIDTH)) {
+ domain_exit(si_domain);
++ si_domain = NULL;
+ return -EFAULT;
+ }
+
+@@ -3397,6 +3398,10 @@ static int __init init_dmars(void)
+ disable_dmar_iommu(iommu);
+ free_dmar_iommu(iommu);
+ }
++ if (si_domain) {
++ domain_exit(si_domain);
++ si_domain = NULL;
++ }
+
+ kfree(g_iommus);
+
+--
+2.35.1
+
--- /dev/null
+From b3e420e0dbf4ddd4a85c39c2a79eed27c78ac5ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Oct 2022 16:31:23 -0700
+Subject: ionic: catch NULL pointer issue on reconfig
+
+From: Brett Creeley <brett@pensando.io>
+
+[ Upstream commit aa1d7e1267c12e07d979aa34c613716a89029db2 ]
+
+It's possible that the driver will dereference a qcq that doesn't exist
+when calling ionic_reconfigure_queues(), which causes a page fault BUG.
+
+If a reduction in the number of queues is followed by a different
+reconfig such as changing the ring size, the driver can hit a NULL
+pointer when trying to clean up non-existent queues.
+
+Fix this by checking to make sure both the qcqs array and the qcq entry
+exist before trying to use and free the entry.
+
+Fixes: 101b40a0171f ("ionic: change queue count with no reset")
+Signed-off-by: Brett Creeley <brett@pensando.io>
+Signed-off-by: Shannon Nelson <snelson@pensando.io>
+Link: https://lore.kernel.org/r/20221017233123.15869-1-snelson@pensando.io
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/pensando/ionic/ionic_lif.c | 12 ++++++++----
+ 1 file changed, 8 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.c b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+index c713a3ee6571..886c997a3ad1 100644
+--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.c
++++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.c
+@@ -2880,11 +2880,15 @@ int ionic_reconfigure_queues(struct ionic_lif *lif,
+ * than the full array, but leave the qcq shells in place
+ */
+ for (i = lif->nxqs; i < lif->ionic->ntxqs_per_lif; i++) {
+- lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+- ionic_qcq_free(lif, lif->txqcqs[i]);
++ if (lif->txqcqs && lif->txqcqs[i]) {
++ lif->txqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
++ ionic_qcq_free(lif, lif->txqcqs[i]);
++ }
+
+- lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
+- ionic_qcq_free(lif, lif->rxqcqs[i]);
++ if (lif->rxqcqs && lif->rxqcqs[i]) {
++ lif->rxqcqs[i]->flags &= ~IONIC_QCQ_F_INTR;
++ ionic_qcq_free(lif, lif->rxqcqs[i]);
++ }
+ }
+
+ if (err)
+--
+2.35.1
+
--- /dev/null
+From 6709ef0f416b63f2565a5fc702f7b2049e5cc023 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 13 Sep 2022 14:17:23 +0200
+Subject: kernfs: fix use-after-free in __kernfs_remove
+
+From: Christian A. Ehrhardt <lk@c--e.de>
+
+[ Upstream commit 4abc99652812a2ddf932f137515d5c5a04723538 ]
+
+Syzkaller managed to trigger concurrent calls to
+kernfs_remove_by_name_ns() for the same file resulting in
+a KASAN-detected use-after-free. The race occurs when the root
+node is freed during kernfs_drain().
+
+To prevent this, acquire an additional reference for the root
+of the tree that is removed before calling __kernfs_remove().
+
+Found by syzkaller with the following reproducer (slab_nomerge is
+required):
+
+syz_mount_image$ext4(0x0, &(0x7f0000000100)='./file0\x00', 0x100000, 0x0, 0x0, 0x0, 0x0)
+r0 = openat(0xffffffffffffff9c, &(0x7f0000000080)='/proc/self/exe\x00', 0x0, 0x0)
+close(r0)
+pipe2(&(0x7f0000000140)={0xffffffffffffffff, <r1=>0xffffffffffffffff}, 0x800)
+mount$9p_fd(0x0, &(0x7f0000000040)='./file0\x00', &(0x7f00000000c0), 0x408, &(0x7f0000000280)={'trans=fd,', {'rfdno', 0x3d, r0}, 0x2c, {'wfdno', 0x3d, r1}, 0x2c, {[{@cache_loose}, {@mmap}, {@loose}, {@loose}, {@mmap}], [{@mask={'mask', 0x3d, '^MAY_EXEC'}}, {@fsmagic={'fsmagic', 0x3d, 0x10001}}, {@dont_hash}]}})
+
+Sample report:
+
+==================================================================
+BUG: KASAN: use-after-free in kernfs_type include/linux/kernfs.h:335 [inline]
+BUG: KASAN: use-after-free in kernfs_leftmost_descendant fs/kernfs/dir.c:1261 [inline]
+BUG: KASAN: use-after-free in __kernfs_remove.part.0+0x843/0x960 fs/kernfs/dir.c:1369
+Read of size 2 at addr ffff8880088807f0 by task syz-executor.2/857
+
+CPU: 0 PID: 857 Comm: syz-executor.2 Not tainted 6.0.0-rc3-00363-g7726d4c3e60b #5
+Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS 1.15.0-1 04/01/2014
+Call Trace:
+ <TASK>
+ __dump_stack lib/dump_stack.c:88 [inline]
+ dump_stack_lvl+0x6e/0x91 lib/dump_stack.c:106
+ print_address_description mm/kasan/report.c:317 [inline]
+ print_report.cold+0x5e/0x5e5 mm/kasan/report.c:433
+ kasan_report+0xa3/0x130 mm/kasan/report.c:495
+ kernfs_type include/linux/kernfs.h:335 [inline]
+ kernfs_leftmost_descendant fs/kernfs/dir.c:1261 [inline]
+ __kernfs_remove.part.0+0x843/0x960 fs/kernfs/dir.c:1369
+ __kernfs_remove fs/kernfs/dir.c:1356 [inline]
+ kernfs_remove_by_name_ns+0x108/0x190 fs/kernfs/dir.c:1589
+ sysfs_slab_add+0x133/0x1e0 mm/slub.c:5943
+ __kmem_cache_create+0x3e0/0x550 mm/slub.c:4899
+ create_cache mm/slab_common.c:229 [inline]
+ kmem_cache_create_usercopy+0x167/0x2a0 mm/slab_common.c:335
+ p9_client_create+0xd4d/0x1190 net/9p/client.c:993
+ v9fs_session_init+0x1e6/0x13c0 fs/9p/v9fs.c:408
+ v9fs_mount+0xb9/0xbd0 fs/9p/vfs_super.c:126
+ legacy_get_tree+0xf1/0x200 fs/fs_context.c:610
+ vfs_get_tree+0x85/0x2e0 fs/super.c:1530
+ do_new_mount fs/namespace.c:3040 [inline]
+ path_mount+0x675/0x1d00 fs/namespace.c:3370
+ do_mount fs/namespace.c:3383 [inline]
+ __do_sys_mount fs/namespace.c:3591 [inline]
+ __se_sys_mount fs/namespace.c:3568 [inline]
+ __x64_sys_mount+0x282/0x300 fs/namespace.c:3568
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x38/0x90 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+RIP: 0033:0x7f725f983aed
+Code: 02 b8 ff ff ff ff c3 66 0f 1f 44 00 00 f3 0f 1e fa 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b0 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007f725f0f7028 EFLAGS: 00000246 ORIG_RAX: 00000000000000a5
+RAX: ffffffffffffffda RBX: 00007f725faa3f80 RCX: 00007f725f983aed
+RDX: 00000000200000c0 RSI: 0000000020000040 RDI: 0000000000000000
+RBP: 00007f725f9f419c R08: 0000000020000280 R09: 0000000000000000
+R10: 0000000000000408 R11: 0000000000000246 R12: 0000000000000000
+R13: 0000000000000006 R14: 00007f725faa3f80 R15: 00007f725f0d7000
+ </TASK>
+
+Allocated by task 855:
+ kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38
+ kasan_set_track mm/kasan/common.c:45 [inline]
+ set_alloc_info mm/kasan/common.c:437 [inline]
+ __kasan_slab_alloc+0x66/0x80 mm/kasan/common.c:470
+ kasan_slab_alloc include/linux/kasan.h:224 [inline]
+ slab_post_alloc_hook mm/slab.h:727 [inline]
+ slab_alloc_node mm/slub.c:3243 [inline]
+ slab_alloc mm/slub.c:3251 [inline]
+ __kmem_cache_alloc_lru mm/slub.c:3258 [inline]
+ kmem_cache_alloc+0xbf/0x200 mm/slub.c:3268
+ kmem_cache_zalloc include/linux/slab.h:723 [inline]
+ __kernfs_new_node+0xd4/0x680 fs/kernfs/dir.c:593
+ kernfs_new_node fs/kernfs/dir.c:655 [inline]
+ kernfs_create_dir_ns+0x9c/0x220 fs/kernfs/dir.c:1010
+ sysfs_create_dir_ns+0x127/0x290 fs/sysfs/dir.c:59
+ create_dir lib/kobject.c:63 [inline]
+ kobject_add_internal+0x24a/0x8d0 lib/kobject.c:223
+ kobject_add_varg lib/kobject.c:358 [inline]
+ kobject_init_and_add+0x101/0x160 lib/kobject.c:441
+ sysfs_slab_add+0x156/0x1e0 mm/slub.c:5954
+ __kmem_cache_create+0x3e0/0x550 mm/slub.c:4899
+ create_cache mm/slab_common.c:229 [inline]
+ kmem_cache_create_usercopy+0x167/0x2a0 mm/slab_common.c:335
+ p9_client_create+0xd4d/0x1190 net/9p/client.c:993
+ v9fs_session_init+0x1e6/0x13c0 fs/9p/v9fs.c:408
+ v9fs_mount+0xb9/0xbd0 fs/9p/vfs_super.c:126
+ legacy_get_tree+0xf1/0x200 fs/fs_context.c:610
+ vfs_get_tree+0x85/0x2e0 fs/super.c:1530
+ do_new_mount fs/namespace.c:3040 [inline]
+ path_mount+0x675/0x1d00 fs/namespace.c:3370
+ do_mount fs/namespace.c:3383 [inline]
+ __do_sys_mount fs/namespace.c:3591 [inline]
+ __se_sys_mount fs/namespace.c:3568 [inline]
+ __x64_sys_mount+0x282/0x300 fs/namespace.c:3568
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x38/0x90 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Freed by task 857:
+ kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38
+ kasan_set_track+0x21/0x30 mm/kasan/common.c:45
+ kasan_set_free_info+0x20/0x40 mm/kasan/generic.c:370
+ ____kasan_slab_free mm/kasan/common.c:367 [inline]
+ ____kasan_slab_free mm/kasan/common.c:329 [inline]
+ __kasan_slab_free+0x108/0x190 mm/kasan/common.c:375
+ kasan_slab_free include/linux/kasan.h:200 [inline]
+ slab_free_hook mm/slub.c:1754 [inline]
+ slab_free_freelist_hook mm/slub.c:1780 [inline]
+ slab_free mm/slub.c:3534 [inline]
+ kmem_cache_free+0x9c/0x340 mm/slub.c:3551
+ kernfs_put.part.0+0x2b2/0x520 fs/kernfs/dir.c:547
+ kernfs_put+0x42/0x50 fs/kernfs/dir.c:521
+ __kernfs_remove.part.0+0x72d/0x960 fs/kernfs/dir.c:1407
+ __kernfs_remove fs/kernfs/dir.c:1356 [inline]
+ kernfs_remove_by_name_ns+0x108/0x190 fs/kernfs/dir.c:1589
+ sysfs_slab_add+0x133/0x1e0 mm/slub.c:5943
+ __kmem_cache_create+0x3e0/0x550 mm/slub.c:4899
+ create_cache mm/slab_common.c:229 [inline]
+ kmem_cache_create_usercopy+0x167/0x2a0 mm/slab_common.c:335
+ p9_client_create+0xd4d/0x1190 net/9p/client.c:993
+ v9fs_session_init+0x1e6/0x13c0 fs/9p/v9fs.c:408
+ v9fs_mount+0xb9/0xbd0 fs/9p/vfs_super.c:126
+ legacy_get_tree+0xf1/0x200 fs/fs_context.c:610
+ vfs_get_tree+0x85/0x2e0 fs/super.c:1530
+ do_new_mount fs/namespace.c:3040 [inline]
+ path_mount+0x675/0x1d00 fs/namespace.c:3370
+ do_mount fs/namespace.c:3383 [inline]
+ __do_sys_mount fs/namespace.c:3591 [inline]
+ __se_sys_mount fs/namespace.c:3568 [inline]
+ __x64_sys_mount+0x282/0x300 fs/namespace.c:3568
+ do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+ do_syscall_64+0x38/0x90 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+The buggy address belongs to the object at ffff888008880780
+ which belongs to the cache kernfs_node_cache of size 128
+The buggy address is located 112 bytes inside of
+ 128-byte region [ffff888008880780, ffff888008880800)
+
+The buggy address belongs to the physical page:
+page:00000000732833f8 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x8880
+flags: 0x100000000000200(slab|node=0|zone=1)
+raw: 0100000000000200 0000000000000000 dead000000000122 ffff888001147280
+raw: 0000000000000000 0000000000150015 00000001ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+
+Memory state around the buggy address:
+ ffff888008880680: fc fc fc fc fc fc fc fc fa fb fb fb fb fb fb fb
+ ffff888008880700: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
+>ffff888008880780: fa fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb
+ ^
+ ffff888008880800: fc fc fc fc fc fc fc fc fa fb fb fb fb fb fb fb
+ ffff888008880880: fb fb fb fb fb fb fb fb fc fc fc fc fc fc fc fc
+==================================================================
+
+Acked-by: Tejun Heo <tj@kernel.org>
+Cc: stable <stable@kernel.org> # -rc3
+Signed-off-by: Christian A. Ehrhardt <lk@c--e.de>
+Link: https://lore.kernel.org/r/20220913121723.691454-1-lk@c--e.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/kernfs/dir.c | 5 ++++-
+ 1 file changed, 4 insertions(+), 1 deletion(-)
+
+diff --git a/fs/kernfs/dir.c b/fs/kernfs/dir.c
+index 7bf1d5fc2e9c..90677cfbcf9c 100644
+--- a/fs/kernfs/dir.c
++++ b/fs/kernfs/dir.c
+@@ -1547,8 +1547,11 @@ int kernfs_remove_by_name_ns(struct kernfs_node *parent, const char *name,
+ down_write(&kernfs_rwsem);
+
+ kn = kernfs_find_ns(parent, name, ns);
+- if (kn)
++ if (kn) {
++ kernfs_get(kn);
+ __kernfs_remove(kn);
++ kernfs_put(kn);
++ }
+
+ up_write(&kernfs_rwsem);
+
+--
+2.35.1
+
--- /dev/null
+From c7d506df0a18a0d1336d68e8457d0a3666d3404c Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 9 Sep 2022 17:43:53 +0900
+Subject: ksmbd: fix incorrect handling of iterate_dir
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit 88541cb414b7a2450c45fc9c131b37b5753b7679 ]
+
+If iterate_dir() returns a non-negative value, the caller has to treat
+it as normal and check whether any error occurred while populating the
+dentry information. ksmbd doesn't have to do anything extra, because it
+already checks for an OutputBufferLength too small to store even one
+file entry.
+
+Because ctx->pos is set to file->f_pos when iterate_dir() is called,
+remove restart_ctx(). And if iterate_dir() returns -EIO, which means a
+directory entry is corrupted, return a STATUS_FILE_CORRUPT_ERROR error
+response.
+
+This patch fixes some failures of SMB2_QUERY_DIRECTORY which happen when
+ntfs3 is the local filesystem.
+
+Fixes: e2f34481b24d ("cifsd: add server-side procedures for SMB3")
+Cc: stable@vger.kernel.org
+Signed-off-by: Hyunchul Lee <hyc.lee@gmail.com>
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/smb2pdu.c | 14 ++++----------
+ 1 file changed, 4 insertions(+), 10 deletions(-)
+
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index ef0aef78eba6..65c85ca71ebe 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -3803,11 +3803,6 @@ static int __query_dir(struct dir_context *ctx, const char *name, int namlen,
+ return 0;
+ }
+
+-static void restart_ctx(struct dir_context *ctx)
+-{
+- ctx->pos = 0;
+-}
+-
+ static int verify_info_level(int info_level)
+ {
+ switch (info_level) {
+@@ -3921,7 +3916,6 @@ int smb2_query_dir(struct ksmbd_work *work)
+ if (srch_flag & SMB2_REOPEN || srch_flag & SMB2_RESTART_SCANS) {
+ ksmbd_debug(SMB, "Restart directory scan\n");
+ generic_file_llseek(dir_fp->filp, 0, SEEK_SET);
+- restart_ctx(&dir_fp->readdir_data.ctx);
+ }
+
+ memset(&d_info, 0, sizeof(struct ksmbd_dir_info));
+@@ -3968,11 +3962,9 @@ int smb2_query_dir(struct ksmbd_work *work)
+ */
+ if (!d_info.out_buf_len && !d_info.num_entry)
+ goto no_buf_len;
+- if (rc == 0)
+- restart_ctx(&dir_fp->readdir_data.ctx);
+- if (rc == -ENOSPC)
++ if (rc > 0 || rc == -ENOSPC)
+ rc = 0;
+- if (rc)
++ else if (rc)
+ goto err_out;
+
+ d_info.wptr = d_info.rptr;
+@@ -4029,6 +4021,8 @@ int smb2_query_dir(struct ksmbd_work *work)
+ rsp->hdr.Status = STATUS_NO_MEMORY;
+ else if (rc == -EFAULT)
+ rsp->hdr.Status = STATUS_INVALID_INFO_CLASS;
++ else if (rc == -EIO)
++ rsp->hdr.Status = STATUS_FILE_CORRUPT_ERROR;
+ if (!rsp->hdr.Status)
+ rsp->hdr.Status = STATUS_UNEXPECTED_IO_ERROR;
+
+--
+2.35.1
+
--- /dev/null
+From 8f9ac24503b16997fb4074618756ca6e150ac530 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 16 May 2022 16:22:09 +0900
+Subject: ksmbd: handle smb2 query dir request for OutputBufferLength that is
+ too small
+
+From: Namjae Jeon <linkinjeon@kernel.org>
+
+[ Upstream commit 65ca7a3ffff811d6c0d4342d467c381257d566d4 ]
+
+We found an issue where ksmbd returns a STATUS_NO_MORE_FILES response
+even though there are still dentries that need to be read, during
+file read/write tests using the framtest utilities.
+The Windows client sends an smb2 query dir request with an
+OutputBufferLength (128) that is too small to contain even one entry.
+This patch makes ksmbd immediately return a response with an
+OutputBufferLength of zero to the client.
+
+Signed-off-by: Namjae Jeon <linkinjeon@kernel.org>
+Reviewed-by: Hyunchul Lee <hyc.lee@gmail.com>
+Signed-off-by: Steve French <stfrench@microsoft.com>
+Stable-dep-of: 88541cb414b7 ("ksmbd: fix incorrect handling of iterate_dir")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/ksmbd/smb2pdu.c | 10 +++++++++-
+ 1 file changed, 9 insertions(+), 1 deletion(-)
+
+diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
+index bec9a84572c0..ef0aef78eba6 100644
+--- a/fs/ksmbd/smb2pdu.c
++++ b/fs/ksmbd/smb2pdu.c
+@@ -3962,6 +3962,12 @@ int smb2_query_dir(struct ksmbd_work *work)
+ set_ctx_actor(&dir_fp->readdir_data.ctx, __query_dir);
+
+ rc = iterate_dir(dir_fp->filp, &dir_fp->readdir_data.ctx);
++ /*
++ * req->OutputBufferLength is too small to contain even one entry.
++ * In this case, it immediately returns OutputBufferLength 0 to client.
++ */
++ if (!d_info.out_buf_len && !d_info.num_entry)
++ goto no_buf_len;
+ if (rc == 0)
+ restart_ctx(&dir_fp->readdir_data.ctx);
+ if (rc == -ENOSPC)
+@@ -3988,10 +3994,12 @@ int smb2_query_dir(struct ksmbd_work *work)
+ rsp->Buffer[0] = 0;
+ inc_rfc1001_len(rsp_org, 9);
+ } else {
++no_buf_len:
+ ((struct file_directory_info *)
+ ((char *)rsp->Buffer + d_info.last_entry_offset))
+ ->NextEntryOffset = 0;
+- d_info.data_count -= d_info.last_entry_off_align;
++ if (d_info.data_count >= d_info.last_entry_off_align)
++ d_info.data_count -= d_info.last_entry_off_align;
+
+ rsp->StructureSize = cpu_to_le16(9);
+ rsp->OutputBufferOffset = cpu_to_le16(72);
+--
+2.35.1
+
--- /dev/null
+From e217fcafe46e9a283ca9deb43e4beeb707d4870a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Sep 2022 12:57:44 +0300
+Subject: mmc: core: Add SD card quirk for broken discard
+
+From: Avri Altman <avri.altman@wdc.com>
+
+[ Upstream commit 07d2872bf4c864eb83d034263c155746a2fb7a3b ]
+
+Some SD cards from SanDisk that are SDA-6.0 compliant report that they
+support discard, while they actually don't. This might cause mke2fs to
+fail while trying to format the card and revert it to a read-only mode.
+
+To fix this problem, let's add a card quirk (MMC_QUIRK_BROKEN_SD_DISCARD)
+to indicate that we shall fall back to using the legacy erase command
+instead.
+
+Signed-off-by: Avri Altman <avri.altman@wdc.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220928095744.16455-1-avri.altman@wdc.com
+[Ulf: Updated the commit message]
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/core/block.c | 6 +++++-
+ drivers/mmc/core/card.h | 6 ++++++
+ drivers/mmc/core/quirks.h | 6 ++++++
+ include/linux/mmc/card.h | 1 +
+ 4 files changed, 18 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index e3af3e3224e4..6024faa6f745 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -1138,8 +1138,12 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+ {
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = md->queue.card;
++ unsigned int arg = card->erase_arg;
+
+- mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, card->erase_arg);
++ if (mmc_card_broken_sd_discard(card))
++ arg = SD_ERASE_ARG;
++
++ mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, arg);
+ }
+
+ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+diff --git a/drivers/mmc/core/card.h b/drivers/mmc/core/card.h
+index 7bd392d55cfa..5c6986131faf 100644
+--- a/drivers/mmc/core/card.h
++++ b/drivers/mmc/core/card.h
+@@ -70,6 +70,7 @@ struct mmc_fixup {
+ #define EXT_CSD_REV_ANY (-1u)
+
+ #define CID_MANFID_SANDISK 0x2
++#define CID_MANFID_SANDISK_SD 0x3
+ #define CID_MANFID_ATP 0x9
+ #define CID_MANFID_TOSHIBA 0x11
+ #define CID_MANFID_MICRON 0x13
+@@ -222,4 +223,9 @@ static inline int mmc_card_broken_hpi(const struct mmc_card *c)
+ return c->quirks & MMC_QUIRK_BROKEN_HPI;
+ }
+
++static inline int mmc_card_broken_sd_discard(const struct mmc_card *c)
++{
++ return c->quirks & MMC_QUIRK_BROKEN_SD_DISCARD;
++}
++
+ #endif
+diff --git a/drivers/mmc/core/quirks.h b/drivers/mmc/core/quirks.h
+index d68e6e513a4f..c8c0f50a2076 100644
+--- a/drivers/mmc/core/quirks.h
++++ b/drivers/mmc/core/quirks.h
+@@ -99,6 +99,12 @@ static const struct mmc_fixup __maybe_unused mmc_blk_fixups[] = {
+ MMC_FIXUP("V10016", CID_MANFID_KINGSTON, CID_OEMID_ANY, add_quirk_mmc,
+ MMC_QUIRK_TRIM_BROKEN),
+
++ /*
++ * Some SD cards reports discard support while they don't
++ */
++ MMC_FIXUP(CID_NAME_ANY, CID_MANFID_SANDISK_SD, 0x5344, add_quirk_sd,
++ MMC_QUIRK_BROKEN_SD_DISCARD),
++
+ END_FIXUP
+ };
+
+diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
+index 37f975875102..12c7f2d3e210 100644
+--- a/include/linux/mmc/card.h
++++ b/include/linux/mmc/card.h
+@@ -292,6 +292,7 @@ struct mmc_card {
+ #define MMC_QUIRK_BROKEN_IRQ_POLLING (1<<11) /* Polling SDIO_CCCR_INTx could create a fake interrupt */
+ #define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
+ #define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
++#define MMC_QUIRK_BROKEN_SD_DISCARD (1<<14) /* Disable broken SD discard support */
+
+ bool reenable_cmdq; /* Re-enable Command Queue */
+
+--
+2.35.1
+
--- /dev/null
+From d3cc702e4a9f80cf1ae41399593d843d4c509e08 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 29 Apr 2022 17:21:18 +0200
+Subject: mmc: core: Support zeroout using TRIM for eMMC
+
+From: Vincent Whitchurch <vincent.whitchurch@axis.com>
+
+[ Upstream commit f7b6fc327327698924ef3afa0c3e87a5b7466af3 ]
+
+If an eMMC card supports TRIM and indicates that it erases to zeros, we can
+use it to support hardware offloading of REQ_OP_WRITE_ZEROES, so let's add
+support for this.
+
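+Condensed, the two pieces below combine as follows: the queue advertises
+the offload only for cards that TRIM to zeros, and such requests are
+then served by a TRIM erase (an abridged sketch of the hunks below):
+
+	/* queue setup: advertise REQ_OP_WRITE_ZEROES support */
+	if (mmc_can_trim(card) && card->erased_byte == 0)
+		blk_queue_max_write_zeroes_sectors(q, max_discard);
+
+	/* request dispatch: serve the request with a TRIM */
+	switch (req_op(req)) {
+	case REQ_OP_WRITE_ZEROES:
+		mmc_blk_issue_trim_rq(mq, req);
+		break;
+	}
+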
+Signed-off-by: Vincent Whitchurch <vincent.whitchurch@axis.com>
+Reviewed-by: Avri Altman <Avri.Altman@wdc.com>
+Link: https://lore.kernel.org/r/20220429152118.3617303-1-vincent.whitchurch@axis.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Stable-dep-of: 07d2872bf4c8 ("mmc: core: Add SD card quirk for broken discard")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/core/block.c | 26 ++++++++++++++++++++++----
+ drivers/mmc/core/queue.c | 2 ++
+ 2 files changed, 24 insertions(+), 4 deletions(-)
+
+diff --git a/drivers/mmc/core/block.c b/drivers/mmc/core/block.c
+index 3222a9d0c245..e3af3e3224e4 100644
+--- a/drivers/mmc/core/block.c
++++ b/drivers/mmc/core/block.c
+@@ -126,6 +126,7 @@ struct mmc_blk_data {
+ #define MMC_BLK_DISCARD BIT(2)
+ #define MMC_BLK_SECDISCARD BIT(3)
+ #define MMC_BLK_CQE_RECOVERY BIT(4)
++#define MMC_BLK_TRIM BIT(5)
+
+ /*
+ * Only set in main mmc_blk_data associated
+@@ -1090,12 +1091,13 @@ static void mmc_blk_issue_drv_op(struct mmc_queue *mq, struct request *req)
+ blk_mq_end_request(req, ret ? BLK_STS_IOERR : BLK_STS_OK);
+ }
+
+-static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
++static void mmc_blk_issue_erase_rq(struct mmc_queue *mq, struct request *req,
++ int type, unsigned int erase_arg)
+ {
+ struct mmc_blk_data *md = mq->blkdata;
+ struct mmc_card *card = md->queue.card;
+ unsigned int from, nr;
+- int err = 0, type = MMC_BLK_DISCARD;
++ int err = 0;
+ blk_status_t status = BLK_STS_OK;
+
+ if (!mmc_can_erase(card)) {
+@@ -1111,13 +1113,13 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+ if (card->quirks & MMC_QUIRK_INAND_CMD38) {
+ err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+ INAND_CMD38_ARG_EXT_CSD,
+- card->erase_arg == MMC_TRIM_ARG ?
++ erase_arg == MMC_TRIM_ARG ?
+ INAND_CMD38_ARG_TRIM :
+ INAND_CMD38_ARG_ERASE,
+ card->ext_csd.generic_cmd6_time);
+ }
+ if (!err)
+- err = mmc_erase(card, from, nr, card->erase_arg);
++ err = mmc_erase(card, from, nr, erase_arg);
+ } while (err == -EIO && !mmc_blk_reset(md, card->host, type));
+ if (err)
+ status = BLK_STS_IOERR;
+@@ -1127,6 +1129,19 @@ static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
+ blk_mq_end_request(req, status);
+ }
+
++static void mmc_blk_issue_trim_rq(struct mmc_queue *mq, struct request *req)
++{
++ mmc_blk_issue_erase_rq(mq, req, MMC_BLK_TRIM, MMC_TRIM_ARG);
++}
++
++static void mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
++{
++ struct mmc_blk_data *md = mq->blkdata;
++ struct mmc_card *card = md->queue.card;
++
++ mmc_blk_issue_erase_rq(mq, req, MMC_BLK_DISCARD, card->erase_arg);
++}
++
+ static void mmc_blk_issue_secdiscard_rq(struct mmc_queue *mq,
+ struct request *req)
+ {
+@@ -2320,6 +2335,9 @@ enum mmc_issued mmc_blk_mq_issue_rq(struct mmc_queue *mq, struct request *req)
+ case REQ_OP_SECURE_ERASE:
+ mmc_blk_issue_secdiscard_rq(mq, req);
+ break;
++ case REQ_OP_WRITE_ZEROES:
++ mmc_blk_issue_trim_rq(mq, req);
++ break;
+ case REQ_OP_FLUSH:
+ mmc_blk_issue_flush(mq, req);
+ break;
+diff --git a/drivers/mmc/core/queue.c b/drivers/mmc/core/queue.c
+index b15c034b42fb..a72431703e52 100644
+--- a/drivers/mmc/core/queue.c
++++ b/drivers/mmc/core/queue.c
+@@ -191,6 +191,8 @@ static void mmc_queue_setup_discard(struct request_queue *q,
+ q->limits.discard_granularity = SECTOR_SIZE;
+ if (mmc_can_secure_erase_trim(card))
+ blk_queue_flag_set(QUEUE_FLAG_SECERASE, q);
++ if (mmc_can_trim(card) && card->erased_byte == 0)
++ blk_queue_max_write_zeroes_sectors(q, max_discard);
+ }
+
+ static unsigned short mmc_get_max_segments(struct mmc_host *host)
+--
+2.35.1
+
--- /dev/null
+From 9efcd5280011fe9890bf23cb2da6fb86100bbca7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 6 Oct 2022 18:36:22 +0530
+Subject: mmc: sdhci-tegra: Use actual clock rate for SW tuning correction
+
+From: Prathamesh Shete <pshete@nvidia.com>
+
+[ Upstream commit b78870e7f41534cc719c295d1f8809aca93aeeab ]
+
+Ensure the tegra_host member "curr_clk_rate" holds the actual clock rate
+instead of the requested clock rate, for proper use by the tuning
+correction algorithm. The actual clock rate may not be the same as the
+requested frequency, depending on the parent clock source selected. The
+tuning correction algorithm depends on certain parameters which are
+sensitive to the current clock rate. If the requested host clock rate is
+recorded instead of the actual rate, the tuning correction algorithm may
+end up applying an invalid correction, which could result in errors.
+
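+This is the usual common clock framework caveat: clk_set_rate() may
+round the request to whatever the selected parent can provide, and only
+clk_get_rate() reports the rate that was actually programmed. A minimal
+sketch:
+
+	unsigned long rate;
+
+	clk_set_rate(pltfm_host->clk, host_clk);  /* requested rate */
+	rate = clk_get_rate(pltfm_host->clk);     /* rate actually set */
+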
+Fixes: ea8fc5953e8b ("mmc: tegra: update hw tuning process")
+Signed-off-by: Aniruddha TVS Rao <anrao@nvidia.com>
+Signed-off-by: Prathamesh Shete <pshete@nvidia.com>
+Acked-by: Adrian Hunter <adrian.hunter@intel.com>
+Acked-by: Thierry Reding <treding@nvidia.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20221006130622.22900-4-pshete@nvidia.com
+Signed-off-by: Ulf Hansson <ulf.hansson@linaro.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/mmc/host/sdhci-tegra.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
+index 9762ffab2e23..829a8bf7c77d 100644
+--- a/drivers/mmc/host/sdhci-tegra.c
++++ b/drivers/mmc/host/sdhci-tegra.c
+@@ -762,7 +762,7 @@ static void tegra_sdhci_set_clock(struct sdhci_host *host, unsigned int clock)
+ */
+ host_clk = tegra_host->ddr_signaling ? clock * 2 : clock;
+ clk_set_rate(pltfm_host->clk, host_clk);
+- tegra_host->curr_clk_rate = host_clk;
++ tegra_host->curr_clk_rate = clk_get_rate(pltfm_host->clk);
+ if (tegra_host->ddr_signaling)
+ host->max_clk = host_clk;
+ else
+--
+2.35.1
+
--- /dev/null
+From 894f732bca7d372ec53eb3b3767bdd6fd337ae38 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Oct 2022 10:05:40 +0800
+Subject: net/atm: fix proc_mpc_write incorrect return value
+
+From: Xiaobo Liu <cppcoffee@gmail.com>
+
+[ Upstream commit d8bde3bf7f82dac5fc68a62c2816793a12cafa2a ]
+
+When the input contains '\0' or '\n', proc_mpc_write has already read
+that byte, so the return value needs to count it as well. For example,
+with the two-byte input "x\n" both bytes are consumed, but the old code
+returned len == 1; with the fix it returns 2.
+
+Fixes: 1da177e4c3f4 ("Linux-2.6.12-rc2")
+Signed-off-by: Xiaobo Liu <cppcoffee@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/atm/mpoa_proc.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/atm/mpoa_proc.c b/net/atm/mpoa_proc.c
+index 829db9eba0cb..aaf64b953915 100644
+--- a/net/atm/mpoa_proc.c
++++ b/net/atm/mpoa_proc.c
+@@ -219,11 +219,12 @@ static ssize_t proc_mpc_write(struct file *file, const char __user *buff,
+ if (!page)
+ return -ENOMEM;
+
+- for (p = page, len = 0; len < nbytes; p++, len++) {
++ for (p = page, len = 0; len < nbytes; p++) {
+ if (get_user(*p, buff++)) {
+ free_page((unsigned long)page);
+ return -EFAULT;
+ }
++ len += 1;
+ if (*p == '\0' || *p == '\n')
+ break;
+ }
+--
+2.35.1
+
--- /dev/null
+From 406e1d9f886ec5664e0185f758a8b38c8dd9b1b3 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Oct 2022 20:24:51 +0800
+Subject: net: hns: fix possible memory leak in hnae_ae_register()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+[ Upstream commit ff2f5ec5d009844ec28f171123f9e58750cef4bf ]
+
+When injecting a fault while probing the module, if device_register()
+fails, the refcount of the kobject is not decreased to 0, so the name
+allocated in dev_set_name() is leaked. Fix this by calling put_device(),
+so that the name can be freed in the callback function
+kobject_cleanup().
+
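+This is the pattern device_register() documents for its callers: on
+failure the embedded kobject still holds a reference, so it must be
+dropped with put_device() instead of freeing the device directly
+(a sketch of the fixed path):
+
+	ret = device_register(&hdev->cls_dev);
+	if (ret) {
+		/* releases the dev_set_name() string via kobject_cleanup() */
+		put_device(&hdev->cls_dev);
+		return ret;
+	}
+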
+unreferenced object 0xffff00c01aba2100 (size 128):
+ comm "systemd-udevd", pid 1259, jiffies 4294903284 (age 294.152s)
+ hex dump (first 32 bytes):
+ 68 6e 61 65 30 00 00 00 18 21 ba 1a c0 00 ff ff hnae0....!......
+ 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
+ backtrace:
+ [<0000000034783f26>] slab_post_alloc_hook+0xa0/0x3e0
+ [<00000000748188f2>] __kmem_cache_alloc_node+0x164/0x2b0
+ [<00000000ab0743e8>] __kmalloc_node_track_caller+0x6c/0x390
+ [<000000006c0ffb13>] kvasprintf+0x8c/0x118
+ [<00000000fa27bfe1>] kvasprintf_const+0x60/0xc8
+ [<0000000083e10ed7>] kobject_set_name_vargs+0x3c/0xc0
+ [<000000000b87affc>] dev_set_name+0x7c/0xa0
+ [<000000003fd8fe26>] hnae_ae_register+0xcc/0x190 [hnae]
+ [<00000000fe97edc9>] hns_dsaf_ae_init+0x9c/0x108 [hns_dsaf]
+ [<00000000c36ff1eb>] hns_dsaf_probe+0x548/0x748 [hns_dsaf]
+
+Fixes: 6fe6611ff275 ("net: add Hisilicon Network Subsystem hnae framework support")
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Reviewed-by: Leon Romanovsky <leonro@nvidia.com>
+Link: https://lore.kernel.org/r/20221018122451.1749171-1-yangyingliang@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/hisilicon/hns/hnae.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
+index 00fafc0f8512..430eccea8e5e 100644
+--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
++++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
+@@ -419,8 +419,10 @@ int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
+ hdev->cls_dev.release = hnae_release;
+ (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
+ ret = device_register(&hdev->cls_dev);
+- if (ret)
++ if (ret) {
++ put_device(&hdev->cls_dev);
+ return ret;
++ }
+
+ __module_get(THIS_MODULE);
+
+--
+2.35.1
+
--- /dev/null
+From 66e54926acc6e461e260398e027a403d8d33a71e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Oct 2022 16:59:28 +0000
+Subject: net: hsr: avoid possible NULL deref in skb_clone()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit d8b57135fd9ffe9a5b445350a686442a531c5339 ]
+
+syzbot got a crash [1] in skb_clone(), caused by a bug
+in hsr_get_untagged_frame().
+
+When/if create_stripped_skb_hsr() returns NULL, we must
+not attempt to call skb_clone().
+
+While we are at it, replace a WARN_ONCE() by netdev_warn_once().
+
+[1]
+general protection fault, probably for non-canonical address 0xdffffc000000000f: 0000 [#1] PREEMPT SMP KASAN
+KASAN: null-ptr-deref in range [0x0000000000000078-0x000000000000007f]
+CPU: 1 PID: 754 Comm: syz-executor.0 Not tainted 6.0.0-syzkaller-02734-g0326074ff465 #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 09/22/2022
+RIP: 0010:skb_clone+0x108/0x3c0 net/core/skbuff.c:1641
+Code: 93 02 00 00 49 83 7c 24 28 00 0f 85 e9 00 00 00 e8 5d 4a 29 fa 4c 8d 75 7e 48 b8 00 00 00 00 00 fc ff df 4c 89 f2 48 c1 ea 03 <0f> b6 04 02 4c 89 f2 83 e2 07 38 d0 7f 08 84 c0 0f 85 9e 01 00 00
+RSP: 0018:ffffc90003ccf4e0 EFLAGS: 00010207
+
+RAX: dffffc0000000000 RBX: ffffc90003ccf5f8 RCX: ffffc9000c24b000
+RDX: 000000000000000f RSI: ffffffff8751cb13 RDI: 0000000000000000
+RBP: 0000000000000000 R08: 00000000000000f0 R09: 0000000000000140
+R10: fffffbfff181d972 R11: 0000000000000000 R12: ffff888161fc3640
+R13: 0000000000000a20 R14: 000000000000007e R15: ffffffff8dc5f620
+FS: 00007feb621e4700(0000) GS:ffff8880b9b00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 00007feb621e3ff8 CR3: 00000001643a9000 CR4: 00000000003506e0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+<TASK>
+hsr_get_untagged_frame+0x4e/0x610 net/hsr/hsr_forward.c:164
+hsr_forward_do net/hsr/hsr_forward.c:461 [inline]
+hsr_forward_skb+0xcca/0x1d50 net/hsr/hsr_forward.c:623
+hsr_handle_frame+0x588/0x7c0 net/hsr/hsr_slave.c:69
+__netif_receive_skb_core+0x9fe/0x38f0 net/core/dev.c:5379
+__netif_receive_skb_one_core+0xae/0x180 net/core/dev.c:5483
+__netif_receive_skb+0x1f/0x1c0 net/core/dev.c:5599
+netif_receive_skb_internal net/core/dev.c:5685 [inline]
+netif_receive_skb+0x12f/0x8d0 net/core/dev.c:5744
+tun_rx_batched+0x4ab/0x7a0 drivers/net/tun.c:1544
+tun_get_user+0x2686/0x3a00 drivers/net/tun.c:1995
+tun_chr_write_iter+0xdb/0x200 drivers/net/tun.c:2025
+call_write_iter include/linux/fs.h:2187 [inline]
+new_sync_write fs/read_write.c:491 [inline]
+vfs_write+0x9e9/0xdd0 fs/read_write.c:584
+ksys_write+0x127/0x250 fs/read_write.c:637
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Fixes: f266a683a480 ("net/hsr: Better frame dispatch")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20221017165928.2150130-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/hsr/hsr_forward.c | 12 ++++++------
+ 1 file changed, 6 insertions(+), 6 deletions(-)
+
+diff --git a/net/hsr/hsr_forward.c b/net/hsr/hsr_forward.c
+index ceb8afb2a62f..13f81c246f5f 100644
+--- a/net/hsr/hsr_forward.c
++++ b/net/hsr/hsr_forward.c
+@@ -108,15 +108,15 @@ struct sk_buff *hsr_get_untagged_frame(struct hsr_frame_info *frame,
+ struct hsr_port *port)
+ {
+ if (!frame->skb_std) {
+- if (frame->skb_hsr) {
++ if (frame->skb_hsr)
+ frame->skb_std =
+ create_stripped_skb_hsr(frame->skb_hsr, frame);
+- } else {
+- /* Unexpected */
+- WARN_ONCE(1, "%s:%d: Unexpected frame received (port_src %s)\n",
+- __FILE__, __LINE__, port->dev->name);
++ else
++ netdev_warn_once(port->dev,
++ "Unexpected frame received in hsr_get_untagged_frame()\n");
++
++ if (!frame->skb_std)
+ return NULL;
+- }
+ }
+
+ return skb_clone(frame->skb_std, GFP_ATOMIC);
+--
+2.35.1
+
--- /dev/null
+From 8d5639cc940bdb3a2f4427821113ff1a34b8aa65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Oct 2022 12:47:54 +0200
+Subject: net: phy: dp83822: disable MDI crossover status change interrupt
+
+From: Felix Riemann <felix.riemann@sma.de>
+
+[ Upstream commit 7f378c03aa4952507521174fb0da7b24a9ad0be6 ]
+
+If the cable is disconnected the PHY seems to toggle between MDI and
+MDI-X modes. With the MDI crossover status interrupt active this causes
+roughly 10 interrupts per second.
+
+As the crossover status isn't checked by the driver, the interrupt can
+be disabled to reduce the interrupt load.
+
+Fixes: 87461f7a58ab ("net: phy: DP83822 initial driver submission")
+Signed-off-by: Felix Riemann <felix.riemann@sma.de>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Link: https://lore.kernel.org/r/20221018104755.30025-1-svc.sw.rte.linux@sma.de
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/dp83822.c | 3 +--
+ 1 file changed, 1 insertion(+), 2 deletions(-)
+
+diff --git a/drivers/net/phy/dp83822.c b/drivers/net/phy/dp83822.c
+index a792dd6d2ec3..0b511abb5422 100644
+--- a/drivers/net/phy/dp83822.c
++++ b/drivers/net/phy/dp83822.c
+@@ -253,8 +253,7 @@ static int dp83822_config_intr(struct phy_device *phydev)
+ DP83822_EEE_ERROR_CHANGE_INT_EN);
+
+ if (!dp83822->fx_enabled)
+- misr_status |= DP83822_MDI_XOVER_INT_EN |
+- DP83822_ANEG_ERR_INT_EN |
++ misr_status |= DP83822_ANEG_ERR_INT_EN |
+ DP83822_WOL_PKT_INT_EN;
+
+ err = phy_write(phydev, MII_DP83822_MISR2, misr_status);
+--
+2.35.1
+
--- /dev/null
+From de44dd4e6b9bd34173aaad72e63748ea3104321d Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Oct 2022 12:17:35 +0530
+Subject: net: phy: dp83867: Extend RX strap quirk for SGMII mode
+
+From: Harini Katakam <harini.katakam@amd.com>
+
+[ Upstream commit 0c9efbd5c50c64ead434960a404c9c9a097b0403 ]
+
+When the RX strap in HW is not set to mode 3 or 4, bits 7 and 8 in the
+CFG4 register should be set. The former is already handled in
+dp83867_config_init(); add the latter in the SGMII-specific
+initialization.
+
+Fixes: 2a10154abcb7 ("net: phy: dp83867: Add TI dp83867 phy")
+Signed-off-by: Harini Katakam <harini.katakam@amd.com>
+Reviewed-by: Andrew Lunn <andrew@lunn.ch>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/dp83867.c | 8 ++++++++
+ 1 file changed, 8 insertions(+)
+
+diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
+index d097097c93c3..783e30451e30 100644
+--- a/drivers/net/phy/dp83867.c
++++ b/drivers/net/phy/dp83867.c
+@@ -791,6 +791,14 @@ static int dp83867_config_init(struct phy_device *phydev)
+ else
+ val &= ~DP83867_SGMII_TYPE;
+ phy_write_mmd(phydev, DP83867_DEVADDR, DP83867_SGMIICTL, val);
++
++ /* This is a SW workaround for link instability if RX_CTRL is
++ * not strapped to mode 3 or 4 in HW. This is required for SGMII
++ * in addition to clearing bit 7, handled above.
++ */
++ if (dp83867->rxctrl_strap_quirk)
++ phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4,
++ BIT(8));
+ }
+
+ val = phy_read(phydev, DP83867_CFG3);
+--
+2.35.1
+
--- /dev/null
+From 5cc2be6e11ab32ec72117dd2ba57fb848a5e6225 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Oct 2022 09:47:28 -0500
+Subject: net: phylink: add mac_managed_pm in phylink_config structure
+
+From: Shenwei Wang <shenwei.wang@nxp.com>
+
+[ Upstream commit 96de900ae78e7dbedc937fd91bafe2934579c65a ]
+
+The recent commit
+
+'commit 744d23c71af3 ("net: phy: Warn about incorrect
+mdio_bus_phy_resume() state")'
+
+requires the MAC driver to explicitly tell the PHY driver which side is
+managing the PM, otherwise a warning is printed during the resume
+stage.
+
+Add a boolean property in the phylink_config structure so that
+the MAC driver can use it to tell the PHY driver if it wants to
+manage the PM.
+
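+A MAC driver would opt in roughly as below before creating its phylink
+instance (a sketch; priv, netdev, fwnode, phy_interface and
+mac_phylink_ops are hypothetical names):
+
+	priv->phylink_config.dev = &netdev->dev;
+	priv->phylink_config.type = PHYLINK_NETDEV;
+	priv->phylink_config.mac_managed_pm = true;  /* MAC handles PHY PM */
+
+	pl = phylink_create(&priv->phylink_config, fwnode,
+			    phy_interface, &mac_phylink_ops);
+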
+Fixes: 744d23c71af3 ("net: phy: Warn about incorrect mdio_bus_phy_resume() state")
+Signed-off-by: Shenwei Wang <shenwei.wang@nxp.com>
+Acked-by: Florian Fainelli <f.fainelli@gmail.com>
+Reviewed-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/phy/phylink.c | 3 +++
+ include/linux/phylink.h | 2 ++
+ 2 files changed, 5 insertions(+)
+
+diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
+index fef1416dcee4..7afcf6310d59 100644
+--- a/drivers/net/phy/phylink.c
++++ b/drivers/net/phy/phylink.c
+@@ -1050,6 +1050,9 @@ static int phylink_bringup_phy(struct phylink *pl, struct phy_device *phy,
+ if (phy_interrupt_is_valid(phy))
+ phy_request_interrupt(phy);
+
++ if (pl->config->mac_managed_pm)
++ phy->mac_managed_pm = true;
++
+ return 0;
+ }
+
+diff --git a/include/linux/phylink.h b/include/linux/phylink.h
+index 237291196ce2..b306159c1fad 100644
+--- a/include/linux/phylink.h
++++ b/include/linux/phylink.h
+@@ -64,6 +64,7 @@ enum phylink_op_type {
+ * @pcs_poll: MAC PCS cannot provide link change interrupt
+ * @poll_fixed_state: if true, starts link_poll,
+ * if MAC link is at %MLO_AN_FIXED mode.
++ * @mac_managed_pm: if true, indicate the MAC driver is responsible for PHY PM.
+ * @ovr_an_inband: if true, override PCS to MLO_AN_INBAND
+ * @get_fixed_state: callback to execute to determine the fixed link state,
+ * if MAC link is at %MLO_AN_FIXED mode.
+@@ -73,6 +74,7 @@ struct phylink_config {
+ enum phylink_op_type type;
+ bool pcs_poll;
+ bool poll_fixed_state;
++ bool mac_managed_pm;
+ bool ovr_an_inband;
+ void (*get_fixed_state)(struct phylink_config *config,
+ struct phylink_link_state *state);
+--
+2.35.1
+
--- /dev/null
+From 9ab1e97c75a564d485b601c70e0aca212ac9e56b Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Oct 2022 14:31:59 +0800
+Subject: net: sched: cake: fix null pointer access issue when cake_init()
+ fails
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Zhengchao Shao <shaozhengchao@huawei.com>
+
+[ Upstream commit 51f9a8921ceacd7bf0d3f47fa867a64988ba1dcb ]
+
+When the default qdisc is cake, if the qdisc of a dev_queue fails to be
+initialized during mqprio_init(), cake_reset() is invoked to clean up
+resources. In this case q->tins is NULL, which causes a general
+protection fault.
+
+The process is as follows:
+qdisc_create_dflt()
+ cake_init()
+ q->tins = kvcalloc(...) --->failed, q->tins is NULL
+ ...
+ qdisc_put()
+ ...
+ cake_reset()
+ ...
+ cake_dequeue_one()
+ b = &q->tins[...] --->q->tins is NULL
+
+The following is the Call Trace information:
+general protection fault, probably for non-canonical address
+0xdffffc0000000000: 0000 [#1] PREEMPT SMP KASAN
+KASAN: null-ptr-deref in range [0x0000000000000000-0x0000000000000007]
+RIP: 0010:cake_dequeue_one+0xc9/0x3c0
+Call Trace:
+<TASK>
+cake_reset+0xb1/0x140
+qdisc_reset+0xed/0x6f0
+qdisc_destroy+0x82/0x4c0
+qdisc_put+0x9e/0xb0
+qdisc_create_dflt+0x2c3/0x4a0
+mqprio_init+0xa71/0x1760
+qdisc_create+0x3eb/0x1000
+tc_modify_qdisc+0x408/0x1720
+rtnetlink_rcv_msg+0x38e/0xac0
+netlink_rcv_skb+0x12d/0x3a0
+netlink_unicast+0x4a2/0x740
+netlink_sendmsg+0x826/0xcc0
+sock_sendmsg+0xc5/0x100
+____sys_sendmsg+0x583/0x690
+___sys_sendmsg+0xe8/0x160
+__sys_sendmsg+0xbf/0x160
+do_syscall_64+0x35/0x80
+entry_SYSCALL_64_after_hwframe+0x46/0xb0
+RIP: 0033:0x7f89e5122d04
+</TASK>
+
+Fixes: 046f6fd5daef ("sched: Add Common Applications Kept Enhanced (cake) qdisc")
+Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
+Acked-by: Toke Høiland-Jørgensen <toke@toke.dk>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_cake.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/net/sched/sch_cake.c b/net/sched/sch_cake.c
+index 857aaebd49f4..6f6e74ce927f 100644
+--- a/net/sched/sch_cake.c
++++ b/net/sched/sch_cake.c
+@@ -2224,8 +2224,12 @@ static struct sk_buff *cake_dequeue(struct Qdisc *sch)
+
+ static void cake_reset(struct Qdisc *sch)
+ {
++ struct cake_sched_data *q = qdisc_priv(sch);
+ u32 c;
+
++ if (!q->tins)
++ return;
++
+ for (c = 0; c < CAKE_MAX_TINS; c++)
+ cake_clear_tin(sch, c);
+ }
+--
+2.35.1
+
--- /dev/null
+From 23431e46ce9a0e78055ac4461dec27feb7c7bcbc Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 24 Aug 2022 08:52:31 +0800
+Subject: net: sched: delete duplicate cleanup of backlog and qlen
+
+From: Zhengchao Shao <shaozhengchao@huawei.com>
+
+[ Upstream commit c19d893fbf3f2f8fa864ae39652c7fee939edde2 ]
+
+qdisc_reset() is clearing qdisc->q.qlen and qdisc->qstats.backlog
+_after_ calling qdisc->ops->reset. There is no need to clear them
+again in the specific reset function.
+
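+For reference, the core clears the counters itself right after the
+per-qdisc callback (abridged from qdisc_reset() in
+net/sched/sch_generic.c):
+
+	if (ops->reset)
+		ops->reset(qdisc);	/* per-qdisc cleanup runs first */
+
+	qdisc->q.qlen = 0;		/* then the core clears both counters */
+	qdisc->qstats.backlog = 0;
+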
+Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
+Link: https://lore.kernel.org/r/20220824005231.345727-1-shaozhengchao@huawei.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Stable-dep-of: 2a3fc78210b9 ("net: sched: sfb: fix null pointer access issue when sfb_init() fails")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sch_generic.h | 1 -
+ net/sched/sch_atm.c | 1 -
+ net/sched/sch_cbq.c | 1 -
+ net/sched/sch_choke.c | 2 --
+ net/sched/sch_drr.c | 2 --
+ net/sched/sch_dsmark.c | 2 --
+ net/sched/sch_etf.c | 3 ---
+ net/sched/sch_ets.c | 2 --
+ net/sched/sch_fq_codel.c | 2 --
+ net/sched/sch_fq_pie.c | 3 ---
+ net/sched/sch_hfsc.c | 2 --
+ net/sched/sch_htb.c | 2 --
+ net/sched/sch_multiq.c | 1 -
+ net/sched/sch_prio.c | 2 --
+ net/sched/sch_qfq.c | 2 --
+ net/sched/sch_red.c | 2 --
+ net/sched/sch_sfb.c | 2 --
+ net/sched/sch_skbprio.c | 3 ---
+ net/sched/sch_taprio.c | 2 --
+ net/sched/sch_tbf.c | 2 --
+ net/sched/sch_teql.c | 1 -
+ 21 files changed, 40 deletions(-)
+
+diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
+index 1958d1260fe9..891b44d80c98 100644
+--- a/include/net/sch_generic.h
++++ b/include/net/sch_generic.h
+@@ -1177,7 +1177,6 @@ static inline void __qdisc_reset_queue(struct qdisc_skb_head *qh)
+ static inline void qdisc_reset_queue(struct Qdisc *sch)
+ {
+ __qdisc_reset_queue(&sch->q);
+- sch->qstats.backlog = 0;
+ }
+
+ static inline struct Qdisc *qdisc_replace(struct Qdisc *sch, struct Qdisc *new,
+diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
+index 7d8518176b45..70fe1c5e44ad 100644
+--- a/net/sched/sch_atm.c
++++ b/net/sched/sch_atm.c
+@@ -576,7 +576,6 @@ static void atm_tc_reset(struct Qdisc *sch)
+ pr_debug("atm_tc_reset(sch %p,[qdisc %p])\n", sch, p);
+ list_for_each_entry(flow, &p->flows, list)
+ qdisc_reset(flow->q);
+- sch->q.qlen = 0;
+ }
+
+ static void atm_tc_destroy(struct Qdisc *sch)
+diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c
+index e0da15530f0e..fd7e10567371 100644
+--- a/net/sched/sch_cbq.c
++++ b/net/sched/sch_cbq.c
+@@ -1053,7 +1053,6 @@ cbq_reset(struct Qdisc *sch)
+ cl->cpriority = cl->priority;
+ }
+ }
+- sch->q.qlen = 0;
+ }
+
+
+diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
+index 2adbd945bf15..25d2daaa8122 100644
+--- a/net/sched/sch_choke.c
++++ b/net/sched/sch_choke.c
+@@ -315,8 +315,6 @@ static void choke_reset(struct Qdisc *sch)
+ rtnl_qdisc_drop(skb, sch);
+ }
+
+- sch->q.qlen = 0;
+- sch->qstats.backlog = 0;
+ if (q->tab)
+ memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
+ q->head = q->tail = 0;
+diff --git a/net/sched/sch_drr.c b/net/sched/sch_drr.c
+index 642cd179b7a7..80a88e208d2b 100644
+--- a/net/sched/sch_drr.c
++++ b/net/sched/sch_drr.c
+@@ -444,8 +444,6 @@ static void drr_reset_qdisc(struct Qdisc *sch)
+ qdisc_reset(cl->qdisc);
+ }
+ }
+- sch->qstats.backlog = 0;
+- sch->q.qlen = 0;
+ }
+
+ static void drr_destroy_qdisc(struct Qdisc *sch)
+diff --git a/net/sched/sch_dsmark.c b/net/sched/sch_dsmark.c
+index 4c100d105269..7da6dc38a382 100644
+--- a/net/sched/sch_dsmark.c
++++ b/net/sched/sch_dsmark.c
+@@ -409,8 +409,6 @@ static void dsmark_reset(struct Qdisc *sch)
+ pr_debug("%s(sch %p,[qdisc %p])\n", __func__, sch, p);
+ if (p->q)
+ qdisc_reset(p->q);
+- sch->qstats.backlog = 0;
+- sch->q.qlen = 0;
+ }
+
+ static void dsmark_destroy(struct Qdisc *sch)
+diff --git a/net/sched/sch_etf.c b/net/sched/sch_etf.c
+index c48f91075b5c..d96103b0e2bf 100644
+--- a/net/sched/sch_etf.c
++++ b/net/sched/sch_etf.c
+@@ -445,9 +445,6 @@ static void etf_reset(struct Qdisc *sch)
+ timesortedlist_clear(sch);
+ __qdisc_reset_queue(&sch->q);
+
+- sch->qstats.backlog = 0;
+- sch->q.qlen = 0;
+-
+ q->last = 0;
+ }
+
+diff --git a/net/sched/sch_ets.c b/net/sched/sch_ets.c
+index 44fa2532a87c..175e07b3d25c 100644
+--- a/net/sched/sch_ets.c
++++ b/net/sched/sch_ets.c
+@@ -722,8 +722,6 @@ static void ets_qdisc_reset(struct Qdisc *sch)
+ }
+ for (band = 0; band < q->nbands; band++)
+ qdisc_reset(q->classes[band].qdisc);
+- sch->qstats.backlog = 0;
+- sch->q.qlen = 0;
+ }
+
+ static void ets_qdisc_destroy(struct Qdisc *sch)
+diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
+index bb0cd6d3d2c2..efda894bbb78 100644
+--- a/net/sched/sch_fq_codel.c
++++ b/net/sched/sch_fq_codel.c
+@@ -347,8 +347,6 @@ static void fq_codel_reset(struct Qdisc *sch)
+ codel_vars_init(&flow->cvars);
+ }
+ memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
+- sch->q.qlen = 0;
+- sch->qstats.backlog = 0;
+ q->memory_usage = 0;
+ }
+
+diff --git a/net/sched/sch_fq_pie.c b/net/sched/sch_fq_pie.c
+index d6aba6edd16e..35c35465226b 100644
+--- a/net/sched/sch_fq_pie.c
++++ b/net/sched/sch_fq_pie.c
+@@ -521,9 +521,6 @@ static void fq_pie_reset(struct Qdisc *sch)
+ INIT_LIST_HEAD(&flow->flowchain);
+ pie_vars_init(&flow->vars);
+ }
+-
+- sch->q.qlen = 0;
+- sch->qstats.backlog = 0;
+ }
+
+ static void fq_pie_destroy(struct Qdisc *sch)
+diff --git a/net/sched/sch_hfsc.c b/net/sched/sch_hfsc.c
+index b7ac30cca035..c802a027b4f3 100644
+--- a/net/sched/sch_hfsc.c
++++ b/net/sched/sch_hfsc.c
+@@ -1485,8 +1485,6 @@ hfsc_reset_qdisc(struct Qdisc *sch)
+ }
+ q->eligible = RB_ROOT;
+ qdisc_watchdog_cancel(&q->watchdog);
+- sch->qstats.backlog = 0;
+- sch->q.qlen = 0;
+ }
+
+ static void
+diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c
+index 5cbc32fee867..caabdaa2f30f 100644
+--- a/net/sched/sch_htb.c
++++ b/net/sched/sch_htb.c
+@@ -1008,8 +1008,6 @@ static void htb_reset(struct Qdisc *sch)
+ }
+ qdisc_watchdog_cancel(&q->watchdog);
+ __qdisc_reset_queue(&q->direct_queue);
+- sch->q.qlen = 0;
+- sch->qstats.backlog = 0;
+ memset(q->hlevel, 0, sizeof(q->hlevel));
+ memset(q->row_mask, 0, sizeof(q->row_mask));
+ }
+diff --git a/net/sched/sch_multiq.c b/net/sched/sch_multiq.c
+index e282e7382117..8b99f07aa3a7 100644
+--- a/net/sched/sch_multiq.c
++++ b/net/sched/sch_multiq.c
+@@ -152,7 +152,6 @@ multiq_reset(struct Qdisc *sch)
+
+ for (band = 0; band < q->bands; band++)
+ qdisc_reset(q->queues[band]);
+- sch->q.qlen = 0;
+ q->curband = 0;
+ }
+
+diff --git a/net/sched/sch_prio.c b/net/sched/sch_prio.c
+index 03fdf31ccb6a..2e0b1e7f5466 100644
+--- a/net/sched/sch_prio.c
++++ b/net/sched/sch_prio.c
+@@ -135,8 +135,6 @@ prio_reset(struct Qdisc *sch)
+
+ for (prio = 0; prio < q->bands; prio++)
+ qdisc_reset(q->queues[prio]);
+- sch->qstats.backlog = 0;
+- sch->q.qlen = 0;
+ }
+
+ static int prio_offload(struct Qdisc *sch, struct tc_prio_qopt *qopt)
+diff --git a/net/sched/sch_qfq.c b/net/sched/sch_qfq.c
+index aea435b0aeb3..50e51c1322fc 100644
+--- a/net/sched/sch_qfq.c
++++ b/net/sched/sch_qfq.c
+@@ -1459,8 +1459,6 @@ static void qfq_reset_qdisc(struct Qdisc *sch)
+ qdisc_reset(cl->qdisc);
+ }
+ }
+- sch->qstats.backlog = 0;
+- sch->q.qlen = 0;
+ }
+
+ static void qfq_destroy_qdisc(struct Qdisc *sch)
+diff --git a/net/sched/sch_red.c b/net/sched/sch_red.c
+index 40adf1f07a82..f1e013e3f04a 100644
+--- a/net/sched/sch_red.c
++++ b/net/sched/sch_red.c
+@@ -176,8 +176,6 @@ static void red_reset(struct Qdisc *sch)
+ struct red_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset(q->qdisc);
+- sch->qstats.backlog = 0;
+- sch->q.qlen = 0;
+ red_restart(&q->vars);
+ }
+
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index 2829455211f8..1be8d04d69dc 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -456,8 +456,6 @@ static void sfb_reset(struct Qdisc *sch)
+ struct sfb_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset(q->qdisc);
+- sch->qstats.backlog = 0;
+- sch->q.qlen = 0;
+ q->slot = 0;
+ q->double_buffering = false;
+ sfb_zero_all_buckets(q);
+diff --git a/net/sched/sch_skbprio.c b/net/sched/sch_skbprio.c
+index 7a5e4c454715..df72fb83d9c7 100644
+--- a/net/sched/sch_skbprio.c
++++ b/net/sched/sch_skbprio.c
+@@ -213,9 +213,6 @@ static void skbprio_reset(struct Qdisc *sch)
+ struct skbprio_sched_data *q = qdisc_priv(sch);
+ int prio;
+
+- sch->qstats.backlog = 0;
+- sch->q.qlen = 0;
+-
+ for (prio = 0; prio < SKBPRIO_MAX_PRIORITY; prio++)
+ __skb_queue_purge(&q->qdiscs[prio]);
+
+diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c
+index ae7ca68f2cf9..bd10a8eeb82d 100644
+--- a/net/sched/sch_taprio.c
++++ b/net/sched/sch_taprio.c
+@@ -1637,8 +1637,6 @@ static void taprio_reset(struct Qdisc *sch)
+ if (q->qdiscs[i])
+ qdisc_reset(q->qdiscs[i]);
+ }
+- sch->qstats.backlog = 0;
+- sch->q.qlen = 0;
+ }
+
+ static void taprio_destroy(struct Qdisc *sch)
+diff --git a/net/sched/sch_tbf.c b/net/sched/sch_tbf.c
+index 6eb17004a9e4..7461e5c67d50 100644
+--- a/net/sched/sch_tbf.c
++++ b/net/sched/sch_tbf.c
+@@ -316,8 +316,6 @@ static void tbf_reset(struct Qdisc *sch)
+ struct tbf_sched_data *q = qdisc_priv(sch);
+
+ qdisc_reset(q->qdisc);
+- sch->qstats.backlog = 0;
+- sch->q.qlen = 0;
+ q->t_c = ktime_get_ns();
+ q->tokens = q->buffer;
+ q->ptokens = q->mtu;
+diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
+index 6af6b95bdb67..79aaab51cbf5 100644
+--- a/net/sched/sch_teql.c
++++ b/net/sched/sch_teql.c
+@@ -124,7 +124,6 @@ teql_reset(struct Qdisc *sch)
+ struct teql_sched_data *dat = qdisc_priv(sch);
+
+ skb_queue_purge(&dat->q);
+- sch->q.qlen = 0;
+ }
+
+ static void
+--
+2.35.1
+
--- /dev/null
+From a210711a9cd342d99a4b91e52e9930f4fd6972ce Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Oct 2022 20:32:58 +0000
+Subject: net: sched: fix race condition in qdisc_graft()
+
+From: Eric Dumazet <edumazet@google.com>
+
+[ Upstream commit ebda44da44f6f309d302522b049f43d6f829f7aa ]
+
+We had one syzbot report [1] in syzbot queue for a while.
+I was waiting for more occurrences and/or a repro but
+Dmitry Vyukov spotted the issue right away.
+
+<quoting Dmitry>
+qdisc_graft() drops reference to qdisc in notify_and_destroy
+while it's still assigned to dev->qdisc
+</quoting>
+
+Indeed, RCU rules are clear when replacing a data structure.
+The visible pointer (dev->qdisc in this case) must be updated
+to the new object _before_ RCU grace period is started
+(qdisc_put(old) in this case).
+
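+The canonical update-side sequence looks like this (a generic sketch
+with a hypothetical RCU-protected pointer gp and its update-side lock,
+not the qdisc code itself):
+
+	spin_lock(&lock);
+	old = rcu_dereference_protected(gp, lockdep_is_held(&lock));
+	rcu_assign_pointer(gp, new);	/* publish the replacement first */
+	spin_unlock(&lock);
+	synchronize_rcu();		/* wait for readers still using old */
+	kfree(old);			/* only now may old be freed */
+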
+[1]
+BUG: KASAN: use-after-free in __tcf_qdisc_find.part.0+0xa3a/0xac0 net/sched/cls_api.c:1066
+Read of size 4 at addr ffff88802065e038 by task syz-executor.4/21027
+
+CPU: 0 PID: 21027 Comm: syz-executor.4 Not tainted 6.0.0-rc3-syzkaller-00363-g7726d4c3e60b #0
+Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 08/26/2022
+Call Trace:
+<TASK>
+__dump_stack lib/dump_stack.c:88 [inline]
+dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106
+print_address_description mm/kasan/report.c:317 [inline]
+print_report.cold+0x2ba/0x719 mm/kasan/report.c:433
+kasan_report+0xb1/0x1e0 mm/kasan/report.c:495
+__tcf_qdisc_find.part.0+0xa3a/0xac0 net/sched/cls_api.c:1066
+__tcf_qdisc_find net/sched/cls_api.c:1051 [inline]
+tc_new_tfilter+0x34f/0x2200 net/sched/cls_api.c:2018
+rtnetlink_rcv_msg+0x955/0xca0 net/core/rtnetlink.c:6081
+netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2501
+netlink_unicast_kernel net/netlink/af_netlink.c:1319 [inline]
+netlink_unicast+0x543/0x7f0 net/netlink/af_netlink.c:1345
+netlink_sendmsg+0x917/0xe10 net/netlink/af_netlink.c:1921
+sock_sendmsg_nosec net/socket.c:714 [inline]
+sock_sendmsg+0xcf/0x120 net/socket.c:734
+____sys_sendmsg+0x6eb/0x810 net/socket.c:2482
+___sys_sendmsg+0x110/0x1b0 net/socket.c:2536
+__sys_sendmsg+0xf3/0x1c0 net/socket.c:2565
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+RIP: 0033:0x7f5efaa89279
+Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48
+RSP: 002b:00007f5efbc31168 EFLAGS: 00000246 ORIG_RAX: 000000000000002e
+RAX: ffffffffffffffda RBX: 00007f5efab9bf80 RCX: 00007f5efaa89279
+RDX: 0000000000000000 RSI: 0000000020000140 RDI: 0000000000000005
+RBP: 00007f5efaae32e9 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000246 R12: 0000000000000000
+R13: 00007f5efb0cfb1f R14: 00007f5efbc31300 R15: 0000000000022000
+</TASK>
+
+Allocated by task 21027:
+kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38
+kasan_set_track mm/kasan/common.c:45 [inline]
+set_alloc_info mm/kasan/common.c:437 [inline]
+____kasan_kmalloc mm/kasan/common.c:516 [inline]
+____kasan_kmalloc mm/kasan/common.c:475 [inline]
+__kasan_kmalloc+0xa9/0xd0 mm/kasan/common.c:525
+kmalloc_node include/linux/slab.h:623 [inline]
+kzalloc_node include/linux/slab.h:744 [inline]
+qdisc_alloc+0xb0/0xc50 net/sched/sch_generic.c:938
+qdisc_create_dflt+0x71/0x4a0 net/sched/sch_generic.c:997
+attach_one_default_qdisc net/sched/sch_generic.c:1152 [inline]
+netdev_for_each_tx_queue include/linux/netdevice.h:2437 [inline]
+attach_default_qdiscs net/sched/sch_generic.c:1170 [inline]
+dev_activate+0x760/0xcd0 net/sched/sch_generic.c:1229
+__dev_open+0x393/0x4d0 net/core/dev.c:1441
+__dev_change_flags+0x583/0x750 net/core/dev.c:8556
+rtnl_configure_link+0xee/0x240 net/core/rtnetlink.c:3189
+rtnl_newlink_create net/core/rtnetlink.c:3371 [inline]
+__rtnl_newlink+0x10b8/0x17e0 net/core/rtnetlink.c:3580
+rtnl_newlink+0x64/0xa0 net/core/rtnetlink.c:3593
+rtnetlink_rcv_msg+0x43a/0xca0 net/core/rtnetlink.c:6090
+netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2501
+netlink_unicast_kernel net/netlink/af_netlink.c:1319 [inline]
+netlink_unicast+0x543/0x7f0 net/netlink/af_netlink.c:1345
+netlink_sendmsg+0x917/0xe10 net/netlink/af_netlink.c:1921
+sock_sendmsg_nosec net/socket.c:714 [inline]
+sock_sendmsg+0xcf/0x120 net/socket.c:734
+____sys_sendmsg+0x6eb/0x810 net/socket.c:2482
+___sys_sendmsg+0x110/0x1b0 net/socket.c:2536
+__sys_sendmsg+0xf3/0x1c0 net/socket.c:2565
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Freed by task 21020:
+kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38
+kasan_set_track+0x21/0x30 mm/kasan/common.c:45
+kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:370
+____kasan_slab_free mm/kasan/common.c:367 [inline]
+____kasan_slab_free+0x166/0x1c0 mm/kasan/common.c:329
+kasan_slab_free include/linux/kasan.h:200 [inline]
+slab_free_hook mm/slub.c:1754 [inline]
+slab_free_freelist_hook+0x8b/0x1c0 mm/slub.c:1780
+slab_free mm/slub.c:3534 [inline]
+kfree+0xe2/0x580 mm/slub.c:4562
+rcu_do_batch kernel/rcu/tree.c:2245 [inline]
+rcu_core+0x7b5/0x1890 kernel/rcu/tree.c:2505
+__do_softirq+0x1d3/0x9c6 kernel/softirq.c:571
+
+Last potentially related work creation:
+kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38
+__kasan_record_aux_stack+0xbe/0xd0 mm/kasan/generic.c:348
+call_rcu+0x99/0x790 kernel/rcu/tree.c:2793
+qdisc_put+0xcd/0xe0 net/sched/sch_generic.c:1083
+notify_and_destroy net/sched/sch_api.c:1012 [inline]
+qdisc_graft+0xeb1/0x1270 net/sched/sch_api.c:1084
+tc_modify_qdisc+0xbb7/0x1a00 net/sched/sch_api.c:1671
+rtnetlink_rcv_msg+0x43a/0xca0 net/core/rtnetlink.c:6090
+netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2501
+netlink_unicast_kernel net/netlink/af_netlink.c:1319 [inline]
+netlink_unicast+0x543/0x7f0 net/netlink/af_netlink.c:1345
+netlink_sendmsg+0x917/0xe10 net/netlink/af_netlink.c:1921
+sock_sendmsg_nosec net/socket.c:714 [inline]
+sock_sendmsg+0xcf/0x120 net/socket.c:734
+____sys_sendmsg+0x6eb/0x810 net/socket.c:2482
+___sys_sendmsg+0x110/0x1b0 net/socket.c:2536
+__sys_sendmsg+0xf3/0x1c0 net/socket.c:2565
+do_syscall_x64 arch/x86/entry/common.c:50 [inline]
+do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80
+entry_SYSCALL_64_after_hwframe+0x63/0xcd
+
+Second to last potentially related work creation:
+kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38
+__kasan_record_aux_stack+0xbe/0xd0 mm/kasan/generic.c:348
+kvfree_call_rcu+0x74/0x940 kernel/rcu/tree.c:3322
+neigh_destroy+0x431/0x630 net/core/neighbour.c:912
+neigh_release include/net/neighbour.h:454 [inline]
+neigh_cleanup_and_release+0x1f8/0x330 net/core/neighbour.c:103
+neigh_del net/core/neighbour.c:225 [inline]
+neigh_remove_one+0x37d/0x460 net/core/neighbour.c:246
+neigh_forced_gc net/core/neighbour.c:276 [inline]
+neigh_alloc net/core/neighbour.c:447 [inline]
+___neigh_create+0x18b5/0x29a0 net/core/neighbour.c:642
+ip6_finish_output2+0xfb8/0x1520 net/ipv6/ip6_output.c:125
+__ip6_finish_output net/ipv6/ip6_output.c:195 [inline]
+ip6_finish_output+0x690/0x1160 net/ipv6/ip6_output.c:206
+NF_HOOK_COND include/linux/netfilter.h:296 [inline]
+ip6_output+0x1ed/0x540 net/ipv6/ip6_output.c:227
+dst_output include/net/dst.h:451 [inline]
+NF_HOOK include/linux/netfilter.h:307 [inline]
+NF_HOOK include/linux/netfilter.h:301 [inline]
+mld_sendpack+0xa09/0xe70 net/ipv6/mcast.c:1820
+mld_send_cr net/ipv6/mcast.c:2121 [inline]
+mld_ifc_work+0x71c/0xdc0 net/ipv6/mcast.c:2653
+process_one_work+0x991/0x1610 kernel/workqueue.c:2289
+worker_thread+0x665/0x1080 kernel/workqueue.c:2436
+kthread+0x2e4/0x3a0 kernel/kthread.c:376
+ret_from_fork+0x1f/0x30 arch/x86/entry/entry_64.S:306
+
+The buggy address belongs to the object at ffff88802065e000
+which belongs to the cache kmalloc-1k of size 1024
+The buggy address is located 56 bytes inside of
+1024-byte region [ffff88802065e000, ffff88802065e400)
+
+The buggy address belongs to the physical page:
+page:ffffea0000819600 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x20658
+head:ffffea0000819600 order:3 compound_mapcount:0 compound_pincount:0
+flags: 0xfff00000010200(slab|head|node=0|zone=1|lastcpupid=0x7ff)
+raw: 00fff00000010200 0000000000000000 dead000000000001 ffff888011841dc0
+raw: 0000000000000000 0000000000100010 00000001ffffffff 0000000000000000
+page dumped because: kasan: bad access detected
+page_owner tracks the page as allocated
+page last allocated via order 3, migratetype Unmovable, gfp_mask 0xd20c0(__GFP_IO|__GFP_FS|__GFP_NOWARN|__GFP_NORETRY|__GFP_COMP|__GFP_NOMEMALLOC), pid 3523, tgid 3523 (sshd), ts 41495190986, free_ts 41417713212
+prep_new_page mm/page_alloc.c:2532 [inline]
+get_page_from_freelist+0x109b/0x2ce0 mm/page_alloc.c:4283
+__alloc_pages+0x1c7/0x510 mm/page_alloc.c:5515
+alloc_pages+0x1a6/0x270 mm/mempolicy.c:2270
+alloc_slab_page mm/slub.c:1824 [inline]
+allocate_slab+0x27e/0x3d0 mm/slub.c:1969
+new_slab mm/slub.c:2029 [inline]
+___slab_alloc+0x7f1/0xe10 mm/slub.c:3031
+__slab_alloc.constprop.0+0x4d/0xa0 mm/slub.c:3118
+slab_alloc_node mm/slub.c:3209 [inline]
+__kmalloc_node_track_caller+0x2f2/0x380 mm/slub.c:4955
+kmalloc_reserve net/core/skbuff.c:358 [inline]
+__alloc_skb+0xd9/0x2f0 net/core/skbuff.c:430
+alloc_skb_fclone include/linux/skbuff.h:1307 [inline]
+tcp_stream_alloc_skb+0x38/0x580 net/ipv4/tcp.c:861
+tcp_sendmsg_locked+0xc36/0x2f80 net/ipv4/tcp.c:1325
+tcp_sendmsg+0x2b/0x40 net/ipv4/tcp.c:1483
+inet_sendmsg+0x99/0xe0 net/ipv4/af_inet.c:819
+sock_sendmsg_nosec net/socket.c:714 [inline]
+sock_sendmsg+0xcf/0x120 net/socket.c:734
+sock_write_iter+0x291/0x3d0 net/socket.c:1108
+call_write_iter include/linux/fs.h:2187 [inline]
+new_sync_write fs/read_write.c:491 [inline]
+vfs_write+0x9e9/0xdd0 fs/read_write.c:578
+ksys_write+0x1e8/0x250 fs/read_write.c:631
+page last free stack trace:
+reset_page_owner include/linux/page_owner.h:24 [inline]
+free_pages_prepare mm/page_alloc.c:1449 [inline]
+free_pcp_prepare+0x5e4/0xd20 mm/page_alloc.c:1499
+free_unref_page_prepare mm/page_alloc.c:3380 [inline]
+free_unref_page+0x19/0x4d0 mm/page_alloc.c:3476
+__unfreeze_partials+0x17c/0x1a0 mm/slub.c:2548
+qlink_free mm/kasan/quarantine.c:168 [inline]
+qlist_free_all+0x6a/0x170 mm/kasan/quarantine.c:187
+kasan_quarantine_reduce+0x180/0x200 mm/kasan/quarantine.c:294
+__kasan_slab_alloc+0xa2/0xc0 mm/kasan/common.c:447
+kasan_slab_alloc include/linux/kasan.h:224 [inline]
+slab_post_alloc_hook mm/slab.h:727 [inline]
+slab_alloc_node mm/slub.c:3243 [inline]
+slab_alloc mm/slub.c:3251 [inline]
+__kmem_cache_alloc_lru mm/slub.c:3258 [inline]
+kmem_cache_alloc+0x267/0x3b0 mm/slub.c:3268
+kmem_cache_zalloc include/linux/slab.h:723 [inline]
+alloc_buffer_head+0x20/0x140 fs/buffer.c:2974
+alloc_page_buffers+0x280/0x790 fs/buffer.c:829
+create_empty_buffers+0x2c/0xee0 fs/buffer.c:1558
+ext4_block_write_begin+0x1004/0x1530 fs/ext4/inode.c:1074
+ext4_da_write_begin+0x422/0xae0 fs/ext4/inode.c:2996
+generic_perform_write+0x246/0x560 mm/filemap.c:3738
+ext4_buffered_write_iter+0x15b/0x460 fs/ext4/file.c:270
+ext4_file_write_iter+0x44a/0x1660 fs/ext4/file.c:679
+call_write_iter include/linux/fs.h:2187 [inline]
+new_sync_write fs/read_write.c:491 [inline]
+vfs_write+0x9e9/0xdd0 fs/read_write.c:578
+
+Fixes: af356afa010f ("net_sched: reintroduce dev->qdisc for use by sch_api")
+Reported-by: syzbot <syzkaller@googlegroups.com>
+Diagnosed-by: Dmitry Vyukov <dvyukov@google.com>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/r/20221018203258.2793282-1-edumazet@google.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_api.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
+index 0fb387c9d706..5ab20c764aa5 100644
+--- a/net/sched/sch_api.c
++++ b/net/sched/sch_api.c
+@@ -1081,12 +1081,13 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
+
+ skip:
+ if (!ingress) {
+- notify_and_destroy(net, skb, n, classid,
+- rtnl_dereference(dev->qdisc), new);
++ old = rtnl_dereference(dev->qdisc);
+ if (new && !new->ops->attach)
+ qdisc_refcount_inc(new);
+ rcu_assign_pointer(dev->qdisc, new ? : &noop_qdisc);
+
++ notify_and_destroy(net, skb, n, classid, old, new);
++
+ if (new && new->ops->attach)
+ new->ops->attach(new);
+ } else {
+--
+2.35.1
+
--- /dev/null
+From 8ab201813cb863d0ab5f1e780f30148d4e34d8e5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Oct 2022 14:32:01 +0800
+Subject: net: sched: sfb: fix null pointer access issue when sfb_init() fails
+
+From: Zhengchao Shao <shaozhengchao@huawei.com>
+
+[ Upstream commit 2a3fc78210b9f0e85372a2435368962009f480fc ]
+
+When the default qdisc is sfb, if the qdisc of a dev_queue fails to be
+initialized during mqprio_init(), sfb_reset() is invoked to clean up
+resources. In this case q->qdisc is NULL, which causes a general
+protection fault.
+
+The process is as follows:
+qdisc_create_dflt()
+ sfb_init()
+ tcf_block_get() --->failed, q->qdisc is NULL
+ ...
+ qdisc_put()
+ ...
+ sfb_reset()
+ qdisc_reset(q->qdisc) --->q->qdisc is NULL
+ ops = qdisc->ops
+
+The following is the Call Trace information:
+general protection fault, probably for non-canonical address
+0xdffffc0000000003: 0000 [#1] PREEMPT SMP KASAN
+KASAN: null-ptr-deref in range [0x0000000000000018-0x000000000000001f]
+RIP: 0010:qdisc_reset+0x2b/0x6f0
+Call Trace:
+<TASK>
+sfb_reset+0x37/0xd0
+qdisc_reset+0xed/0x6f0
+qdisc_destroy+0x82/0x4c0
+qdisc_put+0x9e/0xb0
+qdisc_create_dflt+0x2c3/0x4a0
+mqprio_init+0xa71/0x1760
+qdisc_create+0x3eb/0x1000
+tc_modify_qdisc+0x408/0x1720
+rtnetlink_rcv_msg+0x38e/0xac0
+netlink_rcv_skb+0x12d/0x3a0
+netlink_unicast+0x4a2/0x740
+netlink_sendmsg+0x826/0xcc0
+sock_sendmsg+0xc5/0x100
+____sys_sendmsg+0x583/0x690
+___sys_sendmsg+0xe8/0x160
+__sys_sendmsg+0xbf/0x160
+do_syscall_64+0x35/0x80
+entry_SYSCALL_64_after_hwframe+0x46/0xb0
+RIP: 0033:0x7f2164122d04
+</TASK>
+
+Fixes: e13e02a3c68d ("net_sched: SFB flow scheduler")
+Signed-off-by: Zhengchao Shao <shaozhengchao@huawei.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/sched/sch_sfb.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
+index 1be8d04d69dc..0490eb5b98de 100644
+--- a/net/sched/sch_sfb.c
++++ b/net/sched/sch_sfb.c
+@@ -455,7 +455,8 @@ static void sfb_reset(struct Qdisc *sch)
+ {
+ struct sfb_sched_data *q = qdisc_priv(sch);
+
+- qdisc_reset(q->qdisc);
++ if (likely(q->qdisc))
++ qdisc_reset(q->qdisc);
+ q->slot = 0;
+ q->double_buffering = false;
+ sfb_zero_all_buckets(q);
+--
+2.35.1
+
--- /dev/null
+From 63c9497b58bc455febb50838e4283938b1211742 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 17 Oct 2022 14:12:58 +0200
+Subject: netfilter: nf_tables: relax NFTA_SET_ELEM_KEY_END set flags
+ requirements
+
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+
+[ Upstream commit 96df8360dbb435cc69f7c3c8db44bf8b1c24cd7b ]
+
+Otherwise EINVAL is bogusly reported to userspace when deleting a set
+element. NFTA_SET_ELEM_KEY_END does not need to be set in case of:
+
+- insertion: if not present, start key is used as end key.
+- deletion: only start key needs to be specified, end key is ignored.
+
+Hence, relax the sanity check.
+
+Fixes: 88cccd908d51 ("netfilter: nf_tables: NFTA_SET_ELEM_KEY_END requires concat and interval flags")
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/netfilter/nf_tables_api.c | 5 +++--
+ 1 file changed, 3 insertions(+), 2 deletions(-)
+
+diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
+index 460ad341d160..f7a5b8414423 100644
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -5720,8 +5720,9 @@ static bool nft_setelem_valid_key_end(const struct nft_set *set,
+ (NFT_SET_CONCAT | NFT_SET_INTERVAL)) {
+ if (flags & NFT_SET_ELEM_INTERVAL_END)
+ return false;
+- if (!nla[NFTA_SET_ELEM_KEY_END] &&
+- !(flags & NFT_SET_ELEM_CATCHALL))
++
++ if (nla[NFTA_SET_ELEM_KEY_END] &&
++ flags & NFT_SET_ELEM_CATCHALL)
+ return false;
+ } else {
+ if (nla[NFTA_SET_ELEM_KEY_END])
+--
+2.35.1
+
--- /dev/null
+From 455ba74534f28ef70810483583188c7a2b2f504e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Oct 2022 16:55:55 +0200
+Subject: nvme-hwmon: consistently ignore errors from nvme_hwmon_init
+
+From: Christoph Hellwig <hch@lst.de>
+
+[ Upstream commit 6b8cf94005187952f794c0c4ed3920a1e8accfa3 ]
+
+An NVMe controller works perfectly fine even when the hwmon
+initialization fails. Stop returning errors from nvme_hwmon_init unless
+they come from a controller reset, so this case is handled consistently.
+
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Guenter Roeck <linux@roeck-us.net>
+Reviewed-by: Serge Semin <fancer.lancer@gmail.com>
+Stable-dep-of: c94b7f9bab22 ("nvme-hwmon: kmalloc the NVME SMART log buffer")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/core.c | 6 +++++-
+ drivers/nvme/host/hwmon.c | 13 ++++++++-----
+ 2 files changed, 13 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
+index 3527a0667568..92fe67bd2457 100644
+--- a/drivers/nvme/host/core.c
++++ b/drivers/nvme/host/core.c
+@@ -3088,8 +3088,12 @@ int nvme_init_ctrl_finish(struct nvme_ctrl *ctrl)
+ return ret;
+
+ if (!ctrl->identified && !nvme_discovery_ctrl(ctrl)) {
++ /*
++ * Do not return errors unless we are in a controller reset,
++ * the controller works perfectly fine without hwmon.
++ */
+ ret = nvme_hwmon_init(ctrl);
+- if (ret < 0)
++ if (ret == -EINTR)
+ return ret;
+ }
+
+diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
+index 0a586d712920..23918bb7bdca 100644
+--- a/drivers/nvme/host/hwmon.c
++++ b/drivers/nvme/host/hwmon.c
+@@ -230,7 +230,7 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
+ if (!data)
+- return 0;
++ return -ENOMEM;
+
+ data->ctrl = ctrl;
+ mutex_init(&data->read_lock);
+@@ -238,8 +238,7 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+ err = nvme_hwmon_get_smart_log(data);
+ if (err) {
+ dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+- kfree(data);
+- return err;
++ goto err_free_data;
+ }
+
+ hwmon = hwmon_device_register_with_info(dev, "nvme",
+@@ -247,11 +246,15 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+ NULL);
+ if (IS_ERR(hwmon)) {
+ dev_warn(dev, "Failed to instantiate hwmon device\n");
+- kfree(data);
+- return PTR_ERR(hwmon);
++ err = PTR_ERR(hwmon);
++ goto err_free_data;
+ }
+ ctrl->hwmon_device = hwmon;
+ return 0;
++
++err_free_data:
++ kfree(data);
++ return err;
+ }
+
+ void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+--
+2.35.1
+
--- /dev/null
+From 03f22467f5322a5a75a4be7a8a841f351713e4c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Oct 2022 17:33:52 +0200
+Subject: nvme-hwmon: kmalloc the NVME SMART log buffer
+
+From: Serge Semin <Sergey.Semin@baikalelectronics.ru>
+
+[ Upstream commit c94b7f9bab22ac504f9153767676e659988575ad ]
+
+Recent commit 52fde2c07da6 ("nvme: set dma alignment to dword") has
+caused a regression on our platform.
+
+It turned out that invoking nvme_get_log() corrupted the
+nvme_hwmon_data structure instance. In particular, the
+nvme_hwmon_data.ctrl pointer was overwritten either with zeros or with
+garbage. After some research we discovered that the corruption happened
+not during the actual NVMe DMA transfer, but already during the buffer
+mapping. Since our platform is DMA-noncoherent, the mapping implies
+cache-line invalidations or write-backs, depending on the DMA-direction
+parameter. For the NVMe SMART log the DMA is performed from device to
+memory, so a cache invalidation was issued while mapping the buffer.
+Since the log buffer isn't cache-line aligned, that invalidation
+discarded the neighbouring data, which turned out to be the fields
+surrounding the buffer within the nvme_hwmon_data structure.
+
+In order to fix that, we need to make sure the whole log buffer lives
+in a cache-line-aligned memory region, so that the cache invalidation
+cannot touch the adjacent data. One of the options to guarantee that is
+to kmalloc the DMA buffer [1]. Seeing that the rest of the NVMe core
+driver prefers that method, it has been chosen to fix this problem too.
+
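+In other words, the hazard was the old layout (a sketch; assumes a
+cache line larger than the preceding fields):
+
+	struct nvme_hwmon_data {
+		struct nvme_ctrl *ctrl;		/* shares a cache line...  */
+		struct nvme_smart_log log;	/* ...with the DMA buffer,
+						 * so invalidation can wipe
+						 * ctrl */
+		struct mutex read_lock;
+	};
+
+	/* after the fix: a kmalloc-ed buffer is suitably aligned for DMA */
+	data->log = kzalloc(sizeof(*data->log), GFP_KERNEL);
+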
+Note that after deeper research we found out that the denoted commit
+wasn't the root cause of the problem. It merely exposed the bug by
+activating the DMA-based NVMe SMART log retrieval performed by the NVMe
+hwmon driver. The problem had been there since the initial commit of
+the driver.
+
+[1] Documentation/core-api/dma-api-howto.rst
+
+Fixes: 400b6a7b13a3 ("nvme: Add hardware monitoring support")
+Signed-off-by: Serge Semin <Sergey.Semin@baikalelectronics.ru>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/hwmon.c | 23 ++++++++++++++++-------
+ 1 file changed, 16 insertions(+), 7 deletions(-)
+
+diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c
+index 23918bb7bdca..9e6e56c20ec9 100644
+--- a/drivers/nvme/host/hwmon.c
++++ b/drivers/nvme/host/hwmon.c
+@@ -12,7 +12,7 @@
+
+ struct nvme_hwmon_data {
+ struct nvme_ctrl *ctrl;
+- struct nvme_smart_log log;
++ struct nvme_smart_log *log;
+ struct mutex read_lock;
+ };
+
+@@ -60,14 +60,14 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under,
+ static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data)
+ {
+ return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0,
+- NVME_CSI_NVM, &data->log, sizeof(data->log), 0);
++ NVME_CSI_NVM, data->log, sizeof(*data->log), 0);
+ }
+
+ static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type,
+ u32 attr, int channel, long *val)
+ {
+ struct nvme_hwmon_data *data = dev_get_drvdata(dev);
+- struct nvme_smart_log *log = &data->log;
++ struct nvme_smart_log *log = data->log;
+ int temp;
+ int err;
+
+@@ -163,7 +163,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
+ case hwmon_temp_max:
+ case hwmon_temp_min:
+ if ((!channel && data->ctrl->wctemp) ||
+- (channel && data->log.temp_sensor[channel - 1])) {
++ (channel && data->log->temp_sensor[channel - 1])) {
+ if (data->ctrl->quirks &
+ NVME_QUIRK_NO_TEMP_THRESH_CHANGE)
+ return 0444;
+@@ -176,7 +176,7 @@ static umode_t nvme_hwmon_is_visible(const void *_data,
+ break;
+ case hwmon_temp_input:
+ case hwmon_temp_label:
+- if (!channel || data->log.temp_sensor[channel - 1])
++ if (!channel || data->log->temp_sensor[channel - 1])
+ return 0444;
+ break;
+ default:
+@@ -232,13 +232,19 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+ if (!data)
+ return -ENOMEM;
+
++ data->log = kzalloc(sizeof(*data->log), GFP_KERNEL);
++ if (!data->log) {
++ err = -ENOMEM;
++ goto err_free_data;
++ }
++
+ data->ctrl = ctrl;
+ mutex_init(&data->read_lock);
+
+ err = nvme_hwmon_get_smart_log(data);
+ if (err) {
+ dev_warn(dev, "Failed to read smart log (error %d)\n", err);
+- goto err_free_data;
++ goto err_free_log;
+ }
+
+ hwmon = hwmon_device_register_with_info(dev, "nvme",
+@@ -247,11 +253,13 @@ int nvme_hwmon_init(struct nvme_ctrl *ctrl)
+ if (IS_ERR(hwmon)) {
+ dev_warn(dev, "Failed to instantiate hwmon device\n");
+ err = PTR_ERR(hwmon);
+- goto err_free_data;
++ goto err_free_log;
+ }
+ ctrl->hwmon_device = hwmon;
+ return 0;
+
++err_free_log:
++ kfree(data->log);
+ err_free_data:
+ kfree(data);
+ return err;
+@@ -265,6 +273,7 @@ void nvme_hwmon_exit(struct nvme_ctrl *ctrl)
+
+ hwmon_device_unregister(ctrl->hwmon_device);
+ ctrl->hwmon_device = NULL;
++ kfree(data->log);
+ kfree(data);
+ }
+ }
+--
+2.35.1
+
--- /dev/null
+From a8b3568da51aa574b938647eab69c5c62a7e1fd5 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 28 Sep 2022 09:39:10 +0300
+Subject: nvmet: fix workqueue MEM_RECLAIM flushing dependency
+
+From: Sagi Grimberg <sagi@grimberg.me>
+
+[ Upstream commit ddd2b8de9f85b388925e7dc46b3890fc1a0d8d24 ]
+
+The keep-alive timer needs to stay on nvmet_wq and must not be
+rescheduled on the system_wq.
+
+This fixes a warning:
+------------[ cut here ]------------
+workqueue: WQ_MEM_RECLAIM
+nvmet-wq:nvmet_rdma_release_queue_work [nvmet_rdma] is flushing
+!WQ_MEM_RECLAIM events:nvmet_keep_alive_timer [nvmet]
+WARNING: CPU: 3 PID: 1086 at kernel/workqueue.c:2628
+check_flush_dependency+0x16c/0x1e0
+
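+The general rule, sketched below with made-up names, is that any work
+item a WQ_MEM_RECLAIM workqueue may flush must itself be queued on a
+WQ_MEM_RECLAIM workqueue:
+
+	#include <linux/workqueue.h>
+
+	static struct workqueue_struct *example_wq;	/* reclaim-safe */
+	static struct delayed_work example_ka_work;
+
+	static int example_init(void)
+	{
+		example_wq = alloc_workqueue("example_wq", WQ_MEM_RECLAIM, 0);
+		return example_wq ? 0 : -ENOMEM;
+	}
+
+	static void example_start(unsigned long kato)
+	{
+		/* Reschedule on the private queue, not system_wq, so
+		 * flushing example_wq never depends on a
+		 * !WQ_MEM_RECLAIM queue.
+		 */
+		mod_delayed_work(example_wq, &example_ka_work, kato * HZ);
+	}
+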
+Reported-by: Yi Zhang <yi.zhang@redhat.com>
+Fixes: 8832cf922151 ("nvmet: use a private workqueue instead of the system workqueue")
+Signed-off-by: Sagi Grimberg <sagi@grimberg.me>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Christoph Hellwig <hch@lst.de>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/target/core.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
+index 0a0c1d956c73..87a347248c38 100644
+--- a/drivers/nvme/target/core.c
++++ b/drivers/nvme/target/core.c
+@@ -1168,7 +1168,7 @@ static void nvmet_start_ctrl(struct nvmet_ctrl *ctrl)
+ * reset the keep alive timer when the controller is enabled.
+ */
+ if (ctrl->kato)
+- mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
++ mod_delayed_work(nvmet_wq, &ctrl->ka_work, ctrl->kato * HZ);
+ }
+
+ static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
+--
+2.35.1
+
--- /dev/null
+From 023f0bf44580af3a3a4133f865e30cf139951ba7 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 15 Oct 2021 10:21:24 -0700
+Subject: perf parse-events: Add const to evsel name
+
+From: Ian Rogers <irogers@google.com>
+
+[ Upstream commit 8e8bbfb311a26a17834f1839e15e2c29ea5e58c6 ]
+
+The evsel name is strdup-ed before assignment and so can be const.
+
+A later change will add another similar string.
+
+Using const makes it clearer that these are not out arguments.
+
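+A minimal illustration (hypothetical helper, not the perf code) of why
+the parameter can be const: the string is copied, never mutated or
+handed back:
+
+	static int example_set_name(struct evsel *evsel, const char *name)
+	{
+		evsel->name = strdup(name);	/* copy; caller keeps ownership */
+		return evsel->name ? 0 : -ENOMEM;
+	}
+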
+Signed-off-by: Ian Rogers <irogers@google.com>
+Acked-by: Andi Kleen <ak@linux.intel.com>
+Cc: Adrian Hunter <adrian.hunter@intel.com>
+Cc: Alexander Antonov <alexander.antonov@linux.intel.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Andrew Kilroy <andrew.kilroy@arm.com>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: Changbin Du <changbin.du@intel.com>
+Cc: Denys Zagorui <dzagorui@cisco.com>
+Cc: Fabian Hemmer <copy@copy.sh>
+Cc: Felix Fietkau <nbd@nbd.name>
+Cc: Heiko Carstens <hca@linux.ibm.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Jacob Keller <jacob.e.keller@intel.com>
+Cc: Jiapeng Chong <jiapeng.chong@linux.alibaba.com>
+Cc: Jin Yao <yao.jin@linux.intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Joakim Zhang <qiangqing.zhang@nxp.com>
+Cc: John Garry <john.garry@huawei.com>
+Cc: Kajol Jain <kjain@linux.ibm.com>
+Cc: Kan Liang <kan.liang@linux.intel.com>
+Cc: Kees Kook <keescook@chromium.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Nicholas Fraser <nfraser@codeweavers.com>
+Cc: Nick Desaulniers <ndesaulniers@google.com>
+Cc: Paul Clarke <pc@us.ibm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Riccardo Mancini <rickyman7@gmail.com>
+Cc: Sami Tolvanen <samitolvanen@google.com>
+Cc: ShihCheng Tu <mrtoastcheng@gmail.com>
+Cc: Song Liu <songliubraving@fb.com>
+Cc: Stephane Eranian <eranian@google.com>
+Cc: Sumanth Korikkar <sumanthk@linux.ibm.com>
+Cc: Thomas Richter <tmricht@linux.ibm.com>
+Cc: Wan Jiabing <wanjiabing@vivo.com>
+Cc: Zhen Lei <thunder.leizhen@huawei.com>
+Link: https://lore.kernel.org/r/20211015172132.1162559-14-irogers@google.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Stable-dep-of: e552b7be12ed ("perf: Skip and warn on unknown format 'configN' attrs")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/parse-events-hybrid.c | 15 +++++++++------
+ tools/perf/util/parse-events-hybrid.h | 6 ++++--
+ tools/perf/util/parse-events.c | 15 ++++++++-------
+ tools/perf/util/parse-events.h | 7 ++++---
+ tools/perf/util/pmu.c | 2 +-
+ tools/perf/util/pmu.h | 2 +-
+ 6 files changed, 27 insertions(+), 20 deletions(-)
+
+diff --git a/tools/perf/util/parse-events-hybrid.c b/tools/perf/util/parse-events-hybrid.c
+index b234d95fb10a..7e44deee1343 100644
+--- a/tools/perf/util/parse-events-hybrid.c
++++ b/tools/perf/util/parse-events-hybrid.c
+@@ -38,7 +38,7 @@ static void config_hybrid_attr(struct perf_event_attr *attr,
+
+ static int create_event_hybrid(__u32 config_type, int *idx,
+ struct list_head *list,
+- struct perf_event_attr *attr, char *name,
++ struct perf_event_attr *attr, const char *name,
+ struct list_head *config_terms,
+ struct perf_pmu *pmu)
+ {
+@@ -70,7 +70,7 @@ static int pmu_cmp(struct parse_events_state *parse_state,
+
+ static int add_hw_hybrid(struct parse_events_state *parse_state,
+ struct list_head *list, struct perf_event_attr *attr,
+- char *name, struct list_head *config_terms)
++ const char *name, struct list_head *config_terms)
+ {
+ struct perf_pmu *pmu;
+ int ret;
+@@ -94,7 +94,8 @@ static int add_hw_hybrid(struct parse_events_state *parse_state,
+ }
+
+ static int create_raw_event_hybrid(int *idx, struct list_head *list,
+- struct perf_event_attr *attr, char *name,
++ struct perf_event_attr *attr,
++ const char *name,
+ struct list_head *config_terms,
+ struct perf_pmu *pmu)
+ {
+@@ -113,7 +114,7 @@ static int create_raw_event_hybrid(int *idx, struct list_head *list,
+
+ static int add_raw_hybrid(struct parse_events_state *parse_state,
+ struct list_head *list, struct perf_event_attr *attr,
+- char *name, struct list_head *config_terms)
++ const char *name, struct list_head *config_terms)
+ {
+ struct perf_pmu *pmu;
+ int ret;
+@@ -138,7 +139,8 @@ static int add_raw_hybrid(struct parse_events_state *parse_state,
+ int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
+ struct list_head *list,
+ struct perf_event_attr *attr,
+- char *name, struct list_head *config_terms,
++ const char *name,
++ struct list_head *config_terms,
+ bool *hybrid)
+ {
+ *hybrid = false;
+@@ -159,7 +161,8 @@ int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
+ }
+
+ int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
+- struct perf_event_attr *attr, char *name,
++ struct perf_event_attr *attr,
++ const char *name,
+ struct list_head *config_terms,
+ bool *hybrid,
+ struct parse_events_state *parse_state)
+diff --git a/tools/perf/util/parse-events-hybrid.h b/tools/perf/util/parse-events-hybrid.h
+index f33bd67aa851..25a4a4f73f3a 100644
+--- a/tools/perf/util/parse-events-hybrid.h
++++ b/tools/perf/util/parse-events-hybrid.h
+@@ -11,11 +11,13 @@
+ int parse_events__add_numeric_hybrid(struct parse_events_state *parse_state,
+ struct list_head *list,
+ struct perf_event_attr *attr,
+- char *name, struct list_head *config_terms,
++ const char *name,
++ struct list_head *config_terms,
+ bool *hybrid);
+
+ int parse_events__add_cache_hybrid(struct list_head *list, int *idx,
+- struct perf_event_attr *attr, char *name,
++ struct perf_event_attr *attr,
++ const char *name,
+ struct list_head *config_terms,
+ bool *hybrid,
+ struct parse_events_state *parse_state);
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index b93a36ffeb9e..aaeebf0752b7 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -347,7 +347,7 @@ static int parse_events__is_name_term(struct parse_events_term *term)
+ return term->type_term == PARSE_EVENTS__TERM_TYPE_NAME;
+ }
+
+-static char *get_config_name(struct list_head *head_terms)
++static const char *get_config_name(struct list_head *head_terms)
+ {
+ struct parse_events_term *term;
+
+@@ -365,7 +365,7 @@ static struct evsel *
+ __add_event(struct list_head *list, int *idx,
+ struct perf_event_attr *attr,
+ bool init_attr,
+- char *name, struct perf_pmu *pmu,
++ const char *name, struct perf_pmu *pmu,
+ struct list_head *config_terms, bool auto_merge_stats,
+ const char *cpu_list)
+ {
+@@ -404,14 +404,14 @@ __add_event(struct list_head *list, int *idx,
+ }
+
+ struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
+- char *name, struct perf_pmu *pmu)
++ const char *name, struct perf_pmu *pmu)
+ {
+ return __add_event(NULL, &idx, attr, false, name, pmu, NULL, false,
+ NULL);
+ }
+
+ static int add_event(struct list_head *list, int *idx,
+- struct perf_event_attr *attr, char *name,
++ struct perf_event_attr *attr, const char *name,
+ struct list_head *config_terms)
+ {
+ return __add_event(list, idx, attr, true, name, NULL, config_terms,
+@@ -474,7 +474,8 @@ int parse_events_add_cache(struct list_head *list, int *idx,
+ {
+ struct perf_event_attr attr;
+ LIST_HEAD(config_terms);
+- char name[MAX_NAME_LEN], *config_name;
++ char name[MAX_NAME_LEN];
++ const char *config_name;
+ int cache_type = -1, cache_op = -1, cache_result = -1;
+ char *op_result[2] = { op_result1, op_result2 };
+ int i, n, ret;
+@@ -2038,7 +2039,7 @@ int parse_events__modifier_event(struct list_head *list, char *str, bool add)
+ return 0;
+ }
+
+-int parse_events_name(struct list_head *list, char *name)
++int parse_events_name(struct list_head *list, const char *name)
+ {
+ struct evsel *evsel;
+
+@@ -3295,7 +3296,7 @@ char *parse_events_formats_error_string(char *additional_terms)
+
+ struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
+ struct perf_event_attr *attr,
+- char *name, struct perf_pmu *pmu,
++ const char *name, struct perf_pmu *pmu,
+ struct list_head *config_terms)
+ {
+ return __add_event(list, idx, attr, true, name, pmu,
+diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
+index bf6e41aa9b6a..6ef506c1b29e 100644
+--- a/tools/perf/util/parse-events.h
++++ b/tools/perf/util/parse-events.h
+@@ -162,7 +162,7 @@ void parse_events_terms__purge(struct list_head *terms);
+ void parse_events__clear_array(struct parse_events_array *a);
+ int parse_events__modifier_event(struct list_head *list, char *str, bool add);
+ int parse_events__modifier_group(struct list_head *list, char *event_mod);
+-int parse_events_name(struct list_head *list, char *name);
++int parse_events_name(struct list_head *list, const char *name);
+ int parse_events_add_tracepoint(struct list_head *list, int *idx,
+ const char *sys, const char *event,
+ struct parse_events_error *error,
+@@ -200,7 +200,7 @@ int parse_events_add_pmu(struct parse_events_state *parse_state,
+ bool use_alias);
+
+ struct evsel *parse_events__add_event(int idx, struct perf_event_attr *attr,
+- char *name, struct perf_pmu *pmu);
++ const char *name, struct perf_pmu *pmu);
+
+ int parse_events_multi_pmu_add(struct parse_events_state *parse_state,
+ char *str,
+@@ -267,7 +267,8 @@ int perf_pmu__test_parse_init(void);
+
+ struct evsel *parse_events__add_event_hybrid(struct list_head *list, int *idx,
+ struct perf_event_attr *attr,
+- char *name, struct perf_pmu *pmu,
++ const char *name,
++ struct perf_pmu *pmu,
+ struct list_head *config_terms);
+
+ #endif /* __PERF_PARSE_EVENTS_H */
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index bdabd62170d2..c647b3633d1d 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -1906,7 +1906,7 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu)
+ }
+
+ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+- char *name)
++ const char *name)
+ {
+ struct perf_pmu_format *format;
+ __u64 masks = 0, bits;
+diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
+index 394898b07fd9..dd0736de32c8 100644
+--- a/tools/perf/util/pmu.h
++++ b/tools/perf/util/pmu.h
+@@ -134,7 +134,7 @@ int perf_pmu__convert_scale(const char *scale, char **end, double *sval);
+ int perf_pmu__caps_parse(struct perf_pmu *pmu);
+
+ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+- char *name);
++ const char *name);
+
+ bool perf_pmu__has_hybrid(void);
+ int perf_pmu__match(char *pattern, char *name, char *tok);
+--
+2.35.1
+
--- /dev/null
+From 80110045dd716a437be512318bc4fe0c9e26ef5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 4 Oct 2022 14:12:35 -0500
+Subject: perf: Skip and warn on unknown format 'configN' attrs
+
+From: Rob Herring <robh@kernel.org>
+
+[ Upstream commit e552b7be12ed62357df84392efa525ecb01910fb ]
+
+If the kernel exposes a new perf_event_attr field in a format attr, perf
+will return an error stating the specified PMU can't be found. For
+example, a format attr with 'config3:0-63' causes an error as config3 is
+unknown to perf. This causes a compatibility issue between a newer
+kernel and an older perf tool.
+
+Before this change with a kernel adding 'config3' I get:
+
+ $ perf record -e arm_spe// -- true
+ event syntax error: 'arm_spe//'
+ \___ Cannot find PMU `arm_spe'. Missing kernel support?
+ Run 'perf list' for a list of valid events
+
+ Usage: perf record [<options>] [<command>]
+ or: perf record [<options>] -- <command> [<options>]
+
+ -e, --event <event> event selector. use 'perf list' to list
+ available events
+
+After this change, I get:
+
+ $ perf record -e arm_spe// -- true
+ WARNING: 'arm_spe_0' format 'inv_event_filter' requires 'perf_event_attr::config3' which is not supported by this version of perf!
+ [ perf record: Woken up 2 times to write data ]
+ [ perf record: Captured and wrote 0.091 MB perf.data ]
+
+To support unknown configN formats, rework the YACC implementation to
+pass any config[0-9]+ format to perf_pmu__new_format(), and emit a
+warning when such a format is encountered.
+
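+A userspace sketch (hypothetical, not the actual lexer/grammar) of the
+generalized matching:
+
+	#include <stdio.h>
+	#include <string.h>
+
+	/* Accept "config" or any "configN" term instead of hard-coding
+	 * config1/config2, so future fields degrade to a warning rather
+	 * than a parse failure.
+	 */
+	static int example_parse_config_term(const char *tok, int *n)
+	{
+		if (!strcmp(tok, "config")) {
+			*n = 0;
+			return 0;
+		}
+		if (sscanf(tok, "config%d", n) == 1 && *n > 0)
+			return 0;
+		return -1;
+	}
+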
+Reviewed-by: Namhyung Kim <namhyung@kernel.org>
+Signed-off-by: Rob Herring <robh@kernel.org>
+Tested-by: Leo Yan <leo.yan@linaro.org>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: James Clark <james.clark@arm.com>
+Cc: Jiri Olsa <jolsa@kernel.org>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220914-arm-perf-tool-spe1-2-v2-v4-1-83c098e6212e@kernel.org
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/parse-events.c | 3 +++
+ tools/perf/util/pmu.c | 17 +++++++++++++++++
+ tools/perf/util/pmu.h | 2 ++
+ tools/perf/util/pmu.l | 2 --
+ tools/perf/util/pmu.y | 15 ++++-----------
+ 5 files changed, 26 insertions(+), 13 deletions(-)
+
+diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
+index aaeebf0752b7..b2804e65f93a 100644
+--- a/tools/perf/util/parse-events.c
++++ b/tools/perf/util/parse-events.c
+@@ -373,6 +373,9 @@ __add_event(struct list_head *list, int *idx,
+ struct perf_cpu_map *cpus = pmu ? perf_cpu_map__get(pmu->cpus) :
+ cpu_list ? perf_cpu_map__new(cpu_list) : NULL;
+
++ if (pmu)
++ perf_pmu__warn_invalid_formats(pmu);
++
+ if (pmu && attr->type == PERF_TYPE_RAW)
+ perf_pmu__warn_invalid_config(pmu, attr->config, name);
+
+diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
+index c647b3633d1d..988aae2cbdc3 100644
+--- a/tools/perf/util/pmu.c
++++ b/tools/perf/util/pmu.c
+@@ -1048,6 +1048,23 @@ static struct perf_pmu *pmu_lookup(const char *lookup_name)
+ return NULL;
+ }
+
++void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu)
++{
++ struct perf_pmu_format *format;
++
++ /* fake pmu doesn't have format list */
++ if (pmu == &perf_pmu__fake)
++ return;
++
++ list_for_each_entry(format, &pmu->format, list)
++ if (format->value >= PERF_PMU_FORMAT_VALUE_CONFIG_END) {
++ pr_warning("WARNING: '%s' format '%s' requires 'perf_event_attr::config%d'"
++ "which is not supported by this version of perf!\n",
++ pmu->name, format->name, format->value);
++ return;
++ }
++}
++
+ static struct perf_pmu *pmu_find(const char *name)
+ {
+ struct perf_pmu *pmu;
+diff --git a/tools/perf/util/pmu.h b/tools/perf/util/pmu.h
+index dd0736de32c8..b621c9f9e104 100644
+--- a/tools/perf/util/pmu.h
++++ b/tools/perf/util/pmu.h
+@@ -17,6 +17,7 @@ enum {
+ PERF_PMU_FORMAT_VALUE_CONFIG,
+ PERF_PMU_FORMAT_VALUE_CONFIG1,
+ PERF_PMU_FORMAT_VALUE_CONFIG2,
++ PERF_PMU_FORMAT_VALUE_CONFIG_END,
+ };
+
+ #define PERF_PMU_FORMAT_BITS 64
+@@ -135,6 +136,7 @@ int perf_pmu__caps_parse(struct perf_pmu *pmu);
+
+ void perf_pmu__warn_invalid_config(struct perf_pmu *pmu, __u64 config,
+ const char *name);
++void perf_pmu__warn_invalid_formats(struct perf_pmu *pmu);
+
+ bool perf_pmu__has_hybrid(void);
+ int perf_pmu__match(char *pattern, char *name, char *tok);
+diff --git a/tools/perf/util/pmu.l b/tools/perf/util/pmu.l
+index a15d9fbd7c0e..58b4926cfaca 100644
+--- a/tools/perf/util/pmu.l
++++ b/tools/perf/util/pmu.l
+@@ -27,8 +27,6 @@ num_dec [0-9]+
+
+ {num_dec} { return value(10); }
+ config { return PP_CONFIG; }
+-config1 { return PP_CONFIG1; }
+-config2 { return PP_CONFIG2; }
+ - { return '-'; }
+ : { return ':'; }
+ , { return ','; }
+diff --git a/tools/perf/util/pmu.y b/tools/perf/util/pmu.y
+index bfd7e8509869..283efe059819 100644
+--- a/tools/perf/util/pmu.y
++++ b/tools/perf/util/pmu.y
+@@ -20,7 +20,7 @@ do { \
+
+ %}
+
+-%token PP_CONFIG PP_CONFIG1 PP_CONFIG2
++%token PP_CONFIG
+ %token PP_VALUE PP_ERROR
+ %type <num> PP_VALUE
+ %type <bits> bit_term
+@@ -47,18 +47,11 @@ PP_CONFIG ':' bits
+ $3));
+ }
+ |
+-PP_CONFIG1 ':' bits
++PP_CONFIG PP_VALUE ':' bits
+ {
+ ABORT_ON(perf_pmu__new_format(format, name,
+- PERF_PMU_FORMAT_VALUE_CONFIG1,
+- $3));
+-}
+-|
+-PP_CONFIG2 ':' bits
+-{
+- ABORT_ON(perf_pmu__new_format(format, name,
+- PERF_PMU_FORMAT_VALUE_CONFIG2,
+- $3));
++ $2,
++ $4));
+ }
+
+ bits:
+--
+2.35.1
+
--- /dev/null
+From dd42026a63215bac5e387a95be0e85d70dac30ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 10 Sep 2021 16:31:55 -0700
+Subject: scsi: lpfc: Adjust bytes received values during cmf timer interval
+
+From: James Smart <jsmart2021@gmail.com>
+
+[ Upstream commit d5ac69b332d8859d1f8bd5d4dee31f3267f6b0d2 ]
+
+The newly added congestion management framework is seeing unexpected
+congestion FPINs and signals. Analysis shows that the time values given
+to the adapter do not fall on hard time-interval boundaries, so the
+drift relative to the observed transfer count skews how the framework
+manages things.
+
+Adjust counters to cover the drift.
+
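+In concrete terms (a worked reading of the hunk below): when the timer
+fires exactly on the interval, total is padded by total/50, i.e. 2%;
+when it fires early, total is padded by total/33, roughly 3%. The
+padded value stands in for what a full, drift-free interval would have
+counted.
+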
+Link: https://lore.kernel.org/r/20210910233159.115896-11-jsmart2021@gmail.com
+Co-developed-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Stable-dep-of: bd269188ea94 ("scsi: lpfc: Rework MIB Rx Monitor debug info logic")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_init.c | 16 ++++++++++++++--
+ 1 file changed, 14 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 48043e1ba485..730a6de4b8a6 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -5869,7 +5869,7 @@ lpfc_cmf_timer(struct hrtimer *timer)
+ uint32_t io_cnt;
+ uint32_t head, tail;
+ uint32_t busy, max_read;
+- uint64_t total, rcv, lat, mbpi;
++ uint64_t total, rcv, lat, mbpi, extra;
+ int timer_interval = LPFC_CMF_INTERVAL;
+ uint32_t ms;
+ struct lpfc_cgn_stat *cgs;
+@@ -5936,7 +5936,19 @@ lpfc_cmf_timer(struct hrtimer *timer)
+ phba->hba_flag & HBA_SETUP) {
+ mbpi = phba->cmf_last_sync_bw;
+ phba->cmf_last_sync_bw = 0;
+- lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total);
++ extra = 0;
++
++ /* Calculate any extra bytes needed to account for the
++ * timer accuracy. If we are less than LPFC_CMF_INTERVAL
++ * add an extra 3% slop factor, equal to LPFC_CMF_INTERVAL
++ * add an extra 2%. The goal is to equalize total with a
++ * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1
++ */
++ if (ms == LPFC_CMF_INTERVAL)
++ extra = div_u64(total, 50);
++ else if (ms < LPFC_CMF_INTERVAL)
++ extra = div_u64(total, 33);
++ lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
+ } else {
+ /* For Monitor mode or link down we want mbpi
+ * to be the full link speed
+--
+2.35.1
+
--- /dev/null
+From 4b18481655a14a5f5eaa9ab2a4ce82e75e35b873 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 3 Dec 2021 16:26:41 -0800
+Subject: scsi: lpfc: Adjust CMF total bytes and rxmonitor
+
+From: James Smart <jsmart2021@gmail.com>
+
+[ Upstream commit a6269f837045acb02904f31f05acde847ec8f8a7 ]
+
+Calculate any extra bytes needed to account for timer accuracy. If the
+measured interval is shorter than LPFC_CMF_INTERVAL, then calculate the
+adjustment needed for total to reflect a full LPFC_CMF_INTERVAL.
+
+Add additional info to rxmonitor, and adjust some log formatting.
+
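+As a worked example with assumed numbers: if the timer fired after
+ms = 45 of a nominal LPFC_CMF_INTERVAL of 50 ms with total = 900 bytes
+counted, the per-millisecond rate is 20 bytes, a full interval projects
+to cnt = 20 * 50 = 1000 bytes (capped at mbpi), and extra = cnt - total
+= 100 bytes is added to the value handed to the CMF_SYNC_WQE.
+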
+Link: https://lore.kernel.org/r/20211204002644.116455-7-jsmart2021@gmail.com
+Co-developed-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Stable-dep-of: bd269188ea94 ("scsi: lpfc: Rework MIB Rx Monitor debug info logic")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc.h | 1 +
+ drivers/scsi/lpfc/lpfc_debugfs.c | 14 ++++++++------
+ drivers/scsi/lpfc/lpfc_debugfs.h | 2 +-
+ drivers/scsi/lpfc/lpfc_init.c | 20 ++++++++++++--------
+ 4 files changed, 22 insertions(+), 15 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index f3bcb56e9ef2..fb5092ca9397 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -1615,6 +1615,7 @@ struct lpfc_hba {
+ #define LPFC_MAX_RXMONITOR_ENTRY 800
+ #define LPFC_MAX_RXMONITOR_DUMP 32
+ struct rxtable_entry {
++ uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */
+ uint64_t total_bytes; /* Total no of read bytes requested */
+ uint64_t rcv_bytes; /* Total no of read bytes completed */
+ uint64_t avg_io_size;
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index 79bc86ba59b3..61f8dcd072ac 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -5561,22 +5561,24 @@ lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
+ start = tail;
+
+ len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+- " MaxBPI\t Total Data Cmd Total Data Cmpl "
+- " Latency(us) Avg IO Size\tMax IO Size IO cnt "
+- "Info BWutil(ms)\n");
++ " MaxBPI Tot_Data_CMF Tot_Data_Cmd "
++ "Tot_Data_Cmpl Lat(us) Avg_IO Max_IO "
++ "Bsy IO_cnt Info BWutil(ms)\n");
+ get_table:
+ for (i = start; i < last; i++) {
+ entry = &phba->rxtable[i];
+ len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+- "%3d:%12lld %12lld\t%12lld\t"
+- "%8lldus\t%8lld\t%10lld "
+- "%8d %2d %2d(%2d)\n",
++ "%3d:%12lld %12lld %12lld %12lld "
++ "%7lldus %8lld %7lld "
++ "%2d %4d %2d %2d(%2d)\n",
+ i, entry->max_bytes_per_interval,
++ entry->cmf_bytes,
+ entry->total_bytes,
+ entry->rcv_bytes,
+ entry->avg_io_latency,
+ entry->avg_io_size,
+ entry->max_read_cnt,
++ entry->cmf_busy,
+ entry->io_cnt,
+ entry->cmf_info,
+ entry->timer_utilization,
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
+index a5bf71b34972..6dd361c1fd31 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.h
++++ b/drivers/scsi/lpfc/lpfc_debugfs.h
+@@ -282,7 +282,7 @@ struct lpfc_idiag {
+ void *ptr_private;
+ };
+
+-#define MAX_DEBUGFS_RX_TABLE_SIZE (100 * LPFC_MAX_RXMONITOR_ENTRY)
++#define MAX_DEBUGFS_RX_TABLE_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
+ struct lpfc_rx_monitor_debug {
+ char *i_private;
+ char *buffer;
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 730a6de4b8a6..7545ff55ff50 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -5869,7 +5869,7 @@ lpfc_cmf_timer(struct hrtimer *timer)
+ uint32_t io_cnt;
+ uint32_t head, tail;
+ uint32_t busy, max_read;
+- uint64_t total, rcv, lat, mbpi, extra;
++ uint64_t total, rcv, lat, mbpi, extra, cnt;
+ int timer_interval = LPFC_CMF_INTERVAL;
+ uint32_t ms;
+ struct lpfc_cgn_stat *cgs;
+@@ -5940,20 +5940,23 @@ lpfc_cmf_timer(struct hrtimer *timer)
+
+ /* Calculate any extra bytes needed to account for the
+ * timer accuracy. If we are less than LPFC_CMF_INTERVAL
+- * add an extra 3% slop factor, equal to LPFC_CMF_INTERVAL
+- * add an extra 2%. The goal is to equalize total with a
+- * time > LPFC_CMF_INTERVAL or <= LPFC_CMF_INTERVAL + 1
++ * calculate the adjustment needed for total to reflect
++ * a full LPFC_CMF_INTERVAL.
+ */
+- if (ms == LPFC_CMF_INTERVAL)
+- extra = div_u64(total, 50);
+- else if (ms < LPFC_CMF_INTERVAL)
+- extra = div_u64(total, 33);
++ if (ms && ms < LPFC_CMF_INTERVAL) {
++ cnt = div_u64(total, ms); /* bytes per ms */
++ cnt *= LPFC_CMF_INTERVAL; /* what total should be */
++ if (cnt > mbpi)
++ cnt = mbpi;
++ extra = cnt - total;
++ }
+ lpfc_issue_cmf_sync_wqe(phba, LPFC_CMF_INTERVAL, total + extra);
+ } else {
+ /* For Monitor mode or link down we want mbpi
+ * to be the full link speed
+ */
+ mbpi = phba->cmf_link_byte_count;
++ extra = 0;
+ }
+ phba->cmf_timer_cnt++;
+
+@@ -5984,6 +5987,7 @@ lpfc_cmf_timer(struct hrtimer *timer)
+ LPFC_RXMONITOR_TABLE_IN_USE);
+ entry = &phba->rxtable[head];
+ entry->total_bytes = total;
++ entry->cmf_bytes = total + extra;
+ entry->rcv_bytes = rcv;
+ entry->cmf_busy = busy;
+ entry->cmf_info = phba->cmf_active_info;
+--
+2.35.1
+
--- /dev/null
+From fbbf55f60c5ce61f1aae8e8751a616f86518c85f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 16 Sep 2022 00:59:07 -0300
+Subject: scsi: lpfc: Fix memory leak in lpfc_create_port()
+
+From: Rafael Mendonca <rafaelmendsr@gmail.com>
+
+[ Upstream commit dc8e483f684a24cc06e1d5fa958b54db58855093 ]
+
+Commit 5e633302ace1 ("scsi: lpfc: vmid: Add support for VMID in mailbox
+command") introduced allocations for the VMID resources in
+lpfc_create_port() after the call to scsi_host_alloc(). Upon failure on the
+VMID allocations, the new code would branch to the 'out' label, which
+returns NULL without unwinding anything, thus skipping the call to
+scsi_host_put().
+
+Fix the problem by creating a separate label 'out_free_vmid' to unwind the
+VMID resources and make the 'out_put_shost' label call only
+scsi_host_put(), as was done before the introduction of allocations for
+VMID.
+
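+The general shape of the fix, as a sketch with hypothetical helpers:
+each label unwinds exactly the steps that succeeded, in reverse order.
+
+	static int example_create(void)
+	{
+		int err;
+
+		err = step1_alloc();		/* e.g. scsi_host_alloc() */
+		if (err)
+			return err;
+
+		err = step2_alloc();		/* e.g. VMID resources */
+		if (err)
+			goto out_undo_step1;
+
+		err = step3_register();		/* e.g. scsi_add_host_with_dma() */
+		if (err)
+			goto out_undo_step2;
+
+		return 0;
+
+	out_undo_step2:
+		step2_free();
+	out_undo_step1:
+		step1_free();
+		return err;
+	}
+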
+Fixes: 5e633302ace1 ("scsi: lpfc: vmid: Add support for VMID in mailbox command")
+Signed-off-by: Rafael Mendonca <rafaelmendsr@gmail.com>
+Link: https://lore.kernel.org/r/20220916035908.712799-1-rafaelmendsr@gmail.com
+Reviewed-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc_init.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 33e33fff8986..48043e1ba485 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -4666,7 +4666,7 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
+ rc = lpfc_vmid_res_alloc(phba, vport);
+
+ if (rc)
+- goto out;
++ goto out_put_shost;
+
+ /* Initialize all internally managed lists. */
+ INIT_LIST_HEAD(&vport->fc_nodes);
+@@ -4684,16 +4684,17 @@ lpfc_create_port(struct lpfc_hba *phba, int instance, struct device *dev)
+
+ error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
+ if (error)
+- goto out_put_shost;
++ goto out_free_vmid;
+
+ spin_lock_irq(&phba->port_list_lock);
+ list_add_tail(&vport->listentry, &phba->port_list);
+ spin_unlock_irq(&phba->port_list_lock);
+ return vport;
+
+-out_put_shost:
++out_free_vmid:
+ kfree(vport->vmid);
+ bitmap_free(vport->vmid_priority_range);
++out_put_shost:
+ scsi_host_put(shost);
+ out:
+ return NULL;
+--
+2.35.1
+
--- /dev/null
+From b70fb09cf0a3f6e10d5973c29f4020a49bfc7e5f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 18 Aug 2022 18:17:32 -0700
+Subject: scsi: lpfc: Rework MIB Rx Monitor debug info logic
+
+From: James Smart <jsmart2021@gmail.com>
+
+[ Upstream commit bd269188ea94e40ab002cad7b0df8f12b8f0de54 ]
+
+The kernel test robot reported the following sparse warning:
+
+arch/arm64/include/asm/cmpxchg.h:88:1: sparse: sparse: cast truncates
+ bits from constant value (369 becomes 69)
+
+On arm64, atomic_xchg only works on 8-bit byte fields. Thus, the macro
+usage of LPFC_RXMONITOR_TABLE_IN_USE can be unintentionally truncated,
+causing all logic involving the LPFC_RXMONITOR_TABLE_IN_USE macro to
+stop working properly.
+
+Replace the Rx Table atomic_t indexing logic with a new
+lpfc_rx_info_monitor structure that holds a circular ring buffer. For
+locking semantics, a spinlock_t is used.
+
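+The core of the replacement is a fixed-size, overwrite-oldest ring
+guarded by the spinlock; a sketch mirroring lpfc_rx_monitor_record()
+in the diff below (helper name is illustrative):
+
+	static void example_ring_record(struct lpfc_rx_info_monitor *mon,
+					const struct rx_info_entry *entry)
+	{
+		spin_lock(&mon->lock);
+		mon->ring[mon->tail_idx] = *entry;	/* deep copy */
+		mon->tail_idx = (mon->tail_idx + 1) % mon->entries;
+		if (mon->tail_idx == mon->head_idx)	/* full: drop oldest */
+			mon->head_idx = (mon->head_idx + 1) % mon->entries;
+		spin_unlock(&mon->lock);
+	}
+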
+Link: https://lore.kernel.org/r/20220819011736.14141-4-jsmart2021@gmail.com
+Fixes: 17b27ac59224 ("scsi: lpfc: Add rx monitoring statistics")
+Cc: <stable@vger.kernel.org> # v5.15+
+Co-developed-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: Justin Tee <justin.tee@broadcom.com>
+Signed-off-by: James Smart <jsmart2021@gmail.com>
+Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/scsi/lpfc/lpfc.h | 14 ++-
+ drivers/scsi/lpfc/lpfc_crtn.h | 8 ++
+ drivers/scsi/lpfc/lpfc_debugfs.c | 59 ++--------
+ drivers/scsi/lpfc/lpfc_debugfs.h | 2 +-
+ drivers/scsi/lpfc/lpfc_init.c | 83 ++++----------
+ drivers/scsi/lpfc/lpfc_mem.c | 9 +-
+ drivers/scsi/lpfc/lpfc_sli.c | 190 +++++++++++++++++++++++++++++--
+ 7 files changed, 240 insertions(+), 125 deletions(-)
+
+diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
+index fb5092ca9397..7eaaa421848d 100644
+--- a/drivers/scsi/lpfc/lpfc.h
++++ b/drivers/scsi/lpfc/lpfc.h
+@@ -1562,10 +1562,7 @@ struct lpfc_hba {
+ u32 cgn_acqe_cnt;
+
+ /* RX monitor handling for CMF */
+- struct rxtable_entry *rxtable; /* RX_monitor information */
+- atomic_t rxtable_idx_head;
+-#define LPFC_RXMONITOR_TABLE_IN_USE (LPFC_MAX_RXMONITOR_ENTRY + 73)
+- atomic_t rxtable_idx_tail;
++ struct lpfc_rx_info_monitor *rx_monitor;
+ atomic_t rx_max_read_cnt; /* Maximum read bytes */
+ uint64_t rx_block_cnt;
+
+@@ -1614,7 +1611,7 @@ struct lpfc_hba {
+
+ #define LPFC_MAX_RXMONITOR_ENTRY 800
+ #define LPFC_MAX_RXMONITOR_DUMP 32
+-struct rxtable_entry {
++struct rx_info_entry {
+ uint64_t cmf_bytes; /* Total no of read bytes for CMF_SYNC_WQE */
+ uint64_t total_bytes; /* Total no of read bytes requested */
+ uint64_t rcv_bytes; /* Total no of read bytes completed */
+@@ -1629,6 +1626,13 @@ struct rxtable_entry {
+ uint32_t timer_interval;
+ };
+
++struct lpfc_rx_info_monitor {
++ struct rx_info_entry *ring; /* info organized in a circular buffer */
++ u32 head_idx, tail_idx; /* index to head/tail of ring */
++ spinlock_t lock; /* spinlock for ring */
++ u32 entries; /* storing number entries/size of ring */
++};
++
+ static inline struct Scsi_Host *
+ lpfc_shost_from_vport(struct lpfc_vport *vport)
+ {
+diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
+index f7bf589b63fb..a485b8905a23 100644
+--- a/drivers/scsi/lpfc/lpfc_crtn.h
++++ b/drivers/scsi/lpfc/lpfc_crtn.h
+@@ -90,6 +90,14 @@ void lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba);
+ void lpfc_cgn_update_stat(struct lpfc_hba *phba, uint32_t dtag);
+ void lpfc_unblock_requests(struct lpfc_hba *phba);
+ void lpfc_block_requests(struct lpfc_hba *phba);
++int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
++ u32 entries);
++void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor);
++void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
++ struct rx_info_entry *entry);
++u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
++ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
++ u32 buf_len, u32 max_read_entries);
+
+ void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+ void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
+index 61f8dcd072ac..8e8bbe734e87 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.c
++++ b/drivers/scsi/lpfc/lpfc_debugfs.c
+@@ -5520,7 +5520,7 @@ lpfc_rx_monitor_open(struct inode *inode, struct file *file)
+ if (!debug)
+ goto out;
+
+- debug->buffer = vmalloc(MAX_DEBUGFS_RX_TABLE_SIZE);
++ debug->buffer = vmalloc(MAX_DEBUGFS_RX_INFO_SIZE);
+ if (!debug->buffer) {
+ kfree(debug);
+ goto out;
+@@ -5541,57 +5541,18 @@ lpfc_rx_monitor_read(struct file *file, char __user *buf, size_t nbytes,
+ struct lpfc_rx_monitor_debug *debug = file->private_data;
+ struct lpfc_hba *phba = (struct lpfc_hba *)debug->i_private;
+ char *buffer = debug->buffer;
+- struct rxtable_entry *entry;
+- int i, len = 0, head, tail, last, start;
+-
+- head = atomic_read(&phba->rxtable_idx_head);
+- while (head == LPFC_RXMONITOR_TABLE_IN_USE) {
+- /* Table is getting updated */
+- msleep(20);
+- head = atomic_read(&phba->rxtable_idx_head);
+- }
+
+- tail = atomic_xchg(&phba->rxtable_idx_tail, head);
+- if (!phba->rxtable || head == tail) {
+- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+- "Rxtable is empty\n");
+- goto out;
+- }
+- last = (head > tail) ? head : LPFC_MAX_RXMONITOR_ENTRY;
+- start = tail;
+-
+- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+- " MaxBPI Tot_Data_CMF Tot_Data_Cmd "
+- "Tot_Data_Cmpl Lat(us) Avg_IO Max_IO "
+- "Bsy IO_cnt Info BWutil(ms)\n");
+-get_table:
+- for (i = start; i < last; i++) {
+- entry = &phba->rxtable[i];
+- len += scnprintf(buffer + len, MAX_DEBUGFS_RX_TABLE_SIZE - len,
+- "%3d:%12lld %12lld %12lld %12lld "
+- "%7lldus %8lld %7lld "
+- "%2d %4d %2d %2d(%2d)\n",
+- i, entry->max_bytes_per_interval,
+- entry->cmf_bytes,
+- entry->total_bytes,
+- entry->rcv_bytes,
+- entry->avg_io_latency,
+- entry->avg_io_size,
+- entry->max_read_cnt,
+- entry->cmf_busy,
+- entry->io_cnt,
+- entry->cmf_info,
+- entry->timer_utilization,
+- entry->timer_interval);
++ if (!phba->rx_monitor) {
++ scnprintf(buffer, MAX_DEBUGFS_RX_INFO_SIZE,
++ "Rx Monitor Info is empty.\n");
++ } else {
++ lpfc_rx_monitor_report(phba, phba->rx_monitor, buffer,
++ MAX_DEBUGFS_RX_INFO_SIZE,
++ LPFC_MAX_RXMONITOR_ENTRY);
+ }
+
+- if (head != last) {
+- start = 0;
+- last = head;
+- goto get_table;
+- }
+-out:
+- return simple_read_from_buffer(buf, nbytes, ppos, buffer, len);
++ return simple_read_from_buffer(buf, nbytes, ppos, buffer,
++ strlen(buffer));
+ }
+
+ static int
+diff --git a/drivers/scsi/lpfc/lpfc_debugfs.h b/drivers/scsi/lpfc/lpfc_debugfs.h
+index 6dd361c1fd31..f71e5b6311ac 100644
+--- a/drivers/scsi/lpfc/lpfc_debugfs.h
++++ b/drivers/scsi/lpfc/lpfc_debugfs.h
+@@ -282,7 +282,7 @@ struct lpfc_idiag {
+ void *ptr_private;
+ };
+
+-#define MAX_DEBUGFS_RX_TABLE_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
++#define MAX_DEBUGFS_RX_INFO_SIZE (128 * LPFC_MAX_RXMONITOR_ENTRY)
+ struct lpfc_rx_monitor_debug {
+ char *i_private;
+ char *buffer;
+diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
+index 7545ff55ff50..f69a797a58f2 100644
+--- a/drivers/scsi/lpfc/lpfc_init.c
++++ b/drivers/scsi/lpfc/lpfc_init.c
+@@ -5427,38 +5427,12 @@ lpfc_async_link_speed_to_read_top(struct lpfc_hba *phba, uint8_t speed_code)
+ void
+ lpfc_cgn_dump_rxmonitor(struct lpfc_hba *phba)
+ {
+- struct rxtable_entry *entry;
+- int cnt = 0, head, tail, last, start;
+-
+- head = atomic_read(&phba->rxtable_idx_head);
+- tail = atomic_read(&phba->rxtable_idx_tail);
+- if (!phba->rxtable || head == tail) {
+- lpfc_printf_log(phba, KERN_ERR, LOG_CGN_MGMT,
+- "4411 Rxtable is empty\n");
+- return;
+- }
+- last = tail;
+- start = head;
+-
+- /* Display the last LPFC_MAX_RXMONITOR_DUMP entries from the rxtable */
+- while (start != last) {
+- if (start)
+- start--;
+- else
+- start = LPFC_MAX_RXMONITOR_ENTRY - 1;
+- entry = &phba->rxtable[start];
++ if (!phba->rx_monitor) {
+ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
+- "4410 %02d: MBPI %lld Xmit %lld Cmpl %lld "
+- "Lat %lld ASz %lld Info %02d BWUtil %d "
+- "Int %d slot %d\n",
+- cnt, entry->max_bytes_per_interval,
+- entry->total_bytes, entry->rcv_bytes,
+- entry->avg_io_latency, entry->avg_io_size,
+- entry->cmf_info, entry->timer_utilization,
+- entry->timer_interval, start);
+- cnt++;
+- if (cnt >= LPFC_MAX_RXMONITOR_DUMP)
+- return;
++ "4411 Rx Monitor Info is empty.\n");
++ } else {
++ lpfc_rx_monitor_report(phba, phba->rx_monitor, NULL, 0,
++ LPFC_MAX_RXMONITOR_DUMP);
+ }
+ }
+
+@@ -5865,9 +5839,8 @@ lpfc_cmf_timer(struct hrtimer *timer)
+ {
+ struct lpfc_hba *phba = container_of(timer, struct lpfc_hba,
+ cmf_timer);
+- struct rxtable_entry *entry;
++ struct rx_info_entry entry;
+ uint32_t io_cnt;
+- uint32_t head, tail;
+ uint32_t busy, max_read;
+ uint64_t total, rcv, lat, mbpi, extra, cnt;
+ int timer_interval = LPFC_CMF_INTERVAL;
+@@ -5982,40 +5955,30 @@ lpfc_cmf_timer(struct hrtimer *timer)
+ }
+
+ /* Save rxmonitor information for debug */
+- if (phba->rxtable) {
+- head = atomic_xchg(&phba->rxtable_idx_head,
+- LPFC_RXMONITOR_TABLE_IN_USE);
+- entry = &phba->rxtable[head];
+- entry->total_bytes = total;
+- entry->cmf_bytes = total + extra;
+- entry->rcv_bytes = rcv;
+- entry->cmf_busy = busy;
+- entry->cmf_info = phba->cmf_active_info;
++ if (phba->rx_monitor) {
++ entry.total_bytes = total;
++ entry.cmf_bytes = total + extra;
++ entry.rcv_bytes = rcv;
++ entry.cmf_busy = busy;
++ entry.cmf_info = phba->cmf_active_info;
+ if (io_cnt) {
+- entry->avg_io_latency = div_u64(lat, io_cnt);
+- entry->avg_io_size = div_u64(rcv, io_cnt);
++ entry.avg_io_latency = div_u64(lat, io_cnt);
++ entry.avg_io_size = div_u64(rcv, io_cnt);
+ } else {
+- entry->avg_io_latency = 0;
+- entry->avg_io_size = 0;
++ entry.avg_io_latency = 0;
++ entry.avg_io_size = 0;
+ }
+- entry->max_read_cnt = max_read;
+- entry->io_cnt = io_cnt;
+- entry->max_bytes_per_interval = mbpi;
++ entry.max_read_cnt = max_read;
++ entry.io_cnt = io_cnt;
++ entry.max_bytes_per_interval = mbpi;
+ if (phba->cmf_active_mode == LPFC_CFG_MANAGED)
+- entry->timer_utilization = phba->cmf_last_ts;
++ entry.timer_utilization = phba->cmf_last_ts;
+ else
+- entry->timer_utilization = ms;
+- entry->timer_interval = ms;
++ entry.timer_utilization = ms;
++ entry.timer_interval = ms;
+ phba->cmf_last_ts = 0;
+
+- /* Increment rxtable index */
+- head = (head + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+- tail = atomic_read(&phba->rxtable_idx_tail);
+- if (head == tail) {
+- tail = (tail + 1) % LPFC_MAX_RXMONITOR_ENTRY;
+- atomic_set(&phba->rxtable_idx_tail, tail);
+- }
+- atomic_set(&phba->rxtable_idx_head, head);
++ lpfc_rx_monitor_record(phba->rx_monitor, &entry);
+ }
+
+ if (phba->cmf_active_mode == LPFC_CFG_MONITOR) {
+diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
+index 870e53b8f81d..5d36b3514864 100644
+--- a/drivers/scsi/lpfc/lpfc_mem.c
++++ b/drivers/scsi/lpfc/lpfc_mem.c
+@@ -344,9 +344,12 @@ lpfc_mem_free_all(struct lpfc_hba *phba)
+ phba->cgn_i = NULL;
+ }
+
+- /* Free RX table */
+- kfree(phba->rxtable);
+- phba->rxtable = NULL;
++ /* Free RX Monitor */
++ if (phba->rx_monitor) {
++ lpfc_rx_monitor_destroy_ring(phba->rx_monitor);
++ kfree(phba->rx_monitor);
++ phba->rx_monitor = NULL;
++ }
+
+ /* Free the iocb lookup array */
+ kfree(psli->iocbq_lookup);
+diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
+index f594a006d04c..21779ec043e3 100644
+--- a/drivers/scsi/lpfc/lpfc_sli.c
++++ b/drivers/scsi/lpfc/lpfc_sli.c
+@@ -7880,6 +7880,172 @@ static void lpfc_sli4_dip(struct lpfc_hba *phba)
+ }
+ }
+
++/**
++ * lpfc_rx_monitor_create_ring - Initialize ring buffer for rx_monitor
++ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
++ * @entries: Number of rx_info_entry objects to allocate in ring
++ *
++ * Return:
++ * 0 - Success
++ * ENOMEM - Failure to kmalloc
++ **/
++int lpfc_rx_monitor_create_ring(struct lpfc_rx_info_monitor *rx_monitor,
++ u32 entries)
++{
++ rx_monitor->ring = kmalloc_array(entries, sizeof(struct rx_info_entry),
++ GFP_KERNEL);
++ if (!rx_monitor->ring)
++ return -ENOMEM;
++
++ rx_monitor->head_idx = 0;
++ rx_monitor->tail_idx = 0;
++ spin_lock_init(&rx_monitor->lock);
++ rx_monitor->entries = entries;
++
++ return 0;
++}
++
++/**
++ * lpfc_rx_monitor_destroy_ring - Free ring buffer for rx_monitor
++ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
++ **/
++void lpfc_rx_monitor_destroy_ring(struct lpfc_rx_info_monitor *rx_monitor)
++{
++ spin_lock(&rx_monitor->lock);
++ kfree(rx_monitor->ring);
++ rx_monitor->ring = NULL;
++ rx_monitor->entries = 0;
++ rx_monitor->head_idx = 0;
++ rx_monitor->tail_idx = 0;
++ spin_unlock(&rx_monitor->lock);
++}
++
++/**
++ * lpfc_rx_monitor_record - Insert an entry into rx_monitor's ring
++ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
++ * @entry: Pointer to rx_info_entry
++ *
++ * Used to insert an rx_info_entry into rx_monitor's ring. Note that this is a
++ * deep copy of rx_info_entry not a shallow copy of the rx_info_entry ptr.
++ *
++ * This is called from lpfc_cmf_timer, which is in timer/softirq context.
++ *
++ * In cases of old data overflow, we do a best effort of FIFO order.
++ **/
++void lpfc_rx_monitor_record(struct lpfc_rx_info_monitor *rx_monitor,
++ struct rx_info_entry *entry)
++{
++ struct rx_info_entry *ring = rx_monitor->ring;
++ u32 *head_idx = &rx_monitor->head_idx;
++ u32 *tail_idx = &rx_monitor->tail_idx;
++ spinlock_t *ring_lock = &rx_monitor->lock;
++ u32 ring_size = rx_monitor->entries;
++
++ spin_lock(ring_lock);
++ memcpy(&ring[*tail_idx], entry, sizeof(*entry));
++ *tail_idx = (*tail_idx + 1) % ring_size;
++
++ /* Best effort of FIFO saved data */
++ if (*tail_idx == *head_idx)
++ *head_idx = (*head_idx + 1) % ring_size;
++
++ spin_unlock(ring_lock);
++}
++
++/**
++ * lpfc_rx_monitor_report - Read out rx_monitor's ring
++ * @phba: Pointer to lpfc_hba object
++ * @rx_monitor: Pointer to lpfc_rx_info_monitor object
++ * @buf: Pointer to char buffer that will contain rx monitor info data
++ * @buf_len: Length buf including null char
++ * @max_read_entries: Maximum number of entries to read out of ring
++ *
++ * Used to dump/read what's in rx_monitor's ring buffer.
++ *
++ * If buf is NULL || buf_len == 0, then it is implied that we want to log the
++ * information to kmsg instead of filling out buf.
++ *
++ * Return:
++ * Number of entries read out of the ring
++ **/
++u32 lpfc_rx_monitor_report(struct lpfc_hba *phba,
++ struct lpfc_rx_info_monitor *rx_monitor, char *buf,
++ u32 buf_len, u32 max_read_entries)
++{
++ struct rx_info_entry *ring = rx_monitor->ring;
++ struct rx_info_entry *entry;
++ u32 *head_idx = &rx_monitor->head_idx;
++ u32 *tail_idx = &rx_monitor->tail_idx;
++ spinlock_t *ring_lock = &rx_monitor->lock;
++ u32 ring_size = rx_monitor->entries;
++ u32 cnt = 0;
++ char tmp[DBG_LOG_STR_SZ] = {0};
++ bool log_to_kmsg = (!buf || !buf_len) ? true : false;
++
++ if (!log_to_kmsg) {
++ /* clear the buffer to be sure */
++ memset(buf, 0, buf_len);
++
++ scnprintf(buf, buf_len, "\t%-16s%-16s%-16s%-16s%-8s%-8s%-8s"
++ "%-8s%-8s%-8s%-16s\n",
++ "MaxBPI", "Tot_Data_CMF",
++ "Tot_Data_Cmd", "Tot_Data_Cmpl",
++ "Lat(us)", "Avg_IO", "Max_IO", "Bsy",
++ "IO_cnt", "Info", "BWutil(ms)");
++ }
++
++ /* Needs to be _bh because record is called from timer interrupt
++ * context
++ */
++ spin_lock_bh(ring_lock);
++ while (*head_idx != *tail_idx) {
++ entry = &ring[*head_idx];
++
++ /* Read out this entry's data. */
++ if (!log_to_kmsg) {
++ /* If !log_to_kmsg, then store to buf. */
++ scnprintf(tmp, sizeof(tmp),
++ "%03d:\t%-16llu%-16llu%-16llu%-16llu%-8llu"
++ "%-8llu%-8llu%-8u%-8u%-8u%u(%u)\n",
++ *head_idx, entry->max_bytes_per_interval,
++ entry->cmf_bytes, entry->total_bytes,
++ entry->rcv_bytes, entry->avg_io_latency,
++ entry->avg_io_size, entry->max_read_cnt,
++ entry->cmf_busy, entry->io_cnt,
++ entry->cmf_info, entry->timer_utilization,
++ entry->timer_interval);
++
++ /* Check for buffer overflow */
++ if ((strlen(buf) + strlen(tmp)) >= buf_len)
++ break;
++
++ /* Append entry's data to buffer */
++ strlcat(buf, tmp, buf_len);
++ } else {
++ lpfc_printf_log(phba, KERN_INFO, LOG_CGN_MGMT,
++ "4410 %02u: MBPI %llu Xmit %llu "
++ "Cmpl %llu Lat %llu ASz %llu Info %02u "
++ "BWUtil %u Int %u slot %u\n",
++ cnt, entry->max_bytes_per_interval,
++ entry->total_bytes, entry->rcv_bytes,
++ entry->avg_io_latency,
++ entry->avg_io_size, entry->cmf_info,
++ entry->timer_utilization,
++ entry->timer_interval, *head_idx);
++ }
++
++ *head_idx = (*head_idx + 1) % ring_size;
++
++ /* Don't feed more than max_read_entries */
++ cnt++;
++ if (cnt >= max_read_entries)
++ break;
++ }
++ spin_unlock_bh(ring_lock);
++
++ return cnt;
++}
++
+ /**
+ * lpfc_cmf_setup - Initialize idle_stat tracking
+ * @phba: Pointer to HBA context object.
+@@ -8071,19 +8237,29 @@ lpfc_cmf_setup(struct lpfc_hba *phba)
+ phba->cmf_interval_rate = LPFC_CMF_INTERVAL;
+
+ /* Allocate RX Monitor Buffer */
+- if (!phba->rxtable) {
+- phba->rxtable = kmalloc_array(LPFC_MAX_RXMONITOR_ENTRY,
+- sizeof(struct rxtable_entry),
+- GFP_KERNEL);
+- if (!phba->rxtable) {
++ if (!phba->rx_monitor) {
++ phba->rx_monitor = kzalloc(sizeof(*phba->rx_monitor),
++ GFP_KERNEL);
++
++ if (!phba->rx_monitor) {
+ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+ "2644 Failed to alloc memory "
+ "for RX Monitor Buffer\n");
+ return -ENOMEM;
+ }
++
++ /* Instruct the rx_monitor object to instantiate its ring */
++ if (lpfc_rx_monitor_create_ring(phba->rx_monitor,
++ LPFC_MAX_RXMONITOR_ENTRY)) {
++ kfree(phba->rx_monitor);
++ phba->rx_monitor = NULL;
++ lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
++ "2645 Failed to alloc memory "
++ "for RX Monitor's Ring\n");
++ return -ENOMEM;
++ }
+ }
+- atomic_set(&phba->rxtable_idx_head, 0);
+- atomic_set(&phba->rxtable_idx_tail, 0);
++
+ return 0;
+ }
+
+--
+2.35.1
+
--- /dev/null
+From edf11620c93fc1a9a7b3dbbdb5eccb8e45608c79 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sun, 11 Sep 2022 11:02:03 +0200
+Subject: serial: stm32: Deassert Transmit Enable on ->rs485_config()
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Lukas Wunner <lukas@wunner.de>
+
+[ Upstream commit adafbbf6895eb0ce41a313c6ee68870ab9aa93cd ]
+
+The STM32 USART can control RS-485 Transmit Enable in hardware. Since
+commit 7df5081cbf5e ("serial: stm32: Add RS485 RTS GPIO control"),
+it can alternatively be controlled in software. That was done to allow
+RS-485 even if the RTS pin is unavailable because it's pinmuxed to a
+different function.
+
+However the commit neglected to deassert Transmit Enable upon invocation
+of the ->rs485_config() callback. Fix it.
+
+Avoid forward declarations by moving stm32_usart_tx_empty(),
+stm32_usart_rs485_rts_enable() and stm32_usart_rs485_rts_disable()
+further up in the driver.
+
+Fixes: 7df5081cbf5e ("serial: stm32: Add RS485 RTS GPIO control")
+Cc: stable@vger.kernel.org # v5.9+
+Cc: Marek Vasut <marex@denx.de>
+Reviewed-by: Ilpo Järvinen <ilpo.jarvinen@linux.intel.com>
+Signed-off-by: Lukas Wunner <lukas@wunner.de>
+Link: https://lore.kernel.org/r/6059eab35dba394468335ef640df8b0050fd9dbd.1662886616.git.lukas@wunner.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/stm32-usart.c | 100 ++++++++++++++++---------------
+ 1 file changed, 53 insertions(+), 47 deletions(-)
+
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 4a4108ef23e8..c2daf8c151e6 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -61,6 +61,53 @@ static void stm32_usart_clr_bits(struct uart_port *port, u32 reg, u32 bits)
+ writel_relaxed(val, port->membase + reg);
+ }
+
++static unsigned int stm32_usart_tx_empty(struct uart_port *port)
++{
++ struct stm32_port *stm32_port = to_stm32_port(port);
++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++
++ if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
++ return TIOCSER_TEMT;
++
++ return 0;
++}
++
++static void stm32_usart_rs485_rts_enable(struct uart_port *port)
++{
++ struct stm32_port *stm32_port = to_stm32_port(port);
++ struct serial_rs485 *rs485conf = &port->rs485;
++
++ if (stm32_port->hw_flow_control ||
++ !(rs485conf->flags & SER_RS485_ENABLED))
++ return;
++
++ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
++ mctrl_gpio_set(stm32_port->gpios,
++ stm32_port->port.mctrl | TIOCM_RTS);
++ } else {
++ mctrl_gpio_set(stm32_port->gpios,
++ stm32_port->port.mctrl & ~TIOCM_RTS);
++ }
++}
++
++static void stm32_usart_rs485_rts_disable(struct uart_port *port)
++{
++ struct stm32_port *stm32_port = to_stm32_port(port);
++ struct serial_rs485 *rs485conf = &port->rs485;
++
++ if (stm32_port->hw_flow_control ||
++ !(rs485conf->flags & SER_RS485_ENABLED))
++ return;
++
++ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
++ mctrl_gpio_set(stm32_port->gpios,
++ stm32_port->port.mctrl & ~TIOCM_RTS);
++ } else {
++ mctrl_gpio_set(stm32_port->gpios,
++ stm32_port->port.mctrl | TIOCM_RTS);
++ }
++}
++
+ static void stm32_usart_config_reg_rs485(u32 *cr1, u32 *cr3, u32 delay_ADE,
+ u32 delay_DDE, u32 baud)
+ {
+@@ -149,6 +196,12 @@ static int stm32_usart_config_rs485(struct uart_port *port,
+
+ stm32_usart_set_bits(port, ofs->cr1, BIT(cfg->uart_enable_bit));
+
++ /* Adjust RTS polarity in case it's driven in software */
++ if (stm32_usart_tx_empty(port))
++ stm32_usart_rs485_rts_disable(port);
++ else
++ stm32_usart_rs485_rts_enable(port);
++
+ return 0;
+ }
+
+@@ -436,42 +489,6 @@ static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
+ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
+ }
+
+-static void stm32_usart_rs485_rts_enable(struct uart_port *port)
+-{
+- struct stm32_port *stm32_port = to_stm32_port(port);
+- struct serial_rs485 *rs485conf = &port->rs485;
+-
+- if (stm32_port->hw_flow_control ||
+- !(rs485conf->flags & SER_RS485_ENABLED))
+- return;
+-
+- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+- mctrl_gpio_set(stm32_port->gpios,
+- stm32_port->port.mctrl | TIOCM_RTS);
+- } else {
+- mctrl_gpio_set(stm32_port->gpios,
+- stm32_port->port.mctrl & ~TIOCM_RTS);
+- }
+-}
+-
+-static void stm32_usart_rs485_rts_disable(struct uart_port *port)
+-{
+- struct stm32_port *stm32_port = to_stm32_port(port);
+- struct serial_rs485 *rs485conf = &port->rs485;
+-
+- if (stm32_port->hw_flow_control ||
+- !(rs485conf->flags & SER_RS485_ENABLED))
+- return;
+-
+- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+- mctrl_gpio_set(stm32_port->gpios,
+- stm32_port->port.mctrl & ~TIOCM_RTS);
+- } else {
+- mctrl_gpio_set(stm32_port->gpios,
+- stm32_port->port.mctrl | TIOCM_RTS);
+- }
+-}
+-
+ static void stm32_usart_transmit_chars_pio(struct uart_port *port)
+ {
+ struct stm32_port *stm32_port = to_stm32_port(port);
+@@ -689,17 +706,6 @@ static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
+ return IRQ_HANDLED;
+ }
+
+-static unsigned int stm32_usart_tx_empty(struct uart_port *port)
+-{
+- struct stm32_port *stm32_port = to_stm32_port(port);
+- const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+-
+- if (readl_relaxed(port->membase + ofs->isr) & USART_SR_TC)
+- return TIOCSER_TEMT;
+-
+- return 0;
+-}
+-
+ static void stm32_usart_set_mctrl(struct uart_port *port, unsigned int mctrl)
+ {
+ struct stm32_port *stm32_port = to_stm32_port(port);
+--
+2.35.1
+
--- /dev/null
+From dd8c5cbb18aa72a3448ed2c02910460c8e789c43 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Apr 2022 18:28:44 +0200
+Subject: serial: stm32: Factor out GPIO RTS toggling into separate function
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit 3bcea529b295a993b1b05db63f245ae8030c5acf ]
+
+Pull out the GPIO RTS enable and disable handling into a separate
+function. Also limit the scope of GPIO RTS toggling to GPIO-emulated
+RS485.
+
+Signed-off-by: Marek Vasut <marex@denx.de>
+Cc: Alexandre Torgue <alexandre.torgue@foss.st.com>
+Cc: Erwan Le Ray <erwan.leray@foss.st.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Jean Philippe Romain <jean-philippe.romain@foss.st.com>
+Cc: Valentin Caron <valentin.caron@foss.st.com>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-stm32@st-md-mailman.stormreply.com
+To: linux-serial@vger.kernel.org
+Link: https://lore.kernel.org/r/20220430162845.244655-1-marex@denx.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: adafbbf6895e ("serial: stm32: Deassert Transmit Enable on ->rs485_config()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/stm32-usart.c | 59 ++++++++++++++++++++------------
+ 1 file changed, 38 insertions(+), 21 deletions(-)
+
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index b4c512b138df..0e7bcbaf7268 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -420,6 +420,42 @@ static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
+ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
+ }
+
++static void stm32_usart_rs485_rts_enable(struct uart_port *port)
++{
++ struct stm32_port *stm32_port = to_stm32_port(port);
++ struct serial_rs485 *rs485conf = &port->rs485;
++
++ if (stm32_port->hw_flow_control ||
++ !(rs485conf->flags & SER_RS485_ENABLED))
++ return;
++
++ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
++ mctrl_gpio_set(stm32_port->gpios,
++ stm32_port->port.mctrl | TIOCM_RTS);
++ } else {
++ mctrl_gpio_set(stm32_port->gpios,
++ stm32_port->port.mctrl & ~TIOCM_RTS);
++ }
++}
++
++static void stm32_usart_rs485_rts_disable(struct uart_port *port)
++{
++ struct stm32_port *stm32_port = to_stm32_port(port);
++ struct serial_rs485 *rs485conf = &port->rs485;
++
++ if (stm32_port->hw_flow_control ||
++ !(rs485conf->flags & SER_RS485_ENABLED))
++ return;
++
++ if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
++ mctrl_gpio_set(stm32_port->gpios,
++ stm32_port->port.mctrl & ~TIOCM_RTS);
++ } else {
++ mctrl_gpio_set(stm32_port->gpios,
++ stm32_port->port.mctrl | TIOCM_RTS);
++ }
++}
++
+ static void stm32_usart_transmit_chars_pio(struct uart_port *port)
+ {
+ struct stm32_port *stm32_port = to_stm32_port(port);
+@@ -666,40 +702,21 @@ static void stm32_usart_disable_ms(struct uart_port *port)
+ static void stm32_usart_stop_tx(struct uart_port *port)
+ {
+ struct stm32_port *stm32_port = to_stm32_port(port);
+- struct serial_rs485 *rs485conf = &port->rs485;
+
+ stm32_usart_tx_interrupt_disable(port);
+
+- if (rs485conf->flags & SER_RS485_ENABLED) {
+- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+- mctrl_gpio_set(stm32_port->gpios,
+- stm32_port->port.mctrl & ~TIOCM_RTS);
+- } else {
+- mctrl_gpio_set(stm32_port->gpios,
+- stm32_port->port.mctrl | TIOCM_RTS);
+- }
+- }
++ stm32_usart_rs485_rts_disable(port);
+ }
+
+ /* There are probably characters waiting to be transmitted. */
+ static void stm32_usart_start_tx(struct uart_port *port)
+ {
+- struct stm32_port *stm32_port = to_stm32_port(port);
+- struct serial_rs485 *rs485conf = &port->rs485;
+ struct circ_buf *xmit = &port->state->xmit;
+
+ if (uart_circ_empty(xmit) && !port->x_char)
+ return;
+
+- if (rs485conf->flags & SER_RS485_ENABLED) {
+- if (rs485conf->flags & SER_RS485_RTS_ON_SEND) {
+- mctrl_gpio_set(stm32_port->gpios,
+- stm32_port->port.mctrl | TIOCM_RTS);
+- } else {
+- mctrl_gpio_set(stm32_port->gpios,
+- stm32_port->port.mctrl & ~TIOCM_RTS);
+- }
+- }
++ stm32_usart_rs485_rts_enable(port);
+
+ stm32_usart_transmit_chars(port);
+ }
+--
+2.35.1
+
--- /dev/null
+From 8661fb04fedaa722480ba4fb0410ea9801db0c65 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Oct 2021 17:03:30 +0200
+Subject: serial: stm32: re-introduce an irq flag condition in
+ usart_receive_chars
+
+From: Erwan Le Ray <erwan.leray@foss.st.com>
+
+[ Upstream commit cc58d0a3f0a4755b9c808e065d9227c6e984e7db ]
+
+Re-introduce an irq flag condition in usart_receive_chars.
+This condition was deleted by commit 75f4e830fa9c ("serial: do not
+restore interrupt state in sysrq helper").
+That code existed to handle the threaded case and was removed because
+it is no longer needed there. Nevertheless, an IRQ-safe lock is still
+needed in some cases, when DMA must be stopped to receive errors or
+breaks in PIO mode.
+This patch is a precursor to the complete rework of the stm32 serial
+driver DMA implementation.
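+
+For background, the hazard an IRQ-unsafe lock creates here (a minimal
+sketch, simplified from the driver; not the exact code):
+
+	/* thread context, IRQs still enabled */
+	spin_lock(&port->lock);
+	/* the USART IRQ fires on this CPU; its handler also takes  */
+	/*   spin_lock(&port->lock);  -> self-deadlock              */
+
+	/* IRQ-safe variant, used when the caller may be interrupted */
+	spin_lock_irqsave(&port->lock, flags);
+	/* ... drain RX, handle errors ... */
+	spin_unlock_irqrestore(&port->lock, flags);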
+
+Signed-off-by: Erwan Le Ray <erwan.leray@foss.st.com>
+Link: https://lore.kernel.org/r/20211020150332.10214-2-erwan.leray@foss.st.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: adafbbf6895e ("serial: stm32: Deassert Transmit Enable on ->rs485_config()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/stm32-usart.c | 21 +++++++++++++--------
+ 1 file changed, 13 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index fc166cc2c856..bba4facf7425 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -211,19 +211,22 @@ static unsigned long stm32_usart_get_char(struct uart_port *port, u32 *sr,
+ return c;
+ }
+
+-static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
++static void stm32_usart_receive_chars(struct uart_port *port, bool irqflag)
+ {
+ struct tty_port *tport = &port->state->port;
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+- unsigned long c;
++ unsigned long c, flags;
+ u32 sr;
+ char flag;
+
+- spin_lock(&port->lock);
++ if (irqflag)
++ spin_lock_irqsave(&port->lock, flags);
++ else
++ spin_lock(&port->lock);
+
+ while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
+- threaded)) {
++ irqflag)) {
+ sr |= USART_SR_DUMMY_RX;
+ flag = TTY_NORMAL;
+
+@@ -277,7 +280,10 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool threaded)
+ uart_insert_char(port, sr, USART_SR_ORE, c, flag);
+ }
+
+- uart_unlock_and_check_sysrq(port);
++ if (irqflag)
++ uart_unlock_and_check_sysrq_irqrestore(port, irqflag);
++ else
++ uart_unlock_and_check_sysrq(port);
+
+ tty_flip_buffer_push(tport);
+ }
+@@ -510,10 +516,9 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ static irqreturn_t stm32_usart_threaded_interrupt(int irq, void *ptr)
+ {
+ struct uart_port *port = ptr;
+- struct stm32_port *stm32_port = to_stm32_port(port);
+
+- if (stm32_port->rx_ch)
+- stm32_usart_receive_chars(port, true);
++ /* Receiver timeout irq for DMA RX */
++ stm32_usart_receive_chars(port, false);
+
+ return IRQ_HANDLED;
+ }
+--
+2.35.1
+
--- /dev/null
+From bf8b05df5e2e3aec519732bb58eab724f82aeff1 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 20 Oct 2021 17:03:31 +0200
+Subject: serial: stm32: rework RX over DMA
+
+From: Erwan Le Ray <erwan.leray@foss.st.com>
+
+[ Upstream commit 33bb2f6ac3088936b7aad3cab6f439f91af0223c ]
+
+This patch reworks RX support over DMA to improve reliability:
+- change the DMA buffer cyclic configuration to use 2 periods. DMA
+buffer data are handled by a flip-flop between the 2 periods in order
+to avoid the risk of data loss/corruption (a sketch of the drain
+arithmetic follows this list)
+- change the size of the DMA buffer to 4096 bytes to limit overruns
+- add RX error management (breaks, parity, framing and overrun).
+ When an error occurs on the UART line, the DMA request line is masked
+ at HW level. The SW must first clear DMAR (the DMA request line
+ enable) to handle the error, then re-enable DMAR to recover. So, any
+ correct data is taken from the DMA buffer before handling the error
+ itself. Then errors are handled from RDR/ISR/FIFO (e.g. in PIO mode).
+ Last, DMA reception is resumed.
+- add a condition on the DMA request line in the DMA RX routines in
+order to switch to PIO mode when the DMA request line is disabled, even
+if the DMA channel is still enabled.
+ When the UART is a wakeup source and is configured to use DMA for RX,
+ any incoming data that wakes up the system isn't correctly received.
+ At data reception, the irq_handler handles the WUF irq, and then the
+ data reception over DMA.
+ As the DMA transfer has been terminated at suspend, and will be
+ restored by the resume callback (which has not yet been called by the
+ system), the data can't be received.
+ The wake-up data has to be handled in PIO mode while the resume
+ callback has not been called.
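+
+To illustrate the flip-flop drain between the 2 periods, here is a
+minimal runnable userspace sketch of the arithmetic (made-up values;
+the driver performs the same drain in two tty_insert_flip_string()
+steps):
+
+	#include <stdio.h>
+
+	#define RX_BUF_L 4096	/* cyclic DMA buffer length in this series */
+
+	/* 'last_res' is the residue recorded after the previous drain,
+	 * 'residue' the one the DMA engine reports now; both count bytes
+	 * still to be written before the engine wraps to the start. */
+	static unsigned int bytes_to_drain(unsigned int last_res,
+					   unsigned int residue)
+	{
+		if (residue > last_res)	/* engine wrapped past the end */
+			return last_res + (RX_BUF_L - residue);
+		return last_res - residue;
+	}
+
+	int main(void)
+	{
+		printf("%u\n", bytes_to_drain(1000, 200));	/* 800 */
+		printf("%u\n", bytes_to_drain(100, 3900));	/* 296 */
+		return 0;
+	}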
+
+Signed-off-by: Valentin Caron <valentin.caron@foss.st.com>
+Signed-off-by: Erwan Le Ray <erwan.leray@foss.st.com>
+Link: https://lore.kernel.org/r/20211020150332.10214-3-erwan.leray@foss.st.com
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: adafbbf6895e ("serial: stm32: Deassert Transmit Enable on ->rs485_config()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/stm32-usart.c | 206 ++++++++++++++++++++++++-------
+ drivers/tty/serial/stm32-usart.h | 12 +-
+ 2 files changed, 165 insertions(+), 53 deletions(-)
+
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index bba4facf7425..b4c512b138df 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -167,66 +167,60 @@ static int stm32_usart_init_rs485(struct uart_port *port,
+ return uart_get_rs485_mode(port);
+ }
+
+-static int stm32_usart_pending_rx(struct uart_port *port, u32 *sr,
+- int *last_res, bool threaded)
++static bool stm32_usart_rx_dma_enabled(struct uart_port *port)
+ {
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+- enum dma_status status;
+- struct dma_tx_state state;
+
+- *sr = readl_relaxed(port->membase + ofs->isr);
++ if (!stm32_port->rx_ch)
++ return false;
+
+- if (threaded && stm32_port->rx_ch) {
+- status = dmaengine_tx_status(stm32_port->rx_ch,
+- stm32_port->rx_ch->cookie,
+- &state);
+- if (status == DMA_IN_PROGRESS && (*last_res != state.residue))
+- return 1;
+- else
+- return 0;
+- } else if (*sr & USART_SR_RXNE) {
+- return 1;
++ return !!(readl_relaxed(port->membase + ofs->cr3) & USART_CR3_DMAR);
++}
++
++/* Return true when data is pending (in pio mode), and false when no data is pending. */
++static bool stm32_usart_pending_rx_pio(struct uart_port *port, u32 *sr)
++{
++ struct stm32_port *stm32_port = to_stm32_port(port);
++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++
++ *sr = readl_relaxed(port->membase + ofs->isr);
++ /* Get pending characters in RDR or FIFO */
++ if (*sr & USART_SR_RXNE) {
++ /* Get all pending characters from the RDR or the FIFO when using interrupts */
++ if (!stm32_usart_rx_dma_enabled(port))
++ return true;
++
++ /* Handle only RX data errors when using DMA */
++ if (*sr & USART_SR_ERR_MASK)
++ return true;
+ }
+- return 0;
++
++ return false;
+ }
+
+-static unsigned long stm32_usart_get_char(struct uart_port *port, u32 *sr,
+- int *last_res)
++static unsigned long stm32_usart_get_char_pio(struct uart_port *port)
+ {
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ unsigned long c;
+
+- if (stm32_port->rx_ch) {
+- c = stm32_port->rx_buf[RX_BUF_L - (*last_res)--];
+- if ((*last_res) == 0)
+- *last_res = RX_BUF_L;
+- } else {
+- c = readl_relaxed(port->membase + ofs->rdr);
+- /* apply RDR data mask */
+- c &= stm32_port->rdr_mask;
+- }
++ c = readl_relaxed(port->membase + ofs->rdr);
++ /* Apply RDR data mask */
++ c &= stm32_port->rdr_mask;
+
+ return c;
+ }
+
+-static void stm32_usart_receive_chars(struct uart_port *port, bool irqflag)
++static void stm32_usart_receive_chars_pio(struct uart_port *port)
+ {
+- struct tty_port *tport = &port->state->port;
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+- unsigned long c, flags;
++ unsigned long c;
+ u32 sr;
+ char flag;
+
+- if (irqflag)
+- spin_lock_irqsave(&port->lock, flags);
+- else
+- spin_lock(&port->lock);
+-
+- while (stm32_usart_pending_rx(port, &sr, &stm32_port->last_res,
+- irqflag)) {
++ while (stm32_usart_pending_rx_pio(port, &sr)) {
+ sr |= USART_SR_DUMMY_RX;
+ flag = TTY_NORMAL;
+
+@@ -245,7 +239,7 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool irqflag)
+ writel_relaxed(sr & USART_SR_ERR_MASK,
+ port->membase + ofs->icr);
+
+- c = stm32_usart_get_char(port, &sr, &stm32_port->last_res);
++ c = stm32_usart_get_char_pio(port);
+ port->icount.rx++;
+ if (sr & USART_SR_ERR_MASK) {
+ if (sr & USART_SR_ORE) {
+@@ -279,6 +273,94 @@ static void stm32_usart_receive_chars(struct uart_port *port, bool irqflag)
+ continue;
+ uart_insert_char(port, sr, USART_SR_ORE, c, flag);
+ }
++}
++
++static void stm32_usart_push_buffer_dma(struct uart_port *port, unsigned int dma_size)
++{
++ struct stm32_port *stm32_port = to_stm32_port(port);
++ struct tty_port *ttyport = &stm32_port->port.state->port;
++ unsigned char *dma_start;
++ int dma_count, i;
++
++ dma_start = stm32_port->rx_buf + (RX_BUF_L - stm32_port->last_res);
++
++ /*
++ * Apply rdr_mask on buffer in order to mask parity bit.
++ * This loop is useless in cs8 mode because DMA copies only
++ * 8 bits and already ignores parity bit.
++ */
++ if (!(stm32_port->rdr_mask == (BIT(8) - 1)))
++ for (i = 0; i < dma_size; i++)
++ *(dma_start + i) &= stm32_port->rdr_mask;
++
++ dma_count = tty_insert_flip_string(ttyport, dma_start, dma_size);
++ port->icount.rx += dma_count;
++ if (dma_count != dma_size)
++ port->icount.buf_overrun++;
++ stm32_port->last_res -= dma_count;
++ if (stm32_port->last_res == 0)
++ stm32_port->last_res = RX_BUF_L;
++}
++
++static void stm32_usart_receive_chars_dma(struct uart_port *port)
++{
++ struct stm32_port *stm32_port = to_stm32_port(port);
++ unsigned int dma_size;
++
++ /* DMA buffer is configured in cyclic mode and handles the rollback of the buffer. */
++ if (stm32_port->rx_dma_state.residue > stm32_port->last_res) {
++ /* Conditional first part: from last_res to end of DMA buffer */
++ dma_size = stm32_port->last_res;
++ stm32_usart_push_buffer_dma(port, dma_size);
++ }
++
++ dma_size = stm32_port->last_res - stm32_port->rx_dma_state.residue;
++ stm32_usart_push_buffer_dma(port, dma_size);
++}
++
++static void stm32_usart_receive_chars(struct uart_port *port, bool irqflag)
++{
++ struct tty_port *tport = &port->state->port;
++ struct stm32_port *stm32_port = to_stm32_port(port);
++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++ enum dma_status rx_dma_status;
++ unsigned long flags;
++ u32 sr;
++
++ if (irqflag)
++ spin_lock_irqsave(&port->lock, flags);
++ else
++ spin_lock(&port->lock);
++
++ if (stm32_usart_rx_dma_enabled(port)) {
++ rx_dma_status = dmaengine_tx_status(stm32_port->rx_ch,
++ stm32_port->rx_ch->cookie,
++ &stm32_port->rx_dma_state);
++ if (rx_dma_status == DMA_IN_PROGRESS) {
++ /* Empty DMA buffer */
++ stm32_usart_receive_chars_dma(port);
++ sr = readl_relaxed(port->membase + ofs->isr);
++ if (sr & USART_SR_ERR_MASK) {
++ /* Disable DMA request line */
++ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
++
++ /* Switch to PIO mode to handle the errors */
++ stm32_usart_receive_chars_pio(port);
++
++ /* Switch back to DMA mode */
++ stm32_usart_set_bits(port, ofs->cr3, USART_CR3_DMAR);
++ }
++ } else {
++ /* Disable RX DMA */
++ dmaengine_terminate_async(stm32_port->rx_ch);
++ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
++ /* Fall back to interrupt mode */
++ dev_dbg(port->dev, "DMA error, fallback to irq mode\n");
++ stm32_usart_receive_chars_pio(port);
++ }
++ } else {
++ stm32_usart_receive_chars_pio(port);
++ }
+
+ if (irqflag)
+ uart_unlock_and_check_sysrq_irqrestore(port, irqflag);
+@@ -320,6 +402,13 @@ static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
+ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
+ }
+
++static void stm32_usart_rx_dma_complete(void *arg)
++{
++ struct uart_port *port = arg;
++
++ stm32_usart_receive_chars(port, true);
++}
++
+ static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
+ {
+ struct stm32_port *stm32_port = to_stm32_port(port);
+@@ -498,7 +587,12 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ pm_wakeup_event(tport->tty->dev, 0);
+ }
+
+- if ((sr & USART_SR_RXNE) && !(stm32_port->rx_ch))
++ /*
++ * rx errors in dma mode has to be handled ASAP to avoid overrun as the DMA request
++ * line has been masked by HW and rx data are stacking in FIFO.
++ */
++ if (((sr & USART_SR_RXNE) && !stm32_usart_rx_dma_enabled(port)) ||
++ ((sr & USART_SR_ERR_MASK) && stm32_usart_rx_dma_enabled(port)))
+ stm32_usart_receive_chars(port, false);
+
+ if ((sr & USART_SR_TXE) && !(stm32_port->tx_ch)) {
+@@ -507,7 +601,7 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+ spin_unlock(&port->lock);
+ }
+
+- if (stm32_port->rx_ch)
++ if (stm32_usart_rx_dma_enabled(port))
+ return IRQ_WAKE_THREAD;
+ else
+ return IRQ_HANDLED;
+@@ -843,9 +937,11 @@ static void stm32_usart_set_termios(struct uart_port *port,
+ stm32_port->cr1_irq = USART_CR1_RTOIE;
+ writel_relaxed(bits, port->membase + ofs->rtor);
+ cr2 |= USART_CR2_RTOEN;
+- /* Not using dma, enable fifo threshold irq */
+- if (!stm32_port->rx_ch)
+- stm32_port->cr3_irq = USART_CR3_RXFTIE;
++ /*
++ * Enable fifo threshold irq in two cases, either when there is no DMA, or when
++ * wake up over usart, from low power until the DMA gets re-enabled by resume.
++ */
++ stm32_port->cr3_irq = USART_CR3_RXFTIE;
+ }
+
+ cr1 |= stm32_port->cr1_irq;
+@@ -908,8 +1004,16 @@ static void stm32_usart_set_termios(struct uart_port *port,
+ if ((termios->c_cflag & CREAD) == 0)
+ port->ignore_status_mask |= USART_SR_DUMMY_RX;
+
+- if (stm32_port->rx_ch)
++ if (stm32_port->rx_ch) {
++ /*
++ * Setup DMA to collect only valid data and enable error irqs.
++ * This also enables break reception when using DMA.
++ */
++ cr1 |= USART_CR1_PEIE;
++ cr3 |= USART_CR3_EIE;
+ cr3 |= USART_CR3_DMAR;
++ cr3 |= USART_CR3_DDRE;
++ }
+
+ if (rs485conf->flags & SER_RS485_ENABLED) {
+ stm32_usart_config_reg_rs485(&cr1, &cr3,
+@@ -1238,9 +1342,9 @@ static int stm32_usart_of_dma_rx_probe(struct stm32_port *stm32port,
+ return -ENODEV;
+ }
+
+- /* No callback as dma buffer is drained on usart interrupt */
+- desc->callback = NULL;
+- desc->callback_param = NULL;
++ /* Set DMA callback */
++ desc->callback = stm32_usart_rx_dma_complete;
++ desc->callback_param = port;
+
+ /* Push current DMA transaction in the pending queue */
+ ret = dma_submit_error(dmaengine_submit(desc));
+@@ -1405,6 +1509,7 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
+ struct stm32_port *stm32_port = to_stm32_port(port);
+ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
+ int err;
++ u32 cr3;
+
+ pm_runtime_get_sync(&pdev->dev);
+ err = uart_remove_one_port(&stm32_usart_driver, port);
+@@ -1415,7 +1520,12 @@ static int stm32_usart_serial_remove(struct platform_device *pdev)
+ pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
+
+- stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAR);
++ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_PEIE);
++ cr3 = readl_relaxed(port->membase + ofs->cr3);
++ cr3 &= ~USART_CR3_EIE;
++ cr3 &= ~USART_CR3_DMAR;
++ cr3 &= ~USART_CR3_DDRE;
++ writel_relaxed(cr3, port->membase + ofs->cr3);
+
+ if (stm32_port->tx_ch) {
+ stm32_usart_of_dma_tx_remove(stm32_port, pdev);
+diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
+index 07ac291328cd..53bcd032fce7 100644
+--- a/drivers/tty/serial/stm32-usart.h
++++ b/drivers/tty/serial/stm32-usart.h
+@@ -109,7 +109,7 @@ struct stm32_usart_info stm32h7_info = {
+ /* USART_SR (F4) / USART_ISR (F7) */
+ #define USART_SR_PE BIT(0)
+ #define USART_SR_FE BIT(1)
+-#define USART_SR_NF BIT(2)
++#define USART_SR_NE BIT(2) /* F7 (NF for F4) */
+ #define USART_SR_ORE BIT(3)
+ #define USART_SR_IDLE BIT(4)
+ #define USART_SR_RXNE BIT(5)
+@@ -126,7 +126,8 @@ struct stm32_usart_info stm32h7_info = {
+ #define USART_SR_SBKF BIT(18) /* F7 */
+ #define USART_SR_WUF BIT(20) /* H7 */
+ #define USART_SR_TEACK BIT(21) /* F7 */
+-#define USART_SR_ERR_MASK (USART_SR_ORE | USART_SR_FE | USART_SR_PE)
++#define USART_SR_ERR_MASK (USART_SR_ORE | USART_SR_NE | USART_SR_FE |\
++ USART_SR_PE)
+ /* Dummy bits */
+ #define USART_SR_DUMMY_RX BIT(16)
+
+@@ -246,9 +247,9 @@ struct stm32_usart_info stm32h7_info = {
+ #define STM32_SERIAL_NAME "ttySTM"
+ #define STM32_MAX_PORTS 8
+
+-#define RX_BUF_L 200 /* dma rx buffer length */
+-#define RX_BUF_P RX_BUF_L /* dma rx buffer period */
+-#define TX_BUF_L 200 /* dma tx buffer length */
++#define RX_BUF_L 4096 /* dma rx buffer length */
++#define RX_BUF_P (RX_BUF_L / 2) /* dma rx buffer period */
++#define TX_BUF_L RX_BUF_L /* dma tx buffer length */
+
+ struct stm32_port {
+ struct uart_port port;
+@@ -272,6 +273,7 @@ struct stm32_port {
+ bool wakeup_src;
+ int rdr_mask; /* receive data register mask */
+ struct mctrl_gpios *gpios; /* modem control gpios */
++ struct dma_tx_state rx_dma_state;
+ };
+
+ static struct stm32_port stm32_ports[STM32_MAX_PORTS];
+--
+2.35.1
+
--- /dev/null
+From 6fa778f8c0f1bc1bad754c6fca14cb758de84258 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 30 Apr 2022 18:28:45 +0200
+Subject: serial: stm32: Use TC interrupt to deassert GPIO RTS in RS485 mode
+
+From: Marek Vasut <marex@denx.de>
+
+[ Upstream commit d7c76716169ddc37cf6316ff381d34ea807fbfd7 ]
+
+In case the RS485 mode is emulated using GPIO RTS, use the TC interrupt
+to deassert the GPIO RTS; otherwise the GPIO RTS stays asserted after a
+transmission has ended and RS485 cannot work.
+
+Signed-off-by: Marek Vasut <marex@denx.de>
+Cc: Alexandre Torgue <alexandre.torgue@foss.st.com>
+Cc: Erwan Le Ray <erwan.leray@foss.st.com>
+Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Jean Philippe Romain <jean-philippe.romain@foss.st.com>
+Cc: Valentin Caron <valentin.caron@foss.st.com>
+Cc: linux-arm-kernel@lists.infradead.org
+Cc: linux-stm32@st-md-mailman.stormreply.com
+To: linux-serial@vger.kernel.org
+Link: https://lore.kernel.org/r/20220430162845.244655-2-marex@denx.de
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Stable-dep-of: adafbbf6895e ("serial: stm32: Deassert Transmit Enable on ->rs485_config()")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/tty/serial/stm32-usart.c | 42 ++++++++++++++++++++++++++++++--
+ drivers/tty/serial/stm32-usart.h | 1 +
+ 2 files changed, 41 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/tty/serial/stm32-usart.c b/drivers/tty/serial/stm32-usart.c
+index 0e7bcbaf7268..4a4108ef23e8 100644
+--- a/drivers/tty/serial/stm32-usart.c
++++ b/drivers/tty/serial/stm32-usart.c
+@@ -402,6 +402,14 @@ static void stm32_usart_tx_interrupt_enable(struct uart_port *port)
+ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TXEIE);
+ }
+
++static void stm32_usart_tc_interrupt_enable(struct uart_port *port)
++{
++ struct stm32_port *stm32_port = to_stm32_port(port);
++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++
++ stm32_usart_set_bits(port, ofs->cr1, USART_CR1_TCIE);
++}
++
+ static void stm32_usart_rx_dma_complete(void *arg)
+ {
+ struct uart_port *port = arg;
+@@ -420,6 +428,14 @@ static void stm32_usart_tx_interrupt_disable(struct uart_port *port)
+ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TXEIE);
+ }
+
++static void stm32_usart_tc_interrupt_disable(struct uart_port *port)
++{
++ struct stm32_port *stm32_port = to_stm32_port(port);
++ const struct stm32_usart_offsets *ofs = &stm32_port->info->ofs;
++
++ stm32_usart_clr_bits(port, ofs->cr1, USART_CR1_TCIE);
++}
++
+ static void stm32_usart_rs485_rts_enable(struct uart_port *port)
+ {
+ struct stm32_port *stm32_port = to_stm32_port(port);
+@@ -557,6 +573,13 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
+ u32 isr;
+ int ret;
+
++ if (!stm32_port->hw_flow_control &&
++ port->rs485.flags & SER_RS485_ENABLED) {
++ stm32_port->txdone = false;
++ stm32_usart_tc_interrupt_disable(port);
++ stm32_usart_rs485_rts_enable(port);
++ }
++
+ if (port->x_char) {
+ if (stm32_port->tx_dma_busy)
+ stm32_usart_clr_bits(port, ofs->cr3, USART_CR3_DMAT);
+@@ -596,8 +619,14 @@ static void stm32_usart_transmit_chars(struct uart_port *port)
+ if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
+ uart_write_wakeup(port);
+
+- if (uart_circ_empty(xmit))
++ if (uart_circ_empty(xmit)) {
+ stm32_usart_tx_interrupt_disable(port);
++ if (!stm32_port->hw_flow_control &&
++ port->rs485.flags & SER_RS485_ENABLED) {
++ stm32_port->txdone = true;
++ stm32_usart_tc_interrupt_enable(port);
++ }
++ }
+ }
+
+ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+@@ -610,6 +639,13 @@ static irqreturn_t stm32_usart_interrupt(int irq, void *ptr)
+
+ sr = readl_relaxed(port->membase + ofs->isr);
+
++ if (!stm32_port->hw_flow_control &&
++ port->rs485.flags & SER_RS485_ENABLED &&
++ (sr & USART_SR_TC)) {
++ stm32_usart_tc_interrupt_disable(port);
++ stm32_usart_rs485_rts_disable(port);
++ }
++
+ if ((sr & USART_SR_RTOF) && ofs->icr != UNDEF_REG)
+ writel_relaxed(USART_ICR_RTOCF,
+ port->membase + ofs->icr);
+@@ -713,8 +749,10 @@ static void stm32_usart_start_tx(struct uart_port *port)
+ {
+ struct circ_buf *xmit = &port->state->xmit;
+
+- if (uart_circ_empty(xmit) && !port->x_char)
++ if (uart_circ_empty(xmit) && !port->x_char) {
++ stm32_usart_rs485_rts_disable(port);
+ return;
++ }
+
+ stm32_usart_rs485_rts_enable(port);
+
+diff --git a/drivers/tty/serial/stm32-usart.h b/drivers/tty/serial/stm32-usart.h
+index 53bcd032fce7..852573e1a690 100644
+--- a/drivers/tty/serial/stm32-usart.h
++++ b/drivers/tty/serial/stm32-usart.h
+@@ -268,6 +268,7 @@ struct stm32_port {
+ bool hw_flow_control;
+ bool swap; /* swap RX & TX pins */
+ bool fifoen;
++ bool txdone;
+ int rxftcfg; /* RX FIFO threshold CFG */
+ int txftcfg; /* TX FIFO threshold CFG */
+ bool wakeup_src;
+--
+2.35.1
+
x86-topology-fix-multiple-packages-shown-on-a-single-package-system.patch
x86-topology-fix-duplicated-core-id-within-a-package.patch
fs-clear-or-set-fmode_lseek-based-on-llseek-function.patch
+btrfs-fix-processing-of-delayed-data-refs-during-bac.patch
+btrfs-fix-processing-of-delayed-tree-block-refs-duri.patch
+drm-vc4-add-module-dependency-on-hdmi-codec.patch
+acpi-extlog-handle-multiple-records.patch
+tipc-fix-recognition-of-trial-period.patch
+tipc-fix-an-information-leak-in-tipc_topsrv_kern_sub.patch
+i40e-fix-dma-mappings-leak.patch
+hid-magicmouse-do-not-set-btn_mouse-on-double-report.patch
+sfc-change-vf-mac-via-pf-as-first-preference-if-avai.patch
+net-atm-fix-proc_mpc_write-incorrect-return-value.patch
+net-phy-dp83867-extend-rx-strap-quirk-for-sgmii-mode.patch
+net-phylink-add-mac_managed_pm-in-phylink_config-str.patch
+scsi-lpfc-fix-memory-leak-in-lpfc_create_port.patch
+udp-update-reuse-has_conns-under-reuseport_lock.patch
+cifs-fix-xid-leak-in-cifs_create.patch
+cifs-fix-xid-leak-in-cifs_copy_file_range.patch
+cifs-fix-xid-leak-in-cifs_flock.patch
+cifs-fix-xid-leak-in-cifs_ses_add_channel.patch
+dm-remove-unnecessary-assignment-statement-in-alloc_.patch
+net-hsr-avoid-possible-null-deref-in-skb_clone.patch
+ionic-catch-null-pointer-issue-on-reconfig.patch
+netfilter-nf_tables-relax-nfta_set_elem_key_end-set-.patch
+nvme-hwmon-consistently-ignore-errors-from-nvme_hwmo.patch
+nvme-hwmon-kmalloc-the-nvme-smart-log-buffer.patch
+nvmet-fix-workqueue-mem_reclaim-flushing-dependency.patch
+net-sched-cake-fix-null-pointer-access-issue-when-ca.patch
+net-sched-delete-duplicate-cleanup-of-backlog-and-ql.patch
+net-sched-sfb-fix-null-pointer-access-issue-when-sfb.patch
+sfc-include-vport_id-in-filter-spec-hash-and-equal.patch
+wwan_hwsim-fix-possible-memory-leak-in-wwan_hwsim_de.patch
+net-hns-fix-possible-memory-leak-in-hnae_ae_register.patch
+net-sched-fix-race-condition-in-qdisc_graft.patch
+net-phy-dp83822-disable-mdi-crossover-status-change-.patch
+iommu-vt-d-allow-nvs-regions-in-arch_rmrr_sanity_che.patch
+iommu-vt-d-clean-up-si_domain-in-the-init_dmars-erro.patch
+dlm-use-__le-types-for-dlm-header.patch
+dlm-use-__le-types-for-rcom-messages.patch
+dlm-use-__le-types-for-dlm-messages.patch
+fs-dlm-fix-invalid-derefence-of-sb_lvbptr.patch
+scsi-lpfc-adjust-bytes-received-vales-during-cmf-tim.patch
+scsi-lpfc-adjust-cmf-total-bytes-and-rxmonitor.patch
+scsi-lpfc-rework-mib-rx-monitor-debug-info-logic.patch
+usb-add-reset_resume-quirk-for-nvidia-jetson-devices.patch
+serial-stm32-re-introduce-an-irq-flag-condition-in-u.patch
+serial-stm32-rework-rx-over-dma.patch
+serial-stm32-factor-out-gpio-rts-toggling-into-separ.patch
+serial-stm32-use-tc-interrupt-to-deassert-gpio-rts-i.patch
+serial-stm32-deassert-transmit-enable-on-rs485_confi.patch
+arm64-mte-move-register-initialization-to-c.patch
+kernfs-fix-use-after-free-in-__kernfs_remove.patch
+btrfs-rename-root-fields-in-delayed-refs-structs.patch
+btrfs-pull-up-qgroup-checks-from-delayed-ref-core-to.patch
+ksmbd-handle-smb2-query-dir-request-for-outputbuffer.patch
+ksmbd-fix-incorrect-handling-of-iterate_dir.patch
+tracing-simplify-conditional-compilation-code-in-tra.patch
+tracing-do-not-free-snapshot-if-tracer-is-on-cmdline.patch
+mmc-core-support-zeroout-using-trim-for-emmc.patch
+mmc-core-add-sd-card-quirk-for-broken-discard.patch
+mmc-sdhci-tegra-use-actual-clock-rate-for-sw-tuning-.patch
+drm-amd-display-parse-and-check-psr-su-caps.patch
+drm-amd-display-explicitly-disable-psr_feature_enabl.patch
+perf-parse-events-add-const-to-evsel-name.patch
+perf-skip-and-warn-on-unknown-format-confign-attrs.patch
--- /dev/null
+From d1e22f09573e7cf235ff780e6f493f031729523a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 13 Oct 2022 10:55:53 +0100
+Subject: sfc: Change VF mac via PF as first preference if available.
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Jonathan Cooper <jonathan.s.cooper@amd.com>
+
+[ Upstream commit a8aed7b35becfd21f22a77c7014029ea837b018f ]
+
+Changing a VF's mac address through the VF (rather than via the PF)
+fails with EPERM because the latter part of efx_ef10_set_mac_address
+attempts to change the vport mac address list as the VF.
+Even with this fixed it still fails with EBUSY because the vadaptor
+is still assigned on the VF - the vadaptor reassignment must be within
+a section where the VF has torn down its state.
+
+A major reason this broke is that we have two functions that
+ostensibly do the same thing - have a PF and VF cooperate to change a
+VF mac address. Rather than do this, if we are changing the mac of a VF
+that has a link to the PF in the same VM, then simply call
+sriov_set_vf_mac instead, which is a proven working function that does
+that.
+
+If there is no PF available, or if that fails non-fatally, then attempt
+to change the VF's mac address as we would a PF, without updating the
+PF's data.
+
+Test case:
+Create a VF:
+ echo 1 > /sys/class/net/<if>/device/sriov_numvfs
+Set the mac address of the VF directly:
+ ip link set <vf> addr 00:11:22:33:44:55
+Set the MAC address of the VF via the PF:
+ ip link set <pf> vf 0 mac 00:11:22:33:44:66
+Without this patch the last command will fail with ENOENT.
+
+Signed-off-by: Jonathan Cooper <jonathan.s.cooper@amd.com>
+Reported-by: Íñigo Huguet <ihuguet@redhat.com>
+Fixes: 910c8789a777 ("set the MAC address using MC_CMD_VADAPTOR_SET_MAC")
+Acked-by: Edward Cree <ecree.xilinx@gmail.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/sfc/ef10.c | 58 ++++++++++++++-------------------
+ 1 file changed, 24 insertions(+), 34 deletions(-)
+
+diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
+index 056c24ec1249..c316a9eb5be3 100644
+--- a/drivers/net/ethernet/sfc/ef10.c
++++ b/drivers/net/ethernet/sfc/ef10.c
+@@ -3271,6 +3271,30 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
+ bool was_enabled = efx->port_enabled;
+ int rc;
+
++#ifdef CONFIG_SFC_SRIOV
++ /* If this function is a VF and we have access to the parent PF,
++ * then use the PF control path to attempt to change the VF MAC address.
++ */
++ if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
++ struct efx_nic *efx_pf = pci_get_drvdata(efx->pci_dev->physfn);
++ struct efx_ef10_nic_data *nic_data = efx->nic_data;
++ u8 mac[ETH_ALEN];
++
++ /* net_dev->dev_addr can be zeroed by efx_net_stop in
++ * efx_ef10_sriov_set_vf_mac, so pass in a copy.
++ */
++ ether_addr_copy(mac, efx->net_dev->dev_addr);
++
++ rc = efx_ef10_sriov_set_vf_mac(efx_pf, nic_data->vf_index, mac);
++ if (!rc)
++ return 0;
++
++ netif_dbg(efx, drv, efx->net_dev,
++ "Updating VF mac via PF failed (%d), setting directly\n",
++ rc);
++ }
++#endif
++
+ efx_device_detach_sync(efx);
+ efx_net_stop(efx->net_dev);
+
+@@ -3293,40 +3317,6 @@ static int efx_ef10_set_mac_address(struct efx_nic *efx)
+ efx_net_open(efx->net_dev);
+ efx_device_attach_if_not_resetting(efx);
+
+-#ifdef CONFIG_SFC_SRIOV
+- if (efx->pci_dev->is_virtfn && efx->pci_dev->physfn) {
+- struct efx_ef10_nic_data *nic_data = efx->nic_data;
+- struct pci_dev *pci_dev_pf = efx->pci_dev->physfn;
+-
+- if (rc == -EPERM) {
+- struct efx_nic *efx_pf;
+-
+- /* Switch to PF and change MAC address on vport */
+- efx_pf = pci_get_drvdata(pci_dev_pf);
+-
+- rc = efx_ef10_sriov_set_vf_mac(efx_pf,
+- nic_data->vf_index,
+- efx->net_dev->dev_addr);
+- } else if (!rc) {
+- struct efx_nic *efx_pf = pci_get_drvdata(pci_dev_pf);
+- struct efx_ef10_nic_data *nic_data = efx_pf->nic_data;
+- unsigned int i;
+-
+- /* MAC address successfully changed by VF (with MAC
+- * spoofing) so update the parent PF if possible.
+- */
+- for (i = 0; i < efx_pf->vf_count; ++i) {
+- struct ef10_vf *vf = nic_data->vf + i;
+-
+- if (vf->efx == efx) {
+- ether_addr_copy(vf->mac,
+- efx->net_dev->dev_addr);
+- return 0;
+- }
+- }
+- }
+- } else
+-#endif
+ if (rc == -EPERM) {
+ netif_err(efx, drv, efx->net_dev,
+ "Cannot change MAC address; use sfboot to enable"
+--
+2.35.1
+
--- /dev/null
+From 7116977e039126d670b23e785ec3745e1717c2ad Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Oct 2022 10:28:41 +0100
+Subject: sfc: include vport_id in filter spec hash and equal()
+
+From: Pieter Jansen van Vuuren <pieter.jansen-van-vuuren@amd.com>
+
+[ Upstream commit c2bf23e4a5af37a4d77901d9ff14c50a269f143d ]
+
+Filters on different vports are qualified by different implicit MACs and/or
+VLANs, so shouldn't be considered equal even if their other match fields
+are identical.
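+
+The fix works because the hash and equality checks cover everything
+from a given member to the end of the struct, so moving vport_id to
+the head of that region pulls it into the match key. A cut-down,
+runnable sketch of the technique (hypothetical struct and names, not
+the sfc definitions):
+
+	#include <stddef.h>
+	#include <stdint.h>
+	#include <stdio.h>
+	#include <string.h>
+
+	struct demo_spec {
+		uint32_t flags;		/* not part of the match key */
+		uint32_t vport_id;	/* key starts here, 4-byte aligned */
+		uint16_t outer_vid;
+		uint16_t inner_vid;
+	};
+
+	static int spec_equal(const struct demo_spec *l,
+			      const struct demo_spec *r)
+	{
+		return memcmp(&l->vport_id, &r->vport_id,
+			      sizeof(*l) -
+			      offsetof(struct demo_spec, vport_id)) == 0;
+	}
+
+	int main(void)
+	{
+		struct demo_spec a = { .vport_id = 1, .outer_vid = 100 };
+		struct demo_spec b = { .vport_id = 2, .outer_vid = 100 };
+
+		/* same VLAN, different vport: no longer equal */
+		printf("equal: %d\n", spec_equal(&a, &b));	/* 0 */
+		return 0;
+	}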
+
+Fixes: 7c460d9be610 ("sfc: Extend and abstract efx_filter_spec to cover Huntington/EF10")
+Co-developed-by: Edward Cree <ecree.xilinx@gmail.com>
+Signed-off-by: Edward Cree <ecree.xilinx@gmail.com>
+Signed-off-by: Pieter Jansen van Vuuren <pieter.jansen-van-vuuren@amd.com>
+Reviewed-by: Martin Habets <habetsm.xilinx@gmail.com>
+Link: https://lore.kernel.org/r/20221018092841.32206-1-pieter.jansen-van-vuuren@amd.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/ethernet/sfc/filter.h | 3 ++-
+ drivers/net/ethernet/sfc/rx_common.c | 10 +++++-----
+ 2 files changed, 7 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/net/ethernet/sfc/filter.h b/drivers/net/ethernet/sfc/filter.h
+index 40b2af8bfb81..2ac3c8f1b04b 100644
+--- a/drivers/net/ethernet/sfc/filter.h
++++ b/drivers/net/ethernet/sfc/filter.h
+@@ -157,7 +157,8 @@ struct efx_filter_spec {
+ u32 flags:6;
+ u32 dmaq_id:12;
+ u32 rss_context;
+- __be16 outer_vid __aligned(4); /* allow jhash2() of match values */
++ u32 vport_id;
++ __be16 outer_vid;
+ __be16 inner_vid;
+ u8 loc_mac[ETH_ALEN];
+ u8 rem_mac[ETH_ALEN];
+diff --git a/drivers/net/ethernet/sfc/rx_common.c b/drivers/net/ethernet/sfc/rx_common.c
+index b925de9b4302..a804c754cd7d 100644
+--- a/drivers/net/ethernet/sfc/rx_common.c
++++ b/drivers/net/ethernet/sfc/rx_common.c
+@@ -676,17 +676,17 @@ bool efx_filter_spec_equal(const struct efx_filter_spec *left,
+ (EFX_FILTER_FLAG_RX | EFX_FILTER_FLAG_TX)))
+ return false;
+
+- return memcmp(&left->outer_vid, &right->outer_vid,
++ return memcmp(&left->vport_id, &right->vport_id,
+ sizeof(struct efx_filter_spec) -
+- offsetof(struct efx_filter_spec, outer_vid)) == 0;
++ offsetof(struct efx_filter_spec, vport_id)) == 0;
+ }
+
+ u32 efx_filter_spec_hash(const struct efx_filter_spec *spec)
+ {
+- BUILD_BUG_ON(offsetof(struct efx_filter_spec, outer_vid) & 3);
+- return jhash2((const u32 *)&spec->outer_vid,
++ BUILD_BUG_ON(offsetof(struct efx_filter_spec, vport_id) & 3);
++ return jhash2((const u32 *)&spec->vport_id,
+ (sizeof(struct efx_filter_spec) -
+- offsetof(struct efx_filter_spec, outer_vid)) / 4,
++ offsetof(struct efx_filter_spec, vport_id)) / 4,
+ 0);
+ }
+
+--
+2.35.1
+
--- /dev/null
+From bfa34d228f3fad5e7f28f2e37dcf4ec7c3baa5bb Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 12 Oct 2022 17:25:14 +0200
+Subject: tipc: fix an information leak in tipc_topsrv_kern_subscr
+
+From: Alexander Potapenko <glider@google.com>
+
+[ Upstream commit 777ecaabd614d47c482a5c9031579e66da13989a ]
+
+Use an 8-byte write to initialize sub.usr_handle in
+tipc_topsrv_kern_subscr(), otherwise four bytes remain uninitialized
+when issuing setsockopt(..., SOL_TIPC, ...).
+This resulted in an infoleak reported by KMSAN when the packet was
+received:
+
+ =====================================================
+ BUG: KMSAN: kernel-infoleak in copyout+0xbc/0x100 lib/iov_iter.c:169
+ instrument_copy_to_user ./include/linux/instrumented.h:121
+ copyout+0xbc/0x100 lib/iov_iter.c:169
+ _copy_to_iter+0x5c0/0x20a0 lib/iov_iter.c:527
+ copy_to_iter ./include/linux/uio.h:176
+ simple_copy_to_iter+0x64/0xa0 net/core/datagram.c:513
+ __skb_datagram_iter+0x123/0xdc0 net/core/datagram.c:419
+ skb_copy_datagram_iter+0x58/0x200 net/core/datagram.c:527
+ skb_copy_datagram_msg ./include/linux/skbuff.h:3903
+ packet_recvmsg+0x521/0x1e70 net/packet/af_packet.c:3469
+ ____sys_recvmsg+0x2c4/0x810 net/socket.c:?
+ ___sys_recvmsg+0x217/0x840 net/socket.c:2743
+ __sys_recvmsg net/socket.c:2773
+ __do_sys_recvmsg net/socket.c:2783
+ __se_sys_recvmsg net/socket.c:2780
+ __x64_sys_recvmsg+0x364/0x540 net/socket.c:2780
+ do_syscall_x64 arch/x86/entry/common.c:50
+ do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd arch/x86/entry/entry_64.S:120
+
+ ...
+
+ Uninit was stored to memory at:
+ tipc_sub_subscribe+0x42d/0xb50 net/tipc/subscr.c:156
+ tipc_conn_rcv_sub+0x246/0x620 net/tipc/topsrv.c:375
+ tipc_topsrv_kern_subscr+0x2e8/0x400 net/tipc/topsrv.c:579
+ tipc_group_create+0x4e7/0x7d0 net/tipc/group.c:190
+ tipc_sk_join+0x2a8/0x770 net/tipc/socket.c:3084
+ tipc_setsockopt+0xae5/0xe40 net/tipc/socket.c:3201
+ __sys_setsockopt+0x87f/0xdc0 net/socket.c:2252
+ __do_sys_setsockopt net/socket.c:2263
+ __se_sys_setsockopt net/socket.c:2260
+ __x64_sys_setsockopt+0xe0/0x160 net/socket.c:2260
+ do_syscall_x64 arch/x86/entry/common.c:50
+ do_syscall_64+0x3d/0xb0 arch/x86/entry/common.c:80
+ entry_SYSCALL_64_after_hwframe+0x63/0xcd arch/x86/entry/entry_64.S:120
+
+ Local variable sub created at:
+ tipc_topsrv_kern_subscr+0x57/0x400 net/tipc/topsrv.c:562
+ tipc_group_create+0x4e7/0x7d0 net/tipc/group.c:190
+
+ Bytes 84-87 of 88 are uninitialized
+ Memory access of size 88 starts at ffff88801ed57cd0
+ Data copied to user address 0000000020000400
+ ...
+ =====================================================
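+
+A runnable userspace sketch of why the 4-byte write leaks (hypothetical
+struct; the casts mirror the ones in the patch and assume the handle is
+suitably aligned, as in the kernel struct):
+
+	#include <stdint.h>
+	#include <stdio.h>
+	#include <string.h>
+
+	struct demo_sub {
+		char usr_handle[8];	/* stand-in for the 8-byte handle */
+	};
+
+	int main(void)
+	{
+		struct demo_sub sub;
+		uint32_t port = 0x1234;
+
+		memset(&sub, 0xAA, sizeof(sub));	/* stale stack bytes */
+
+		*(uint32_t *)&sub.usr_handle = port;	/* old: bytes 4..7 stay 0xAA */
+		printf("%02x\n", (unsigned char)sub.usr_handle[7]);	/* aa */
+
+		*(uint64_t *)&sub.usr_handle = (uint64_t)port;	/* fix: all 8 set */
+		printf("%02x\n", (unsigned char)sub.usr_handle[7]);	/* 00 */
+		return 0;
+	}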
+
+Signed-off-by: Alexander Potapenko <glider@google.com>
+Fixes: 026321c6d056a5 ("tipc: rename tipc_server to tipc_topsrv")
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/topsrv.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/tipc/topsrv.c b/net/tipc/topsrv.c
+index 5522865deae9..14fd05fd6107 100644
+--- a/net/tipc/topsrv.c
++++ b/net/tipc/topsrv.c
+@@ -568,7 +568,7 @@ bool tipc_topsrv_kern_subscr(struct net *net, u32 port, u32 type, u32 lower,
+ sub.seq.upper = upper;
+ sub.timeout = TIPC_WAIT_FOREVER;
+ sub.filter = filter;
+- *(u32 *)&sub.usr_handle = port;
++ *(u64 *)&sub.usr_handle = (u64)port;
+
+ con = tipc_conn_alloc(tipc_topsrv(net));
+ if (IS_ERR(con))
+--
+2.35.1
+
--- /dev/null
+From 8e2f0cef7c38c4779d7af2d5dd4c77588a13c4c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 10 Oct 2022 15:46:13 +1300
+Subject: tipc: Fix recognition of trial period
+
+From: Mark Tomlinson <mark.tomlinson@alliedtelesis.co.nz>
+
+[ Upstream commit 28be7ca4fcfd69a2d52aaa331adbf9dbe91f9e6e ]
+
+The trial period exists until jiffies is after addr_trial_end. But as
+jiffies will eventually wrap around, a plain time_before()/time_after()
+comparison will at some point give incorrect results. As the node
+address is set once the trial period ends, this can be used to know
+that we are not in the trial period.
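+
+A runnable sketch of the wraparound (the same signed-difference
+arithmetic as the kernel's time_before() on a 32-bit jiffies counter;
+values are made up):
+
+	#include <stdint.h>
+	#include <stdio.h>
+
+	static int demo_time_before(uint32_t a, uint32_t b)
+	{
+		return (int32_t)(a - b) < 0;
+	}
+
+	int main(void)
+	{
+		uint32_t trial_end = 1000;
+
+		printf("%d\n", demo_time_before(500, trial_end));  /* 1: in trial */
+		printf("%d\n", demo_time_before(2000, trial_end)); /* 0: trial over */
+		/* ~2^31 ticks later the difference goes negative again,
+		 * so the stale test claims we are back in the trial: */
+		printf("%d\n", demo_time_before(0x80000000u + 2000, trial_end)); /* 1 */
+		return 0;
+	}
+
+Checking that the node address is already set sidesteps the stale
+comparison entirely.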
+
+Fixes: e415577f57f4 ("tipc: correct discovery message handling during address trial period")
+Signed-off-by: Mark Tomlinson <mark.tomlinson@alliedtelesis.co.nz>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/tipc/discover.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/net/tipc/discover.c b/net/tipc/discover.c
+index da69e1abf68f..e8630707901e 100644
+--- a/net/tipc/discover.c
++++ b/net/tipc/discover.c
+@@ -148,8 +148,8 @@ static bool tipc_disc_addr_trial_msg(struct tipc_discoverer *d,
+ {
+ struct net *net = d->net;
+ struct tipc_net *tn = tipc_net(net);
+- bool trial = time_before(jiffies, tn->addr_trial_end);
+ u32 self = tipc_own_addr(net);
++ bool trial = time_before(jiffies, tn->addr_trial_end) && !self;
+
+ if (mtyp == DSC_TRIAL_FAIL_MSG) {
+ if (!trial)
+--
+2.35.1
+
--- /dev/null
+From 6ab66d31addb64dde43b25e9982d488d23cc60c4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Wed, 5 Oct 2022 11:37:57 -0400
+Subject: tracing: Do not free snapshot if tracer is on cmdline
+
+From: Steven Rostedt (Google) <rostedt@goodmis.org>
+
+[ Upstream commit a541a9559bb0a8ecc434de01d3e4826c32e8bb53 ]
+
+The ftrace_boot_snapshot and alloc_snapshot cmdline options allocate the
+snapshot buffer at boot up for use later. The ftrace_boot_snapshot in
+particular requires the snapshot to be allocated because it will take a
+snapshot at the end of boot up, allowing us to see the traces that
+happened during boot so that they are not lost when user space takes over.
+
+When a tracer is registered (started), there's a path that checks
+whether it requires the snapshot buffer; if it does not and the buffer
+was allocated, that path does a synchronization and frees the snapshot
+buffer.
+
+The synchronization is only required if the previous tracer was using
+the snapshot buffer for "max latency" snapshots (like the irqoff tracer
+and friends), as it needs to make sure all max snapshots are complete
+before freeing. But it does not make sense to free the buffer if the
+previous tracer was not using it and the snapshot was allocated by the
+cmdline parameters. That basically takes away the point of allocating
+it in the first place!
+
+Note, the allocated snapshot worked fine for just trace events, but fails
+when a tracer is enabled on the cmdline.
+
+On further investigation, this goes back even further and does not require
+a tracer on the cmdline to fail. Simply enable snapshots and then enable a
+tracer, and it will remove the snapshot.
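+
+A condensed view of the corrected decision (a sketch, not the exact
+kernel code; 'old' and 'new' name the outgoing and incoming tracers):
+
+	if (old->use_max_tr && !new->use_max_tr) {
+		/* only a latency tracer's buffer may be freed, and only
+		 * after all in-flight max snapshots have completed */
+		synchronize_rcu();
+		free_snapshot(tr);
+	}
+	if (new->use_max_tr && !tr->allocated_snapshot)
+		ret = tracing_alloc_snapshot_instance(tr);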
+
+Link: https://lkml.kernel.org/r/20221005113757.041df7fe@gandalf.local.home
+
+Cc: Masami Hiramatsu <mhiramat@kernel.org>
+Cc: Andrew Morton <akpm@linux-foundation.org>
+Cc: stable@vger.kernel.org
+Fixes: 45ad21ca5530 ("tracing: Have trace_array keep track if snapshot buffer is allocated")
+Reported-by: Ross Zwisler <zwisler@kernel.org>
+Tested-by: Ross Zwisler <zwisler@kernel.org>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.c | 10 ++++++----
+ 1 file changed, 6 insertions(+), 4 deletions(-)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 21795777587b..24a5ea9a2cc0 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6399,12 +6399,12 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ if (tr->current_trace->reset)
+ tr->current_trace->reset(tr);
+
++#ifdef CONFIG_TRACER_MAX_TRACE
++ had_max_tr = tr->current_trace->use_max_tr;
++
+ /* Current trace needs to be nop_trace before synchronize_rcu */
+ tr->current_trace = &nop_trace;
+
+-#ifdef CONFIG_TRACER_MAX_TRACE
+- had_max_tr = tr->allocated_snapshot;
+-
+ if (had_max_tr && !t->use_max_tr) {
+ /*
+ * We need to make sure that the update_max_tr sees that
+@@ -6417,11 +6417,13 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ free_snapshot(tr);
+ }
+
+- if (t->use_max_tr && !had_max_tr) {
++ if (t->use_max_tr && !tr->allocated_snapshot) {
+ ret = tracing_alloc_snapshot_instance(tr);
+ if (ret < 0)
+ goto out;
+ }
++#else
++ tr->current_trace = &nop_trace;
+ #endif
+
+ if (t->init) {
+--
+2.35.1
+
--- /dev/null
+From 86cfc8663c67806905c13b4e037a52d3e6cd7b64 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 2 Jun 2022 22:06:13 +0800
+Subject: tracing: Simplify conditional compilation code in
+ tracing_set_tracer()
+
+From: sunliming <sunliming@kylinos.cn>
+
+[ Upstream commit f4b0d318097e45cbac5e14976f8bb56aa2cef504 ]
+
+Two conditional compilation directives "#ifdef CONFIG_TRACER_MAX_TRACE"
+are used consecutively, with no other code in between. Simplify the
+conditional compilation code and use only one
+"#ifdef CONFIG_TRACER_MAX_TRACE".
+
+Link: https://lkml.kernel.org/r/20220602140613.545069-1-sunliming@kylinos.cn
+
+Signed-off-by: sunliming <sunliming@kylinos.cn>
+Signed-off-by: Steven Rostedt (Google) <rostedt@goodmis.org>
+Stable-dep-of: a541a9559bb0 ("tracing: Do not free snapshot if tracer is on cmdline")
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/trace/trace.c | 2 --
+ 1 file changed, 2 deletions(-)
+
+diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
+index 0dc17fd96102..21795777587b 100644
+--- a/kernel/trace/trace.c
++++ b/kernel/trace/trace.c
+@@ -6416,9 +6416,7 @@ int tracing_set_tracer(struct trace_array *tr, const char *buf)
+ synchronize_rcu();
+ free_snapshot(tr);
+ }
+-#endif
+
+-#ifdef CONFIG_TRACER_MAX_TRACE
+ if (t->use_max_tr && !had_max_tr) {
+ ret = tracing_alloc_snapshot_instance(tr);
+ if (ret < 0)
+--
+2.35.1
+
--- /dev/null
+From 9e553eb28db73badd9f538fabad65631547c6d94 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 14 Oct 2022 11:26:25 -0700
+Subject: udp: Update reuse->has_conns under reuseport_lock.
+
+From: Kuniyuki Iwashima <kuniyu@amazon.com>
+
+[ Upstream commit 69421bf98482d089e50799f45e48b25ce4a8d154 ]
+
+When we call connect() for a UDP socket in a reuseport group, we have
+to update sk->sk_reuseport_cb->has_conns to 1. Otherwise, the kernel
+could wrongly select an unconnected socket for packets sent to the
+connected socket.
+
+However, the current way to set has_conns is illegal and can
+trigger that problem. reuseport_has_conns() changes has_conns under
+rcu_read_lock(), which upgrades the RCU reader to the updater. Then,
+it must do the update under the updater's lock, reuseport_lock, but
+it currently does not.
+
+For this reason, there is a race below where we fail to set has_conns,
+resulting in the wrong socket selection. To avoid the race, let's split
+the reader and updater with proper locking.
+
+ cpu1 cpu2
++----+ +----+
+
+__ip[46]_datagram_connect() reuseport_grow()
+. .
+|- reuseport_has_conns(sk, true) |- more_reuse = __reuseport_alloc(more_socks_size)
+| . |
+| |- rcu_read_lock()
+| |- reuse = rcu_dereference(sk->sk_reuseport_cb)
+| |
+| | | /* reuse->has_conns == 0 here */
+| | |- more_reuse->has_conns = reuse->has_conns
+| |- reuse->has_conns = 1 | /* more_reuse->has_conns SHOULD BE 1 HERE */
+| | |
+| | |- rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
+| | | more_reuse)
+| `- rcu_read_unlock() `- kfree_rcu(reuse, rcu)
+|
+|- sk->sk_state = TCP_ESTABLISHED
+
+Note the likely(reuse) in reuseport_has_conns_set() is always true,
+but we put the test there for ease of review. [0]
+
+For the record, usually, sk_reuseport_cb is changed under lock_sock().
+The only exception is reuseport_grow() & TCP reqsk migration case.
+
+ 1) shutdown() TCP listener, which is moved into the latter part of
+ reuse->socks[] to migrate reqsk.
+
+ 2) New listen() overflows reuse->socks[] and call reuseport_grow().
+
+ 3) reuse->max_socks overflows u16 with the new listener.
+
+ 4) reuseport_grow() pops the old shutdown()ed listener from the array
+ and updates its sk->sk_reuseport_cb to NULL without lock_sock().
+
+A shutdown()ed TCP socket's sk_reuseport_cb can be changed without
+lock_sock(), but reuseport_has_conns_set() is called only for UDP under
+lock_sock(), so likely(reuse) can never be false in
+reuseport_has_conns_set().
+
+[0]: https://lore.kernel.org/netdev/CANn89iLja=eQHbsM_Ta2sQF0tOGU8vAGrh_izRuuHjuO1ouUag@mail.gmail.com/
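+
+The general shape of the reader/updater split used here (a minimal
+sketch with made-up variable names, not the exact kernel code):
+
+	/* reader: may only observe the flag */
+	rcu_read_lock();
+	reuse = rcu_dereference(sk->sk_reuseport_cb);
+	if (reuse && reuse->has_conns)
+		connected = true;
+	rcu_read_unlock();
+
+	/* updater: owns the write, so it takes the updater's lock */
+	spin_lock_bh(&reuseport_lock);
+	reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
+					  lockdep_is_held(&reuseport_lock));
+	if (reuse)
+		reuse->has_conns = 1;
+	spin_unlock_bh(&reuseport_lock);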
+
+Fixes: acdcecc61285 ("udp: correct reuseport selection with connected sockets")
+Signed-off-by: Kuniyuki Iwashima <kuniyu@amazon.com>
+Link: https://lore.kernel.org/r/20221014182625.89913-1-kuniyu@amazon.com
+Signed-off-by: Paolo Abeni <pabeni@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/net/sock_reuseport.h | 11 +++++------
+ net/core/sock_reuseport.c | 16 ++++++++++++++++
+ net/ipv4/datagram.c | 2 +-
+ net/ipv4/udp.c | 2 +-
+ net/ipv6/datagram.c | 2 +-
+ net/ipv6/udp.c | 2 +-
+ 6 files changed, 25 insertions(+), 10 deletions(-)
+
+diff --git a/include/net/sock_reuseport.h b/include/net/sock_reuseport.h
+index 473b0b0fa4ab..efc9085c6892 100644
+--- a/include/net/sock_reuseport.h
++++ b/include/net/sock_reuseport.h
+@@ -43,21 +43,20 @@ struct sock *reuseport_migrate_sock(struct sock *sk,
+ extern int reuseport_attach_prog(struct sock *sk, struct bpf_prog *prog);
+ extern int reuseport_detach_prog(struct sock *sk);
+
+-static inline bool reuseport_has_conns(struct sock *sk, bool set)
++static inline bool reuseport_has_conns(struct sock *sk)
+ {
+ struct sock_reuseport *reuse;
+ bool ret = false;
+
+ rcu_read_lock();
+ reuse = rcu_dereference(sk->sk_reuseport_cb);
+- if (reuse) {
+- if (set)
+- reuse->has_conns = 1;
+- ret = reuse->has_conns;
+- }
++ if (reuse && reuse->has_conns)
++ ret = true;
+ rcu_read_unlock();
+
+ return ret;
+ }
+
++void reuseport_has_conns_set(struct sock *sk);
++
+ #endif /* _SOCK_REUSEPORT_H */
+diff --git a/net/core/sock_reuseport.c b/net/core/sock_reuseport.c
+index 5daa1fa54249..fb90e1e00773 100644
+--- a/net/core/sock_reuseport.c
++++ b/net/core/sock_reuseport.c
+@@ -21,6 +21,22 @@ static DEFINE_IDA(reuseport_ida);
+ static int reuseport_resurrect(struct sock *sk, struct sock_reuseport *old_reuse,
+ struct sock_reuseport *reuse, bool bind_inany);
+
++void reuseport_has_conns_set(struct sock *sk)
++{
++ struct sock_reuseport *reuse;
++
++ if (!rcu_access_pointer(sk->sk_reuseport_cb))
++ return;
++
++ spin_lock_bh(&reuseport_lock);
++ reuse = rcu_dereference_protected(sk->sk_reuseport_cb,
++ lockdep_is_held(&reuseport_lock));
++ if (likely(reuse))
++ reuse->has_conns = 1;
++ spin_unlock_bh(&reuseport_lock);
++}
++EXPORT_SYMBOL(reuseport_has_conns_set);
++
+ static int reuseport_sock_index(struct sock *sk,
+ const struct sock_reuseport *reuse,
+ bool closed)
+diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
+index 4a8550c49202..112c6e892d30 100644
+--- a/net/ipv4/datagram.c
++++ b/net/ipv4/datagram.c
+@@ -70,7 +70,7 @@ int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
+ }
+ inet->inet_daddr = fl4->daddr;
+ inet->inet_dport = usin->sin_port;
+- reuseport_has_conns(sk, true);
++ reuseport_has_conns_set(sk);
+ sk->sk_state = TCP_ESTABLISHED;
+ sk_set_txhash(sk);
+ inet->inet_id = prandom_u32();
+diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
+index 75d1977ecc07..79d5425bed07 100644
+--- a/net/ipv4/udp.c
++++ b/net/ipv4/udp.c
+@@ -446,7 +446,7 @@ static struct sock *udp4_lib_lookup2(struct net *net,
+ result = lookup_reuseport(net, sk, skb,
+ saddr, sport, daddr, hnum);
+ /* Fall back to scoring if group has connections */
+- if (result && !reuseport_has_conns(sk, false))
++ if (result && !reuseport_has_conns(sk))
+ return result;
+
+ result = result ? : sk;
+diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
+index 206f66310a88..f4559e5bc84b 100644
+--- a/net/ipv6/datagram.c
++++ b/net/ipv6/datagram.c
+@@ -256,7 +256,7 @@ int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr,
+ goto out;
+ }
+
+- reuseport_has_conns(sk, true);
++ reuseport_has_conns_set(sk);
+ sk->sk_state = TCP_ESTABLISHED;
+ sk_set_txhash(sk);
+ out:
+diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
+index 07726a51a3f0..19b6c4da0f42 100644
+--- a/net/ipv6/udp.c
++++ b/net/ipv6/udp.c
+@@ -180,7 +180,7 @@ static struct sock *udp6_lib_lookup2(struct net *net,
+ result = lookup_reuseport(net, sk, skb,
+ saddr, sport, daddr, hnum);
+ /* Fall back to scoring if group has connections */
+- if (result && !reuseport_has_conns(sk, false))
++ if (result && !reuseport_has_conns(sk))
+ return result;
+
+ result = result ? : sk;
+--
+2.35.1
+
--- /dev/null
+From 854c1c2b871ac10afc8e370f4fec71125a956db4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 19 Sep 2022 20:16:10 +0300
+Subject: USB: add RESET_RESUME quirk for NVIDIA Jetson devices in RCM
+
+From: Hannu Hartikainen <hannu@hrtk.in>
+
+[ Upstream commit fc4ade55c617dc73c7e9756b57f3230b4ff24540 ]
+
+NVIDIA Jetson devices in Force Recovery mode (RCM) do not support
+suspending, i.e. flashing fails if the device has been suspended. The
+devices are still visible in lsusb and seem to work otherwise, making
+the issue hard to debug. This has been discovered in various forum
+posts, e.g. [1].
+
+The patch has been tested on NVIDIA Jetson AGX Xavier, but I'm adding
+all the Jetson models listed in [2] on the assumption that they all
+behave similarly.
+
+[1]: https://forums.developer.nvidia.com/t/flashing-not-working/72365
+[2]: https://docs.nvidia.com/jetson/archives/l4t-archived/l4t-3271/index.html#page/Tegra%20Linux%20Driver%20Package%20Development%20Guide/quick_start.html
+
+Signed-off-by: Hannu Hartikainen <hannu@hrtk.in>
+Cc: stable <stable@kernel.org> # after 6.1-rc3
+Link: https://lore.kernel.org/r/20220919171610.30484-1-hannu@hrtk.in
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/usb/core/quirks.c | 9 +++++++++
+ 1 file changed, 9 insertions(+)
+
+diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
+index 999b7c9697fc..0722d2131305 100644
+--- a/drivers/usb/core/quirks.c
++++ b/drivers/usb/core/quirks.c
+@@ -388,6 +388,15 @@ static const struct usb_device_id usb_quirk_list[] = {
+ /* Kingston DataTraveler 3.0 */
+ { USB_DEVICE(0x0951, 0x1666), .driver_info = USB_QUIRK_NO_LPM },
+
++ /* NVIDIA Jetson devices in Force Recovery mode */
++ { USB_DEVICE(0x0955, 0x7018), .driver_info = USB_QUIRK_RESET_RESUME },
++ { USB_DEVICE(0x0955, 0x7019), .driver_info = USB_QUIRK_RESET_RESUME },
++ { USB_DEVICE(0x0955, 0x7418), .driver_info = USB_QUIRK_RESET_RESUME },
++ { USB_DEVICE(0x0955, 0x7721), .driver_info = USB_QUIRK_RESET_RESUME },
++ { USB_DEVICE(0x0955, 0x7c18), .driver_info = USB_QUIRK_RESET_RESUME },
++ { USB_DEVICE(0x0955, 0x7e19), .driver_info = USB_QUIRK_RESET_RESUME },
++ { USB_DEVICE(0x0955, 0x7f21), .driver_info = USB_QUIRK_RESET_RESUME },
++
+ /* X-Rite/Gretag-Macbeth Eye-One Pro display colorimeter */
+ { USB_DEVICE(0x0971, 0x2000), .driver_info = USB_QUIRK_NO_SET_INTF },
+
+--
+2.35.1
+
--- /dev/null
+From ac2a7e218664fdb3a56750b47c5b41ca756c3aa4 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 18 Oct 2022 21:16:07 +0800
+Subject: wwan_hwsim: fix possible memory leak in wwan_hwsim_dev_new()
+
+From: Yang Yingliang <yangyingliang@huawei.com>
+
+[ Upstream commit 258ad2fe5ede773625adfda88b173f4123e59f45 ]
+
+Inject fault while probing module, if device_register() fails,
+but the refcount of kobject is not decreased to 0, the name
+allocated in dev_set_name() is leaked. Fix this by calling
+put_device(), so that name can be freed in callback function
+kobject_cleanup().
+
+unreferenced object 0xffff88810152ad20 (size 8):
+ comm "modprobe", pid 252, jiffies 4294849206 (age 22.713s)
+ hex dump (first 8 bytes):
+ 68 77 73 69 6d 30 00 ff hwsim0..
+ backtrace:
+ [<000000009c3504ed>] __kmalloc_node_track_caller+0x44/0x1b0
+ [<00000000c0228a5e>] kvasprintf+0xb5/0x140
+ [<00000000cff8c21f>] kvasprintf_const+0x55/0x180
+ [<0000000055a1e073>] kobject_set_name_vargs+0x56/0x150
+ [<000000000a80b139>] dev_set_name+0xab/0xe0
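+
+The rule this follows, as a minimal sketch (simplified from the driver,
+not its exact code): once dev_set_name()/device_register() have run,
+the kobject owns allocated state, so a failed registration must be
+unwound with a reference drop rather than a bare kfree():
+
+	err = device_register(&dev->dev);
+	if (err) {
+		/* put_device() ends in kobject_cleanup(), which frees the
+		 * name and invokes the release callback for 'dev' itself;
+		 * kfree(dev) here would leak the name. */
+		put_device(&dev->dev);
+		return ERR_PTR(err);
+	}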
+
+Fixes: f36a111a74e7 ("wwan_hwsim: WWAN device simulator")
+Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
+Reviewed-by: Loic Poulain <loic.poulain@linaro.org>
+Acked-by: Sergey Ryazanov <ryazanov.s.a@gmail.com>
+Link: https://lore.kernel.org/r/20221018131607.1901641-1-yangyingliang@huawei.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/net/wwan/wwan_hwsim.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+diff --git a/drivers/net/wwan/wwan_hwsim.c b/drivers/net/wwan/wwan_hwsim.c
+index 5b62cf3b3c42..a4230a7376df 100644
+--- a/drivers/net/wwan/wwan_hwsim.c
++++ b/drivers/net/wwan/wwan_hwsim.c
+@@ -310,7 +310,7 @@ static struct wwan_hwsim_dev *wwan_hwsim_dev_new(void)
+ return ERR_PTR(err);
+
+ err_free_dev:
+- kfree(dev);
++ put_device(&dev->dev);
+
+ return ERR_PTR(err);
+ }
+--
+2.35.1
+