--- /dev/null
+From jannh@google.com Fri Sep 16 11:29:18 2022
+From: Jann Horn <jannh@google.com>
+Date: Thu, 15 Sep 2022 16:25:19 +0200
+Subject: mm: Fix TLB flush for not-first PFNMAP mappings in unmap_region()
+To: stable@vger.kernel.org, Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+Cc: Hugh Dickins <hughd@google.com>, Peter Zijlstra <peterz@infradead.org>
+Message-ID: <20220915142519.2941949-1-jannh@google.com>
+
+From: Jann Horn <jannh@google.com>
+
+This is a stable-specific patch: I botched the stable-specific rewrite of
+commit b67fbebd4cf98 ("mmu_gather: Force tlb-flush VM_PFNMAP vmas").
+As Hugh pointed out, unmap_region() actually operates on a list of VMAs,
+and the variable "vma" merely points to the first VMA in that list.
+So if we want to check whether any of the VMAs being unmapped has
+VM_PFNMAP or VM_MIXEDMAP set, we have to iterate through the list and
+check each VMA (a standalone sketch follows this patch).
+
+Signed-off-by: Jann Horn <jannh@google.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ mm/mmap.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/mm/mmap.c
++++ b/mm/mmap.c
+@@ -2664,6 +2664,7 @@ static void unmap_region(struct mm_struc
+ {
+ struct vm_area_struct *next = vma_next(mm, prev);
+ struct mmu_gather tlb;
++ struct vm_area_struct *cur_vma;
+
+ lru_add_drain();
+ tlb_gather_mmu(&tlb, mm, start, end);
+@@ -2678,8 +2679,12 @@ static void unmap_region(struct mm_struc
+ * concurrent flush in this region has to be coming through the rmap,
+ * and we synchronize against that using the rmap lock.
+ */
+- if ((vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0)
+- tlb_flush_mmu(&tlb);
++ for (cur_vma = vma; cur_vma; cur_vma = cur_vma->vm_next) {
++ if ((cur_vma->vm_flags & (VM_PFNMAP|VM_MIXEDMAP)) != 0) {
++ tlb_flush_mmu(&tlb);
++ break;
++ }
++ }
+
+ free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
+ next ? next->vm_start : USER_PGTABLES_CEILING);
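
For reference, the check the second hunk above adds can be read as a small
predicate over 5.10's singly linked VMA chain; a minimal sketch (the helper
name is illustrative, not part of the patch), assuming the vm_next and
vm_flags fields from <linux/mm.h>:

#include <linux/mm.h>

static bool vma_chain_has_pfnmap(struct vm_area_struct *vma)
{
	struct vm_area_struct *cur;

	/* The detached chain handed to unmap_region() ends in a NULL vm_next. */
	for (cur = vma; cur; cur = cur->vm_next) {
		if (cur->vm_flags & (VM_PFNMAP | VM_MIXEDMAP))
			return true;
	}
	return false;
}

The patch open-codes this walk and calls tlb_flush_mmu() as soon as one
matching VMA is found, so the early flush happens at most once per call.
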
--- /dev/null
+From foo@baz Fri Sep 16 11:33:35 AM CEST 2022
+From: Ovidiu Panait <ovidiu.panait@windriver.com>
+Date: Wed, 14 Sep 2022 14:52:36 +0300
+Subject: Revert "x86/ftrace: Use alternative RET encoding"
+To: stable@vger.kernel.org
+Cc: paul.gortmaker@windriver.com, gregkh@linuxfoundation.org, peterz@infradead.org, bp@suse.de, jpoimboe@kernel.org, cascardo@canonical.com, Ovidiu Panait <ovidiu.panait@windriver.com>
+Message-ID: <20220914115238.882630-1-ovidiu.panait@windriver.com>
+
+From: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+
+This reverts commit 00b136bb6254e0abf6aaafe62c4da5f6c4fea4cb.
+
+This temporarily reverts the backport of upstream commit
+1f001e9da6bbf482311e45e48f53c2bd2179e59c. It was not correct to copy the
+ftrace stub, as the stub now contains a relative jump to the return
+thunk; that jump does not remain valid at the address the bytes are
+copied to, which broke ftrace (see the sketch after this patch).
+
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/ftrace.c | 7 ++-----
+ 1 file changed, 2 insertions(+), 5 deletions(-)
+
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -309,7 +309,7 @@ union ftrace_op_code_union {
+ } __attribute__((packed));
+ };
+
+-#define RET_SIZE (IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))
++#define RET_SIZE 1 + IS_ENABLED(CONFIG_SLS)
+
+ static unsigned long
+ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+@@ -368,10 +368,7 @@ create_trampoline(struct ftrace_ops *ops
+
+ /* The trampoline ends with ret(q) */
+ retq = (unsigned long)ftrace_stub;
+- if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+- memcpy(ip, text_gen_insn(JMP32_INSN_OPCODE, ip, &__x86_return_thunk), JMP32_INSN_SIZE);
+- else
+- ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
++ ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
+ if (WARN_ON(ret < 0))
+ goto fail;
+
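
The breakage described above follows from how a rel32 JMP encodes its
target: the displacement is relative to the end of the instruction, so the
same five bytes resolve to a different target once copied to another
address. A minimal userspace sketch of the decoding (illustrative, not
kernel code):

#include <stdint.h>
#include <string.h>

/* Compute the target of a 5-byte rel32 JMP (opcode 0xe9) located at insn. */
static uintptr_t jmp32_target(const uint8_t *insn)
{
	int32_t rel;

	memcpy(&rel, insn + 1, sizeof(rel));	/* displacement follows opcode */
	return (uintptr_t)insn + 5 + rel;	/* relative to next instruction */
}

Copying the stub's bytes moves insn without adjusting rel, so the copied
jump no longer lands on the return thunk.
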
perf-arm_pmu_platform-fix-tests-for-platform_get_irq.patch
platform-x86-acer-wmi-acer-aspire-one-aod270-packard.patch
usb-storage-add-asus-0x0b05-0x1932-to-ignore_uas.patch
+mm-fix-tlb-flush-for-not-first-pfnmap-mappings-in-unmap_region.patch
+revert-x86-ftrace-use-alternative-ret-encoding.patch
+x86-ibt-ftrace-make-function-graph-play-nice.patch
+x86-ftrace-use-alternative-ret-encoding.patch
+soc-fsl-select-fsl_guts-driver-for-dpio.patch
--- /dev/null
+From 9a472613f5bccf1b36837423495ae592a9c5182f Mon Sep 17 00:00:00 2001
+From: Mathew McBride <matt@traverse.com.au>
+Date: Thu, 1 Sep 2022 05:21:49 +0000
+Subject: soc: fsl: select FSL_GUTS driver for DPIO
+
+From: Mathew McBride <matt@traverse.com.au>
+
+commit 9a472613f5bccf1b36837423495ae592a9c5182f upstream.
+
+The soc/fsl/dpio driver will perform a soc_device_match()
+to determine the optimal cache settings for a given CPU core.
+
+If FSL_GUTS is not enabled, this lookup will fail, the driver will not
+configure cache stashing for the given DPIO, and a string of
+"unknown SoC" messages will appear (see the sketch after this patch):
+
+fsl_mc_dpio dpio.7: unknown SoC version
+fsl_mc_dpio dpio.6: unknown SoC version
+fsl_mc_dpio dpio.5: unknown SoC version
+
+Fixes: 51da14e96e9b ("soc: fsl: dpio: configure cache stashing destination")
+Signed-off-by: Mathew McBride <matt@traverse.com.au>
+Reviewed-by: Ioana Ciornei <ioana.ciornei@nxp.com>
+Cc: stable@vger.kernel.org
+Link: https://lore.kernel.org/r/20220901052149.23873-2-matt@traverse.com.au
+Signed-off-by: Arnd Bergmann <arnd@arndb.de>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/soc/fsl/Kconfig | 1 +
+ 1 file changed, 1 insertion(+)
+
+--- a/drivers/soc/fsl/Kconfig
++++ b/drivers/soc/fsl/Kconfig
+@@ -24,6 +24,7 @@ config FSL_MC_DPIO
+ tristate "QorIQ DPAA2 DPIO driver"
+ depends on FSL_MC_BUS
+ select SOC_BUS
++ select FSL_GUTS
+ help
+ Driver for the DPAA2 DPIO object. A DPIO provides queue and
+ buffer management facilities for software to interact with
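
For context, soc_device_match() compares a driver-supplied attribute table
against the SoC device that, on these systems, the FSL_GUTS driver
registers; with FSL_GUTS disabled nothing is registered and the lookup
returns NULL. A minimal sketch of the pattern (the table contents and
function name are illustrative, not the dpio driver's exact code),
assuming <linux/sys_soc.h>:

#include <linux/device.h>
#include <linux/sys_soc.h>

static const struct soc_device_attribute dpio_soc_matches[] = {
	{ .family = "QorIQ LS2088A" },
	{ /* sentinel */ }
};

static void dpio_setup_stashing(struct device *dev)
{
	/* soc_device_match() returns the matching entry, or NULL. */
	if (!soc_device_match(dpio_soc_matches)) {
		dev_info(dev, "unknown SoC version\n");
		return;
	}
	/* ... pick the cache stashing destination for this SoC ... */
}
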
--- /dev/null
+From foo@baz Fri Sep 16 11:33:35 AM CEST 2022
+From: Ovidiu Panait <ovidiu.panait@windriver.com>
+Date: Wed, 14 Sep 2022 14:52:38 +0300
+Subject: x86/ftrace: Use alternative RET encoding
+To: stable@vger.kernel.org
+Cc: paul.gortmaker@windriver.com, gregkh@linuxfoundation.org, peterz@infradead.org, bp@suse.de, jpoimboe@kernel.org, cascardo@canonical.com, Ovidiu Panait <ovidiu.panait@windriver.com>
+Message-ID: <20220914115238.882630-3-ovidiu.panait@windriver.com>
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit 1f001e9da6bbf482311e45e48f53c2bd2179e59c upstream.
+
+Use the return thunk in ftrace trampolines, if needed.
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+Reviewed-by: Josh Poimboeuf <jpoimboe@kernel.org>
+Signed-off-by: Borislav Petkov <bp@suse.de>
+[cascardo: use memcpy(text_gen_insn) as there is no __text_gen_insn]
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/ftrace.c | 9 +++++++--
+ 1 file changed, 7 insertions(+), 2 deletions(-)
+
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -309,7 +309,7 @@ union ftrace_op_code_union {
+ } __attribute__((packed));
+ };
+
+-#define RET_SIZE 1 + IS_ENABLED(CONFIG_SLS)
++#define RET_SIZE (IS_ENABLED(CONFIG_RETPOLINE) ? 5 : 1 + IS_ENABLED(CONFIG_SLS))
+
+ static unsigned long
+ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
+@@ -365,7 +365,12 @@ create_trampoline(struct ftrace_ops *ops
+ goto fail;
+
+ ip = trampoline + size;
+- memcpy(ip, retq, RET_SIZE);
++
++ /* The trampoline ends with ret(q) */
++ if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
++ memcpy(ip, text_gen_insn(JMP32_INSN_OPCODE, ip, &__x86_return_thunk), JMP32_INSN_SIZE);
++ else
++ memcpy(ip, retq, sizeof(retq));
+
+ /* No need to test direct calls on created trampolines */
+ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
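
Unlike the reverted attempt, the hunk above generates the jump at its
final address: text_gen_insn() computes the rel32 displacement for the
trampoline's own ip, so the jump to __x86_return_thunk stays valid. A
minimal sketch of the equivalent encoding (standalone and simplified, not
the kernel implementation):

#include <stdint.h>
#include <string.h>

/* Rough equivalent of text_gen_insn(JMP32_INSN_OPCODE, ip, dest). */
static void encode_jmp32(uint8_t buf[5], const void *ip, const void *dest)
{
	int32_t rel = (int32_t)((uintptr_t)dest - ((uintptr_t)ip + 5));

	buf[0] = 0xe9;				/* JMP32_INSN_OPCODE */
	memcpy(buf + 1, &rel, sizeof(rel));	/* relative to next insn */
}

Because rel depends on ip, the instruction must be generated for each
trampoline rather than copied from a fixed location.
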
--- /dev/null
+From foo@baz Fri Sep 16 11:33:35 AM CEST 2022
+From: Ovidiu Panait <ovidiu.panait@windriver.com>
+Date: Wed, 14 Sep 2022 14:52:37 +0300
+Subject: x86/ibt,ftrace: Make function-graph play nice
+To: stable@vger.kernel.org
+Cc: paul.gortmaker@windriver.com, gregkh@linuxfoundation.org, peterz@infradead.org, bp@suse.de, jpoimboe@kernel.org, cascardo@canonical.com, Josh Poimboeuf <jpoimboe@redhat.com>, Ovidiu Panait <ovidiu.panait@windriver.com>
+Message-ID: <20220914115238.882630-2-ovidiu.panait@windriver.com>
+
+From: Peter Zijlstra <peterz@infradead.org>
+
+commit e52fc2cf3f662828cc0d51c4b73bed73ad275fce upstream.
+
+The return trampoline must not use an indirect branch to return; while
+this preserves the RSB, it is fundamentally incompatible with IBT.
+Instead, use a retpoline-like ROP gadget that defeats IBT while not
+unbalancing the RSB.
+
+And since ftrace_stub is no longer a plain RET, don't use it to copy
+from. Since RET is a trivial instruction, poke it directly (the bytes
+involved are sketched after this patch).
+
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Acked-by: Josh Poimboeuf <jpoimboe@redhat.com>
+Link: https://lore.kernel.org/r/20220308154318.347296408@infradead.org
+[cascardo: remove ENDBR]
+Signed-off-by: Thadeu Lima de Souza Cascardo <cascardo@canonical.com>
+[OP: adjusted context for 5.10-stable]
+Signed-off-by: Ovidiu Panait <ovidiu.panait@windriver.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/x86/kernel/ftrace.c | 9 ++-------
+ arch/x86/kernel/ftrace_64.S | 19 +++++++++++++++----
+ 2 files changed, 17 insertions(+), 11 deletions(-)
+
+--- a/arch/x86/kernel/ftrace.c
++++ b/arch/x86/kernel/ftrace.c
+@@ -322,12 +322,12 @@ create_trampoline(struct ftrace_ops *ops
+ unsigned long offset;
+ unsigned long npages;
+ unsigned long size;
+- unsigned long retq;
+ unsigned long *ptr;
+ void *trampoline;
+ void *ip;
+ /* 48 8b 15 <offset> is movq <offset>(%rip), %rdx */
+ unsigned const char op_ref[] = { 0x48, 0x8b, 0x15 };
++ unsigned const char retq[] = { RET_INSN_OPCODE, INT3_INSN_OPCODE };
+ union ftrace_op_code_union op_ptr;
+ int ret;
+
+@@ -365,12 +365,7 @@ create_trampoline(struct ftrace_ops *ops
+ goto fail;
+
+ ip = trampoline + size;
+-
+- /* The trampoline ends with ret(q) */
+- retq = (unsigned long)ftrace_stub;
+- ret = copy_from_kernel_nofault(ip, (void *)retq, RET_SIZE);
+- if (WARN_ON(ret < 0))
+- goto fail;
++ memcpy(ip, retq, RET_SIZE);
+
+ /* No need to test direct calls on created trampolines */
+ if (ops->flags & FTRACE_OPS_FL_SAVE_REGS) {
+--- a/arch/x86/kernel/ftrace_64.S
++++ b/arch/x86/kernel/ftrace_64.S
+@@ -170,7 +170,6 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L
+
+ /*
+ * This is weak to keep gas from relaxing the jumps.
+- * It is also used to copy the RET for trampolines.
+ */
+ SYM_INNER_LABEL_ALIGN(ftrace_stub, SYM_L_WEAK)
+ UNWIND_HINT_FUNC
+@@ -325,7 +324,7 @@ SYM_FUNC_END(ftrace_graph_caller)
+
+ SYM_CODE_START(return_to_handler)
+ UNWIND_HINT_EMPTY
+- subq $24, %rsp
++ subq $16, %rsp
+
+ /* Save the return values */
+ movq %rax, (%rsp)
+@@ -337,7 +336,19 @@ SYM_CODE_START(return_to_handler)
+ movq %rax, %rdi
+ movq 8(%rsp), %rdx
+ movq (%rsp), %rax
+- addq $24, %rsp
+- JMP_NOSPEC rdi
++
++ addq $16, %rsp
++ /*
++ * Jump back to the old return address. This cannot be JMP_NOSPEC rdi
++ * since IBT would demand that contain ENDBR, which simply isn't so for
++ * return addresses. Use a retpoline here to keep the RSB balanced.
++ */
++ ANNOTATE_INTRA_FUNCTION_CALL
++ call .Ldo_rop
++ int3
++.Ldo_rop:
++ mov %rdi, (%rsp)
++ UNWIND_HINT_FUNC
++ RET
+ SYM_CODE_END(return_to_handler)
+ #endif
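
Two details of the hunks above, for reference. The direct poke replaces
copying from ftrace_stub with two literal bytes; a sketch of what now ends
the trampoline (byte values per RET_INSN_OPCODE and INT3_INSN_OPCODE, with
RET_SIZE selecting one or two of them):

static const unsigned char retq[] = {
	0xc3,	/* RET_INSN_OPCODE: a plain one-byte RET */
	0xcc,	/* INT3_INSN_OPCODE: SLS pad, copied only when CONFIG_SLS */
};

And the return_to_handler change avoids the indirect JMP_NOSPEC rdi with a
call/mov/RET sequence: the intra-function CALL pushes a return address
(keeping the RSB balanced), the MOV overwrites that stack slot with the
real return address held in %rdi, and the RET consumes it, so no
IBT-checked indirect branch is ever executed.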