--- /dev/null
+From f1a54ae9af0da4d76239256ed640a93ab3aadac0 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Thu, 17 Oct 2019 15:26:38 +0100
+Subject: arm64: module/ftrace: intialize PLT at load time
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit f1a54ae9af0da4d76239256ed640a93ab3aadac0 upstream.
+
+Currently we lazily-initialize a module's ftrace PLT at runtime when we
+install the first ftrace call. To do so we have to apply a number of
+sanity checks, transiently mark the module text as RW, and perform an
+IPI as part of handling Neoverse-N1 erratum #1542419.
+
+We only expect the ftrace trampoline to point at ftrace_caller() (AKA
+FTRACE_ADDR), so let's simplify all of this by initializing the PLT at
+module load time, before the module loader marks the module RO and
+performs the initial I-cache maintenance for the module.
+
+Thus we can rely on the module having been correctly initialized, and can
+simplify the runtime work necessary to install an ftrace call in a
+module. This will also allow for the removal of module_disable_ro().
+
+Tested by forcing ftrace_make_call() to use the module PLT, and then
+loading up a module after setting up ftrace with:
+
+| echo ":mod:<module-name>" > set_ftrace_filter;
+| echo function > current_tracer;
+| modprobe <module-name>
+
+Since FTRACE_ADDR is only defined when CONFIG_DYNAMIC_FTRACE is
+selected, we wrap its use along with most of module_init_ftrace_plt()
+with ifdeffery rather than using IS_ENABLED().
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Torsten Duwe <duwe@suse.de>
+Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
+Tested-by: Torsten Duwe <duwe@suse.de>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Cc: Will Deacon <will@kernel.org>
+Cc: Stephen Boyd <swboyd@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/ftrace.c | 55 +++++++++++----------------------------------
+ arch/arm64/kernel/module.c | 32 +++++++++++++++++---------
+ 2 files changed, 35 insertions(+), 52 deletions(-)
+
+--- a/arch/arm64/kernel/ftrace.c
++++ b/arch/arm64/kernel/ftrace.c
+@@ -73,10 +73,22 @@ int ftrace_make_call(struct dyn_ftrace *
+
+ if (offset < -SZ_128M || offset >= SZ_128M) {
+ #ifdef CONFIG_ARM64_MODULE_PLTS
+- struct plt_entry trampoline, *dst;
+ struct module *mod;
+
+ /*
++ * There is only one ftrace trampoline per module. For now,
++ * this is not a problem since on arm64, all dynamic ftrace
++ * invocations are routed via ftrace_caller(). This will need
++ * to be revisited if support for multiple ftrace entry points
++ * is added in the future, but for now, the pr_err() below
++ * deals with a theoretical issue only.
++ */
++ if (addr != FTRACE_ADDR) {
++ pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
++ return -EINVAL;
++ }
++
++ /*
+ * On kernels that support module PLTs, the offset between the
+ * branch instruction and its target may legally exceed the
+ * range of an ordinary relative 'bl' opcode. In this case, we
+@@ -93,46 +105,7 @@ int ftrace_make_call(struct dyn_ftrace *
+ if (WARN_ON(!mod))
+ return -EINVAL;
+
+- /*
+- * There is only one ftrace trampoline per module. For now,
+- * this is not a problem since on arm64, all dynamic ftrace
+- * invocations are routed via ftrace_caller(). This will need
+- * to be revisited if support for multiple ftrace entry points
+- * is added in the future, but for now, the pr_err() below
+- * deals with a theoretical issue only.
+- *
+- * Note that PLTs are place relative, and plt_entries_equal()
+- * checks whether they point to the same target. Here, we need
+- * to check if the actual opcodes are in fact identical,
+- * regardless of the offset in memory so use memcmp() instead.
+- */
+- dst = mod->arch.ftrace_trampoline;
+- trampoline = get_plt_entry(addr, dst);
+- if (memcmp(dst, &trampoline, sizeof(trampoline))) {
+- if (plt_entry_is_initialized(dst)) {
+- pr_err("ftrace: far branches to multiple entry points unsupported inside a single module\n");
+- return -EINVAL;
+- }
+-
+- /* point the trampoline to our ftrace entry point */
+- module_disable_ro(mod);
+- *dst = trampoline;
+- module_enable_ro(mod, true);
+-
+- /*
+- * Ensure updated trampoline is visible to instruction
+- * fetch before we patch in the branch. Although the
+- * architecture doesn't require an IPI in this case,
+- * Neoverse-N1 erratum #1542419 does require one
+- * if the TLB maintenance in module_enable_ro() is
+- * skipped due to rodata_enabled. It doesn't seem worth
+- * it to make it conditional given that this is
+- * certainly not a fast-path.
+- */
+- flush_icache_range((unsigned long)&dst[0],
+- (unsigned long)&dst[1]);
+- }
+- addr = (unsigned long)dst;
++ addr = (unsigned long)mod->arch.ftrace_trampoline;
+ #else /* CONFIG_ARM64_MODULE_PLTS */
+ return -EINVAL;
+ #endif /* CONFIG_ARM64_MODULE_PLTS */
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -9,6 +9,7 @@
+
+ #include <linux/bitops.h>
+ #include <linux/elf.h>
++#include <linux/ftrace.h>
+ #include <linux/gfp.h>
+ #include <linux/kasan.h>
+ #include <linux/kernel.h>
+@@ -485,24 +486,33 @@ static const Elf_Shdr *find_section(cons
+ return NULL;
+ }
+
++static int module_init_ftrace_plt(const Elf_Ehdr *hdr,
++ const Elf_Shdr *sechdrs,
++ struct module *mod)
++{
++#if defined(CONFIG_ARM64_MODULE_PLTS) && defined(CONFIG_DYNAMIC_FTRACE)
++ const Elf_Shdr *s;
++ struct plt_entry *plt;
++
++ s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
++ if (!s)
++ return -ENOEXEC;
++
++ plt = (void *)s->sh_addr;
++ *plt = get_plt_entry(FTRACE_ADDR, plt);
++ mod->arch.ftrace_trampoline = plt;
++#endif
++ return 0;
++}
++
+ int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+ {
+ const Elf_Shdr *s;
+-
+ s = find_section(hdr, sechdrs, ".altinstructions");
+ if (s)
+ apply_alternatives_module((void *)s->sh_addr, s->sh_size);
+
+-#ifdef CONFIG_ARM64_MODULE_PLTS
+- if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE)) {
+- s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
+- if (!s)
+- return -ENOEXEC;
+- me->arch.ftrace_trampoline = (void *)s->sh_addr;
+- }
+-#endif
+-
+- return 0;
++ return module_init_ftrace_plt(hdr, sechdrs, me);
+ }
--- /dev/null
+From bd8b21d3dd661658addc1cd4cc869bab11d28596 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Thu, 17 Oct 2019 14:03:26 +0100
+Subject: arm64: module: rework special section handling
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit bd8b21d3dd661658addc1cd4cc869bab11d28596 upstream.
+
+When we load a module, we have to perform some special work for a couple
+of named sections. To do this, we iterate over all of the module's
+sections, and perform work for each section we recognize.
+
+To make it easier to handle the unexpected absence of a section, and to
+make the section-specific logic easier to read, let's factor the section
+search into a helper. Similar is already done in the core module loader,
+and other architectures (and ideally we'd unify these in future).
+
+If we expect a module to have an ftrace trampoline section, but it
+doesn't have one, we'll now reject loading the module. When
+ARM64_MODULE_PLTS is selected, any correctly built module should have
+one (and this is assumed by arm64's ftrace PLT code) and the absence of
+such a section implies something has gone wrong at build time.
+
+Subsequent patches will make use of the new helper.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Torsten Duwe <duwe@suse.de>
+Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
+Tested-by: Torsten Duwe <duwe@suse.de>
+Cc: Catalin Marinas <catalin.marinas@arm.com>
+Cc: James Morse <james.morse@arm.com>
+Cc: Will Deacon <will@kernel.org>
+Cc: Stephen Boyd <swboyd@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/arm64/kernel/module.c | 35 ++++++++++++++++++++++++++---------
+ 1 file changed, 26 insertions(+), 9 deletions(-)
+
+--- a/arch/arm64/kernel/module.c
++++ b/arch/arm64/kernel/module.c
+@@ -470,22 +470,39 @@ overflow:
+ return -ENOEXEC;
+ }
+
+-int module_finalize(const Elf_Ehdr *hdr,
+- const Elf_Shdr *sechdrs,
+- struct module *me)
++static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
++ const Elf_Shdr *sechdrs,
++ const char *name)
+ {
+ const Elf_Shdr *s, *se;
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+- if (strcmp(".altinstructions", secstrs + s->sh_name) == 0)
+- apply_alternatives_module((void *)s->sh_addr, s->sh_size);
++ if (strcmp(name, secstrs + s->sh_name) == 0)
++ return s;
++ }
++
++ return NULL;
++}
++
++int module_finalize(const Elf_Ehdr *hdr,
++ const Elf_Shdr *sechdrs,
++ struct module *me)
++{
++ const Elf_Shdr *s;
++
++ s = find_section(hdr, sechdrs, ".altinstructions");
++ if (s)
++ apply_alternatives_module((void *)s->sh_addr, s->sh_size);
++
+ #ifdef CONFIG_ARM64_MODULE_PLTS
+- if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE) &&
+- !strcmp(".text.ftrace_trampoline", secstrs + s->sh_name))
+- me->arch.ftrace_trampoline = (void *)s->sh_addr;
+-#endif
++ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE)) {
++ s = find_section(hdr, sechdrs, ".text.ftrace_trampoline");
++ if (!s)
++ return -ENOEXEC;
++ me->arch.ftrace_trampoline = (void *)s->sh_addr;
+ }
++#endif
+
+ return 0;
+ }
--- /dev/null
+From 364438fd629f7611a84c8e6d7de91659300f1502 Mon Sep 17 00:00:00 2001
+From: Nicholas Bishop <nicholasbishop@google.com>
+Date: Fri, 11 Feb 2022 14:57:39 -0500
+Subject: drm/radeon: Fix backlight control on iMac 12,1
+
+From: Nicholas Bishop <nicholasbishop@google.com>
+
+commit 364438fd629f7611a84c8e6d7de91659300f1502 upstream.
+
+The iMac 12,1 does not use the gmux driver for backlight, so the radeon
+backlight device is needed to set the brightness.
+
+Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1838
+Signed-off-by: Nicholas Bishop <nicholasbishop@google.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Cc: stable@vger.kernel.org
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/gpu/drm/radeon/atombios_encoders.c | 3 ++-
+ 1 file changed, 2 insertions(+), 1 deletion(-)
+
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -197,7 +197,8 @@ void radeon_atom_backlight_init(struct r
+ * so don't register a backlight device
+ */
+ if ((rdev->pdev->subsystem_vendor == PCI_VENDOR_ID_APPLE) &&
+- (rdev->pdev->device == 0x6741))
++ (rdev->pdev->device == 0x6741) &&
++ !dmi_match(DMI_PRODUCT_NAME, "iMac12,1"))
+ return;
+
+ if (!radeon_encoder->enc_priv)
--- /dev/null
+From 9c6e071913792d80894cd0be98cc3c4b770e26d3 Mon Sep 17 00:00:00 2001
+From: Zhang Yi <yi.zhang@huawei.com>
+Date: Wed, 8 Sep 2021 20:08:49 +0800
+Subject: ext4: check for inconsistent extents between index and leaf block
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+commit 9c6e071913792d80894cd0be98cc3c4b770e26d3 upstream.
+
+Now we can detect overlapping extents in a leaf block and
+out-of-order index extents in an index block. But the .ee_block in the
+first extent of one leaf block should be equal to the .ei_block in its
+parent index extent entry. This patch adds a check to verify such an
+inconsistency between the index and leaf block.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Link: https://lore.kernel.org/r/20210908120850.4012324-3-yi.zhang@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Leah Rumancik <leah.rumancik@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 59 ++++++++++++++++++++++++++++++++----------------------
+ 1 file changed, 36 insertions(+), 23 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -390,7 +390,8 @@ static int ext4_valid_extent_idx(struct
+
+ static int ext4_valid_extent_entries(struct inode *inode,
+ struct ext4_extent_header *eh,
+- ext4_fsblk_t *pblk, int depth)
++ ext4_lblk_t lblk, ext4_fsblk_t *pblk,
++ int depth)
+ {
+ unsigned short entries;
+ ext4_lblk_t lblock = 0;
+@@ -406,6 +407,14 @@ static int ext4_valid_extent_entries(str
+ struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
+ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ ext4_fsblk_t pblock = 0;
++
++ /*
++ * The logical block in the first entry should equal to
++ * the number in the index block.
++ */
++ if (depth != ext_depth(inode) &&
++ lblk != le32_to_cpu(ext->ee_block))
++ return 0;
+ while (entries) {
+ if (!ext4_valid_extent(inode, ext))
+ return 0;
+@@ -423,6 +432,14 @@ static int ext4_valid_extent_entries(str
+ }
+ } else {
+ struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
++
++ /*
++ * The logical block in the first entry should equal to
++ * the number in the parent index block.
++ */
++ if (depth != ext_depth(inode) &&
++ lblk != le32_to_cpu(ext_idx->ei_block))
++ return 0;
+ while (entries) {
+ if (!ext4_valid_extent_idx(inode, ext_idx))
+ return 0;
+@@ -443,7 +460,7 @@ static int ext4_valid_extent_entries(str
+
+ static int __ext4_ext_check(const char *function, unsigned int line,
+ struct inode *inode, struct ext4_extent_header *eh,
+- int depth, ext4_fsblk_t pblk)
++ int depth, ext4_fsblk_t pblk, ext4_lblk_t lblk)
+ {
+ const char *error_msg;
+ int max = 0, err = -EFSCORRUPTED;
+@@ -469,7 +486,7 @@ static int __ext4_ext_check(const char *
+ error_msg = "invalid eh_entries";
+ goto corrupted;
+ }
+- if (!ext4_valid_extent_entries(inode, eh, &pblk, depth)) {
++ if (!ext4_valid_extent_entries(inode, eh, lblk, &pblk, depth)) {
+ error_msg = "invalid extent entries";
+ goto corrupted;
+ }
+@@ -498,7 +515,7 @@ corrupted:
+ }
+
+ #define ext4_ext_check(inode, eh, depth, pblk) \
+- __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk))
++ __ext4_ext_check(__func__, __LINE__, (inode), (eh), (depth), (pblk), 0)
+
+ int ext4_ext_check_inode(struct inode *inode)
+ {
+@@ -531,12 +548,14 @@ static void ext4_cache_extents(struct in
+
+ static struct buffer_head *
+ __read_extent_tree_block(const char *function, unsigned int line,
+- struct inode *inode, ext4_fsblk_t pblk, int depth,
+- int flags)
++ struct inode *inode, struct ext4_extent_idx *idx,
++ int depth, int flags)
+ {
+ struct buffer_head *bh;
+ int err;
++ ext4_fsblk_t pblk;
+
++ pblk = ext4_idx_pblock(idx);
+ bh = sb_getblk_gfp(inode->i_sb, pblk, __GFP_MOVABLE | GFP_NOFS);
+ if (unlikely(!bh))
+ return ERR_PTR(-ENOMEM);
+@@ -552,8 +571,8 @@ __read_extent_tree_block(const char *fun
+ if (!ext4_has_feature_journal(inode->i_sb) ||
+ (inode->i_ino !=
+ le32_to_cpu(EXT4_SB(inode->i_sb)->s_es->s_journal_inum))) {
+- err = __ext4_ext_check(function, line, inode,
+- ext_block_hdr(bh), depth, pblk);
++ err = __ext4_ext_check(function, line, inode, ext_block_hdr(bh),
++ depth, pblk, le32_to_cpu(idx->ei_block));
+ if (err)
+ goto errout;
+ }
+@@ -572,8 +591,8 @@ errout:
+
+ }
+
+-#define read_extent_tree_block(inode, pblk, depth, flags) \
+- __read_extent_tree_block(__func__, __LINE__, (inode), (pblk), \
++#define read_extent_tree_block(inode, idx, depth, flags) \
++ __read_extent_tree_block(__func__, __LINE__, (inode), (idx), \
+ (depth), (flags))
+
+ /*
+@@ -620,8 +639,7 @@ int ext4_ext_precache(struct inode *inod
+ i--;
+ continue;
+ }
+- bh = read_extent_tree_block(inode,
+- ext4_idx_pblock(path[i].p_idx++),
++ bh = read_extent_tree_block(inode, path[i].p_idx++,
+ depth - i - 1,
+ EXT4_EX_FORCE_CACHE);
+ if (IS_ERR(bh)) {
+@@ -924,8 +942,7 @@ ext4_find_extent(struct inode *inode, ex
+ path[ppos].p_depth = i;
+ path[ppos].p_ext = NULL;
+
+- bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
+- flags);
++ bh = read_extent_tree_block(inode, path[ppos].p_idx, --i, flags);
+ if (IS_ERR(bh)) {
+ ret = PTR_ERR(bh);
+ goto err;
+@@ -1524,7 +1541,6 @@ static int ext4_ext_search_right(struct
+ struct ext4_extent_header *eh;
+ struct ext4_extent_idx *ix;
+ struct ext4_extent *ex;
+- ext4_fsblk_t block;
+ int depth; /* Note, NOT eh_depth; depth from top of tree */
+ int ee_len;
+
+@@ -1591,20 +1607,17 @@ got_index:
+ * follow it and find the closest allocated
+ * block to the right */
+ ix++;
+- block = ext4_idx_pblock(ix);
+ while (++depth < path->p_depth) {
+ /* subtract from p_depth to get proper eh_depth */
+- bh = read_extent_tree_block(inode, block,
+- path->p_depth - depth, 0);
++ bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
+ eh = ext_block_hdr(bh);
+ ix = EXT_FIRST_INDEX(eh);
+- block = ext4_idx_pblock(ix);
+ put_bh(bh);
+ }
+
+- bh = read_extent_tree_block(inode, block, path->p_depth - depth, 0);
++ bh = read_extent_tree_block(inode, ix, path->p_depth - depth, 0);
+ if (IS_ERR(bh))
+ return PTR_ERR(bh);
+ eh = ext_block_hdr(bh);
+@@ -3126,9 +3139,9 @@ again:
+ ext_debug("move to level %d (block %llu)\n",
+ i + 1, ext4_idx_pblock(path[i].p_idx));
+ memset(path + i + 1, 0, sizeof(*path));
+- bh = read_extent_tree_block(inode,
+- ext4_idx_pblock(path[i].p_idx), depth - i - 1,
+- EXT4_EX_NOCACHE);
++ bh = read_extent_tree_block(inode, path[i].p_idx,
++ depth - i - 1,
++ EXT4_EX_NOCACHE);
+ if (IS_ERR(bh)) {
+ /* should we reset i_size? */
+ err = PTR_ERR(bh);
--- /dev/null
+From 8dd27fecede55e8a4e67eef2878040ecad0f0d33 Mon Sep 17 00:00:00 2001
+From: Zhang Yi <yi.zhang@huawei.com>
+Date: Wed, 8 Sep 2021 20:08:48 +0800
+Subject: ext4: check for out-of-order index extents in ext4_valid_extent_entries()
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+commit 8dd27fecede55e8a4e67eef2878040ecad0f0d33 upstream.
+
+After commit 5946d089379a ("ext4: check for overlapping extents in
+ext4_valid_extent_entries()"), we can check out the overlapping extent
+entry in leaf extent blocks. But the out-of-order extent entry in index
+extent blocks could also trigger bad things if the filesystem is
+inconsistent. So this patch add a check to figure out the out-of-order
+index extents and return error.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Theodore Ts'o <tytso@mit.edu>
+Link: https://lore.kernel.org/r/20210908120850.4012324-2-yi.zhang@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Leah Rumancik <leah.rumancik@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 21 ++++++++++++++-------
+ 1 file changed, 14 insertions(+), 7 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -390,9 +390,12 @@ static int ext4_valid_extent_idx(struct
+
+ static int ext4_valid_extent_entries(struct inode *inode,
+ struct ext4_extent_header *eh,
+- int depth)
++ ext4_fsblk_t *pblk, int depth)
+ {
+ unsigned short entries;
++ ext4_lblk_t lblock = 0;
++ ext4_lblk_t prev = 0;
++
+ if (eh->eh_entries == 0)
+ return 1;
+
+@@ -403,32 +406,36 @@ static int ext4_valid_extent_entries(str
+ struct ext4_extent *ext = EXT_FIRST_EXTENT(eh);
+ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
+ ext4_fsblk_t pblock = 0;
+- ext4_lblk_t lblock = 0;
+- ext4_lblk_t prev = 0;
+- int len = 0;
+ while (entries) {
+ if (!ext4_valid_extent(inode, ext))
+ return 0;
+
+ /* Check for overlapping extents */
+ lblock = le32_to_cpu(ext->ee_block);
+- len = ext4_ext_get_actual_len(ext);
+ if ((lblock <= prev) && prev) {
+ pblock = ext4_ext_pblock(ext);
+ es->s_last_error_block = cpu_to_le64(pblock);
+ return 0;
+ }
++ prev = lblock + ext4_ext_get_actual_len(ext) - 1;
+ ext++;
+ entries--;
+- prev = lblock + len - 1;
+ }
+ } else {
+ struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh);
+ while (entries) {
+ if (!ext4_valid_extent_idx(inode, ext_idx))
+ return 0;
++
++ /* Check for overlapping index extents */
++ lblock = le32_to_cpu(ext_idx->ei_block);
++ if ((lblock <= prev) && prev) {
++ *pblk = ext4_idx_pblock(ext_idx);
++ return 0;
++ }
+ ext_idx++;
+ entries--;
++ prev = lblock;
+ }
+ }
+ return 1;
+@@ -462,7 +469,7 @@ static int __ext4_ext_check(const char *
+ error_msg = "invalid eh_entries";
+ goto corrupted;
+ }
+- if (!ext4_valid_extent_entries(inode, eh, depth)) {
++ if (!ext4_valid_extent_entries(inode, eh, &pblk, depth)) {
+ error_msg = "invalid extent entries";
+ goto corrupted;
+ }
--- /dev/null
+From 0f2f87d51aebcf71a709b52f661d681594c7dffa Mon Sep 17 00:00:00 2001
+From: Zhang Yi <yi.zhang@huawei.com>
+Date: Wed, 8 Sep 2021 20:08:50 +0800
+Subject: ext4: prevent partial update of the extent blocks
+
+From: Zhang Yi <yi.zhang@huawei.com>
+
+commit 0f2f87d51aebcf71a709b52f661d681594c7dffa upstream.
+
+Most of the error paths in the current extent-updating operations do not
+roll back partial updates properly when something bad happens (e.g. in
+ext4_ext_insert_extent()). So we may get an inconsistent extents tree
+if the journal has been aborted due to an IO error, which will probably
+lead to a BUG_ON later when we access these extent entries in
+errors=continue mode. This patch drops the extent buffer's verified flag
+before updating the contents in ext4_ext_get_access(), and resets it
+after updating in __ext4_ext_dirty(). After this patch we can force a
+recheck of the extent buffer if the extents-tree update was interrupted,
+making sure the extents are consistent.
+
+Signed-off-by: Zhang Yi <yi.zhang@huawei.com>
+Reviewed-by: Theodore Ts'o <tytso@mit.edu>
+Link: https://lore.kernel.org/r/20210908120850.4012324-4-yi.zhang@huawei.com
+Signed-off-by: Theodore Ts'o <tytso@mit.edu>
+Signed-off-by: Leah Rumancik <leah.rumancik@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ fs/ext4/extents.c | 18 ++++++++++++++++--
+ 1 file changed, 16 insertions(+), 2 deletions(-)
+
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -133,14 +133,25 @@ static int ext4_ext_truncate_extend_rest
+ static int ext4_ext_get_access(handle_t *handle, struct inode *inode,
+ struct ext4_ext_path *path)
+ {
++ int err = 0;
++
+ if (path->p_bh) {
+ /* path points to block */
+ BUFFER_TRACE(path->p_bh, "get_write_access");
+- return ext4_journal_get_write_access(handle, path->p_bh);
++ err = ext4_journal_get_write_access(handle, path->p_bh);
++
++ /*
++ * The extent buffer's verified bit will be set again in
++ * __ext4_ext_dirty(). We could leave an inconsistent
++ * buffer if the extents updating procudure break off du
++ * to some error happens, force to check it again.
++ */
++ if (!err)
++ clear_buffer_verified(path->p_bh);
+ }
+ /* path points to leaf/index in inode body */
+ /* we use in-core data, no need to protect them */
+- return 0;
++ return err;
+ }
+
+ /*
+@@ -160,6 +171,9 @@ int __ext4_ext_dirty(const char *where,
+ /* path points to block */
+ err = __ext4_handle_dirty_metadata(where, line, handle,
+ inode, path->p_bh);
++ /* Extents updating done, re-set verified flag */
++ if (!err)
++ set_buffer_verified(path->p_bh);
+ } else {
+ /* path points to leaf/index in inode body */
+ err = ext4_mark_inode_dirty(handle, inode);
--- /dev/null
+From fbf6c73c5b264c25484fa9f449b5546569fe11f0 Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 16 Oct 2019 17:51:10 +0100
+Subject: ftrace: add ftrace_init_nop()
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit fbf6c73c5b264c25484fa9f449b5546569fe11f0 upstream.
+
+Architectures may need to perform special initialization of ftrace
+callsites, and today they do so by special-casing ftrace_make_nop() when
+the expected branch address is MCOUNT_ADDR. In some cases (e.g. for
+patchable-function-entry), we don't have an mcount-like symbol and don't
+want a synthetic MCOUNT_ADDR, but we may need to perform some
+initialization of callsites.
+
+To make it possible to separate initialization from runtime
+modification, and to handle cases without an mcount-like symbol, this
+patch adds an optional ftrace_init_nop() function that architectures can
+implement, which does not pass a branch address.
+
+Where an architecture does not provide ftrace_init_nop(), we will fall
+back to the existing behaviour of calling ftrace_make_nop() with
+MCOUNT_ADDR.
+
+At the same time, ftrace_code_disable() is renamed to
+ftrace_nop_initialize() to make it clearer that it is intended to
+initialize a callsite into a disabled state, and is not for disabling a
+callsite that has been runtime enabled. The kerneldoc description of rec
+arguments is updated to cover non-mcount callsites.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Reviewed-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Miroslav Benes <mbenes@suse.cz>
+Reviewed-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Reviewed-by: Torsten Duwe <duwe@suse.de>
+Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
+Tested-by: Sven Schnelle <svens@stackframe.org>
+Tested-by: Torsten Duwe <duwe@suse.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: Stephen Boyd <swboyd@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ include/linux/ftrace.h | 35 ++++++++++++++++++++++++++++++++---
+ kernel/trace/ftrace.c | 6 +++---
+ 2 files changed, 35 insertions(+), 6 deletions(-)
+
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -499,7 +499,7 @@ static inline int ftrace_disable_ftrace_
+ /**
+ * ftrace_make_nop - convert code into nop
+ * @mod: module structure if called by module load initialization
+- * @rec: the mcount call site record
++ * @rec: the call site record (e.g. mcount/fentry)
+ * @addr: the address that the call site should be calling
+ *
+ * This is a very sensitive operation and great care needs
+@@ -520,9 +520,38 @@ static inline int ftrace_disable_ftrace_
+ extern int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr);
+
++
++/**
++ * ftrace_init_nop - initialize a nop call site
++ * @mod: module structure if called by module load initialization
++ * @rec: the call site record (e.g. mcount/fentry)
++ *
++ * This is a very sensitive operation and great care needs
++ * to be taken by the arch. The operation should carefully
++ * read the location, check to see if what is read is indeed
++ * what we expect it to be, and then on success of the compare,
++ * it should write to the location.
++ *
++ * The code segment at @rec->ip should contain the contents created by
++ * the compiler
++ *
++ * Return must be:
++ * 0 on success
++ * -EFAULT on error reading the location
++ * -EINVAL on a failed compare of the contents
++ * -EPERM on error writing to the location
++ * Any other value will be considered a failure.
++ */
++#ifndef ftrace_init_nop
++static inline int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
++{
++ return ftrace_make_nop(mod, rec, MCOUNT_ADDR);
++}
++#endif
++
+ /**
+ * ftrace_make_call - convert a nop call site into a call to addr
+- * @rec: the mcount call site record
++ * @rec: the call site record (e.g. mcount/fentry)
+ * @addr: the address that the call site should call
+ *
+ * This is a very sensitive operation and great care needs
+@@ -545,7 +574,7 @@ extern int ftrace_make_call(struct dyn_f
+ #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+ /**
+ * ftrace_modify_call - convert from one addr to another (no nop)
+- * @rec: the mcount call site record
++ * @rec: the call site record (e.g. mcount/fentry)
+ * @old_addr: the address expected to be currently called to
+ * @addr: the address to change to
+ *
+--- a/kernel/trace/ftrace.c
++++ b/kernel/trace/ftrace.c
+@@ -2520,14 +2520,14 @@ struct dyn_ftrace *ftrace_rec_iter_recor
+ }
+
+ static int
+-ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
++ftrace_nop_initialize(struct module *mod, struct dyn_ftrace *rec)
+ {
+ int ret;
+
+ if (unlikely(ftrace_disabled))
+ return 0;
+
+- ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
++ ret = ftrace_init_nop(mod, rec);
+ if (ret) {
+ ftrace_bug_type = FTRACE_BUG_INIT;
+ ftrace_bug(ret, rec);
+@@ -2969,7 +2969,7 @@ static int ftrace_update_code(struct mod
+ * to the NOP instructions.
+ */
+ if (!__is_defined(CC_USING_NOP_MCOUNT) &&
+- !ftrace_code_disable(mod, p))
++ !ftrace_nop_initialize(mod, p))
+ break;
+
+ update_cnt++;
--- /dev/null
+From bea2662e7818e15d7607d17d57912ac984275d94 Mon Sep 17 00:00:00 2001
+From: Johannes Berg <johannes.berg@intel.com>
+Date: Tue, 8 Feb 2022 11:47:30 +0100
+Subject: iwlwifi: fix use-after-free
+MIME-Version: 1.0
+Content-Type: text/plain; charset=UTF-8
+Content-Transfer-Encoding: 8bit
+
+From: Johannes Berg <johannes.berg@intel.com>
+
+commit bea2662e7818e15d7607d17d57912ac984275d94 upstream.
+
+If no firmware was present at all (or, presumably, all of the
+firmware files failed to parse), we end up unbinding by calling
+device_release_driver(), which calls remove(), which then in
+iwlwifi calls iwl_drv_stop(), freeing the 'drv' struct. However
+the new code I added will still erroneously access it after it
+was freed.
+
+Set 'failure=false' in this case to avoid the access, all data
+was already freed anyway.
+
+Cc: stable@vger.kernel.org
+Reported-by: Stefan Agner <stefan@agner.ch>
+Reported-by: Wolfgang Walter <linux@stwm.de>
+Reported-by: Jason Self <jason@bluehome.net>
+Reported-by: Dominik Behr <dominik@dominikbehr.com>
+Reported-by: Marek Marczykowski-Górecki <marmarek@invisiblethingslab.com>
+Fixes: ab07506b0454 ("iwlwifi: fix leaks/bad data after failed firmware load")
+Signed-off-by: Johannes Berg <johannes.berg@intel.com>
+Signed-off-by: Kalle Valo <kvalo@kernel.org>
+Link: https://lore.kernel.org/r/20220208114728.e6b514cf4c85.Iffb575ca2a623d7859b542c33b2a507d01554251@changeid
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ drivers/net/wireless/intel/iwlwifi/iwl-drv.c | 2 ++
+ 1 file changed, 2 insertions(+)
+
+--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
++++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+@@ -1616,6 +1616,8 @@ static void iwl_req_fw_callback(const st
+ out_unbind:
+ complete(&drv->request_firmware_complete);
+ device_release_driver(drv->trans->dev);
++ /* drv has just been freed by the release */
++ failure = false;
+ free:
+ if (failure)
+ iwl_dealloc_ucode(drv);
--- /dev/null
+From a1326b17ac03a9012cb3d01e434aacb4d67a416c Mon Sep 17 00:00:00 2001
+From: Mark Rutland <mark.rutland@arm.com>
+Date: Wed, 16 Oct 2019 18:17:11 +0100
+Subject: module/ftrace: handle patchable-function-entry
+
+From: Mark Rutland <mark.rutland@arm.com>
+
+commit a1326b17ac03a9012cb3d01e434aacb4d67a416c upstream.
+
+When using patchable-function-entry, the compiler will record the
+callsites into a section named "__patchable_function_entries" rather
+than "__mcount_loc". Let's abstract this difference behind a new
+FTRACE_CALLSITE_SECTION, so that architectures don't have to handle this
+explicitly (e.g. with custom module linker scripts).
+
+As parisc currently handles this explicitly, it is fixed up accordingly,
+with its custom linker script removed. Since FTRACE_CALLSITE_SECTION is
+only defined when DYNAMIC_FTRACE is selected, the parisc module loading
+code is updated to only use the definition in that case. When
+DYNAMIC_FTRACE is not selected, modules shouldn't have this section, so
+this removes some redundant work in that case.
+
+To make sure that this is kept up-to-date for modules and the main
+kernel, a comment is added to vmlinux.lds.h, with the existing ifdeffery
+simplified for legibility.
+
+I built parisc generic-{32,64}bit_defconfig with DYNAMIC_FTRACE enabled,
+and verified that the section made it into the .ko files for modules.
+
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Acked-by: Helge Deller <deller@gmx.de>
+Acked-by: Steven Rostedt (VMware) <rostedt@goodmis.org>
+Reviewed-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
+Reviewed-by: Torsten Duwe <duwe@suse.de>
+Tested-by: Amit Daniel Kachhap <amit.kachhap@arm.com>
+Tested-by: Sven Schnelle <svens@stackframe.org>
+Tested-by: Torsten Duwe <duwe@suse.de>
+Cc: Ingo Molnar <mingo@redhat.com>
+Cc: James E.J. Bottomley <James.Bottomley@HansenPartnership.com>
+Cc: Jessica Yu <jeyu@kernel.org>
+Cc: linux-parisc@vger.kernel.org
+Cc: Stephen Boyd <swboyd@chromium.org>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ arch/parisc/Makefile | 1 -
+ arch/parisc/kernel/module.c | 10 +++++++---
+ arch/parisc/kernel/module.lds | 7 -------
+ include/asm-generic/vmlinux.lds.h | 14 +++++++-------
+ include/linux/ftrace.h | 5 +++++
+ kernel/module.c | 2 +-
+ 6 files changed, 20 insertions(+), 19 deletions(-)
+ delete mode 100644 arch/parisc/kernel/module.lds
+
+--- a/arch/parisc/Makefile
++++ b/arch/parisc/Makefile
+@@ -65,7 +65,6 @@ KBUILD_CFLAGS += -DCC_USING_PATCHABLE_FU
+ -DFTRACE_PATCHABLE_FUNCTION_SIZE=$(NOP_COUNT)
+
+ CC_FLAGS_FTRACE := -fpatchable-function-entry=$(NOP_COUNT),$(shell echo $$(($(NOP_COUNT)-1)))
+-KBUILD_LDS_MODULE += $(srctree)/arch/parisc/kernel/module.lds
+ endif
+
+ OBJCOPY_FLAGS =-O binary -R .note -R .comment -S
+--- a/arch/parisc/kernel/module.c
++++ b/arch/parisc/kernel/module.c
+@@ -43,6 +43,7 @@
+ #include <linux/elf.h>
+ #include <linux/vmalloc.h>
+ #include <linux/fs.h>
++#include <linux/ftrace.h>
+ #include <linux/string.h>
+ #include <linux/kernel.h>
+ #include <linux/bug.h>
+@@ -862,7 +863,7 @@ int module_finalize(const Elf_Ehdr *hdr,
+ const char *strtab = NULL;
+ const Elf_Shdr *s;
+ char *secstrings;
+- int err, symindex = -1;
++ int symindex = -1;
+ Elf_Sym *newptr, *oldptr;
+ Elf_Shdr *symhdr = NULL;
+ #ifdef DEBUG
+@@ -946,11 +947,13 @@ int module_finalize(const Elf_Ehdr *hdr,
+ /* patch .altinstructions */
+ apply_alternatives(aseg, aseg + s->sh_size, me->name);
+
++#ifdef CONFIG_DYNAMIC_FTRACE
+ /* For 32 bit kernels we're compiling modules with
+ * -ffunction-sections so we must relocate the addresses in the
+- *__mcount_loc section.
++ * ftrace callsite section.
+ */
+- if (symindex != -1 && !strcmp(secname, "__mcount_loc")) {
++ if (symindex != -1 && !strcmp(secname, FTRACE_CALLSITE_SECTION)) {
++ int err;
+ if (s->sh_type == SHT_REL)
+ err = apply_relocate((Elf_Shdr *)sechdrs,
+ strtab, symindex,
+@@ -962,6 +965,7 @@ int module_finalize(const Elf_Ehdr *hdr,
+ if (err)
+ return err;
+ }
++#endif
+ }
+ return 0;
+ }
+--- a/arch/parisc/kernel/module.lds
++++ /dev/null
+@@ -1,7 +0,0 @@
+-/* SPDX-License-Identifier: GPL-2.0 */
+-
+-SECTIONS {
+- __mcount_loc : {
+- *(__patchable_function_entries)
+- }
+-}
+--- a/include/asm-generic/vmlinux.lds.h
++++ b/include/asm-generic/vmlinux.lds.h
+@@ -110,17 +110,17 @@
+ #endif
+
+ #ifdef CONFIG_FTRACE_MCOUNT_RECORD
+-#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
+-#define MCOUNT_REC() . = ALIGN(8); \
+- __start_mcount_loc = .; \
+- KEEP(*(__patchable_function_entries)) \
+- __stop_mcount_loc = .;
+-#else
++/*
++ * The ftrace call sites are logged to a section whose name depends on the
++ * compiler option used. A given kernel image will only use one, AKA
++ * FTRACE_CALLSITE_SECTION. We capture all of them here to avoid header
++ * dependencies for FTRACE_CALLSITE_SECTION's definition.
++ */
+ #define MCOUNT_REC() . = ALIGN(8); \
+ __start_mcount_loc = .; \
+ KEEP(*(__mcount_loc)) \
++ KEEP(*(__patchable_function_entries)) \
+ __stop_mcount_loc = .;
+-#endif
+ #else
+ #define MCOUNT_REC()
+ #endif
+--- a/include/linux/ftrace.h
++++ b/include/linux/ftrace.h
+@@ -738,6 +738,11 @@ static inline unsigned long get_lock_par
+
+ #ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ extern void ftrace_init(void);
++#ifdef CC_USING_PATCHABLE_FUNCTION_ENTRY
++#define FTRACE_CALLSITE_SECTION "__patchable_function_entries"
++#else
++#define FTRACE_CALLSITE_SECTION "__mcount_loc"
++#endif
+ #else
+ static inline void ftrace_init(void) { }
+ #endif
+--- a/kernel/module.c
++++ b/kernel/module.c
+@@ -3377,7 +3377,7 @@ static int find_module_sections(struct m
+ #endif
+ #ifdef CONFIG_FTRACE_MCOUNT_RECORD
+ /* sechdrs[0].sh_size is always zero */
+- mod->ftrace_callsites = section_objs(info, "__mcount_loc",
++ mod->ftrace_callsites = section_objs(info, FTRACE_CALLSITE_SECTION,
+ sizeof(*mod->ftrace_callsites),
+ &mod->num_ftrace_callsites);
+ #endif
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
---
- include/linux/sched.h | 1 -
- kernel/async.c | 3 ---
- kernel/module.c | 25 +++++--------------------
+ include/linux/sched.h | 1 -
+ kernel/async.c | 3 ---
+ kernel/module.c | 25 +++++--------------------
3 files changed, 5 insertions(+), 24 deletions(-)
-diff --git a/include/linux/sched.h b/include/linux/sched.h
-index afee5d5eb9458..b341471de9d60 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1454,7 +1454,6 @@ extern struct pid *cad_pid;
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
#define PF_KSWAPD 0x00020000 /* I am kswapd */
-diff --git a/kernel/async.c b/kernel/async.c
-index 4f9c1d6140168..74660f611b97d 100644
--- a/kernel/async.c
+++ b/kernel/async.c
-@@ -205,9 +205,6 @@ async_cookie_t async_schedule_node_domain(async_func_t func, void *data,
+@@ -205,9 +205,6 @@ async_cookie_t async_schedule_node_domai
atomic_inc(&entry_count);
spin_unlock_irqrestore(&async_lock, flags);
/* schedule for execution */
queue_work_node(node, system_unbound_wq, &entry->work);
-diff --git a/kernel/module.c b/kernel/module.c
-index 59d487b8d8dad..e7656cf1652c9 100644
--- a/kernel/module.c
+++ b/kernel/module.c
-@@ -3711,12 +3711,6 @@ static noinline int do_init_module(struct module *mod)
+@@ -3711,12 +3711,6 @@ static noinline int do_init_module(struc
}
freeinit->module_init = mod->init_layout.base;
do_mod_ctors(mod);
/* Start the module */
if (mod->init != NULL)
-@@ -3742,22 +3736,13 @@ static noinline int do_init_module(struct module *mod)
+@@ -3742,22 +3736,13 @@ static noinline int do_init_module(struc
/*
* We need to finish all async code before the module init sequence
async_synchronize_full();
ftrace_free_mem(mod, mod->init_layout.base, mod->init_layout.base +
---
-2.34.1
-
nvme-rdma-fix-possible-use-after-free-in-transport-e.patch
drm-amdgpu-fix-logic-inversion-in-check.patch
revert-module-async-async_synchronize_full-on-module.patch
+ftrace-add-ftrace_init_nop.patch
+module-ftrace-handle-patchable-function-entry.patch
+arm64-module-rework-special-section-handling.patch
+arm64-module-ftrace-intialize-plt-at-load-time.patch
+iwlwifi-fix-use-after-free.patch
+drm-radeon-fix-backlight-control-on-imac-12-1.patch
+ext4-check-for-out-of-order-index-extents-in-ext4_valid_extent_entries.patch
+ext4-check-for-inconsistent-extents-between-index-and-leaf-block.patch
+ext4-prevent-partial-update-of-the-extent-blocks.patch
+taskstats-cleanup-the-use-of-task-exit_code.patch
--- /dev/null
+From foo@baz Fri Feb 18 10:34:57 AM CET 2022
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+Date: Mon, 3 Jan 2022 11:32:36 -0600
+Subject: taskstats: Cleanup the use of task->exit_code
+
+From: "Eric W. Biederman" <ebiederm@xmission.com>
+
+commit 1b5a42d9c85f0e731f01c8d1129001fd8531a8a0 upstream.
+
+In the function bacct_add_task the code reading task->exit_code was
+introduced in commit f3cef7a99469 ("[PATCH] csa: basic accounting over
+taskstats"), and it is not entirely clear what the taskstats interface
+is trying to return as only returning the exit_code of the first task
+in a process doesn't make a lot of sense.
+
+As best as I can figure the intent is to return task->exit_code after
+a task exits. The field is returned with per task fields, so the
+exit_code of the entire process is not wanted. Only the value of the
+first task is returned so this is not a useful way to get the per task
+ptrace stop code. The ordinary case of returning this value is
+returning after a task exits, which also precludes use for getting
+a ptrace value.
+
+It is common for the first task of a process to also be the last
+task of a process so this field may have done something reasonable by
+accident in testing.
+
+Make ac_exitcode a reliable per task value by always returning it for
+every exited task.
+
+Setting ac_exitcode in a sensible manner makes it possible to continue
+to provide this value going forward.
+
+Cc: Balbir Singh <bsingharora@gmail.com>
+Fixes: f3cef7a99469 ("[PATCH] csa: basic accounting over taskstats")
+Link: https://lkml.kernel.org/r/20220103213312.9144-5-ebiederm@xmission.com
+Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
+[sudip: adjust context]
+Signed-off-by: Sudip Mukherjee <sudipm.mukherjee@gmail.com>
+Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
+---
+ kernel/tsacct.c | 7 +++----
+ 1 file changed, 3 insertions(+), 4 deletions(-)
+
+--- a/kernel/tsacct.c
++++ b/kernel/tsacct.c
+@@ -35,11 +35,10 @@ void bacct_add_tsk(struct user_namespace
+ /* Convert to seconds for btime */
+ do_div(delta, USEC_PER_SEC);
+ stats->ac_btime = get_seconds() - delta;
+- if (thread_group_leader(tsk)) {
++ if (tsk->flags & PF_EXITING)
+ stats->ac_exitcode = tsk->exit_code;
+- if (tsk->flags & PF_FORKNOEXEC)
+- stats->ac_flag |= AFORK;
+- }
++ if (thread_group_leader(tsk) && (tsk->flags & PF_FORKNOEXEC))
++ stats->ac_flag |= AFORK;
+ if (tsk->flags & PF_SUPERPRIV)
+ stats->ac_flag |= ASU;
+ if (tsk->flags & PF_DUMPCORE)