--- /dev/null
+From fdafacb86a2294f9cd528fd633cb908dc005330f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Nov 2024 15:37:07 +0800
+Subject: bpf: support non-r10 register spill/fill to/from stack in precision
+ tracking
+
+From: Andrii Nakryiko <andrii@kernel.org>
+
+[ Upstream commit 41f6f64e6999a837048b1bd13a2f8742964eca6b ]
+
+Use instruction (jump) history to record instructions that performed
+register spill/fill to/from stack, regardless of whether this was done
+through the read-only r10 register or any other register after copying
+r10 into it *and* potentially adjusting the offset.
+
+To make this work reliably, we push extra per-instruction flags into
+instruction history, encoding stack slot index (spi) and stack frame
+number in extra 10 bit flags we take away from prev_idx in instruction
+history. We don't touch idx field for maximum performance, as it's
+checked most frequently during backtracking.
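+
+For illustration (mirroring the insn_stack_access_*() helpers added
+below), a stack access in frame 2 touching slot 5 is encoded as 0x22a:
+
+	/* encode: INSN_F_STACK_ACCESS is BIT(9), spi occupies bits 3-8,
+	 * frame number occupies bits 0-2
+	 */
+	flags = INSN_F_STACK_ACCESS | (5 << INSN_F_SPI_SHIFT) | 2;
+					/* 0x200 | 0x28 | 0x2 == 0x22a */
+	/* decode */
+	spi = (flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;	/* 5 */
+	fr  = flags & INSN_F_FRAMENO_MASK;			/* 2 */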
+
+This change removes basically the last remaining practical limitation of
+precision backtracking logic in BPF verifier. It fixes known
+deficiencies, but also opens up new opportunities to reduce number of
+verified states, explored in the subsequent patches.
+
+There are only three differences in selftests' BPF object files
+according to veristat, all in the positive direction (fewer states).
+
+File Program Insns (A) Insns (B) Insns (DIFF) States (A) States (B) States (DIFF)
+-------------------------------------- ------------- --------- --------- ------------- ---------- ---------- -------------
+test_cls_redirect_dynptr.bpf.linked3.o cls_redirect 2987 2864 -123 (-4.12%) 240 231 -9 (-3.75%)
+xdp_synproxy_kern.bpf.linked3.o syncookie_tc 82848 82661 -187 (-0.23%) 5107 5073 -34 (-0.67%)
+xdp_synproxy_kern.bpf.linked3.o syncookie_xdp 85116 84964 -152 (-0.18%) 5162 5130 -32 (-0.62%)
+
+Note, I avoided renaming jmp_history to more generic insn_hist to
+minimize number of lines changed and potential merge conflicts between
+bpf and bpf-next trees.
+
+Notice also that the cur_hist_entry pointer is reset to NULL at the
+beginning of the instruction verification loop. This pointer avoids the
+problem of relying on the last jump history entry's insn_idx to
+determine whether we already have an entry for the current instruction.
+It can happen that we add a jump history entry because the current
+instruction is_jmp_point(), but we also need to add instruction flags
+for stack access. In this case, we don't want two entries, so we reuse
+the last added entry, if it is present.
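+
+Condensed, the reuse path in push_jmp_history() (full hunk below) is:
+
+	if (env->cur_hist_ent) {
+		/* already have an entry for this insn, just merge flags */
+		env->cur_hist_ent->flags |= insn_flags;
+		return 0;
+	}
+	/* otherwise grow jmp_history, fill in idx/prev_idx/flags and
+	 * remember the new entry in env->cur_hist_ent for later reuse
+	 */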
+
+Relying on insn_idx comparison has the same ambiguity problem as the one
+that was fixed recently in [0], so we avoid that.
+
+ [0] https://patchwork.kernel.org/project/netdevbpf/patch/20231110002638.4168352-3-andrii@kernel.org/
+
+Acked-by: Eduard Zingerman <eddyz87@gmail.com>
+Reported-by: Tao Lyu <tao.lyu@epfl.ch>
+Signed-off-by: Andrii Nakryiko <andrii@kernel.org>
+Link: https://lore.kernel.org/r/20231205184248.1502704-2-andrii@kernel.org
+Signed-off-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Shung-Hsi Yu <shung-hsi.yu@suse.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ include/linux/bpf_verifier.h | 31 +++-
+ kernel/bpf/verifier.c | 175 ++++++++++--------
+ .../bpf/progs/verifier_subprog_precision.c | 23 ++-
+ .../testing/selftests/bpf/verifier/precise.c | 38 ++--
+ 4 files changed, 169 insertions(+), 98 deletions(-)
+
+diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
+index 92919d52f7e1b..cb8e97665eaa5 100644
+--- a/include/linux/bpf_verifier.h
++++ b/include/linux/bpf_verifier.h
+@@ -319,12 +319,34 @@ struct bpf_func_state {
+ struct bpf_stack_state *stack;
+ };
+
+-struct bpf_idx_pair {
+- u32 prev_idx;
++#define MAX_CALL_FRAMES 8
++
++/* instruction history flags, used in bpf_jmp_history_entry.flags field */
++enum {
++ /* instruction references stack slot through PTR_TO_STACK register;
++ * we also store stack's frame number in lower 3 bits (MAX_CALL_FRAMES is 8)
++ * and accessed stack slot's index in next 6 bits (MAX_BPF_STACK is 512,
++ * 8 bytes per slot, so slot index (spi) is [0, 63])
++ */
++ INSN_F_FRAMENO_MASK = 0x7, /* 3 bits */
++
++ INSN_F_SPI_MASK = 0x3f, /* 6 bits */
++ INSN_F_SPI_SHIFT = 3, /* shifted 3 bits to the left */
++
++ INSN_F_STACK_ACCESS = BIT(9), /* we need 10 bits total */
++};
++
++static_assert(INSN_F_FRAMENO_MASK + 1 >= MAX_CALL_FRAMES);
++static_assert(INSN_F_SPI_MASK + 1 >= MAX_BPF_STACK / 8);
++
++struct bpf_jmp_history_entry {
+ u32 idx;
++ /* insn idx can't be bigger than 1 million */
++ u32 prev_idx : 22;
++ /* special flags, e.g., whether insn is doing register stack spill/load */
++ u32 flags : 10;
+ };
+
+-#define MAX_CALL_FRAMES 8
+ /* Maximum number of register states that can exist at once */
+ #define BPF_ID_MAP_SIZE ((MAX_BPF_REG + MAX_BPF_STACK / BPF_REG_SIZE) * MAX_CALL_FRAMES)
+ struct bpf_verifier_state {
+@@ -407,7 +429,7 @@ struct bpf_verifier_state {
+ * For most states jmp_history_cnt is [0-3].
+ * For loops can go up to ~40.
+ */
+- struct bpf_idx_pair *jmp_history;
++ struct bpf_jmp_history_entry *jmp_history;
+ u32 jmp_history_cnt;
+ u32 dfs_depth;
+ u32 callback_unroll_depth;
+@@ -640,6 +662,7 @@ struct bpf_verifier_env {
+ int cur_stack;
+ } cfg;
+ struct backtrack_state bt;
++ struct bpf_jmp_history_entry *cur_hist_ent;
+ u32 pass_cnt; /* number of times do_check() was called */
+ u32 subprog_cnt;
+ /* number of instructions analyzed by the verifier */
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 4f19a091571bb..5ca02af3a8728 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -1762,8 +1762,8 @@ static int copy_verifier_state(struct bpf_verifier_state *dst_state,
+ int i, err;
+
+ dst_state->jmp_history = copy_array(dst_state->jmp_history, src->jmp_history,
+- src->jmp_history_cnt, sizeof(struct bpf_idx_pair),
+- GFP_USER);
++ src->jmp_history_cnt, sizeof(*dst_state->jmp_history),
++ GFP_USER);
+ if (!dst_state->jmp_history)
+ return -ENOMEM;
+ dst_state->jmp_history_cnt = src->jmp_history_cnt;
+@@ -3397,6 +3397,21 @@ static int check_reg_arg(struct bpf_verifier_env *env, u32 regno,
+ return __check_reg_arg(env, state->regs, regno, t);
+ }
+
++static int insn_stack_access_flags(int frameno, int spi)
++{
++ return INSN_F_STACK_ACCESS | (spi << INSN_F_SPI_SHIFT) | frameno;
++}
++
++static int insn_stack_access_spi(int insn_flags)
++{
++ return (insn_flags >> INSN_F_SPI_SHIFT) & INSN_F_SPI_MASK;
++}
++
++static int insn_stack_access_frameno(int insn_flags)
++{
++ return insn_flags & INSN_F_FRAMENO_MASK;
++}
++
+ static void mark_jmp_point(struct bpf_verifier_env *env, int idx)
+ {
+ env->insn_aux_data[idx].jmp_point = true;
+@@ -3408,28 +3423,51 @@ static bool is_jmp_point(struct bpf_verifier_env *env, int insn_idx)
+ }
+
+ /* for any branch, call, exit record the history of jmps in the given state */
+-static int push_jmp_history(struct bpf_verifier_env *env,
+- struct bpf_verifier_state *cur)
++static int push_jmp_history(struct bpf_verifier_env *env, struct bpf_verifier_state *cur,
++ int insn_flags)
+ {
+ u32 cnt = cur->jmp_history_cnt;
+- struct bpf_idx_pair *p;
++ struct bpf_jmp_history_entry *p;
+ size_t alloc_size;
+
+- if (!is_jmp_point(env, env->insn_idx))
++ /* combine instruction flags if we already recorded this instruction */
++ if (env->cur_hist_ent) {
++ /* atomic instructions push insn_flags twice, for READ and
++ * WRITE sides, but they should agree on stack slot
++ */
++ WARN_ONCE((env->cur_hist_ent->flags & insn_flags) &&
++ (env->cur_hist_ent->flags & insn_flags) != insn_flags,
++ "verifier insn history bug: insn_idx %d cur flags %x new flags %x\n",
++ env->insn_idx, env->cur_hist_ent->flags, insn_flags);
++ env->cur_hist_ent->flags |= insn_flags;
+ return 0;
++ }
+
+ cnt++;
+ alloc_size = kmalloc_size_roundup(size_mul(cnt, sizeof(*p)));
+ p = krealloc(cur->jmp_history, alloc_size, GFP_USER);
+ if (!p)
+ return -ENOMEM;
+- p[cnt - 1].idx = env->insn_idx;
+- p[cnt - 1].prev_idx = env->prev_insn_idx;
+ cur->jmp_history = p;
++
++ p = &cur->jmp_history[cnt - 1];
++ p->idx = env->insn_idx;
++ p->prev_idx = env->prev_insn_idx;
++ p->flags = insn_flags;
+ cur->jmp_history_cnt = cnt;
++ env->cur_hist_ent = p;
++
+ return 0;
+ }
+
++static struct bpf_jmp_history_entry *get_jmp_hist_entry(struct bpf_verifier_state *st,
++ u32 hist_end, int insn_idx)
++{
++ if (hist_end > 0 && st->jmp_history[hist_end - 1].idx == insn_idx)
++ return &st->jmp_history[hist_end - 1];
++ return NULL;
++}
++
+ /* Backtrack one insn at a time. If idx is not at the top of recorded
+ * history then previous instruction came from straight line execution.
+ * Return -ENOENT if we exhausted all instructions within given state.
+@@ -3591,9 +3629,14 @@ static inline bool bt_is_reg_set(struct backtrack_state *bt, u32 reg)
+ return bt->reg_masks[bt->frame] & (1 << reg);
+ }
+
++static inline bool bt_is_frame_slot_set(struct backtrack_state *bt, u32 frame, u32 slot)
++{
++ return bt->stack_masks[frame] & (1ull << slot);
++}
++
+ static inline bool bt_is_slot_set(struct backtrack_state *bt, u32 slot)
+ {
+- return bt->stack_masks[bt->frame] & (1ull << slot);
++ return bt_is_frame_slot_set(bt, bt->frame, slot);
+ }
+
+ /* format registers bitmask, e.g., "r0,r2,r4" for 0x15 mask */
+@@ -3647,7 +3690,7 @@ static bool calls_callback(struct bpf_verifier_env *env, int insn_idx);
+ * - *was* processed previously during backtracking.
+ */
+ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+- struct backtrack_state *bt)
++ struct bpf_jmp_history_entry *hist, struct backtrack_state *bt)
+ {
+ const struct bpf_insn_cbs cbs = {
+ .cb_call = disasm_kfunc_name,
+@@ -3660,7 +3703,7 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ u8 mode = BPF_MODE(insn->code);
+ u32 dreg = insn->dst_reg;
+ u32 sreg = insn->src_reg;
+- u32 spi, i;
++ u32 spi, i, fr;
+
+ if (insn->code == 0)
+ return 0;
+@@ -3723,20 +3766,15 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ * by 'precise' mark in corresponding register of this state.
+ * No further tracking necessary.
+ */
+- if (insn->src_reg != BPF_REG_FP)
++ if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
+ return 0;
+-
+ /* dreg = *(u64 *)[fp - off] was a fill from the stack.
+ * that [fp - off] slot contains scalar that needs to be
+ * tracked with precision
+ */
+- spi = (-insn->off - 1) / BPF_REG_SIZE;
+- if (spi >= 64) {
+- verbose(env, "BUG spi %d\n", spi);
+- WARN_ONCE(1, "verifier backtracking bug");
+- return -EFAULT;
+- }
+- bt_set_slot(bt, spi);
++ spi = insn_stack_access_spi(hist->flags);
++ fr = insn_stack_access_frameno(hist->flags);
++ bt_set_frame_slot(bt, fr, spi);
+ } else if (class == BPF_STX || class == BPF_ST) {
+ if (bt_is_reg_set(bt, dreg))
+ /* stx & st shouldn't be using _scalar_ dst_reg
+@@ -3745,17 +3783,13 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ */
+ return -ENOTSUPP;
+ /* scalars can only be spilled into stack */
+- if (insn->dst_reg != BPF_REG_FP)
++ if (!hist || !(hist->flags & INSN_F_STACK_ACCESS))
+ return 0;
+- spi = (-insn->off - 1) / BPF_REG_SIZE;
+- if (spi >= 64) {
+- verbose(env, "BUG spi %d\n", spi);
+- WARN_ONCE(1, "verifier backtracking bug");
+- return -EFAULT;
+- }
+- if (!bt_is_slot_set(bt, spi))
++ spi = insn_stack_access_spi(hist->flags);
++ fr = insn_stack_access_frameno(hist->flags);
++ if (!bt_is_frame_slot_set(bt, fr, spi))
+ return 0;
+- bt_clear_slot(bt, spi);
++ bt_clear_frame_slot(bt, fr, spi);
+ if (class == BPF_STX)
+ bt_set_reg(bt, sreg);
+ } else if (class == BPF_JMP || class == BPF_JMP32) {
+@@ -3799,10 +3833,14 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+- /* we don't track register spills perfectly,
+- * so fallback to force-precise instead of failing */
+- if (bt_stack_mask(bt) != 0)
+- return -ENOTSUPP;
++ /* we are now tracking register spills correctly,
++ * so any instance of leftover slots is a bug
++ */
++ if (bt_stack_mask(bt) != 0) {
++ verbose(env, "BUG stack slots %llx\n", bt_stack_mask(bt));
++ WARN_ONCE(1, "verifier backtracking bug (subprog leftover stack slots)");
++ return -EFAULT;
++ }
+ /* propagate r1-r5 to the caller */
+ for (i = BPF_REG_1; i <= BPF_REG_5; i++) {
+ if (bt_is_reg_set(bt, i)) {
+@@ -3827,8 +3865,11 @@ static int backtrack_insn(struct bpf_verifier_env *env, int idx, int subseq_idx,
+ WARN_ONCE(1, "verifier backtracking bug");
+ return -EFAULT;
+ }
+- if (bt_stack_mask(bt) != 0)
+- return -ENOTSUPP;
++ if (bt_stack_mask(bt) != 0) {
++ verbose(env, "BUG stack slots %llx\n", bt_stack_mask(bt));
++ WARN_ONCE(1, "verifier backtracking bug (callback leftover stack slots)");
++ return -EFAULT;
++ }
+ /* clear r1-r5 in callback subprog's mask */
+ for (i = BPF_REG_1; i <= BPF_REG_5; i++)
+ bt_clear_reg(bt, i);
+@@ -4265,6 +4306,7 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ for (;;) {
+ DECLARE_BITMAP(mask, 64);
+ u32 history = st->jmp_history_cnt;
++ struct bpf_jmp_history_entry *hist;
+
+ if (env->log.level & BPF_LOG_LEVEL2) {
+ verbose(env, "mark_precise: frame%d: last_idx %d first_idx %d subseq_idx %d \n",
+@@ -4328,7 +4370,8 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ err = 0;
+ skip_first = false;
+ } else {
+- err = backtrack_insn(env, i, subseq_idx, bt);
++ hist = get_jmp_hist_entry(st, history, i);
++ err = backtrack_insn(env, i, subseq_idx, hist, bt);
+ }
+ if (err == -ENOTSUPP) {
+ mark_all_scalars_precise(env, env->cur_state);
+@@ -4381,22 +4424,10 @@ static int __mark_chain_precision(struct bpf_verifier_env *env, int regno)
+ bitmap_from_u64(mask, bt_frame_stack_mask(bt, fr));
+ for_each_set_bit(i, mask, 64) {
+ if (i >= func->allocated_stack / BPF_REG_SIZE) {
+- /* the sequence of instructions:
+- * 2: (bf) r3 = r10
+- * 3: (7b) *(u64 *)(r3 -8) = r0
+- * 4: (79) r4 = *(u64 *)(r10 -8)
+- * doesn't contain jmps. It's backtracked
+- * as a single block.
+- * During backtracking insn 3 is not recognized as
+- * stack access, so at the end of backtracking
+- * stack slot fp-8 is still marked in stack_mask.
+- * However the parent state may not have accessed
+- * fp-8 and it's "unallocated" stack space.
+- * In such case fallback to conservative.
+- */
+- mark_all_scalars_precise(env, env->cur_state);
+- bt_reset(bt);
+- return 0;
++ verbose(env, "BUG backtracking (stack slot %d, total slots %d)\n",
++ i, func->allocated_stack / BPF_REG_SIZE);
++ WARN_ONCE(1, "verifier backtracking bug (stack slot out of bounds)");
++ return -EFAULT;
+ }
+
+ if (!is_spilled_scalar_reg(&func->stack[i])) {
+@@ -4561,7 +4592,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE, err;
+ struct bpf_insn *insn = &env->prog->insnsi[insn_idx];
+ struct bpf_reg_state *reg = NULL;
+- u32 dst_reg = insn->dst_reg;
++ int insn_flags = insn_stack_access_flags(state->frameno, spi);
+
+ /* caller checked that off % size == 0 and -MAX_BPF_STACK <= off < 0,
+ * so it's aligned access and [off, off + size) are within stack limits
+@@ -4599,17 +4630,6 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ mark_stack_slot_scratched(env, spi);
+ if (reg && !(off % BPF_REG_SIZE) && register_is_bounded(reg) &&
+ !register_is_null(reg) && env->bpf_capable) {
+- if (dst_reg != BPF_REG_FP) {
+- /* The backtracking logic can only recognize explicit
+- * stack slot address like [fp - 8]. Other spill of
+- * scalar via different register has to be conservative.
+- * Backtrack from here and mark all registers as precise
+- * that contributed into 'reg' being a constant.
+- */
+- err = mark_chain_precision(env, value_regno);
+- if (err)
+- return err;
+- }
+ save_register_state(state, spi, reg, size);
+ /* Break the relation on a narrowing spill. */
+ if (fls64(reg->umax_value) > BITS_PER_BYTE * size)
+@@ -4621,6 +4641,7 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+ __mark_reg_known(&fake_reg, insn->imm);
+ fake_reg.type = SCALAR_VALUE;
+ save_register_state(state, spi, &fake_reg, size);
++ insn_flags = 0; /* not a register spill */
+ } else if (reg && is_spillable_regtype(reg->type)) {
+ /* register containing pointer is being spilled into stack */
+ if (size != BPF_REG_SIZE) {
+@@ -4666,9 +4687,12 @@ static int check_stack_write_fixed_off(struct bpf_verifier_env *env,
+
+ /* Mark slots affected by this stack write. */
+ for (i = 0; i < size; i++)
+- state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] =
+- type;
++ state->stack[spi].slot_type[(slot - i) % BPF_REG_SIZE] = type;
++ insn_flags = 0; /* not a register spill */
+ }
++
++ if (insn_flags)
++ return push_jmp_history(env, env->cur_state, insn_flags);
+ return 0;
+ }
+
+@@ -4857,6 +4881,7 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
+ int i, slot = -off - 1, spi = slot / BPF_REG_SIZE;
+ struct bpf_reg_state *reg;
+ u8 *stype, type;
++ int insn_flags = insn_stack_access_flags(reg_state->frameno, spi);
+
+ stype = reg_state->stack[spi].slot_type;
+ reg = ®_state->stack[spi].spilled_ptr;
+@@ -4902,12 +4927,10 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
+ return -EACCES;
+ }
+ mark_reg_unknown(env, state->regs, dst_regno);
++ insn_flags = 0; /* not restoring original register state */
+ }
+ state->regs[dst_regno].live |= REG_LIVE_WRITTEN;
+- return 0;
+- }
+-
+- if (dst_regno >= 0) {
++ } else if (dst_regno >= 0) {
+ /* restore register state from stack */
+ copy_register_state(&state->regs[dst_regno], reg);
+ /* mark reg as written since spilled pointer state likely
+@@ -4943,7 +4966,10 @@ static int check_stack_read_fixed_off(struct bpf_verifier_env *env,
+ mark_reg_read(env, reg, reg->parent, REG_LIVE_READ64);
+ if (dst_regno >= 0)
+ mark_reg_stack_read(env, reg_state, off, off + size, dst_regno);
++ insn_flags = 0; /* we are not restoring spilled register */
+ }
++ if (insn_flags)
++ return push_jmp_history(env, env->cur_state, insn_flags);
+ return 0;
+ }
+
+@@ -7027,7 +7053,6 @@ static int check_atomic(struct bpf_verifier_env *env, int insn_idx, struct bpf_i
+ BPF_SIZE(insn->code), BPF_WRITE, -1, true, false);
+ if (err)
+ return err;
+-
+ return 0;
+ }
+
+@@ -16773,7 +16798,8 @@ static int is_state_visited(struct bpf_verifier_env *env, int insn_idx)
+ * the precision needs to be propagated back in
+ * the current state.
+ */
+- err = err ? : push_jmp_history(env, cur);
++ if (is_jmp_point(env, env->insn_idx))
++ err = err ? : push_jmp_history(env, cur, 0);
+ err = err ? : propagate_precision(env, &sl->state);
+ if (err)
+ return err;
+@@ -16997,6 +17023,9 @@ static int do_check(struct bpf_verifier_env *env)
+ u8 class;
+ int err;
+
++ /* reset current history entry on each new instruction */
++ env->cur_hist_ent = NULL;
++
+ env->prev_insn_idx = prev_insn_idx;
+ if (env->insn_idx >= insn_cnt) {
+ verbose(env, "invalid insn idx %d insn_cnt %d\n",
+@@ -17036,7 +17065,7 @@ static int do_check(struct bpf_verifier_env *env)
+ }
+
+ if (is_jmp_point(env, env->insn_idx)) {
+- err = push_jmp_history(env, state);
++ err = push_jmp_history(env, state, 0);
+ if (err)
+ return err;
+ }
+diff --git a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+index f61d623b1ce8d..f87365f7599bf 100644
+--- a/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
++++ b/tools/testing/selftests/bpf/progs/verifier_subprog_precision.c
+@@ -541,11 +541,24 @@ static __u64 subprog_spill_reg_precise(void)
+
+ SEC("?raw_tp")
+ __success __log_level(2)
+-/* precision backtracking can't currently handle stack access not through r10,
+- * so we won't be able to mark stack slot fp-8 as precise, and so will
+- * fallback to forcing all as precise
+- */
+-__msg("mark_precise: frame0: falling back to forcing all scalars precise")
++__msg("10: (0f) r1 += r7")
++__msg("mark_precise: frame0: last_idx 10 first_idx 7 subseq_idx -1")
++__msg("mark_precise: frame0: regs=r7 stack= before 9: (bf) r1 = r8")
++__msg("mark_precise: frame0: regs=r7 stack= before 8: (27) r7 *= 4")
++__msg("mark_precise: frame0: regs=r7 stack= before 7: (79) r7 = *(u64 *)(r10 -8)")
++__msg("mark_precise: frame0: parent state regs= stack=-8: R0_w=2 R6_w=1 R8_rw=map_value(map=.data.vals,ks=4,vs=16) R10=fp0 fp-8_rw=P1")
++__msg("mark_precise: frame0: last_idx 18 first_idx 0 subseq_idx 7")
++__msg("mark_precise: frame0: regs= stack=-8 before 18: (95) exit")
++__msg("mark_precise: frame1: regs= stack= before 17: (0f) r0 += r2")
++__msg("mark_precise: frame1: regs= stack= before 16: (79) r2 = *(u64 *)(r1 +0)")
++__msg("mark_precise: frame1: regs= stack= before 15: (79) r0 = *(u64 *)(r10 -16)")
++__msg("mark_precise: frame1: regs= stack= before 14: (7b) *(u64 *)(r10 -16) = r2")
++__msg("mark_precise: frame1: regs= stack= before 13: (7b) *(u64 *)(r1 +0) = r2")
++__msg("mark_precise: frame1: regs=r2 stack= before 6: (85) call pc+6")
++__msg("mark_precise: frame0: regs=r2 stack= before 5: (bf) r2 = r6")
++__msg("mark_precise: frame0: regs=r6 stack= before 4: (07) r1 += -8")
++__msg("mark_precise: frame0: regs=r6 stack= before 3: (bf) r1 = r10")
++__msg("mark_precise: frame0: regs=r6 stack= before 2: (b7) r6 = 1")
+ __naked int subprog_spill_into_parent_stack_slot_precise(void)
+ {
+ asm volatile (
+diff --git a/tools/testing/selftests/bpf/verifier/precise.c b/tools/testing/selftests/bpf/verifier/precise.c
+index 0d84dd1f38b6b..8a2ff81d83508 100644
+--- a/tools/testing/selftests/bpf/verifier/precise.c
++++ b/tools/testing/selftests/bpf/verifier/precise.c
+@@ -140,10 +140,11 @@
+ .result = REJECT,
+ },
+ {
+- "precise: ST insn causing spi > allocated_stack",
++ "precise: ST zero to stack insn is supported",
+ .insns = {
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0),
++ /* not a register spill, so we stop precision propagation for R4 here */
+ BPF_ST_MEM(BPF_DW, BPF_REG_3, -8, 0),
+ BPF_LDX_MEM(BPF_DW, BPF_REG_4, BPF_REG_10, -8),
+ BPF_MOV64_IMM(BPF_REG_0, -1),
+@@ -157,11 +158,11 @@
+ mark_precise: frame0: last_idx 4 first_idx 2\
+ mark_precise: frame0: regs=r4 stack= before 4\
+ mark_precise: frame0: regs=r4 stack= before 3\
+- mark_precise: frame0: regs= stack=-8 before 2\
+- mark_precise: frame0: falling back to forcing all scalars precise\
+- force_precise: frame0: forcing r0 to be precise\
+ mark_precise: frame0: last_idx 5 first_idx 5\
+- mark_precise: frame0: parent state regs= stack=:",
++ mark_precise: frame0: parent state regs=r0 stack=:\
++ mark_precise: frame0: last_idx 4 first_idx 2\
++ mark_precise: frame0: regs=r0 stack= before 4\
++ 5: R0=-1 R4=0",
+ .result = VERBOSE_ACCEPT,
+ .retval = -1,
+ },
+@@ -169,6 +170,8 @@
+ "precise: STX insn causing spi > allocated_stack",
+ .insns = {
+ BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_get_prandom_u32),
++ /* make later reg spill more interesting by having somewhat known scalar */
++ BPF_ALU64_IMM(BPF_AND, BPF_REG_0, 0xff),
+ BPF_MOV64_REG(BPF_REG_3, BPF_REG_10),
+ BPF_JMP_IMM(BPF_JNE, BPF_REG_3, 123, 0),
+ BPF_STX_MEM(BPF_DW, BPF_REG_3, BPF_REG_0, -8),
+@@ -179,18 +182,21 @@
+ },
+ .prog_type = BPF_PROG_TYPE_XDP,
+ .flags = BPF_F_TEST_STATE_FREQ,
+- .errstr = "mark_precise: frame0: last_idx 6 first_idx 6\
++ .errstr = "mark_precise: frame0: last_idx 7 first_idx 7\
+ mark_precise: frame0: parent state regs=r4 stack=:\
+- mark_precise: frame0: last_idx 5 first_idx 3\
+- mark_precise: frame0: regs=r4 stack= before 5\
+- mark_precise: frame0: regs=r4 stack= before 4\
+- mark_precise: frame0: regs= stack=-8 before 3\
+- mark_precise: frame0: falling back to forcing all scalars precise\
+- force_precise: frame0: forcing r0 to be precise\
+- force_precise: frame0: forcing r0 to be precise\
+- force_precise: frame0: forcing r0 to be precise\
+- force_precise: frame0: forcing r0 to be precise\
+- mark_precise: frame0: last_idx 6 first_idx 6\
++ mark_precise: frame0: last_idx 6 first_idx 4\
++ mark_precise: frame0: regs=r4 stack= before 6: (b7) r0 = -1\
++ mark_precise: frame0: regs=r4 stack= before 5: (79) r4 = *(u64 *)(r10 -8)\
++ mark_precise: frame0: regs= stack=-8 before 4: (7b) *(u64 *)(r3 -8) = r0\
++ mark_precise: frame0: parent state regs=r0 stack=:\
++ mark_precise: frame0: last_idx 3 first_idx 3\
++ mark_precise: frame0: regs=r0 stack= before 3: (55) if r3 != 0x7b goto pc+0\
++ mark_precise: frame0: regs=r0 stack= before 2: (bf) r3 = r10\
++ mark_precise: frame0: regs=r0 stack= before 1: (57) r0 &= 255\
++ mark_precise: frame0: parent state regs=r0 stack=:\
++ mark_precise: frame0: last_idx 0 first_idx 0\
++ mark_precise: frame0: regs=r0 stack= before 0: (85) call bpf_get_prandom_u32#7\
++ mark_precise: frame0: last_idx 7 first_idx 7\
+ mark_precise: frame0: parent state regs= stack=:",
+ .result = VERBOSE_ACCEPT,
+ .retval = -1,
+--
+2.43.0
+
--- /dev/null
+From c3e3bcec2083c9e45a676cfccbb36f0ac397565e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Nov 2024 18:10:51 +0800
+Subject: drm/amd/display: Add NULL check for function pointer in
+ dcn32_set_output_transfer_func
+
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+
+[ Upstream commit 28574b08c70e56d34d6f6379326a860b96749051 ]
+
+This commit adds a null check for the set_output_gamma function pointer
+in the dcn32_set_output_transfer_func function. Previously,
+set_output_gamma was being checked for null, but then it was being
+dereferenced without any null check. This could lead to a null pointer
+dereference if set_output_gamma is null.
+
+To fix this, we now ensure that set_output_gamma is not null before
+dereferencing it. We do this by adding a null check for set_output_gamma
+before the call to set_output_gamma.
+
+Cc: Tom Chung <chiahsuan.chung@amd.com>
+Cc: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Cc: Roman Li <roman.li@amd.com>
+Cc: Alex Hung <alex.hung@amd.com>
+Cc: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Reviewed-by: Tom Chung <chiahsuan.chung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+index 650e1598bddcb..2289c17f6ead5 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_hwseq.c
+@@ -587,7 +587,9 @@ bool dcn32_set_output_transfer_func(struct dc *dc,
+ }
+ }
+
+- mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
++ if (mpc->funcs->set_output_gamma)
++ mpc->funcs->set_output_gamma(mpc, mpcc_id, params);
++
+ return ret;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From bd8a596346d42735b8d6641000d5206a0343bdaf Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Nov 2024 14:55:32 +0800
+Subject: drm/amd/display: Add null check for pipe_ctx->plane_state in
+ dcn20_program_pipe
+
+From: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+
+[ Upstream commit 8e4ed3cf1642df0c4456443d865cff61a9598aa8 ]
+
+This commit addresses a null pointer dereference issue in the
+`dcn20_program_pipe` function. The issue could occur when
+`pipe_ctx->plane_state` is null.
+
+The fix adds a check to ensure `pipe_ctx->plane_state` is not null
+before accessing it. This prevents a null pointer dereference.
+
+Reported by smatch:
+drivers/gpu/drm/amd/amdgpu/../display/dc/hwss/dcn20/dcn20_hwseq.c:1925 dcn20_program_pipe() error: we previously assumed 'pipe_ctx->plane_state' could be null (see line 1877)
+
+Cc: Tom Chung <chiahsuan.chung@amd.com>
+Cc: Rodrigo Siqueira <Rodrigo.Siqueira@amd.com>
+Cc: Roman Li <roman.li@amd.com>
+Cc: Alex Hung <alex.hung@amd.com>
+Cc: Aurabindo Pillai <aurabindo.pillai@amd.com>
+Cc: Harry Wentland <harry.wentland@amd.com>
+Cc: Hamza Mahfooz <hamza.mahfooz@amd.com>
+Signed-off-by: Srinivasan Shanmugam <srinivasan.shanmugam@amd.com>
+Reviewed-by: Tom Chung <chiahsuan.chung@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+[Xiangyu: BP to fix CVE: CVE-2024-49914, modified the file path from
+drivers/gpu/drm/amd/amdgpu/../display/dc/hwss/dcn20/dcn20_hwseq.c to
+drivers/gpu/drm/amd/amdgpu/../display/dc/dcn20/dcn20_hwseq.c
+and minor conflict resolution]
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../drm/amd/display/dc/dcn20/dcn20_hwseq.c | 22 ++++++++++++-------
+ 1 file changed, 14 insertions(+), 8 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+index 12af2859002f7..cd1d1b7283ab9 100644
+--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
++++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_hwseq.c
+@@ -1732,17 +1732,22 @@ static void dcn20_program_pipe(
+ dc->res_pool->hubbub->funcs->program_det_size(
+ dc->res_pool->hubbub, pipe_ctx->plane_res.hubp->inst, pipe_ctx->det_buffer_size_kb);
+
+- if (pipe_ctx->update_flags.raw || pipe_ctx->plane_state->update_flags.raw || pipe_ctx->stream->update_flags.raw)
++ if (pipe_ctx->update_flags.raw ||
++ (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.raw) ||
++ pipe_ctx->stream->update_flags.raw)
+ dcn20_update_dchubp_dpp(dc, pipe_ctx, context);
+
+- if (pipe_ctx->update_flags.bits.enable
+- || pipe_ctx->plane_state->update_flags.bits.hdr_mult)
++ if (pipe_ctx->update_flags.bits.enable ||
++ (pipe_ctx->plane_state && pipe_ctx->plane_state->update_flags.bits.hdr_mult))
+ hws->funcs.set_hdr_multiplier(pipe_ctx);
+
+ if (pipe_ctx->update_flags.bits.enable ||
+- pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change ||
+- pipe_ctx->plane_state->update_flags.bits.gamma_change ||
+- pipe_ctx->plane_state->update_flags.bits.lut_3d)
++ (pipe_ctx->plane_state &&
++ pipe_ctx->plane_state->update_flags.bits.in_transfer_func_change) ||
++ (pipe_ctx->plane_state &&
++ pipe_ctx->plane_state->update_flags.bits.gamma_change) ||
++ (pipe_ctx->plane_state &&
++ pipe_ctx->plane_state->update_flags.bits.lut_3d))
+ hws->funcs.set_input_transfer_func(dc, pipe_ctx, pipe_ctx->plane_state);
+
+ /* dcn10_translate_regamma_to_hw_format takes 750us to finish
+@@ -1752,7 +1757,8 @@ static void dcn20_program_pipe(
+ if (pipe_ctx->update_flags.bits.enable ||
+ pipe_ctx->update_flags.bits.plane_changed ||
+ pipe_ctx->stream->update_flags.bits.out_tf ||
+- pipe_ctx->plane_state->update_flags.bits.output_tf_change)
++ (pipe_ctx->plane_state &&
++ pipe_ctx->plane_state->update_flags.bits.output_tf_change))
+ hws->funcs.set_output_transfer_func(dc, pipe_ctx, pipe_ctx->stream);
+
+ /* If the pipe has been enabled or has a different opp, we
+@@ -1776,7 +1782,7 @@ static void dcn20_program_pipe(
+ }
+
+ /* Set ABM pipe after other pipe configurations done */
+- if (pipe_ctx->plane_state->visible) {
++ if ((pipe_ctx->plane_state && pipe_ctx->plane_state->visible)) {
+ if (pipe_ctx->stream_res.abm) {
+ dc->hwss.set_pipe(pipe_ctx);
+ pipe_ctx->stream_res.abm->funcs->set_abm_level(pipe_ctx->stream_res.abm,
+--
+2.43.0
+
--- /dev/null
+From 1e6ebb31c85093a8b72d88020bf008502747e5c0 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Nov 2024 19:23:26 +0800
+Subject: drm/amd/display: Check null-initialized variables
+
+From: Alex Hung <alex.hung@amd.com>
+
+[ Upstream commit 367cd9ceba1933b63bc1d87d967baf6d9fd241d2 ]
+
+[WHAT & HOW]
+drr_timing and subvp_pipe are initialized to null and they are not
+always assigned new values. It is necessary to check for null before
+dereferencing.
+
+This fixes 2 FORWARD_NULL issues reported by Coverity.
+
+Reviewed-by: Nevenko Stupar <nevenko.stupar@amd.com>
+Reviewed-by: Rodrigo Siqueira <rodrigo.siqueira@amd.com>
+Signed-off-by: Jerry Zuo <jerry.zuo@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c | 7 ++++---
+ 1 file changed, 4 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+index 3d82cbef12740..ac6357c089e70 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+@@ -932,8 +932,9 @@ static bool subvp_drr_schedulable(struct dc *dc, struct dc_state *context)
+ * for VBLANK: (VACTIVE region of the SubVP pipe can fit the MALL prefetch, VBLANK frame time,
+ * and the max of (VBLANK blanking time, MALL region)).
+ */
+- if (stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
+- subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
++ if (drr_timing &&
++ stretched_drr_us < (1 / (double)drr_timing->min_refresh_in_uhz) * 1000000 * 1000000 &&
++ subvp_active_us - prefetch_us - stretched_drr_us - max_vblank_mallregion > 0)
+ schedulable = true;
+
+ return schedulable;
+@@ -995,7 +996,7 @@ static bool subvp_vblank_schedulable(struct dc *dc, struct dc_state *context)
+ if (!subvp_pipe && pipe->stream->mall_stream_config.type == SUBVP_MAIN)
+ subvp_pipe = pipe;
+ }
+- if (found) {
++ if (found && subvp_pipe) {
+ main_timing = &subvp_pipe->stream->timing;
+ phantom_timing = &subvp_pipe->stream->mall_stream_config.paired_stream->timing;
+ vblank_timing = &context->res_ctx.pipe_ctx[vblank_index].stream->timing;
+--
+2.43.0
+
--- /dev/null
+From 35c0acf266f0007bed3dd8f79554a47ccca2f7a9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Nov 2024 09:34:03 +0800
+Subject: drm/amd/display: Don't refer to dc_sink in is_dsc_need_re_compute
+
+From: Wayne Lin <wayne.lin@amd.com>
+
+[ Upstream commit fcf6a49d79923a234844b8efe830a61f3f0584e4 ]
+
+[Why]
+When unplugging one of the monitors connected behind an MST hub, a null pointer
+dereference is encountered.
+
+This is because dc_sink is released immediately in early_unregister() or
+detect_ctx(). Committing a new state that directly refers to info stored in
+dc_sink then causes the null pointer dereference.
+
+[How]
+Remove the redundant checking condition; the relevant condition is already
+covered by checking whether dsc_aux is null. Also reset dsc_aux to NULL when
+the connector is disconnected.
+
+Reviewed-by: Jerry Zuo <jerry.zuo@amd.com>
+Acked-by: Zaeem Mohamed <zaeem.mohamed@amd.com>
+Signed-off-by: Wayne Lin <wayne.lin@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+[ Resolve minor conflicts ]
+Signed-off-by: Bin Lan <bin.lan.cn@windriver.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 4 ++++
+ 1 file changed, 4 insertions(+)
+
+diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+index d390e3d62e56e..9ec9792f115a8 100644
+--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
++++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+@@ -179,6 +179,8 @@ amdgpu_dm_mst_connector_early_unregister(struct drm_connector *connector)
+ dc_sink_release(dc_sink);
+ aconnector->dc_sink = NULL;
+ aconnector->edid = NULL;
++ aconnector->dsc_aux = NULL;
++ port->passthrough_aux = NULL;
+ }
+
+ aconnector->mst_status = MST_STATUS_DEFAULT;
+@@ -487,6 +489,8 @@ dm_dp_mst_detect(struct drm_connector *connector,
+ dc_sink_release(aconnector->dc_sink);
+ aconnector->dc_sink = NULL;
+ aconnector->edid = NULL;
++ aconnector->dsc_aux = NULL;
++ port->passthrough_aux = NULL;
+
+ amdgpu_dm_set_mst_status(&aconnector->mst_status,
+ MST_REMOTE_EDID | MST_ALLOCATE_NEW_PAYLOAD | MST_CLEAR_ALLOCATED_PAYLOAD,
+--
+2.43.0
+
--- /dev/null
+From 745672b2a85b15de2a2ce36ca72d80103a3076c6 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Nov 2024 17:36:04 +0800
+Subject: drm/amd/display: Initialize denominators' default to 1
+
+From: Alex Hung <alex.hung@amd.com>
+
+[ Upstream commit b995c0a6de6c74656a0c39cd57a0626351b13e3c ]
+
+[WHAT & HOW]
+Variables used as denominators that may not be assigned other values
+should not be 0. Change their default to 1 so they are never 0.
+
+This fixes 10 DIVIDE_BY_ZERO issues reported by Coverity.
+
+Reviewed-by: Harry Wentland <harry.wentland@amd.com>
+Signed-off-by: Jerry Zuo <jerry.zuo@amd.com>
+Signed-off-by: Alex Hung <alex.hung@amd.com>
+Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
+Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
+[Xiangyu: Bp to fix CVE: CVE-2024-49899
+Discarded the dml2_core/dml2_core_shared.c change because that file does not exist]
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ .../gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c | 2 +-
+ drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c | 2 +-
+ 2 files changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+index 548cdef8a8ade..543ce9a08cfd3 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c
+@@ -78,7 +78,7 @@ static void calculate_ttu_cursor(struct display_mode_lib *mode_lib,
+
+ static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
+ {
+- unsigned int ret_val = 0;
++ unsigned int ret_val = 1;
+
+ if (source_format == dm_444_16) {
+ if (!is_chroma)
+diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+index 3df559c591f89..70df992f859d7 100644
+--- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
++++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c
+@@ -39,7 +39,7 @@
+
+ static unsigned int get_bytes_per_element(enum source_format_class source_format, bool is_chroma)
+ {
+- unsigned int ret_val = 0;
++ unsigned int ret_val = 1;
+
+ if (source_format == dm_444_16) {
+ if (!is_chroma)
+--
+2.43.0
+
--- /dev/null
+From 72ce31d4479ba5ad80350438c3ee912b04b2f97e Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Nov 2024 16:04:01 +0800
+Subject: fs/inode: Prevent dump_mapping() accessing invalid dentry.d_name.name
+
+From: Li Zhijian <lizhijian@fujitsu.com>
+
+[ Upstream commit 7f7b850689ac06a62befe26e1fd1806799e7f152 ]
+
+It's observed that a crash occurs during hot-remove a memory device,
+in which user is accessing the hugetlb. See calltrace as following:
+
+------------[ cut here ]------------
+WARNING: CPU: 1 PID: 14045 at arch/x86/mm/fault.c:1278 do_user_addr_fault+0x2a0/0x790
+Modules linked in: kmem device_dax cxl_mem cxl_pmem cxl_port cxl_pci dax_hmem dax_pmem nd_pmem cxl_acpi nd_btt cxl_core crc32c_intel nvme virtiofs fuse nvme_core nfit libnvdimm dm_multipath scsi_dh_rdac scsi_dh_emc s
+mirror dm_region_hash dm_log dm_mod
+CPU: 1 PID: 14045 Comm: daxctl Not tainted 6.10.0-rc2-lizhijian+ #492
+Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
+RIP: 0010:do_user_addr_fault+0x2a0/0x790
+Code: 48 8b 00 a8 04 0f 84 b5 fe ff ff e9 1c ff ff ff 4c 89 e9 4c 89 e2 be 01 00 00 00 bf 02 00 00 00 e8 b5 ef 24 00 e9 42 fe ff ff <0f> 0b 48 83 c4 08 4c 89 ea 48 89 ee 4c 89 e7 5b 5d 41 5c 41 5d 41
+RSP: 0000:ffffc90000a575f0 EFLAGS: 00010046
+RAX: ffff88800c303600 RBX: 0000000000000000 RCX: 0000000000000000
+RDX: 0000000000001000 RSI: ffffffff82504162 RDI: ffffffff824b2c36
+RBP: 0000000000000000 R08: 0000000000000000 R09: 0000000000000000
+R10: 0000000000000000 R11: 0000000000000000 R12: ffffc90000a57658
+R13: 0000000000001000 R14: ffff88800bc2e040 R15: 0000000000000000
+FS: 00007f51cb57d880(0000) GS:ffff88807fd00000(0000) knlGS:0000000000000000
+CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033
+CR2: 0000000000001000 CR3: 00000000072e2004 CR4: 00000000001706f0
+DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000
+DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400
+Call Trace:
+ <TASK>
+ ? __warn+0x8d/0x190
+ ? do_user_addr_fault+0x2a0/0x790
+ ? report_bug+0x1c3/0x1d0
+ ? handle_bug+0x3c/0x70
+ ? exc_invalid_op+0x14/0x70
+ ? asm_exc_invalid_op+0x16/0x20
+ ? do_user_addr_fault+0x2a0/0x790
+ ? exc_page_fault+0x31/0x200
+ exc_page_fault+0x68/0x200
+<...snip...>
+BUG: unable to handle page fault for address: 0000000000001000
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 800000000ad92067 P4D 800000000ad92067 PUD 7677067 PMD 0
+ Oops: Oops: 0000 [#1] PREEMPT SMP PTI
+ ---[ end trace 0000000000000000 ]---
+ BUG: unable to handle page fault for address: 0000000000001000
+ #PF: supervisor read access in kernel mode
+ #PF: error_code(0x0000) - not-present page
+ PGD 800000000ad92067 P4D 800000000ad92067 PUD 7677067 PMD 0
+ Oops: Oops: 0000 [#1] PREEMPT SMP PTI
+ CPU: 1 PID: 14045 Comm: daxctl Kdump: loaded Tainted: G W 6.10.0-rc2-lizhijian+ #492
+ Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS rel-1.16.3-0-ga6ed6b701f0a-prebuilt.qemu.org 04/01/2014
+ RIP: 0010:dentry_name+0x1f4/0x440
+<...snip...>
+? dentry_name+0x2fa/0x440
+vsnprintf+0x1f3/0x4f0
+vprintk_store+0x23a/0x540
+vprintk_emit+0x6d/0x330
+_printk+0x58/0x80
+dump_mapping+0x10b/0x1a0
+? __pfx_free_object_rcu+0x10/0x10
+__dump_page+0x26b/0x3e0
+? vprintk_emit+0xe0/0x330
+? _printk+0x58/0x80
+? dump_page+0x17/0x50
+dump_page+0x17/0x50
+do_migrate_range+0x2f7/0x7f0
+? do_migrate_range+0x42/0x7f0
+? offline_pages+0x2f4/0x8c0
+offline_pages+0x60a/0x8c0
+memory_subsys_offline+0x9f/0x1c0
+? lockdep_hardirqs_on+0x77/0x100
+? _raw_spin_unlock_irqrestore+0x38/0x60
+device_offline+0xe3/0x110
+state_store+0x6e/0xc0
+kernfs_fop_write_iter+0x143/0x200
+vfs_write+0x39f/0x560
+ksys_write+0x65/0xf0
+do_syscall_64+0x62/0x130
+
+Previously, some sanity checks were done in dump_mapping() before the
+print facility parsed '%pd'; even so, it's still possible to run into an
+invalid dentry.d_name.name.
+
+Since dump_mapping() only needs to dump the filename, retrieve it itself
+in a safer way to prevent an unnecessary crash.
+
+Note that whether the filename is retrieved with '%pd' or with
+strncpy_from_kernel_nofault(), it could be unreliable.
+
+Signed-off-by: Li Zhijian <lizhijian@fujitsu.com>
+Link: https://lore.kernel.org/r/20240826055503.1522320-1-lizhijian@fujitsu.com
+Reviewed-by: Jan Kara <jack@suse.cz>
+Signed-off-by: Christian Brauner <brauner@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+[Xiangyu: Bp to fix CVE: CVE-2024-49934, modified the strscpy step because 6.1/6.6
+need to pass the max len to strscpy]
+Signed-off-by: Xiangyu Chen <xiangyu.chen@windriver.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/inode.c | 10 +++++++---
+ 1 file changed, 7 insertions(+), 3 deletions(-)
+
+diff --git a/fs/inode.c b/fs/inode.c
+index 9cafde77e2b03..030e07b169c27 100644
+--- a/fs/inode.c
++++ b/fs/inode.c
+@@ -593,6 +593,7 @@ void dump_mapping(const struct address_space *mapping)
+ struct hlist_node *dentry_first;
+ struct dentry *dentry_ptr;
+ struct dentry dentry;
++ char fname[64] = {};
+ unsigned long ino;
+
+ /*
+@@ -628,11 +629,14 @@ void dump_mapping(const struct address_space *mapping)
+ return;
+ }
+
++ if (strncpy_from_kernel_nofault(fname, dentry.d_name.name, 63) < 0)
++ strscpy(fname, "<invalid>", 63);
+ /*
+- * if dentry is corrupted, the %pd handler may still crash,
+- * but it's unlikely that we reach here with a corrupt mapping
++ * Even if strncpy_from_kernel_nofault() succeeded,
++ * the fname could be unreliable
+ */
+- pr_warn("aops:%ps ino:%lx dentry name:\"%pd\"\n", a_ops, ino, &dentry);
++ pr_warn("aops:%ps ino:%lx dentry name(?):\"%s\"\n",
++ a_ops, ino, fname);
+ }
+
+ void clear_inode(struct inode *inode)
+--
+2.43.0
+
--- /dev/null
+From e10784b0a6563303dbb0d6d93a3ddc6e31d4aa83 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Nov 2024 13:33:07 +0800
+Subject: fs/proc: do_task_stat: use sig->stats_lock to gather the
+ threads/children stats
+
+From: Oleg Nesterov <oleg@redhat.com>
+
+[ Upstream commit 7601df8031fd67310af891897ef6cc0df4209305 ]
+
+lock_task_sighand() can trigger a hard lockup. If NR_CPUS threads call
+do_task_stat() at the same time and the process has NR_THREADS, it will
+spin with irqs disabled O(NR_CPUS * NR_THREADS) time.
+
+Change do_task_stat() to use sig->stats_lock to gather the statistics
+outside of the ->siglock-protected section; in the likely case, this
+code will run lockless.
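+
+In sketch form, the lockless-then-locked retry pattern used below
+(sig->stats_lock is a seqlock) is:
+
+	unsigned int seq = 1;
+	unsigned long flags;
+
+	do {
+		seq++; /* 2 (even) on the first, lockless pass; odd after a retry */
+		flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
+
+		/* ... read sig->c{min,maj}_flt and c{u,s,g}time, and for the
+		 * "whole" case walk the live threads under RCU with
+		 * __for_each_thread() ...
+		 */
+	} while (need_seqretry(&sig->stats_lock, seq));
+	done_seqretry_irqrestore(&sig->stats_lock, seq, flags);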
+
+Link: https://lkml.kernel.org/r/20240123153357.GA21857@redhat.com
+Signed-off-by: Oleg Nesterov <oleg@redhat.com>
+Signed-off-by: Dylan Hatch <dylanbhatch@google.com>
+Cc: Eric W. Biederman <ebiederm@xmission.com>
+Cc: <stable@vger.kernel.org>
+Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
+[ Resolve minor conflicts ]
+Signed-off-by: Bin Lan <bin.lan.cn@windriver.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ fs/proc/array.c | 57 +++++++++++++++++++++++++++----------------------
+ 1 file changed, 32 insertions(+), 25 deletions(-)
+
+diff --git a/fs/proc/array.c b/fs/proc/array.c
+index 37b8061d84bb7..34a47fb0c57f2 100644
+--- a/fs/proc/array.c
++++ b/fs/proc/array.c
+@@ -477,13 +477,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ int permitted;
+ struct mm_struct *mm;
+ unsigned long long start_time;
+- unsigned long cmin_flt = 0, cmaj_flt = 0;
+- unsigned long min_flt = 0, maj_flt = 0;
+- u64 cutime, cstime, utime, stime;
+- u64 cgtime, gtime;
++ unsigned long cmin_flt, cmaj_flt, min_flt, maj_flt;
++ u64 cutime, cstime, cgtime, utime, stime, gtime;
+ unsigned long rsslim = 0;
+ unsigned long flags;
+ int exit_code = task->exit_code;
++ struct signal_struct *sig = task->signal;
++ unsigned int seq = 1;
+
+ state = *get_task_state(task);
+ vsize = eip = esp = 0;
+@@ -511,12 +511,8 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+
+ sigemptyset(&sigign);
+ sigemptyset(&sigcatch);
+- cutime = cstime = 0;
+- cgtime = gtime = 0;
+
+ if (lock_task_sighand(task, &flags)) {
+- struct signal_struct *sig = task->signal;
+-
+ if (sig->tty) {
+ struct pid *pgrp = tty_get_pgrp(sig->tty);
+ tty_pgrp = pid_nr_ns(pgrp, ns);
+@@ -527,26 +523,9 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ num_threads = get_nr_threads(task);
+ collect_sigign_sigcatch(task, &sigign, &sigcatch);
+
+- cmin_flt = sig->cmin_flt;
+- cmaj_flt = sig->cmaj_flt;
+- cutime = sig->cutime;
+- cstime = sig->cstime;
+- cgtime = sig->cgtime;
+ rsslim = READ_ONCE(sig->rlim[RLIMIT_RSS].rlim_cur);
+
+- /* add up live thread stats at the group level */
+ if (whole) {
+- struct task_struct *t = task;
+- do {
+- min_flt += t->min_flt;
+- maj_flt += t->maj_flt;
+- gtime += task_gtime(t);
+- } while_each_thread(task, t);
+-
+- min_flt += sig->min_flt;
+- maj_flt += sig->maj_flt;
+- gtime += sig->gtime;
+-
+ if (sig->flags & (SIGNAL_GROUP_EXIT | SIGNAL_STOP_STOPPED))
+ exit_code = sig->group_exit_code;
+ }
+@@ -561,6 +540,34 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
+ if (permitted && (!whole || num_threads < 2))
+ wchan = !task_is_running(task);
+
++ do {
++ seq++; /* 2 on the 1st/lockless path, otherwise odd */
++ flags = read_seqbegin_or_lock_irqsave(&sig->stats_lock, &seq);
++
++ cmin_flt = sig->cmin_flt;
++ cmaj_flt = sig->cmaj_flt;
++ cutime = sig->cutime;
++ cstime = sig->cstime;
++ cgtime = sig->cgtime;
++
++ if (whole) {
++ struct task_struct *t;
++
++ min_flt = sig->min_flt;
++ maj_flt = sig->maj_flt;
++ gtime = sig->gtime;
++
++ rcu_read_lock();
++ __for_each_thread(sig, t) {
++ min_flt += t->min_flt;
++ maj_flt += t->maj_flt;
++ gtime += task_gtime(t);
++ }
++ rcu_read_unlock();
++ }
++ } while (need_seqretry(&sig->stats_lock, seq));
++ done_seqretry_irqrestore(&sig->stats_lock, seq, flags);
++
+ if (whole) {
+ thread_group_cputime_adjusted(task, &utime, &stime);
+ } else {
+--
+2.43.0
+
--- /dev/null
+From eb15e9eae32231c03553db56bec11002a5849057 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Nov 2024 15:04:51 +0100
+Subject: mptcp: fix possible integer overflow in mptcp_reset_tout_timer
+
+From: Dmitry Kandybka <d.kandybka@gmail.com>
+
+commit b169e76ebad22cbd055101ee5aa1a7bed0e66606 upstream.
+
+In 'mptcp_reset_tout_timer', promote 'probe_timestamp' to unsigned long
+to avoid possible integer overflow. Compile tested only.
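+
+Sketch of the arithmetic (probe_timestamp and tcp_jiffies32 are u32,
+jiffies and close_timeout are unsigned long):
+
+	/* before: the u32 subtraction is evaluated in 32 bits and can wrap
+	 * before the result is widened and added to jiffies
+	 */
+	close_timeout = probe_timestamp - tcp_jiffies32 + jiffies +
+			TCP_TIMEWAIT_LEN;
+
+	/* after: the cast forces the whole expression to be evaluated as
+	 * unsigned long
+	 */
+	close_timeout = (unsigned long)probe_timestamp - tcp_jiffies32 +
+			jiffies + TCP_TIMEWAIT_LEN;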
+
+Found by Linux Verification Center (linuxtesting.org) with SVACE.
+
+Signed-off-by: Dmitry Kandybka <d.kandybka@gmail.com>
+Link: https://patch.msgid.link/20241107103657.1560536-1-d.kandybka@gmail.com
+Signed-off-by: Jakub Kicinski <kuba@kernel.org>
+[ Conflict in this version because commit d866ae9aaa43 ("mptcp: add a
+ new sysctl for make after break timeout") is not in this version, and
+ replaced TCP_TIMEWAIT_LEN in the expression. The fix can still be
+ applied the same way: by forcing a cast to unsigned long for the first
+ item. ]
+Signed-off-by: Matthieu Baerts (NGI0) <matttbe@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ net/mptcp/protocol.c | 4 ++--
+ 1 file changed, 2 insertions(+), 2 deletions(-)
+
+diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
+index b8357d7c6b3a1..01f6ce970918c 100644
+--- a/net/mptcp/protocol.c
++++ b/net/mptcp/protocol.c
+@@ -2691,8 +2691,8 @@ void mptcp_reset_tout_timer(struct mptcp_sock *msk, unsigned long fail_tout)
+ if (!fail_tout && !inet_csk(sk)->icsk_mtup.probe_timestamp)
+ return;
+
+- close_timeout = inet_csk(sk)->icsk_mtup.probe_timestamp - tcp_jiffies32 + jiffies +
+- TCP_TIMEWAIT_LEN;
++ close_timeout = (unsigned long)inet_csk(sk)->icsk_mtup.probe_timestamp -
++ tcp_jiffies32 + jiffies + TCP_TIMEWAIT_LEN;
+
+ /* the close timeout takes precedence on the fail one, and here at least one of
+ * them is active
+--
+2.43.0
+
--- /dev/null
+From b72577604073ca4cb55540b839c9ed4220a59c19 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Tue, 26 Nov 2024 15:46:57 +0800
+Subject: nvme: apple: fix device reference counting
+
+From: Keith Busch <kbusch@kernel.org>
+
+[ Upstream commit b9ecbfa45516182cd062fecd286db7907ba84210 ]
+
+Drivers must call nvme_uninit_ctrl after a successful nvme_init_ctrl.
+Split the allocation side out to make the error handling boundary easier
+to navigate. The apple driver had been doing this wrong, leaking the
+controller device memory on a tagset failure.
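+
+Condensed from the hunks below, the resulting probe flow is roughly:
+
+	static int apple_nvme_probe(struct platform_device *pdev)
+	{
+		struct apple_nvme *anv;
+		int ret;
+
+		/* apple_nvme_alloc() now ends after nvme_init_ctrl() and
+		 * cleans up with put_device() on its own failures
+		 */
+		anv = apple_nvme_alloc(pdev);
+		if (IS_ERR(anv))
+			return PTR_ERR(anv);
+
+		anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
+		if (IS_ERR(anv->ctrl.admin_q)) {
+			ret = -ENOMEM;
+			anv->ctrl.admin_q = NULL;
+			/* past nvme_init_ctrl(), so it must be undone */
+			goto out_uninit_ctrl;
+		}
+
+		nvme_reset_ctrl(&anv->ctrl);
+		/* ... */
+		return 0;
+
+	out_uninit_ctrl:
+		nvme_uninit_ctrl(&anv->ctrl);
+		nvme_put_ctrl(&anv->ctrl);
+		return ret;
+	}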
+
+Reviewed-by: Christoph Hellwig <hch@lst.de>
+Reviewed-by: Chaitanya Kulkarni <kch@nvidia.com>
+Signed-off-by: Keith Busch <kbusch@kernel.org>
+[ Resolve minor conflicts ]
+Signed-off-by: Bin Lan <bin.lan.cn@windriver.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/nvme/host/apple.c | 27 ++++++++++++++++++++++-----
+ 1 file changed, 22 insertions(+), 5 deletions(-)
+
+diff --git a/drivers/nvme/host/apple.c b/drivers/nvme/host/apple.c
+index 596bb11eeba5a..396eb94376597 100644
+--- a/drivers/nvme/host/apple.c
++++ b/drivers/nvme/host/apple.c
+@@ -1387,7 +1387,7 @@ static void devm_apple_nvme_mempool_destroy(void *data)
+ mempool_destroy(data);
+ }
+
+-static int apple_nvme_probe(struct platform_device *pdev)
++static struct apple_nvme *apple_nvme_alloc(struct platform_device *pdev)
+ {
+ struct device *dev = &pdev->dev;
+ struct apple_nvme *anv;
+@@ -1395,7 +1395,7 @@ static int apple_nvme_probe(struct platform_device *pdev)
+
+ anv = devm_kzalloc(dev, sizeof(*anv), GFP_KERNEL);
+ if (!anv)
+- return -ENOMEM;
++ return ERR_PTR(-ENOMEM);
+
+ anv->dev = get_device(dev);
+ anv->adminq.is_adminq = true;
+@@ -1515,10 +1515,26 @@ static int apple_nvme_probe(struct platform_device *pdev)
+ goto put_dev;
+ }
+
++ return anv;
++put_dev:
++ put_device(anv->dev);
++ return ERR_PTR(ret);
++}
++
++static int apple_nvme_probe(struct platform_device *pdev)
++{
++ struct apple_nvme *anv;
++ int ret;
++
++ anv = apple_nvme_alloc(pdev);
++ if (IS_ERR(anv))
++ return PTR_ERR(anv);
++
+ anv->ctrl.admin_q = blk_mq_init_queue(&anv->admin_tagset);
+ if (IS_ERR(anv->ctrl.admin_q)) {
+ ret = -ENOMEM;
+- goto put_dev;
++ anv->ctrl.admin_q = NULL;
++ goto out_uninit_ctrl;
+ }
+
+ nvme_reset_ctrl(&anv->ctrl);
+@@ -1526,8 +1542,9 @@ static int apple_nvme_probe(struct platform_device *pdev)
+
+ return 0;
+
+-put_dev:
+- put_device(anv->dev);
++out_uninit_ctrl:
++ nvme_uninit_ctrl(&anv->ctrl);
++ nvme_put_ctrl(&anv->ctrl);
+ return ret;
+ }
+
+--
+2.43.0
+
--- /dev/null
+From b4b5f66403eb253e9d0aba7ac8af8d9180a4ea5a Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Mon, 25 Nov 2024 16:06:25 +0800
+Subject: platform/x86: x86-android-tablets: Unregister devices in reverse
+ order
+
+From: Hans de Goede <hdegoede@redhat.com>
+
+[ Upstream commit 3de0f2627ef849735f155c1818247f58404dddfe ]
+
+Not all subsystems support a device getting removed while there are
+still consumers of the device with a reference to the device.
+
+One example of this is the regulator subsystem. If a regulator gets
+unregistered while there are still drivers holding a reference,
+a WARN() at drivers/regulator/core.c:5829 triggers, e.g.:
+
+ WARNING: CPU: 1 PID: 1587 at drivers/regulator/core.c:5829 regulator_unregister
+ Hardware name: Intel Corp. VALLEYVIEW C0 PLATFORM/BYT-T FFD8, BIOS BLADE_21.X64.0005.R00.1504101516 FFD8_X64_R_2015_04_10_1516 04/10/2015
+ RIP: 0010:regulator_unregister
+ Call Trace:
+ <TASK>
+ regulator_unregister
+ devres_release_group
+ i2c_device_remove
+ device_release_driver_internal
+ bus_remove_device
+ device_del
+ device_unregister
+ x86_android_tablet_remove
+
+On the Lenovo Yoga Tablet 2 series the bq24190 charger chip also provides
+a 5V boost converter output for powering USB devices connected to the micro
+USB port, the bq24190-charger driver exports this as a Vbus regulator.
+
+On the 830 (8") and 1050 ("10") models this regulator is controlled by
+a platform_device and x86_android_tablet_remove() removes platform_device-s
+before i2c_clients so the consumer gets removed first.
+
+But on the 1380 (13") model there is a lc824206xa micro-USB switch
+connected over I2C and the extcon driver for that controls the regulator.
+The bq24190 i2c-client *must* be registered first, because that creates
+the regulator with the lc824206xa listed as its consumer. If the regulator
+has not been registered yet the lc824206xa driver will end up getting
+a dummy regulator.
+
+Since in this case both the regulator provider and consumer are I2C
+devices, the only way to ensure that the consumer is unregistered first
+is to unregister the I2C devices in the reverse order in which they were
+created.
+
+For consistency and to avoid similar problems in the future change
+x86_android_tablet_remove() to unregister all device types in reverse
+order.
+
+Signed-off-by: Hans de Goede <hdegoede@redhat.com>
+Link: https://lore.kernel.org/r/20240406125058.13624-1-hdegoede@redhat.com
+[ Resolve minor conflicts ]
+Signed-off-by: Bin Lan <bin.lan.cn@windriver.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/platform/x86/x86-android-tablets/core.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/drivers/platform/x86/x86-android-tablets/core.c b/drivers/platform/x86/x86-android-tablets/core.c
+index a0fa0b6859c9c..63a348af83db1 100644
+--- a/drivers/platform/x86/x86-android-tablets/core.c
++++ b/drivers/platform/x86/x86-android-tablets/core.c
+@@ -230,20 +230,20 @@ static void x86_android_tablet_remove(struct platform_device *pdev)
+ {
+ int i;
+
+- for (i = 0; i < serdev_count; i++) {
++ for (i = serdev_count - 1; i >= 0; i--) {
+ if (serdevs[i])
+ serdev_device_remove(serdevs[i]);
+ }
+
+ kfree(serdevs);
+
+- for (i = 0; i < pdev_count; i++)
++ for (i = pdev_count - 1; i >= 0; i--)
+ platform_device_unregister(pdevs[i]);
+
+ kfree(pdevs);
+ kfree(buttons);
+
+- for (i = 0; i < i2c_client_count; i++)
++ for (i = i2c_client_count - 1; i >= 0; i--)
+ i2c_unregister_device(i2c_clients[i]);
+
+ kfree(i2c_clients);
+--
+2.43.0
+
s390-pkey-wipe-copies-of-clear-key-structures-on-fai.patch
serial-sc16is7xx-fix-invalid-fifo-access-with-specia.patch
x86-stackprotector-work-around-strict-clang-tls-symb.patch
+drm-amd-display-add-null-check-for-function-pointer-.patch
+drm-amd-display-initialize-denominators-default-to-1.patch
+fs-inode-prevent-dump_mapping-accessing-invalid-dent.patch
+drm-amd-display-check-null-initialized-variables.patch
+drm-amd-display-don-t-refer-to-dc_sink-in-is_dsc_nee.patch
+fs-proc-do_task_stat-use-sig-stats_lock-to-gather-th.patch
+nvme-apple-fix-device-reference-counting.patch
+platform-x86-x86-android-tablets-unregister-devices-.patch
+drm-amd-display-add-null-check-for-pipe_ctx-plane_st.patch
+mptcp-fix-possible-integer-overflow-in-mptcp_reset_t.patch
+bpf-support-non-r10-register-spill-fill-to-from-stac.patch