git.ipfire.org Git - thirdparty/kernel/stable-queue.git/commitdiff
Fixes for 5.12
author    Sasha Levin <sashal@kernel.org>
Mon, 21 Jun 2021 02:03:38 +0000 (22:03 -0400)
committer Sasha Levin <sashal@kernel.org>
Mon, 21 Jun 2021 02:03:38 +0000 (22:03 -0400)
Signed-off-by: Sasha Levin <sashal@kernel.org>
queue-5.12/bpf-do-not-mark-insn-as-seen-under-speculative-path-.patch [new file with mode: 0644]
queue-5.12/bpf-inherit-expanded-patched-seen-count-from-old-aux.patch [new file with mode: 0644]
queue-5.12/irqchip-gic-v3-workaround-inconsistent-pmr-setting-o.patch [new file with mode: 0644]
queue-5.12/perf-metricgroup-fix-find_evsel_group-event-selector.patch [new file with mode: 0644]
queue-5.12/perf-metricgroup-return-error-code-from-metricgroup_.patch [new file with mode: 0644]
queue-5.12/sched-fair-correctly-insert-cfs_rq-s-to-list-on-unth.patch [new file with mode: 0644]
queue-5.12/series

diff --git a/queue-5.12/bpf-do-not-mark-insn-as-seen-under-speculative-path-.patch b/queue-5.12/bpf-do-not-mark-insn-as-seen-under-speculative-path-.patch
new file mode 100644
index 0000000..ddb184f
--- /dev/null
@@ -0,0 +1,84 @@
+From da029688fefce5b612a1bf81defaaaf0d77dbe30 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 May 2021 13:47:27 +0000
+Subject: bpf: Do not mark insn as seen under speculative path verification
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit fe9a5ca7e370e613a9a75a13008a3845ea759d6e ]
+
+... in such circumstances, we do not want to mark the instruction as seen, given
+the goal is still to rewrite/sanitize it as jmp-1 dead code if it is not reachable
+from the non-speculative path verification. We do, however, want to verify it for
+safety regardless.
+
+With the patch as-is, all the insns that have been marked as seen before the
+patch will also be marked as seen after the patch (just with a potentially
+different non-zero count). An upcoming patch will also verify paths that are
+unreachable in the non-speculative domain, hence this extension is needed.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Reviewed-by: Benedict Schlueter <benedict.schlueter@rub.de>
+Reviewed-by: Piotr Krysiuk <piotras@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 20 ++++++++++++++++++--
+ 1 file changed, 18 insertions(+), 2 deletions(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index 0ffe85f22887..2423b4e918b9 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -6048,6 +6048,19 @@ do_sim:
+       return !ret ? REASON_STACK : 0;
+ }
++static void sanitize_mark_insn_seen(struct bpf_verifier_env *env)
++{
++      struct bpf_verifier_state *vstate = env->cur_state;
++
++      /* If we simulate paths under speculation, we don't update the
++       * insn as 'seen' such that when we verify unreachable paths in
++       * the non-speculative domain, sanitize_dead_code() can still
++       * rewrite/sanitize them.
++       */
++      if (!vstate->speculative)
++              env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
++}
++
+ static int sanitize_err(struct bpf_verifier_env *env,
+                       const struct bpf_insn *insn, int reason,
+                       const struct bpf_reg_state *off_reg,
+@@ -10096,7 +10109,7 @@ static int do_check(struct bpf_verifier_env *env)
+               }
+               regs = cur_regs(env);
+-              env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
++              sanitize_mark_insn_seen(env);
+               prev_insn_idx = env->insn_idx;
+               if (class == BPF_ALU || class == BPF_ALU64) {
+@@ -10321,7 +10334,7 @@ process_bpf_exit:
+                                       return err;
+                               env->insn_idx++;
+-                              env->insn_aux_data[env->insn_idx].seen = env->pass_cnt;
++                              sanitize_mark_insn_seen(env);
+                       } else {
+                               verbose(env, "invalid BPF_LD mode\n");
+                               return -EINVAL;
+@@ -12098,6 +12111,9 @@ static void free_states(struct bpf_verifier_env *env)
+  * insn_aux_data was touched. These variables are compared to clear temporary
+  * data from failed pass. For testing and experiments do_check_common() can be
+  * run multiple times even when prior attempt to verify is unsuccessful.
++ *
++ * Note that special handling is needed on !env->bypass_spec_v1 if this is
++ * ever called outside of error path with subsequent program rejection.
+  */
+ static void sanitize_insn_aux_data(struct bpf_verifier_env *env)
+ {
+-- 
+2.30.2
+
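For illustration: a minimal stand-alone model (not the verifier's code) of why
speculative-only visits must not set 'seen'. sanitize_dead_code() rewrites every
insn that no non-speculative path reached into a jmp-1; if a speculative visit
marked an insn as seen, it would escape that rewrite. The instruction count and
visit pattern below are hypothetical.

#include <stdio.h>

#define NINSN 4

int main(void)
{
        unsigned int pass_cnt = 1;
        unsigned int seen[NINSN] = {0};
        /* insn 2 is reachable only under speculation */
        int nonspec_visit[NINSN] = {1, 1, 0, 1};

        /* models sanitize_mark_insn_seen(): speculative visits are skipped */
        for (int i = 0; i < NINSN; i++)
                if (nonspec_visit[i])
                        seen[i] = pass_cnt;

        /* models sanitize_dead_code(): insns never seen are neutralized */
        for (int i = 0; i < NINSN; i++)
                if (!seen[i])
                        printf("insn %d rewritten to jmp-1 (dead)\n", i);
        return 0;
}
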
diff --git a/queue-5.12/bpf-inherit-expanded-patched-seen-count-from-old-aux.patch b/queue-5.12/bpf-inherit-expanded-patched-seen-count-from-old-aux.patch
new file mode 100644
index 0000000..828a578
--- /dev/null
@@ -0,0 +1,54 @@
+From 1c723ab399f0e9444a40036bbff64075832e7431 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Fri, 28 May 2021 13:03:30 +0000
+Subject: bpf: Inherit expanded/patched seen count from old aux data
+
+From: Daniel Borkmann <daniel@iogearbox.net>
+
+[ Upstream commit d203b0fd863a2261e5d00b97f3d060c4c2a6db71 ]
+
+Instead of relying on the current env->pass_cnt, use the seen count from the
+old aux data in adjust_insn_aux_data(), and expand it to the new range of
+patched instructions. This change is valid given we always expand 1:n
+with n>=1, so what applies to the old/original instruction needs to apply
+to the replacements as well.
+
+Not relying on env->pass_cnt is a prerequisite for a later change where we
+want to avoid marking an instruction as seen when it is verified under a
+speculative execution path.
+
+Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
+Reviewed-by: John Fastabend <john.fastabend@gmail.com>
+Reviewed-by: Benedict Schlueter <benedict.schlueter@rub.de>
+Reviewed-by: Piotr Krysiuk <piotras@gmail.com>
+Acked-by: Alexei Starovoitov <ast@kernel.org>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/bpf/verifier.c | 4 +++-
+ 1 file changed, 3 insertions(+), 1 deletion(-)
+
+diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
+index bdf4be10c8cc..0ffe85f22887 100644
+--- a/kernel/bpf/verifier.c
++++ b/kernel/bpf/verifier.c
+@@ -10820,6 +10820,7 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
+ {
+       struct bpf_insn_aux_data *new_data, *old_data = env->insn_aux_data;
+       struct bpf_insn *insn = new_prog->insnsi;
++      u32 old_seen = old_data[off].seen;
+       u32 prog_len;
+       int i;
+@@ -10840,7 +10841,8 @@ static int adjust_insn_aux_data(struct bpf_verifier_env *env,
+       memcpy(new_data + off + cnt - 1, old_data + off,
+              sizeof(struct bpf_insn_aux_data) * (prog_len - off - cnt + 1));
+       for (i = off; i < off + cnt - 1; i++) {
+-              new_data[i].seen = env->pass_cnt;
++              /* Expand insni[off]'s seen count to the patched range. */
++              new_data[i].seen = old_seen;
+               new_data[i].zext_dst = insn_has_def32(env, insn + i);
+       }
+       env->insn_aux_data = new_data;
+-- 
+2.30.2
+
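For illustration: a stand-alone model (not verifier code) of the 1:n aux-data
expansion this patch changes. One insn patched into cnt replacements now
inherits the original insn's seen count instead of env->pass_cnt, so a pass-1
mark survives a pass-2 rewrite unchanged. All values below are hypothetical.

#include <stdio.h>

int main(void)
{
        unsigned int old_seen = 1;      /* original insn marked in pass 1 */
        unsigned int pass_cnt = 2;      /* rewrite happens during pass 2 */
        unsigned int cnt = 3;           /* 1 insn expanded to 3 */
        unsigned int new_seen[3];

        for (unsigned int i = 0; i < cnt; i++)
                new_seen[i] = old_seen; /* pre-fix this was pass_cnt */

        for (unsigned int i = 0; i < cnt; i++)
                printf("patched insn %u: seen=%u (pass_cnt=%u)\n",
                       i, new_seen[i], pass_cnt);
        return 0;
}
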
diff --git a/queue-5.12/irqchip-gic-v3-workaround-inconsistent-pmr-setting-o.patch b/queue-5.12/irqchip-gic-v3-workaround-inconsistent-pmr-setting-o.patch
new file mode 100644
index 0000000..66164ae
--- /dev/null
@@ -0,0 +1,94 @@
+From da5ee96a46567691d5b9f471af07bdd90b7f3289 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Jun 2021 15:13:46 +0100
+Subject: irqchip/gic-v3: Workaround inconsistent PMR setting on NMI entry
+
+From: Marc Zyngier <maz@kernel.org>
+
+[ Upstream commit 382e6e177bc1c02473e56591fe5083ae1e4904f6 ]
+
+The arm64 entry code suffers from an annoying issue on taking
+an NMI, as it sets PMR to a value that actually allows IRQs
+to be acknowledged. This is done for consistency with other parts
+of the code, and is in the process of being fixed. This shouldn't
+be a problem, as we are not enabling interrupts whilst in NMI
+context.
+
+However, in the unfortunate scenario that we took a spurious NMI
+(retired before the read of IAR) *and* there is an IRQ pending
+at the same time, we'll ack the IRQ in NMI context. Too bad.
+
+In order to avoid deadlocks while running something like perf,
+teach the GICv3 driver about this situation: if we were in
+a context where no interrupt should have fired, transiently
+set PMR to a value that only allows NMIs before acking the pending
+interrupt, and restore the original value after that.
+
+This papers over the core issue for the time being, and makes
+NMIs great again. Sort of.
+
+Fixes: 4d6a38da8e79e94c ("arm64: entry: always set GIC_PRIO_PSR_I_SET during entry")
+Co-developed-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Mark Rutland <mark.rutland@arm.com>
+Signed-off-by: Marc Zyngier <maz@kernel.org>
+Reviewed-by: Mark Rutland <mark.rutland@arm.com>
+Link: https://lore.kernel.org/lkml/20210610145731.1350460-1-maz@kernel.org
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ drivers/irqchip/irq-gic-v3.c | 36 +++++++++++++++++++++++++++++++++++-
+ 1 file changed, 35 insertions(+), 1 deletion(-)
+
+diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c
+index 00404024d7cd..fea237838bb0 100644
+--- a/drivers/irqchip/irq-gic-v3.c
++++ b/drivers/irqchip/irq-gic-v3.c
+@@ -642,11 +642,45 @@ static inline void gic_handle_nmi(u32 irqnr, struct pt_regs *regs)
+               nmi_exit();
+ }
++static u32 do_read_iar(struct pt_regs *regs)
++{
++      u32 iar;
++
++      if (gic_supports_nmi() && unlikely(!interrupts_enabled(regs))) {
++              u64 pmr;
++
++              /*
++               * We were in a context with IRQs disabled. However, the
++               * entry code has set PMR to a value that allows any
++               * interrupt to be acknowledged, and not just NMIs. This can
++               * lead to surprising effects if the NMI has been retired in
++               * the meantime, and that there is an IRQ pending. The IRQ
++               * would then be taken in NMI context, something that nobody
++               * wants to debug twice.
++               *
++               * Until we sort this, drop PMR again to a level that will
++               * actually only allow NMIs before reading IAR, and then
++               * restore it to what it was.
++               */
++              pmr = gic_read_pmr();
++              gic_pmr_mask_irqs();
++              isb();
++
++              iar = gic_read_iar();
++
++              gic_write_pmr(pmr);
++      } else {
++              iar = gic_read_iar();
++      }
++
++      return iar;
++}
++
+ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
+ {
+       u32 irqnr;
+-      irqnr = gic_read_iar();
++      irqnr = do_read_iar(regs);
+       /* Check for special IDs first */
+       if ((irqnr >= 1020 && irqnr <= 1023))
+-- 
+2.30.2
+
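For illustration: a stand-alone model (not GIC code) of the window this patch
closes. With PMR at its entry-code value, an IAR read in NMI context can hand
back a pending IRQ once the spurious NMI has been retired; masking PMR to
NMI-only first makes the read return the spurious INTID instead. The priority
and PMR values below are hypothetical (GICv3 signals an interrupt only when
its priority is numerically below PMR).

#include <stdio.h>

#define PRIO_NMI        0x20
#define PRIO_IRQ        0x60
#define PMR_ALLOW_ALL   0xf0
#define PMR_NMI_ONLY    0x40
#define SPURIOUS        1023

/* highest-priority pending interrupt that PMR lets through */
static unsigned int read_iar(unsigned int pmr, int nmi_pending, int irq_pending)
{
        if (nmi_pending && PRIO_NMI < pmr)
                return 30;              /* some NMI-routed INTID */
        if (irq_pending && PRIO_IRQ < pmr)
                return 27;              /* some normal INTID */
        return SPURIOUS;
}

int main(void)
{
        /* spurious NMI already retired, but an IRQ is pending */
        printf("entry PMR:  iar=%u (IRQ acked in NMI context!)\n",
               read_iar(PMR_ALLOW_ALL, 0, 1));
        printf("masked PMR: iar=%u (spurious, as desired)\n",
               read_iar(PMR_NMI_ONLY, 0, 1));
        return 0;
}
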
diff --git a/queue-5.12/perf-metricgroup-fix-find_evsel_group-event-selector.patch b/queue-5.12/perf-metricgroup-fix-find_evsel_group-event-selector.patch
new file mode 100644
index 0000000..f359a8d
--- /dev/null
@@ -0,0 +1,87 @@
+From 4efbd5b157663ac3fe616c08258c8ff13fceb22f Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Jun 2021 22:32:59 +0800
+Subject: perf metricgroup: Fix find_evsel_group() event selector
+
+From: John Garry <john.garry@huawei.com>
+
+[ Upstream commit fc96ec4d5d4155c61cbafd49fb2dd403c899a9f4 ]
+
+The following command segfaults on my x86 broadwell:
+
+  $ ./perf stat  -M frontend_bound,retiring,backend_bound,bad_speculation sleep 1
+  WARNING: grouped events cpus do not match, disabling group:
+    anon group { raw 0x10e }
+    anon group { raw 0x10e }
+  perf: util/evsel.c:1596: get_group_fd: Assertion `!(!leader->core.fd)' failed.
+  Aborted (core dumped)
+
+The issue shows itself as a use-after-free in evlist__check_cpu_maps(),
+whereby the leader of an event selector (evsel) has been deleted (yet we
+still attempt to verify for an evsel).
+
+Fundamentally the problem comes from metricgroup__setup_events() ->
+find_evsel_group(), and has developed from the previous fix attempt in
+commit 9c880c24cb0d ("perf metricgroup: Fix for metrics containing
+duration_time").
+
+The problem now is that the logic checking whether an evsel is in the same
+group is subtly broken for the "cycles" event. For the "cycles" event,
+the pmu_name is NULL; however, the logic in find_evsel_group() may mark an
+event matched against "cycles" as used when it should not.
+
+This leads to a condition where an evsel is set, yet its leader is not.
+
+Fix the check for evsel pmu_name by not matching evsels when either has a
+NULL pmu_name.
+
+There is still a pre-existing metric issue whereby the ordering of the
+metrics may break the 'stat' function, as discussed at:
+https://lore.kernel.org/lkml/49c6fccb-b716-1bf0-18a6-cace1cdb66b9@huawei.com/
+
+Fixes: 9c880c24cb0d ("perf metricgroup: Fix for metrics containing duration_time")
+Signed-off-by: John Garry <john.garry@huawei.com>
+Tested-by: Arnaldo Carvalho de Melo <acme@redhat.com> # On a Thinkpad T450S
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Ian Rogers <irogers@google.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Kajol Jain <kjain@linux.ibm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lore.kernel.org/lkml/1623335580-187317-2-git-send-email-john.garry@huawei.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/metricgroup.c | 6 +++---
+ 1 file changed, 3 insertions(+), 3 deletions(-)
+
+diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
+index 26c990e32378..1af71ac1cc68 100644
+--- a/tools/perf/util/metricgroup.c
++++ b/tools/perf/util/metricgroup.c
+@@ -162,10 +162,10 @@ static bool contains_event(struct evsel **metric_events, int num_events,
+       return false;
+ }
+-static bool evsel_same_pmu(struct evsel *ev1, struct evsel *ev2)
++static bool evsel_same_pmu_or_none(struct evsel *ev1, struct evsel *ev2)
+ {
+       if (!ev1->pmu_name || !ev2->pmu_name)
+-              return false;
++              return true;
+       return !strcmp(ev1->pmu_name, ev2->pmu_name);
+ }
+@@ -288,7 +288,7 @@ static struct evsel *find_evsel_group(struct evlist *perf_evlist,
+                        */
+                       if (!has_constraint &&
+                           ev->leader != metric_events[i]->leader &&
+-                          evsel_same_pmu(ev->leader, metric_events[i]->leader))
++                          evsel_same_pmu_or_none(ev->leader, metric_events[i]->leader))
+                               break;
+                       if (!strcmp(metric_events[i]->name, ev->name)) {
+                               set_bit(ev->idx, evlist_used);
+-- 
+2.30.2
+
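For illustration: a stand-alone sketch (not perf's code) of the predicate
change. "cycles" carries no pmu_name in perf's evsel, so the old check could
never match it; treating a missing PMU name as a possible match keeps such
events from being wrongly claimed across groups. The string values below are
hypothetical.

#include <assert.h>
#include <stdbool.h>
#include <string.h>

/* pre-fix behaviour: a NULL pmu_name never matches */
static bool evsel_same_pmu(const char *n1, const char *n2)
{
        if (!n1 || !n2)
                return false;
        return !strcmp(n1, n2);
}

/* post-fix behaviour: a NULL pmu_name may match */
static bool evsel_same_pmu_or_none(const char *n1, const char *n2)
{
        if (!n1 || !n2)
                return true;
        return !strcmp(n1, n2);
}

int main(void)
{
        const char *cycles = NULL;      /* "cycles" has no PMU name */
        const char *cpu = "cpu";

        assert(!evsel_same_pmu(cycles, cpu));
        assert(evsel_same_pmu_or_none(cycles, cpu));
        return 0;
}
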
diff --git a/queue-5.12/perf-metricgroup-return-error-code-from-metricgroup_.patch b/queue-5.12/perf-metricgroup-return-error-code-from-metricgroup_.patch
new file mode 100644
index 0000000..9a3061f
--- /dev/null
@@ -0,0 +1,69 @@
+From 60c8a0270e63ddcc5bd5cd75c5ab3c5d103d32f9 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Thu, 10 Jun 2021 22:33:00 +0800
+Subject: perf metricgroup: Return error code from
+ metricgroup__add_metric_sys_event_iter()
+
+From: John Garry <john.garry@huawei.com>
+
+[ Upstream commit fe7a98b9d9b36e5c8a22d76b67d29721f153f66e ]
+
+The error code is not set at all in the sys event iter function.
+
+This may lead to an uninitialized value of "ret" in
+metricgroup__add_metric() when no CPU metric is added.
+
+Fix by properly setting the error code.
+
+It is not necessary to init "ret" to 0 in metricgroup__add_metric(): if we
+have no CPU or sys event metric matching, then "has_match" should be 0 and
+"ret" is set to -EINVAL.
+
+However, gcc cannot detect that "ret" may not have been set after the
+map_for_each_metric() loop for CPU metrics, which is strange.
+
+Fixes: be335ec28efa8 ("perf metricgroup: Support adding metrics for system PMUs")
+Signed-off-by: John Garry <john.garry@huawei.com>
+Acked-by: Ian Rogers <irogers@google.com>
+Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
+Cc: Jiri Olsa <jolsa@redhat.com>
+Cc: Kajol Jain <kjain@linux.ibm.com>
+Cc: Mark Rutland <mark.rutland@arm.com>
+Cc: Namhyung Kim <namhyung@kernel.org>
+Cc: Peter Zijlstra <peterz@infradead.org>
+Link: http://lore.kernel.org/lkml/1623335580-187317-3-git-send-email-john.garry@huawei.com
+Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ tools/perf/util/metricgroup.c | 8 +++++---
+ 1 file changed, 5 insertions(+), 3 deletions(-)
+
+diff --git a/tools/perf/util/metricgroup.c b/tools/perf/util/metricgroup.c
+index 1af71ac1cc68..939aed36e0c2 100644
+--- a/tools/perf/util/metricgroup.c
++++ b/tools/perf/util/metricgroup.c
+@@ -1072,16 +1072,18 @@ static int metricgroup__add_metric_sys_event_iter(struct pmu_event *pe,
+       ret = add_metric(d->metric_list, pe, d->metric_no_group, &m, NULL, d->ids);
+       if (ret)
+-              return ret;
++              goto out;
+       ret = resolve_metric(d->metric_no_group,
+                                    d->metric_list, NULL, d->ids);
+       if (ret)
+-              return ret;
++              goto out;
+       *(d->has_match) = true;
+-      return *d->ret;
++out:
++      *(d->ret) = ret;
++      return ret;
+ }
+ static int metricgroup__add_metric(const char *metric, bool metric_no_group,
+-- 
+2.30.2
+
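For illustration: a stand-alone reduction (not perf's code) of the bug class
fixed here. If the per-event iterator can return without writing the caller's
"ret", the caller may return an uninitialized value; the fix routes every exit
of the iterator through a point that stores "ret". The function names and
values below are hypothetical.

#include <errno.h>
#include <stdio.h>

/* models metricgroup__add_metric_sys_event_iter() post-fix: every
 * return path stores its status into the caller-owned *out_ret */
static int sys_event_iter(int fail, int *out_ret)
{
        int ret = fail ? -EINVAL : 0;

        *out_ret = ret;         /* pre-fix, failure paths skipped this */
        return ret;
}

int main(void)
{
        int ret = 0xdead;       /* stands in for the uninitialized value */

        sys_event_iter(1, &ret);
        printf("ret=%d (no longer 0xdead)\n", ret);
        return 0;
}
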
diff --git a/queue-5.12/sched-fair-correctly-insert-cfs_rq-s-to-list-on-unth.patch b/queue-5.12/sched-fair-correctly-insert-cfs_rq-s-to-list-on-unth.patch
new file mode 100644
index 0000000..23e52db
--- /dev/null
@@ -0,0 +1,120 @@
+From 563140f0945ed8b5240a0416fc47d121f8b84667 Mon Sep 17 00:00:00 2001
+From: Sasha Levin <sashal@kernel.org>
+Date: Sat, 12 Jun 2021 13:28:15 +0200
+Subject: sched/fair: Correctly insert cfs_rq's to list on unthrottle
+
+From: Odin Ugedal <odin@uged.al>
+
+[ Upstream commit a7b359fc6a37faaf472125867c8dc5a068c90982 ]
+
+Fix an issue where fairness is decreased since cfs_rq's can end up not
+being decayed properly. For two sibling control groups with the same
+priority, this can often lead to a load ratio of 99/1 (!!).
+
+This happens because when a cfs_rq is throttled, all the descendant
+cfs_rq's will be removed from the leaf list. When the initial cfs_rq
+is unthrottled, it will currently only re-add descendant cfs_rq's if
+they have one or more entities enqueued. This is not a perfect
+heuristic.
+
+Instead, we insert all cfs_rq's that contain one or more enqueued
+entities, or whose load is not completely decayed.
+
+The old behaviour can often lead to situations like this for equally
+weighted control groups:
+
+  $ ps u -C stress
+  USER         PID %CPU %MEM    VSZ   RSS TTY      STAT START   TIME COMMAND
+  root       10009 88.8  0.0   3676   100 pts/1    R+   11:04   0:13 stress --cpu 1
+  root       10023  3.0  0.0   3676   104 pts/1    R+   11:04   0:00 stress --cpu 1
+
+Fixes: 31bc6aeaab1d ("sched/fair: Optimize update_blocked_averages()")
+[vingo: !SMP build fix]
+Signed-off-by: Odin Ugedal <odin@uged.al>
+Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
+Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
+Link: https://lore.kernel.org/r/20210612112815.61678-1-odin@uged.al
+Signed-off-by: Sasha Levin <sashal@kernel.org>
+---
+ kernel/sched/fair.c | 44 +++++++++++++++++++++++++-------------------
+ 1 file changed, 25 insertions(+), 19 deletions(-)
+
+diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
+index 47fcc3fe9dc5..56e2334fe66b 100644
+--- a/kernel/sched/fair.c
++++ b/kernel/sched/fair.c
+@@ -3293,6 +3293,24 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
+ #ifdef CONFIG_SMP
+ #ifdef CONFIG_FAIR_GROUP_SCHED
++
++static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
++{
++      if (cfs_rq->load.weight)
++              return false;
++
++      if (cfs_rq->avg.load_sum)
++              return false;
++
++      if (cfs_rq->avg.util_sum)
++              return false;
++
++      if (cfs_rq->avg.runnable_sum)
++              return false;
++
++      return true;
++}
++
+ /**
+  * update_tg_load_avg - update the tg's load avg
+  * @cfs_rq: the cfs_rq whose avg changed
+@@ -4086,6 +4104,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
+ #else /* CONFIG_SMP */
++static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
++{
++      return true;
++}
++
+ #define UPDATE_TG     0x0
+ #define SKIP_AGE_LOAD 0x0
+ #define DO_ATTACH     0x0
+@@ -4744,8 +4767,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
+               cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
+                                            cfs_rq->throttled_clock_task;
+-              /* Add cfs_rq with already running entity in the list */
+-              if (cfs_rq->nr_running >= 1)
++              /* Add cfs_rq with load or one or more already running entities to the list */
++              if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
+                       list_add_leaf_cfs_rq(cfs_rq);
+       }
+@@ -7972,23 +7995,6 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
+ #ifdef CONFIG_FAIR_GROUP_SCHED
+-static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
+-{
+-      if (cfs_rq->load.weight)
+-              return false;
+-
+-      if (cfs_rq->avg.load_sum)
+-              return false;
+-
+-      if (cfs_rq->avg.util_sum)
+-              return false;
+-
+-      if (cfs_rq->avg.runnable_sum)
+-              return false;
+-
+-      return true;
+-}
+-
+ static bool __update_blocked_fair(struct rq *rq, bool *done)
+ {
+       struct cfs_rq *cfs_rq, *pos;
+-- 
+2.30.2
+
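For illustration: a stand-alone sketch (not kernel code) of the new unthrottle
condition. A cfs_rq goes back on the leaf list if it still has queued entities
or any un-decayed PELT sums, so update_blocked_averages() keeps decaying it.
The struct and field values below are simplified stand-ins for the kernel's
cfs_rq.

#include <stdbool.h>
#include <stdio.h>

struct cfs_rq_like {
        unsigned long load_weight;
        unsigned int nr_running;
        unsigned long load_sum, util_sum, runnable_sum;
};

static bool cfs_rq_is_decayed_like(const struct cfs_rq_like *cfs_rq)
{
        return !cfs_rq->load_weight && !cfs_rq->load_sum &&
               !cfs_rq->util_sum && !cfs_rq->runnable_sum;
}

static bool relist_on_unthrottle(const struct cfs_rq_like *cfs_rq)
{
        /* pre-fix condition was: cfs_rq->nr_running >= 1 */
        return !cfs_rq_is_decayed_like(cfs_rq) || cfs_rq->nr_running;
}

int main(void)
{
        /* idle group with residual load: previously skipped, so its
         * stale contribution was never decayed away */
        struct cfs_rq_like idle = { .nr_running = 0, .load_sum = 123 };

        printf("re-add to leaf list: %s\n",
               relist_on_unthrottle(&idle) ? "yes" : "no");
        return 0;
}
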
diff --git a/queue-5.12/series b/queue-5.12/series
index a18e02fe11d13b335e65c1c5c67529595e0afa40..524282731324d64d3835d6d22cd4d08c4f441616 100644
@@ -108,3 +108,9 @@ radeon-use-memcpy_to-fromio-for-uvd-fw-upload.patch
 hwmon-scpi-hwmon-shows-the-negative-temperature-prop.patch
 riscv-code-patching-only-works-on-xip_kernel.patch
 mm-relocate-write_protect_seq-in-struct-mm_struct.patch
+irqchip-gic-v3-workaround-inconsistent-pmr-setting-o.patch
+sched-fair-correctly-insert-cfs_rq-s-to-list-on-unth.patch
+perf-metricgroup-fix-find_evsel_group-event-selector.patch
+perf-metricgroup-return-error-code-from-metricgroup_.patch
+bpf-inherit-expanded-patched-seen-count-from-old-aux.patch
+bpf-do-not-mark-insn-as-seen-under-speculative-path-.patch