+++ /dev/null
-From fcdb8bc3bcb97a33cba7278eb7e18f07744805fc Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Sat, 12 Jun 2021 13:28:15 +0200
-Subject: sched/fair: Correctly insert cfs_rq's to list on unthrottle
-
-From: Odin Ugedal <odin@uged.al>
-
-[ Upstream commit a7b359fc6a37faaf472125867c8dc5a068c90982 ]
-
-Fix an issue where fairness is decreased since cfs_rq's can end up not
-being decayed properly. For two sibling control groups with the same
-priority, this can often lead to a load ratio of 99/1 (!!).
-
-This happens because when a cfs_rq is throttled, all the descendant
-cfs_rq's will be removed from the leaf list. When the initial cfs_rq
-is unthrottled, it will currently only re-add descendant cfs_rq's if
-they have one or more entities enqueued. This is not a perfect
-heuristic.
-
-Instead, we insert all cfs_rq's that contain one or more enqueued
-entities, or whose load is not completely decayed.
-
-The old behaviour can often lead to situations like this for equally
-weighted control groups:
-
- $ ps u -C stress
- USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
- root 10009 88.8 0.0 3676 100 pts/1 R+ 11:04 0:13 stress --cpu 1
- root 10023 3.0 0.0 3676 104 pts/1 R+ 11:04 0:00 stress --cpu 1
-
-Fixes: 31bc6aeaab1d ("sched/fair: Optimize update_blocked_averages()")
-[vingo: !SMP build fix]
-Signed-off-by: Odin Ugedal <odin@uged.al>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
-Link: https://lore.kernel.org/r/20210612112815.61678-1-odin@uged.al
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- kernel/sched/fair.c | 44 +++++++++++++++++++++++++-------------------
- 1 file changed, 25 insertions(+), 19 deletions(-)
-
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index d6e1c90de570..1cbb7f80db31 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -3300,6 +3300,24 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
-
- #ifdef CONFIG_SMP
- #ifdef CONFIG_FAIR_GROUP_SCHED
-+
-+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-+{
-+ if (cfs_rq->load.weight)
-+ return false;
-+
-+ if (cfs_rq->avg.load_sum)
-+ return false;
-+
-+ if (cfs_rq->avg.util_sum)
-+ return false;
-+
-+ if (cfs_rq->avg.runnable_sum)
-+ return false;
-+
-+ return true;
-+}
-+
- /**
- * update_tg_load_avg - update the tg's load avg
- * @cfs_rq: the cfs_rq whose avg changed
-@@ -4093,6 +4111,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
-
- #else /* CONFIG_SMP */
-
-+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-+{
-+ return true;
-+}
-+
- #define UPDATE_TG 0x0
- #define SKIP_AGE_LOAD 0x0
- #define DO_ATTACH 0x0
-@@ -4751,8 +4774,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
- cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
- cfs_rq->throttled_clock_task;
-
-- /* Add cfs_rq with already running entity in the list */
-- if (cfs_rq->nr_running >= 1)
-+ /* Add cfs_rq with load or one or more already running entities to the list */
-+ if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
- list_add_leaf_cfs_rq(cfs_rq);
- }
-
-@@ -7927,23 +7950,6 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
-
- #ifdef CONFIG_FAIR_GROUP_SCHED
-
--static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
--{
-- if (cfs_rq->load.weight)
-- return false;
--
-- if (cfs_rq->avg.load_sum)
-- return false;
--
-- if (cfs_rq->avg.util_sum)
-- return false;
--
-- if (cfs_rq->avg.runnable_sum)
-- return false;
--
-- return true;
--}
--
- static bool __update_blocked_fair(struct rq *rq, bool *done)
- {
- struct cfs_rq *cfs_rq, *pos;
---
-2.30.2
-
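To make the behavioural difference concrete, below is a small userspace toy model of the two re-add rules compared in the patch above. It is only an illustration: the struct and helper mirror the fields tested by cfs_rq_is_decayed(), but none of this is kernel code, and the field values are made up.

  /*
   * Toy userspace model of the two re-add rules -- not kernel code.
   * The fields and the helper only mirror what cfs_rq_is_decayed()
   * tests in the patch above; the values are made up.
   */
  #include <stdbool.h>
  #include <stdio.h>

  struct toy_cfs_rq {
          unsigned long load_weight;   /* cfs_rq->load.weight */
          unsigned long load_sum;      /* cfs_rq->avg.load_sum */
          unsigned long util_sum;      /* cfs_rq->avg.util_sum */
          unsigned long runnable_sum;  /* cfs_rq->avg.runnable_sum */
          unsigned int  nr_running;    /* cfs_rq->nr_running */
  };

  static bool toy_cfs_rq_is_decayed(const struct toy_cfs_rq *cfs_rq)
  {
          return !cfs_rq->load_weight && !cfs_rq->load_sum &&
                 !cfs_rq->util_sum && !cfs_rq->runnable_sum;
  }

  int main(void)
  {
          /* Nothing enqueued, but the PELT sums have not decayed to zero yet. */
          struct toy_cfs_rq idle_with_load = {
                  .load_sum = 123,
                  .util_sum = 45,
                  .nr_running = 0,
          };

          /* Old rule: only re-add cfs_rq's with running entities. */
          bool old_rule = idle_with_load.nr_running >= 1;
          /* New rule: also re-add while any load is left to decay. */
          bool new_rule = !toy_cfs_rq_is_decayed(&idle_with_load) ||
                          idle_with_load.nr_running;

          printf("old rule re-adds this cfs_rq: %s\n", old_rule ? "yes" : "no");
          printf("new rule re-adds this cfs_rq: %s\n", new_rule ? "yes" : "no");
          return 0;
  }

Under the old rule a group with leftover blocked load but nothing enqueued is dropped from the leaf list, so that load is never decayed and keeps skewing the sibling weights; this is the kind of skew the ps output above illustrates.
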
hwmon-scpi-hwmon-shows-the-negative-temperature-prop.patch
mm-relocate-write_protect_seq-in-struct-mm_struct.patch
irqchip-gic-v3-workaround-inconsistent-pmr-setting-o.patch
-sched-fair-correctly-insert-cfs_rq-s-to-list-on-unth.patch
bpf-inherit-expanded-patched-seen-count-from-old-aux.patch
bpf-do-not-mark-insn-as-seen-under-speculative-path-.patch
can-bcm-fix-infoleak-in-struct-bcm_msg_head.patch
+++ /dev/null
-From 563140f0945ed8b5240a0416fc47d121f8b84667 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Sat, 12 Jun 2021 13:28:15 +0200
-Subject: sched/fair: Correctly insert cfs_rq's to list on unthrottle
-
-From: Odin Ugedal <odin@uged.al>
-
-[ Upstream commit a7b359fc6a37faaf472125867c8dc5a068c90982 ]
-
-Fix an issue where fairness is decreased since cfs_rq's can end up not
-being decayed properly. For two sibling control groups with the same
-priority, this can often lead to a load ratio of 99/1 (!!).
-
-This happens because when a cfs_rq is throttled, all the descendant
-cfs_rq's will be removed from the leaf list. When the initial cfs_rq
-is unthrottled, it will currently only re-add descendant cfs_rq's if
-they have one or more entities enqueued. This is not a perfect
-heuristic.
-
-Instead, we insert all cfs_rq's that contain one or more enqueued
-entities, or whose load is not completely decayed.
-
-The old behaviour can often lead to situations like this for equally
-weighted control groups:
-
- $ ps u -C stress
- USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
- root 10009 88.8 0.0 3676 100 pts/1 R+ 11:04 0:13 stress --cpu 1
- root 10023 3.0 0.0 3676 104 pts/1 R+ 11:04 0:00 stress --cpu 1
-
-Fixes: 31bc6aeaab1d ("sched/fair: Optimize update_blocked_averages()")
-[vingo: !SMP build fix]
-Signed-off-by: Odin Ugedal <odin@uged.al>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
-Link: https://lore.kernel.org/r/20210612112815.61678-1-odin@uged.al
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- kernel/sched/fair.c | 44 +++++++++++++++++++++++++-------------------
- 1 file changed, 25 insertions(+), 19 deletions(-)
-
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index 47fcc3fe9dc5..56e2334fe66b 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -3293,6 +3293,24 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
-
- #ifdef CONFIG_SMP
- #ifdef CONFIG_FAIR_GROUP_SCHED
-+
-+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-+{
-+ if (cfs_rq->load.weight)
-+ return false;
-+
-+ if (cfs_rq->avg.load_sum)
-+ return false;
-+
-+ if (cfs_rq->avg.util_sum)
-+ return false;
-+
-+ if (cfs_rq->avg.runnable_sum)
-+ return false;
-+
-+ return true;
-+}
-+
- /**
- * update_tg_load_avg - update the tg's load avg
- * @cfs_rq: the cfs_rq whose avg changed
-@@ -4086,6 +4104,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
-
- #else /* CONFIG_SMP */
-
-+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-+{
-+ return true;
-+}
-+
- #define UPDATE_TG 0x0
- #define SKIP_AGE_LOAD 0x0
- #define DO_ATTACH 0x0
-@@ -4744,8 +4767,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
- cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
- cfs_rq->throttled_clock_task;
-
-- /* Add cfs_rq with already running entity in the list */
-- if (cfs_rq->nr_running >= 1)
-+ /* Add cfs_rq with load or one or more already running entities to the list */
-+ if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
- list_add_leaf_cfs_rq(cfs_rq);
- }
-
-@@ -7972,23 +7995,6 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
-
- #ifdef CONFIG_FAIR_GROUP_SCHED
-
--static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
--{
-- if (cfs_rq->load.weight)
-- return false;
--
-- if (cfs_rq->avg.load_sum)
-- return false;
--
-- if (cfs_rq->avg.util_sum)
-- return false;
--
-- if (cfs_rq->avg.runnable_sum)
-- return false;
--
-- return true;
--}
--
- static bool __update_blocked_fair(struct rq *rq, bool *done)
- {
- struct cfs_rq *cfs_rq, *pos;
---
-2.30.2
-
riscv-code-patching-only-works-on-xip_kernel.patch
mm-relocate-write_protect_seq-in-struct-mm_struct.patch
irqchip-gic-v3-workaround-inconsistent-pmr-setting-o.patch
-sched-fair-correctly-insert-cfs_rq-s-to-list-on-unth.patch
perf-metricgroup-fix-find_evsel_group-event-selector.patch
perf-metricgroup-return-error-code-from-metricgroup_.patch
bpf-inherit-expanded-patched-seen-count-from-old-aux.patch
+++ /dev/null
-From 2ce8185d181f56f500e24da1d506d97c762927b6 Mon Sep 17 00:00:00 2001
-From: Sasha Levin <sashal@kernel.org>
-Date: Sat, 12 Jun 2021 13:28:15 +0200
-Subject: sched/fair: Correctly insert cfs_rq's to list on unthrottle
-
-From: Odin Ugedal <odin@uged.al>
-
-[ Upstream commit a7b359fc6a37faaf472125867c8dc5a068c90982 ]
-
-Fix an issue where fairness is decreased since cfs_rq's can end up not
-being decayed properly. For two sibling control groups with the same
-priority, this can often lead to a load ratio of 99/1 (!!).
-
-This happens because when a cfs_rq is throttled, all the descendant
-cfs_rq's will be removed from the leaf list. When the initial cfs_rq
-is unthrottled, it will currently only re-add descendant cfs_rq's if
-they have one or more entities enqueued. This is not a perfect
-heuristic.
-
-Instead, we insert all cfs_rq's that contain one or more enqueued
-entities, or whose load is not completely decayed.
-
-The old behaviour can often lead to situations like this for equally
-weighted control groups:
-
- $ ps u -C stress
- USER PID %CPU %MEM VSZ RSS TTY STAT START TIME COMMAND
- root 10009 88.8 0.0 3676 100 pts/1 R+ 11:04 0:13 stress --cpu 1
- root 10023 3.0 0.0 3676 104 pts/1 R+ 11:04 0:00 stress --cpu 1
-
-Fixes: 31bc6aeaab1d ("sched/fair: Optimize update_blocked_averages()")
-[vingo: !SMP build fix]
-Signed-off-by: Odin Ugedal <odin@uged.al>
-Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
-Reviewed-by: Vincent Guittot <vincent.guittot@linaro.org>
-Link: https://lore.kernel.org/r/20210612112815.61678-1-odin@uged.al
-Signed-off-by: Sasha Levin <sashal@kernel.org>
----
- kernel/sched/fair.c | 44 +++++++++++++++++++++++++-------------------
- 1 file changed, 25 insertions(+), 19 deletions(-)
-
-diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
-index d3f4113e87de..877672df822f 100644
---- a/kernel/sched/fair.c
-+++ b/kernel/sched/fair.c
-@@ -3131,6 +3131,24 @@ static inline void cfs_rq_util_change(struct cfs_rq *cfs_rq, int flags)
-
- #ifdef CONFIG_SMP
- #ifdef CONFIG_FAIR_GROUP_SCHED
-+
-+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-+{
-+ if (cfs_rq->load.weight)
-+ return false;
-+
-+ if (cfs_rq->avg.load_sum)
-+ return false;
-+
-+ if (cfs_rq->avg.util_sum)
-+ return false;
-+
-+ if (cfs_rq->avg.runnable_load_sum)
-+ return false;
-+
-+ return true;
-+}
-+
- /**
- * update_tg_load_avg - update the tg's load avg
- * @cfs_rq: the cfs_rq whose avg changed
-@@ -3833,6 +3851,11 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
-
- #else /* CONFIG_SMP */
-
-+static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
-+{
-+ return true;
-+}
-+
- #define UPDATE_TG 0x0
- #define SKIP_AGE_LOAD 0x0
- #define DO_ATTACH 0x0
-@@ -4488,8 +4511,8 @@ static int tg_unthrottle_up(struct task_group *tg, void *data)
- cfs_rq->throttled_clock_task_time += rq_clock_task(rq) -
- cfs_rq->throttled_clock_task;
-
-- /* Add cfs_rq with already running entity in the list */
-- if (cfs_rq->nr_running >= 1)
-+ /* Add cfs_rq with load or one or more already running entities to the list */
-+ if (!cfs_rq_is_decayed(cfs_rq) || cfs_rq->nr_running)
- list_add_leaf_cfs_rq(cfs_rq);
- }
-
-@@ -7620,23 +7643,6 @@ static bool __update_blocked_others(struct rq *rq, bool *done)
-
- #ifdef CONFIG_FAIR_GROUP_SCHED
-
--static inline bool cfs_rq_is_decayed(struct cfs_rq *cfs_rq)
--{
-- if (cfs_rq->load.weight)
-- return false;
--
-- if (cfs_rq->avg.load_sum)
-- return false;
--
-- if (cfs_rq->avg.util_sum)
-- return false;
--
-- if (cfs_rq->avg.runnable_load_sum)
-- return false;
--
-- return true;
--}
--
- static bool __update_blocked_fair(struct rq *rq, bool *done)
- {
- struct cfs_rq *cfs_rq, *pos;
---
-2.30.2
-
pinctrl-ralink-rt2880-avoid-to-error-in-calls-is-pin.patch
radeon-use-memcpy_to-fromio-for-uvd-fw-upload.patch
hwmon-scpi-hwmon-shows-the-negative-temperature-prop.patch
-sched-fair-correctly-insert-cfs_rq-s-to-list-on-unth.patch
can-bcm-fix-infoleak-in-struct-bcm_msg_head.patch
can-bcm-raw-isotp-use-per-module-netdevice-notifier.patch
can-j1939-fix-use-after-free-hold-skb-ref-while-in-use.patch