sched,arm64: Handle CPU isolation on last resort fallback rq selection
author     Frederic Weisbecker <frederic@kernel.org>
           Thu, 26 Sep 2024 22:48:59 +0000 (00:48 +0200)
committer  Frederic Weisbecker <frederic@kernel.org>
           Wed, 8 Jan 2025 17:14:23 +0000 (18:14 +0100)
When a kthread or any other task has an affinity mask that is fully
offline or disallowed, the scheduler reaffines the task to all possible
CPUs as a last resort.

This default decision interacts poorly with nohz_full CPUs, which are
part of the possible cpumask but must not be disturbed by unbound
kthreads or even detached pinned user tasks.

Make the fallback affinity setting aware of nohz_full.
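
To illustrate the effect, here is a rough userspace model of the behaviour
change (a sketch with a made-up topology, not kernel code): before the patch
the last-resort reaffinement used cpu_possible_mask; after it, the fallback is
restricted to the tick-housekeeping CPUs, which excludes nohz_full ones.

/* Build: cc -o fallback fallback.c && ./fallback */
#include <stdio.h>
#include <stdint.h>

#define NR_CPUS 8

/* Example system: 8 possible CPUs, booted with nohz_full=4-7. */
static const uint32_t cpu_possible_mask = 0xffu;
static const uint32_t nohz_full_mask    = 0xf0u;

/* Analogue of housekeeping_cpumask(HK_TYPE_TICK): all non-nohz_full CPUs. */
static uint32_t housekeeping_mask(void)
{
	return cpu_possible_mask & ~nohz_full_mask;
}

static void show(const char *name, uint32_t mask)
{
	printf("%-14s", name);
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			printf(" %d", cpu);
	putchar('\n');
}

int main(void)
{
	/* A task whose affinity mask went fully offline is reaffined to: */
	show("old fallback:", cpu_possible_mask);   /* may land on nohz_full CPUs */
	show("new fallback:", housekeeping_mask()); /* housekeeping CPUs only */
	return 0;
}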

Suggested-by: Michal Hocko <mhocko@suse.com>
Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Frederic Weisbecker <frederic@kernel.org>
arch/arm64/include/asm/cpufeature.h
arch/arm64/include/asm/mmu_context.h
arch/arm64/kernel/cpufeature.c
include/linux/mmu_context.h
kernel/sched/core.c

diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 8b4e5a3cd24c801e13a1339b7b748c513b12c6f4..cac5efc836c0a1318cba017a5c1c6ce7b78086f7 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -671,6 +671,7 @@ static inline bool supports_clearbhb(int scope)
 }
 
 const struct cpumask *system_32bit_el0_cpumask(void);
+const struct cpumask *fallback_32bit_el0_cpumask(void);
 DECLARE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);
 
 static inline bool system_supports_32bit_el0(void)
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index 48b3d9553b675c3c0b8bbe1b0030ed089afd3990..0dbe3b29049b742327ea391c13c1813bc8f312f8 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -271,18 +271,26 @@ switch_mm(struct mm_struct *prev, struct mm_struct *next,
 }
 
 static inline const struct cpumask *
-task_cpu_possible_mask(struct task_struct *p)
+__task_cpu_possible_mask(struct task_struct *p, const struct cpumask *mask)
 {
        if (!static_branch_unlikely(&arm64_mismatched_32bit_el0))
-               return cpu_possible_mask;
+               return mask;
 
        if (!is_compat_thread(task_thread_info(p)))
-               return cpu_possible_mask;
+               return mask;
 
        return system_32bit_el0_cpumask();
 }
+
+static inline const struct cpumask *
+task_cpu_possible_mask(struct task_struct *p)
+{
+       return __task_cpu_possible_mask(p, cpu_possible_mask);
+}
 #define task_cpu_possible_mask task_cpu_possible_mask
 
+const struct cpumask *task_cpu_fallback_mask(struct task_struct *p);
+
 void verify_cpu_asid_bits(void);
 void post_ttbr_update_workaround(void);
 
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 3c87659c14db86a001efc56a5c5e4ce04b4dfa73..a983e8660987b17f501ee5a49cc810e749466498 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -1642,6 +1642,11 @@ const struct cpumask *system_32bit_el0_cpumask(void)
        return cpu_possible_mask;
 }
 
+const struct cpumask *task_cpu_fallback_mask(struct task_struct *p)
+{
+       return __task_cpu_possible_mask(p, housekeeping_cpumask(HK_TYPE_TICK));
+}
+
 static int __init parse_32bit_el0_param(char *str)
 {
        allow_mismatched_32bit_el0 = true;
diff --git a/include/linux/mmu_context.h b/include/linux/mmu_context.h
index bbaec80c78c5055b348edb3d641f8480a5f9b15e..ac01dc4eb2cecd71ed39bbb65df912b4734e9169 100644
--- a/include/linux/mmu_context.h
+++ b/include/linux/mmu_context.h
@@ -24,6 +24,7 @@ static inline void leave_mm(void) { }
 #ifndef task_cpu_possible_mask
 # define task_cpu_possible_mask(p)     cpu_possible_mask
 # define task_cpu_possible(cpu, p)     true
+# define task_cpu_fallback_mask(p)     housekeeping_cpumask(HK_TYPE_TICK)
 #else
 # define task_cpu_possible(cpu, p)     cpumask_test_cpu((cpu), task_cpu_possible_mask(p))
 #endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 95e40895a51909c618f5ec82566b06c9197a2b38..233b50b0e123ee0e93b5bb85ea82eab3169ec4d6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -3534,7 +3534,7 @@ static int select_fallback_rq(int cpu, struct task_struct *p)
                         *
                         * More yuck to audit.
                         */
-                       do_set_cpus_allowed(p, task_cpu_possible_mask(p));
+                       do_set_cpus_allowed(p, task_cpu_fallback_mask(p));
                        state = fail;
                        break;
                case fail:
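
Taken together: the generic default now falls back to
housekeeping_cpumask(HK_TYPE_TICK), while the arm64 override reuses the
compat-task logic, so a 32-bit task on a mismatched system still ends up on a
32-bit-capable CPU (system_32bit_el0_cpumask()) rather than on the housekeeping
mask. A rough userspace model of that composition (made-up masks and names, not
kernel code):

/* Build: cc -o fallback_arm64 fallback_arm64.c && ./fallback_arm64 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define NR_CPUS 8

/* Example topology: CPUs 4-7 are nohz_full, only CPUs 0-1 run 32-bit EL0. */
static const uint32_t possible_mask     = 0xffu;
static const uint32_t housekeeping_mask = 0x0fu; /* HK_TYPE_TICK analogue */
static const uint32_t el0_32bit_mask    = 0x03u;
static const bool mismatched_32bit_el0  = true;

struct task { bool compat; };

/* Mirrors __task_cpu_possible_mask(): compat tasks on a mismatched system
 * are restricted to 32-bit-capable CPUs; everyone else keeps the mask that
 * was passed in. */
static uint32_t task_mask(const struct task *p, uint32_t mask)
{
	if (!mismatched_32bit_el0)
		return mask;
	if (!p->compat)
		return mask;
	return el0_32bit_mask;
}

/* Analogues of task_cpu_possible_mask() and task_cpu_fallback_mask():
 * same logic, seeded with different base masks. */
static uint32_t task_possible(const struct task *p) { return task_mask(p, possible_mask); }
static uint32_t task_fallback(const struct task *p) { return task_mask(p, housekeeping_mask); }

static void show(const char *what, uint32_t mask)
{
	printf("%-24s", what);
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask & (1u << cpu))
			printf(" %d", cpu);
	putchar('\n');
}

int main(void)
{
	struct task native = { .compat = false }, compat = { .compat = true };

	show("native task, possible:", task_possible(&native));
	show("native task, fallback:", task_fallback(&native));
	show("compat task, fallback:", task_fallback(&compat));
	return 0;
}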