git.ipfire.org Git - thirdparty/linux.git/commitdiff
sched_ext: Rename CFI stubs to names that are recognized by BPF
Author: Tejun Heo <tj@kernel.org>
Thu, 24 Oct 2024 16:58:09 +0000 (06:58 -1000)
Committer: Tejun Heo <tj@kernel.org>
Thu, 24 Oct 2024 16:58:09 +0000 (06:58 -1000)
CFI stubs can be used to tag arguments with __nullable (and possibly other
tags in the future) but for that to work the CFI stubs must have names that
are recognized by BPF. Rename them.

Signed-off-by: Tejun Heo <tj@kernel.org>
Acked-by: David Vernet <void@manifault.com>
Acked-by: Martin KaFai Lau <martin.lau@kernel.org>
Acked-by: Alexei Starovoitov <ast@kernel.org>
kernel/sched/ext.c

index d7ae816db6f222b978728dbad553334dea15341b..00a05578d75a86a7bf6079863a4e3ea87635ec8b 100644 (file)
@@ -5634,78 +5634,78 @@ static int bpf_scx_validate(void *kdata)
        return 0;
 }
 
-static s32 select_cpu_stub(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
-static void enqueue_stub(struct task_struct *p, u64 enq_flags) {}
-static void dequeue_stub(struct task_struct *p, u64 enq_flags) {}
-static void dispatch_stub(s32 prev_cpu, struct task_struct *p) {}
-static void tick_stub(struct task_struct *p) {}
-static void runnable_stub(struct task_struct *p, u64 enq_flags) {}
-static void running_stub(struct task_struct *p) {}
-static void stopping_stub(struct task_struct *p, bool runnable) {}
-static void quiescent_stub(struct task_struct *p, u64 deq_flags) {}
-static bool yield_stub(struct task_struct *from, struct task_struct *to) { return false; }
-static bool core_sched_before_stub(struct task_struct *a, struct task_struct *b) { return false; }
-static void set_weight_stub(struct task_struct *p, u32 weight) {}
-static void set_cpumask_stub(struct task_struct *p, const struct cpumask *mask) {}
-static void update_idle_stub(s32 cpu, bool idle) {}
-static void cpu_acquire_stub(s32 cpu, struct scx_cpu_acquire_args *args) {}
-static void cpu_release_stub(s32 cpu, struct scx_cpu_release_args *args) {}
-static s32 init_task_stub(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
-static void exit_task_stub(struct task_struct *p, struct scx_exit_task_args *args) {}
-static void enable_stub(struct task_struct *p) {}
-static void disable_stub(struct task_struct *p) {}
+static s32 sched_ext_ops__select_cpu(struct task_struct *p, s32 prev_cpu, u64 wake_flags) { return -EINVAL; }
+static void sched_ext_ops__enqueue(struct task_struct *p, u64 enq_flags) {}
+static void sched_ext_ops__dequeue(struct task_struct *p, u64 enq_flags) {}
+static void sched_ext_ops__dispatch(s32 prev_cpu, struct task_struct *p) {}
+static void sched_ext_ops__tick(struct task_struct *p) {}
+static void sched_ext_ops__runnable(struct task_struct *p, u64 enq_flags) {}
+static void sched_ext_ops__running(struct task_struct *p) {}
+static void sched_ext_ops__stopping(struct task_struct *p, bool runnable) {}
+static void sched_ext_ops__quiescent(struct task_struct *p, u64 deq_flags) {}
+static bool sched_ext_ops__yield(struct task_struct *from, struct task_struct *to) { return false; }
+static bool sched_ext_ops__core_sched_before(struct task_struct *a, struct task_struct *b) { return false; }
+static void sched_ext_ops__set_weight(struct task_struct *p, u32 weight) {}
+static void sched_ext_ops__set_cpumask(struct task_struct *p, const struct cpumask *mask) {}
+static void sched_ext_ops__update_idle(s32 cpu, bool idle) {}
+static void sched_ext_ops__cpu_acquire(s32 cpu, struct scx_cpu_acquire_args *args) {}
+static void sched_ext_ops__cpu_release(s32 cpu, struct scx_cpu_release_args *args) {}
+static s32 sched_ext_ops__init_task(struct task_struct *p, struct scx_init_task_args *args) { return -EINVAL; }
+static void sched_ext_ops__exit_task(struct task_struct *p, struct scx_exit_task_args *args) {}
+static void sched_ext_ops__enable(struct task_struct *p) {}
+static void sched_ext_ops__disable(struct task_struct *p) {}
 #ifdef CONFIG_EXT_GROUP_SCHED
-static s32 cgroup_init_stub(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
-static void cgroup_exit_stub(struct cgroup *cgrp) {}
-static s32 cgroup_prep_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
-static void cgroup_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
-static void cgroup_cancel_move_stub(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
-static void cgroup_set_weight_stub(struct cgroup *cgrp, u32 weight) {}
+static s32 sched_ext_ops__cgroup_init(struct cgroup *cgrp, struct scx_cgroup_init_args *args) { return -EINVAL; }
+static void sched_ext_ops__cgroup_exit(struct cgroup *cgrp) {}
+static s32 sched_ext_ops__cgroup_prep_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) { return -EINVAL; }
+static void sched_ext_ops__cgroup_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
+static void sched_ext_ops__cgroup_cancel_move(struct task_struct *p, struct cgroup *from, struct cgroup *to) {}
+static void sched_ext_ops__cgroup_set_weight(struct cgroup *cgrp, u32 weight) {}
 #endif
-static void cpu_online_stub(s32 cpu) {}
-static void cpu_offline_stub(s32 cpu) {}
-static s32 init_stub(void) { return -EINVAL; }
-static void exit_stub(struct scx_exit_info *info) {}
-static void dump_stub(struct scx_dump_ctx *ctx) {}
-static void dump_cpu_stub(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
-static void dump_task_stub(struct scx_dump_ctx *ctx, struct task_struct *p) {}
+static void sched_ext_ops__cpu_online(s32 cpu) {}
+static void sched_ext_ops__cpu_offline(s32 cpu) {}
+static s32 sched_ext_ops__init(void) { return -EINVAL; }
+static void sched_ext_ops__exit(struct scx_exit_info *info) {}
+static void sched_ext_ops__dump(struct scx_dump_ctx *ctx) {}
+static void sched_ext_ops__dump_cpu(struct scx_dump_ctx *ctx, s32 cpu, bool idle) {}
+static void sched_ext_ops__dump_task(struct scx_dump_ctx *ctx, struct task_struct *p) {}
 
 static struct sched_ext_ops __bpf_ops_sched_ext_ops = {
-       .select_cpu = select_cpu_stub,
-       .enqueue = enqueue_stub,
-       .dequeue = dequeue_stub,
-       .dispatch = dispatch_stub,
-       .tick = tick_stub,
-       .runnable = runnable_stub,
-       .running = running_stub,
-       .stopping = stopping_stub,
-       .quiescent = quiescent_stub,
-       .yield = yield_stub,
-       .core_sched_before = core_sched_before_stub,
-       .set_weight = set_weight_stub,
-       .set_cpumask = set_cpumask_stub,
-       .update_idle = update_idle_stub,
-       .cpu_acquire = cpu_acquire_stub,
-       .cpu_release = cpu_release_stub,
-       .init_task = init_task_stub,
-       .exit_task = exit_task_stub,
-       .enable = enable_stub,
-       .disable = disable_stub,
+       .select_cpu             = sched_ext_ops__select_cpu,
+       .enqueue                = sched_ext_ops__enqueue,
+       .dequeue                = sched_ext_ops__dequeue,
+       .dispatch               = sched_ext_ops__dispatch,
+       .tick                   = sched_ext_ops__tick,
+       .runnable               = sched_ext_ops__runnable,
+       .running                = sched_ext_ops__running,
+       .stopping               = sched_ext_ops__stopping,
+       .quiescent              = sched_ext_ops__quiescent,
+       .yield                  = sched_ext_ops__yield,
+       .core_sched_before      = sched_ext_ops__core_sched_before,
+       .set_weight             = sched_ext_ops__set_weight,
+       .set_cpumask            = sched_ext_ops__set_cpumask,
+       .update_idle            = sched_ext_ops__update_idle,
+       .cpu_acquire            = sched_ext_ops__cpu_acquire,
+       .cpu_release            = sched_ext_ops__cpu_release,
+       .init_task              = sched_ext_ops__init_task,
+       .exit_task              = sched_ext_ops__exit_task,
+       .enable                 = sched_ext_ops__enable,
+       .disable                = sched_ext_ops__disable,
 #ifdef CONFIG_EXT_GROUP_SCHED
-       .cgroup_init = cgroup_init_stub,
-       .cgroup_exit = cgroup_exit_stub,
-       .cgroup_prep_move = cgroup_prep_move_stub,
-       .cgroup_move = cgroup_move_stub,
-       .cgroup_cancel_move = cgroup_cancel_move_stub,
-       .cgroup_set_weight = cgroup_set_weight_stub,
+       .cgroup_init            = sched_ext_ops__cgroup_init,
+       .cgroup_exit            = sched_ext_ops__cgroup_exit,
+       .cgroup_prep_move       = sched_ext_ops__cgroup_prep_move,
+       .cgroup_move            = sched_ext_ops__cgroup_move,
+       .cgroup_cancel_move     = sched_ext_ops__cgroup_cancel_move,
+       .cgroup_set_weight      = sched_ext_ops__cgroup_set_weight,
 #endif
-       .cpu_online = cpu_online_stub,
-       .cpu_offline = cpu_offline_stub,
-       .init = init_stub,
-       .exit = exit_stub,
-       .dump = dump_stub,
-       .dump_cpu = dump_cpu_stub,
-       .dump_task = dump_task_stub,
+       .cpu_online             = sched_ext_ops__cpu_online,
+       .cpu_offline            = sched_ext_ops__cpu_offline,
+       .init                   = sched_ext_ops__init,
+       .exit                   = sched_ext_ops__exit,
+       .dump                   = sched_ext_ops__dump,
+       .dump_cpu               = sched_ext_ops__dump_cpu,
+       .dump_task              = sched_ext_ops__dump_task,
 };
 
 static struct bpf_struct_ops bpf_sched_ext_ops = {