bpf: Replace get_next_cpu() with cpumask_next_wrap()
author	Fushuai Wang <wangfushuai@baidu.com>
Mon, 18 Aug 2025 03:23:44 +0000 (11:23 +0800)
committer	Daniel Borkmann <daniel@iogearbox.net>
Mon, 18 Aug 2025 13:11:02 +0000 (15:11 +0200)
The get_next_cpu() function was used in only one place, to find the next
possible CPU with wrap-around; it can be replaced by cpumask_next_wrap().

Signed-off-by: Fushuai Wang <wangfushuai@baidu.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Acked-by: Daniel Borkmann <daniel@iogearbox.net>
Link: https://lore.kernel.org/bpf/20250818032344.23229-1-wangfushuai@baidu.com
kernel/bpf/bpf_lru_list.c

index 2d6e1c98d8adc34d91a1f23206bdcc0bb5b526e3..e7a2fc60523f6c01e1d5996cb9cd98aa3f31cbba 100644 (file)
 #define LOCAL_PENDING_LIST_IDX LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_PENDING)
 #define IS_LOCAL_LIST_TYPE(t)  ((t) >= BPF_LOCAL_LIST_T_OFFSET)
 
-static int get_next_cpu(int cpu)
-{
-       cpu = cpumask_next(cpu, cpu_possible_mask);
-       if (cpu >= nr_cpu_ids)
-               cpu = cpumask_first(cpu_possible_mask);
-       return cpu;
-}
-
 /* Local list helpers */
 static struct list_head *local_free_list(struct bpf_lru_locallist *loc_l)
 {
@@ -482,7 +474,7 @@ static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
 
                raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags);
 
-               steal = get_next_cpu(steal);
+               steal = cpumask_next_wrap(steal, cpu_possible_mask);
        } while (!node && steal != first_steal);
 
        loc_l->next_steal = steal;
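
For context, both the removed helper and its replacement implement wrap-around
iteration over cpu_possible_mask: take the next set bit after the current CPU,
and fall back to the first set bit when the end of the mask is reached. The
standalone userspace sketch below is illustrative only and is not the kernel
API: next_cpu(), first_cpu(), next_cpu_wrap() and NR_CPU_IDS are made-up
stand-ins for cpumask_next(), cpumask_first(), cpumask_next_wrap() and
nr_cpu_ids, operating on a plain bitmask instead of a struct cpumask.

	#include <stdio.h>

	#define NR_CPU_IDS 8

	/* Next set bit strictly after 'cpu'; NR_CPU_IDS if none remain. */
	static int next_cpu(int cpu, unsigned int mask)
	{
		for (int i = cpu + 1; i < NR_CPU_IDS; i++)
			if (mask & (1u << i))
				return i;
		return NR_CPU_IDS;
	}

	/* First set bit; NR_CPU_IDS if the mask is empty. */
	static int first_cpu(unsigned int mask)
	{
		return next_cpu(-1, mask);
	}

	/* Wrap-around variant, mirroring what the removed get_next_cpu()
	 * open-coded and what cpumask_next_wrap() provides in one call.
	 */
	static int next_cpu_wrap(int cpu, unsigned int mask)
	{
		int next = next_cpu(cpu, mask);

		if (next >= NR_CPU_IDS)
			next = first_cpu(mask);
		return next;
	}

	int main(void)
	{
		unsigned int possible = 0x2d;	/* bits 0, 2, 3, 5 "possible" */
		int cpu = first_cpu(possible);

		/* Walk the mask twice to show the wrap at the end. */
		for (int i = 0; i < 8; i++) {
			printf("%d ", cpu);
			cpu = next_cpu_wrap(cpu, possible);
		}
		printf("\n");	/* prints: 0 2 3 5 0 2 3 5 */
		return 0;
	}

Because the two-argument cpumask_next_wrap() used in the hunk above already
wraps back to the first possible CPU, the explicit "if past nr_cpu_ids, take
cpumask_first()" branch that get_next_cpu() carried is no longer needed.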