From: Tejun Heo
Date: Sun, 8 Mar 2026 02:45:15 +0000 (-1000)
Subject: tools/sched_ext/include: Sync bpf_arena_common.bpf.h with scx repo
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=9c6437f7c2e848aea2469df3396f8365d06adbb0;p=thirdparty%2Fkernel%2Flinux.git

tools/sched_ext/include: Sync bpf_arena_common.bpf.h with scx repo

Sync the following changes from the scx repo:

- Guard __arena define with #ifndef to avoid redefinition when the
  attribute is already defined by another header.

- Add bpf_arena_reserve_pages() and bpf_arena_mapping_nr_pages() ksym
  declarations.

- Rename TEST to SCX_BPF_UNITTEST to avoid collision with generic TEST
  macros in other projects.

Signed-off-by: Tejun Heo
Acked-by: Andrea Righi
---

diff --git a/tools/sched_ext/include/scx/bpf_arena_common.bpf.h b/tools/sched_ext/include/scx/bpf_arena_common.bpf.h
index 4366fb3c91ce8..2043d66940eab 100644
--- a/tools/sched_ext/include/scx/bpf_arena_common.bpf.h
+++ b/tools/sched_ext/include/scx/bpf_arena_common.bpf.h
@@ -15,7 +15,9 @@
 #endif
 
 #if defined(__BPF_FEATURE_ADDR_SPACE_CAST) && !defined(BPF_ARENA_FORCE_ASM)
+#ifndef __arena
 #define __arena __attribute__((address_space(1)))
+#endif
 #define __arena_global __attribute__((address_space(1)))
 #define cast_kern(ptr) /* nop for bpf prog. emitted by LLVM */
 #define cast_user(ptr) /* nop for bpf prog. emitted by LLVM */
@@ -81,12 +83,13 @@
 void __arena* bpf_arena_alloc_pages(void *map, void __arena *addr, __u32 page_cnt,
 				    int node_id, __u64 flags) __ksym __weak;
 void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
+int bpf_arena_reserve_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym __weak;
 
 /*
  * Note that cond_break can only be portably used in the body of a breakable
  * construct, whereas can_loop can be used anywhere.
  */
-#ifdef TEST
+#ifdef SCX_BPF_UNITTEST
 #define can_loop true
 #define __cond_break(expr) expr
 #else
@@ -165,7 +168,7 @@ void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym _
 	})
 #endif /* __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__ */
 #endif /* __BPF_FEATURE_MAY_GOTO */
-#endif /* TEST */
+#endif /* SCX_BPF_UNITTEST */
 
 #define cond_break __cond_break(break)
 #define cond_break_label(label) __cond_break(goto label)
@@ -173,3 +176,4 @@ void bpf_arena_free_pages(void *map, void __arena *ptr, __u32 page_cnt) __ksym _
 
 void bpf_preempt_disable(void) __weak __ksym;
 void bpf_preempt_enable(void) __weak __ksym;
+ssize_t bpf_arena_mapping_nr_pages(void *p__map) __weak __ksym;