From: Amaury Denoyelle
Date: Tue, 27 Apr 2021 08:46:36 +0000 (+0200)
Subject: BUG/MAJOR: fix build on musl with cpu_set_t support
X-Git-Tag: v2.4-dev18~41
X-Git-Url: http://git.ipfire.org/cgi-bin/gitweb.cgi?a=commitdiff_plain;h=fc6ac53dca8391ba9c32bc716fb61267b475ba71;p=thirdparty%2Fhaproxy.git

BUG/MAJOR: fix build on musl with cpu_set_t support

Move the cpu_map structure out of the global struct and into a global
variable defined in the cpuset.c compilation unit. This makes it possible
to reorganize the includes without having to define _GNU_SOURCE everywhere
for cpu_set_t support.

This fixes compilation with musl libc, most notably used for the
Alpine-based Docker image.

This fixes GitHub issue #1235.

No need to backport as this feature is new in the current 2.4-dev.
---
diff --git a/include/haproxy/cpuset-t.h b/include/haproxy/cpuset-t.h
index 6bc29af413..36b4a5165c 100644
--- a/include/haproxy/cpuset-t.h
+++ b/include/haproxy/cpuset-t.h
@@ -12,6 +12,8 @@
 #endif
 #endif
 
+#include
+
 #if defined(__linux__) || defined(__DragonFly__)
 # define CPUSET_REPR cpu_set_t
 
@@ -37,4 +39,10 @@ struct hap_cpuset {
 	CPUSET_REPR cpuset;
 };
 
+struct cpu_map {
+	struct hap_cpuset proc[MAX_PROCS];     /* list of CPU masks for the 32/64 first processes */
+	struct hap_cpuset proc_t1[MAX_PROCS];  /* list of CPU masks for the 1st thread of each process */
+	struct hap_cpuset thread[MAX_THREADS]; /* list of CPU masks for the 32/64 first threads of the 1st process */
+};
+
 #endif /* _HAPROXY_CPUSET_T_H */
diff --git a/include/haproxy/cpuset.h b/include/haproxy/cpuset.h
index 6e16271316..d29c3560bc 100644
--- a/include/haproxy/cpuset.h
+++ b/include/haproxy/cpuset.h
@@ -3,6 +3,8 @@
 
 #include
 
+extern struct cpu_map cpu_map;
+
 /* Unset all indexes in <set>.
  */
 void ha_cpuset_zero(struct hap_cpuset *set);
diff --git a/include/haproxy/global-t.h b/include/haproxy/global-t.h
index f2cf5ce551..bea97dd7ac 100644
--- a/include/haproxy/global-t.h
+++ b/include/haproxy/global-t.h
@@ -24,9 +24,6 @@
 #include
 #include
 
-#ifdef USE_CPU_AFFINITY
-#include
-#endif
 #include
 #include
 
@@ -161,13 +158,6 @@ struct global {
 		} ux;
 	} unix_bind;
 	struct proxy *cli_fe;  /* the frontend holding the stats settings */
-#ifdef USE_CPU_AFFINITY
-	struct {
-		struct hap_cpuset proc[MAX_PROCS];     /* list of CPU masks for the 32/64 first processes */
-		struct hap_cpuset proc_t1[MAX_PROCS];  /* list of CPU masks for the 1st thread of each process */
-		struct hap_cpuset thread[MAX_THREADS]; /* list of CPU masks for the 32/64 first threads of the 1st process */
-	} cpu_map;
-#endif
 	int numa_cpu_mapping;
 	/* The info above is config stuff, it doesn't change during the process' life */
 	/* A number of the elements below are updated by all threads in real time and
diff --git a/src/cfgparse-global.c b/src/cfgparse-global.c
index c653fb49d6..384ad3c960 100644
--- a/src/cfgparse-global.c
+++ b/src/cfgparse-global.c
@@ -1,3 +1,4 @@
+#define _GNU_SOURCE  /* for cpu_set_t from haproxy/cpuset.h */
 #include
 #include
 #include
@@ -1109,12 +1110,12 @@ int cfg_parse_global(const char *file, int linenum, char **args, int kwm)
 					continue;
 
 				if (!autoinc)
-					ha_cpuset_assign(&global.cpu_map.proc[i], &cpus);
+					ha_cpuset_assign(&cpu_map.proc[i], &cpus);
 				else {
-					ha_cpuset_zero(&global.cpu_map.proc[i]);
+					ha_cpuset_zero(&cpu_map.proc[i]);
 					n = ha_cpuset_ffs(&cpus_copy) - 1;
 					ha_cpuset_clr(&cpus_copy, n);
-					ha_cpuset_set(&global.cpu_map.proc[i], n);
+					ha_cpuset_set(&cpu_map.proc[i], n);
 				}
 			}
 		} else {
@@ -1135,8 +1136,8 @@ int cfg_parse_global(const char *file, int linenum, char **args, int kwm)
 				/* For first process, thread[0] is used.
 				 * Use proc_t1[N] for all others */
-				dst = i ? &global.cpu_map.proc_t1[i] :
-				          &global.cpu_map.thread[0];
+				dst = i ? &cpu_map.proc_t1[i] :
+				          &cpu_map.thread[0];
 
 				if (!autoinc) {
 					ha_cpuset_assign(dst, &cpus);
@@ -1159,12 +1160,12 @@ int cfg_parse_global(const char *file, int linenum, char **args, int kwm)
 						continue;
 
 					if (!autoinc)
-						ha_cpuset_assign(&global.cpu_map.thread[j], &cpus);
+						ha_cpuset_assign(&cpu_map.thread[j], &cpus);
 					else {
-						ha_cpuset_zero(&global.cpu_map.thread[j]);
+						ha_cpuset_zero(&cpu_map.thread[j]);
 						n = ha_cpuset_ffs(&cpus_copy) - 1;
 						ha_cpuset_clr(&cpus_copy, n);
-						ha_cpuset_set(&global.cpu_map.thread[j], n);
+						ha_cpuset_set(&cpu_map.thread[j], n);
 					}
 				}
 			}
diff --git a/src/cpuset.c b/src/cpuset.c
index e4310b696d..46e572dab7 100644
--- a/src/cpuset.c
+++ b/src/cpuset.c
@@ -5,6 +5,8 @@
 #include
 #include
 
+struct cpu_map cpu_map;
+
 void ha_cpuset_zero(struct hap_cpuset *set)
 {
 #if defined(CPUSET_USE_CPUSET) || defined(CPUSET_USE_FREEBSD_CPUSET)
diff --git a/src/haproxy.c b/src/haproxy.c
index cd0edcf097..4dcf8375e7 100644
--- a/src/haproxy.c
+++ b/src/haproxy.c
@@ -1584,9 +1584,9 @@ static void init(int argc, char **argv)
 	{
 		int i;
 		for (i = 0; i < MAX_PROCS; ++i) {
-			ha_cpuset_zero(&global.cpu_map.proc[i]);
-			ha_cpuset_zero(&global.cpu_map.proc_t1[i]);
-			ha_cpuset_zero(&global.cpu_map.thread[i]);
+			ha_cpuset_zero(&cpu_map.proc[i]);
+			ha_cpuset_zero(&cpu_map.proc_t1[i]);
+			ha_cpuset_zero(&cpu_map.thread[i]);
 		}
 	}
 #endif
@@ -2940,13 +2940,13 @@ int main(int argc, char **argv)
 #ifdef USE_CPU_AFFINITY
 		if (proc < global.nbproc &&  /* child */
 		    proc < MAX_PROCS &&      /* only the first 32/64 processes may be pinned */
-		    ha_cpuset_count(&global.cpu_map.proc[proc])) {  /* only do this if the process has a CPU map */
+		    ha_cpuset_count(&cpu_map.proc[proc])) {  /* only do this if the process has a CPU map */
 #ifdef __FreeBSD__
-			struct hap_cpuset *set = &global.cpu_map.proc[proc];
+			struct hap_cpuset *set = &cpu_map.proc[proc];
 			ret = cpuset_setaffinity(CPU_LEVEL_WHICH, CPU_WHICH_PID, -1, sizeof(set->cpuset), &set->cpuset);
 #elif defined(__linux__) || defined(__DragonFly__)
-			struct hap_cpuset *set = &global.cpu_map.proc[proc];
+			struct hap_cpuset *set = &cpu_map.proc[proc];
 			sched_setaffinity(0, sizeof(set->cpuset), &set->cpuset);
 #endif
 		}
@@ -3184,17 +3184,17 @@ int main(int argc, char **argv)
 
 		/* If on multiprocess, use proc_t1 except for the first process.
 		 */
 		if ((relative_pid - 1) > 0)
-			global.cpu_map.thread[0] = global.cpu_map.proc_t1[relative_pid-1];
+			cpu_map.thread[0] = cpu_map.proc_t1[relative_pid-1];
 
 		for (i = 0; i < global.nbthread; i++) {
-			if (ha_cpuset_count(&global.cpu_map.proc[relative_pid-1]))
-				ha_cpuset_and(&global.cpu_map.thread[i], &global.cpu_map.proc[relative_pid-1]);
+			if (ha_cpuset_count(&cpu_map.proc[relative_pid-1]))
+				ha_cpuset_and(&cpu_map.thread[i], &cpu_map.proc[relative_pid-1]);
 
 			if (i < MAX_THREADS &&  /* only the first 32/64 threads may be pinned */
-			    ha_cpuset_count(&global.cpu_map.thread[i])) {/* only do this if the thread has a THREAD map */
+			    ha_cpuset_count(&cpu_map.thread[i])) {/* only do this if the thread has a THREAD map */
 #if defined(__APPLE__)
 				int j;
-				unsigned long cpu_map = global.cpu_map.thread[i].cpuset;
+				unsigned long cpu_map = cpu_map.thread[i].cpuset;
 
 				while ((j = ffsl(cpu_map)) > 0) {
 					thread_affinity_policy_data_t cpu_set = { j - 1 };
@@ -3203,7 +3203,7 @@ int main(int argc, char **argv)
 					cpu_map &= ~(1UL << (j - 1));
 				}
 #else
-				struct hap_cpuset *set = &global.cpu_map.thread[i];
+				struct hap_cpuset *set = &cpu_map.thread[i];
 				pthread_setaffinity_np(ha_thread_info[i].pthread, sizeof(set->cpuset), &set->cpuset);
 #endif
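
Not part of the patch above: a minimal standalone sketch of the libc constraint the commit works around, assuming a Linux toolchain with either glibc or musl. cpu_set_t, the CPU_ZERO()/CPU_SET() macros and sched_setaffinity() are GNU extensions, so _GNU_SOURCE has to be defined before <sched.h> is pulled in, directly or indirectly. While the cpu_map structure lived inside the widely included global struct, every compilation unit including the global definitions inherited that requirement, which is what broke the musl build. The file name and program below are purely illustrative.

/* pin_demo.c -- hypothetical example, not HAProxy code.
 * Shows why _GNU_SOURCE must precede <sched.h> for cpu_set_t:
 * without it, musl (and glibc) do not expose the type or the macros.
 */
#define _GNU_SOURCE            /* must come before any libc header */
#include <sched.h>             /* cpu_set_t, CPU_ZERO, CPU_SET, sched_setaffinity */
#include <stdio.h>

int main(void)
{
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);          /* request CPU 0 only */

	/* pid 0 means the calling process/thread */
	if (sched_setaffinity(0, sizeof(set), &set) != 0) {
		perror("sched_setaffinity");
		return 1;
	}
	printf("pinned to CPU 0\n");
	return 0;
}

Building this with a plain "cc pin_demo.c" succeeds on both glibc and an Alpine/musl toolchain; dropping the _GNU_SOURCE line reproduces the kind of "unknown type name 'cpu_set_t'" error the commit message alludes to. That is why the patch confines _GNU_SOURCE to the few .c files that actually manipulate the type (such as cfgparse-global.c) and has cpuset.h expose only "extern struct cpu_map cpu_map;", defined once in cpuset.c, so other includers of the global struct are left untouched.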