return TAKE_PTR(str) ?: strdup("");
}
-cpu_set_t* cpu_set_malloc(unsigned *ncpus) {
- cpu_set_t *c;
- unsigned n = 1024;
-
- /* Allocates the cpuset in the right size */
-
- for (;;) {
- c = CPU_ALLOC(n);
- if (!c)
- return NULL;
-
- if (sched_getaffinity(0, CPU_ALLOC_SIZE(n), c) >= 0) {
- CPU_ZERO_S(CPU_ALLOC_SIZE(n), c);
-
- if (ncpus)
- *ncpus = n;
-
- return c;
- }
-
- CPU_FREE(c);
-
- if (errno != EINVAL)
- return NULL;
-
- n *= 2;
- }
-}
-
-static int cpu_set_realloc(CPUSet *cpu_set, unsigned ncpus) {
+int cpu_set_realloc(CPUSet *cpu_set, unsigned ncpus) {
size_t need;
assert(cpu_set);
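
The removed cpu_set_malloc() was built on glibc's dynamically sized CPU-set API: allocate a mask for n CPUs, probe it with sched_getaffinity(), and double n for as long as the kernel rejects the too-small buffer with EINVAL. Below is a minimal standalone sketch of that grow-and-retry pattern, assuming only glibc's <sched.h> macros; unlike the removed helper it returns the queried mask instead of a zeroed set, and the function name query_affinity() is illustrative, not part of this patch.

/* Illustrative only: the grow-and-retry sizing pattern the removed
 * cpu_set_malloc() relied on, written without systemd's helpers. */
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdio.h>

static cpu_set_t* query_affinity(unsigned *ret_ncpus) {
        unsigned n = 1024;

        for (;;) {
                cpu_set_t *c = CPU_ALLOC(n);
                if (!c)
                        return NULL;

                /* Success means the mask was large enough for this kernel. */
                if (sched_getaffinity(0, CPU_ALLOC_SIZE(n), c) >= 0) {
                        if (ret_ncpus)
                                *ret_ncpus = n;
                        return c;
                }

                CPU_FREE(c);

                /* EINVAL signals "buffer too small", so retry with twice the CPUs. */
                if (errno != EINVAL)
                        return NULL;

                n *= 2;
        }
}

int main(void) {
        unsigned n;
        cpu_set_t *c = query_affinity(&n);
        if (!c)
                return 1;

        printf("CPU 0 usable: %s\n", CPU_ISSET_S(0, CPU_ALLOC_SIZE(n), c) ? "yes" : "no");
        CPU_FREE(c);
        return 0;
}
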
DEFINE_TRIVIAL_CLEANUP_FUNC(cpu_set_t*, CPU_FREE);
#define _cleanup_cpu_free_ _cleanup_(CPU_FREEp)
-cpu_set_t* cpu_set_malloc(unsigned *ncpus);
-
/* This wraps the libc interface with a variable to keep the allocated size. */
typedef struct CPUSet {
cpu_set_t *set;
int cpu_set_add_all(CPUSet *a, const CPUSet *b);
char* cpu_set_to_string(const CPUSet *a);
+int cpu_set_realloc(CPUSet *cpu_set, unsigned ncpus);
int parse_cpu_set_full(
const char *rvalue,
CPUSet *cpu_set,
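
With the cpu_set_malloc() prototype dropped from the header and cpu_set_realloc() exported instead, callers size a CPUSet explicitly; the struct comment above says the wrapper keeps the allocated size next to the cpu_set_t*. The patch only shows fragments of the struct and the function, so what follows is a hedged standalone sketch of the same idea, not the systemd implementation: ExampleCPUSet, example_cpu_set_realloc() and example_cpu_set_reset() are made-up names, and "allocated is in bytes" is an assumption inferred from the test below passing c.allocated straight to sched_getaffinity().

/* Sketch of a CPUSet-style wrapper under the assumptions stated above. */
#define _GNU_SOURCE
#include <errno.h>
#include <sched.h>
#include <stdlib.h>
#include <string.h>

typedef struct ExampleCPUSet {
        cpu_set_t *set;
        size_t allocated;     /* size of *set in bytes */
} ExampleCPUSet;

static int example_cpu_set_realloc(ExampleCPUSet *s, unsigned ncpus) {
        size_t need = CPU_ALLOC_SIZE(ncpus);

        if (need <= s->allocated)
                return 0;     /* already large enough */

        cpu_set_t *t = realloc(s->set, need);
        if (!t)
                return -ENOMEM;

        /* Zero only the newly grown tail so previously set bits survive. */
        memset((char*) t + s->allocated, 0, need - s->allocated);

        s->set = t;
        s->allocated = need;
        return 0;
}

static void example_cpu_set_reset(ExampleCPUSet *s) {
        free(s->set);
        *s = (ExampleCPUSet) {};
}
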
}
static void test_exec_cpuaffinity(Manager *m) {
- _cleanup_cpu_free_ cpu_set_t *c = NULL;
- unsigned n;
+ _cleanup_(cpu_set_reset) CPUSet c = {};
- assert_se(c = cpu_set_malloc(&n));
- assert_se(sched_getaffinity(0, CPU_ALLOC_SIZE(n), c) >= 0);
+ assert_se(cpu_set_realloc(&c, 8192) >= 0); /* just allocate the maximum possible size */
+ assert_se(sched_getaffinity(0, c.allocated, c.set) >= 0);
- if (CPU_ISSET_S(0, CPU_ALLOC_SIZE(n), c) == 0) {
+ if (!CPU_ISSET_S(0, c.allocated, c.set)) {
log_notice("Cannot use CPU 0, skipping %s", __func__);
return;
}
test(__func__, m, "exec-cpuaffinity1.service", 0, CLD_EXITED);
test(__func__, m, "exec-cpuaffinity2.service", 0, CLD_EXITED);
- if (CPU_ISSET_S(1, CPU_ALLOC_SIZE(n), c) == 0 ||
- CPU_ISSET_S(2, CPU_ALLOC_SIZE(n), c) == 0) {
+ if (!CPU_ISSET_S(1, c.allocated, c.set) ||
+ !CPU_ISSET_S(2, c.allocated, c.set)) {
log_notice("Cannot use CPU 1 or 2, skipping remaining tests in %s", __func__);
return;
}
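
Outside the systemd test harness, the same "is this CPU actually in our affinity mask?" guard can be written with plain glibc calls. A short sketch, using a fixed 1024-CPU mask for brevity (a machine with more CPUs would need the grow-and-retry pattern shown earlier):

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void) {
        cpu_set_t *c = CPU_ALLOC(1024);
        size_t sz = CPU_ALLOC_SIZE(1024);

        if (!c || sched_getaffinity(0, sz, c) < 0) {
                CPU_FREE(c);
                return 1;
        }

        if (!CPU_ISSET_S(1, sz, c) || !CPU_ISSET_S(2, sz, c))
                printf("CPU 1 or 2 not available, skipping pinning checks\n");
        else
                printf("CPUs 1 and 2 available\n");

        CPU_FREE(c);
        return 0;
}
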