/* SPDX-License-Identifier: LGPL-2.1+ */
#pragma once

#include <assert.h>
#include <errno.h>
#include <inttypes.h>
#include <stdbool.h>
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/types.h>

#define _printf_(a, b) __attribute__((__format__(printf, a, b)))
#ifdef __clang__
#  define _alloc_(...)
#else
#  define _alloc_(...) __attribute__((__alloc_size__(__VA_ARGS__)))
#endif
#define _sentinel_ __attribute__((__sentinel__))
#define _section_(x) __attribute__((__section__(x)))
#define _used_ __attribute__((__used__))
#define _unused_ __attribute__((__unused__))
#define _destructor_ __attribute__((__destructor__))
#define _pure_ __attribute__((__pure__))
#define _const_ __attribute__((__const__))
#define _deprecated_ __attribute__((__deprecated__))
#define _packed_ __attribute__((__packed__))
#define _malloc_ __attribute__((__malloc__))
#define _weak_ __attribute__((__weak__))
#define _likely_(x) (__builtin_expect(!!(x), 1))
#define _unlikely_(x) (__builtin_expect(!!(x), 0))
#define _public_ __attribute__((__visibility__("default")))
#define _hidden_ __attribute__((__visibility__("hidden")))
#define _weakref_(x) __attribute__((__weakref__(#x)))
#define _align_(x) __attribute__((__aligned__(x)))
#define _alignas_(x) __attribute__((__aligned__(__alignof(x))))
#define _alignptr_ __attribute__((__aligned__(sizeof(void*))))
#define _cleanup_(x) __attribute__((__cleanup__(x)))
#if __GNUC__ >= 7
#define _fallthrough_ __attribute__((__fallthrough__))
#else
#define _fallthrough_
#endif
/* Define C11 noreturn without <stdnoreturn.h> and even on older gcc
 * compiler versions */
#ifndef _noreturn_
#if __STDC_VERSION__ >= 201112L
#define _noreturn_ _Noreturn
#else
#define _noreturn_ __attribute__((__noreturn__))
#endif
#endif

#if !defined(HAS_FEATURE_MEMORY_SANITIZER)
#  if defined(__has_feature)
#    if __has_feature(memory_sanitizer)
#      define HAS_FEATURE_MEMORY_SANITIZER 1
#    endif
#  endif
#  if !defined(HAS_FEATURE_MEMORY_SANITIZER)
#    define HAS_FEATURE_MEMORY_SANITIZER 0
#  endif
#endif

#if !defined(HAS_FEATURE_ADDRESS_SANITIZER)
#  ifdef __SANITIZE_ADDRESS__
#    define HAS_FEATURE_ADDRESS_SANITIZER 1
#  elif defined(__has_feature)
#    if __has_feature(address_sanitizer)
#      define HAS_FEATURE_ADDRESS_SANITIZER 1
#    endif
#  endif
#  if !defined(HAS_FEATURE_ADDRESS_SANITIZER)
#    define HAS_FEATURE_ADDRESS_SANITIZER 0
#  endif
#endif

/* Note: on GCC "no_sanitize_address" is a function attribute only, on llvm it may also be applied to global
 * variables. We define a specific macro which knows this. Note that on GCC we don't need this decorator so much, since
 * our primary use case for this attribute is registration structures placed in named ELF sections which shall not be
 * padded, but GCC doesn't pad those anyway if AddressSanitizer is enabled. */
#if HAS_FEATURE_ADDRESS_SANITIZER && defined(__clang__)
#define _variable_no_sanitize_address_ __attribute__((__no_sanitize_address__))
#else
#define _variable_no_sanitize_address_
#endif

/* Apparently there's no has_feature() call defined to check for ubsan, hence let's define this
 * unconditionally on llvm */
#if defined(__clang__)
#define _function_no_sanitize_float_cast_overflow_ __attribute__((no_sanitize("float-cast-overflow")))
#else
#define _function_no_sanitize_float_cast_overflow_
#endif

/* Temporarily disable some warnings */
#define DISABLE_WARNING_FORMAT_NONLITERAL                               \
        _Pragma("GCC diagnostic push");                                 \
        _Pragma("GCC diagnostic ignored \"-Wformat-nonliteral\"")

#define DISABLE_WARNING_MISSING_PROTOTYPES                              \
        _Pragma("GCC diagnostic push");                                 \
        _Pragma("GCC diagnostic ignored \"-Wmissing-prototypes\"")

#define DISABLE_WARNING_NONNULL                                         \
        _Pragma("GCC diagnostic push");                                 \
        _Pragma("GCC diagnostic ignored \"-Wnonnull\"")

#define DISABLE_WARNING_SHADOW                                          \
        _Pragma("GCC diagnostic push");                                 \
        _Pragma("GCC diagnostic ignored \"-Wshadow\"")

#define DISABLE_WARNING_INCOMPATIBLE_POINTER_TYPES                      \
        _Pragma("GCC diagnostic push");                                 \
        _Pragma("GCC diagnostic ignored \"-Wincompatible-pointer-types\"")

#if HAVE_WSTRINGOP_TRUNCATION
#  define DISABLE_WARNING_STRINGOP_TRUNCATION                           \
        _Pragma("GCC diagnostic push");                                 \
        _Pragma("GCC diagnostic ignored \"-Wstringop-truncation\"")
#else
#  define DISABLE_WARNING_STRINGOP_TRUNCATION                           \
        _Pragma("GCC diagnostic push")
#endif

#define DISABLE_WARNING_FLOAT_EQUAL                                     \
        _Pragma("GCC diagnostic push");                                 \
        _Pragma("GCC diagnostic ignored \"-Wfloat-equal\"")

#define DISABLE_WARNING_TYPE_LIMITS                                     \
        _Pragma("GCC diagnostic push");                                 \
        _Pragma("GCC diagnostic ignored \"-Wtype-limits\"")

#define REENABLE_WARNING                                                \
        _Pragma("GCC diagnostic pop")

/* automake test harness */
#define EXIT_TEST_SKIP 77

#define XSTRINGIFY(x) #x
#define STRINGIFY(x) XSTRINGIFY(x)

#define XCONCATENATE(x, y) x ## y
#define CONCATENATE(x, y) XCONCATENATE(x, y)

#define UNIQ_T(x, uniq) CONCATENATE(__unique_prefix_, CONCATENATE(x, uniq))
#define UNIQ __COUNTER__

/* builtins */
#if __SIZEOF_INT__ == 4
#define BUILTIN_FFS_U32(x) __builtin_ffs(x);
#elif __SIZEOF_LONG__ == 4
#define BUILTIN_FFS_U32(x) __builtin_ffsl(x);
#else
#error "neither int nor long are four bytes long?!?"
#endif

/* Rounds up */

#define ALIGN4(l) (((l) + 3) & ~3)
#define ALIGN8(l) (((l) + 7) & ~7)

#if __SIZEOF_POINTER__ == 8
#define ALIGN(l) ALIGN8(l)
#elif __SIZEOF_POINTER__ == 4
#define ALIGN(l) ALIGN4(l)
#else
#error "Wut? Pointers are neither 4 nor 8 bytes long?"
#endif

#define ALIGN_PTR(p) ((void*) ALIGN((unsigned long) (p)))
#define ALIGN4_PTR(p) ((void*) ALIGN4((unsigned long) (p)))
#define ALIGN8_PTR(p) ((void*) ALIGN8((unsigned long) (p)))

static inline size_t ALIGN_TO(size_t l, size_t ali) {
        return ((l + ali - 1) & ~(ali - 1));
}

#define ALIGN_TO_PTR(p, ali) ((void*) ALIGN_TO((unsigned long) (p), (ali)))

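/* Illustrative usage, assuming 'ali' is a power of two (which the bitmask
 * implementation above requires):
 *
 *         ALIGN_TO(5, 8)  → 8
 *         ALIGN_TO(8, 8)  → 8
 *         ALIGN_TO(17, 8) → 24
 */
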
/* align to next higher power-of-2 (except for: 0 => 0, overflow => 0) */
static inline unsigned long ALIGN_POWER2(unsigned long u) {

        /* Avoid subtraction overflow */
        if (u == 0)
                return 0;

        /* clz(0) is undefined */
        if (u == 1)
                return 1;

        /* left-shift overflow is undefined */
        if (__builtin_clzl(u - 1UL) < 1)
                return 0;

        return 1UL << (sizeof(u) * 8 - __builtin_clzl(u - 1UL));
}

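/* Illustrative results (assuming a 64-bit unsigned long):
 *
 *         ALIGN_POWER2(0)         → 0
 *         ALIGN_POWER2(1)         → 1
 *         ALIGN_POWER2(5)         → 8
 *         ALIGN_POWER2(8)         → 8
 *         ALIGN_POWER2(ULONG_MAX) → 0  (would overflow)
 */
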
static inline size_t GREEDY_ALLOC_ROUND_UP(size_t l) {
        size_t m;

        /* Round up allocation sizes a bit to some reasonable, likely larger value. This is supposed to be
         * used for cases which are likely called in an allocation loop of some form, i.e. that repetitively
         * grow stuff, for example strv_extend() and suchlike.
         *
         * Note the difference to GREEDY_REALLOC() here, as this helper operates on a single size value only,
         * and rounds up to the next power of 2, needing no further counter.
         *
         * Note the benefits of direct ALIGN_POWER2() usage: type-safety for size_t, sane handling for very
         * small (i.e. <= 2) and safe handling for very large (i.e. > SSIZE_MAX) values. */

        if (l <= 2)
                return 2; /* Never allocate less than 2 of something. */

        m = ALIGN_POWER2(l);
        if (m == 0) /* overflow? */
                return l;

        return m;
}

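/* Illustrative results:
 *
 *         GREEDY_ALLOC_ROUND_UP(0)        → 2
 *         GREEDY_ALLOC_ROUND_UP(3)        → 4
 *         GREEDY_ALLOC_ROUND_UP(1000)     → 1024
 *         GREEDY_ALLOC_ROUND_UP(SIZE_MAX) → SIZE_MAX  (ALIGN_POWER2() overflows, value passed through)
 */
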
#ifndef __COVERITY__
#  define VOID_0 ((void)0)
#else
#  define VOID_0 ((void*)0)
#endif

#define ELEMENTSOF(x)                                                   \
        (__builtin_choose_expr(                                         \
                !__builtin_types_compatible_p(typeof(x), typeof(&*(x))), \
                sizeof(x)/sizeof((x)[0]),                               \
                VOID_0))

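/* Illustrative usage: ELEMENTSOF() only works on true arrays; applied to a plain
 * pointer the expression evaluates to (void), turning accidental misuse into a
 * compile-time error rather than a silently wrong element count:
 *
 *         int a[16];
 *         int *p = a;
 *
 *         ELEMENTSOF(a)   → 16
 *         ELEMENTSOF(p)   → compile error when used as a value
 */
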
/*
 * STRLEN - return the length of a string literal, minus the trailing NUL byte.
 * Contrary to strlen(), this is a constant expression.
 * @x: a string literal.
 */
#define STRLEN(x) (sizeof(""x"") - 1)

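/* Illustrative usage: because STRLEN() is a constant expression it can be used
 * where strlen() cannot, e.g. when sizing a buffer at compile time (the "prefix-"
 * literal below is purely hypothetical):
 *
 *         char buf[STRLEN("prefix-") + DECIMAL_STR_MAX(pid_t)];
 *
 * The concatenation with "" also ensures the argument really is a string literal. */
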
/*
 * container_of - cast a member of a structure out to the containing structure
 * @ptr: the pointer to the member.
 * @type: the type of the container struct this is embedded in.
 * @member: the name of the member within the struct.
 */
#define container_of(ptr, type, member) __container_of(UNIQ, (ptr), type, member)
#define __container_of(uniq, ptr, type, member)                         \
        ({                                                              \
                const typeof( ((type*)0)->member ) *UNIQ_T(A, uniq) = (ptr); \
                (type*)( (char *)UNIQ_T(A, uniq) - offsetof(type, member) ); \
        })

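/* Illustrative usage with a made-up struct: given a pointer to a member, recover
 * the pointer to the enclosing object:
 *
 *         struct Item {
 *                 int value;
 *                 char name[8];
 *         };
 *
 *         struct Item i;
 *         assert_se(container_of(&i.name, struct Item, name) == &i);
 */
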
#undef MAX
#define MAX(a, b) __MAX(UNIQ, (a), UNIQ, (b))
#define __MAX(aq, a, bq, b)                                             \
        ({                                                              \
                const typeof(a) UNIQ_T(A, aq) = (a);                    \
                const typeof(b) UNIQ_T(B, bq) = (b);                    \
                UNIQ_T(A, aq) > UNIQ_T(B, bq) ? UNIQ_T(A, aq) : UNIQ_T(B, bq); \
        })

/* evaluates to (void) if _A or _B are not constant or of different types */
#define CONST_MAX(_A, _B)                                               \
        (__builtin_choose_expr(                                         \
                __builtin_constant_p(_A) &&                             \
                __builtin_constant_p(_B) &&                             \
                __builtin_types_compatible_p(typeof(_A), typeof(_B)),   \
                ((_A) > (_B)) ? (_A) : (_B),                            \
                VOID_0))

/* takes two types and returns the size of the larger one */
#define MAXSIZE(A, B) (sizeof(union _packed_ { typeof(A) a; typeof(B) b; }))

#define MAX3(x, y, z)                                                   \
        ({                                                              \
                const typeof(x) _c = MAX(x, y);                         \
                MAX(_c, z);                                             \
        })

#undef MIN
#define MIN(a, b) __MIN(UNIQ, (a), UNIQ, (b))
#define __MIN(aq, a, bq, b)                                             \
        ({                                                              \
                const typeof(a) UNIQ_T(A, aq) = (a);                    \
                const typeof(b) UNIQ_T(B, bq) = (b);                    \
                UNIQ_T(A, aq) < UNIQ_T(B, bq) ? UNIQ_T(A, aq) : UNIQ_T(B, bq); \
        })

/* evaluates to (void) if _A or _B are not constant or of different types */
#define CONST_MIN(_A, _B)                                               \
        (__builtin_choose_expr(                                         \
                __builtin_constant_p(_A) &&                             \
                __builtin_constant_p(_B) &&                             \
                __builtin_types_compatible_p(typeof(_A), typeof(_B)),   \
                ((_A) < (_B)) ? (_A) : (_B),                            \
                VOID_0))

#define MIN3(x, y, z)                                                   \
        ({                                                              \
                const typeof(x) _c = MIN(x, y);                         \
                MIN(_c, z);                                             \
        })

#define LESS_BY(a, b) __LESS_BY(UNIQ, (a), UNIQ, (b))
#define __LESS_BY(aq, a, bq, b)                                         \
        ({                                                              \
                const typeof(a) UNIQ_T(A, aq) = (a);                    \
                const typeof(b) UNIQ_T(B, bq) = (b);                    \
                UNIQ_T(A, aq) > UNIQ_T(B, bq) ? UNIQ_T(A, aq) - UNIQ_T(B, bq) : 0; \
        })

#define CMP(a, b) __CMP(UNIQ, (a), UNIQ, (b))
#define __CMP(aq, a, bq, b)                                             \
        ({                                                              \
                const typeof(a) UNIQ_T(A, aq) = (a);                    \
                const typeof(b) UNIQ_T(B, bq) = (b);                    \
                UNIQ_T(A, aq) < UNIQ_T(B, bq) ? -1 :                    \
                UNIQ_T(A, aq) > UNIQ_T(B, bq) ? 1 : 0;                  \
        })

#undef CLAMP
#define CLAMP(x, low, high) __CLAMP(UNIQ, (x), UNIQ, (low), UNIQ, (high))
#define __CLAMP(xq, x, lowq, low, highq, high)                          \
        ({                                                              \
                const typeof(x) UNIQ_T(X, xq) = (x);                    \
                const typeof(low) UNIQ_T(LOW, lowq) = (low);            \
                const typeof(high) UNIQ_T(HIGH, highq) = (high);        \
                UNIQ_T(X, xq) > UNIQ_T(HIGH, highq) ?                   \
                        UNIQ_T(HIGH, highq) :                           \
                        UNIQ_T(X, xq) < UNIQ_T(LOW, lowq) ?             \
                                UNIQ_T(LOW, lowq) :                     \
                                UNIQ_T(X, xq);                          \
        })

/* Computing [(x + y - 1) / y] directly can overflow, even when the result would
 * fit in the given type. Therefore we use [x / y + !!(x % y)] instead. Note that
 * on "real" CPUs a division returns both the quotient and the remainder, so both
 * operations should be equally fast. */
#define DIV_ROUND_UP(x, y) __DIV_ROUND_UP(UNIQ, (x), UNIQ, (y))
#define __DIV_ROUND_UP(xq, x, yq, y)                                    \
        ({                                                              \
                const typeof(x) UNIQ_T(X, xq) = (x);                    \
                const typeof(y) UNIQ_T(Y, yq) = (y);                    \
                (UNIQ_T(X, xq) / UNIQ_T(Y, yq) + !!(UNIQ_T(X, xq) % UNIQ_T(Y, yq))); \
        })

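/* Illustrative usage:
 *
 *         DIV_ROUND_UP(9, 4)              → 3
 *         DIV_ROUND_UP(8, 4)              → 2
 *         DIV_ROUND_UP(UINT64_MAX, 4096)  → works; the naive (x + y - 1) / y would wrap around here
 */
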
#ifdef __COVERITY__

/* Use special definitions of assertion macros in order to prevent
 * false positives of ASSERT_SIDE_EFFECT on Coverity static analyzer
 * for uses of assert_se() and assert_return().
 *
 * These definitions make the expressions go through a (trivial) function
 * call to ensure they are not discarded. Also use ! or !! to ensure
 * the boolean expressions are seen as such.
 *
 * This technique has been described and recommended in:
 * https://community.synopsys.com/s/question/0D534000046Yuzb/suppressing-assertsideeffect-for-functions-that-allow-for-sideeffects
 */

extern void __coverity_panic__(void);

static inline void __coverity_check__(int condition) {
        if (!condition)
                __coverity_panic__();
}

static inline int __coverity_check_and_return__(int condition) {
        return condition;
}

#define assert_message_se(expr, message) __coverity_check__(!!(expr))

#define assert_log(expr, message) __coverity_check_and_return__(!!(expr))

#else  /* ! __COVERITY__ */

#define assert_message_se(expr, message)                                \
        do {                                                            \
                if (_unlikely_(!(expr)))                                \
                        log_assert_failed(message, PROJECT_FILE, __LINE__, __PRETTY_FUNCTION__); \
        } while (false)

#define assert_log(expr, message) ((_likely_(expr))                     \
        ? (true)                                                        \
        : (log_assert_failed_return(message, PROJECT_FILE, __LINE__, __PRETTY_FUNCTION__), false))

#endif  /* __COVERITY__ */

#define assert_se(expr) assert_message_se(expr, #expr)

/* We override the glibc assert() here. */
#undef assert
#ifdef NDEBUG
#define assert(expr) do {} while (false)
#else
#define assert(expr) assert_message_se(expr, #expr)
#endif

#define assert_not_reached(t)                                           \
        log_assert_failed_unreachable(t, PROJECT_FILE, __LINE__, __PRETTY_FUNCTION__)

#if defined(static_assert)
#define assert_cc(expr)                                                 \
        static_assert(expr, #expr)
#else
#define assert_cc(expr)                                                 \
        struct CONCATENATE(_assert_struct_, __COUNTER__) {              \
                char x[(expr) ? 0 : -1];                                \
        }
#endif

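/* Illustrative usage: assert_cc() is evaluated at compile time, so it is suitable
 * for checking layout or size assumptions at file scope:
 *
 *         assert_cc(sizeof(uint32_t) == 4);
 *         assert_cc(STRLEN("foo") == 3);
 */
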
#define assert_return(expr, r)                                          \
        do {                                                            \
                if (!assert_log(expr, #expr))                           \
                        return (r);                                     \
        } while (false)

#define assert_return_errno(expr, r, err)                               \
        do {                                                            \
                if (!assert_log(expr, #expr)) {                         \
                        errno = err;                                    \
                        return (r);                                     \
                }                                                       \
        } while (false)

#define return_with_errno(r, err)                                       \
        do {                                                            \
                errno = abs(err);                                       \
                return r;                                               \
        } while (false)

#define PTR_TO_INT(p) ((int) ((intptr_t) (p)))
#define INT_TO_PTR(u) ((void *) ((intptr_t) (u)))
#define PTR_TO_UINT(p) ((unsigned) ((uintptr_t) (p)))
#define UINT_TO_PTR(u) ((void *) ((uintptr_t) (u)))

#define PTR_TO_LONG(p) ((long) ((intptr_t) (p)))
#define LONG_TO_PTR(u) ((void *) ((intptr_t) (u)))
#define PTR_TO_ULONG(p) ((unsigned long) ((uintptr_t) (p)))
#define ULONG_TO_PTR(u) ((void *) ((uintptr_t) (u)))

#define PTR_TO_INT32(p) ((int32_t) ((intptr_t) (p)))
#define INT32_TO_PTR(u) ((void *) ((intptr_t) (u)))
#define PTR_TO_UINT32(p) ((uint32_t) ((uintptr_t) (p)))
#define UINT32_TO_PTR(u) ((void *) ((uintptr_t) (u)))

#define PTR_TO_INT64(p) ((int64_t) ((intptr_t) (p)))
#define INT64_TO_PTR(u) ((void *) ((intptr_t) (u)))
#define PTR_TO_UINT64(p) ((uint64_t) ((uintptr_t) (p)))
#define UINT64_TO_PTR(u) ((void *) ((uintptr_t) (u)))

#define PTR_TO_SIZE(p) ((size_t) ((uintptr_t) (p)))
#define SIZE_TO_PTR(u) ((void *) ((uintptr_t) (u)))

#define CHAR_TO_STR(x) ((char[2]) { x, 0 })

#define char_array_0(x) x[sizeof(x)-1] = 0;

#define sizeof_field(struct_type, member) sizeof(((struct_type *) 0)->member)

/* Returns the number of chars needed to format variables of the
 * specified type as a decimal string. Adds in extra space for a
 * negative '-' prefix (hence works correctly on signed
 * types). Includes space for the trailing NUL. */
#define DECIMAL_STR_MAX(type)                                           \
        (2+(sizeof(type) <= 1 ? 3 :                                     \
            sizeof(type) <= 2 ? 5 :                                     \
            sizeof(type) <= 4 ? 10 :                                    \
            sizeof(type) <= 8 ? 20 : sizeof(int[-2*(sizeof(type) > 8)])))

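/* Illustrative usage: size a stack buffer so that any value of the given type is
 * guaranteed to fit when formatted as a decimal string ('value' is a placeholder):
 *
 *         char buf[DECIMAL_STR_MAX(int32_t)];   → 12 bytes: '-' + 10 digits + NUL
 *         snprintf(buf, sizeof(buf), "%" PRIi32, value);
 */
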
#define DECIMAL_STR_WIDTH(x)                                            \
        ({                                                              \
                typeof(x) _x_ = (x);                                    \
                unsigned ans = 1;                                       \
                while ((_x_ /= 10) != 0)                                \
                        ans++;                                          \
                ans;                                                    \
        })

#define UPDATE_FLAG(orig, flag, b)                      \
        ((b) ? ((orig) | (flag)) : ((orig) & ~(flag)))
#define SET_FLAG(v, flag, b)                            \
        (v) = UPDATE_FLAG(v, flag, b)
#define FLAGS_SET(v, flags)                             \
        ((~(v) & (flags)) == 0)

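/* Illustrative usage: FLAGS_SET() checks that *all* bits in 'flags' are set in 'v':
 *
 *         unsigned v = 0;
 *
 *         SET_FLAG(v, 0x1, true);          → v == 0x1
 *         SET_FLAG(v, 0x4, true);          → v == 0x5
 *         FLAGS_SET(v, 0x1)                → true
 *         FLAGS_SET(v, 0x1 | 0x2)          → false (0x2 is not set)
 *         SET_FLAG(v, 0x1, false);         → v == 0x4
 */
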
#define CASE_F(X) case X:
#define CASE_F_1(CASE, X) CASE_F(X)
#define CASE_F_2(CASE, X, ...) CASE(X) CASE_F_1(CASE, __VA_ARGS__)
#define CASE_F_3(CASE, X, ...) CASE(X) CASE_F_2(CASE, __VA_ARGS__)
#define CASE_F_4(CASE, X, ...) CASE(X) CASE_F_3(CASE, __VA_ARGS__)
#define CASE_F_5(CASE, X, ...) CASE(X) CASE_F_4(CASE, __VA_ARGS__)
#define CASE_F_6(CASE, X, ...) CASE(X) CASE_F_5(CASE, __VA_ARGS__)
#define CASE_F_7(CASE, X, ...) CASE(X) CASE_F_6(CASE, __VA_ARGS__)
#define CASE_F_8(CASE, X, ...) CASE(X) CASE_F_7(CASE, __VA_ARGS__)
#define CASE_F_9(CASE, X, ...) CASE(X) CASE_F_8(CASE, __VA_ARGS__)
#define CASE_F_10(CASE, X, ...) CASE(X) CASE_F_9(CASE, __VA_ARGS__)
#define CASE_F_11(CASE, X, ...) CASE(X) CASE_F_10(CASE, __VA_ARGS__)
#define CASE_F_12(CASE, X, ...) CASE(X) CASE_F_11(CASE, __VA_ARGS__)
#define CASE_F_13(CASE, X, ...) CASE(X) CASE_F_12(CASE, __VA_ARGS__)
#define CASE_F_14(CASE, X, ...) CASE(X) CASE_F_13(CASE, __VA_ARGS__)
#define CASE_F_15(CASE, X, ...) CASE(X) CASE_F_14(CASE, __VA_ARGS__)
#define CASE_F_16(CASE, X, ...) CASE(X) CASE_F_15(CASE, __VA_ARGS__)
#define CASE_F_17(CASE, X, ...) CASE(X) CASE_F_16(CASE, __VA_ARGS__)
#define CASE_F_18(CASE, X, ...) CASE(X) CASE_F_17(CASE, __VA_ARGS__)
#define CASE_F_19(CASE, X, ...) CASE(X) CASE_F_18(CASE, __VA_ARGS__)
#define CASE_F_20(CASE, X, ...) CASE(X) CASE_F_19(CASE, __VA_ARGS__)

#define GET_CASE_F(_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,NAME,...) NAME
#define FOR_EACH_MAKE_CASE(...)                                         \
        GET_CASE_F(__VA_ARGS__,CASE_F_20,CASE_F_19,CASE_F_18,CASE_F_17,CASE_F_16,CASE_F_15,CASE_F_14,CASE_F_13,CASE_F_12,CASE_F_11, \
                   CASE_F_10,CASE_F_9,CASE_F_8,CASE_F_7,CASE_F_6,CASE_F_5,CASE_F_4,CASE_F_3,CASE_F_2,CASE_F_1) \
                   (CASE_F,__VA_ARGS__)

#define IN_SET(x, ...)                                                  \
        ({                                                              \
                bool _found = false;                                    \
                /* If the build breaks in the line below, you need to extend the case macros. (We use "long double" as \
                 * type for the array, in the hope that checkers such as ubsan don't complain that the initializers for \
                 * the array are not representable by the base type. Ideally we'd use typeof(x) as base type, but that \
                 * doesn't work, as we want to use this on bitfields and gcc refuses typeof() on bitfields.) */ \
                static const long double __assert_in_set[] _unused_ = { __VA_ARGS__ }; \
                assert_cc(ELEMENTSOF(__assert_in_set) <= 20);           \
                switch(x) {                                             \
                FOR_EACH_MAKE_CASE(__VA_ARGS__)                         \
                        _found = true;                                  \
                        break;                                          \
                default:                                                \
                        break;                                          \
                }                                                       \
                _found;                                                 \
        })

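/* Illustrative usage: because IN_SET() expands to a switch statement, the listed
 * values must be integer constant expressions, and 'x' is evaluated only once:
 *
 *         if (IN_SET(c, '.', '-', '_'))
 *                 ...;
 *
 *         if (!IN_SET(r, 0, -EEXIST, -ENOENT))
 *                 return r;
 */
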
#define SWAP_TWO(x, y) do {                        \
                typeof(x) _t = (x);                \
                (x) = (y);                         \
                (y) = (_t);                        \
        } while (false)

#define STRV_MAKE(...) ((char**) ((const char*[]) { __VA_ARGS__, NULL }))
#define STRV_MAKE_EMPTY ((char*[1]) { NULL })

/* Iterates through a specified list of pointers. Accepts NULL pointers, but uses (void*) -1 as internal marker for EOL. */
#define FOREACH_POINTER(p, x, ...)                                                      \
        for (typeof(p) *_l = (typeof(p)[]) { ({ p = x; }), ##__VA_ARGS__, (void*) -1 }; \
             p != (typeof(p)) (void*) -1;                                               \
             p = *(++_l))

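/* Illustrative usage: iterate over a fixed list of pointers, NULL entries included;
 * only the (void*) -1 sentinel appended by the macro terminates the loop:
 *
 *         const char *p;
 *
 *         FOREACH_POINTER(p, "one", NULL, "three")
 *                 printf("%s\n", p ?: "(null)");
 */
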
/* Define C11 thread_local attribute even on older gcc compiler
 * versions */
#ifndef thread_local
/*
 * Don't break on glibc < 2.16 that doesn't define __STDC_NO_THREADS__
 * see http://gcc.gnu.org/bugzilla/show_bug.cgi?id=53769
 */
#if __STDC_VERSION__ >= 201112L && !(defined(__STDC_NO_THREADS__) || (defined(__GNU_LIBRARY__) && __GLIBC__ == 2 && __GLIBC_MINOR__ < 16))
#define thread_local _Thread_local
#else
#define thread_local __thread
#endif
#endif

#define DEFINE_TRIVIAL_DESTRUCTOR(name, type, func)             \
        static inline void name(type *p) {                      \
                func(p);                                        \
        }

#define DEFINE_TRIVIAL_CLEANUP_FUNC(type, func)                 \
        static inline void func##p(type *p) {                   \
                if (*p)                                         \
                        func(*p);                               \
        }

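/* Illustrative usage, combining this with the _cleanup_() attribute defined above
 * (fclosep is the helper name the macro generates for fclose):
 *
 *         DEFINE_TRIVIAL_CLEANUP_FUNC(FILE*, fclose);
 *
 *         int read_config(const char *path) {
 *                 _cleanup_(fclosep) FILE *f = fopen(path, "re");
 *                 if (!f)
 *                         return -errno;
 *                 ...            ← f is closed automatically on every return path
 *         }
 */
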
#define _DEFINE_TRIVIAL_REF_FUNC(type, name, scope)             \
        scope type *name##_ref(type *p) {                       \
                if (!p)                                         \
                        return NULL;                            \
                                                                \
                assert(p->n_ref > 0);                           \
                p->n_ref++;                                     \
                return p;                                       \
        }

#define _DEFINE_TRIVIAL_UNREF_FUNC(type, name, free_func, scope)        \
        scope type *name##_unref(type *p) {                             \
                if (!p)                                                 \
                        return NULL;                                    \
                                                                        \
                assert(p->n_ref > 0);                                   \
                p->n_ref--;                                             \
                if (p->n_ref > 0)                                       \
                        return NULL;                                    \
                                                                        \
                return free_func(p);                                    \
        }

#define DEFINE_TRIVIAL_REF_FUNC(type, name)             \
        _DEFINE_TRIVIAL_REF_FUNC(type, name,)
#define DEFINE_PRIVATE_TRIVIAL_REF_FUNC(type, name)     \
        _DEFINE_TRIVIAL_REF_FUNC(type, name, static)
#define DEFINE_PUBLIC_TRIVIAL_REF_FUNC(type, name)      \
        _DEFINE_TRIVIAL_REF_FUNC(type, name, _public_)

#define DEFINE_TRIVIAL_UNREF_FUNC(type, name, free_func)                \
        _DEFINE_TRIVIAL_UNREF_FUNC(type, name, free_func,)
#define DEFINE_PRIVATE_TRIVIAL_UNREF_FUNC(type, name, free_func)        \
        _DEFINE_TRIVIAL_UNREF_FUNC(type, name, free_func, static)
#define DEFINE_PUBLIC_TRIVIAL_UNREF_FUNC(type, name, free_func)         \
        _DEFINE_TRIVIAL_UNREF_FUNC(type, name, free_func, _public_)

#define DEFINE_TRIVIAL_REF_UNREF_FUNC(type, name, free_func)            \
        DEFINE_TRIVIAL_REF_FUNC(type, name);                            \
        DEFINE_TRIVIAL_UNREF_FUNC(type, name, free_func);

#define DEFINE_PRIVATE_TRIVIAL_REF_UNREF_FUNC(type, name, free_func)    \
        DEFINE_PRIVATE_TRIVIAL_REF_FUNC(type, name);                    \
        DEFINE_PRIVATE_TRIVIAL_UNREF_FUNC(type, name, free_func);

#define DEFINE_PUBLIC_TRIVIAL_REF_UNREF_FUNC(type, name, free_func)     \
        DEFINE_PUBLIC_TRIVIAL_REF_FUNC(type, name);                     \
        DEFINE_PUBLIC_TRIVIAL_UNREF_FUNC(type, name, free_func);

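/* Illustrative usage with a made-up reference-counted type; the struct needs an
 * 'n_ref' member and a free function returning NULL for this to work:
 *
 *         typedef struct Widget {
 *                 unsigned n_ref;
 *                 char *name;
 *         } Widget;
 *
 *         static Widget *widget_free(Widget *w) {
 *                 free(w->name);
 *                 return mfree(w);     ← mfree() assumed from alloc-util.h
 *         }
 *
 *         DEFINE_TRIVIAL_REF_UNREF_FUNC(Widget, widget, widget_free);
 *         → generates widget_ref() and widget_unref()
 */
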
/* A macro to force copying of a variable from memory. This is useful whenever we want to read something from
 * memory and want to make sure the compiler won't optimize away the destination variable for us. It's not
 * supposed to be a full CPU memory barrier, i.e. the CPU is still allowed to reorder the reads, but the
 * compiler is not allowed to remove our local copies of the variables. We want this to work for unaligned
 * memory, hence memcpy() is great for our purposes. */
#define READ_NOW(x)                                                     \
        ({                                                              \
                typeof(x) _copy;                                        \
                memcpy(&_copy, &(x), sizeof(_copy));                    \
                asm volatile ("" : : : "memory");                       \
                _copy;                                                  \
        })

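/* Illustrative usage with a hypothetical shared structure: take one stable snapshot
 * of a field that another thread may be updating, then work only with the copy:
 *
 *         size_t n = READ_NOW(shared->n_items);
 *         if (n > 0 && n <= MAX_ITEMS)
 *                 process(shared->items, n);
 */
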
#include "log.h"