/* SPDX-License-Identifier: LGPL-2.1-or-later */
#pragma once

#if !SD_BOOT
#  include <assert.h>
#endif

#include <limits.h>
#include <stdalign.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

/* Temporarily disable some warnings */
#define DISABLE_WARNING_DEPRECATED_DECLARATIONS \
        _Pragma("GCC diagnostic push"); \
        _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"")

#define DISABLE_WARNING_FORMAT_NONLITERAL \
        _Pragma("GCC diagnostic push"); \
        _Pragma("GCC diagnostic ignored \"-Wformat-nonliteral\"")

#define DISABLE_WARNING_MISSING_PROTOTYPES \
        _Pragma("GCC diagnostic push"); \
        _Pragma("GCC diagnostic ignored \"-Wmissing-prototypes\"")

#define DISABLE_WARNING_NONNULL \
        _Pragma("GCC diagnostic push"); \
        _Pragma("GCC diagnostic ignored \"-Wnonnull\"")

#define DISABLE_WARNING_SHADOW \
        _Pragma("GCC diagnostic push"); \
        _Pragma("GCC diagnostic ignored \"-Wshadow\"")

#define DISABLE_WARNING_INCOMPATIBLE_POINTER_TYPES \
        _Pragma("GCC diagnostic push"); \
        _Pragma("GCC diagnostic ignored \"-Wincompatible-pointer-types\"")

#define DISABLE_WARNING_TYPE_LIMITS \
        _Pragma("GCC diagnostic push"); \
        _Pragma("GCC diagnostic ignored \"-Wtype-limits\"")

#define DISABLE_WARNING_ADDRESS \
        _Pragma("GCC diagnostic push"); \
        _Pragma("GCC diagnostic ignored \"-Waddress\"")

#define REENABLE_WARNING \
        _Pragma("GCC diagnostic pop")

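/* Example (illustrative sketch, not part of the original header): wrap a single call
 * that would otherwise trigger -Wformat-nonliteral, because the format string is not a
 * string literal. The function name is hypothetical.
 *
 *         static void render(char *buf, size_t n, const char *fmt, va_list ap) {
 *                 DISABLE_WARNING_FORMAT_NONLITERAL;
 *                 vsnprintf(buf, n, fmt, ap);
 *                 REENABLE_WARNING;
 *         }
 */
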
#define _align_(x) __attribute__((__aligned__(x)))
#define _alignas_(x) __attribute__((__aligned__(alignof(x))))
#define _alignptr_ __attribute__((__aligned__(sizeof(void *))))
#define _cleanup_(x) __attribute__((__cleanup__(x)))
#define _const_ __attribute__((__const__))
#define _deprecated_ __attribute__((__deprecated__))
#define _destructor_ __attribute__((__destructor__))
#define _hidden_ __attribute__((__visibility__("hidden")))
#define _likely_(x) (__builtin_expect(!!(x), 1))
#define _malloc_ __attribute__((__malloc__))
#define _noinline_ __attribute__((noinline))
#define _noreturn_ _Noreturn
#define _packed_ __attribute__((__packed__))
#define _printf_(a, b) __attribute__((__format__(printf, a, b)))
#define _public_ __attribute__((__visibility__("default")))
#define _pure_ __attribute__((__pure__))
#define _retain_ __attribute__((__retain__))
#define _returns_nonnull_ __attribute__((__returns_nonnull__))
#define _section_(x) __attribute__((__section__(x)))
#define _sentinel_ __attribute__((__sentinel__))
#define _unlikely_(x) (__builtin_expect(!!(x), 0))
#define _unused_ __attribute__((__unused__))
#define _used_ __attribute__((__used__))
#define _warn_unused_result_ __attribute__((__warn_unused_result__))
#define _weak_ __attribute__((__weak__))
#define _weakref_(x) __attribute__((__weakref__(#x)))

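/* Example (illustrative sketch, not part of the original header): typical use of the
 * attribute shorthands above. The function names are hypothetical.
 *
 *         static void closep(int *fd) {          // pairs with _cleanup_() below
 *                 if (*fd >= 0)
 *                         close(*fd);
 *         }
 *
 *         _printf_(2, 3) static int log_to_fd(int fd, const char *fmt, ...);
 *
 *         static int probe_file(const char *path) {
 *                 _cleanup_(closep) int fd = open(path, O_RDONLY | O_CLOEXEC);
 *                 if (_unlikely_(fd < 0))
 *                         return -errno;
 *                 return 0;                      // fd is closed automatically on return
 *         }
 */
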
#ifdef __clang__
#  define _alloc_(...)
#else
#  define _alloc_(...) __attribute__((__alloc_size__(__VA_ARGS__)))
#endif

#if __GNUC__ >= 7 || (defined(__clang__) && __clang_major__ >= 10)
#  define _fallthrough_ __attribute__((__fallthrough__))
#else
#  define _fallthrough_
#endif

#define XSTRINGIFY(x) #x
#define STRINGIFY(x) XSTRINGIFY(x)

#ifndef __COVERITY__
#  define VOID_0 ((void)0)
#else
#  define VOID_0 ((void*)0)
#endif

#define ELEMENTSOF(x) \
        (__builtin_choose_expr( \
                !__builtin_types_compatible_p(typeof(x), typeof(&*(x))), \
                sizeof(x)/sizeof((x)[0]), \
                VOID_0))

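/* Example (illustrative sketch, not part of the original header): ELEMENTSOF() only
 * works on real arrays; on a pointer it selects VOID_0 and the build fails instead of
 * silently producing a bogus element count. process() is hypothetical.
 *
 *         static const int primes[] = { 2, 3, 5, 7, 11 };
 *
 *         for (size_t i = 0; i < ELEMENTSOF(primes); i++)
 *                 process(primes[i]);
 *
 *         // const int *p = primes;
 *         // ELEMENTSOF(p);   // would not compile: p is a pointer, not an array
 */
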
#define XCONCATENATE(x, y) x ## y
#define CONCATENATE(x, y) XCONCATENATE(x, y)

#if SD_BOOT
        _noreturn_ void efi_assert(const char *expr, const char *file, unsigned line, const char *function);

        #ifdef NDEBUG
                #define assert(expr) ({ if (!(expr)) __builtin_unreachable(); })
                #define assert_not_reached() __builtin_unreachable()
        #else
                #define assert(expr) ({ _likely_(expr) ? VOID_0 : efi_assert(#expr, __FILE__, __LINE__, __func__); })
                #define assert_not_reached() efi_assert("Code should not be reached", __FILE__, __LINE__, __func__)
        #endif
        #define static_assert _Static_assert
        #define assert_se(expr) ({ _likely_(expr) ? VOID_0 : efi_assert(#expr, __FILE__, __LINE__, __func__); })
#endif

/* This passes the argument through after (if asserts are enabled) checking that it is not null. */
#define ASSERT_PTR(expr) _ASSERT_PTR(expr, UNIQ_T(_expr_, UNIQ), assert)
#define ASSERT_SE_PTR(expr) _ASSERT_PTR(expr, UNIQ_T(_expr_, UNIQ), assert_se)
#define _ASSERT_PTR(expr, var, check) \
        ({ \
                typeof(expr) var = (expr); \
                check(var); \
                var; \
        })

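/* Example (illustrative sketch, not part of the original header): ASSERT_PTR() checks a
 * pointer and uses it in a single expression. The struct and function are hypothetical.
 *
 *         static int frobnicate(struct widget *w) {
 *                 struct widget *v = ASSERT_PTR(w);   // asserts w != NULL, then passes it through
 *                 return v->counter++;
 *         }
 */
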
#define ASSERT_NONNEG(expr) \
        ({ \
                typeof(expr) _expr_ = (expr), _zero = 0; \
                assert(_expr_ >= _zero); \
                _expr_; \
        })

#define ASSERT_SE_NONNEG(expr) \
        ({ \
                typeof(expr) _expr_ = (expr), _zero = 0; \
                assert_se(_expr_ >= _zero); \
                _expr_; \
        })

#define assert_cc(expr) static_assert(expr, #expr)

#define UNIQ_T(x, uniq) CONCATENATE(__unique_prefix_, CONCATENATE(x, uniq))
#define UNIQ __COUNTER__

/* Note that this works differently from pthread_once(): this macro does not
 * synchronize code execution, i.e. code that is conditionalized on this macro runs
 * concurrently with all other code conditionalized the same way; no ordering or
 * completion is enforced. */
#define ONCE __ONCE(UNIQ_T(_once_, UNIQ))
#define __ONCE(o) \
        ({ \
                static bool (o) = false; \
                __atomic_exchange_n(&(o), true, __ATOMIC_SEQ_CST); \
        })

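/* Example (illustrative sketch, not part of the original header): emit a warning only
 * the first time a code path is hit. ONCE evaluates to the previous value of a
 * call-site-local flag, i.e. false on the first evaluation and true on every later one,
 * so the branch below fires exactly once per call site.
 *
 *         if (!ONCE)
 *                 fprintf(stderr, "Deprecated option used, please switch to the new one.\n");
 */
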
#undef MAX
#define MAX(a, b) __MAX(UNIQ, (a), UNIQ, (b))
#define __MAX(aq, a, bq, b) \
        ({ \
                const typeof(a) UNIQ_T(A, aq) = (a); \
                const typeof(b) UNIQ_T(B, bq) = (b); \
                UNIQ_T(A, aq) > UNIQ_T(B, bq) ? UNIQ_T(A, aq) : UNIQ_T(B, bq); \
        })

#define IS_UNSIGNED_INTEGER_TYPE(type) \
        (__builtin_types_compatible_p(typeof(type), unsigned char) || \
         __builtin_types_compatible_p(typeof(type), unsigned short) || \
         __builtin_types_compatible_p(typeof(type), unsigned) || \
         __builtin_types_compatible_p(typeof(type), unsigned long) || \
         __builtin_types_compatible_p(typeof(type), unsigned long long))

#define IS_SIGNED_INTEGER_TYPE(type) \
        (__builtin_types_compatible_p(typeof(type), signed char) || \
         __builtin_types_compatible_p(typeof(type), signed short) || \
         __builtin_types_compatible_p(typeof(type), signed) || \
         __builtin_types_compatible_p(typeof(type), signed long) || \
         __builtin_types_compatible_p(typeof(type), signed long long))

/* Evaluates to (void) if _A or _B are not constant or of different types (being integers of different sizes
 * is also OK as long as the signedness matches) */
#define CONST_MAX(_A, _B) \
        (__builtin_choose_expr( \
                __builtin_constant_p(_A) && \
                __builtin_constant_p(_B) && \
                (__builtin_types_compatible_p(typeof(_A), typeof(_B)) || \
                 (IS_UNSIGNED_INTEGER_TYPE(_A) && IS_UNSIGNED_INTEGER_TYPE(_B)) || \
                 (IS_SIGNED_INTEGER_TYPE(_A) && IS_SIGNED_INTEGER_TYPE(_B))), \
                ((_A) > (_B)) ? (_A) : (_B), \
                VOID_0))

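/* Example (illustrative sketch, not part of the original header): CONST_MAX() remains an
 * integer constant expression, so it can size arrays and feed assert_cc(). The constants
 * are hypothetical.
 *
 *         #define FOO_NAME_MAX 64U
 *         #define BAR_NAME_MAX 96U
 *
 *         static char name_buf[CONST_MAX(FOO_NAME_MAX, BAR_NAME_MAX) + 1];
 *         assert_cc(CONST_MAX(FOO_NAME_MAX, BAR_NAME_MAX) == 96U);
 */
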
/* takes two types and returns the size of the larger one */
#define MAXSIZE(A, B) (sizeof(union _packed_ { typeof(A) a; typeof(B) b; }))

#define MAX3(x, y, z) \
        ({ \
                const typeof(x) _c = MAX(x, y); \
                MAX(_c, z); \
        })

#define MAX4(x, y, z, a) \
        ({ \
                const typeof(x) _d = MAX3(x, y, z); \
                MAX(_d, a); \
        })

#undef MIN
#define MIN(a, b) __MIN(UNIQ, (a), UNIQ, (b))
#define __MIN(aq, a, bq, b) \
        ({ \
                const typeof(a) UNIQ_T(A, aq) = (a); \
                const typeof(b) UNIQ_T(B, bq) = (b); \
                UNIQ_T(A, aq) < UNIQ_T(B, bq) ? UNIQ_T(A, aq) : UNIQ_T(B, bq); \
        })

/* evaluates to (void) if _A or _B are not constant or of different types */
#define CONST_MIN(_A, _B) \
        (__builtin_choose_expr( \
                __builtin_constant_p(_A) && \
                __builtin_constant_p(_B) && \
                __builtin_types_compatible_p(typeof(_A), typeof(_B)), \
                ((_A) < (_B)) ? (_A) : (_B), \
                VOID_0))

#define MIN3(x, y, z) \
        ({ \
                const typeof(x) _c = MIN(x, y); \
                MIN(_c, z); \
        })

/* Returns true if the passed integer is a positive power of two */
#define CONST_ISPOWEROF2(x) \
        ((x) > 0 && ((x) & ((x) - 1)) == 0)

#define ISPOWEROF2(x) \
        __builtin_choose_expr( \
                __builtin_constant_p(x), \
                CONST_ISPOWEROF2(x), \
                ({ \
                        const typeof(x) _x = (x); \
                        CONST_ISPOWEROF2(_x); \
                }))

#define LESS_BY(a, b) __LESS_BY(UNIQ, (a), UNIQ, (b))
#define __LESS_BY(aq, a, bq, b) \
        ({ \
                const typeof(a) UNIQ_T(A, aq) = (a); \
                const typeof(b) UNIQ_T(B, bq) = (b); \
                UNIQ_T(A, aq) > UNIQ_T(B, bq) ? UNIQ_T(A, aq) - UNIQ_T(B, bq) : 0; \
        })

#define CMP(a, b) __CMP(UNIQ, (a), UNIQ, (b))
#define __CMP(aq, a, bq, b) \
        ({ \
                const typeof(a) UNIQ_T(A, aq) = (a); \
                const typeof(b) UNIQ_T(B, bq) = (b); \
                UNIQ_T(A, aq) < UNIQ_T(B, bq) ? -1 : \
                UNIQ_T(A, aq) > UNIQ_T(B, bq) ? 1 : 0; \
        })

#undef CLAMP
#define CLAMP(x, low, high) __CLAMP(UNIQ, (x), UNIQ, (low), UNIQ, (high))
#define __CLAMP(xq, x, lowq, low, highq, high) \
        ({ \
                const typeof(x) UNIQ_T(X, xq) = (x); \
                const typeof(low) UNIQ_T(LOW, lowq) = (low); \
                const typeof(high) UNIQ_T(HIGH, highq) = (high); \
                UNIQ_T(X, xq) > UNIQ_T(HIGH, highq) ? \
                        UNIQ_T(HIGH, highq) : \
                        UNIQ_T(X, xq) < UNIQ_T(LOW, lowq) ? \
                                UNIQ_T(LOW, lowq) : \
                                UNIQ_T(X, xq); \
        })

/* The naive expression [(x + y - 1) / y] can overflow even though the result would fit
 * in the given type. Therefore, we use [x / y + !!(x % y)] instead. Note that on "real
 * CPUs" a division yields both the quotient and the remainder, so both forms should be
 * equally fast. */
#define DIV_ROUND_UP(x, y) __DIV_ROUND_UP(UNIQ, (x), UNIQ, (y))
#define __DIV_ROUND_UP(xq, x, yq, y) \
        ({ \
                const typeof(x) UNIQ_T(X, xq) = (x); \
                const typeof(y) UNIQ_T(Y, yq) = (y); \
                (UNIQ_T(X, xq) / UNIQ_T(Y, yq) + !!(UNIQ_T(X, xq) % UNIQ_T(Y, yq))); \
        })

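/* Example (illustrative sketch, not part of the original header): for size_t
 * x = SIZE_MAX - 1 and y = 4096, the naive (x + y - 1) / y wraps around and produces a
 * result close to zero, while DIV_ROUND_UP(x, y) evaluates x / y + !!(x % y) and yields
 * the correct rounded-up quotient without ever overflowing.
 */
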
/* Rounds up x to the next multiple of y. Resolves to typeof(x) -1 in case of overflow */
#define __ROUND_UP(q, x, y) \
        ({ \
                const typeof(y) UNIQ_T(A, q) = (y); \
                const typeof(x) UNIQ_T(B, q) = DIV_ROUND_UP((x), UNIQ_T(A, q)); \
                typeof(x) UNIQ_T(C, q); \
                __builtin_mul_overflow(UNIQ_T(B, q), UNIQ_T(A, q), &UNIQ_T(C, q)) ? (typeof(x)) -1 : UNIQ_T(C, q); \
        })
#define ROUND_UP(x, y) __ROUND_UP(UNIQ, (x), (y))

#define CASE_F_1(X) case X:
#define CASE_F_2(X, ...) case X: CASE_F_1( __VA_ARGS__)
#define CASE_F_3(X, ...) case X: CASE_F_2( __VA_ARGS__)
#define CASE_F_4(X, ...) case X: CASE_F_3( __VA_ARGS__)
#define CASE_F_5(X, ...) case X: CASE_F_4( __VA_ARGS__)
#define CASE_F_6(X, ...) case X: CASE_F_5( __VA_ARGS__)
#define CASE_F_7(X, ...) case X: CASE_F_6( __VA_ARGS__)
#define CASE_F_8(X, ...) case X: CASE_F_7( __VA_ARGS__)
#define CASE_F_9(X, ...) case X: CASE_F_8( __VA_ARGS__)
#define CASE_F_10(X, ...) case X: CASE_F_9( __VA_ARGS__)
#define CASE_F_11(X, ...) case X: CASE_F_10( __VA_ARGS__)
#define CASE_F_12(X, ...) case X: CASE_F_11( __VA_ARGS__)
#define CASE_F_13(X, ...) case X: CASE_F_12( __VA_ARGS__)
#define CASE_F_14(X, ...) case X: CASE_F_13( __VA_ARGS__)
#define CASE_F_15(X, ...) case X: CASE_F_14( __VA_ARGS__)
#define CASE_F_16(X, ...) case X: CASE_F_15( __VA_ARGS__)
#define CASE_F_17(X, ...) case X: CASE_F_16( __VA_ARGS__)
#define CASE_F_18(X, ...) case X: CASE_F_17( __VA_ARGS__)
#define CASE_F_19(X, ...) case X: CASE_F_18( __VA_ARGS__)
#define CASE_F_20(X, ...) case X: CASE_F_19( __VA_ARGS__)

#define GET_CASE_F(_1,_2,_3,_4,_5,_6,_7,_8,_9,_10,_11,_12,_13,_14,_15,_16,_17,_18,_19,_20,NAME,...) NAME
#define FOR_EACH_MAKE_CASE(...) \
        GET_CASE_F(__VA_ARGS__,CASE_F_20,CASE_F_19,CASE_F_18,CASE_F_17,CASE_F_16,CASE_F_15,CASE_F_14,CASE_F_13,CASE_F_12,CASE_F_11, \
                   CASE_F_10,CASE_F_9,CASE_F_8,CASE_F_7,CASE_F_6,CASE_F_5,CASE_F_4,CASE_F_3,CASE_F_2,CASE_F_1) \
                  (__VA_ARGS__)

#define IN_SET(x, first, ...) \
        ({ \
                bool _found = false; \
                /* If the build breaks in the line below, you need to extend the case macros. We use typeof(+x) \
                 * here to widen the type of x if it is a bit-field as this would otherwise be illegal. */ \
                static const typeof(+x) __assert_in_set[] _unused_ = { first, __VA_ARGS__ }; \
                assert_cc(ELEMENTSOF(__assert_in_set) <= 20); \
                switch (x) { \
                FOR_EACH_MAKE_CASE(first, __VA_ARGS__) \
                        _found = true; \
                        break; \
                default: \
                        break; \
                } \
                _found; \
        })

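/* Example (illustrative sketch, not part of the original header): IN_SET() replaces a
 * chain of == comparisons and is checked at compile time to stay within the 20-entry
 * limit of the case macros above.
 *
 *         static bool is_vowel(char c) {
 *                 return IN_SET(c, 'a', 'e', 'i', 'o', 'u');
 *         }
 */
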
/* Takes inspiration from Rust's Option::take() method: reads and returns a pointer, but at the same time
 * resets it to NULL. See: https://doc.rust-lang.org/std/option/enum.Option.html#method.take */
#define TAKE_GENERIC(var, type, nullvalue) \
        ({ \
                type *_pvar_ = &(var); \
                type _var_ = *_pvar_; \
                type _nullvalue_ = nullvalue; \
                *_pvar_ = _nullvalue_; \
                _var_; \
        })
#define TAKE_PTR_TYPE(ptr, type) TAKE_GENERIC(ptr, type, NULL)
#define TAKE_PTR(ptr) TAKE_PTR_TYPE(ptr, typeof(ptr))
#define TAKE_STRUCT_TYPE(s, type) TAKE_GENERIC(s, type, {})
#define TAKE_STRUCT(s) TAKE_STRUCT_TYPE(s, typeof(s))

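/* Example (illustrative sketch, not part of the original header): TAKE_PTR() transfers
 * ownership out of a _cleanup_ variable, so the cleanup handler sees NULL and does not
 * free the object being returned. freep() is assumed to be the usual "free and reset to
 * NULL" cleanup helper.
 *
 *         static char *dup_or_null(const char *s) {
 *                 _cleanup_(freep) char *copy = strdup(s);
 *                 if (!copy)
 *                         return NULL;
 *                 return TAKE_PTR(copy);   // copy is now NULL, ownership moved to caller
 *         }
 */
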
/*
 * STRLEN - return the length of a string literal, minus the trailing NUL byte.
 * Contrary to strlen(), this is a constant expression.
 * @x: a string literal.
 */
#define STRLEN(x) (sizeof(""x"") - sizeof(typeof(x[0])))

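/* Example (illustrative sketch, not part of the original header): because STRLEN() is a
 * constant expression, it can size buffers and feed assert_cc(), which strlen() cannot.
 *
 *         #define PREFIX "ID="
 *         assert_cc(STRLEN(PREFIX) == 3);
 *         static char line[STRLEN(PREFIX) + 32 + 1];   // prefix + payload + NUL
 */
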
#define mfree(memory) \
        ({ \
                free(memory); \
                (typeof(memory)) NULL; \
        })

static inline size_t ALIGN_TO(size_t l, size_t ali) {
        assert(ISPOWEROF2(ali));

        if (l > SIZE_MAX - (ali - 1))
                return SIZE_MAX; /* indicate overflow */

        return ((l + (ali - 1)) & ~(ali - 1));
}

static inline uint64_t ALIGN_TO_U64(uint64_t l, uint64_t ali) {
        assert(ISPOWEROF2(ali));

        if (l > UINT64_MAX - (ali - 1))
                return UINT64_MAX; /* indicate overflow */

        return ((l + (ali - 1)) & ~(ali - 1));
}

static inline size_t ALIGN_DOWN(size_t l, size_t ali) {
        assert(ISPOWEROF2(ali));

        return l & ~(ali - 1);
}

static inline uint64_t ALIGN_DOWN_U64(uint64_t l, uint64_t ali) {
        assert(ISPOWEROF2(ali));

        return l & ~(ali - 1);
}

static inline size_t ALIGN_OFFSET(size_t l, size_t ali) {
        assert(ISPOWEROF2(ali));

        return l & (ali - 1);
}

static inline uint64_t ALIGN_OFFSET_U64(uint64_t l, uint64_t ali) {
        assert(ISPOWEROF2(ali));

        return l & (ali - 1);
}

#define ALIGN2(l) ALIGN_TO(l, 2)
#define ALIGN4(l) ALIGN_TO(l, 4)
#define ALIGN8(l) ALIGN_TO(l, 8)
#define ALIGN2_PTR(p) ((void*) ALIGN2((uintptr_t) p))
#define ALIGN4_PTR(p) ((void*) ALIGN4((uintptr_t) p))
#define ALIGN8_PTR(p) ((void*) ALIGN8((uintptr_t) p))
#define ALIGN(l) ALIGN_TO(l, sizeof(void*))
#define ALIGN_PTR(p) ((void*) ALIGN((uintptr_t) (p)))

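/* Example (illustrative sketch, not part of the original header): ALIGN_TO() rounds a
 * size up to the next multiple of a power of two and saturates at SIZE_MAX instead of
 * wrapping.
 *
 *         size_t payload = 13;
 *         size_t padded = ALIGN_TO(payload, 8);        // 16
 *         size_t off = ALIGN_OFFSET(payload, 8);       // 5, i.e. 13 % 8
 *         size_t top = ALIGN_TO(SIZE_MAX - 1, 8);      // SIZE_MAX, the overflow indicator
 */
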
/* Checks if the specified pointer is aligned as appropriate for the specific type */
#define IS_ALIGNED16(p) (((uintptr_t) p) % alignof(uint16_t) == 0)
#define IS_ALIGNED32(p) (((uintptr_t) p) % alignof(uint32_t) == 0)
#define IS_ALIGNED64(p) (((uintptr_t) p) % alignof(uint64_t) == 0)

/* Same as ALIGN_TO but callable in constant contexts. */
#define CONST_ALIGN_TO(l, ali) \
        __builtin_choose_expr( \
                __builtin_constant_p(l) && \
                __builtin_constant_p(ali) && \
                CONST_ISPOWEROF2(ali) && \
                (l <= SIZE_MAX - (ali - 1)), /* overflow? */ \
                ((l) + (ali) - 1) & ~((ali) - 1), \
                VOID_0)

/* Similar to ((t *) (void *) (p)) to cast a pointer. The macro asserts that the pointer has a suitable
 * alignment for type "t". This exists for places where otherwise "-Wcast-align=strict" would issue a
 * warning or if you want to assert that the cast gives a pointer of suitable alignment. */
#define CAST_ALIGN_PTR(t, p) \
        ({ \
                const void *_p = (p); \
                assert(((uintptr_t) _p) % alignof(t) == 0); \
                (t *) _p; \
        })

#define UPDATE_FLAG(orig, flag, b) \
        ((b) ? ((orig) | (flag)) : ((orig) & ~(flag)))
#define SET_FLAG(v, flag, b) \
        (v) = UPDATE_FLAG(v, flag, b)
#define FLAGS_SET(v, flags) \
        ((~(v) & (flags)) == 0)

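/* Example (illustrative sketch, not part of the original header): the flag helpers keep
 * bit manipulation readable. The flag constants are hypothetical.
 *
 *         enum {
 *                 OPT_VERBOSE = 1 << 0,
 *                 OPT_DRY_RUN = 1 << 1,
 *         };
 *
 *         unsigned opts = 0;
 *         SET_FLAG(opts, OPT_VERBOSE, true);            // opts == OPT_VERBOSE
 *         SET_FLAG(opts, OPT_DRY_RUN, false);           // unchanged, bit already clear
 *         bool v = FLAGS_SET(opts, OPT_VERBOSE);        // true: all requested bits are set
 */
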
/* A wrapper for 'func' to return void.
 * Only useful when a void-returning function is required by some API. */
#define DEFINE_TRIVIAL_DESTRUCTOR(name, type, func) \
        static inline void name(type *p) { \
                func(p); \
        }

/* For when func() returns the "empty" value (NULL, -1, …) of the appropriate type */
#define DEFINE_TRIVIAL_CLEANUP_FUNC(type, func) \
        static inline void func##p(type *p) { \
                if (*p) \
                        *p = func(*p); \
        }

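/* Example (illustrative sketch, not part of the original header): given a destructor
 * that returns NULL, this generates a pointer-to-pointer wrapper suitable for
 * _cleanup_. The widget type, widget_free() and widget_new() are hypothetical.
 *
 *         struct widget *widget_free(struct widget *w);        // returns NULL
 *         DEFINE_TRIVIAL_CLEANUP_FUNC(struct widget *, widget_free);
 *
 *         static void use_widget(void) {
 *                 _cleanup_(widget_freep) struct widget *w = widget_new();
 *                 // ... w is freed automatically when it goes out of scope
 *         }
 */
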
/* When func() doesn't return the appropriate type, set variable to empty afterwards.
 * The func() may be provided by a dynamically loaded shared library, hence add an assertion. */
#define DEFINE_TRIVIAL_CLEANUP_FUNC_FULL(type, func, empty) \
        static inline void func##p(type *p) { \
                if (*p != (empty)) { \
                        DISABLE_WARNING_ADDRESS; \
                        assert(func); \
                        REENABLE_WARNING; \
                        func(*p); \
                        *p = (empty); \
                } \
        }

/* When func() doesn't return the appropriate type, and is also a macro, set variable to empty afterwards. */
#define DEFINE_TRIVIAL_CLEANUP_FUNC_FULL_MACRO(type, func, empty) \
        static inline void func##p(type *p) { \
                if (*p != (empty)) { \
                        func(*p); \
                        *p = (empty); \
                } \
        }

/* Declare a flexible array usable in a union.
 * This is essentially a work-around for a pointless constraint in C99
 * and might go away in some future version of the standard.
 *
 * See https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/commit/?id=3080ea5553cc909b000d1f1d964a9041962f2c5b
 */
#define DECLARE_FLEX_ARRAY(type, name) \
        struct { \
                dummy_t __empty__ ## name; \
                type name[]; \
        }

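/* Example (illustrative sketch, not part of the original header): wrapping the flexible
 * array lets it sit inside a union, which a bare flexible array member cannot do in
 * ISO C. The message layout is hypothetical.
 *
 *         struct message {
 *                 uint32_t size;
 *                 union {
 *                         uint64_t align;
 *                         DECLARE_FLEX_ARRAY(uint8_t, payload);
 *                 };
 *         };
 */
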
/* Declares an ELF read-only string section that does not occupy memory at runtime. */
#define DECLARE_NOALLOC_SECTION(name, text) \
        asm(".pushsection " name ",\"S\"\n\t" \
            ".ascii " STRINGIFY(text) "\n\t" \
            ".zero 1\n\t" \
            ".popsection\n")

#ifdef SBAT_DISTRO
#  define DECLARE_SBAT(text) DECLARE_NOALLOC_SECTION(".sbat", text)
#else
#  define DECLARE_SBAT(text)
#endif