#ifndef __LINUX_COMPILER_H
#define __LINUX_COMPILER_H

#ifndef __ASSEMBLY__

#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
# define __force_user __force __user
# define __kernel __attribute__((address_space(0)))
# define __force_kernel __force __kernel
# define __safe __attribute__((safe))
# define __force __attribute__((force))
# define __nocast __attribute__((nocast))
# define __iomem __attribute__((noderef, address_space(2)))
# define __force_iomem __force __iomem
# define __must_hold(x) __attribute__((context(x,1,1)))
# define __acquires(x) __attribute__((context(x,0,1)))
# define __releases(x) __attribute__((context(x,1,0)))
# define __acquire(x) __context__(x,1)
# define __release(x) __context__(x,-1)
# define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
# define __percpu __attribute__((noderef, address_space(3)))
# define __force_percpu __force __percpu
#ifdef CONFIG_SPARSE_RCU_POINTER
# define __rcu __attribute__((noderef, address_space(4)))
# define __force_rcu __force __rcu
#else
# define __rcu
# define __force_rcu
#endif
extern void __chk_user_ptr(const volatile void __user *);
extern void __chk_io_ptr(const volatile void __iomem *);
#else
# ifdef CHECKER_PLUGIN
//# define __user
//# define __force_user
//# define __kernel
//# define __force_kernel
# else
#  ifdef STRUCTLEAK_PLUGIN
#   define __user __attribute__((user))
#  else
#   define __user
#  endif
#  define __force_user
#  define __kernel
#  define __force_kernel
# endif
# define __safe
# define __force
# define __nocast
# define __iomem
# define __force_iomem
# define __chk_user_ptr(x) (void)0
# define __chk_io_ptr(x) (void)0
# define __builtin_warning(x, y...) (1)
# define __must_hold(x)
# define __acquires(x)
# define __releases(x)
# define __acquire(x) (void)0
# define __release(x) (void)0
# define __cond_lock(x,c) (c)
# define __percpu
# define __force_percpu
# define __rcu
# define __force_rcu
#endif

/* Indirect macros required for expanded argument pasting, e.g. __LINE__. */
#define ___PASTE(a,b) a##b
#define __PASTE(a,b) ___PASTE(a,b)
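/*
 * Example (illustrative only): the two levels of pasting let an argument
 * such as __LINE__ expand before it is glued. A single-level paste would
 * produce the literal token "__LINE__" instead of the line number:
 *
 *	#define NAMED(prefix) __PASTE(prefix, __LINE__)
 *	static int NAMED(tmp_);    becomes tmp_42 when used on line 42
 */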

#ifdef __KERNEL__

#ifdef __GNUC__
#include <linux/compiler-gcc.h>
#endif

#define notrace __attribute__((no_instrument_function))

/* The Intel compiler defines __GNUC__, so here we override any
 * implementations that came from the header included above.
 */
#ifdef __INTEL_COMPILER
# include <linux/compiler-intel.h>
#endif

/* Clang also defines __GNUC__, so here we likewise override any
 * implementations that came from the headers included above.
 */
#ifdef __clang__
#include <linux/compiler-clang.h>
#endif

/*
 * Generic compiler-dependent macros required for kernel
 * build go below this comment. Actual compiler/compiler version
 * specific implementations come from the above header files
 */

struct ftrace_branch_data {
	const char *func;
	const char *file;
	unsigned line;
	union {
		struct {
			unsigned long correct;
			unsigned long incorrect;
		};
		struct {
			unsigned long miss;
			unsigned long hit;
		};
		unsigned long miss_hit[2];
	};
};

/*
 * Note: DISABLE_BRANCH_PROFILING can be used by special low-level code
 * to disable branch tracing on a per-file basis.
 */
#if defined(CONFIG_TRACE_BRANCH_PROFILING) \
    && !defined(DISABLE_BRANCH_PROFILING) && !defined(__CHECKER__)
void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);

#define likely_notrace(x) __builtin_expect(!!(x), 1)
#define unlikely_notrace(x) __builtin_expect(!!(x), 0)

#define __branch_check__(x, expect) ({ \
		int ______r; \
		static struct ftrace_branch_data \
			__attribute__((__aligned__(4))) \
			__attribute__((section("_ftrace_annotated_branch"))) \
		______f = { \
			.func = __func__, \
			.file = __FILE__, \
			.line = __LINE__, \
		}; \
		______r = likely_notrace(x); \
		ftrace_likely_update(&______f, ______r, expect); \
		______r; \
	})

/*
 * Use __builtin_constant_p(x) to ignore cases where the return
 * value is always the same. This idea is taken from a similar patch
 * written by Daniel Walker.
 */
# ifndef likely
#  define likely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 1))
# endif
# ifndef unlikely
#  define unlikely(x) (__builtin_constant_p(x) ? !!(x) : __branch_check__(x, 0))
# endif

#ifdef CONFIG_PROFILE_ALL_BRANCHES
/*
 * "Define 'is'", Bill Clinton
 * "Define 'if'", Steven Rostedt
 */
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
	if (__builtin_constant_p((cond)) ? !!(cond) : \
	({ \
		int ______r; \
		static struct ftrace_branch_data \
			__attribute__((__aligned__(4))) \
			__attribute__((section("_ftrace_branch"))) \
			______f = { \
				.func = __func__, \
				.file = __FILE__, \
				.line = __LINE__, \
			}; \
		______r = !!(cond); \
		______f.miss_hit[______r]++; \
		______r; \
	}))
#endif /* CONFIG_PROFILE_ALL_BRANCHES */

#else
# define likely(x) __builtin_expect(!!(x), 1)
# define unlikely(x) __builtin_expect(!!(x), 0)
#endif
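
/*
 * Example (illustrative only): annotate a branch whose outcome is known
 * to be heavily biased, such as an allocation failure path:
 *
 *	ptr = kmalloc(size, GFP_KERNEL);
 *	if (unlikely(!ptr))
 *		return -ENOMEM;
 */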

/* Optimization barrier */
#ifndef barrier
# define barrier() __memory_barrier()
#endif
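
/*
 * Example (illustrative only): barrier() clobbers memory, so the compiler
 * must re-read variables it might otherwise keep cached in registers:
 *
 *	while (!done)
 *		barrier();
 *
 * Here "done" (set from an interrupt handler, say) is reloaded on every
 * iteration instead of being hoisted out of the loop.
 */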

/* Unreachable code */
#ifndef unreachable
# define unreachable() do { } while (1)
#endif

#ifndef RELOC_HIDE
# define RELOC_HIDE(ptr, off) \
	({ unsigned long __ptr; \
	   __ptr = (unsigned long) (ptr); \
	   (typeof(ptr)) (__ptr + (off)); })
#endif
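
/*
 * Example (illustrative only): RELOC_HIDE() computes ptr + off through an
 * unsigned long, hiding the relationship from the optimizer so it cannot
 * assume the result still points into the original object. The per-cpu
 * accessors use this pattern to add a per-cpu offset to a base address:
 *
 *	ptr = RELOC_HIDE(&per_cpu_var, per_cpu_offset(cpu));
 */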

#ifndef OPTIMIZER_HIDE_VAR
#define OPTIMIZER_HIDE_VAR(var) barrier()
#endif

/* Not-quite-unique ID. */
#ifndef __UNIQUE_ID
# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
#endif
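
/*
 * Example (illustrative only): __UNIQUE_ID() builds an identifier that is
 * unique per expansion line, handy for hidden temporaries in macros:
 *
 *	__UNIQUE_ID(foo)   expands to   __UNIQUE_ID_foo42   on line 42
 *
 * It is only "not quite" unique because two expansions on the same line,
 * or in different files at the same line number, still collide.
 */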

#include <uapi/linux/types.h>

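/*
 * Declared twice on purpose: attributes such as __compiletime_warning()
 * may only appear on a declaration, so the warning is attached to a bare
 * prototype and the empty definition follows separately. The warning only
 * fires if a call survives optimization, i.e. when one of the size
 * switches below actually falls through to the memcpy path.
 */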
static __always_inline void data_access_exceeds_word_size(void)
#ifdef __compiletime_warning
__compiletime_warning("data access exceeds word size and won't be atomic")
#endif
;

static __always_inline void data_access_exceeds_word_size(void)
{
}

static __always_inline void __read_once_size(const volatile void *p, void *res, int size)
{
	switch (size) {
	case 1: *(__u8 *)res = *(const volatile __u8 *)p; break;
	case 2: *(__u16 *)res = *(const volatile __u16 *)p; break;
	case 4: *(__u32 *)res = *(const volatile __u32 *)p; break;
#ifdef CONFIG_64BIT
	case 8: *(__u64 *)res = *(const volatile __u64 *)p; break;
#endif
	default:
		barrier();
		__builtin_memcpy(res, (const void *)p, size);
		data_access_exceeds_word_size();
		barrier();
	}
}

static __always_inline void __write_once_size(volatile void *p, const void *res, int size)
{
	switch (size) {
	case 1: *(volatile __u8 *)p = *(const __u8 *)res; break;
	case 2: *(volatile __u16 *)p = *(const __u16 *)res; break;
	case 4: *(volatile __u32 *)p = *(const __u32 *)res; break;
#ifdef CONFIG_64BIT
	case 8: *(volatile __u64 *)p = *(const __u64 *)res; break;
#endif
	default:
		barrier();
		__builtin_memcpy((void *)p, res, size);
		data_access_exceeds_word_size();
		barrier();
	}
}

/*
 * Prevent the compiler from merging or refetching reads or writes. The
 * compiler is also forbidden from reordering successive instances of
 * READ_ONCE, WRITE_ONCE and ACCESS_ONCE (see below), but only when the
 * compiler is aware of some particular ordering. One way to make the
 * compiler aware of ordering is to put the two invocations of READ_ONCE,
 * WRITE_ONCE or ACCESS_ONCE() in different C statements.
 *
 * In contrast to ACCESS_ONCE, these two macros will also work on aggregate
 * data types like structs or unions. If the size of the accessed data
 * type exceeds the word size of the machine (e.g., 32 bits or 64 bits),
 * READ_ONCE() and WRITE_ONCE() will fall back to memcpy and print a
 * compile-time warning.
 *
 * Their two major use cases are: (1) Mediating communication between
 * process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 */

#define READ_ONCE(x) \
	({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })

#define WRITE_ONCE(x, val) \
	({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
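
/*
 * Example (illustrative only): a flag shared between process context and
 * an interrupt handler on the same CPU, with no other ordering needed:
 *
 *	WRITE_ONCE(shared_flag, 1);		producer
 *
 *	while (!READ_ONCE(shared_flag))		consumer
 *		cpu_relax();
 *
 * The accesses cannot be torn, fused, or hoisted out of the loop.
 */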

#endif /* __KERNEL__ */

#endif /* __ASSEMBLY__ */

#ifdef __KERNEL__
/*
 * Allow us to mark functions as 'deprecated' and have gcc emit a nice
 * warning for each use, in hopes of speeding the function's removal.
 * Usage is:
 * int __deprecated foo(void)
 */
#ifndef __deprecated
# define __deprecated /* unimplemented */
#endif

#ifdef MODULE
#define __deprecated_for_modules __deprecated
#else
#define __deprecated_for_modules
#endif

#ifndef __must_check
#define __must_check
#endif

#ifndef CONFIG_ENABLE_MUST_CHECK
#undef __must_check
#define __must_check
#endif
#ifndef CONFIG_ENABLE_WARN_DEPRECATED
#undef __deprecated
#undef __deprecated_for_modules
#define __deprecated
#define __deprecated_for_modules
#endif

/*
 * Allow us to avoid 'defined but not used' warnings on functions and data,
 * as well as force them to be emitted to the assembly file.
 *
 * As of gcc 3.4, static functions that are not marked with attribute((used))
 * may be elided from the assembly file. As of gcc 3.4, static data not so
 * marked will not be elided, but this may change in a future gcc version.
 *
 * NOTE: Because distributions shipped with a backported unit-at-a-time
 * compiler in gcc 3.3, we must define __used to be __attribute__((used))
 * for gcc >=3.3 instead of 3.4.
 *
 * In prior versions of gcc, such functions and data would be emitted, but
 * would be warned about except with attribute((unused)).
 *
 * Mark functions that are referenced only in inline assembly as __used so
 * the code is emitted even though it appears to be unreferenced.
 */
#ifndef __used
# define __used /* unimplemented */
#endif
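
/*
 * Example (illustrative only): a function invoked only from inline
 * assembly has no C-visible caller, so mark it __used to keep gcc from
 * discarding it:
 *
 *	static void __used asm_helper(void)
 *	{
 *		...
 *	}
 */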

#ifndef __maybe_unused
# define __maybe_unused /* unimplemented */
#endif

#ifndef __always_unused
# define __always_unused /* unimplemented */
#endif

#ifndef noinline
#define noinline
#endif

/*
 * Rather than using noinline to prevent stack consumption, use
 * noinline_for_stack instead, for documentation reasons.
 */
#define noinline_for_stack noinline

#ifndef __always_inline
#define __always_inline inline
#endif

#endif /* __KERNEL__ */

/*
 * From the GCC manual:
 *
 * Many functions do not examine any values except their arguments,
 * and have no effects except the return value. Basically this is
 * just slightly more strict class than the `pure' attribute above,
 * since function is not allowed to read global memory.
 *
 * Note that a function that has pointer arguments and examines the
 * data pointed to must _not_ be declared `const'. Likewise, a
 * function that calls a non-`const' function usually must not be
 * `const'. It does not make sense for a `const' function to return
 * `void'.
 */
#ifndef __attribute_const__
# define __attribute_const__ /* unimplemented */
#endif
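
/*
 * Example (illustrative only): a pure arithmetic helper that reads no
 * global memory may be marked const, letting the compiler merge repeated
 * calls with the same argument:
 *
 *	static int __attribute_const__ cube(int x)
 *	{
 *		return x * x * x;
 *	}
 */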

#ifndef __randomize_layout
# define __randomize_layout
#endif

#ifndef __no_randomize_layout
# define __no_randomize_layout
#endif

#ifndef __no_const
# define __no_const
#endif

#ifndef __do_const
# define __do_const
#endif

#ifndef __size_overflow
# define __size_overflow(...)
#endif

#ifndef __intentional_overflow
# define __intentional_overflow(...)
#endif

#ifndef __latent_entropy
# define __latent_entropy
#endif

/*
 * Tell gcc if a function is cold. The compiler will assume any path
 * directly leading to the call is unlikely.
 */

#ifndef __cold
#define __cold
#endif

#ifndef __alloc_size
#define __alloc_size(...)
#endif

#ifndef __bos
#define __bos(ptr, arg)
#endif

#ifndef __bos0
#define __bos0(ptr)
#endif

#ifndef __bos1
#define __bos1(ptr)
#endif

/* Simple shorthand for a section definition */
#ifndef __section
# define __section(S) __attribute__ ((__section__(#S)))
#endif
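
/*
 * Example (illustrative only): place an object in a named ELF section;
 * the macro stringifies its argument, so no quotes are needed:
 *
 *	static int early_flag __section(.init.data);
 */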

#ifndef __visible
#define __visible
#endif

/* Are two types/vars the same type (ignoring qualifiers)? */
#ifndef __same_type
# define __same_type(a, b) __builtin_types_compatible_p(typeof(a), typeof(b))
#endif
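
/*
 * Example (illustrative only): reject non-array arguments in an
 * ARRAY_SIZE-style macro; for a true array, the array and the address of
 * its first element have different (incompatible) types:
 *
 *	BUILD_BUG_ON(__same_type((a), &(a)[0]));
 */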

/* Is this type a native word size -- useful for atomic operations */
#ifndef __native_word
# define __native_word(t) (sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long))
#endif

/* Compile time object size, -1 for unknown */
#ifndef __compiletime_object_size
# define __compiletime_object_size(obj) -1
#endif
#ifndef __compiletime_warning
# define __compiletime_warning(message)
#endif
#ifndef __compiletime_error
# define __compiletime_error(message)
/*
 * Sparse complains of variable sized arrays due to the temporary variable in
 * __compiletime_assert. Unfortunately we can't just expand it out to make
 * sparse see a constant array size without breaking compiletime_assert on old
 * versions of GCC (e.g. 4.2.4), so hide the array from sparse altogether.
 */
# ifndef __CHECKER__
#  define __compiletime_error_fallback(condition) \
	do { ((void)sizeof(char[1 - 2 * condition])); } while (0)
# endif
#endif
#ifndef __compiletime_error_fallback
# define __compiletime_error_fallback(condition) do { } while (0)
#endif

#define __compiletime_assert(condition, msg, prefix, suffix) \
	do { \
		bool __cond = !(condition); \
		extern void prefix ## suffix(void) __compiletime_error(msg); \
		if (__cond) \
			prefix ## suffix(); \
		__compiletime_error_fallback(__cond); \
	} while (0)

#define _compiletime_assert(condition, msg, prefix, suffix) \
	__compiletime_assert(condition, msg, prefix, suffix)

/**
 * compiletime_assert - break build and emit msg if condition is false
 * @condition: a compile-time constant condition to check
 * @msg: a message to emit if condition is false
 *
 * In the tradition of POSIX assert, this macro will break the build if the
 * supplied condition is *false*, emitting the supplied error message if the
 * compiler has support to do so.
 */
#define compiletime_assert(condition, msg) \
	_compiletime_assert(condition, msg, __compiletime_assert_, __LINE__)

#define compiletime_assert_atomic_type(t) \
	compiletime_assert(__native_word(t), \
		"Need native word sized stores/loads for atomicity.")

/*
 * Prevent the compiler from merging or refetching accesses. The compiler
 * is also forbidden from reordering successive instances of ACCESS_ONCE(),
 * but only when the compiler is aware of some particular ordering. One way
 * to make the compiler aware of ordering is to put the two invocations of
 * ACCESS_ONCE() in different C statements.
 *
 * ACCESS_ONCE will only work on scalar types. For union types, ACCESS_ONCE
 * on a union member will work as long as the size of the member matches the
 * size of the union and the size is smaller than word size.
 *
 * The major use cases of ACCESS_ONCE used to be (1) Mediating communication
 * between process-level code and irq/NMI handlers, all running on the same CPU,
 * and (2) Ensuring that the compiler does not fold, spindle, or otherwise
 * mutilate accesses that either do not require ordering or that interact
 * with an explicit memory barrier or atomic instruction that provides the
 * required ordering.
 *
 * If possible use READ_ONCE()/WRITE_ONCE() instead.
 */
#define __ACCESS_ONCE(x) ({ \
	 __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
	(volatile const typeof(x) *)&(x); })
#define ACCESS_ONCE(x) (*__ACCESS_ONCE(x))
#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))

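/*
 * Example (illustrative only; "lock->owner" stands for any shared scalar):
 * re-read the owner on every iteration of a spin loop instead of letting
 * the compiler cache it in a register:
 *
 *	while (ACCESS_ONCE(lock->owner) == current)
 *		cpu_relax();
 */
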
/* Ignore/forbid kprobes attach on very low-level functions marked by this attribute: */
#ifdef CONFIG_KPROBES
# define __kprobes __attribute__((__section__(".kprobes.text")))
# define nokprobe_inline __always_inline
#else
# define __kprobes
# define nokprobe_inline inline
#endif
#endif /* __LINUX_COMPILER_H */