.\" man2/membarrier.2
1 .\" Copyright 2015-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
2 .\"
3 .\" %%%LICENSE_START(VERBATIM)
4 .\" Permission is granted to make and distribute verbatim copies of this
5 .\" manual provided the copyright notice and this permission notice are
6 .\" preserved on all copies.
7 .\"
8 .\" Permission is granted to copy and distribute modified versions of this
9 .\" manual under the conditions for verbatim copying, provided that the
10 .\" entire resulting derived work is distributed under the terms of a
11 .\" permission notice identical to this one.
12 .\"
13 .\" Since the Linux kernel and libraries are constantly changing, this
14 .\" manual page may be incorrect or out-of-date. The author(s) assume no
15 .\" responsibility for errors or omissions, or for damages resulting from
16 .\" the use of the information contained herein. The author(s) may not
17 .\" have taken the same level of care in the production of this manual,
18 .\" which is licensed free of charge, as they might when working
19 .\" professionally.
20 .\"
21 .\" Formatted or processed versions of this manual, if unaccompanied by
22 .\" the source, must acknowledge the copyright and authors of this work.
23 .\" %%%LICENSE_END
24 .\"
25 .TH MEMBARRIER 2 2018-04-30 "Linux" "Linux Programmer's Manual"
26 .SH NAME
27 membarrier \- issue memory barriers on a set of threads
28 .SH SYNOPSIS
29 .B #include <linux/membarrier.h>
30 .PP
31 .BI "int membarrier(int " cmd ", int " flags ");"
32 .SH DESCRIPTION
33 The
34 .BR membarrier ()
35 system call helps reduce the overhead of the memory barrier
36 instructions required to order memory accesses on multi-core systems.
37 However, this system call is heavier than a memory barrier, so using it
38 effectively is
39 .I not
40 as simple as replacing memory barriers with this
41 system call, but requires understanding of the details below.
42 .PP
43 Use of memory barriers needs to be done taking into account that a
44 memory barrier always needs to be either matched with its memory barrier
45 counterparts, or that the architecture's memory model doesn't require the
46 matching barriers.
47 .PP
48 There are cases where one side of the matching barriers (which we will
49 refer to as "fast side") is executed much more often than the other
50 (which we will refer to as "slow side").
51 This is a prime target for the use of
52 .BR membarrier ().
53 The key idea is to replace, for these matching
54 barriers, the fast-side memory barriers by simple compiler barriers,
55 for example:
56 .PP
57 .in +4n
58 .EX
59 asm volatile ("" : : : "memory")
60 .EE
61 .in
62 .PP
63 and replace the slow-side memory barriers by calls to
64 .BR membarrier ().
65 .PP
66 This will add overhead to the slow side, and remove overhead from the
67 fast side, thus resulting in an overall performance increase as long as
68 the slow side is infrequent enough that the overhead of the
69 .BR membarrier ()
70 calls does not outweigh the performance gain on the fast side.
71 .PP
72 The
73 .I cmd
74 argument is one of the following:
75 .TP
76 .BR MEMBARRIER_CMD_QUERY " (since Linux 4.3)"
77 Query the set of supported commands.
78 The return value of the call is a bit mask of supported
79 commands.
80 .BR MEMBARRIER_CMD_QUERY ,
81 which has the value 0,
82 is not itself included in this bit mask.
83 This command is always supported (on kernels where
84 .BR membarrier ()
85 is provided).
86 .TP
87 .BR MEMBARRIER_CMD_GLOBAL " (since Linux 4.16)"
88 Ensure that all threads from all processes on the system pass through a
89 state where all memory accesses to user-space addresses match program
90 order between entry to and return from the
91 .BR membarrier ()
92 system call.
93 All threads on the system are targeted by this command.
94 .TP
95 .BR MEMBARRIER_CMD_GLOBAL_EXPEDITED " (since Linux 4.16)"
96 Execute a memory barrier on all running threads of all processes that
97 previously registered with
98 .BR MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED .
99 .IP
100 Upon return from the system call, the calling thread has a guarantee that all
101 running threads have passed through a state where all memory accesses to
102 user-space addresses match program order between entry to and return
103 from the system call (non-running threads are de facto in such a state).
104 This guarantee is provided only for the threads of processes that
105 previously registered with
106 .BR MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED .
107 .IP
108 Given that registration is about the intent to receive the barriers, it
109 is valid to invoke
110 .BR MEMBARRIER_CMD_GLOBAL_EXPEDITED
111 from a process that has not employed
112 .BR MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED .
113 .IP
114 The "expedited" commands complete faster than the non-expedited ones;
115 they never block, but have the downside of causing extra overhead.
116 .TP
117 .BR MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED " (since Linux 4.16)"
118 Register the process's intent to receive
119 .BR MEMBARRIER_CMD_GLOBAL_EXPEDITED
120 memory barriers.
121 .TP
122 .BR MEMBARRIER_CMD_PRIVATE_EXPEDITED " (since Linux 4.14)"
123 Execute a memory barrier on each running thread belonging to the same
124 process as the calling thread.
125 .IP
126 Upon return from the system call, the calling
127 thread has a guarantee that all its running thread siblings have passed
128 through a state where all memory accesses to user-space addresses match
129 program order between entry to and return from the system call
130 (non-running threads are de facto in such a state).
131 This guarantee is provided only for threads in
132 the same process as the calling thread.
133 .IP
134 The "expedited" commands complete faster than the non-expedited ones;
135 they never block, but have the downside of causing extra overhead.
136 .IP
137 A process must register its intent to use the private
138 expedited command prior to using it.
139 .TP
140 .BR MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED " (since Linux 4.14)"
141 Register the process's intent to use
142 .BR MEMBARRIER_CMD_PRIVATE_EXPEDITED .
143 .TP
144 .BR MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE " (since Linux 4.16)"
145 In addition to providing the memory ordering guarantees described in
146 .BR MEMBARRIER_CMD_PRIVATE_EXPEDITED ,
147 upon return from system call the calling thread has a guarantee that all its
148 running thread siblings have executed a core serializing instruction.
149 This guarantee is provided only for threads in
150 the same process as the calling thread.
151 .IP
152 The "expedited" commands complete faster than the non-expedited ones;
153 they never block, but have the downside of causing extra overhead.
154 .IP
155 A process must register its intent to use the private expedited sync
156 core command prior to using it.
157 .TP
158 .BR MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE " (since Linux 4.16)"
159 Register the process's intent to use
160 .BR MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE .
161 .TP
162 .BR MEMBARRIER_CMD_SHARED " (since Linux 4.3)"
163 This is an alias for
164 .BR MEMBARRIER_CMD_GLOBAL
165 that exists for header backward compatibility.
166 .PP
167 The
168 .I flags
169 argument is currently unused and must be specified as 0.
170 .PP
171 All memory accesses performed in program order from each targeted thread
172 are guaranteed to be ordered with respect to
173 .BR membarrier ().
174 .PP
175 If we use the semantic
176 .I barrier()
177 to represent a compiler barrier forcing memory
178 accesses to be performed in program order across the barrier, and
179 .I smp_mb()
180 to represent explicit memory barriers forcing full memory
181 ordering across the barrier, we have the following ordering table for
182 each pairing of
183 .IR barrier() ,
184 .BR membarrier ()
185 and
186 .IR smp_mb() .
187 The pair ordering is detailed as (O: ordered, X: not ordered):
188 .PP
189                        barrier()   smp_mb()   membarrier()
190 barrier()                 X           X           O
191 smp_mb()                  X           O           O
192 membarrier()              O           O           O
193 .SH RETURN VALUE
194 On success, the
195 .B MEMBARRIER_CMD_QUERY
196 operation returns a bit mask of supported commands, and the
197 .BR MEMBARRIER_CMD_GLOBAL ,
198 .BR MEMBARRIER_CMD_GLOBAL_EXPEDITED ,
199 .BR MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED ,
200 .BR MEMBARRIER_CMD_PRIVATE_EXPEDITED ,
201 .BR MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED ,
202 .BR MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ,
203 and
204 .B MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE
205 operations return zero.
206 On error, \-1 is returned,
207 and
208 .I errno
209 is set appropriately.
210 .PP
211 For a given command, with
212 .I flags
213 set to 0, this system call is
214 guaranteed to always return the same value until reboot.
215 Further calls with the same arguments will lead to the same result.
216 Therefore, with
217 .I flags
218 set to 0, error handling is required only for the first call to
219 .BR membarrier ().
220 .SH ERRORS
221 .TP
222 .B EINVAL
223 .I cmd
224 is invalid, or
225 .I flags
226 is nonzero, or the
227 .BR MEMBARRIER_CMD_GLOBAL
228 command is disabled because the
229 .I nohz_full
230 CPU parameter has been set, or the
231 .BR MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE
232 and
233 .BR MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE
234 commands are not implemented by the architecture.
235 .TP
236 .B ENOSYS
237 The
238 .BR membarrier ()
239 system call is not implemented by this kernel.
240 .TP
241 .B EPERM
242 The current process was not registered prior to using private expedited
243 commands.
244 .SH VERSIONS
245 The
246 .BR membarrier ()
247 system call was added in Linux 4.3.
248 .\"
249 .SH CONFORMING TO
250 .BR membarrier ()
251 is Linux-specific.
252 .\" .SH SEE ALSO
253 .\" FIXME See if the following syscalls make it into Linux 4.15 or later
254 .\" .BR cpu_opv (2),
255 .\" .BR rseq (2)
256 .SH NOTES
257 A memory barrier instruction is part of the instruction set of
258 architectures with weakly-ordered memory models.
259 It orders memory
260 accesses prior to the barrier and after the barrier with respect to
261 matching barriers on other cores.
262 For instance, a load fence can order
263 loads prior to and following that fence with respect to stores ordered
264 by store fences.
265 .PP
266 Program order is the order in which instructions are ordered in the
267 program assembly code.
268 .PP
269 Examples where
270 .BR membarrier ()
271 can be useful include implementations
272 of Read-Copy-Update libraries and garbage collectors.
273 .SH EXAMPLE
274 Assuming a multithreaded application where "fast_path()" is executed
275 very frequently, and where "slow_path()" is executed infrequently, the
276 following code (x86) can be transformed using
277 .BR membarrier ():
278 .PP
279 .in +4n
280 .EX
281 #include <stdlib.h>
282
283 static volatile int a, b;
284
285 static void
286 fast_path(int *read_b)
287 {
288 a = 1;
289 asm volatile ("mfence" : : : "memory");
290 *read_b = b;
291 }
292
293 static void
294 slow_path(int *read_a)
295 {
296 b = 1;
297 asm volatile ("mfence" : : : "memory");
298 *read_a = a;
299 }
300
301 int
302 main(int argc, char **argv)
303 {
304 int read_a, read_b;
305
306 /*
307 * Real applications would call fast_path() and slow_path()
308 * from different threads. Call those from main() to keep
309 * this example short.
310 */
311
312 slow_path(&read_a);
313 fast_path(&read_b);
314
315 /*
316 * read_b == 0 implies read_a == 1 and
317 * read_a == 0 implies read_b == 1.
318 */
319
320 if (read_b == 0 && read_a == 0)
321 abort();
322
323 exit(EXIT_SUCCESS);
324 }
325 .EE
326 .in
327 .PP
328 The code above transformed to use
329 .BR membarrier ()
330 becomes:
331 .PP
332 .in +4n
333 .EX
334 #define _GNU_SOURCE
335 #include <stdlib.h>
336 #include <stdio.h>
337 #include <unistd.h>
338 #include <sys/syscall.h>
339 #include <linux/membarrier.h>
340
341 static volatile int a, b;
342
343 static int
344 membarrier(int cmd, int flags)
345 {
346 return syscall(__NR_membarrier, cmd, flags);
347 }
348
349 static int
350 init_membarrier(void)
351 {
352 int ret;
353
354 /* Check that membarrier() is supported. */
355
356 ret = membarrier(MEMBARRIER_CMD_QUERY, 0);
357 if (ret < 0) {
358 perror("membarrier");
359 return \-1;
360 }
361
362 if (!(ret & MEMBARRIER_CMD_GLOBAL)) {
363 fprintf(stderr,
364 "membarrier does not support MEMBARRIER_CMD_GLOBAL\en");
365 return \-1;
366 }
367
368 return 0;
369 }
370
371 static void
372 fast_path(int *read_b)
373 {
374 a = 1;
375 asm volatile ("" : : : "memory");
376 *read_b = b;
377 }
378
379 static void
380 slow_path(int *read_a)
381 {
382 b = 1;
383 membarrier(MEMBARRIER_CMD_GLOBAL, 0);
384 *read_a = a;
385 }
386
387 int
388 main(int argc, char **argv)
389 {
390 int read_a, read_b;
391
392 if (init_membarrier())
393 exit(EXIT_FAILURE);
394
395 /*
396 * Real applications would call fast_path() and slow_path()
397 * from different threads. Call those from main() to keep
398 * this example short.
399 */
400
401 slow_path(&read_a);
402 fast_path(&read_b);
403
404 /*
405 * read_b == 0 implies read_a == 1 and
406 * read_a == 0 implies read_b == 1.
407 */
408
409 if (read_b == 0 && read_a == 0)
410 abort();
411
412 exit(EXIT_SUCCESS);
413 }
414 .EE
415 .in