1 .\" Copyright 2015-2017 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
2 .\"
3 .\" SPDX-License-Identifier: Linux-man-pages-copyleft
4 .\"
5 .TH MEMBARRIER 2 2021-08-27 "Linux man-pages (unreleased)"
6 .SH NAME
7 membarrier \- issue memory barriers on a set of threads
8 .SH LIBRARY
9 Standard C library
10 .RI ( libc ", " \-lc )
11 .SH SYNOPSIS
12 .nf
13 .PP
14 .BR "#include <linux/membarrier.h>" \
15 " /* Definition of " MEMBARRIER_* " constants */"
16 .BR "#include <sys/syscall.h>" " /* Definition of " SYS_* " constants */"
17 .B #include <unistd.h>
18 .PP
19 .BI "int syscall(SYS_membarrier, int " cmd ", unsigned int " flags \
20 ", int " cpu_id );
21 .fi
22 .PP
23 .IR Note :
24 glibc provides no wrapper for
25 .BR membarrier (),
26 necessitating the use of
27 .BR syscall (2).
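.PP
Since the call goes through
.BR syscall (2),
a small wrapper function keeps call sites readable.
The following sketch (an illustrative helper, not a glibc API;
the EXAMPLES below use an equivalent wrapper) shows one way to define it:
.PP
.in +4n
.EX
static int
membarrier(int cmd, unsigned int flags, int cpu_id)
{
    return syscall(SYS_membarrier, cmd, flags, cpu_id);
}
.EE
.in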
.SH DESCRIPTION
The
.BR membarrier ()
system call helps reduce the overhead of the memory barrier
instructions required to order memory accesses on multi-core systems.
However, this system call is heavier than a memory barrier, so using it
effectively is
.I not
as simple as replacing memory barriers with this
system call, but requires understanding of the details below.
.PP
When using memory barriers, bear in mind that a memory barrier
always needs to be either matched with its memory barrier
counterparts, or the architecture's memory model must not require
the matching barriers.
.PP
There are cases where one side of the matching barriers (which we will
refer to as "fast side") is executed much more often than the other
(which we will refer to as "slow side").
This is a prime target for the use of
.BR membarrier ().
The key idea is to replace, for these matching
barriers, the fast-side memory barriers by simple compiler barriers,
for example:
.PP
.in +4n
.EX
asm volatile ("" : : : "memory")
.EE
.in
.PP
and replace the slow-side memory barriers by calls to
.BR membarrier ().
.PP
This will add overhead to the slow side, and remove overhead from the
fast side, thus resulting in an overall performance increase as long as
the slow side is infrequent enough that the overhead of the
.BR membarrier ()
calls does not outweigh the performance gain on the fast side.
See EXAMPLES below for a complete transformation of this kind.
.PP
The
.I cmd
argument is one of the following:
.TP
.BR MEMBARRIER_CMD_QUERY " (since Linux 4.3)"
Query the set of supported commands.
The return value of the call is a bit mask of supported
commands.
.BR MEMBARRIER_CMD_QUERY ,
which has the value 0,
is not itself included in this bit mask.
This command is always supported (on kernels where
.BR membarrier ()
is provided).
See the sketch following this list for a typical support check.
.TP
.BR MEMBARRIER_CMD_GLOBAL " (since Linux 4.16)"
Ensure that all threads from all processes on the system pass through a
state where all memory accesses to user-space addresses match program
order between entry to and return from the
.BR membarrier ()
system call.
All threads on the system are targeted by this command.
.TP
.BR MEMBARRIER_CMD_GLOBAL_EXPEDITED " (since Linux 4.16)"
Execute a memory barrier on all running threads of all processes that
previously registered with
.BR MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED .
.IP
Upon return from the system call, the calling thread has a guarantee that all
running threads have passed through a state where all memory accesses to
user-space addresses match program order between entry to and return
from the system call (non-running threads are de facto in such a state).
This guarantee is provided only for the threads of processes that
previously registered with
.BR MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED .
.IP
Given that registration is about the intent to receive the barriers, it
is valid to invoke
.B MEMBARRIER_CMD_GLOBAL_EXPEDITED
from a process that has not employed
.BR MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED .
.IP
The "expedited" commands complete faster than the non-expedited ones;
they never block, but have the downside of causing extra overhead.
.TP
.BR MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED " (since Linux 4.16)"
Register the process's intent to receive
.B MEMBARRIER_CMD_GLOBAL_EXPEDITED
memory barriers.
.TP
.BR MEMBARRIER_CMD_PRIVATE_EXPEDITED " (since Linux 4.14)"
Execute a memory barrier on each running thread belonging to the same
process as the calling thread.
.IP
Upon return from the system call, the calling
thread has a guarantee that all its running thread siblings have passed
through a state where all memory accesses to user-space addresses match
program order between entry to and return from the system call
(non-running threads are de facto in such a state).
This guarantee is provided only for threads in
the same process as the calling thread.
.IP
The "expedited" commands complete faster than the non-expedited ones;
they never block, but have the downside of causing extra overhead.
.IP
A process must register its intent to use the private
expedited command prior to using it.
.TP
.BR MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED " (since Linux 4.14)"
Register the process's intent to use
.BR MEMBARRIER_CMD_PRIVATE_EXPEDITED .
.TP
.BR MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE " (since Linux 4.16)"
In addition to providing the memory ordering guarantees described in
.BR MEMBARRIER_CMD_PRIVATE_EXPEDITED ,
upon return from the system call the calling thread has a guarantee that
all its running thread siblings have executed a core serializing
instruction.
This guarantee is provided only for threads in
the same process as the calling thread.
.IP
The "expedited" commands complete faster than the non-expedited ones;
they never block, but have the downside of causing extra overhead.
.IP
A process must register its intent to use the private expedited sync
core command prior to using it.
.TP
.BR MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE " (since Linux 4.16)"
Register the process's intent to use
.BR MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE .
.TP
.BR MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ " (since Linux 5.10)"
Upon return from the system call, the calling thread has a guarantee
that all its running thread siblings have had any currently running
rseq critical sections restarted if the
.I flags
argument is 0;
if
.I flags
is
.BR MEMBARRIER_CMD_FLAG_CPU ,
then this operation is performed only on the CPU indicated by
.IR cpu_id .
This guarantee is provided only for threads in
the same process as the calling thread.
.IP
RSEQ membarrier is only available in the "private expedited" form.
.IP
A process must register its intent to use the private expedited rseq
command prior to using it.
.TP
.BR MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ " (since Linux 5.10)"
Register the process's intent to use
.BR MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ .
.TP
.BR MEMBARRIER_CMD_SHARED " (since Linux 4.3)"
This is an alias for
.B MEMBARRIER_CMD_GLOBAL
that exists for header backward compatibility.
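.PP
For instance, a program can test at start-up whether the commands it
intends to use are supported.
The following sketch (error handling elided) checks for
.BR MEMBARRIER_CMD_PRIVATE_EXPEDITED :
.PP
.in +4n
.EX
int ret;

ret = syscall(SYS_membarrier, MEMBARRIER_CMD_QUERY, 0, 0);
if (ret == \-1) {
    /* membarrier() is unavailable (e.g., ENOSYS). */
} else if (!(ret & MEMBARRIER_CMD_PRIVATE_EXPEDITED)) {
    /* Command unsupported: fall back to plain memory barriers. */
}
.EE
.in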
.PP
The
.I flags
argument must be specified as 0 unless the command is
.BR MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ ,
in which case
.I flags
can be either 0 or
.BR MEMBARRIER_CMD_FLAG_CPU .
.PP
The
.I cpu_id
argument is ignored unless
.I flags
is
.BR MEMBARRIER_CMD_FLAG_CPU ,
in which case it must specify the CPU targeted by this membarrier
command.
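.PP
As an illustration (a sketch, not a complete program), a process using
the private expedited rseq command first registers, and may later
target a single CPU via
.BR MEMBARRIER_CMD_FLAG_CPU :
.PP
.in +4n
.EX
/* Once, at process start-up (error checking omitted): */
syscall(SYS_membarrier,
        MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_RSEQ, 0, 0);

/* Later: restart rseq critical sections on CPU 3 only;
   the CPU number here is arbitrary. */
syscall(SYS_membarrier, MEMBARRIER_CMD_PRIVATE_EXPEDITED_RSEQ,
        MEMBARRIER_CMD_FLAG_CPU, 3);
.EE
.in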
.PP
All memory accesses performed in program order from each targeted thread
are guaranteed to be ordered with respect to
.BR membarrier ().
.PP
If we use
.I barrier()
to represent a compiler barrier forcing memory
accesses to be performed in program order across the barrier, and
.I smp_mb()
to represent explicit memory barriers forcing full memory
ordering across the barrier, we have the following ordering table for
each pairing of
.IR barrier() ,
.BR membarrier (),
and
.IR smp_mb() .
The pair ordering is detailed as (O: ordered, X: not ordered):
.PP
.RS
.TS
l c c c.
\&	barrier()	smp_mb()	membarrier()
barrier()	X	X	O
smp_mb()	X	O	O
membarrier()	O	O	O
.TE
.RE
.SH RETURN VALUE
On success, the
.B MEMBARRIER_CMD_QUERY
operation returns a bit mask of supported commands, and the
.BR MEMBARRIER_CMD_GLOBAL ,
.BR MEMBARRIER_CMD_GLOBAL_EXPEDITED ,
.BR MEMBARRIER_CMD_REGISTER_GLOBAL_EXPEDITED ,
.BR MEMBARRIER_CMD_PRIVATE_EXPEDITED ,
.BR MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED ,
.BR MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE ,
and
.B MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE
operations return zero.
On error, \-1 is returned,
and
.I errno
is set to indicate the error.
.PP
For a given command, with
.I flags
set to 0, this system call is
guaranteed to always return the same value until reboot.
Further calls with the same arguments will lead to the same result.
Therefore, with
.I flags
set to 0, error handling is required only for the first call to
.BR membarrier ().
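.PP
For example, a program may check the first call and omit error
handling thereafter, along the following lines (a sketch):
.PP
.in +4n
.EX
/* First call with these arguments: check for errors. */
if (syscall(SYS_membarrier, MEMBARRIER_CMD_GLOBAL, 0, 0) == \-1) {
    /* handle error */
}

/* Identical later calls are guaranteed the same result,
   so their return value may be ignored. */
(void) syscall(SYS_membarrier, MEMBARRIER_CMD_GLOBAL, 0, 0);
.EE
.in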
.SH ERRORS
.TP
.B EINVAL
.I cmd
is invalid, or
.I flags
is nonzero, or the
.B MEMBARRIER_CMD_GLOBAL
command is disabled because the
.I nohz_full
CPU parameter has been set, or the
.B MEMBARRIER_CMD_PRIVATE_EXPEDITED_SYNC_CORE
and
.B MEMBARRIER_CMD_REGISTER_PRIVATE_EXPEDITED_SYNC_CORE
commands are not implemented by the architecture.
.TP
.B ENOSYS
The
.BR membarrier ()
system call is not implemented by this kernel.
.TP
.B EPERM
The current process was not registered prior to using private expedited
commands.
.SH VERSIONS
The
.BR membarrier ()
system call was added in Linux 4.3.
.PP
Before Linux 5.10, the prototype for
.BR membarrier ()
was:
.PP
.in +4n
.EX
.BI "int membarrier(int " cmd ", int " flags );
.EE
.in
.SH STANDARDS
.BR membarrier ()
is Linux-specific.
.\" .SH SEE ALSO
.\" FIXME See if the following syscalls make it into Linux 4.15 or later
.\" .BR cpu_opv (2),
.\" .BR rseq (2)
.SH NOTES
A memory barrier instruction is part of the instruction set of
architectures with weakly ordered memory models.
It orders memory
accesses prior to the barrier and after the barrier with respect to
matching barriers on other cores.
For instance, a load fence can order
loads prior to and following that fence with respect to stores ordered
by store fences.
.PP
Program order is the order in which instructions appear in the
program assembly code.
.PP
Examples where
.BR membarrier ()
can be useful include implementations
of Read-Copy-Update libraries and garbage collectors.
.SH EXAMPLES
Assuming a multithreaded application where "fast_path()" is executed
very frequently, and where "slow_path()" is executed infrequently, the
following code (x86) can be transformed using
.BR membarrier ():
.PP
.in +4n
.\" SRC BEGIN (membarrier.c)
.EX
#include <stdlib.h>

static volatile int a, b;

static void
fast_path(int *read_b)
{
    a = 1;
    asm volatile ("mfence" : : : "memory");
    *read_b = b;
}

static void
slow_path(int *read_a)
{
    b = 1;
    asm volatile ("mfence" : : : "memory");
    *read_a = a;
}

int
main(void)
{
    int read_a, read_b;

    /*
     * Real applications would call fast_path() and slow_path()
     * from different threads. Call those from main() to keep
     * this example short.
     */

    slow_path(&read_a);
    fast_path(&read_b);

    /*
     * read_b == 0 implies read_a == 1 and
     * read_a == 0 implies read_b == 1.
     */

    if (read_b == 0 && read_a == 0)
        abort();

    exit(EXIT_SUCCESS);
}
.EE
.\" SRC END
.in
.PP
The code above transformed to use
.BR membarrier ()
becomes:
.PP
.in +4n
.EX
#define _GNU_SOURCE
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/membarrier.h>

static volatile int a, b;

static int
membarrier(int cmd, unsigned int flags, int cpu_id)
{
    return syscall(__NR_membarrier, cmd, flags, cpu_id);
}

static int
init_membarrier(void)
{
    int ret;

    /* Check that membarrier() is supported. */

    ret = membarrier(MEMBARRIER_CMD_QUERY, 0, 0);
    if (ret < 0) {
        perror("membarrier");
        return \-1;
    }

    if (!(ret & MEMBARRIER_CMD_GLOBAL)) {
        fprintf(stderr,
                "membarrier does not support MEMBARRIER_CMD_GLOBAL\en");
        return \-1;
    }

    return 0;
}

static void
fast_path(int *read_b)
{
    a = 1;
    asm volatile ("" : : : "memory");
    *read_b = b;
}

static void
slow_path(int *read_a)
{
    b = 1;
    membarrier(MEMBARRIER_CMD_GLOBAL, 0, 0);
    *read_a = a;
}

int
main(void)
{
    int read_a, read_b;

    if (init_membarrier())
        exit(EXIT_FAILURE);

    /*
     * Real applications would call fast_path() and slow_path()
     * from different threads. Call those from main() to keep
     * this example short.
     */

    slow_path(&read_a);
    fast_path(&read_b);

    /*
     * read_b == 0 implies read_a == 1 and
     * read_a == 0 implies read_b == 1.
     */

    if (read_b == 0 && read_a == 0)
        abort();

    exit(EXIT_SUCCESS);
}
.EE
.in