/* libgomp/critical.c */

/* Copyright (C) 2005-2019 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the CRITICAL construct.  */

#include "libgomp.h"
#include <stdlib.h>


static gomp_mutex_t default_lock;

void
GOMP_critical_start (void)
{
  /* There is an implicit flush on entry to a critical region.  */
  __atomic_thread_fence (MEMMODEL_RELEASE);
  gomp_mutex_lock (&default_lock);
}

void
GOMP_critical_end (void)
{
  gomp_mutex_unlock (&default_lock);
}
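
/* For reference, a rough sketch (not part of this file) of how a compiler
   might lower an unnamed critical construct onto the two entry points
   above.  A region such as

       #pragma omp critical
       x += 1;

   would become, approximately,

       GOMP_critical_start ();
       x += 1;
       GOMP_critical_end ();

   with x and the exact calling sequence purely illustrative.  */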

#ifndef HAVE_SYNC_BUILTINS
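/* Without compare-and-swap builtins, lazy creation of a named critical
   section's lock in GOMP_critical_name_start below is serialized through
   this mutex instead.  */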
static gomp_mutex_t create_lock_lock;
#endif

void
GOMP_critical_name_start (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *)pptr;

  /* Otherwise we have to be prepared to malloc storage.  */
  else
    {
      plock = *pptr;

      if (plock == NULL)
        {
#ifdef HAVE_SYNC_BUILTINS
          gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t));
          gomp_mutex_init (nlock);

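          /* Try to publish the freshly created lock.  If another thread
             installed its lock first, the compare-and-swap returns that
             thread's pointer; discard ours and use the winner's.  */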
          plock = __sync_val_compare_and_swap (pptr, NULL, nlock);
          if (plock != NULL)
            {
              gomp_mutex_destroy (nlock);
              free (nlock);
            }
          else
            plock = nlock;
#else
          gomp_mutex_lock (&create_lock_lock);
          plock = *pptr;
          if (plock == NULL)
            {
              plock = gomp_malloc (sizeof (gomp_mutex_t));
              gomp_mutex_init (plock);
              __sync_synchronize ();
              *pptr = plock;
            }
          gomp_mutex_unlock (&create_lock_lock);
#endif
        }
    }

  gomp_mutex_lock (plock);
}

void
GOMP_critical_name_end (void **pptr)
{
  gomp_mutex_t *plock;

  /* If a mutex fits within the space for a pointer, and is zero initialized,
     then use the pointer space directly.  */
  if (GOMP_MUTEX_INIT_0
      && sizeof (gomp_mutex_t) <= sizeof (void *)
      && __alignof (gomp_mutex_t) <= sizeof (void *))
    plock = (gomp_mutex_t *)pptr;
  else
    plock = *pptr;

  gomp_mutex_unlock (plock);
}
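
/* For reference, a rough sketch (not part of this file) of how a named
   critical construct such as "#pragma omp critical (foo)" might reach the
   two entry points above: the compiler is expected to emit a pointer-sized,
   zero-initialized static object and pass its address, roughly

       static void *gomp_critical_user_foo;
       ...
       GOMP_critical_name_start (&gomp_critical_user_foo);
       x += 1;
       GOMP_critical_name_end (&gomp_critical_user_foo);

   where the variable name and mangling are illustrative; only the
   pointer-sized, zero-initialized storage is assumed by the code above.  */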

#if !GOMP_MUTEX_INIT_0
static void __attribute__((constructor))
initialize_critical (void)
{
  gomp_mutex_init (&default_lock);
#ifndef HAVE_SYNC_BUILTINS
  gomp_mutex_init (&create_lock_lock);
#endif
}
#endif