libphobos: core.atomic should have fallback when there's no libatomic.
author    ibuclaw <ibuclaw@138bc75d-0d04-0410-961f-82ee72b054a4>
Sat, 20 Apr 2019 17:14:50 +0000 (17:14 +0000)
committer ibuclaw <ibuclaw@138bc75d-0d04-0410-961f-82ee72b054a4>
Sat, 20 Apr 2019 17:14:50 +0000 (17:14 +0000)
libphobos/ChangeLog:

2019-04-20  Iain Buclaw  <ibuclaw@gdcproject.org>

PR d/89293
* libdruntime/core/atomic.d (casImpl): Remove static assert for
GNU_Have_Atomics, add static path to handle missing atomic support.
(atomicLoad): Likewise.
(atomicStore): Likewise.
(atomicFence): Likewise.
(atomicMutexHandle, AtomicMutex): Declare types.
(_getAtomicMutex): New function.
(getAtomicMutex): Declare.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@270470 138bc75d-0d04-0410-961f-82ee72b054a4

libphobos/ChangeLog
libphobos/libdruntime/core/atomic.d
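
For context, here is a minimal, hypothetical usage sketch (not part of the patch) of the core.atomic entry points this commit touches. On a target with neither native atomic instructions nor libatomic, these calls previously tripped the "not supported on this architecture" static asserts; after this change they route through the mutex-based fallback with the same observable semantics:

    import core.atomic;

    shared int counter;

    void main()
    {
        // Each of these now compiles and works on targets where both
        // GNU_Have_Atomics and GNU_Have_LibAtomic are false.
        atomicStore(counter, 1);
        bool swapped = cas(&counter, 1, 2);      // exercises the casImpl fallback
        assert(swapped && atomicLoad(counter) == 2);
        atomicFence();  // in the fallback, a lock/unlock pair serves as the barrier
    }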

libphobos/ChangeLog
index 60d39c9bb77b8153b249b207e41e819dc392152f..4f158776e233a37600ee96065a77c7a93417b9b8 100644
@@ -1,3 +1,15 @@
+2019-04-20  Iain Buclaw  <ibuclaw@gdcproject.org>
+
+       PR d/89293
+       * libdruntime/core/atomic.d (casImpl): Remove static assert for
+       GNU_Have_Atomics, add static path to handle missing atomic support.
+       (atomicLoad): Likewise.
+       (atomicStore): Likewise.
+       (atomicFence): Likewise.
+       (atomicMutexHandle, AtomicMutex): Declare types.
+       (_getAtomicMutex): New function.
+       (getAtomicMutex): Declare.
+
 2019-04-16  Iain Buclaw  <ibuclaw@gdcproject.org>
 
        * config.h.in: Regenerate.
libphobos/libdruntime/core/atomic.d
index 0b39cddb6c941c94bf1984258c48f5e85d9cde89..1d0a2ea8b489bafb289d07914215e88a29be8f9e 100644
@@ -1353,36 +1353,62 @@ else version (GNU)
 
     private bool casImpl(T,V1,V2)( shared(T)* here, V1 ifThis, V2 writeThis ) pure nothrow @nogc @trusted
     {
-        static assert(GNU_Have_Atomics, "cas() not supported on this architecture");
         bool res = void;
 
-        static if (T.sizeof == byte.sizeof)
+        static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
         {
-            res = __atomic_compare_exchange_1(here, cast(void*) &ifThis, *cast(ubyte*) &writeThis,
-                                              false, MemoryOrder.seq, MemoryOrder.seq);
-        }
-        else static if (T.sizeof == short.sizeof)
-        {
-            res = __atomic_compare_exchange_2(here, cast(void*) &ifThis, *cast(ushort*) &writeThis,
-                                              false, MemoryOrder.seq, MemoryOrder.seq);
-        }
-        else static if (T.sizeof == int.sizeof)
-        {
-            res = __atomic_compare_exchange_4(here, cast(void*) &ifThis, *cast(uint*) &writeThis,
-                                              false, MemoryOrder.seq, MemoryOrder.seq);
-        }
-        else static if (T.sizeof == long.sizeof && GNU_Have_64Bit_Atomics)
-        {
-            res = __atomic_compare_exchange_8(here, cast(void*) &ifThis, *cast(ulong*) &writeThis,
-                                              false, MemoryOrder.seq, MemoryOrder.seq);
+            static if (T.sizeof == byte.sizeof)
+            {
+                res = __atomic_compare_exchange_1(here, cast(void*) &ifThis, *cast(ubyte*) &writeThis,
+                                                  false, MemoryOrder.seq, MemoryOrder.seq);
+            }
+            else static if (T.sizeof == short.sizeof)
+            {
+                res = __atomic_compare_exchange_2(here, cast(void*) &ifThis, *cast(ushort*) &writeThis,
+                                                  false, MemoryOrder.seq, MemoryOrder.seq);
+            }
+            else static if (T.sizeof == int.sizeof)
+            {
+                res = __atomic_compare_exchange_4(here, cast(void*) &ifThis, *cast(uint*) &writeThis,
+                                                  false, MemoryOrder.seq, MemoryOrder.seq);
+            }
+            else static if (T.sizeof == long.sizeof && GNU_Have_64Bit_Atomics)
+            {
+                res = __atomic_compare_exchange_8(here, cast(void*) &ifThis, *cast(ulong*) &writeThis,
+                                                  false, MemoryOrder.seq, MemoryOrder.seq);
+            }
+            else static if (GNU_Have_LibAtomic)
+            {
+                res = __atomic_compare_exchange(T.sizeof, here, cast(void*) &ifThis, cast(void*) &writeThis,
+                                                MemoryOrder.seq, MemoryOrder.seq);
+            }
+            else
+                static assert(0, "Invalid template type specified.");
         }
-        else static if (GNU_Have_LibAtomic)
+        else
         {
-            res = __atomic_compare_exchange(T.sizeof, here, cast(void*) &ifThis, cast(void*) &writeThis,
-                                            MemoryOrder.seq, MemoryOrder.seq);
+            static if (T.sizeof == byte.sizeof)
+                alias U = byte;
+            else static if (T.sizeof == short.sizeof)
+                alias U = short;
+            else static if (T.sizeof == int.sizeof)
+                alias U = int;
+            else static if (T.sizeof == long.sizeof)
+                alias U = long;
+            else
+                static assert(0, "Invalid template type specified.");
+
+            getAtomicMutex.lock();
+            scope(exit) getAtomicMutex.unlock();
+
+            if (*cast(U*)here == *cast(U*)&ifThis)
+            {
+                *here = writeThis;
+                res = true;
+            }
+            else
+                res = false;
         }
-        else
-            static assert(0, "Invalid template type specified.");
 
         return res;
     }
@@ -1406,36 +1432,44 @@ else version (GNU)
     {
         static assert(ms != MemoryOrder.rel, "Invalid MemoryOrder for atomicLoad");
         static assert(__traits(isPOD, T), "argument to atomicLoad() must be POD");
-        static assert(GNU_Have_Atomics, "atomicLoad() not supported on this architecture");
 
-        static if (T.sizeof == ubyte.sizeof)
+        static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
         {
-            ubyte value = __atomic_load_1(&val, ms);
-            return *cast(HeadUnshared!T*) &value;
-        }
-        else static if (T.sizeof == ushort.sizeof)
-        {
-            ushort value = __atomic_load_2(&val, ms);
-            return *cast(HeadUnshared!T*) &value;
-        }
-        else static if (T.sizeof == uint.sizeof)
-        {
-            uint value = __atomic_load_4(&val, ms);
-            return *cast(HeadUnshared!T*) &value;
-        }
-        else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
-        {
-            ulong value = __atomic_load_8(&val, ms);
-            return *cast(HeadUnshared!T*) &value;
+            static if (T.sizeof == ubyte.sizeof)
+            {
+                ubyte value = __atomic_load_1(&val, ms);
+                return *cast(HeadUnshared!T*) &value;
+            }
+            else static if (T.sizeof == ushort.sizeof)
+            {
+                ushort value = __atomic_load_2(&val, ms);
+                return *cast(HeadUnshared!T*) &value;
+            }
+            else static if (T.sizeof == uint.sizeof)
+            {
+                uint value = __atomic_load_4(&val, ms);
+                return *cast(HeadUnshared!T*) &value;
+            }
+            else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
+            {
+                ulong value = __atomic_load_8(&val, ms);
+                return *cast(HeadUnshared!T*) &value;
+            }
+            else static if (GNU_Have_LibAtomic)
+            {
+                T value;
+                __atomic_load(T.sizeof, &val, cast(void*)&value, ms);
+                return *cast(HeadUnshared!T*) &value;
+            }
+            else
+                static assert(0, "Invalid template type specified.");
         }
-        else static if (GNU_Have_LibAtomic)
+        else
         {
-            T value;
-            __atomic_load(T.sizeof, &val, cast(void*)&value, ms);
-            return *cast(HeadUnshared!T*) &value;
+            getAtomicMutex.lock();
+            scope(exit) getAtomicMutex.unlock();
+            return *cast(HeadUnshared!T*)&val;
         }
-        else
-            static assert(0, "Invalid template type specified.");
     }
 
 
@@ -1444,36 +1478,138 @@ else version (GNU)
     {
         static assert(ms != MemoryOrder.acq, "Invalid MemoryOrder for atomicStore");
         static assert(__traits(isPOD, T), "argument to atomicLoad() must be POD");
-        static assert(GNU_Have_Atomics, "atomicStore() not supported on this architecture");
 
-        static if (T.sizeof == ubyte.sizeof)
+        static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
         {
-            __atomic_store_1(&val, *cast(ubyte*) &newval, ms);
+            static if (T.sizeof == ubyte.sizeof)
+            {
+                __atomic_store_1(&val, *cast(ubyte*) &newval, ms);
+            }
+            else static if (T.sizeof == ushort.sizeof)
+            {
+                __atomic_store_2(&val, *cast(ushort*) &newval, ms);
+            }
+            else static if (T.sizeof == uint.sizeof)
+            {
+                __atomic_store_4(&val, *cast(uint*) &newval, ms);
+            }
+            else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
+            {
+                __atomic_store_8(&val, *cast(ulong*) &newval, ms);
+            }
+            else static if (GNU_Have_LibAtomic)
+            {
+                __atomic_store(T.sizeof, &val, cast(void*)&newval, ms);
+            }
+            else
+                static assert(0, "Invalid template type specified.");
         }
-        else static if (T.sizeof == ushort.sizeof)
+        else
         {
-            __atomic_store_2(&val, *cast(ushort*) &newval, ms);
+            getAtomicMutex.lock();
+            val = newval;
+            getAtomicMutex.unlock();
         }
-        else static if (T.sizeof == uint.sizeof)
+    }
+
+
+    void atomicFence() nothrow @nogc
+    {
+        static if (GNU_Have_Atomics || GNU_Have_LibAtomic)
+            __atomic_thread_fence(MemoryOrder.seq);
+        else
         {
-            __atomic_store_4(&val, *cast(uint*) &newval, ms);
+            getAtomicMutex.lock();
+            getAtomicMutex.unlock();
         }
-        else static if (T.sizeof == ulong.sizeof && GNU_Have_64Bit_Atomics)
+    }
+
+    static if (!GNU_Have_Atomics && !GNU_Have_LibAtomic)
+    {
+        // Use system mutex for atomics, faking the purity of the functions so
+        // that they can be used in pure/nothrow/@safe code.
+        extern (C) private pure @trusted @nogc nothrow
         {
-            __atomic_store_8(&val, *cast(ulong*) &newval, ms);
+            static if (GNU_Thread_Model == ThreadModel.Posix)
+            {
+                import core.sys.posix.pthread;
+                alias atomicMutexHandle = pthread_mutex_t;
+
+                pragma(mangle, "pthread_mutex_init") int fakePureMutexInit(pthread_mutex_t*, pthread_mutexattr_t*);
+                pragma(mangle, "pthread_mutex_lock") int fakePureMutexLock(pthread_mutex_t*);
+                pragma(mangle, "pthread_mutex_unlock") int fakePureMutexUnlock(pthread_mutex_t*);
+            }
+            else static if (GNU_Thread_Model == ThreadModel.Win32)
+            {
+                import core.sys.windows.winbase;
+                alias atomicMutexHandle = CRITICAL_SECTION;
+
+                pragma(mangle, "InitializeCriticalSection") int fakePureMutexInit(CRITICAL_SECTION*);
+                pragma(mangle, "EnterCriticalSection") void fakePureMutexLock(CRITICAL_SECTION*);
+                pragma(mangle, "LeaveCriticalSection") int fakePureMutexUnlock(CRITICAL_SECTION*);
+            }
+            else
+            {
+                alias atomicMutexHandle = int;
+            }
         }
-        else static if (GNU_Have_LibAtomic)
+
+        // Implements lock/unlock operations.
+        private struct AtomicMutex
         {
-            __atomic_store(T.sizeof, &val, cast(void*)&newval, ms);
+            int lock() pure @trusted @nogc nothrow
+            {
+                static if (GNU_Thread_Model == ThreadModel.Posix)
+                {
+                    if (!_inited)
+                    {
+                        fakePureMutexInit(&_handle, null);
+                        _inited = true;
+                    }
+                    return fakePureMutexLock(&_handle);
+                }
+                else
+                {
+                    static if (GNU_Thread_Model == ThreadModel.Win32)
+                    {
+                        if (!_inited)
+                        {
+                            fakePureMutexInit(&_handle);
+                            _inited = true;
+                        }
+                        fakePureMutexLock(&_handle);
+                    }
+                    return 0;
+                }
+            }
+
+            int unlock() pure @trusted @nogc nothrow
+            {
+                static if (GNU_Thread_Model == ThreadModel.Posix)
+                    return fakePureMutexUnlock(&_handle);
+                else
+                {
+                    static if (GNU_Thread_Model == ThreadModel.Win32)
+                        fakePureMutexUnlock(&_handle);
+                    return 0;
+                }
+            }
+
+        private:
+            atomicMutexHandle _handle;
+            bool _inited;
         }
-        else
-            static assert(0, "Invalid template type specified.");
-    }
 
+        // Internal static mutex reference.
+        private AtomicMutex* _getAtomicMutex() @trusted @nogc nothrow
+        {
+            __gshared static AtomicMutex mutex;
+            return &mutex;
+        }
 
-    void atomicFence() nothrow @nogc
-    {
-        __atomic_thread_fence(MemoryOrder.seq);
+        // Pure alias for _getAtomicMutex.
+        pragma(mangle, _getAtomicMutex.mangleof)
+        private AtomicMutex* getAtomicMutex() pure @trusted @nogc nothrow @property;
     }
 }
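
A closing note on the purity-faking pattern this patch introduces: pragma(mangle) binds a D declaration carrying the desired attributes (pure/nothrow/@nogc) to an existing C symbol, so the mutex operations remain callable from the attributed atomic wrappers. Below is a standalone, POSIX-only sketch of the same trick; the names fakePureLock, fakePureUnlock, and lockedAdd are illustrative, not from the patch:

    import core.sys.posix.pthread;

    extern (C) private pure @trusted @nogc nothrow
    {
        // The compiler checks the attributes declared here; pragma(mangle)
        // resolves the calls to the real, impure pthread symbols at link time.
        pragma(mangle, "pthread_mutex_lock") int fakePureLock(pthread_mutex_t*);
        pragma(mangle, "pthread_mutex_unlock") int fakePureUnlock(pthread_mutex_t*);
    }

    // Callable from pure code even though it takes a real pthread lock;
    // it mutates state only through its parameters (weakly pure).
    int lockedAdd(int* p, int x, pthread_mutex_t* m) pure @trusted @nogc nothrow
    {
        fakePureLock(m);
        scope(exit) fakePureUnlock(m);
        return *p += x;
    }

    __gshared pthread_mutex_t mtx = PTHREAD_MUTEX_INITIALIZER;
    __gshared int value;

    void main()
    {
        lockedAdd(&value, 5, &mtx);
        assert(value == 5);
    }

The same indirection explains the getAtomicMutex declaration at the end of the diff: _getAtomicMutex must be impure because it returns the address of a __gshared mutex, so a pure prototype with an identical mangled name (pragma(mangle, _getAtomicMutex.mangleof)) is declared for use inside the attributed functions.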