git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MEDIUM: various: Use __ha_barrier_atomic* when relevant.
author Olivier Houchard <ohouchard@haproxy.com>
Fri, 8 Mar 2019 12:47:21 +0000 (13:47 +0100)
committer Olivier Houchard <cognet@ci0.org>
Mon, 11 Mar 2019 16:02:37 +0000 (17:02 +0100)
When protecting data modified by atomic operations, use __ha_barrier_atomic*
to avoid unneeded barriers on x86.
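
For background: on x86, atomic read-modify-write instructions (XCHG, or LOCK-prefixed CMPXCHG and friends) already act as full memory barriers at the CPU level, so issuing a separate fence right after one of them is redundant; only compiler reordering still has to be prevented. A minimal standalone sketch of the distinction, using illustrative stand-in names (demo_barrier_store and demo_barrier_atomic_store are not the real macros, which live in include/common/hathreads.h and cover more architectures):

/* Illustrative x86-only stand-ins; not HAProxy's actual definitions. */
#include <stdio.h>

/* Generic store barrier: must emit a real fence instruction,
 * because plain stores may still need CPU-level ordering. */
static inline void demo_barrier_store(void)
{
        __asm__ __volatile__("sfence" ::: "memory");
}

/* Barrier for use right after an atomic operation: the locked
 * RMW instruction was already a full CPU barrier, so only the
 * compiler must be kept from reordering across this point. */
static inline void demo_barrier_atomic_store(void)
{
        __asm__ __volatile__("" ::: "memory");
}

static int shared_word;

int main(void)
{
        /* __atomic_exchange_n compiles to XCHG here, which carries
         * an implicit LOCK prefix and thus a full fence. */
        int old = __atomic_exchange_n(&shared_word, 1, __ATOMIC_SEQ_CST);

        demo_barrier_atomic_store(); /* compiler barrier is enough */

        printf("old value: %d\n", old);
        return 0;
}

Built with gcc on x86-64, the atomic variant compiles to no fence instruction at all, while the generic variant still emits an sfence; that is the saving this commit applies after each HA_ATOMIC_* call below.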

include/common/mini-clist.h
include/common/xref.h
src/fd.c
src/task.c

index 62a62d78b5d32a3740fcebaacd0485809ae57ea9..074176a2088a6cde3ad7702ee5afd041ebec2a2c 100644 (file)
--- a/include/common/mini-clist.h
+++ b/include/common/mini-clist.h
@@ -192,7 +192,7 @@ struct cond_wordlist {
                        n = HA_ATOMIC_XCHG(&(lh)->n, LLIST_BUSY);          \
                        if (n == LLIST_BUSY)                               \
                                continue;                                  \
-                       __ha_barrier_store();                              \
+                       __ha_barrier_atomic_store();                       \
                        p = HA_ATOMIC_XCHG(&n->p, LLIST_BUSY);             \
                        if (p == LLIST_BUSY) {                             \
                                (lh)->n = n;                               \
index a6291f52f5badfb53fa0afe2b46b21f53fc6a7d2..48bc07a92f4cd7bec6e22e80a443a7e51aab84de 100644 (file)
--- a/include/common/xref.h
+++ b/include/common/xref.h
@@ -32,7 +32,7 @@ static inline struct xref *xref_get_peer_and_lock(struct xref *xref)
 
                /* Get the local pointer to the peer. */
                local = HA_ATOMIC_XCHG(&xref->peer, XREF_BUSY);
-               __ha_barrier_store();
+               __ha_barrier_atomic_store();
 
                /* If the local pointer is NULL, the peer no longer exists. */
                if (local == NULL) {
index 581c5aaae8b7c2c5cd292be9b991208f3bec95f8..cb9df1f6b379d112b769f327bbcfd02efd4e0dbe 100644 (file)
--- a/src/fd.c
+++ b/src/fd.c
@@ -203,7 +203,7 @@ redo_next:
                goto done;
        if (!HA_ATOMIC_CAS(&_GET_NEXT(fd, off), &next, -2))
                goto redo_next;
-       __ha_barrier_store();
+       __ha_barrier_atomic_store();
 
        new = fd;
 redo_last:
@@ -292,7 +292,7 @@ lock_self_prev:
        if (unlikely(!HA_ATOMIC_CAS(&_GET_PREV(fd, off), &prev, -2)))
                goto lock_self_prev;
 #endif
-       __ha_barrier_store();
+       __ha_barrier_atomic_store();
 
        /* Now, lock the entries of our neighbours */
        if (likely(prev != -1)) {
index 826e2124b3f6f46a22637f7dc3ce6787bdab0133..d7c3e059a969bca96420d9a944e1160d288bba27 100644 (file)
--- a/src/task.c
+++ b/src/task.c
@@ -122,7 +122,7 @@ redo:
 #ifdef USE_THREAD
        if (root == &rqueue) {
                HA_ATOMIC_OR(&global_tasks_mask, t->thread_mask);
-               __ha_barrier_store();
+               __ha_barrier_atomic_store();
        }
 #endif
        old_active_mask = active_tasks_mask;
@@ -401,7 +401,7 @@ void process_runnable_tasks()
        }
        if (!(global_tasks_mask & tid_bit) && task_per_thread[tid].rqueue_size == 0) {
                HA_ATOMIC_AND(&active_tasks_mask, ~tid_bit);
-               __ha_barrier_load();
+               __ha_barrier_atomic_load();
                if (global_tasks_mask & tid_bit)
                        HA_ATOMIC_OR(&active_tasks_mask, tid_bit);
        }
@@ -413,7 +413,7 @@ void process_runnable_tasks()
 
                t = (struct task *)LIST_ELEM(task_per_thread[tid].task_list.n, struct tasklet *, list);
                state = HA_ATOMIC_XCHG(&t->state, TASK_RUNNING);
-               __ha_barrier_store();
+               __ha_barrier_atomic_store();
                task_remove_from_task_list(t);
 
                ctx = t->context;
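
Every hunk above has the same shape: an atomic XCHG (or CAS) takes exclusive ownership of a slot by swapping in a busy marker, and the barrier that follows only has to order the subsequent accesses against that atomic operation, so the cheaper atomic flavour suffices. A self-contained sketch of that pattern, with illustrative names (slot, SLOT_BUSY and slot_lock are not HAProxy identifiers):

#include <stdio.h>

#define SLOT_BUSY ((void *)1)

/* Compiler-only barrier: sufficient right after a locked x86 RMW. */
#define demo_barrier_atomic() __asm__ __volatile__("" ::: "memory")

static void *slot = "payload";

static void *slot_lock(void)
{
        void *cur;

        /* Spin until we swap out something other than the busy
         * marker: whoever sees the old value owns the slot. */
        while ((cur = __atomic_exchange_n(&slot, SLOT_BUSY,
                                          __ATOMIC_SEQ_CST)) == SLOT_BUSY)
                ;

        /* On x86 the XCHG itself was the fence; only the compiler
         * still needs to be constrained, as in the hunks above. */
        demo_barrier_atomic();
        return cur;
}

int main(void)
{
        void *v = slot_lock();
        printf("locked, got: %s\n", (char *)v);
        slot = v; /* release by restoring the pointer */
        return 0;
}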