CLEANUP: ring: use only curr_cell and not next_cell in the main write loop
author     Willy Tarreau <w@1wt.eu>
           Sun, 17 Mar 2024 15:54:36 +0000 (16:54 +0100)
committer  Willy Tarreau <w@1wt.eu>
           Mon, 25 Mar 2024 17:34:19 +0000 (17:34 +0000)
It turns out that we can get rid of one variable in the loop, which
clobbers one less register and makes the code slightly faster on Cortex A72.

src/ring.c

index 8d46679cca65537f3fccfa4807c5f32fe9d392f5..1b9cd8d8e6882e322886b1b7905fc3332cea3a11 100644
@@ -266,8 +266,6 @@ ssize_t ring_write(struct ring *ring, size_t maxlen, const struct ist pfx[], siz
         * comes in and becomes the leader in turn.
         */
 
-       next_cell = &cell;
-
        /* Wait for another thread to take the lead or for the tail to
         * be available again. It's critical to be read-only in this
         * loop so as not to lose time synchronizing cache lines. Also,
@@ -276,7 +274,7 @@ ssize_t ring_write(struct ring *ring, size_t maxlen, const struct ist pfx[], siz
         */
 
        while (1) {
-               if ((next_cell = HA_ATOMIC_LOAD(ring_queue_ptr)) != &cell)
+               if ((curr_cell = HA_ATOMIC_LOAD(ring_queue_ptr)) != &cell)
                        goto wait_for_flush;
                __ha_cpu_relax_for_read();
 
@@ -296,7 +294,6 @@ ssize_t ring_write(struct ring *ring, size_t maxlen, const struct ist pfx[], siz
                         * which we'll confirm by trying to reset the queue. If we're
                         * still the leader, we're done.
                         */
-                       curr_cell = &cell;
                        if (HA_ATOMIC_CAS(ring_queue_ptr, &curr_cell, NULL))
                                break; // Won!
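
For context, the hunks above belong to the lock-free "queue leader" scheme used by
ring_write(): a waiting thread spins until the shared queue pointer designates its
own cell, then confirms leadership by swapping that pointer back to NULL with a CAS.
Below is a minimal sketch of that pattern using C11 atomics instead of
HA_ATOMIC_LOAD/HA_ATOMIC_CAS; the names, the simplified control flow and the memory
orderings are assumptions for illustration, not HAProxy's actual implementation. It
shows why a single curr_cell variable is enough: the load performed in the loop
already leaves the expected value (&cell) in it for the confirming CAS, so the
separate next_cell and the extra "curr_cell = &cell" assignment removed by this
patch are redundant.

#include <stdatomic.h>
#include <stddef.h>

struct cell { int dummy; };

/* Spin until either another thread takes the lead (return 0; the caller would
 * then wait for its cell to be flushed) or we confirm that we are the leader
 * by resetting the queue pointer to NULL (return 1).
 */
static int wait_for_lead(_Atomic(struct cell *) *queue_ptr, struct cell *cell)
{
	struct cell *curr_cell;

	while (1) {
		/* single variable: the load leaves the expected value here */
		curr_cell = atomic_load_explicit(queue_ptr, memory_order_acquire);
		if (curr_cell != cell)
			return 0; /* another thread became the leader */

		/* curr_cell == cell, so reuse it directly as the CAS expected value */
		if (atomic_compare_exchange_strong_explicit(queue_ptr, &curr_cell,
		                                            NULL,
		                                            memory_order_acq_rel,
		                                            memory_order_acquire))
			return 1; /* won: we are still the leader */
		/* CAS failed: the pointer changed under us, re-read and retry */
	}
}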