MEDIUM: fwlc: Make it so fwlc_srv_reposition works with unqueued srv
author    Olivier Houchard <ohouchard@haproxy.com>
          Wed, 1 Oct 2025 15:59:37 +0000 (17:59 +0200)
committer Olivier Houchard <cognet@ci0.org>
          Wed, 1 Oct 2025 16:13:33 +0000 (18:13 +0200)
Modify fwlc_srv_reposition() so that it no longer assumes that the
server was already queued, and thus works even if s->tree_elt is
NULL.
While the server will usually be queued, there is an unlikely
possibility that queuing failed when the server came up, due to a
memory allocation failure; in that case the server just expects the
server_requeue tasklet to run and take care of it later.

This should be backported to 3.2.
This is part of an attempt to fix github issue #3143
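
In essence, the patch makes a NULL s->tree_elt a tolerated state rather
than an error: the "detach from the old element" half of the reposition
is skipped, and only the insertion half runs. Below is a minimal,
self-contained sketch of that pattern; struct slot, struct entry,
find_slot() and reposition() are hypothetical stand-ins for the real
fwlc structures, not HAProxy's API.

/* Sketch: reposition an entry, tolerating one that was never queued.
 * All names here are hypothetical stand-ins, not HAProxy's real API. */
#include <stdio.h>

struct slot  { unsigned key; int elements; };
struct entry { struct slot *slot; };

static struct slot slots[2] = { { .key = 1 }, { .key = 2 } };

static struct slot *find_slot(unsigned key)
{
	return &slots[key == 2];   /* toy lookup: two fixed slots */
}

static void reposition(struct entry *e, unsigned new_key)
{
	struct slot *dst = find_slot(new_key);

	/* If a non-NULL cached slot already matches, nothing to do. */
	if (e->slot && e->slot == dst)
		return;

	/* Only detach from the old slot if the entry was queued at all;
	 * an entry whose slot is still NULL (e.g. after a failed initial
	 * queuing) is simply inserted into the destination. */
	if (e->slot)
		e->slot->elements--;

	dst->elements++;
	e->slot = dst;
}

int main(void)
{
	struct entry e = { .slot = NULL };  /* never queued */

	reposition(&e, 2);                  /* must not crash */
	printf("queued under key %u, %d element(s)\n",
	       e.slot->key, e.slot->elements);
	return 0;
}

The real function additionally has to publish the new key and juggle
the lbprm lock and concurrent repositions, which is what the locking in
the diff below is about.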

src/lb_fwlc.c

index aa9d679e07f02caf7b1e9cfbe529266ffa9b6cf1..0807459104a42ba596cbd99e90a0194972f18f3c 100644
@@ -329,15 +329,6 @@ static void fwlc_srv_reposition(struct server *s)
                return;
        }
 
-       /*
-        * We're not in the tree, the server is probably down, don't
-        * do anything.
-        */
-       if (unlikely(!s->tree_elt)) {
-               HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
-               _HA_ATOMIC_STORE(&s->lb_lock, 0);
-               return;
-       }
        node = eb32_lookup(s->lb_tree, new_key);
        if (node)
                tree_elt = container_of(node, struct fwlc_tree_elt, lb_node);
@@ -347,7 +338,7 @@ static void fwlc_srv_reposition(struct server *s)
                 * check again now that we acquired it, and if we're using
                 * the right element, do nothing.
                 */
-       if (tree_elt == s->tree_elt) {
+       if (s->tree_elt && tree_elt == s->tree_elt) {
                HA_RWLOCK_RDUNLOCK(LBPRM_LOCK, &s->proxy->lbprm.lock);
                _HA_ATOMIC_STORE(&s->lb_lock, 0);
                fwlc_check_srv_key(s, new_key);
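
The hunk above extends the usual check-again-under-the-lock step: once
the lock is held, the cached element is compared with the freshly
looked-up one, and with this patch the comparison first verifies that
s->tree_elt is non-NULL at all. A compact illustration of that pattern,
using a plain pthread mutex and hypothetical cached/target names rather
than the real fwlc code:

/* Re-check under the lock: skip the expensive reposition only when a
 * non-NULL cached element already matches the looked-up target. NULL
 * means "never queued", so in that case work is always needed.
 * All names here are hypothetical, not HAProxy's real API. */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static bool needs_reposition(const void *cached, const void *target)
{
	bool needed;

	pthread_mutex_lock(&lock);
	needed = !(cached && cached == target);
	pthread_mutex_unlock(&lock);
	return needed;
}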
@@ -470,26 +461,29 @@ static void fwlc_srv_reposition(struct server *s)
 
        __ha_barrier_store();
 
-       _HA_ATOMIC_DEC(&s->tree_elt->elements);
+       if (likely(s->tree_elt)) {
+               _HA_ATOMIC_DEC(&s->tree_elt->elements);
 
-       /*
-        * Now lock the existing element, and its target list.
-        * To prevent a deadlock, we always lock the one
-        * with the lowest key first.
-        */
-       if (new_key < s->tree_elt->lb_node.key) {
-               to_unlock = mt_list_lock_full(&s->lb_mt_list);
-               list = fwlc_lock_target_list(tree_elt);
-       } else {
-               list = fwlc_lock_target_list(tree_elt);
-               to_unlock = mt_list_lock_full(&s->lb_mt_list);
-       }
+               /*
+                * Now lock the existing element, and its target list.
+                * To prevent a deadlock, we always lock the one
+                * with the lowest key first.
+                */
+               if (new_key < s->tree_elt->lb_node.key) {
+                       to_unlock = mt_list_lock_full(&s->lb_mt_list);
+                       list = fwlc_lock_target_list(tree_elt);
+               } else {
+                       list = fwlc_lock_target_list(tree_elt);
+                       to_unlock = mt_list_lock_full(&s->lb_mt_list);
+               }
 
-       /*
-        * Unlock the old list, the element is now
-        * no longer in it.
-        */
-       mt_list_unlock_link(to_unlock);
+               /*
+                * Unlock the old list, the element is now
+                * no longer in it.
+                */
+               mt_list_unlock_link(to_unlock);
+       } else
+               list = fwlc_lock_target_list(tree_elt);
 
        /*
         * Add the element to the new list, and unlock it.
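
The comment retained in the hunk above describes the standard
deadlock-avoidance rule: whenever two locks must be held at once, every
thread acquires them in the same global order, here by ascending tree
key. A standalone illustration of that rule, using plain pthread
mutexes instead of HAProxy's mt_list primitives (struct slot and
lock_pair() are hypothetical):

/* Deadlock avoidance by lock ordering: when two slots must be locked
 * together, always take the one with the lower key first, so no two
 * threads can ever hold the pair in opposite orders. Plain pthread
 * mutexes stand in for HAProxy's mt_list locks. */
#include <pthread.h>

struct slot {
	unsigned        key;
	pthread_mutex_t lock;
};

static void lock_pair(struct slot *a, struct slot *b)
{
	/* Enforce the global order: lower key first. */
	if (a->key > b->key) {
		struct slot *tmp = a; a = b; b = tmp;
	}
	pthread_mutex_lock(&a->lock);
	pthread_mutex_lock(&b->lock);
}

static void unlock_pair(struct slot *a, struct slot *b)
{
	pthread_mutex_unlock(&a->lock);
	pthread_mutex_unlock(&b->lock);
}

int main(void)
{
	struct slot a = { .key = 7, .lock = PTHREAD_MUTEX_INITIALIZER };
	struct slot b = { .key = 3, .lock = PTHREAD_MUTEX_INITIALIZER };

	lock_pair(&a, &b);   /* locks b (key 3) first, then a (key 7) */
	unlock_pair(&a, &b);
	return 0;
}

Note that the new else branch in the patch only ever locks the
destination list: a server that was never queued holds a single lock,
so it cannot take part in an ordering cycle in the first place.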