]> git.ipfire.org Git - thirdparty/haproxy.git/commitdiff
MEDIUM: threads: automatically assign threads to groups
authorWilly Tarreau <w@1wt.eu>
Mon, 27 Sep 2021 08:10:26 +0000 (10:10 +0200)
committerWilly Tarreau <w@1wt.eu>
Fri, 8 Oct 2021 15:22:26 +0000 (17:22 +0200)
This takes care of unassigned thread groups and places unassigned
threads there, in a more or less balanced way. Too sparse allocations
may still fail though. For now, with the maximum group number fixed to 1,
nothing can really fail.

include/haproxy/thread.h
src/cfgparse.c
src/thread.c

index 14c88f0379a813fab4b20908a280f23edfd77f0f..c70108b5443390b961f307a0e0c4e4ed288f79c6 100644 (file)
@@ -43,6 +43,7 @@ int parse_nbthread(const char *arg, char **err);
 void ha_tkill(unsigned int thr, int sig);
 void ha_tkillall(int sig);
 void ha_thread_relax(void);
+int thread_map_to_groups();
 extern int thread_cpus_enabled_at_boot;
 
 
index fe498116fcf4163aa433fe78dfd53f5dea738d9c..f8e777e4cb4926850ec877cc9235461f81f42977 100644 (file)
@@ -2439,6 +2439,11 @@ int check_config_validity()
        if (!global.nbtgroups)
                global.nbtgroups = 1;
 
+       if (thread_map_to_groups() < 0) {
+               err_code |= ERR_ALERT | ERR_FATAL;
+               goto out;
+       }
+
        pool_head_requri = create_pool("requri", global.tune.requri_len , MEM_F_SHARED);
 
        pool_head_capture = create_pool("capture", global.tune.cookie_len, MEM_F_SHARED);
index 7374a8eede9044a3d662847cd4393d0422490fd7..2a7d3aff3d6126a05cbbd1f3699a14a53833b599 100644 (file)
@@ -1001,6 +1001,82 @@ REGISTER_BUILD_OPTS("Built without multi-threading support (USE_THREAD not set).
 #endif // USE_THREAD
 
 
+/* scans the configured thread mapping and establishes the final one. Returns <0
+ * on failure, >=0 on success.
+ */
+int thread_map_to_groups()
+{
+       int t, g, ut, ug;
+       int q, r;
+
+       ut = ug = 0; // unassigned threads & groups
+
+       for (t = 0; t < global.nbthread; t++) {
+               if (!ha_thread_info[t].tg)
+                       ut++;
+       }
+
+       for (g = 0; g < global.nbtgroups; g++) {
+               if (!ha_tgroup_info[g].count)
+                       ug++;
+       }
+
+       if (ug > ut) {
+               ha_alert("More unassigned thread-groups (%d) than threads (%d). Please reduce thread-groups\n", ug, ut);
+               return -1;
+       }
+
+       /* look for first unassigned thread */
+       for (t = 0; t < global.nbthread && ha_thread_info[t].tg; t++)
+               ;
+
+       /* assign threads to empty groups */
+       for (g = 0; ug && ut; ) {
+               /* due to sparse thread assignment we can end up with more threads
+                * per group on last assigned groups than former ones, so we must
+                * always try to pack the maximum remaining ones together first.
+                */
+               q = ut / ug;
+               r = ut % ug;
+               if ((q + !!r) > MAX_THREADS_PER_GROUP) {
+                       ha_alert("Too many remaining unassigned threads (%d) for thread groups (%d). Please increase thread-groups or make sure to keep thread numbers contiguous\n", ug, ut);
+                       return -1;
+               }
+
+               /* thread <t> is the next unassigned one. Let's look for next
+                * unassigned group, we know there are some left
+                */
+               while (ut >= ug && ha_tgroup_info[g].count)
+                       g++;
+
+               /* group g is unassigned, try to fill it with consecutive threads */
+               while (ut && ut >= ug && ha_tgroup_info[g].count < q + !!r &&
+                      (!ha_tgroup_info[g].count || t == ha_tgroup_info[g].base + ha_tgroup_info[g].count)) {
+
+                       if (!ha_tgroup_info[g].count) {
+                               /* assign new group */
+                               ha_tgroup_info[g].base = t;
+                               ug--;
+                       }
+
+                       ha_tgroup_info[g].count++;
+                       ha_thread_info[t].tg = &ha_tgroup_info[g];
+
+                       ut--;
+                       /* switch to next unassigned thread */
+                       while (++t < global.nbthread && ha_thread_info[t].tg)
+                               ;
+               }
+       }
+
+       if (ut) {
+               ha_alert("Remaining unassigned threads found (%d) because all groups are in use. Please increase 'thread-groups', reduce 'nbthreads' or remove or extend 'thread-group' enumerations.\n", ut);
+               return -1;
+       }
+
+       return 0;
+}
+
 /* Parse the "nbthread" global directive, which takes an integer argument that
  * contains the desired number of threads.
  */