#define CPU_SET_FL_NONE 0x0000
#define CPU_SET_FL_DO_RESET 0x0001
+/* cpu_policy_conf flags */
+#define CPU_POLICY_ONE_THREAD_PER_CORE (1 << 0)
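+/* Set via the optional "threads-per-core 1" argument of the "cpu-policy"
+ * directive (e.g. "cpu-policy performance threads-per-core 1"): the
+ * policies below then count a single thread per physical core (thread
+ * set) when deciding how many threads to start.
+ */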
+
/* CPU topology information, ha_cpuset_size() entries, allocated at boot */
int cpu_topo_maxcpus = -1; // max number of CPUs supported by OS/haproxy
int cpu_topo_lastcpu = -1; // last supposed online CPU (no need to look beyond)
/* CPU policy choice */
struct {
int cpu_policy;
+ int flags; /* CPU_POLICY_* */
} cpu_policy_conf = {
- 1, /* "first-usable-node" */
+ 1, /* "performance" policy */
+ 0, /* Default flags */
};
/* list of CPU policies for "cpu-policy". The default one is the first one. */
static int cpu_policy_first_usable_node(int policy, int tmin, int tmax, int gmin, int gmax, char **err)
{
struct hap_cpuset node_cpu_set;
+ struct hap_cpuset visited_tsid;
int first_node_id = -1;
int second_node_id = -1;
int cpu;
int cpu_count;
int grp, thr;
+ int thr_count = 0;
if (!global.numa_cpu_mapping)
return 0;
* and make a CPU set of them.
*/
ha_cpuset_zero(&node_cpu_set);
+ ha_cpuset_zero(&visited_tsid);
for (cpu = cpu_count = 0; cpu <= cpu_topo_lastcpu; cpu++) {
if (ha_cpu_topo[cpu].no_id != first_node_id)
ha_cpu_topo[cpu].st |= HA_CPU_F_IGNORED;
else if (!(ha_cpu_topo[cpu].st & HA_CPU_F_EXCL_MASK)) {
ha_cpuset_set(&node_cpu_set, ha_cpu_topo[cpu].idx);
cpu_count++;
+
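+ /* with "threads-per-core 1", count only the first thread of each
+  * thread set (ts_id); otherwise count every usable CPU.
+  */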
+ if (!(cpu_policy_conf.flags & CPU_POLICY_ONE_THREAD_PER_CORE) ||
+     !ha_cpuset_isset(&visited_tsid, ha_cpu_topo[cpu].ts_id)) {
+ ha_cpuset_set(&visited_tsid, ha_cpu_topo[cpu].ts_id);
+ thr_count++;
+ }
}
}
for (thr = 0; thr < MAX_THREADS_PER_GROUP; thr++)
ha_cpuset_assign(&cpu_map[grp].thread[thr], &node_cpu_set);
- if (tmin <= cpu_count && cpu_count < tmax)
- tmax = cpu_count;
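+ /* cap the upper thread bound to the counted threads when that count
+  * falls within the requested [tmin..tmax) range.
+  */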
+ if (tmin <= thr_count && thr_count < tmax)
+ tmax = thr_count;
ha_diag_warning("Multi-socket cpu detected, automatically binding on active CPUs of '%d' (%u active cpu(s))\n", first_node_id, cpu_count);
{
struct hap_cpuset visited_cl_set;
struct hap_cpuset node_cpu_set;
+ struct hap_cpuset visited_tsid;
int cpu, cpu_start;
int cpu_count;
int cid;
+
+ ha_cpuset_zero(&visited_tsid);
/* make a mask of all of this cluster's CPUs */
ha_cpuset_set(&node_cpu_set, ha_cpu_topo[cpu].idx);
- cpu_count++;
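+ /* a core (thread set) contributes a single CPU to the count when
+  * "threads-per-core 1" is set.
+  */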
+ if (cpu_policy_conf.flags & CPU_POLICY_ONE_THREAD_PER_CORE) {
+ if (!ha_cpuset_isset(&visited_tsid, ha_cpu_topo[cpu].ts_id)) {
+ ha_cpuset_set(&visited_tsid, ha_cpu_topo[cpu].ts_id);
+ cpu_count++;
+ }
+ } else
+ cpu_count++;
}
/* now cid = next cluster_id or -1 if none; cpu_count is the
{
struct hap_cpuset visited_ccx_set;
struct hap_cpuset node_cpu_set;
+ struct hap_cpuset visited_tsid;
int cpu, cpu_start;
int cpu_count;
int l3id;
while (global.nbtgroups < MAX_TGROUPS && global.nbthread < MAX_THREADS) {
ha_cpuset_zero(&node_cpu_set);
+ ha_cpuset_zero(&visited_tsid);
l3id = -1; cpu_count = 0;
for (cpu = cpu_start; cpu <= cpu_topo_lastcpu; cpu++) {
/* make a mask of all of this cluster's CPUs */
ha_cpuset_set(&node_cpu_set, ha_cpu_topo[cpu].idx);
- cpu_count++;
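+ /* same per-core accounting as above when "threads-per-core 1" is set */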
+ if (cpu_policy_conf.flags & CPU_POLICY_ONE_THREAD_PER_CORE) {
+ if (!ha_cpuset_isset(&visited_tsid, ha_cpu_topo[cpu].ts_id)) {
+ ha_cpuset_set(&visited_tsid, ha_cpu_topo[cpu].ts_id);
+ cpu_count++;
+ }
+ } else
+ cpu_count++;
}
/* now l3id = next L3 ID or -1 if none; cpu_count is the
{
int i;
- if (too_many_args(1, args, err, NULL))
+ if (too_many_args(3, args, err, NULL))
return -1;
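+ /* parse the optional extra arguments, e.g.:
+  *   cpu-policy performance threads-per-core 1
+  */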
+ if (*args[2] != 0) {
+ if (!strcmp(args[2], "threads-per-core")) {
+ if (!strcmp(args[3], "1"))
+ cpu_policy_conf.flags |= CPU_POLICY_ONE_THREAD_PER_CORE;
+ else if (strcmp(args[3], "auto")) {
+ memprintf(err, "'%s': unknown value '%s' for '%s'; supported values are '1' and 'auto'", args[0], args[3], args[2]);
+ return -1;
+ }
+ } else {
+ memprintf(err, "'%s': unknown argument '%s'; the only supported argument is 'threads-per-core'", args[0], args[2]);
+ return -1;
+ }
+ }
for (i = 0; ha_cpu_policy[i].name; i++) {
if (strcmp(args[1], ha_cpu_policy[i].name) == 0) {
cpu_policy_conf.cpu_policy = i;