// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 Thomas Gleixner.
 * Copyright (C) 2016-2017 Christoph Hellwig.
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sort.h>

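/*
 * Illustrative example (assuming a 2-way SMT topology where CPUs 0/2 and
 * 1/3 are sibling pairs): called with nmsk = {0,1,2,3} and cpus_per_grp = 2,
 * the helper below picks CPU 0 and then its sibling CPU 2, so the group
 * becomes {0,2} rather than {0,1}, keeping SMT siblings together.
 */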
static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
				unsigned int cpus_per_grp)
{
	const struct cpumask *siblmsk;
	int cpu, sibl;

	for ( ; cpus_per_grp > 0; ) {
		cpu = cpumask_first(nmsk);

		/* Should not happen, but I'm too lazy to think about it */
		if (cpu >= nr_cpu_ids)
			return;

		cpumask_clear_cpu(cpu, nmsk);
		cpumask_set_cpu(cpu, irqmsk);
		cpus_per_grp--;

		/* If the cpu has siblings, use them first */
		siblmsk = topology_sibling_cpumask(cpu);
		for (sibl = -1; cpus_per_grp > 0; ) {
			sibl = cpumask_next(sibl, siblmsk);
			if (sibl >= nr_cpu_ids)
				break;
			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
				continue;
			cpumask_set_cpu(sibl, irqmsk);
			cpus_per_grp--;
		}
	}
}

static cpumask_var_t *alloc_node_to_cpumask(void)
{
	cpumask_var_t *masks;
	int node;

	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
	if (!masks)
		return NULL;

	for (node = 0; node < nr_node_ids; node++) {
		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
			goto out_unwind;
	}

	return masks;

out_unwind:
	while (--node >= 0)
		free_cpumask_var(masks[node]);
	kfree(masks);
	return NULL;
}

static void free_node_to_cpumask(cpumask_var_t *masks)
{
	int node;

	for (node = 0; node < nr_node_ids; node++)
		free_cpumask_var(masks[node]);
	kfree(masks);
}

static void build_node_to_cpumask(cpumask_var_t *masks)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}

static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
				const struct cpumask *mask, nodemask_t *nodemsk)
{
	int n, nodes = 0;

	/* Calculate the number of nodes in the supplied affinity mask */
	for_each_node(n) {
		if (cpumask_intersects(mask, node_to_cpumask[n])) {
			node_set(n, *nodemsk);
			nodes++;
		}
	}
	return nodes;
}

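/*
 * Per-node bookkeeping for alloc_nodes_groups(): the union member first
 * holds the node's active CPU count (ncpus), used for sorting, and is then
 * overwritten with the number of groups allocated to that node (ngroups).
 * UINT_MAX marks nodes without active CPUs.
 */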
struct node_groups {
	unsigned id;

	union {
		unsigned ngroups;
		unsigned ncpus;
	};
};

static int ncpus_cmp_func(const void *l, const void *r)
{
	const struct node_groups *ln = l;
	const struct node_groups *rn = r;

	return ln->ncpus - rn->ncpus;
}

/*
 * Allocate a group count for each node, so that for each node:
 *
 * 1) the allocated number is >= 1
 *
 * 2) the allocated number is <= the number of active CPUs on this node
 *
 * The total number of allocated groups may be less than @numgrps when
 * the total number of active CPUs is less than @numgrps.
 *
 * 'Active CPUs' means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
 * for each node.
 */
static void alloc_nodes_groups(unsigned int numgrps,
			       cpumask_var_t *node_to_cpumask,
			       const struct cpumask *cpu_mask,
			       const nodemask_t nodemsk,
			       struct cpumask *nmsk,
			       struct node_groups *node_groups)
{
	unsigned n, remaining_ncpus = 0;

	for (n = 0; n < nr_node_ids; n++) {
		node_groups[n].id = n;
		node_groups[n].ncpus = UINT_MAX;
	}

	for_each_node_mask(n, nodemsk) {
		unsigned ncpus;

		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
		ncpus = cpumask_weight(nmsk);

		if (!ncpus)
			continue;
		remaining_ncpus += ncpus;
		node_groups[n].ncpus = ncpus;
	}

	numgrps = min_t(unsigned, remaining_ncpus, numgrps);

	sort(node_groups, nr_node_ids, sizeof(node_groups[0]),
	     ncpus_cmp_func, NULL);

	/*
	 * Allocate groups for each node according to the ratio of this
	 * node's nr_cpus to the remaining un-assigned ncpus. 'numgrps'
	 * is bigger than the number of active NUMA nodes. Always start
	 * the allocation from the node with the smallest nr_cpus.
	 *
	 * This guarantees that each active node gets at least one group:
	 * over-allocation only happens when a node is assigned a single
	 * group, so the other nodes are still allocated >= 1 group each,
	 * since 'numgrps' is bigger than the number of NUMA nodes.
	 *
	 * The key invariant is that the number of groups allocated to
	 * each node is <= the CPU count of that node:
	 *
	 * 1) suppose there are two nodes: A and B
	 *	ncpu(X) is the CPU count of node X
	 *	grps(X) is the group count allocated to node X via this
	 *	algorithm
	 *
	 *	ncpu(A) <= ncpu(B)
	 *	ncpu(A) + ncpu(B) = N
	 *	grps(A) + grps(B) = G
	 *
	 *	grps(A) = max(1, round_down(G * ncpu(A) / N))
	 *	grps(B) = G - grps(A)
	 *
	 *	both N and G are integers, and 2 <= G <= N; suppose
	 *	G = N - delta, with 0 <= delta <= N - 2
	 *
	 * 2) obviously grps(A) <= ncpu(A), because:
	 *
	 *	if grps(A) is 1, then grps(A) <= ncpu(A) given
	 *	ncpu(A) >= 1
	 *
	 *	otherwise,
	 *		grps(A) <= G * ncpu(A) / N <= ncpu(A), given G <= N
	 *
	 * 3) prove that grps(B) <= ncpu(B):
	 *
	 *	if round_down(G * ncpu(A) / N) == 0, grps(B) won't be
	 *	over-allocated, so grps(B) <= ncpu(B),
	 *
	 *	otherwise:
	 *
	 *	grps(A) =
	 *		round_down(G * ncpu(A) / N) =
	 *		round_down((N - delta) * ncpu(A) / N) =
	 *		round_down((N * ncpu(A) - delta * ncpu(A)) / N) >=
	 *		round_down((N * ncpu(A) - delta * N) / N) =
	 *		ncpu(A) - delta
	 *
	 *	then:
	 *
	 *	grps(A) - G >= ncpu(A) - delta - G
	 *	=>
	 *	G - grps(A) <= G + delta - ncpu(A)
	 *	=>
	 *	grps(B) <= N - ncpu(A)
	 *	=>
	 *	grps(B) <= ncpu(B)
	 *
	 * For >= 3 nodes, this can be thought of as one node plus one big
	 * combined node, which is exactly how the algorithm is implemented:
	 * 'remaining_ncpus' and 'numgrps' are re-calculated at every step,
	 * so finally for each node X: grps(X) <= ncpu(X).
	 */
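	/*
	 * Worked example with assumed numbers: two nodes with ncpu(A) = 2 and
	 * ncpu(B) = 6, so N = 8, and numgrps G = 4.  Starting from the smaller
	 * node, grps(A) = max(1, 4 * 2 / 8) = 1; then remaining_ncpus becomes
	 * 6 and numgrps becomes 3, so grps(B) = max(1, 3 * 6 / 6) = 3.  Both
	 * nodes stay within their CPU counts, matching the invariant above.
	 */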
	for (n = 0; n < nr_node_ids; n++) {
		unsigned ngroups, ncpus;

		if (node_groups[n].ncpus == UINT_MAX)
			continue;

		WARN_ON_ONCE(numgrps == 0);

		ncpus = node_groups[n].ncpus;
		ngroups = max_t(unsigned, 1,
				numgrps * ncpus / remaining_ncpus);
		WARN_ON_ONCE(ngroups > ncpus);

		node_groups[n].ngroups = ngroups;

		remaining_ncpus -= ncpus;
		numgrps -= ngroups;
	}
}

static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
			       cpumask_var_t *node_to_cpumask,
			       const struct cpumask *cpu_mask,
			       struct cpumask *nmsk, struct cpumask *masks)
{
	unsigned int i, n, nodes, cpus_per_grp, extra_grps, done = 0;
	unsigned int last_grp = numgrps;
	unsigned int curgrp = startgrp;
	nodemask_t nodemsk = NODE_MASK_NONE;
	struct node_groups *node_groups;

	if (cpumask_empty(cpu_mask))
		return 0;

	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);

	/*
	 * If the number of nodes in the mask is greater than or equal to
	 * the number of groups, just spread the groups across the nodes.
	 */
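	/*
	 * Illustrative example (assuming startgrp == 0): with numgrps = 2 and
	 * active CPUs on three nodes, node 0 goes to group 0, node 1 to group
	 * 1, and node 2 wraps around to group 0, i.e. whole nodes are assigned
	 * to groups round-robin.
	 */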
	if (numgrps <= nodes) {
		for_each_node_mask(n, nodemsk) {
			/* Ensure that only CPUs which are in both masks are set */
			cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
			cpumask_or(&masks[curgrp], &masks[curgrp], nmsk);
			if (++curgrp == last_grp)
				curgrp = 0;
		}
		return numgrps;
	}

	node_groups = kcalloc(nr_node_ids,
			      sizeof(struct node_groups),
			      GFP_KERNEL);
	if (!node_groups)
		return -ENOMEM;

	/* allocate group number for each node */
	alloc_nodes_groups(numgrps, node_to_cpumask, cpu_mask,
			   nodemsk, nmsk, node_groups);
	for (i = 0; i < nr_node_ids; i++) {
		unsigned int ncpus, v;
		struct node_groups *nv = &node_groups[i];

		if (nv->ngroups == UINT_MAX)
			continue;

		/* Get the cpus on this node which are in the mask */
		cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
		ncpus = cpumask_weight(nmsk);
		if (!ncpus)
			continue;

		WARN_ON_ONCE(nv->ngroups > ncpus);

		/* Account for rounding errors */
		extra_grps = ncpus - nv->ngroups * (ncpus / nv->ngroups);

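		/*
		 * Illustrative example: ncpus = 7 and nv->ngroups = 3 gives a
		 * base of 7 / 3 = 2 CPUs per group and extra_grps = 7 - 3 * 2
		 * = 1, so the loop below produces group sizes 3, 2 and 2.
		 */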
		/* Spread allocated groups on CPUs of the current node */
		for (v = 0; v < nv->ngroups; v++, curgrp++) {
			cpus_per_grp = ncpus / nv->ngroups;

			/* Account for extra groups to compensate rounding errors */
			if (extra_grps) {
				cpus_per_grp++;
				--extra_grps;
			}

			/*
			 * wrapping has to be considered given 'startgrp'
			 * may start anywhere
			 */
			if (curgrp >= last_grp)
				curgrp = 0;
			grp_spread_init_one(&masks[curgrp], nmsk,
					    cpus_per_grp);
		}
		done += nv->ngroups;
	}
	kfree(node_groups);
	return done;
}

/*
 * Build the CPU groups in two stages, trying to put CPUs that are close in
 * terms of CPU and NUMA locality into the same group.  The grouping is done
 * in two passes:
 *
 * 1) allocate present CPUs to the groups evenly first
 * 2) allocate other possible CPUs to the groups evenly
 */
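/*
 * Illustrative example with assumed numbers: numgrps = 2 on a single-node
 * machine with 4 possible CPUs of which only 2 are present.  The first pass
 * puts one present CPU into each group; the second pass then adds one of the
 * not-yet-present CPUs to each group, so CPUs that come online later are
 * already covered by a group.
 */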
static struct cpumask *group_cpus_evenly(unsigned int numgrps)
{
	unsigned int curgrp = 0, nr_present = 0, nr_others = 0;
	cpumask_var_t *node_to_cpumask;
	cpumask_var_t nmsk, npresmsk;
	int ret = -ENOMEM;
	struct cpumask *masks = NULL;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return NULL;

	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
		goto fail_nmsk;

	node_to_cpumask = alloc_node_to_cpumask();
	if (!node_to_cpumask)
		goto fail_npresmsk;

	masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		goto fail_node_to_cpumask;

	/* Stabilize the cpumasks */
	cpus_read_lock();
	build_node_to_cpumask(node_to_cpumask);

	/* grouping present CPUs first */
	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
				  cpu_present_mask, nmsk, masks);
	if (ret < 0)
		goto fail_build_affinity;
	nr_present = ret;

	/*
	 * Allocate non-present CPUs starting from the next group to be
	 * handled. If the grouping of present CPUs already exhausted the
	 * group space, assign the non-present CPUs to the already
	 * allocated groups.
	 */
	if (nr_present >= numgrps)
		curgrp = 0;
	else
		curgrp = nr_present;
	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
	ret = __group_cpus_evenly(curgrp, numgrps, node_to_cpumask,
				  npresmsk, nmsk, masks);
	if (ret >= 0)
		nr_others = ret;

 fail_build_affinity:
	cpus_read_unlock();

	if (ret >= 0)
		WARN_ON(nr_present + nr_others < numgrps);

 fail_node_to_cpumask:
	free_node_to_cpumask(node_to_cpumask);

 fail_npresmsk:
	free_cpumask_var(npresmsk);

 fail_nmsk:
	free_cpumask_var(nmsk);
	if (ret < 0) {
		kfree(masks);
		return NULL;
	}
	return masks;
}

static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
{
	affd->nr_sets = 1;
	affd->set_size[0] = affvecs;
}

/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs:	The total number of vectors
 * @affd:	Description of the affinity requirements
 *
 * Returns the irq_affinity_desc pointer or NULL if allocation failed.
 */
struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
{
	unsigned int affvecs, curvec, usedvecs, i;
	struct irq_affinity_desc *masks = NULL;

	/*
	 * Determine the number of vectors which need interrupt affinities
	 * assigned. If the pre/post request exhausts the available vectors
	 * then there is nothing to do here except invoking the calc_sets()
	 * callback so the device driver can adjust to the situation.
	 */
	if (nvecs > affd->pre_vectors + affd->post_vectors)
		affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
	else
		affvecs = 0;
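
	/*
	 * Illustrative example: nvecs = 16 with pre_vectors = 2 and
	 * post_vectors = 1 leaves affvecs = 13 vectors to be spread; the
	 * first two and the last vector are simply set to the default
	 * affinity further below.
	 */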

	/*
	 * Simple invocations do not provide a calc_sets() callback. Install
	 * the generic one.
	 */
	if (!affd->calc_sets)
		affd->calc_sets = default_calc_sets;

	/* Recalculate the sets */
	affd->calc_sets(affd, affvecs);

	if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
		return NULL;

	/* Nothing to assign? */
	if (!affvecs)
		return NULL;

	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return NULL;

	/* Fill out vectors at the beginning that don't need affinity */
	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/*
	 * Spread on present CPUs starting from affd->pre_vectors. If we
	 * have multiple sets, build each set's affinity mask separately.
	 */
	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
		unsigned int this_vecs = affd->set_size[i];
		int j;
		struct cpumask *result = group_cpus_evenly(this_vecs);

		if (!result) {
			kfree(masks);
			return NULL;
		}

		for (j = 0; j < this_vecs; j++)
			cpumask_copy(&masks[curvec + j].mask, &result[j]);
		kfree(result);

		curvec += this_vecs;
		usedvecs += this_vecs;
	}

	/* Fill out vectors at the end that don't need affinity */
	if (usedvecs >= affvecs)
		curvec = affd->pre_vectors + affvecs;
	else
		curvec = affd->pre_vectors + usedvecs;
	for (; curvec < nvecs; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/* Mark the managed interrupts */
	for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
		masks[i].is_managed = 1;

	return masks;
}

/**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
 * @minvec:	The minimum number of vectors available
 * @maxvec:	The maximum number of vectors available
 * @affd:	Description of the affinity requirements
 */
unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd)
{
	unsigned int resv = affd->pre_vectors + affd->post_vectors;
	unsigned int set_vecs;

	if (resv > minvec)
		return 0;

	if (affd->calc_sets) {
		set_vecs = maxvec - resv;
	} else {
		cpus_read_lock();
		set_vecs = cpumask_weight(cpu_possible_mask);
		cpus_read_unlock();
	}

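	/*
	 * Illustrative example: maxvec = 32 with resv = 3 and no calc_sets()
	 * callback on a machine with 16 possible CPUs gives set_vecs = 16,
	 * so the result is 3 + min(16, 29) = 19 vectors.
	 */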
	return resv + min(set_vecs, maxvec - resv);
}