/*
 * Retrieved from the git.ipfire.org mirror of thirdparty/linux.git:
 * blob kernel/irq/affinity.c @ 00bba1020ecb214a8cba0743a7bcc4daf8a9190e
 */
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 Thomas Gleixner.
 * Copyright (C) 2016-2017 Christoph Hellwig.
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sort.h>
12 static void irq_spread_init_one(struct cpumask
*irqmsk
, struct cpumask
*nmsk
,
13 unsigned int cpus_per_vec
)
15 const struct cpumask
*siblmsk
;
18 for ( ; cpus_per_vec
> 0; ) {
19 cpu
= cpumask_first(nmsk
);
21 /* Should not happen, but I'm too lazy to think about it */
22 if (cpu
>= nr_cpu_ids
)
25 cpumask_clear_cpu(cpu
, nmsk
);
26 cpumask_set_cpu(cpu
, irqmsk
);
29 /* If the cpu has siblings, use them first */
30 siblmsk
= topology_sibling_cpumask(cpu
);
31 for (sibl
= -1; cpus_per_vec
> 0; ) {
32 sibl
= cpumask_next(sibl
, siblmsk
);
33 if (sibl
>= nr_cpu_ids
)
35 if (!cpumask_test_and_clear_cpu(sibl
, nmsk
))
37 cpumask_set_cpu(sibl
, irqmsk
);
43 static cpumask_var_t
*alloc_node_to_cpumask(void)
48 masks
= kcalloc(nr_node_ids
, sizeof(cpumask_var_t
), GFP_KERNEL
);
52 for (node
= 0; node
< nr_node_ids
; node
++) {
53 if (!zalloc_cpumask_var(&masks
[node
], GFP_KERNEL
))
61 free_cpumask_var(masks
[node
]);
66 static void free_node_to_cpumask(cpumask_var_t
*masks
)
70 for (node
= 0; node
< nr_node_ids
; node
++)
71 free_cpumask_var(masks
[node
]);
75 static void build_node_to_cpumask(cpumask_var_t
*masks
)
79 for_each_possible_cpu(cpu
)
80 cpumask_set_cpu(cpu
, masks
[cpu_to_node(cpu
)]);
83 static int get_nodes_in_cpumask(cpumask_var_t
*node_to_cpumask
,
84 const struct cpumask
*mask
, nodemask_t
*nodemsk
)
88 /* Calculate the number of nodes in the supplied affinity mask */
90 if (cpumask_intersects(mask
, node_to_cpumask
[n
])) {
91 node_set(n
, *nodemsk
);
/*
 * Per-node bookkeeping for vector allocation. 'ncpus' (the node's
 * active CPU count) and 'nvectors' (the vectors assigned to it) share
 * storage: ncpus is consumed first, then overwritten with nvectors.
 */
struct node_vectors {
	unsigned id;

	union {
		unsigned nvectors;
		unsigned ncpus;
	};
};

/*
 * sort() comparator: order node_vectors entries by ascending active CPU
 * count so allocation starts from the node with the fewest CPUs.
 */
static int ncpus_cmp_func(const void *l, const void *r)
{
	const struct node_vectors *ln = l;
	const struct node_vectors *rn = r;

	return ln->ncpus - rn->ncpus;
}
116 * Allocate vector number for each node, so that for each node:
118 * 1) the allocated number is >= 1
120 * 2) the allocated numbver is <= active CPU number of this node
122 * The actual allocated total vectors may be less than @numvecs when
123 * active total CPU number is less than @numvecs.
125 * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
128 static void alloc_nodes_vectors(unsigned int numvecs
,
129 cpumask_var_t
*node_to_cpumask
,
130 const struct cpumask
*cpu_mask
,
131 const nodemask_t nodemsk
,
132 struct cpumask
*nmsk
,
133 struct node_vectors
*node_vectors
)
135 unsigned n
, remaining_ncpus
= 0;
137 for (n
= 0; n
< nr_node_ids
; n
++) {
138 node_vectors
[n
].id
= n
;
139 node_vectors
[n
].ncpus
= UINT_MAX
;
142 for_each_node_mask(n
, nodemsk
) {
145 cpumask_and(nmsk
, cpu_mask
, node_to_cpumask
[n
]);
146 ncpus
= cpumask_weight(nmsk
);
150 remaining_ncpus
+= ncpus
;
151 node_vectors
[n
].ncpus
= ncpus
;
154 numvecs
= min_t(unsigned, remaining_ncpus
, numvecs
);
156 sort(node_vectors
, nr_node_ids
, sizeof(node_vectors
[0]),
157 ncpus_cmp_func
, NULL
);
160 * Allocate vectors for each node according to the ratio of this
161 * node's nr_cpus to remaining un-assigned ncpus. 'numvecs' is
162 * bigger than number of active numa nodes. Always start the
163 * allocation from the node with minimized nr_cpus.
165 * This way guarantees that each active node gets allocated at
166 * least one vector, and the theory is simple: over-allocation
167 * is only done when this node is assigned by one vector, so
168 * other nodes will be allocated >= 1 vector, since 'numvecs' is
169 * bigger than number of numa nodes.
171 * One perfect invariant is that number of allocated vectors for
172 * each node is <= CPU count of this node:
174 * 1) suppose there are two nodes: A and B
175 * ncpu(X) is CPU count of node X
176 * vecs(X) is the vector count allocated to node X via this
180 * ncpu(A) + ncpu(B) = N
181 * vecs(A) + vecs(B) = V
183 * vecs(A) = max(1, round_down(V * ncpu(A) / N))
184 * vecs(B) = V - vecs(A)
186 * both N and V are integer, and 2 <= V <= N, suppose
187 * V = N - delta, and 0 <= delta <= N - 2
189 * 2) obviously vecs(A) <= ncpu(A) because:
191 * if vecs(A) is 1, then vecs(A) <= ncpu(A) given
195 * vecs(A) <= V * ncpu(A) / N <= ncpu(A), given V <= N
197 * 3) prove how vecs(B) <= ncpu(B):
199 * if round_down(V * ncpu(A) / N) == 0, vecs(B) won't be
200 * over-allocated, so vecs(B) <= ncpu(B),
205 * round_down(V * ncpu(A) / N) =
206 * round_down((N - delta) * ncpu(A) / N) =
207 * round_down((N * ncpu(A) - delta * ncpu(A)) / N) >=
208 * round_down((N * ncpu(A) - delta * N) / N) =
213 * vecs(A) - V >= ncpu(A) - delta - V
215 * V - vecs(A) <= V + delta - ncpu(A)
217 * vecs(B) <= N - ncpu(A)
221 * For nodes >= 3, it can be thought as one node and another big
222 * node given that is exactly what this algorithm is implemented,
223 * and we always re-calculate 'remaining_ncpus' & 'numvecs', and
224 * finally for each node X: vecs(X) <= ncpu(X).
227 for (n
= 0; n
< nr_node_ids
; n
++) {
228 unsigned nvectors
, ncpus
;
230 if (node_vectors
[n
].ncpus
== UINT_MAX
)
233 WARN_ON_ONCE(numvecs
== 0);
235 ncpus
= node_vectors
[n
].ncpus
;
236 nvectors
= max_t(unsigned, 1,
237 numvecs
* ncpus
/ remaining_ncpus
);
238 WARN_ON_ONCE(nvectors
> ncpus
);
240 node_vectors
[n
].nvectors
= nvectors
;
242 remaining_ncpus
-= ncpus
;
247 static int __irq_build_affinity_masks(unsigned int startvec
,
248 unsigned int numvecs
,
249 cpumask_var_t
*node_to_cpumask
,
250 const struct cpumask
*cpu_mask
,
251 struct cpumask
*nmsk
,
252 struct cpumask
*masks
)
254 unsigned int i
, n
, nodes
, cpus_per_vec
, extra_vecs
, done
= 0;
255 unsigned int last_affv
= numvecs
;
256 unsigned int curvec
= startvec
;
257 nodemask_t nodemsk
= NODE_MASK_NONE
;
258 struct node_vectors
*node_vectors
;
260 if (cpumask_empty(cpu_mask
))
263 nodes
= get_nodes_in_cpumask(node_to_cpumask
, cpu_mask
, &nodemsk
);
266 * If the number of nodes in the mask is greater than or equal the
267 * number of vectors we just spread the vectors across the nodes.
269 if (numvecs
<= nodes
) {
270 for_each_node_mask(n
, nodemsk
) {
271 /* Ensure that only CPUs which are in both masks are set */
272 cpumask_and(nmsk
, cpu_mask
, node_to_cpumask
[n
]);
273 cpumask_or(&masks
[curvec
], &masks
[curvec
], nmsk
);
274 if (++curvec
== last_affv
)
280 node_vectors
= kcalloc(nr_node_ids
,
281 sizeof(struct node_vectors
),
286 /* allocate vector number for each node */
287 alloc_nodes_vectors(numvecs
, node_to_cpumask
, cpu_mask
,
288 nodemsk
, nmsk
, node_vectors
);
290 for (i
= 0; i
< nr_node_ids
; i
++) {
291 unsigned int ncpus
, v
;
292 struct node_vectors
*nv
= &node_vectors
[i
];
294 if (nv
->nvectors
== UINT_MAX
)
297 /* Get the cpus on this node which are in the mask */
298 cpumask_and(nmsk
, cpu_mask
, node_to_cpumask
[nv
->id
]);
299 ncpus
= cpumask_weight(nmsk
);
303 WARN_ON_ONCE(nv
->nvectors
> ncpus
);
305 /* Account for rounding errors */
306 extra_vecs
= ncpus
- nv
->nvectors
* (ncpus
/ nv
->nvectors
);
308 /* Spread allocated vectors on CPUs of the current node */
309 for (v
= 0; v
< nv
->nvectors
; v
++, curvec
++) {
310 cpus_per_vec
= ncpus
/ nv
->nvectors
;
312 /* Account for extra vectors to compensate rounding errors */
319 * wrapping has to be considered given 'startvec'
322 if (curvec
>= last_affv
)
324 irq_spread_init_one(&masks
[curvec
], nmsk
,
327 done
+= nv
->nvectors
;
334 * build affinity in two stages:
335 * 1) spread present CPU on these vectors
336 * 2) spread other possible CPUs on these vectors
338 static struct cpumask
*irq_build_affinity_masks(unsigned int numvecs
)
340 unsigned int curvec
= 0, nr_present
= 0, nr_others
= 0;
341 cpumask_var_t
*node_to_cpumask
;
342 cpumask_var_t nmsk
, npresmsk
;
344 struct cpumask
*masks
= NULL
;
346 if (!zalloc_cpumask_var(&nmsk
, GFP_KERNEL
))
349 if (!zalloc_cpumask_var(&npresmsk
, GFP_KERNEL
))
352 node_to_cpumask
= alloc_node_to_cpumask();
353 if (!node_to_cpumask
)
356 masks
= kcalloc(numvecs
, sizeof(*masks
), GFP_KERNEL
);
358 goto fail_node_to_cpumask
;
360 /* Stabilize the cpumasks */
362 build_node_to_cpumask(node_to_cpumask
);
364 /* Spread on present CPUs starting from affd->pre_vectors */
365 ret
= __irq_build_affinity_masks(curvec
, numvecs
, node_to_cpumask
,
366 cpu_present_mask
, nmsk
, masks
);
368 goto fail_build_affinity
;
372 * Spread on non present CPUs starting from the next vector to be
373 * handled. If the spreading of present CPUs already exhausted the
374 * vector space, assign the non present CPUs to the already spread
377 if (nr_present
>= numvecs
)
381 cpumask_andnot(npresmsk
, cpu_possible_mask
, cpu_present_mask
);
382 ret
= __irq_build_affinity_masks(curvec
, numvecs
, node_to_cpumask
,
383 npresmsk
, nmsk
, masks
);
391 WARN_ON(nr_present
+ nr_others
< numvecs
);
393 fail_node_to_cpumask
:
394 free_node_to_cpumask(node_to_cpumask
);
397 free_cpumask_var(npresmsk
);
400 free_cpumask_var(nmsk
);
408 static void default_calc_sets(struct irq_affinity
*affd
, unsigned int affvecs
)
411 affd
->set_size
[0] = affvecs
;
415 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
416 * @nvecs: The total number of vectors
417 * @affd: Description of the affinity requirements
419 * Returns the irq_affinity_desc pointer or NULL if allocation failed.
421 struct irq_affinity_desc
*
422 irq_create_affinity_masks(unsigned int nvecs
, struct irq_affinity
*affd
)
424 unsigned int affvecs
, curvec
, usedvecs
, i
;
425 struct irq_affinity_desc
*masks
= NULL
;
428 * Determine the number of vectors which need interrupt affinities
429 * assigned. If the pre/post request exhausts the available vectors
430 * then nothing to do here except for invoking the calc_sets()
431 * callback so the device driver can adjust to the situation.
433 if (nvecs
> affd
->pre_vectors
+ affd
->post_vectors
)
434 affvecs
= nvecs
- affd
->pre_vectors
- affd
->post_vectors
;
439 * Simple invocations do not provide a calc_sets() callback. Install
442 if (!affd
->calc_sets
)
443 affd
->calc_sets
= default_calc_sets
;
445 /* Recalculate the sets */
446 affd
->calc_sets(affd
, affvecs
);
448 if (WARN_ON_ONCE(affd
->nr_sets
> IRQ_AFFINITY_MAX_SETS
))
451 /* Nothing to assign? */
455 masks
= kcalloc(nvecs
, sizeof(*masks
), GFP_KERNEL
);
459 /* Fill out vectors at the beginning that don't need affinity */
460 for (curvec
= 0; curvec
< affd
->pre_vectors
; curvec
++)
461 cpumask_copy(&masks
[curvec
].mask
, irq_default_affinity
);
464 * Spread on present CPUs starting from affd->pre_vectors. If we
465 * have multiple sets, build each sets affinity mask separately.
467 for (i
= 0, usedvecs
= 0; i
< affd
->nr_sets
; i
++) {
468 unsigned int this_vecs
= affd
->set_size
[i
];
470 struct cpumask
*result
= irq_build_affinity_masks(this_vecs
);
477 for (j
= 0; j
< this_vecs
; j
++)
478 cpumask_copy(&masks
[curvec
+ j
].mask
, &result
[j
]);
482 usedvecs
+= this_vecs
;
485 /* Fill out vectors at the end that don't need affinity */
486 if (usedvecs
>= affvecs
)
487 curvec
= affd
->pre_vectors
+ affvecs
;
489 curvec
= affd
->pre_vectors
+ usedvecs
;
490 for (; curvec
< nvecs
; curvec
++)
491 cpumask_copy(&masks
[curvec
].mask
, irq_default_affinity
);
493 /* Mark the managed interrupts */
494 for (i
= affd
->pre_vectors
; i
< nvecs
- affd
->post_vectors
; i
++)
495 masks
[i
].is_managed
= 1;
501 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
502 * @minvec: The minimum number of vectors available
503 * @maxvec: The maximum number of vectors available
504 * @affd: Description of the affinity requirements
506 unsigned int irq_calc_affinity_vectors(unsigned int minvec
, unsigned int maxvec
,
507 const struct irq_affinity
*affd
)
509 unsigned int resv
= affd
->pre_vectors
+ affd
->post_vectors
;
510 unsigned int set_vecs
;
515 if (affd
->calc_sets
) {
516 set_vecs
= maxvec
- resv
;
519 set_vecs
= cpumask_weight(cpu_possible_mask
);
523 return resv
+ min(set_vecs
, maxvec
- resv
);