// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 Thomas Gleixner.
 * Copyright (C) 2016-2017 Christoph Hellwig.
 */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/sort.h>
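
/*
 * irq_spread_init_one - Fill @irqmsk with @cpus_per_vec CPUs taken from
 * @nmsk. Whenever a CPU is picked, its SMT siblings are preferred for
 * the remaining slots so that a vector covers whole cores where possible.
 */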
static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
				unsigned int cpus_per_vec)
{
	const struct cpumask *siblmsk;
	int cpu, sibl;

	for ( ; cpus_per_vec > 0; ) {
		cpu = cpumask_first(nmsk);

		/* Should not happen, but I'm too lazy to think about it */
		if (cpu >= nr_cpu_ids)
			return;

		cpumask_clear_cpu(cpu, nmsk);
		cpumask_set_cpu(cpu, irqmsk);
		cpus_per_vec--;

		/* If the cpu has siblings, use them first */
		siblmsk = topology_sibling_cpumask(cpu);
		for (sibl = -1; cpus_per_vec > 0; ) {
			sibl = cpumask_next(sibl, siblmsk);
			if (sibl >= nr_cpu_ids)
				break;
			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
				continue;
			cpumask_set_cpu(sibl, irqmsk);
			cpus_per_vec--;
		}
	}
}
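
/*
 * alloc_node_to_cpumask - Allocate a zeroed cpumask for each possible
 * NUMA node. Returns NULL and unwinds if any allocation fails.
 */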
static cpumask_var_t *alloc_node_to_cpumask(void)
{
	cpumask_var_t *masks;
	int node;

	masks = kcalloc(nr_node_ids, sizeof(cpumask_var_t), GFP_KERNEL);
	if (!masks)
		return NULL;

	for (node = 0; node < nr_node_ids; node++) {
		if (!zalloc_cpumask_var(&masks[node], GFP_KERNEL))
			goto out_unwind;
	}

	return masks;

out_unwind:
	while (--node >= 0)
		free_cpumask_var(masks[node]);
	kfree(masks);
	return NULL;
}
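
/* free_node_to_cpumask - Free the per-node cpumasks and the array itself */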
static void free_node_to_cpumask(cpumask_var_t *masks)
{
	int node;

	for (node = 0; node < nr_node_ids; node++)
		free_cpumask_var(masks[node]);
	kfree(masks);
}
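
/* build_node_to_cpumask - Record which possible CPUs belong to each node */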
static void build_node_to_cpumask(cpumask_var_t *masks)
{
	int cpu;

	for_each_possible_cpu(cpu)
		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}
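
/*
 * get_nodes_in_cpumask - Count the NUMA nodes covered by @mask and set
 * them in @nodemsk
 */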
static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
				const struct cpumask *mask, nodemask_t *nodemsk)
{
	int n, nodes = 0;

	/* Calculate the number of nodes in the supplied affinity mask */
	for_each_node(n) {
		if (cpumask_intersects(mask, node_to_cpumask[n])) {
			node_set(n, *nodemsk);
			nodes++;
		}
	}
	return nodes;
}
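
/*
 * Per-node bookkeeping for the spreading algorithm below: @ncpus holds the
 * node's active CPU count while vectors are being apportioned, @nvectors
 * the vector count finally assigned to it. The two can share storage
 * because they are never needed at the same time.
 */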
struct node_vectors {
	unsigned id;

	union {
		unsigned nvectors;
		unsigned ncpus;
	};
};

static int ncpus_cmp_func(const void *l, const void *r)
{
	const struct node_vectors *ln = l;
	const struct node_vectors *rn = r;

	/* Sort by ascending active CPU count */
	return ln->ncpus - rn->ncpus;
}

/*
 * Allocate a vector number for each node, so that for each node:
 *
 * 1) the allocated number is >= 1
 *
 * 2) the allocated number is <= active CPU number of this node
 *
 * The actual number of allocated vectors may be less than @numvecs when
 * the total number of active CPUs is less than @numvecs.
 *
 * Active CPUs means the CPUs in '@cpu_mask AND @node_to_cpumask[]'
 * for each node.
 */
static void alloc_nodes_vectors(unsigned int numvecs,
				cpumask_var_t *node_to_cpumask,
				const struct cpumask *cpu_mask,
				const nodemask_t nodemsk,
				struct cpumask *nmsk,
				struct node_vectors *node_vectors)
{
	unsigned n, remaining_ncpus = 0;

	/* UINT_MAX marks nodes that end up with no active CPUs */
	for (n = 0; n < nr_node_ids; n++) {
		node_vectors[n].id = n;
		node_vectors[n].ncpus = UINT_MAX;
	}

	/* Count the active CPUs on each node in the given mask */
	for_each_node_mask(n, nodemsk) {
		unsigned ncpus;

		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
		ncpus = cpumask_weight(nmsk);

		if (!ncpus)
			continue;
		remaining_ncpus += ncpus;
		node_vectors[n].ncpus = ncpus;
	}

	numvecs = min_t(unsigned, remaining_ncpus, numvecs);

	sort(node_vectors, nr_node_ids, sizeof(node_vectors[0]),
	     ncpus_cmp_func, NULL);

	/*
	 * Allocate vectors for each node according to the ratio of this
	 * node's nr_cpus to the remaining un-assigned ncpus. 'numvecs' is
	 * bigger than the number of active numa nodes. Always start the
	 * allocation from the node with the smallest nr_cpus.
	 *
	 * This guarantees that each active node gets allocated at least
	 * one vector, and the theory is simple: over-allocation only
	 * happens when a node is assigned a single vector, so the other
	 * nodes will still be allocated >= 1 vector, since 'numvecs' is
	 * bigger than the number of numa nodes.
	 *
	 * One nice invariant is that the number of vectors allocated to
	 * each node is <= the CPU count of this node:
	 *
	 * 1) suppose there are two nodes: A and B
	 *	ncpu(X) is the CPU count of node X
	 *	vecs(X) is the vector count allocated to node X via this
	 *	algorithm
	 *
	 *	ncpu(A) <= ncpu(B)
	 *	ncpu(A) + ncpu(B) = N
	 *	vecs(A) + vecs(B) = V
	 *
	 *	vecs(A) = max(1, round_down(V * ncpu(A) / N))
	 *	vecs(B) = V - vecs(A)
	 *
	 *	both N and V are integers, and 2 <= V <= N; suppose
	 *	V = N - delta, with 0 <= delta <= N - 2
	 *
	 * 2) obviously vecs(A) <= ncpu(A) because:
	 *
	 *	if vecs(A) is 1, then vecs(A) <= ncpu(A) given
	 *	ncpu(A) >= 1
	 *
	 *	otherwise,
	 *		vecs(A) <= V * ncpu(A) / N <= ncpu(A), given V <= N
	 *
	 * 3) prove that vecs(B) <= ncpu(B):
	 *
	 *	if round_down(V * ncpu(A) / N) == 0, vecs(B) won't be
	 *	over-allocated, so vecs(B) <= ncpu(B),
	 *
	 *	otherwise:
	 *
	 *	vecs(A) =
	 *		round_down(V * ncpu(A) / N) =
	 *		round_down((N - delta) * ncpu(A) / N) =
	 *		round_down((N * ncpu(A) - delta * ncpu(A)) / N) >=
	 *		round_down((N * ncpu(A) - delta * N) / N) =
	 *		ncpu(A) - delta
	 *
	 *	then:
	 *	vecs(A) - V >= ncpu(A) - delta - V
	 *	=>
	 *	V - vecs(A) <= V + delta - ncpu(A) = N - ncpu(A)
	 *	=>
	 *	vecs(B) <= N - ncpu(A)
	 *	=>
	 *	vecs(B) <= ncpu(B)
	 *
	 * For nodes >= 3, this can be thought of as one small node and one
	 * big combined node, which is exactly how the loop below operates:
	 * 'remaining_ncpus' and 'numvecs' are re-calculated after every
	 * node, so finally for each node X: vecs(X) <= ncpu(X).
	 */
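
	/*
	 * A worked example of the invariant (illustrative numbers only):
	 * two nodes with ncpu(A) = 2 and ncpu(B) = 6, so N = 8, and V = 5
	 * vectors to spread (delta = N - V = 3):
	 *
	 *	vecs(A) = max(1, round_down(5 * 2 / 8)) = 1
	 *	vecs(B) = 5 - 1 = 4
	 *
	 * Both bounds hold: vecs(A) = 1 <= ncpu(A) = 2, and
	 * vecs(B) = 4 <= ncpu(B) = 6.
	 */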
	for (n = 0; n < nr_node_ids; n++) {
		unsigned nvectors, ncpus;

		/* Skip nodes without active CPUs */
		if (node_vectors[n].ncpus == UINT_MAX)
			continue;

		WARN_ON_ONCE(numvecs == 0);

		ncpus = node_vectors[n].ncpus;
		nvectors = max_t(unsigned, 1,
				 numvecs * ncpus / remaining_ncpus);
		WARN_ON_ONCE(nvectors > ncpus);

		node_vectors[n].nvectors = nvectors;

		remaining_ncpus -= ncpus;
		numvecs -= nvectors;
	}
}

static int __irq_build_affinity_masks(unsigned int startvec,
				      unsigned int numvecs,
				      unsigned int firstvec,
				      cpumask_var_t *node_to_cpumask,
				      const struct cpumask *cpu_mask,
				      struct cpumask *nmsk,
				      struct irq_affinity_desc *masks)
{
	unsigned int i, n, nodes, cpus_per_vec, extra_vecs, done = 0;
	unsigned int last_affv = firstvec + numvecs;
	unsigned int curvec = startvec;
	nodemask_t nodemsk = NODE_MASK_NONE;
	struct node_vectors *node_vectors;

	if (!cpumask_weight(cpu_mask))
		return 0;

	nodes = get_nodes_in_cpumask(node_to_cpumask, cpu_mask, &nodemsk);

	/*
	 * If the number of nodes in the mask is greater than or equal to
	 * the number of vectors we just spread the vectors across the
	 * nodes.
	 */
	if (numvecs <= nodes) {
		for_each_node_mask(n, nodemsk) {
			cpumask_or(&masks[curvec].mask, &masks[curvec].mask,
				   node_to_cpumask[n]);
			if (++curvec == last_affv)
				curvec = firstvec;
		}
		return numvecs;
	}

	node_vectors = kcalloc(nr_node_ids,
			       sizeof(struct node_vectors),
			       GFP_KERNEL);
	if (!node_vectors)
		return -ENOMEM;

	/* Allocate a vector count for each node */
	alloc_nodes_vectors(numvecs, node_to_cpumask, cpu_mask,
			    nodemsk, nmsk, node_vectors);

	for (i = 0; i < nr_node_ids; i++) {
		unsigned int ncpus, v;
		struct node_vectors *nv = &node_vectors[i];

		/* Skip nodes that were assigned no vectors */
		if (nv->nvectors == UINT_MAX)
			continue;

		/* Get the cpus on this node which are in the mask */
		cpumask_and(nmsk, cpu_mask, node_to_cpumask[nv->id]);
		ncpus = cpumask_weight(nmsk);
		if (!ncpus)
			continue;

		WARN_ON_ONCE(nv->nvectors > ncpus);

		/* Account for rounding errors */
		extra_vecs = ncpus - nv->nvectors * (ncpus / nv->nvectors);

		/* Spread allocated vectors on CPUs of the current node */
		for (v = 0; v < nv->nvectors; v++, curvec++) {
			cpus_per_vec = ncpus / nv->nvectors;

			/* Account for extra vectors to compensate rounding errors */
			if (extra_vecs) {
				cpus_per_vec++;
				--extra_vecs;
			}

			/*
			 * wrapping has to be considered given 'startvec'
			 * may start anywhere
			 */
			if (curvec >= last_affv)
				curvec = firstvec;
			irq_spread_init_one(&masks[curvec].mask, nmsk,
					    cpus_per_vec);
		}
		done += nv->nvectors;
	}
	kfree(node_vectors);
	return done;
}

/*
 * build affinity in two stages:
 *	1) spread present CPUs on these vectors
 *	2) spread other possible CPUs on these vectors
 */
static int irq_build_affinity_masks(unsigned int startvec, unsigned int numvecs,
				    unsigned int firstvec,
				    struct irq_affinity_desc *masks)
{
	unsigned int curvec = startvec, nr_present = 0, nr_others = 0;
	cpumask_var_t *node_to_cpumask;
	cpumask_var_t nmsk, npresmsk;
	int ret = -ENOMEM;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return ret;

	if (!zalloc_cpumask_var(&npresmsk, GFP_KERNEL))
		goto fail_nmsk;

	node_to_cpumask = alloc_node_to_cpumask();
	if (!node_to_cpumask)
		goto fail_npresmsk;

	/* Stabilize the cpumasks */
	get_online_cpus();
	build_node_to_cpumask(node_to_cpumask);

	/* Spread on present CPUs starting from affd->pre_vectors */
	ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
					 node_to_cpumask, cpu_present_mask,
					 nmsk, masks);
	if (ret < 0)
		goto fail_build_affinity;
	nr_present = ret;

	/*
	 * Spread on non present CPUs starting from the next vector to be
	 * handled. If the spreading of present CPUs already exhausted the
	 * vector space, assign the non present CPUs to the already spread
	 * out vectors.
	 */
	if (nr_present >= numvecs)
		curvec = firstvec;
	else
		curvec = firstvec + nr_present;
	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
	ret = __irq_build_affinity_masks(curvec, numvecs, firstvec,
					 node_to_cpumask, npresmsk, nmsk,
					 masks);
	if (ret >= 0)
		nr_others = ret;

 fail_build_affinity:
	put_online_cpus();

	if (ret >= 0)
		WARN_ON(nr_present + nr_others < numvecs);

	free_node_to_cpumask(node_to_cpumask);

 fail_npresmsk:
	free_cpumask_var(npresmsk);

 fail_nmsk:
	free_cpumask_var(nmsk);
	return ret < 0 ? ret : 0;
}

static void default_calc_sets(struct irq_affinity *affd, unsigned int affvecs)
{
	affd->nr_sets = 1;
	affd->set_size[0] = affvecs;
}
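
/*
 * Illustrative driver-side counterpart (hypothetical code, not part of
 * this file): a driver with two queue types could install its own
 * calc_sets() callback instead of the default above:
 *
 *	static void foo_calc_sets(struct irq_affinity *affd,
 *				  unsigned int affvecs)
 *	{
 *		affd->nr_sets = 2;
 *		affd->set_size[0] = affvecs / 2;
 *		affd->set_size[1] = affvecs - affvecs / 2;
 *	}
 *
 * The set sizes must add up to affvecs, which is why the second set
 * takes the remainder.
 */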

/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @nvecs:	The total number of vectors
 * @affd:	Description of the affinity requirements
 *
 * Returns the irq_affinity_desc pointer or NULL if allocation failed.
 */
struct irq_affinity_desc *
irq_create_affinity_masks(unsigned int nvecs, struct irq_affinity *affd)
{
	unsigned int affvecs, curvec, usedvecs, i;
	struct irq_affinity_desc *masks = NULL;

	/*
	 * Determine the number of vectors which need interrupt affinities
	 * assigned. If the pre/post request exhausts the available vectors
	 * then nothing to do here except for invoking the calc_sets()
	 * callback so the device driver can adjust to the situation.
	 */
	if (nvecs > affd->pre_vectors + affd->post_vectors)
		affvecs = nvecs - affd->pre_vectors - affd->post_vectors;
	else
		affvecs = 0;

	/*
	 * Simple invocations do not provide a calc_sets() callback. Install
	 * the generic one.
	 */
	if (!affd->calc_sets)
		affd->calc_sets = default_calc_sets;

	/* Recalculate the sets */
	affd->calc_sets(affd, affvecs);

	if (WARN_ON_ONCE(affd->nr_sets > IRQ_AFFINITY_MAX_SETS))
		return NULL;

	/* Nothing to assign? */
	if (!affvecs)
		return NULL;

	masks = kcalloc(nvecs, sizeof(*masks), GFP_KERNEL);
	if (!masks)
		return NULL;

	/* Fill out vectors at the beginning that don't need affinity */
	for (curvec = 0; curvec < affd->pre_vectors; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/*
	 * Spread on present CPUs starting from affd->pre_vectors. If we
	 * have multiple sets, build each set's affinity mask separately.
	 */
	for (i = 0, usedvecs = 0; i < affd->nr_sets; i++) {
		unsigned int this_vecs = affd->set_size[i];
		int ret;

		ret = irq_build_affinity_masks(curvec, this_vecs,
					       curvec, masks);
		if (ret) {
			kfree(masks);
			return NULL;
		}
		curvec += this_vecs;
		usedvecs += this_vecs;
	}

	/* Fill out vectors at the end that don't need affinity */
	if (usedvecs >= affvecs)
		curvec = affd->pre_vectors + affvecs;
	else
		curvec = affd->pre_vectors + usedvecs;
	for (; curvec < nvecs; curvec++)
		cpumask_copy(&masks[curvec].mask, irq_default_affinity);

	/* Mark the managed interrupts */
	for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
		masks[i].is_managed = 1;

	return masks;
}
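
/*
 * Illustrative usage (hypothetical values, not part of this file): a
 * driver with one config interrupt followed by queue interrupts could
 * request:
 *
 *	struct irq_affinity affd = { .pre_vectors = 1 };
 *	struct irq_affinity_desc *masks;
 *
 *	masks = irq_create_affinity_masks(nvecs, &affd);
 *
 * Vector 0 then keeps the default affinity, while vectors 1..nvecs-1
 * are spread over the CPUs and marked managed.
 */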

/**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors
 * @minvec:	The minimum number of vectors available
 * @maxvec:	The maximum number of vectors available
 * @affd:	Description of the affinity requirements
 */
unsigned int irq_calc_affinity_vectors(unsigned int minvec, unsigned int maxvec,
				       const struct irq_affinity *affd)
{
	unsigned int resv = affd->pre_vectors + affd->post_vectors;
	unsigned int set_vecs;

	if (resv > minvec)
		return 0;

	if (affd->calc_sets) {
		set_vecs = maxvec - resv;
	} else {
		get_online_cpus();
		set_vecs = cpumask_weight(cpu_possible_mask);
		put_online_cpus();
	}

	return resv + min(set_vecs, maxvec - resv);
}
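
/*
 * Worked example (illustrative numbers only): with pre_vectors = 1 and
 * post_vectors = 1 (resv = 2), maxvec = 32 and 16 possible CPUs, a
 * driver without a calc_sets() callback gets
 *
 *	resv + min(16, 32 - 2) = 2 + 16 = 18
 *
 * vectors: one per possible CPU plus the two reserved ones.
 */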