/* kernel/irq/affinity.c */
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/cpu.h>

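/*
 * irq_spread_init_one() pulls CPUs out of @nmsk and into @irqmsk, preferring
 * the hyperthread siblings of each chosen CPU. For example, with
 * cpus_per_vec == 2 on an SMT-2 machine, selecting CPU 0 first pulls in
 * CPU 0's sibling next (if it is still in @nmsk) rather than the next
 * unrelated CPU, so each vector tends to cover whole cores.
 */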
static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
				int cpus_per_vec)
{
	const struct cpumask *siblmsk;
	int cpu, sibl;

	for ( ; cpus_per_vec > 0; ) {
		cpu = cpumask_first(nmsk);

		/* Should not happen, but I'm too lazy to think about it */
		if (cpu >= nr_cpu_ids)
			return;

		cpumask_clear_cpu(cpu, nmsk);
		cpumask_set_cpu(cpu, irqmsk);
		cpus_per_vec--;

		/* If the cpu has siblings, use them first */
		siblmsk = topology_sibling_cpumask(cpu);
		for (sibl = -1; cpus_per_vec > 0; ) {
			sibl = cpumask_next(sibl, siblmsk);
			if (sibl >= nr_cpu_ids)
				break;
			if (!cpumask_test_and_clear_cpu(sibl, nmsk))
				continue;
			cpumask_set_cpu(sibl, irqmsk);
			cpus_per_vec--;
		}
	}
}

static int get_nodes_in_cpumask(const struct cpumask *mask, nodemask_t *nodemsk)
{
	int n, nodes;

	/* Calculate the number of nodes in the supplied affinity mask */
	for (n = 0, nodes = 0; n < num_online_nodes(); n++) {
		if (cpumask_intersects(mask, cpumask_of_node(n))) {
			node_set(n, *nodemsk);
			nodes++;
		}
	}
	return nodes;
}

/**
 * irq_create_affinity_masks - Create affinity masks for multiqueue spreading
 * @affinity:	The affinity mask to spread. If NULL cpu_online_mask is used
 * @nvec:	The number of vectors
 *
 * Returns the masks pointer or NULL if allocation failed.
 */
struct cpumask *irq_create_affinity_masks(const struct cpumask *affinity,
					  int nvec)
{
	int n, nodes, vecs_per_node, cpus_per_vec, extra_vecs, curvec = 0;
	nodemask_t nodemsk = NODE_MASK_NONE;
	struct cpumask *masks;
	cpumask_var_t nmsk;

	if (!zalloc_cpumask_var(&nmsk, GFP_KERNEL))
		return NULL;

	masks = kzalloc(nvec * sizeof(*masks), GFP_KERNEL);
	if (!masks)
		goto out;

	/* Stabilize the cpumasks */
	get_online_cpus();
	/* If the supplied affinity mask is NULL, use cpu online mask */
	if (!affinity)
		affinity = cpu_online_mask;

	nodes = get_nodes_in_cpumask(affinity, &nodemsk);

	/*
	 * If the number of nodes in the mask is less than or equal to the
	 * number of vectors, just spread the vectors across the nodes.
	 */
	if (nvec <= nodes) {
		for_each_node_mask(n, nodemsk) {
			cpumask_copy(masks + curvec, cpumask_of_node(n));
			if (++curvec == nvec)
				break;
		}
		goto outonl;
	}

	/* Spread the vectors per node */
	vecs_per_node = nvec / nodes;
	/* Account for rounding errors */
	extra_vecs = nvec - (nodes * vecs_per_node);

	for_each_node_mask(n, nodemsk) {
		int ncpus, v, vecs_to_assign = vecs_per_node;

		/* Get the cpus on this node which are in the mask */
		cpumask_and(nmsk, affinity, cpumask_of_node(n));

		/* Calculate the number of cpus per vector */
		ncpus = cpumask_weight(nmsk);

		for (v = 0; curvec < nvec && v < vecs_to_assign; curvec++, v++) {
			cpus_per_vec = ncpus / vecs_to_assign;

			/* Account for extra vectors to compensate rounding errors */
			if (extra_vecs) {
				cpus_per_vec++;
				if (!--extra_vecs)
					vecs_per_node++;
			}
			irq_spread_init_one(masks + curvec, nmsk, cpus_per_vec);
		}

		if (curvec >= nvec)
			break;
	}

outonl:
	put_online_cpus();
out:
	free_cpumask_var(nmsk);
	return masks;
}

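/*
 * Illustrative sketch of a caller, assuming a driver that wants one mask per
 * vector: the names example_show_vector_spread and nvec are hypothetical and
 * not part of the API above. The returned array comes from kzalloc(), so the
 * caller is expected to kfree() it once the per-vector masks have been
 * consumed.
 */
static void __maybe_unused example_show_vector_spread(int nvec)
{
	struct cpumask *masks;
	int i;

	masks = irq_create_affinity_masks(NULL, nvec);
	if (!masks)
		return;

	/* Print the CPU list assigned to each vector, then release the array */
	for (i = 0; i < nvec; i++)
		pr_info("vector %d -> CPUs %*pbl\n",
			i, cpumask_pr_args(&masks[i]));

	kfree(masks);
}
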
/**
 * irq_calc_affinity_vectors - Calculate the optimal number of vectors for a given affinity mask
 * @affinity:	The affinity mask to spread. If NULL cpu_online_mask is used
 * @maxvec:	The maximum number of vectors available
 */
int irq_calc_affinity_vectors(const struct cpumask *affinity, int maxvec)
{
	int cpus, ret;

	/* Stabilize the cpumasks */
	get_online_cpus();
	/* If the supplied affinity mask is NULL, use cpu online mask */
	if (!affinity)
		affinity = cpu_online_mask;

	cpus = cpumask_weight(affinity);
	ret = (cpus < maxvec) ? cpus : maxvec;

	put_online_cpus();
	return ret;
}

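/*
 * Illustrative sketch of a caller, assuming a driver that caps its interrupt
 * vector request at the number of CPUs able to service it: example_pick_nvec
 * and hw_max_vecs are hypothetical names, not part of this API.
 */
static int __maybe_unused example_pick_nvec(int hw_max_vecs)
{
	/* Never request more vectors than there are CPUs in the online mask */
	return irq_calc_affinity_vectors(NULL, hw_max_vecs);
}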