/*
 * Copyright(c) 2015 - 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <linux/topology.h>
#include <linux/cpumask.h>
#include <linux/module.h>
#include <linux/interrupt.h>

/* Driver-local headers (hfi1_devdata, MSI-X entries, SDMA and trace helpers) */
#include "hfi.h"
#include "affinity.h"
#include "sdma.h"
#include "trace.h"
struct hfi1_affinity_node_list node_affinity = {
	.list = LIST_HEAD_INIT(node_affinity.list),
	.lock = __MUTEX_INITIALIZER(node_affinity.lock)
};
/* Name of IRQ types, indexed by enum irq_type */
static const char * const irq_type_names[] = {
	"SDMA",
	"RCVCTXT",
	"GENERAL",
	"OTHER",
};

/* Per NUMA node count of HFI devices */
static unsigned int *hfi1_per_node_cntr;
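/*
 * Reset a cpu_mask_set: clear both the base mask and the 'used'
 * accounting mask and restart the generation counter.
 */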
static inline void init_cpu_mask_set(struct cpu_mask_set *set)
{
	cpumask_clear(&set->mask);
	cpumask_clear(&set->used);
	set->gen = 0;
}
/* Initialize non-HT cpu cores mask */
void init_real_cpu_mask(void)
{
	int possible, curr_cpu, i, ht;

	cpumask_clear(&node_affinity.real_cpu_mask);

	/* Start with cpu online mask as the real cpu mask */
	cpumask_copy(&node_affinity.real_cpu_mask, cpu_online_mask);

	/*
	 * Remove HT cores from the real cpu mask.  Do this in two steps below.
	 */
	possible = cpumask_weight(&node_affinity.real_cpu_mask);
	ht = cpumask_weight(topology_sibling_cpumask(
				cpumask_first(&node_affinity.real_cpu_mask)));
	/*
	 * Step 1.  Skip over the first N HT siblings and use them as the
	 * "real" cores.  Assumes that HT cores are not enumerated in
	 * succession (except in the single core case).
	 */
	curr_cpu = cpumask_first(&node_affinity.real_cpu_mask);
	for (i = 0; i < possible / ht; i++)
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	/*
	 * Step 2.  Remove the remaining HT siblings.  Use cpumask_next() to
	 * skip any gaps.
	 */
	for (; i < possible; i++) {
		cpumask_clear_cpu(curr_cpu, &node_affinity.real_cpu_mask);
		curr_cpu = cpumask_next(curr_cpu, &node_affinity.real_cpu_mask);
	}
}
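/*
 * Set up the global node_affinity state at driver load: initialize the
 * process affinity mask from the online CPUs and count the HFI devices
 * present on each NUMA node.
 */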
int node_affinity_init(void)
{
	int node;
	struct pci_dev *dev = NULL;
	const struct pci_device_id *ids = hfi1_pci_tbl;

	cpumask_clear(&node_affinity.proc.used);
	cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);

	node_affinity.proc.gen = 0;
	node_affinity.num_core_siblings =
				cpumask_weight(topology_sibling_cpumask(
					cpumask_first(&node_affinity.proc.mask)
					));
	node_affinity.num_possible_nodes = num_possible_nodes();
	node_affinity.num_online_nodes = num_online_nodes();
	node_affinity.num_online_cpus = num_online_cpus();

	/*
	 * The real cpu mask is part of the affinity struct but it has to be
	 * initialized early. It is needed to calculate the number of user
	 * contexts in set_up_context_variables().
	 */
	init_real_cpu_mask();

	hfi1_per_node_cntr = kcalloc(node_affinity.num_possible_nodes,
				     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
	if (!hfi1_per_node_cntr)
		return -ENOMEM;

	while (ids->vendor) {
		dev = NULL;
		while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
			node = pcibus_to_node(dev->bus);
			if (node < 0)
				node = numa_node_id();

			hfi1_per_node_cntr[node]++;
		}
		ids++;
	}

	return 0;
}
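/*
 * Tear down the global affinity state: free every per-node entry and
 * the per-node device counters.
 */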
void node_affinity_destroy(void)
{
	struct list_head *pos, *q;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	list_for_each_safe(pos, q, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node,
				   list);
		list_del(pos);
		kfree(entry);
	}
	mutex_unlock(&node_affinity.lock);
	kfree(hfi1_per_node_cntr);
}
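/*
 * Allocate and initialize an affinity entry for the given NUMA node.
 * Returns NULL on allocation failure.
 */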
static struct hfi1_affinity_node *node_affinity_allocate(int node)
{
	struct hfi1_affinity_node *entry;

	entry = kzalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		return NULL;
	entry->node = node;
	INIT_LIST_HEAD(&entry->list);

	return entry;
}
/*
 * It appends an entry to the list.
 * It *must* be called with node_affinity.lock held.
 */
static void node_affinity_add_tail(struct hfi1_affinity_node *entry)
{
	list_add_tail(&entry->list, &node_affinity.list);
}
/* It must be called with node_affinity.lock held */
static struct hfi1_affinity_node *node_affinity_lookup(int node)
{
	struct list_head *pos;
	struct hfi1_affinity_node *entry;

	list_for_each(pos, &node_affinity.list) {
		entry = list_entry(pos, struct hfi1_affinity_node, list);
		if (entry->node == node)
			return entry;
	}

	return NULL;
}
/*
 * Interrupt affinity.
 *
 * non-rcv avail gets a default mask that
 * starts as possible cpus with threads reset
 * and each rcv avail reset.
 *
 * rcv avail gets node relative 1 wrapping back
 * to the node relative 1 as necessary.
 *
 */
int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
{
	int node = pcibus_to_node(dd->pcidev->bus);
	struct hfi1_affinity_node *entry;
	const struct cpumask *local_mask;
	int curr_cpu, possible, i;

	if (node < 0)
		node = numa_node_id();
	dd->node = node;

	local_mask = cpumask_of_node(dd->node);
	if (cpumask_first(local_mask) >= nr_cpu_ids)
		local_mask = topology_core_cpumask(0);

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	/*
	 * If this is the first time this NUMA node's affinity is used,
	 * create an entry in the global affinity structure and initialize it.
	 */
	if (!entry) {
		entry = node_affinity_allocate(node);
		if (!entry) {
			dd_dev_err(dd,
				   "Unable to allocate global affinity node\n");
			mutex_unlock(&node_affinity.lock);
			return -ENOMEM;
		}
		init_cpu_mask_set(&entry->def_intr);
		init_cpu_mask_set(&entry->rcv_intr);
		cpumask_clear(&entry->general_intr_mask);
		/* Use the "real" cpu mask of this node as the default */
		cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
			    local_mask);

		/* fill in the receive list */
		possible = cpumask_weight(&entry->def_intr.mask);
		curr_cpu = cpumask_first(&entry->def_intr.mask);

		if (possible == 1) {
			/* only one CPU, everyone will use it */
			cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
		} else {
			/*
			 * The general/control context will be the first CPU in
			 * the default list, so it is removed from the default
			 * list and added to the general interrupt list.
			 */
			cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
			cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
			curr_cpu = cpumask_next(curr_cpu,
						&entry->def_intr.mask);

			/*
			 * Remove the remaining kernel receive queues from
			 * the default list and add them to the receive list.
			 */
			for (i = 0;
			     i < (dd->n_krcv_queues - 1) *
				  hfi1_per_node_cntr[dd->node];
			     i++) {
				cpumask_clear_cpu(curr_cpu,
						  &entry->def_intr.mask);
				cpumask_set_cpu(curr_cpu,
						&entry->rcv_intr.mask);
				curr_cpu = cpumask_next(curr_cpu,
							&entry->def_intr.mask);
				if (curr_cpu >= nr_cpu_ids)
					break;
			}

			/*
			 * If there ends up being 0 CPU cores leftover for SDMA
			 * engines, use the same CPU cores as general/control
			 * context.
			 */
			if (cpumask_weight(&entry->def_intr.mask) == 0)
				cpumask_copy(&entry->def_intr.mask,
					     &entry->general_intr_mask);
		}

		node_affinity_add_tail(entry);
	}
	mutex_unlock(&node_affinity.lock);
	return 0;
}
/*
 * Function updates the irq affinity hint for msix after it has been changed
 * by the user using the /proc/irq interface. This function only accepts
 * one cpu in the mask.
 */
static void hfi1_update_sdma_affinity(struct hfi1_msix_entry *msix, int cpu)
{
	struct sdma_engine *sde = msix->arg;
	struct hfi1_devdata *dd = sde->dd;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set;
	int i, old_cpu;

	if (cpu > num_online_cpus() || cpu == sde->cpu)
		return;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);
	if (!entry)
		goto unlock;

	old_cpu = sde->cpu;
	sde->cpu = cpu;
	cpumask_clear(&msix->mask);
	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_dbg(dd, "IRQ: %u, type %s engine %u -> cpu: %d\n",
		   msix->irq, irq_type_names[msix->type],
		   sde->this_idx, cpu);
	irq_set_affinity_hint(msix->irq, &msix->mask);

	/*
	 * Set the new cpu in the hfi1_affinity_node and clean
	 * the old cpu if it is not used by any other IRQ
	 */
	set = &entry->def_intr;
	cpumask_set_cpu(cpu, &set->mask);
	cpumask_set_cpu(cpu, &set->used);
	for (i = 0; i < dd->num_msix_entries; i++) {
		struct hfi1_msix_entry *other_msix;

		other_msix = &dd->msix_entries[i];
		if (other_msix->type != IRQ_SDMA || other_msix == msix)
			continue;

		if (cpumask_test_cpu(old_cpu, &other_msix->mask))
			goto unlock;
	}
	cpumask_clear_cpu(old_cpu, &set->mask);
	cpumask_clear_cpu(old_cpu, &set->used);
unlock:
	mutex_unlock(&node_affinity.lock);
}
static void hfi1_irq_notifier_notify(struct irq_affinity_notify *notify,
				     const cpumask_t *mask)
{
	int cpu = cpumask_first(mask);
	struct hfi1_msix_entry *msix = container_of(notify,
						    struct hfi1_msix_entry,
						    notify);

	/* Only one CPU configuration supported currently */
	hfi1_update_sdma_affinity(msix, cpu);
}
static void hfi1_irq_notifier_release(struct kref *ref)
{
	/*
	 * This is required by affinity notifier. We don't have anything to
	 * free here.
	 */
}
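/*
 * Hook an affinity notifier onto an SDMA MSI-X vector so that changes
 * made through /proc/irq are reflected in the driver's accounting.
 */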
static void hfi1_setup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	struct irq_affinity_notify *notify = &msix->notify;

	notify->irq = msix->irq;
	notify->notify = hfi1_irq_notifier_notify;
	notify->release = hfi1_irq_notifier_release;

	if (irq_set_affinity_notifier(notify->irq, notify))
		pr_err("Failed to register sdma irq affinity notifier for irq %d\n",
		       notify->irq);
}
static void hfi1_cleanup_sdma_notifier(struct hfi1_msix_entry *msix)
{
	struct irq_affinity_notify *notify = &msix->notify;

	if (irq_set_affinity_notifier(notify->irq, NULL))
		pr_err("Failed to cleanup sdma irq affinity notifier for irq %d\n",
		       notify->irq);
}
/*
 * Function sets the irq affinity for msix.
 * It *must* be called with node_affinity.lock held.
 */
static int get_irq_affinity(struct hfi1_devdata *dd,
			    struct hfi1_msix_entry *msix)
{
	cpumask_var_t diff;
	struct hfi1_affinity_node *entry;
	struct cpu_mask_set *set = NULL;
	struct sdma_engine *sde = NULL;
	struct hfi1_ctxtdata *rcd = NULL;
	char extra[64];
	int cpu = -1;
	int ret;

	extra[0] = '\0';
	cpumask_clear(&msix->mask);

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		return -ENOMEM;

	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		sde = (struct sdma_engine *)msix->arg;
		scnprintf(extra, 64, "engine %u", sde->this_idx);
		set = &entry->def_intr;
		break;
	case IRQ_GENERAL:
		cpu = cpumask_first(&entry->general_intr_mask);
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		if (rcd->ctxt == HFI1_CTRL_CTXT)
			cpu = cpumask_first(&entry->general_intr_mask);
		else
			set = &entry->rcv_intr;
		scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
		break;
	default:
		dd_dev_err(dd, "Invalid IRQ type %d\n", msix->type);
		return -EINVAL;
	}

	/*
	 * The general and control contexts are placed on a particular
	 * CPU, which is set above. Skip accounting for it. Everything else
	 * finds its CPU here.
	 */
	if (cpu == -1 && set) {
		if (cpumask_equal(&set->mask, &set->used)) {
			/*
			 * We've used up all the CPUs, bump up the generation
			 * and reset the 'used' map
			 */
			set->gen++;
			cpumask_clear(&set->used);
		}
		cpumask_andnot(diff, &set->mask, &set->used);
		cpu = cpumask_first(diff);
		cpumask_set_cpu(cpu, &set->used);
	}

	cpumask_set_cpu(cpu, &msix->mask);
	dd_dev_info(dd, "IRQ: %u, type %s %s -> cpu: %d\n",
		    msix->irq, irq_type_names[msix->type],
		    extra, cpu);
	irq_set_affinity_hint(msix->irq, &msix->mask);

	if (msix->type == IRQ_SDMA) {
		sde->cpu = cpu;
		hfi1_setup_sdma_notifier(msix);
	}

	free_cpumask_var(diff);
	return 0;
}
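/* Locked wrapper around get_irq_affinity() for callers outside this file. */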
int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
{
	int ret;

	mutex_lock(&node_affinity.lock);
	ret = get_irq_affinity(dd, msix);
	mutex_unlock(&node_affinity.lock);
	return ret;
}
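/*
 * Undo the CPU accounting done by hfi1_get_irq_affinity() and drop the
 * affinity hint for this MSI-X vector.
 */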
void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
			   struct hfi1_msix_entry *msix)
{
	struct cpu_mask_set *set = NULL;
	struct hfi1_ctxtdata *rcd;
	struct hfi1_affinity_node *entry;

	mutex_lock(&node_affinity.lock);
	entry = node_affinity_lookup(dd->node);

	switch (msix->type) {
	case IRQ_SDMA:
		set = &entry->def_intr;
		hfi1_cleanup_sdma_notifier(msix);
		break;
	case IRQ_GENERAL:
		/* Don't do accounting for general contexts */
		break;
	case IRQ_RCVCTXT:
		rcd = (struct hfi1_ctxtdata *)msix->arg;
		/* Don't do accounting for control contexts */
		if (rcd->ctxt != HFI1_CTRL_CTXT)
			set = &entry->rcv_intr;
		break;
	default:
		mutex_unlock(&node_affinity.lock);
		return;
	}

	if (set) {
		cpumask_andnot(&set->used, &set->used, &msix->mask);
		if (cpumask_empty(&set->used) && set->gen) {
			set->gen--;
			cpumask_copy(&set->used, &set->mask);
		}
	}

	irq_set_affinity_hint(msix->irq, NULL);
	cpumask_clear(&msix->mask);
	mutex_unlock(&node_affinity.lock);
}
/* This should be called with node_affinity.lock held */
static void find_hw_thread_mask(uint hw_thread_no, cpumask_var_t hw_thread_mask,
				struct hfi1_affinity_node_list *affinity)
{
	int possible, curr_cpu, i;
	uint num_cores_per_socket = node_affinity.num_online_cpus /
					affinity->num_core_siblings /
						node_affinity.num_online_nodes;

	cpumask_copy(hw_thread_mask, &affinity->proc.mask);
	if (affinity->num_core_siblings > 0) {
		/* Removing other siblings not needed for now */
		possible = cpumask_weight(hw_thread_mask);
		curr_cpu = cpumask_first(hw_thread_mask);
		for (i = 0;
		     i < num_cores_per_socket * node_affinity.num_online_nodes;
		     i++)
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);

		for (; i < possible; i++) {
			cpumask_clear_cpu(curr_cpu, hw_thread_mask);
			curr_cpu = cpumask_next(curr_cpu, hw_thread_mask);
		}

		/* Identifying correct HW threads within physical cores */
		cpumask_shift_left(hw_thread_mask, hw_thread_mask,
				   num_cores_per_socket *
				   node_affinity.num_online_nodes *
				   hw_thread_no);
	}
}
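/*
 * Recommend a CPU for a user process opening a context on an HFI whose
 * preferred NUMA node is 'node'.  Returns the chosen CPU, or -1 if no
 * recommendation could be made.
 */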
int hfi1_get_proc_affinity(int node)
{
	int cpu = -1, ret, i;
	struct hfi1_affinity_node *entry;
	cpumask_var_t diff, hw_thread_mask, available_mask, intrs_mask;
	const struct cpumask *node_mask,
		*proc_mask = &current->cpus_allowed;
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	/*
	 * check whether process/context affinity has already
	 * been set
	 */
	if (cpumask_weight(proc_mask) == 1) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		/*
		 * Mark the pre-set CPU as used. This is atomic so we don't
		 * need the lock
		 */
		cpu = cpumask_first(proc_mask);
		cpumask_set_cpu(cpu, &set->used);
		goto done;
	} else if (cpumask_weight(proc_mask) < cpumask_weight(&set->mask)) {
		hfi1_cdbg(PROC, "PID %u %s affinity set to CPU set(s) %*pbl",
			  current->pid, current->comm,
			  cpumask_pr_args(proc_mask));
		goto done;
	}

	/*
	 * The process does not have a preset CPU affinity so find one to
	 * recommend using the following algorithm:
	 *
	 * For each user process that is opening a context on HFI Y:
	 *  a) If all cores are filled, reinitialize the bitmask
	 *  b) Fill real cores first, then HT cores (First set of HT
	 *     cores on all physical cores, then second set of HT core,
	 *     and, so on) in the following order:
	 *
	 *     1. Same NUMA node as HFI Y and not running an IRQ
	 *        handler
	 *     2. Same NUMA node as HFI Y and running an IRQ handler
	 *     3. Different NUMA node to HFI Y and not running an IRQ
	 *        handler
	 *     4. Different NUMA node to HFI Y and running an IRQ
	 *        handler
	 *  c) Mark core as filled in the bitmask. As user processes are
	 *     done, clear cores from the bitmask.
	 */

	ret = zalloc_cpumask_var(&diff, GFP_KERNEL);
	if (!ret)
		goto done;
	ret = zalloc_cpumask_var(&hw_thread_mask, GFP_KERNEL);
	if (!ret)
		goto free_diff;
	ret = zalloc_cpumask_var(&available_mask, GFP_KERNEL);
	if (!ret)
		goto free_hw_thread_mask;
	ret = zalloc_cpumask_var(&intrs_mask, GFP_KERNEL);
	if (!ret)
		goto free_available_mask;

	mutex_lock(&affinity->lock);
	/*
	 * If we've used all available HW threads, clear the mask and start
	 * overloading.
	 */
	if (cpumask_equal(&set->mask, &set->used)) {
		set->gen++;
		cpumask_clear(&set->used);
	}

	/*
	 * If NUMA node has CPUs used by interrupt handlers, include them in the
	 * interrupt handler mask.
	 */
	entry = node_affinity_lookup(node);
	if (entry) {
		cpumask_copy(intrs_mask, (entry->def_intr.gen ?
					  &entry->def_intr.mask :
					  &entry->def_intr.used));
		cpumask_or(intrs_mask, intrs_mask, (entry->rcv_intr.gen ?
						    &entry->rcv_intr.mask :
						    &entry->rcv_intr.used));
		cpumask_or(intrs_mask, intrs_mask, &entry->general_intr_mask);
	}
	hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
		  cpumask_pr_args(intrs_mask));

	cpumask_copy(hw_thread_mask, &set->mask);

	/*
	 * If HT cores are enabled, identify which HW threads within the
	 * physical cores should be used.
	 */
	if (affinity->num_core_siblings > 0) {
		for (i = 0; i < affinity->num_core_siblings; i++) {
			find_hw_thread_mask(i, hw_thread_mask, affinity);

			/*
			 * If there's at least one available core for this HW
			 * thread number, stop looking for a core.
			 *
			 * diff will always be not empty at least once in this
			 * loop as the used mask gets reset when
			 * (set->mask == set->used) before this loop.
			 */
			cpumask_andnot(diff, hw_thread_mask, &set->used);
			if (!cpumask_empty(diff))
				break;
		}
	}
	hfi1_cdbg(PROC, "Same available HW thread on all physical CPUs: %*pbl",
		  cpumask_pr_args(hw_thread_mask));

	node_mask = cpumask_of_node(node);
	hfi1_cdbg(PROC, "Device on NUMA %u, CPUs %*pbl", node,
		  cpumask_pr_args(node_mask));

	/* Get cpumask of available CPUs on preferred NUMA */
	cpumask_and(available_mask, hw_thread_mask, node_mask);
	cpumask_andnot(available_mask, available_mask, &set->used);
	hfi1_cdbg(PROC, "Available CPUs on NUMA %u: %*pbl", node,
		  cpumask_pr_args(available_mask));

	/*
	 * At first, we don't want to place processes on the same
	 * CPUs as interrupt handlers. Then, CPUs running interrupt
	 * handlers are used.
	 *
	 * 1) If diff is not empty, then there are CPUs not running
	 *    interrupt handlers available, so diff gets copied
	 *    over to available_mask.
	 * 2) If diff is empty, then all CPUs not running interrupt
	 *    handlers are taken, so available_mask contains all
	 *    available CPUs running interrupt handlers.
	 * 3) If available_mask is empty, then all CPUs on the
	 *    preferred NUMA node are taken, so other NUMA nodes are
	 *    used for process assignments using the same method as
	 *    the preferred NUMA node.
	 */
	cpumask_andnot(diff, available_mask, intrs_mask);
	if (!cpumask_empty(diff))
		cpumask_copy(available_mask, diff);

	/* If we don't have CPUs on the preferred node, use other NUMA nodes */
	if (cpumask_empty(available_mask)) {
		cpumask_andnot(available_mask, hw_thread_mask, &set->used);
		/* Excluding preferred NUMA cores */
		cpumask_andnot(available_mask, available_mask, node_mask);
		hfi1_cdbg(PROC,
			  "Preferred NUMA node cores are taken, cores available in other NUMA nodes: %*pbl",
			  cpumask_pr_args(available_mask));

		/*
		 * At first, we don't want to place processes on the same
		 * CPUs as interrupt handlers.
		 */
		cpumask_andnot(diff, available_mask, intrs_mask);
		if (!cpumask_empty(diff))
			cpumask_copy(available_mask, diff);
	}
	hfi1_cdbg(PROC, "Possible CPUs for process: %*pbl",
		  cpumask_pr_args(available_mask));

	cpu = cpumask_first(available_mask);
	if (cpu >= nr_cpu_ids) /* empty */
		cpu = -1;
	else
		cpumask_set_cpu(cpu, &set->used);

	mutex_unlock(&affinity->lock);
	hfi1_cdbg(PROC, "Process assigned to CPU %d", cpu);

	free_cpumask_var(intrs_mask);
free_available_mask:
	free_cpumask_var(available_mask);
free_hw_thread_mask:
	free_cpumask_var(hw_thread_mask);
free_diff:
	free_cpumask_var(diff);
done:
	return cpu;
}
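/* Return a CPU previously handed out by hfi1_get_proc_affinity() to the pool. */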
void hfi1_put_proc_affinity(int cpu)
{
	struct hfi1_affinity_node_list *affinity = &node_affinity;
	struct cpu_mask_set *set = &affinity->proc;

	if (cpu < 0)
		return;

	mutex_lock(&affinity->lock);
	cpumask_clear_cpu(cpu, &set->used);
	hfi1_cdbg(PROC, "Returning CPU %d for future process assignment", cpu);
	if (cpumask_empty(&set->used) && set->gen) {
		set->gen--;
		cpumask_copy(&set->used, &set->mask);
	}
	mutex_unlock(&affinity->lock);
}