// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for an api documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#define MAX_OBJ_NUM 1000

static void padata_free_pd(struct parallel_data *pd);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
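
/*
 * For example, with four CPUs in cpumask.pcpu, sequence numbers
 * 0, 1, 2, 3, 4, 5, ... hash to cpu indices 0, 1, 2, 3, 0, 1, ...,
 * so objects are spread round-robin over the parallel cpumask.
 */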

static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata_shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on. If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (!cpumask_weight(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	padata->seq_nr = atomic_inc_return(&pd->seq_nr);
	target_cpu = padata_cpu_hash(pd, padata->seq_nr);
	padata->cpu = target_cpu;
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work(pinst->parallel_wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
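
/*
 * A minimal submission sketch (illustrative only; struct my_request,
 * my_submit, my_parallel and my_serial are hypothetical and not part of
 * this file). The control structure is embedded in the caller's own
 * request structure:
 *
 *	struct my_request {
 *		struct padata_priv padata;	// must be embedded
 *		...
 *	};
 *
 *	static int my_submit(struct padata_shell *ps, struct my_request *req)
 *	{
 *		int err, cb_cpu;
 *
 *		req->padata.parallel = my_parallel;
 *		req->padata.serial = my_serial;
 *
 *		cb_cpu = get_cpu();	// serialize back on the submitting CPU
 *		err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 *		put_cpu();
 *
 *		// on -EBUSY or -EINVAL the caller must run the job itself
 *		return err;
 *	}
 */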

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	next_queue = per_cpu_ptr(pd->pqueue, cpu);
	reorder = &next_queue->reorder;

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_parallel_queue *next_queue;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb__after_atomic in padata_do_serial.
	 */
	smp_mb();

	next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
	if (!list_empty(&next_queue->reorder.list) &&
	    padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (atomic_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
							   padata->cpu);
	struct padata_priv *cur;

	spin_lock(&pqueue->reorder.lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_entry_reverse(cur, &pqueue->reorder.list, list)
		if (cur->seq_nr < padata->seq_nr)
			break;
	list_add(&padata->list, &cur->list);
	atomic_inc(&pd->reorder_objects);
	spin_unlock(&pqueue->reorder.lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb__after_atomic();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
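
/*
 * Continuing the illustrative sketch from padata_do_parallel above
 * (my_parallel and my_serial are hypothetical): the parallel callback does
 * the heavy work and then hands the object back, and the serial callback
 * later runs on @cb_cpu in submission order.
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req =
 *			container_of(padata, struct my_request, padata);
 *
 *		do_expensive_work(req);		// runs with BHs off
 *		padata_do_serial(padata);	// mandatory hand-off
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		struct my_request *req =
 *			container_of(padata, struct my_request, padata);
 *
 *		complete(&req->done);		// results delivered in order
 *	}
 */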

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}

static int pd_setup_cpumasks(struct parallel_data *pd,
			     const struct cpumask *pcpumask,
			     const struct cpumask *cbcpumask)
{
	int err = -ENOMEM;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto out;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto free_pcpu_mask;

	cpumask_copy(pd->cpumask.pcpu, pcpumask);
	cpumask_copy(pd->cpumask.cbcpu, cbcpumask);

	return 0;

free_pcpu_mask:
	free_cpumask_var(pd->cpumask.pcpu);
out:
	return err;
}

static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	const struct cpumask *cbcpumask;
	const struct cpumask *pcpumask;
	struct parallel_data *pd;

	cbcpumask = pinst->rcpumask.cbcpu;
	pcpumask = pinst->rcpumask.pcpu;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;

	pd->ps = ps;
	if (pd_setup_cpumasks(pd, pcpumask, cbcpumask))
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
	int notification_mask = 0;
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	cpumask_copy(pinst->omask, pinst->rcpumask.pcpu);
	cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu,
		    cpu_online_mask);
	if (!cpumask_equal(pinst->omask, pinst->rcpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;

	cpumask_copy(pinst->omask, pinst->rcpumask.cbcpu);
	cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu,
		    cpu_online_mask);
	if (!cpumask_equal(pinst->omask, pinst->rcpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (atomic_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pinst->cpumask);

	pinst->flags &= ~PADATA_RESET;

	return err;
}

/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                                    if either the pcpu or the cbcpu cpumask
 *                                    (or both) changes.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);
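
/*
 * Illustrative registration sketch (my_cpumask_change and my_nblock are
 * hypothetical names). @val is a mask of PADATA_CPU_PARALLEL and/or
 * PADATA_CPU_SERIAL, @data points to the instance's struct padata_cpumask
 * holding the new masks:
 *
 *	static int my_cpumask_change(struct notifier_block *self,
 *				     unsigned long val, void *data)
 *	{
 *		struct padata_cpumask *new_masks = data;
 *
 *		if (val & PADATA_CPU_PARALLEL)
 *			;	// react to new_masks->pcpu
 *		if (val & PADATA_CPU_SERIAL)
 *			;	// react to new_masks->cbcpu
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nblock = {
 *		.notifier_call = my_cpumask_change,
 *	};
 *
 *	padata_register_cpumask_notifier(pinst, &my_nblock);
 */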

/**
 * padata_unregister_cpumask_notifier - Unregisters a cpumask notifier
 *                                      registered earlier using
 *                                      padata_register_cpumask_notifier
 *
 * @pinst: A pointer to padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}

/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value given by @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or the parallel cpumask respectively.
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	get_online_cpus();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	put_online_cpus();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
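
/*
 * A minimal caller sketch, assuming an already allocated @pinst: restrict
 * parallel workers to the online CPUs except CPU 0 (illustrative only):
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(mask, cpu_online_mask);
 *	cpumask_clear_cpu(0, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */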

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;
	else
		__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}

/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed.
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	padata_stop(pinst);
	free_cpumask_var(pinst->omask);
	free_cpumask_var(pinst->rcpumask.cbcpu);
	free_cpumask_var(pinst->rcpumask.pcpu);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);
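
/*
 * The instance kobject is embedded in struct padata_instance; where a user
 * of this API exposes it under sysfs is up to that user. Assuming it is
 * added somewhere under /sys, the masks are read and written as plain
 * bitmaps, e.g. (illustrative path):
 *
 *	# cat .../parallel_cpumask
 *	# echo f > .../parallel_cpumask
 */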

static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @name: used to identify the instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
static struct padata_instance *padata_alloc(const char *name,
					    const struct cpumask *pcpumask,
					    const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	get_online_cpus();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL))
		goto err_free_masks;
	if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL))
		goto err_free_rcpumask_pcpu;
	if (!alloc_cpumask_var(&pinst->omask, GFP_KERNEL))
		goto err_free_rcpumask_cbcpu;

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
	cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask);
	cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_omask;

	pinst->flags = 0;

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	put_online_cpus();

	return pinst;

err_free_omask:
	free_cpumask_var(pinst->omask);
err_free_rcpumask_cbcpu:
	free_cpumask_var(pinst->rcpumask.cbcpu);
err_free_rcpumask_pcpu:
	free_cpumask_var(pinst->rcpumask.pcpu);
err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	put_online_cpus();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @name: used to identify the instance
 */
struct padata_instance *padata_alloc_possible(const char *name)
{
	return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	get_online_cpus();
	pd = padata_alloc_pd(ps);
	put_online_cpus();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);
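
/*
 * Putting the pieces together, a minimal lifecycle sketch (illustrative;
 * "my_inst" is a placeholder name and error handling is omitted):
 *
 *	struct padata_instance *pinst = padata_alloc_possible("my_inst");
 *	struct padata_shell *ps = padata_alloc_shell(pinst);
 *
 *	padata_start(pinst);
 *	// submit work with padata_do_parallel(ps, ...); each object must
 *	// eventually reach padata_do_serial()
 *	padata_stop(pinst);
 *
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */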

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;

	mutex_lock(&pinst->lock);
	list_del(&ps->list);
	padata_free_pd(rcu_dereference_protected(ps->pd, 1));
	mutex_unlock(&pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);

#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		return ret;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0) {
		cpuhp_remove_multi_state(hp_online);
		return ret;
	}
	return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
	cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);

#endif