// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 */
#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;  /* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);
struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};
static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}
static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr modulo the number of cpus in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
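/*
 * Editorial note (illustration, not part of the original file): with four
 * CPUs in cpumask.pcpu, sequence numbers 0, 1, 2, 3, 4, ... yield cpu_index
 * 0, 1, 2, 3, 0, ..., so consecutive objects are spread round-robin over the
 * parallel CPUs and padata_index_to_cpu() maps each index to the n-th set
 * bit of the mask.
 */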
static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}
/*
 * This function is marked __ref because it may be optimized in such a way
 * that it directly refers to work_fn's address, which causes modpost to
 * complain when work_fn is marked __init. This scenario was observed with
 * clang LTO, where padata_work_init() was optimized to refer directly to
 * padata_mt_helper() because the calls to padata_work_init() with other
 * work_fn values were eliminated or inlined.
 */
static void __ref padata_work_init(struct padata_work *pw, work_func_t work_fn,
				   void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}
static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock_bh(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock_bh(&padata_works_lock);

	return i;
}
static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}
static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock_bh(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock_bh(&padata_works_lock);
}
static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}
/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell to run the job on
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and,
 *          if none is found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (cpumask_empty(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	refcount_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	if (!pw) {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
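/*
 * Example (editorial illustration, not part of the original file; my_request,
 * my_parallel, my_serial and do_heavy_work are hypothetical names): a user
 * embeds struct padata_priv in its own request, sets the parallel and serial
 * callbacks, and submits it; the parallel callback must eventually hand the
 * object to padata_do_serial().
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		do_heavy_work(padata);
 *		padata_do_serial(padata);
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial   = my_serial;
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 */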
/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}
static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived at
	 * the reorder queues in the meantime.
	 *
	 * Ensure the reorder queue is read after pd->lock is dropped so we
	 * see new objects from another task in padata_do_serial. Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}
static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}
static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}
/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;
	struct list_head *pos;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_prev(pos, &reorder->list) {
		cur = list_entry(pos, struct padata_priv, list);
		/* Compare by difference to consider integer wrap around */
		if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
			break;
	}
	list_add(&padata->list, pos);
	spin_unlock(&reorder->lock);
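	/*
	 * Editorial note (illustration, not part of the original file): the
	 * signed difference keeps the ordering correct across a seq_nr wrap,
	 * e.g. cur->seq_nr == UINT_MAX and padata->seq_nr == 0 gives
	 * (signed int)(UINT_MAX - 0) == -1 < 0, so the older object still
	 * sorts before the newer one.
	 */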
	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}
static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
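		/*
		 * Editorial note (illustration, not part of the original
		 * file): with chunk_size = 8, a job whose current start is 5
		 * gets roundup(6, 8) - 5 = 3 units on this pass, so the next
		 * pass starts at the aligned offset 8 and subsequent pieces
		 * are full 8-unit chunks until the tail.
		 */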
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}
/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks, nid;
	static atomic_t last_used_nid __initdata;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job = job;
	ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;
	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function.  Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 * Force chunk_size to at least 1 to prevent a divide-by-0 panic in
	 * padata_mt_helper() even if the caller sets min_chunk to 0.
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = max(ps.chunk_size, 1ul);
	ps.chunk_size = roundup(ps.chunk_size, job->align);
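	/*
	 * Editorial note (illustration, not part of the original file): for a
	 * job with size = 1 << 20, ps.nworks = 8 and load_balance_factor = 4,
	 * the starting value is (1 << 20) / 32 = 32768, which is then raised
	 * to at least min_chunk (and at least 1) and rounded up to the
	 * caller's alignment, so every helper works through several chunks
	 * instead of one large slice.
	 */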
	list_for_each_entry(pw, &works, pw_list)
		if (job->numa_aware) {
			int old_node = atomic_read(&last_used_nid);

			do {
				nid = next_node_in(old_node, node_states[N_CPU]);
			} while (!atomic_try_cmpxchg(&last_used_nid, &old_node, nid));
			queue_work_node(nid, system_unbound_wq, &pw->pw_work);
		} else {
			queue_work(system_unbound_wq, &pw->pw_work);
		}

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
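/*
 * Example (editorial illustration, not part of the original file; clear_range,
 * buf and len are hypothetical names): a boot-time caller that clears a large
 * buffer using all the helpers padata is willing to start.
 *
 *	static void __init clear_range(unsigned long start, unsigned long end,
 *				       void *arg)
 *	{
 *		memset((void *)start, 0, end - start);
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn   = clear_range,
 *		.fn_arg      = NULL,
 *		.start       = (unsigned long)buf,
 *		.size        = len,
 *		.align       = SMP_CACHE_BYTES,
 *		.min_chunk   = PAGE_SIZE,
 *		.max_threads = num_online_cpus(),
 *	};
 *
 *	padata_do_multithreaded(&job);
 */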
static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}
/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}
/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}
/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	refcount_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}
static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}
static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;
}
/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}
static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (refcount_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}
static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid)
		__padata_stop(pinst);

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}
/**
 * padata_set_cpumask - Replace the cpumask selected by @cpumask_type with
 *                      the value of @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or the parallel cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	cpus_read_lock();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	cpus_read_unlock();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
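/*
 * Example (editorial illustration, not part of the original file): restrict
 * the parallel workers of an instance to CPUs 0 and 1.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_clear(mask);
 *	cpumask_set_cpu(0, mask);
 *	cpumask_set_cpu(1, mask);
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */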
#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}
static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}
static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}
static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}
static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif
static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}
#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)
static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}
struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};
static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}
#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)
PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
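/*
 * Editorial note (illustration, not part of the original file): when the user
 * of an instance adds pinst->kobj to sysfs, as the pcrypt module does under
 * /sys/kernel/pcrypt/, the masks can be read and written from user space,
 * e.g. "echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask".
 */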
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);
static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}
static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}
static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static const struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};
/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	cpus_read_lock();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	cpus_read_unlock();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	cpus_read_unlock();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);
/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);
/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	cpus_read_lock();
	pd = padata_alloc_pd(ps);
	cpus_read_unlock();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);
/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct parallel_data *pd;

	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	pd = rcu_dereference_protected(ps->pd, 1);
	if (refcount_dec_and_test(&pd->refcnt))
		padata_free_pd(pd);
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);
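/*
 * Example lifecycle (editorial illustration, not part of the original file;
 * "my_instance" is a hypothetical name):
 *
 *	struct padata_instance *pinst = padata_alloc("my_instance");
 *	struct padata_shell *ps = padata_alloc_shell(pinst);
 *
 *	// submit work with padata_do_parallel(ps, ...); each object must
 *	// eventually reach padata_do_serial()
 *
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */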
void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}