// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * Copyright (c) 2020 Oracle and/or its affiliates.
 * Author: Daniel Jordan <daniel.m.jordan@oracle.com>
 */

#include <linux/completion.h>
#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#define	PADATA_WORK_ONSTACK	1	/* Work's memory is on stack */

struct padata_work {
	struct work_struct	pw_work;
	struct list_head	pw_list;	/* padata_free_works linkage */
	void			*pw_data;
};

static DEFINE_SPINLOCK(padata_works_lock);
static struct padata_work *padata_works;
static LIST_HEAD(padata_free_works);

struct padata_mt_job_state {
	spinlock_t		lock;
	struct completion	completion;
	struct padata_mt_job	*job;
	int			nworks;
	int			nworks_fini;
	unsigned long		chunk_size;
};
static void padata_free_pd(struct parallel_data *pd);
static void __init padata_mt_helper(struct work_struct *work);
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}
static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
	/*
	 * Hash the sequence number to a CPU by taking seq_nr modulo the
	 * number of CPUs in use.
	 */
	int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
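/*
 * Worked example (illustrative numbers, not from the original source): with
 * pd->cpumask.pcpu = {0, 2, 4} (weight 3) and seq_nr = 7, cpu_index is
 * 7 % 3 = 1, and padata_index_to_cpu() walks one step past the first CPU in
 * the mask, so the object hashes to CPU 2.
 */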
static struct padata_work *padata_work_alloc(void)
{
	struct padata_work *pw;

	lockdep_assert_held(&padata_works_lock);

	if (list_empty(&padata_free_works))
		return NULL;	/* No more work items allowed to be queued. */

	pw = list_first_entry(&padata_free_works, struct padata_work, pw_list);
	list_del(&pw->pw_list);
	return pw;
}
/*
 * This function is marked __ref because it may be optimized in such a way
 * that it directly refers to work_fn's address, which causes modpost to
 * complain when work_fn is marked __init. This scenario was observed with
 * clang LTO, where padata_work_init() was optimized to refer directly to
 * padata_mt_helper() because the calls to padata_work_init() with other
 * work_fn values were eliminated or inlined.
 */
static void __ref padata_work_init(struct padata_work *pw, work_func_t work_fn,
				   void *data, int flags)
{
	if (flags & PADATA_WORK_ONSTACK)
		INIT_WORK_ONSTACK(&pw->pw_work, work_fn);
	else
		INIT_WORK(&pw->pw_work, work_fn);
	pw->pw_data = data;
}
static int __init padata_work_alloc_mt(int nworks, void *data,
				       struct list_head *head)
{
	int i;

	spin_lock_bh(&padata_works_lock);
	/* Start at 1 because the current task participates in the job. */
	for (i = 1; i < nworks; ++i) {
		struct padata_work *pw = padata_work_alloc();

		if (!pw)
			break;
		padata_work_init(pw, padata_mt_helper, data, 0);
		list_add(&pw->pw_list, head);
	}
	spin_unlock_bh(&padata_works_lock);

	return i;
}
static void padata_work_free(struct padata_work *pw)
{
	lockdep_assert_held(&padata_works_lock);
	list_add(&pw->pw_list, &padata_free_works);
}
static void __init padata_works_free(struct list_head *works)
{
	struct padata_work *cur, *next;

	if (list_empty(works))
		return;

	spin_lock_bh(&padata_works_lock);
	list_for_each_entry_safe(cur, next, works, pw_list) {
		list_del(&cur->pw_list);
		padata_work_free(cur);
	}
	spin_unlock_bh(&padata_works_lock);
}
static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_work *pw = container_of(parallel_work, struct padata_work,
					      pw_work);
	struct padata_priv *padata = pw->pw_data;

	local_bh_disable();
	padata->parallel(padata);
	spin_lock(&padata_works_lock);
	padata_work_free(pw);
	spin_unlock(&padata_works_lock);
	local_bh_enable();
}
/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
		       struct padata_priv *padata, int *cb_cpu)
{
	struct padata_instance *pinst = ps->pinst;
	int i, cpu, cpu_index, err;
	struct parallel_data *pd;
	struct padata_work *pw;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(ps->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
		if (cpumask_empty(pd->cpumask.cbcpu))
			goto out;

		/* Select an alternate fallback CPU and notify the caller. */
		cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

		cpu = cpumask_first(pd->cpumask.cbcpu);
		for (i = 0; i < cpu_index; i++)
			cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

		*cb_cpu = cpu;
	}

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	refcount_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = *cb_cpu;

	spin_lock(&padata_works_lock);
	padata->seq_nr = ++pd->seq_nr;
	pw = padata_work_alloc();
	spin_unlock(&padata_works_lock);

	if (!pw) {
		/* Maximum works limit exceeded, run in the current task. */
		padata->parallel(padata);
	}

	rcu_read_unlock_bh();

	if (pw) {
		padata_work_init(pw, padata_parallel_worker, padata, 0);
		queue_work(pinst->parallel_wq, &pw->pw_work);
	}

	return 0;
out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
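/*
 * Illustrative usage sketch (not part of this file): a caller embeds a
 * struct padata_priv in its own request, fills in the ->parallel() and
 * ->serial() callbacks, and submits it through a padata_shell.  The request
 * type and helper names below are hypothetical.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		// caller-specific fields
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *						struct my_request, padata);
 *		// do the CPU-intensive work, then hand the object back
 *		padata_do_serial(padata);
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		// runs on *cb_cpu; objects arrive in submission order
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(ps, &req->padata, &cb_cpu);
 *	// a negative err (e.g. -EBUSY during a reset) means the object was
 *	// not queued and the caller must handle it itself
 */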
/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization is still being
 *   processed in parallel by another cpu and is not yet present in
 *   that cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
					    bool remove_object)
{
	struct padata_priv *padata;
	struct padata_list *reorder;
	int cpu = pd->cpu;

	reorder = per_cpu_ptr(pd->reorder_list, cpu);

	spin_lock(&reorder->lock);
	if (list_empty(&reorder->list)) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	padata = list_entry(reorder->list.next, struct padata_priv, list);

	/*
	 * Checks the rare case where two or more parallel jobs have hashed to
	 * the same CPU and one of the later ones finishes first.
	 */
	if (padata->seq_nr != pd->processed) {
		spin_unlock(&reorder->lock);
		return NULL;
	}

	if (remove_object) {
		list_del_init(&padata->list);
		++pd->processed;
		pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
	}

	spin_unlock(&reorder->lock);
	return padata;
}
static void padata_reorder(struct parallel_data *pd)
{
	struct padata_instance *pinst = pd->ps->pinst;
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_list *reorder;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive at the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_find_next(pd, true);

		/*
		 * If the next object that needs serialization is parallel
		 * processed by another cpu and is still on its way to the
		 * cpu's reorder queue, nothing to do for now.
		 */
		if (!padata)
			break;

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime.
	 *
	 * Ensure reorder queue is read after pd->lock is dropped so we see
	 * new objects from another task in padata_do_serial. Pairs with
	 * smp_mb in padata_do_serial.
	 */
	smp_mb();

	reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
	if (!list_empty(&reorder->list) && padata_find_next(pd, false))
		queue_work(pinst->serial_wq, &pd->reorder_work);
}
static void invoke_padata_reorder(struct work_struct *work)
{
	struct parallel_data *pd;

	local_bh_disable();
	pd = container_of(work, struct parallel_data, reorder_work);
	padata_reorder(pd);
	local_bh_enable();
}
static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);
	int cnt;

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	cnt = 0;

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		cnt++;
	}
	local_bh_enable();

	if (refcount_sub_and_test(cnt, &pd->refcnt))
		padata_free_pd(pd);
}
/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	struct parallel_data *pd = padata->pd;
	int hashed_cpu = padata_cpu_hash(pd, padata->seq_nr);
	struct padata_list *reorder = per_cpu_ptr(pd->reorder_list, hashed_cpu);
	struct padata_priv *cur;
	struct list_head *pos;

	spin_lock(&reorder->lock);
	/* Sort in ascending order of sequence number. */
	list_for_each_prev(pos, &reorder->list) {
		cur = list_entry(pos, struct padata_priv, list);
		/* Compare by difference to consider integer wraparound. */
		if ((signed int)(cur->seq_nr - padata->seq_nr) < 0)
			break;
	}
	list_add(&padata->list, pos);
	spin_unlock(&reorder->lock);

	/*
	 * Ensure the addition to the reorder list is ordered correctly
	 * with the trylock of pd->lock in padata_reorder. Pairs with smp_mb
	 * in padata_reorder.
	 */
	smp_mb();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
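/*
 * Note on the wraparound comparison above (illustrative numbers): with
 * cur->seq_nr = 0xfffffffe and padata->seq_nr = 0x00000001, the unsigned
 * difference is 0xfffffffd, which as a signed int is -3 < 0, so cur is
 * treated as older than padata even though it is numerically larger.
 */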
static int padata_setup_cpumasks(struct padata_instance *pinst)
{
	struct workqueue_attrs *attrs;
	int err;

	attrs = alloc_workqueue_attrs();
	if (!attrs)
		return -ENOMEM;

	/* Restrict parallel_wq workers to pd->cpumask.pcpu. */
	cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
	err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
	free_workqueue_attrs(attrs);

	return err;
}
static void __init padata_mt_helper(struct work_struct *w)
{
	struct padata_work *pw = container_of(w, struct padata_work, pw_work);
	struct padata_mt_job_state *ps = pw->pw_data;
	struct padata_mt_job *job = ps->job;
	bool done;

	spin_lock(&ps->lock);

	while (job->size > 0) {
		unsigned long start, size, end;

		start = job->start;
		/* So end is chunk size aligned if enough work remains. */
		size = roundup(start + 1, ps->chunk_size) - start;
		size = min(size, job->size);
		end = start + size;

		job->start = end;
		job->size -= size;

		spin_unlock(&ps->lock);
		job->thread_fn(start, end, job->fn_arg);
		spin_lock(&ps->lock);
	}

	++ps->nworks_fini;
	done = (ps->nworks_fini == ps->nworks);
	spin_unlock(&ps->lock);

	if (done)
		complete(&ps->completion);
}
/**
 * padata_do_multithreaded - run a multithreaded job
 * @job: Description of the job.
 *
 * See the definition of struct padata_mt_job for more details.
 */
void __init padata_do_multithreaded(struct padata_mt_job *job)
{
	/* In case threads finish at different times. */
	static const unsigned long load_balance_factor = 4;
	struct padata_work my_work, *pw;
	struct padata_mt_job_state ps;
	LIST_HEAD(works);
	int nworks, nid;
	static atomic_t last_used_nid __initdata;

	if (job->size == 0)
		return;

	/* Ensure at least one thread when size < min_chunk. */
	nworks = max(job->size / max(job->min_chunk, job->align), 1ul);
	nworks = min(nworks, job->max_threads);

	if (nworks == 1) {
		/* Single thread, no coordination needed, cut to the chase. */
		job->thread_fn(job->start, job->start + job->size, job->fn_arg);
		return;
	}

	spin_lock_init(&ps.lock);
	init_completion(&ps.completion);
	ps.job = job;
	ps.nworks = padata_work_alloc_mt(nworks, &ps, &works);
	ps.nworks_fini = 0;

	/*
	 * Chunk size is the amount of work a helper does per call to the
	 * thread function.  Load balance large jobs between threads by
	 * increasing the number of chunks, guarantee at least the minimum
	 * chunk size from the caller, and honor the caller's alignment.
	 * Ensure chunk_size is at least 1 to prevent divide-by-0
	 * panic in padata_mt_helper().
	 */
	ps.chunk_size = job->size / (ps.nworks * load_balance_factor);
	ps.chunk_size = max(ps.chunk_size, job->min_chunk);
	ps.chunk_size = max(ps.chunk_size, 1ul);
	ps.chunk_size = roundup(ps.chunk_size, job->align);
	list_for_each_entry(pw, &works, pw_list)
		if (job->numa_aware) {
			int old_node = atomic_read(&last_used_nid);

			do {
				nid = next_node_in(old_node, node_states[N_CPU]);
			} while (!atomic_try_cmpxchg(&last_used_nid, &old_node, nid));
			queue_work_node(nid, system_unbound_wq, &pw->pw_work);
		} else {
			queue_work(system_unbound_wq, &pw->pw_work);
		}

	/* Use the current thread, which saves starting a workqueue worker. */
	padata_work_init(&my_work, padata_mt_helper, &ps, PADATA_WORK_ONSTACK);
	padata_mt_helper(&my_work.pw_work);

	/* Wait for all the helpers to finish. */
	wait_for_completion(&ps.completion);

	destroy_work_on_stack(&my_work.pw_work);
	padata_works_free(&works);
}
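/*
 * Illustrative usage sketch (not part of this file): a boot-time caller
 * describes the work range and a thread function, then lets padata split it
 * across helpers.  The helper my_init_range and nr_items are hypothetical.
 *
 *	static void __init my_init_range(unsigned long start, unsigned long end,
 *					 void *arg)
 *	{
 *		// initialize items in [start, end)
 *	}
 *
 *	struct padata_mt_job job = {
 *		.thread_fn   = my_init_range,
 *		.fn_arg      = NULL,
 *		.start       = 0,
 *		.size        = nr_items,
 *		.align       = 1,
 *		.min_chunk   = 1024,
 *		.max_threads = num_online_cpus(),
 *		.numa_aware  = false,
 *	};
 *
 *	padata_do_multithreaded(&job);
 */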
static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}
/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}
/* Initialize per-CPU reorder lists */
static void padata_init_reorder_list(struct parallel_data *pd)
{
	int cpu;
	struct padata_list *list;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		list = per_cpu_ptr(pd->reorder_list, cpu);
		__padata_list_init(list);
	}
}
/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
	struct padata_instance *pinst = ps->pinst;
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->reorder_list = alloc_percpu(struct padata_list);
	if (!pd->reorder_list)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_reorder_list;

	pd->ps = ps;

	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		goto err_free_squeue;
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
		goto err_free_pcpu;

	cpumask_and(pd->cpumask.pcpu, pinst->cpumask.pcpu, cpu_online_mask);
	cpumask_and(pd->cpumask.cbcpu, pinst->cpumask.cbcpu, cpu_online_mask);

	padata_init_reorder_list(pd);
	padata_init_squeues(pd);
	pd->seq_nr = -1;
	refcount_set(&pd->refcnt, 1);
	spin_lock_init(&pd->lock);
	pd->cpu = cpumask_first(pd->cpumask.pcpu);
	INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

	return pd;

err_free_pcpu:
	free_cpumask_var(pd->cpumask.pcpu);
err_free_squeue:
	free_percpu(pd->squeue);
err_free_reorder_list:
	free_percpu(pd->reorder_list);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}
static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->reorder_list);
	free_percpu(pd->squeue);
	kfree(pd);
}
static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();
}
/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
	struct parallel_data *pd_new;

	pd_new = padata_alloc_pd(ps);
	if (!pd_new)
		return -ENOMEM;

	ps->opd = rcu_dereference_protected(ps->pd, 1);
	rcu_assign_pointer(ps->pd, pd_new);

	return 0;
}
static int padata_replace(struct padata_instance *pinst)
{
	struct padata_shell *ps;
	int err = 0;

	pinst->flags |= PADATA_RESET;

	list_for_each_entry(ps, &pinst->pslist, list) {
		err = padata_replace_one(ps);
		if (err)
			break;
	}

	synchronize_rcu();

	list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
		if (refcount_dec_and_test(&ps->opd->refcnt))
			padata_free_pd(ps->opd);

	pinst->flags &= ~PADATA_RESET;

	return err;
}
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}
static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	int err;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid)
		__padata_stop(pinst);

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

	if (valid)
		__padata_start(pinst);

	return err;
}
/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to the
 *                      value given by @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or serial cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	cpus_read_lock();
	mutex_lock(&pinst->lock);

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	mutex_unlock(&pinst->lock);
	cpus_read_unlock();

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
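/*
 * Illustrative usage sketch (not part of this file): callers typically build
 * a temporary cpumask and apply it to one of the two masks.
 *
 *	cpumask_var_t mask;
 *
 *	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
 *		return -ENOMEM;
 *	cpumask_copy(mask, cpumask_of(2));
 *	err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);
 *	free_cpumask_var(mask);
 */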
#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		err = padata_replace(pinst);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err = 0;

	if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		err = padata_replace(pinst);
	}

	return err;
}
static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}
static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_online_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, cpu_dead_node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}
static enum cpuhp_state hp_online;
#endif
static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD,
					    &pinst->cpu_dead_node);
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->cpu_online_node);
#endif

	WARN_ON(!list_empty(&pinst->pslist));

	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	destroy_workqueue(pinst->serial_wq);
	destroy_workqueue(pinst->parallel_wq);
	kfree(pinst);
}
#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)
static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}
struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};
static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}
static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}
#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);
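/*
 * For reference, PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask)
 * expands to:
 *
 *	static struct padata_sysfs_entry serial_cpumask_attr =
 *		__ATTR(serial_cpumask, 0644, show_cpumask, store_cpumask);
 */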
/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(padata_default);
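/*
 * Example interaction from user space (illustrative; the exact path depends
 * on where the user of the instance adds this kobject, e.g. pcrypt exposes
 * its instances under /sys/kernel/pcrypt/):
 *
 *	# cat /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 *	# echo f > /sys/kernel/pcrypt/pencrypt/parallel_cpumask
 */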
static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}
static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static const struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_groups = padata_default_groups,
	.release = padata_sysfs_release,
};
/**
 * padata_alloc - allocate and initialize a padata instance
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc(const char *name)
{
	struct padata_instance *pinst;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
					     name);
	if (!pinst->parallel_wq)
		goto err_free_inst;

	cpus_read_lock();

	pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
					   WQ_CPU_INTENSIVE, 1, name);
	if (!pinst->serial_wq)
		goto err_put_cpus;

	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_serial_wq;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_serial_wq;
	}

	INIT_LIST_HEAD(&pinst->pslist);

	cpumask_copy(pinst->cpumask.pcpu, cpu_possible_mask);
	cpumask_copy(pinst->cpumask.cbcpu, cpu_possible_mask);

	if (padata_setup_cpumasks(pinst))
		goto err_free_masks;

	__padata_start(pinst);

	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls_cpuslocked(hp_online,
						    &pinst->cpu_online_node);
	cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
						    &pinst->cpu_dead_node);
#endif

	cpus_read_unlock();

	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
	destroy_workqueue(pinst->serial_wq);
err_put_cpus:
	cpus_read_unlock();
	destroy_workqueue(pinst->parallel_wq);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);
/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);
/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
	struct parallel_data *pd;
	struct padata_shell *ps;

	ps = kzalloc(sizeof(*ps), GFP_KERNEL);
	if (!ps)
		goto out;

	ps->pinst = pinst;

	cpus_read_lock();
	pd = padata_alloc_pd(ps);
	cpus_read_unlock();

	if (!pd)
		goto out_free_ps;

	mutex_lock(&pinst->lock);
	RCU_INIT_POINTER(ps->pd, pd);
	list_add(&ps->list, &pinst->pslist);
	mutex_unlock(&pinst->lock);

	return ps;

out_free_ps:
	kfree(ps);
out:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);
/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
	struct parallel_data *pd;

	if (!ps)
		return;

	mutex_lock(&ps->pinst->lock);
	list_del(&ps->list);
	pd = rcu_dereference_protected(ps->pd, 1);
	if (refcount_dec_and_test(&pd->refcnt))
		padata_free_pd(pd);
	mutex_unlock(&ps->pinst->lock);

	kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);
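/*
 * Illustrative lifecycle sketch (not part of this file): a user allocates
 * one instance plus at least one shell, submits jobs through the shell, and
 * tears both down in reverse order.  "my_instance" is a hypothetical name.
 *
 *	struct padata_instance *pinst = padata_alloc("my_instance");
 *	struct padata_shell *ps = pinst ? padata_alloc_shell(pinst) : NULL;
 *
 *	// ... padata_do_parallel(ps, ...) / padata_do_serial(...) ...
 *
 *	padata_free_shell(ps);
 *	padata_free(pinst);
 */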
void __init padata_init(void)
{
	unsigned int i, possible_cpus;
#ifdef CONFIG_HOTPLUG_CPU
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online, NULL);
	if (ret < 0)
		goto err;
	hp_online = ret;

	ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
				      NULL, padata_cpu_dead);
	if (ret < 0)
		goto remove_online_state;
#endif

	possible_cpus = num_possible_cpus();
	padata_works = kmalloc_array(possible_cpus, sizeof(struct padata_work),
				     GFP_KERNEL);
	if (!padata_works)
		goto remove_dead_state;

	for (i = 0; i < possible_cpus; ++i)
		list_add(&padata_works[i].pw_list, &padata_free_works);

	return;

remove_dead_state:
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
remove_online_state:
	cpuhp_remove_multi_state(hp_online);
err:
#endif
	pr_warn("padata: initialization failed\n");
}