// SPDX-License-Identifier: GPL-2.0
/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/core-api/padata.rst for more information.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#define MAX_OBJ_NUM 1000

static void padata_free_pd(struct parallel_data *pd);

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
        int cpu, target_cpu;

        target_cpu = cpumask_first(pd->cpumask.pcpu);
        for (cpu = 0; cpu < cpu_index; cpu++)
                target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

        return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd, unsigned int seq_nr)
{
        /*
         * Hash the sequence numbers to the cpus by taking
         * seq_nr mod. number of cpus in use.
         */
        int cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

        return padata_index_to_cpu(pd, cpu_index);
}

static void padata_parallel_worker(struct work_struct *parallel_work)
{
        struct padata_parallel_queue *pqueue;
        LIST_HEAD(local_list);

        local_bh_disable();
        pqueue = container_of(parallel_work,
                              struct padata_parallel_queue, work);

        spin_lock(&pqueue->parallel.lock);
        list_replace_init(&pqueue->parallel.list, &local_list);
        spin_unlock(&pqueue->parallel.lock);

        while (!list_empty(&local_list)) {
                struct padata_priv *padata;

                padata = list_entry(local_list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);

                padata->parallel(padata);
        }

        local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @ps: padata shell
 * @padata: object to be parallelized
 * @cb_cpu: pointer to the CPU that the serialization callback function should
 *          run on.  If it's not in the serial cpumask of @pinst
 *          (i.e. cpumask.cbcpu), this function selects a fallback CPU and if
 *          none found, returns -EINVAL.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 *
 * Return: 0 on success or else negative error code.
 */
int padata_do_parallel(struct padata_shell *ps,
                       struct padata_priv *padata, int *cb_cpu)
{
        struct padata_instance *pinst = ps->pinst;
        int i, cpu, cpu_index, target_cpu, err;
        struct padata_parallel_queue *queue;
        struct parallel_data *pd;

        rcu_read_lock_bh();

        pd = rcu_dereference_bh(ps->pd);

        err = -EINVAL;
        if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
                goto out;

        if (!cpumask_test_cpu(*cb_cpu, pd->cpumask.cbcpu)) {
                if (!cpumask_weight(pd->cpumask.cbcpu))
                        goto out;

                /* Select an alternate fallback CPU and notify the caller. */
                cpu_index = *cb_cpu % cpumask_weight(pd->cpumask.cbcpu);

                cpu = cpumask_first(pd->cpumask.cbcpu);
                for (i = 0; i < cpu_index; i++)
                        cpu = cpumask_next(cpu, pd->cpumask.cbcpu);

                *cb_cpu = cpu;
        }

        err = -EBUSY;
        if ((pinst->flags & PADATA_RESET))
                goto out;

        if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
                goto out;

        err = 0;
        atomic_inc(&pd->refcnt);
        padata->pd = pd;
        padata->cb_cpu = *cb_cpu;

        padata->seq_nr = atomic_inc_return(&pd->seq_nr);
        target_cpu = padata_cpu_hash(pd, padata->seq_nr);
        padata->cpu = target_cpu;
        queue = per_cpu_ptr(pd->pqueue, target_cpu);

        spin_lock(&queue->parallel.lock);
        list_add_tail(&padata->list, &queue->parallel.list);
        spin_unlock(&queue->parallel.lock);

        queue_work(pinst->parallel_wq, &queue->work);

out:
        rcu_read_unlock_bh();

        return err;
}
EXPORT_SYMBOL(padata_do_parallel);
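
/*
 * Illustrative sketch (not part of padata itself): how a hypothetical caller
 * might submit work through padata_do_parallel() and hand it back for ordered
 * completion via padata_do_serial(). The names my_job, my_parallel, my_serial
 * and my_submit are made up for this example; only the padata_* calls,
 * struct padata_priv and container_of() are real kernel API.
 */
struct my_job {
        struct padata_priv padata;
        /* caller-specific payload would live here */
};

static void my_parallel(struct padata_priv *padata)
{
        /* Runs with BHs off on one of the parallel CPUs. */
        struct my_job *job = container_of(padata, struct my_job, padata);

        /* ... do the CPU-intensive part of the work on job here ... */
        (void)job;

        /* Every object handed to padata_do_parallel() must reach this call. */
        padata_do_serial(padata);
}

static void my_serial(struct padata_priv *padata)
{
        /* Runs in submission order on the callback CPU chosen by the caller. */
        struct my_job *job = container_of(padata, struct my_job, padata);

        /* ... complete the request ... */
        (void)job;
}

static int my_submit(struct padata_shell *ps, struct my_job *job, int *cb_cpu)
{
        job->padata.parallel = my_parallel;
        job->padata.serial = my_serial;

        /* *cb_cpu may be rewritten if it is not in the serial cpumask. */
        return padata_do_parallel(ps, &job->padata, cb_cpu);
}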

/*
 * padata_find_next - Find the next object that needs serialization.
 *
 * Return:
 * * A pointer to the control struct of the next object that needs
 *   serialization, if present in one of the percpu reorder queues.
 * * NULL, if the next object that needs serialization will
 *   be parallel processed by another cpu and is not yet present in
 *   the cpu's reorder queue.
 */
static struct padata_priv *padata_find_next(struct parallel_data *pd,
                                            bool remove_object)
{
        struct padata_parallel_queue *next_queue;
        struct padata_priv *padata;
        struct padata_list *reorder;
        int cpu = pd->cpu;

        next_queue = per_cpu_ptr(pd->pqueue, cpu);
        reorder = &next_queue->reorder;

        spin_lock(&reorder->lock);
        if (list_empty(&reorder->list)) {
                spin_unlock(&reorder->lock);
                return NULL;
        }

        padata = list_entry(reorder->list.next, struct padata_priv, list);

        /*
         * Checks the rare case where two or more parallel jobs have hashed to
         * the same CPU and one of the later ones finishes first.
         */
        if (padata->seq_nr != pd->processed) {
                spin_unlock(&reorder->lock);
                return NULL;
        }

        if (remove_object) {
                list_del_init(&padata->list);
                ++pd->processed;
                pd->cpu = cpumask_next_wrap(cpu, pd->cpumask.pcpu, -1, false);
        }

        spin_unlock(&reorder->lock);
        return padata;
}

static void padata_reorder(struct parallel_data *pd)
{
        struct padata_instance *pinst = pd->ps->pinst;
        int cb_cpu;
        struct padata_priv *padata;
        struct padata_serial_queue *squeue;
        struct padata_parallel_queue *next_queue;

        /*
         * We need to ensure that only one cpu can work on dequeueing of
         * the reorder queue at a time. Calculating in which percpu reorder
         * queue the next object will arrive takes some time. A spinlock
         * would be highly contended. Also it is not clear in which order
         * the objects arrive to the reorder queues. So a cpu could wait to
         * get the lock just to notice that there is nothing to do at the
         * moment. Therefore we use a trylock and let the holder of the lock
         * care for all the objects enqueued during the holdtime of the lock.
         */
        if (!spin_trylock_bh(&pd->lock))
                return;

        while (1) {
                padata = padata_find_next(pd, true);

                /*
                 * If the next object that needs serialization is parallel
                 * processed by another cpu and is still on its way to the
                 * cpu's reorder queue, nothing to do for now.
                 */
                if (!padata)
                        break;

                cb_cpu = padata->cb_cpu;
                squeue = per_cpu_ptr(pd->squeue, cb_cpu);

                spin_lock(&squeue->serial.lock);
                list_add_tail(&padata->list, &squeue->serial.list);
                spin_unlock(&squeue->serial.lock);

                queue_work_on(cb_cpu, pinst->serial_wq, &squeue->work);
        }

        spin_unlock_bh(&pd->lock);

        /*
         * The next object that needs serialization might have arrived to
         * the reorder queues in the meantime.
         *
         * Ensure reorder queue is read after pd->lock is dropped so we see
         * new objects from another task in padata_do_serial. Pairs with
         * smp_mb__after_atomic in padata_do_serial.
         */
        smp_mb();

        next_queue = per_cpu_ptr(pd->pqueue, pd->cpu);
        if (!list_empty(&next_queue->reorder.list) &&
            padata_find_next(pd, false))
                queue_work(pinst->serial_wq, &pd->reorder_work);
}

static void invoke_padata_reorder(struct work_struct *work)
{
        struct parallel_data *pd;

        local_bh_disable();
        pd = container_of(work, struct parallel_data, reorder_work);
        padata_reorder(pd);
        local_bh_enable();
}

static void padata_serial_worker(struct work_struct *serial_work)
{
        struct padata_serial_queue *squeue;
        struct parallel_data *pd;
        LIST_HEAD(local_list);
        int cnt;

        local_bh_disable();
        squeue = container_of(serial_work, struct padata_serial_queue, work);
        pd = squeue->pd;

        spin_lock(&squeue->serial.lock);
        list_replace_init(&squeue->serial.list, &local_list);
        spin_unlock(&squeue->serial.lock);

        cnt = 0;

        while (!list_empty(&local_list)) {
                struct padata_priv *padata;

                padata = list_entry(local_list.next,
                                    struct padata_priv, list);

                list_del_init(&padata->list);

                padata->serial(padata);
                cnt++;
        }
        local_bh_enable();

        if (atomic_sub_and_test(cnt, &pd->refcnt))
                padata_free_pd(pd);
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
        struct parallel_data *pd = padata->pd;
        struct padata_parallel_queue *pqueue = per_cpu_ptr(pd->pqueue,
                                                           padata->cpu);
        struct padata_priv *cur;

        spin_lock(&pqueue->reorder.lock);
        /* Sort in ascending order of sequence number. */
        list_for_each_entry_reverse(cur, &pqueue->reorder.list, list)
                if (cur->seq_nr < padata->seq_nr)
                        break;
        list_add(&padata->list, &cur->list);
        spin_unlock(&pqueue->reorder.lock);

        /*
         * Ensure the addition to the reorder list is ordered correctly
         * with the trylock of pd->lock in padata_reorder.  Pairs with smp_mb
         * in padata_reorder.
         */
        smp_mb__after_atomic();

        padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

static int padata_setup_cpumasks(struct padata_instance *pinst)
{
        struct workqueue_attrs *attrs;
        int err;

        attrs = alloc_workqueue_attrs();
        if (!attrs)
                return -ENOMEM;

        /* Restrict parallel_wq workers to pd->cpumask.pcpu. */
        cpumask_copy(attrs->cpumask, pinst->cpumask.pcpu);
        err = apply_workqueue_attrs(pinst->parallel_wq, attrs);
        free_workqueue_attrs(attrs);

        return err;
}

static int pd_setup_cpumasks(struct parallel_data *pd,
                             const struct cpumask *pcpumask,
                             const struct cpumask *cbcpumask)
{
        int err = -ENOMEM;

        if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
                goto out;
        if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL))
                goto free_pcpu_mask;

        cpumask_copy(pd->cpumask.pcpu, pcpumask);
        cpumask_copy(pd->cpumask.cbcpu, cbcpumask);

        return 0;

free_pcpu_mask:
        free_cpumask_var(pd->cpumask.pcpu);
out:
        return err;
}

static void __padata_list_init(struct padata_list *pd_list)
{
        INIT_LIST_HEAD(&pd_list->list);
        spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
        int cpu;
        struct padata_serial_queue *squeue;

        for_each_cpu(cpu, pd->cpumask.cbcpu) {
                squeue = per_cpu_ptr(pd->squeue, cpu);
                squeue->pd = pd;
                __padata_list_init(&squeue->serial);
                INIT_WORK(&squeue->work, padata_serial_worker);
        }
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
        int cpu;
        struct padata_parallel_queue *pqueue;

        for_each_cpu(cpu, pd->cpumask.pcpu) {
                pqueue = per_cpu_ptr(pd->pqueue, cpu);

                __padata_list_init(&pqueue->reorder);
                __padata_list_init(&pqueue->parallel);
                INIT_WORK(&pqueue->work, padata_parallel_worker);
                atomic_set(&pqueue->num_obj, 0);
        }
}

/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_shell *ps)
{
        struct padata_instance *pinst = ps->pinst;
        const struct cpumask *cbcpumask;
        const struct cpumask *pcpumask;
        struct parallel_data *pd;

        cbcpumask = pinst->rcpumask.cbcpu;
        pcpumask = pinst->rcpumask.pcpu;

        pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
        if (!pd)
                goto err;

        pd->pqueue = alloc_percpu(struct padata_parallel_queue);
        if (!pd->pqueue)
                goto err_free_pd;

        pd->squeue = alloc_percpu(struct padata_serial_queue);
        if (!pd->squeue)
                goto err_free_pqueue;

        pd->ps = ps;
        if (pd_setup_cpumasks(pd, pcpumask, cbcpumask))
                goto err_free_squeue;

        padata_init_pqueues(pd);
        padata_init_squeues(pd);
        atomic_set(&pd->seq_nr, -1);
        atomic_set(&pd->refcnt, 1);
        spin_lock_init(&pd->lock);
        pd->cpu = cpumask_first(pd->cpumask.pcpu);
        INIT_WORK(&pd->reorder_work, invoke_padata_reorder);

        return pd;

err_free_squeue:
        free_percpu(pd->squeue);
err_free_pqueue:
        free_percpu(pd->pqueue);
err_free_pd:
        kfree(pd);
err:
        return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
        free_cpumask_var(pd->cpumask.pcpu);
        free_cpumask_var(pd->cpumask.cbcpu);
        free_percpu(pd->pqueue);
        free_percpu(pd->squeue);
        kfree(pd);
}

static void __padata_start(struct padata_instance *pinst)
{
        pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
        if (!(pinst->flags & PADATA_INIT))
                return;

        pinst->flags &= ~PADATA_INIT;

        synchronize_rcu();
}

/* Replace the internal control structure with a new one. */
static int padata_replace_one(struct padata_shell *ps)
{
        struct parallel_data *pd_new;

        pd_new = padata_alloc_pd(ps);
        if (!pd_new)
                return -ENOMEM;

        ps->opd = rcu_dereference_protected(ps->pd, 1);
        rcu_assign_pointer(ps->pd, pd_new);

        return 0;
}

static int padata_replace(struct padata_instance *pinst)
{
        struct padata_shell *ps;
        int err = 0;

        pinst->flags |= PADATA_RESET;

        cpumask_and(pinst->rcpumask.pcpu, pinst->cpumask.pcpu,
                    cpu_online_mask);

        cpumask_and(pinst->rcpumask.cbcpu, pinst->cpumask.cbcpu,
                    cpu_online_mask);

        list_for_each_entry(ps, &pinst->pslist, list) {
                err = padata_replace_one(ps);
                if (err)
                        break;
        }

        synchronize_rcu();

        list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
                if (atomic_dec_and_test(&ps->opd->refcnt))
                        padata_free_pd(ps->opd);

        pinst->flags &= ~PADATA_RESET;

        return err;
}

/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
                                    const struct cpumask *cpumask)
{
        if (!cpumask_intersects(cpumask, cpu_online_mask)) {
                pinst->flags |= PADATA_INVALID;
                return false;
        }

        pinst->flags &= ~PADATA_INVALID;
        return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
                                 cpumask_var_t pcpumask,
                                 cpumask_var_t cbcpumask)
{
        int valid;
        int err;

        valid = padata_validate_cpumask(pinst, pcpumask);
        if (!valid) {
                __padata_stop(pinst);
                goto out_replace;
        }

        valid = padata_validate_cpumask(pinst, cbcpumask);
        if (!valid)
                __padata_stop(pinst);

out_replace:
        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

        err = padata_setup_cpumasks(pinst) ?: padata_replace(pinst);

        if (valid)
                __padata_start(pinst);

        return err;
}

/**
 * padata_set_cpumask - Replace the cpumask selected by @cpumask_type with
 *                      @cpumask.
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL, selecting the
 *                serial or parallel cpumask respectively.
 * @cpumask: the cpumask to use
 *
 * Return: 0 on success or negative error code
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
                       cpumask_var_t cpumask)
{
        struct cpumask *serial_mask, *parallel_mask;
        int err = -EINVAL;

        get_online_cpus();
        mutex_lock(&pinst->lock);

        switch (cpumask_type) {
        case PADATA_CPU_PARALLEL:
                serial_mask = pinst->cpumask.cbcpu;
                parallel_mask = cpumask;
                break;
        case PADATA_CPU_SERIAL:
                parallel_mask = pinst->cpumask.pcpu;
                serial_mask = cpumask;
                break;
        default:
                goto out;
        }

        err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
        mutex_unlock(&pinst->lock);
        put_online_cpus();

        return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
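
/*
 * Illustrative sketch (not part of padata itself): a hypothetical caller
 * restricting the parallel workers of an existing instance to CPUs 0-3.
 * Only padata_set_cpumask() and the cpumask helpers are real API; the
 * function name is made up for this example.
 */
static int my_restrict_parallel_cpus(struct padata_instance *pinst)
{
        cpumask_var_t mask;
        int err;

        if (!alloc_cpumask_var(&mask, GFP_KERNEL))
                return -ENOMEM;

        /* Build the new parallel cpumask: CPUs 0, 1, 2 and 3. */
        cpumask_clear(mask);
        cpumask_set_cpu(0, mask);
        cpumask_set_cpu(1, mask);
        cpumask_set_cpu(2, mask);
        cpumask_set_cpu(3, mask);

        err = padata_set_cpumask(pinst, PADATA_CPU_PARALLEL, mask);

        free_cpumask_var(mask);
        return err;
}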

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 *
 * Return: 0 on success or negative error code
 */
int padata_start(struct padata_instance *pinst)
{
        int err = 0;

        mutex_lock(&pinst->lock);

        if (pinst->flags & PADATA_INVALID)
                err = -EINVAL;

        __padata_start(pinst);

        mutex_unlock(&pinst->lock);

        return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
        mutex_lock(&pinst->lock);
        __padata_stop(pinst);
        mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
        int err = 0;

        if (cpumask_test_cpu(cpu, cpu_online_mask)) {
                err = padata_replace(pinst);

                if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
                    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_start(pinst);
        }

        return err;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
        int err = 0;

        if (!cpumask_test_cpu(cpu, cpu_online_mask)) {
                if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
                    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
                        __padata_stop(pinst);

                err = padata_replace(pinst);
        }

        return err;
}

static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
        return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
                cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
        struct padata_instance *pinst;
        int ret;

        pinst = hlist_entry_safe(node, struct padata_instance, node);
        if (!pinst_has_cpu(pinst, cpu))
                return 0;

        mutex_lock(&pinst->lock);
        ret = __padata_add_cpu(pinst, cpu);
        mutex_unlock(&pinst->lock);
        return ret;
}

static int padata_cpu_dead(unsigned int cpu, struct hlist_node *node)
{
        struct padata_instance *pinst;
        int ret;

        pinst = hlist_entry_safe(node, struct padata_instance, node);
        if (!pinst_has_cpu(pinst, cpu))
                return 0;

        mutex_lock(&pinst->lock);
        ret = __padata_remove_cpu(pinst, cpu);
        mutex_unlock(&pinst->lock);
        return ret;
}

static enum cpuhp_state hp_online;
#endif

static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_state_remove_instance_nocalls(CPUHP_PADATA_DEAD, &pinst->node);
        cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
#endif

        WARN_ON(!list_empty(&pinst->pslist));

        padata_stop(pinst);
        free_cpumask_var(pinst->rcpumask.cbcpu);
        free_cpumask_var(pinst->rcpumask.pcpu);
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
        destroy_workqueue(pinst->serial_wq);
        destroy_workqueue(pinst->parallel_wq);
        kfree(pinst);
}

#define kobj2pinst(_kobj)                                       \
        container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)                                      \
        container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
        struct padata_instance *pinst = kobj2pinst(kobj);
        __padata_free(pinst);
}

struct padata_sysfs_entry {
        struct attribute attr;
        ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
        ssize_t (*store)(struct padata_instance *, struct attribute *,
                         const char *, size_t);
};

static ssize_t show_cpumask(struct padata_instance *pinst,
                            struct attribute *attr, char *buf)
{
        struct cpumask *cpumask;
        ssize_t len;

        mutex_lock(&pinst->lock);
        if (!strcmp(attr->name, "serial_cpumask"))
                cpumask = pinst->cpumask.cbcpu;
        else
                cpumask = pinst->cpumask.pcpu;

        len = snprintf(buf, PAGE_SIZE, "%*pb\n",
                       nr_cpu_ids, cpumask_bits(cpumask));
        mutex_unlock(&pinst->lock);
        return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
                             struct attribute *attr,
                             const char *buf, size_t count)
{
        cpumask_var_t new_cpumask;
        ssize_t ret;
        int mask_type;

        if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
                return -ENOMEM;

        ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
                           nr_cpumask_bits);
        if (ret < 0)
                goto out;

        mask_type = !strcmp(attr->name, "serial_cpumask") ?
                PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
        ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
        if (!ret)
                ret = count;

out:
        free_cpumask_var(new_cpumask);
        return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)         \
        static struct padata_sysfs_entry _name##_attr =        \
                __ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)                       \
        static struct padata_sysfs_entry _name##_attr =        \
                __ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
        &serial_cpumask_attr.attr,
        &parallel_cpumask_attr.attr,
        NULL,
};
ATTRIBUTE_GROUPS(padata_default);

static ssize_t padata_sysfs_show(struct kobject *kobj,
                                 struct attribute *attr, char *buf)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->show)
                ret = pentry->show(pinst, attr, buf);

        return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
                                  const char *buf, size_t count)
{
        struct padata_instance *pinst;
        struct padata_sysfs_entry *pentry;
        ssize_t ret = -EIO;

        pinst = kobj2pinst(kobj);
        pentry = attr2pentry(attr);
        if (pentry->store)
                ret = pentry->store(pinst, attr, buf, count);

        return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
        .show = padata_sysfs_show,
        .store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
        .sysfs_ops = &padata_sysfs_ops,
        .default_groups = padata_default_groups,
        .release = padata_sysfs_release,
};

/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @name: used to identify the instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 *
 * Return: new instance on success, NULL on error
 */
static struct padata_instance *padata_alloc(const char *name,
                                            const struct cpumask *pcpumask,
                                            const struct cpumask *cbcpumask)
{
        struct padata_instance *pinst;

        pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
        if (!pinst)
                goto err;

        pinst->parallel_wq = alloc_workqueue("%s_parallel", WQ_UNBOUND, 0,
                                             name);
        if (!pinst->parallel_wq)
                goto err_free_inst;

        get_online_cpus();

        pinst->serial_wq = alloc_workqueue("%s_serial", WQ_MEM_RECLAIM |
                                           WQ_CPU_INTENSIVE, 1, name);
        if (!pinst->serial_wq)
                goto err_put_cpus;

        if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
                goto err_free_serial_wq;
        if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
                free_cpumask_var(pinst->cpumask.pcpu);
                goto err_free_serial_wq;
        }
        if (!padata_validate_cpumask(pinst, pcpumask) ||
            !padata_validate_cpumask(pinst, cbcpumask))
                goto err_free_masks;

        if (!alloc_cpumask_var(&pinst->rcpumask.pcpu, GFP_KERNEL))
                goto err_free_masks;
        if (!alloc_cpumask_var(&pinst->rcpumask.cbcpu, GFP_KERNEL))
                goto err_free_rcpumask_pcpu;

        INIT_LIST_HEAD(&pinst->pslist);

        cpumask_copy(pinst->cpumask.pcpu, pcpumask);
        cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);
        cpumask_and(pinst->rcpumask.pcpu, pcpumask, cpu_online_mask);
        cpumask_and(pinst->rcpumask.cbcpu, cbcpumask, cpu_online_mask);

        if (padata_setup_cpumasks(pinst))
                goto err_free_rcpumask_cbcpu;

        pinst->flags = 0;

        kobject_init(&pinst->kobj, &padata_attr_type);
        mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
        cpuhp_state_add_instance_nocalls_cpuslocked(hp_online, &pinst->node);
        cpuhp_state_add_instance_nocalls_cpuslocked(CPUHP_PADATA_DEAD,
                                                    &pinst->node);
#endif

        put_online_cpus();

        return pinst;

err_free_rcpumask_cbcpu:
        free_cpumask_var(pinst->rcpumask.cbcpu);
err_free_rcpumask_pcpu:
        free_cpumask_var(pinst->rcpumask.pcpu);
err_free_masks:
        free_cpumask_var(pinst->cpumask.pcpu);
        free_cpumask_var(pinst->cpumask.cbcpu);
err_free_serial_wq:
        destroy_workqueue(pinst->serial_wq);
err_put_cpus:
        put_online_cpus();
        destroy_workqueue(pinst->parallel_wq);
err_free_inst:
        kfree(pinst);
err:
        return NULL;
}

/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @name: used to identify the instance
 *
 * Return: new instance on success, NULL on error
 */
struct padata_instance *padata_alloc_possible(const char *name)
{
        return padata_alloc(name, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
        kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);

/**
 * padata_alloc_shell - Allocate and initialize padata shell.
 *
 * @pinst: Parent padata_instance object.
 *
 * Return: new shell on success, NULL on error
 */
struct padata_shell *padata_alloc_shell(struct padata_instance *pinst)
{
        struct parallel_data *pd;
        struct padata_shell *ps;

        ps = kzalloc(sizeof(*ps), GFP_KERNEL);
        if (!ps)
                goto out;

        ps->pinst = pinst;

        get_online_cpus();
        pd = padata_alloc_pd(ps);
        put_online_cpus();

        if (!pd)
                goto out_free_ps;

        mutex_lock(&pinst->lock);
        RCU_INIT_POINTER(ps->pd, pd);
        list_add(&ps->list, &pinst->pslist);
        mutex_unlock(&pinst->lock);

        return ps;

out_free_ps:
        kfree(ps);
out:
        return NULL;
}
EXPORT_SYMBOL(padata_alloc_shell);

/**
 * padata_free_shell - free a padata shell
 *
 * @ps: padata shell to free
 */
void padata_free_shell(struct padata_shell *ps)
{
        struct padata_instance *pinst = ps->pinst;

        mutex_lock(&pinst->lock);
        list_del(&ps->list);
        padata_free_pd(rcu_dereference_protected(ps->pd, 1));
        mutex_unlock(&pinst->lock);

        kfree(ps);
}
EXPORT_SYMBOL(padata_free_shell);
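
/*
 * Illustrative sketch (not part of padata itself): typical instance and shell
 * lifecycle for a hypothetical user, combining padata_alloc_possible(),
 * padata_alloc_shell(), padata_start(), padata_stop(), padata_free_shell()
 * and padata_free(). The my_* names are made up; error handling is kept
 * minimal for brevity.
 */
static struct padata_instance *my_pinst;
static struct padata_shell *my_ps;

static int my_padata_setup(void)
{
        int err;

        my_pinst = padata_alloc_possible("my_user");
        if (!my_pinst)
                return -ENOMEM;

        my_ps = padata_alloc_shell(my_pinst);
        if (!my_ps) {
                padata_free(my_pinst);
                return -ENOMEM;
        }

        err = padata_start(my_pinst);
        if (err) {
                padata_free_shell(my_ps);
                padata_free(my_pinst);
        }

        return err;
}

static void my_padata_teardown(void)
{
        padata_stop(my_pinst);
        /* Free the shell before the instance; __padata_free() expects an empty pslist. */
        padata_free_shell(my_ps);
        padata_free(my_pinst);
}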

#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
        int ret;

        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
                                      padata_cpu_online, NULL);
        if (ret < 0)
                return ret;
        hp_online = ret;

        ret = cpuhp_setup_state_multi(CPUHP_PADATA_DEAD, "padata:dead",
                                      NULL, padata_cpu_dead);
        if (ret < 0) {
                cpuhp_remove_multi_state(hp_online);
                return ret;
        }
        return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
        cpuhp_remove_multi_state(CPUHP_PADATA_DEAD);
        cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif