/*
 * padata.c - generic interface to process data streams in parallel
 *
 * See Documentation/padata.txt for an api documentation.
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/export.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>
#include <linux/rcupdate.h>
#include <linux/module.h>

#define MAX_OBJ_NUM 1000
static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask.pcpu);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask.pcpu);

	return target_cpu;
}

static int padata_cpu_hash(struct parallel_data *pd)
{
	unsigned int seq_nr;
	int cpu_index;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
	seq_nr = atomic_inc_return(&pd->seq_nr);
	cpu_index = seq_nr % cpumask_weight(pd->cpumask.pcpu);

	return padata_index_to_cpu(pd, cpu_index);
}
static void padata_parallel_worker(struct work_struct *parallel_work)
{
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;
	struct padata_instance *pinst;
	LIST_HEAD(local_list);

	local_bh_disable();
	pqueue = container_of(parallel_work,
			      struct padata_parallel_queue, work);
	pd = pqueue->pd;
	pinst = pd->pinst;

	spin_lock(&pqueue->parallel.lock);
	list_replace_init(&pqueue->parallel.list, &local_list);
	spin_unlock(&pqueue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}
/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the serial cpumask of @pinst (i.e. cpumask.cbcpu).
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_parallel_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference_bh(pinst->pd);

	err = -EINVAL;
	if (!(pinst->flags & PADATA_INIT) || pinst->flags & PADATA_INVALID)
		goto out;

	if (!cpumask_test_cpu(cb_cpu, pd->cpumask.cbcpu))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = 0;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	target_cpu = padata_cpu_hash(pd);
	queue = per_cpu_ptr(pd->pqueue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->work);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
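
/*
 * Illustrative usage sketch, not part of padata itself: a client typically
 * embeds struct padata_priv in its own request, fills in the parallel and
 * serial callbacks and submits the request with padata_do_parallel().  All
 * names prefixed with example_ are hypothetical; only the padata calls and
 * struct padata_priv fields are real.  The callback definitions appear in
 * the companion sketch after padata_do_serial() below.
 */
struct example_request {
	struct padata_priv	padata;		/* recovered via container_of() */
	int			payload;	/* the client's own data */
};

static void example_parallel(struct padata_priv *padata);
static void example_serial(struct padata_priv *padata);

static int __maybe_unused example_submit(struct padata_instance *example_pinst,
					 struct example_request *req,
					 int callback_cpu)
{
	req->padata.parallel = example_parallel;
	req->padata.serial = example_serial;

	/*
	 * Runs with BHs off; may return -EINVAL if the instance is not
	 * started or callback_cpu is not in the serial cpumask, and -EBUSY
	 * while a cpumask change is in flight or MAX_OBJ_NUM objects are
	 * already in progress.
	 */
	return padata_do_parallel(example_pinst, &req->padata, callback_cpu);
}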
/*
 * padata_get_next - Get the next object that needs serialization.
 *
 * Return values are:
 *
 * A pointer to the control struct of the next object that needs
 * serialization, if present in one of the percpu reorder queues.
 *
 * NULL, if all percpu reorder queues are empty.
 *
 * -EINPROGRESS, if the next object that needs serialization will
 *  be parallel processed by another cpu and is not yet present in
 *  the cpu's reorder queue.
 *
 * -ENODATA, if this cpu has to do the parallel processing for
 *  the next object.
 */
static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus;
	unsigned int next_nr, next_index;
	struct padata_parallel_queue *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	num_cpus = cpumask_weight(pd->cpumask.pcpu);

	/*
	 * Calculate the percpu reorder queue and the sequence
	 * number of the next object.
	 */
	next_nr = pd->processed;
	next_index = next_nr % num_cpus;
	cpu = padata_index_to_cpu(pd, next_index);
	next_queue = per_cpu_ptr(pd->pqueue, cpu);

	padata = NULL;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		pd->processed++;

		goto out;
	}

	if (__this_cpu_read(pd->pqueue->cpu_index) == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}
static void padata_reorder(struct parallel_data *pd)
{
	int cb_cpu;
	struct padata_priv *padata;
	struct padata_serial_queue *squeue;
	struct padata_instance *pinst = pd->pinst;

	/*
	 * We need to ensure that only one cpu can work on dequeueing of
	 * the reorder queue at a time. Calculating in which percpu reorder
	 * queue the next object will arrive takes some time. A spinlock
	 * would be highly contended. Also it is not clear in which order
	 * the objects arrive to the reorder queues. So a cpu could wait to
	 * get the lock just to notice that there is nothing to do at the
	 * moment. Therefore we use a trylock and let the holder of the lock
	 * care for all the objects enqueued during the holdtime of the lock.
	 */
	if (!spin_trylock_bh(&pd->lock))
		return;

	while (1) {
		padata = padata_get_next(pd);

		/*
		 * All reorder queues are empty, or the next object that needs
		 * serialization is parallel processed by another cpu and is
		 * still on its way to the cpu's reorder queue, nothing to
		 * do for now.
		 */
		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		/*
		 * This cpu has to do the parallel processing of the next
		 * object. It's waiting in the cpu's parallelization queue,
		 * so exit immediately.
		 */
		if (PTR_ERR(padata) == -ENODATA) {
			del_timer(&pd->timer);
			spin_unlock_bh(&pd->lock);
			return;
		}

		cb_cpu = padata->cb_cpu;
		squeue = per_cpu_ptr(pd->squeue, cb_cpu);

		spin_lock(&squeue->serial.lock);
		list_add_tail(&padata->list, &squeue->serial.list);
		spin_unlock(&squeue->serial.lock);

		queue_work_on(cb_cpu, pinst->wq, &squeue->work);
	}

	spin_unlock_bh(&pd->lock);

	/*
	 * The next object that needs serialization might have arrived to
	 * the reorder queues in the meantime, we will be called again
	 * from the timer function if no one else cares for it.
	 */
	if (atomic_read(&pd->reorder_objects)
			&& !(pinst->flags & PADATA_RESET))
		mod_timer(&pd->timer, jiffies + HZ);
	else
		del_timer(&pd->timer);

	return;
}
static void padata_reorder_timer(unsigned long arg)
{
	struct parallel_data *pd = (struct parallel_data *)arg;

	padata_reorder(pd);
}

static void padata_serial_worker(struct work_struct *serial_work)
{
	struct padata_serial_queue *squeue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	squeue = container_of(serial_work, struct padata_serial_queue, work);
	pd = squeue->pd;

	spin_lock(&squeue->serial.lock);
	list_replace_init(&squeue->serial.list, &local_list);
	spin_unlock(&squeue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}
/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	pqueue = per_cpu_ptr(pd->pqueue, cpu);

	spin_lock(&pqueue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &pqueue->reorder.list);
	spin_unlock(&pqueue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);
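
/*
 * Continuation of the illustrative sketch above (hypothetical example_
 * names): the parallel callback runs on one of the parallel cpus with BHs
 * off and must hand every object back via padata_do_serial(); the serial
 * callback then runs on the requested callback cpu, in submission order.
 */
static void example_parallel(struct padata_priv *padata)
{
	struct example_request *req =
		container_of(padata, struct example_request, padata);

	req->payload *= 2;	/* the actual parallel work goes here */

	/* Every parallelized object must be seen by padata_do_serial(). */
	padata_do_serial(padata);
}

static void example_serial(struct padata_priv *padata)
{
	struct example_request *req =
		container_of(padata, struct example_request, padata);

	/* Results arrive here in the original submission order. */
	(void)req;
}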
static int padata_setup_cpumasks(struct parallel_data *pd,
				 const struct cpumask *pcpumask,
				 const struct cpumask *cbcpumask)
{
	if (!alloc_cpumask_var(&pd->cpumask.pcpu, GFP_KERNEL))
		return -ENOMEM;

	cpumask_and(pd->cpumask.pcpu, pcpumask, cpu_online_mask);
	if (!alloc_cpumask_var(&pd->cpumask.cbcpu, GFP_KERNEL)) {
		/* Free the mask that was actually allocated, not cbcpu. */
		free_cpumask_var(pd->cpumask.pcpu);
		return -ENOMEM;
	}

	cpumask_and(pd->cpumask.cbcpu, cbcpumask, cpu_online_mask);
	return 0;
}
static void __padata_list_init(struct padata_list *pd_list)
{
	INIT_LIST_HEAD(&pd_list->list);
	spin_lock_init(&pd_list->lock);
}

/* Initialize all percpu queues used by serial workers */
static void padata_init_squeues(struct parallel_data *pd)
{
	int cpu;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		squeue->pd = pd;
		__padata_list_init(&squeue->serial);
		INIT_WORK(&squeue->work, padata_serial_worker);
	}
}

/* Initialize all percpu queues used by parallel workers */
static void padata_init_pqueues(struct parallel_data *pd)
{
	int cpu_index, cpu;
	struct padata_parallel_queue *pqueue;

	cpu_index = 0;
	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		pqueue->pd = pd;
		pqueue->cpu_index = cpu_index;
		cpu_index++;

		__padata_list_init(&pqueue->reorder);
		__padata_list_init(&pqueue->parallel);
		INIT_WORK(&pqueue->work, padata_parallel_worker);
		atomic_set(&pqueue->num_obj, 0);
	}
}
/* Allocate and initialize the internal cpumask dependent resources. */
static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *pcpumask,
					     const struct cpumask *cbcpumask)
{
	struct parallel_data *pd;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->pqueue = alloc_percpu(struct padata_parallel_queue);
	if (!pd->pqueue)
		goto err_free_pd;

	pd->squeue = alloc_percpu(struct padata_serial_queue);
	if (!pd->squeue)
		goto err_free_pqueue;
	if (padata_setup_cpumasks(pd, pcpumask, cbcpumask) < 0)
		goto err_free_squeue;

	padata_init_pqueues(pd);
	padata_init_squeues(pd);
	setup_timer(&pd->timer, padata_reorder_timer, (unsigned long)pd);
	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_squeue:
	free_percpu(pd->squeue);
err_free_pqueue:
	free_percpu(pd->pqueue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask.pcpu);
	free_cpumask_var(pd->cpumask.cbcpu);
	free_percpu(pd->pqueue);
	free_percpu(pd->squeue);
	kfree(pd);
}
/* Flush all objects out of the padata queues. */
static void padata_flush_queues(struct parallel_data *pd)
{
	int cpu;
	struct padata_parallel_queue *pqueue;
	struct padata_serial_queue *squeue;

	for_each_cpu(cpu, pd->cpumask.pcpu) {
		pqueue = per_cpu_ptr(pd->pqueue, cpu);
		flush_work(&pqueue->work);
	}

	del_timer_sync(&pd->timer);

	if (atomic_read(&pd->reorder_objects))
		padata_reorder(pd);

	for_each_cpu(cpu, pd->cpumask.cbcpu) {
		squeue = per_cpu_ptr(pd->squeue, cpu);
		flush_work(&squeue->work);
	}

	BUG_ON(atomic_read(&pd->refcnt) != 0);
}

static void __padata_start(struct padata_instance *pinst)
{
	pinst->flags |= PADATA_INIT;
}

static void __padata_stop(struct padata_instance *pinst)
{
	if (!(pinst->flags & PADATA_INIT))
		return;

	pinst->flags &= ~PADATA_INIT;

	synchronize_rcu();

	get_online_cpus();
	padata_flush_queues(pinst->pd);
	put_online_cpus();
}
/* Replace the internal control structure with a new one. */
static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;
	int notification_mask = 0;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	if (!cpumask_equal(pd_old->cpumask.pcpu, pd_new->cpumask.pcpu))
		notification_mask |= PADATA_CPU_PARALLEL;
	if (!cpumask_equal(pd_old->cpumask.cbcpu, pd_new->cpumask.cbcpu))
		notification_mask |= PADATA_CPU_SERIAL;

	padata_flush_queues(pd_old);
	padata_free_pd(pd_old);

	if (notification_mask)
		blocking_notifier_call_chain(&pinst->cpumask_change_notifier,
					     notification_mask,
					     &pd_new->cpumask);

	pinst->flags &= ~PADATA_RESET;
}
/**
 * padata_register_cpumask_notifier - Registers a notifier that will be called
 *                             if either pcpu or cbcpu or both cpumasks change.
 *
 * @pinst: A pointer to padata instance
 * @nblock: A pointer to notifier block.
 */
int padata_register_cpumask_notifier(struct padata_instance *pinst,
				     struct notifier_block *nblock)
{
	return blocking_notifier_chain_register(&pinst->cpumask_change_notifier,
						nblock);
}
EXPORT_SYMBOL(padata_register_cpumask_notifier);

/**
 * padata_unregister_cpumask_notifier - Unregisters cpumask notifier
 *        registered earlier using padata_register_cpumask_notifier
 *
 * @pinst: A pointer to padata instance.
 * @nblock: A pointer to notifier block.
 */
int padata_unregister_cpumask_notifier(struct padata_instance *pinst,
				       struct notifier_block *nblock)
{
	return blocking_notifier_chain_unregister(
		&pinst->cpumask_change_notifier,
		nblock);
}
EXPORT_SYMBOL(padata_unregister_cpumask_notifier);
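
/*
 * Illustrative sketch (hypothetical example_ names): a user that caches
 * information derived from the instance's cpumasks can register a notifier
 * to learn about cpumask changes.  The notifier is called with a mask of
 * PADATA_CPU_PARALLEL and/or PADATA_CPU_SERIAL and a pointer to the new
 * struct padata_cpumask.
 */
static int example_cpumask_change(struct notifier_block *self,
				  unsigned long val, void *data)
{
	struct padata_cpumask *new_masks = data;

	if (val & PADATA_CPU_PARALLEL)
		pr_debug("parallel cpumask changed: %*pb\n",
			 cpumask_pr_args(new_masks->pcpu));
	if (val & PADATA_CPU_SERIAL)
		pr_debug("serial cpumask changed: %*pb\n",
			 cpumask_pr_args(new_masks->cbcpu));

	return NOTIFY_OK;
}

static struct notifier_block example_nblock __maybe_unused = {
	.notifier_call = example_cpumask_change,
};
/* Registered with: padata_register_cpumask_notifier(pinst, &example_nblock); */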
/* If cpumask contains no active cpu, we mark the instance as invalid. */
static bool padata_validate_cpumask(struct padata_instance *pinst,
				    const struct cpumask *cpumask)
{
	if (!cpumask_intersects(cpumask, cpu_online_mask)) {
		pinst->flags |= PADATA_INVALID;
		return false;
	}

	pinst->flags &= ~PADATA_INVALID;
	return true;
}

static int __padata_set_cpumasks(struct padata_instance *pinst,
				 cpumask_var_t pcpumask,
				 cpumask_var_t cbcpumask)
{
	int valid;
	struct parallel_data *pd;

	valid = padata_validate_cpumask(pinst, pcpumask);
	if (!valid) {
		__padata_stop(pinst);
		goto out_replace;
	}

	valid = padata_validate_cpumask(pinst, cbcpumask);
	if (!valid)
		__padata_stop(pinst);

out_replace:
	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		return -ENOMEM;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	padata_replace(pinst, pd);

	if (valid)
		__padata_start(pinst);

	return 0;
}
/**
 * padata_set_cpumask - Set the cpumask selected by @cpumask_type to @cpumask.
 *
 * @pinst: padata instance
 * @cpumask_type: PADATA_CPU_PARALLEL or PADATA_CPU_SERIAL, selecting the
 *                parallel or the serial cpumask respectively
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst, int cpumask_type,
		       cpumask_var_t cpumask)
{
	struct cpumask *serial_mask, *parallel_mask;
	int err = -EINVAL;

	mutex_lock(&pinst->lock);
	get_online_cpus();

	switch (cpumask_type) {
	case PADATA_CPU_PARALLEL:
		serial_mask = pinst->cpumask.cbcpu;
		parallel_mask = cpumask;
		break;
	case PADATA_CPU_SERIAL:
		parallel_mask = pinst->cpumask.pcpu;
		serial_mask = cpumask;
		break;
	default:
		goto out;
	}

	err = __padata_set_cpumasks(pinst, parallel_mask, serial_mask);

out:
	put_online_cpus();
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);
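
/*
 * Illustrative sketch (hypothetical example_ names): restrict the parallel
 * workers of an instance to cpus 0-3.  The caller owns the temporary
 * cpumask; padata_set_cpumask() copies it into the instance.
 */
static int __maybe_unused example_restrict_parallel(struct padata_instance *example_pinst)
{
	cpumask_var_t mask;
	int err, cpu;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_clear(mask);
	for (cpu = 0; cpu < 4; cpu++)
		cpumask_set_cpu(cpu, mask);

	err = padata_set_cpumask(example_pinst, PADATA_CPU_PARALLEL, mask);

	free_cpumask_var(mask);
	return err;
}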
/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
int padata_start(struct padata_instance *pinst)
{
	int err = 0;

	mutex_lock(&pinst->lock);

	if (pinst->flags & PADATA_INVALID)
		err = -EINVAL;

	__padata_start(pinst);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	mutex_lock(&pinst->lock);
	__padata_stop(pinst);
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);
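
/*
 * Illustrative sketch (hypothetical example_ names): submission is only
 * accepted between padata_start() and padata_stop().  padata_start()
 * returns -EINVAL if an earlier cpumask update left the instance without
 * any online cpu in one of its masks (PADATA_INVALID).
 */
static int __maybe_unused example_enable(struct padata_instance *example_pinst)
{
	int err;

	err = padata_start(example_pinst);
	if (err)
		return err;	/* typically -EINVAL: no usable online cpu */

	/* ... submit work with padata_do_parallel() ... */

	padata_stop(example_pinst);
	return 0;
}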
#ifdef CONFIG_HOTPLUG_CPU

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		if (padata_validate_cpumask(pinst, pinst->cpumask.pcpu) &&
		    padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_start(pinst);
	}

	return 0;
}

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd = NULL;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {

		if (!padata_validate_cpumask(pinst, pinst->cpumask.pcpu) ||
		    !padata_validate_cpumask(pinst, pinst->cpumask.cbcpu))
			__padata_stop(pinst);

		pd = padata_alloc_pd(pinst, pinst->cpumask.pcpu,
				     pinst->cpumask.cbcpu);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);

		cpumask_clear_cpu(cpu, pd->cpumask.cbcpu);
		cpumask_clear_cpu(cpu, pd->cpumask.pcpu);
	}

	return 0;
}
/**
 * padata_remove_cpu - remove a cpu from one or both (serial and parallel)
 *                     padata cpumasks.
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 * @mask: bitmask specifying from which cpumask @cpu should be removed
 *        The @mask may be any combination of the following flags:
 *          PADATA_CPU_SERIAL   - serial cpumask
 *          PADATA_CPU_PARALLEL - parallel cpumask
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu, int mask)
{
	int err;

	if (!(mask & (PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL)))
		return -EINVAL;

	mutex_lock(&pinst->lock);

	get_online_cpus();
	if (mask & PADATA_CPU_SERIAL)
		cpumask_clear_cpu(cpu, pinst->cpumask.cbcpu);
	if (mask & PADATA_CPU_PARALLEL)
		cpumask_clear_cpu(cpu, pinst->cpumask.pcpu);

	err = __padata_remove_cpu(pinst, cpu);
	put_online_cpus();

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);
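
/*
 * Illustrative sketch (hypothetical example_ names): take a cpu out of both
 * cpumasks of an instance, e.g. before dedicating that cpu to something
 * else.  @mask may also be just PADATA_CPU_SERIAL or PADATA_CPU_PARALLEL.
 */
static int __maybe_unused example_isolate_cpu(struct padata_instance *example_pinst,
					      int cpu)
{
	return padata_remove_cpu(example_pinst, cpu,
				 PADATA_CPU_SERIAL | PADATA_CPU_PARALLEL);
}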
static inline int pinst_has_cpu(struct padata_instance *pinst, int cpu)
{
	return cpumask_test_cpu(cpu, pinst->cpumask.pcpu) ||
		cpumask_test_cpu(cpu, pinst->cpumask.cbcpu);
}

static int padata_cpu_online(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_add_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static int padata_cpu_prep_down(unsigned int cpu, struct hlist_node *node)
{
	struct padata_instance *pinst;
	int ret;

	pinst = hlist_entry_safe(node, struct padata_instance, node);
	if (!pinst_has_cpu(pinst, cpu))
		return 0;

	mutex_lock(&pinst->lock);
	ret = __padata_remove_cpu(pinst, cpu);
	mutex_unlock(&pinst->lock);
	return ret;
}

static enum cpuhp_state hp_online;
#endif
static void __padata_free(struct padata_instance *pinst)
{
#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_remove_instance_nocalls(hp_online, &pinst->node);
#endif

	padata_stop(pinst);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
	kfree(pinst);
}

#define kobj2pinst(_kobj)					\
	container_of(_kobj, struct padata_instance, kobj)
#define attr2pentry(_attr)					\
	container_of(_attr, struct padata_sysfs_entry, attr)

static void padata_sysfs_release(struct kobject *kobj)
{
	struct padata_instance *pinst = kobj2pinst(kobj);
	__padata_free(pinst);
}

struct padata_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct padata_instance *, struct attribute *, char *);
	ssize_t (*store)(struct padata_instance *, struct attribute *,
			 const char *, size_t);
};
static ssize_t show_cpumask(struct padata_instance *pinst,
			    struct attribute *attr, char *buf)
{
	struct cpumask *cpumask;
	ssize_t len;

	mutex_lock(&pinst->lock);
	if (!strcmp(attr->name, "serial_cpumask"))
		cpumask = pinst->cpumask.cbcpu;
	else
		cpumask = pinst->cpumask.pcpu;

	len = snprintf(buf, PAGE_SIZE, "%*pb\n",
		       nr_cpu_ids, cpumask_bits(cpumask));
	mutex_unlock(&pinst->lock);
	return len < PAGE_SIZE ? len : -EINVAL;
}

static ssize_t store_cpumask(struct padata_instance *pinst,
			     struct attribute *attr,
			     const char *buf, size_t count)
{
	cpumask_var_t new_cpumask;
	ssize_t ret;
	int mask_type;

	if (!alloc_cpumask_var(&new_cpumask, GFP_KERNEL))
		return -ENOMEM;

	ret = bitmap_parse(buf, count, cpumask_bits(new_cpumask),
			   nr_cpumask_bits);
	if (ret < 0)
		goto out;

	mask_type = !strcmp(attr->name, "serial_cpumask") ?
		PADATA_CPU_SERIAL : PADATA_CPU_PARALLEL;
	ret = padata_set_cpumask(pinst, mask_type, new_cpumask);
	if (!ret)
		ret = count;

out:
	free_cpumask_var(new_cpumask);
	return ret;
}

#define PADATA_ATTR_RW(_name, _show_name, _store_name)		\
	static struct padata_sysfs_entry _name##_attr =		\
		__ATTR(_name, 0644, _show_name, _store_name)
#define PADATA_ATTR_RO(_name, _show_name)			\
	static struct padata_sysfs_entry _name##_attr =	\
		__ATTR(_name, 0400, _show_name, NULL)

PADATA_ATTR_RW(serial_cpumask, show_cpumask, store_cpumask);
PADATA_ATTR_RW(parallel_cpumask, show_cpumask, store_cpumask);

/*
 * Padata sysfs provides the following objects:
 * serial_cpumask   [RW] - cpumask for serial workers
 * parallel_cpumask [RW] - cpumask for parallel workers
 */
static struct attribute *padata_default_attrs[] = {
	&serial_cpumask_attr.attr,
	&parallel_cpumask_attr.attr,
	NULL,
};
static ssize_t padata_sysfs_show(struct kobject *kobj,
				 struct attribute *attr, char *buf)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->show)
		ret = pentry->show(pinst, attr, buf);

	return ret;
}

static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *buf, size_t count)
{
	struct padata_instance *pinst;
	struct padata_sysfs_entry *pentry;
	ssize_t ret = -EIO;

	pinst = kobj2pinst(kobj);
	pentry = attr2pentry(attr);
	if (pentry->store)
		ret = pentry->store(pinst, attr, buf, count);

	return ret;
}

static const struct sysfs_ops padata_sysfs_ops = {
	.show = padata_sysfs_show,
	.store = padata_sysfs_store,
};

static struct kobj_type padata_attr_type = {
	.sysfs_ops = &padata_sysfs_ops,
	.default_attrs = padata_default_attrs,
	.release = padata_sysfs_release,
};
/**
 * padata_alloc_possible - Allocate and initialize padata instance.
 *                         Use the cpu_possible_mask for serial and
 *                         parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc_possible(struct workqueue_struct *wq)
{
	return padata_alloc(wq, cpu_possible_mask, cpu_possible_mask);
}
EXPORT_SYMBOL(padata_alloc_possible);
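
/*
 * Illustrative sketch of instance setup, loosely mirroring how
 * crypto/pcrypt.c uses this API: bind the instance to an unbound
 * workqueue, start it, and later release it through padata_free(), which
 * drops the final kobject reference.  The example_ names and the "example"
 * workqueue name are hypothetical; a real user keeps the workqueue pointer
 * so it can call destroy_workqueue() after padata_free() at teardown.
 */
static struct padata_instance * __maybe_unused example_setup_instance(void)
{
	struct workqueue_struct *wq;
	struct padata_instance *pinst;

	wq = alloc_workqueue("example", WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
	if (!wq)
		return NULL;

	pinst = padata_alloc_possible(wq);
	if (!pinst) {
		destroy_workqueue(wq);
		return NULL;
	}

	if (padata_start(pinst)) {
		padata_free(pinst);
		destroy_workqueue(wq);
		return NULL;
	}

	return pinst;
}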
/**
 * padata_alloc - allocate and initialize a padata instance and specify
 *                cpumasks for serial and parallel workers.
 *
 * @wq: workqueue to use for the allocated padata instance
 * @pcpumask: cpumask that will be used for padata parallelization
 * @cbcpumask: cpumask that will be used for padata serialization
 */
struct padata_instance *padata_alloc(struct workqueue_struct *wq,
				     const struct cpumask *pcpumask,
				     const struct cpumask *cbcpumask)
{
	struct padata_instance *pinst;
	struct parallel_data *pd = NULL;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	get_online_cpus();
	if (!alloc_cpumask_var(&pinst->cpumask.pcpu, GFP_KERNEL))
		goto err_free_inst;
	if (!alloc_cpumask_var(&pinst->cpumask.cbcpu, GFP_KERNEL)) {
		free_cpumask_var(pinst->cpumask.pcpu);
		goto err_free_inst;
	}
	if (!padata_validate_cpumask(pinst, pcpumask) ||
	    !padata_validate_cpumask(pinst, cbcpumask))
		goto err_free_masks;

	pd = padata_alloc_pd(pinst, pcpumask, cbcpumask);
	if (!pd)
		goto err_free_masks;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask.pcpu, pcpumask);
	cpumask_copy(pinst->cpumask.cbcpu, cbcpumask);

	pinst->flags = 0;

	put_online_cpus();

	BLOCKING_INIT_NOTIFIER_HEAD(&pinst->cpumask_change_notifier);
	kobject_init(&pinst->kobj, &padata_attr_type);
	mutex_init(&pinst->lock);

#ifdef CONFIG_HOTPLUG_CPU
	cpuhp_state_add_instance_nocalls(hp_online, &pinst->node);
#endif
	return pinst;

err_free_masks:
	free_cpumask_var(pinst->cpumask.pcpu);
	free_cpumask_var(pinst->cpumask.cbcpu);
err_free_inst:
	kfree(pinst);
	put_online_cpus();
err:
	return NULL;
}
/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	kobject_put(&pinst->kobj);
}
EXPORT_SYMBOL(padata_free);
#ifdef CONFIG_HOTPLUG_CPU

static __init int padata_driver_init(void)
{
	int ret;

	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "padata:online",
				      padata_cpu_online,
				      padata_cpu_prep_down);
	if (ret < 0)
		return ret;
	hp_online = ret;
	return 0;
}
module_init(padata_driver_init);

static __exit void padata_driver_exit(void)
{
	cpuhp_remove_multi_state(hp_online);
}
module_exit(padata_driver_exit);
#endif