/*
 * padata.c - generic interface to process data streams in parallel
 *
 * Copyright (C) 2008, 2009 secunet Security Networks AG
 * Copyright (C) 2008, 2009 Steffen Klassert <steffen.klassert@secunet.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <linux/module.h>
#include <linux/cpumask.h>
#include <linux/err.h>
#include <linux/cpu.h>
#include <linux/padata.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>

#define MAX_SEQ_NR (INT_MAX - NR_CPUS)
#define MAX_OBJ_NUM (10000 * NR_CPUS)
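
/*
 * MAX_SEQ_NR leaves NR_CPUS of headroom below INT_MAX so the sequence
 * number space can be rounded down to a multiple of the number of cpus
 * in use (see padata_alloc_pd()) without overflowing.  MAX_OBJ_NUM caps
 * the number of objects in flight; padata_do_parallel() returns -EBUSY
 * once pd->refcnt reaches it.
 */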

static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
{
	int cpu, target_cpu;

	target_cpu = cpumask_first(pd->cpumask);
	for (cpu = 0; cpu < cpu_index; cpu++)
		target_cpu = cpumask_next(target_cpu, pd->cpumask);

	return target_cpu;
}

static int padata_cpu_hash(struct padata_priv *padata)
{
	int cpu_index;
	struct parallel_data *pd;

	pd = padata->pd;

	/*
	 * Hash the sequence numbers to the cpus by taking
	 * seq_nr mod. number of cpus in use.
	 */
	cpu_index = padata->seq_nr % cpumask_weight(pd->cpumask);

	return padata_index_to_cpu(pd, cpu_index);
}

static void padata_parallel_worker(struct work_struct *work)
{
	struct padata_queue *queue;
	struct parallel_data *pd;
	struct padata_instance *pinst;
	LIST_HEAD(local_list);

	local_bh_disable();
	queue = container_of(work, struct padata_queue, pwork);
	pd = queue->pd;
	pinst = pd->pinst;

	spin_lock(&queue->parallel.lock);
	list_replace_init(&queue->parallel.list, &local_list);
	spin_unlock(&queue->parallel.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->parallel(padata);
	}

	local_bh_enable();
}

/**
 * padata_do_parallel - padata parallelization function
 *
 * @pinst: padata instance
 * @padata: object to be parallelized
 * @cb_cpu: cpu the serialization callback function will run on,
 *          must be in the cpumask of padata.
 *
 * The parallelization callback function will run with BHs off.
 * Note: Every object which is parallelized by padata_do_parallel
 * must be seen by padata_do_serial.
 */
int padata_do_parallel(struct padata_instance *pinst,
		       struct padata_priv *padata, int cb_cpu)
{
	int target_cpu, err;
	struct padata_queue *queue;
	struct parallel_data *pd;

	rcu_read_lock_bh();

	pd = rcu_dereference(pinst->pd);

	err = 0;
	if (!(pinst->flags & PADATA_INIT))
		goto out;

	err = -EBUSY;
	if ((pinst->flags & PADATA_RESET))
		goto out;

	if (atomic_read(&pd->refcnt) >= MAX_OBJ_NUM)
		goto out;

	err = -EINVAL;
	if (!cpumask_test_cpu(cb_cpu, pd->cpumask))
		goto out;

	err = -EINPROGRESS;
	atomic_inc(&pd->refcnt);
	padata->pd = pd;
	padata->cb_cpu = cb_cpu;

	if (unlikely(atomic_read(&pd->seq_nr) == pd->max_seq_nr))
		atomic_set(&pd->seq_nr, -1);

	padata->seq_nr = atomic_inc_return(&pd->seq_nr);

	target_cpu = padata_cpu_hash(padata);
	queue = per_cpu_ptr(pd->queue, target_cpu);

	spin_lock(&queue->parallel.lock);
	list_add_tail(&padata->list, &queue->parallel.list);
	spin_unlock(&queue->parallel.lock);

	queue_work_on(target_cpu, pinst->wq, &queue->pwork);

out:
	rcu_read_unlock_bh();

	return err;
}
EXPORT_SYMBOL(padata_do_parallel);
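
/*
 * Illustrative usage sketch; a caller embeds struct padata_priv in its
 * own object and provides the two callbacks.  The names my_request,
 * my_parallel and my_serial below are hypothetical.
 *
 *	struct my_request {
 *		struct padata_priv padata;
 *		// ... payload ...
 *	};
 *
 *	static void my_parallel(struct padata_priv *padata)
 *	{
 *		struct my_request *req = container_of(padata,
 *				struct my_request, padata);
 *
 *		// cpu-intensive work runs here, with BHs off
 *		padata_do_serial(padata);	// mandatory for every object
 *	}
 *
 *	static void my_serial(struct padata_priv *padata)
 *	{
 *		// runs on cb_cpu, in the original submission order
 *	}
 *
 *	req->padata.parallel = my_parallel;
 *	req->padata.serial = my_serial;
 *	err = padata_do_parallel(pinst, &req->padata, cb_cpu);
 *	// err == -EINPROGRESS means the object was queued successfully
 */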

static struct padata_priv *padata_get_next(struct parallel_data *pd)
{
	int cpu, num_cpus, empty, calc_seq_nr;
	int seq_nr, next_nr, overrun, next_overrun;
	struct padata_queue *queue, *next_queue;
	struct padata_priv *padata;
	struct padata_list *reorder;

	empty = 0;
	next_nr = -1;
	next_overrun = 0;
	next_queue = NULL;

	num_cpus = cpumask_weight(pd->cpumask);

	for_each_cpu(cpu, pd->cpumask) {
		queue = per_cpu_ptr(pd->queue, cpu);
		reorder = &queue->reorder;

		/*
		 * Calculate the seq_nr of the object that should be
		 * next in this queue.
		 */
		overrun = 0;
		calc_seq_nr = (atomic_read(&queue->num_obj) * num_cpus)
			       + queue->cpu_index;

		if (unlikely(calc_seq_nr > pd->max_seq_nr)) {
			calc_seq_nr = calc_seq_nr - pd->max_seq_nr - 1;
			overrun = 1;
		}

		if (!list_empty(&reorder->list)) {
			padata = list_entry(reorder->list.next,
					    struct padata_priv, list);

			seq_nr = padata->seq_nr;
			BUG_ON(calc_seq_nr != seq_nr);
		} else {
			seq_nr = calc_seq_nr;
			empty++;
		}

		if (next_nr < 0 || seq_nr < next_nr
		    || (next_overrun && !overrun)) {
			next_nr = seq_nr;
			next_overrun = overrun;
			next_queue = queue;
		}
	}

	padata = NULL;

	if (empty == num_cpus)
		goto out;

	reorder = &next_queue->reorder;

	if (!list_empty(&reorder->list)) {
		padata = list_entry(reorder->list.next,
				    struct padata_priv, list);

		if (unlikely(next_overrun)) {
			for_each_cpu(cpu, pd->cpumask) {
				queue = per_cpu_ptr(pd->queue, cpu);
				atomic_set(&queue->num_obj, 0);
			}
		}

		spin_lock(&reorder->lock);
		list_del_init(&padata->list);
		atomic_dec(&pd->reorder_objects);
		spin_unlock(&reorder->lock);

		atomic_inc(&next_queue->num_obj);

		goto out;
	}

	if (next_nr % num_cpus == next_queue->cpu_index) {
		padata = ERR_PTR(-ENODATA);
		goto out;
	}

	padata = ERR_PTR(-EINPROGRESS);
out:
	return padata;
}
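
/*
 * padata_get_next() hands back the object that is next in sequence if it
 * already sits in one of the percpu reorder queues.  Otherwise it returns
 * NULL when all reorder queues are empty, ERR_PTR(-EINPROGRESS) when the
 * next object is still in parallel processing on another cpu, or
 * ERR_PTR(-ENODATA) when the cpu that will serialize the next object has
 * not done so yet; in that case padata_reorder() backs off and the
 * pending padata_do_serial() call restarts the reordering.
 */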

static void padata_reorder(struct parallel_data *pd)
{
	struct padata_priv *padata;
	struct padata_queue *queue;
	struct padata_instance *pinst = pd->pinst;

try_again:
	if (!spin_trylock_bh(&pd->lock))
		goto out;

	while (1) {
		padata = padata_get_next(pd);

		if (!padata || PTR_ERR(padata) == -EINPROGRESS)
			break;

		if (PTR_ERR(padata) == -ENODATA) {
			spin_unlock_bh(&pd->lock);
			goto out;
		}

		queue = per_cpu_ptr(pd->queue, padata->cb_cpu);

		spin_lock(&queue->serial.lock);
		list_add_tail(&padata->list, &queue->serial.list);
		spin_unlock(&queue->serial.lock);

		queue_work_on(padata->cb_cpu, pinst->wq, &queue->swork);
	}

	spin_unlock_bh(&pd->lock);

	if (atomic_read(&pd->reorder_objects))
		goto try_again;

out:
	return;
}

static void padata_serial_worker(struct work_struct *work)
{
	struct padata_queue *queue;
	struct parallel_data *pd;
	LIST_HEAD(local_list);

	local_bh_disable();
	queue = container_of(work, struct padata_queue, swork);
	pd = queue->pd;

	spin_lock(&queue->serial.lock);
	list_replace_init(&queue->serial.list, &local_list);
	spin_unlock(&queue->serial.lock);

	while (!list_empty(&local_list)) {
		struct padata_priv *padata;

		padata = list_entry(local_list.next,
				    struct padata_priv, list);

		list_del_init(&padata->list);

		padata->serial(padata);
		atomic_dec(&pd->refcnt);
	}
	local_bh_enable();
}

/**
 * padata_do_serial - padata serialization function
 *
 * @padata: object to be serialized.
 *
 * padata_do_serial must be called for every parallelized object.
 * The serialization callback function will run with BHs off.
 */
void padata_do_serial(struct padata_priv *padata)
{
	int cpu;
	struct padata_queue *queue;
	struct parallel_data *pd;

	pd = padata->pd;

	cpu = get_cpu();
	queue = per_cpu_ptr(pd->queue, cpu);

	spin_lock(&queue->reorder.lock);
	atomic_inc(&pd->reorder_objects);
	list_add_tail(&padata->list, &queue->reorder.list);
	spin_unlock(&queue->reorder.lock);

	put_cpu();

	padata_reorder(pd);
}
EXPORT_SYMBOL(padata_do_serial);

static struct parallel_data *padata_alloc_pd(struct padata_instance *pinst,
					     const struct cpumask *cpumask)
{
	int cpu, cpu_index, num_cpus;
	struct padata_queue *queue;
	struct parallel_data *pd;

	cpu_index = 0;

	pd = kzalloc(sizeof(struct parallel_data), GFP_KERNEL);
	if (!pd)
		goto err;

	pd->queue = alloc_percpu(struct padata_queue);
	if (!pd->queue)
		goto err_free_pd;

	if (!alloc_cpumask_var(&pd->cpumask, GFP_KERNEL))
		goto err_free_queue;

	for_each_possible_cpu(cpu) {
		queue = per_cpu_ptr(pd->queue, cpu);

		queue->pd = pd;

		if (cpumask_test_cpu(cpu, cpumask)
		    && cpumask_test_cpu(cpu, cpu_active_mask)) {
			queue->cpu_index = cpu_index;
			cpu_index++;
		} else
			queue->cpu_index = -1;

		INIT_LIST_HEAD(&queue->reorder.list);
		INIT_LIST_HEAD(&queue->parallel.list);
		INIT_LIST_HEAD(&queue->serial.list);
		spin_lock_init(&queue->reorder.lock);
		spin_lock_init(&queue->parallel.lock);
		spin_lock_init(&queue->serial.lock);

		INIT_WORK(&queue->pwork, padata_parallel_worker);
		INIT_WORK(&queue->swork, padata_serial_worker);
		atomic_set(&queue->num_obj, 0);
	}

	cpumask_and(pd->cpumask, cpumask, cpu_active_mask);

	num_cpus = cpumask_weight(pd->cpumask);
	pd->max_seq_nr = (MAX_SEQ_NR / num_cpus) * num_cpus - 1;

	atomic_set(&pd->seq_nr, -1);
	atomic_set(&pd->reorder_objects, 0);
	atomic_set(&pd->refcnt, 0);
	pd->pinst = pinst;
	spin_lock_init(&pd->lock);

	return pd;

err_free_queue:
	free_percpu(pd->queue);
err_free_pd:
	kfree(pd);
err:
	return NULL;
}

static void padata_free_pd(struct parallel_data *pd)
{
	free_cpumask_var(pd->cpumask);
	free_percpu(pd->queue);
	kfree(pd);
}
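
/*
 * padata_replace() switches an instance over to a newly allocated
 * parallel_data: setting PADATA_RESET makes padata_do_parallel() refuse
 * new objects with -EBUSY, rcu_assign_pointer() publishes the new
 * parallel_data, and synchronize_rcu() together with the refcnt/yield
 * loop waits until every object submitted against the old parallel_data
 * has been serialized before it is freed.
 */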

static void padata_replace(struct padata_instance *pinst,
			   struct parallel_data *pd_new)
{
	struct parallel_data *pd_old = pinst->pd;

	pinst->flags |= PADATA_RESET;

	rcu_assign_pointer(pinst->pd, pd_new);

	synchronize_rcu();

	while (atomic_read(&pd_old->refcnt) != 0)
		yield();

	flush_workqueue(pinst->wq);

	padata_free_pd(pd_old);

	pinst->flags &= ~PADATA_RESET;
}

/**
 * padata_set_cpumask - set the cpumask that padata should use
 *
 * @pinst: padata instance
 * @cpumask: the cpumask to use
 */
int padata_set_cpumask(struct padata_instance *pinst,
		       cpumask_var_t cpumask)
{
	struct parallel_data *pd;
	int err = 0;

	might_sleep();

	mutex_lock(&pinst->lock);

	pd = padata_alloc_pd(pinst, cpumask);
	if (!pd) {
		err = -ENOMEM;
		goto out;
	}

	cpumask_copy(pinst->cpumask, cpumask);

	padata_replace(pinst, pd);

out:
	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_set_cpumask);

static int __padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_active_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);
	}

	return 0;
}

/**
 * padata_add_cpu - add a cpu to the padata cpumask
 *
 * @pinst: padata instance
 * @cpu: cpu to add
 */
int padata_add_cpu(struct padata_instance *pinst, int cpu)
{
	int err;

	might_sleep();

	mutex_lock(&pinst->lock);

	cpumask_set_cpu(cpu, pinst->cpumask);
	err = __padata_add_cpu(pinst, cpu);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_add_cpu);

static int __padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	struct parallel_data *pd;

	if (cpumask_test_cpu(cpu, cpu_online_mask)) {
		pd = padata_alloc_pd(pinst, pinst->cpumask);
		if (!pd)
			return -ENOMEM;

		padata_replace(pinst, pd);
	}

	return 0;
}

/**
 * padata_remove_cpu - remove a cpu from the padata cpumask
 *
 * @pinst: padata instance
 * @cpu: cpu to remove
 */
int padata_remove_cpu(struct padata_instance *pinst, int cpu)
{
	int err;

	might_sleep();

	mutex_lock(&pinst->lock);

	cpumask_clear_cpu(cpu, pinst->cpumask);
	err = __padata_remove_cpu(pinst, cpu);

	mutex_unlock(&pinst->lock);

	return err;
}
EXPORT_SYMBOL(padata_remove_cpu);

/**
 * padata_start - start the parallel processing
 *
 * @pinst: padata instance to start
 */
void padata_start(struct padata_instance *pinst)
{
	might_sleep();

	mutex_lock(&pinst->lock);
	pinst->flags |= PADATA_INIT;
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_start);

/**
 * padata_stop - stop the parallel processing
 *
 * @pinst: padata instance to stop
 */
void padata_stop(struct padata_instance *pinst)
{
	might_sleep();

	mutex_lock(&pinst->lock);
	pinst->flags &= ~PADATA_INIT;
	mutex_unlock(&pinst->lock);
}
EXPORT_SYMBOL(padata_stop);

static int __cpuinit padata_cpu_callback(struct notifier_block *nfb,
					 unsigned long action, void *hcpu)
{
	int err;
	struct padata_instance *pinst;
	int cpu = (unsigned long)hcpu;

	pinst = container_of(nfb, struct padata_instance, cpu_notifier);

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return NOTIFY_BAD;
		break;

	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		err = __padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		if (err)
			return NOTIFY_BAD;
		break;

	case CPU_UP_CANCELED:
	case CPU_UP_CANCELED_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		__padata_remove_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
		break;

	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		if (!cpumask_test_cpu(cpu, pinst->cpumask))
			break;
		mutex_lock(&pinst->lock);
		__padata_add_cpu(pinst, cpu);
		mutex_unlock(&pinst->lock);
	}

	return NOTIFY_OK;
}

/**
 * padata_alloc - allocate and initialize a padata instance
 *
 * @cpumask: cpumask that padata uses for parallelization
 * @wq: workqueue to use for the allocated padata instance
 */
struct padata_instance *padata_alloc(const struct cpumask *cpumask,
				     struct workqueue_struct *wq)
{
	int err;
	struct padata_instance *pinst;
	struct parallel_data *pd;

	pinst = kzalloc(sizeof(struct padata_instance), GFP_KERNEL);
	if (!pinst)
		goto err;

	pd = padata_alloc_pd(pinst, cpumask);
	if (!pd)
		goto err_free_inst;

	if (!alloc_cpumask_var(&pinst->cpumask, GFP_KERNEL))
		goto err_free_pd;

	rcu_assign_pointer(pinst->pd, pd);

	pinst->wq = wq;

	cpumask_copy(pinst->cpumask, cpumask);

	pinst->flags = 0;

	pinst->cpu_notifier.notifier_call = padata_cpu_callback;
	pinst->cpu_notifier.priority = 0;
	err = register_hotcpu_notifier(&pinst->cpu_notifier);
	if (err)
		goto err_free_cpumask;

	mutex_init(&pinst->lock);

	return pinst;

err_free_cpumask:
	free_cpumask_var(pinst->cpumask);
err_free_pd:
	padata_free_pd(pd);
err_free_inst:
	kfree(pinst);
err:
	return NULL;
}
EXPORT_SYMBOL(padata_alloc);
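
/*
 * Illustrative lifecycle sketch; the workqueue name and error handling
 * are hypothetical:
 *
 *	struct workqueue_struct *wq = create_workqueue("my_padata");
 *	struct padata_instance *pinst;
 *
 *	pinst = padata_alloc(cpu_possible_mask, wq);
 *	if (!pinst)
 *		return -ENOMEM;
 *
 *	padata_start(pinst);
 *	// ... submit objects with padata_do_parallel() ...
 *	padata_stop(pinst);
 *
 *	padata_free(pinst);
 *	destroy_workqueue(wq);
 */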

/**
 * padata_free - free a padata instance
 *
 * @pinst: padata instance to free
 */
void padata_free(struct padata_instance *pinst)
{
	padata_stop(pinst);

	synchronize_rcu();

	while (atomic_read(&pinst->pd->refcnt) != 0)
		yield();

	unregister_hotcpu_notifier(&pinst->cpu_notifier);
	padata_free_pd(pinst->pd);
	free_cpumask_var(pinst->cpumask);
	kfree(pinst);
}
EXPORT_SYMBOL(padata_free);