drivers/md/kcopyd.c
/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */
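/*
 * Example (illustrative sketch only): a client such as a mirror target
 * would create a kcopyd client, submit a copy, and learn of completion
 * through the notify callback.  The names src_bdev/dst_bdev, the page
 * count and the sector counts below are made up, and error checking of
 * the return values is omitted:
 *
 *	static void copy_done(int read_err, unsigned int write_err, void *ctx)
 *	{
 *		... both error arguments are zero on success ...
 *	}
 *
 *	struct kcopyd_client *kc;
 *	struct io_region from, to;
 *
 *	kcopyd_client_create(256, &kc);		(preallocate 256 pages)
 *	from.bdev = src_bdev; from.sector = 0; from.count = 1024;
 *	to = from; to.bdev = dst_bdev;
 *	kcopyd_copy(kc, &from, 1, &to, 0, copy_done, NULL);
 *	...
 *	kcopyd_client_destroy(kc);		(after all copies complete)
 */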
#include <asm/atomic.h>

#include <linux/blkdev.h>
#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include "kcopyd.h"

static struct workqueue_struct *_kcopyd_wq;
static struct work_struct _kcopyd_work;

static inline void wake(void)
{
	queue_work(_kcopyd_wq, &_kcopyd_work);
}

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
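/*
 * Note: struct page_list itself is declared outside this file (in the
 * dm-io layer in this tree); from its use below it is assumed to be a
 * simple singly-linked list node, roughly:
 *
 *	struct page_list {
 *		struct page_list *next;
 *		struct page *page;
 *	};
 *
 * The per-client pool is a free list of these entries, guarded by the
 * client's spinlock.
 */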
struct kcopyd_client {
	struct list_head list;

	spinlock_t lock;
	struct page_list *pages;
	unsigned int nr_pages;
	unsigned int nr_free_pages;
};

static struct page_list *alloc_pl(void)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return NULL;

	pl->page = alloc_page(GFP_KERNEL);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}

static int kcopyd_get_pages(struct kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	spin_lock(&kc->lock);
	if (kc->nr_free_pages < nr) {
		spin_unlock(&kc->lock);
		return -ENOMEM;
	}

	kc->nr_free_pages -= nr;
	for (*pages = pl = kc->pages; --nr; pl = pl->next)
		;

	kc->pages = pl->next;
	pl->next = NULL;

	spin_unlock(&kc->lock);

	return 0;
}

static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *cursor;

	spin_lock(&kc->lock);
	for (cursor = pl; cursor->next; cursor = cursor->next)
		kc->nr_free_pages++;

	kc->nr_free_pages++;
	cursor->next = kc->pages;
	kc->pages = pl;
	spin_unlock(&kc->lock);
}

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}

static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
{
	unsigned int i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr; i++) {
		next = alloc_pl();
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kcopyd_put_pages(kc, pl);
	kc->nr_pages += nr;
	return 0;
}

static void client_free_pages(struct kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned int write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct io_region dests[KCOPYD_MAX_REGIONS];

	sector_t offset;
	unsigned int nr_pages;
	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for callback to use.
	 */
	kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct semaphore lock;
	atomic_t sub_jobs;
	sector_t progress;
};

/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512

static kmem_cache_t *_job_cache;
static mempool_t *_job_pool;
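/*
 * Sizing note (editorial): jobs come out of _job_pool with GFP_NOIO, both
 * in kcopyd_copy() and for the sub-jobs allocated in segment_complete()
 * below, so the reserve must be deep enough that submitting or splitting
 * a copy never depends on the allocator doing io (which could deadlock,
 * as the comment above notes).  MIN_JOBS (512) is comfortably larger than
 * the SPLIT_COUNT (8) sub-jobs a large copy keeps in flight at once.
 */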
/*
 * We maintain three lists of jobs:
 *
 * i)   jobs waiting for pages
 * ii)  jobs that have pages, and are waiting for the io to be issued.
 * iii) jobs that have completed.
 *
 * All three of these are protected by job_lock.
 */
static DEFINE_SPINLOCK(_job_lock);

static LIST_HEAD(_complete_jobs);
static LIST_HEAD(_io_jobs);
static LIST_HEAD(_pages_jobs);

static int jobs_init(void)
{
	_job_cache = kmem_cache_create("kcopyd-jobs",
				       sizeof(struct kcopyd_job),
				       __alignof__(struct kcopyd_job),
				       0, NULL, NULL);
	if (!_job_cache)
		return -ENOMEM;

	_job_pool = mempool_create(MIN_JOBS, mempool_alloc_slab,
				   mempool_free_slab, _job_cache);
	if (!_job_pool) {
		kmem_cache_destroy(_job_cache);
		return -ENOMEM;
	}

	return 0;
}

static void jobs_exit(void)
{
	BUG_ON(!list_empty(&_complete_jobs));
	BUG_ON(!list_empty(&_io_jobs));
	BUG_ON(!list_empty(&_pages_jobs));

	mempool_destroy(_job_pool);
	kmem_cache_destroy(_job_cache);
	_job_pool = NULL;
	_job_cache = NULL;
}

/*
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
static inline struct kcopyd_job *pop(struct list_head *jobs)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&_job_lock, flags);

	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&_job_lock, flags);

	return job;
}

static inline void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;

	spin_lock_irqsave(&_job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&_job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned int write_err = job->write_err;
	kcopyd_notify_fn fn = job->fn;

	kcopyd_put_pages(job->kc, job->pages);
	mempool_free(job, _job_pool);
	fn(read_err, write_err, context);
	return 0;
}

static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;

	if (error) {
		if (job->rw == WRITE)
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&_complete_jobs, job);
			wake();
			return;
		}
	}

	if (job->rw == WRITE)
		push(&_complete_jobs, job);

	else {
		job->rw = WRITE;
		push(&_io_jobs, job);
	}

	wake();
}

/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;

	if (job->rw == READ)
		r = dm_io_async(1, &job->source, job->rw,
				job->pages,
				job->offset, complete_io, job);

	else
		r = dm_io_async(job->num_dests, job->dests, job->rw,
				job->pages,
				job->offset, complete_io, job);

	return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
	int r;

	job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
				  PAGE_SIZE >> 9);
	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&_io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}
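/*
 * Worked example (editorial): with 4K pages, PAGE_SIZE >> 9 is 8 sectors
 * per page, so a 128-sector sub-job needs dm_div_up(128 + 0, 8) = 16
 * pages from the client's pool.  (job->offset is always 0 as set up in
 * kcopyd_copy(), but it is included in the calculation.)
 */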
/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (job->rw == WRITE)
				job->write_err = (unsigned int) -1;
			else
				job->read_err = 1;
			push(&_complete_jobs, job);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push(jobs, job);
			break;
		}

		count++;
	}

	return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(void *ignored)
{
	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	process_jobs(&_complete_jobs, run_complete_job);
	process_jobs(&_pages_jobs, run_pages_job);
	process_jobs(&_io_jobs, run_io_job);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	push(&_pages_jobs, job);
	wake();
}

#define SUB_JOB_SIZE 128
static void segment_complete(int read_err,
			     unsigned int write_err, void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *job = (struct kcopyd_job *) context;

	down(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	up(&job->lock);

	if (count) {
		int i;
		struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);

		*sub_job = *job;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * To avoid a race we must keep the job around
		 * until after the notify function has completed.
		 * Otherwise the client may try and stop the job
		 * after we've completed.
		 */
		job->fn(read_err, write_err, job->context);
		mempool_free(job, _job_pool);
	}
}

/*
 * Create some little jobs that will do the move between
 * them.
 */
#define SPLIT_COUNT 8
static void split_job(struct kcopyd_job *job)
{
	int i;

	atomic_set(&job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++)
		segment_complete(0, 0u, job);
}
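/*
 * Worked example (editorial): for a 2048-sector source, kcopyd_copy()
 * below takes the split path.  split_job() primes sub_jobs at 8 and calls
 * segment_complete() eight times, so at most SPLIT_COUNT sub-jobs of up
 * to SUB_JOB_SIZE (128) sectors are in flight at once.  Each completing
 * sub-job re-enters segment_complete(), claims the next chunk via
 * job->progress, and dispatches itself again until all 16 chunks of the
 * 2048 sectors have been issued; the last sub-job to finish drops
 * sub_jobs to zero and invokes the client's notify function.
 */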
int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
		unsigned int num_dests, struct io_region *dests,
		unsigned int flags, kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	/*
	 * Allocate a new job.
	 */
	job = mempool_alloc(_job_pool, GFP_NOIO);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;
	job->rw = READ;

	job->source = *from;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	job->offset = 0;
	job->nr_pages = 0;
	job->pages = NULL;

	job->fn = fn;
	job->context = context;

	if (job->source.count < SUB_JOB_SIZE)
		dispatch_job(job);

	else {
		init_MUTEX(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}

/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}

/*-----------------------------------------------------------------
 * Unit setup
 *---------------------------------------------------------------*/
static DECLARE_MUTEX(_client_lock);
static LIST_HEAD(_clients);

static void client_add(struct kcopyd_client *kc)
{
	down(&_client_lock);
	list_add(&kc->list, &_clients);
	up(&_client_lock);
}

static void client_del(struct kcopyd_client *kc)
{
	down(&_client_lock);
	list_del(&kc->list);
	up(&_client_lock);
}

static DECLARE_MUTEX(kcopyd_init_lock);
static int kcopyd_clients = 0;

static int kcopyd_init(void)
{
	int r;

	down(&kcopyd_init_lock);

	if (kcopyd_clients) {
		/* Already initialized. */
		kcopyd_clients++;
		up(&kcopyd_init_lock);
		return 0;
	}

	r = jobs_init();
	if (r) {
		up(&kcopyd_init_lock);
		return r;
	}

	_kcopyd_wq = create_singlethread_workqueue("kcopyd");
	if (!_kcopyd_wq) {
		jobs_exit();
		up(&kcopyd_init_lock);
		return -ENOMEM;
	}

	kcopyd_clients++;
	INIT_WORK(&_kcopyd_work, do_work, NULL);
	up(&kcopyd_init_lock);
	return 0;
}

static void kcopyd_exit(void)
{
	down(&kcopyd_init_lock);
	kcopyd_clients--;
	if (!kcopyd_clients) {
		jobs_exit();
		destroy_workqueue(_kcopyd_wq);
		_kcopyd_wq = NULL;
	}
	up(&kcopyd_init_lock);
}

int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
{
	int r = 0;
	struct kcopyd_client *kc;

	r = kcopyd_init();
	if (r)
		return r;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc) {
		kcopyd_exit();
		return -ENOMEM;
	}

	spin_lock_init(&kc->lock);
	kc->pages = NULL;
	kc->nr_pages = kc->nr_free_pages = 0;
	r = client_alloc_pages(kc, nr_pages);
	if (r) {
		kfree(kc);
		kcopyd_exit();
		return r;
	}

	r = dm_io_get(nr_pages);
	if (r) {
		client_free_pages(kc);
		kfree(kc);
		kcopyd_exit();
		return r;
	}

	client_add(kc);
	*result = kc;
	return 0;
}

void kcopyd_client_destroy(struct kcopyd_client *kc)
{
	dm_io_put(kc->nr_pages);
	client_free_pages(kc);
	client_del(kc);
	kfree(kc);
	kcopyd_exit();
}

EXPORT_SYMBOL(kcopyd_client_create);
EXPORT_SYMBOL(kcopyd_client_destroy);
EXPORT_SYMBOL(kcopyd_copy);
EXPORT_SYMBOL(kcopyd_cancel);