drivers/md/dm-kcopyd.c
/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */

#include <linux/types.h>
#include <asm/atomic.h>
#include <linux/blkdev.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/device-mapper.h>
#include <linux/dm-kcopyd.h>

#include "dm.h"
/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct dm_kcopyd_client {
	spinlock_t lock;
	struct page_list *pages;
	unsigned int nr_pages;
	unsigned int nr_free_pages;

	/*
	 * Block devices to unplug.
	 * Non-NULL pointer means that a block device has some pending requests
	 * and needs to be unplugged.
	 */
	struct block_device *unplug[2];

	struct dm_io_client *io_client;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;

	mempool_t *job_pool;

	struct workqueue_struct *kcopyd_wq;
	struct work_struct kcopyd_work;

	/*
	 * We maintain three lists of jobs:
	 *
	 * i)   jobs waiting for pages
	 * ii)  jobs that have pages, and are waiting for the io to be issued.
	 * iii) jobs that have completed.
	 *
	 * All three of these are protected by job_lock.
	 */
	spinlock_t job_lock;
	struct list_head complete_jobs;
	struct list_head io_jobs;
	struct list_head pages_jobs;
};
static void wake(struct dm_kcopyd_client *kc)
{
	queue_work(kc->kcopyd_wq, &kc->kcopyd_work);
}
static struct page_list *alloc_pl(void)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return NULL;

	pl->page = alloc_page(GFP_KERNEL);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}
static int kcopyd_get_pages(struct dm_kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	spin_lock(&kc->lock);
	if (kc->nr_free_pages < nr) {
		spin_unlock(&kc->lock);
		return -ENOMEM;
	}

	kc->nr_free_pages -= nr;
	for (*pages = pl = kc->pages; --nr; pl = pl->next)
		;

	kc->pages = pl->next;
	pl->next = NULL;

	spin_unlock(&kc->lock);

	return 0;
}
static void kcopyd_put_pages(struct dm_kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *cursor;

	spin_lock(&kc->lock);
	for (cursor = pl; cursor->next; cursor = cursor->next)
		kc->nr_free_pages++;

	kc->nr_free_pages++;
	cursor->next = kc->pages;
	kc->pages = pl;
	spin_unlock(&kc->lock);
}
/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}

static int client_alloc_pages(struct dm_kcopyd_client *kc, unsigned int nr)
{
	unsigned int i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr; i++) {
		next = alloc_pl();
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}
		next->next = pl;
		pl = next;
	}

	kcopyd_put_pages(kc, pl);
	kc->nr_pages += nr;
	return 0;
}

static void client_free_pages(struct dm_kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_pages = 0;
}
/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct dm_kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned long write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct dm_io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct dm_io_region dests[DM_KCOPYD_MAX_REGIONS];

	sector_t offset;
	unsigned int nr_pages;
	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for callback to use.
	 */
	dm_kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct mutex lock;
	atomic_t sub_jobs;
	sector_t progress;
};
/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512

static struct kmem_cache *_job_cache;

int __init dm_kcopyd_init(void)
{
	_job_cache = KMEM_CACHE(kcopyd_job, 0);
	if (!_job_cache)
		return -ENOMEM;

	return 0;
}

void dm_kcopyd_exit(void)
{
	kmem_cache_destroy(_job_cache);
	_job_cache = NULL;
}
/*
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
static struct kcopyd_job *pop(struct list_head *jobs,
			      struct dm_kcopyd_client *kc)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&kc->job_lock, flags);

	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&kc->job_lock, flags);

	return job;
}

static void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}

static void push_head(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;
	struct dm_kcopyd_client *kc = job->kc;

	spin_lock_irqsave(&kc->job_lock, flags);
	list_add(&job->list, jobs);
	spin_unlock_irqrestore(&kc->job_lock, flags);
}
/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned long write_err = job->write_err;
	dm_kcopyd_notify_fn fn = job->fn;
	struct dm_kcopyd_client *kc = job->kc;

	if (job->pages)
		kcopyd_put_pages(kc, job->pages);
	mempool_free(job, kc->job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}
/*
 * Unplug the block device at the specified index.
 */
static void unplug(struct dm_kcopyd_client *kc, int rw)
{
	if (kc->unplug[rw] != NULL) {
		blk_unplug(bdev_get_queue(kc->unplug[rw]));
		kc->unplug[rw] = NULL;
	}
}

/*
 * Prepare block device unplug. If there's another device
 * to be unplugged at the same array index, we unplug that
 * device first.
 */
static void prepare_unplug(struct dm_kcopyd_client *kc, int rw,
			   struct block_device *bdev)
{
	if (likely(kc->unplug[rw] == bdev))
		return;
	unplug(kc, rw);
	kc->unplug[rw] = bdev;
}
static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	if (error) {
		if (job->rw == WRITE)
			job->write_err |= error;
		else
			job->read_err = 1;

		if (!test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&kc->complete_jobs, job);
			wake(kc);
			return;
		}
	}

	if (job->rw == WRITE)
		push(&kc->complete_jobs, job);

	else {
		job->rw = WRITE;
		push(&kc->io_jobs, job);
	}

	wake(kc);
}
/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = job->rw,
		.mem.type = DM_IO_PAGE_LIST,
		.mem.ptr.pl = job->pages,
		.mem.offset = job->offset,
		.notify.fn = complete_io,
		.notify.context = job,
		.client = job->kc->io_client,
	};

	if (job->rw == READ) {
		r = dm_io(&io_req, 1, &job->source, NULL);
		prepare_unplug(job->kc, READ, job->source.bdev);
	} else {
		if (job->num_dests > 1)
			io_req.bi_rw |= REQ_UNPLUG;
		r = dm_io(&io_req, job->num_dests, job->dests, NULL);
		if (!(io_req.bi_rw & REQ_UNPLUG))
			prepare_unplug(job->kc, WRITE, job->dests[0].bdev);
	}

	return r;
}
static int run_pages_job(struct kcopyd_job *job)
{
	int r;

	job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
				  PAGE_SIZE >> 9);
	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&job->kc->io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}
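/*
 * Worked example: with 4 KiB pages and 512-byte sectors, PAGE_SIZE >> 9 is
 * 8 sectors per page, so a full 128-sector sub-job (SUB_JOB_SIZE, defined
 * below) needs dm_div_up(128, 8) = 16 pages from the client's pool.
 */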
/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, struct dm_kcopyd_client *kc,
			int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs, kc))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (job->rw == WRITE)
				job->write_err = (unsigned long) -1L;
			else
				job->read_err = 1;
			push(&kc->complete_jobs, job);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push_head(jobs, job);
			break;
		}

		count++;
	}

	return count;
}
/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(struct work_struct *work)
{
	struct dm_kcopyd_client *kc = container_of(work,
					struct dm_kcopyd_client, kcopyd_work);

	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 *
	 * Note that io_jobs add block devices to the unplug array,
	 * this array is cleared with "unplug" calls.  It is thus
	 * forbidden to run complete_jobs after io_jobs and before
	 * unplug because the block device could be destroyed in
	 * job completion callback.
	 */
	process_jobs(&kc->complete_jobs, kc, run_complete_job);
	process_jobs(&kc->pages_jobs, kc, run_pages_job);
	process_jobs(&kc->io_jobs, kc, run_io_job);
	unplug(kc, READ);
	unplug(kc, WRITE);
}
/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	struct dm_kcopyd_client *kc = job->kc;
	atomic_inc(&kc->nr_jobs);
	if (unlikely(!job->source.count))
		push(&kc->complete_jobs, job);
	else
		push(&kc->pages_jobs, job);
	wake(kc);
}
#define SUB_JOB_SIZE 128
static void segment_complete(int read_err, unsigned long write_err,
			     void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *job = (struct kcopyd_job *) context;
	struct dm_kcopyd_client *kc = job->kc;

	mutex_lock(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err |= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(DM_KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	mutex_unlock(&job->lock);

	if (count) {
		int i;
		struct kcopyd_job *sub_job = mempool_alloc(kc->job_pool,
							   GFP_NOIO);

		*sub_job = *job;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * Queue the completion callback to the kcopyd thread.
		 *
		 * Some callers assume that all the completions are called
		 * from a single thread and don't race with each other.
		 *
		 * We must not call the callback directly here because this
		 * code may not be executing in the thread.
		 */
		push(&kc->complete_jobs, job);
		wake(kc);
	}
}
/*
 * Create some little jobs that will do the move between
 * them.
 */
#define SPLIT_COUNT 8
static void split_job(struct kcopyd_job *job)
{
	int i;

	atomic_inc(&job->kc->nr_jobs);

	atomic_set(&job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++)
		segment_complete(0, 0u, job);
}
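/*
 * Worked example: a 16384-sector (8 MiB) copy is carved into 128 sub-jobs
 * of SUB_JOB_SIZE (128) sectors each.  split_job() primes SPLIT_COUNT (8)
 * of them, and each completing segment_complete() dispatches the next
 * chunk until job->progress reaches job->source.count.
 */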
int dm_kcopyd_copy(struct dm_kcopyd_client *kc, struct dm_io_region *from,
		   unsigned int num_dests, struct dm_io_region *dests,
		   unsigned int flags, dm_kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	/*
	 * Allocate a new job.
	 */
	job = mempool_alloc(kc->job_pool, GFP_NOIO);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;
	job->rw = READ;

	job->source = *from;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	job->offset = 0;
	job->nr_pages = 0;
	job->pages = NULL;

	job->fn = fn;
	job->context = context;

	if (job->source.count < SUB_JOB_SIZE)
		dispatch_job(job);

	else {
		mutex_init(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}
EXPORT_SYMBOL(dm_kcopyd_copy);
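/*
 * Illustrative sketch of how a caller with an existing client might submit
 * a single-destination copy with dm_kcopyd_copy() and wait for it.  The
 * helper names (copy_done, copy_one_region) and the completion-based
 * waiting are hypothetical and assume <linux/completion.h>; only the
 * dm_kcopyd_copy() call itself follows the interface above.
 */
#if 0
static void copy_done(int read_err, unsigned long write_err, void *context)
{
	/* Runs from the kcopyd workqueue once all sub-jobs have finished. */
	if (read_err || write_err)
		printk(KERN_ERR "kcopyd example: copy failed "
		       "(read_err=%d, write_err=0x%lx)\n",
		       read_err, write_err);
	complete((struct completion *) context);
}

static void copy_one_region(struct dm_kcopyd_client *kc,
			    struct block_device *src, struct block_device *dst,
			    sector_t sector, sector_t count)
{
	struct dm_io_region from = {
		.bdev = src,
		.sector = sector,
		.count = count,
	};
	struct dm_io_region to = {
		.bdev = dst,
		.sector = sector,
		.count = count,
	};
	DECLARE_COMPLETION_ONSTACK(done);

	/* One source, one destination, no flags. */
	dm_kcopyd_copy(kc, &from, 1, &to, 0, copy_done, &done);
	wait_for_completion(&done);
}
#endif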
/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif  /*  0  */
/*-----------------------------------------------------------------
 * Client setup
 *---------------------------------------------------------------*/
int dm_kcopyd_client_create(unsigned int nr_pages,
			    struct dm_kcopyd_client **result)
{
	int r = -ENOMEM;
	struct dm_kcopyd_client *kc;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc)
		return -ENOMEM;

	spin_lock_init(&kc->lock);
	spin_lock_init(&kc->job_lock);
	INIT_LIST_HEAD(&kc->complete_jobs);
	INIT_LIST_HEAD(&kc->io_jobs);
	INIT_LIST_HEAD(&kc->pages_jobs);

	memset(kc->unplug, 0, sizeof(kc->unplug));

	kc->job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!kc->job_pool)
		goto bad_slab;

	INIT_WORK(&kc->kcopyd_work, do_work);
	kc->kcopyd_wq = alloc_workqueue("kcopyd",
					WQ_NON_REENTRANT | WQ_MEM_RECLAIM, 0);
	if (!kc->kcopyd_wq)
		goto bad_workqueue;

	kc->pages = NULL;
	kc->nr_pages = kc->nr_free_pages = 0;
	r = client_alloc_pages(kc, nr_pages);
	if (r)
		goto bad_client_pages;

	kc->io_client = dm_io_client_create(nr_pages);
	if (IS_ERR(kc->io_client)) {
		r = PTR_ERR(kc->io_client);
		goto bad_io_client;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	*result = kc;
	return 0;

bad_io_client:
	client_free_pages(kc);
bad_client_pages:
	destroy_workqueue(kc->kcopyd_wq);
bad_workqueue:
	mempool_destroy(kc->job_pool);
bad_slab:
	kfree(kc);

	return r;
}
EXPORT_SYMBOL(dm_kcopyd_client_create);
void dm_kcopyd_client_destroy(struct dm_kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	BUG_ON(!list_empty(&kc->complete_jobs));
	BUG_ON(!list_empty(&kc->io_jobs));
	BUG_ON(!list_empty(&kc->pages_jobs));
	destroy_workqueue(kc->kcopyd_wq);
	dm_io_client_destroy(kc->io_client);
	client_free_pages(kc);
	mempool_destroy(kc->job_pool);
	kfree(kc);
}
EXPORT_SYMBOL(dm_kcopyd_client_destroy);
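/*
 * Illustrative sketch of the client lifecycle as a device-mapper target
 * might use it: reserve a page pool at constructor time, submit copies
 * with dm_kcopyd_copy(), and destroy the client in the destructor.  The
 * names my_kcopyd_client, my_target_ctr, my_target_dtr and the pool size
 * of 64 pages are hypothetical.
 */
#if 0
static struct dm_kcopyd_client *my_kcopyd_client;

static int my_target_ctr(void)
{
	/* Preallocate 64 pages for this client's copy traffic. */
	int r = dm_kcopyd_client_create(64, &my_kcopyd_client);
	if (r)
		return r;

	/* ... issue copies with dm_kcopyd_copy() ... */
	return 0;
}

static void my_target_dtr(void)
{
	/* Blocks until every job submitted by this client has completed. */
	dm_kcopyd_client_destroy(my_kcopyd_client);
}
#endif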