/*
 * Copyright (C) 2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 *
 * Kcopyd provides a simple interface for copying an area of one
 * block-device to one or more other block-devices, with an asynchronous
 * completion notification.
 */

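/*
 * Illustrative usage sketch (not part of the original source): a client
 * reserves some pages, submits a copy and is told through its callback
 * when the read and all the writes have finished.  The names my_notify(),
 * src, dest and my_ctx below are hypothetical.
 *
 *	static void my_notify(int read_err, unsigned int write_err,
 *			      void *context)
 *	{
 *		...	both error arguments zero => the copy succeeded
 *	}
 *
 *	struct kcopyd_client *kc;
 *	struct io_region src, dest;
 *
 *	kcopyd_client_create(32, &kc);
 *	kcopyd_copy(kc, &src, 1, &dest, 0, my_notify, my_ctx);
 *	...
 *	kcopyd_client_destroy(kc);	(waits for this client's jobs)
 */
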
#include <asm/types.h>
#include <asm/atomic.h>

#include <linux/blkdev.h>
#include <linux/config.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>

#include "kcopyd.h"

static struct workqueue_struct *_kcopyd_wq;
static struct work_struct _kcopyd_work;

static inline void wake(void)
{
	queue_work(_kcopyd_wq, &_kcopyd_work);
}

/*-----------------------------------------------------------------
 * Each kcopyd client has its own little pool of preallocated
 * pages for kcopyd io.
 *---------------------------------------------------------------*/
struct kcopyd_client {
	struct list_head list;

	spinlock_t lock;
	struct page_list *pages;
	unsigned int nr_pages;
	unsigned int nr_free_pages;

	wait_queue_head_t destroyq;
	atomic_t nr_jobs;
};

static struct page_list *alloc_pl(void)
{
	struct page_list *pl;

	pl = kmalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return NULL;

	pl->page = alloc_page(GFP_KERNEL);
	if (!pl->page) {
		kfree(pl);
		return NULL;
	}

	return pl;
}

static void free_pl(struct page_list *pl)
{
	__free_page(pl->page);
	kfree(pl);
}

static int kcopyd_get_pages(struct kcopyd_client *kc,
			    unsigned int nr, struct page_list **pages)
{
	struct page_list *pl;

	spin_lock(&kc->lock);
	if (kc->nr_free_pages < nr) {
		spin_unlock(&kc->lock);
		return -ENOMEM;
	}

	kc->nr_free_pages -= nr;
	for (*pages = pl = kc->pages; --nr; pl = pl->next)
		;

	kc->pages = pl->next;
	pl->next = NULL;

	spin_unlock(&kc->lock);

	return 0;
}

static void kcopyd_put_pages(struct kcopyd_client *kc, struct page_list *pl)
{
	struct page_list *cursor;

	spin_lock(&kc->lock);
	for (cursor = pl; cursor->next; cursor = cursor->next)
		kc->nr_free_pages++;

	kc->nr_free_pages++;
	cursor->next = kc->pages;
	kc->pages = pl;
	spin_unlock(&kc->lock);
}

/*
 * These three functions resize the page pool.
 */
static void drop_pages(struct page_list *pl)
{
	struct page_list *next;

	while (pl) {
		next = pl->next;
		free_pl(pl);
		pl = next;
	}
}

static int client_alloc_pages(struct kcopyd_client *kc, unsigned int nr)
{
	unsigned int i;
	struct page_list *pl = NULL, *next;

	for (i = 0; i < nr; i++) {
		next = alloc_pl();
		if (!next) {
			if (pl)
				drop_pages(pl);
			return -ENOMEM;
		}

		next->next = pl;
		pl = next;
	}

	kcopyd_put_pages(kc, pl);
	kc->nr_pages += nr;

	return 0;
}

static void client_free_pages(struct kcopyd_client *kc)
{
	BUG_ON(kc->nr_free_pages != kc->nr_pages);
	drop_pages(kc->pages);
	kc->pages = NULL;
	kc->nr_free_pages = kc->nr_pages = 0;
}

/*-----------------------------------------------------------------
 * kcopyd_jobs need to be allocated by the *clients* of kcopyd,
 * for this reason we use a mempool to prevent the client from
 * ever having to do io (which could cause a deadlock).
 *---------------------------------------------------------------*/
struct kcopyd_job {
	struct kcopyd_client *kc;
	struct list_head list;
	unsigned long flags;

	/*
	 * Error state of the job.
	 */
	int read_err;
	unsigned int write_err;

	/*
	 * Either READ or WRITE
	 */
	int rw;
	struct io_region source;

	/*
	 * The destinations for the transfer.
	 */
	unsigned int num_dests;
	struct io_region dests[KCOPYD_MAX_REGIONS];

	sector_t offset;
	unsigned int nr_pages;
	struct page_list *pages;

	/*
	 * Set this to ensure you are notified when the job has
	 * completed.  'context' is for callback to use.
	 */
	kcopyd_notify_fn fn;
	void *context;

	/*
	 * These fields are only used if the job has been split
	 * into more manageable parts.
	 */
	struct semaphore lock;
	atomic_t sub_jobs;
	sector_t progress;
};

/* FIXME: this should scale with the number of pages */
#define MIN_JOBS 512

static kmem_cache_t *_job_cache;
static mempool_t *_job_pool;

/*
 * We maintain three lists of jobs:
 *
 * i)   jobs waiting for pages
 * ii)  jobs that have pages, and are waiting for the io to be issued.
 * iii) jobs that have completed.
 *
 * All three of these are protected by job_lock.
 */
static DEFINE_SPINLOCK(_job_lock);

static LIST_HEAD(_complete_jobs);
static LIST_HEAD(_io_jobs);
static LIST_HEAD(_pages_jobs);

static int jobs_init(void)
{
	_job_cache = kmem_cache_create("kcopyd-jobs",
				       sizeof(struct kcopyd_job),
				       __alignof__(struct kcopyd_job),
				       0, NULL, NULL);
	if (!_job_cache)
		return -ENOMEM;

	_job_pool = mempool_create_slab_pool(MIN_JOBS, _job_cache);
	if (!_job_pool) {
		kmem_cache_destroy(_job_cache);
		return -ENOMEM;
	}

	return 0;
}

static void jobs_exit(void)
{
	BUG_ON(!list_empty(&_complete_jobs));
	BUG_ON(!list_empty(&_io_jobs));
	BUG_ON(!list_empty(&_pages_jobs));

	mempool_destroy(_job_pool);
	kmem_cache_destroy(_job_cache);
	_job_pool = NULL;
	_job_cache = NULL;
}

/*
 * Functions to push and pop a job onto the head of a given job
 * list.
 */
static inline struct kcopyd_job *pop(struct list_head *jobs)
{
	struct kcopyd_job *job = NULL;
	unsigned long flags;

	spin_lock_irqsave(&_job_lock, flags);

	if (!list_empty(jobs)) {
		job = list_entry(jobs->next, struct kcopyd_job, list);
		list_del(&job->list);
	}
	spin_unlock_irqrestore(&_job_lock, flags);

	return job;
}

static inline void push(struct list_head *jobs, struct kcopyd_job *job)
{
	unsigned long flags;

	spin_lock_irqsave(&_job_lock, flags);
	list_add_tail(&job->list, jobs);
	spin_unlock_irqrestore(&_job_lock, flags);
}

/*
 * These three functions process 1 item from the corresponding
 * job list.
 *
 * They return:
 * < 0: error
 *   0: success
 * > 0: can't process yet.
 */
static int run_complete_job(struct kcopyd_job *job)
{
	void *context = job->context;
	int read_err = job->read_err;
	unsigned int write_err = job->write_err;
	kcopyd_notify_fn fn = job->fn;
	struct kcopyd_client *kc = job->kc;

	kcopyd_put_pages(kc, job->pages);
	mempool_free(job, _job_pool);
	fn(read_err, write_err, context);

	if (atomic_dec_and_test(&kc->nr_jobs))
		wake_up(&kc->destroyq);

	return 0;
}

static void complete_io(unsigned long error, void *context)
{
	struct kcopyd_job *job = (struct kcopyd_job *) context;

	if (error) {
		if (job->rw == WRITE)
			job->write_err &= error;
		else
			job->read_err = 1;

		if (!test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
			push(&_complete_jobs, job);
			wake();
			return;
		}
	}

	if (job->rw == WRITE)
		push(&_complete_jobs, job);

	else {
		job->rw = WRITE;
		push(&_io_jobs, job);
	}

	wake();
}

/*
 * Request io on as many buffer heads as we can currently get for
 * a particular job.
 */
static int run_io_job(struct kcopyd_job *job)
{
	int r;

	if (job->rw == READ)
		r = dm_io_async(1, &job->source, job->rw, job->pages,
				job->offset, complete_io, job);
	else
		r = dm_io_async(job->num_dests, job->dests, job->rw, job->pages,
				job->offset, complete_io, job);

	return r;
}

static int run_pages_job(struct kcopyd_job *job)
{
	int r;

	job->nr_pages = dm_div_up(job->dests[0].count + job->offset,
				  PAGE_SIZE >> 9);
	r = kcopyd_get_pages(job->kc, job->nr_pages, &job->pages);
	if (!r) {
		/* this job is ready for io */
		push(&_io_jobs, job);
		return 0;
	}

	if (r == -ENOMEM)
		/* can't complete now */
		return 1;

	return r;
}

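/*
 * Worked example for the sizing above (illustrative; assumes 4 KiB
 * pages): region counts are in 512-byte sectors, so PAGE_SIZE >> 9 is
 * 8 sectors per page, and a destination of 100 sectors at offset 0
 * needs dm_div_up(100, 8) = 13 pages.
 */
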
/*
 * Run through a list for as long as possible.  Returns the count
 * of successful jobs.
 */
static int process_jobs(struct list_head *jobs, int (*fn) (struct kcopyd_job *))
{
	struct kcopyd_job *job;
	int r, count = 0;

	while ((job = pop(jobs))) {

		r = fn(job);

		if (r < 0) {
			/* error this rogue job */
			if (job->rw == WRITE)
				job->write_err = (unsigned int) -1;
			else
				job->read_err = 1;
			push(&_complete_jobs, job);
			break;
		}

		if (r > 0) {
			/*
			 * We couldn't service this job ATM, so
			 * push this job back onto the list.
			 */
			push(jobs, job);
			break;
		}

		count++;
	}

	return count;
}

/*
 * kcopyd does this every time it's woken up.
 */
static void do_work(void *ignored)
{
	/*
	 * The order that these are called is *very* important.
	 * complete jobs can free some pages for pages jobs.
	 * Pages jobs when successful will jump onto the io jobs
	 * list.  io jobs call wake when they complete and it all
	 * starts again.
	 */
	process_jobs(&_complete_jobs, run_complete_job);
	process_jobs(&_pages_jobs, run_pages_job);
	process_jobs(&_io_jobs, run_io_job);
}

/*
 * If we are copying a small region we just dispatch a single job
 * to do the copy, otherwise the io has to be split up into many
 * jobs.
 */
static void dispatch_job(struct kcopyd_job *job)
{
	atomic_inc(&job->kc->nr_jobs);
	push(&_pages_jobs, job);
	wake();
}

#define SUB_JOB_SIZE 128

static void segment_complete(int read_err,
			     unsigned int write_err, void *context)
{
	/* FIXME: tidy this function */
	sector_t progress = 0;
	sector_t count = 0;
	struct kcopyd_job *job = (struct kcopyd_job *) context;

	down(&job->lock);

	/* update the error */
	if (read_err)
		job->read_err = 1;

	if (write_err)
		job->write_err &= write_err;

	/*
	 * Only dispatch more work if there hasn't been an error.
	 */
	if ((!job->read_err && !job->write_err) ||
	    test_bit(KCOPYD_IGNORE_ERROR, &job->flags)) {
		/* get the next chunk of work */
		progress = job->progress;
		count = job->source.count - progress;
		if (count) {
			if (count > SUB_JOB_SIZE)
				count = SUB_JOB_SIZE;

			job->progress += count;
		}
	}
	up(&job->lock);

	if (count) {
		int i;
		struct kcopyd_job *sub_job = mempool_alloc(_job_pool, GFP_NOIO);

		*sub_job = *job;
		sub_job->source.sector += progress;
		sub_job->source.count = count;

		for (i = 0; i < job->num_dests; i++) {
			sub_job->dests[i].sector += progress;
			sub_job->dests[i].count = count;
		}

		sub_job->fn = segment_complete;
		sub_job->context = job;
		dispatch_job(sub_job);

	} else if (atomic_dec_and_test(&job->sub_jobs)) {

		/*
		 * To avoid a race we must keep the job around
		 * until after the notify function has completed.
		 * Otherwise the client may try and stop the job
		 * after we've completed.
		 */
		job->fn(read_err, write_err, job->context);
		mempool_free(job, _job_pool);
	}
}

/*
 * Create some little jobs that will do the move between
 * them.
 */
#define SPLIT_COUNT 8
static void split_job(struct kcopyd_job *job)
{
	int i;

	atomic_set(&job->sub_jobs, SPLIT_COUNT);
	for (i = 0; i < SPLIT_COUNT; i++)
		segment_complete(0, 0u, job);
}

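/*
 * Illustrative arithmetic (not an original comment): SUB_JOB_SIZE is
 * 128 sectors, i.e. 64 KiB with 512-byte sectors, and split_job() seeds
 * SPLIT_COUNT = 8 sub-jobs, so a large copy keeps at most
 * 8 * 64 KiB = 512 KiB in flight; segment_complete() dispatches the
 * next chunk each time a sub-job finishes.
 */
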
int kcopyd_copy(struct kcopyd_client *kc, struct io_region *from,
		unsigned int num_dests, struct io_region *dests,
		unsigned int flags, kcopyd_notify_fn fn, void *context)
{
	struct kcopyd_job *job;

	/*
	 * Allocate a new job.
	 */
	job = mempool_alloc(_job_pool, GFP_NOIO);

	/*
	 * set up for the read.
	 */
	job->kc = kc;
	job->flags = flags;
	job->read_err = 0;
	job->write_err = 0;
	job->rw = READ;

	job->source = *from;

	job->num_dests = num_dests;
	memcpy(&job->dests, dests, sizeof(*dests) * num_dests);

	job->offset = 0;
	job->nr_pages = 0;
	job->pages = NULL;

	job->fn = fn;
	job->context = context;

	if (job->source.count < SUB_JOB_SIZE)
		dispatch_job(job);

	else {
		init_MUTEX(&job->lock);
		job->progress = 0;
		split_job(job);
	}

	return 0;
}

/*
 * Cancels a kcopyd job, eg. someone might be deactivating a
 * mirror.
 */
#if 0
int kcopyd_cancel(struct kcopyd_job *job, int block)
{
	/* FIXME: finish */
	return -1;
}
#endif

/*-----------------------------------------------------------------
 * Unit setup
 *---------------------------------------------------------------*/
static DEFINE_MUTEX(_client_lock);
static LIST_HEAD(_clients);

static void client_add(struct kcopyd_client *kc)
{
	mutex_lock(&_client_lock);
	list_add(&kc->list, &_clients);
	mutex_unlock(&_client_lock);
}

static void client_del(struct kcopyd_client *kc)
{
	mutex_lock(&_client_lock);
	list_del(&kc->list);
	mutex_unlock(&_client_lock);
}

static DEFINE_MUTEX(kcopyd_init_lock);
static int kcopyd_clients = 0;

static int kcopyd_init(void)
{
	int r;

	mutex_lock(&kcopyd_init_lock);

	if (kcopyd_clients) {
		/* Already initialized. */
		kcopyd_clients++;
		mutex_unlock(&kcopyd_init_lock);
		return 0;
	}

	r = jobs_init();
	if (r) {
		mutex_unlock(&kcopyd_init_lock);
		return r;
	}

	_kcopyd_wq = create_singlethread_workqueue("kcopyd");
	if (!_kcopyd_wq) {
		jobs_exit();
		mutex_unlock(&kcopyd_init_lock);
		return -ENOMEM;
	}

	kcopyd_clients++;
	INIT_WORK(&_kcopyd_work, do_work, NULL);
	mutex_unlock(&kcopyd_init_lock);
	return 0;
}

static void kcopyd_exit(void)
{
	mutex_lock(&kcopyd_init_lock);
	kcopyd_clients--;
	if (!kcopyd_clients) {
		jobs_exit();
		destroy_workqueue(_kcopyd_wq);
		_kcopyd_wq = NULL;
	}
	mutex_unlock(&kcopyd_init_lock);
}

int kcopyd_client_create(unsigned int nr_pages, struct kcopyd_client **result)
{
	int r = 0;
	struct kcopyd_client *kc;

	r = kcopyd_init();
	if (r)
		return r;

	kc = kmalloc(sizeof(*kc), GFP_KERNEL);
	if (!kc) {
		kcopyd_exit();
		return -ENOMEM;
	}

	spin_lock_init(&kc->lock);
	kc->pages = NULL;
	kc->nr_pages = kc->nr_free_pages = 0;
	r = client_alloc_pages(kc, nr_pages);
	if (r) {
		kfree(kc);
		kcopyd_exit();
		return r;
	}

	r = dm_io_get(nr_pages);
	if (r) {
		client_free_pages(kc);
		kfree(kc);
		kcopyd_exit();
		return r;
	}

	init_waitqueue_head(&kc->destroyq);
	atomic_set(&kc->nr_jobs, 0);

	client_add(kc);
	*result = kc;
	return 0;
}

void kcopyd_client_destroy(struct kcopyd_client *kc)
{
	/* Wait for completion of all jobs submitted by this client. */
	wait_event(kc->destroyq, !atomic_read(&kc->nr_jobs));

	dm_io_put(kc->nr_pages);
	client_free_pages(kc);
	client_del(kc);
	kfree(kc);
	kcopyd_exit();
}

EXPORT_SYMBOL(kcopyd_client_create);
EXPORT_SYMBOL(kcopyd_client_destroy);
EXPORT_SYMBOL(kcopyd_copy);