dm bufio: drop the lock when doing GFP_NOIO allocation
[linux/fpc-iii.git] / drivers / md / dm-bufio.c
1 /*
2 * Copyright (C) 2009-2011 Red Hat, Inc.
4 * Author: Mikulas Patocka <mpatocka@redhat.com>
6 * This file is released under the GPL.
7 */
9 #include "dm-bufio.h"
11 #include <linux/device-mapper.h>
12 #include <linux/dm-io.h>
13 #include <linux/slab.h>
14 #include <linux/vmalloc.h>
15 #include <linux/shrinker.h>
16 #include <linux/module.h>
18 #define DM_MSG_PREFIX "bufio"
20 /*
21 * Memory management policy:
22 * Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
23 * or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
24 * Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
25 * Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
26 * dirty buffers.
27 */
28 #define DM_BUFIO_MIN_BUFFERS 8
30 #define DM_BUFIO_MEMORY_PERCENT 2
31 #define DM_BUFIO_VMALLOC_PERCENT 25
32 #define DM_BUFIO_WRITEBACK_PERCENT 75
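/*
 * Illustrative numbers (not from this file; they assume a 64-bit machine
 * with 4 GiB of RAM): the default cache limit would be about 2% of low
 * memory, i.e. roughly 80 MiB, additionally capped at 25% of the vmalloc
 * area; background writeback starts once 75% of a client's buffers are
 * dirty, and each client always keeps at least 8 buffers regardless of
 * the limits above.
 */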
34 /*
35 * Check buffer ages in this interval (seconds)
36 */
37 #define DM_BUFIO_WORK_TIMER_SECS 10
39 /*
40 * Free buffers when they are older than this (seconds)
41 */
42 #define DM_BUFIO_DEFAULT_AGE_SECS 60
44 /*
45 * The number of bvec entries that are embedded directly in the buffer.
46 * If the chunk size is larger, dm-io is used to do the io.
47 */
48 #define DM_BUFIO_INLINE_VECS 16
50 /*
51 * Buffer hash
52 */
53 #define DM_BUFIO_HASH_BITS 20
54 #define DM_BUFIO_HASH(block) \
55 ((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
56 ((1 << DM_BUFIO_HASH_BITS) - 1))
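/*
 * The hash folds the bits of the block number above DM_BUFIO_HASH_BITS
 * back into the low bits with an XOR and masks to the table size.  For
 * example, blocks 5 and 5 + (1 << 20) share their low 20 bits but hash
 * to buckets 5 and 4 respectively, so runs of blocks that differ only
 * in the high bits still spread across the table.
 */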
58 /*
59 * Don't try to use kmem_cache_alloc for blocks larger than this.
60 * For explanation, see alloc_buffer_data below.
61 */
62 #define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1)
63 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1))
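/*
 * For orientation (assuming 4 KiB pages and MAX_ORDER == 11, a common
 * x86-64 configuration): the slab limit above works out to 2 KiB and the
 * __get_free_pages() limit to 4 MiB.  Blocks above the latter, or
 * allocations that are not marked __GFP_NORETRY, use vmalloc instead;
 * see alloc_buffer_data() below.
 */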
65 /*
66 * dm_buffer->list_mode
67 */
68 #define LIST_CLEAN 0
69 #define LIST_DIRTY 1
70 #define LIST_SIZE 2
72 /*
73 * Linking of buffers:
74 * All buffers are linked to cache_hash with their hash_list field.
76 * Clean buffers that are not being written (B_WRITING not set)
77 * are linked to lru[LIST_CLEAN] with their lru_list field.
79 * Dirty and clean buffers that are being written are linked to
80 * lru[LIST_DIRTY] with their lru_list field. When the write
81 * finishes, the buffer cannot be relinked immediately (because we
82 * are in an interrupt context and relinking requires process
83 * context), so some clean-not-writing buffers can be held on
84 * dirty_lru too. They are later added to lru in the process
85 * context.
86 */
87 struct dm_bufio_client {
88 struct mutex lock;
90 struct list_head lru[LIST_SIZE];
91 unsigned long n_buffers[LIST_SIZE];
93 struct block_device *bdev;
94 unsigned block_size;
95 unsigned char sectors_per_block_bits;
96 unsigned char pages_per_block_bits;
97 unsigned char blocks_per_page_bits;
98 unsigned aux_size;
99 void (*alloc_callback)(struct dm_buffer *);
100 void (*write_callback)(struct dm_buffer *);
102 struct dm_io_client *dm_io;
104 struct list_head reserved_buffers;
105 unsigned need_reserved_buffers;
107 unsigned minimum_buffers;
109 struct hlist_head *cache_hash;
110 wait_queue_head_t free_buffer_wait;
112 int async_write_error;
114 struct list_head client_list;
115 struct shrinker shrinker;
116 };
118 /*
119 * Buffer state bits.
120 */
121 #define B_READING 0
122 #define B_WRITING 1
123 #define B_DIRTY 2
125 /*
126 * Describes how the block was allocated:
127 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
128 * See the comment at alloc_buffer_data.
129 */
130 enum data_mode {
131 DATA_MODE_SLAB = 0,
132 DATA_MODE_GET_FREE_PAGES = 1,
133 DATA_MODE_VMALLOC = 2,
134 DATA_MODE_LIMIT = 3
135 };
137 struct dm_buffer {
138 struct hlist_node hash_list;
139 struct list_head lru_list;
140 sector_t block;
141 void *data;
142 enum data_mode data_mode;
143 unsigned char list_mode; /* LIST_* */
144 unsigned hold_count;
145 int read_error;
146 int write_error;
147 unsigned long state;
148 unsigned long last_accessed;
149 struct dm_bufio_client *c;
150 struct list_head write_list;
151 struct bio bio;
152 struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
153 };
155 /*----------------------------------------------------------------*/
157 static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
158 static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];
160 static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
162 unsigned ret = c->blocks_per_page_bits - 1;
164 BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
166 return ret;
169 #define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)])
170 #define DM_BUFIO_CACHE_NAME(c) (dm_bufio_cache_names[dm_bufio_cache_index(c)])
172 #define dm_bufio_in_request() (!!current->bio_list)
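/*
 * current->bio_list is only non-NULL while the task is inside
 * generic_make_request(), i.e. inside some device's request routine, so
 * dm_bufio_in_request() detects that context.  dm_bufio_lock() passes the
 * result to mutex_lock_nested() as the lockdep subclass, which appears to
 * be there so that taking a bufio lock from within another device's
 * request path is not reported as recursive locking.
 */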
174 static void dm_bufio_lock(struct dm_bufio_client *c)
176 mutex_lock_nested(&c->lock, dm_bufio_in_request());
179 static int dm_bufio_trylock(struct dm_bufio_client *c)
181 return mutex_trylock(&c->lock);
184 static void dm_bufio_unlock(struct dm_bufio_client *c)
186 mutex_unlock(&c->lock);
190 * FIXME Move to sched.h?
192 #ifdef CONFIG_PREEMPT_VOLUNTARY
193 # define dm_bufio_cond_resched() \
194 do { \
195 if (unlikely(need_resched())) \
196 _cond_resched(); \
197 } while (0)
198 #else
199 # define dm_bufio_cond_resched() do { } while (0)
200 #endif
202 /*----------------------------------------------------------------*/
205 * Default cache size: available memory divided by the ratio.
207 static unsigned long dm_bufio_default_cache_size;
210 * Total cache size set by the user.
212 static unsigned long dm_bufio_cache_size;
215 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
216 * at any time. If it disagrees, the user has changed cache size.
218 static unsigned long dm_bufio_cache_size_latch;
220 static DEFINE_SPINLOCK(param_spinlock);
223 * Buffers are freed after this timeout
225 static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
227 static unsigned long dm_bufio_peak_allocated;
228 static unsigned long dm_bufio_allocated_kmem_cache;
229 static unsigned long dm_bufio_allocated_get_free_pages;
230 static unsigned long dm_bufio_allocated_vmalloc;
231 static unsigned long dm_bufio_current_allocated;
233 /*----------------------------------------------------------------*/
236 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
238 static unsigned long dm_bufio_cache_size_per_client;
241 * The current number of clients.
243 static int dm_bufio_client_count;
246 * The list of all clients.
248 static LIST_HEAD(dm_bufio_all_clients);
251 * This mutex protects dm_bufio_cache_size_latch,
252 * dm_bufio_cache_size_per_client and dm_bufio_client_count
254 static DEFINE_MUTEX(dm_bufio_clients_lock);
256 /*----------------------------------------------------------------*/
258 static void adjust_total_allocated(enum data_mode data_mode, long diff)
260 static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
261 &dm_bufio_allocated_kmem_cache,
262 &dm_bufio_allocated_get_free_pages,
263 &dm_bufio_allocated_vmalloc,
266 spin_lock(&param_spinlock);
268 *class_ptr[data_mode] += diff;
270 dm_bufio_current_allocated += diff;
272 if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
273 dm_bufio_peak_allocated = dm_bufio_current_allocated;
275 spin_unlock(&param_spinlock);
279 * Change the number of clients and recalculate per-client limit.
281 static void __cache_size_refresh(void)
283 BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
284 BUG_ON(dm_bufio_client_count < 0);
286 dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);
289 * Use default if set to 0 and report the actual cache size used.
291 if (!dm_bufio_cache_size_latch) {
292 (void)cmpxchg(&dm_bufio_cache_size, 0,
293 dm_bufio_default_cache_size);
294 dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
297 dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
298 (dm_bufio_client_count ? : 1);
301 /*
302 * Allocating buffer data.
304 * Small buffers are allocated with kmem_cache, to use space optimally.
306 * For large buffers, we choose between get_free_pages and vmalloc.
307 * Each has advantages and disadvantages.
309 * __get_free_pages can randomly fail if the memory is fragmented.
310 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
311 * as low as 128M) so using it for caching is not appropriate.
313 * If the allocation may fail we use __get_free_pages. Memory fragmentation
314 * won't have a fatal effect here, but it just causes flushes of some other
315 * buffers and more I/O will be performed. Don't use __get_free_pages if it
316 * always fails (i.e. order >= MAX_ORDER).
318 * If the allocation shouldn't fail we use __vmalloc. This is only for the
319 * initial reserve allocation, so there's no risk of wasting all vmalloc
320 * space.
321 */
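/*
 * In short: slab for block_size <= PAGE_SIZE / 2; __get_free_pages() when
 * the order fits below MAX_ORDER and the caller passed __GFP_NORETRY (the
 * allocation is allowed to fail); __vmalloc() otherwise.
 */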
322 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
323 enum data_mode *data_mode)
325 unsigned noio_flag;
326 void *ptr;
328 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
329 *data_mode = DATA_MODE_SLAB;
330 return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
333 if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
334 gfp_mask & __GFP_NORETRY) {
335 *data_mode = DATA_MODE_GET_FREE_PAGES;
336 return (void *)__get_free_pages(gfp_mask,
337 c->pages_per_block_bits);
340 *data_mode = DATA_MODE_VMALLOC;
342 /*
343 * __vmalloc allocates the data pages and auxiliary structures with
344 * gfp_flags that were specified, but pagetables are always allocated
345 * with GFP_KERNEL, no matter what was specified as gfp_mask.
347 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
348 * all allocations done by this process (including pagetables) are done
349 * as if GFP_NOIO was specified.
350 */
352 noio_flag = 0;
353 if (gfp_mask & __GFP_NORETRY)
354 noio_flag = memalloc_noio_save();
356 ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);
358 if (gfp_mask & __GFP_NORETRY)
359 memalloc_noio_restore(noio_flag);
361 return ptr;
365 * Free buffer's data.
367 static void free_buffer_data(struct dm_bufio_client *c,
368 void *data, enum data_mode data_mode)
370 switch (data_mode) {
371 case DATA_MODE_SLAB:
372 kmem_cache_free(DM_BUFIO_CACHE(c), data);
373 break;
375 case DATA_MODE_GET_FREE_PAGES:
376 free_pages((unsigned long)data, c->pages_per_block_bits);
377 break;
379 case DATA_MODE_VMALLOC:
380 vfree(data);
381 break;
383 default:
384 DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
385 data_mode);
386 BUG();
391 * Allocate buffer and its data.
393 static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
395 struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
396 gfp_mask);
398 if (!b)
399 return NULL;
401 b->c = c;
403 b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
404 if (!b->data) {
405 kfree(b);
406 return NULL;
409 adjust_total_allocated(b->data_mode, (long)c->block_size);
411 return b;
415 * Free buffer and its data.
417 static void free_buffer(struct dm_buffer *b)
419 struct dm_bufio_client *c = b->c;
421 adjust_total_allocated(b->data_mode, -(long)c->block_size);
423 free_buffer_data(c, b->data, b->data_mode);
424 kfree(b);
428 * Link buffer to the hash list and clean or dirty queue.
430 static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
432 struct dm_bufio_client *c = b->c;
434 c->n_buffers[dirty]++;
435 b->block = block;
436 b->list_mode = dirty;
437 list_add(&b->lru_list, &c->lru[dirty]);
438 hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
439 b->last_accessed = jiffies;
443 * Unlink buffer from the hash list and dirty or clean queue.
445 static void __unlink_buffer(struct dm_buffer *b)
447 struct dm_bufio_client *c = b->c;
449 BUG_ON(!c->n_buffers[b->list_mode]);
451 c->n_buffers[b->list_mode]--;
452 hlist_del(&b->hash_list);
453 list_del(&b->lru_list);
457 * Place the buffer to the head of dirty or clean LRU queue.
459 static void __relink_lru(struct dm_buffer *b, int dirty)
461 struct dm_bufio_client *c = b->c;
463 BUG_ON(!c->n_buffers[b->list_mode]);
465 c->n_buffers[b->list_mode]--;
466 c->n_buffers[dirty]++;
467 b->list_mode = dirty;
468 list_move(&b->lru_list, &c->lru[dirty]);
469 b->last_accessed = jiffies;
472 /*----------------------------------------------------------------
473 * Submit I/O on the buffer.
475 * Bio interface is faster but it has some problems:
476 * the vector list is limited (increasing this limit increases
477 * memory-consumption per buffer, so it is not viable);
479 * the memory must be direct-mapped, not vmalloced;
481 * the I/O driver can reject requests spuriously if it thinks that
482 * the requests are too big for the device or if they cross a
483 * controller-defined memory boundary.
485 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
486 * it is not vmalloced, try using the bio interface.
488 * If the buffer is big, if it is vmalloced or if the underlying device
489 * rejects the bio because it is too large, use dm-io layer to do the I/O.
490 * The dm-io layer splits the I/O into multiple requests, avoiding the above
491 * shortcomings.
492 *--------------------------------------------------------------*/
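/*
 * In concrete terms (assuming 4 KiB pages): submit_io() below uses the
 * inline bio for buffers of up to DM_BUFIO_INLINE_VECS * PAGE_SIZE = 64 KiB
 * that were not vmalloc'ed, and dm-io for everything else.
 */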
495 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
496 * that the request was handled directly with bio interface.
498 static void dmio_complete(unsigned long error, void *context)
500 struct dm_buffer *b = context;
502 b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
505 static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
506 bio_end_io_t *end_io)
508 int r;
509 struct dm_io_request io_req = {
510 .bi_rw = rw,
511 .notify.fn = dmio_complete,
512 .notify.context = b,
513 .client = b->c->dm_io,
515 struct dm_io_region region = {
516 .bdev = b->c->bdev,
517 .sector = block << b->c->sectors_per_block_bits,
518 .count = b->c->block_size >> SECTOR_SHIFT,
521 if (b->data_mode != DATA_MODE_VMALLOC) {
522 io_req.mem.type = DM_IO_KMEM;
523 io_req.mem.ptr.addr = b->data;
524 } else {
525 io_req.mem.type = DM_IO_VMA;
526 io_req.mem.ptr.vma = b->data;
529 b->bio.bi_end_io = end_io;
531 r = dm_io(&io_req, 1, &region, NULL);
532 if (r)
533 end_io(&b->bio, r);
536 static void inline_endio(struct bio *bio, int error)
538 bio_end_io_t *end_fn = bio->bi_private;
541 * Reset the bio to free any attached resources
542 * (e.g. bio integrity profiles).
544 bio_reset(bio);
546 end_fn(bio, error);
549 static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
550 bio_end_io_t *end_io)
552 char *ptr;
553 int len;
555 bio_init(&b->bio);
556 b->bio.bi_io_vec = b->bio_vec;
557 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
558 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
559 b->bio.bi_bdev = b->c->bdev;
560 b->bio.bi_end_io = inline_endio;
562 * Use of .bi_private isn't a problem here because
563 * the dm_buffer's inline bio is local to bufio.
565 b->bio.bi_private = end_io;
568 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
569 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
571 ptr = b->data;
572 len = b->c->block_size;
574 if (len >= PAGE_SIZE)
575 BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
576 else
577 BUG_ON((unsigned long)ptr & (len - 1));
579 do {
580 if (!bio_add_page(&b->bio, virt_to_page(ptr),
581 len < PAGE_SIZE ? len : PAGE_SIZE,
582 virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
583 BUG_ON(b->c->block_size <= PAGE_SIZE);
584 use_dmio(b, rw, block, end_io);
585 return;
588 len -= PAGE_SIZE;
589 ptr += PAGE_SIZE;
590 } while (len > 0);
592 submit_bio(rw, &b->bio);
595 static void submit_io(struct dm_buffer *b, int rw, sector_t block,
596 bio_end_io_t *end_io)
598 if (rw == WRITE && b->c->write_callback)
599 b->c->write_callback(b);
601 if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
602 b->data_mode != DATA_MODE_VMALLOC)
603 use_inline_bio(b, rw, block, end_io);
604 else
605 use_dmio(b, rw, block, end_io);
608 /*----------------------------------------------------------------
609 * Writing dirty buffers
610 *--------------------------------------------------------------*/
613 * The endio routine for write.
615 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
616 * it.
618 static void write_endio(struct bio *bio, int error)
620 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
622 b->write_error = error;
623 if (unlikely(error)) {
624 struct dm_bufio_client *c = b->c;
625 (void)cmpxchg(&c->async_write_error, 0, error);
628 BUG_ON(!test_bit(B_WRITING, &b->state));
630 smp_mb__before_atomic();
631 clear_bit(B_WRITING, &b->state);
632 smp_mb__after_atomic();
634 wake_up_bit(&b->state, B_WRITING);
638 * Initiate a write on a dirty buffer, but don't wait for it.
640 * - If the buffer is not dirty, exit.
641 * - If there is some previous write going on, wait for it to finish (we can't
642 * have two writes on the same buffer simultaneously).
643 * - Submit our write and don't wait on it. We set B_WRITING indicating
644 * that there is a write in progress.
646 static void __write_dirty_buffer(struct dm_buffer *b,
647 struct list_head *write_list)
649 if (!test_bit(B_DIRTY, &b->state))
650 return;
652 clear_bit(B_DIRTY, &b->state);
653 wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
655 if (!write_list)
656 submit_io(b, WRITE, b->block, write_endio);
657 else
658 list_add_tail(&b->write_list, write_list);
661 static void __flush_write_list(struct list_head *write_list)
663 struct blk_plug plug;
664 blk_start_plug(&plug);
665 while (!list_empty(write_list)) {
666 struct dm_buffer *b =
667 list_entry(write_list->next, struct dm_buffer, write_list);
668 list_del(&b->write_list);
669 submit_io(b, WRITE, b->block, write_endio);
670 dm_bufio_cond_resched();
672 blk_finish_plug(&plug);
676 * Wait until any activity on the buffer finishes. Possibly write the
677 * buffer if it is dirty. When this function finishes, there is no I/O
678 * running on the buffer and the buffer is not dirty.
680 static void __make_buffer_clean(struct dm_buffer *b)
682 BUG_ON(b->hold_count);
684 if (!b->state) /* fast case */
685 return;
687 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
688 __write_dirty_buffer(b, NULL);
689 wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
693 * Find some buffer that is not held by anybody, clean it, unlink it and
694 * return it.
696 static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
698 struct dm_buffer *b;
700 list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
701 BUG_ON(test_bit(B_WRITING, &b->state));
702 BUG_ON(test_bit(B_DIRTY, &b->state));
704 if (!b->hold_count) {
705 __make_buffer_clean(b);
706 __unlink_buffer(b);
707 return b;
709 dm_bufio_cond_resched();
712 list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
713 BUG_ON(test_bit(B_READING, &b->state));
715 if (!b->hold_count) {
716 __make_buffer_clean(b);
717 __unlink_buffer(b);
718 return b;
720 dm_bufio_cond_resched();
723 return NULL;
727 * Wait until some other threads free some buffer or release hold count on
728 * some buffer.
730 * This function is entered with c->lock held, drops it and regains it
731 * before exiting.
733 static void __wait_for_free_buffer(struct dm_bufio_client *c)
735 DECLARE_WAITQUEUE(wait, current);
737 add_wait_queue(&c->free_buffer_wait, &wait);
738 set_task_state(current, TASK_UNINTERRUPTIBLE);
739 dm_bufio_unlock(c);
741 io_schedule();
743 remove_wait_queue(&c->free_buffer_wait, &wait);
745 dm_bufio_lock(c);
748 enum new_flag {
749 NF_FRESH = 0,
750 NF_READ = 1,
751 NF_GET = 2,
752 NF_PREFETCH = 3
756 * Allocate a new buffer. If the allocation is not possible, wait until
757 * some other thread frees a buffer.
759 * May drop the lock and regain it.
761 static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
763 struct dm_buffer *b;
764 bool tried_noio_alloc = false;
766 /*
767 * dm-bufio is resistant to allocation failures (it just keeps
768 * one buffer reserved in cases all the allocations fail).
769 * So set flags to not try too hard:
770 * GFP_NOWAIT: don't wait; if we need to sleep we'll release our
771 * mutex and wait ourselves.
772 * __GFP_NORETRY: don't retry and rather return failure
773 * __GFP_NOMEMALLOC: don't use emergency reserves
774 * __GFP_NOWARN: don't print a warning in case of failure
776 * For debugging, if we set the cache size to 1, no new buffers will
777 * be allocated.
778 */
779 while (1) {
780 if (dm_bufio_cache_size_latch != 1) {
781 b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
782 if (b)
783 return b;
786 if (nf == NF_PREFETCH)
787 return NULL;
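/*
 * This is the allocation referred to in the patch subject: a GFP_NOIO
 * allocation can block for a long time under memory pressure, so the
 * client lock is dropped around it and re-taken afterwards.  Because the
 * lock was released, the caller (__bufio_new) rechecks the hash table for
 * a racing insertion; tried_noio_alloc ensures this slower attempt is
 * made only once per call.
 */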
789 if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
790 dm_bufio_unlock(c);
791 b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
792 dm_bufio_lock(c);
793 if (b)
794 return b;
795 tried_noio_alloc = true;
798 if (!list_empty(&c->reserved_buffers)) {
799 b = list_entry(c->reserved_buffers.next,
800 struct dm_buffer, lru_list);
801 list_del(&b->lru_list);
802 c->need_reserved_buffers++;
804 return b;
807 b = __get_unclaimed_buffer(c);
808 if (b)
809 return b;
811 __wait_for_free_buffer(c);
815 static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
817 struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);
819 if (!b)
820 return NULL;
822 if (c->alloc_callback)
823 c->alloc_callback(b);
825 return b;
829 * Free a buffer and wake other threads waiting for free buffers.
831 static void __free_buffer_wake(struct dm_buffer *b)
833 struct dm_bufio_client *c = b->c;
835 if (!c->need_reserved_buffers)
836 free_buffer(b);
837 else {
838 list_add(&b->lru_list, &c->reserved_buffers);
839 c->need_reserved_buffers--;
842 wake_up(&c->free_buffer_wait);
845 static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
846 struct list_head *write_list)
848 struct dm_buffer *b, *tmp;
850 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
851 BUG_ON(test_bit(B_READING, &b->state));
853 if (!test_bit(B_DIRTY, &b->state) &&
854 !test_bit(B_WRITING, &b->state)) {
855 __relink_lru(b, LIST_CLEAN);
856 continue;
859 if (no_wait && test_bit(B_WRITING, &b->state))
860 return;
862 __write_dirty_buffer(b, write_list);
863 dm_bufio_cond_resched();
867 /*
868 * Get writeback threshold and buffer limit for a given client.
869 */
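/*
 * Worked example (illustrative only): with 4 KiB blocks and a 25 MiB
 * per-client share, limit_buffers comes out at 6400 buffers and
 * threshold_buffers at 75% of that, i.e. 4800.  A client never gets less
 * than its minimum_buffers, whatever the share.
 */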
870 static void __get_memory_limit(struct dm_bufio_client *c,
871 unsigned long *threshold_buffers,
872 unsigned long *limit_buffers)
874 unsigned long buffers;
876 if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
877 if (mutex_trylock(&dm_bufio_clients_lock)) {
878 __cache_size_refresh();
879 mutex_unlock(&dm_bufio_clients_lock);
883 buffers = dm_bufio_cache_size_per_client >>
884 (c->sectors_per_block_bits + SECTOR_SHIFT);
886 if (buffers < c->minimum_buffers)
887 buffers = c->minimum_buffers;
889 *limit_buffers = buffers;
890 *threshold_buffers = mult_frac(buffers,
891 DM_BUFIO_WRITEBACK_PERCENT, 100);
895 * Check if we're over watermark.
896 * If we are over threshold_buffers, start freeing buffers.
897 * If we're over "limit_buffers", block until we get under the limit.
899 static void __check_watermark(struct dm_bufio_client *c,
900 struct list_head *write_list)
902 unsigned long threshold_buffers, limit_buffers;
904 __get_memory_limit(c, &threshold_buffers, &limit_buffers);
906 while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
907 limit_buffers) {
909 struct dm_buffer *b = __get_unclaimed_buffer(c);
911 if (!b)
912 return;
914 __free_buffer_wake(b);
915 dm_bufio_cond_resched();
918 if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
919 __write_dirty_buffers_async(c, 1, write_list);
923 * Find a buffer in the hash.
925 static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
927 struct dm_buffer *b;
929 hlist_for_each_entry(b, &c->cache_hash[DM_BUFIO_HASH(block)],
930 hash_list) {
931 dm_bufio_cond_resched();
932 if (b->block == block)
933 return b;
936 return NULL;
939 /*----------------------------------------------------------------
940 * Getting a buffer
941 *--------------------------------------------------------------*/
943 static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
944 enum new_flag nf, int *need_submit,
945 struct list_head *write_list)
947 struct dm_buffer *b, *new_b = NULL;
949 *need_submit = 0;
951 b = __find(c, block);
952 if (b)
953 goto found_buffer;
955 if (nf == NF_GET)
956 return NULL;
958 new_b = __alloc_buffer_wait(c, nf);
959 if (!new_b)
960 return NULL;
963 * We've had a period where the mutex was unlocked, so need to
964 * recheck the hash table.
966 b = __find(c, block);
967 if (b) {
968 __free_buffer_wake(new_b);
969 goto found_buffer;
972 __check_watermark(c, write_list);
974 b = new_b;
975 b->hold_count = 1;
976 b->read_error = 0;
977 b->write_error = 0;
978 __link_buffer(b, block, LIST_CLEAN);
980 if (nf == NF_FRESH) {
981 b->state = 0;
982 return b;
985 b->state = 1 << B_READING;
986 *need_submit = 1;
988 return b;
990 found_buffer:
991 if (nf == NF_PREFETCH)
992 return NULL;
994 * Note: it is essential that we don't wait for the buffer to be
995 * read if dm_bufio_get function is used. Both dm_bufio_get and
996 * dm_bufio_prefetch can be used in the driver request routine.
997 * If the user called both dm_bufio_prefetch and dm_bufio_get on
998 * the same buffer, it would deadlock if we waited.
1000 if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
1001 return NULL;
1003 b->hold_count++;
1004 __relink_lru(b, test_bit(B_DIRTY, &b->state) ||
1005 test_bit(B_WRITING, &b->state));
1006 return b;
1010 * The endio routine for reading: set the error, clear the bit and wake up
1011 * anyone waiting on the buffer.
1013 static void read_endio(struct bio *bio, int error)
1015 struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);
1017 b->read_error = error;
1019 BUG_ON(!test_bit(B_READING, &b->state));
1021 smp_mb__before_atomic();
1022 clear_bit(B_READING, &b->state);
1023 smp_mb__after_atomic();
1025 wake_up_bit(&b->state, B_READING);
1029 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
1030 * functions is similar except that dm_bufio_new doesn't read the
1031 * buffer from the disk (assuming that the caller overwrites all the data
1032 * and uses dm_bufio_mark_buffer_dirty to write new data back).
1034 static void *new_read(struct dm_bufio_client *c, sector_t block,
1035 enum new_flag nf, struct dm_buffer **bp)
1037 int need_submit;
1038 struct dm_buffer *b;
1040 LIST_HEAD(write_list);
1042 dm_bufio_lock(c);
1043 b = __bufio_new(c, block, nf, &need_submit, &write_list);
1044 dm_bufio_unlock(c);
1046 __flush_write_list(&write_list);
1048 if (!b)
1049 return b;
1051 if (need_submit)
1052 submit_io(b, READ, b->block, read_endio);
1054 wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
1056 if (b->read_error) {
1057 int error = b->read_error;
1059 dm_bufio_release(b);
1061 return ERR_PTR(error);
1064 *bp = b;
1066 return b->data;
1069 void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
1070 struct dm_buffer **bp)
1072 return new_read(c, block, NF_GET, bp);
1074 EXPORT_SYMBOL_GPL(dm_bufio_get);
1076 void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
1077 struct dm_buffer **bp)
1079 BUG_ON(dm_bufio_in_request());
1081 return new_read(c, block, NF_READ, bp);
1083 EXPORT_SYMBOL_GPL(dm_bufio_read);
1085 void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
1086 struct dm_buffer **bp)
1088 BUG_ON(dm_bufio_in_request());
1090 return new_read(c, block, NF_FRESH, bp);
1092 EXPORT_SYMBOL_GPL(dm_bufio_new);
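/*
 * Typical read-side use of the interface exported above (an illustrative
 * sketch, not code from this file; "my_client", "block" and "validate"
 * are hypothetical):
 *
 *	struct dm_buffer *buf;
 *	void *data = dm_bufio_read(my_client, block, &buf);
 *
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	validate(data, dm_bufio_get_block_size(my_client));
 *	dm_bufio_release(buf);
 *
 * dm_bufio_read() returns the data (or an ERR_PTR on read error) and sets
 * *buf; every successful call must be paired with dm_bufio_release().
 */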
1094 void dm_bufio_prefetch(struct dm_bufio_client *c,
1095 sector_t block, unsigned n_blocks)
1097 struct blk_plug plug;
1099 LIST_HEAD(write_list);
1101 BUG_ON(dm_bufio_in_request());
1103 blk_start_plug(&plug);
1104 dm_bufio_lock(c);
1106 for (; n_blocks--; block++) {
1107 int need_submit;
1108 struct dm_buffer *b;
1109 b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
1110 &write_list);
1111 if (unlikely(!list_empty(&write_list))) {
1112 dm_bufio_unlock(c);
1113 blk_finish_plug(&plug);
1114 __flush_write_list(&write_list);
1115 blk_start_plug(&plug);
1116 dm_bufio_lock(c);
1118 if (unlikely(b != NULL)) {
1119 dm_bufio_unlock(c);
1121 if (need_submit)
1122 submit_io(b, READ, b->block, read_endio);
1123 dm_bufio_release(b);
1125 dm_bufio_cond_resched();
1127 if (!n_blocks)
1128 goto flush_plug;
1129 dm_bufio_lock(c);
1133 dm_bufio_unlock(c);
1135 flush_plug:
1136 blk_finish_plug(&plug);
1138 EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
1140 void dm_bufio_release(struct dm_buffer *b)
1142 struct dm_bufio_client *c = b->c;
1144 dm_bufio_lock(c);
1146 BUG_ON(!b->hold_count);
1148 b->hold_count--;
1149 if (!b->hold_count) {
1150 wake_up(&c->free_buffer_wait);
1153 * If there were errors on the buffer, and the buffer is not
1154 * to be written, free the buffer. There is no point in caching
1155 * invalid buffer.
1157 if ((b->read_error || b->write_error) &&
1158 !test_bit(B_READING, &b->state) &&
1159 !test_bit(B_WRITING, &b->state) &&
1160 !test_bit(B_DIRTY, &b->state)) {
1161 __unlink_buffer(b);
1162 __free_buffer_wake(b);
1166 dm_bufio_unlock(c);
1168 EXPORT_SYMBOL_GPL(dm_bufio_release);
1170 void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
1172 struct dm_bufio_client *c = b->c;
1174 dm_bufio_lock(c);
1176 BUG_ON(test_bit(B_READING, &b->state));
1178 if (!test_and_set_bit(B_DIRTY, &b->state))
1179 __relink_lru(b, LIST_DIRTY);
1181 dm_bufio_unlock(c);
1183 EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
1185 void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
1187 LIST_HEAD(write_list);
1189 BUG_ON(dm_bufio_in_request());
1191 dm_bufio_lock(c);
1192 __write_dirty_buffers_async(c, 0, &write_list);
1193 dm_bufio_unlock(c);
1194 __flush_write_list(&write_list);
1196 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
1199 * For performance, it is essential that the buffers are written asynchronously
1200 * and simultaneously (so that the block layer can merge the writes) and then
1201 * waited upon.
1203 * Finally, we flush hardware disk cache.
1205 int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
1207 int a, f;
1208 unsigned long buffers_processed = 0;
1209 struct dm_buffer *b, *tmp;
1211 LIST_HEAD(write_list);
1213 dm_bufio_lock(c);
1214 __write_dirty_buffers_async(c, 0, &write_list);
1215 dm_bufio_unlock(c);
1216 __flush_write_list(&write_list);
1217 dm_bufio_lock(c);
1219 again:
1220 list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
1221 int dropped_lock = 0;
1223 if (buffers_processed < c->n_buffers[LIST_DIRTY])
1224 buffers_processed++;
1226 BUG_ON(test_bit(B_READING, &b->state));
1228 if (test_bit(B_WRITING, &b->state)) {
1229 if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
1230 dropped_lock = 1;
1231 b->hold_count++;
1232 dm_bufio_unlock(c);
1233 wait_on_bit_io(&b->state, B_WRITING,
1234 TASK_UNINTERRUPTIBLE);
1235 dm_bufio_lock(c);
1236 b->hold_count--;
1237 } else
1238 wait_on_bit_io(&b->state, B_WRITING,
1239 TASK_UNINTERRUPTIBLE);
1242 if (!test_bit(B_DIRTY, &b->state) &&
1243 !test_bit(B_WRITING, &b->state))
1244 __relink_lru(b, LIST_CLEAN);
1246 dm_bufio_cond_resched();
1249 * If we dropped the lock, the list is no longer consistent,
1250 * so we must restart the search.
1252 * In the most common case, the buffer just processed is
1253 * relinked to the clean list, so we won't loop scanning the
1254 * same buffer again and again.
1256 * This may livelock if there is another thread simultaneously
1257 * dirtying buffers, so we count the number of buffers walked
1258 * and if it exceeds the total number of buffers, it means that
1259 * someone is doing some writes simultaneously with us. In
1260 * this case, stop, dropping the lock.
1262 if (dropped_lock)
1263 goto again;
1265 wake_up(&c->free_buffer_wait);
1266 dm_bufio_unlock(c);
1268 a = xchg(&c->async_write_error, 0);
1269 f = dm_bufio_issue_flush(c);
1270 if (a)
1271 return a;
1273 return f;
1275 EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
1277 /*
1278 * Use dm-io to send an empty barrier to flush the device.
1279 */
1280 int dm_bufio_issue_flush(struct dm_bufio_client *c)
1282 struct dm_io_request io_req = {
1283 .bi_rw = WRITE_FLUSH,
1284 .mem.type = DM_IO_KMEM,
1285 .mem.ptr.addr = NULL,
1286 .client = c->dm_io,
1288 struct dm_io_region io_reg = {
1289 .bdev = c->bdev,
1290 .sector = 0,
1291 .count = 0,
1294 BUG_ON(dm_bufio_in_request());
1296 return dm_io(&io_req, 1, &io_reg, NULL);
1298 EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
1301 * We first delete any other buffer that may be at that new location.
1303 * Then, we write the buffer to the original location if it was dirty.
1305 * Then, if we are the only one who is holding the buffer, relink the buffer
1306 * in the hash queue for the new location.
1308 * If there was someone else holding the buffer, we write it to the new
1309 * location but not relink it, because that other user needs to have the buffer
1310 * at the same place.
1312 void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
1314 struct dm_bufio_client *c = b->c;
1315 struct dm_buffer *new;
1317 BUG_ON(dm_bufio_in_request());
1319 dm_bufio_lock(c);
1321 retry:
1322 new = __find(c, new_block);
1323 if (new) {
1324 if (new->hold_count) {
1325 __wait_for_free_buffer(c);
1326 goto retry;
1330 * FIXME: Is there any point waiting for a write that's going
1331 * to be overwritten in a bit?
1333 __make_buffer_clean(new);
1334 __unlink_buffer(new);
1335 __free_buffer_wake(new);
1338 BUG_ON(!b->hold_count);
1339 BUG_ON(test_bit(B_READING, &b->state));
1341 __write_dirty_buffer(b, NULL);
1342 if (b->hold_count == 1) {
1343 wait_on_bit_io(&b->state, B_WRITING,
1344 TASK_UNINTERRUPTIBLE);
1345 set_bit(B_DIRTY, &b->state);
1346 __unlink_buffer(b);
1347 __link_buffer(b, new_block, LIST_DIRTY);
1348 } else {
1349 sector_t old_block;
1350 wait_on_bit_lock_io(&b->state, B_WRITING,
1351 TASK_UNINTERRUPTIBLE);
1353 * Relink buffer to "new_block" so that write_callback
1354 * sees "new_block" as a block number.
1355 * After the write, link the buffer back to old_block.
1356 * All this must be done in bufio lock, so that block number
1357 * change isn't visible to other threads.
1359 old_block = b->block;
1360 __unlink_buffer(b);
1361 __link_buffer(b, new_block, b->list_mode);
1362 submit_io(b, WRITE, new_block, write_endio);
1363 wait_on_bit_io(&b->state, B_WRITING,
1364 TASK_UNINTERRUPTIBLE);
1365 __unlink_buffer(b);
1366 __link_buffer(b, old_block, b->list_mode);
1369 dm_bufio_unlock(c);
1370 dm_bufio_release(b);
1372 EXPORT_SYMBOL_GPL(dm_bufio_release_move);
1375 * Free the given buffer.
1377 * This is just a hint, if the buffer is in use or dirty, this function
1378 * does nothing.
1380 void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
1382 struct dm_buffer *b;
1384 dm_bufio_lock(c);
1386 b = __find(c, block);
1387 if (b && likely(!b->hold_count) && likely(!b->state)) {
1388 __unlink_buffer(b);
1389 __free_buffer_wake(b);
1392 dm_bufio_unlock(c);
1394 EXPORT_SYMBOL(dm_bufio_forget);
1396 void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
1398 c->minimum_buffers = n;
1400 EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
1402 unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
1404 return c->block_size;
1406 EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);
1408 sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
1410 return i_size_read(c->bdev->bd_inode) >>
1411 (SECTOR_SHIFT + c->sectors_per_block_bits);
1413 EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);
1415 sector_t dm_bufio_get_block_number(struct dm_buffer *b)
1417 return b->block;
1419 EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);
1421 void *dm_bufio_get_block_data(struct dm_buffer *b)
1423 return b->data;
1425 EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);
1427 void *dm_bufio_get_aux_data(struct dm_buffer *b)
1429 return b + 1;
1431 EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);
1433 struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
1435 return b->c;
1437 EXPORT_SYMBOL_GPL(dm_bufio_get_client);
1439 static void drop_buffers(struct dm_bufio_client *c)
1441 struct dm_buffer *b;
1442 int i;
1444 BUG_ON(dm_bufio_in_request());
1447 * An optimization so that the buffers are not written one-by-one.
1449 dm_bufio_write_dirty_buffers_async(c);
1451 dm_bufio_lock(c);
1453 while ((b = __get_unclaimed_buffer(c)))
1454 __free_buffer_wake(b);
1456 for (i = 0; i < LIST_SIZE; i++)
1457 list_for_each_entry(b, &c->lru[i], lru_list)
1458 DMERR("leaked buffer %llx, hold count %u, list %d",
1459 (unsigned long long)b->block, b->hold_count, i);
1461 for (i = 0; i < LIST_SIZE; i++)
1462 BUG_ON(!list_empty(&c->lru[i]));
1464 dm_bufio_unlock(c);
1467 /*
1468 * Test if the buffer is unused and too old, and commit it.
1469 * And if GFP_NOFS is used, we must not do any I/O because we hold
1470 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1471 * rerouted to a different bufio client.
1472 */
1473 static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
1474 unsigned long max_jiffies)
1476 if (jiffies - b->last_accessed < max_jiffies)
1477 return 0;
1479 if (!(gfp & __GFP_FS)) {
1480 if (test_bit(B_READING, &b->state) ||
1481 test_bit(B_WRITING, &b->state) ||
1482 test_bit(B_DIRTY, &b->state))
1483 return 0;
1486 if (b->hold_count)
1487 return 0;
1489 __make_buffer_clean(b);
1490 __unlink_buffer(b);
1491 __free_buffer_wake(b);
1493 return 1;
1496 static long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
1497 gfp_t gfp_mask)
1499 int l;
1500 struct dm_buffer *b, *tmp;
1501 long freed = 0;
1503 for (l = 0; l < LIST_SIZE; l++) {
1504 list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
1505 freed += __cleanup_old_buffer(b, gfp_mask, 0);
1506 if (!--nr_to_scan)
1507 return freed;
1508 dm_bufio_cond_resched();
1511 return freed;
1514 static unsigned long
1515 dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1517 struct dm_bufio_client *c;
1518 unsigned long freed;
1520 c = container_of(shrink, struct dm_bufio_client, shrinker);
1521 if (sc->gfp_mask & __GFP_FS)
1522 dm_bufio_lock(c);
1523 else if (!dm_bufio_trylock(c))
1524 return SHRINK_STOP;
1526 freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
1527 dm_bufio_unlock(c);
1528 return freed;
1531 static unsigned long
1532 dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1534 struct dm_bufio_client *c;
1535 unsigned long count;
1537 c = container_of(shrink, struct dm_bufio_client, shrinker);
1538 if (sc->gfp_mask & __GFP_FS)
1539 dm_bufio_lock(c);
1540 else if (!dm_bufio_trylock(c))
1541 return 0;
1543 count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
1544 dm_bufio_unlock(c);
1545 return count;
1549 * Create the buffering interface
1551 struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
1552 unsigned reserved_buffers, unsigned aux_size,
1553 void (*alloc_callback)(struct dm_buffer *),
1554 void (*write_callback)(struct dm_buffer *))
1556 int r;
1557 struct dm_bufio_client *c;
1558 unsigned i;
1560 BUG_ON(block_size < 1 << SECTOR_SHIFT ||
1561 (block_size & (block_size - 1)));
1563 c = kzalloc(sizeof(*c), GFP_KERNEL);
1564 if (!c) {
1565 r = -ENOMEM;
1566 goto bad_client;
1568 c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
1569 if (!c->cache_hash) {
1570 r = -ENOMEM;
1571 goto bad_hash;
1574 c->bdev = bdev;
1575 c->block_size = block_size;
1576 c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
1577 c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
1578 ffs(block_size) - 1 - PAGE_SHIFT : 0;
1579 c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
1580 PAGE_SHIFT - (ffs(block_size) - 1) : 0);
1582 c->aux_size = aux_size;
1583 c->alloc_callback = alloc_callback;
1584 c->write_callback = write_callback;
1586 for (i = 0; i < LIST_SIZE; i++) {
1587 INIT_LIST_HEAD(&c->lru[i]);
1588 c->n_buffers[i] = 0;
1591 for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
1592 INIT_HLIST_HEAD(&c->cache_hash[i]);
1594 mutex_init(&c->lock);
1595 INIT_LIST_HEAD(&c->reserved_buffers);
1596 c->need_reserved_buffers = reserved_buffers;
1598 c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;
1600 init_waitqueue_head(&c->free_buffer_wait);
1601 c->async_write_error = 0;
1603 c->dm_io = dm_io_client_create();
1604 if (IS_ERR(c->dm_io)) {
1605 r = PTR_ERR(c->dm_io);
1606 goto bad_dm_io;
1609 mutex_lock(&dm_bufio_clients_lock);
1610 if (c->blocks_per_page_bits) {
1611 if (!DM_BUFIO_CACHE_NAME(c)) {
1612 DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
1613 if (!DM_BUFIO_CACHE_NAME(c)) {
1614 r = -ENOMEM;
1615 mutex_unlock(&dm_bufio_clients_lock);
1616 goto bad_cache;
1620 if (!DM_BUFIO_CACHE(c)) {
1621 DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
1622 c->block_size,
1623 c->block_size, 0, NULL);
1624 if (!DM_BUFIO_CACHE(c)) {
1625 r = -ENOMEM;
1626 mutex_unlock(&dm_bufio_clients_lock);
1627 goto bad_cache;
1631 mutex_unlock(&dm_bufio_clients_lock);
1633 while (c->need_reserved_buffers) {
1634 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
1636 if (!b) {
1637 r = -ENOMEM;
1638 goto bad_buffer;
1640 __free_buffer_wake(b);
1643 mutex_lock(&dm_bufio_clients_lock);
1644 dm_bufio_client_count++;
1645 list_add(&c->client_list, &dm_bufio_all_clients);
1646 __cache_size_refresh();
1647 mutex_unlock(&dm_bufio_clients_lock);
1649 c->shrinker.count_objects = dm_bufio_shrink_count;
1650 c->shrinker.scan_objects = dm_bufio_shrink_scan;
1651 c->shrinker.seeks = 1;
1652 c->shrinker.batch = 0;
1653 register_shrinker(&c->shrinker);
1655 return c;
1657 bad_buffer:
1658 bad_cache:
1659 while (!list_empty(&c->reserved_buffers)) {
1660 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1661 struct dm_buffer, lru_list);
1662 list_del(&b->lru_list);
1663 free_buffer(b);
1665 dm_io_client_destroy(c->dm_io);
1666 bad_dm_io:
1667 vfree(c->cache_hash);
1668 bad_hash:
1669 kfree(c);
1670 bad_client:
1671 return ERR_PTR(r);
1673 EXPORT_SYMBOL_GPL(dm_bufio_client_create);
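/*
 * Lifetime of a client, as an illustrative sketch (not code from this
 * file; "bdev" and the 4096/4/0 arguments are placeholders):
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 4, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_write_dirty_buffers(c);
 *	dm_bufio_client_destroy(c);
 *
 * The block size must be a power of two and at least one sector, and all
 * buffers must have been released before dm_bufio_client_destroy() is
 * called.
 */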
1676 * Free the buffering interface.
1677 * It is required that there are no references on any buffers.
1679 void dm_bufio_client_destroy(struct dm_bufio_client *c)
1681 unsigned i;
1683 drop_buffers(c);
1685 unregister_shrinker(&c->shrinker);
1687 mutex_lock(&dm_bufio_clients_lock);
1689 list_del(&c->client_list);
1690 dm_bufio_client_count--;
1691 __cache_size_refresh();
1693 mutex_unlock(&dm_bufio_clients_lock);
1695 for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
1696 BUG_ON(!hlist_empty(&c->cache_hash[i]));
1698 BUG_ON(c->need_reserved_buffers);
1700 while (!list_empty(&c->reserved_buffers)) {
1701 struct dm_buffer *b = list_entry(c->reserved_buffers.next,
1702 struct dm_buffer, lru_list);
1703 list_del(&b->lru_list);
1704 free_buffer(b);
1707 for (i = 0; i < LIST_SIZE; i++)
1708 if (c->n_buffers[i])
1709 DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);
1711 for (i = 0; i < LIST_SIZE; i++)
1712 BUG_ON(c->n_buffers[i]);
1714 dm_io_client_destroy(c->dm_io);
1715 vfree(c->cache_hash);
1716 kfree(c);
1718 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
1720 static void cleanup_old_buffers(void)
1722 unsigned long max_age = ACCESS_ONCE(dm_bufio_max_age);
1723 struct dm_bufio_client *c;
1725 if (max_age > ULONG_MAX / HZ)
1726 max_age = ULONG_MAX / HZ;
1728 mutex_lock(&dm_bufio_clients_lock);
1729 list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
1730 if (!dm_bufio_trylock(c))
1731 continue;
1733 while (!list_empty(&c->lru[LIST_CLEAN])) {
1734 struct dm_buffer *b;
1735 b = list_entry(c->lru[LIST_CLEAN].prev,
1736 struct dm_buffer, lru_list);
1737 if (!__cleanup_old_buffer(b, 0, max_age * HZ))
1738 break;
1739 dm_bufio_cond_resched();
1742 dm_bufio_unlock(c);
1743 dm_bufio_cond_resched();
1745 mutex_unlock(&dm_bufio_clients_lock);
1748 static struct workqueue_struct *dm_bufio_wq;
1749 static struct delayed_work dm_bufio_work;
1751 static void work_fn(struct work_struct *w)
1753 cleanup_old_buffers();
1755 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1756 DM_BUFIO_WORK_TIMER_SECS * HZ);
1759 /*----------------------------------------------------------------
1760 * Module setup
1761 *--------------------------------------------------------------*/
1763 /*
1764 * This is called only once for the whole dm_bufio module.
1765 * It initializes the memory limit.
1766 */
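/*
 * For orientation (illustrative numbers): on a 64-bit machine with 8 GiB
 * of RAM the default works out to roughly 160 MiB (2% of low memory); the
 * additional 25% cap on the vmalloc area only matters on configurations
 * where that area is small, as noted in the policy comment at the top of
 * this file.
 */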
1767 static int __init dm_bufio_init(void)
1769 __u64 mem;
1771 dm_bufio_allocated_kmem_cache = 0;
1772 dm_bufio_allocated_get_free_pages = 0;
1773 dm_bufio_allocated_vmalloc = 0;
1774 dm_bufio_current_allocated = 0;
1776 memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
1777 memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);
1779 mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
1780 DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
1782 if (mem > ULONG_MAX)
1783 mem = ULONG_MAX;
1785 #ifdef CONFIG_MMU
1786 if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
1787 mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);
1788 #endif
1790 dm_bufio_default_cache_size = mem;
1792 mutex_lock(&dm_bufio_clients_lock);
1793 __cache_size_refresh();
1794 mutex_unlock(&dm_bufio_clients_lock);
1796 dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
1797 if (!dm_bufio_wq)
1798 return -ENOMEM;
1800 INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
1801 queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
1802 DM_BUFIO_WORK_TIMER_SECS * HZ);
1804 return 0;
1807 /*
1808 * This is called once when unloading the dm_bufio module.
1809 */
1810 static void __exit dm_bufio_exit(void)
1812 int bug = 0;
1813 int i;
1815 cancel_delayed_work_sync(&dm_bufio_work);
1816 destroy_workqueue(dm_bufio_wq);
1818 for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
1819 struct kmem_cache *kc = dm_bufio_caches[i];
1821 if (kc)
1822 kmem_cache_destroy(kc);
1825 for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
1826 kfree(dm_bufio_cache_names[i]);
1828 if (dm_bufio_client_count) {
1829 DMCRIT("%s: dm_bufio_client_count leaked: %d",
1830 __func__, dm_bufio_client_count);
1831 bug = 1;
1834 if (dm_bufio_current_allocated) {
1835 DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
1836 __func__, dm_bufio_current_allocated);
1837 bug = 1;
1840 if (dm_bufio_allocated_get_free_pages) {
1841 DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
1842 __func__, dm_bufio_allocated_get_free_pages);
1843 bug = 1;
1846 if (dm_bufio_allocated_vmalloc) {
1847 DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
1848 __func__, dm_bufio_allocated_vmalloc);
1849 bug = 1;
1852 if (bug)
1853 BUG();
1856 module_init(dm_bufio_init)
1857 module_exit(dm_bufio_exit)
1859 module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
1860 MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");
1862 module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
1863 MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");
1865 module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
1866 MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");
1868 module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
1869 MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");
1871 module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
1872 MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");
1874 module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
1875 MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");
1877 module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
1878 MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");
1880 MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
1881 MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
1882 MODULE_LICENSE("GPL");