/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>

#define DM_MSG_PREFIX "bufio"
/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	75
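
/*
 * Example (illustrative, not from the original source): a client with 4KiB
 * blocks is always allowed at least DM_BUFIO_MIN_BUFFERS (8) buffers, i.e.
 * 32KiB of cache, even when its share of the global limit would be smaller.
 */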
/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)

/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS		16

/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))

/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2
/*
 * Linking of buffers:
 *	All buffers are linked to buffer_tree with their node field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too.  They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};
/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2

/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};
struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;		/* LIST_* */
	unsigned hold_count;
	int read_error;
	int write_error;
	unsigned long state;
	unsigned long last_accessed;
	struct dm_bufio_client *c;
	struct list_head write_list;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
};

/*----------------------------------------------------------------*/
static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}
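
/*
 * Example (illustrative): on a system with 4KiB pages, a client with
 * 512-byte blocks has blocks_per_page_bits == 3 and therefore uses
 * dm_bufio_caches[2]; a client with 2KiB blocks uses dm_bufio_caches[0].
 */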
#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])

#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}
/*
 * FIXME Move to sched.h?
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
#  define dm_bufio_cond_resched()		\
do {						\
	if (unlikely(need_resched()))		\
		_cond_resched();		\
} while (0)
#else
#  define dm_bufio_cond_resched()	do { } while (0)
#endif

/*----------------------------------------------------------------*/
/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time.  If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;

/*----------------------------------------------------------------*/
/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = (b->block < block) ? n->rb_left : n->rb_right;
	}

	return NULL;
}

static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = (found->block < b->block) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}

static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}
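
/*
 * Note that __find and __insert both descend to the left child when the
 * node's block number is smaller than the key, so the tree ends up ordered
 * with larger block numbers toward the left.  This is unusual but harmless,
 * because both functions apply the same convention consistently.
 */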
/*----------------------------------------------------------------*/

static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}
/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}
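
/*
 * Example (illustrative): with a 100MiB total cache and four registered
 * clients, dm_bufio_cache_size_per_client comes out at 25MiB; with no
 * clients the "?:" above makes the divisor 1.
 */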
/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	unsigned noio_flag;
	void *ptr;

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */

	if (gfp_mask & __GFP_NORETRY)
		noio_flag = memalloc_noio_save();

	ptr = __vmalloc(c->block_size, gfp_mask | __GFP_HIGHMEM, PAGE_KERNEL);

	if (gfp_mask & __GFP_NORETRY)
		memalloc_noio_restore(noio_flag);

	return ptr;
}
/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}
/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

	return b;
}

/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}
/*
 * Link buffer to the buffer tree and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;
}

/*
 * Unlink buffer from the buffer tree and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);
}

/*
 * Place the buffer at the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}
/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/
/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
		     bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = block << b->c->sectors_per_block_bits,
		.count = b->c->block_size >> SECTOR_SHIFT,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = b->data;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = b->data;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r)
		end_io(&b->bio, r);
}
static void inline_endio(struct bio *bio, int error)
{
	bio_end_io_t *end_fn = bio->bi_private;

	/*
	 * Reset the bio to free any attached resources
	 * (e.g. bio integrity profiles).
	 */
	bio_reset(bio);

	end_fn(bio, error);
}

static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
			   bio_end_io_t *end_io)
{
	char *ptr;
	int len;

	bio_init(&b->bio);
	b->bio.bi_io_vec = b->bio_vec;
	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
	b->bio.bi_bdev = b->c->bdev;
	b->bio.bi_end_io = inline_endio;
	/*
	 * Use of .bi_private isn't a problem here because
	 * the dm_buffer's inline bio is local to bufio.
	 */
	b->bio.bi_private = end_io;

	/*
	 * We assume that if len >= PAGE_SIZE ptr is page-aligned.
	 * If len < PAGE_SIZE the buffer doesn't cross page boundary.
	 */
	ptr = b->data;
	len = b->c->block_size;

	if (len >= PAGE_SIZE)
		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
	else
		BUG_ON((unsigned long)ptr & (len - 1));

	do {
		if (!bio_add_page(&b->bio, virt_to_page(ptr),
				  len < PAGE_SIZE ? len : PAGE_SIZE,
				  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, block, end_io);
			return;
		}

		len -= PAGE_SIZE;
		ptr += PAGE_SIZE;
	} while (len > 0);

	submit_bio(rw, &b->bio);
}
static void submit_io(struct dm_buffer *b, int rw, sector_t block,
		      bio_end_io_t *end_io)
{
	if (rw == WRITE && b->c->write_callback)
		b->c->write_callback(b);

	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, block, end_io);
	else
		use_dmio(b, rw, block, end_io);
}
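
/*
 * Example (illustrative): with 4KiB pages and DM_BUFIO_INLINE_VECS == 16,
 * buffers of up to 64KiB that are slab- or page-allocated take the inline
 * bio path above; larger or vmalloc-backed buffers fall back to dm-io.
 */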
/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = error;
	if (unlikely(error)) {
		struct dm_bufio_client *c = b->c;
		(void)cmpxchg(&c->async_write_error, 0, error);
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}
/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	if (!write_list)
		submit_io(b, WRITE, b->block, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}
static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;
	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, WRITE, b->block, write_endio);
		dm_bufio_cond_resched();
	}
	blk_finish_plug(&plug);
}
/*
 * Wait until any activity on the buffer finishes.  Possibly write the
 * buffer if it is dirty.  When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}
/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	return NULL;
}
/*
 * Wait until some other thread frees a buffer or releases its hold count
 * on some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}
enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};
/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOIO: don't recurse into the I/O layer
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}
static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}

/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}
static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		dm_bufio_cond_resched();
	}
}
/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch) {
		mutex_lock(&dm_bufio_clients_lock);
		__cache_size_refresh();
		mutex_unlock(&dm_bufio_clients_lock);
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < c->minimum_buffers)
		buffers = c->minimum_buffers;

	*limit_buffers = buffers;
	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}
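
/*
 * Example (illustrative): a 25MiB per-client share with 4KiB blocks gives
 * a limit of 6400 buffers and, at DM_BUFIO_WRITEBACK_PERCENT == 75, a
 * writeback threshold of 4800 dirty buffers.
 */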
/*
 * Check if we're over watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		dm_bufio_cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1, write_list);
}

/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/
static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the buffer tree.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}
/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = error;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}
/*
 * A common routine for dm_bufio_new and dm_bufio_read.  Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return b;

	if (need_submit)
		submit_io(b, READ, b->block, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = b->read_error;

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
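
/*
 * Example (illustrative sketch, not part of the original file): a typical
 * read-side caller; "my_client" and "MY_BLOCK" are hypothetical.
 *
 *	struct dm_buffer *buf;
 *	void *data = dm_bufio_read(my_client, MY_BLOCK, &buf);
 *
 *	if (!IS_ERR(data)) {
 *		... use the block_size bytes at "data" ...
 *		dm_bufio_release(buf);
 *	}
 */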
void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, b->block, read_endio);
			dm_bufio_release(b);

			dm_bufio_cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state))
		__relink_lru(b, LIST_DIRTY);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);

void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		dm_bufio_cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us.  In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
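
/*
 * Example (illustrative sketch, not part of the original file): modifying a
 * block and committing it; "my_client" and "MY_BLOCK" are hypothetical.
 *
 *	struct dm_buffer *buf;
 *	void *data = dm_bufio_read(my_client, MY_BLOCK, &buf);
 *
 *	if (!IS_ERR(data)) {
 *		memset(data, 0, dm_bufio_get_block_size(my_client));
 *		dm_bufio_mark_buffer_dirty(buf);
 *		dm_bufio_release(buf);
 *		(void)dm_bufio_write_dirty_buffers(my_client);
 *	}
 */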
/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * under the new location in the buffer tree.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but don't relink it, because that other user needs to have the
 * buffer at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, new_block, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);
/*
 * Free the given buffer.
 *
 * This is just a hint, if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b && likely(!b->hold_count) && likely(!b->state)) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL(dm_bufio_forget);
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);

unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
	       (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);
static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list)
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}
/*
 * We may not be able to evict this buffer if IO is pending or if the client
 * is still using it.  Caller is expected to know the buffer is too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to a different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
	if (!(gfp & __GFP_FS)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return false;
	}

	if (b->hold_count)
		return false;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return true;
}

static unsigned get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
	return retain_bytes / c->block_size;
}
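
/*
 * Example (illustrative): with the default DM_BUFIO_DEFAULT_RETAIN_BYTES
 * of 256KiB and 4KiB blocks, the shrinker and the age-based cleanup try
 * to leave at least 64 buffers cached.
 */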
static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
			    gfp_t gfp_mask)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = nr_to_scan;
	unsigned retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (__try_evict_buffer(b, gfp_mask))
				freed++;
			if (!--nr_to_scan || ((count - freed) <= retain_target))
				return freed;
			dm_bufio_cond_resched();
		}
	}
	return freed;
}
static unsigned long
dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long freed;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return SHRINK_STOP;

	freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
	dm_bufio_unlock(c);
	return freed;
}

static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long count;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return 0;

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	dm_bufio_unlock(c);
	return count;
}
/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
	c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
				  ffs(block_size) - 1 - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
				  PAGE_SHIFT - (ffs(block_size) - 1) : 0);

	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
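
/*
 * Example (illustrative sketch, not part of the original file): creating a
 * client for 4KiB blocks with one reserved buffer and no per-buffer aux
 * data or callbacks; "bdev" is assumed to be an already opened
 * struct block_device.
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 */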
/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	dm_io_client_destroy(c->dm_io);
	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
static unsigned get_max_age_hz(void)
{
	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

	return max_age * HZ;
}

static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
	return time_after_eq(jiffies, b->last_accessed + age_hz);
}
static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
	struct dm_buffer *b, *tmp;
	unsigned retain_target = get_retain_buffers(c);
	unsigned count;

	dm_bufio_lock(c);

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (count <= retain_target)
			break;

		if (!older_than(b, age_hz))
			break;

		if (__try_evict_buffer(b, 0))
			count--;

		dm_bufio_cond_resched();
	}

	dm_bufio_unlock(c);
}
static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}

static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}
/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)((totalram_pages - totalhigh_pages) *
		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	/*
	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
	 * in fs/proc/internal.h
	 */
	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}
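
/*
 * Example (illustrative): on a 64-bit machine with 8GiB of directly-mapped
 * memory, the default computed above is 2% = ~164MiB; on 32-bit machines
 * the 25% vmalloc cap (e.g. 32MiB out of a 128MiB vmalloc area) usually
 * dominates.
 */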
/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
		struct kmem_cache *kc = dm_bufio_caches[i];

		if (kc)
			kmem_cache_destroy(kc);
	}

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_allocated_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	if (bug)
		BUG();
}
module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");