/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */
#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/sched/mm.h>
#include <linux/jiffies.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/stacktrace.h>
#define DM_MSG_PREFIX "bufio"
/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	75
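/*
 * Worked example of the policy above (illustrative numbers, not taken
 * from this file): on a machine with 8 GiB of directly mapped memory,
 * DM_BUFIO_MEMORY_PERCENT caps the default cache at 2% = ~160 MiB;
 * if 25% of the vmalloc area is smaller than that, the vmalloc limit
 * wins instead. See dm_bufio_init() at the bottom of this file for the
 * actual computation.
 */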
/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	30

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	300

/*
 * The nr of bytes of cached data to keep around.
 */
#define DM_BUFIO_DEFAULT_RETAIN_BYTES	(256 * 1024)
/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS		16
/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))
/*
 * Align buffer writes to this boundary.
 * Tests show that SSDs have the highest IOPS when using 4k writes.
 */
#define DM_BUFIO_WRITE_ALIGN		4096
/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to cache_hash with their hash_list field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too. They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;

	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;

	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	unsigned minimum_buffers;

	struct rb_root buffer_tree;
	wait_queue_head_t free_buffer_wait;

	sector_t start;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_WRITING	1
#define B_DIRTY		2
/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct rb_node node;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;		/* LIST_* */
	unsigned hold_count;
	blk_status_t read_error;
	blk_status_t write_error;
	unsigned long state;
	unsigned long last_accessed;
	unsigned dirty_start;
	unsigned dirty_end;
	unsigned write_start;
	unsigned write_end;
	struct dm_bufio_client *c;
	struct list_head write_list;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
#define MAX_STACK 10
	struct stack_trace stack_trace;
	unsigned long stack_entries[MAX_STACK];
#endif
};
/*----------------------------------------------------------------*/
static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}

#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])
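/*
 * Worked example for the slab index above (assuming 4 KiB pages, i.e.
 * PAGE_SHIFT == 12): a client with 512-byte blocks has
 * blocks_per_page_bits == 3 and therefore uses dm_bufio_caches[2];
 * clients with block_size >= PAGE_SIZE have blocks_per_page_bits == 0
 * and never take the slab path (see alloc_buffer_data below).
 */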
#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}

/*----------------------------------------------------------------*/
/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;
static unsigned long dm_bufio_retain_bytes = DM_BUFIO_DEFAULT_RETAIN_BYTES;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;
/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
static void buffer_record_stack(struct dm_buffer *b)
{
	b->stack_trace.nr_entries = 0;
	b->stack_trace.max_entries = MAX_STACK;
	b->stack_trace.entries = b->stack_entries;
	b->stack_trace.skip = 2;
	save_stack_trace(&b->stack_trace);
}
#endif
/*----------------------------------------------------------------
 * A red/black tree acts as an index for all the buffers.
 *--------------------------------------------------------------*/
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct rb_node *n = c->buffer_tree.rb_node;
	struct dm_buffer *b;

	while (n) {
		b = container_of(n, struct dm_buffer, node);

		if (b->block == block)
			return b;

		n = (b->block < block) ? n->rb_left : n->rb_right;
	}

	return NULL;
}
static void __insert(struct dm_bufio_client *c, struct dm_buffer *b)
{
	struct rb_node **new = &c->buffer_tree.rb_node, *parent = NULL;
	struct dm_buffer *found;

	while (*new) {
		found = container_of(*new, struct dm_buffer, node);

		if (found->block == b->block) {
			BUG_ON(found != b);
			return;
		}

		parent = *new;
		new = (found->block < b->block) ?
			&((*new)->rb_left) : &((*new)->rb_right);
	}

	rb_link_node(&b->node, parent, new);
	rb_insert_color(&b->node, &c->buffer_tree);
}
static void __remove(struct dm_bufio_client *c, struct dm_buffer *b)
{
	rb_erase(&b->node, &c->buffer_tree);
}

/*----------------------------------------------------------------*/
static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}
/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = ACCESS_ONCE(dm_bufio_cache_size);

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}
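/*
 * Illustrative arithmetic (numbers assumed): with a 100 MiB cache and
 * four clients, each client's share is 25 MiB. The "?: 1" fallback
 * avoids a division by zero when no client is registered yet.
 */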
/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set per-process flag PF_MEMALLOC_NOIO so that
	 * all allocations done by this process (including pagetables) are done
	 * as if GFP_NOIO was specified.
	 */
	if (gfp_mask & __GFP_NORETRY) {
		unsigned noio_flag = memalloc_noio_save();
		void *ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);

		memalloc_noio_restore(noio_flag);
		return ptr;
	}

	return __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);
}
/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}
/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	memset(&b->stack_trace, 0, sizeof(b->stack_trace));
#endif
	return b;
}
/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}
/*
 * Link buffer to the hash list and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	__insert(b->c, b);
	b->last_accessed = jiffies;
}
/*
 * Unlink buffer from the hash list and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	__remove(b->c, b);
	list_del(&b->lru_list);
}
/*
 * Place the buffer to the head of dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_move(&b->lru_list, &c->lru[dirty]);
	b->last_accessed = jiffies;
}
/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use dm-io layer to do the I/O.
 * The dm-io layer splits the I/O into multiple requests, avoiding the above
 * shortcomings.
 *--------------------------------------------------------------*/
/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_status = error ? BLK_STS_IOERR : 0;
	b->bio.bi_end_io(&b->bio);
}
static void use_dmio(struct dm_buffer *b, int rw, sector_t sector,
		     unsigned n_sectors, unsigned offset, bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_op = rw,
		.bi_op_flags = 0,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = sector,
		.count = n_sectors,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = (char *)b->data + offset;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = (char *)b->data + offset;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r) {
		b->bio.bi_status = errno_to_blk_status(r);
		end_io(&b->bio);
	}
}
static void inline_endio(struct bio *bio)
{
	bio_end_io_t *end_fn = bio->bi_private;
	blk_status_t status = bio->bi_status;

	/*
	 * Reset the bio to free any attached resources
	 * (e.g. bio integrity profiles).
	 */
	bio_reset(bio);

	bio->bi_status = status;
	end_fn(bio);
}
static void use_inline_bio(struct dm_buffer *b, int rw, sector_t sector,
			   unsigned n_sectors, unsigned offset, bio_end_io_t *end_io)
{
	char *ptr;
	unsigned len;

	bio_init(&b->bio, b->bio_vec, DM_BUFIO_INLINE_VECS);
	b->bio.bi_iter.bi_sector = sector;
	bio_set_dev(&b->bio, b->c->bdev);
	b->bio.bi_end_io = inline_endio;
	/*
	 * Use of .bi_private isn't a problem here because
	 * the dm_buffer's inline bio is local to bufio.
	 */
	b->bio.bi_private = end_io;
	bio_set_op_attrs(&b->bio, rw, 0);

	ptr = (char *)b->data + offset;
	len = n_sectors << SECTOR_SHIFT;

	do {
		unsigned this_step = min((unsigned)(PAGE_SIZE - offset_in_page(ptr)), len);
		if (!bio_add_page(&b->bio, virt_to_page(ptr), this_step,
				  offset_in_page(ptr))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, sector, n_sectors, offset, end_io);
			return;
		}

		len -= this_step;
		ptr += this_step;
	} while (len > 0);

	submit_bio(&b->bio);
}
static void submit_io(struct dm_buffer *b, int rw, bio_end_io_t *end_io)
{
	unsigned n_sectors;
	sector_t sector;
	unsigned offset, end;

	sector = (b->block << b->c->sectors_per_block_bits) + b->c->start;

	if (rw != WRITE) {
		n_sectors = 1 << b->c->sectors_per_block_bits;
		offset = 0;
	} else {
		if (b->c->write_callback)
			b->c->write_callback(b);
		offset = b->write_start;
		end = b->write_end;
		offset &= -DM_BUFIO_WRITE_ALIGN;
		end += DM_BUFIO_WRITE_ALIGN - 1;
		end &= -DM_BUFIO_WRITE_ALIGN;
		if (unlikely(end > b->c->block_size))
			end = b->c->block_size;

		sector += offset >> SECTOR_SHIFT;
		n_sectors = (end - offset) >> SECTOR_SHIFT;
	}

	if (n_sectors <= ((DM_BUFIO_INLINE_VECS * PAGE_SIZE) >> SECTOR_SHIFT) &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, sector, n_sectors, offset, end_io);
	else
		use_dmio(b, rw, sector, n_sectors, offset, end_io);
}
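/*
 * Worked example of the write alignment in submit_io() above (numbers
 * assumed for illustration): with DM_BUFIO_WRITE_ALIGN == 4096, a dirty
 * range of bytes [5000, 6000) becomes offset = 5000 & ~4095 = 4096 and
 * end = (6000 + 4095) & ~4095 = 8192, so bytes [4096, 8192) of the
 * buffer are written, clamped to block_size if necessary.
 */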
/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = bio->bi_status;
	if (unlikely(bio->bi_status)) {
		struct dm_bufio_client *c = b->c;

		(void)cmpxchg(&c->async_write_error, 0,
				blk_status_to_errno(bio->bi_status));
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_WRITING);
}
/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is a previous write going on, wait for it to finish (we can't
 *   have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b,
				 struct list_head *write_list)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);

	b->write_start = b->dirty_start;
	b->write_end = b->dirty_end;

	if (!write_list)
		submit_io(b, WRITE, write_endio);
	else
		list_add_tail(&b->write_list, write_list);
}
static void __flush_write_list(struct list_head *write_list)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	while (!list_empty(write_list)) {
		struct dm_buffer *b =
			list_entry(write_list->next, struct dm_buffer, write_list);
		list_del(&b->write_list);
		submit_io(b, WRITE, write_endio);
		cond_resched();
	}
	blk_finish_plug(&plug);
}
/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b, NULL);
	wait_on_bit_io(&b->state, B_WRITING, TASK_UNINTERRUPTIBLE);
}
/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		cond_resched();
	}

	return NULL;
}
/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_current_state(TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}
enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;
	bool tried_noio_alloc = false;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOWAIT: don't wait; if we need to sleep we'll release our
	 *		    mutex and wait ourselves.
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOWAIT | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (dm_bufio_cache_size_latch != 1 && !tried_noio_alloc) {
			dm_bufio_unlock(c);
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			dm_bufio_lock(c);
			if (b)
				return b;
			tried_noio_alloc = true;
		}

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}
static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}
/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}
static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait,
					struct list_head *write_list)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b, write_list);
		cond_resched();
	}
}
/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (unlikely(ACCESS_ONCE(dm_bufio_cache_size) != dm_bufio_cache_size_latch)) {
		if (mutex_trylock(&dm_bufio_clients_lock)) {
			__cache_size_refresh();
			mutex_unlock(&dm_bufio_clients_lock);
		}
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < c->minimum_buffers)
		buffers = c->minimum_buffers;

	*limit_buffers = buffers;
	*threshold_buffers = mult_frac(buffers,
				       DM_BUFIO_WRITEBACK_PERCENT, 100);
}
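/*
 * Illustrative numbers (assumed, not from this file): a client with
 * 4 KiB blocks and a 64 MiB per-client share gets
 * limit_buffers = 64 MiB / 4 KiB = 16384 buffers and
 * threshold_buffers = 75% of that = 12288, so background writeback
 * starts once more than 12288 buffers are dirty.
 */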
/*
 * Check if we're over watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c,
			      struct list_head *write_list)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1, write_list);
}
/*----------------------------------------------------------------
 * Getting a buffer
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit,
				     struct list_head *write_list)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so need to
	 * recheck the hash table.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c, write_list);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}
/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = bio->bi_status;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_atomic();
	clear_bit(B_READING, &b->state);
	smp_mb__after_atomic();

	wake_up_bit(&b->state, B_READING);
}
/*
 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit, &write_list);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	if (b && b->hold_count == 1)
		buffer_record_stack(b);
#endif
	dm_bufio_unlock(c);

	__flush_write_list(&write_list);

	if (!b)
		return NULL;

	if (need_submit)
		submit_io(b, READ, read_endio);

	wait_on_bit_io(&b->state, B_READING, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = blk_status_to_errno(b->read_error);

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
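/*
 * A typical read-modify-write cycle from a caller's point of view
 * (illustrative sketch, not code from this file; "client", "BLOCK" and
 * "payload" are assumed to exist):
 *
 *	struct dm_buffer *bp;
 *	void *data = dm_bufio_read(client, BLOCK, &bp);
 *	if (IS_ERR(data))
 *		return PTR_ERR(data);
 *	memcpy(data, payload, PAYLOAD_SIZE);
 *	dm_bufio_mark_buffer_dirty(bp);
 *	dm_bufio_release(bp);
 *
 * The write back to disk happens later, or when
 * dm_bufio_write_dirty_buffers() is called.
 */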
void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit,
				&write_list);
		if (unlikely(!list_empty(&write_list))) {
			dm_bufio_unlock(c);
			blk_finish_plug(&plug);
			__flush_write_list(&write_list);
			blk_start_plug(&plug);
			dm_bufio_lock(c);
		}
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, read_endio);
			dm_bufio_release(b);

			cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
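/*
 * Prefetch is fire-and-forget: it never waits for the reads it starts.
 * An assumed usage pattern: call dm_bufio_prefetch(c, block, 16) for an
 * upcoming range, then fetch the individual blocks with dm_bufio_read(),
 * which will typically find the data already cached or in flight.
 */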
void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);
void dm_bufio_mark_partial_buffer_dirty(struct dm_buffer *b,
					unsigned start, unsigned end)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(start >= end);
	BUG_ON(end > b->c->block_size);

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state)) {
		b->dirty_start = start;
		b->dirty_end = end;
		__relink_lru(b, LIST_DIRTY);
	} else {
		if (start < b->dirty_start)
			b->dirty_start = start;
		if (end > b->dirty_end)
			b->dirty_end = end;
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_partial_buffer_dirty);

void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	dm_bufio_mark_partial_buffer_dirty(b, 0, b->c->block_size);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	LIST_HEAD(write_list);

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	LIST_HEAD(write_list);

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0, &write_list);
	dm_bufio_unlock(c);
	__flush_write_list(&write_list);
	dm_bufio_lock(c);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit_io(&b->state, B_WRITING,
					       TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us. In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
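/*
 * A minimal commit sequence built on the call above (illustrative
 * sketch; "client" is assumed):
 *
 *	... modify buffers, dm_bufio_mark_buffer_dirty() on each ...
 *	int r = dm_bufio_write_dirty_buffers(client);
 *	if (r)
 *		... handle the write or flush error ...
 *
 * One call both writes the dirty buffers and flushes the disk cache,
 * so the data is durable when it returns 0.
 */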
/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_op = REQ_OP_WRITE,
		.bi_op_flags = REQ_PREFLUSH | REQ_SYNC,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b, NULL);
	if (b->hold_count == 1) {
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		b->dirty_start = 0;
		b->dirty_end = c->block_size;
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock_io(&b->state, B_WRITING,
				    TASK_UNINTERRUPTIBLE);
		/*
		 * Relink buffer to "new_block" so that write_callback
		 * sees "new_block" as a block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done in bufio lock, so that block number
		 * change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, write_endio);
		wait_on_bit_io(&b->state, B_WRITING,
			       TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);
/*
 * Free the given buffer.
 *
 * This is just a hint, if the buffer is in use or dirty, this function
 * does nothing.
 */
void dm_bufio_forget(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;

	dm_bufio_lock(c);

	b = __find(c, block);
	if (b && likely(!b->hold_count) && likely(!b->state)) {
		__unlink_buffer(b);
		__free_buffer_wake(b);
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL(dm_bufio_forget);
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n)
{
	c->minimum_buffers = n;
}
EXPORT_SYMBOL(dm_bufio_set_minimum_buffers);
unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
			   (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);
static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;
	bool warned = false;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list) {
			WARN_ON(!warned);
			warned = true;
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);
#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
			print_stack_trace(&b->stack_trace, 1);
			b->hold_count = 0; /* mark unclaimed to avoid BUG_ON below */
#endif
		}

#ifdef CONFIG_DM_DEBUG_BLOCK_STACK_TRACING
	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);
#endif

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}
/*
 * We may not be able to evict this buffer if IO pending or the client
 * is still using it. Caller is expected to know buffer is too old.
 *
 * And if GFP_NOFS is used, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
 * rerouted to different bufio client.
 */
static bool __try_evict_buffer(struct dm_buffer *b, gfp_t gfp)
{
	if (!(gfp & __GFP_FS)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return false;
	}

	if (b->hold_count)
		return false;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return true;
}
static unsigned long get_retain_buffers(struct dm_bufio_client *c)
{
	unsigned long retain_bytes = ACCESS_ONCE(dm_bufio_retain_bytes);
	return retain_bytes >> (c->sectors_per_block_bits + SECTOR_SHIFT);
}
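/*
 * Worked example (assumed numbers): with the default retain_bytes of
 * 256 KiB and 4 KiB blocks, sectors_per_block_bits + SECTOR_SHIFT == 12,
 * so the shrinker tries to keep 256 KiB >> 12 == 64 buffers cached.
 */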
static unsigned long __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
			    gfp_t gfp_mask)
{
	int l;
	struct dm_buffer *b, *tmp;
	unsigned long freed = 0;
	unsigned long count = c->n_buffers[LIST_CLEAN] +
			      c->n_buffers[LIST_DIRTY];
	unsigned long retain_target = get_retain_buffers(c);

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list) {
			if (__try_evict_buffer(b, gfp_mask))
				freed++;
			if (!--nr_to_scan || ((count - freed) <= retain_target))
				return freed;
			cond_resched();
		}
	}
	return freed;
}
static unsigned long
dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c;
	unsigned long freed;

	c = container_of(shrink, struct dm_bufio_client, shrinker);
	if (sc->gfp_mask & __GFP_FS)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return SHRINK_STOP;

	freed = __scan(c, sc->nr_to_scan, sc->gfp_mask);
	dm_bufio_unlock(c);
	return freed;
}
static unsigned long
dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
{
	struct dm_bufio_client *c = container_of(shrink, struct dm_bufio_client, shrinker);
	unsigned long count = ACCESS_ONCE(c->n_buffers[LIST_CLEAN]) +
			      ACCESS_ONCE(c->n_buffers[LIST_DIRTY]);
	unsigned long retain_target = get_retain_buffers(c);

	return (count < retain_target) ? 0 : (count - retain_target);
}
/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kzalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->buffer_tree = RB_ROOT;

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
	c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
				  __ffs(block_size) - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
				  PAGE_SHIFT - __ffs(block_size) : 0);

	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	c->minimum_buffers = DM_BUFIO_MIN_BUFFERS;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.count_objects = dm_bufio_shrink_count;
	c->shrinker.scan_objects = dm_bufio_shrink_scan;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
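/*
 * Client lifecycle from a target's point of view (illustrative sketch;
 * "bdev" is assumed):
 *
 *	struct dm_bufio_client *c;
 *
 *	c = dm_bufio_client_create(bdev, 4096, 1, 0, NULL, NULL);
 *	if (IS_ERR(c))
 *		return PTR_ERR(c);
 *	...
 *	dm_bufio_client_destroy(c);
 *
 * Here 4096 is the power-of-two block size, one buffer is reserved so
 * forward progress is possible even when allocations fail, and no aux
 * data or callbacks are used.
 */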
/*
 * Free the buffering interface.
 * It is required that there are no references on any buffers.
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c)
{
	unsigned i;

	drop_buffers(c);

	unregister_shrinker(&c->shrinker);

	mutex_lock(&dm_bufio_clients_lock);

	list_del(&c->client_list);
	dm_bufio_client_count--;
	__cache_size_refresh();

	mutex_unlock(&dm_bufio_clients_lock);

	BUG_ON(!RB_EMPTY_ROOT(&c->buffer_tree));
	BUG_ON(c->need_reserved_buffers);

	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}

	for (i = 0; i < LIST_SIZE; i++)
		if (c->n_buffers[i])
			DMERR("leaked buffer count %d: %ld", i, c->n_buffers[i]);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(c->n_buffers[i]);

	kfree(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_destroy);
void dm_bufio_set_sector_offset(struct dm_bufio_client *c, sector_t start)
{
	c->start = start;
}
EXPORT_SYMBOL_GPL(dm_bufio_set_sector_offset);
static unsigned get_max_age_hz(void)
{
	unsigned max_age = ACCESS_ONCE(dm_bufio_max_age);

	if (max_age > UINT_MAX / HZ)
		max_age = UINT_MAX / HZ;

	return max_age * HZ;
}
static bool older_than(struct dm_buffer *b, unsigned long age_hz)
{
	return time_after_eq(jiffies, b->last_accessed + age_hz);
}
static void __evict_old_buffers(struct dm_bufio_client *c, unsigned long age_hz)
{
	struct dm_buffer *b, *tmp;
	unsigned long retain_target = get_retain_buffers(c);
	unsigned long count;
	LIST_HEAD(write_list);

	dm_bufio_lock(c);

	__check_watermark(c, &write_list);
	if (unlikely(!list_empty(&write_list))) {
		dm_bufio_unlock(c);
		__flush_write_list(&write_list);
		dm_bufio_lock(c);
	}

	count = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_CLEAN], lru_list) {
		if (count <= retain_target)
			break;

		if (!older_than(b, age_hz))
			break;

		if (__try_evict_buffer(b, 0))
			count--;

		cond_resched();
	}

	dm_bufio_unlock(c);
}
static void cleanup_old_buffers(void)
{
	unsigned long max_age_hz = get_max_age_hz();
	struct dm_bufio_client *c;

	mutex_lock(&dm_bufio_clients_lock);

	__cache_size_refresh();

	list_for_each_entry(c, &dm_bufio_all_clients, client_list)
		__evict_old_buffers(c, max_age_hz);

	mutex_unlock(&dm_bufio_clients_lock);
}
static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}
/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
			       DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

	if (mem > mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100))
		mem = mult_frac(VMALLOC_TOTAL, DM_BUFIO_VMALLOC_PERCENT, 100);

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = alloc_workqueue("dm_bufio_cache", WQ_MEM_RECLAIM, 0);
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}
/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
		kmem_cache_destroy(dm_bufio_caches[i]);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
			__func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
			__func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	BUG_ON(bug);
}
module_init(dm_bufio_init)
module_exit(dm_bufio_exit)

module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(retain_bytes, dm_bufio_retain_bytes, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(retain_bytes, "Try to keep at least this many bytes cached in memory");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");