/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */

#include "dm-bufio.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/shrinker.h>
#include <linux/module.h>

#define DM_MSG_PREFIX "bufio"
/*
 * Memory management policy:
 *	Limit the number of buffers to DM_BUFIO_MEMORY_PERCENT of main memory
 *	or DM_BUFIO_VMALLOC_PERCENT of vmalloc memory (whichever is lower).
 *	Always allocate at least DM_BUFIO_MIN_BUFFERS buffers.
 *	Start background writeback when there are DM_BUFIO_WRITEBACK_PERCENT
 *	dirty buffers.
 */
#define DM_BUFIO_MIN_BUFFERS		8

#define DM_BUFIO_MEMORY_PERCENT		2
#define DM_BUFIO_VMALLOC_PERCENT	25
#define DM_BUFIO_WRITEBACK_PERCENT	75

/*
 * Check buffer ages in this interval (seconds)
 */
#define DM_BUFIO_WORK_TIMER_SECS	10

/*
 * Free buffers when they are older than this (seconds)
 */
#define DM_BUFIO_DEFAULT_AGE_SECS	60
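/*
 * Illustration of the policy above (hypothetical numbers, not taken from
 * this file): with 4 GiB of directly mapped memory, DM_BUFIO_MEMORY_PERCENT
 * of 2 allows roughly 82 MiB of buffers; if the vmalloc arena were only
 * 128 MiB, DM_BUFIO_VMALLOC_PERCENT of 25 would cap the cache at 32 MiB
 * instead, because the lower of the two limits wins.
 */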
/*
 * The number of bvec entries that are embedded directly in the buffer.
 * If the chunk size is larger, dm-io is used to do the io.
 */
#define DM_BUFIO_INLINE_VECS		16

#define DM_BUFIO_HASH_BITS	20
#define DM_BUFIO_HASH(block) \
	((((block) >> DM_BUFIO_HASH_BITS) ^ (block)) & \
	 ((1 << DM_BUFIO_HASH_BITS) - 1))
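/*
 * Worked example (illustrative value only): for block 0x12345678 the macro
 * folds the bits above DM_BUFIO_HASH_BITS back into the low bits:
 * (0x12345678 >> 20) ^ 0x12345678 = 0x123 ^ 0x12345678 = 0x1234575b,
 * and masking with 0xfffff selects hash bucket 0x4575b.
 */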
/*
 * Don't try to use kmem_cache_alloc for blocks larger than this.
 * For explanation, see alloc_buffer_data below.
 */
#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT	(PAGE_SIZE >> 1)
#define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT	(PAGE_SIZE << (MAX_ORDER - 1))
/*
 * dm_buffer->list_mode
 */
#define LIST_CLEAN	0
#define LIST_DIRTY	1
#define LIST_SIZE	2

/*
 * Linking of buffers:
 *	All buffers are linked to cache_hash with their hash_list field.
 *
 *	Clean buffers that are not being written (B_WRITING not set)
 *	are linked to lru[LIST_CLEAN] with their lru_list field.
 *
 *	Dirty and clean buffers that are being written are linked to
 *	lru[LIST_DIRTY] with their lru_list field. When the write
 *	finishes, the buffer cannot be relinked immediately (because we
 *	are in an interrupt context and relinking requires process
 *	context), so some clean-not-writing buffers can be held on
 *	dirty_lru too. They are later added to lru in the process
 *	context.
 */
struct dm_bufio_client {
	struct mutex lock;

	struct list_head lru[LIST_SIZE];
	unsigned long n_buffers[LIST_SIZE];

	struct block_device *bdev;
	unsigned block_size;
	unsigned char sectors_per_block_bits;
	unsigned char pages_per_block_bits;
	unsigned char blocks_per_page_bits;
	unsigned aux_size;
	void (*alloc_callback)(struct dm_buffer *);
	void (*write_callback)(struct dm_buffer *);

	struct dm_io_client *dm_io;

	struct list_head reserved_buffers;
	unsigned need_reserved_buffers;

	struct hlist_head *cache_hash;
	wait_queue_head_t free_buffer_wait;

	int async_write_error;

	struct list_head client_list;
	struct shrinker shrinker;
};

/*
 * Buffer state bits.
 */
#define B_READING	0
#define B_DIRTY		1
#define B_WRITING	2
/*
 * Describes how the block was allocated:
 * kmem_cache_alloc(), __get_free_pages() or vmalloc().
 * See the comment at alloc_buffer_data.
 */
enum data_mode {
	DATA_MODE_SLAB = 0,
	DATA_MODE_GET_FREE_PAGES = 1,
	DATA_MODE_VMALLOC = 2,
	DATA_MODE_LIMIT = 3
};

struct dm_buffer {
	struct hlist_node hash_list;
	struct list_head lru_list;
	sector_t block;
	void *data;
	enum data_mode data_mode;
	unsigned char list_mode;		/* LIST_* */
	unsigned hold_count;
	int read_error;
	int write_error;
	unsigned long state;
	unsigned long last_accessed;
	struct dm_bufio_client *c;
	struct bio bio;
	struct bio_vec bio_vec[DM_BUFIO_INLINE_VECS];
};
/*----------------------------------------------------------------*/

static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
static char *dm_bufio_cache_names[PAGE_SHIFT - SECTOR_SHIFT];

static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
{
	unsigned ret = c->blocks_per_page_bits - 1;

	BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));

	return ret;
}

#define DM_BUFIO_CACHE(c)	(dm_bufio_caches[dm_bufio_cache_index(c)])
#define DM_BUFIO_CACHE_NAME(c)	(dm_bufio_cache_names[dm_bufio_cache_index(c)])
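/*
 * Example (illustrative, assuming 4 KiB pages): a client created with
 * block_size 1024 has blocks_per_page_bits = 2, so dm_bufio_cache_index()
 * returns 1 and the corresponding slab is named "dm_bufio_cache-1024"
 * by dm_bufio_client_create() below.
 */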
#define dm_bufio_in_request()	(!!current->bio_list)

static void dm_bufio_lock(struct dm_bufio_client *c)
{
	mutex_lock_nested(&c->lock, dm_bufio_in_request());
}

static int dm_bufio_trylock(struct dm_bufio_client *c)
{
	return mutex_trylock(&c->lock);
}

static void dm_bufio_unlock(struct dm_bufio_client *c)
{
	mutex_unlock(&c->lock);
}
/*
 * FIXME Move to sched.h?
 */
#ifdef CONFIG_PREEMPT_VOLUNTARY
#  define dm_bufio_cond_resched()		\
do {						\
	if (unlikely(need_resched()))		\
		_cond_resched();		\
} while (0)
#else
#  define dm_bufio_cond_resched()	do { } while (0)
#endif

/*----------------------------------------------------------------*/

/*
 * Default cache size: available memory divided by the ratio.
 */
static unsigned long dm_bufio_default_cache_size;

/*
 * Total cache size set by the user.
 */
static unsigned long dm_bufio_cache_size;

/*
 * A copy of dm_bufio_cache_size because dm_bufio_cache_size can change
 * at any time. If it disagrees, the user has changed cache size.
 */
static unsigned long dm_bufio_cache_size_latch;

static DEFINE_SPINLOCK(param_spinlock);

/*
 * Buffers are freed after this timeout
 */
static unsigned dm_bufio_max_age = DM_BUFIO_DEFAULT_AGE_SECS;

static unsigned long dm_bufio_peak_allocated;
static unsigned long dm_bufio_allocated_kmem_cache;
static unsigned long dm_bufio_allocated_get_free_pages;
static unsigned long dm_bufio_allocated_vmalloc;
static unsigned long dm_bufio_current_allocated;
/*----------------------------------------------------------------*/

/*
 * Per-client cache: dm_bufio_cache_size / dm_bufio_client_count
 */
static unsigned long dm_bufio_cache_size_per_client;

/*
 * The current number of clients.
 */
static int dm_bufio_client_count;

/*
 * The list of all clients.
 */
static LIST_HEAD(dm_bufio_all_clients);

/*
 * This mutex protects dm_bufio_cache_size_latch,
 * dm_bufio_cache_size_per_client and dm_bufio_client_count
 */
static DEFINE_MUTEX(dm_bufio_clients_lock);

/*----------------------------------------------------------------*/
static void adjust_total_allocated(enum data_mode data_mode, long diff)
{
	static unsigned long * const class_ptr[DATA_MODE_LIMIT] = {
		&dm_bufio_allocated_kmem_cache,
		&dm_bufio_allocated_get_free_pages,
		&dm_bufio_allocated_vmalloc,
	};

	spin_lock(&param_spinlock);

	*class_ptr[data_mode] += diff;

	dm_bufio_current_allocated += diff;

	if (dm_bufio_current_allocated > dm_bufio_peak_allocated)
		dm_bufio_peak_allocated = dm_bufio_current_allocated;

	spin_unlock(&param_spinlock);
}
/*
 * Change the number of clients and recalculate per-client limit.
 */
static void __cache_size_refresh(void)
{
	BUG_ON(!mutex_is_locked(&dm_bufio_clients_lock));
	BUG_ON(dm_bufio_client_count < 0);

	dm_bufio_cache_size_latch = dm_bufio_cache_size;

	barrier();

	/*
	 * Use default if set to 0 and report the actual cache size used.
	 */
	if (!dm_bufio_cache_size_latch) {
		(void)cmpxchg(&dm_bufio_cache_size, 0,
			      dm_bufio_default_cache_size);
		dm_bufio_cache_size_latch = dm_bufio_default_cache_size;
	}

	dm_bufio_cache_size_per_client = dm_bufio_cache_size_latch /
					 (dm_bufio_client_count ? : 1);
}
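/*
 * Example (hypothetical figures): with dm_bufio_cache_size_latch at 64 MiB
 * and two registered clients, each client's share is roughly 32 MiB; with
 * no clients, the "?: 1" fallback avoids a division by zero.
 */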
/*
 * Allocating buffer data.
 *
 * Small buffers are allocated with kmem_cache, to use space optimally.
 *
 * For large buffers, we choose between get_free_pages and vmalloc.
 * Each has advantages and disadvantages.
 *
 * __get_free_pages can randomly fail if the memory is fragmented.
 * __vmalloc won't randomly fail, but vmalloc space is limited (it may be
 * as low as 128M) so using it for caching is not appropriate.
 *
 * If the allocation may fail we use __get_free_pages. Memory fragmentation
 * won't have a fatal effect here, but it just causes flushes of some other
 * buffers and more I/O will be performed. Don't use __get_free_pages if it
 * always fails (i.e. order >= MAX_ORDER).
 *
 * If the allocation shouldn't fail we use __vmalloc. This is only for the
 * initial reserve allocation, so there's no risk of wasting all vmalloc
 * space.
 */
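/*
 * Illustration of the resulting thresholds (assumes 4 KiB pages and
 * MAX_ORDER = 11, i.e. common x86-64 values): blocks up to
 * DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT = 2 KiB come from a slab cache; blocks up
 * to DM_BUFIO_BLOCK_SIZE_GFP_LIMIT = 4 MiB may use __get_free_pages when
 * the caller passed __GFP_NORETRY; everything else falls back to __vmalloc.
 */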
static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
			       enum data_mode *data_mode)
{
	unsigned noio_flag;
	void *ptr;

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
		*data_mode = DATA_MODE_SLAB;
		return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
	}

	if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
	    gfp_mask & __GFP_NORETRY) {
		*data_mode = DATA_MODE_GET_FREE_PAGES;
		return (void *)__get_free_pages(gfp_mask,
						c->pages_per_block_bits);
	}

	*data_mode = DATA_MODE_VMALLOC;

	/*
	 * __vmalloc allocates the data pages and auxiliary structures with
	 * gfp_flags that were specified, but pagetables are always allocated
	 * with GFP_KERNEL, no matter what was specified as gfp_mask.
	 *
	 * Consequently, we must set the per-process flag PF_MEMALLOC_NOIO so
	 * that all allocations done by this process (including pagetables)
	 * are done as if GFP_NOIO was specified.
	 */

	if (gfp_mask & __GFP_NORETRY) {
		noio_flag = current->flags & PF_MEMALLOC;
		current->flags |= PF_MEMALLOC;
	}

	ptr = __vmalloc(c->block_size, gfp_mask, PAGE_KERNEL);

	if (gfp_mask & __GFP_NORETRY)
		current->flags = (current->flags & ~PF_MEMALLOC) | noio_flag;

	return ptr;
}
/*
 * Free buffer's data.
 */
static void free_buffer_data(struct dm_bufio_client *c,
			     void *data, enum data_mode data_mode)
{
	switch (data_mode) {
	case DATA_MODE_SLAB:
		kmem_cache_free(DM_BUFIO_CACHE(c), data);
		break;

	case DATA_MODE_GET_FREE_PAGES:
		free_pages((unsigned long)data, c->pages_per_block_bits);
		break;

	case DATA_MODE_VMALLOC:
		vfree(data);
		break;

	default:
		DMCRIT("dm_bufio_free_buffer_data: bad data mode: %d",
		       data_mode);
		BUG();
	}
}
/*
 * Allocate buffer and its data.
 */
static struct dm_buffer *alloc_buffer(struct dm_bufio_client *c, gfp_t gfp_mask)
{
	struct dm_buffer *b = kmalloc(sizeof(struct dm_buffer) + c->aux_size,
				      gfp_mask);

	if (!b)
		return NULL;

	b->c = c;

	b->data = alloc_buffer_data(c, gfp_mask, &b->data_mode);
	if (!b->data) {
		kfree(b);
		return NULL;
	}

	adjust_total_allocated(b->data_mode, (long)c->block_size);

	return b;
}
/*
 * Free buffer and its data.
 */
static void free_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	adjust_total_allocated(b->data_mode, -(long)c->block_size);

	free_buffer_data(c, b->data, b->data_mode);
	kfree(b);
}
/*
 * Link buffer to the hash list and clean or dirty queue.
 */
static void __link_buffer(struct dm_buffer *b, sector_t block, int dirty)
{
	struct dm_bufio_client *c = b->c;

	c->n_buffers[dirty]++;
	b->block = block;
	b->list_mode = dirty;
	list_add(&b->lru_list, &c->lru[dirty]);
	hlist_add_head(&b->hash_list, &c->cache_hash[DM_BUFIO_HASH(block)]);
	b->last_accessed = jiffies;
}
/*
 * Unlink buffer from the hash list and dirty or clean queue.
 */
static void __unlink_buffer(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	hlist_del(&b->hash_list);
	list_del(&b->lru_list);
}
/*
 * Place the buffer at the head of the dirty or clean LRU queue.
 */
static void __relink_lru(struct dm_buffer *b, int dirty)
{
	struct dm_bufio_client *c = b->c;

	BUG_ON(!c->n_buffers[b->list_mode]);

	c->n_buffers[b->list_mode]--;
	c->n_buffers[dirty]++;
	b->list_mode = dirty;
	list_del(&b->lru_list);
	list_add(&b->lru_list, &c->lru[dirty]);
}
/*----------------------------------------------------------------
 * Submit I/O on the buffer.
 *
 * Bio interface is faster but it has some problems:
 *	the vector list is limited (increasing this limit increases
 *	memory-consumption per buffer, so it is not viable);
 *
 *	the memory must be direct-mapped, not vmalloced;
 *
 *	the I/O driver can reject requests spuriously if it thinks that
 *	the requests are too big for the device or if they cross a
 *	controller-defined memory boundary.
 *
 * If the buffer is small enough (up to DM_BUFIO_INLINE_VECS pages) and
 * it is not vmalloced, try using the bio interface.
 *
 * If the buffer is big, if it is vmalloced or if the underlying device
 * rejects the bio because it is too large, use the dm-io layer to do the
 * I/O. The dm-io layer splits the I/O into multiple requests, avoiding
 * the above shortcomings.
 *--------------------------------------------------------------*/
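/*
 * Illustration (assuming 4 KiB pages): with DM_BUFIO_INLINE_VECS = 16,
 * buffers of up to 64 KiB that were not vmalloc'ed take the inline bio
 * path in submit_io() below; larger or vmalloc'ed buffers go through dm-io.
 */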
/*
 * dm-io completion routine. It just calls b->bio.bi_end_io, pretending
 * that the request was handled directly with the bio interface.
 */
static void dmio_complete(unsigned long error, void *context)
{
	struct dm_buffer *b = context;

	b->bio.bi_end_io(&b->bio, error ? -EIO : 0);
}

static void use_dmio(struct dm_buffer *b, int rw, sector_t block,
		     bio_end_io_t *end_io)
{
	int r;
	struct dm_io_request io_req = {
		.bi_rw = rw,
		.notify.fn = dmio_complete,
		.notify.context = b,
		.client = b->c->dm_io,
	};
	struct dm_io_region region = {
		.bdev = b->c->bdev,
		.sector = block << b->c->sectors_per_block_bits,
		.count = b->c->block_size >> SECTOR_SHIFT,
	};

	if (b->data_mode != DATA_MODE_VMALLOC) {
		io_req.mem.type = DM_IO_KMEM;
		io_req.mem.ptr.addr = b->data;
	} else {
		io_req.mem.type = DM_IO_VMA;
		io_req.mem.ptr.vma = b->data;
	}

	b->bio.bi_end_io = end_io;

	r = dm_io(&io_req, 1, &region, NULL);
	if (r)
		end_io(&b->bio, r);
}
static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
			   bio_end_io_t *end_io)
{
	char *ptr;
	int len;

	bio_init(&b->bio);
	b->bio.bi_io_vec = b->bio_vec;
	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
	b->bio.bi_sector = block << b->c->sectors_per_block_bits;
	b->bio.bi_bdev = b->c->bdev;
	b->bio.bi_end_io = end_io;

	/*
	 * We assume that if len >= PAGE_SIZE, ptr is page-aligned.
	 * If len < PAGE_SIZE, the buffer doesn't cross a page boundary.
	 */
	ptr = b->data;
	len = b->c->block_size;

	if (len >= PAGE_SIZE)
		BUG_ON((unsigned long)ptr & (PAGE_SIZE - 1));
	else
		BUG_ON((unsigned long)ptr & (len - 1));

	do {
		if (!bio_add_page(&b->bio, virt_to_page(ptr),
				  len < PAGE_SIZE ? len : PAGE_SIZE,
				  virt_to_phys(ptr) & (PAGE_SIZE - 1))) {
			BUG_ON(b->c->block_size <= PAGE_SIZE);
			use_dmio(b, rw, block, end_io);
			return;
		}

		len -= PAGE_SIZE;
		ptr += PAGE_SIZE;
	} while (len > 0);

	submit_bio(rw, &b->bio);
}
static void submit_io(struct dm_buffer *b, int rw, sector_t block,
		      bio_end_io_t *end_io)
{
	if (rw == WRITE && b->c->write_callback)
		b->c->write_callback(b);

	if (b->c->block_size <= DM_BUFIO_INLINE_VECS * PAGE_SIZE &&
	    b->data_mode != DATA_MODE_VMALLOC)
		use_inline_bio(b, rw, block, end_io);
	else
		use_dmio(b, rw, block, end_io);
}
/*----------------------------------------------------------------
 * Writing dirty buffers
 *--------------------------------------------------------------*/

/*
 * The endio routine for write.
 *
 * Set the error, clear the B_WRITING bit and wake anyone who was waiting on
 * it.
 */
static void write_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->write_error = error;
	if (unlikely(error)) {
		struct dm_bufio_client *c = b->c;
		(void)cmpxchg(&c->async_write_error, 0, error);
	}

	BUG_ON(!test_bit(B_WRITING, &b->state));

	smp_mb__before_clear_bit();
	clear_bit(B_WRITING, &b->state);
	smp_mb__after_clear_bit();

	wake_up_bit(&b->state, B_WRITING);
}
/*
 * This function is called when wait_on_bit is actually waiting.
 */
static int do_io_schedule(void *word)
{
	io_schedule();

	return 0;
}

/*
 * Initiate a write on a dirty buffer, but don't wait for it.
 *
 * - If the buffer is not dirty, exit.
 * - If there is some previous write going on, wait for it to finish (we
 *   can't have two writes on the same buffer simultaneously).
 * - Submit our write and don't wait on it. We set B_WRITING, indicating
 *   that there is a write in progress.
 */
static void __write_dirty_buffer(struct dm_buffer *b)
{
	if (!test_bit(B_DIRTY, &b->state))
		return;

	clear_bit(B_DIRTY, &b->state);
	wait_on_bit_lock(&b->state, B_WRITING,
			 do_io_schedule, TASK_UNINTERRUPTIBLE);

	submit_io(b, WRITE, b->block, write_endio);
}
/*
 * Wait until any activity on the buffer finishes. Possibly write the
 * buffer if it is dirty. When this function finishes, there is no I/O
 * running on the buffer and the buffer is not dirty.
 */
static void __make_buffer_clean(struct dm_buffer *b)
{
	BUG_ON(b->hold_count);

	if (!b->state)	/* fast case */
		return;

	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);
	__write_dirty_buffer(b);
	wait_on_bit(&b->state, B_WRITING, do_io_schedule, TASK_UNINTERRUPTIBLE);
}
/*
 * Find some buffer that is not held by anybody, clean it, unlink it and
 * return it.
 */
static struct dm_buffer *__get_unclaimed_buffer(struct dm_bufio_client *c)
{
	struct dm_buffer *b;

	list_for_each_entry_reverse(b, &c->lru[LIST_CLEAN], lru_list) {
		BUG_ON(test_bit(B_WRITING, &b->state));
		BUG_ON(test_bit(B_DIRTY, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	list_for_each_entry_reverse(b, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!b->hold_count) {
			__make_buffer_clean(b);
			__unlink_buffer(b);
			return b;
		}
		dm_bufio_cond_resched();
	}

	return NULL;
}
/*
 * Wait until some other threads free some buffer or release hold count on
 * some buffer.
 *
 * This function is entered with c->lock held, drops it and regains it
 * before exiting.
 */
static void __wait_for_free_buffer(struct dm_bufio_client *c)
{
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&c->free_buffer_wait, &wait);
	set_task_state(current, TASK_UNINTERRUPTIBLE);
	dm_bufio_unlock(c);

	io_schedule();

	set_task_state(current, TASK_RUNNING);
	remove_wait_queue(&c->free_buffer_wait, &wait);

	dm_bufio_lock(c);
}
enum new_flag {
	NF_FRESH = 0,
	NF_READ = 1,
	NF_GET = 2,
	NF_PREFETCH = 3
};

/*
 * Allocate a new buffer. If the allocation is not possible, wait until
 * some other thread frees a buffer.
 *
 * May drop the lock and regain it.
 */
static struct dm_buffer *__alloc_buffer_wait_no_callback(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b;

	/*
	 * dm-bufio is resistant to allocation failures (it just keeps
	 * one buffer reserved in case all the allocations fail).
	 * So set flags to not try too hard:
	 *	GFP_NOIO: don't recurse into the I/O layer
	 *	__GFP_NORETRY: don't retry and rather return failure
	 *	__GFP_NOMEMALLOC: don't use emergency reserves
	 *	__GFP_NOWARN: don't print a warning in case of failure
	 *
	 * For debugging, if we set the cache size to 1, no new buffers will
	 * be allocated.
	 */
	while (1) {
		if (dm_bufio_cache_size_latch != 1) {
			b = alloc_buffer(c, GFP_NOIO | __GFP_NORETRY | __GFP_NOMEMALLOC | __GFP_NOWARN);
			if (b)
				return b;
		}

		if (nf == NF_PREFETCH)
			return NULL;

		if (!list_empty(&c->reserved_buffers)) {
			b = list_entry(c->reserved_buffers.next,
				       struct dm_buffer, lru_list);
			list_del(&b->lru_list);
			c->need_reserved_buffers++;

			return b;
		}

		b = __get_unclaimed_buffer(c);
		if (b)
			return b;

		__wait_for_free_buffer(c);
	}
}
static struct dm_buffer *__alloc_buffer_wait(struct dm_bufio_client *c, enum new_flag nf)
{
	struct dm_buffer *b = __alloc_buffer_wait_no_callback(c, nf);

	if (!b)
		return NULL;

	if (c->alloc_callback)
		c->alloc_callback(b);

	return b;
}
/*
 * Free a buffer and wake other threads waiting for free buffers.
 */
static void __free_buffer_wake(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	if (!c->need_reserved_buffers)
		free_buffer(b);
	else {
		list_add(&b->lru_list, &c->reserved_buffers);
		c->need_reserved_buffers--;
	}

	wake_up(&c->free_buffer_wait);
}
static void __write_dirty_buffers_async(struct dm_bufio_client *c, int no_wait)
{
	struct dm_buffer *b, *tmp;

	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		BUG_ON(test_bit(B_READING, &b->state));

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state)) {
			__relink_lru(b, LIST_CLEAN);
			continue;
		}

		if (no_wait && test_bit(B_WRITING, &b->state))
			return;

		__write_dirty_buffer(b);
		dm_bufio_cond_resched();
	}
}
/*
 * Get writeback threshold and buffer limit for a given client.
 */
static void __get_memory_limit(struct dm_bufio_client *c,
			       unsigned long *threshold_buffers,
			       unsigned long *limit_buffers)
{
	unsigned long buffers;

	if (dm_bufio_cache_size != dm_bufio_cache_size_latch) {
		mutex_lock(&dm_bufio_clients_lock);
		__cache_size_refresh();
		mutex_unlock(&dm_bufio_clients_lock);
	}

	buffers = dm_bufio_cache_size_per_client >>
		  (c->sectors_per_block_bits + SECTOR_SHIFT);

	if (buffers < DM_BUFIO_MIN_BUFFERS)
		buffers = DM_BUFIO_MIN_BUFFERS;

	*limit_buffers = buffers;
	*threshold_buffers = buffers * DM_BUFIO_WRITEBACK_PERCENT / 100;
}
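/*
 * Example (hypothetical figures): with a 64 MiB per-client share and 4 KiB
 * blocks (sectors_per_block_bits = 3), limit_buffers is
 * 64 MiB >> 12 = 16384 and threshold_buffers is 16384 * 75 / 100 = 12288,
 * so background writeback starts once 12288 dirty buffers accumulate.
 */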
/*
 * Check if we're over the watermark.
 * If we are over threshold_buffers, start freeing buffers.
 * If we're over "limit_buffers", block until we get under the limit.
 */
static void __check_watermark(struct dm_bufio_client *c)
{
	unsigned long threshold_buffers, limit_buffers;

	__get_memory_limit(c, &threshold_buffers, &limit_buffers);

	while (c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY] >
	       limit_buffers) {

		struct dm_buffer *b = __get_unclaimed_buffer(c);

		if (!b)
			return;

		__free_buffer_wake(b);
		dm_bufio_cond_resched();
	}

	if (c->n_buffers[LIST_DIRTY] > threshold_buffers)
		__write_dirty_buffers_async(c, 1);
}
/*
 * Find a buffer in the hash.
 */
static struct dm_buffer *__find(struct dm_bufio_client *c, sector_t block)
{
	struct dm_buffer *b;
	struct hlist_node *hn;

	hlist_for_each_entry(b, hn, &c->cache_hash[DM_BUFIO_HASH(block)],
			     hash_list) {
		dm_bufio_cond_resched();
		if (b->block == block)
			return b;
	}

	return NULL;
}
/*----------------------------------------------------------------
 * Getting buffers and releasing them
 *--------------------------------------------------------------*/

static struct dm_buffer *__bufio_new(struct dm_bufio_client *c, sector_t block,
				     enum new_flag nf, int *need_submit)
{
	struct dm_buffer *b, *new_b = NULL;

	*need_submit = 0;

	b = __find(c, block);
	if (b)
		goto found_buffer;

	if (nf == NF_GET)
		return NULL;

	new_b = __alloc_buffer_wait(c, nf);
	if (!new_b)
		return NULL;

	/*
	 * We've had a period where the mutex was unlocked, so we need to
	 * recheck the hash table.
	 */
	b = __find(c, block);
	if (b) {
		__free_buffer_wake(new_b);
		goto found_buffer;
	}

	__check_watermark(c);

	b = new_b;
	b->hold_count = 1;
	b->read_error = 0;
	b->write_error = 0;
	__link_buffer(b, block, LIST_CLEAN);

	if (nf == NF_FRESH) {
		b->state = 0;
		return b;
	}

	b->state = 1 << B_READING;
	*need_submit = 1;

	return b;

found_buffer:
	if (nf == NF_PREFETCH)
		return NULL;
	/*
	 * Note: it is essential that we don't wait for the buffer to be
	 * read if dm_bufio_get function is used. Both dm_bufio_get and
	 * dm_bufio_prefetch can be used in the driver request routine.
	 * If the user called both dm_bufio_prefetch and dm_bufio_get on
	 * the same buffer, it would deadlock if we waited.
	 */
	if (nf == NF_GET && unlikely(test_bit(B_READING, &b->state)))
		return NULL;

	b->hold_count++;
	__relink_lru(b, test_bit(B_DIRTY, &b->state) ||
		     test_bit(B_WRITING, &b->state));
	return b;
}
/*
 * The endio routine for reading: set the error, clear the bit and wake up
 * anyone waiting on the buffer.
 */
static void read_endio(struct bio *bio, int error)
{
	struct dm_buffer *b = container_of(bio, struct dm_buffer, bio);

	b->read_error = error;

	BUG_ON(!test_bit(B_READING, &b->state));

	smp_mb__before_clear_bit();
	clear_bit(B_READING, &b->state);
	smp_mb__after_clear_bit();

	wake_up_bit(&b->state, B_READING);
}
/*
 * A common routine for dm_bufio_new and dm_bufio_read. Operation of these
 * functions is similar except that dm_bufio_new doesn't read the
 * buffer from the disk (assuming that the caller overwrites all the data
 * and uses dm_bufio_mark_buffer_dirty to write new data back).
 */
static void *new_read(struct dm_bufio_client *c, sector_t block,
		      enum new_flag nf, struct dm_buffer **bp)
{
	int need_submit;
	struct dm_buffer *b;

	dm_bufio_lock(c);
	b = __bufio_new(c, block, nf, &need_submit);
	dm_bufio_unlock(c);

	if (!b)
		return b;

	if (need_submit)
		submit_io(b, READ, b->block, read_endio);

	wait_on_bit(&b->state, B_READING, do_io_schedule, TASK_UNINTERRUPTIBLE);

	if (b->read_error) {
		int error = b->read_error;

		dm_bufio_release(b);

		return ERR_PTR(error);
	}

	*bp = b;

	return b->data;
}
void *dm_bufio_get(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	return new_read(c, block, NF_GET, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_get);

void *dm_bufio_read(struct dm_bufio_client *c, sector_t block,
		    struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_READ, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_read);

void *dm_bufio_new(struct dm_bufio_client *c, sector_t block,
		   struct dm_buffer **bp)
{
	BUG_ON(dm_bufio_in_request());

	return new_read(c, block, NF_FRESH, bp);
}
EXPORT_SYMBOL_GPL(dm_bufio_new);
void dm_bufio_prefetch(struct dm_bufio_client *c,
		       sector_t block, unsigned n_blocks)
{
	struct blk_plug plug;

	blk_start_plug(&plug);
	dm_bufio_lock(c);

	for (; n_blocks--; block++) {
		int need_submit;
		struct dm_buffer *b;
		b = __bufio_new(c, block, NF_PREFETCH, &need_submit);
		if (unlikely(b != NULL)) {
			dm_bufio_unlock(c);

			if (need_submit)
				submit_io(b, READ, b->block, read_endio);
			dm_bufio_release(b);

			dm_bufio_cond_resched();

			if (!n_blocks)
				goto flush_plug;
			dm_bufio_lock(c);
		}
	}

	dm_bufio_unlock(c);

flush_plug:
	blk_finish_plug(&plug);
}
EXPORT_SYMBOL_GPL(dm_bufio_prefetch);
void dm_bufio_release(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(!b->hold_count);

	b->hold_count--;
	if (!b->hold_count) {
		wake_up(&c->free_buffer_wait);

		/*
		 * If there were errors on the buffer, and the buffer is not
		 * to be written, free the buffer. There is no point in caching
		 * an invalid buffer.
		 */
		if ((b->read_error || b->write_error) &&
		    !test_bit(B_READING, &b->state) &&
		    !test_bit(B_WRITING, &b->state) &&
		    !test_bit(B_DIRTY, &b->state)) {
			__unlink_buffer(b);
			__free_buffer_wake(b);
		}
	}

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_release);
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b)
{
	struct dm_bufio_client *c = b->c;

	dm_bufio_lock(c);

	BUG_ON(test_bit(B_READING, &b->state));

	if (!test_and_set_bit(B_DIRTY, &b->state))
		__relink_lru(b, LIST_DIRTY);

	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_mark_buffer_dirty);
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c)
{
	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0);
	dm_bufio_unlock(c);
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers_async);
/*
 * For performance, it is essential that the buffers are written asynchronously
 * and simultaneously (so that the block layer can merge the writes) and then
 * waited upon.
 *
 * Finally, we flush the hardware disk cache.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c)
{
	int a, f;
	unsigned long buffers_processed = 0;
	struct dm_buffer *b, *tmp;

	dm_bufio_lock(c);
	__write_dirty_buffers_async(c, 0);

again:
	list_for_each_entry_safe_reverse(b, tmp, &c->lru[LIST_DIRTY], lru_list) {
		int dropped_lock = 0;

		if (buffers_processed < c->n_buffers[LIST_DIRTY])
			buffers_processed++;

		BUG_ON(test_bit(B_READING, &b->state));

		if (test_bit(B_WRITING, &b->state)) {
			if (buffers_processed < c->n_buffers[LIST_DIRTY]) {
				dropped_lock = 1;
				b->hold_count++;
				dm_bufio_unlock(c);
				wait_on_bit(&b->state, B_WRITING,
					    do_io_schedule,
					    TASK_UNINTERRUPTIBLE);
				dm_bufio_lock(c);
				b->hold_count--;
			} else
				wait_on_bit(&b->state, B_WRITING,
					    do_io_schedule,
					    TASK_UNINTERRUPTIBLE);
		}

		if (!test_bit(B_DIRTY, &b->state) &&
		    !test_bit(B_WRITING, &b->state))
			__relink_lru(b, LIST_CLEAN);

		dm_bufio_cond_resched();

		/*
		 * If we dropped the lock, the list is no longer consistent,
		 * so we must restart the search.
		 *
		 * In the most common case, the buffer just processed is
		 * relinked to the clean list, so we won't loop scanning the
		 * same buffer again and again.
		 *
		 * This may livelock if there is another thread simultaneously
		 * dirtying buffers, so we count the number of buffers walked
		 * and if it exceeds the total number of buffers, it means that
		 * someone is doing some writes simultaneously with us. In
		 * this case, stop, dropping the lock.
		 */
		if (dropped_lock)
			goto again;
	}
	wake_up(&c->free_buffer_wait);
	dm_bufio_unlock(c);

	a = xchg(&c->async_write_error, 0);
	f = dm_bufio_issue_flush(c);
	if (a)
		return a;

	return f;
}
EXPORT_SYMBOL_GPL(dm_bufio_write_dirty_buffers);
/*
 * Use dm-io to send an empty barrier to flush the device.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c)
{
	struct dm_io_request io_req = {
		.bi_rw = WRITE_FLUSH,
		.mem.type = DM_IO_KMEM,
		.mem.ptr.addr = NULL,
		.client = c->dm_io,
	};
	struct dm_io_region io_reg = {
		.bdev = c->bdev,
		.sector = 0,
		.count = 0,
	};

	BUG_ON(dm_bufio_in_request());

	return dm_io(&io_req, 1, &io_reg, NULL);
}
EXPORT_SYMBOL_GPL(dm_bufio_issue_flush);
/*
 * We first delete any other buffer that may be at that new location.
 *
 * Then, we write the buffer to the original location if it was dirty.
 *
 * Then, if we are the only one who is holding the buffer, relink the buffer
 * in the hash queue for the new location.
 *
 * If there was someone else holding the buffer, we write it to the new
 * location but not relink it, because that other user needs to have the buffer
 * at the same place.
 */
void dm_bufio_release_move(struct dm_buffer *b, sector_t new_block)
{
	struct dm_bufio_client *c = b->c;
	struct dm_buffer *new;

	BUG_ON(dm_bufio_in_request());

	dm_bufio_lock(c);

retry:
	new = __find(c, new_block);
	if (new) {
		if (new->hold_count) {
			__wait_for_free_buffer(c);
			goto retry;
		}

		/*
		 * FIXME: Is there any point waiting for a write that's going
		 * to be overwritten in a bit?
		 */
		__make_buffer_clean(new);
		__unlink_buffer(new);
		__free_buffer_wake(new);
	}

	BUG_ON(!b->hold_count);
	BUG_ON(test_bit(B_READING, &b->state));

	__write_dirty_buffer(b);
	if (b->hold_count == 1) {
		wait_on_bit(&b->state, B_WRITING,
			    do_io_schedule, TASK_UNINTERRUPTIBLE);
		set_bit(B_DIRTY, &b->state);
		__unlink_buffer(b);
		__link_buffer(b, new_block, LIST_DIRTY);
	} else {
		sector_t old_block;
		wait_on_bit_lock(&b->state, B_WRITING,
				 do_io_schedule, TASK_UNINTERRUPTIBLE);
		/*
		 * Relink the buffer to "new_block" so that write_callback
		 * sees "new_block" as the block number.
		 * After the write, link the buffer back to old_block.
		 * All this must be done under the bufio lock, so that the
		 * block number change isn't visible to other threads.
		 */
		old_block = b->block;
		__unlink_buffer(b);
		__link_buffer(b, new_block, b->list_mode);
		submit_io(b, WRITE, new_block, write_endio);
		wait_on_bit(&b->state, B_WRITING,
			    do_io_schedule, TASK_UNINTERRUPTIBLE);
		__unlink_buffer(b);
		__link_buffer(b, old_block, b->list_mode);
	}

	dm_bufio_unlock(c);
	dm_bufio_release(b);
}
EXPORT_SYMBOL_GPL(dm_bufio_release_move);
unsigned dm_bufio_get_block_size(struct dm_bufio_client *c)
{
	return c->block_size;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_size);

sector_t dm_bufio_get_device_size(struct dm_bufio_client *c)
{
	return i_size_read(c->bdev->bd_inode) >>
	       (SECTOR_SHIFT + c->sectors_per_block_bits);
}
EXPORT_SYMBOL_GPL(dm_bufio_get_device_size);

sector_t dm_bufio_get_block_number(struct dm_buffer *b)
{
	return b->block;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_number);

void *dm_bufio_get_block_data(struct dm_buffer *b)
{
	return b->data;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_block_data);

void *dm_bufio_get_aux_data(struct dm_buffer *b)
{
	return b + 1;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_aux_data);

struct dm_bufio_client *dm_bufio_get_client(struct dm_buffer *b)
{
	return b->c;
}
EXPORT_SYMBOL_GPL(dm_bufio_get_client);
static void drop_buffers(struct dm_bufio_client *c)
{
	struct dm_buffer *b;
	int i;

	BUG_ON(dm_bufio_in_request());

	/*
	 * An optimization so that the buffers are not written one-by-one.
	 */
	dm_bufio_write_dirty_buffers_async(c);

	dm_bufio_lock(c);

	while ((b = __get_unclaimed_buffer(c)))
		__free_buffer_wake(b);

	for (i = 0; i < LIST_SIZE; i++)
		list_for_each_entry(b, &c->lru[i], lru_list)
			DMERR("leaked buffer %llx, hold count %u, list %d",
			      (unsigned long long)b->block, b->hold_count, i);

	for (i = 0; i < LIST_SIZE; i++)
		BUG_ON(!list_empty(&c->lru[i]));

	dm_bufio_unlock(c);
}
/*
 * Test if the buffer is unused and too old, and commit it.
 * And if noio is set, we must not do any I/O because we hold
 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted
 * to a different bufio client.
 */
static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
				unsigned long max_jiffies)
{
	if (jiffies - b->last_accessed < max_jiffies)
		return 1;

	if (!(gfp & __GFP_IO)) {
		if (test_bit(B_READING, &b->state) ||
		    test_bit(B_WRITING, &b->state) ||
		    test_bit(B_DIRTY, &b->state))
			return 1;
	}

	if (b->hold_count)
		return 1;

	__make_buffer_clean(b);
	__unlink_buffer(b);
	__free_buffer_wake(b);

	return 0;
}
static void __scan(struct dm_bufio_client *c, unsigned long nr_to_scan,
		   struct shrink_control *sc)
{
	int l;
	struct dm_buffer *b, *tmp;

	for (l = 0; l < LIST_SIZE; l++) {
		list_for_each_entry_safe_reverse(b, tmp, &c->lru[l], lru_list)
			if (!__cleanup_old_buffer(b, sc->gfp_mask, 0) &&
			    !--nr_to_scan)
				return;
		dm_bufio_cond_resched();
	}
}
static int shrink(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct dm_bufio_client *c =
	    container_of(shrinker, struct dm_bufio_client, shrinker);
	unsigned long r;
	unsigned long nr_to_scan = sc->nr_to_scan;

	if (sc->gfp_mask & __GFP_IO)
		dm_bufio_lock(c);
	else if (!dm_bufio_trylock(c))
		return !nr_to_scan ? 0 : -1;

	if (nr_to_scan)
		__scan(c, nr_to_scan, sc);

	r = c->n_buffers[LIST_CLEAN] + c->n_buffers[LIST_DIRTY];
	if (r > INT_MAX)
		r = INT_MAX;

	dm_bufio_unlock(c);

	return r;
}
/*
 * Create the buffering interface
 */
struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
					       unsigned reserved_buffers, unsigned aux_size,
					       void (*alloc_callback)(struct dm_buffer *),
					       void (*write_callback)(struct dm_buffer *))
{
	int r;
	struct dm_bufio_client *c;
	unsigned i;

	BUG_ON(block_size < 1 << SECTOR_SHIFT ||
	       (block_size & (block_size - 1)));

	c = kmalloc(sizeof(*c), GFP_KERNEL);
	if (!c) {
		r = -ENOMEM;
		goto bad_client;
	}
	c->cache_hash = vmalloc(sizeof(struct hlist_head) << DM_BUFIO_HASH_BITS);
	if (!c->cache_hash) {
		r = -ENOMEM;
		goto bad_hash;
	}

	c->bdev = bdev;
	c->block_size = block_size;
	c->sectors_per_block_bits = ffs(block_size) - 1 - SECTOR_SHIFT;
	c->pages_per_block_bits = (ffs(block_size) - 1 >= PAGE_SHIFT) ?
				  ffs(block_size) - 1 - PAGE_SHIFT : 0;
	c->blocks_per_page_bits = (ffs(block_size) - 1 < PAGE_SHIFT ?
				  PAGE_SHIFT - (ffs(block_size) - 1) : 0);
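	/*
	 * Worked example of the three shift values above (illustrative,
	 * assuming 4 KiB pages): for block_size 4096, ffs(4096) - 1 = 12,
	 * so sectors_per_block_bits = 3 (8 sectors per block) and the two
	 * page-related counts are 0; for block_size 512 the same math gives
	 * sectors_per_block_bits = 0 and blocks_per_page_bits = 3
	 * (8 blocks per page).
	 */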
	c->aux_size = aux_size;
	c->alloc_callback = alloc_callback;
	c->write_callback = write_callback;

	for (i = 0; i < LIST_SIZE; i++) {
		INIT_LIST_HEAD(&c->lru[i]);
		c->n_buffers[i] = 0;
	}

	for (i = 0; i < 1 << DM_BUFIO_HASH_BITS; i++)
		INIT_HLIST_HEAD(&c->cache_hash[i]);

	mutex_init(&c->lock);
	INIT_LIST_HEAD(&c->reserved_buffers);
	c->need_reserved_buffers = reserved_buffers;

	init_waitqueue_head(&c->free_buffer_wait);
	c->async_write_error = 0;

	c->dm_io = dm_io_client_create();
	if (IS_ERR(c->dm_io)) {
		r = PTR_ERR(c->dm_io);
		goto bad_dm_io;
	}

	mutex_lock(&dm_bufio_clients_lock);
	if (c->blocks_per_page_bits) {
		if (!DM_BUFIO_CACHE_NAME(c)) {
			DM_BUFIO_CACHE_NAME(c) = kasprintf(GFP_KERNEL, "dm_bufio_cache-%u", c->block_size);
			if (!DM_BUFIO_CACHE_NAME(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}

		if (!DM_BUFIO_CACHE(c)) {
			DM_BUFIO_CACHE(c) = kmem_cache_create(DM_BUFIO_CACHE_NAME(c),
							      c->block_size,
							      c->block_size, 0, NULL);
			if (!DM_BUFIO_CACHE(c)) {
				r = -ENOMEM;
				mutex_unlock(&dm_bufio_clients_lock);
				goto bad_cache;
			}
		}
	}
	mutex_unlock(&dm_bufio_clients_lock);

	while (c->need_reserved_buffers) {
		struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);

		if (!b) {
			r = -ENOMEM;
			goto bad_buffer;
		}
		__free_buffer_wake(b);
	}

	mutex_lock(&dm_bufio_clients_lock);
	dm_bufio_client_count++;
	list_add(&c->client_list, &dm_bufio_all_clients);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	c->shrinker.shrink = shrink;
	c->shrinker.seeks = 1;
	c->shrinker.batch = 0;
	register_shrinker(&c->shrinker);

	return c;

bad_buffer:
bad_cache:
	while (!list_empty(&c->reserved_buffers)) {
		struct dm_buffer *b = list_entry(c->reserved_buffers.next,
						 struct dm_buffer, lru_list);
		list_del(&b->lru_list);
		free_buffer(b);
	}
	dm_io_client_destroy(c->dm_io);
bad_dm_io:
	vfree(c->cache_hash);
bad_hash:
	kfree(c);
bad_client:
	return ERR_PTR(r);
}
EXPORT_SYMBOL_GPL(dm_bufio_client_create);
);
1572 * Free the buffering interface.
1573 * It is required that there are no references on any buffers.
1575 void dm_bufio_client_destroy(struct dm_bufio_client
*c
)
1581 unregister_shrinker(&c
->shrinker
);
1583 mutex_lock(&dm_bufio_clients_lock
);
1585 list_del(&c
->client_list
);
1586 dm_bufio_client_count
--;
1587 __cache_size_refresh();
1589 mutex_unlock(&dm_bufio_clients_lock
);
1591 for (i
= 0; i
< 1 << DM_BUFIO_HASH_BITS
; i
++)
1592 BUG_ON(!hlist_empty(&c
->cache_hash
[i
]));
1594 BUG_ON(c
->need_reserved_buffers
);
1596 while (!list_empty(&c
->reserved_buffers
)) {
1597 struct dm_buffer
*b
= list_entry(c
->reserved_buffers
.next
,
1598 struct dm_buffer
, lru_list
);
1599 list_del(&b
->lru_list
);
1603 for (i
= 0; i
< LIST_SIZE
; i
++)
1604 if (c
->n_buffers
[i
])
1605 DMERR("leaked buffer count %d: %ld", i
, c
->n_buffers
[i
]);
1607 for (i
= 0; i
< LIST_SIZE
; i
++)
1608 BUG_ON(c
->n_buffers
[i
]);
1610 dm_io_client_destroy(c
->dm_io
);
1611 vfree(c
->cache_hash
);
1614 EXPORT_SYMBOL_GPL(dm_bufio_client_destroy
);
static void cleanup_old_buffers(void)
{
	unsigned long max_age = dm_bufio_max_age;
	struct dm_bufio_client *c;

	barrier();

	if (max_age > ULONG_MAX / HZ)
		max_age = ULONG_MAX / HZ;

	mutex_lock(&dm_bufio_clients_lock);
	list_for_each_entry(c, &dm_bufio_all_clients, client_list) {
		if (!dm_bufio_trylock(c))
			continue;

		while (!list_empty(&c->lru[LIST_CLEAN])) {
			struct dm_buffer *b;
			b = list_entry(c->lru[LIST_CLEAN].prev,
				       struct dm_buffer, lru_list);
			if (__cleanup_old_buffer(b, 0, max_age * HZ))
				break;
			dm_bufio_cond_resched();
		}

		dm_bufio_unlock(c);
		dm_bufio_cond_resched();
	}
	mutex_unlock(&dm_bufio_clients_lock);
}
static struct workqueue_struct *dm_bufio_wq;
static struct delayed_work dm_bufio_work;

static void work_fn(struct work_struct *w)
{
	cleanup_old_buffers();

	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);
}
/*----------------------------------------------------------------
 * Module setup
 *--------------------------------------------------------------*/

/*
 * This is called only once for the whole dm_bufio module.
 * It initializes the memory limit.
 */
static int __init dm_bufio_init(void)
{
	__u64 mem;

	dm_bufio_allocated_kmem_cache = 0;
	dm_bufio_allocated_get_free_pages = 0;
	dm_bufio_allocated_vmalloc = 0;
	dm_bufio_current_allocated = 0;

	memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
	memset(&dm_bufio_cache_names, 0, sizeof dm_bufio_cache_names);

	mem = (__u64)((totalram_pages - totalhigh_pages) *
		      DM_BUFIO_MEMORY_PERCENT / 100) << PAGE_SHIFT;

	if (mem > ULONG_MAX)
		mem = ULONG_MAX;

#ifdef CONFIG_MMU
	/*
	 * Get the size of vmalloc space the same way as VMALLOC_TOTAL
	 * in fs/proc/internal.h
	 */
	if (mem > (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100)
		mem = (VMALLOC_END - VMALLOC_START) * DM_BUFIO_VMALLOC_PERCENT / 100;
#endif

	dm_bufio_default_cache_size = mem;

	mutex_lock(&dm_bufio_clients_lock);
	__cache_size_refresh();
	mutex_unlock(&dm_bufio_clients_lock);

	dm_bufio_wq = create_singlethread_workqueue("dm_bufio_cache");
	if (!dm_bufio_wq)
		return -ENOMEM;

	INIT_DELAYED_WORK(&dm_bufio_work, work_fn);
	queue_delayed_work(dm_bufio_wq, &dm_bufio_work,
			   DM_BUFIO_WORK_TIMER_SECS * HZ);

	return 0;
}
/*
 * This is called once when unloading the dm_bufio module.
 */
static void __exit dm_bufio_exit(void)
{
	int bug = 0;
	int i;

	cancel_delayed_work_sync(&dm_bufio_work);
	destroy_workqueue(dm_bufio_wq);

	for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++) {
		struct kmem_cache *kc = dm_bufio_caches[i];

		if (kc)
			kmem_cache_destroy(kc);
	}

	for (i = 0; i < ARRAY_SIZE(dm_bufio_cache_names); i++)
		kfree(dm_bufio_cache_names[i]);

	if (dm_bufio_client_count) {
		DMCRIT("%s: dm_bufio_client_count leaked: %d",
		       __func__, dm_bufio_client_count);
		bug = 1;
	}

	if (dm_bufio_current_allocated) {
		DMCRIT("%s: dm_bufio_current_allocated leaked: %lu",
		       __func__, dm_bufio_current_allocated);
		bug = 1;
	}

	if (dm_bufio_allocated_get_free_pages) {
		DMCRIT("%s: dm_bufio_allocated_get_free_pages leaked: %lu",
		       __func__, dm_bufio_allocated_get_free_pages);
		bug = 1;
	}

	if (dm_bufio_allocated_vmalloc) {
		DMCRIT("%s: dm_bufio_vmalloc leaked: %lu",
		       __func__, dm_bufio_allocated_vmalloc);
		bug = 1;
	}

	if (bug)
		BUG();
}

module_init(dm_bufio_init)
module_exit(dm_bufio_exit)
module_param_named(max_cache_size_bytes, dm_bufio_cache_size, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_cache_size_bytes, "Size of metadata cache");

module_param_named(max_age_seconds, dm_bufio_max_age, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(max_age_seconds, "Max age of a buffer in seconds");

module_param_named(peak_allocated_bytes, dm_bufio_peak_allocated, ulong, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(peak_allocated_bytes, "Tracks the maximum allocated memory");

module_param_named(allocated_kmem_cache_bytes, dm_bufio_allocated_kmem_cache, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_kmem_cache_bytes, "Memory allocated with kmem_cache_alloc");

module_param_named(allocated_get_free_pages_bytes, dm_bufio_allocated_get_free_pages, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_get_free_pages_bytes, "Memory allocated with get_free_pages");

module_param_named(allocated_vmalloc_bytes, dm_bufio_allocated_vmalloc, ulong, S_IRUGO);
MODULE_PARM_DESC(allocated_vmalloc_bytes, "Memory allocated with vmalloc");

module_param_named(current_allocated_bytes, dm_bufio_current_allocated, ulong, S_IRUGO);
MODULE_PARM_DESC(current_allocated_bytes, "Memory currently used by the cache");

MODULE_AUTHOR("Mikulas Patocka <dm-devel@redhat.com>");
MODULE_DESCRIPTION(DM_NAME " buffered I/O library");
MODULE_LICENSE("GPL");