/*
 * Copyright (C) 2009-2011 Red Hat, Inc.
 *
 * Author: Mikulas Patocka <mpatocka@redhat.com>
 *
 * This file is released under the GPL.
 */
12 #include <linux/blkdev.h>
13 #include <linux/types.h>
15 /*----------------------------------------------------------------*/
/* Opaque handle types; definitions live in dm-bufio.c. */
struct dm_bufio_client;
struct dm_buffer;
/*
 * Create a buffered IO cache on a given device.
 *
 * bdev:			the underlying block device
 * block_size:			size of one cache block in bytes
 *				(NOTE(review): dm-bufio historically requires a
 *				power of two — confirm against dm-bufio.c)
 * reserved_buffers:		number of buffers kept reserved to avoid
 *				deadlock (see the WARNING comment below)
 * aux_size:			size of per-buffer auxiliary data, readable
 *				via dm_bufio_get_aux_data()
 * alloc_callback:		called when a buffer is allocated; may be NULL
 * write_callback:		called before a buffer is written; may be NULL
 *
 * Returns the new client, to be released with dm_bufio_client_destroy().
 */
struct dm_bufio_client *
dm_bufio_client_create(struct block_device *bdev, unsigned block_size,
		       unsigned reserved_buffers, unsigned aux_size,
		       void (*alloc_callback)(struct dm_buffer *),
		       void (*write_callback)(struct dm_buffer *));
/*
 * Release a buffered IO cache created with dm_bufio_client_create().
 */
void dm_bufio_client_destroy(struct dm_bufio_client *c);
/*
 * WARNING: to avoid deadlocks, these conditions are observed:
 *
 * - At most one thread can hold at most "reserved_buffers" simultaneously.
 * - Each other threads can hold at most one buffer.
 * - Threads which call only dm_bufio_get can hold unlimited number of
 *   buffers.
 */
44 * Read a given block from disk. Returns pointer to data. Returns a
45 * pointer to dm_buffer that can be used to release the buffer or to make
48 void *dm_bufio_read(struct dm_bufio_client
*c
, sector_t block
,
49 struct dm_buffer
**bp
);
52 * Like dm_bufio_read, but return buffer from cache, don't read
53 * it. If the buffer is not in the cache, return NULL.
55 void *dm_bufio_get(struct dm_bufio_client
*c
, sector_t block
,
56 struct dm_buffer
**bp
);
59 * Like dm_bufio_read, but don't read anything from the disk. It is
60 * expected that the caller initializes the buffer and marks it dirty.
62 void *dm_bufio_new(struct dm_bufio_client
*c
, sector_t block
,
63 struct dm_buffer
**bp
);
66 * Prefetch the specified blocks to the cache.
67 * The function starts to read the blocks and returns without waiting for
70 void dm_bufio_prefetch(struct dm_bufio_client
*c
,
71 sector_t block
, unsigned n_blocks
);
/*
 * Release a reference obtained with dm_bufio_{read,get,new}. The data
 * pointer and dm_buffer pointer is no longer valid after this call.
 */
void dm_bufio_release(struct dm_buffer *b);
/*
 * Mark a buffer dirty. It should be called after the buffer is modified.
 *
 * In case of memory pressure, the buffer may be written after
 * dm_bufio_mark_buffer_dirty, but before dm_bufio_write_dirty_buffers. So
 * dm_bufio_write_dirty_buffers guarantees that the buffer is on-disk but
 * the actual writing may occur earlier.
 */
void dm_bufio_mark_buffer_dirty(struct dm_buffer *b);
/*
 * Initiate writing of dirty buffers, without waiting for completion.
 */
void dm_bufio_write_dirty_buffers_async(struct dm_bufio_client *c);
/*
 * Write all dirty buffers. Guarantees that all dirty buffers created prior
 * to this call are on disk when this call exits.
 *
 * Returns 0 on success, or a negative error code on I/O failure.
 */
int dm_bufio_write_dirty_buffers(struct dm_bufio_client *c);
/*
 * Send an empty write barrier to the device to flush hardware disk cache.
 *
 * Returns 0 on success, or a negative error code on I/O failure.
 */
int dm_bufio_issue_flush(struct dm_bufio_client *c);
106 * Like dm_bufio_release but also move the buffer to the new
107 * block. dm_bufio_write_dirty_buffers is needed to commit the new block.
109 void dm_bufio_release_move(struct dm_buffer
*b
, sector_t new_block
);
112 * Free the given buffer.
113 * This is just a hint, if the buffer is in use or dirty, this function
116 void dm_bufio_forget(struct dm_bufio_client
*c
, sector_t block
);
/*
 * Set the minimum number of buffers before cleanup happens.
 */
void dm_bufio_set_minimum_buffers(struct dm_bufio_client *c, unsigned n);
123 unsigned dm_bufio_get_block_size(struct dm_bufio_client
*c
);
124 sector_t
dm_bufio_get_device_size(struct dm_bufio_client
*c
);
125 sector_t
dm_bufio_get_block_number(struct dm_buffer
*b
);
126 void *dm_bufio_get_block_data(struct dm_buffer
*b
);
127 void *dm_bufio_get_aux_data(struct dm_buffer
*b
);
128 struct dm_bufio_client
*dm_bufio_get_client(struct dm_buffer
*b
);
130 /*----------------------------------------------------------------*/