/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>

#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */
	BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
			  * IO completion of other buffers in the page
			  */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer Error Printks to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};

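/*
 * Illustrative only (not defined in this header): a subsystem that needs
 * additional buffer state bits can allocate them privately starting at
 * BH_PrivateStart, for example:
 *
 *	enum my_private_state_bits {
 *		BH_MyPrivateFirst = BH_PrivateStart,
 *		BH_MyPrivateSecond,
 *	};
 */
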
#define MAX_BUF_PER_PAGE (PAGE_SIZE / 512)

struct buffer_head;

typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);

/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
};

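/*
 * A minimal usage sketch (illustrative only): a filesystem typically obtains
 * a buffer_head with sb_bread() (declared below), reads the block contents
 * through b_data, and drops its reference with brelse().  "buf" stands for a
 * caller-provided destination buffer:
 *
 *	struct buffer_head *bh = sb_bread(sb, block);
 *
 *	if (!bh)
 *		return -EIO;
 *	memcpy(buf, bh->b_data, sb->s_blocksize);
 *	brelse(bh);
 */
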
/*
 * macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	set_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}

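/*
 * For reference, BUFFER_FNS(Uptodate, uptodate) expands to:
 *
 *	static __always_inline void set_buffer_uptodate(struct buffer_head *bh)
 *	{
 *		set_bit(BH_Uptodate, &(bh)->b_state);
 *	}
 *	static __always_inline void clear_buffer_uptodate(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Uptodate, &(bh)->b_state);
 *	}
 *	static __always_inline int buffer_uptodate(const struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Uptodate, &(bh)->b_state);
 *	}
 */
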
/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}

/*
 * Emit the buffer bitops functions.   Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)

#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)

void buffer_check_dirty_writeback(struct page *page,
				     bool *dirty, bool *writeback);

void mark_buffer_dirty(struct buffer_head *bh);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);

/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void unmap_underlying_metadata(struct block_device *bdev, sector_t block);

void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
int _submit_bh(int op, int op_flags, struct buffer_head *bh,
	       unsigned long bio_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);

extern int buffer_heads_over_limit;

/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
			  unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
				struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			get_block_t *get_block, struct writeback_control *wbc,
			bh_end_io_t *handler);
int block_read_full_page(struct page *, get_block_t *);
int block_is_partially_uptodate(struct page *page, unsigned long from,
				unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
		get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
int cont_write_begin(struct file *, struct address_space *, loff_t,
			unsigned, unsigned, struct page **, void **,
			get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
				get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline int block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	if (err == -EAGAIN)
		return VM_FAULT_RETRY;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
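
/*
 * Illustrative only: a filesystem's ->page_mkwrite() handler typically pairs
 * block_page_mkwrite() with block_page_mkwrite_return(); myfs_get_block()
 * below is a hypothetical get_block_t implementation:
 *
 *	static int myfs_page_mkwrite(struct vm_area_struct *vma,
 *				     struct vm_fault *vmf)
 *	{
 *		int err = block_page_mkwrite(vma, vmf, myfs_get_block);
 *
 *		return block_page_mkwrite_return(err);
 *	}
 */
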
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
				struct page **, void **, get_block_t *);
int nobh_write_end(struct file *, struct address_space *,
				loff_t, unsigned, unsigned,
				struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
			struct writeback_control *wbc);
void buffer_init(void);

static inline void attach_page_buffers(struct page *page,
		struct buffer_head *head)
{
	get_page(page);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)head);
}

static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}

static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}

static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}

static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}

static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}

static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}

static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}

static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}

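/*
 * Illustrative only: map_bh() is usually called from a filesystem's
 * get_block_t implementation once the on-disk block has been resolved.
 * myfs_get_block() and myfs_resolve_block() are hypothetical:
 *
 *	static int myfs_get_block(struct inode *inode, sector_t iblock,
 *				  struct buffer_head *bh_result, int create)
 *	{
 *		sector_t phys;
 *
 *		if (myfs_resolve_block(inode, iblock, &phys))
 *			return -EIO;
 *		map_bh(bh_result, inode->i_sb, phys);
 *		return 0;
 *	}
 */
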
static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}

static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}

static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}

/**
 *  __bread() - reads a specified block and returns the bh
 *  @bdev: the block_device to read from
 *  @block: number of block
 *  @size: size (in bytes) to read
 *
 *  Reads a specified block, and returns the buffer head that contains it.
 *  The page cache is allocated from movable area so that it can be migrated.
 *  It returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

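/*
 * Illustrative only: reading one block straight from a block device and
 * releasing the reference when done; bdev, block, blocksize and
 * use_block_data() are assumed to be provided by the caller:
 *
 *	struct buffer_head *bh = __bread(bdev, block, blocksize);
 *
 *	if (!bh)
 *		return -EIO;
 *	use_block_data(bh->b_data);
 *	brelse(bh);
 */
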
extern int __set_page_dirty_buffers(struct page *page);

#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */