/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/buffer_head.h
 *
 * Everything to do with buffer_heads.
 */

#ifndef _LINUX_BUFFER_HEAD_H
#define _LINUX_BUFFER_HEAD_H
#include <linux/types.h>
#include <linux/fs.h>
#include <linux/linkage.h>
#include <linux/pagemap.h>
#include <linux/wait.h>
#include <linux/atomic.h>
#ifdef CONFIG_BLOCK

enum bh_state_bits {
	BH_Uptodate,	/* Contains valid data */
	BH_Dirty,	/* Is dirty */
	BH_Lock,	/* Is locked */
	BH_Req,		/* Has been submitted for I/O */
	BH_Uptodate_Lock,/* Used by the first bh in a page, to serialise
			  * IO completion of other buffers in the page
			  */

	BH_Mapped,	/* Has a disk mapping */
	BH_New,		/* Disk mapping was newly created by get_block */
	BH_Async_Read,	/* Is under end_buffer_async_read I/O */
	BH_Async_Write,	/* Is under end_buffer_async_write I/O */
	BH_Delay,	/* Buffer is not yet allocated on disk */
	BH_Boundary,	/* Block is followed by a discontiguity */
	BH_Write_EIO,	/* I/O error on write */
	BH_Unwritten,	/* Buffer is allocated on disk but not written */
	BH_Quiet,	/* Buffer Error Printks to be quiet */
	BH_Meta,	/* Buffer contains metadata */
	BH_Prio,	/* Buffer should be submitted with REQ_PRIO */
	BH_Defer_Completion, /* Defer AIO completion to workqueue */

	BH_PrivateStart,/* not a state bit, but the first bit available
			 * for private allocation by other entities
			 */
};
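
/*
 * Illustrative sketch only, not part of this header: users of the
 * buffer_head API may define their own private state bits starting at
 * BH_PrivateStart.  BH_Example_Private is a hypothetical name used purely
 * for illustration.
 */
enum example_private_state_bits {
	BH_Example_Private = BH_PrivateStart,	/* first bit free for private use */
};
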
#define MAX_BUF_PER_PAGE	(PAGE_SIZE / 512)

typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
/*
 * Historically, a buffer_head was used to map a single block
 * within a page, and of course as the unit of I/O through the
 * filesystem and block layers.  Nowadays the basic I/O unit
 * is the bio, and buffer_heads are used for extracting block
 * mappings (via a get_block_t call), for tracking state within
 * a page (via a page_mapping) and for wrapping bio submission
 * for backward compatibility reasons (e.g. submit_bh).
 */
struct buffer_head {
	unsigned long b_state;		/* buffer state bitmap (see above) */
	struct buffer_head *b_this_page;/* circular list of page's buffers */
	struct page *b_page;		/* the page this bh is mapped to */

	sector_t b_blocknr;		/* start block number */
	size_t b_size;			/* size of mapping */
	char *b_data;			/* pointer to data within the page */

	struct block_device *b_bdev;
	bh_end_io_t *b_end_io;		/* I/O completion */
	void *b_private;		/* reserved for b_end_io */
	struct list_head b_assoc_buffers; /* associated with another mapping */
	struct address_space *b_assoc_map;	/* mapping this buffer is
						   associated with */
	atomic_t b_count;		/* users using this buffer_head */
};
/*
 * Macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
 * and buffer_foo() functions.
 * To avoid resetting buffer flags that are already set (which causes
 * a costly cache line transition), check the flag first.
 */
#define BUFFER_FNS(bit, name)						\
static __always_inline void set_buffer_##name(struct buffer_head *bh)	\
{									\
	if (!test_bit(BH_##bit, &(bh)->b_state))			\
		set_bit(BH_##bit, &(bh)->b_state);			\
}									\
static __always_inline void clear_buffer_##name(struct buffer_head *bh) \
{									\
	clear_bit(BH_##bit, &(bh)->b_state);				\
}									\
static __always_inline int buffer_##name(const struct buffer_head *bh)	\
{									\
	return test_bit(BH_##bit, &(bh)->b_state);			\
}
/*
 * test_set_buffer_foo() and test_clear_buffer_foo()
 */
#define TAS_BUFFER_FNS(bit, name)					\
static __always_inline int test_set_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_set_bit(BH_##bit, &(bh)->b_state);		\
}									\
static __always_inline int test_clear_buffer_##name(struct buffer_head *bh) \
{									\
	return test_and_clear_bit(BH_##bit, &(bh)->b_state);		\
}
/*
 * Emit the buffer bitops functions.  Note that there are also functions
 * of the form "mark_buffer_foo()".  These are higher-level functions which
 * do something in addition to setting a b_state bit.
 */
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
TAS_BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async_Read, async_read)
BUFFER_FNS(Async_Write, async_write)
BUFFER_FNS(Delay, delay)
BUFFER_FNS(Boundary, boundary)
BUFFER_FNS(Write_EIO, write_io_error)
BUFFER_FNS(Unwritten, unwritten)
BUFFER_FNS(Meta, meta)
BUFFER_FNS(Prio, prio)
BUFFER_FNS(Defer_Completion, defer_completion)
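
/*
 * Illustrative sketch only, not part of this header: a typical check using
 * the helpers generated above.  example_bh_needs_writeout() is a
 * hypothetical name.
 */
static inline bool example_bh_needs_writeout(struct buffer_head *bh)
{
	/* test_clear_buffer_dirty() comes from TAS_BUFFER_FNS(Dirty, dirty) */
	return buffer_uptodate(bh) && test_clear_buffer_dirty(bh);
}
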
#define bh_offset(bh)		((unsigned long)(bh)->b_data & ~PAGE_MASK)

/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page)					\
	({							\
		BUG_ON(!PagePrivate(page));			\
		((struct buffer_head *)page_private(page));	\
	})
#define page_has_buffers(page)	PagePrivate(page)
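
/*
 * Illustrative sketch only, not part of this header: walking the circular
 * b_this_page list of a page's buffers.  example_page_all_uptodate() is a
 * hypothetical helper name.
 */
static inline bool example_page_all_uptodate(struct page *page)
{
	struct buffer_head *head, *bh;

	if (!page_has_buffers(page))
		return false;
	bh = head = page_buffers(page);
	do {
		if (!buffer_uptodate(bh))
			return false;
		bh = bh->b_this_page;	/* circular list ends back at head */
	} while (bh != head);
	return true;
}
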
void buffer_check_dirty_writeback(struct page *page,
				     bool *dirty, bool *writeback);
void mark_buffer_dirty(struct buffer_head *bh);
void mark_buffer_write_io_error(struct buffer_head *bh);
void touch_buffer(struct buffer_head *bh);
void set_bh_page(struct buffer_head *bh,
		struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
		int retry);
void create_empty_buffers(struct page *, unsigned long,
			unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);
/* Things to do with buffers at mapping->private_list */
void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode);
int inode_has_buffers(struct inode *);
void invalidate_inode_buffers(struct inode *);
int remove_inode_buffers(struct inode *inode);
int sync_mapping_buffers(struct address_space *mapping);
void clean_bdev_aliases(struct block_device *bdev, sector_t block,
			sector_t len);
static inline void clean_bdev_bh_alias(struct buffer_head *bh)
{
	clean_bdev_aliases(bh->b_bdev, bh->b_blocknr, 1);
}
void mark_buffer_async_write(struct buffer_head *bh);
void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
			unsigned size);
struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
				  unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
struct buffer_head *__bread_gfp(struct block_device *,
				sector_t block, unsigned size, gfp_t gfp);
void invalidate_bh_lrus(void);
struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
void free_buffer_head(struct buffer_head * bh);
void unlock_buffer(struct buffer_head *bh);
void __lock_buffer(struct buffer_head *bh);
void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
			sector_t bblock, unsigned blocksize);
int bh_uptodate_or_lock(struct buffer_head *bh);
int bh_submit_read(struct buffer_head *bh);
loff_t page_cache_seek_hole_data(struct inode *inode, loff_t offset,
				 loff_t length, int whence);

extern int buffer_heads_over_limit;
/*
 * Generic address_space_operations implementations for buffer_head-backed
 * address_spaces.
 */
void block_invalidatepage(struct page *page, unsigned int offset,
			  unsigned int length);
int block_write_full_page(struct page *page, get_block_t *get_block,
			  struct writeback_control *wbc);
int __block_write_full_page(struct inode *inode, struct page *page,
			    get_block_t *get_block, struct writeback_control *wbc,
			    bh_end_io_t *handler);
int block_read_full_page(struct page *, get_block_t *);
int block_is_partially_uptodate(struct page *page, unsigned long from,
				unsigned long count);
int block_write_begin(struct address_space *mapping, loff_t pos, unsigned len,
		      unsigned flags, struct page **pagep, get_block_t *get_block);
int __block_write_begin(struct page *page, loff_t pos, unsigned len,
			get_block_t *get_block);
int block_write_end(struct file *, struct address_space *,
		    loff_t, unsigned, unsigned,
		    struct page *, void *);
int generic_write_end(struct file *, struct address_space *,
		      loff_t, unsigned, unsigned,
		      struct page *, void *);
void page_zero_new_buffers(struct page *page, unsigned from, unsigned to);
void clean_page_buffers(struct page *page);
int cont_write_begin(struct file *, struct address_space *, loff_t,
		     unsigned, unsigned, struct page **, void **,
		     get_block_t *, loff_t *);
int generic_cont_expand_simple(struct inode *inode, loff_t size);
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf,
		       get_block_t get_block);
/* Convert errno to return value from ->page_mkwrite() call */
static inline int block_page_mkwrite_return(int err)
{
	if (err == 0)
		return VM_FAULT_LOCKED;
	if (err == -EFAULT || err == -EAGAIN)
		return VM_FAULT_NOPAGE;
	if (err == -ENOMEM)
		return VM_FAULT_OOM;
	/* -ENOSPC, -EDQUOT, -EIO ... */
	return VM_FAULT_SIGBUS;
}
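
/*
 * Illustrative sketch only, not part of this header: a filesystem's
 * ->page_mkwrite() handler typically wraps block_page_mkwrite() like this.
 * example_page_mkwrite() and the get_block argument passed to it are
 * hypothetical.
 */
static inline int example_page_mkwrite(struct vm_area_struct *vma,
				       struct vm_fault *vmf,
				       get_block_t *example_get_block)
{
	int err = block_page_mkwrite(vma, vmf, example_get_block);

	return block_page_mkwrite_return(err);
}
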
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_write_begin(struct address_space *, loff_t, unsigned, unsigned,
		     struct page **, void **, get_block_t *);
int nobh_write_end(struct file *, struct address_space *,
		   loff_t, unsigned, unsigned,
		   struct page *, void *);
int nobh_truncate_page(struct address_space *, loff_t, get_block_t *);
int nobh_writepage(struct page *page, get_block_t *get_block,
		   struct writeback_control *wbc);

void buffer_init(void);
static inline void attach_page_buffers(struct page *page,
		struct buffer_head *head)
{
	get_page(page);
	SetPagePrivate(page);
	set_page_private(page, (unsigned long)head);
}
static inline void get_bh(struct buffer_head *bh)
{
	atomic_inc(&bh->b_count);
}

static inline void put_bh(struct buffer_head *bh)
{
	smp_mb__before_atomic();
	atomic_dec(&bh->b_count);
}
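
/*
 * Illustrative sketch only, not part of this header: get_bh()/put_bh()
 * bracket a window in which the caller needs the buffer_head to stay
 * allocated.  example_with_bh_pinned() is a hypothetical helper.
 */
static inline void example_with_bh_pinned(struct buffer_head *bh)
{
	get_bh(bh);	/* elevate b_count so the bh cannot be freed */
	/* ... safely dereference bh here ... */
	put_bh(bh);	/* drop the reference again */
}
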
static inline void brelse(struct buffer_head *bh)
{
	if (bh)
		__brelse(bh);
}

static inline void bforget(struct buffer_head *bh)
{
	if (bh)
		__bforget(bh);
}
static inline struct buffer_head *
sb_bread(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
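
/*
 * Illustrative sketch only, not part of this header: the usual
 * read-and-release pattern around sb_bread().  example_read_block() and
 * its error handling are hypothetical.
 */
static inline int example_read_block(struct super_block *sb, sector_t block)
{
	struct buffer_head *bh = sb_bread(sb, block);

	if (!bh)
		return -EIO;	/* the block could not be read */
	/* ... inspect or copy bh->b_data here ... */
	brelse(bh);		/* drop the reference taken by sb_bread() */
	return 0;
}
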
static inline struct buffer_head *
sb_bread_unmovable(struct super_block *sb, sector_t block)
{
	return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
}
static inline void
sb_breadahead(struct super_block *sb, sector_t block)
{
	__breadahead(sb->s_bdev, block, sb->s_blocksize);
}
static inline struct buffer_head *
sb_getblk(struct super_block *sb, sector_t block)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
}
static inline struct buffer_head *
sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
{
	return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
}
static inline struct buffer_head *
sb_find_get_block(struct super_block *sb, sector_t block)
{
	return __find_get_block(sb->s_bdev, block, sb->s_blocksize);
}
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, sector_t block)
{
	set_buffer_mapped(bh);
	bh->b_bdev = sb->s_bdev;
	bh->b_blocknr = block;
	bh->b_size = sb->s_blocksize;
}
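
/*
 * Illustrative sketch only, not part of this header: a filesystem's
 * get_block_t implementation typically ends by calling map_bh() once the
 * on-disk block is known.  example_get_block() is hypothetical and uses a
 * made-up 1:1 logical-to-physical mapping; a real implementation would
 * look the block up (and possibly allocate it, setting BH_New).
 */
static inline int example_get_block(struct inode *inode, sector_t iblock,
				    struct buffer_head *bh_result, int create)
{
	map_bh(bh_result, inode->i_sb, iblock);	/* marks the bh mapped */
	return 0;
}
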
static inline void wait_on_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (buffer_locked(bh))
		__wait_on_buffer(bh);
}

static inline int trylock_buffer(struct buffer_head *bh)
{
	return likely(!test_and_set_bit_lock(BH_Lock, &bh->b_state));
}
static inline void lock_buffer(struct buffer_head *bh)
{
	might_sleep();
	if (!trylock_buffer(bh))
		__lock_buffer(bh);
}
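
/*
 * Illustrative sketch only, not part of this header: modifying buffer
 * contents under the buffer lock and then marking the buffer dirty.
 * example_update_block() is a hypothetical helper.
 */
static inline void example_update_block(struct buffer_head *bh)
{
	lock_buffer(bh);	/* serialise against I/O and other writers */
	/* ... modify bh->b_data here ... */
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);	/* schedule the block for writeback */
}
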
static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
						   sector_t block,
						   unsigned size)
{
	return __getblk_gfp(bdev, block, size, 0);
}

static inline struct buffer_head *__getblk(struct block_device *bdev,
					   sector_t block,
					   unsigned size)
{
	return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
}
/**
 *  __bread() - read a specified block and return the bh that contains it
 *  @bdev: the block_device to read from
 *  @block: number of the block
 *  @size: size (in bytes) to read
 *
 *  Reads the specified block and returns the buffer head that contains it.
 *  The page cache is allocated from the movable area so that it can be
 *  migrated.  Returns NULL if the block was unreadable.
 */
static inline struct buffer_head *
__bread(struct block_device *bdev, sector_t block, unsigned size)
{
	return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}

extern int __set_page_dirty_buffers(struct page *page);
#else /* CONFIG_BLOCK */

static inline void buffer_init(void) {}
static inline int try_to_free_buffers(struct page *page) { return 1; }
static inline int inode_has_buffers(struct inode *inode) { return 0; }
static inline void invalidate_inode_buffers(struct inode *inode) {}
static inline int remove_inode_buffers(struct inode *inode) { return 1; }
static inline int sync_mapping_buffers(struct address_space *mapping) { return 0; }

#endif /* CONFIG_BLOCK */
#endif /* _LINUX_BUFFER_HEAD_H */