// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef __XFS_BUF_H__
#define __XFS_BUF_H__

#include <linux/list.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/dax.h>
#include <linux/uio.h>
#include <linux/list_lru.h>

extern struct kmem_cache *xfs_buf_cache;

/*
 *	Base types
 */
struct xfs_buf;

#define XFS_BUF_DADDR_NULL	((xfs_daddr_t) (-1LL))

#define XBF_READ	 (1u << 0) /* buffer intended for reading from device */
#define XBF_WRITE	 (1u << 1) /* buffer intended for writing to device */
#define XBF_READ_AHEAD	 (1u << 2) /* asynchronous read-ahead */
#define XBF_NO_IOACCT	 (1u << 3) /* bypass I/O accounting (non-LRU bufs) */
#define XBF_ASYNC	 (1u << 4) /* initiator will not wait for completion */
#define XBF_DONE	 (1u << 5) /* all pages in the buffer uptodate */
#define XBF_STALE	 (1u << 6) /* buffer has been staled, do not find it */
#define XBF_WRITE_FAIL	 (1u << 7) /* async writes have failed on this buffer */

/* buffer type flags for write callbacks */
#define _XBF_INODES	 (1u << 16)/* inode buffer */
#define _XBF_DQUOTS	 (1u << 17)/* dquot buffer */
#define _XBF_LOGRECOVERY (1u << 18)/* log recovery buffer */

/* flags used only internally */
#define _XBF_PAGES	 (1u << 20)/* backed by refcounted pages */
#define _XBF_KMEM	 (1u << 21)/* backed by heap memory */
#define _XBF_DELWRI_Q	 (1u << 22)/* buffer on a delwri queue */

/* flags used only as arguments to access routines */
/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBF_LIVESCAN	 (1u << 28)
#define XBF_INCORE	 (1u << 29)/* lookup only, return if found in cache */
#define XBF_TRYLOCK	 (1u << 30)/* lock requested, but do not wait */
#define XBF_UNMAPPED	 (1u << 31)/* do not map the buffer */

typedef unsigned int xfs_buf_flags_t;

#define XFS_BUF_FLAGS \
	{ XBF_READ,		"READ" }, \
	{ XBF_WRITE,		"WRITE" }, \
	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
	{ XBF_NO_IOACCT,	"NO_IOACCT" }, \
	{ XBF_ASYNC,		"ASYNC" }, \
	{ XBF_DONE,		"DONE" }, \
	{ XBF_STALE,		"STALE" }, \
	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
	{ _XBF_INODES,		"INODES" }, \
	{ _XBF_DQUOTS,		"DQUOTS" }, \
	{ _XBF_LOGRECOVERY,	"LOG_RECOVERY" }, \
	{ _XBF_PAGES,		"PAGES" }, \
	{ _XBF_KMEM,		"KMEM" }, \
	{ _XBF_DELWRI_Q,	"DELWRI_Q" }, \
	/* The following interface flags should never be set */ \
	{ XBF_LIVESCAN,		"LIVESCAN" }, \
	{ XBF_INCORE,		"INCORE" }, \
	{ XBF_TRYLOCK,		"TRYLOCK" }, \
	{ XBF_UNMAPPED,		"UNMAPPED" }
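
/*
 * Example (illustrative, not part of the original header): XFS_BUF_FLAGS
 * expands to flag/name pairs in the format __print_flags() expects, so a
 * tracepoint can render b_flags symbolically, e.g.:
 *
 *	__print_flags(__entry->flags, "|", XFS_BUF_FLAGS)
 *
 * which prints something like "READ|ASYNC|READ_AHEAD".
 */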

/*
 * Internal state flags.
 */
#define XFS_BSTATE_DISPOSE	 (1 << 0)	/* buffer being discarded */
#define XFS_BSTATE_IN_FLIGHT	 (1 << 1)	/* I/O in flight */

struct xfs_buf_cache {
	spinlock_t		bc_lock;
	struct rhashtable	bc_hash;
};

int xfs_buf_cache_init(struct xfs_buf_cache *bch);
void xfs_buf_cache_destroy(struct xfs_buf_cache *bch);

/*
 * The xfs_buftarg contains 2 notions of "sector size" -
 *
 * 1) The metadata sector size, which is the minimum unit and
 *    alignment of IO which will be performed by metadata operations.
 * 2) The device logical sector size
 *
 * The first is specified at mkfs time, and is stored on-disk in the
 * superblock's sb_sectsize.
 *
 * The latter is derived from the underlying device, and controls direct IO
 * alignment constraints.
 */
struct xfs_buftarg {
	struct file		*bt_bdev_file;
	struct block_device	*bt_bdev;
	struct dax_device	*bt_daxdev;
	struct file		*bt_file;
	u64			bt_dax_part_off;
	struct xfs_mount	*bt_mount;
	unsigned int		bt_meta_sectorsize;
	size_t			bt_meta_sectormask;
	size_t			bt_logical_sectorsize;
	size_t			bt_logical_sectormask;

	/* LRU control structures */
	struct shrinker		*bt_shrinker;
	struct list_lru		bt_lru;

	struct percpu_counter	bt_io_count;
	struct ratelimit_state	bt_ioerror_rl;

	/* Atomic write unit values */
	unsigned int		bt_bdev_awu_min;
	unsigned int		bt_bdev_awu_max;

	/* built-in cache, if we're not using the perag one */
	struct xfs_buf_cache	bt_cache[];
};
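
/*
 * Example (sketch, assuming the mask fields cache "sector size - 1" as set
 * up by xfs_setsize_buftarg): alignment checks reduce to a bitwise AND,
 * e.g. a direct I/O position check against the logical sector size:
 *
 *	if (offset & btp->bt_logical_sectormask)
 *		return -EINVAL;		(not sector aligned)
 */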

#define XB_PAGES	2

struct xfs_buf_map {
	xfs_daddr_t		bm_bn;	/* block number for I/O */
	int			bm_len;	/* size of I/O */
	unsigned int		bm_flags;
};

/*
 * Online fsck is scanning the buffer cache for live buffers.  Do not warn
 * about length mismatches during lookups and do not return stale buffers.
 */
#define XBM_LIVESCAN	(1U << 0)

#define DEFINE_SINGLE_BUF_MAP(map, blkno, numblk) \
	struct xfs_buf_map (map) = { .bm_bn = (blkno), .bm_len = (numblk) };
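
/*
 * Example (sketch): most callers need a single contiguous mapping, which
 * the macro above declares inline; discontiguous metadata (e.g. a
 * directory block split across extents) passes an array of maps instead.
 * daddr1/len1 etc. are hypothetical values:
 *
 *	struct xfs_buf_map map[2] = {
 *		{ .bm_bn = daddr1, .bm_len = len1 },
 *		{ .bm_bn = daddr2, .bm_len = len2 },
 *	};
 *	error = xfs_buf_read_map(btp, map, 2, 0, &bp, ops, __this_address);
 */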

struct xfs_buf_ops {
	char *name;
	union {
		__be32 magic[2];	/* v4 and v5 on disk magic values */
		__be16 magic16[2];	/* v4 and v5 on disk magic values */
	};
	void (*verify_read)(struct xfs_buf *);
	void (*verify_write)(struct xfs_buf *);
	xfs_failaddr_t (*verify_struct)(struct xfs_buf *bp);
};
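
/*
 * Example (hypothetical sketch): a buffer ops table wires the magic
 * numbers and verifier callbacks together; xfs_foo_* are made-up names
 * for illustration:
 *
 *	const struct xfs_buf_ops xfs_foo_buf_ops = {
 *		.name		= "xfs_foo",
 *		.magic		= { cpu_to_be32(XFS_FOO_MAGIC),
 *				    cpu_to_be32(XFS_FOO_CRC_MAGIC) },
 *		.verify_read	= xfs_foo_verify_read,
 *		.verify_write	= xfs_foo_verify_write,
 *		.verify_struct	= xfs_foo_verify_struct,
 *	};
 */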

struct xfs_buf {
	/*
	 * first cacheline holds all the fields needed for an uncontended cache
	 * hit to be fully processed.  The semaphore straddles the cacheline
	 * boundary, but the counter and lock sits on the first cacheline,
	 * which is the only bit that is touched if we hit the semaphore
	 * fast-path on locking.
	 */
	struct rhash_head	b_rhash_head;	/* pag buffer hash node */

	xfs_daddr_t		b_rhash_key;	/* buffer cache index */
	int			b_length;	/* size of buffer in BBs */
	atomic_t		b_hold;		/* reference count */
	atomic_t		b_lru_ref;	/* lru reclaim ref count */
	xfs_buf_flags_t		b_flags;	/* status flags */
	struct semaphore	b_sema;		/* semaphore for lockables */

	/*
	 * concurrent access to b_lru and b_lru_flags are protected by
	 * bt_lru_lock and not by b_sema
	 */
	struct list_head	b_lru;		/* lru list */
	spinlock_t		b_lock;		/* internal state lock */
	unsigned int		b_state;	/* internal state flags */
	int			b_io_error;	/* internal IO error state */
	wait_queue_head_t	b_waiters;	/* unpin waiters */
	struct list_head	b_list;
	struct xfs_perag	*b_pag;		/* contains rbtree root */
	struct xfs_mount	*b_mount;
	struct xfs_buftarg	*b_target;	/* buffer target (device) */
	void			*b_addr;	/* virtual address of buffer */
	struct work_struct	b_ioend_work;
	struct completion	b_iowait;	/* queue for I/O waiters */
	struct xfs_buf_log_item	*b_log_item;
	struct list_head	b_li_list;	/* Log items list head */
	struct xfs_trans	*b_transp;
	struct page		**b_pages;	/* array of page pointers */
	struct page		*b_page_array[XB_PAGES]; /* inline pages */
	struct xfs_buf_map	*b_maps;	/* compound buffer map */
	struct xfs_buf_map	__b_map;	/* inline compound buffer map */
	int			b_map_count;
	atomic_t		b_pin_count;	/* pin count */
	atomic_t		b_io_remaining;	/* #outstanding I/O requests */
	unsigned int		b_page_count;	/* size of page array */
	unsigned int		b_offset;	/* page offset of b_addr,
						   only for _XBF_KMEM buffers */
	int			b_error;	/* error code on I/O */

	/*
	 * async write failure retry count. Initialised to zero on the first
	 * failure, then when it exceeds the maximum configured without a
	 * success the write is considered to be failed permanently and the
	 * iodone handler will take appropriate action.
	 *
	 * For retry timeouts, we record the jiffy of the first failure. This
	 * means that we can change the retry timeout for buffers already under
	 * I/O and thus avoid getting stuck in a retry loop with a long timeout.
	 *
	 * last_error is used to ensure that we are getting repeated errors, not
	 * different errors. e.g. a block device might change ENOSPC to EIO when
	 * a failure timeout occurs, so we want to re-initialise the error
	 * retry behaviour appropriately when that happens.
	 */
	int			b_retries;
	unsigned long		b_first_retry_time; /* in jiffies */
	int			b_last_error;

	const struct xfs_buf_ops	*b_ops;
	struct rcu_head		b_rcu;
};

/* Finding and Reading Buffers */
int xfs_buf_get_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_map(struct xfs_buftarg *target, struct xfs_buf_map *map,
		int nmaps, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops, xfs_failaddr_t fa);
void xfs_buf_readahead_map(struct xfs_buftarg *target,
		struct xfs_buf_map *map, int nmaps,
		const struct xfs_buf_ops *ops);

static inline int
xfs_buf_incore(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, XBF_INCORE | flags, bpp);
}

static inline int
xfs_buf_get(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	struct xfs_buf		**bpp)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_get_map(target, &map, 1, 0, bpp);
}

static inline int
xfs_buf_read(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	xfs_buf_flags_t		flags,
	struct xfs_buf		**bpp,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);

	return xfs_buf_read_map(target, &map, 1, flags, bpp, ops,
			__builtin_return_address(0));
}
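
/*
 * Example (illustrative sketch, not part of the original header): a typical
 * cached metadata read pairs xfs_buf_read() with xfs_buf_relse(), with the
 * ops pointer attaching the verifier that runs on I/O completion.
 * xfs_foo_buf_ops is a hypothetical name:
 *
 *	struct xfs_buf *bp;
 *	int error;
 *
 *	error = xfs_buf_read(btp, blkno, numblks, 0, &bp, &xfs_foo_buf_ops);
 *	if (error)
 *		return error;
 *	(examine or modify bp->b_addr while holding the buffer lock)
 *	xfs_buf_relse(bp);
 */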

static inline void
xfs_buf_readahead(
	struct xfs_buftarg	*target,
	xfs_daddr_t		blkno,
	size_t			numblks,
	const struct xfs_buf_ops *ops)
{
	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
	return xfs_buf_readahead_map(target, &map, 1, ops);
}

int xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
		xfs_buf_flags_t flags, struct xfs_buf **bpp);
int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks, xfs_buf_flags_t flags, struct xfs_buf **bpp,
		const struct xfs_buf_ops *ops);
int _xfs_buf_read(struct xfs_buf *bp, xfs_buf_flags_t flags);
void xfs_buf_hold(struct xfs_buf *bp);

/* Releasing Buffers */
extern void xfs_buf_rele(struct xfs_buf *);

/* Locking and Unlocking Buffers */
extern int xfs_buf_trylock(struct xfs_buf *);
extern void xfs_buf_lock(struct xfs_buf *);
extern void xfs_buf_unlock(struct xfs_buf *);
#define xfs_buf_islocked(bp) \
	((bp)->b_sema.count <= 0)

static inline void xfs_buf_relse(struct xfs_buf *bp)
{
	xfs_buf_unlock(bp);
	xfs_buf_rele(bp);
}
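
/*
 * Example (sketch): xfs_buf_trylock() is the non-blocking variant for
 * callers that must not sleep; it returns nonzero on success:
 *
 *	if (!xfs_buf_trylock(bp))
 *		return -EAGAIN;
 *	(work on the locked buffer, then drop both lock and reference)
 *	xfs_buf_relse(bp);
 */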

/* Buffer Read and Write Routines */
extern int xfs_bwrite(struct xfs_buf *bp);

extern void __xfs_buf_ioerror(struct xfs_buf *bp, int error,
		xfs_failaddr_t failaddr);
#define xfs_buf_ioerror(bp, err) __xfs_buf_ioerror((bp), (err), __this_address)
extern void xfs_buf_ioerror_alert(struct xfs_buf *bp, xfs_failaddr_t fa);
void xfs_buf_ioend_fail(struct xfs_buf *);
void xfs_buf_zero(struct xfs_buf *bp, size_t boff, size_t bsize);
void __xfs_buf_mark_corrupt(struct xfs_buf *bp, xfs_failaddr_t fa);
#define xfs_buf_mark_corrupt(bp) __xfs_buf_mark_corrupt((bp), __this_address)

/* Buffer Utility Routines */
extern void *xfs_buf_offset(struct xfs_buf *, size_t);
extern void xfs_buf_stale(struct xfs_buf *bp);

/* Delayed Write Buffer Routines */
extern void xfs_buf_delwri_cancel(struct list_head *);
extern bool xfs_buf_delwri_queue(struct xfs_buf *, struct list_head *);
void xfs_buf_delwri_queue_here(struct xfs_buf *bp, struct list_head *bl);
extern int xfs_buf_delwri_submit(struct list_head *);
extern int xfs_buf_delwri_submit_nowait(struct list_head *);
extern int xfs_buf_delwri_pushbuf(struct xfs_buf *, struct list_head *);
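
/*
 * Example (sketch): delayed-write buffers are staged on a caller-owned
 * list and submitted as a batch, e.g.:
 *
 *	LIST_HEAD(buffer_list);
 *
 *	(for each modified buffer:)
 *	xfs_buf_delwri_queue(bp, &buffer_list);
 *	xfs_buf_relse(bp);
 *
 *	error = xfs_buf_delwri_submit(&buffer_list);
 */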

static inline xfs_daddr_t
xfs_buf_daddr(struct xfs_buf *bp)
{
	return bp->b_maps[0].bm_bn;
}

void xfs_buf_set_ref(struct xfs_buf *bp, int lru_ref);

/*
 * If the buffer is already on the LRU, do nothing. Otherwise set the buffer
 * up with a reference count of 0 so it will be tossed from the cache when
 * released.
 */
static inline void xfs_buf_oneshot(struct xfs_buf *bp)
{
	if (!list_empty(&bp->b_lru) || atomic_read(&bp->b_lru_ref) > 1)
		return;
	atomic_set(&bp->b_lru_ref, 0);
}
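
/*
 * Example (sketch): a caller that reads a block it will not touch again
 * can mark it oneshot so it does not displace hot metadata from the LRU:
 *
 *	error = xfs_buf_read(btp, blkno, numblks, 0, &bp, ops);
 *	if (!error) {
 *		xfs_buf_oneshot(bp);
 *		(use the buffer)
 *		xfs_buf_relse(bp);
 *	}
 */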

static inline int xfs_buf_ispinned(struct xfs_buf *bp)
{
	return atomic_read(&bp->b_pin_count);
}

static inline int
xfs_buf_verify_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	return xfs_verify_cksum(bp->b_addr, BBTOB(bp->b_length),
				cksum_offset);
}

static inline void
xfs_buf_update_cksum(struct xfs_buf *bp, unsigned long cksum_offset)
{
	xfs_update_cksum(bp->b_addr, BBTOB(bp->b_length),
			 cksum_offset);
}
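
/*
 * Example: callers pass the byte offset of the on-disk CRC field within
 * the structure; XFS defines constants for these, e.g. XFS_AGF_CRC_OFF is
 * offsetof(struct xfs_agf, agf_crc), so an AGF write verifier finishes
 * with:
 *
 *	xfs_buf_update_cksum(bp, XFS_AGF_CRC_OFF);
 */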

/*
 * Handling of buftargs.
 */
struct xfs_buftarg *xfs_alloc_buftarg(struct xfs_mount *mp,
		struct file *bdev_file);
extern void xfs_free_buftarg(struct xfs_buftarg *);
extern void xfs_buftarg_wait(struct xfs_buftarg *);
extern void xfs_buftarg_drain(struct xfs_buftarg *);
extern int xfs_setsize_buftarg(struct xfs_buftarg *, unsigned int);

#define xfs_getsize_buftarg(buftarg)	block_size((buftarg)->bt_bdev)
#define xfs_readonly_buftarg(buftarg)	bdev_read_only((buftarg)->bt_bdev)

int xfs_buf_reverify(struct xfs_buf *bp, const struct xfs_buf_ops *ops);
bool xfs_verify_magic(struct xfs_buf *bp, __be32 dmagic);
bool xfs_verify_magic16(struct xfs_buf *bp, __be16 dmagic);

/* for xfs_buf_mem.c only: */
int xfs_init_buftarg(struct xfs_buftarg *btp, size_t logical_sectorsize,
		const char *descr);
void xfs_destroy_buftarg(struct xfs_buftarg *btp);

#endif	/* __XFS_BUF_H__ */