/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/hash.h>
#include "extent_map.h"
#include "extent_io.h"
#include "ordered-data.h"
#include "delayed-inode.h"

/*
 * ordered_data_close is set by truncate when a file that used
 * to have good data has been truncated to zero.  When it is set
 * the btrfs file release call will add this inode to the
 * ordered operations list so that we make sure to flush out any
 * new data the application may have written before commit.
 */
enum {
	BTRFS_INODE_ORDERED_DATA_CLOSE,
	BTRFS_INODE_DUMMY,
	BTRFS_INODE_IN_DEFRAG,
	BTRFS_INODE_HAS_ASYNC_EXTENT,
	BTRFS_INODE_NEEDS_FULL_SYNC,
	BTRFS_INODE_COPY_EVERYTHING,
	BTRFS_INODE_IN_DELALLOC_LIST,
	BTRFS_INODE_READDIO_NEED_LOCK,
	BTRFS_INODE_HAS_PROPS,
	BTRFS_INODE_SNAPSHOT_FLUSH,
};
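
/*
 * Illustrative sketch, not part of the original header: the values above are
 * bit numbers for btrfs_inode::runtime_flags and are manipulated with the
 * regular atomic bitops, e.g.:
 *
 *	set_bit(BTRFS_INODE_NEEDS_FULL_SYNC, &BTRFS_I(inode)->runtime_flags);
 *	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
 *		return;
 */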

/* in memory btrfs inode */
struct btrfs_inode {
	/* which subvolume this inode belongs to */
	struct btrfs_root *root;

	/* key used to find this inode on disk. This is used by the code
	 * to read in roots of subvolumes
	 */
	struct btrfs_key location;

	/*
	 * Lock for counters and all fields used to determine if the inode is in
	 * the log or not (last_trans, last_sub_trans, last_log_commit,
	 * logged_trans).
	 */
	spinlock_t lock;

	/* the extent_tree has caches of all the extent mappings to disk */
	struct extent_map_tree extent_tree;

	/* the io_tree does range state (DIRTY, LOCKED etc) */
	struct extent_io_tree io_tree;

	/* special utility tree used to record which mirrors have already been
	 * tried when checksums fail for a given block
	 */
	struct extent_io_tree io_failure_tree;

	/*
	 * Keep track of where the inode has extent items mapped in order to
	 * make sure the i_size adjustments are accurate
	 */
	struct extent_io_tree file_extent_tree;

	/* held while logging the inode in tree-log.c */
	struct mutex log_mutex;

	/* used to order data wrt metadata */
	struct btrfs_ordered_inode_tree ordered_tree;

	/* list of all the delalloc inodes in the FS. There are times we need
	 * to write all the delalloc pages to disk, and this list is used
	 * by the flushing code
	 */
	struct list_head delalloc_inodes;

	/* node for the red-black tree that links inodes in subvolume root */
	struct rb_node rb_node;

	unsigned long runtime_flags;

	/* Keep track of who's O_SYNC/fsyncing currently */
	atomic_t sync_writers;

	/* full 64 bit generation number, struct vfs_inode doesn't have a big
	 * enough field for this.
	 */
	u64 generation;

	/*
	 * transid of the trans_handle that last modified this inode
	 */
	u64 last_trans;

	/*
	 * transid that last logged this inode
	 */
	u64 logged_trans;

	/*
	 * log transid when this inode was last modified
	 */
	int last_sub_trans;

	/* a local copy of root's last_log_commit */
	int last_log_commit;

	/* total number of bytes pending delalloc, used by stat to calc the
	 * real block usage of the file
	 */
	u64 delalloc_bytes;

	/*
	 * Total number of bytes pending delalloc that fall within a file
	 * range that is either a hole or beyond EOF (and no prealloc extent
	 * exists in the range). This is always <= delalloc_bytes.
	 */
	u64 new_delalloc_bytes;

	/*
	 * total number of bytes pending defrag, used by stat to check whether
	 * it needs COW.
	 */
	u64 defrag_bytes;

	/*
	 * the size of the file stored in the metadata on disk. data=ordered
	 * means the in-memory i_size might be larger than the size on disk
	 * because not all the blocks are written yet.
	 */
	u64 disk_i_size;

	/*
	 * if this is a directory then index_cnt is the counter for the index
	 * number for new files that are created
	 */
	u64 index_cnt;

	/* Cache the directory index number to speed the dir/file remove */
	u64 dir_index;

	/* the fsync log has some corner cases that mean we have to check
	 * directories to see if any unlinks have been done before
	 * the directory was logged. See tree-log.c for all the
	 * details
	 */
	u64 last_unlink_trans;

	/*
	 * Number of bytes outstanding that are going to need csums. This is
	 * used in ENOSPC accounting.
	 */
	u64 csum_bytes;

	/* flags field from the on disk inode */
	u32 flags;

	/*
	 * Counters to keep track of the number of extent item's we may use due
	 * to delalloc and such. outstanding_extents is the number of extent
	 * items we think we'll end up using, and reserved_extents is the number
	 * of extent items we've reserved metadata for.
	 */
	unsigned outstanding_extents;

	struct btrfs_block_rsv block_rsv;

	/*
	 * Cached values of inode properties
	 */
	unsigned prop_compress;		/* per-file compression algorithm */
	/*
	 * Force compression on the file using the defrag ioctl, could be
	 * different from prop_compress and takes precedence if set
	 */
	unsigned defrag_compress;

	struct btrfs_delayed_node *delayed_node;

	/* File creation time. */
	struct timespec64 i_otime;

	/* Hook into fs_info->delayed_iputs */
	struct list_head delayed_iput;

	/*
	 * To avoid races between lockless (i_mutex not held) direct IO writes
	 * and concurrent fsync requests. Direct IO writes must acquire read
	 * access on this semaphore for creating an extent map and its
	 * corresponding ordered extent. The fast fsync path must acquire write
	 * access on this semaphore before it collects ordered extents and
	 * extent maps.
	 */
	struct rw_semaphore dio_sem;

	struct inode vfs_inode;
};
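
/*
 * Illustrative sketch, not part of the original header: per the dio_sem
 * comment above, a lockless direct IO write holds the semaphore shared while
 * creating its extent map and ordered extent, and the fast fsync path takes
 * it exclusive before collecting them, roughly:
 *
 *	down_read(&BTRFS_I(inode)->dio_sem);
 *	... create the extent map and ordered extent ...
 *	up_read(&BTRFS_I(inode)->dio_sem);
 */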

static inline struct btrfs_inode *BTRFS_I(const struct inode *inode)
{
	return container_of(inode, struct btrfs_inode, vfs_inode);
}
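
/*
 * Illustrative sketch, not part of the original header: BTRFS_I() converts a
 * VFS inode pointer back to the btrfs_inode that embeds it, giving access to
 * the btrfs specific fields, e.g.:
 *
 *	struct btrfs_inode *bi = BTRFS_I(inode);
 *	u64 subvol = bi->root->root_key.objectid;
 */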

static inline unsigned long btrfs_inode_hash(u64 objectid,
					     const struct btrfs_root *root)
{
	u64 h = objectid ^ (root->root_key.objectid * GOLDEN_RATIO_PRIME);

#if BITS_PER_LONG == 32
	h = (h >> 32) ^ (h & 0xffffffff);
#endif

	return (unsigned long)h;
}
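
/*
 * Illustrative note, not part of the original header: the hash mixes the
 * inode objectid with the owning root's objectid, so the same objectid in
 * two different subvolumes normally lands in different hash buckets, e.g.:
 *
 *	unsigned long bucket = btrfs_inode_hash(objectid, root);
 *
 * btrfs_insert_inode_hash() below feeds the result to __insert_inode_hash().
 */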

static inline void btrfs_insert_inode_hash(struct inode *inode)
{
	unsigned long h = btrfs_inode_hash(inode->i_ino, BTRFS_I(inode)->root);

	__insert_inode_hash(inode, h);
}

static inline u64 btrfs_ino(const struct btrfs_inode *inode)
{
	u64 ino = inode->location.objectid;

	/*
	 * !ino: btree_inode
	 * type == BTRFS_ROOT_ITEM_KEY: subvol dir
	 */
	if (!ino || inode->location.type == BTRFS_ROOT_ITEM_KEY)
		ino = inode->vfs_inode.i_ino;
	return ino;
}
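
/*
 * Illustrative sketch, not part of the original header: for a normal file or
 * directory this returns the objectid of the inode's key; only the btree
 * inode (no objectid set) and subvolume directories (key type
 * BTRFS_ROOT_ITEM_KEY) fall back to the VFS inode number, e.g.:
 *
 *	u64 ino = btrfs_ino(BTRFS_I(inode));
 */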

static inline void btrfs_i_size_write(struct btrfs_inode *inode, u64 size)
{
	i_size_write(&inode->vfs_inode, size);
	inode->disk_i_size = size;
}

static inline bool btrfs_is_free_space_inode(struct btrfs_inode *inode)
{
	struct btrfs_root *root = inode->root;

	if (root == root->fs_info->tree_root &&
	    btrfs_ino(inode) != BTRFS_BTREE_INODE_OBJECTID)
		return true;
	if (inode->location.objectid == BTRFS_FREE_INO_OBJECTID)
		return true;
	return false;
}
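
/*
 * Illustrative sketch, not part of the original header: callers use this
 * predicate to special-case the free space cache inodes (tree root inodes
 * other than the btree inode, or the free-ino cache objectid), e.g.:
 *
 *	if (btrfs_is_free_space_inode(BTRFS_I(inode)))
 *		return;
 */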

static inline bool is_data_inode(struct inode *inode)
{
	return btrfs_ino(BTRFS_I(inode)) != BTRFS_BTREE_INODE_OBJECTID;
}

static inline void btrfs_mod_outstanding_extents(struct btrfs_inode *inode,
						 int mod)
{
	lockdep_assert_held(&inode->lock);
	inode->outstanding_extents += mod;
	if (btrfs_is_free_space_inode(inode))
		return;
	trace_btrfs_inode_mod_outstanding_extents(inode->root, btrfs_ino(inode),
						  mod);
}
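
/*
 * Illustrative sketch, not part of the original header: the lockdep assertion
 * above means callers must hold the inode spinlock while adjusting the
 * outstanding extent count, e.g.:
 *
 *	spin_lock(&BTRFS_I(inode)->lock);
 *	btrfs_mod_outstanding_extents(BTRFS_I(inode), 1);
 *	spin_unlock(&BTRFS_I(inode)->lock);
 */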

static inline int btrfs_inode_in_log(struct btrfs_inode *inode, u64 generation)
{
	int ret = 0;

	spin_lock(&inode->lock);
	if (inode->logged_trans == generation &&
	    inode->last_sub_trans <= inode->last_log_commit &&
	    inode->last_sub_trans <= inode->root->last_log_commit) {
		/*
		 * After a ranged fsync we might have left some extent maps
		 * (that fall outside the fsync's range). So return false
		 * here if the list isn't empty, to make sure btrfs_log_inode()
		 * will be called and process those extent maps.
		 */
		if (list_empty(&inode->extent_tree.modified_extents))
			ret = 1;
	}
	spin_unlock(&inode->lock);
	return ret;
}
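
/*
 * Illustrative sketch, not part of the original header: the fsync fast path
 * can use this helper to skip logging an inode that was already fully logged
 * in the current log transaction (names below are placeholders), roughly:
 *
 *	if (btrfs_inode_in_log(BTRFS_I(inode), fs_info->generation))
 *		goto out;
 */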

#define BTRFS_DIO_ORIG_BIO_SUBMITTED	0x1

struct btrfs_dio_private {
	/* number of bios pending for this dio */
	atomic_t pending_bios;

	/* orig_bio is our btrfs_io_bio */
	struct bio *orig_bio;

	/* dio_bio came from fs/direct-io.c */
	struct bio *dio_bio;

	/*
	 * The original bio may be split to several sub-bios, this is
	 * done during endio of sub-bios
	 */
	blk_status_t (*subio_endio)(struct inode *, struct btrfs_io_bio *,
			blk_status_t);
};

/*
 * Disable DIO read nolock optimization, so new dio readers will be forced
 * to grab i_mutex. It is used to avoid the endless truncate due to
 * nonlocked dio read.
 */
static inline void btrfs_inode_block_unlocked_dio(struct btrfs_inode *inode)
{
	set_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
	smp_mb();
}

static inline void btrfs_inode_resume_unlocked_dio(struct btrfs_inode *inode)
{
	smp_mb__before_atomic();
	clear_bit(BTRFS_INODE_READDIO_NEED_LOCK, &inode->runtime_flags);
}
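
/*
 * Illustrative sketch, not part of the original header: these two helpers are
 * meant to be used as a pair around code that must not race with lockless DIO
 * reads (a truncate, for example), forcing new readers to take i_mutex in the
 * meantime, roughly:
 *
 *	btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
 *	inode_dio_wait(inode);
 *	... do the work ...
 *	btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
 */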

/* Array of bytes with variable length, hexadecimal format 0x1234 */
#define CSUM_FMT				"0x%*phN"
#define CSUM_FMT_VALUE(size, bytes)		size, bytes
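
/*
 * Illustrative sketch, not part of the original header: CSUM_FMT expands to
 * the printk "%*phN" specifier (a hex dump of a byte array whose length is
 * given by the field width), and CSUM_FMT_VALUE() supplies that (length,
 * buffer) argument pair, e.g.:
 *
 *	pr_info("csum " CSUM_FMT "\n", CSUM_FMT_VALUE(csum_size, csum));
 */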

static inline void btrfs_print_data_csum_error(struct btrfs_inode *inode,
		u64 logical_start, u8 *csum, u8 *csum_expected, int mirror_num)
{
	struct btrfs_root *root = inode->root;
	struct btrfs_super_block *sb = root->fs_info->super_copy;
	const u16 csum_size = btrfs_super_csum_size(sb);

	/* Output minus objectid, which is more meaningful */
	if (root->root_key.objectid >= BTRFS_LAST_FREE_OBJECTID)
		btrfs_warn_rl(root->fs_info,
	"csum failed root %lld ino %lld off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
	else
		btrfs_warn_rl(root->fs_info,
	"csum failed root %llu ino %llu off %llu csum " CSUM_FMT " expected csum " CSUM_FMT " mirror %d",
			root->root_key.objectid, btrfs_ino(inode),
			logical_start,
			CSUM_FMT_VALUE(csum_size, csum),
			CSUM_FMT_VALUE(csum_size, csum_expected),
			mirror_num);
}