// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef	__XFS_INODE_H__
#define	__XFS_INODE_H__

#include "xfs_inode_buf.h"
#include "xfs_inode_fork.h"

/*
 * Kernel only inode definitions
 */
struct xfs_inode_log_item;
typedef struct xfs_inode {
	/* Inode linking and identification information. */
	struct xfs_mount	*i_mount;	/* fs mount struct ptr */
	struct xfs_dquot	*i_udquot;	/* user dquot */
	struct xfs_dquot	*i_gdquot;	/* group dquot */
	struct xfs_dquot	*i_pdquot;	/* project dquot */

	/* Inode location stuff */
	xfs_ino_t		i_ino;		/* inode number (agno/agino) */
	struct xfs_imap		i_imap;		/* location for xfs_imap() */

	/* Extent information. */
	struct xfs_ifork	*i_afp;		/* attribute fork pointer */
	struct xfs_ifork	*i_cowfp;	/* copy on write extents */
	struct xfs_ifork	i_df;		/* data fork */

	/* Transaction and locking information. */
	struct xfs_inode_log_item *i_itemp;	/* logging information */
	mrlock_t		i_lock;		/* inode lock */
	mrlock_t		i_mmaplock;	/* inode mmap IO lock */
	atomic_t		i_pincount;	/* inode pin count */
	/*
	 * Bitsets of inode metadata that have been checked and/or are sick.
	 * Callers must hold i_flags_lock before accessing these fields.
	 */
	uint16_t		i_checked;
	uint16_t		i_sick;

	spinlock_t		i_flags_lock;	/* inode i_flags lock */
	/* Miscellaneous state. */
	unsigned long		i_flags;	/* see defined flags below */
	uint64_t		i_delayed_blks;	/* count of delay alloc blks */

	struct xfs_icdinode	i_d;		/* most of ondisk inode */

	struct inode		i_vnode;	/* embedded VFS inode */

	/* pending io completions */
	spinlock_t		i_ioend_lock;
	struct work_struct	i_ioend_work;
	struct list_head	i_ioend_list;
} xfs_inode_t;
/* Convert from vfs inode to xfs inode */
static inline struct xfs_inode *XFS_I(struct inode *inode)
{
	return container_of(inode, struct xfs_inode, i_vnode);
}
/* convert from xfs inode to vfs inode */
static inline struct inode *VFS_I(struct xfs_inode *ip)
{
	return &ip->i_vnode;
}
/*
 * For regular files we only update the on-disk filesize when actually
 * writing data back to disk.  Until then only the copy in the VFS inode
 * is up to date.
 */
static inline xfs_fsize_t XFS_ISIZE(struct xfs_inode *ip)
{
	if (S_ISREG(VFS_I(ip)->i_mode))
		return i_size_read(VFS_I(ip));
	return ip->i_d.di_size;
}
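/*
 * Illustrative note (not part of the original header): for a regular file
 * with dirty data still in the page cache, i_size_read() may already report
 * the new size while ip->i_d.di_size still holds the old on-disk size, so
 * XFS_ISIZE() deliberately reports the VFS view for regular files.
 */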
/*
 * If this I/O goes past the on-disk inode size update it unless it would
 * be past the current in-core inode size.
 */
static inline xfs_fsize_t
xfs_new_eof(struct xfs_inode *ip, xfs_fsize_t new_size)
{
	xfs_fsize_t i_size = i_size_read(VFS_I(ip));

	if (new_size > i_size || new_size < 0)
		new_size = i_size;
	return new_size > ip->i_d.di_size ? new_size : 0;
}
/*
 * i_flags helper functions
 */
static inline void
__xfs_iflags_set(xfs_inode_t *ip, unsigned short flags)
{
	ip->i_flags |= flags;
}

static inline void
xfs_iflags_set(xfs_inode_t *ip, unsigned short flags)
{
	spin_lock(&ip->i_flags_lock);
	__xfs_iflags_set(ip, flags);
	spin_unlock(&ip->i_flags_lock);
}

static inline void
xfs_iflags_clear(xfs_inode_t *ip, unsigned short flags)
{
	spin_lock(&ip->i_flags_lock);
	ip->i_flags &= ~flags;
	spin_unlock(&ip->i_flags_lock);
}

static inline int
__xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)
{
	return (ip->i_flags & flags);
}

static inline int
xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)
{
	int ret;

	spin_lock(&ip->i_flags_lock);
	ret = __xfs_iflags_test(ip, flags);
	spin_unlock(&ip->i_flags_lock);
	return ret;
}

static inline int
xfs_iflags_test_and_clear(xfs_inode_t *ip, unsigned short flags)
{
	int ret;

	spin_lock(&ip->i_flags_lock);
	ret = ip->i_flags & flags;
	if (ret)
		ip->i_flags &= ~flags;
	spin_unlock(&ip->i_flags_lock);
	return ret;
}

static inline int
xfs_iflags_test_and_set(xfs_inode_t *ip, unsigned short flags)
{
	int ret;

	spin_lock(&ip->i_flags_lock);
	ret = ip->i_flags & flags;
	if (!ret)
		ip->i_flags |= flags;
	spin_unlock(&ip->i_flags_lock);
	return ret;
}
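/*
 * Illustrative usage (not part of the original header): callers operate on
 * the in-core flag bits defined below, e.g.
 *
 *	xfs_iflags_set(ip, XFS_ITRUNCATED);
 *	...
 *	if (xfs_iflags_test_and_clear(ip, XFS_ITRUNCATED))
 *		do_flush_on_close(ip);
 *
 * where do_flush_on_close() is a hypothetical caller-side helper; the
 * i_flags_lock serialisation is handled inside the helpers above.
 */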
static inline prid_t
xfs_get_initial_prid(struct xfs_inode *dp)
{
	if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
		return dp->i_d.di_projid;

	return XFS_PROJID_DEFAULT;
}
static inline bool xfs_is_reflink_inode(struct xfs_inode *ip)
{
	return ip->i_d.di_flags2 & XFS_DIFLAG2_REFLINK;
}
/*
 * Check if an inode has any data in the COW fork.  This is often false even
 * for inodes with the reflink flag set, when there is no pending COW
 * operation.
 */
static inline bool xfs_inode_has_cow_data(struct xfs_inode *ip)
{
	return ip->i_cowfp && ip->i_cowfp->if_bytes;
}
static inline bool xfs_inode_has_bigtime(struct xfs_inode *ip)
{
	return ip->i_d.di_flags2 & XFS_DIFLAG2_BIGTIME;
}
/*
 * Return the buftarg used for data allocations on a given inode.
 */
#define xfs_inode_buftarg(ip) \
	(XFS_IS_REALTIME_INODE(ip) ? \
		(ip)->i_mount->m_rtdev_targp : (ip)->i_mount->m_ddev_targp)
/*
 * In-core inode flags.
 */
#define XFS_IRECLAIM		(1 << 0) /* started reclaiming this inode */
#define XFS_ISTALE		(1 << 1) /* inode has been staled */
#define XFS_IRECLAIMABLE	(1 << 2) /* inode can be reclaimed */
#define __XFS_INEW_BIT		3	 /* inode has just been allocated */
#define XFS_INEW		(1 << __XFS_INEW_BIT)
#define XFS_ITRUNCATED		(1 << 5) /* truncated down so flush-on-close */
#define XFS_IDIRTY_RELEASE	(1 << 6) /* dirty release already seen */
#define XFS_IFLUSHING		(1 << 7) /* inode is being flushed */
#define __XFS_IPINNED_BIT	8	 /* wakeup key for zero pin count */
#define XFS_IPINNED		(1 << __XFS_IPINNED_BIT)
#define XFS_IEOFBLOCKS		(1 << 9) /* has the preallocblocks tag set */
/*
 * If this unlinked inode is in the middle of recovery, don't let drop_inode
 * truncate and free the inode.  This can happen if we iget the inode during
 * log recovery to replay a bmap operation on the inode.
 */
#define XFS_IRECOVERY		(1 << 11)
#define XFS_ICOWBLOCKS		(1 << 12) /* has the cowblocks tag set */
/*
 * Per-lifetime flags need to be reset when re-using a reclaimable inode during
 * inode lookup. This prevents unintended behaviour on the new inode from
 * occurring.
 */
#define XFS_IRECLAIM_RESET_FLAGS	\
	(XFS_IRECLAIMABLE | XFS_IRECLAIM | \
	 XFS_IDIRTY_RELEASE | XFS_ITRUNCATED)
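/*
 * Illustrative sketch (not from the original header): when inode lookup
 * revives an inode that was queued for reclaim, it resets these per-lifetime
 * flags before handing the inode out again, roughly:
 *
 *	ip->i_flags &= ~XFS_IRECLAIM_RESET_FLAGS;
 *	ip->i_flags |= XFS_INEW;
 */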
/*
 * Flags for inode locking.
 * Bit ranges:	1<<1  - 1<<16-1 -- iolock/ilock modes (bitfield)
 *		1<<16 - 1<<32-1 -- lockdep annotation (integers)
 */
#define	XFS_IOLOCK_EXCL		(1<<0)
#define	XFS_IOLOCK_SHARED	(1<<1)
#define	XFS_ILOCK_EXCL		(1<<2)
#define	XFS_ILOCK_SHARED	(1<<3)
#define	XFS_MMAPLOCK_EXCL	(1<<4)
#define	XFS_MMAPLOCK_SHARED	(1<<5)

#define XFS_LOCK_MASK		(XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED \
				| XFS_ILOCK_EXCL | XFS_ILOCK_SHARED \
				| XFS_MMAPLOCK_EXCL | XFS_MMAPLOCK_SHARED)

#define XFS_LOCK_FLAGS \
	{ XFS_IOLOCK_EXCL,	"IOLOCK_EXCL" }, \
	{ XFS_IOLOCK_SHARED,	"IOLOCK_SHARED" }, \
	{ XFS_ILOCK_EXCL,	"ILOCK_EXCL" }, \
	{ XFS_ILOCK_SHARED,	"ILOCK_SHARED" }, \
	{ XFS_MMAPLOCK_EXCL,	"MMAPLOCK_EXCL" }, \
	{ XFS_MMAPLOCK_SHARED,	"MMAPLOCK_SHARED" }
/*
 * Flags for lockdep annotations.
 *
 * XFS_LOCK_PARENT - for directory operations that require locking a
 * parent directory inode and a child entry inode. IOLOCK requires nesting,
 * MMAPLOCK does not support this class, ILOCK requires a single subclass
 * to differentiate parent from child.
 *
 * XFS_LOCK_RTBITMAP/XFS_LOCK_RTSUM - the realtime device bitmap and summary
 * inodes do not participate in the normal lock order, and thus have their
 * own subclasses.
 *
 * XFS_LOCK_INUMORDER - for locking several inodes at the same time
 * with xfs_lock_inodes().  This flag is used as the starting subclass
 * and each subsequent lock acquired will increment the subclass by one.
 * However, MAX_LOCKDEP_SUBCLASSES == 8, which means we are greatly
 * limited in the subclasses we can represent via nesting. We need at least
 * 5 inodes nest depth for the ILOCK through rename, and we also have to
 * support XFS_ILOCK_PARENT, which gives 6 subclasses. Then we have
 * XFS_ILOCK_RTBITMAP and XFS_ILOCK_RTSUM, which are another 2 unique
 * subclasses, so that's all 8 subclasses supported by lockdep.
 *
 * This also means we have to number the sub-classes in the lowest bits of
 * the mask we keep, and we have to ensure we never exceed 3 bits of lockdep
 * mask and we can't use bit-masking to build the subclasses. What a mess.
 *
 * Bit layout:
 *
 *	16-19	XFS_IOLOCK_SHIFT dependencies
 *	20-23	XFS_MMAPLOCK_SHIFT dependencies
 *	24-31	XFS_ILOCK_SHIFT dependencies
 *
 * ILOCK values:
 *
 *	0-4	subclass values
 *	5	PARENT subclass (not nestable)
 *	6	RTBITMAP subclass (not nestable)
 *	7	RTSUM subclass (not nestable)
 */
#define XFS_IOLOCK_SHIFT		16
#define XFS_IOLOCK_MAX_SUBCLASS		3
#define XFS_IOLOCK_DEP_MASK		0x000f0000

#define XFS_MMAPLOCK_SHIFT		20
#define XFS_MMAPLOCK_NUMORDER		0
#define XFS_MMAPLOCK_MAX_SUBCLASS	3
#define XFS_MMAPLOCK_DEP_MASK		0x00f00000

#define XFS_ILOCK_SHIFT			24
#define XFS_ILOCK_PARENT_VAL		5
#define XFS_ILOCK_MAX_SUBCLASS		(XFS_ILOCK_PARENT_VAL - 1)
#define XFS_ILOCK_RTBITMAP_VAL		6
#define XFS_ILOCK_RTSUM_VAL		7
#define XFS_ILOCK_DEP_MASK		0xff000000
#define	XFS_ILOCK_PARENT		(XFS_ILOCK_PARENT_VAL << XFS_ILOCK_SHIFT)
#define	XFS_ILOCK_RTBITMAP		(XFS_ILOCK_RTBITMAP_VAL << XFS_ILOCK_SHIFT)
#define	XFS_ILOCK_RTSUM			(XFS_ILOCK_RTSUM_VAL << XFS_ILOCK_SHIFT)
#define XFS_LOCK_SUBCLASS_MASK	(XFS_IOLOCK_DEP_MASK | \
				 XFS_MMAPLOCK_DEP_MASK | \
				 XFS_ILOCK_DEP_MASK)

#define XFS_IOLOCK_DEP(flags)	(((flags) & XFS_IOLOCK_DEP_MASK) \
					>> XFS_IOLOCK_SHIFT)
#define XFS_MMAPLOCK_DEP(flags)	(((flags) & XFS_MMAPLOCK_DEP_MASK) \
					>> XFS_MMAPLOCK_SHIFT)
#define XFS_ILOCK_DEP(flags)	(((flags) & XFS_ILOCK_DEP_MASK) \
					>> XFS_ILOCK_SHIFT)
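/*
 * Illustrative usage (not part of the original header): the subclass values
 * are OR'ed into the lock mode when taking nested locks, e.g. a directory
 * operation locking parent then child might do
 *
 *	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
 *	xfs_ilock(ip, XFS_ILOCK_EXCL);
 *
 * and the lock helpers extract the lockdep subclass again via XFS_ILOCK_DEP().
 */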
/*
 * Layouts are broken in the BREAK_WRITE case to ensure that
 * layout-holders do not collide with local writes. Additionally,
 * layouts are broken in the BREAK_UNMAP case to make sure the
 * layout-holder has a consistent view of the file's extent map. While
 * BREAK_WRITE breaks can be satisfied by recalling FL_LAYOUT leases,
 * BREAK_UNMAP breaks additionally require waiting for busy dax-pages to
 * go idle.
 */
enum layout_break_reason {
	BREAK_WRITE,
	BREAK_UNMAP,
};
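/*
 * Illustrative usage (not part of the original header): callers that are
 * about to unmap or remap extents typically break layouts with
 *
 *	error = xfs_break_layouts(inode, &iolock, BREAK_UNMAP);
 *
 * while plain writes only need BREAK_WRITE; xfs_break_layouts() is declared
 * later in this header.
 */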
/*
 * For multiple groups support: if the S_ISGID bit is set in the parent
 * directory, the group of a new file is set to that of the parent, and
 * a new subdirectory gets the S_ISGID bit from the parent.
 */
#define XFS_INHERIT_GID(pip)	\
	(((pip)->i_mount->m_flags & XFS_MOUNT_GRPID) || \
	 (VFS_I(pip)->i_mode & S_ISGID))
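/*
 * Illustrative sketch (not from the original header): inode allocation uses
 * this to decide whether a new inode inherits the parent's group, roughly:
 *
 *	if (XFS_INHERIT_GID(pip))
 *		VFS_I(ip)->i_gid = VFS_I(pip)->i_gid;
 */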
int		xfs_release(struct xfs_inode *ip);
void		xfs_inactive(struct xfs_inode *ip);
int		xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
			   struct xfs_inode **ipp, struct xfs_name *ci_name);
int		xfs_create(struct xfs_inode *dp, struct xfs_name *name,
			   umode_t mode, dev_t rdev, struct xfs_inode **ipp);
int		xfs_create_tmpfile(struct xfs_inode *dp, umode_t mode,
			   struct xfs_inode **ipp);
int		xfs_remove(struct xfs_inode *dp, struct xfs_name *name,
			   struct xfs_inode *ip);
int		xfs_link(struct xfs_inode *tdp, struct xfs_inode *sip,
			 struct xfs_name *target_name);
int		xfs_rename(struct xfs_inode *src_dp, struct xfs_name *src_name,
			   struct xfs_inode *src_ip, struct xfs_inode *target_dp,
			   struct xfs_name *target_name,
			   struct xfs_inode *target_ip, unsigned int flags);
void		xfs_ilock(xfs_inode_t *, uint);
int		xfs_ilock_nowait(xfs_inode_t *, uint);
void		xfs_iunlock(xfs_inode_t *, uint);
void		xfs_ilock_demote(xfs_inode_t *, uint);
int		xfs_isilocked(xfs_inode_t *, uint);
uint		xfs_ilock_data_map_shared(struct xfs_inode *);
uint		xfs_ilock_attr_map_shared(struct xfs_inode *);

uint		xfs_ip2xflags(struct xfs_inode *);
int		xfs_ifree(struct xfs_trans *, struct xfs_inode *);
int		xfs_itruncate_extents_flags(struct xfs_trans **,
				struct xfs_inode *, int, xfs_fsize_t, int);
void		xfs_iext_realloc(xfs_inode_t *, int, int);

int		xfs_log_force_inode(struct xfs_inode *ip);
void		xfs_iunpin_wait(xfs_inode_t *);
#define xfs_ipincount(ip)	((unsigned int) atomic_read(&ip->i_pincount))

int		xfs_iflush_cluster(struct xfs_buf *);
void		xfs_lock_two_inodes(struct xfs_inode *ip0, uint ip0_mode,
				struct xfs_inode *ip1, uint ip1_mode);

xfs_extlen_t	xfs_get_extsz_hint(struct xfs_inode *ip);
xfs_extlen_t	xfs_get_cowextsz_hint(struct xfs_inode *ip);

int		xfs_dir_ialloc(struct xfs_trans **tpp, struct xfs_inode *dp,
			       umode_t mode, xfs_nlink_t nlink, dev_t dev,
			       prid_t prid, struct xfs_inode **ipp);
static inline int
xfs_itruncate_extents(
	struct xfs_trans	**tpp,
	struct xfs_inode	*ip,
	int			whichfork,
	xfs_fsize_t		new_size)
{
	return xfs_itruncate_extents_flags(tpp, ip, whichfork, new_size, 0);
}
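/*
 * Illustrative usage (not part of the original header): most callers truncate
 * the data fork, e.g.
 *
 *	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, new_size);
 *
 * which is xfs_itruncate_extents_flags() with no extra flags; XFS_DATA_FORK
 * comes from the shared libxfs headers.
 */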
/* from xfs_file.c */
enum xfs_prealloc_flags {
	XFS_PREALLOC_SET	= (1 << 1),
	XFS_PREALLOC_CLEAR	= (1 << 2),
	XFS_PREALLOC_SYNC	= (1 << 3),
	XFS_PREALLOC_INVISIBLE	= (1 << 4),
};

int	xfs_update_prealloc_flags(struct xfs_inode *ip,
				  enum xfs_prealloc_flags flags);
int	xfs_break_layouts(struct inode *inode, uint *iolock,
		enum layout_break_reason reason);
/* from xfs_iops.c */
extern void xfs_setup_inode(struct xfs_inode *ip);
extern void xfs_setup_iops(struct xfs_inode *ip);
extern void xfs_diflags_to_iflags(struct xfs_inode *ip, bool init);
/*
 * When setting up a newly allocated inode, we need to call
 * xfs_finish_inode_setup() once the inode is fully instantiated at
 * the VFS level to prevent the rest of the world seeing the inode
 * before we've completed instantiation. Otherwise we can do it
 * the moment the inode lookup is complete.
 */
static inline void xfs_finish_inode_setup(struct xfs_inode *ip)
{
	xfs_iflags_clear(ip, XFS_INEW);
	barrier();
	unlock_new_inode(VFS_I(ip));
	wake_up_bit(&ip->i_flags, __XFS_INEW_BIT);
}
static inline void xfs_setup_existing_inode(struct xfs_inode *ip)
{
	xfs_setup_inode(ip);
	xfs_setup_iops(ip);
	xfs_finish_inode_setup(ip);
}
void xfs_irele(struct xfs_inode *ip);

extern struct kmem_zone	*xfs_inode_zone;

/* The default CoW extent size hint. */
#define XFS_DEFAULT_COWEXTSZ_HINT 32

int xfs_iunlink_init(struct xfs_perag *pag);
void xfs_iunlink_destroy(struct xfs_perag *pag);

void xfs_end_io(struct work_struct *work);

int xfs_ilock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);
void xfs_iunlock2_io_mmap(struct xfs_inode *ip1, struct xfs_inode *ip2);

#endif	/* __XFS_INODE_H__ */