// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 */
#ifndef	__XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

#define XFS_LOG_VEC_ORDERED	(-1)
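
/*
 * Usage sketch (not part of the original header): log vectors are chained
 * through lv_next, so the space a chain will consume in the log can be
 * totalled by walking the list.  lv_bytes is the accounted (written) space,
 * while lv_buf_len additionally includes the alignment padding added by
 * xlog_finish_iovec() below.
 */
static inline int
xlog_vec_chain_bytes(struct xfs_log_vec *lv)
{
	int	bytes = 0;

	for (; lv; lv = lv->lv_next)
		bytes += lv->lv_bytes;
	return bytes;
}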

static inline void *
xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type)
{
	struct xfs_log_iovec *vec = *vecp;

	if (vec) {
		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
		vec++;
	} else {
		vec = &lv->lv_iovecp[0];
	}

	vec->i_type = type;
	vec->i_addr = lv->lv_buf + lv->lv_buf_len;

	ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));

	*vecp = vec;
	return vec->i_addr;
}

/*
 * We need to make sure the next buffer is naturally aligned for the biggest
 * basic data type we put into it. We already accounted for this padding when
 * sizing the buffer.
 *
 * However, this padding does not get written into the log, and hence we have to
 * track the space used by the log vectors separately to prevent log space hangs
 * due to inaccurate accounting (i.e. a leak) of the used log space through the
 * CIL context ticket.
 */
static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
{
	lv->lv_buf_len += round_up(len, sizeof(uint64_t));
	lv->lv_bytes += len;
	vec->i_len = len;
}
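
/*
 * Worked example (illustrative, not part of the original header): for a
 * 13 byte region, lv_buf_len grows by round_up(13, 8) == 16 so the next
 * iovec stays 64-bit aligned, while lv_bytes grows by only the 13 bytes
 * that are actually written to the log.
 */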

static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}
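
/*
 * Usage sketch (illustrative, not part of the original header): build a two
 * region log vector, copying one blob and formatting a trailer in place.
 * The lv buffer and iovec array are assumed to have been sized for both
 * regions already; "type1"/"type2" stand in for XLOG_REG_TYPE_* values
 * from xfs_log_format.h.
 */
static inline void
xlog_format_example(struct xfs_log_vec *lv, uint type1, void *data, int len,
		uint type2, uint64_t trailer)
{
	struct xfs_log_iovec	*vecp = NULL;	/* cursor into lv->lv_iovecp */
	uint64_t		*p;

	/* region 1: straight copy of caller-supplied data */
	xlog_copy_iovec(lv, &vecp, type1, data, len);

	/* region 2: format directly into the space returned by prepare */
	p = xlog_prepare_iovec(lv, &vecp, type2);
	*p = trailer;
	xlog_finish_iovec(lv, vecp, sizeof(*p));
}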

/*
 * Structure used to pass callback function and the function's argument
 * to the log manager.
 */
typedef struct xfs_log_callback {
	struct xfs_log_callback	*cb_next;
	void			(*cb_func)(void *, int);
	void			*cb_arg;
} xfs_log_callback_t;
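
/*
 * Usage sketch (illustrative only): a function matching the cb_func
 * prototype.  The log invokes it with cb_arg and an abort flag once the
 * iclog the callback was attached to has been written, or abandoned on
 * shutdown.
 */
static inline void
xlog_example_callback(void *arg, int aborted)
{
	/* a real callback would complete or unpin the object behind 'arg' */
}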

/*
 * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number
 */
static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1)<CYCLE_LSN(lsn2))? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1)<BLOCK_LSN(lsn2))? -999 : 999;

	return 0;
}

#define	XFS_LSN_CMP(x,y) _lsn_cmp(x,y)
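
/*
 * Usage sketch (not part of the original header): XFS_LSN_CMP() is negative
 * when the first LSN is older, positive when it is newer, and zero when the
 * two are equal, e.g. to keep track of the most recent LSN seen so far:
 */
static inline xfs_lsn_t
xlog_max_lsn_example(xfs_lsn_t a, xfs_lsn_t b)
{
	return XFS_LSN_CMP(a, b) >= 0 ? a : b;
}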

/*
 * Flags to xfs_log_force()
 *
 *	XFS_LOG_SYNC:	Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC		0x1

/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;

xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
		       struct xlog_ticket *ticket,
		       struct xlog_in_core **iclog,
		       bool regrant);
int	  xfs_log_force(struct xfs_mount *mp, uint flags);
int	  xfs_log_force_lsn(struct xfs_mount *mp, xfs_lsn_t lsn, uint flags,
		int *log_flushed);
int	  xfs_log_mount(struct xfs_mount	*mp,
			struct xfs_buftarg	*log_target,
			xfs_daddr_t		start_block,
			int			num_bblocks);
int	  xfs_log_mount_finish(struct xfs_mount *mp);
int	xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void	  xfs_log_space_wake(struct xfs_mount *mp);
int	  xfs_log_notify(struct xlog_in_core	*iclog,
			 struct xfs_log_callback *callback_entry);
int	  xfs_log_release_iclog(struct xfs_mount *mp,
			 struct xlog_in_core	 *iclog);
int	  xfs_log_reserve(struct xfs_mount *mp,
			  int		   length,
			  int		   count,
			  struct xlog_ticket **ticket,
			  uint8_t	   clientid,
			  bool		   permanent);
int	  xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void	  xfs_log_unmount(struct xfs_mount *mp);
int	  xfs_log_force_umount(struct xfs_mount *mp, int logerror);

struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void	  xfs_log_ticket_put(struct xlog_ticket *ticket);

void	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
				xfs_lsn_t *commit_lsn, bool regrant);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void	xfs_log_work_queue(struct xfs_mount *mp);
void	xfs_log_quiesce(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
bool	xfs_log_in_recovery(struct xfs_mount *);
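
/*
 * Usage sketch (illustrative only): force every pending change in the
 * in-core log out to disk and wait for the I/O to complete, as a sync path
 * would.  Passing 0 instead of XFS_LOG_SYNC starts the flush without
 * waiting for it.
 */
static inline int
xlog_force_sync_example(struct xfs_mount *mp)
{
	return xfs_log_force(mp, XFS_LOG_SYNC);
}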

#endif	/* __XFS_LOG_H__ */