fs/xfs/xfs_log.h
/*
 * Copyright (c) 2000-2003,2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 */
#ifndef __XFS_LOG_H__
#define __XFS_LOG_H__

struct xfs_log_vec {
	struct xfs_log_vec	*lv_next;	/* next lv in build list */
	int			lv_niovecs;	/* number of iovecs in lv */
	struct xfs_log_iovec	*lv_iovecp;	/* iovec array */
	struct xfs_log_item	*lv_item;	/* owner */
	char			*lv_buf;	/* formatted buffer */
	int			lv_bytes;	/* accounted space in buffer */
	int			lv_buf_len;	/* aligned size of buffer */
	int			lv_size;	/* size of allocated lv */
};

#define XFS_LOG_VEC_ORDERED	(-1)
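
/*
 * Illustrative note (not part of this header): an ordered log item carries
 * no formatted data, so the CIL can mark such a vector by storing
 * XFS_LOG_VEC_ORDERED in lv_buf_len instead of a real buffer length, e.g.:
 *
 *	lv->lv_buf_len = XFS_LOG_VEC_ORDERED;
 */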

static inline void *
xlog_prepare_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type)
{
	struct xfs_log_iovec *vec = *vecp;

	if (vec) {
		ASSERT(vec - lv->lv_iovecp < lv->lv_niovecs);
		vec++;
	} else {
		vec = &lv->lv_iovecp[0];
	}

	vec->i_type = type;
	vec->i_addr = lv->lv_buf + lv->lv_buf_len;

	ASSERT(IS_ALIGNED((unsigned long)vec->i_addr, sizeof(uint64_t)));

	*vecp = vec;
	return vec->i_addr;
}

/*
 * We need to make sure the next buffer is naturally aligned for the biggest
 * basic data type we put into it. We already accounted for this padding when
 * sizing the buffer.
 *
 * However, this padding does not get written into the log, and hence we have to
 * track the space used by the log vectors separately to prevent log space hangs
 * due to inaccurate accounting (i.e. a leak) of the used log space through the
 * CIL context ticket.
 */
static inline void
xlog_finish_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec *vec, int len)
{
	lv->lv_buf_len += round_up(len, sizeof(uint64_t));
	lv->lv_bytes += len;
	vec->i_len = len;
}

static inline void *
xlog_copy_iovec(struct xfs_log_vec *lv, struct xfs_log_iovec **vecp,
		uint type, void *data, int len)
{
	void *buf;

	buf = xlog_prepare_iovec(lv, vecp, type);
	memcpy(buf, data, len);
	xlog_finish_iovec(lv, *vecp, len);
	return buf;
}
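
/*
 * Usage sketch (illustrative only, not part of this header): a log item's
 * format routine builds its regions with the helpers above.  Each copy
 * rounds lv_buf_len up to an 8 byte boundary while lv_bytes tracks the
 * exact space used.  The xfs_foo_log_format structure is a hypothetical
 * stand-in; the region type is assumed to come from xfs_log_format.h:
 *
 *	struct xfs_log_iovec		*vecp = NULL;
 *	struct xfs_foo_log_format	foo_fmt;
 *
 *	xlog_copy_iovec(lv, &vecp, XLOG_REG_TYPE_IFORMAT,
 *			&foo_fmt, sizeof(foo_fmt));
 */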

/*
 * Structure used to pass callback function and the function's argument
 * to the log manager.
 */
typedef struct xfs_log_callback {
	struct xfs_log_callback	*cb_next;
	void			(*cb_func)(void *, int);
	void			*cb_arg;
} xfs_log_callback_t;
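
/*
 * Illustrative sketch: a caller that needs to run code once an in-core log
 * buffer has reached the disk fills in one of these and attaches it with
 * xfs_log_notify() (declared below); the callback is later invoked with
 * cb_arg and an abort flag.  my_cb, my_done and my_data are hypothetical:
 *
 *	my_cb->cb_func = my_done;
 *	my_cb->cb_arg = my_data;
 *	error = xfs_log_notify(mp, iclog, my_cb);
 */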

/*
 * By comparing each component, we don't have to worry about extra
 * endian issues in treating two 32 bit numbers as one 64 bit number
 */
static inline xfs_lsn_t	_lsn_cmp(xfs_lsn_t lsn1, xfs_lsn_t lsn2)
{
	if (CYCLE_LSN(lsn1) != CYCLE_LSN(lsn2))
		return (CYCLE_LSN(lsn1)<CYCLE_LSN(lsn2))? -999 : 999;

	if (BLOCK_LSN(lsn1) != BLOCK_LSN(lsn2))
		return (BLOCK_LSN(lsn1)<BLOCK_LSN(lsn2))? -999 : 999;

	return 0;
}

#define	XFS_LSN_CMP(x,y) _lsn_cmp(x,y)
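
/*
 * Example (illustrative): XFS_LSN_CMP() follows the usual compare
 * convention, returning a negative value when x is older than y, zero when
 * they are equal, and a positive value when x is newer:
 *
 *	if (XFS_LSN_CMP(item_lsn, threshold_lsn) >= 0)
 *		return;
 */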

/*
 * Flags to xfs_log_force()
 *
 *	XFS_LOG_SYNC:	Synchronous force in-core log to disk
 */
#define XFS_LOG_SYNC		0x1
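
/*
 * For example (illustrative), flushing all pending in-core log changes to
 * disk and waiting for the write to complete:
 *
 *	xfs_log_force(mp, XFS_LOG_SYNC);
 */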

/* Log manager interfaces */
struct xfs_mount;
struct xlog_in_core;
struct xlog_ticket;
struct xfs_log_item;
struct xfs_item_ops;
struct xfs_trans;

xfs_lsn_t xfs_log_done(struct xfs_mount *mp,
		       struct xlog_ticket *ticket,
		       struct xlog_in_core **iclog,
		       bool regrant);
int	  _xfs_log_force(struct xfs_mount *mp,
			 uint		flags,
			 int		*log_forced);
void	  xfs_log_force(struct xfs_mount	*mp,
			uint		flags);
int	  _xfs_log_force_lsn(struct xfs_mount *mp,
			     xfs_lsn_t		lsn,
			     uint		flags,
			     int		*log_forced);
void	  xfs_log_force_lsn(struct xfs_mount *mp,
			    xfs_lsn_t		lsn,
			    uint		flags);
int	  xfs_log_mount(struct xfs_mount	*mp,
			struct xfs_buftarg	*log_target,
			xfs_daddr_t		start_block,
			int			num_bblocks);
int	  xfs_log_mount_finish(struct xfs_mount *mp);
int	  xfs_log_mount_cancel(struct xfs_mount *);
xfs_lsn_t xlog_assign_tail_lsn(struct xfs_mount *mp);
xfs_lsn_t xlog_assign_tail_lsn_locked(struct xfs_mount *mp);
void	  xfs_log_space_wake(struct xfs_mount *mp);
int	  xfs_log_notify(struct xfs_mount	*mp,
			 struct xlog_in_core	*iclog,
			 struct xfs_log_callback *callback_entry);
int	  xfs_log_release_iclog(struct xfs_mount *mp,
			 struct xlog_in_core	 *iclog);
int	  xfs_log_reserve(struct xfs_mount *mp,
			  int		   length,
			  int		   count,
			  struct xlog_ticket **ticket,
			  uint8_t	   clientid,
			  bool		   permanent);
int	  xfs_log_regrant(struct xfs_mount *mp, struct xlog_ticket *tic);
void	  xfs_log_unmount(struct xfs_mount *mp);
int	  xfs_log_force_umount(struct xfs_mount *mp, int logerror);

struct xlog_ticket *xfs_log_ticket_get(struct xlog_ticket *ticket);
void	  xfs_log_ticket_put(struct xlog_ticket *ticket);

void	xfs_log_commit_cil(struct xfs_mount *mp, struct xfs_trans *tp,
				xfs_lsn_t *commit_lsn, bool regrant);
bool	xfs_log_item_in_current_chkpt(struct xfs_log_item *lip);

void	xfs_log_work_queue(struct xfs_mount *mp);
void	xfs_log_quiesce(struct xfs_mount *mp);
bool	xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
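
/*
 * Rough call sequence (illustrative only, error handling and the usual
 * xfs_trans plumbing omitted): log space is reserved against a ticket, a
 * transaction is committed through the CIL, and the log can then be forced
 * up to the resulting LSN.  unit_bytes and the XFS_TRANSACTION client id
 * are assumed from the transaction reservation code and xfs_log_format.h:
 *
 *	struct xlog_ticket	*ticket;
 *	xfs_lsn_t		commit_lsn;
 *
 *	error = xfs_log_reserve(mp, unit_bytes, 1, &ticket,
 *				XFS_TRANSACTION, false);
 *	...
 *	xfs_log_commit_cil(mp, tp, &commit_lsn, false);
 *	xfs_log_force_lsn(mp, commit_lsn, XFS_LOG_SYNC);
 */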

#endif	/* __XFS_LOG_H__ */