/*
 * fs/gfs2/trans.c
 *
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>

#include "gfs2.h"
#include "incore.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"
int gfs2_trans_begin(struct gfs2_sbd *sdp, unsigned int blocks,
		     unsigned int revokes)
{
	struct gfs2_trans *tr;
	int error;

	BUG_ON(current->journal_info);
	BUG_ON(blocks == 0 && revokes == 0);

	if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags))
		return -EROFS;

	tr = kzalloc(sizeof(struct gfs2_trans), GFP_NOFS);
	if (!tr)
		return -ENOMEM;

	tr->tr_ip = _RET_IP_;
	tr->tr_blocks = blocks;
	tr->tr_revokes = revokes;
	tr->tr_reserved = 1;
	set_bit(TR_ALLOCED, &tr->tr_flags);
	if (blocks)
		tr->tr_reserved += 6 + blocks;
	if (revokes)
		tr->tr_reserved += gfs2_struct2blk(sdp, revokes,
						   sizeof(u64));
	INIT_LIST_HEAD(&tr->tr_databuf);
	INIT_LIST_HEAD(&tr->tr_buf);

	sb_start_intwrite(sdp->sd_vfs);

	error = gfs2_log_reserve(sdp, tr->tr_reserved);
	if (error)
		goto fail;

	current->journal_info = tr;

	return 0;

fail:
	sb_end_intwrite(sdp->sd_vfs);
	kfree(tr);

	return error;
}
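/*
 * Illustrative caller pattern (a sketch, not code from this file): a
 * typical metadata update opens a transaction, dirties its buffers
 * through gfs2_trans_add_meta() or gfs2_trans_add_data(), and then
 * closes the transaction:
 *
 *	error = gfs2_trans_begin(sdp, blocks, revokes);
 *	if (error)
 *		return error;
 *	gfs2_trans_add_meta(ip->i_gl, bh);
 *	gfs2_trans_end(sdp);
 *
 * Here @blocks must be an upper bound on the buffers actually dirtied;
 * gfs2_trans_end() below checks that the estimate was not exceeded and
 * dumps the transaction via gfs2_print_trans() if it was.
 */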
static void gfs2_print_trans(const struct gfs2_trans *tr)
{
	pr_warn("Transaction created at: %pSR\n", (void *)tr->tr_ip);
	pr_warn("blocks=%u revokes=%u reserved=%u touched=%u\n",
		tr->tr_blocks, tr->tr_revokes, tr->tr_reserved,
		test_bit(TR_TOUCHED, &tr->tr_flags));
	pr_warn("Buf %u/%u Databuf %u/%u Revoke %u/%u\n",
		tr->tr_num_buf_new, tr->tr_num_buf_rm,
		tr->tr_num_databuf_new, tr->tr_num_databuf_rm,
		tr->tr_num_revoke, tr->tr_num_revoke_rm);
}
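/*
 * gfs2_trans_end - close the transaction stored in current->journal_info
 * @sdp: the filesystem
 *
 * Descriptive note on the lifetime rules visible below: if the
 * transaction never dirtied anything (TR_TOUCHED clear), its reservation
 * is simply released. Otherwise it is handed to gfs2_log_commit(); the
 * struct is freed here only when it was allocated by gfs2_trans_begin()
 * (TR_ALLOCED) and has not been attached to the active log transaction
 * (TR_ATTACHED), in which case the log code presumably owns it from then
 * on. On synchronous mounts, every transaction end also triggers a log
 * flush.
 */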
void gfs2_trans_end(struct gfs2_sbd *sdp)
{
	struct gfs2_trans *tr = current->journal_info;
	s64 nbuf;
	int alloced = test_bit(TR_ALLOCED, &tr->tr_flags);

	current->journal_info = NULL;

	if (!test_bit(TR_TOUCHED, &tr->tr_flags)) {
		gfs2_log_release(sdp, tr->tr_reserved);
		if (alloced) {
			kfree(tr);
			sb_end_intwrite(sdp->sd_vfs);
		}
		return;
	}

	nbuf = tr->tr_num_buf_new + tr->tr_num_databuf_new;
	nbuf -= tr->tr_num_buf_rm;
	nbuf -= tr->tr_num_databuf_rm;

	if (gfs2_assert_withdraw(sdp, (nbuf <= tr->tr_blocks) &&
				       (tr->tr_num_revoke <= tr->tr_revokes)))
		gfs2_print_trans(tr);

	gfs2_log_commit(sdp, tr);
	if (alloced && !test_bit(TR_ATTACHED, &tr->tr_flags))
		kfree(tr);
	up_read(&sdp->sd_log_flush_lock);

	if (sdp->sd_vfs->s_flags & SB_SYNCHRONOUS)
		gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_TRANS_END);
	if (alloced)
		sb_end_intwrite(sdp->sd_vfs);
}
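/*
 * gfs2_alloc_bufdata - attach journal tracking state to a buffer
 *
 * The allocation uses __GFP_NOFAIL, so there is no error path: the
 * returned gfs2_bufdata is always valid. It links the buffer to its
 * glock and to the log operations that will be used to write it, and is
 * hung off bh->b_private so that later calls can find it again.
 */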
static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,
					       struct buffer_head *bh,
					       const struct gfs2_log_operations *lops)
{
	struct gfs2_bufdata *bd;

	bd = kmem_cache_zalloc(gfs2_bufdata_cachep, GFP_NOFS | __GFP_NOFAIL);
	bd->bd_bh = bh;
	bd->bd_gl = gl;
	bd->bd_ops = lops;
	INIT_LIST_HEAD(&bd->bd_list);
	bh->b_private = bd;
	return bd;
}
/**
 * gfs2_trans_add_data - Add a databuf to the transaction.
 * @gl: The inode glock associated with the buffer
 * @bh: The buffer to add
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per meta data)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)
{
	struct gfs2_trans *tr = current->journal_info;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct address_space *mapping = bh->b_page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_bufdata *bd;

	if (!gfs2_is_jdata(ip)) {
		gfs2_ordered_add_inode(ip);
		return;
	}

	lock_buffer(bh);
	if (buffer_pinned(bh)) {
		set_bit(TR_TOUCHED, &tr->tr_flags);
		goto out;
	}
	gfs2_log_lock(sdp);
	bd = bh->b_private;
	if (bd == NULL) {
		gfs2_log_unlock(sdp);
		unlock_buffer(bh);
		if (bh->b_private == NULL)
			bd = gfs2_alloc_bufdata(gl, bh, &gfs2_databuf_lops);
		else
			bd = bh->b_private;
		lock_buffer(bh);
		gfs2_log_lock(sdp);
	}
	gfs2_assert(sdp, bd->bd_gl == gl);
	set_bit(TR_TOUCHED, &tr->tr_flags);
	if (list_empty(&bd->bd_list)) {
		set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
		set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_databuf_new++;
		list_add_tail(&bd->bd_list, &tr->tr_databuf);
	}
	gfs2_log_unlock(sdp);
out:
	unlock_buffer(bh);
}
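/*
 * Note on the "251 or so" figure in the comment above (added arithmetic,
 * assuming the default 4KiB block size): each journaled data block costs
 * a 16-byte tag (two __be64s), so a single log descriptor block holds
 * about (4096 - sizeof(struct gfs2_log_descriptor)) / 16, i.e. roughly
 * 251 tags, versus twice that for the 8-byte metadata tags.
 */

/*
 * gfs2_trans_add_meta - Add a metadata buffer to the transaction.
 * @gl: the glock the buffer belongs to
 * @bh: the buffer to add
 *
 * (Descriptive summary of the code below.) Attaches a gfs2_bufdata to
 * the buffer if it does not have one yet, pins the buffer, stamps the
 * owning journal's id into the on-disk metadata header, and links the
 * buffer onto the transaction's tr_buf list. Adding an uninitialised
 * block (wrong magic) or adding while the filesystem is frozen is
 * treated as a bug.
 */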
void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)
{

	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_bufdata *bd;
	struct gfs2_meta_header *mh;
	struct gfs2_trans *tr = current->journal_info;
	enum gfs2_freeze_state state = atomic_read(&sdp->sd_freeze_state);

	lock_buffer(bh);
	if (buffer_pinned(bh)) {
		set_bit(TR_TOUCHED, &tr->tr_flags);
		goto out;
	}
	gfs2_log_lock(sdp);
	bd = bh->b_private;
	if (bd == NULL) {
		gfs2_log_unlock(sdp);
		unlock_buffer(bh);
		lock_page(bh->b_page);
		if (bh->b_private == NULL)
			bd = gfs2_alloc_bufdata(gl, bh, &gfs2_buf_lops);
		else
			bd = bh->b_private;
		unlock_page(bh->b_page);
		lock_buffer(bh);
		gfs2_log_lock(sdp);
	}
	gfs2_assert(sdp, bd->bd_gl == gl);
	set_bit(TR_TOUCHED, &tr->tr_flags);
	if (!list_empty(&bd->bd_list))
		goto out_unlock;
	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	mh = (struct gfs2_meta_header *)bd->bd_bh->b_data;
	if (unlikely(mh->mh_magic != cpu_to_be32(GFS2_MAGIC))) {
		pr_err("Attempting to add uninitialised block to journal (inplace block=%lld)\n",
		       (unsigned long long)bd->bd_bh->b_blocknr);
		BUG();
	}
	if (unlikely(state == SFS_FROZEN)) {
		printk(KERN_INFO "GFS2:adding buf while frozen\n");
		gfs2_assert_withdraw(sdp, 0);
	}
	gfs2_pin(sdp, bd->bd_bh);
	mh->__pad0 = cpu_to_be64(0);
	mh->mh_jid = cpu_to_be32(sdp->sd_jdesc->jd_jid);
	list_add(&bd->bd_list, &tr->tr_buf);
	tr->tr_num_buf_new++;
out_unlock:
	gfs2_log_unlock(sdp);
out:
	unlock_buffer(bh);
}
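/*
 * gfs2_trans_add_revoke - Add a revoke for a buffer to the transaction.
 *
 * The revoke mechanism, broadly: a revoke tells journal recovery not to
 * replay earlier journaled copies of a block, typically because the
 * block has since been freed or reused. The bufdata must already be off
 * every list; gfs2_add_revoke() queues it on the filesystem-wide revoke
 * list and the transaction merely counts it here.
 */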
void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd)
{
	struct gfs2_trans *tr = current->journal_info;

	BUG_ON(!list_empty(&bd->bd_list));
	gfs2_add_revoke(sdp, bd);
	set_bit(TR_TOUCHED, &tr->tr_flags);
	tr->tr_num_revoke++;
}
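/*
 * gfs2_trans_add_unrevoke - Cancel pending revokes for a block range.
 * @sdp: the filesystem
 * @blkno: the first block in the range
 * @len: the number of blocks in the range
 *
 * Presumably called when blocks with queued (not yet written) revokes
 * are reallocated: any matching entries still on sd_log_le_revoke are
 * dropped so the stale revokes cannot affect the blocks' new users, and
 * the transaction's revoke-removal count is updated accordingly.
 */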
void gfs2_trans_add_unrevoke(struct gfs2_sbd *sdp, u64 blkno, unsigned int len)
{
	struct gfs2_bufdata *bd, *tmp;
	struct gfs2_trans *tr = current->journal_info;
	unsigned int n = len;

	gfs2_log_lock(sdp);
	list_for_each_entry_safe(bd, tmp, &sdp->sd_log_le_revoke, bd_list) {
		if ((bd->bd_blkno >= blkno) && (bd->bd_blkno < (blkno + len))) {
			list_del_init(&bd->bd_list);
			gfs2_assert_withdraw(sdp, sdp->sd_log_num_revoke);
			sdp->sd_log_num_revoke--;
			kmem_cache_free(gfs2_bufdata_cachep, bd);
			tr->tr_num_revoke_rm++;
			if (--n == 0)
				break;
		}
	}
	gfs2_log_unlock(sdp);
}