/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc.  All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>

#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
static void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	gfs2_assert_withdraw(sdp, test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags));

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to in-place disk block, remove it from the AIL.
	 */
	if (bd->bd_ail)
		list_move(&bd->bd_ail_st_list, &bd->bd_ail->ai_ail2_list);
	get_bh(bh);
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @ai:
 *
 */
static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_ail *ai)
{
	struct gfs2_bufdata *bd = bh->b_private;

	gfs2_assert_withdraw(sdp, buffer_uptodate(bh));

	if (!buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	gfs2_log_lock(sdp);
	if (bd->bd_ail) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_ail = ai;
	list_add(&bd->bd_ail_st_list, &ai->ai_ail1_list);
	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

static inline struct gfs2_log_descriptor *bh_log_desc(struct buffer_head *bh)
{
	return (struct gfs2_log_descriptor *)bh->b_data;
}

static inline __be64 *bh_log_ptr(struct buffer_head *bh)
{
	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
	return (__force __be64 *)(ld + 1);
}

static inline __be64 *bh_ptr_end(struct buffer_head *bh)
{
	return (__force __be64 *)(bh->b_data + bh->b_size);
}

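/*
 * gfs2_get_log_desc - get a filled-in log descriptor buffer
 *
 * Allocates a new log buffer and initialises its header as a log
 * descriptor of the given type; the length and data fields are zeroed
 * and left for the caller to fill in.
 */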
static struct buffer_head *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type)
{
	struct buffer_head *bh = gfs2_log_get_buf(sdp);
	struct gfs2_log_descriptor *ld = bh_log_desc(bh);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = 0;
	ld->ld_data1 = 0;
	ld->ld_data2 = 0;
	memset(ld->ld_reserved, 0, sizeof(ld->ld_reserved));
	return bh;
}

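/*
 * buf_lo_add - add a metadata buffer to the current transaction
 *
 * On first sight of the buffer it is pinned, added to the
 * sd_log_le_buf list for the next log flush, and the owning glock is
 * flagged GLF_LFLUSH/GLF_DIRTY. Repeat additions are no-ops thanks to
 * the list_empty() checks.
 */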
static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr;

	lock_buffer(bd->bd_bh);
	gfs2_log_lock(sdp);
	if (!list_empty(&bd->bd_list_tr))
		goto out;
	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_buf++;
	list_add(&bd->bd_list_tr, &tr->tr_list_buf);
	if (!list_empty(&le->le_list))
		goto out;
	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	gfs2_meta_check(sdp, bd->bd_bh);
	gfs2_pin(sdp, bd->bd_bh);
	sdp->sd_log_num_buf++;
	list_add(&le->le_list, &sdp->sd_log_le_buf);
	tr->tr_num_buf_new++;
out:
	gfs2_log_unlock(sdp);
	unlock_buffer(bd->bd_bh);
}

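/*
 * buf_lo_incore_commit - detach buffers from a committed transaction
 *
 * Empties the transaction's tr_list_buf; the buffers themselves stay
 * on the sd_log_le_buf list until the log is flushed.
 */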
static void buf_lo_incore_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &tr->tr_list_buf;
	struct gfs2_bufdata *bd;

	gfs2_log_lock(sdp);
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list_tr);
		list_del_init(&bd->bd_list_tr);
		tr->tr_num_buf--;
	}
	gfs2_log_unlock(sdp);
	gfs2_assert_warn(sdp, !tr->tr_num_buf);
}

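/*
 * buf_lo_before_commit - write pinned metadata buffers to the log
 *
 * Buffers are written in chunks of at most buf_limit(sdp) blocks, each
 * chunk preceded by a metadata log descriptor listing the in-place
 * block number of every buffer in it.
 */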
static void buf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct buffer_head *bh;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	unsigned int total;
	unsigned int limit;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	limit = buf_limit(sdp);
	/* for 4k blocks, limit = 503 */

	gfs2_log_lock(sdp);
	total = sdp->sd_log_num_buf;
	bd1 = bd2 = list_prepare_entry(bd1, &sdp->sd_log_le_buf, bd_le.le_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_METADATA);
		gfs2_log_lock(sdp);
		ld = bh_log_desc(bh);
		ptr = bh_log_ptr(bh);
		ld->ld_length = cpu_to_be32(num + 1);
		ld->ld_data1 = cpu_to_be32(num);

		n = 0;
		list_for_each_entry_continue(bd1, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		submit_bh(WRITE, bh);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, &sdp->sd_log_le_buf,
					     bd_le.le_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);
			bh = gfs2_log_fake_buf(sdp, bd2->bd_bh);
			submit_bh(WRITE, bh);
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

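/*
 * buf_lo_after_commit - unpin buffers once the log write has completed
 *
 * Each buffer is removed from the sd_log_le_buf list and unpinned onto
 * the AIL @ai, from where it can later be written back in place.
 */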
static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_buf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_buf--;

		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_buf);
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_blocks = 0;
	sdp->sd_replayed_blocks = 0;
}

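/*
 * buf_lo_scan_elements - replay metadata blocks during journal recovery
 *
 * For every block number listed in a metadata log descriptor, read the
 * logged copy out of the journal and, unless the block has since been
 * revoked, copy it over the in-place block and mark it dirty.
 */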
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else
			mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

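/*
 * revoke_lo_add - queue a revoke for the next log flush
 */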
static void revoke_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_trans *tr;

	tr = current->journal_info;
	tr->tr_touched = 1;
	tr->tr_num_revoke++;
	sdp->sd_log_num_revoke++;
	list_add(&le->le_list, &sdp->sd_log_le_revoke);
}

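/*
 * revoke_lo_before_commit - write the queued revokes to the log
 *
 * A revoke log descriptor block is filled with __be64 block numbers;
 * if the revokes do not all fit in one block, continuation buffers of
 * type GFS2_METATYPE_LB are used.
 */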
static void revoke_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_meta_header *mh;
	struct buffer_head *bh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_le_revoke;
	struct gfs2_bufdata *bd;

	if (!sdp->sd_log_num_revoke)
		return;

	bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE);
	ld = bh_log_desc(bh);
	ld->ld_length = cpu_to_be32(gfs2_struct2blk(sdp, sdp->sd_log_num_revoke,
						    sizeof(u64)));
	ld->ld_data1 = cpu_to_be32(sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {
			submit_bh(WRITE, bh);

			bh = gfs2_log_get_buf(sdp);
			mh = (struct gfs2_meta_header *)bh->b_data;
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(bh->b_data + offset) = cpu_to_be64(bd->bd_blkno);
		kmem_cache_free(gfs2_bufdata_cachep, bd);

		offset += sizeof(u64);
	}

	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	submit_bh(WRITE, bh);
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (pass != 0)
		return;

	sdp->sd_found_revokes = 0;
	sdp->sd_replay_tail = head->lh_tail;
}

static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(sdp, blkno, start);
			if (error < 0)
				return error;
			else if (error)
				sdp->sd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(sdp);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, sdp->sd_found_revokes);

	gfs2_revoke_clean(sdp);
}

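/*
 * rg_lo_add - add a resource group to the current transaction
 *
 * Takes a hold on the rgrp's bitmap buffers (gfs2_rgrp_bh_hold) and
 * puts it on the sd_log_le_rg list; the hold is dropped again in
 * rg_lo_after_commit().
 */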
static void rg_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_rgrpd *rgd;
	struct gfs2_trans *tr = current->journal_info;

	tr->tr_touched = 1;

	rgd = container_of(le, struct gfs2_rgrpd, rd_le);

	gfs2_log_lock(sdp);
	if (!list_empty(&le->le_list)) {
		gfs2_log_unlock(sdp);
		return;
	}
	gfs2_rgrp_bh_hold(rgd);
	sdp->sd_log_num_rg++;
	list_add(&le->le_list, &sdp->sd_log_le_rg);
	gfs2_log_unlock(sdp);
}

static void rg_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_rg;
	struct gfs2_rgrpd *rgd;

	while (!list_empty(head)) {
		rgd = list_entry(head->next, struct gfs2_rgrpd, rd_le.le_list);
		list_del_init(&rgd->rd_le.le_list);
		sdp->sd_log_num_rg--;

		gfs2_rgrp_repolish_clones(rgd);
		gfs2_rgrp_bh_put(rgd);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_rg);
}

/**
 * databuf_lo_add - Add a databuf to the transaction.
 *
 * This is used in two distinct cases:
 * i) In ordered write mode
 *    We put the data buffer on a list so that we can ensure that it's
 *    synced to disk at the right time
 * ii) In journaled data mode
 *    We need to journal the data block in the same way as metadata in
 *    the functions above. The difference is that here we have a tag
 *    which is two __be64's being the block number (as per metadata)
 *    and a flag which says whether the data block needs escaping or
 *    not. This means we need a new log entry for each 251 or so data
 *    blocks, which isn't an enormous overhead but twice as much as
 *    for normal metadata blocks.
 */
static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le)
{
	struct gfs2_bufdata *bd = container_of(le, struct gfs2_bufdata, bd_le);
	struct gfs2_trans *tr = current->journal_info;
	struct address_space *mapping = bd->bd_bh->b_page->mapping;
	struct gfs2_inode *ip = GFS2_I(mapping->host);

	lock_buffer(bd->bd_bh);
	gfs2_log_lock(sdp);
	if (tr) {
		if (!list_empty(&bd->bd_list_tr))
			goto out;
		tr->tr_touched = 1;
		if (gfs2_is_jdata(ip)) {
			tr->tr_num_buf++;
			list_add(&bd->bd_list_tr, &tr->tr_list_buf);
		}
	}
	if (!list_empty(&le->le_list))
		goto out;

	set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags);
	if (gfs2_is_jdata(ip)) {
		gfs2_pin(sdp, bd->bd_bh);
		tr->tr_num_databuf_new++;
		sdp->sd_log_num_databuf++;
		list_add(&le->le_list, &sdp->sd_log_le_databuf);
	} else {
		list_add(&le->le_list, &sdp->sd_log_le_ordered);
	}
out:
	gfs2_log_unlock(sdp);
	unlock_buffer(bd->bd_bh);
}

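/*
 * gfs2_check_magic - flag data blocks that need escaping
 *
 * A journaled data block that begins with GFS2_MAGIC is "escaped"
 * before being written to the log: gfs2_write_blocks() zeroes its
 * first word and databuf_lo_scan_elements() restores it on replay.
 * This helper sets the buffer's escaped flag to request that.
 */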
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page, KM_USER0);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr, KM_USER0);
}

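/*
 * gfs2_write_blocks - write one jdata log descriptor and its data blocks
 *
 * @bh holds the descriptor (may be NULL if there is nothing to write),
 * @list the buffers it describes, which are moved to @done as they are
 * submitted, and @n the number of tags in the descriptor. Escaped
 * buffers are copied into a fresh log buffer with their first word
 * zeroed; the rest are submitted via gfs2_log_fake_buf().
 */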
static void gfs2_write_blocks(struct gfs2_sbd *sdp, struct buffer_head *bh,
			      struct list_head *list, struct list_head *done,
			      unsigned int n)
{
	struct buffer_head *bh1;
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd;
	__be64 *ptr;

	if (!bh)
		return;

	ld = bh_log_desc(bh);
	ld->ld_length = cpu_to_be32(n + 1);
	ld->ld_data1 = cpu_to_be32(n);

	ptr = bh_log_ptr(bh);

	get_bh(bh);
	submit_bh(WRITE, bh);
	gfs2_log_lock(sdp);
	while(!list_empty(list)) {
		bd = list_entry(list->next, struct gfs2_bufdata, bd_le.le_list);
		list_move_tail(&bd->bd_le.le_list, done);
		get_bh(bd->bd_bh);
		while (be64_to_cpu(*ptr) != bd->bd_bh->b_blocknr) {
			gfs2_log_incr_head(sdp);
			ptr += 2;
		}
		gfs2_log_unlock(sdp);
		lock_buffer(bd->bd_bh);
		if (buffer_escaped(bd->bd_bh)) {
			void *kaddr;
			bh1 = gfs2_log_get_buf(sdp);
			kaddr = kmap_atomic(bd->bd_bh->b_page, KM_USER0);
			memcpy(bh1->b_data, kaddr + bh_offset(bd->bd_bh),
			       bh1->b_size);
			kunmap_atomic(kaddr, KM_USER0);
			*(__be32 *)bh1->b_data = 0;
			clear_buffer_escaped(bd->bd_bh);
			unlock_buffer(bd->bd_bh);
			brelse(bd->bd_bh);
		} else {
			bh1 = gfs2_log_fake_buf(sdp, bd->bd_bh);
		}
		submit_bh(WRITE, bh1);
		gfs2_log_lock(sdp);
		ptr += 2;
	}
	gfs2_log_unlock(sdp);
	brelse(bh);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 *
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp)
{
	struct gfs2_bufdata *bd = NULL;
	struct buffer_head *bh = NULL;
	unsigned int n = 0;
	__be64 *ptr = NULL, *end = NULL;
	LIST_HEAD(processed);
	LIST_HEAD(in_progress);

	gfs2_log_lock(sdp);
	while (!list_empty(&sdp->sd_log_le_databuf)) {
		if (ptr == end) {
			gfs2_log_unlock(sdp);
			gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
			n = 0;
			bh = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_JDATA);
			ptr = bh_log_ptr(bh);
			end = bh_ptr_end(bh) - 1;
			gfs2_log_lock(sdp);
			continue;
		}
		bd = list_entry(sdp->sd_log_le_databuf.next, struct gfs2_bufdata, bd_le.le_list);
		list_move_tail(&bd->bd_le.le_list, &in_progress);
		gfs2_check_magic(bd->bd_bh);
		*ptr++ = cpu_to_be64(bd->bd_bh->b_blocknr);
		*ptr++ = cpu_to_be64(buffer_escaped(bd->bd_bh) ? 1 : 0);
		n++;
	}
	gfs2_log_unlock(sdp);
	gfs2_write_blocks(sdp, bh, &in_progress, &processed, n);
	gfs2_log_lock(sdp);
	list_splice(&processed, &sdp->sd_log_le_databuf);
	gfs2_log_unlock(sdp);
}

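/*
 * databuf_lo_scan_elements - replay journaled data blocks during recovery
 *
 * Works like buf_lo_scan_elements(), but each tag carries an extra
 * "escaped" flag; when it is set, the block's first word is restored
 * to GFS2_MAGIC after being copied out of the journal.
 */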
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, unsigned int start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(sdp, &start);
	for (; blks; gfs2_replay_incr_blk(sdp, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		sdp->sd_found_blocks++;

		if (gfs2_revoke_check(sdp, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);
		if (error)
			break;

		sdp->sd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, sdp->sd_replayed_blocks, sdp->sd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
{
	struct list_head *head = &sdp->sd_log_le_databuf;
	struct gfs2_bufdata *bd;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_le.le_list);
		list_del_init(&bd->bd_le.le_list);
		sdp->sd_log_num_databuf--;
		gfs2_unpin(sdp, bd->bd_bh, ai);
	}
	gfs2_assert_warn(sdp, !sdp->sd_log_num_databuf);
}

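/*
 * The log operation vectors below tie the functions above to the four
 * log element types: metadata buffers, revokes, resource groups and
 * data buffers. gfs2_log_ops is a NULL-terminated list of all of them.
 */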
const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_add = buf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_add = revoke_lo_add,
	.lo_before_commit = revoke_lo_before_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

const struct gfs2_log_operations gfs2_rg_lops = {
	.lo_add = rg_lo_add,
	.lo_after_commit = rg_lo_after_commit,
	.lo_name = "rg",
};

const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_add = databuf_lo_add,
	.lo_incore_commit = buf_lo_incore_commit,
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_rg_lops,
	&gfs2_revoke_lops,
	NULL,
};