kmem: add slab-specific documentation about the kmem controller
[linux/fpc-iii.git] / fs / jbd2 / revoke.c
blobf30b80b4ce8bef98cab621bf731e13682661ca6d
1 /*
2 * linux/fs/jbd2/revoke.c
4 * Written by Stephen C. Tweedie <sct@redhat.com>, 2000
6 * Copyright 2000 Red Hat corp --- All Rights Reserved
8 * This file is part of the Linux kernel and is made available under
9 * the terms of the GNU General Public License, version 2, or at your
10 * option, any later version, incorporated herein by reference.
12 * Journal revoke routines for the generic filesystem journaling code;
13 * part of the ext2fs journaling system.
15 * Revoke is the mechanism used to prevent old log records for deleted
16 * metadata from being replayed on top of newer data using the same
17 * blocks. The revoke mechanism is used in two separate places:
19 * + Commit: during commit we write the entire list of the current
20 * transaction's revoked blocks to the journal
22 * + Recovery: during recovery we record the transaction ID of all
23 * revoked blocks. If there are multiple revoke records in the log
24 * for a single block, only the last one counts, and if there is a log
25 * entry for a block beyond the last revoke, then that log entry still
26 * gets replayed.
28 * We can get interactions between revokes and new log data within a
29 * single transaction:
31 * Block is revoked and then journaled:
32 * The desired end result is the journaling of the new block, so we
33 * cancel the revoke before the transaction commits.
35 * Block is journaled and then revoked:
36 * The revoke must take precedence over the write of the block, so we
37 * need either to cancel the journal entry or to write the revoke
38 * later in the log than the log block. In this case, we choose the
39 * latter: journaling a block cancels any revoke record for that block
40 * in the current transaction, so any revoke for that block in the
41 * transaction must have happened after the block was journaled and so
42 * the revoke must take precedence.
44 * Block is revoked and then written as data:
45 * The data write is allowed to succeed, but the revoke is _not_
46 * cancelled. We still need to prevent old log records from
47 * overwriting the new data. We don't even need to clear the revoke
48 * bit here.
50 * We cache revoke status of a buffer in the current transaction in b_states
51 * bits. As the name says, revokevalid flag indicates that the cached revoke
52 * status of a buffer is valid and we can rely on the cached status.
54 * Revoke information on buffers is a tri-state value:
56 * RevokeValid clear: no cached revoke status, need to look it up
57 * RevokeValid set, Revoked clear:
58 * buffer has not been revoked, and cancel_revoke
59 * need do nothing.
60 * RevokeValid set, Revoked set:
61 * buffer has been revoked.
63 * Locking rules:
64 * We keep two hash tables of revoke records. One hashtable belongs to the
65 * running transaction (is pointed to by journal->j_revoke), the other one
66 * belongs to the committing transaction. Accesses to the second hash table
67 * happen only from the kjournald and no other thread touches this table. Also
68 * journal_switch_revoke_table() which switches which hashtable belongs to the
69 * running and which to the committing transaction is called only from
70 * kjournald. Therefore we need no locks when accessing the hashtable belonging
71 * to the committing transaction.
73 * All users operating on the hash table belonging to the running transaction
74 * have a handle to the transaction. Therefore they are safe from kjournald
75 * switching hash tables under them. For operations on the lists of entries in
76 * the hash table j_revoke_lock is used.
78 * Finally, also replay code uses the hash tables but at this moment no one else
79 * can touch them (filesystem isn't mounted yet) and hence no locking is
80 * needed.
#ifndef __KERNEL__
#include "jfs_user.h"
#else
#include <linux/time.h>
#include <linux/fs.h>
#include <linux/jbd2.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/bio.h>
#endif
#include <linux/log2.h>
#include <linux/hash.h>
/* Slab caches for revoke records and for the per-journal revoke tables. */
static struct kmem_cache *jbd2_revoke_record_cache;
static struct kmem_cache *jbd2_revoke_table_cache;
100 /* Each revoke record represents one single revoked block. During
101 journal replay, this involves recording the transaction ID of the
102 last transaction to revoke this block. */
104 struct jbd2_revoke_record_s
106 struct list_head hash;
107 tid_t sequence; /* Used for recovery only */
108 unsigned long long blocknr;
/* The revoke table is just a simple hash table of revoke records. */
struct jbd2_revoke_table_s
{
	/* It is conceivable that we might want a larger hash table
	 * for recovery.  Must be a power of two. */
	int		  hash_size;	/* number of buckets */
	int		  hash_shift;	/* log2(hash_size) */
	struct list_head *hash_table;	/* array of hash_size list heads */
};
#ifdef __KERNEL__
/* Commit-time helpers, defined later in this file. */
static void write_one_revoke_record(journal_t *journal,
				    transaction_t *transaction,
				    struct journal_head **descriptorp,
				    int *offsetp,
				    struct jbd2_revoke_record_s *record,
				    int write_op);
static void flush_descriptor(journal_t *journal,
			     struct journal_head *descriptor,
			     int offset, int write_op);
#endif
130 /* Utility functions to maintain the revoke table */
132 /* Borrowed from buffer.c: this is a tried and tested block hash function */
133 static inline int hash(journal_t *journal, unsigned long long block)
135 struct jbd2_revoke_table_s *table = journal->j_revoke;
136 int hash_shift = table->hash_shift;
137 int hash = (int)block ^ (int)((block >> 31) >> 1);
139 return ((hash << (hash_shift - 6)) ^
140 (hash >> 13) ^
141 (hash << (hash_shift - 12))) & (table->hash_size - 1);
144 static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr,
145 tid_t seq)
147 struct list_head *hash_list;
148 struct jbd2_revoke_record_s *record;
150 repeat:
151 record = kmem_cache_alloc(jbd2_revoke_record_cache, GFP_NOFS);
152 if (!record)
153 goto oom;
155 record->sequence = seq;
156 record->blocknr = blocknr;
157 hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
158 spin_lock(&journal->j_revoke_lock);
159 list_add(&record->hash, hash_list);
160 spin_unlock(&journal->j_revoke_lock);
161 return 0;
163 oom:
164 if (!journal_oom_retry)
165 return -ENOMEM;
166 jbd_debug(1, "ENOMEM in %s, retrying\n", __func__);
167 yield();
168 goto repeat;
171 /* Find a revoke record in the journal's hash table. */
173 static struct jbd2_revoke_record_s *find_revoke_record(journal_t *journal,
174 unsigned long long blocknr)
176 struct list_head *hash_list;
177 struct jbd2_revoke_record_s *record;
179 hash_list = &journal->j_revoke->hash_table[hash(journal, blocknr)];
181 spin_lock(&journal->j_revoke_lock);
182 record = (struct jbd2_revoke_record_s *) hash_list->next;
183 while (&(record->hash) != hash_list) {
184 if (record->blocknr == blocknr) {
185 spin_unlock(&journal->j_revoke_lock);
186 return record;
188 record = (struct jbd2_revoke_record_s *) record->hash.next;
190 spin_unlock(&journal->j_revoke_lock);
191 return NULL;
194 void jbd2_journal_destroy_revoke_caches(void)
196 if (jbd2_revoke_record_cache) {
197 kmem_cache_destroy(jbd2_revoke_record_cache);
198 jbd2_revoke_record_cache = NULL;
200 if (jbd2_revoke_table_cache) {
201 kmem_cache_destroy(jbd2_revoke_table_cache);
202 jbd2_revoke_table_cache = NULL;
206 int __init jbd2_journal_init_revoke_caches(void)
208 J_ASSERT(!jbd2_revoke_record_cache);
209 J_ASSERT(!jbd2_revoke_table_cache);
211 jbd2_revoke_record_cache = KMEM_CACHE(jbd2_revoke_record_s,
212 SLAB_HWCACHE_ALIGN|SLAB_TEMPORARY);
213 if (!jbd2_revoke_record_cache)
214 goto record_cache_failure;
216 jbd2_revoke_table_cache = KMEM_CACHE(jbd2_revoke_table_s,
217 SLAB_TEMPORARY);
218 if (!jbd2_revoke_table_cache)
219 goto table_cache_failure;
220 return 0;
221 table_cache_failure:
222 jbd2_journal_destroy_revoke_caches();
223 record_cache_failure:
224 return -ENOMEM;
227 static struct jbd2_revoke_table_s *jbd2_journal_init_revoke_table(int hash_size)
229 int shift = 0;
230 int tmp = hash_size;
231 struct jbd2_revoke_table_s *table;
233 table = kmem_cache_alloc(jbd2_revoke_table_cache, GFP_KERNEL);
234 if (!table)
235 goto out;
237 while((tmp >>= 1UL) != 0UL)
238 shift++;
240 table->hash_size = hash_size;
241 table->hash_shift = shift;
242 table->hash_table =
243 kmalloc(hash_size * sizeof(struct list_head), GFP_KERNEL);
244 if (!table->hash_table) {
245 kmem_cache_free(jbd2_revoke_table_cache, table);
246 table = NULL;
247 goto out;
250 for (tmp = 0; tmp < hash_size; tmp++)
251 INIT_LIST_HEAD(&table->hash_table[tmp]);
253 out:
254 return table;
257 static void jbd2_journal_destroy_revoke_table(struct jbd2_revoke_table_s *table)
259 int i;
260 struct list_head *hash_list;
262 for (i = 0; i < table->hash_size; i++) {
263 hash_list = &table->hash_table[i];
264 J_ASSERT(list_empty(hash_list));
267 kfree(table->hash_table);
268 kmem_cache_free(jbd2_revoke_table_cache, table);
271 /* Initialise the revoke table for a given journal to a given size. */
272 int jbd2_journal_init_revoke(journal_t *journal, int hash_size)
274 J_ASSERT(journal->j_revoke_table[0] == NULL);
275 J_ASSERT(is_power_of_2(hash_size));
277 journal->j_revoke_table[0] = jbd2_journal_init_revoke_table(hash_size);
278 if (!journal->j_revoke_table[0])
279 goto fail0;
281 journal->j_revoke_table[1] = jbd2_journal_init_revoke_table(hash_size);
282 if (!journal->j_revoke_table[1])
283 goto fail1;
285 journal->j_revoke = journal->j_revoke_table[1];
287 spin_lock_init(&journal->j_revoke_lock);
289 return 0;
291 fail1:
292 jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
293 fail0:
294 return -ENOMEM;
297 /* Destroy a journal's revoke table. The table must already be empty! */
298 void jbd2_journal_destroy_revoke(journal_t *journal)
300 journal->j_revoke = NULL;
301 if (journal->j_revoke_table[0])
302 jbd2_journal_destroy_revoke_table(journal->j_revoke_table[0]);
303 if (journal->j_revoke_table[1])
304 jbd2_journal_destroy_revoke_table(journal->j_revoke_table[1]);
308 #ifdef __KERNEL__
311 * jbd2_journal_revoke: revoke a given buffer_head from the journal. This
312 * prevents the block from being replayed during recovery if we take a
313 * crash after this current transaction commits. Any subsequent
314 * metadata writes of the buffer in this transaction cancel the
315 * revoke.
317 * Note that this call may block --- it is up to the caller to make
318 * sure that there are no further calls to journal_write_metadata
319 * before the revoke is complete. In ext3, this implies calling the
320 * revoke before clearing the block bitmap when we are deleting
321 * metadata.
323 * Revoke performs a jbd2_journal_forget on any buffer_head passed in as a
324 * parameter, but does _not_ forget the buffer_head if the bh was only
325 * found implicitly.
327 * bh_in may not be a journalled buffer - it may have come off
328 * the hash tables without an attached journal_head.
330 * If bh_in is non-zero, jbd2_journal_revoke() will decrement its b_count
331 * by one.
334 int jbd2_journal_revoke(handle_t *handle, unsigned long long blocknr,
335 struct buffer_head *bh_in)
337 struct buffer_head *bh = NULL;
338 journal_t *journal;
339 struct block_device *bdev;
340 int err;
342 might_sleep();
343 if (bh_in)
344 BUFFER_TRACE(bh_in, "enter");
346 journal = handle->h_transaction->t_journal;
347 if (!jbd2_journal_set_features(journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)){
348 J_ASSERT (!"Cannot set revoke feature!");
349 return -EINVAL;
352 bdev = journal->j_fs_dev;
353 bh = bh_in;
355 if (!bh) {
356 bh = __find_get_block(bdev, blocknr, journal->j_blocksize);
357 if (bh)
358 BUFFER_TRACE(bh, "found on hash");
360 #ifdef JBD2_EXPENSIVE_CHECKING
361 else {
362 struct buffer_head *bh2;
364 /* If there is a different buffer_head lying around in
365 * memory anywhere... */
366 bh2 = __find_get_block(bdev, blocknr, journal->j_blocksize);
367 if (bh2) {
368 /* ... and it has RevokeValid status... */
369 if (bh2 != bh && buffer_revokevalid(bh2))
370 /* ...then it better be revoked too,
371 * since it's illegal to create a revoke
372 * record against a buffer_head which is
373 * not marked revoked --- that would
374 * risk missing a subsequent revoke
375 * cancel. */
376 J_ASSERT_BH(bh2, buffer_revoked(bh2));
377 put_bh(bh2);
380 #endif
382 /* We really ought not ever to revoke twice in a row without
383 first having the revoke cancelled: it's illegal to free a
384 block twice without allocating it in between! */
385 if (bh) {
386 if (!J_EXPECT_BH(bh, !buffer_revoked(bh),
387 "inconsistent data on disk")) {
388 if (!bh_in)
389 brelse(bh);
390 return -EIO;
392 set_buffer_revoked(bh);
393 set_buffer_revokevalid(bh);
394 if (bh_in) {
395 BUFFER_TRACE(bh_in, "call jbd2_journal_forget");
396 jbd2_journal_forget(handle, bh_in);
397 } else {
398 BUFFER_TRACE(bh, "call brelse");
399 __brelse(bh);
403 jbd_debug(2, "insert revoke for block %llu, bh_in=%p\n",blocknr, bh_in);
404 err = insert_revoke_hash(journal, blocknr,
405 handle->h_transaction->t_tid);
406 BUFFER_TRACE(bh_in, "exit");
407 return err;
411 * Cancel an outstanding revoke. For use only internally by the
412 * journaling code (called from jbd2_journal_get_write_access).
414 * We trust buffer_revoked() on the buffer if the buffer is already
415 * being journaled: if there is no revoke pending on the buffer, then we
416 * don't do anything here.
418 * This would break if it were possible for a buffer to be revoked and
419 * discarded, and then reallocated within the same transaction. In such
420 * a case we would have lost the revoked bit, but when we arrived here
421 * the second time we would still have a pending revoke to cancel. So,
422 * do not trust the Revoked bit on buffers unless RevokeValid is also
423 * set.
425 int jbd2_journal_cancel_revoke(handle_t *handle, struct journal_head *jh)
427 struct jbd2_revoke_record_s *record;
428 journal_t *journal = handle->h_transaction->t_journal;
429 int need_cancel;
430 int did_revoke = 0; /* akpm: debug */
431 struct buffer_head *bh = jh2bh(jh);
433 jbd_debug(4, "journal_head %p, cancelling revoke\n", jh);
435 /* Is the existing Revoke bit valid? If so, we trust it, and
436 * only perform the full cancel if the revoke bit is set. If
437 * not, we can't trust the revoke bit, and we need to do the
438 * full search for a revoke record. */
439 if (test_set_buffer_revokevalid(bh)) {
440 need_cancel = test_clear_buffer_revoked(bh);
441 } else {
442 need_cancel = 1;
443 clear_buffer_revoked(bh);
446 if (need_cancel) {
447 record = find_revoke_record(journal, bh->b_blocknr);
448 if (record) {
449 jbd_debug(4, "cancelled existing revoke on "
450 "blocknr %llu\n", (unsigned long long)bh->b_blocknr);
451 spin_lock(&journal->j_revoke_lock);
452 list_del(&record->hash);
453 spin_unlock(&journal->j_revoke_lock);
454 kmem_cache_free(jbd2_revoke_record_cache, record);
455 did_revoke = 1;
459 #ifdef JBD2_EXPENSIVE_CHECKING
460 /* There better not be one left behind by now! */
461 record = find_revoke_record(journal, bh->b_blocknr);
462 J_ASSERT_JH(jh, record == NULL);
463 #endif
465 /* Finally, have we just cleared revoke on an unhashed
466 * buffer_head? If so, we'd better make sure we clear the
467 * revoked status on any hashed alias too, otherwise the revoke
468 * state machine will get very upset later on. */
469 if (need_cancel) {
470 struct buffer_head *bh2;
471 bh2 = __find_get_block(bh->b_bdev, bh->b_blocknr, bh->b_size);
472 if (bh2) {
473 if (bh2 != bh)
474 clear_buffer_revoked(bh2);
475 __brelse(bh2);
478 return did_revoke;
482 * journal_clear_revoked_flag clears revoked flag of buffers in
483 * revoke table to reflect there is no revoked buffers in the next
484 * transaction which is going to be started.
486 void jbd2_clear_buffer_revoked_flags(journal_t *journal)
488 struct jbd2_revoke_table_s *revoke = journal->j_revoke;
489 int i = 0;
491 for (i = 0; i < revoke->hash_size; i++) {
492 struct list_head *hash_list;
493 struct list_head *list_entry;
494 hash_list = &revoke->hash_table[i];
496 list_for_each(list_entry, hash_list) {
497 struct jbd2_revoke_record_s *record;
498 struct buffer_head *bh;
499 record = (struct jbd2_revoke_record_s *)list_entry;
500 bh = __find_get_block(journal->j_fs_dev,
501 record->blocknr,
502 journal->j_blocksize);
503 if (bh) {
504 clear_buffer_revoked(bh);
505 __brelse(bh);
511 /* journal_switch_revoke table select j_revoke for next transaction
512 * we do not want to suspend any processing until all revokes are
513 * written -bzzz
515 void jbd2_journal_switch_revoke_table(journal_t *journal)
517 int i;
519 if (journal->j_revoke == journal->j_revoke_table[0])
520 journal->j_revoke = journal->j_revoke_table[1];
521 else
522 journal->j_revoke = journal->j_revoke_table[0];
524 for (i = 0; i < journal->j_revoke->hash_size; i++)
525 INIT_LIST_HEAD(&journal->j_revoke->hash_table[i]);
529 * Write revoke records to the journal for all entries in the current
530 * revoke hash, deleting the entries as we go.
532 void jbd2_journal_write_revoke_records(journal_t *journal,
533 transaction_t *transaction,
534 int write_op)
536 struct journal_head *descriptor;
537 struct jbd2_revoke_record_s *record;
538 struct jbd2_revoke_table_s *revoke;
539 struct list_head *hash_list;
540 int i, offset, count;
542 descriptor = NULL;
543 offset = 0;
544 count = 0;
546 /* select revoke table for committing transaction */
547 revoke = journal->j_revoke == journal->j_revoke_table[0] ?
548 journal->j_revoke_table[1] : journal->j_revoke_table[0];
550 for (i = 0; i < revoke->hash_size; i++) {
551 hash_list = &revoke->hash_table[i];
553 while (!list_empty(hash_list)) {
554 record = (struct jbd2_revoke_record_s *)
555 hash_list->next;
556 write_one_revoke_record(journal, transaction,
557 &descriptor, &offset,
558 record, write_op);
559 count++;
560 list_del(&record->hash);
561 kmem_cache_free(jbd2_revoke_record_cache, record);
564 if (descriptor)
565 flush_descriptor(journal, descriptor, offset, write_op);
566 jbd_debug(1, "Wrote %d revoke records\n", count);
570 * Write out one revoke record. We need to create a new descriptor
571 * block if the old one is full or if we have not already created one.
574 static void write_one_revoke_record(journal_t *journal,
575 transaction_t *transaction,
576 struct journal_head **descriptorp,
577 int *offsetp,
578 struct jbd2_revoke_record_s *record,
579 int write_op)
581 int csum_size = 0;
582 struct journal_head *descriptor;
583 int offset;
584 journal_header_t *header;
586 /* If we are already aborting, this all becomes a noop. We
587 still need to go round the loop in
588 jbd2_journal_write_revoke_records in order to free all of the
589 revoke records: only the IO to the journal is omitted. */
590 if (is_journal_aborted(journal))
591 return;
593 descriptor = *descriptorp;
594 offset = *offsetp;
596 /* Do we need to leave space at the end for a checksum? */
597 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
598 csum_size = sizeof(struct jbd2_journal_revoke_tail);
600 /* Make sure we have a descriptor with space left for the record */
601 if (descriptor) {
602 if (offset >= journal->j_blocksize - csum_size) {
603 flush_descriptor(journal, descriptor, offset, write_op);
604 descriptor = NULL;
608 if (!descriptor) {
609 descriptor = jbd2_journal_get_descriptor_buffer(journal);
610 if (!descriptor)
611 return;
612 header = (journal_header_t *) &jh2bh(descriptor)->b_data[0];
613 header->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
614 header->h_blocktype = cpu_to_be32(JBD2_REVOKE_BLOCK);
615 header->h_sequence = cpu_to_be32(transaction->t_tid);
617 /* Record it so that we can wait for IO completion later */
618 JBUFFER_TRACE(descriptor, "file as BJ_LogCtl");
619 jbd2_journal_file_buffer(descriptor, transaction, BJ_LogCtl);
621 offset = sizeof(jbd2_journal_revoke_header_t);
622 *descriptorp = descriptor;
625 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
626 * ((__be64 *)(&jh2bh(descriptor)->b_data[offset])) =
627 cpu_to_be64(record->blocknr);
628 offset += 8;
630 } else {
631 * ((__be32 *)(&jh2bh(descriptor)->b_data[offset])) =
632 cpu_to_be32(record->blocknr);
633 offset += 4;
636 *offsetp = offset;
639 static void jbd2_revoke_csum_set(journal_t *j,
640 struct journal_head *descriptor)
642 struct jbd2_journal_revoke_tail *tail;
643 __u32 csum;
645 if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
646 return;
648 tail = (struct jbd2_journal_revoke_tail *)
649 (jh2bh(descriptor)->b_data + j->j_blocksize -
650 sizeof(struct jbd2_journal_revoke_tail));
651 tail->r_checksum = 0;
652 csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
653 j->j_blocksize);
654 tail->r_checksum = cpu_to_be32(csum);
658 * Flush a revoke descriptor out to the journal. If we are aborting,
659 * this is a noop; otherwise we are generating a buffer which needs to
660 * be waited for during commit, so it has to go onto the appropriate
661 * journal buffer list.
664 static void flush_descriptor(journal_t *journal,
665 struct journal_head *descriptor,
666 int offset, int write_op)
668 jbd2_journal_revoke_header_t *header;
669 struct buffer_head *bh = jh2bh(descriptor);
671 if (is_journal_aborted(journal)) {
672 put_bh(bh);
673 return;
676 header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
677 header->r_count = cpu_to_be32(offset);
678 jbd2_revoke_csum_set(journal, descriptor);
680 set_buffer_jwrite(bh);
681 BUFFER_TRACE(bh, "write");
682 set_buffer_dirty(bh);
683 write_dirty_buffer(bh, write_op);
685 #endif
688 * Revoke support for recovery.
690 * Recovery needs to be able to:
692 * record all revoke records, including the tid of the latest instance
693 * of each revoke in the journal
695 * check whether a given block in a given transaction should be replayed
696 * (ie. has not been revoked by a revoke record in that or a subsequent
697 * transaction)
699 * empty the revoke table after recovery.
703 * First, setting revoke records. We create a new revoke record for
704 * every block ever revoked in the log as we scan it for recovery, and
705 * we update the existing records if we find multiple revokes for a
706 * single block.
709 int jbd2_journal_set_revoke(journal_t *journal,
710 unsigned long long blocknr,
711 tid_t sequence)
713 struct jbd2_revoke_record_s *record;
715 record = find_revoke_record(journal, blocknr);
716 if (record) {
717 /* If we have multiple occurrences, only record the
718 * latest sequence number in the hashed record */
719 if (tid_gt(sequence, record->sequence))
720 record->sequence = sequence;
721 return 0;
723 return insert_revoke_hash(journal, blocknr, sequence);
727 * Test revoke records. For a given block referenced in the log, has
728 * that block been revoked? A revoke record with a given transaction
729 * sequence number revokes all blocks in that transaction and earlier
730 * ones, but later transactions still need replayed.
733 int jbd2_journal_test_revoke(journal_t *journal,
734 unsigned long long blocknr,
735 tid_t sequence)
737 struct jbd2_revoke_record_s *record;
739 record = find_revoke_record(journal, blocknr);
740 if (!record)
741 return 0;
742 if (tid_gt(sequence, record->sequence))
743 return 0;
744 return 1;
748 * Finally, once recovery is over, we need to clear the revoke table so
749 * that it can be reused by the running filesystem.
752 void jbd2_journal_clear_revoke(journal_t *journal)
754 int i;
755 struct list_head *hash_list;
756 struct jbd2_revoke_record_s *record;
757 struct jbd2_revoke_table_s *revoke;
759 revoke = journal->j_revoke;
761 for (i = 0; i < revoke->hash_size; i++) {
762 hash_list = &revoke->hash_table[i];
763 while (!list_empty(hash_list)) {
764 record = (struct jbd2_revoke_record_s*) hash_list->next;
765 list_del(&record->hash);
766 kmem_cache_free(jbd2_revoke_record_cache, record);