1 /*
2 * linux/fs/ext3/balloc.c
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
9 * Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
10 * Big-endian to little-endian byte-swapping/bitmaps by
11 * David S. Miller (davem@caip.rutgers.edu), 1995
14 #include <linux/time.h>
15 #include <linux/capability.h>
16 #include <linux/fs.h>
17 #include <linux/slab.h>
18 #include <linux/jbd.h>
19 #include <linux/ext3_fs.h>
20 #include <linux/ext3_jbd.h>
21 #include <linux/quotaops.h>
22 #include <linux/buffer_head.h>
25 * balloc.c contains the blocks allocation and deallocation routines
29 * The free blocks are managed by bitmaps. A file system contains several
30 * blocks groups. Each group contains 1 bitmap block for blocks, 1 bitmap
31 * block for inodes, N blocks for the inode table and data blocks.
33 * The file system contains group descriptors which are located after the
34 * super block. Each descriptor contains the number of the bitmap block and
35 * the free blocks count in the block. The descriptors are loaded in memory
36 * when a file system is mounted (see ext3_fill_super).
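/*
 * in_range(b, first, len): true when block b lies in the range
 * [first, first + len - 1]. Used below to detect blocks that fall inside
 * the filesystem's metadata areas (bitmaps and inode tables).
 */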
40 #define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
42 /**
43 * ext3_get_group_desc() -- load group descriptor from disk
44 * @sb: super block
45 * @block_group: given block group
46 * @bh: pointer to the buffer head to store the block
47 * group descriptor
49 struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
50 unsigned int block_group,
51 struct buffer_head ** bh)
53 unsigned long group_desc;
54 unsigned long offset;
55 struct ext3_group_desc * desc;
56 struct ext3_sb_info *sbi = EXT3_SB(sb);
58 if (block_group >= sbi->s_groups_count) {
59 ext3_error (sb, "ext3_get_group_desc",
60 "block_group >= groups_count - "
61 "block_group = %d, groups_count = %lu",
62 block_group, sbi->s_groups_count);
64 return NULL;
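/*
 * Read barrier: the s_groups_count check above must not be reordered
 * with the s_group_desc access below; this pairs with the write
 * ordering used by online resize when new groups are added.
 */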
66 smp_rmb();
68 group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
69 offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
70 if (!sbi->s_group_desc[group_desc]) {
71 ext3_error (sb, "ext3_get_group_desc",
72 "Group descriptor not loaded - "
73 "block_group = %d, group_desc = %lu, desc = %lu",
74 block_group, group_desc, offset);
75 return NULL;
78 desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
79 if (bh)
80 *bh = sbi->s_group_desc[group_desc];
81 return desc + offset;
84 static int ext3_valid_block_bitmap(struct super_block *sb,
85 struct ext3_group_desc *desc,
86 unsigned int block_group,
87 struct buffer_head *bh)
89 ext3_grpblk_t offset;
90 ext3_grpblk_t next_zero_bit;
91 ext3_fsblk_t bitmap_blk;
92 ext3_fsblk_t group_first_block;
94 group_first_block = ext3_group_first_block_no(sb, block_group);
96 /* check whether block bitmap block number is set */
97 bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
98 offset = bitmap_blk - group_first_block;
99 if (!ext3_test_bit(offset, bh->b_data))
100 /* bad block bitmap */
101 goto err_out;
103 /* check whether the inode bitmap block number is set */
104 bitmap_blk = le32_to_cpu(desc->bg_inode_bitmap);
105 offset = bitmap_blk - group_first_block;
106 if (!ext3_test_bit(offset, bh->b_data))
107 /* bad block bitmap */
108 goto err_out;
110 /* check whether the inode table block number is set */
111 bitmap_blk = le32_to_cpu(desc->bg_inode_table);
112 offset = bitmap_blk - group_first_block;
113 next_zero_bit = ext3_find_next_zero_bit(bh->b_data,
114 offset + EXT3_SB(sb)->s_itb_per_group,
115 offset);
116 if (next_zero_bit >= offset + EXT3_SB(sb)->s_itb_per_group)
117 /* good bitmap for inode tables */
118 return 1;
120 err_out:
121 ext3_error(sb, __func__,
122 "Invalid block bitmap - "
123 "block_group = %d, block = %lu",
124 block_group, bitmap_blk);
125 return 0;
129 * read_block_bitmap()
130 * @sb: super block
131 * @block_group: given block group
133 * Read the bitmap for a given block_group, and validate that the
134 * bits for the block/inode bitmaps and inode table are set in the bitmap.
136 * Return buffer_head on success or NULL in case of failure.
138 static struct buffer_head *
139 read_block_bitmap(struct super_block *sb, unsigned int block_group)
141 struct ext3_group_desc * desc;
142 struct buffer_head * bh = NULL;
143 ext3_fsblk_t bitmap_blk;
145 desc = ext3_get_group_desc(sb, block_group, NULL);
146 if (!desc)
147 return NULL;
148 bitmap_blk = le32_to_cpu(desc->bg_block_bitmap);
149 bh = sb_getblk(sb, bitmap_blk);
150 if (unlikely(!bh)) {
151 ext3_error(sb, __func__,
152 "Cannot read block bitmap - "
153 "block_group = %d, block_bitmap = %u",
154 block_group, le32_to_cpu(desc->bg_block_bitmap));
155 return NULL;
157 if (likely(bh_uptodate_or_lock(bh)))
158 return bh;
160 if (bh_submit_read(bh) < 0) {
161 brelse(bh);
162 ext3_error(sb, __func__,
163 "Cannot read block bitmap - "
164 "block_group = %d, block_bitmap = %u",
165 block_group, le32_to_cpu(desc->bg_block_bitmap));
166 return NULL;
168 ext3_valid_block_bitmap(sb, desc, block_group, bh);
170 * The file system was mounted not to panic on error; continue with the
171 * corrupt bitmap.
173 return bh;
176 * The reservation window structure operations
177 * --------------------------------------------
178 * Operations include:
179 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
181 * We use a red-black tree to represent per-filesystem reservation
182 * windows.
187 * __rsv_window_dump() -- Dump the filesystem block allocation reservation map
188 * @rb_root: root of per-filesystem reservation rb tree
189 * @verbose: verbose mode
190 * @fn: function which wishes to dump the reservation map
192 * If verbose is turned on, it will print the whole block reservation
193 * windows (start, end). Otherwise, it will only print out the "bad" windows,
194 * those windows that overlap with their immediate neighbors.
196 #if 1
197 static void __rsv_window_dump(struct rb_root *root, int verbose,
198 const char *fn)
200 struct rb_node *n;
201 struct ext3_reserve_window_node *rsv, *prev;
202 int bad;
204 restart:
205 n = rb_first(root);
206 bad = 0;
207 prev = NULL;
209 printk("Block Allocation Reservation Windows Map (%s):\n", fn);
210 while (n) {
211 rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
212 if (verbose)
213 printk("reservation window 0x%p "
214 "start: %lu, end: %lu\n",
215 rsv, rsv->rsv_start, rsv->rsv_end);
216 if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
217 printk("Bad reservation %p (start >= end)\n",
218 rsv);
219 bad = 1;
221 if (prev && prev->rsv_end >= rsv->rsv_start) {
222 printk("Bad reservation %p (prev->end >= start)\n",
223 rsv);
224 bad = 1;
226 if (bad) {
227 if (!verbose) {
228 printk("Restarting reservation walk in verbose mode\n");
229 verbose = 1;
230 goto restart;
233 n = rb_next(n);
234 prev = rsv;
236 printk("Window map complete.\n");
237 BUG_ON(bad);
239 #define rsv_window_dump(root, verbose) \
240 __rsv_window_dump((root), (verbose), __func__)
241 #else
242 #define rsv_window_dump(root, verbose) do {} while (0)
243 #endif
246 * goal_in_my_reservation()
247 * @rsv: inode's reservation window
248 * @grp_goal: given goal block relative to the allocation block group
249 * @group: the current allocation block group
250 * @sb: filesystem super block
252 * Test if the given goal block (group relative) is within the file's
253 * own block reservation window range.
255 * If the reservation window is outside the goal allocation group, return 0;
256 * grp_goal (given goal block) could be -1, which means no specific
257 * goal block. In this case, always return 1.
258 * If the goal block is within the reservation window, return 1;
259 * otherwise, return 0;
261 static int
262 goal_in_my_reservation(struct ext3_reserve_window *rsv, ext3_grpblk_t grp_goal,
263 unsigned int group, struct super_block * sb)
265 ext3_fsblk_t group_first_block, group_last_block;
267 group_first_block = ext3_group_first_block_no(sb, group);
268 group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
270 if ((rsv->_rsv_start > group_last_block) ||
271 (rsv->_rsv_end < group_first_block))
272 return 0;
273 if ((grp_goal >= 0) && ((grp_goal + group_first_block < rsv->_rsv_start)
274 || (grp_goal + group_first_block > rsv->_rsv_end)))
275 return 0;
276 return 1;
280 * search_reserve_window()
281 * @rb_root: root of reservation tree
282 * @goal: target allocation block
284 * Find the reserved window which includes the goal, or the previous one
285 * if the goal is not in any window.
286 * Returns NULL if there are no windows or if all windows start after the goal.
288 static struct ext3_reserve_window_node *
289 search_reserve_window(struct rb_root *root, ext3_fsblk_t goal)
291 struct rb_node *n = root->rb_node;
292 struct ext3_reserve_window_node *rsv;
294 if (!n)
295 return NULL;
297 do {
298 rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
300 if (goal < rsv->rsv_start)
301 n = n->rb_left;
302 else if (goal > rsv->rsv_end)
303 n = n->rb_right;
304 else
305 return rsv;
306 } while (n);
308 * We've fallen off the end of the tree: the goal wasn't inside
309 * any particular node. OK, the previous node must be to one
310 * side of the interval containing the goal. If it's the RHS,
311 * we need to back up one.
313 if (rsv->rsv_start > goal) {
314 n = rb_prev(&rsv->rsv_node);
315 rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
317 return rsv;
321 * ext3_rsv_window_add() -- Insert a window to the block reservation rb tree.
322 * @sb: super block
323 * @rsv: reservation window to add
325 * Must be called with rsv_lock held.
327 void ext3_rsv_window_add(struct super_block *sb,
328 struct ext3_reserve_window_node *rsv)
330 struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
331 struct rb_node *node = &rsv->rsv_node;
332 ext3_fsblk_t start = rsv->rsv_start;
334 struct rb_node ** p = &root->rb_node;
335 struct rb_node * parent = NULL;
336 struct ext3_reserve_window_node *this;
338 while (*p)
340 parent = *p;
341 this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node);
343 if (start < this->rsv_start)
344 p = &(*p)->rb_left;
345 else if (start > this->rsv_end)
346 p = &(*p)->rb_right;
347 else {
348 rsv_window_dump(root, 1);
349 BUG();
353 rb_link_node(node, parent, p);
354 rb_insert_color(node, root);
358 * ext3_rsv_window_remove() -- unlink a window from the reservation rb tree
359 * @sb: super block
360 * @rsv: reservation window to remove
362 * Mark the block reservation window as not allocated, and unlink it
363 * from the filesystem reservation window rb tree. Must be called with
364 * rsv_lock held.
366 static void rsv_window_remove(struct super_block *sb,
367 struct ext3_reserve_window_node *rsv)
369 rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
370 rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
371 rsv->rsv_alloc_hit = 0;
372 rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
376 * rsv_is_empty() -- Check whether the reservation window is unallocated.
377 * @rsv: given reservation window to check
379 * returns 1 if the end block is EXT3_RESERVE_WINDOW_NOT_ALLOCATED.
381 static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
383 /* a valid reservation end block can never be 0 */
384 return rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
388 * ext3_init_block_alloc_info()
389 * @inode: file inode structure
391 * Allocate and initialize the reservation window structure, and
392 * finally link the window to the ext3 inode structure
394 * The reservation window structure is only dynamically allocated
395 * and linked to ext3 inode the first time the open file
396 * needs a new block. So, before every ext3_new_block(s) call, for
397 * regular files, we should check whether the reservation window
398 * structure exists or not. In the latter case, this function is called.
399 * Failure to do so will result in block reservation being turned off for that
400 * open file.
402 * This function is called from ext3_get_blocks_handle(), also called
403 * when setting the reservation window size through ioctl before the file
404 * is open for write (needs block allocation).
406 * The caller must hold truncate_mutex when calling this function.
408 void ext3_init_block_alloc_info(struct inode *inode)
410 struct ext3_inode_info *ei = EXT3_I(inode);
411 struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
412 struct super_block *sb = inode->i_sb;
414 block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
415 if (block_i) {
416 struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;
418 rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
419 rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
422 * if filesystem is mounted with NORESERVATION, the goal
423 * reservation window size is set to zero to indicate
424 * block reservation is off
426 if (!test_opt(sb, RESERVATION))
427 rsv->rsv_goal_size = 0;
428 else
429 rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
430 rsv->rsv_alloc_hit = 0;
431 block_i->last_alloc_logical_block = 0;
432 block_i->last_alloc_physical_block = 0;
434 ei->i_block_alloc_info = block_i;
438 * ext3_discard_reservation()
439 * @inode: inode
441 * Discard (free) the block reservation window on the last file close,
442 * on truncate, or at the last iput().
444 * It is being called in three cases:
445 * ext3_release_file(): last writer closes the file
446 * ext3_clear_inode(): last iput(), when nobody links to this file.
447 * ext3_truncate(): when the block indirect map is about to change.
450 void ext3_discard_reservation(struct inode *inode)
452 struct ext3_inode_info *ei = EXT3_I(inode);
453 struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
454 struct ext3_reserve_window_node *rsv;
455 spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;
457 if (!block_i)
458 return;
460 rsv = &block_i->rsv_window_node;
461 if (!rsv_is_empty(&rsv->rsv_window)) {
462 spin_lock(rsv_lock);
463 if (!rsv_is_empty(&rsv->rsv_window))
464 rsv_window_remove(inode->i_sb, rsv);
465 spin_unlock(rsv_lock);
470 * ext3_free_blocks_sb() -- Free given blocks and update quota
471 * @handle: handle to this transaction
472 * @sb: super block
473 * @block: start physical block to free
474 * @count: number of blocks to free
475 * @pdquot_freed_blocks: pointer to quota
477 void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
478 ext3_fsblk_t block, unsigned long count,
479 unsigned long *pdquot_freed_blocks)
481 struct buffer_head *bitmap_bh = NULL;
482 struct buffer_head *gd_bh;
483 unsigned long block_group;
484 ext3_grpblk_t bit;
485 unsigned long i;
486 unsigned long overflow;
487 struct ext3_group_desc * desc;
488 struct ext3_super_block * es;
489 struct ext3_sb_info *sbi;
490 int err = 0, ret;
491 ext3_grpblk_t group_freed;
493 *pdquot_freed_blocks = 0;
494 sbi = EXT3_SB(sb);
495 es = sbi->s_es;
496 if (block < le32_to_cpu(es->s_first_data_block) ||
497 block + count < block ||
498 block + count > le32_to_cpu(es->s_blocks_count)) {
499 ext3_error (sb, "ext3_free_blocks",
500 "Freeing blocks not in datazone - "
501 "block = "E3FSBLK", count = %lu", block, count);
502 goto error_return;
505 ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);
507 do_more:
508 overflow = 0;
509 block_group = (block - le32_to_cpu(es->s_first_data_block)) /
510 EXT3_BLOCKS_PER_GROUP(sb);
511 bit = (block - le32_to_cpu(es->s_first_data_block)) %
512 EXT3_BLOCKS_PER_GROUP(sb);
514 * Check to see if we are freeing blocks across a group
515 * boundary.
517 if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
518 overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
519 count -= overflow;
521 brelse(bitmap_bh);
522 bitmap_bh = read_block_bitmap(sb, block_group);
523 if (!bitmap_bh)
524 goto error_return;
525 desc = ext3_get_group_desc (sb, block_group, &gd_bh);
526 if (!desc)
527 goto error_return;
529 if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
530 in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
531 in_range (block, le32_to_cpu(desc->bg_inode_table),
532 sbi->s_itb_per_group) ||
533 in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
534 sbi->s_itb_per_group)) {
535 ext3_error (sb, "ext3_free_blocks",
536 "Freeing blocks in system zones - "
537 "Block = "E3FSBLK", count = %lu",
538 block, count);
539 goto error_return;
543 * We are about to start releasing blocks in the bitmap,
544 * so we need undo access.
546 /* @@@ check errors */
547 BUFFER_TRACE(bitmap_bh, "getting undo access");
548 err = ext3_journal_get_undo_access(handle, bitmap_bh);
549 if (err)
550 goto error_return;
553 * We are about to modify some metadata. Call the journal APIs
554 * to unshare ->b_data if a currently-committing transaction is
555 * using it
557 BUFFER_TRACE(gd_bh, "get_write_access");
558 err = ext3_journal_get_write_access(handle, gd_bh);
559 if (err)
560 goto error_return;
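/*
 * The buffer-state lock protects the journal head's b_committed_data,
 * which we are about to update bit by bit in the loop below.
 */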
562 jbd_lock_bh_state(bitmap_bh);
564 for (i = 0, group_freed = 0; i < count; i++) {
566 * An HJ special. This is expensive...
568 #ifdef CONFIG_JBD_DEBUG
569 jbd_unlock_bh_state(bitmap_bh);
571 struct buffer_head *debug_bh;
572 debug_bh = sb_find_get_block(sb, block + i);
573 if (debug_bh) {
574 BUFFER_TRACE(debug_bh, "Deleted!");
575 if (!bh2jh(bitmap_bh)->b_committed_data)
576 BUFFER_TRACE(debug_bh,
577 "No commited data in bitmap");
578 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
579 __brelse(debug_bh);
582 jbd_lock_bh_state(bitmap_bh);
583 #endif
584 if (need_resched()) {
585 jbd_unlock_bh_state(bitmap_bh);
586 cond_resched();
587 jbd_lock_bh_state(bitmap_bh);
589 /* @@@ This prevents newly-allocated data from being
590 * freed and then reallocated within the same
591 * transaction.
593 * Ideally we would want to allow that to happen, but to
594 * do so requires making journal_forget() capable of
595 * revoking the queued write of a data block, which
596 * implies blocking on the journal lock. *forget()
597 * cannot block due to truncate races.
599 * Eventually we can fix this by making journal_forget()
600 * return a status indicating whether or not it was able
601 * to revoke the buffer. On successful revoke, it is
602 * safe not to set the allocation bit in the committed
603 * bitmap, because we know that there is no outstanding
604 * activity on the buffer any more and so it is safe to
605 * reallocate it.
607 BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
608 J_ASSERT_BH(bitmap_bh,
609 bh2jh(bitmap_bh)->b_committed_data != NULL);
610 ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
611 bh2jh(bitmap_bh)->b_committed_data);
614 * We clear the bit in the bitmap after setting the committed
615 * data bit, because this is the reverse order to that which
616 * the allocator uses.
618 BUFFER_TRACE(bitmap_bh, "clear bit");
619 if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
620 bit + i, bitmap_bh->b_data)) {
621 jbd_unlock_bh_state(bitmap_bh);
622 ext3_error(sb, __func__,
623 "bit already cleared for block "E3FSBLK,
624 block + i);
625 jbd_lock_bh_state(bitmap_bh);
626 BUFFER_TRACE(bitmap_bh, "bit already cleared");
627 } else {
628 group_freed++;
631 jbd_unlock_bh_state(bitmap_bh);
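/*
 * Update the free-block accounting: the per-group counter in the group
 * descriptor (under the block-group lock) and the filesystem-wide
 * percpu counter.
 */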
633 spin_lock(sb_bgl_lock(sbi, block_group));
634 le16_add_cpu(&desc->bg_free_blocks_count, group_freed);
635 spin_unlock(sb_bgl_lock(sbi, block_group));
636 percpu_counter_add(&sbi->s_freeblocks_counter, count);
638 /* We dirtied the bitmap block */
639 BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
640 err = ext3_journal_dirty_metadata(handle, bitmap_bh);
642 /* And the group descriptor block */
643 BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
644 ret = ext3_journal_dirty_metadata(handle, gd_bh);
645 if (!err) err = ret;
646 *pdquot_freed_blocks += group_freed;
648 if (overflow && !err) {
649 block += count;
650 count = overflow;
651 goto do_more;
654 error_return:
655 brelse(bitmap_bh);
656 ext3_std_error(sb, err);
657 return;
661 * ext3_free_blocks() -- Free given blocks and update quota
662 * @handle: handle for this transaction
663 * @inode: inode
664 * @block: start physical block to free
665 * @count: number of blocks to free
667 void ext3_free_blocks(handle_t *handle, struct inode *inode,
668 ext3_fsblk_t block, unsigned long count)
670 struct super_block * sb;
671 unsigned long dquot_freed_blocks;
673 sb = inode->i_sb;
674 if (!sb) {
675 printk ("ext3_free_blocks: nonexistent device");
676 return;
678 ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
679 if (dquot_freed_blocks)
680 dquot_free_block(inode, dquot_freed_blocks);
681 return;
685 * ext3_test_allocatable()
686 * @nr: block number (group relative) to test
687 * @bh: bufferhead containing the bitmap of the given block group
689 * For ext3 allocations, we must not reuse any blocks which are
690 * allocated in the bitmap buffer's "last committed data" copy. This
691 * prevents deletes from freeing up the page for reuse until we have
692 * committed the delete transaction.
694 * If we didn't do this, then deleting something and reallocating it as
695 * data would allow the old block to be overwritten before the
696 * transaction committed (because we force data to disk before commit).
697 * This would lead to corruption if we crashed between overwriting the
698 * data and committing the delete.
700 * @@@ We may want to make this allocation behaviour conditional on
701 * data-writes at some point, and disable it for metadata allocations or
702 * sync-data inodes.
704 static int ext3_test_allocatable(ext3_grpblk_t nr, struct buffer_head *bh)
706 int ret;
707 struct journal_head *jh = bh2jh(bh);
709 if (ext3_test_bit(nr, bh->b_data))
710 return 0;
712 jbd_lock_bh_state(bh);
713 if (!jh->b_committed_data)
714 ret = 1;
715 else
716 ret = !ext3_test_bit(nr, jh->b_committed_data);
717 jbd_unlock_bh_state(bh);
718 return ret;
722 * bitmap_search_next_usable_block()
723 * @start: the starting block (group relative) of the search
724 * @bh: bufferhead contains the block group bitmap
725 * @maxblocks: the ending block (group relative) of the reservation
727 * The bitmap search --- search forward alternately through the actual
728 * bitmap on disk and the last-committed copy in journal, until we find a
729 * bit free in both bitmaps.
731 static ext3_grpblk_t
732 bitmap_search_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
733 ext3_grpblk_t maxblocks)
735 ext3_grpblk_t next;
736 struct journal_head *jh = bh2jh(bh);
738 while (start < maxblocks) {
739 next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
740 if (next >= maxblocks)
741 return -1;
742 if (ext3_test_allocatable(next, bh))
743 return next;
744 jbd_lock_bh_state(bh);
745 if (jh->b_committed_data)
746 start = ext3_find_next_zero_bit(jh->b_committed_data,
747 maxblocks, next);
748 jbd_unlock_bh_state(bh);
750 return -1;
754 * find_next_usable_block()
755 * @start: the starting block (group relative) to find next
756 * allocatable block in bitmap.
757 * @bh: bufferhead contains the block group bitmap
758 * @maxblocks: the ending block (group relative) for the search
760 * Find an allocatable block in a bitmap. We honor both the bitmap and
761 * its last-committed copy (if that exists), and perform the "most
762 * appropriate allocation" algorithm of looking for a free block near
763 * the initial goal; then for a free byte somewhere in the bitmap; then
764 * for any free bit in the bitmap.
766 static ext3_grpblk_t
767 find_next_usable_block(ext3_grpblk_t start, struct buffer_head *bh,
768 ext3_grpblk_t maxblocks)
770 ext3_grpblk_t here, next;
771 char *p, *r;
773 if (start > 0) {
775 * The goal was occupied; search forward for a free
776 * block within the next XX blocks.
778 * end_goal is more or less random, but it has to be
779 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
780 * next 64-bit boundary is simple..
782 ext3_grpblk_t end_goal = (start + 63) & ~63;
783 if (end_goal > maxblocks)
784 end_goal = maxblocks;
785 here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
786 if (here < end_goal && ext3_test_allocatable(here, bh))
787 return here;
788 ext3_debug("Bit not found near goal\n");
791 here = start;
792 if (here < 0)
793 here = 0;
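/*
 * No free block near the goal: scan for a completely free byte in the
 * bitmap (eight consecutive free blocks), starting from 'here', then
 * convert the byte offset back to a bit number.
 */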
795 p = ((char *)bh->b_data) + (here >> 3);
796 r = memscan(p, 0, ((maxblocks + 7) >> 3) - (here >> 3));
797 next = (r - ((char *)bh->b_data)) << 3;
799 if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
800 return next;
803 * The bitmap search --- search forward alternately through the actual
804 * bitmap and the last-committed copy until we find a bit free in
805 * both
807 here = bitmap_search_next_usable_block(here, bh, maxblocks);
808 return here;
812 * claim_block()
813 * @block: the free block (group relative) to allocate
814 * @bh: the bufferhead containing the block group bitmap
816 * We think we can allocate this block in this bitmap. Try to set the bit.
817 * If that succeeds then check that nobody has allocated and then freed the
818 * block since we saw that it was not marked in b_committed_data. If it _was_
819 * allocated and freed then clear the bit in the bitmap again and return
820 * zero (failure).
822 static inline int
823 claim_block(spinlock_t *lock, ext3_grpblk_t block, struct buffer_head *bh)
825 struct journal_head *jh = bh2jh(bh);
826 int ret;
828 if (ext3_set_bit_atomic(lock, block, bh->b_data))
829 return 0;
830 jbd_lock_bh_state(bh);
831 if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
832 ext3_clear_bit_atomic(lock, block, bh->b_data);
833 ret = 0;
834 } else {
835 ret = 1;
837 jbd_unlock_bh_state(bh);
838 return ret;
842 * ext3_try_to_allocate()
843 * @sb: superblock
844 * @handle: handle to this transaction
845 * @group: given allocation block group
846 * @bitmap_bh: bufferhead holds the block bitmap
847 * @grp_goal: given target block within the group
848 * @count: target number of blocks to allocate
849 * @my_rsv: reservation window
851 * Attempt to allocate blocks within a given range. Set the allocation range
852 * first, then find the first free bit(s) in the bitmap (within the range),
853 * and finally allocate the blocks by claiming the free bits found.
855 * To set the range of this allocation:
856 * if there is a reservation window, only try to allocate block(s) from the
857 * file's own reservation window;
858 * Otherwise, the allocation range starts from the given goal block and ends at
859 * the block group's last block.
861 * If we failed to allocate the desired block then we may end up crossing to a
862 * new bitmap. In that case we must release write access to the old one via
863 * ext3_journal_release_buffer(), else we'll run out of credits.
865 static ext3_grpblk_t
866 ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
867 struct buffer_head *bitmap_bh, ext3_grpblk_t grp_goal,
868 unsigned long *count, struct ext3_reserve_window *my_rsv)
870 ext3_fsblk_t group_first_block;
871 ext3_grpblk_t start, end;
872 unsigned long num = 0;
874 /* we do allocation within the reservation window if we have a window */
875 if (my_rsv) {
876 group_first_block = ext3_group_first_block_no(sb, group);
877 if (my_rsv->_rsv_start >= group_first_block)
878 start = my_rsv->_rsv_start - group_first_block;
879 else
880 /* reservation window crosses group boundary */
881 start = 0;
882 end = my_rsv->_rsv_end - group_first_block + 1;
883 if (end > EXT3_BLOCKS_PER_GROUP(sb))
884 /* reservation window crosses group boundary */
885 end = EXT3_BLOCKS_PER_GROUP(sb);
886 if ((start <= grp_goal) && (grp_goal < end))
887 start = grp_goal;
888 else
889 grp_goal = -1;
890 } else {
891 if (grp_goal > 0)
892 start = grp_goal;
893 else
894 start = 0;
895 end = EXT3_BLOCKS_PER_GROUP(sb);
898 BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));
900 repeat:
901 if (grp_goal < 0 || !ext3_test_allocatable(grp_goal, bitmap_bh)) {
902 grp_goal = find_next_usable_block(start, bitmap_bh, end);
903 if (grp_goal < 0)
904 goto fail_access;
905 if (!my_rsv) {
906 int i;
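/*
 * No reservation window is in use: back up over up to seven
 * immediately preceding free blocks, so the allocation starts
 * at (or near) the beginning of a free run.
 */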
908 for (i = 0; i < 7 && grp_goal > start &&
909 ext3_test_allocatable(grp_goal - 1,
910 bitmap_bh);
911 i++, grp_goal--)
915 start = grp_goal;
917 if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group),
918 grp_goal, bitmap_bh)) {
920 * The block was allocated by another thread, or it was
921 * allocated and then freed by another thread
923 start++;
924 grp_goal++;
925 if (start >= end)
926 goto fail_access;
927 goto repeat;
929 num++;
930 grp_goal++;
931 while (num < *count && grp_goal < end
932 && ext3_test_allocatable(grp_goal, bitmap_bh)
933 && claim_block(sb_bgl_lock(EXT3_SB(sb), group),
934 grp_goal, bitmap_bh)) {
935 num++;
936 grp_goal++;
938 *count = num;
939 return grp_goal - num;
940 fail_access:
941 *count = num;
942 return -1;
946 * find_next_reservable_window():
947 * find a reservable space within the given range.
948 * It does not allocate the reservation window for now:
949 * alloc_new_reservation() will do the work later.
951 * @search_head: the head of the searching list;
952 * This is not necessarily the list head of the whole filesystem
954 * We have both head and start_block to assist the search
955 * for the reservable space. The list starts from head,
956 * but we will shift to the place where start_block is,
957 * then start from there, when looking for a reservable space.
959 * @size: the target new reservation window size
961 * @group_first_block: the first block we consider to start
962 * the real search from
964 * @last_block:
965 * the maximum block number that our goal reservable space
966 * could start from. This is normally the last block in this
967 * group. The search ends when the start of the next possible
968 * reservable space is found to lie beyond this boundary.
969 * This handles the cross-boundary reservation window
970 * request.
972 * Basically, we search the given range (start_block, last_block),
973 * rather than the whole reservation tree,
974 * to find a free region that is of the requested size and has not
975 * been reserved.
978 static int find_next_reservable_window(
979 struct ext3_reserve_window_node *search_head,
980 struct ext3_reserve_window_node *my_rsv,
981 struct super_block * sb,
982 ext3_fsblk_t start_block,
983 ext3_fsblk_t last_block)
985 struct rb_node *next;
986 struct ext3_reserve_window_node *rsv, *prev;
987 ext3_fsblk_t cur;
988 int size = my_rsv->rsv_goal_size;
990 /* TODO: make the start of the reservation window byte-aligned */
991 /* cur = *start_block & ~7;*/
992 cur = start_block;
993 rsv = search_head;
994 if (!rsv)
995 return -1;
997 while (1) {
998 if (cur <= rsv->rsv_end)
999 cur = rsv->rsv_end + 1;
1001 /* TODO?
1002 * in the case we could not find a reservable space
1003 * of the expected size, during the re-search we could
1004 * remember the largest reservable space we saw
1005 * and return that one instead.
1007 * For now it will fail if we could not find a reservable
1008 * space of the expected size (or more)...
1010 if (cur > last_block)
1011 return -1; /* fail */
1013 prev = rsv;
1014 next = rb_next(&rsv->rsv_node);
1015 rsv = rb_entry(next,struct ext3_reserve_window_node,rsv_node);
1018 * Reached the last reservation, we can just append to the
1019 * previous one.
1021 if (!next)
1022 break;
1024 if (cur + size <= rsv->rsv_start) {
1026 * Found a reservable space big enough. We could
1027 * have a reservation across the group boundary here
1029 break;
1033 * We come here either:
1034 * when we reach the end of the whole list,
1035 * and there is empty reservable space after the last entry in the list:
1036 * append it to the end of the list;
1038 * or we found one reservable space in the middle of the list:
1039 * return the reservation window that we could append to.
1040 * Either way, we succeed.
1043 if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
1044 rsv_window_remove(sb, my_rsv);
1047 * Let's book the whole available window for now. We will check the
1048 * disk bitmap later and, if there are free blocks, we adjust the
1049 * window size if it's larger than requested.
1050 * Otherwise, we will remove this node from the tree the next time
1051 * find_next_reservable_window() is called.
1053 my_rsv->rsv_start = cur;
1054 my_rsv->rsv_end = cur + size - 1;
1055 my_rsv->rsv_alloc_hit = 0;
1057 if (prev != my_rsv)
1058 ext3_rsv_window_add(sb, my_rsv);
1060 return 0;
1064 * alloc_new_reservation()--allocate a new reservation window
1066 * To make a new reservation, we search part of the filesystem
1067 * reservation list (the list inside the group). We try to
1068 * allocate a new reservation window near the allocation goal,
1069 * or the beginning of the group, if there is no goal.
1071 * We first find a reservable space after the goal, then from
1072 * there, we check the bitmap for the first free block after
1073 * it. If there is no free block until the end of group, then the
1074 * whole group is full, we failed. Otherwise, check if the free
1075 * block is inside the expected reservable space, if so, we
1076 * succeed.
1077 * If the first free block is outside the reservable space, then
1078 * start from the first free block, we search for next available
1079 * space, and go on.
1081 * On success, a new reservation window is found and inserted into the list.
1082 * It contains at least one free block, and it does not overlap with other
1083 * reservation windows.
1085 * On failure, we could not find a reservation window in this group.
1087 * @rsv: the reservation
1089 * @grp_goal: The goal (group-relative). It is where the search for a
1090 * free reservable space should start from.
1091 * if we have a grp_goal (grp_goal >= 0), then start from there;
1092 * with no grp_goal (grp_goal = -1), we start from the first block
1093 * of the group.
1095 * @sb: the super block
1096 * @group: the group we are trying to allocate in
1097 * @bitmap_bh: the block group block bitmap
1100 static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
1101 ext3_grpblk_t grp_goal, struct super_block *sb,
1102 unsigned int group, struct buffer_head *bitmap_bh)
1104 struct ext3_reserve_window_node *search_head;
1105 ext3_fsblk_t group_first_block, group_end_block, start_block;
1106 ext3_grpblk_t first_free_block;
1107 struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
1108 unsigned long size;
1109 int ret;
1110 spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
1112 group_first_block = ext3_group_first_block_no(sb, group);
1113 group_end_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
1115 if (grp_goal < 0)
1116 start_block = group_first_block;
1117 else
1118 start_block = grp_goal + group_first_block;
1120 size = my_rsv->rsv_goal_size;
1122 if (!rsv_is_empty(&my_rsv->rsv_window)) {
1124 * if the old reservation crosses the group boundary
1125 * and if the goal is inside the old reservation window,
1126 * we will come here when we just failed to allocate from
1127 * the first part of the window. We still have another part
1128 * that belongs to the next group. In this case, there is no
1129 * point in discarding our window and trying to allocate a new one
1130 * in this group (which will fail). We should
1131 * keep the reservation window and simply move on.
1133 * Maybe we could shift the start block of the reservation
1134 * window to the first block of the next group.
1137 if ((my_rsv->rsv_start <= group_end_block) &&
1138 (my_rsv->rsv_end > group_end_block) &&
1139 (start_block >= my_rsv->rsv_start))
1140 return -1;
1142 if ((my_rsv->rsv_alloc_hit >
1143 (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
1145 * if the previous allocation hit ratio is
1146 * greater than 1/2, then we double the size of
1147 * the reservation window the next time,
1148 * otherwise we keep the same size window
1150 size = size * 2;
1151 if (size > EXT3_MAX_RESERVE_BLOCKS)
1152 size = EXT3_MAX_RESERVE_BLOCKS;
1153 my_rsv->rsv_goal_size = size;
1157 spin_lock(rsv_lock);
1159 * shift the search start to the window near the goal block
1161 search_head = search_reserve_window(fs_rsv_root, start_block);
1164 * find_next_reservable_window() simply finds a reservable window
1165 * inside the given range (start_block, group_end_block).
1167 * To make sure the reservation window has a free bit inside it, we
1168 * need to check the bitmap after we found a reservable window.
1170 retry:
1171 ret = find_next_reservable_window(search_head, my_rsv, sb,
1172 start_block, group_end_block);
1174 if (ret == -1) {
1175 if (!rsv_is_empty(&my_rsv->rsv_window))
1176 rsv_window_remove(sb, my_rsv);
1177 spin_unlock(rsv_lock);
1178 return -1;
1182 * On success, find_next_reservable_window() returns the
1183 * reservation window where there is a reservable space after it.
1184 * Before we reserve this reservable space, we need
1185 * to make sure there is at least a free block inside this region.
1187 * We search the first free bit in the block bitmap and in the copy of the
1188 * last committed bitmap alternately, until we find an allocatable
1189 * block. The search starts from the start block of the reservable
1190 * space we just found.
1192 spin_unlock(rsv_lock);
1193 first_free_block = bitmap_search_next_usable_block(
1194 my_rsv->rsv_start - group_first_block,
1195 bitmap_bh, group_end_block - group_first_block + 1);
1197 if (first_free_block < 0) {
1199 * no free blocks left in the bitmap, so there is no point
1200 * in reserving the space; return failure.
1202 spin_lock(rsv_lock);
1203 if (!rsv_is_empty(&my_rsv->rsv_window))
1204 rsv_window_remove(sb, my_rsv);
1205 spin_unlock(rsv_lock);
1206 return -1; /* failed */
1209 start_block = first_free_block + group_first_block;
1211 * check if the first free block is within the
1212 * free space we just reserved
1214 if (start_block >= my_rsv->rsv_start && start_block <= my_rsv->rsv_end)
1215 return 0; /* success */
1217 * if the first free bit we found is outside the reservable space,
1218 * continue searching for the next reservable space,
1219 * starting from where the free block is;
1220 * we also shift the search head to where we stopped last time
1222 search_head = my_rsv;
1223 spin_lock(rsv_lock);
1224 goto retry;
1228 * try_to_extend_reservation()
1229 * @my_rsv: given reservation window
1230 * @sb: super block
1231 * @size: the delta to extend
1233 * Attempt to expand the reservation window so that it is large enough to
1234 * hold the required number of free blocks.
1236 * Since ext3_try_to_allocate() will always allocate blocks within
1237 * the reservation window range, if the window size is too small,
1238 * multiple blocks allocation has to stop at the end of the reservation
1239 * window. To make this more efficient, given the total number of
1240 * blocks needed and the current size of the window, we try to
1241 * expand the reservation window size if necessary on a best-effort
1242 * basis before ext3_new_blocks() tries to allocate blocks.
1244 static void try_to_extend_reservation(struct ext3_reserve_window_node *my_rsv,
1245 struct super_block *sb, int size)
1247 struct ext3_reserve_window_node *next_rsv;
1248 struct rb_node *next;
1249 spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;
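/*
 * Extending the window is only a best-effort optimisation (see the
 * comment above), so do not spin on the tree lock; give up if it is
 * contended.
 */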
1251 if (!spin_trylock(rsv_lock))
1252 return;
1254 next = rb_next(&my_rsv->rsv_node);
1256 if (!next)
1257 my_rsv->rsv_end += size;
1258 else {
1259 next_rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);
1261 if ((next_rsv->rsv_start - my_rsv->rsv_end - 1) >= size)
1262 my_rsv->rsv_end += size;
1263 else
1264 my_rsv->rsv_end = next_rsv->rsv_start - 1;
1266 spin_unlock(rsv_lock);
1270 * ext3_try_to_allocate_with_rsv()
1271 * @sb: superblock
1272 * @handle: handle to this transaction
1273 * @group: given allocation block group
1274 * @bitmap_bh: bufferhead holds the block bitmap
1275 * @grp_goal: given target block within the group
1276 * @count: target number of blocks to allocate
1277 * @my_rsv: reservation window
1278 * @errp: pointer to store the error code
1280 * This is the main function used to allocate a new block and its reservation
1281 * window.
1283 * Each time a new block allocation is needed, first try to allocate from the
1284 * file's own reservation. If it does not have a reservation window, then,
1285 * instead of first looking for a free bit in the bitmap and then looking up the
1286 * reservation list to see if the bit is inside somebody else's window, we try
1287 * to allocate a reservation window for the file, starting from the goal. The
1288 * block allocation is then done within that reservation window.
1290 * This will avoid keeping on searching the reservation list again and
1291 * again when somebody is looking for a free block (without
1292 * reservation), and there are lots of free blocks, but they are all
1293 * being reserved.
1295 * We use a red-black tree for the per-filesystem reservation list.
1298 static ext3_grpblk_t
1299 ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
1300 unsigned int group, struct buffer_head *bitmap_bh,
1301 ext3_grpblk_t grp_goal,
1302 struct ext3_reserve_window_node * my_rsv,
1303 unsigned long *count, int *errp)
1305 ext3_fsblk_t group_first_block, group_last_block;
1306 ext3_grpblk_t ret = 0;
1307 int fatal;
1308 unsigned long num = *count;
1310 *errp = 0;
1313 * Make sure we use undo access for the bitmap, because it is critical
1314 * that we do the frozen_data COW on bitmap buffers in all cases even
1315 * if the buffer is in BJ_Forget state in the committing transaction.
1317 BUFFER_TRACE(bitmap_bh, "get undo access for new block");
1318 fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
1319 if (fatal) {
1320 *errp = fatal;
1321 return -1;
1325 * we don't deal with reservation when
1326 * filesystem is mounted without reservation
1327 * or the file is not a regular file
1328 * or last attempt to allocate a block with reservation turned on failed
1330 if (my_rsv == NULL ) {
1331 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
1332 grp_goal, count, NULL);
1333 goto out;
1336 * grp_goal is a group relative block number (if there is a goal)
1337 * 0 <= grp_goal < EXT3_BLOCKS_PER_GROUP(sb)
1338 * group_first_block is a filesystem-wide block number; it is
1339 * the block number of the first block in this group
1341 group_first_block = ext3_group_first_block_no(sb, group);
1342 group_last_block = group_first_block + (EXT3_BLOCKS_PER_GROUP(sb) - 1);
1345 * Basically we will allocate a new block from inode's reservation
1346 * window.
1348 * We need to allocate a new reservation window, if:
1349 * a) inode does not have a reservation window; or
1350 * b) last attempt to allocate a block from existing reservation
1351 * failed; or
1352 * c) we come here with a goal and the goal is outside the existing window.
1354 * We do not need to allocate a new reservation window if we come here
1355 * at the beginning with a goal and the goal is inside the window, or
1356 * we don't have a goal but already have a reservation window;
1357 * in that case we can allocate from the reservation window directly.
1359 while (1) {
1360 if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
1361 !goal_in_my_reservation(&my_rsv->rsv_window,
1362 grp_goal, group, sb)) {
1363 if (my_rsv->rsv_goal_size < *count)
1364 my_rsv->rsv_goal_size = *count;
1365 ret = alloc_new_reservation(my_rsv, grp_goal, sb,
1366 group, bitmap_bh);
1367 if (ret < 0)
1368 break; /* failed */
1370 if (!goal_in_my_reservation(&my_rsv->rsv_window,
1371 grp_goal, group, sb))
1372 grp_goal = -1;
1373 } else if (grp_goal >= 0) {
1374 int curr = my_rsv->rsv_end -
1375 (grp_goal + group_first_block) + 1;
1377 if (curr < *count)
1378 try_to_extend_reservation(my_rsv, sb,
1379 *count - curr);
1382 if ((my_rsv->rsv_start > group_last_block) ||
1383 (my_rsv->rsv_end < group_first_block)) {
1384 rsv_window_dump(&EXT3_SB(sb)->s_rsv_window_root, 1);
1385 BUG();
1387 ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh,
1388 grp_goal, &num, &my_rsv->rsv_window);
1389 if (ret >= 0) {
1390 my_rsv->rsv_alloc_hit += num;
1391 *count = num;
1392 break; /* succeed */
1394 num = *count;
1396 out:
1397 if (ret >= 0) {
1398 BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
1399 "bitmap block");
1400 fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
1401 if (fatal) {
1402 *errp = fatal;
1403 return -1;
1405 return ret;
1408 BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
1409 ext3_journal_release_buffer(handle, bitmap_bh);
1410 return ret;
1414 * ext3_has_free_blocks()
1415 * @sbi: in-core super block structure.
1417 * Check if filesystem has at least 1 free block available for allocation.
1419 static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
1421 ext3_fsblk_t free_blocks, root_blocks;
1423 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
1424 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
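/*
 * The last root_blocks blocks are reserved: only CAP_SYS_RESOURCE or
 * the configured resuid/resgid may dip into them.
 */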
1425 if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
1426 sbi->s_resuid != current_fsuid() &&
1427 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
1428 return 0;
1430 return 1;
1434 * ext3_should_retry_alloc()
1435 * @sb: super block
1436 * @retries: number of attempts that have been made
1438 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
1439 * it is profitable to retry the operation, this function will wait
1440 * for the current or committing transaction to complete, and then
1441 * return TRUE.
1443 * If the total number of retries exceeds three, return FALSE.
1445 int ext3_should_retry_alloc(struct super_block *sb, int *retries)
1447 if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3)
1448 return 0;
1450 jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);
1452 return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
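/*
 * Hedged usage sketch (editorial addition, not part of the original file):
 * callers typically pair ext3_new_blocks() with ext3_should_retry_alloc()
 * to retry after ENOSPC, roughly as in the disabled fragment below. The
 * surrounding 'handle', 'inode' and 'goal' are assumed to exist in the
 * caller.
 */
#if 0
{
	int retries = 0, err;
	unsigned long n = 1;
	ext3_fsblk_t blk;

retry:
	blk = ext3_new_blocks(handle, inode, goal, &n, &err);
	if (!blk && err == -ENOSPC &&
	    ext3_should_retry_alloc(inode->i_sb, &retries))
		goto retry;
}
#endif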
1456 * ext3_new_blocks() -- core block(s) allocation function
1457 * @handle: handle to this transaction
1458 * @inode: file inode
1459 * @goal: given target block(filesystem wide)
1460 * @count: target number of blocks to allocate
1461 * @errp: error code
1463 * ext3_new_blocks uses a goal block to assist allocation. It tries to
1464 * allocate block(s) from the block group that contains the goal block first. If that
1465 * fails, it will try to allocate block(s) from other block groups without
1466 * any specific goal block.
1469 ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
1470 ext3_fsblk_t goal, unsigned long *count, int *errp)
1472 struct buffer_head *bitmap_bh = NULL;
1473 struct buffer_head *gdp_bh;
1474 int group_no;
1475 int goal_group;
1476 ext3_grpblk_t grp_target_blk; /* blockgroup relative goal block */
1477 ext3_grpblk_t grp_alloc_blk; /* blockgroup-relative allocated block*/
1478 ext3_fsblk_t ret_block; /* filesystem-wide allocated block */
1479 int bgi; /* blockgroup iteration index */
1480 int fatal = 0, err;
1481 int performed_allocation = 0;
1482 ext3_grpblk_t free_blocks; /* number of free blocks in a group */
1483 struct super_block *sb;
1484 struct ext3_group_desc *gdp;
1485 struct ext3_super_block *es;
1486 struct ext3_sb_info *sbi;
1487 struct ext3_reserve_window_node *my_rsv = NULL;
1488 struct ext3_block_alloc_info *block_i;
1489 unsigned short windowsz = 0;
1490 #ifdef EXT3FS_DEBUG
1491 static int goal_hits, goal_attempts;
1492 #endif
1493 unsigned long ngroups;
1494 unsigned long num = *count;
1496 *errp = -ENOSPC;
1497 sb = inode->i_sb;
1498 if (!sb) {
1499 printk("ext3_new_block: nonexistent device");
1500 return 0;
1504 * Check quota for allocation of this block.
1506 err = dquot_alloc_block(inode, num);
1507 if (err) {
1508 *errp = err;
1509 return 0;
1512 sbi = EXT3_SB(sb);
1513 es = EXT3_SB(sb)->s_es;
1514 ext3_debug("goal=%lu.\n", goal);
1516 * Allocate a block from reservation only when
1517 * the filesystem is mounted with reservation (the default, -o reservation), and
1518 * it's a regular file, and
1519 * the desired window size is greater than 0 (One could use ioctl
1520 * command EXT3_IOC_SETRSVSZ to set the window size to 0 to turn off
1521 * reservation on that particular file)
1523 block_i = EXT3_I(inode)->i_block_alloc_info;
1524 if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
1525 my_rsv = &block_i->rsv_window_node;
1527 if (!ext3_has_free_blocks(sbi)) {
1528 *errp = -ENOSPC;
1529 goto out;
1533 * First, test whether the goal block is free.
1535 if (goal < le32_to_cpu(es->s_first_data_block) ||
1536 goal >= le32_to_cpu(es->s_blocks_count))
1537 goal = le32_to_cpu(es->s_first_data_block);
1538 group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
1539 EXT3_BLOCKS_PER_GROUP(sb);
1540 goal_group = group_no;
1541 retry_alloc:
1542 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
1543 if (!gdp)
1544 goto io_error;
1546 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1548 * if there are not enough free blocks to make a new reservation,
1549 * turn off reservation for this allocation
1551 if (my_rsv && (free_blocks < windowsz)
1552 && (free_blocks > 0)
1553 && (rsv_is_empty(&my_rsv->rsv_window)))
1554 my_rsv = NULL;
1556 if (free_blocks > 0) {
1557 grp_target_blk = ((goal - le32_to_cpu(es->s_first_data_block)) %
1558 EXT3_BLOCKS_PER_GROUP(sb));
1559 bitmap_bh = read_block_bitmap(sb, group_no);
1560 if (!bitmap_bh)
1561 goto io_error;
1562 grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
1563 group_no, bitmap_bh, grp_target_blk,
1564 my_rsv, &num, &fatal);
1565 if (fatal)
1566 goto out;
1567 if (grp_alloc_blk >= 0)
1568 goto allocated;
1571 ngroups = EXT3_SB(sb)->s_groups_count;
1572 smp_rmb();
1575 * Now search the rest of the groups. We assume that
1576 * group_no and gdp correctly point to the last group visited.
1578 for (bgi = 0; bgi < ngroups; bgi++) {
1579 group_no++;
1580 if (group_no >= ngroups)
1581 group_no = 0;
1582 gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
1583 if (!gdp)
1584 goto io_error;
1585 free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
1587 * skip this group if the number of
1588 * free blocks is less than half of the reservation
1589 * window size.
1591 if (my_rsv && (free_blocks <= (windowsz/2)))
1592 continue;
1594 brelse(bitmap_bh);
1595 bitmap_bh = read_block_bitmap(sb, group_no);
1596 if (!bitmap_bh)
1597 goto io_error;
1599 * try to allocate block(s) from this group, without a goal (-1).
1601 grp_alloc_blk = ext3_try_to_allocate_with_rsv(sb, handle,
1602 group_no, bitmap_bh, -1, my_rsv,
1603 &num, &fatal);
1604 if (fatal)
1605 goto out;
1606 if (grp_alloc_blk >= 0)
1607 goto allocated;
1610 * We may have ended up with a bogus earlier ENOSPC error because the
1611 * filesystem is "full" of reservations, while there may in fact be
1612 * free blocks available on disk.
1613 * In this case, we just forget about the reservations and
1614 * do the block allocation as if there were no reservations.
1616 if (my_rsv) {
1617 my_rsv = NULL;
1618 windowsz = 0;
1619 group_no = goal_group;
1620 goto retry_alloc;
1622 /* No space left on the device */
1623 *errp = -ENOSPC;
1624 goto out;
1626 allocated:
1628 ext3_debug("using block group %d(%d)\n",
1629 group_no, gdp->bg_free_blocks_count);
1631 BUFFER_TRACE(gdp_bh, "get_write_access");
1632 fatal = ext3_journal_get_write_access(handle, gdp_bh);
1633 if (fatal)
1634 goto out;
1636 ret_block = grp_alloc_blk + ext3_group_first_block_no(sb, group_no);
1638 if (in_range(le32_to_cpu(gdp->bg_block_bitmap), ret_block, num) ||
1639 in_range(le32_to_cpu(gdp->bg_inode_bitmap), ret_block, num) ||
1640 in_range(ret_block, le32_to_cpu(gdp->bg_inode_table),
1641 EXT3_SB(sb)->s_itb_per_group) ||
1642 in_range(ret_block + num - 1, le32_to_cpu(gdp->bg_inode_table),
1643 EXT3_SB(sb)->s_itb_per_group)) {
1644 ext3_error(sb, "ext3_new_block",
1645 "Allocating block in system zone - "
1646 "blocks from "E3FSBLK", length %lu",
1647 ret_block, num);
1649 * claim_block() marked the blocks we allocated as in use. So we
1650 * may want to selectively mark some of the blocks as free.
1652 goto retry_alloc;
1655 performed_allocation = 1;
1657 #ifdef CONFIG_JBD_DEBUG
1659 struct buffer_head *debug_bh;
1661 /* Record bitmap buffer state in the newly allocated block */
1662 debug_bh = sb_find_get_block(sb, ret_block);
1663 if (debug_bh) {
1664 BUFFER_TRACE(debug_bh, "state when allocated");
1665 BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
1666 brelse(debug_bh);
1669 jbd_lock_bh_state(bitmap_bh);
1670 spin_lock(sb_bgl_lock(sbi, group_no));
1671 if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
1672 int i;
1674 for (i = 0; i < num; i++) {
1675 if (ext3_test_bit(grp_alloc_blk+i,
1676 bh2jh(bitmap_bh)->b_committed_data)) {
1677 printk("%s: block was unexpectedly set in "
1678 "b_committed_data\n", __func__);
1682 ext3_debug("found bit %d\n", grp_alloc_blk);
1683 spin_unlock(sb_bgl_lock(sbi, group_no));
1684 jbd_unlock_bh_state(bitmap_bh);
1685 #endif
1687 if (ret_block + num - 1 >= le32_to_cpu(es->s_blocks_count)) {
1688 ext3_error(sb, "ext3_new_block",
1689 "block("E3FSBLK") >= blocks count(%d) - "
1690 "block_group = %d, es == %p ", ret_block,
1691 le32_to_cpu(es->s_blocks_count), group_no, es);
1692 goto out;
1696 * It is up to the caller to add the new buffer to a journal
1697 * list of some description. We don't know in advance whether
1698 * the caller wants to use it as metadata or data.
1700 ext3_debug("allocating block %lu. Goal hits %d of %d.\n",
1701 ret_block, goal_hits, goal_attempts);
1703 spin_lock(sb_bgl_lock(sbi, group_no));
1704 le16_add_cpu(&gdp->bg_free_blocks_count, -num);
1705 spin_unlock(sb_bgl_lock(sbi, group_no));
1706 percpu_counter_sub(&sbi->s_freeblocks_counter, num);
1708 BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
1709 err = ext3_journal_dirty_metadata(handle, gdp_bh);
1710 if (!fatal)
1711 fatal = err;
1713 if (fatal)
1714 goto out;
1716 *errp = 0;
1717 brelse(bitmap_bh);
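/*
 * Quota was charged for the full *count blocks up front, but only
 * 'num' blocks were actually allocated; give back the difference.
 */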
1718 dquot_free_block(inode, *count-num);
1719 *count = num;
1720 return ret_block;
1722 io_error:
1723 *errp = -EIO;
1724 out:
1725 if (fatal) {
1726 *errp = fatal;
1727 ext3_std_error(sb, fatal);
1730 * Undo the block allocation
1732 if (!performed_allocation)
1733 dquot_free_block(inode, *count);
1734 brelse(bitmap_bh);
1735 return 0;
1738 ext3_fsblk_t ext3_new_block(handle_t *handle, struct inode *inode,
1739 ext3_fsblk_t goal, int *errp)
1741 unsigned long count = 1;
1743 return ext3_new_blocks(handle, inode, goal, &count, errp);
1747 * ext3_count_free_blocks() -- count filesystem free blocks
1748 * @sb: superblock
1750 * Adds up the number of free blocks from each block group.
1752 ext3_fsblk_t ext3_count_free_blocks(struct super_block *sb)
1754 ext3_fsblk_t desc_count;
1755 struct ext3_group_desc *gdp;
1756 int i;
1757 unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
1758 #ifdef EXT3FS_DEBUG
1759 struct ext3_super_block *es;
1760 ext3_fsblk_t bitmap_count;
1761 unsigned long x;
1762 struct buffer_head *bitmap_bh = NULL;
1764 es = EXT3_SB(sb)->s_es;
1765 desc_count = 0;
1766 bitmap_count = 0;
1767 gdp = NULL;
1769 smp_rmb();
1770 for (i = 0; i < ngroups; i++) {
1771 gdp = ext3_get_group_desc(sb, i, NULL);
1772 if (!gdp)
1773 continue;
1774 desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1775 brelse(bitmap_bh);
1776 bitmap_bh = read_block_bitmap(sb, i);
1777 if (bitmap_bh == NULL)
1778 continue;
1780 x = ext3_count_free(bitmap_bh, sb->s_blocksize);
1781 printk("group %d: stored = %d, counted = %lu\n",
1782 i, le16_to_cpu(gdp->bg_free_blocks_count), x);
1783 bitmap_count += x;
1785 brelse(bitmap_bh);
1786 printk("ext3_count_free_blocks: stored = "E3FSBLK
1787 ", computed = "E3FSBLK", "E3FSBLK"\n",
1788 le32_to_cpu(es->s_free_blocks_count),
1789 desc_count, bitmap_count);
1790 return bitmap_count;
1791 #else
1792 desc_count = 0;
1793 smp_rmb();
1794 for (i = 0; i < ngroups; i++) {
1795 gdp = ext3_get_group_desc(sb, i, NULL);
1796 if (!gdp)
1797 continue;
1798 desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
1801 return desc_count;
1802 #endif
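/*
 * test_root(a, b) returns true when a is a positive power of b
 * (b, b*b, b*b*b, ...). Used below to identify the groups that carry
 * sparse-superblock backups.
 */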
1805 static inline int test_root(int a, int b)
1807 int num = b;
1809 while (a > num)
1810 num *= b;
1811 return num == a;
1814 static int ext3_group_sparse(int group)
1816 if (group <= 1)
1817 return 1;
1818 if (!(group & 1))
1819 return 0;
1820 return (test_root(group, 7) || test_root(group, 5) ||
1821 test_root(group, 3));
1825 * ext3_bg_has_super - number of blocks used by the superblock in group
1826 * @sb: superblock for filesystem
1827 * @group: group number to check
1829 * Return the number of blocks used by the superblock (primary or backup)
1830 * in this group. Currently this will be only 0 or 1.
1832 int ext3_bg_has_super(struct super_block *sb, int group)
1834 if (EXT3_HAS_RO_COMPAT_FEATURE(sb,
1835 EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER) &&
1836 !ext3_group_sparse(group))
1837 return 0;
1838 return 1;
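/*
 * With the META_BG layout, each meta block group keeps its group
 * descriptor backups only in the first, second and last groups of the
 * metagroup.
 */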
1841 static unsigned long ext3_bg_num_gdb_meta(struct super_block *sb, int group)
1843 unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
1844 unsigned long first = metagroup * EXT3_DESC_PER_BLOCK(sb);
1845 unsigned long last = first + EXT3_DESC_PER_BLOCK(sb) - 1;
1847 if (group == first || group == first + 1 || group == last)
1848 return 1;
1849 return 0;
1852 static unsigned long ext3_bg_num_gdb_nometa(struct super_block *sb, int group)
1854 return ext3_bg_has_super(sb, group) ? EXT3_SB(sb)->s_gdb_count : 0;
1858 * ext3_bg_num_gdb - number of blocks used by the group table in group
1859 * @sb: superblock for filesystem
1860 * @group: group number to check
1862 * Return the number of blocks used by the group descriptor table
1863 * (primary or backup) in this group. In the future there may be a
1864 * different number of descriptor blocks in each group.
1866 unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
1868 unsigned long first_meta_bg =
1869 le32_to_cpu(EXT3_SB(sb)->s_es->s_first_meta_bg);
1870 unsigned long metagroup = group / EXT3_DESC_PER_BLOCK(sb);
1872 if (!EXT3_HAS_INCOMPAT_FEATURE(sb,EXT3_FEATURE_INCOMPAT_META_BG) ||
1873 metagroup < first_meta_bg)
1874 return ext3_bg_num_gdb_nometa(sb,group);
1876 return ext3_bg_num_gdb_meta(sb,group);