/*
 *  linux/fs/ext3/balloc.c
 *
 * Copyright (C) 1992, 1993, 1994, 1995
 * Remy Card (card@masi.ibp.fr)
 * Laboratoire MASI - Institut Blaise Pascal
 * Universite Pierre et Marie Curie (Paris VI)
 *
 *  Enhanced block allocation by Stephen Tweedie (sct@redhat.com), 1993
 *  Big-endian to little-endian byte-swapping/bitmaps by
 *        David S. Miller (davem@caip.rutgers.edu), 1995
 */
#include <linux/config.h>
#include <linux/time.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/jbd.h>
#include <linux/ext3_fs.h>
#include <linux/ext3_jbd.h>
#include <linux/quotaops.h>
#include <linux/buffer_head.h>
/*
 * balloc.c contains the blocks allocation and deallocation routines
 */

/*
 * The free blocks are managed by bitmaps.  A filesystem contains several
 * block groups.  Each group contains one bitmap block for blocks, one
 * bitmap block for inodes, N blocks for the inode table and data blocks.
 *
 * The filesystem contains group descriptors which are located after the
 * super block.  Each descriptor contains the number of the bitmap block and
 * the free blocks count in the group.  The descriptors are loaded in memory
 * when a filesystem is mounted (see ext3_read_super).
 */
#define in_range(b, first, len)	((b) >= (first) && (b) <= (first) + (len) - 1)
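/*
 * Illustrative example (not part of the original source, compiled out):
 * in_range() treats its range as inclusive on both ends, covering the
 * len blocks [first, first + len - 1].  The block numbers are made up.
 */
#if 0
static void in_range_example(void)
{
        BUG_ON(!in_range(100, 100, 5));         /* first block of the range */
        BUG_ON(!in_range(104, 100, 5));         /* last block of the range */
        BUG_ON(in_range(105, 100, 5));          /* one block past the end */
}
#endif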
struct ext3_group_desc * ext3_get_group_desc(struct super_block * sb,
                                             unsigned int block_group,
                                             struct buffer_head ** bh)
{
        unsigned long group_desc;
        unsigned long offset;
        struct ext3_group_desc * desc;
        struct ext3_sb_info *sbi = EXT3_SB(sb);

        if (block_group >= sbi->s_groups_count) {
                ext3_error (sb, "ext3_get_group_desc",
                            "block_group >= groups_count - "
                            "block_group = %d, groups_count = %lu",
                            block_group, sbi->s_groups_count);
                return NULL;
        }
        smp_rmb();

        group_desc = block_group >> EXT3_DESC_PER_BLOCK_BITS(sb);
        offset = block_group & (EXT3_DESC_PER_BLOCK(sb) - 1);
        if (!sbi->s_group_desc[group_desc]) {
                ext3_error (sb, "ext3_get_group_desc",
                            "Group descriptor not loaded - "
                            "block_group = %d, group_desc = %lu, desc = %lu",
                            block_group, group_desc, offset);
                return NULL;
        }

        desc = (struct ext3_group_desc *) sbi->s_group_desc[group_desc]->b_data;
        if (bh)
                *bh = sbi->s_group_desc[group_desc];
        return desc + offset;
}
/*
 * Read the bitmap for a given block_group, reading into the specified
 * slot in the superblock's bitmap cache.
 *
 * Return buffer_head on success or NULL in case of failure.
 */
static struct buffer_head *
read_block_bitmap(struct super_block *sb, unsigned int block_group)
{
        struct ext3_group_desc * desc;
        struct buffer_head * bh = NULL;

        desc = ext3_get_group_desc (sb, block_group, NULL);
        if (!desc)
                goto error_out;
        bh = sb_bread(sb, le32_to_cpu(desc->bg_block_bitmap));
        if (!bh)
                ext3_error (sb, "read_block_bitmap",
                            "Cannot read block bitmap - "
                            "block_group = %d, block_bitmap = %u",
                            block_group, le32_to_cpu(desc->bg_block_bitmap));
error_out:
        return bh;
}
/*
 * The reservation window structure operations
 * --------------------------------------------
 * Operations include:
 * dump, find, add, remove, is_empty, find_next_reservable_window, etc.
 *
 * The per-filesystem reservation windows are kept sorted and
 * non-overlapping in a red-black tree, indexed by window start block.
 *
 * Initially, we keep those small operations in these abstract functions,
 * so later, if we need a better searching structure than a red-black
 * tree, we could easily switch to it without changing too much code.
 */
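/*
 * Illustrative picture (values made up): three windows kept sorted and
 * disjoint in the tree,
 *
 *	[100,107]    [220,251]    [400,431]
 *
 * so for any two neighbours, prev->rsv_end < next->rsv_start.  The dump
 * routine below walks the tree in order and flags nodes violating that
 * invariant.
 */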
#if 1
static void __rsv_window_dump(struct rb_root *root, int verbose,
                              const char *fn)
{
        struct rb_node *n;
        struct ext3_reserve_window_node *rsv, *prev;
        int bad;

restart:
        n = rb_first(root);
        bad = 0;
        prev = NULL;

        printk("Block Allocation Reservation Windows Map (%s):\n", fn);
        while (n) {
                rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
                if (verbose)
                        printk("reservation window 0x%p "
                               "start: %d, end: %d\n",
                               rsv, rsv->rsv_start, rsv->rsv_end);
                if (rsv->rsv_start && rsv->rsv_start >= rsv->rsv_end) {
                        printk("Bad reservation %p (start >= end)\n",
                               rsv);
                        bad = 1;
                }
                if (prev && prev->rsv_end >= rsv->rsv_start) {
                        printk("Bad reservation %p (prev->end >= start)\n",
                               rsv);
                        bad = 1;
                }
                if (bad) {
                        if (!verbose) {
                                printk("Restarting reservation walk in verbose mode\n");
                                verbose = 1;
                                goto restart;
                        }
                }
                n = rb_next(n);
                prev = rsv;
        }
        printk("Window map complete.\n");
        if (bad)
                BUG();
}
#define rsv_window_dump(root, verbose) \
        __rsv_window_dump((root), (verbose), __FUNCTION__)
#else
#define rsv_window_dump(root, verbose) do {} while (0)
#endif
/* returns 1 if the goal block falls inside the reservation window */
static int
goal_in_my_reservation(struct ext3_reserve_window *rsv, int goal,
                        unsigned int group, struct super_block * sb)
{
        unsigned long group_first_block, group_last_block;

        group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
                                group * EXT3_BLOCKS_PER_GROUP(sb);
        group_last_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;

        if ((rsv->_rsv_start > group_last_block) ||
            (rsv->_rsv_end < group_first_block))
                return 0;
        if ((goal >= 0) && ((goal + group_first_block < rsv->_rsv_start)
                || (goal + group_first_block > rsv->_rsv_end)))
                return 0;
        return 1;
}
/*
 * Find the reserved window which includes the goal, or the previous one
 * if the goal is not in any window.
 * Returns NULL if there are no windows or if all windows start after the goal.
 */
static struct ext3_reserve_window_node *
search_reserve_window(struct rb_root *root, unsigned long goal)
{
        struct rb_node *n = root->rb_node;
        struct ext3_reserve_window_node *rsv;

        if (!n)
                return NULL;

        do {
                rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);

                if (goal < rsv->rsv_start)
                        n = n->rb_left;
                else if (goal > rsv->rsv_end)
                        n = n->rb_right;
                else
                        return rsv;
        } while (n);
        /*
         * We've fallen off the end of the tree: the goal wasn't inside
         * any particular node.  OK, the previous node must be to one
         * side of the interval containing the goal.  If it's the RHS,
         * we need to back up one.
         */
        if (rsv->rsv_start > goal) {
                n = rb_prev(&rsv->rsv_node);
                rsv = rb_entry(n, struct ext3_reserve_window_node, rsv_node);
        }
        return rsv;
}
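/*
 * Usage sketch (not part of the original source, compiled out): ask
 * whether a goal block is already covered by some reservation.  The
 * helper name is made up; the caller is assumed to hold the
 * filesystem's rsv_window_lock.
 */
#if 0
static int demo_goal_is_reserved(struct rb_root *root, unsigned long goal)
{
        struct ext3_reserve_window_node *rsv;

        rsv = search_reserve_window(root, goal);
        /* NULL means every window (if any) starts after the goal */
        return rsv && rsv->rsv_start <= goal && goal <= rsv->rsv_end;
}
#endif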
void ext3_rsv_window_add(struct super_block *sb,
                    struct ext3_reserve_window_node *rsv)
{
        struct rb_root *root = &EXT3_SB(sb)->s_rsv_window_root;
        struct rb_node *node = &rsv->rsv_node;
        unsigned int start = rsv->rsv_start;

        struct rb_node ** p = &root->rb_node;
        struct rb_node * parent = NULL;
        struct ext3_reserve_window_node *this;

        while (*p)
        {
                parent = *p;
                this = rb_entry(parent, struct ext3_reserve_window_node, rsv_node);

                if (start < this->rsv_start)
                        p = &(*p)->rb_left;
                else if (start > this->rsv_end)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        rb_link_node(node, parent, p);
        rb_insert_color(node, root);
}
static void rsv_window_remove(struct super_block *sb,
                              struct ext3_reserve_window_node *rsv)
{
        rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
        rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
        rsv->rsv_alloc_hit = 0;
        rb_erase(&rsv->rsv_node, &EXT3_SB(sb)->s_rsv_window_root);
}
static inline int rsv_is_empty(struct ext3_reserve_window *rsv)
{
        /* a valid reservation end block cannot be 0 */
        return (rsv->_rsv_end == EXT3_RESERVE_WINDOW_NOT_ALLOCATED);
}
void ext3_init_block_alloc_info(struct inode *inode)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
        struct super_block *sb = inode->i_sb;

        block_i = kmalloc(sizeof(*block_i), GFP_NOFS);
        if (block_i) {
                struct ext3_reserve_window_node *rsv = &block_i->rsv_window_node;

                rsv->rsv_start = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;
                rsv->rsv_end = EXT3_RESERVE_WINDOW_NOT_ALLOCATED;

                /*
                 * if filesystem is mounted with NORESERVATION, the goal
                 * reservation window size is set to zero to indicate
                 * block reservation is off
                 */
                if (!test_opt(sb, RESERVATION))
                        rsv->rsv_goal_size = 0;
                else
                        rsv->rsv_goal_size = EXT3_DEFAULT_RESERVE_BLOCKS;
                rsv->rsv_alloc_hit = 0;
                block_i->last_alloc_logical_block = 0;
                block_i->last_alloc_physical_block = 0;
        }
        ei->i_block_alloc_info = block_i;
}
void ext3_discard_reservation(struct inode *inode)
{
        struct ext3_inode_info *ei = EXT3_I(inode);
        struct ext3_block_alloc_info *block_i = ei->i_block_alloc_info;
        struct ext3_reserve_window_node *rsv;
        spinlock_t *rsv_lock = &EXT3_SB(inode->i_sb)->s_rsv_window_lock;

        if (!block_i)
                return;

        rsv = &block_i->rsv_window_node;
        if (!rsv_is_empty(&rsv->rsv_window)) {
                spin_lock(rsv_lock);
                if (!rsv_is_empty(&rsv->rsv_window))
                        rsv_window_remove(inode->i_sb, rsv);
                spin_unlock(rsv_lock);
        }
}
/* Free given blocks, update quota and i_blocks field */
void ext3_free_blocks_sb(handle_t *handle, struct super_block *sb,
                         unsigned long block, unsigned long count,
                         int *pdquot_freed_blocks)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gd_bh;
        unsigned long block_group;
        unsigned long bit;
        unsigned long i;
        unsigned long overflow;
        struct ext3_group_desc * desc;
        struct ext3_super_block * es;
        struct ext3_sb_info *sbi;
        int err = 0, ret;
        unsigned group_freed;

        *pdquot_freed_blocks = 0;
        sbi = EXT3_SB(sb);
        es = sbi->s_es;
        if (block < le32_to_cpu(es->s_first_data_block) ||
            block + count < block ||
            block + count > le32_to_cpu(es->s_blocks_count)) {
                ext3_error (sb, "ext3_free_blocks",
                            "Freeing blocks not in datazone - "
                            "block = %lu, count = %lu", block, count);
                goto error_return;
        }

        ext3_debug ("freeing block(s) %lu-%lu\n", block, block + count - 1);

do_more:
        overflow = 0;
        block_group = (block - le32_to_cpu(es->s_first_data_block)) /
                      EXT3_BLOCKS_PER_GROUP(sb);
        bit = (block - le32_to_cpu(es->s_first_data_block)) %
                      EXT3_BLOCKS_PER_GROUP(sb);
        /*
         * Check to see if we are freeing blocks across a group
         * boundary.
         */
        if (bit + count > EXT3_BLOCKS_PER_GROUP(sb)) {
                overflow = bit + count - EXT3_BLOCKS_PER_GROUP(sb);
                count -= overflow;
        }
        brelse(bitmap_bh);
        bitmap_bh = read_block_bitmap(sb, block_group);
        if (!bitmap_bh)
                goto error_return;
        desc = ext3_get_group_desc (sb, block_group, &gd_bh);
        if (!desc)
                goto error_return;

        if (in_range (le32_to_cpu(desc->bg_block_bitmap), block, count) ||
            in_range (le32_to_cpu(desc->bg_inode_bitmap), block, count) ||
            in_range (block, le32_to_cpu(desc->bg_inode_table),
                      sbi->s_itb_per_group) ||
            in_range (block + count - 1, le32_to_cpu(desc->bg_inode_table),
                      sbi->s_itb_per_group))
                ext3_error (sb, "ext3_free_blocks",
                            "Freeing blocks in system zones - "
                            "Block = %lu, count = %lu",
                            block, count);

        /*
         * We are about to start releasing blocks in the bitmap,
         * so we need undo access.
         */
        /* @@@ check errors */
        BUFFER_TRACE(bitmap_bh, "getting undo access");
        err = ext3_journal_get_undo_access(handle, bitmap_bh);
        if (err)
                goto error_return;

        /*
         * We are about to modify some metadata.  Call the journal APIs
         * to unshare ->b_data if a currently-committing transaction is
         * using it
         */
        BUFFER_TRACE(gd_bh, "get_write_access");
        err = ext3_journal_get_write_access(handle, gd_bh);
        if (err)
                goto error_return;

        jbd_lock_bh_state(bitmap_bh);

        for (i = 0, group_freed = 0; i < count; i++) {
                /*
                 * An HJ special.  This is expensive...
                 */
#ifdef CONFIG_JBD_DEBUG
                jbd_unlock_bh_state(bitmap_bh);
                {
                        struct buffer_head *debug_bh;
                        debug_bh = sb_find_get_block(sb, block + i);
                        if (debug_bh) {
                                BUFFER_TRACE(debug_bh, "Deleted!");
                                if (!bh2jh(bitmap_bh)->b_committed_data)
                                        BUFFER_TRACE(debug_bh,
                                                "No committed data in bitmap");
                                BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap");
                                __brelse(debug_bh);
                        }
                }
                jbd_lock_bh_state(bitmap_bh);
#endif
                if (need_resched()) {
                        jbd_unlock_bh_state(bitmap_bh);
                        cond_resched();
                        jbd_lock_bh_state(bitmap_bh);
                }
                /* @@@ This prevents newly-allocated data from being
                 * freed and then reallocated within the same
                 * transaction.
                 *
                 * Ideally we would want to allow that to happen, but to
                 * do so requires making journal_forget() capable of
                 * revoking the queued write of a data block, which
                 * implies blocking on the journal lock.  *forget()
                 * cannot block due to truncate races.
                 *
                 * Eventually we can fix this by making journal_forget()
                 * return a status indicating whether or not it was able
                 * to revoke the buffer.  On successful revoke, it is
                 * safe not to set the allocation bit in the committed
                 * bitmap, because we know that there is no outstanding
                 * activity on the buffer any more and so it is safe to
                 * reallocate it.
                 */
                BUFFER_TRACE(bitmap_bh, "set in b_committed_data");
                J_ASSERT_BH(bitmap_bh,
                                bh2jh(bitmap_bh)->b_committed_data != NULL);
                ext3_set_bit_atomic(sb_bgl_lock(sbi, block_group), bit + i,
                                bh2jh(bitmap_bh)->b_committed_data);

                /*
                 * We clear the bit in the bitmap after setting the committed
                 * data bit, because this is the reverse order to that which
                 * the allocator uses.
                 */
                BUFFER_TRACE(bitmap_bh, "clear bit");
                if (!ext3_clear_bit_atomic(sb_bgl_lock(sbi, block_group),
                                                bit + i, bitmap_bh->b_data)) {
                        jbd_unlock_bh_state(bitmap_bh);
                        ext3_error(sb, __FUNCTION__,
                                "bit already cleared for block %lu", block + i);
                        jbd_lock_bh_state(bitmap_bh);
                        BUFFER_TRACE(bitmap_bh, "bit already cleared");
                } else {
                        group_freed++;
                }
        }
        jbd_unlock_bh_state(bitmap_bh);

        spin_lock(sb_bgl_lock(sbi, block_group));
        desc->bg_free_blocks_count =
                cpu_to_le16(le16_to_cpu(desc->bg_free_blocks_count) +
                        group_freed);
        spin_unlock(sb_bgl_lock(sbi, block_group));
        percpu_counter_mod(&sbi->s_freeblocks_counter, count);

        /* We dirtied the bitmap block */
        BUFFER_TRACE(bitmap_bh, "dirtied bitmap block");
        err = ext3_journal_dirty_metadata(handle, bitmap_bh);

        /* And the group descriptor block */
        BUFFER_TRACE(gd_bh, "dirtied group descriptor block");
        ret = ext3_journal_dirty_metadata(handle, gd_bh);
        if (!err)
                err = ret;
        *pdquot_freed_blocks += group_freed;

        if (overflow && !err) {
                block += count;
                count = overflow;
                goto do_more;
        }
        sb->s_dirt = 1;
error_return:
        brelse(bitmap_bh);
        ext3_std_error(sb, err);
        return;
}
/* Free given blocks, update quota and i_blocks field */
void ext3_free_blocks(handle_t *handle, struct inode *inode,
                        unsigned long block, unsigned long count)
{
        struct super_block * sb;
        int dquot_freed_blocks;

        sb = inode->i_sb;
        if (!sb) {
                printk ("ext3_free_blocks: nonexistent device");
                return;
        }
        ext3_free_blocks_sb(handle, sb, block, count, &dquot_freed_blocks);
        if (dquot_freed_blocks)
                DQUOT_FREE_BLOCK(inode, dquot_freed_blocks);
        return;
}
/*
 * For ext3 allocations, we must not reuse any blocks which are
 * allocated in the bitmap buffer's "last committed data" copy.  This
 * prevents deletes from freeing up the page for reuse until we have
 * committed the delete transaction.
 *
 * If we didn't do this, then deleting something and reallocating it as
 * data would allow the old block to be overwritten before the
 * transaction committed (because we force data to disk before commit).
 * This would lead to corruption if we crashed between overwriting the
 * data and committing the delete.
 *
 * @@@ We may want to make this allocation behaviour conditional on
 * data-writes at some point, and disable it for metadata allocations or
 * sync-data inodes.
 */
static int ext3_test_allocatable(int nr, struct buffer_head *bh)
{
        int ret;
        struct journal_head *jh = bh2jh(bh);

        if (ext3_test_bit(nr, bh->b_data))
                return 0;

        jbd_lock_bh_state(bh);
        if (!jh->b_committed_data)
                ret = 1;
        else
                ret = !ext3_test_bit(nr, jh->b_committed_data);
        jbd_unlock_bh_state(bh);
        return ret;
}
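/*
 * Worked example: a block is allocatable only when its bit is clear in
 * both copies of the bitmap.
 *
 *	bit in b_data | bit in b_committed_data | allocatable?
 *	      1       |          any            |   no
 *	      0       |           1             |   no  (freed by a
 *	              |                         |   still-committing delete)
 *	      0       |      0, or no copy      |   yes
 */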
static int
bitmap_search_next_usable_block(int start, struct buffer_head *bh,
                                        int maxblocks)
{
        int next;
        struct journal_head *jh = bh2jh(bh);

        /*
         * The bitmap search --- search forward alternately through the actual
         * bitmap and the last-committed copy until we find a bit free in
         * both
         */
        while (start < maxblocks) {
                next = ext3_find_next_zero_bit(bh->b_data, maxblocks, start);
                if (next >= maxblocks)
                        return -1;
                if (ext3_test_allocatable(next, bh))
                        return next;
                jbd_lock_bh_state(bh);
                if (jh->b_committed_data)
                        start = ext3_find_next_zero_bit(jh->b_committed_data,
                                                        maxblocks, next);
                jbd_unlock_bh_state(bh);
        }
        return -1;
}
/*
 * Find an allocatable block in a bitmap.  We honour both the bitmap and
 * its last-committed copy (if that exists), and perform the "most
 * appropriate allocation" algorithm of looking for a free block near
 * the initial goal; then for a free byte somewhere in the bitmap; then
 * for any free bit in the bitmap.
 */
static int
find_next_usable_block(int start, struct buffer_head *bh, int maxblocks)
{
        int here, next;
        char *p, *r;

        if (start > 0) {
                /*
                 * The goal was occupied; search forward for a free
                 * block within the next XX blocks.
                 *
                 * end_goal is more or less random, but it has to be
                 * less than EXT3_BLOCKS_PER_GROUP. Aligning up to the
                 * next 64-bit boundary is simple..
                 */
                int end_goal = (start + 63) & ~63;
                if (end_goal > maxblocks)
                        end_goal = maxblocks;
                here = ext3_find_next_zero_bit(bh->b_data, end_goal, start);
                if (here < end_goal && ext3_test_allocatable(here, bh))
                        return here;
                ext3_debug("Bit not found near goal\n");
        }

        here = start;
        if (here < 0)
                here = 0;

        p = ((char *)bh->b_data) + (here >> 3);
        r = memscan(p, 0, (maxblocks - here + 7) >> 3);
        next = (r - ((char *)bh->b_data)) << 3;

        if (next < maxblocks && next >= start && ext3_test_allocatable(next, bh))
                return next;

        /*
         * The bitmap search --- search forward alternately through the actual
         * bitmap and the last-committed copy until we find a bit free in
         * both
         */
        here = bitmap_search_next_usable_block(here, bh, maxblocks);
        return here;
}
/*
 * We think we can allocate this block in this bitmap.  Try to set the bit.
 * If that succeeds then check that nobody has allocated and then freed the
 * block since we saw that it was not marked in b_committed_data.  If it _was_
 * allocated and freed then clear the bit in the bitmap again and return
 * zero (failure).
 */
static inline int
claim_block(spinlock_t *lock, int block, struct buffer_head *bh)
{
        struct journal_head *jh = bh2jh(bh);
        int ret;

        if (ext3_set_bit_atomic(lock, block, bh->b_data))
                return 0;
        jbd_lock_bh_state(bh);
        if (jh->b_committed_data && ext3_test_bit(block,jh->b_committed_data)) {
                ext3_clear_bit_atomic(lock, block, bh->b_data);
                ret = 0;
        } else {
                ret = 1;
        }
        jbd_unlock_bh_state(bh);
        return ret;
}
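/*
 * Usage sketch (not part of the original source, compiled out): claim a
 * group-relative block under the per-group spinlock, the same way
 * ext3_try_to_allocate() below does.  The wrapper name is made up.
 */
#if 0
static int demo_claim(struct super_block *sb, unsigned int group,
                      struct buffer_head *bitmap_bh, int grp_block)
{
        /* returns 1 if we now own grp_block, 0 if another thread beat us */
        return claim_block(sb_bgl_lock(EXT3_SB(sb), group),
                           grp_block, bitmap_bh);
}
#endif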
/*
 * If we failed to allocate the desired block then we may end up crossing to a
 * new bitmap.  In that case we must release write access to the old one via
 * ext3_journal_release_buffer(), else we'll run out of credits.
 */
static int
ext3_try_to_allocate(struct super_block *sb, handle_t *handle, int group,
        struct buffer_head *bitmap_bh, int goal, struct ext3_reserve_window *my_rsv)
{
        int group_first_block, start, end;

        /* we do allocation within the reservation window if we have a window */
        if (my_rsv) {
                group_first_block =
                        le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
                        group * EXT3_BLOCKS_PER_GROUP(sb);
                if (my_rsv->_rsv_start >= group_first_block)
                        start = my_rsv->_rsv_start - group_first_block;
                else
                        /* reservation window crosses group boundary */
                        start = 0;
                end = my_rsv->_rsv_end - group_first_block + 1;
                if (end > EXT3_BLOCKS_PER_GROUP(sb))
                        /* reservation window crosses group boundary */
                        end = EXT3_BLOCKS_PER_GROUP(sb);
                if ((start <= goal) && (goal < end))
                        start = goal;
                else
                        goal = -1;
        } else {
                if (goal > 0)
                        start = goal;
                else
                        start = 0;
                end = EXT3_BLOCKS_PER_GROUP(sb);
        }

        BUG_ON(start > EXT3_BLOCKS_PER_GROUP(sb));

repeat:
        if (goal < 0 || !ext3_test_allocatable(goal, bitmap_bh)) {
                goal = find_next_usable_block(start, bitmap_bh, end);
                if (goal < 0)
                        goto fail_access;
                if (!my_rsv) {
                        int i;

                        for (i = 0; i < 7 && goal > start &&
                                        ext3_test_allocatable(goal - 1,
                                                                bitmap_bh);
                                        i++, goal--)
                                ;
                }
        }
        start = goal;

        if (!claim_block(sb_bgl_lock(EXT3_SB(sb), group), goal, bitmap_bh)) {
                /*
                 * The block was allocated by another thread, or it was
                 * allocated and then freed by another thread
                 */
                start++;
                goal++;
                if (start >= end)
                        goto fail_access;
                goto repeat;
        }
        return goal;
fail_access:
        return -1;
}
/*
 * find_next_reservable_window():
 *	find a reservable space within the given range.
 *	It does not allocate the reservation window for now:
 *	alloc_new_reservation() will do the work later.
 *
 * @search_head: the head of the searching list;
 *	This is not necessarily the list head of the whole filesystem.
 *
 *	We have both head and start_block to assist the search
 *	for the reservable space.  The list starts from head,
 *	but we will shift to the place where start_block is,
 *	then start from there, when looking for a reservable space.
 *
 * @size: the target new reservation window size
 *
 * @group_first_block: the first block we consider to start
 *	the real search from
 *
 * @last_block:
 *	the maximum block number that our goal reservable space
 *	could start from.  This is normally the last block in this
 *	group.  The search will end when we find that the start of the
 *	next possible reservable space is beyond this boundary.
 *	This can handle a reservation window request that crosses the
 *	group boundary.
 *
 * Basically we search the given range (start_block, last_block),
 * rather than the whole reservation tree, to find a free region
 * that is of the requested size and has not been reserved.
 */
static int find_next_reservable_window(
                                struct ext3_reserve_window_node *search_head,
                                struct ext3_reserve_window_node *my_rsv,
                                struct super_block * sb, int start_block,
                                int last_block)
{
        struct rb_node *next;
        struct ext3_reserve_window_node *rsv, *prev;
        int cur;
        int size = my_rsv->rsv_goal_size;

        /* TODO: make the start of the reservation window byte-aligned */
        /* cur = *start_block & ~7;*/
        cur = start_block;
        rsv = search_head;
        if (!rsv)
                return -1;

        while (1) {
                if (cur <= rsv->rsv_end)
                        cur = rsv->rsv_end + 1;

                /* TODO?
                 * in the case we could not find a reservable space
                 * that is what is expected, during the re-search, we could
                 * remember what's the largest reservable space we could have
                 * and return that one.
                 *
                 * For now it will fail if we could not find the reservable
                 * space with expected-size (or more)...
                 */
                if (cur > last_block)
                        return -1;              /* fail */

                prev = rsv;
                next = rb_next(&rsv->rsv_node);
                rsv = rb_entry(next, struct ext3_reserve_window_node, rsv_node);

                /*
                 * Reached the last reservation, we can just append to the
                 * previous one.
                 */
                if (!next)
                        break;

                if (cur + size <= rsv->rsv_start) {
                        /*
                         * Found a reservable space big enough.  We could
                         * have a reservation across the group boundary here
                         */
                        break;
                }
        }
        /*
         * we come here either :
         * when we reach the end of the whole list,
         * and there is empty reservable space after last entry in the list.
         * append it to the end of the list.
         *
         * or we found one reservable space in the middle of the list,
         * return the reservation window that we could append to.
         * succeed.
         */

        if ((prev != my_rsv) && (!rsv_is_empty(&my_rsv->rsv_window)))
                rsv_window_remove(sb, my_rsv);

        /*
         * Let's book the whole available window for now.  We will check the
         * disk bitmap later and then, if there are free blocks then we adjust
         * the window size if it's larger than requested.
         * Otherwise, we will remove this node from the tree next time
         * find_next_reservable_window is called.
         */
        my_rsv->rsv_start = cur;
        my_rsv->rsv_end = cur + size - 1;
        my_rsv->rsv_alloc_hit = 0;

        if (prev != my_rsv)
                ext3_rsv_window_add(sb, my_rsv);

        return 0;
}
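/*
 * Worked example (not part of the original source, compiled out): the
 * same linear gap scan find_next_reservable_window() performs over the
 * tree, restated over plain sorted arrays.  All names and values below
 * are made up.
 */
#if 0
static int demo_find_gap(const int starts[], const int ends[], int nwin,
                         int cur, int last_block, int size)
{
        int i;

        for (i = 0; i < nwin; i++) {
                if (cur + size <= starts[i])
                        return cur;             /* gap before window i fits */
                if (cur <= ends[i])
                        cur = ends[i] + 1;      /* step past window i */
                if (cur > last_block)
                        return -1;              /* no room left in group */
        }
        return cur;             /* append after the last window */
}
/*
 * With windows [10,19] and [25,34]: demo_find_gap(..., cur = 12, size = 8)
 * skips the 5-block gap [20,24] and returns 35.
 */
#endif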
/*
 * alloc_new_reservation()--allocate a new reservation window
 *
 *	To make a new reservation, we search part of the filesystem
 *	reservation list (the part inside the group).  We try to
 *	allocate a new reservation window near the allocation goal,
 *	or the beginning of the group, if there is no goal.
 *
 *	We first find a reservable space after the goal, then from
 *	there, we check the bitmap for the first free block after
 *	it.  If there is no free block until the end of group, then the
 *	whole group is full, and we failed.  Otherwise, check if the free
 *	block is inside the expected reservable space; if so, we
 *	succeed.
 *	If the first free block is outside the reservable space, then
 *	starting from the first free block, we search for the next
 *	available space, and go on.
 *
 *	on success, a new reservation will be found and inserted into the
 *	list.  It contains at least one free block, and it does not overlap
 *	with other reservation windows.
 *
 *	failed: we failed to find a reservation window in this group
 *
 * @rsv: the reservation
 *
 * @goal: The goal (group-relative).  It is where the search for a
 *	free reservable space should start from.
 *	if we have a goal (goal > 0), then start from there;
 *	no goal (goal = -1), we start from the first block
 *	of the group.
 *
 * @sb: the super block
 * @group: the group we are trying to allocate in
 * @bitmap_bh: the block group block bitmap
 */
static int alloc_new_reservation(struct ext3_reserve_window_node *my_rsv,
                int goal, struct super_block *sb,
                unsigned int group, struct buffer_head *bitmap_bh)
{
        struct ext3_reserve_window_node *search_head;
        int group_first_block, group_end_block, start_block;
        int first_free_block;
        struct rb_root *fs_rsv_root = &EXT3_SB(sb)->s_rsv_window_root;
        unsigned long size;
        int ret;
        spinlock_t *rsv_lock = &EXT3_SB(sb)->s_rsv_window_lock;

        group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
                                group * EXT3_BLOCKS_PER_GROUP(sb);
        group_end_block = group_first_block + EXT3_BLOCKS_PER_GROUP(sb) - 1;

        if (goal < 0)
                start_block = group_first_block;
        else
                start_block = goal + group_first_block;

        size = my_rsv->rsv_goal_size;

        if (!rsv_is_empty(&my_rsv->rsv_window)) {
                /*
                 * if the old reservation crosses the group boundary
                 * and if the goal is inside the old reservation window,
                 * we will come here when we just failed to allocate from
                 * the first part of the window. We still have another part
                 * that belongs to the next group. In this case, there is no
                 * point in discarding our window and trying to allocate a
                 * new one in this group (which will fail). We should
                 * keep the reservation window, just simply move on.
                 *
                 * Maybe we could shift the start block of the reservation
                 * window to the first block of next group.
                 */

                if ((my_rsv->rsv_start <= group_end_block) &&
                        (my_rsv->rsv_end > group_end_block) &&
                        (start_block >= my_rsv->rsv_start))
                        return -1;

                if ((my_rsv->rsv_alloc_hit >
                     (my_rsv->rsv_end - my_rsv->rsv_start + 1) / 2)) {
                        /*
                         * if the previous allocation hit ratio is
                         * greater than 1/2, then we double the size of
                         * the reservation window the next time,
                         * otherwise we keep the same size window
                         */
                        size = size * 2;
                        if (size > EXT3_MAX_RESERVE_BLOCKS)
                                size = EXT3_MAX_RESERVE_BLOCKS;
                        my_rsv->rsv_goal_size = size;
                }
        }

        spin_lock(rsv_lock);
        /*
         * shift the search start to the window near the goal block
         */
        search_head = search_reserve_window(fs_rsv_root, start_block);

        /*
         * find_next_reservable_window() simply finds a reservable window
         * inside the given range(start_block, group_end_block).
         *
         * To make sure the reservation window has a free bit inside it, we
         * need to check the bitmap after we found a reservable window.
         */
retry:
        ret = find_next_reservable_window(search_head, my_rsv, sb,
                                                start_block, group_end_block);

        if (ret == -1) {
                if (!rsv_is_empty(&my_rsv->rsv_window))
                        rsv_window_remove(sb, my_rsv);
                spin_unlock(rsv_lock);
                return -1;
        }

        /*
         * On success, find_next_reservable_window() returns the
         * reservation window where there is a reservable space after it.
         * Before we reserve this reservable space, we need
         * to make sure there is at least a free block inside this region.
         *
         * searching the first free bit on the block bitmap and copy of
         * last committed bitmap alternatively, until we found an allocatable
         * block. Search starts from the start block of the reservable space
         * we just found.
         */
        spin_unlock(rsv_lock);
        first_free_block = bitmap_search_next_usable_block(
                        my_rsv->rsv_start - group_first_block,
                        bitmap_bh, group_end_block - group_first_block + 1);

        if (first_free_block < 0) {
                /*
                 * no free block left on the bitmap, no point
                 * to reserve the space. return failed.
                 */
                spin_lock(rsv_lock);
                if (!rsv_is_empty(&my_rsv->rsv_window))
                        rsv_window_remove(sb, my_rsv);
                spin_unlock(rsv_lock);
                return -1;              /* failed */
        }

        start_block = first_free_block + group_first_block;
        /*
         * check if the first free block is within the
         * free space we just reserved
         */
        if (start_block >= my_rsv->rsv_start && start_block < my_rsv->rsv_end)
                return 0;               /* success */
        /*
         * if the first free bit we found is out of the reservable space
         * continue search for next reservable space,
         * start from where the free block is,
         * we also shift the list head to where we stopped last time
         */
        search_head = my_rsv;
        spin_lock(rsv_lock);
        goto retry;
}
/*
 * This is the main function used to allocate a new block and its reservation
 * window.
 *
 * Each time a new block allocation is needed, first try to allocate from
 * the inode's own reservation.  If it does not have a reservation window,
 * instead of looking for a free bit in the bitmap first and then looking up
 * the reservation list to see if the bit is inside somebody else's
 * reservation window, we try to allocate a reservation window for it
 * starting from the goal first.  Then do the block allocation within the
 * reservation window.
 *
 * This will avoid keeping on searching the reservation list again and
 * again when somebody is looking for a free block (without
 * reservation), and there are lots of free blocks, but they are all
 * being reserved.
 *
 * We use a red-black tree for the per-filesystem reservation list, so
 * the insert, remove and find-a-free-space (non-reserved) operations
 * are fast.
 */
static int
ext3_try_to_allocate_with_rsv(struct super_block *sb, handle_t *handle,
                        unsigned int group, struct buffer_head *bitmap_bh,
                        int goal, struct ext3_reserve_window_node * my_rsv,
                        int *errp)
{
        unsigned long group_first_block;
        int ret = 0;
        int fatal;

        *errp = 0;

        /*
         * Make sure we use undo access for the bitmap, because it is critical
         * that we do the frozen_data COW on bitmap buffers in all cases even
         * if the buffer is in BJ_Forget state in the committing transaction.
         */
        BUFFER_TRACE(bitmap_bh, "get undo access for new block");
        fatal = ext3_journal_get_undo_access(handle, bitmap_bh);
        if (fatal) {
                *errp = fatal;
                return -1;
        }

        /*
         * we don't deal with reservation when
         * filesystem is mounted without reservation
         * or the file is not a regular file
         * or last attempt to allocate a block with reservation turned on failed
         */
        if (my_rsv == NULL) {
                ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal, NULL);
                goto out;
        }
        /*
         * goal is a group relative block number (if there is a goal)
         * 0 < goal < EXT3_BLOCKS_PER_GROUP(sb)
         * first block is a filesystem wide block number
         * first block is the block number of the first block in this group
         */
        group_first_block = le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block) +
                        group * EXT3_BLOCKS_PER_GROUP(sb);

        /*
         * Basically we will allocate a new block from inode's reservation
         * window.
         *
         * We need to allocate a new reservation window, if:
         * a) inode does not have a reservation window; or
         * b) last attempt to allocate a block from existing reservation
         *    failed; or
         * c) we come here with a goal and with a reservation window
         *
         * We do not need to allocate a new reservation window if we come here
         * at the beginning with a goal and the goal is inside the window, or
         * we don't have a goal but already have a reservation window.
         * then we could go to allocate from the reservation window directly.
         */
        while (1) {
                if (rsv_is_empty(&my_rsv->rsv_window) || (ret < 0) ||
                        !goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb)) {
                        ret = alloc_new_reservation(my_rsv, goal, sb,
                                                        group, bitmap_bh);
                        if (ret < 0)
                                break;                  /* failed */

                        if (!goal_in_my_reservation(&my_rsv->rsv_window, goal, group, sb))
                                goal = -1;
                }
                if ((my_rsv->rsv_start >= group_first_block + EXT3_BLOCKS_PER_GROUP(sb))
                    || (my_rsv->rsv_end < group_first_block))
                        BUG();
                ret = ext3_try_to_allocate(sb, handle, group, bitmap_bh, goal,
                                           &my_rsv->rsv_window);
                if (ret >= 0) {
                        my_rsv->rsv_alloc_hit++;
                        break;                          /* succeed */
                }
        }
out:
        if (ret >= 0) {
                BUFFER_TRACE(bitmap_bh, "journal_dirty_metadata for "
                                        "bitmap block");
                fatal = ext3_journal_dirty_metadata(handle, bitmap_bh);
                if (fatal) {
                        *errp = fatal;
                        return -1;
                }
                return ret;
        }

        BUFFER_TRACE(bitmap_bh, "journal_release_buffer");
        ext3_journal_release_buffer(handle, bitmap_bh);
        return ret;
}
static int ext3_has_free_blocks(struct ext3_sb_info *sbi)
{
        int free_blocks, root_blocks;

        free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
        root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
        if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
                sbi->s_resuid != current->fsuid &&
                (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
                return 0;
        }
        return 1;
}
/*
 * ext3_should_retry_alloc() is called when ENOSPC is returned, and if
 * it is profitable to retry the operation, this function will wait
 * for the current or committing transaction to complete, and then
 * return TRUE.
 */
int ext3_should_retry_alloc(struct super_block *sb, int *retries)
{
        if (!ext3_has_free_blocks(EXT3_SB(sb)) || (*retries)++ > 3)
                return 0;

        jbd_debug(1, "%s: retrying operation after ENOSPC\n", sb->s_id);

        return journal_force_commit_nested(EXT3_SB(sb)->s_journal);
}
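/*
 * Caller-side sketch (not part of the original source, compiled out):
 * the intended retry pattern around an allocation that failed with
 * -ENOSPC.  The wrapper function is made up for illustration.
 */
#if 0
static int demo_alloc_with_retry(handle_t *handle, struct inode *inode,
                                 unsigned long goal)
{
        int err, block, retries = 0;

        do {
                block = ext3_new_block(handle, inode, goal, &err);
        } while (!block && err == -ENOSPC &&
                 ext3_should_retry_alloc(inode->i_sb, &retries));
        return block ? block : err;
}
#endif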
/*
 * ext3_new_block uses a goal block to assist allocation.  If the goal is
 * free, or there is a free block within 32 blocks of the goal, that block
 * is allocated.  Otherwise a forward search is made for a free block; within
 * each block group the search first looks for an entire free byte in the block
 * bitmap, and then for any free bit if that fails.
 * This function also updates quota and i_blocks field.
 */
int ext3_new_block(handle_t *handle, struct inode *inode,
                        unsigned long goal, int *errp)
{
        struct buffer_head *bitmap_bh = NULL;
        struct buffer_head *gdp_bh;
        int group_no;
        int goal_group;
        int ret_block;
        int bgi;                        /* blockgroup iteration index */
        int target_block;
        int fatal = 0, err;
        int performed_allocation = 0;
        int free_blocks;
        struct super_block *sb;
        struct ext3_group_desc *gdp;
        struct ext3_super_block *es;
        struct ext3_sb_info *sbi;
        struct ext3_reserve_window_node *my_rsv = NULL;
        struct ext3_block_alloc_info *block_i;
        unsigned short windowsz = 0;
#ifdef EXT3FS_DEBUG
        static int goal_hits, goal_attempts;
#endif
        unsigned long ngroups;

        *errp = -ENOSPC;
        sb = inode->i_sb;
        if (!sb) {
                printk("ext3_new_block: nonexistent device");
                return 0;
        }

        /*
         * Check quota for allocation of this block.
         */
        if (DQUOT_ALLOC_BLOCK(inode, 1)) {
                *errp = -EDQUOT;
                return 0;
        }

        sbi = EXT3_SB(sb);
        es = EXT3_SB(sb)->s_es;
        ext3_debug("goal=%lu.\n", goal);
        /*
         * Allocate a block from reservation only when
         * filesystem is mounted with reservation(default,-o reservation), and
         * it's a regular file, and
         * the desired window size is greater than 0 (One could use ioctl
         * command EXT3_IOC_SETRSVSZ to set the window size to 0 to turn off
         * reservation on that particular file)
         */
        block_i = EXT3_I(inode)->i_block_alloc_info;
        if (block_i && ((windowsz = block_i->rsv_window_node.rsv_goal_size) > 0))
                my_rsv = &block_i->rsv_window_node;

        if (!ext3_has_free_blocks(sbi)) {
                *errp = -ENOSPC;
                goto out;
        }

        /*
         * First, test whether the goal block is free.
         */
        if (goal < le32_to_cpu(es->s_first_data_block) ||
            goal >= le32_to_cpu(es->s_blocks_count))
                goal = le32_to_cpu(es->s_first_data_block);
        group_no = (goal - le32_to_cpu(es->s_first_data_block)) /
                        EXT3_BLOCKS_PER_GROUP(sb);
        gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
        if (!gdp)
                goto io_error;

        goal_group = group_no;
retry:
        free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
        /*
         * if there are not enough free blocks to make a new reservation
         * turn off reservation for this allocation
         */
        if (my_rsv && (free_blocks < windowsz)
                && (rsv_is_empty(&my_rsv->rsv_window)))
                my_rsv = NULL;

        if (free_blocks > 0) {
                ret_block = ((goal - le32_to_cpu(es->s_first_data_block)) %
                                EXT3_BLOCKS_PER_GROUP(sb));
                bitmap_bh = read_block_bitmap(sb, group_no);
                if (!bitmap_bh)
                        goto io_error;
                ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
                                        bitmap_bh, ret_block, my_rsv, &fatal);
                if (fatal)
                        goto out;
                if (ret_block >= 0)
                        goto allocated;
        }

        ngroups = EXT3_SB(sb)->s_groups_count;
        smp_rmb();

        /*
         * Now search the rest of the groups.  We assume that
         * i and gdp correctly point to the last group visited.
         */
        for (bgi = 0; bgi < ngroups; bgi++) {
                group_no++;
                if (group_no >= ngroups)
                        group_no = 0;
                gdp = ext3_get_group_desc(sb, group_no, &gdp_bh);
                if (!gdp) {
                        *errp = -EIO;
                        goto out;
                }
                free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
                /*
                 * skip this group if the number of
                 * free blocks is less than half of the reservation
                 * window size.
                 */
                if (free_blocks <= (windowsz/2))
                        continue;

                brelse(bitmap_bh);
                bitmap_bh = read_block_bitmap(sb, group_no);
                if (!bitmap_bh)
                        goto io_error;
                ret_block = ext3_try_to_allocate_with_rsv(sb, handle, group_no,
                                        bitmap_bh, -1, my_rsv, &fatal);
                if (fatal)
                        goto out;
                if (ret_block >= 0)
                        goto allocated;
        }
        /*
         * We may end up with a bogus earlier ENOSPC error due to the
         * filesystem being "full" of reservations, while there may
         * indeed be free blocks available on disk.
         * In this case, we just forget about the reservations and
         * do block allocation as without reservations.
         */
        if (my_rsv) {
                my_rsv = NULL;
                group_no = goal_group;
                goto retry;
        }
        /* No space left on the device */
        *errp = -ENOSPC;
        goto out;

allocated:

        ext3_debug("using block group %d(%d)\n",
                        group_no, gdp->bg_free_blocks_count);

        BUFFER_TRACE(gdp_bh, "get_write_access");
        fatal = ext3_journal_get_write_access(handle, gdp_bh);
        if (fatal)
                goto out;

        target_block = ret_block + group_no * EXT3_BLOCKS_PER_GROUP(sb)
                                + le32_to_cpu(es->s_first_data_block);

        if (target_block == le32_to_cpu(gdp->bg_block_bitmap) ||
            target_block == le32_to_cpu(gdp->bg_inode_bitmap) ||
            in_range(target_block, le32_to_cpu(gdp->bg_inode_table),
                      EXT3_SB(sb)->s_itb_per_group))
                ext3_error(sb, "ext3_new_block",
                            "Allocating block in system zone - "
                            "block = %u", target_block);

        performed_allocation = 1;

#ifdef CONFIG_JBD_DEBUG
        {
                struct buffer_head *debug_bh;

                /* Record bitmap buffer state in the newly allocated block */
                debug_bh = sb_find_get_block(sb, target_block);
                if (debug_bh) {
                        BUFFER_TRACE(debug_bh, "state when allocated");
                        BUFFER_TRACE2(debug_bh, bitmap_bh, "bitmap state");
                        brelse(debug_bh);
                }
        }
        jbd_lock_bh_state(bitmap_bh);
        spin_lock(sb_bgl_lock(sbi, group_no));
        if (buffer_jbd(bitmap_bh) && bh2jh(bitmap_bh)->b_committed_data) {
                if (ext3_test_bit(ret_block,
                                bh2jh(bitmap_bh)->b_committed_data)) {
                        printk("%s: block was unexpectedly set in "
                                "b_committed_data\n", __FUNCTION__);
                }
        }
        ext3_debug("found bit %d\n", ret_block);
        spin_unlock(sb_bgl_lock(sbi, group_no));
        jbd_unlock_bh_state(bitmap_bh);
#endif

        /* ret_block was blockgroup-relative.  Now it becomes fs-relative */
        ret_block = target_block;

        if (ret_block >= le32_to_cpu(es->s_blocks_count)) {
                ext3_error(sb, "ext3_new_block",
                            "block(%d) >= blocks count(%d) - "
                            "block_group = %d, es == %p ", ret_block,
                        le32_to_cpu(es->s_blocks_count), group_no, es);
                goto out;
        }

        /*
         * It is up to the caller to add the new buffer to a journal
         * list of some description.  We don't know in advance whether
         * the caller wants to use it as metadata or data.
         */
        ext3_debug("allocating block %d. Goal hits %d of %d.\n",
                        ret_block, goal_hits, goal_attempts);

        spin_lock(sb_bgl_lock(sbi, group_no));
        gdp->bg_free_blocks_count =
                        cpu_to_le16(le16_to_cpu(gdp->bg_free_blocks_count) - 1);
        spin_unlock(sb_bgl_lock(sbi, group_no));
        percpu_counter_mod(&sbi->s_freeblocks_counter, -1);

        BUFFER_TRACE(gdp_bh, "journal_dirty_metadata for group descriptor");
        err = ext3_journal_dirty_metadata(handle, gdp_bh);
        if (!fatal)
                fatal = err;

        sb->s_dirt = 1;
        if (fatal)
                goto out;

        *errp = 0;
        brelse(bitmap_bh);
        return ret_block;

io_error:
        *errp = -EIO;
out:
        if (fatal) {
                *errp = fatal;
                ext3_std_error(sb, fatal);
        }
        /*
         * Undo the block allocation
         */
        if (!performed_allocation)
                DQUOT_FREE_BLOCK(inode, 1);
        brelse(bitmap_bh);
        return 0;
}
unsigned long ext3_count_free_blocks(struct super_block *sb)
{
        unsigned long desc_count;
        struct ext3_group_desc *gdp;
        int i;
        unsigned long ngroups = EXT3_SB(sb)->s_groups_count;
#ifdef EXT3FS_DEBUG
        struct ext3_super_block *es;
        unsigned long bitmap_count, x;
        struct buffer_head *bitmap_bh = NULL;

        es = EXT3_SB(sb)->s_es;
        desc_count = 0;
        bitmap_count = 0;
        gdp = NULL;

        smp_rmb();
        for (i = 0; i < ngroups; i++) {
                gdp = ext3_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
                brelse(bitmap_bh);
                bitmap_bh = read_block_bitmap(sb, i);
                if (bitmap_bh == NULL)
                        continue;

                x = ext3_count_free(bitmap_bh, sb->s_blocksize);
                printk("group %d: stored = %d, counted = %lu\n",
                        i, le16_to_cpu(gdp->bg_free_blocks_count), x);
                bitmap_count += x;
        }
        brelse(bitmap_bh);
        printk("ext3_count_free_blocks: stored = %u, computed = %lu, %lu\n",
               le32_to_cpu(es->s_free_blocks_count),
                desc_count, bitmap_count);
        return bitmap_count;
#else
        desc_count = 0;
        smp_rmb();
        for (i = 0; i < ngroups; i++) {
                gdp = ext3_get_group_desc(sb, i, NULL);
                if (!gdp)
                        continue;
                desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
        }

        return desc_count;
#endif
}
static inline int
block_in_use(unsigned long block, struct super_block *sb, unsigned char *map)
{
        return ext3_test_bit ((block -
                le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block)) %
                         EXT3_BLOCKS_PER_GROUP(sb), map);
}
static inline int test_root(int a, int b)
{
        int num = b;

        while (a > num)
                num *= b;
        return num == a;
}

static int ext3_group_sparse(int group)
{
        if (group <= 1)
                return 1;
        if (!(group & 1))
                return 0;
        return (test_root(group, 7) || test_root(group, 5) ||
                test_root(group, 3));
}
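/*
 * Worked example: test_root(a, b) asks whether a is b, b^2, b^3, ...
 * So with sparse_super enabled, only groups 0, 1 and powers of 3, 5
 * and 7 carry superblock/descriptor backups; the first such groups are
 * 0, 1, 3, 5, 7, 9, 25, 27, 49, 81, ...
 */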
/**
 *	ext3_bg_has_super - number of blocks used by the superblock in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the superblock (primary or backup)
 *	in this group.  Currently this will be only 0 or 1.
 */
int ext3_bg_has_super(struct super_block *sb, int group)
{
        if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
            !ext3_group_sparse(group))
                return 0;
        return 1;
}
/**
 *	ext3_bg_num_gdb - number of blocks used by the group table in group
 *	@sb: superblock for filesystem
 *	@group: group number to check
 *
 *	Return the number of blocks used by the group descriptor table
 *	(primary or backup) in this group.  In the future there may be a
 *	different number of descriptor blocks in each group.
 */
unsigned long ext3_bg_num_gdb(struct super_block *sb, int group)
{
        if (EXT3_HAS_RO_COMPAT_FEATURE(sb,EXT3_FEATURE_RO_COMPAT_SPARSE_SUPER)&&
            !ext3_group_sparse(group))
                return 0;
        return EXT3_SB(sb)->s_gdb_count;
}