fs/ext4/migrate.c
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/slab.h>
#include "ext4_jbd2.h"
#include "ext4_extents.h"

/*
 * Details of the contiguous blocks which can be
 * represented by a single extent
 */
struct migrate_struct {
	ext4_lblk_t first_block, last_block, curr_block;
	ext4_fsblk_t first_pblock, last_pblock;
};

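/*
 * finish_range() inserts the contiguous block range accumulated in the
 * migrate_struct into the temporary inode as a single extent, extending
 * or restarting the journal when the insert needs more credits.
 */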
static int finish_range(handle_t *handle, struct inode *inode,
				struct migrate_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;
	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	path = ext4_ext_find_extent(inode, lb->first_block, NULL, 0);

	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		path = NULL;
		goto err_out;
	}

	/*
	 * Calculate the credit needed to insert this extent.
	 * Since we are doing this in a loop we may accumulate extra
	 * credit. But below we try not to accumulate too much
	 * of it by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_single_extent(inode,
		    lb->last_block - lb->first_block + 1, path);

	/*
	 * Make sure the credit we accumulated is not really high.
	 */
	if (needed && ext4_handle_has_enough_credits(handle,
						EXT4_RESERVE_TRANS_BLOCKS)) {
		retval = ext4_journal_restart(handle, needed);
		if (retval)
			goto err_out;
	} else if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval) {
			/*
			 * If not able to extend the journal, restart it.
			 */
			retval = ext4_journal_restart(handle, needed);
			if (retval)
				goto err_out;
		}
	}
	retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0);
err_out:
	if (path) {
		ext4_ext_drop_refs(path);
		kfree(path);
	}
	lb->first_pblock = 0;
	return retval;
}

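/*
 * update_extent_range() either grows the range currently tracked in the
 * migrate_struct (when the new block is both physically and logically
 * contiguous with it) or flushes that range via finish_range() and starts
 * a new range at this block.
 */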
static int update_extent_range(handle_t *handle, struct inode *inode,
			       ext4_fsblk_t pblock, struct migrate_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == lb->curr_block)) {
		lb->last_pblock = pblock;
		lb->last_block = lb->curr_block;
		lb->curr_block++;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = lb->curr_block;
	lb->curr_block++;
	return retval;
}

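/*
 * update_ind_extent_range() walks a single indirect block: every mapped
 * entry is fed to update_extent_range() against the temporary inode,
 * while holes only advance the logical block counter.
 */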
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock,
				   struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			lb->curr_block++;
		}
	}
	put_bh(bh);
	return retval;
}

static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock,
				    struct migrate_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]), lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			lb->curr_block += max_entries * max_entries;
		}
	}
	put_bh(bh);
	return retval;
}

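/*
 * Make sure the running handle has enough credits to free one block
 * (superblock, group descriptor and block bitmap, plus possible quota
 * updates); extend the handle or restart the journal otherwise.
 */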
static int extend_credit_for_blkdel(handle_t *handle, struct inode *inode)
{
	int retval = 0, needed;

	if (ext4_handle_has_enough_credits(handle, EXT4_RESERVE_TRANS_BLOCKS+1))
		return 0;
	/*
	 * We are freeing a block. While doing this we touch the
	 * superblock, group descriptor and block bitmap, so allocate
	 * a credit of 3. We may also update quota (user and group).
	 */
	needed = 3 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb);

	if (ext4_journal_extend(handle, needed) != 0)
		retval = ext4_journal_restart(handle, needed);

	return retval;
}

static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			extend_credit_for_blkdel(handle, inode);
			ext4_free_blocks(handle, inode, NULL,
					 le32_to_cpu(tmp_idata[i]), 1,
					 EXT4_FREE_BLOCKS_METADATA |
					 EXT4_FREE_BLOCKS_FORGET);
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, le32_to_cpu(i_data), 1,
			 EXT4_FREE_BLOCKS_METADATA |
			 EXT4_FREE_BLOCKS_FORGET);
	return 0;
}

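/*
 * free_ind_block() releases the indirect, double indirect and triple
 * indirect metadata blocks saved from the original inode's i_data once
 * their data blocks have been re-expressed as extents.
 */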
static int free_ind_block(handle_t *handle, struct inode *inode, __le32 *i_data)
{
	int retval;

	/* ei->i_data[EXT4_IND_BLOCK] */
	if (i_data[0]) {
		extend_credit_for_blkdel(handle, inode);
		ext4_free_blocks(handle, inode, NULL,
				 le32_to_cpu(i_data[0]), 1,
				 EXT4_FREE_BLOCKS_METADATA |
				 EXT4_FREE_BLOCKS_FORGET);
	}

	/* ei->i_data[EXT4_DIND_BLOCK] */
	if (i_data[1]) {
		retval = free_dind_blocks(handle, inode, i_data[1]);
		if (retval)
			return retval;
	}

	/* ei->i_data[EXT4_TIND_BLOCK] */
	if (i_data[2]) {
		retval = free_tind_blocks(handle, inode, i_data[2]);
		if (retval)
			return retval;
	}
	return 0;
}

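/*
 * ext4_ext_swap_inode_data() copies the extent-format i_data built in the
 * temporary inode into the original inode under i_data_sem, fails with
 * -EAGAIN if a racing allocation cleared EXT4_STATE_EXT_MIGRATE, and then
 * frees the now-unused indirect metadata blocks.
 */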
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
						struct inode *tmp_inode)
{
	int retval;
	__le32	i_data[3];
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

	i_data[0] = ei->i_data[EXT4_IND_BLOCK];
	i_data[1] = ei->i_data[EXT4_DIND_BLOCK];
	i_data[2] = ei->i_data[EXT4_TIND_BLOCK];

	down_write(&EXT4_I(inode)->i_data_sem);
	/*
	 * If EXT4_STATE_EXT_MIGRATE has been cleared, a block allocation
	 * happened after we started the migrate. We need to
	 * fail the migrate.
	 */
	if (!ext4_test_inode_state(inode, EXT4_STATE_EXT_MIGRATE)) {
		retval = -EAGAIN;
		up_write(&EXT4_I(inode)->i_data_sem);
		goto err_out;
	} else
		ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across.
	 */
	ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode's i_blocks for extent blocks
	 * via quota APIs. The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);
	up_write(&EXT4_I(inode)->i_data_sem);

	/*
	 * We mark the inode dirty afterwards, because we decrement
	 * i_blocks when freeing the indirect meta-data blocks.
	 */
	retval = free_ind_block(handle, inode, i_data);
	ext4_mark_inode_dirty(handle, inode);

err_out:
	return retval;
}

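/*
 * free_ext_idx() recursively frees the extent tree block referenced by an
 * index entry, descending into lower-level index blocks first.
 */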
static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	if (!bh)
		return -EIO;

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, block, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return retval;
}

/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;
	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}

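/*
 * ext4_ext_migrate() converts an indirect-block-mapped inode to extent
 * format: it builds a matching extent tree in a temporary orphan inode by
 * walking the direct and indirect blocks, then swaps that mapping into the
 * original inode and frees the old indirect metadata.
 */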
int ext4_ext_migrate(struct inode *inode)
{
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct migrate_struct lb;
	unsigned long max_entries;
	__u32 goal;
	uid_t owner[2];

	/*
	 * If the filesystem does not support extents, or the inode
	 * already is extent-based, error out.
	 */
	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
		/*
		 * don't migrate fast symlink
		 */
		return retval;

	/*
	 * Worst case we can touch the allocation bitmaps, a bgd
	 * block, and a block to link in the orphan list. We do need
	 * to worry about credits for modifying the quota inode.
	 */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE,
		4 + EXT4_MAXQUOTAS_TRANS_BLOCKS(inode->i_sb));

	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		return retval;
	}
	goal = (((inode->i_ino - 1) / EXT4_INODES_PER_GROUP(inode->i_sb)) *
		EXT4_INODES_PER_GROUP(inode->i_sb)) + 1;
	owner[0] = i_uid_read(inode);
	owner[1] = i_gid_read(inode);
	tmp_inode = ext4_new_inode(handle, inode->i_sb->s_root->d_inode,
				   S_IFREG, NULL, goal, owner);
	if (IS_ERR(tmp_inode)) {
		retval = PTR_ERR(tmp_inode);
		ext4_journal_stop(handle);
		return retval;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * Set the i_nlink to zero so it will be deleted later
	 * when we drop inode reference.
	 */
	clear_nlink(tmp_inode);

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	/*
	 * start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode. Later, as and
	 * when we add extents, we extend the journal.
	 */
	/*
	 * Even though we take i_mutex we can still cause block
	 * allocation via mmap write to holes. If we have allocated
	 * new blocks we fail migrate. New block allocation will
	 * clear EXT4_STATE_EXT_MIGRATE flag. The flag is updated
	 * with i_data_sem held to prevent racing with block
	 * allocation.
	 */
	down_read((&EXT4_I(inode)->i_data_sem));
	ext4_set_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
	up_read((&EXT4_I(inode)->i_data_sem));

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle)) {
		/*
		 * It is impossible to update on-disk structures without
		 * a handle, so just rollback in-core changes and leave other
		 * work to orphan_list_cleanup()
		 */
		ext4_orphan_del(NULL, tmp_inode);
		retval = PTR_ERR(handle);
		goto out;
	}

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32 bit block address, 4 bytes */
	max_entries = inode->i_sb->s_blocksize >> 2;
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]), &lb);
			if (retval)
				goto err_out;
		} else
			lb.curr_block++;
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_IND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_DIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	} else
		lb.curr_block += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
				le32_to_cpu(i_data[EXT4_TIND_BLOCK]), &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	if (retval)
		/*
		 * On failure, delete the extent information built in
		 * the tmp_inode
		 */
		free_ext_block(handle, tmp_inode);
	else {
		retval = ext4_ext_swap_inode_data(handle, inode, tmp_inode);
		if (retval)
			/*
			 * If we fail to swap inode data, free the extent
			 * details of the tmp inode
			 */
			free_ext_block(handle, tmp_inode);
	}

	/* We mark the tmp_inode dirty via ext4_ext_tree_init. */
	if (ext4_journal_extend(handle, 1) != 0)
		ext4_journal_restart(handle, 1);

	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * Set the i_blocks count to zero
	 * so that ext4_delete_inode() does the
	 * right job.
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);
	ext4_journal_stop(handle);
out:
	unlock_new_inode(tmp_inode);
	iput(tmp_inode);

	return retval;
}

/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header	*eh;
	struct ext4_super_block		*es = EXT4_SB(inode->i_sb)->s_es;
	struct ext4_inode_info		*ei = EXT4_I(inode);
	struct ext4_extent		*ex;
	unsigned int			i, len;
	ext4_fsblk_t			blk;
	handle_t			*handle;
	int				ret;

	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
		return -EOPNOTSUPP;

	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		if (len > EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	for (i = 0; i < len; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ext4_mark_inode_dirty(handle, inode);
errout:
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}