fs/ext4/migrate.c (from wrt350n-kernel.git)
/*
 * Copyright IBM Corporation, 2007
 * Author Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2.1 of the GNU Lesser General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 */

#include <linux/module.h>
#include <linux/ext4_jbd2.h>
#include <linux/ext4_fs_extents.h>
/*
 * Details of a run of contiguous blocks that can be
 * represented by a single extent.
 */
struct list_blocks_struct {
	ext4_lblk_t first_block, last_block;
	ext4_fsblk_t first_pblock, last_pblock;
};
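
/*
 * Insert the range accumulated in *lb into the temporary inode as a
 * single extent, extending (or restarting) the journal so the insert
 * has enough credits.  Clears lb->first_pblock so the caller starts a
 * fresh range.
 */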
static int finish_range(handle_t *handle, struct inode *inode,
				struct list_blocks_struct *lb)
{
	int retval = 0, needed;
	struct ext4_extent newext;
	struct ext4_ext_path *path;

	if (lb->first_pblock == 0)
		return 0;

	/* Add the extent to temp inode */
	newext.ee_block = cpu_to_le32(lb->first_block);
	newext.ee_len = cpu_to_le16(lb->last_block - lb->first_block + 1);
	ext4_ext_store_pblock(&newext, lb->first_pblock);
	path = ext4_ext_find_extent(inode, lb->first_block, NULL);

	if (IS_ERR(path)) {
		retval = PTR_ERR(path);
		goto err_out;
	}

	/*
	 * Calculate the credits needed to insert this extent.
	 * Since we are doing this in a loop we may accumulate extra
	 * credits, but below we try not to accumulate too many
	 * of them by restarting the journal.
	 */
	needed = ext4_ext_calc_credits_for_insert(inode, path);

	/*
	 * Make sure the credits we accumulated are not really high.
	 */
	if (needed && handle->h_buffer_credits >= EXT4_RESERVE_TRANS_BLOCKS) {
		retval = ext4_journal_restart(handle, needed);
		if (retval)
			goto err_out;
	}
	if (needed) {
		retval = ext4_journal_extend(handle, needed);
		if (retval != 0) {
			/*
			 * If we are not able to extend the journal,
			 * restart it.
			 */
			retval = ext4_journal_restart(handle, needed);
			if (retval)
				goto err_out;
		}
	}
	retval = ext4_ext_insert_extent(handle, inode, path, &newext);
err_out:
	lb->first_pblock = 0;
	return retval;
}
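
/*
 * Account one block mapping (logical block blk_num -> physical block
 * pblock).  If it is contiguous with the range being built, just extend
 * the range; otherwise flush the current range via finish_range() and
 * start a new one.
 */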
static int update_extent_range(handle_t *handle, struct inode *inode,
				ext4_fsblk_t pblock, ext4_lblk_t blk_num,
				struct list_blocks_struct *lb)
{
	int retval;
	/*
	 * See if we can add on to the existing range (if it exists)
	 */
	if (lb->first_pblock &&
		(lb->last_pblock+1 == pblock) &&
		(lb->last_block+1 == blk_num)) {
		lb->last_pblock = pblock;
		lb->last_block = blk_num;
		return 0;
	}
	/*
	 * Start a new range.
	 */
	retval = finish_range(handle, inode, lb);
	lb->first_pblock = lb->last_pblock = pblock;
	lb->first_block = lb->last_block = blk_num;

	return retval;
}
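
/*
 * Walk a single indirect block: feed every mapped data block to
 * update_extent_range() and advance *blk_nump past the logical range
 * the indirect block covers (holes just advance the block count).
 */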
static int update_ind_extent_range(handle_t *handle, struct inode *inode,
				   ext4_fsblk_t pblock, ext4_lblk_t *blk_nump,
				   struct list_blocks_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	ext4_lblk_t blk_count = *blk_nump;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	if (!pblock) {
		/* Only update the file block number */
		*blk_nump += max_entries;
		return 0;
	}

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++, blk_count++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, inode,
						le32_to_cpu(i_data[i]),
						blk_count, lb);
			if (retval)
				break;
		}
	}

	/* Update the file block number */
	*blk_nump = blk_count;
	put_bh(bh);
	return retval;
}
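
/*
 * Walk a double indirect block: each entry is a single indirect block,
 * handled by update_ind_extent_range().  Missing entries only advance
 * the logical block count.
 */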
static int update_dind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock, ext4_lblk_t *blk_nump,
				    struct list_blocks_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	ext4_lblk_t blk_count = *blk_nump;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	if (!pblock) {
		/* Only update the file block number */
		*blk_nump += max_entries * max_entries;
		return 0;
	}

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_ind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]),
						&blk_count, lb);
			if (retval)
				break;
		} else {
			/* Only update the file block number */
			blk_count += max_entries;
		}
	}

	/* Update the file block number */
	*blk_nump = blk_count;
	put_bh(bh);
	return retval;
}
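
/*
 * Walk a triple indirect block: each entry is a double indirect block,
 * handled by update_dind_extent_range().
 */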
static int update_tind_extent_range(handle_t *handle, struct inode *inode,
				    ext4_fsblk_t pblock, ext4_lblk_t *blk_nump,
				    struct list_blocks_struct *lb)
{
	struct buffer_head *bh;
	__le32 *i_data;
	int i, retval = 0;
	ext4_lblk_t blk_count = *blk_nump;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	if (!pblock) {
		/* Only update the file block number */
		*blk_nump += max_entries * max_entries * max_entries;
		return 0;
	}

	bh = sb_bread(inode->i_sb, pblock);
	if (!bh)
		return -EIO;

	i_data = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (i_data[i]) {
			retval = update_dind_extent_range(handle, inode,
						le32_to_cpu(i_data[i]),
						&blk_count, lb);
			if (retval)
				break;
		} else
			/* Only update the file block number */
			blk_count += max_entries * max_entries;
	}

	/* Update the file block number */
	*blk_nump = blk_count;
	put_bh(bh);
	return retval;
}
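
/*
 * Free the metadata blocks hanging off a double indirect block: every
 * referenced single indirect block, then the double indirect block
 * itself.  The data blocks are left in place; after the migration they
 * are referenced by the new extent map.
 */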
static int free_dind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i])
			ext4_free_blocks(handle, inode,
					le32_to_cpu(tmp_idata[i]), 1, 1);
	}
	put_bh(bh);
	ext4_free_blocks(handle, inode, le32_to_cpu(i_data), 1, 1);
	return 0;
}
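
/*
 * Free the metadata blocks hanging off a triple indirect block: each
 * entry is a double indirect block, torn down via free_dind_blocks(),
 * followed by the triple indirect block itself.
 */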
static int free_tind_blocks(handle_t *handle,
				struct inode *inode, __le32 i_data)
{
	int i, retval = 0;
	__le32 *tmp_idata;
	struct buffer_head *bh;
	unsigned long max_entries = inode->i_sb->s_blocksize >> 2;

	bh = sb_bread(inode->i_sb, le32_to_cpu(i_data));
	if (!bh)
		return -EIO;

	tmp_idata = (__le32 *)bh->b_data;
	for (i = 0; i < max_entries; i++) {
		if (tmp_idata[i]) {
			retval = free_dind_blocks(handle,
					inode, tmp_idata[i]);
			if (retval) {
				put_bh(bh);
				return retval;
			}
		}
	}
	put_bh(bh);
	ext4_free_blocks(handle, inode, le32_to_cpu(i_data), 1, 1);
	return 0;
}
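
/*
 * Free the original inode's single, double and triple indirect
 * metadata blocks once the extent map has been built.  Only the
 * mapping blocks are freed; the data blocks they pointed to are kept,
 * since the new extent tree references them.
 */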
static int free_ind_block(handle_t *handle, struct inode *inode)
{
	int retval;
	struct ext4_inode_info *ei = EXT4_I(inode);

	if (ei->i_data[EXT4_IND_BLOCK])
		ext4_free_blocks(handle, inode,
			le32_to_cpu(ei->i_data[EXT4_IND_BLOCK]), 1, 1);

	if (ei->i_data[EXT4_DIND_BLOCK]) {
		retval = free_dind_blocks(handle, inode,
					  ei->i_data[EXT4_DIND_BLOCK]);
		if (retval)
			return retval;
	}

	if (ei->i_data[EXT4_TIND_BLOCK]) {
		retval = free_tind_blocks(handle, inode,
					  ei->i_data[EXT4_TIND_BLOCK]);
		if (retval)
			return retval;
	}
	return 0;
}
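
/*
 * Switch the original inode over to the extent map built in tmp_inode:
 * free the old indirect metadata blocks, copy tmp_inode's i_data (the
 * root of the extent tree) into the inode, set EXT4_EXTENTS_FL and add
 * the extent index blocks allocated under tmp_inode to i_blocks.
 */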
static int ext4_ext_swap_inode_data(handle_t *handle, struct inode *inode,
				struct inode *tmp_inode, int retval)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_inode_info *tmp_ei = EXT4_I(tmp_inode);

	retval = free_ind_block(handle, inode);
	if (retval)
		goto err_out;

	/*
	 * One credit accounted for writing the
	 * i_data field of the original inode
	 */
	retval = ext4_journal_extend(handle, 1);
	if (retval != 0) {
		retval = ext4_journal_restart(handle, 1);
		if (retval)
			goto err_out;
	}

	/*
	 * We have the extent map built with the tmp inode.
	 * Now copy the i_data across.
	 */
	ei->i_flags |= EXT4_EXTENTS_FL;
	memcpy(ei->i_data, tmp_ei->i_data, sizeof(ei->i_data));

	/*
	 * Update i_blocks with the new blocks that got
	 * allocated while adding extents for extent index
	 * blocks.
	 *
	 * While converting to extents we need not
	 * update the original inode i_blocks for extent blocks
	 * via quota APIs.  The quota update happened via tmp_inode already.
	 */
	spin_lock(&inode->i_lock);
	inode->i_blocks += tmp_inode->i_blocks;
	spin_unlock(&inode->i_lock);

	ext4_mark_inode_dirty(handle, inode);
err_out:
	return retval;
}
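
/*
 * Recursively free the extent tree block pointed to by ix and every
 * block below it.  Used to dispose of a partially built extent tree in
 * the temporary inode when the migration fails.
 */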
static int free_ext_idx(handle_t *handle, struct inode *inode,
					struct ext4_extent_idx *ix)
{
	int i, retval = 0;
	ext4_fsblk_t block;
	struct buffer_head *bh;
	struct ext4_extent_header *eh;

	block = idx_pblock(ix);
	bh = sb_bread(inode->i_sb, block);
	if (!bh)
		return -EIO;

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		ix = EXT_FIRST_INDEX(eh);
		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
			retval = free_ext_idx(handle, inode, ix);
			if (retval)
				break;
		}
	}
	put_bh(bh);
	ext4_free_blocks(handle, inode, block, 1, 1);
	return retval;
}
/*
 * Free the extent meta data blocks only
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	int i, retval = 0;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh = (struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;

	if (eh->eh_depth == 0)
		/*
		 * No extra blocks allocated for extent meta data
		 */
		return 0;
	ix = EXT_FIRST_INDEX(eh);
	for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		retval = free_ext_idx(handle, inode, ix);
		if (retval)
			return retval;
	}
	return retval;
}
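
/*
 * EXT4_IOC_MIGRATE: convert an indirect-mapped inode to extent format.
 *
 * A temporary inode is created and an extent map covering the original
 * file's blocks is built in it, extending the journal as we go.  The
 * temporary inode is put on the orphan list (with i_nlink temporarily
 * set to 1) so the blocks it references are not lost if we crash in
 * the middle.  On success the extent map is swapped into the original
 * inode and the old indirect metadata blocks are freed; on failure the
 * half-built extent tree is torn down.  Either way the temporary inode
 * is zeroed out and released at the end.
 */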
int ext4_ext_migrate(struct inode *inode, struct file *filp,
				unsigned int cmd, unsigned long arg)
{
	handle_t *handle;
	int retval = 0, i;
	__le32 *i_data;
	ext4_lblk_t blk_count = 0;
	struct ext4_inode_info *ei;
	struct inode *tmp_inode = NULL;
	struct list_blocks_struct lb;
	unsigned long max_entries;

	if (!test_opt(inode->i_sb, EXTENTS))
		/*
		 * If mounted with noextents we don't allow the migrate
		 */
		return -EINVAL;

	if ((EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
		return -EINVAL;

	down_write(&EXT4_I(inode)->i_data_sem);
	handle = ext4_journal_start(inode,
					EXT4_DATA_TRANS_BLOCKS(inode->i_sb) +
					EXT4_INDEX_EXTRA_TRANS_BLOCKS + 3 +
					2 * EXT4_QUOTA_INIT_BLOCKS(inode->i_sb)
					+ 1);
	if (IS_ERR(handle)) {
		retval = PTR_ERR(handle);
		goto err_out;
	}
	tmp_inode = ext4_new_inode(handle,
				inode->i_sb->s_root->d_inode,
				S_IFREG);
	if (IS_ERR(tmp_inode)) {
		retval = -ENOMEM;
		ext4_journal_stop(handle);
		tmp_inode = NULL;
		goto err_out;
	}
	i_size_write(tmp_inode, i_size_read(inode));
	/*
	 * We don't want the inode to be reclaimed
	 * if we got interrupted in between.  We have
	 * this tmp inode carrying reference to the
	 * data blocks of the original file.  We set
	 * the i_nlink to zero at the last stage after
	 * switching the original file to extent format.
	 */
	tmp_inode->i_nlink = 1;

	ext4_ext_tree_init(handle, tmp_inode);
	ext4_orphan_add(handle, tmp_inode);
	ext4_journal_stop(handle);

	ei = EXT4_I(inode);
	i_data = ei->i_data;
	memset(&lb, 0, sizeof(lb));

	/* 32 bit block address 4 bytes */
	max_entries = inode->i_sb->s_blocksize >> 2;

	/*
	 * Start with one credit accounted for
	 * superblock modification.
	 *
	 * For the tmp_inode we already have committed the
	 * transaction that created the inode.  Later, as and
	 * when we add extents, we extend the journal.
	 */
	handle = ext4_journal_start(inode, 1);
	for (i = 0; i < EXT4_NDIR_BLOCKS; i++, blk_count++) {
		if (i_data[i]) {
			retval = update_extent_range(handle, tmp_inode,
						le32_to_cpu(i_data[i]),
						blk_count, &lb);
			if (retval)
				goto err_out;
		}
	}
	if (i_data[EXT4_IND_BLOCK]) {
		retval = update_ind_extent_range(handle, tmp_inode,
					le32_to_cpu(i_data[EXT4_IND_BLOCK]),
					&blk_count, &lb);
		if (retval)
			goto err_out;
	} else
		blk_count += max_entries;
	if (i_data[EXT4_DIND_BLOCK]) {
		retval = update_dind_extent_range(handle, tmp_inode,
					le32_to_cpu(i_data[EXT4_DIND_BLOCK]),
					&blk_count, &lb);
		if (retval)
			goto err_out;
	} else
		blk_count += max_entries * max_entries;
	if (i_data[EXT4_TIND_BLOCK]) {
		retval = update_tind_extent_range(handle, tmp_inode,
					le32_to_cpu(i_data[EXT4_TIND_BLOCK]),
					&blk_count, &lb);
		if (retval)
			goto err_out;
	}
	/*
	 * Build the last extent
	 */
	retval = finish_range(handle, tmp_inode, &lb);
err_out:
	/*
	 * We are either freeing extent information or indirect
	 * blocks.  During this we touch the superblock, group descriptor
	 * and block bitmap.  Later we mark the tmp_inode dirty
	 * via ext4_ext_tree_init, so allocate a credit of 4.
	 * We may also update quota (user and group).
	 *
	 * FIXME!! we may be touching bitmaps in different block groups.
	 */
	if (ext4_journal_extend(handle,
			4 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb)) != 0)
		ext4_journal_restart(handle,
				4 + 2*EXT4_QUOTA_TRANS_BLOCKS(inode->i_sb));
	if (retval)
		/*
		 * In the failure case, delete the extent information
		 * built in the tmp_inode.
		 */
		free_ext_block(handle, tmp_inode);
	else
		retval = ext4_ext_swap_inode_data(handle, inode,
							tmp_inode, retval);

	/*
	 * Mark the tmp_inode as of size zero
	 */
	i_size_write(tmp_inode, 0);

	/*
	 * Set the i_blocks count to zero
	 * so that ext4_delete_inode does the
	 * right job.
	 *
	 * We don't need to take the i_lock because
	 * the inode is not visible to user space.
	 */
	tmp_inode->i_blocks = 0;

	/* Reset the extent details */
	ext4_ext_tree_init(handle, tmp_inode);

	/*
	 * Set the i_nlink to zero so that
	 * generic_drop_inode really deletes the
	 * inode
	 */
	tmp_inode->i_nlink = 0;

	ext4_journal_stop(handle);

	up_write(&EXT4_I(inode)->i_data_sem);

	if (tmp_inode)
		iput(tmp_inode);

	return retval;
}