// SPDX-License-Identifier: GPL-2.0-or-later
/* -*- mode: c; c-basic-offset: 8; -*-
 * vim: noexpandtab sw=8 ts=8 sts=0:
 *
 * resize.c
 *
 * volume resize.
 * Inspired by ext3/resize.c.
 *
 * Copyright (C) 2007 Oracle.  All rights reserved.
 */

#include <linux/types.h>
#include <cluster/masklog.h>

#include "ocfs2_trace.h"
#include "buffer_head_io.h"

/*
 * Check whether new backup superblocks exist in the last group.  If there
 * are some, mark them or clear them in the bitmap.
 *
 * Return how many backups we find in the last group.
 */
static u16 ocfs2_calc_new_backup_super(struct inode *inode,
				       struct ocfs2_group_desc *gd,
				       u16 cl_cpg,
				       u16 old_bg_clusters,
				       int set)
{
	int i;
	u16 backups = 0;
	u32 cluster, lgd_cluster;
	u64 blkno, gd_blkno, lgd_blkno = le64_to_cpu(gd->bg_blkno);

	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		gd_blkno = ocfs2_which_cluster_group(inode, cluster);
		if (gd_blkno < lgd_blkno)
			continue;
		else if (gd_blkno > lgd_blkno)
			break;

		/* check if already done backup super */
		lgd_cluster = ocfs2_blocks_to_clusters(inode->i_sb, lgd_blkno);
		lgd_cluster += old_bg_clusters;
		if (lgd_cluster >= cluster)
			continue;

		if (set)
			ocfs2_set_bit(cluster % cl_cpg,
				      (unsigned long *)gd->bg_bitmap);
		else
			ocfs2_clear_bit(cluster % cl_cpg,
					(unsigned long *)gd->bg_bitmap);

		backups++;
	}

	return backups;
}
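
/*
 * Grow the last cluster group by new_clusters: widen the group descriptor's
 * bit counts, then fold the new bits into the chain record and the global
 * bitmap inode, subtracting any backup superblocks that now fall inside the
 * group.  Called with an open journal handle; if the bitmap inode cannot be
 * journalled, the group descriptor changes are rolled back.
 */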
static int ocfs2_update_last_group_and_inode(handle_t *handle,
					     struct inode *bm_inode,
					     struct buffer_head *bm_bh,
					     struct buffer_head *group_bh,
					     u32 first_new_cluster,
					     int new_clusters)
{
	int ret = 0;
	struct ocfs2_super *osb = OCFS2_SB(bm_inode->i_sb);
	struct ocfs2_dinode *fe = (struct ocfs2_dinode *) bm_bh->b_data;
	struct ocfs2_chain_list *cl = &fe->id2.i_chain;
	struct ocfs2_chain_rec *cr;
	struct ocfs2_group_desc *group;
	u16 chain, num_bits, backups = 0;
	u16 cl_bpc = le16_to_cpu(cl->cl_bpc);
	u16 cl_cpg = le16_to_cpu(cl->cl_cpg);
	u16 old_bg_clusters;

	trace_ocfs2_update_last_group_and_inode(new_clusters,
						first_new_cluster);

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	group = (struct ocfs2_group_desc *)group_bh->b_data;

	old_bg_clusters = le16_to_cpu(group->bg_bits) / cl_bpc;
	/* update the group first. */
	num_bits = new_clusters * cl_bpc;
	le16_add_cpu(&group->bg_bits, num_bits);
	le16_add_cpu(&group->bg_free_bits_count, num_bits);

	/*
	 * check whether some new backup superblocks exist in
	 * this group and update the group bitmap accordingly.
	 */
	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb,
				     OCFS2_FEATURE_COMPAT_BACKUP_SB)) {
		backups = ocfs2_calc_new_backup_super(bm_inode, group,
						      cl_cpg, old_bg_clusters, 1);
		le16_add_cpu(&group->bg_free_bits_count, -1 * backups);
	}

	ocfs2_journal_dirty(handle, group_bh);

	/* update the inode accordingly. */
	ret = ocfs2_journal_access_di(handle, INODE_CACHE(bm_inode), bm_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_rollback;
	}

	chain = le16_to_cpu(group->bg_chain);
	cr = (&cl->cl_recs[chain]);
	le32_add_cpu(&cr->c_total, num_bits);
	le32_add_cpu(&cr->c_free, num_bits);
	le32_add_cpu(&fe->id1.bitmap1.i_total, num_bits);
	le32_add_cpu(&fe->i_clusters, new_clusters);

	if (backups) {
		le32_add_cpu(&cr->c_free, -1 * backups);
		le32_add_cpu(&fe->id1.bitmap1.i_used, backups);
	}

	spin_lock(&OCFS2_I(bm_inode)->ip_lock);
	OCFS2_I(bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, (u64)new_clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(bm_inode)->ip_lock);
	i_size_write(bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_journal_dirty(handle, bm_bh);

	goto out;

out_rollback:
	/* undo the group descriptor changes made above. */
	ocfs2_calc_new_backup_super(bm_inode, group,
				    cl_cpg, old_bg_clusters, 0);
	le16_add_cpu(&group->bg_free_bits_count, backups);
	le16_add_cpu(&group->bg_bits, -1 * num_bits);
	le16_add_cpu(&group->bg_free_bits_count, -1 * num_bits);
out:
	if (ret)
		mlog_errno(ret);
	return ret;
}
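
/*
 * Copy the updated superblock image in @data over every backup superblock
 * location that lies within the first @clusters clusters, fixing up i_blkno
 * for each backup before writing it out synchronously.
 */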
static int update_backups(struct inode * inode, u32 clusters, char *data)
{
	int i, ret = 0;
	u32 cluster;
	u64 blkno;
	struct buffer_head *backup = NULL;
	struct ocfs2_dinode *backup_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* calculate the real backups we need to update. */
	for (i = 0; i < OCFS2_MAX_BACKUP_SUPERBLOCKS; i++) {
		blkno = ocfs2_backup_super_blkno(inode->i_sb, i);
		cluster = ocfs2_blocks_to_clusters(inode->i_sb, blkno);
		if (cluster >= clusters)
			break;

		ret = ocfs2_read_blocks_sync(osb, blkno, 1, &backup);
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}

		memcpy(backup->b_data, data, inode->i_sb->s_blocksize);

		backup_di = (struct ocfs2_dinode *)backup->b_data;
		backup_di->i_blkno = cpu_to_le64(blkno);

		ret = ocfs2_write_super_or_backup(osb, backup);
		brelse(backup);
		backup = NULL;
		if (ret < 0) {
			mlog_errno(ret);
			break;
		}
	}

	return ret;
}
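
/*
 * Bump i_clusters in the on-disk superblock by new_clusters and, when the
 * BACKUP_SB feature is enabled, propagate the result to the backup
 * superblocks.  Failures here are only warned about; the resize itself has
 * already been journalled.
 */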
static void ocfs2_update_super_and_backups(struct inode *inode,
					   int new_clusters)
{
	int ret;
	u32 clusters = 0;
	struct buffer_head *super_bh = NULL;
	struct ocfs2_dinode *super_di = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/*
	 * update the superblock last.
	 * It doesn't matter if the write failed.
	 */
	ret = ocfs2_read_blocks_sync(osb, OCFS2_SUPER_BLOCK_BLKNO, 1,
				     &super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	super_di = (struct ocfs2_dinode *)super_bh->b_data;
	le32_add_cpu(&super_di->i_clusters, new_clusters);
	clusters = le32_to_cpu(super_di->i_clusters);

	ret = ocfs2_write_super_or_backup(osb, super_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	if (OCFS2_HAS_COMPAT_FEATURE(osb->sb, OCFS2_FEATURE_COMPAT_BACKUP_SB))
		ret = update_backups(inode, clusters, super_bh->b_data);

out:
	brelse(super_bh);
	if (ret)
		printk(KERN_WARNING "ocfs2: Failed to update super blocks on %s"
			" during fs resize. This condition is not fatal,"
			" but fsck.ocfs2 should be run to fix it\n",
			osb->dev_str);
	return;
}

/*
 * Extend the filesystem to the new number of clusters specified.  This entry
 * point is only used to extend the current filesystem to the end of the last
 * existing group.
 */
int ocfs2_group_extend(struct inode * inode, int new_clusters)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct buffer_head *group_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	u16 cl_bpc;
	u32 first_new_cluster;
	u64 lgd_blkno;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	if (new_clusters < 0)
		return -EINVAL;
	else if (new_clusters == 0)
		return 0;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	inode_lock(main_bm_inode);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	/* main_bm_bh is validated by inode read inside ocfs2_inode_lock(),
	 * so any corruption is a code bug. */
	BUG_ON(!OCFS2_IS_VALID_DINODE(fe));

	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
	    ocfs2_group_bitmap_size(osb->sb, 0,
				    osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small. "
		     "Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	first_new_cluster = le32_to_cpu(fe->i_clusters);
	lgd_blkno = ocfs2_which_cluster_group(main_bm_inode,
					      first_new_cluster - 1);

	ret = ocfs2_read_group_descriptor(main_bm_inode, fe, lgd_blkno,
					  &group_bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_unlock;
	}
	group = (struct ocfs2_group_desc *)group_bh->b_data;

	/* the extension must still fit within the last cluster group. */
	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	if (le16_to_cpu(group->bg_bits) / cl_bpc + new_clusters >
	    le16_to_cpu(fe->id2.i_chain.cl_cpg)) {
		ret = -EINVAL;
		goto out_unlock;
	}

	trace_ocfs2_group_extend(
	     (unsigned long long)le64_to_cpu(group->bg_blkno), new_clusters);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_EXTEND_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_unlock;
	}

	/* update the last group descriptor and inode. */
	ret = ocfs2_update_last_group_and_inode(handle, main_bm_inode,
						main_bm_bh, group_bh,
						first_new_cluster,
						new_clusters);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	ocfs2_update_super_and_backups(main_bm_inode, new_clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);
out_unlock:
	brelse(group_bh);
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);
	iput(main_bm_inode);

out:
	return ret;
}
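
/*
 * Check a userspace-formatted group descriptor against the corresponding
 * ocfs2_new_group_input: the chain index, bit count and free bit count on
 * disk must all match what the caller claims to have written.
 */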
static int ocfs2_check_new_group(struct inode *inode,
				 struct ocfs2_dinode *di,
				 struct ocfs2_new_group_input *input,
				 struct buffer_head *group_bh)
{
	int ret;
	struct ocfs2_group_desc *gd =
		(struct ocfs2_group_desc *)group_bh->b_data;
	u16 cl_bpc = le16_to_cpu(di->id2.i_chain.cl_bpc);

	ret = ocfs2_check_group_descriptor(inode->i_sb, di, group_bh);
	if (ret)
		goto out;

	ret = -EINVAL;
	if (le16_to_cpu(gd->bg_chain) != input->chain)
		mlog(ML_ERROR, "Group descriptor # %llu has bad chain %u "
		     "while input has %u set.\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_chain), input->chain);
	else if (le16_to_cpu(gd->bg_bits) != input->clusters * cl_bpc)
		mlog(ML_ERROR, "Group descriptor # %llu has bit count %u but "
		     "input has %u clusters set\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_bits), input->clusters);
	else if (le16_to_cpu(gd->bg_free_bits_count) != input->frees * cl_bpc)
		mlog(ML_ERROR, "Group descriptor # %llu has free bit count %u "
		     "but it should have %u set\n",
		     (unsigned long long)le64_to_cpu(gd->bg_blkno),
		     le16_to_cpu(gd->bg_free_bits_count),
		     input->frees * cl_bpc);
	else
		ret = 0;

out:
	return ret;
}
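
/*
 * Validate the ioctl input before linking a new group: it must start past
 * the current end of the volume, target a legal chain, describe at most one
 * full cluster group, and sit at the block ocfs2_which_cluster_group()
 * expects for that cluster.
 */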
static int ocfs2_verify_group_and_input(struct inode *inode,
					struct ocfs2_dinode *di,
					struct ocfs2_new_group_input *input,
					struct buffer_head *group_bh)
{
	u16 cl_count = le16_to_cpu(di->id2.i_chain.cl_count);
	u16 cl_cpg = le16_to_cpu(di->id2.i_chain.cl_cpg);
	u16 next_free = le16_to_cpu(di->id2.i_chain.cl_next_free_rec);
	u32 cluster = ocfs2_blocks_to_clusters(inode->i_sb, input->group);
	u32 total_clusters = le32_to_cpu(di->i_clusters);
	int ret = -EINVAL;

	if (cluster < total_clusters)
		mlog(ML_ERROR, "add a group which is in the current volume.\n");
	else if (input->chain >= cl_count)
		mlog(ML_ERROR, "input chain exceeds the limit.\n");
	else if (next_free != cl_count && next_free != input->chain)
		mlog(ML_ERROR,
		     "the add group should be in chain %u\n", next_free);
	else if (total_clusters + input->clusters < total_clusters)
		mlog(ML_ERROR, "add group's clusters overflow.\n");
	else if (input->clusters > cl_cpg)
		mlog(ML_ERROR, "the cluster exceeds the maximum of a group\n");
	else if (input->frees > input->clusters)
		mlog(ML_ERROR, "the free cluster exceeds the total clusters\n");
	else if (total_clusters % cl_cpg != 0)
		mlog(ML_ERROR,
		     "the last group isn't full. Use group extend first.\n");
	else if (input->group != ocfs2_which_cluster_group(inode, cluster))
		mlog(ML_ERROR, "group blkno is invalid\n");
	else if ((ret = ocfs2_check_new_group(inode, di, input, group_bh)))
		mlog(ML_ERROR, "group descriptor check failed.\n");
	else
		ret = 0;

	return ret;
}
/* Add a new group descriptor to global_bitmap. */
int ocfs2_group_add(struct inode *inode, struct ocfs2_new_group_input *input)
{
	int ret;
	handle_t *handle;
	struct buffer_head *main_bm_bh = NULL;
	struct inode *main_bm_inode = NULL;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *group_bh = NULL;
	struct ocfs2_group_desc *group = NULL;
	struct ocfs2_chain_list *cl;
	struct ocfs2_chain_rec *cr;
	u16 cl_bpc;
	u64 bg_ptr;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	main_bm_inode = ocfs2_get_system_file_inode(osb,
						    GLOBAL_BITMAP_SYSTEM_INODE,
						    OCFS2_INVALID_SLOT);
	if (!main_bm_inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto out;
	}

	inode_lock(main_bm_inode);

	ret = ocfs2_inode_lock(main_bm_inode, &main_bm_bh, 1);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_mutex;
	}

	fe = (struct ocfs2_dinode *)main_bm_bh->b_data;

	if (le16_to_cpu(fe->id2.i_chain.cl_cpg) !=
	    ocfs2_group_bitmap_size(osb->sb, 0,
				    osb->s_feature_incompat) * 8) {
		mlog(ML_ERROR, "The disk is too old and small."
		     " Force to do offline resize.");
		ret = -EINVAL;
		goto out_unlock;
	}

	ret = ocfs2_read_blocks_sync(osb, input->group, 1, &group_bh);
	if (ret < 0) {
		mlog(ML_ERROR, "Can't read the group descriptor # %llu "
		     "from the device.", (unsigned long long)input->group);
		ret = -EINVAL;
		goto out_unlock;
	}

	ocfs2_set_new_buffer_uptodate(INODE_CACHE(inode), group_bh);

	ret = ocfs2_verify_group_and_input(main_bm_inode, fe, input, group_bh);
	if (ret) {
		mlog_errno(ret);
		goto out_free_group_bh;
	}

	trace_ocfs2_group_add((unsigned long long)input->group,
			      input->chain, input->clusters, input->frees);

	handle = ocfs2_start_trans(osb, OCFS2_GROUP_ADD_CREDITS);
	if (IS_ERR(handle)) {
		mlog_errno(PTR_ERR(handle));
		ret = -EINVAL;
		goto out_free_group_bh;
	}

	cl_bpc = le16_to_cpu(fe->id2.i_chain.cl_bpc);
	cl = &fe->id2.i_chain;
	cr = &cl->cl_recs[input->chain];

	ret = ocfs2_journal_access_gd(handle, INODE_CACHE(main_bm_inode),
				      group_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_commit;
	}

	/* link the new group at the head of its chain. */
	group = (struct ocfs2_group_desc *)group_bh->b_data;
	bg_ptr = le64_to_cpu(group->bg_next_group);
	group->bg_next_group = cr->c_blkno;
	ocfs2_journal_dirty(handle, group_bh);

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(main_bm_inode),
				      main_bm_bh, OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		/* restore the old next-group pointer on failure. */
		group->bg_next_group = cpu_to_le64(bg_ptr);
		mlog_errno(ret);
		goto out_commit;
	}

	if (input->chain == le16_to_cpu(cl->cl_next_free_rec)) {
		le16_add_cpu(&cl->cl_next_free_rec, 1);
		memset(cr, 0, sizeof(struct ocfs2_chain_rec));
	}

	/* account for the new group in the chain record and bitmap inode. */
	cr->c_blkno = cpu_to_le64(input->group);
	le32_add_cpu(&cr->c_total, input->clusters * cl_bpc);
	le32_add_cpu(&cr->c_free, input->frees * cl_bpc);

	le32_add_cpu(&fe->id1.bitmap1.i_total, input->clusters * cl_bpc);
	le32_add_cpu(&fe->id1.bitmap1.i_used,
		     (input->clusters - input->frees) * cl_bpc);
	le32_add_cpu(&fe->i_clusters, input->clusters);

	ocfs2_journal_dirty(handle, main_bm_bh);

	spin_lock(&OCFS2_I(main_bm_inode)->ip_lock);
	OCFS2_I(main_bm_inode)->ip_clusters = le32_to_cpu(fe->i_clusters);
	le64_add_cpu(&fe->i_size, (u64)input->clusters << osb->s_clustersize_bits);
	spin_unlock(&OCFS2_I(main_bm_inode)->ip_lock);
	i_size_write(main_bm_inode, le64_to_cpu(fe->i_size));

	ocfs2_update_super_and_backups(main_bm_inode, input->clusters);

out_commit:
	ocfs2_commit_trans(osb, handle);
out_free_group_bh:
	brelse(group_bh);

out_unlock:
	brelse(main_bm_bh);

	ocfs2_inode_unlock(main_bm_inode, 1);

out_mutex:
	inode_unlock(main_bm_inode);