1 // SPDX-License-Identifier: GPL-2.0+
/*
 * NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * Written by Koji Sato.
 * Revised by Ryusuke Konishi.
 */
11 #include <linux/kernel.h>
13 #include <linux/string.h>
14 #include <linux/buffer_head.h>
15 #include <linux/errno.h>
19 #include <trace/events/nilfs2.h>
22 * struct nilfs_sufile_info - on-memory private data of sufile
23 * @mi: on-memory private data of metadata file
24 * @ncleansegs: number of clean segments
25 * @allocmin: lower limit of allocatable segment range
26 * @allocmax: upper limit of allocatable segment range
28 struct nilfs_sufile_info
{
29 struct nilfs_mdt_info mi
;
30 unsigned long ncleansegs
;/* number of clean segments */
31 __u64 allocmin
; /* lower limit of allocatable segment range */
32 __u64 allocmax
; /* upper limit of allocatable segment range */
/*
 * NILFS_SUI - get the sufile-private data attached to a sufile inode.
 *
 * struct nilfs_sufile_info embeds struct nilfs_mdt_info as its first
 * member, so the enclosing private structure is recovered by a cast of
 * the mdt_info pointer.
 */
static inline struct nilfs_sufile_info *NILFS_SUI(struct inode *sufile)
{
	return (struct nilfs_sufile_info *)NILFS_MDT(sufile);
}
40 static inline unsigned long
41 nilfs_sufile_segment_usages_per_block(const struct inode
*sufile
)
43 return NILFS_MDT(sufile
)->mi_entries_per_block
;
47 nilfs_sufile_get_blkoff(const struct inode
*sufile
, __u64 segnum
)
49 __u64 t
= segnum
+ NILFS_MDT(sufile
)->mi_first_entry_offset
;
51 t
= div64_ul(t
, nilfs_sufile_segment_usages_per_block(sufile
));
52 return (unsigned long)t
;
56 nilfs_sufile_get_offset(const struct inode
*sufile
, __u64 segnum
)
58 __u64 t
= segnum
+ NILFS_MDT(sufile
)->mi_first_entry_offset
;
60 return do_div(t
, nilfs_sufile_segment_usages_per_block(sufile
));
64 nilfs_sufile_segment_usages_in_block(const struct inode
*sufile
, __u64 curr
,
67 return min_t(unsigned long,
68 nilfs_sufile_segment_usages_per_block(sufile
) -
69 nilfs_sufile_get_offset(sufile
, curr
),
74 * nilfs_sufile_segment_usage_offset - calculate the byte offset of a segment
75 * usage entry in the folio containing it
76 * @sufile: segment usage file inode
77 * @segnum: number of segment usage
78 * @bh: buffer head of block containing segment usage indexed by @segnum
80 * Return: Byte offset in the folio of the segment usage entry.
82 static size_t nilfs_sufile_segment_usage_offset(const struct inode
*sufile
,
84 struct buffer_head
*bh
)
86 return offset_in_folio(bh
->b_folio
, bh
->b_data
) +
87 nilfs_sufile_get_offset(sufile
, segnum
) *
88 NILFS_MDT(sufile
)->mi_entry_size
;
91 static int nilfs_sufile_get_header_block(struct inode
*sufile
,
92 struct buffer_head
**bhp
)
94 int err
= nilfs_mdt_get_block(sufile
, 0, 0, NULL
, bhp
);
96 if (unlikely(err
== -ENOENT
)) {
97 nilfs_error(sufile
->i_sb
,
98 "missing header block in segment usage metadata");
105 nilfs_sufile_get_segment_usage_block(struct inode
*sufile
, __u64 segnum
,
106 int create
, struct buffer_head
**bhp
)
108 return nilfs_mdt_get_block(sufile
,
109 nilfs_sufile_get_blkoff(sufile
, segnum
),
113 static int nilfs_sufile_delete_segment_usage_block(struct inode
*sufile
,
116 return nilfs_mdt_delete_block(sufile
,
117 nilfs_sufile_get_blkoff(sufile
, segnum
));
120 static void nilfs_sufile_mod_counter(struct buffer_head
*header_bh
,
121 u64 ncleanadd
, u64 ndirtyadd
)
123 struct nilfs_sufile_header
*header
;
125 header
= kmap_local_folio(header_bh
->b_folio
, 0);
126 le64_add_cpu(&header
->sh_ncleansegs
, ncleanadd
);
127 le64_add_cpu(&header
->sh_ndirtysegs
, ndirtyadd
);
128 kunmap_local(header
);
130 mark_buffer_dirty(header_bh
);
134 * nilfs_sufile_get_ncleansegs - return the number of clean segments
135 * @sufile: inode of segment usage file
137 unsigned long nilfs_sufile_get_ncleansegs(struct inode
*sufile
)
139 return NILFS_SUI(sufile
)->ncleansegs
;
143 * nilfs_sufile_updatev - modify multiple segment usages at a time
144 * @sufile: inode of segment usage file
145 * @segnumv: array of segment numbers
146 * @nsegs: size of @segnumv array
147 * @create: creation flag
148 * @ndone: place to store number of modified segments on @segnumv
149 * @dofunc: primitive operation for the update
151 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
152 * against the given array of segments. The @dofunc is called with
153 * buffers of a header block and the sufile block in which the target
154 * segment usage entry is contained. If @ndone is given, the number
155 * of successfully modified segments from the head is stored in the
156 * place @ndone points to.
158 * Return Value: On success, zero is returned. On error, one of the
159 * following negative error codes is returned.
163 * %-ENOMEM - Insufficient amount of memory available.
165 * %-ENOENT - Given segment usage is in hole block (may be returned if
168 * %-EINVAL - Invalid segment usage number
170 int nilfs_sufile_updatev(struct inode
*sufile
, __u64
*segnumv
, size_t nsegs
,
171 int create
, size_t *ndone
,
172 void (*dofunc
)(struct inode
*, __u64
,
173 struct buffer_head
*,
174 struct buffer_head
*))
176 struct buffer_head
*header_bh
, *bh
;
177 unsigned long blkoff
, prev_blkoff
;
179 size_t nerr
= 0, n
= 0;
182 if (unlikely(nsegs
== 0))
185 down_write(&NILFS_MDT(sufile
)->mi_sem
);
186 for (seg
= segnumv
; seg
< segnumv
+ nsegs
; seg
++) {
187 if (unlikely(*seg
>= nilfs_sufile_get_nsegments(sufile
))) {
188 nilfs_warn(sufile
->i_sb
,
189 "%s: invalid segment number: %llu",
190 __func__
, (unsigned long long)*seg
);
199 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
204 blkoff
= nilfs_sufile_get_blkoff(sufile
, *seg
);
205 ret
= nilfs_mdt_get_block(sufile
, blkoff
, create
, NULL
, &bh
);
210 dofunc(sufile
, *seg
, header_bh
, bh
);
212 if (++seg
>= segnumv
+ nsegs
)
214 prev_blkoff
= blkoff
;
215 blkoff
= nilfs_sufile_get_blkoff(sufile
, *seg
);
216 if (blkoff
== prev_blkoff
)
219 /* get different block */
221 ret
= nilfs_mdt_get_block(sufile
, blkoff
, create
, NULL
, &bh
);
222 if (unlikely(ret
< 0))
231 up_write(&NILFS_MDT(sufile
)->mi_sem
);
238 int nilfs_sufile_update(struct inode
*sufile
, __u64 segnum
, int create
,
239 void (*dofunc
)(struct inode
*, __u64
,
240 struct buffer_head
*,
241 struct buffer_head
*))
243 struct buffer_head
*header_bh
, *bh
;
246 if (unlikely(segnum
>= nilfs_sufile_get_nsegments(sufile
))) {
247 nilfs_warn(sufile
->i_sb
, "%s: invalid segment number: %llu",
248 __func__
, (unsigned long long)segnum
);
251 down_write(&NILFS_MDT(sufile
)->mi_sem
);
253 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
257 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, create
, &bh
);
259 dofunc(sufile
, segnum
, header_bh
, bh
);
265 up_write(&NILFS_MDT(sufile
)->mi_sem
);
270 * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
271 * @sufile: inode of segment usage file
272 * @start: minimum segment number of allocatable region (inclusive)
273 * @end: maximum segment number of allocatable region (inclusive)
275 * Return Value: On success, 0 is returned. On error, one of the
276 * following negative error codes is returned.
278 * %-ERANGE - invalid segment region
280 int nilfs_sufile_set_alloc_range(struct inode
*sufile
, __u64 start
, __u64 end
)
282 struct nilfs_sufile_info
*sui
= NILFS_SUI(sufile
);
286 down_write(&NILFS_MDT(sufile
)->mi_sem
);
287 nsegs
= nilfs_sufile_get_nsegments(sufile
);
289 if (start
<= end
&& end
< nsegs
) {
290 sui
->allocmin
= start
;
294 up_write(&NILFS_MDT(sufile
)->mi_sem
);
299 * nilfs_sufile_alloc - allocate a segment
300 * @sufile: inode of segment usage file
301 * @segnump: pointer to segment number
303 * Description: nilfs_sufile_alloc() allocates a clean segment.
305 * Return Value: On success, 0 is returned and the segment number of the
306 * allocated segment is stored in the place pointed by @segnump. On error, one
307 * of the following negative error codes is returned.
311 * %-ENOMEM - Insufficient amount of memory available.
313 * %-ENOSPC - No clean segment left.
315 int nilfs_sufile_alloc(struct inode
*sufile
, __u64
*segnump
)
317 struct buffer_head
*header_bh
, *su_bh
;
318 struct nilfs_sufile_header
*header
;
319 struct nilfs_segment_usage
*su
;
320 struct nilfs_sufile_info
*sui
= NILFS_SUI(sufile
);
321 size_t susz
= NILFS_MDT(sufile
)->mi_entry_size
;
322 __u64 segnum
, maxsegnum
, last_alloc
;
325 unsigned long nsegments
, nsus
, cnt
;
328 down_write(&NILFS_MDT(sufile
)->mi_sem
);
330 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
333 header
= kmap_local_folio(header_bh
->b_folio
, 0);
334 last_alloc
= le64_to_cpu(header
->sh_last_alloc
);
335 kunmap_local(header
);
337 nsegments
= nilfs_sufile_get_nsegments(sufile
);
338 maxsegnum
= sui
->allocmax
;
339 segnum
= last_alloc
+ 1;
340 if (segnum
< sui
->allocmin
|| segnum
> sui
->allocmax
)
341 segnum
= sui
->allocmin
;
343 for (cnt
= 0; cnt
< nsegments
; cnt
+= nsus
) {
344 if (segnum
> maxsegnum
) {
345 if (cnt
< sui
->allocmax
- sui
->allocmin
+ 1) {
347 * wrap around in the limited region.
348 * if allocation started from
349 * sui->allocmin, this never happens.
351 segnum
= sui
->allocmin
;
352 maxsegnum
= last_alloc
;
353 } else if (segnum
> sui
->allocmin
&&
354 sui
->allocmax
+ 1 < nsegments
) {
355 segnum
= sui
->allocmax
+ 1;
356 maxsegnum
= nsegments
- 1;
357 } else if (sui
->allocmin
> 0) {
359 maxsegnum
= sui
->allocmin
- 1;
361 break; /* never happens */
364 trace_nilfs2_segment_usage_check(sufile
, segnum
, cnt
);
365 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 1,
370 offset
= nilfs_sufile_segment_usage_offset(sufile
, segnum
,
372 su
= kaddr
= kmap_local_folio(su_bh
->b_folio
, offset
);
374 nsus
= nilfs_sufile_segment_usages_in_block(
375 sufile
, segnum
, maxsegnum
);
376 for (j
= 0; j
< nsus
; j
++, su
= (void *)su
+ susz
, segnum
++) {
377 if (!nilfs_segment_usage_clean(su
))
379 /* found a clean segment */
380 nilfs_segment_usage_set_dirty(su
);
383 header
= kmap_local_folio(header_bh
->b_folio
, 0);
384 le64_add_cpu(&header
->sh_ncleansegs
, -1);
385 le64_add_cpu(&header
->sh_ndirtysegs
, 1);
386 header
->sh_last_alloc
= cpu_to_le64(segnum
);
387 kunmap_local(header
);
390 mark_buffer_dirty(header_bh
);
391 mark_buffer_dirty(su_bh
);
392 nilfs_mdt_mark_dirty(sufile
);
396 trace_nilfs2_segment_usage_allocated(sufile
, segnum
);
405 /* no segments left */
412 up_write(&NILFS_MDT(sufile
)->mi_sem
);
416 void nilfs_sufile_do_cancel_free(struct inode
*sufile
, __u64 segnum
,
417 struct buffer_head
*header_bh
,
418 struct buffer_head
*su_bh
)
420 struct nilfs_segment_usage
*su
;
423 offset
= nilfs_sufile_segment_usage_offset(sufile
, segnum
, su_bh
);
424 su
= kmap_local_folio(su_bh
->b_folio
, offset
);
425 if (unlikely(!nilfs_segment_usage_clean(su
))) {
426 nilfs_warn(sufile
->i_sb
, "%s: segment %llu must be clean",
427 __func__
, (unsigned long long)segnum
);
431 nilfs_segment_usage_set_dirty(su
);
434 nilfs_sufile_mod_counter(header_bh
, -1, 1);
435 NILFS_SUI(sufile
)->ncleansegs
--;
437 mark_buffer_dirty(su_bh
);
438 nilfs_mdt_mark_dirty(sufile
);
441 void nilfs_sufile_do_scrap(struct inode
*sufile
, __u64 segnum
,
442 struct buffer_head
*header_bh
,
443 struct buffer_head
*su_bh
)
445 struct nilfs_segment_usage
*su
;
449 offset
= nilfs_sufile_segment_usage_offset(sufile
, segnum
, su_bh
);
450 su
= kmap_local_folio(su_bh
->b_folio
, offset
);
451 if (su
->su_flags
== cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY
)) &&
452 su
->su_nblocks
== cpu_to_le32(0)) {
456 clean
= nilfs_segment_usage_clean(su
);
457 dirty
= nilfs_segment_usage_dirty(su
);
459 /* make the segment garbage */
460 su
->su_lastmod
= cpu_to_le64(0);
461 su
->su_nblocks
= cpu_to_le32(0);
462 su
->su_flags
= cpu_to_le32(BIT(NILFS_SEGMENT_USAGE_DIRTY
));
465 nilfs_sufile_mod_counter(header_bh
, clean
? (u64
)-1 : 0, dirty
? 0 : 1);
466 NILFS_SUI(sufile
)->ncleansegs
-= clean
;
468 mark_buffer_dirty(su_bh
);
469 nilfs_mdt_mark_dirty(sufile
);
472 void nilfs_sufile_do_free(struct inode
*sufile
, __u64 segnum
,
473 struct buffer_head
*header_bh
,
474 struct buffer_head
*su_bh
)
476 struct nilfs_segment_usage
*su
;
480 offset
= nilfs_sufile_segment_usage_offset(sufile
, segnum
, su_bh
);
481 su
= kmap_local_folio(su_bh
->b_folio
, offset
);
482 if (nilfs_segment_usage_clean(su
)) {
483 nilfs_warn(sufile
->i_sb
, "%s: segment %llu is already clean",
484 __func__
, (unsigned long long)segnum
);
488 if (unlikely(nilfs_segment_usage_error(su
)))
489 nilfs_warn(sufile
->i_sb
, "free segment %llu marked in error",
490 (unsigned long long)segnum
);
492 sudirty
= nilfs_segment_usage_dirty(su
);
493 if (unlikely(!sudirty
))
494 nilfs_warn(sufile
->i_sb
, "free unallocated segment %llu",
495 (unsigned long long)segnum
);
497 nilfs_segment_usage_set_clean(su
);
499 mark_buffer_dirty(su_bh
);
501 nilfs_sufile_mod_counter(header_bh
, 1, sudirty
? (u64
)-1 : 0);
502 NILFS_SUI(sufile
)->ncleansegs
++;
504 nilfs_mdt_mark_dirty(sufile
);
506 trace_nilfs2_segment_usage_freed(sufile
, segnum
);
510 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
511 * @sufile: inode of segment usage file
512 * @segnum: segment number
514 int nilfs_sufile_mark_dirty(struct inode
*sufile
, __u64 segnum
)
516 struct buffer_head
*bh
;
518 struct nilfs_segment_usage
*su
;
521 down_write(&NILFS_MDT(sufile
)->mi_sem
);
522 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0, &bh
);
524 if (ret
== -ENOENT
) {
525 nilfs_error(sufile
->i_sb
,
526 "segment usage for segment %llu is unreadable due to a hole block",
527 (unsigned long long)segnum
);
533 offset
= nilfs_sufile_segment_usage_offset(sufile
, segnum
, bh
);
534 su
= kmap_local_folio(bh
->b_folio
, offset
);
535 if (unlikely(nilfs_segment_usage_error(su
))) {
536 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
540 if (nilfs_segment_is_active(nilfs
, segnum
)) {
541 nilfs_error(sufile
->i_sb
,
542 "active segment %llu is erroneous",
543 (unsigned long long)segnum
);
546 * Segments marked erroneous are never allocated by
547 * nilfs_sufile_alloc(); only active segments, ie,
548 * the segments indexed by ns_segnum or ns_nextnum,
549 * can be erroneous here.
555 nilfs_segment_usage_set_dirty(su
);
557 mark_buffer_dirty(bh
);
558 nilfs_mdt_mark_dirty(sufile
);
562 up_write(&NILFS_MDT(sufile
)->mi_sem
);
567 * nilfs_sufile_set_segment_usage - set usage of a segment
568 * @sufile: inode of segment usage file
569 * @segnum: segment number
570 * @nblocks: number of live blocks in the segment
571 * @modtime: modification time (option)
573 int nilfs_sufile_set_segment_usage(struct inode
*sufile
, __u64 segnum
,
574 unsigned long nblocks
, time64_t modtime
)
576 struct buffer_head
*bh
;
577 struct nilfs_segment_usage
*su
;
581 down_write(&NILFS_MDT(sufile
)->mi_sem
);
582 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0, &bh
);
586 offset
= nilfs_sufile_segment_usage_offset(sufile
, segnum
, bh
);
587 su
= kmap_local_folio(bh
->b_folio
, offset
);
590 * Check segusage error and set su_lastmod only when updating
591 * this entry with a valid timestamp, not for cancellation.
593 WARN_ON_ONCE(nilfs_segment_usage_error(su
));
594 su
->su_lastmod
= cpu_to_le64(modtime
);
596 su
->su_nblocks
= cpu_to_le32(nblocks
);
599 mark_buffer_dirty(bh
);
600 nilfs_mdt_mark_dirty(sufile
);
604 up_write(&NILFS_MDT(sufile
)->mi_sem
);
609 * nilfs_sufile_get_stat - get segment usage statistics
610 * @sufile: inode of segment usage file
611 * @sustat: pointer to a structure of segment usage statistics
613 * Description: nilfs_sufile_get_stat() returns information about segment
616 * Return Value: On success, 0 is returned, and segment usage information is
617 * stored in the place pointed by @sustat. On error, one of the following
618 * negative error codes is returned.
622 * %-ENOMEM - Insufficient amount of memory available.
624 int nilfs_sufile_get_stat(struct inode
*sufile
, struct nilfs_sustat
*sustat
)
626 struct buffer_head
*header_bh
;
627 struct nilfs_sufile_header
*header
;
628 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
631 down_read(&NILFS_MDT(sufile
)->mi_sem
);
633 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
637 header
= kmap_local_folio(header_bh
->b_folio
, 0);
638 sustat
->ss_nsegs
= nilfs_sufile_get_nsegments(sufile
);
639 sustat
->ss_ncleansegs
= le64_to_cpu(header
->sh_ncleansegs
);
640 sustat
->ss_ndirtysegs
= le64_to_cpu(header
->sh_ndirtysegs
);
641 sustat
->ss_ctime
= nilfs
->ns_ctime
;
642 sustat
->ss_nongc_ctime
= nilfs
->ns_nongc_ctime
;
643 spin_lock(&nilfs
->ns_last_segment_lock
);
644 sustat
->ss_prot_seq
= nilfs
->ns_prot_seq
;
645 spin_unlock(&nilfs
->ns_last_segment_lock
);
646 kunmap_local(header
);
650 up_read(&NILFS_MDT(sufile
)->mi_sem
);
654 void nilfs_sufile_do_set_error(struct inode
*sufile
, __u64 segnum
,
655 struct buffer_head
*header_bh
,
656 struct buffer_head
*su_bh
)
658 struct nilfs_segment_usage
*su
;
662 offset
= nilfs_sufile_segment_usage_offset(sufile
, segnum
, su_bh
);
663 su
= kmap_local_folio(su_bh
->b_folio
, offset
);
664 if (nilfs_segment_usage_error(su
)) {
668 suclean
= nilfs_segment_usage_clean(su
);
669 nilfs_segment_usage_set_error(su
);
673 nilfs_sufile_mod_counter(header_bh
, -1, 0);
674 NILFS_SUI(sufile
)->ncleansegs
--;
676 mark_buffer_dirty(su_bh
);
677 nilfs_mdt_mark_dirty(sufile
);
681 * nilfs_sufile_truncate_range - truncate range of segment array
682 * @sufile: inode of segment usage file
683 * @start: start segment number (inclusive)
684 * @end: end segment number (inclusive)
686 * Return Value: On success, 0 is returned. On error, one of the
687 * following negative error codes is returned.
691 * %-ENOMEM - Insufficient amount of memory available.
693 * %-EINVAL - Invalid number of segments specified
695 * %-EBUSY - Dirty or active segments are present in the range
697 static int nilfs_sufile_truncate_range(struct inode
*sufile
,
698 __u64 start
, __u64 end
)
700 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
701 struct buffer_head
*header_bh
;
702 struct buffer_head
*su_bh
;
703 struct nilfs_segment_usage
*su
, *su2
;
704 size_t susz
= NILFS_MDT(sufile
)->mi_entry_size
;
705 unsigned long segusages_per_block
;
706 unsigned long nsegs
, ncleaned
;
713 nsegs
= nilfs_sufile_get_nsegments(sufile
);
716 if (start
> end
|| start
>= nsegs
)
719 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
723 segusages_per_block
= nilfs_sufile_segment_usages_per_block(sufile
);
726 for (segnum
= start
; segnum
<= end
; segnum
+= n
) {
727 n
= min_t(unsigned long,
728 segusages_per_block
-
729 nilfs_sufile_get_offset(sufile
, segnum
),
731 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0,
739 offset
= nilfs_sufile_segment_usage_offset(sufile
, segnum
,
741 su
= kmap_local_folio(su_bh
->b_folio
, offset
);
743 for (j
= 0; j
< n
; j
++, su
= (void *)su
+ susz
) {
744 if ((le32_to_cpu(su
->su_flags
) &
745 ~BIT(NILFS_SEGMENT_USAGE_ERROR
)) ||
746 nilfs_segment_is_active(nilfs
, segnum
+ j
)) {
754 for (su
= su2
, j
= 0; j
< n
; j
++, su
= (void *)su
+ susz
) {
755 if (nilfs_segment_usage_error(su
)) {
756 nilfs_segment_usage_set_clean(su
);
762 mark_buffer_dirty(su_bh
);
767 if (n
== segusages_per_block
) {
769 nilfs_sufile_delete_segment_usage_block(sufile
, segnum
);
776 NILFS_SUI(sufile
)->ncleansegs
+= ncleaned
;
777 nilfs_sufile_mod_counter(header_bh
, ncleaned
, 0);
778 nilfs_mdt_mark_dirty(sufile
);
786 * nilfs_sufile_resize - resize segment array
787 * @sufile: inode of segment usage file
788 * @newnsegs: new number of segments
790 * Return Value: On success, 0 is returned. On error, one of the
791 * following negative error codes is returned.
795 * %-ENOMEM - Insufficient amount of memory available.
797 * %-ENOSPC - Enough free space is not left for shrinking
799 * %-EBUSY - Dirty or active segments exist in the region to be truncated
801 int nilfs_sufile_resize(struct inode
*sufile
, __u64 newnsegs
)
803 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
804 struct buffer_head
*header_bh
;
805 struct nilfs_sufile_header
*header
;
806 struct nilfs_sufile_info
*sui
= NILFS_SUI(sufile
);
807 unsigned long nsegs
, nrsvsegs
;
810 down_write(&NILFS_MDT(sufile
)->mi_sem
);
812 nsegs
= nilfs_sufile_get_nsegments(sufile
);
813 if (nsegs
== newnsegs
)
817 nrsvsegs
= nilfs_nrsvsegs(nilfs
, newnsegs
);
818 if (newnsegs
< nsegs
&& nsegs
- newnsegs
+ nrsvsegs
> sui
->ncleansegs
)
821 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
825 if (newnsegs
> nsegs
) {
826 sui
->ncleansegs
+= newnsegs
- nsegs
;
827 } else /* newnsegs < nsegs */ {
828 ret
= nilfs_sufile_truncate_range(sufile
, newnsegs
, nsegs
- 1);
832 sui
->ncleansegs
-= nsegs
- newnsegs
;
835 * If the sufile is successfully truncated, immediately adjust
836 * the segment allocation space while locking the semaphore
837 * "mi_sem" so that nilfs_sufile_alloc() never allocates
838 * segments in the truncated space.
840 sui
->allocmax
= newnsegs
- 1;
844 header
= kmap_local_folio(header_bh
->b_folio
, 0);
845 header
->sh_ncleansegs
= cpu_to_le64(sui
->ncleansegs
);
846 kunmap_local(header
);
848 mark_buffer_dirty(header_bh
);
849 nilfs_mdt_mark_dirty(sufile
);
850 nilfs_set_nsegments(nilfs
, newnsegs
);
855 up_write(&NILFS_MDT(sufile
)->mi_sem
);
860 * nilfs_sufile_get_suinfo - get segment usage information
861 * @sufile: inode of segment usage file
862 * @segnum: segment number to start looking
863 * @buf: array of suinfo
864 * @sisz: byte size of suinfo
865 * @nsi: size of suinfo array
867 * Return: Count of segment usage info items stored in the output buffer on
868 * success, or the following negative error code on failure.
869 * * %-EIO - I/O error (including metadata corruption).
870 * * %-ENOMEM - Insufficient memory available.
872 ssize_t
nilfs_sufile_get_suinfo(struct inode
*sufile
, __u64 segnum
, void *buf
,
873 unsigned int sisz
, size_t nsi
)
875 struct buffer_head
*su_bh
;
876 struct nilfs_segment_usage
*su
;
877 struct nilfs_suinfo
*si
= buf
;
878 size_t susz
= NILFS_MDT(sufile
)->mi_entry_size
;
879 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
882 unsigned long nsegs
, segusages_per_block
;
886 down_read(&NILFS_MDT(sufile
)->mi_sem
);
888 segusages_per_block
= nilfs_sufile_segment_usages_per_block(sufile
);
889 nsegs
= min_t(unsigned long,
890 nilfs_sufile_get_nsegments(sufile
) - segnum
,
892 for (i
= 0; i
< nsegs
; i
+= n
, segnum
+= n
) {
893 n
= min_t(unsigned long,
894 segusages_per_block
-
895 nilfs_sufile_get_offset(sufile
, segnum
),
897 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0,
903 memset(si
, 0, sisz
* n
);
904 si
= (void *)si
+ sisz
* n
;
908 offset
= nilfs_sufile_segment_usage_offset(sufile
, segnum
,
910 su
= kaddr
= kmap_local_folio(su_bh
->b_folio
, offset
);
912 j
++, su
= (void *)su
+ susz
, si
= (void *)si
+ sisz
) {
913 si
->sui_lastmod
= le64_to_cpu(su
->su_lastmod
);
914 si
->sui_nblocks
= le32_to_cpu(su
->su_nblocks
);
915 si
->sui_flags
= le32_to_cpu(su
->su_flags
) &
916 ~BIT(NILFS_SEGMENT_USAGE_ACTIVE
);
917 if (nilfs_segment_is_active(nilfs
, segnum
+ j
))
919 BIT(NILFS_SEGMENT_USAGE_ACTIVE
);
927 up_read(&NILFS_MDT(sufile
)->mi_sem
);
932 * nilfs_sufile_set_suinfo - sets segment usage info
933 * @sufile: inode of segment usage file
934 * @buf: array of suinfo_update
935 * @supsz: byte size of suinfo_update
936 * @nsup: size of suinfo_update array
938 * Description: Takes an array of nilfs_suinfo_update structs and updates
939 * segment usage accordingly. Only the fields indicated by the sup_flags
942 * Return Value: On success, 0 is returned. On error, one of the
943 * following negative error codes is returned.
947 * %-ENOMEM - Insufficient amount of memory available.
949 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
951 ssize_t
nilfs_sufile_set_suinfo(struct inode
*sufile
, void *buf
,
952 unsigned int supsz
, size_t nsup
)
954 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
955 struct buffer_head
*header_bh
, *bh
;
956 struct nilfs_suinfo_update
*sup
, *supend
= buf
+ supsz
* nsup
;
957 struct nilfs_segment_usage
*su
;
959 unsigned long blkoff
, prev_blkoff
;
960 int cleansi
, cleansu
, dirtysi
, dirtysu
;
961 long ncleaned
= 0, ndirtied
= 0;
964 if (unlikely(nsup
== 0))
967 for (sup
= buf
; sup
< supend
; sup
= (void *)sup
+ supsz
) {
968 if (sup
->sup_segnum
>= nilfs
->ns_nsegments
970 (~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS
))
971 || (nilfs_suinfo_update_nblocks(sup
) &&
972 sup
->sup_sui
.sui_nblocks
>
973 nilfs
->ns_blocks_per_segment
))
977 down_write(&NILFS_MDT(sufile
)->mi_sem
);
979 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
984 blkoff
= nilfs_sufile_get_blkoff(sufile
, sup
->sup_segnum
);
985 ret
= nilfs_mdt_get_block(sufile
, blkoff
, 1, NULL
, &bh
);
990 offset
= nilfs_sufile_segment_usage_offset(
991 sufile
, sup
->sup_segnum
, bh
);
992 su
= kmap_local_folio(bh
->b_folio
, offset
);
994 if (nilfs_suinfo_update_lastmod(sup
))
995 su
->su_lastmod
= cpu_to_le64(sup
->sup_sui
.sui_lastmod
);
997 if (nilfs_suinfo_update_nblocks(sup
))
998 su
->su_nblocks
= cpu_to_le32(sup
->sup_sui
.sui_nblocks
);
1000 if (nilfs_suinfo_update_flags(sup
)) {
1002 * Active flag is a virtual flag projected by running
1003 * nilfs kernel code - drop it not to write it to
1006 sup
->sup_sui
.sui_flags
&=
1007 ~BIT(NILFS_SEGMENT_USAGE_ACTIVE
);
1009 cleansi
= nilfs_suinfo_clean(&sup
->sup_sui
);
1010 cleansu
= nilfs_segment_usage_clean(su
);
1011 dirtysi
= nilfs_suinfo_dirty(&sup
->sup_sui
);
1012 dirtysu
= nilfs_segment_usage_dirty(su
);
1014 if (cleansi
&& !cleansu
)
1016 else if (!cleansi
&& cleansu
)
1019 if (dirtysi
&& !dirtysu
)
1021 else if (!dirtysi
&& dirtysu
)
1024 su
->su_flags
= cpu_to_le32(sup
->sup_sui
.sui_flags
);
1029 sup
= (void *)sup
+ supsz
;
1033 prev_blkoff
= blkoff
;
1034 blkoff
= nilfs_sufile_get_blkoff(sufile
, sup
->sup_segnum
);
1035 if (blkoff
== prev_blkoff
)
1038 /* get different block */
1039 mark_buffer_dirty(bh
);
1041 ret
= nilfs_mdt_get_block(sufile
, blkoff
, 1, NULL
, &bh
);
1042 if (unlikely(ret
< 0))
1045 mark_buffer_dirty(bh
);
1049 if (ncleaned
|| ndirtied
) {
1050 nilfs_sufile_mod_counter(header_bh
, (u64
)ncleaned
,
1052 NILFS_SUI(sufile
)->ncleansegs
+= ncleaned
;
1054 nilfs_mdt_mark_dirty(sufile
);
1058 up_write(&NILFS_MDT(sufile
)->mi_sem
);
1063 * nilfs_sufile_trim_fs() - trim ioctl handle function
1064 * @sufile: inode of segment usage file
1065 * @range: fstrim_range structure
1067 * start: First Byte to trim
1068 * len: number of Bytes to trim from start
1069 * minlen: minimum extent length in Bytes
1071 * Decription: nilfs_sufile_trim_fs goes through all segments containing bytes
1072 * from start to start+len. start is rounded up to the next block boundary
1073 * and start+len is rounded down. For each clean segment blkdev_issue_discard
1074 * function is invoked.
1076 * Return Value: On success, 0 is returned or negative error code, otherwise.
1078 int nilfs_sufile_trim_fs(struct inode
*sufile
, struct fstrim_range
*range
)
1080 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
1081 struct buffer_head
*su_bh
;
1082 struct nilfs_segment_usage
*su
;
1085 size_t n
, i
, susz
= NILFS_MDT(sufile
)->mi_entry_size
;
1086 sector_t seg_start
, seg_end
, start_block
, end_block
;
1087 sector_t start
= 0, nblocks
= 0;
1088 u64 segnum
, segnum_end
, minlen
, len
, max_blocks
, ndiscarded
= 0;
1090 unsigned int sects_per_block
;
1092 sects_per_block
= (1 << nilfs
->ns_blocksize_bits
) /
1093 bdev_logical_block_size(nilfs
->ns_bdev
);
1094 len
= range
->len
>> nilfs
->ns_blocksize_bits
;
1095 minlen
= range
->minlen
>> nilfs
->ns_blocksize_bits
;
1096 max_blocks
= ((u64
)nilfs
->ns_nsegments
* nilfs
->ns_blocks_per_segment
);
1098 if (!len
|| range
->start
>= max_blocks
<< nilfs
->ns_blocksize_bits
)
1101 start_block
= (range
->start
+ nilfs
->ns_blocksize
- 1) >>
1102 nilfs
->ns_blocksize_bits
;
1105 * range->len can be very large (actually, it is set to
1106 * ULLONG_MAX by default) - truncate upper end of the range
1107 * carefully so as not to overflow.
1109 if (max_blocks
- start_block
< len
)
1110 end_block
= max_blocks
- 1;
1112 end_block
= start_block
+ len
- 1;
1114 segnum
= nilfs_get_segnum_of_block(nilfs
, start_block
);
1115 segnum_end
= nilfs_get_segnum_of_block(nilfs
, end_block
);
1117 down_read(&NILFS_MDT(sufile
)->mi_sem
);
1119 while (segnum
<= segnum_end
) {
1120 n
= nilfs_sufile_segment_usages_in_block(sufile
, segnum
,
1123 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0,
1133 offset
= nilfs_sufile_segment_usage_offset(sufile
, segnum
,
1135 su
= kaddr
= kmap_local_folio(su_bh
->b_folio
, offset
);
1136 for (i
= 0; i
< n
; ++i
, ++segnum
, su
= (void *)su
+ susz
) {
1137 if (!nilfs_segment_usage_clean(su
))
1140 nilfs_get_segment_range(nilfs
, segnum
, &seg_start
,
1144 /* start new extent */
1146 nblocks
= seg_end
- seg_start
+ 1;
1150 if (start
+ nblocks
== seg_start
) {
1151 /* add to previous extent */
1152 nblocks
+= seg_end
- seg_start
+ 1;
1156 /* discard previous extent */
1157 if (start
< start_block
) {
1158 nblocks
-= start_block
- start
;
1159 start
= start_block
;
1162 if (nblocks
>= minlen
) {
1163 kunmap_local(kaddr
);
1165 ret
= blkdev_issue_discard(nilfs
->ns_bdev
,
1166 start
* sects_per_block
,
1167 nblocks
* sects_per_block
,
1174 ndiscarded
+= nblocks
;
1175 offset
= nilfs_sufile_segment_usage_offset(
1176 sufile
, segnum
, su_bh
);
1177 su
= kaddr
= kmap_local_folio(su_bh
->b_folio
,
1181 /* start new extent */
1183 nblocks
= seg_end
- seg_start
+ 1;
1185 kunmap_local(kaddr
);
1191 /* discard last extent */
1192 if (start
< start_block
) {
1193 nblocks
-= start_block
- start
;
1194 start
= start_block
;
1196 if (start
+ nblocks
> end_block
+ 1)
1197 nblocks
= end_block
- start
+ 1;
1199 if (nblocks
>= minlen
) {
1200 ret
= blkdev_issue_discard(nilfs
->ns_bdev
,
1201 start
* sects_per_block
,
1202 nblocks
* sects_per_block
,
1205 ndiscarded
+= nblocks
;
1210 up_read(&NILFS_MDT(sufile
)->mi_sem
);
1212 range
->len
= ndiscarded
<< nilfs
->ns_blocksize_bits
;
1217 * nilfs_sufile_read - read or get sufile inode
1218 * @sb: super block instance
1219 * @susize: size of a segment usage entry
1220 * @raw_inode: on-disk sufile inode
1221 * @inodep: buffer to store the inode
1223 int nilfs_sufile_read(struct super_block
*sb
, size_t susize
,
1224 struct nilfs_inode
*raw_inode
, struct inode
**inodep
)
1226 struct inode
*sufile
;
1227 struct nilfs_sufile_info
*sui
;
1228 struct buffer_head
*header_bh
;
1229 struct nilfs_sufile_header
*header
;
1232 if (susize
> sb
->s_blocksize
) {
1233 nilfs_err(sb
, "too large segment usage size: %zu bytes",
1236 } else if (susize
< NILFS_MIN_SEGMENT_USAGE_SIZE
) {
1237 nilfs_err(sb
, "too small segment usage size: %zu bytes",
1242 sufile
= nilfs_iget_locked(sb
, NULL
, NILFS_SUFILE_INO
);
1243 if (unlikely(!sufile
))
1245 if (!(sufile
->i_state
& I_NEW
))
1248 err
= nilfs_mdt_init(sufile
, NILFS_MDT_GFP
, sizeof(*sui
));
1252 nilfs_mdt_set_entry_size(sufile
, susize
,
1253 sizeof(struct nilfs_sufile_header
));
1255 err
= nilfs_read_inode_common(sufile
, raw_inode
);
1259 err
= nilfs_mdt_get_block(sufile
, 0, 0, NULL
, &header_bh
);
1260 if (unlikely(err
)) {
1261 if (err
== -ENOENT
) {
1263 "missing header block in segment usage metadata");
1269 sui
= NILFS_SUI(sufile
);
1270 header
= kmap_local_folio(header_bh
->b_folio
, 0);
1271 sui
->ncleansegs
= le64_to_cpu(header
->sh_ncleansegs
);
1272 kunmap_local(header
);
1275 sui
->allocmax
= nilfs_sufile_get_nsegments(sufile
) - 1;
1278 unlock_new_inode(sufile
);
1283 iget_failed(sufile
);