/*
 * sufile.c - NILFS segment usage file.
 *
 * Copyright (C) 2006-2008 Nippon Telegraph and Telephone Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 *
 * Written by Koji Sato <koji@osrg.net>.
 * Revised by Ryusuke Konishi <ryusuke@osrg.net>.
 */
#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/buffer_head.h>
#include <linux/errno.h>
#include <linux/nilfs2_fs.h>
#include "mdt.h"
#include "sufile.h"
34 * struct nilfs_sufile_info - on-memory private data of sufile
35 * @mi: on-memory private data of metadata file
36 * @ncleansegs: number of clean segments
37 * @allocmin: lower limit of allocatable segment range
38 * @allocmax: upper limit of allocatable segment range
40 struct nilfs_sufile_info
{
41 struct nilfs_mdt_info mi
;
42 unsigned long ncleansegs
;/* number of clean segments */
43 __u64 allocmin
; /* lower limit of allocatable segment range */
44 __u64 allocmax
; /* upper limit of allocatable segment range */
47 static inline struct nilfs_sufile_info
*NILFS_SUI(struct inode
*sufile
)
49 return (struct nilfs_sufile_info
*)NILFS_MDT(sufile
);
52 static inline unsigned long
53 nilfs_sufile_segment_usages_per_block(const struct inode
*sufile
)
55 return NILFS_MDT(sufile
)->mi_entries_per_block
;
59 nilfs_sufile_get_blkoff(const struct inode
*sufile
, __u64 segnum
)
61 __u64 t
= segnum
+ NILFS_MDT(sufile
)->mi_first_entry_offset
;
62 do_div(t
, nilfs_sufile_segment_usages_per_block(sufile
));
63 return (unsigned long)t
;
67 nilfs_sufile_get_offset(const struct inode
*sufile
, __u64 segnum
)
69 __u64 t
= segnum
+ NILFS_MDT(sufile
)->mi_first_entry_offset
;
70 return do_div(t
, nilfs_sufile_segment_usages_per_block(sufile
));
74 nilfs_sufile_segment_usages_in_block(const struct inode
*sufile
, __u64 curr
,
77 return min_t(unsigned long,
78 nilfs_sufile_segment_usages_per_block(sufile
) -
79 nilfs_sufile_get_offset(sufile
, curr
),
83 static struct nilfs_segment_usage
*
84 nilfs_sufile_block_get_segment_usage(const struct inode
*sufile
, __u64 segnum
,
85 struct buffer_head
*bh
, void *kaddr
)
87 return kaddr
+ bh_offset(bh
) +
88 nilfs_sufile_get_offset(sufile
, segnum
) *
89 NILFS_MDT(sufile
)->mi_entry_size
;
92 static inline int nilfs_sufile_get_header_block(struct inode
*sufile
,
93 struct buffer_head
**bhp
)
95 return nilfs_mdt_get_block(sufile
, 0, 0, NULL
, bhp
);
99 nilfs_sufile_get_segment_usage_block(struct inode
*sufile
, __u64 segnum
,
100 int create
, struct buffer_head
**bhp
)
102 return nilfs_mdt_get_block(sufile
,
103 nilfs_sufile_get_blkoff(sufile
, segnum
),
107 static int nilfs_sufile_delete_segment_usage_block(struct inode
*sufile
,
110 return nilfs_mdt_delete_block(sufile
,
111 nilfs_sufile_get_blkoff(sufile
, segnum
));
114 static void nilfs_sufile_mod_counter(struct buffer_head
*header_bh
,
115 u64 ncleanadd
, u64 ndirtyadd
)
117 struct nilfs_sufile_header
*header
;
120 kaddr
= kmap_atomic(header_bh
->b_page
);
121 header
= kaddr
+ bh_offset(header_bh
);
122 le64_add_cpu(&header
->sh_ncleansegs
, ncleanadd
);
123 le64_add_cpu(&header
->sh_ndirtysegs
, ndirtyadd
);
124 kunmap_atomic(kaddr
);
126 mark_buffer_dirty(header_bh
);
130 * nilfs_sufile_get_ncleansegs - return the number of clean segments
131 * @sufile: inode of segment usage file
133 unsigned long nilfs_sufile_get_ncleansegs(struct inode
*sufile
)
135 return NILFS_SUI(sufile
)->ncleansegs
;
139 * nilfs_sufile_updatev - modify multiple segment usages at a time
140 * @sufile: inode of segment usage file
141 * @segnumv: array of segment numbers
142 * @nsegs: size of @segnumv array
143 * @create: creation flag
144 * @ndone: place to store number of modified segments on @segnumv
145 * @dofunc: primitive operation for the update
147 * Description: nilfs_sufile_updatev() repeatedly calls @dofunc
148 * against the given array of segments. The @dofunc is called with
149 * buffers of a header block and the sufile block in which the target
150 * segment usage entry is contained. If @ndone is given, the number
151 * of successfully modified segments from the head is stored in the
152 * place @ndone points to.
154 * Return Value: On success, zero is returned. On error, one of the
155 * following negative error codes is returned.
159 * %-ENOMEM - Insufficient amount of memory available.
161 * %-ENOENT - Given segment usage is in hole block (may be returned if
164 * %-EINVAL - Invalid segment usage number
166 int nilfs_sufile_updatev(struct inode
*sufile
, __u64
*segnumv
, size_t nsegs
,
167 int create
, size_t *ndone
,
168 void (*dofunc
)(struct inode
*, __u64
,
169 struct buffer_head
*,
170 struct buffer_head
*))
172 struct buffer_head
*header_bh
, *bh
;
173 unsigned long blkoff
, prev_blkoff
;
175 size_t nerr
= 0, n
= 0;
178 if (unlikely(nsegs
== 0))
181 down_write(&NILFS_MDT(sufile
)->mi_sem
);
182 for (seg
= segnumv
; seg
< segnumv
+ nsegs
; seg
++) {
183 if (unlikely(*seg
>= nilfs_sufile_get_nsegments(sufile
))) {
185 "%s: invalid segment number: %llu\n", __func__
,
186 (unsigned long long)*seg
);
195 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
200 blkoff
= nilfs_sufile_get_blkoff(sufile
, *seg
);
201 ret
= nilfs_mdt_get_block(sufile
, blkoff
, create
, NULL
, &bh
);
206 dofunc(sufile
, *seg
, header_bh
, bh
);
208 if (++seg
>= segnumv
+ nsegs
)
210 prev_blkoff
= blkoff
;
211 blkoff
= nilfs_sufile_get_blkoff(sufile
, *seg
);
212 if (blkoff
== prev_blkoff
)
215 /* get different block */
217 ret
= nilfs_mdt_get_block(sufile
, blkoff
, create
, NULL
, &bh
);
218 if (unlikely(ret
< 0))
227 up_write(&NILFS_MDT(sufile
)->mi_sem
);
234 int nilfs_sufile_update(struct inode
*sufile
, __u64 segnum
, int create
,
235 void (*dofunc
)(struct inode
*, __u64
,
236 struct buffer_head
*,
237 struct buffer_head
*))
239 struct buffer_head
*header_bh
, *bh
;
242 if (unlikely(segnum
>= nilfs_sufile_get_nsegments(sufile
))) {
243 printk(KERN_WARNING
"%s: invalid segment number: %llu\n",
244 __func__
, (unsigned long long)segnum
);
247 down_write(&NILFS_MDT(sufile
)->mi_sem
);
249 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
253 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, create
, &bh
);
255 dofunc(sufile
, segnum
, header_bh
, bh
);
261 up_write(&NILFS_MDT(sufile
)->mi_sem
);
266 * nilfs_sufile_set_alloc_range - limit range of segment to be allocated
267 * @sufile: inode of segment usage file
268 * @start: minimum segment number of allocatable region (inclusive)
269 * @end: maximum segment number of allocatable region (inclusive)
271 * Return Value: On success, 0 is returned. On error, one of the
272 * following negative error codes is returned.
274 * %-ERANGE - invalid segment region
276 int nilfs_sufile_set_alloc_range(struct inode
*sufile
, __u64 start
, __u64 end
)
278 struct nilfs_sufile_info
*sui
= NILFS_SUI(sufile
);
282 down_write(&NILFS_MDT(sufile
)->mi_sem
);
283 nsegs
= nilfs_sufile_get_nsegments(sufile
);
285 if (start
<= end
&& end
< nsegs
) {
286 sui
->allocmin
= start
;
290 up_write(&NILFS_MDT(sufile
)->mi_sem
);
295 * nilfs_sufile_alloc - allocate a segment
296 * @sufile: inode of segment usage file
297 * @segnump: pointer to segment number
299 * Description: nilfs_sufile_alloc() allocates a clean segment.
301 * Return Value: On success, 0 is returned and the segment number of the
302 * allocated segment is stored in the place pointed by @segnump. On error, one
303 * of the following negative error codes is returned.
307 * %-ENOMEM - Insufficient amount of memory available.
309 * %-ENOSPC - No clean segment left.
311 int nilfs_sufile_alloc(struct inode
*sufile
, __u64
*segnump
)
313 struct buffer_head
*header_bh
, *su_bh
;
314 struct nilfs_sufile_header
*header
;
315 struct nilfs_segment_usage
*su
;
316 struct nilfs_sufile_info
*sui
= NILFS_SUI(sufile
);
317 size_t susz
= NILFS_MDT(sufile
)->mi_entry_size
;
318 __u64 segnum
, maxsegnum
, last_alloc
;
320 unsigned long nsegments
, ncleansegs
, nsus
, cnt
;
323 down_write(&NILFS_MDT(sufile
)->mi_sem
);
325 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
328 kaddr
= kmap_atomic(header_bh
->b_page
);
329 header
= kaddr
+ bh_offset(header_bh
);
330 ncleansegs
= le64_to_cpu(header
->sh_ncleansegs
);
331 last_alloc
= le64_to_cpu(header
->sh_last_alloc
);
332 kunmap_atomic(kaddr
);
334 nsegments
= nilfs_sufile_get_nsegments(sufile
);
335 maxsegnum
= sui
->allocmax
;
336 segnum
= last_alloc
+ 1;
337 if (segnum
< sui
->allocmin
|| segnum
> sui
->allocmax
)
338 segnum
= sui
->allocmin
;
340 for (cnt
= 0; cnt
< nsegments
; cnt
+= nsus
) {
341 if (segnum
> maxsegnum
) {
342 if (cnt
< sui
->allocmax
- sui
->allocmin
+ 1) {
344 * wrap around in the limited region.
345 * if allocation started from
346 * sui->allocmin, this never happens.
348 segnum
= sui
->allocmin
;
349 maxsegnum
= last_alloc
;
350 } else if (segnum
> sui
->allocmin
&&
351 sui
->allocmax
+ 1 < nsegments
) {
352 segnum
= sui
->allocmax
+ 1;
353 maxsegnum
= nsegments
- 1;
354 } else if (sui
->allocmin
> 0) {
356 maxsegnum
= sui
->allocmin
- 1;
358 break; /* never happens */
361 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 1,
365 kaddr
= kmap_atomic(su_bh
->b_page
);
366 su
= nilfs_sufile_block_get_segment_usage(
367 sufile
, segnum
, su_bh
, kaddr
);
369 nsus
= nilfs_sufile_segment_usages_in_block(
370 sufile
, segnum
, maxsegnum
);
371 for (j
= 0; j
< nsus
; j
++, su
= (void *)su
+ susz
, segnum
++) {
372 if (!nilfs_segment_usage_clean(su
))
374 /* found a clean segment */
375 nilfs_segment_usage_set_dirty(su
);
376 kunmap_atomic(kaddr
);
378 kaddr
= kmap_atomic(header_bh
->b_page
);
379 header
= kaddr
+ bh_offset(header_bh
);
380 le64_add_cpu(&header
->sh_ncleansegs
, -1);
381 le64_add_cpu(&header
->sh_ndirtysegs
, 1);
382 header
->sh_last_alloc
= cpu_to_le64(segnum
);
383 kunmap_atomic(kaddr
);
386 mark_buffer_dirty(header_bh
);
387 mark_buffer_dirty(su_bh
);
388 nilfs_mdt_mark_dirty(sufile
);
394 kunmap_atomic(kaddr
);
398 /* no segments left */
405 up_write(&NILFS_MDT(sufile
)->mi_sem
);
409 void nilfs_sufile_do_cancel_free(struct inode
*sufile
, __u64 segnum
,
410 struct buffer_head
*header_bh
,
411 struct buffer_head
*su_bh
)
413 struct nilfs_segment_usage
*su
;
416 kaddr
= kmap_atomic(su_bh
->b_page
);
417 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, su_bh
, kaddr
);
418 if (unlikely(!nilfs_segment_usage_clean(su
))) {
419 printk(KERN_WARNING
"%s: segment %llu must be clean\n",
420 __func__
, (unsigned long long)segnum
);
421 kunmap_atomic(kaddr
);
424 nilfs_segment_usage_set_dirty(su
);
425 kunmap_atomic(kaddr
);
427 nilfs_sufile_mod_counter(header_bh
, -1, 1);
428 NILFS_SUI(sufile
)->ncleansegs
--;
430 mark_buffer_dirty(su_bh
);
431 nilfs_mdt_mark_dirty(sufile
);
434 void nilfs_sufile_do_scrap(struct inode
*sufile
, __u64 segnum
,
435 struct buffer_head
*header_bh
,
436 struct buffer_head
*su_bh
)
438 struct nilfs_segment_usage
*su
;
442 kaddr
= kmap_atomic(su_bh
->b_page
);
443 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, su_bh
, kaddr
);
444 if (su
->su_flags
== cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY
) &&
445 su
->su_nblocks
== cpu_to_le32(0)) {
446 kunmap_atomic(kaddr
);
449 clean
= nilfs_segment_usage_clean(su
);
450 dirty
= nilfs_segment_usage_dirty(su
);
452 /* make the segment garbage */
453 su
->su_lastmod
= cpu_to_le64(0);
454 su
->su_nblocks
= cpu_to_le32(0);
455 su
->su_flags
= cpu_to_le32(1UL << NILFS_SEGMENT_USAGE_DIRTY
);
456 kunmap_atomic(kaddr
);
458 nilfs_sufile_mod_counter(header_bh
, clean
? (u64
)-1 : 0, dirty
? 0 : 1);
459 NILFS_SUI(sufile
)->ncleansegs
-= clean
;
461 mark_buffer_dirty(su_bh
);
462 nilfs_mdt_mark_dirty(sufile
);
465 void nilfs_sufile_do_free(struct inode
*sufile
, __u64 segnum
,
466 struct buffer_head
*header_bh
,
467 struct buffer_head
*su_bh
)
469 struct nilfs_segment_usage
*su
;
473 kaddr
= kmap_atomic(su_bh
->b_page
);
474 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, su_bh
, kaddr
);
475 if (nilfs_segment_usage_clean(su
)) {
476 printk(KERN_WARNING
"%s: segment %llu is already clean\n",
477 __func__
, (unsigned long long)segnum
);
478 kunmap_atomic(kaddr
);
481 WARN_ON(nilfs_segment_usage_error(su
));
482 WARN_ON(!nilfs_segment_usage_dirty(su
));
484 sudirty
= nilfs_segment_usage_dirty(su
);
485 nilfs_segment_usage_set_clean(su
);
486 kunmap_atomic(kaddr
);
487 mark_buffer_dirty(su_bh
);
489 nilfs_sufile_mod_counter(header_bh
, 1, sudirty
? (u64
)-1 : 0);
490 NILFS_SUI(sufile
)->ncleansegs
++;
492 nilfs_mdt_mark_dirty(sufile
);
496 * nilfs_sufile_mark_dirty - mark the buffer having a segment usage dirty
497 * @sufile: inode of segment usage file
498 * @segnum: segment number
500 int nilfs_sufile_mark_dirty(struct inode
*sufile
, __u64 segnum
)
502 struct buffer_head
*bh
;
505 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0, &bh
);
507 mark_buffer_dirty(bh
);
508 nilfs_mdt_mark_dirty(sufile
);
515 * nilfs_sufile_set_segment_usage - set usage of a segment
516 * @sufile: inode of segment usage file
517 * @segnum: segment number
518 * @nblocks: number of live blocks in the segment
519 * @modtime: modification time (option)
521 int nilfs_sufile_set_segment_usage(struct inode
*sufile
, __u64 segnum
,
522 unsigned long nblocks
, time_t modtime
)
524 struct buffer_head
*bh
;
525 struct nilfs_segment_usage
*su
;
529 down_write(&NILFS_MDT(sufile
)->mi_sem
);
530 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0, &bh
);
534 kaddr
= kmap_atomic(bh
->b_page
);
535 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, bh
, kaddr
);
536 WARN_ON(nilfs_segment_usage_error(su
));
538 su
->su_lastmod
= cpu_to_le64(modtime
);
539 su
->su_nblocks
= cpu_to_le32(nblocks
);
540 kunmap_atomic(kaddr
);
542 mark_buffer_dirty(bh
);
543 nilfs_mdt_mark_dirty(sufile
);
547 up_write(&NILFS_MDT(sufile
)->mi_sem
);
552 * nilfs_sufile_get_stat - get segment usage statistics
553 * @sufile: inode of segment usage file
554 * @stat: pointer to a structure of segment usage statistics
556 * Description: nilfs_sufile_get_stat() returns information about segment
559 * Return Value: On success, 0 is returned, and segment usage information is
560 * stored in the place pointed by @stat. On error, one of the following
561 * negative error codes is returned.
565 * %-ENOMEM - Insufficient amount of memory available.
567 int nilfs_sufile_get_stat(struct inode
*sufile
, struct nilfs_sustat
*sustat
)
569 struct buffer_head
*header_bh
;
570 struct nilfs_sufile_header
*header
;
571 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
575 down_read(&NILFS_MDT(sufile
)->mi_sem
);
577 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
581 kaddr
= kmap_atomic(header_bh
->b_page
);
582 header
= kaddr
+ bh_offset(header_bh
);
583 sustat
->ss_nsegs
= nilfs_sufile_get_nsegments(sufile
);
584 sustat
->ss_ncleansegs
= le64_to_cpu(header
->sh_ncleansegs
);
585 sustat
->ss_ndirtysegs
= le64_to_cpu(header
->sh_ndirtysegs
);
586 sustat
->ss_ctime
= nilfs
->ns_ctime
;
587 sustat
->ss_nongc_ctime
= nilfs
->ns_nongc_ctime
;
588 spin_lock(&nilfs
->ns_last_segment_lock
);
589 sustat
->ss_prot_seq
= nilfs
->ns_prot_seq
;
590 spin_unlock(&nilfs
->ns_last_segment_lock
);
591 kunmap_atomic(kaddr
);
595 up_read(&NILFS_MDT(sufile
)->mi_sem
);
599 void nilfs_sufile_do_set_error(struct inode
*sufile
, __u64 segnum
,
600 struct buffer_head
*header_bh
,
601 struct buffer_head
*su_bh
)
603 struct nilfs_segment_usage
*su
;
607 kaddr
= kmap_atomic(su_bh
->b_page
);
608 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
, su_bh
, kaddr
);
609 if (nilfs_segment_usage_error(su
)) {
610 kunmap_atomic(kaddr
);
613 suclean
= nilfs_segment_usage_clean(su
);
614 nilfs_segment_usage_set_error(su
);
615 kunmap_atomic(kaddr
);
618 nilfs_sufile_mod_counter(header_bh
, -1, 0);
619 NILFS_SUI(sufile
)->ncleansegs
--;
621 mark_buffer_dirty(su_bh
);
622 nilfs_mdt_mark_dirty(sufile
);
626 * nilfs_sufile_truncate_range - truncate range of segment array
627 * @sufile: inode of segment usage file
628 * @start: start segment number (inclusive)
629 * @end: end segment number (inclusive)
631 * Return Value: On success, 0 is returned. On error, one of the
632 * following negative error codes is returned.
636 * %-ENOMEM - Insufficient amount of memory available.
638 * %-EINVAL - Invalid number of segments specified
640 * %-EBUSY - Dirty or active segments are present in the range
642 static int nilfs_sufile_truncate_range(struct inode
*sufile
,
643 __u64 start
, __u64 end
)
645 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
646 struct buffer_head
*header_bh
;
647 struct buffer_head
*su_bh
;
648 struct nilfs_segment_usage
*su
, *su2
;
649 size_t susz
= NILFS_MDT(sufile
)->mi_entry_size
;
650 unsigned long segusages_per_block
;
651 unsigned long nsegs
, ncleaned
;
658 nsegs
= nilfs_sufile_get_nsegments(sufile
);
661 if (start
> end
|| start
>= nsegs
)
664 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
668 segusages_per_block
= nilfs_sufile_segment_usages_per_block(sufile
);
671 for (segnum
= start
; segnum
<= end
; segnum
+= n
) {
672 n
= min_t(unsigned long,
673 segusages_per_block
-
674 nilfs_sufile_get_offset(sufile
, segnum
),
676 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0,
684 kaddr
= kmap_atomic(su_bh
->b_page
);
685 su
= nilfs_sufile_block_get_segment_usage(
686 sufile
, segnum
, su_bh
, kaddr
);
688 for (j
= 0; j
< n
; j
++, su
= (void *)su
+ susz
) {
689 if ((le32_to_cpu(su
->su_flags
) &
690 ~(1UL << NILFS_SEGMENT_USAGE_ERROR
)) ||
691 nilfs_segment_is_active(nilfs
, segnum
+ j
)) {
693 kunmap_atomic(kaddr
);
699 for (su
= su2
, j
= 0; j
< n
; j
++, su
= (void *)su
+ susz
) {
700 if (nilfs_segment_usage_error(su
)) {
701 nilfs_segment_usage_set_clean(su
);
705 kunmap_atomic(kaddr
);
707 mark_buffer_dirty(su_bh
);
712 if (n
== segusages_per_block
) {
714 nilfs_sufile_delete_segment_usage_block(sufile
, segnum
);
721 NILFS_SUI(sufile
)->ncleansegs
+= ncleaned
;
722 nilfs_sufile_mod_counter(header_bh
, ncleaned
, 0);
723 nilfs_mdt_mark_dirty(sufile
);
731 * nilfs_sufile_resize - resize segment array
732 * @sufile: inode of segment usage file
733 * @newnsegs: new number of segments
735 * Return Value: On success, 0 is returned. On error, one of the
736 * following negative error codes is returned.
740 * %-ENOMEM - Insufficient amount of memory available.
742 * %-ENOSPC - Enough free space is not left for shrinking
744 * %-EBUSY - Dirty or active segments exist in the region to be truncated
746 int nilfs_sufile_resize(struct inode
*sufile
, __u64 newnsegs
)
748 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
749 struct buffer_head
*header_bh
;
750 struct nilfs_sufile_header
*header
;
751 struct nilfs_sufile_info
*sui
= NILFS_SUI(sufile
);
753 unsigned long nsegs
, nrsvsegs
;
756 down_write(&NILFS_MDT(sufile
)->mi_sem
);
758 nsegs
= nilfs_sufile_get_nsegments(sufile
);
759 if (nsegs
== newnsegs
)
763 nrsvsegs
= nilfs_nrsvsegs(nilfs
, newnsegs
);
764 if (newnsegs
< nsegs
&& nsegs
- newnsegs
+ nrsvsegs
> sui
->ncleansegs
)
767 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
771 if (newnsegs
> nsegs
) {
772 sui
->ncleansegs
+= newnsegs
- nsegs
;
773 } else /* newnsegs < nsegs */ {
774 ret
= nilfs_sufile_truncate_range(sufile
, newnsegs
, nsegs
- 1);
778 sui
->ncleansegs
-= nsegs
- newnsegs
;
781 kaddr
= kmap_atomic(header_bh
->b_page
);
782 header
= kaddr
+ bh_offset(header_bh
);
783 header
->sh_ncleansegs
= cpu_to_le64(sui
->ncleansegs
);
784 kunmap_atomic(kaddr
);
786 mark_buffer_dirty(header_bh
);
787 nilfs_mdt_mark_dirty(sufile
);
788 nilfs_set_nsegments(nilfs
, newnsegs
);
793 up_write(&NILFS_MDT(sufile
)->mi_sem
);
798 * nilfs_sufile_get_suinfo -
799 * @sufile: inode of segment usage file
800 * @segnum: segment number to start looking
801 * @buf: array of suinfo
802 * @sisz: byte size of suinfo
803 * @nsi: size of suinfo array
807 * Return Value: On success, 0 is returned and .... On error, one of the
808 * following negative error codes is returned.
812 * %-ENOMEM - Insufficient amount of memory available.
814 ssize_t
nilfs_sufile_get_suinfo(struct inode
*sufile
, __u64 segnum
, void *buf
,
815 unsigned sisz
, size_t nsi
)
817 struct buffer_head
*su_bh
;
818 struct nilfs_segment_usage
*su
;
819 struct nilfs_suinfo
*si
= buf
;
820 size_t susz
= NILFS_MDT(sufile
)->mi_entry_size
;
821 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
823 unsigned long nsegs
, segusages_per_block
;
827 down_read(&NILFS_MDT(sufile
)->mi_sem
);
829 segusages_per_block
= nilfs_sufile_segment_usages_per_block(sufile
);
830 nsegs
= min_t(unsigned long,
831 nilfs_sufile_get_nsegments(sufile
) - segnum
,
833 for (i
= 0; i
< nsegs
; i
+= n
, segnum
+= n
) {
834 n
= min_t(unsigned long,
835 segusages_per_block
-
836 nilfs_sufile_get_offset(sufile
, segnum
),
838 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0,
844 memset(si
, 0, sisz
* n
);
845 si
= (void *)si
+ sisz
* n
;
849 kaddr
= kmap_atomic(su_bh
->b_page
);
850 su
= nilfs_sufile_block_get_segment_usage(
851 sufile
, segnum
, su_bh
, kaddr
);
853 j
++, su
= (void *)su
+ susz
, si
= (void *)si
+ sisz
) {
854 si
->sui_lastmod
= le64_to_cpu(su
->su_lastmod
);
855 si
->sui_nblocks
= le32_to_cpu(su
->su_nblocks
);
856 si
->sui_flags
= le32_to_cpu(su
->su_flags
) &
857 ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE
);
858 if (nilfs_segment_is_active(nilfs
, segnum
+ j
))
860 (1UL << NILFS_SEGMENT_USAGE_ACTIVE
);
862 kunmap_atomic(kaddr
);
868 up_read(&NILFS_MDT(sufile
)->mi_sem
);
873 * nilfs_sufile_set_suinfo - sets segment usage info
874 * @sufile: inode of segment usage file
875 * @buf: array of suinfo_update
876 * @supsz: byte size of suinfo_update
877 * @nsup: size of suinfo_update array
879 * Description: Takes an array of nilfs_suinfo_update structs and updates
880 * segment usage accordingly. Only the fields indicated by the sup_flags
883 * Return Value: On success, 0 is returned. On error, one of the
884 * following negative error codes is returned.
888 * %-ENOMEM - Insufficient amount of memory available.
890 * %-EINVAL - Invalid values in input (segment number, flags or nblocks)
892 ssize_t
nilfs_sufile_set_suinfo(struct inode
*sufile
, void *buf
,
893 unsigned supsz
, size_t nsup
)
895 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
896 struct buffer_head
*header_bh
, *bh
;
897 struct nilfs_suinfo_update
*sup
, *supend
= buf
+ supsz
* nsup
;
898 struct nilfs_segment_usage
*su
;
900 unsigned long blkoff
, prev_blkoff
;
901 int cleansi
, cleansu
, dirtysi
, dirtysu
;
902 long ncleaned
= 0, ndirtied
= 0;
905 if (unlikely(nsup
== 0))
908 for (sup
= buf
; sup
< supend
; sup
= (void *)sup
+ supsz
) {
909 if (sup
->sup_segnum
>= nilfs
->ns_nsegments
911 (~0UL << __NR_NILFS_SUINFO_UPDATE_FIELDS
))
912 || (nilfs_suinfo_update_nblocks(sup
) &&
913 sup
->sup_sui
.sui_nblocks
>
914 nilfs
->ns_blocks_per_segment
))
918 down_write(&NILFS_MDT(sufile
)->mi_sem
);
920 ret
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
925 blkoff
= nilfs_sufile_get_blkoff(sufile
, sup
->sup_segnum
);
926 ret
= nilfs_mdt_get_block(sufile
, blkoff
, 1, NULL
, &bh
);
931 kaddr
= kmap_atomic(bh
->b_page
);
932 su
= nilfs_sufile_block_get_segment_usage(
933 sufile
, sup
->sup_segnum
, bh
, kaddr
);
935 if (nilfs_suinfo_update_lastmod(sup
))
936 su
->su_lastmod
= cpu_to_le64(sup
->sup_sui
.sui_lastmod
);
938 if (nilfs_suinfo_update_nblocks(sup
))
939 su
->su_nblocks
= cpu_to_le32(sup
->sup_sui
.sui_nblocks
);
941 if (nilfs_suinfo_update_flags(sup
)) {
943 * Active flag is a virtual flag projected by running
944 * nilfs kernel code - drop it not to write it to
947 sup
->sup_sui
.sui_flags
&=
948 ~(1UL << NILFS_SEGMENT_USAGE_ACTIVE
);
950 cleansi
= nilfs_suinfo_clean(&sup
->sup_sui
);
951 cleansu
= nilfs_segment_usage_clean(su
);
952 dirtysi
= nilfs_suinfo_dirty(&sup
->sup_sui
);
953 dirtysu
= nilfs_segment_usage_dirty(su
);
955 if (cleansi
&& !cleansu
)
957 else if (!cleansi
&& cleansu
)
960 if (dirtysi
&& !dirtysu
)
962 else if (!dirtysi
&& dirtysu
)
965 su
->su_flags
= cpu_to_le32(sup
->sup_sui
.sui_flags
);
968 kunmap_atomic(kaddr
);
970 sup
= (void *)sup
+ supsz
;
974 prev_blkoff
= blkoff
;
975 blkoff
= nilfs_sufile_get_blkoff(sufile
, sup
->sup_segnum
);
976 if (blkoff
== prev_blkoff
)
979 /* get different block */
980 mark_buffer_dirty(bh
);
982 ret
= nilfs_mdt_get_block(sufile
, blkoff
, 1, NULL
, &bh
);
983 if (unlikely(ret
< 0))
986 mark_buffer_dirty(bh
);
990 if (ncleaned
|| ndirtied
) {
991 nilfs_sufile_mod_counter(header_bh
, (u64
)ncleaned
,
993 NILFS_SUI(sufile
)->ncleansegs
+= ncleaned
;
995 nilfs_mdt_mark_dirty(sufile
);
999 up_write(&NILFS_MDT(sufile
)->mi_sem
);
1004 * nilfs_sufile_trim_fs() - trim ioctl handle function
1005 * @sufile: inode of segment usage file
1006 * @range: fstrim_range structure
1008 * start: First Byte to trim
1009 * len: number of Bytes to trim from start
1010 * minlen: minimum extent length in Bytes
1012 * Decription: nilfs_sufile_trim_fs goes through all segments containing bytes
1013 * from start to start+len. start is rounded up to the next block boundary
1014 * and start+len is rounded down. For each clean segment blkdev_issue_discard
1015 * function is invoked.
1017 * Return Value: On success, 0 is returned or negative error code, otherwise.
1019 int nilfs_sufile_trim_fs(struct inode
*sufile
, struct fstrim_range
*range
)
1021 struct the_nilfs
*nilfs
= sufile
->i_sb
->s_fs_info
;
1022 struct buffer_head
*su_bh
;
1023 struct nilfs_segment_usage
*su
;
1025 size_t n
, i
, susz
= NILFS_MDT(sufile
)->mi_entry_size
;
1026 sector_t seg_start
, seg_end
, start_block
, end_block
;
1027 sector_t start
= 0, nblocks
= 0;
1028 u64 segnum
, segnum_end
, minlen
, len
, max_blocks
, ndiscarded
= 0;
1030 unsigned int sects_per_block
;
1032 sects_per_block
= (1 << nilfs
->ns_blocksize_bits
) /
1033 bdev_logical_block_size(nilfs
->ns_bdev
);
1034 len
= range
->len
>> nilfs
->ns_blocksize_bits
;
1035 minlen
= range
->minlen
>> nilfs
->ns_blocksize_bits
;
1036 max_blocks
= ((u64
)nilfs
->ns_nsegments
* nilfs
->ns_blocks_per_segment
);
1038 if (!len
|| range
->start
>= max_blocks
<< nilfs
->ns_blocksize_bits
)
1041 start_block
= (range
->start
+ nilfs
->ns_blocksize
- 1) >>
1042 nilfs
->ns_blocksize_bits
;
1045 * range->len can be very large (actually, it is set to
1046 * ULLONG_MAX by default) - truncate upper end of the range
1047 * carefully so as not to overflow.
1049 if (max_blocks
- start_block
< len
)
1050 end_block
= max_blocks
- 1;
1052 end_block
= start_block
+ len
- 1;
1054 segnum
= nilfs_get_segnum_of_block(nilfs
, start_block
);
1055 segnum_end
= nilfs_get_segnum_of_block(nilfs
, end_block
);
1057 down_read(&NILFS_MDT(sufile
)->mi_sem
);
1059 while (segnum
<= segnum_end
) {
1060 n
= nilfs_sufile_segment_usages_in_block(sufile
, segnum
,
1063 ret
= nilfs_sufile_get_segment_usage_block(sufile
, segnum
, 0,
1073 kaddr
= kmap_atomic(su_bh
->b_page
);
1074 su
= nilfs_sufile_block_get_segment_usage(sufile
, segnum
,
1076 for (i
= 0; i
< n
; ++i
, ++segnum
, su
= (void *)su
+ susz
) {
1077 if (!nilfs_segment_usage_clean(su
))
1080 nilfs_get_segment_range(nilfs
, segnum
, &seg_start
,
1084 /* start new extent */
1086 nblocks
= seg_end
- seg_start
+ 1;
1090 if (start
+ nblocks
== seg_start
) {
1091 /* add to previous extent */
1092 nblocks
+= seg_end
- seg_start
+ 1;
1096 /* discard previous extent */
1097 if (start
< start_block
) {
1098 nblocks
-= start_block
- start
;
1099 start
= start_block
;
1102 if (nblocks
>= minlen
) {
1103 kunmap_atomic(kaddr
);
1105 ret
= blkdev_issue_discard(nilfs
->ns_bdev
,
1106 start
* sects_per_block
,
1107 nblocks
* sects_per_block
,
1114 ndiscarded
+= nblocks
;
1115 kaddr
= kmap_atomic(su_bh
->b_page
);
1116 su
= nilfs_sufile_block_get_segment_usage(
1117 sufile
, segnum
, su_bh
, kaddr
);
1120 /* start new extent */
1122 nblocks
= seg_end
- seg_start
+ 1;
1124 kunmap_atomic(kaddr
);
1130 /* discard last extent */
1131 if (start
< start_block
) {
1132 nblocks
-= start_block
- start
;
1133 start
= start_block
;
1135 if (start
+ nblocks
> end_block
+ 1)
1136 nblocks
= end_block
- start
+ 1;
1138 if (nblocks
>= minlen
) {
1139 ret
= blkdev_issue_discard(nilfs
->ns_bdev
,
1140 start
* sects_per_block
,
1141 nblocks
* sects_per_block
,
1144 ndiscarded
+= nblocks
;
1149 up_read(&NILFS_MDT(sufile
)->mi_sem
);
1151 range
->len
= ndiscarded
<< nilfs
->ns_blocksize_bits
;
1156 * nilfs_sufile_read - read or get sufile inode
1157 * @sb: super block instance
1158 * @susize: size of a segment usage entry
1159 * @raw_inode: on-disk sufile inode
1160 * @inodep: buffer to store the inode
1162 int nilfs_sufile_read(struct super_block
*sb
, size_t susize
,
1163 struct nilfs_inode
*raw_inode
, struct inode
**inodep
)
1165 struct inode
*sufile
;
1166 struct nilfs_sufile_info
*sui
;
1167 struct buffer_head
*header_bh
;
1168 struct nilfs_sufile_header
*header
;
1172 if (susize
> sb
->s_blocksize
) {
1174 "NILFS: too large segment usage size: %zu bytes.\n",
1177 } else if (susize
< NILFS_MIN_SEGMENT_USAGE_SIZE
) {
1179 "NILFS: too small segment usage size: %zu bytes.\n",
1184 sufile
= nilfs_iget_locked(sb
, NULL
, NILFS_SUFILE_INO
);
1185 if (unlikely(!sufile
))
1187 if (!(sufile
->i_state
& I_NEW
))
1190 err
= nilfs_mdt_init(sufile
, NILFS_MDT_GFP
, sizeof(*sui
));
1194 nilfs_mdt_set_entry_size(sufile
, susize
,
1195 sizeof(struct nilfs_sufile_header
));
1197 err
= nilfs_read_inode_common(sufile
, raw_inode
);
1201 err
= nilfs_sufile_get_header_block(sufile
, &header_bh
);
1205 sui
= NILFS_SUI(sufile
);
1206 kaddr
= kmap_atomic(header_bh
->b_page
);
1207 header
= kaddr
+ bh_offset(header_bh
);
1208 sui
->ncleansegs
= le64_to_cpu(header
->sh_ncleansegs
);
1209 kunmap_atomic(kaddr
);
1212 sui
->allocmax
= nilfs_sufile_get_nsegments(sufile
) - 1;
1215 unlock_new_inode(sufile
);
1220 iget_failed(sufile
);