// SPDX-License-Identifier: GPL-2.0
/*
 * f2fs debugging statistics
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 * Copyright (c) 2012 Linux Foundation
 * Copyright (c) 2012 Greg Kroah-Hartman <gregkh@linuxfoundation.org>
 */

#include <linux/fs.h>
#include <linux/backing-dev.h>
#include <linux/f2fs_fs.h>
#include <linux/blkdev.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include "gc.h"

static LIST_HEAD(f2fs_stat_list);
static DEFINE_RAW_SPINLOCK(f2fs_stat_lock);
#ifdef CONFIG_DEBUG_FS
static struct dentry *f2fs_debugfs_root;
#endif

/*
 * This function calculates the BDF of every segment.
 */
void f2fs_update_sit_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_stat_info *si = F2FS_STAT(sbi);
	unsigned long long blks_per_sec, hblks_per_sec, total_vblocks;
	unsigned long long bimodal, dist;
	unsigned int segno, vblocks;
	int ndirty = 0;

	bimodal = 0;
	total_vblocks = 0;
	blks_per_sec = CAP_BLKS_PER_SEC(sbi);
	hblks_per_sec = blks_per_sec / 2;
	for (segno = 0; segno < MAIN_SEGS(sbi); segno += SEGS_PER_SEC(sbi)) {
		vblocks = get_valid_blocks(sbi, segno, true);
		dist = abs(vblocks - hblks_per_sec);
		bimodal += dist * dist;

		if (vblocks > 0 && vblocks < blks_per_sec) {
			total_vblocks += vblocks;
			ndirty++;
		}
	}
	dist = div_u64(MAIN_SECS(sbi) * hblks_per_sec * hblks_per_sec, 100);
	si->bimodal = div64_u64(bimodal, dist);
	if (si->dirty_count)
		si->avg_vblocks = div_u64(total_vblocks, ndirty);
	else
		si->avg_vblocks = 0;
}

#ifdef CONFIG_DEBUG_FS
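/*
 * For multi-device mounts, count how many segments (and, with large
 * sections, how many sections) on each device are in-use, full, dirty,
 * free or prefree.
 */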
static void update_multidevice_stats(struct f2fs_sb_info *sbi)
{
	struct f2fs_stat_info *si = F2FS_STAT(sbi);
	struct f2fs_dev_stats *dev_stats = si->dev_stats;
	int i, j;

	if (!f2fs_is_multi_device(sbi))
		return;

	memset(dev_stats, 0, sizeof(struct f2fs_dev_stats) * sbi->s_ndevs);
	for (i = 0; i < sbi->s_ndevs; i++) {
		unsigned int start_segno, end_segno;
		block_t start_blk, end_blk;

		if (i == 0) {
			start_blk = MAIN_BLKADDR(sbi);
			end_blk = FDEV(i).end_blk + 1 - SEG0_BLKADDR(sbi);
		} else {
			start_blk = FDEV(i).start_blk;
			end_blk = FDEV(i).end_blk + 1;
		}

		start_segno = GET_SEGNO(sbi, start_blk);
		end_segno = GET_SEGNO(sbi, end_blk);

		for (j = start_segno; j < end_segno; j++) {
			unsigned int seg_blks, sec_blks;

			seg_blks = get_seg_entry(sbi, j)->valid_blocks;

			/* update segment stats */
			if (IS_CURSEG(sbi, j))
				dev_stats[i].devstats[0][DEVSTAT_INUSE]++;
			else if (seg_blks == BLKS_PER_SEG(sbi))
				dev_stats[i].devstats[0][DEVSTAT_FULL]++;
			else if (seg_blks != 0)
				dev_stats[i].devstats[0][DEVSTAT_DIRTY]++;
			else if (!test_bit(j, FREE_I(sbi)->free_segmap))
				dev_stats[i].devstats[0][DEVSTAT_FREE]++;
			else
				dev_stats[i].devstats[0][DEVSTAT_PREFREE]++;

			if (!__is_large_section(sbi) ||
					(j % SEGS_PER_SEC(sbi)) != 0)
				continue;

			sec_blks = get_sec_entry(sbi, j)->valid_blocks;

			/* update section stats */
			if (IS_CURSEC(sbi, GET_SEC_FROM_SEG(sbi, j)))
				dev_stats[i].devstats[1][DEVSTAT_INUSE]++;
			else if (sec_blks == BLKS_PER_SEC(sbi))
				dev_stats[i].devstats[1][DEVSTAT_FULL]++;
			else if (sec_blks != 0)
				dev_stats[i].devstats[1][DEVSTAT_DIRTY]++;
			else if (!test_bit(GET_SEC_FROM_SEG(sbi, j),
					FREE_I(sbi)->free_secmap))
				dev_stats[i].devstats[1][DEVSTAT_FREE]++;
			else
				dev_stats[i].devstats[1][DEVSTAT_PREFREE]++;
		}
	}
}
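
/*
 * Refresh the snapshot in f2fs_stat_info from the live superblock, segment
 * manager and node manager counters; called from stat_show() for each
 * mounted instance.
 */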
static void update_general_status(struct f2fs_sb_info *sbi)
{
	struct f2fs_stat_info *si = F2FS_STAT(sbi);
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	int i;

	/* these will be changed if online resize is done */
	si->main_area_segs = le32_to_cpu(raw_super->segment_count_main);
	si->main_area_sections = le32_to_cpu(raw_super->section_count);
	si->main_area_zones = si->main_area_sections /
				le32_to_cpu(raw_super->secs_per_zone);

	/* general extent cache stats */
	for (i = 0; i < NR_EXTENT_CACHES; i++) {
		struct extent_tree_info *eti = &sbi->extent_tree[i];

		si->hit_cached[i] = atomic64_read(&sbi->read_hit_cached[i]);
		si->hit_rbtree[i] = atomic64_read(&sbi->read_hit_rbtree[i]);
		si->total_ext[i] = atomic64_read(&sbi->total_hit_ext[i]);
		si->hit_total[i] = si->hit_cached[i] + si->hit_rbtree[i];
		si->ext_tree[i] = atomic_read(&eti->total_ext_tree);
		si->zombie_tree[i] = atomic_read(&eti->total_zombie_tree);
		si->ext_node[i] = atomic_read(&eti->total_ext_node);
	}
	/* read extent_cache only */
	si->hit_largest = atomic64_read(&sbi->read_hit_largest);
	si->hit_total[EX_READ] += si->hit_largest;

	/* block age extent_cache only */
	si->allocated_data_blocks = atomic64_read(&sbi->allocated_data_blocks);

	/* validation check of the segment numbers */
	si->ndirty_node = get_pages(sbi, F2FS_DIRTY_NODES);
	si->ndirty_dent = get_pages(sbi, F2FS_DIRTY_DENTS);
	si->ndirty_meta = get_pages(sbi, F2FS_DIRTY_META);
	si->ndirty_data = get_pages(sbi, F2FS_DIRTY_DATA);
	si->ndirty_qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
	si->ndirty_imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
	si->ndirty_dirs = sbi->ndirty_inode[DIR_INODE];
	si->ndirty_files = sbi->ndirty_inode[FILE_INODE];
	si->nquota_files = sbi->nquota_files;
	si->ndirty_all = sbi->ndirty_inode[DIRTY_META];
	si->aw_cnt = atomic_read(&sbi->atomic_files);
	si->max_aw_cnt = atomic_read(&sbi->max_aw_cnt);
	si->nr_dio_read = get_pages(sbi, F2FS_DIO_READ);
	si->nr_dio_write = get_pages(sbi, F2FS_DIO_WRITE);
	si->nr_wb_cp_data = get_pages(sbi, F2FS_WB_CP_DATA);
	si->nr_wb_data = get_pages(sbi, F2FS_WB_DATA);
	si->nr_rd_data = get_pages(sbi, F2FS_RD_DATA);
	si->nr_rd_node = get_pages(sbi, F2FS_RD_NODE);
	si->nr_rd_meta = get_pages(sbi, F2FS_RD_META);
	if (SM_I(sbi)->fcc_info) {
		si->nr_flushed =
			atomic_read(&SM_I(sbi)->fcc_info->issued_flush);
		si->nr_flushing =
			atomic_read(&SM_I(sbi)->fcc_info->queued_flush);
		si->flush_list_empty =
			llist_empty(&SM_I(sbi)->fcc_info->issue_list);
	}
	if (SM_I(sbi)->dcc_info) {
		si->nr_discarded =
			atomic_read(&SM_I(sbi)->dcc_info->issued_discard);
		si->nr_discarding =
			atomic_read(&SM_I(sbi)->dcc_info->queued_discard);
		si->nr_discard_cmd =
			atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
		si->undiscard_blks = SM_I(sbi)->dcc_info->undiscard_blks;
	}
	si->nr_issued_ckpt = atomic_read(&sbi->cprc_info.issued_ckpt);
	si->nr_total_ckpt = atomic_read(&sbi->cprc_info.total_ckpt);
	si->nr_queued_ckpt = atomic_read(&sbi->cprc_info.queued_ckpt);
	spin_lock(&sbi->cprc_info.stat_lock);
	si->cur_ckpt_time = sbi->cprc_info.cur_time;
	si->peak_ckpt_time = sbi->cprc_info.peak_time;
	spin_unlock(&sbi->cprc_info.stat_lock);
	si->total_count = BLKS_TO_SEGS(sbi, (int)sbi->user_block_count);
	si->rsvd_segs = reserved_segments(sbi);
	si->overp_segs = overprovision_segments(sbi);
	si->valid_count = valid_user_blocks(sbi);
	si->discard_blks = discard_blocks(sbi);
	si->valid_node_count = valid_node_count(sbi);
	si->valid_inode_count = valid_inode_count(sbi);
	si->inline_xattr = atomic_read(&sbi->inline_xattr);
	si->inline_inode = atomic_read(&sbi->inline_inode);
	si->inline_dir = atomic_read(&sbi->inline_dir);
	si->compr_inode = atomic_read(&sbi->compr_inode);
	si->swapfile_inode = atomic_read(&sbi->swapfile_inode);
	si->compr_blocks = atomic64_read(&sbi->compr_blocks);
	si->append = sbi->im[APPEND_INO].ino_num;
	si->update = sbi->im[UPDATE_INO].ino_num;
	si->orphans = sbi->im[ORPHAN_INO].ino_num;
	si->utilization = utilization(sbi);

	si->free_segs = free_segments(sbi);
	si->free_secs = free_sections(sbi);
	si->prefree_count = prefree_segments(sbi);
	si->dirty_count = dirty_segments(sbi);
	if (sbi->node_inode)
		si->node_pages = NODE_MAPPING(sbi)->nrpages;
	if (sbi->meta_inode)
		si->meta_pages = META_MAPPING(sbi)->nrpages;
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (sbi->compress_inode) {
		si->compress_pages = COMPRESS_MAPPING(sbi)->nrpages;
		si->compress_page_hit = atomic_read(&sbi->compress_page_hit);
	}
#endif
	si->nats = NM_I(sbi)->nat_cnt[TOTAL_NAT];
	si->dirty_nats = NM_I(sbi)->nat_cnt[DIRTY_NAT];
	si->sits = MAIN_SEGS(sbi);
	si->dirty_sits = SIT_I(sbi)->dirty_sentries;
	si->free_nids = NM_I(sbi)->nid_cnt[FREE_NID];
	si->avail_nids = NM_I(sbi)->available_nids;
	si->alloc_nids = NM_I(sbi)->nid_cnt[PREALLOC_NID];
	si->io_skip_bggc = sbi->io_skip_bggc;
	si->other_skip_bggc = sbi->other_skip_bggc;
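	/*
	 * util_free, util_valid and util_invalid are scaled so the three add
	 * up to 50; stat_show() uses them to draw the fixed-width
	 * "Distribution of User Blocks" bar.
	 */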
	si->util_free = (int)(BLKS_TO_SEGS(sbi, free_user_blocks(sbi)))
		* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
		/ 2;
	si->util_valid = (int)(BLKS_TO_SEGS(sbi, written_block_count(sbi)))
		* 100 / (int)(sbi->user_block_count >> sbi->log_blocks_per_seg)
		/ 2;
	si->util_invalid = 50 - si->util_free - si->util_valid;
	for (i = CURSEG_HOT_DATA; i < NO_CHECK_TYPE; i++) {
		struct curseg_info *curseg = CURSEG_I(sbi, i);

		si->curseg[i] = curseg->segno;
		si->cursec[i] = GET_SEC_FROM_SEG(sbi, curseg->segno);
		si->curzone[i] = GET_ZONE_FROM_SEC(sbi, si->cursec[i]);
	}

	for (i = META_CP; i < META_MAX; i++)
		si->meta_count[i] = atomic_read(&sbi->meta_count[i]);

	for (i = 0; i < NO_CHECK_TYPE; i++) {
		si->dirty_seg[i] = 0;
		si->full_seg[i] = 0;
		si->valid_blks[i] = 0;
	}

	for (i = 0; i < MAIN_SEGS(sbi); i++) {
		int blks = get_seg_entry(sbi, i)->valid_blocks;
		int type = get_seg_entry(sbi, i)->type;

		if (!blks)
			continue;

		if (blks == BLKS_PER_SEG(sbi))
			si->full_seg[type]++;
		else
			si->dirty_seg[type]++;
		si->valid_blks[type] += blks;
	}

	update_multidevice_stats(sbi);

	for (i = 0; i < MAX_CALL_TYPE; i++)
		si->cp_call_count[i] = atomic_read(&sbi->cp_call_count[i]);

	for (i = 0; i < 2; i++) {
		si->segment_count[i] = sbi->segment_count[i];
		si->block_count[i] = sbi->block_count[i];
	}

	si->inplace_count = atomic_read(&sbi->inplace_count);
}

/*
 * This function calculates memory footprint.
 */
static void update_mem_info(struct f2fs_sb_info *sbi)
{
	struct f2fs_stat_info *si = F2FS_STAT(sbi);
	int i;

	if (si->base_mem)
		goto get_cache;

	/* build stat */
	si->base_mem = sizeof(struct f2fs_stat_info);

	/* build superblock */
	si->base_mem += sizeof(struct f2fs_sb_info) + sbi->sb->s_blocksize;
	si->base_mem += 2 * sizeof(struct f2fs_inode_info);
	si->base_mem += sizeof(*sbi->ckpt);

	/* build sm */
	si->base_mem += sizeof(struct f2fs_sm_info);

	/* build sit */
	si->base_mem += sizeof(struct sit_info);
	si->base_mem += MAIN_SEGS(sbi) * sizeof(struct seg_entry);
	si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
	si->base_mem += 2 * SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
	si->base_mem += SIT_VBLOCK_MAP_SIZE * MAIN_SEGS(sbi);
	si->base_mem += SIT_VBLOCK_MAP_SIZE;
	if (__is_large_section(sbi))
		si->base_mem += MAIN_SECS(sbi) * sizeof(struct sec_entry);
	si->base_mem += __bitmap_size(sbi, SIT_BITMAP);

	/* build free segmap */
	si->base_mem += sizeof(struct free_segmap_info);
	si->base_mem += f2fs_bitmap_size(MAIN_SEGS(sbi));
	si->base_mem += f2fs_bitmap_size(MAIN_SECS(sbi));

	/* build curseg */
	si->base_mem += sizeof(struct curseg_info) * NR_CURSEG_TYPE;
	si->base_mem += PAGE_SIZE * NR_CURSEG_TYPE;

	/* build dirty segmap */
	si->base_mem += sizeof(struct dirty_seglist_info);
	si->base_mem += NR_DIRTY_TYPE * f2fs_bitmap_size(MAIN_SEGS(sbi));
	si->base_mem += f2fs_bitmap_size(MAIN_SECS(sbi));

	/* build nm */
	si->base_mem += sizeof(struct f2fs_nm_info);
	si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
	si->base_mem += F2FS_BLK_TO_BYTES(NM_I(sbi)->nat_bits_blocks);
	si->base_mem += NM_I(sbi)->nat_blocks *
				f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
	si->base_mem += NM_I(sbi)->nat_blocks / 8;
	si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);

get_cache:
	si->cache_mem = 0;

	/* build gc */
	if (sbi->gc_thread)
		si->cache_mem += sizeof(struct f2fs_gc_kthread);

	/* build merge flush thread */
	if (SM_I(sbi)->fcc_info)
		si->cache_mem += sizeof(struct flush_cmd_control);
	if (SM_I(sbi)->dcc_info) {
		si->cache_mem += sizeof(struct discard_cmd_control);
		si->cache_mem += sizeof(struct discard_cmd) *
			atomic_read(&SM_I(sbi)->dcc_info->discard_cmd_cnt);
	}

	/* free nids */
	si->cache_mem += (NM_I(sbi)->nid_cnt[FREE_NID] +
				NM_I(sbi)->nid_cnt[PREALLOC_NID]) *
				sizeof(struct free_nid);
	si->cache_mem += NM_I(sbi)->nat_cnt[TOTAL_NAT] *
				sizeof(struct nat_entry);
	si->cache_mem += NM_I(sbi)->nat_cnt[DIRTY_NAT] *
				sizeof(struct nat_entry_set);
	for (i = 0; i < MAX_INO_ENTRY; i++)
		si->cache_mem += sbi->im[i].ino_num * sizeof(struct ino_entry);

	for (i = 0; i < NR_EXTENT_CACHES; i++) {
		struct extent_tree_info *eti = &sbi->extent_tree[i];

		si->ext_mem[i] = atomic_read(&eti->total_ext_tree) *
						sizeof(struct extent_tree);
		si->ext_mem[i] += atomic_read(&eti->total_ext_node) *
						sizeof(struct extent_node);
		si->cache_mem += si->ext_mem[i];
	}

	si->page_mem = 0;
	if (sbi->node_inode) {
		unsigned long npages = NODE_MAPPING(sbi)->nrpages;

		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
	}
	if (sbi->meta_inode) {
		unsigned long npages = META_MAPPING(sbi)->nrpages;

		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
	}
#ifdef CONFIG_F2FS_FS_COMPRESSION
	if (sbi->compress_inode) {
		unsigned long npages = COMPRESS_MAPPING(sbi)->nrpages;

		si->page_mem += (unsigned long long)npages << PAGE_SHIFT;
	}
#endif
}
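
/* Names printed after "[SBI:" in stat_show() for each set s_flag bit. */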
static const char *s_flag[MAX_SBI_FLAG] = {
	[SBI_IS_DIRTY]		= "fs_dirty",
	[SBI_IS_CLOSE]		= "closing",
	[SBI_NEED_FSCK]		= "need_fsck",
	[SBI_POR_DOING]		= "recovering",
	[SBI_NEED_SB_WRITE]	= "sb_dirty",
	[SBI_NEED_CP]		= "need_cp",
	[SBI_IS_SHUTDOWN]	= "shutdown",
	[SBI_IS_RECOVERED]	= "recovered",
	[SBI_CP_DISABLED]	= "cp_disabled",
	[SBI_CP_DISABLED_QUICK]	= "cp_disabled_quick",
	[SBI_QUOTA_NEED_FLUSH]	= "quota_need_flush",
	[SBI_QUOTA_SKIP_FLUSH]	= "quota_skip_flush",
	[SBI_QUOTA_NEED_REPAIR]	= "quota_need_repair",
	[SBI_IS_RESIZEFS]	= "resizefs",
	[SBI_IS_FREEZING]	= "freezefs",
	[SBI_IS_WRITABLE]	= "writable",
};

static const char *ipu_mode_names[F2FS_IPU_MAX] = {
	[F2FS_IPU_FORCE]	= "FORCE",
	[F2FS_IPU_SSR]		= "SSR",
	[F2FS_IPU_UTIL]		= "UTIL",
	[F2FS_IPU_SSR_UTIL]	= "SSR_UTIL",
	[F2FS_IPU_FSYNC]	= "FSYNC",
	[F2FS_IPU_ASYNC]	= "ASYNC",
	[F2FS_IPU_NOCACHE]	= "NOCACHE",
	[F2FS_IPU_HONOR_OPU_WRITE]	= "HONOR_OPU_WRITE",
};
static int stat_show(struct seq_file *s, void *v)
{
	struct f2fs_stat_info *si;
	int i = 0, j = 0;
	unsigned long flags;

	raw_spin_lock_irqsave(&f2fs_stat_lock, flags);
	list_for_each_entry(si, &f2fs_stat_list, stat_list) {
		struct f2fs_sb_info *sbi = si->sbi;

		update_general_status(sbi);

		seq_printf(s, "\n=====[ partition info(%pg). #%d, %s, CP: %s]=====\n",
			sbi->sb->s_bdev, i++,
			f2fs_readonly(sbi->sb) ? "RO" : "RW",
			is_set_ckpt_flags(sbi, CP_DISABLED_FLAG) ?
			"Disabled" : (f2fs_cp_error(sbi) ? "Error" : "Good"));
		if (sbi->s_flag) {
			seq_puts(s, "[SBI:");
			for_each_set_bit(j, &sbi->s_flag, MAX_SBI_FLAG)
				seq_printf(s, " %s", s_flag[j]);
			seq_puts(s, "]\n");
		}
		seq_printf(s, "[SB: 1] [CP: 2] [SIT: %d] [NAT: %d] ",
			   si->sit_area_segs, si->nat_area_segs);
		seq_printf(s, "[SSA: %d] [MAIN: %d",
			   si->ssa_area_segs, si->main_area_segs);
		seq_printf(s, "(OverProv:%d Resv:%d)]\n\n",
			   si->overp_segs, si->rsvd_segs);
		seq_printf(s, "Current Time Sec: %llu / Mounted Time Sec: %llu\n\n",
					ktime_get_boottime_seconds(),
					SIT_I(sbi)->mounted_time);

		seq_puts(s, "Policy:\n");
		seq_puts(s, "  - IPU: [");
		if (IS_F2FS_IPU_DISABLE(sbi)) {
			seq_puts(s, " DISABLE");
		} else {
			unsigned long policy = SM_I(sbi)->ipu_policy;

			for_each_set_bit(j, &policy, F2FS_IPU_MAX)
				seq_printf(s, " %s", ipu_mode_names[j]);
		}
		seq_puts(s, " ]\n\n");

		if (test_opt(sbi, DISCARD))
			seq_printf(s, "Utilization: %u%% (%u valid blocks, %u discard blocks)\n",
				si->utilization, si->valid_count, si->discard_blks);
		else
			seq_printf(s, "Utilization: %u%% (%u valid blocks)\n",
				si->utilization, si->valid_count);

		seq_printf(s, "  - Node: %u (Inode: %u, ",
			   si->valid_node_count, si->valid_inode_count);
		seq_printf(s, "Other: %u)\n  - Data: %u\n",
			   si->valid_node_count - si->valid_inode_count,
			   si->valid_count - si->valid_node_count);
		seq_printf(s, "  - Inline_xattr Inode: %u\n",
			   si->inline_xattr);
		seq_printf(s, "  - Inline_data Inode: %u\n",
			   si->inline_inode);
		seq_printf(s, "  - Inline_dentry Inode: %u\n",
			   si->inline_dir);
		seq_printf(s, "  - Compressed Inode: %u, Blocks: %llu\n",
			   si->compr_inode, si->compr_blocks);
		seq_printf(s, "  - Swapfile Inode: %u\n",
			   si->swapfile_inode);
		seq_printf(s, "  - Orphan/Append/Update Inode: %u, %u, %u\n",
			   si->orphans, si->append, si->update);
		seq_printf(s, "\nMain area: %d segs, %d secs %d zones\n",
			   si->main_area_segs, si->main_area_sections,
			   si->main_area_zones);
		seq_printf(s, "    TYPE         %8s %8s %8s %10s %10s %10s\n",
			   "segno", "secno", "zoneno", "dirty_seg", "full_seg", "valid_blk");
		seq_printf(s, "  - COLD   data: %8d %8d %8d %10u %10u %10u\n",
			   si->curseg[CURSEG_COLD_DATA],
			   si->cursec[CURSEG_COLD_DATA],
			   si->curzone[CURSEG_COLD_DATA],
			   si->dirty_seg[CURSEG_COLD_DATA],
			   si->full_seg[CURSEG_COLD_DATA],
			   si->valid_blks[CURSEG_COLD_DATA]);
		seq_printf(s, "  - WARM   data: %8d %8d %8d %10u %10u %10u\n",
			   si->curseg[CURSEG_WARM_DATA],
			   si->cursec[CURSEG_WARM_DATA],
			   si->curzone[CURSEG_WARM_DATA],
			   si->dirty_seg[CURSEG_WARM_DATA],
			   si->full_seg[CURSEG_WARM_DATA],
			   si->valid_blks[CURSEG_WARM_DATA]);
		seq_printf(s, "  - HOT    data: %8d %8d %8d %10u %10u %10u\n",
			   si->curseg[CURSEG_HOT_DATA],
			   si->cursec[CURSEG_HOT_DATA],
			   si->curzone[CURSEG_HOT_DATA],
			   si->dirty_seg[CURSEG_HOT_DATA],
			   si->full_seg[CURSEG_HOT_DATA],
			   si->valid_blks[CURSEG_HOT_DATA]);
		seq_printf(s, "  - Dir   dnode: %8d %8d %8d %10u %10u %10u\n",
			   si->curseg[CURSEG_HOT_NODE],
			   si->cursec[CURSEG_HOT_NODE],
			   si->curzone[CURSEG_HOT_NODE],
			   si->dirty_seg[CURSEG_HOT_NODE],
			   si->full_seg[CURSEG_HOT_NODE],
			   si->valid_blks[CURSEG_HOT_NODE]);
		seq_printf(s, "  - File  dnode: %8d %8d %8d %10u %10u %10u\n",
			   si->curseg[CURSEG_WARM_NODE],
			   si->cursec[CURSEG_WARM_NODE],
			   si->curzone[CURSEG_WARM_NODE],
			   si->dirty_seg[CURSEG_WARM_NODE],
			   si->full_seg[CURSEG_WARM_NODE],
			   si->valid_blks[CURSEG_WARM_NODE]);
		seq_printf(s, "  - Indir nodes: %8d %8d %8d %10u %10u %10u\n",
			   si->curseg[CURSEG_COLD_NODE],
			   si->cursec[CURSEG_COLD_NODE],
			   si->curzone[CURSEG_COLD_NODE],
			   si->dirty_seg[CURSEG_COLD_NODE],
			   si->full_seg[CURSEG_COLD_NODE],
			   si->valid_blks[CURSEG_COLD_NODE]);
		seq_printf(s, "  - Pinned file: %8d %8d %8d\n",
			   si->curseg[CURSEG_COLD_DATA_PINNED],
			   si->cursec[CURSEG_COLD_DATA_PINNED],
			   si->curzone[CURSEG_COLD_DATA_PINNED]);
		seq_printf(s, "  - ATGC   data: %8d %8d %8d\n",
			   si->curseg[CURSEG_ALL_DATA_ATGC],
			   si->cursec[CURSEG_ALL_DATA_ATGC],
			   si->curzone[CURSEG_ALL_DATA_ATGC]);
		seq_printf(s, "\n  - Valid: %d\n  - Dirty: %d\n",
			   si->main_area_segs - si->dirty_count -
			   si->prefree_count - si->free_segs,
			   si->dirty_count);
		seq_printf(s, "  - Prefree: %d\n  - Free: %d (%d)\n\n",
			   si->prefree_count, si->free_segs, si->free_secs);
		if (f2fs_is_multi_device(sbi)) {
			seq_puts(s, "Multidevice stats:\n");
			seq_printf(s, "          [seg:   %8s %8s %8s %8s %8s]",
					"inuse", "dirty", "full", "free", "prefree");
			if (__is_large_section(sbi))
				seq_printf(s, " [sec:   %8s %8s %8s %8s %8s]\n",
					"inuse", "dirty", "full", "free", "prefree");
			else
				seq_puts(s, "\n");

			for (i = 0; i < sbi->s_ndevs; i++) {
				seq_printf(s, "  #%-2d     %8u %8u %8u %8u %8u", i,
					si->dev_stats[i].devstats[0][DEVSTAT_INUSE],
					si->dev_stats[i].devstats[0][DEVSTAT_DIRTY],
					si->dev_stats[i].devstats[0][DEVSTAT_FULL],
					si->dev_stats[i].devstats[0][DEVSTAT_FREE],
					si->dev_stats[i].devstats[0][DEVSTAT_PREFREE]);
				if (!__is_large_section(sbi)) {
					seq_puts(s, "\n");
					continue;
				}
				seq_printf(s, "         %8u %8u %8u %8u %8u\n",
					si->dev_stats[i].devstats[1][DEVSTAT_INUSE],
					si->dev_stats[i].devstats[1][DEVSTAT_DIRTY],
					si->dev_stats[i].devstats[1][DEVSTAT_FULL],
					si->dev_stats[i].devstats[1][DEVSTAT_FREE],
					si->dev_stats[i].devstats[1][DEVSTAT_PREFREE]);
			}
		}

		seq_printf(s, "CP calls: %d (BG: %d)\n",
			   si->cp_call_count[TOTAL_CALL],
			   si->cp_call_count[BACKGROUND]);
		seq_printf(s, "CP count: %d\n", si->cp_count);
		seq_printf(s, "  - cp blocks : %u\n", si->meta_count[META_CP]);
		seq_printf(s, "  - sit blocks : %u\n",
				si->meta_count[META_SIT]);
		seq_printf(s, "  - nat blocks : %u\n",
				si->meta_count[META_NAT]);
		seq_printf(s, "  - ssa blocks : %u\n",
				si->meta_count[META_SSA]);
		seq_puts(s, "CP merge:\n");
		seq_printf(s, "  - Queued : %4d\n", si->nr_queued_ckpt);
		seq_printf(s, "  - Issued : %4d\n", si->nr_issued_ckpt);
		seq_printf(s, "  - Total : %4d\n", si->nr_total_ckpt);
		seq_printf(s, "  - Cur time : %4d(ms)\n", si->cur_ckpt_time);
		seq_printf(s, "  - Peak time : %4d(ms)\n", si->peak_ckpt_time);
		seq_printf(s, "GC calls: %d (gc_thread: %d)\n",
			   si->gc_call_count[BACKGROUND] +
			   si->gc_call_count[FOREGROUND],
			   si->gc_call_count[BACKGROUND]);
		if (__is_large_section(sbi)) {
			seq_printf(s, "  - data sections : %d (BG: %d)\n",
					si->gc_secs[DATA][BG_GC] + si->gc_secs[DATA][FG_GC],
					si->gc_secs[DATA][BG_GC]);
			seq_printf(s, "  - node sections : %d (BG: %d)\n",
					si->gc_secs[NODE][BG_GC] + si->gc_secs[NODE][FG_GC],
					si->gc_secs[NODE][BG_GC]);
		}
		seq_printf(s, "  - data segments : %d (BG: %d)\n",
				si->gc_segs[DATA][BG_GC] + si->gc_segs[DATA][FG_GC],
				si->gc_segs[DATA][BG_GC]);
		seq_printf(s, "  - node segments : %d (BG: %d)\n",
				si->gc_segs[NODE][BG_GC] + si->gc_segs[NODE][FG_GC],
				si->gc_segs[NODE][BG_GC]);
		seq_puts(s, "  - Reclaimed segs :\n");
		seq_printf(s, "    - Normal : %d\n", sbi->gc_reclaimed_segs[GC_NORMAL]);
		seq_printf(s, "    - Idle CB : %d\n", sbi->gc_reclaimed_segs[GC_IDLE_CB]);
		seq_printf(s, "    - Idle Greedy : %d\n",
				sbi->gc_reclaimed_segs[GC_IDLE_GREEDY]);
		seq_printf(s, "    - Idle AT : %d\n", sbi->gc_reclaimed_segs[GC_IDLE_AT]);
		seq_printf(s, "    - Urgent High : %d\n",
				sbi->gc_reclaimed_segs[GC_URGENT_HIGH]);
		seq_printf(s, "    - Urgent Mid : %d\n", sbi->gc_reclaimed_segs[GC_URGENT_MID]);
		seq_printf(s, "    - Urgent Low : %d\n", sbi->gc_reclaimed_segs[GC_URGENT_LOW]);
		seq_printf(s, "Try to move %d blocks (BG: %d)\n", si->tot_blks,
				si->bg_data_blks + si->bg_node_blks);
		seq_printf(s, "  - data blocks : %d (%d)\n", si->data_blks,
				si->bg_data_blks);
		seq_printf(s, "  - node blocks : %d (%d)\n", si->node_blks,
				si->bg_node_blks);
		seq_printf(s, "BG skip : IO: %u, Other: %u\n",
				si->io_skip_bggc, si->other_skip_bggc);
		seq_puts(s, "\nExtent Cache (Read):\n");
		seq_printf(s, "  - Hit Count: L1-1:%llu L1-2:%llu L2:%llu\n",
				si->hit_largest, si->hit_cached[EX_READ],
				si->hit_rbtree[EX_READ]);
		seq_printf(s, "  - Hit Ratio: %llu%% (%llu / %llu)\n",
				!si->total_ext[EX_READ] ? 0 :
				div64_u64(si->hit_total[EX_READ] * 100,
				si->total_ext[EX_READ]),
				si->hit_total[EX_READ], si->total_ext[EX_READ]);
		seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
				si->ext_tree[EX_READ], si->zombie_tree[EX_READ],
				si->ext_node[EX_READ]);
		seq_puts(s, "\nExtent Cache (Block Age):\n");
		seq_printf(s, "  - Allocated Data Blocks: %llu\n",
				si->allocated_data_blocks);
		seq_printf(s, "  - Hit Count: L1:%llu L2:%llu\n",
				si->hit_cached[EX_BLOCK_AGE],
				si->hit_rbtree[EX_BLOCK_AGE]);
		seq_printf(s, "  - Hit Ratio: %llu%% (%llu / %llu)\n",
				!si->total_ext[EX_BLOCK_AGE] ? 0 :
				div64_u64(si->hit_total[EX_BLOCK_AGE] * 100,
				si->total_ext[EX_BLOCK_AGE]),
				si->hit_total[EX_BLOCK_AGE],
				si->total_ext[EX_BLOCK_AGE]);
		seq_printf(s, "  - Inner Struct Count: tree: %d(%d), node: %d\n",
				si->ext_tree[EX_BLOCK_AGE],
				si->zombie_tree[EX_BLOCK_AGE],
				si->ext_node[EX_BLOCK_AGE]);
		seq_puts(s, "\nBalancing F2FS Async:\n");
		seq_printf(s, "  - DIO (R: %4d, W: %4d)\n",
			   si->nr_dio_read, si->nr_dio_write);
		seq_printf(s, "  - IO_R (Data: %4d, Node: %4d, Meta: %4d\n",
			   si->nr_rd_data, si->nr_rd_node, si->nr_rd_meta);
		seq_printf(s, "  - IO_W (CP: %4d, Data: %4d, Flush: (%4d %4d %4d), ",
			   si->nr_wb_cp_data, si->nr_wb_data,
			   si->nr_flushing, si->nr_flushed,
			   si->flush_list_empty);
		seq_printf(s, "Discard: (%4d %4d)) cmd: %4d undiscard:%4u\n",
			   si->nr_discarding, si->nr_discarded,
			   si->nr_discard_cmd, si->undiscard_blks);
		seq_printf(s, "  - atomic IO: %4d (Max. %4d)\n",
			   si->aw_cnt, si->max_aw_cnt);
		seq_printf(s, "  - compress: %4d, hit:%8d\n", si->compress_pages, si->compress_page_hit);
		seq_printf(s, "  - nodes: %4d in %4d\n",
			   si->ndirty_node, si->node_pages);
		seq_printf(s, "  - dents: %4d in dirs:%4d (%4d)\n",
			   si->ndirty_dent, si->ndirty_dirs, si->ndirty_all);
		seq_printf(s, "  - data: %4d in files:%4d\n",
			   si->ndirty_data, si->ndirty_files);
		seq_printf(s, "  - quota data: %4d in quota files:%4d\n",
			   si->ndirty_qdata, si->nquota_files);
		seq_printf(s, "  - meta: %4d in %4d\n",
			   si->ndirty_meta, si->meta_pages);
		seq_printf(s, "  - imeta: %4d\n",
			   si->ndirty_imeta);
		seq_printf(s, "  - fsync mark: %4lld\n",
			   percpu_counter_sum_positive(
					&sbi->rf_node_block_count));
		seq_printf(s, "  - NATs: %9d/%9d\n  - SITs: %9d/%9d\n",
			   si->dirty_nats, si->nats, si->dirty_sits, si->sits);
		seq_printf(s, "  - free_nids: %9d/%9d\n  - alloc_nids: %9d\n",
			   si->free_nids, si->avail_nids, si->alloc_nids);
		seq_puts(s, "\nDistribution of User Blocks:");
		seq_puts(s, " [ valid | invalid | free ]\n");
		seq_puts(s, "  [");

		for (j = 0; j < si->util_valid; j++)
			seq_putc(s, '-');
		seq_putc(s, '|');

		for (j = 0; j < si->util_invalid; j++)
			seq_putc(s, '-');
		seq_putc(s, '|');

		for (j = 0; j < si->util_free; j++)
			seq_putc(s, '-');
		seq_puts(s, "]\n\n");
		seq_printf(s, "IPU: %u blocks\n", si->inplace_count);
		seq_printf(s, "SSR: %u blocks in %u segments\n",
			   si->block_count[SSR], si->segment_count[SSR]);
		seq_printf(s, "LFS: %u blocks in %u segments\n",
			   si->block_count[LFS], si->segment_count[LFS]);

		/* segment usage info */
		f2fs_update_sit_info(sbi);
		seq_printf(s, "\nBDF: %u, avg. vblocks: %u\n",
			   si->bimodal, si->avg_vblocks);

		/* memory footprint */
		update_mem_info(sbi);
		seq_printf(s, "\nMemory: %llu KB\n",
			(si->base_mem + si->cache_mem + si->page_mem) >> 10);
		seq_printf(s, "  - static: %llu KB\n",
				si->base_mem >> 10);
		seq_printf(s, "  - cached all: %llu KB\n",
				si->cache_mem >> 10);
		seq_printf(s, "  - read extent cache: %llu KB\n",
				si->ext_mem[EX_READ] >> 10);
		seq_printf(s, "  - block age extent cache: %llu KB\n",
				si->ext_mem[EX_BLOCK_AGE] >> 10);
		seq_printf(s, "  - paged : %llu KB\n",
				si->page_mem >> 10);
	}
	raw_spin_unlock_irqrestore(&f2fs_stat_lock, flags);
	return 0;
}

DEFINE_SHOW_ATTRIBUTE(stat);
#endif
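
/*
 * Allocate the per-superblock f2fs_stat_info (plus per-device counters),
 * reset the statistics counters and add the entry to f2fs_stat_list.
 */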
int f2fs_build_stats(struct f2fs_sb_info *sbi)
{
	struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
	struct f2fs_stat_info *si;
	struct f2fs_dev_stats *dev_stats;
	unsigned long flags;
	int i;

	si = f2fs_kzalloc(sbi, sizeof(struct f2fs_stat_info), GFP_KERNEL);
	if (!si)
		return -ENOMEM;

	dev_stats = f2fs_kzalloc(sbi, sizeof(struct f2fs_dev_stats) *
						sbi->s_ndevs, GFP_KERNEL);
	if (!dev_stats) {
		kfree(si);
		return -ENOMEM;
	}
	si->dev_stats = dev_stats;

	si->all_area_segs = le32_to_cpu(raw_super->segment_count);
	si->sit_area_segs = le32_to_cpu(raw_super->segment_count_sit);
	si->nat_area_segs = le32_to_cpu(raw_super->segment_count_nat);
	si->ssa_area_segs = le32_to_cpu(raw_super->segment_count_ssa);
	si->main_area_segs = le32_to_cpu(raw_super->segment_count_main);
	si->main_area_sections = le32_to_cpu(raw_super->section_count);
	si->main_area_zones = si->main_area_sections /
				le32_to_cpu(raw_super->secs_per_zone);
	si->sbi = sbi;
	sbi->stat_info = si;

	/* general extent cache stats */
	for (i = 0; i < NR_EXTENT_CACHES; i++) {
		atomic64_set(&sbi->total_hit_ext[i], 0);
		atomic64_set(&sbi->read_hit_rbtree[i], 0);
		atomic64_set(&sbi->read_hit_cached[i], 0);
	}

	/* read extent_cache only */
	atomic64_set(&sbi->read_hit_largest, 0);

	atomic_set(&sbi->inline_xattr, 0);
	atomic_set(&sbi->inline_inode, 0);
	atomic_set(&sbi->inline_dir, 0);
	atomic_set(&sbi->compr_inode, 0);
	atomic64_set(&sbi->compr_blocks, 0);
	atomic_set(&sbi->swapfile_inode, 0);
	atomic_set(&sbi->atomic_files, 0);
	atomic_set(&sbi->inplace_count, 0);
	for (i = META_CP; i < META_MAX; i++)
		atomic_set(&sbi->meta_count[i], 0);
	for (i = 0; i < MAX_CALL_TYPE; i++)
		atomic_set(&sbi->cp_call_count[i], 0);

	atomic_set(&sbi->max_aw_cnt, 0);

	raw_spin_lock_irqsave(&f2fs_stat_lock, flags);
	list_add_tail(&si->stat_list, &f2fs_stat_list);
	raw_spin_unlock_irqrestore(&f2fs_stat_lock, flags);

	return 0;
}

void f2fs_destroy_stats(struct f2fs_sb_info *sbi)
{
	struct f2fs_stat_info *si = F2FS_STAT(sbi);
	unsigned long flags;

	raw_spin_lock_irqsave(&f2fs_stat_lock, flags);
	list_del(&si->stat_list);
	raw_spin_unlock_irqrestore(&f2fs_stat_lock, flags);

	kfree(si->dev_stats);
	kfree(si);
}
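
/*
 * Create/remove the global "f2fs" debugfs directory and its single "status"
 * file; individual mounts only register themselves on f2fs_stat_list.
 */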
void __init f2fs_create_root_stats(void)
{
#ifdef CONFIG_DEBUG_FS
	f2fs_debugfs_root = debugfs_create_dir("f2fs", NULL);

	debugfs_create_file("status", 0444, f2fs_debugfs_root, NULL,
			    &stat_fops);
#endif
}

void f2fs_destroy_root_stats(void)
{
#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(f2fs_debugfs_root);
	f2fs_debugfs_root = NULL;
#endif
}