/*
 * Copyright (C) 2011 STRATO. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include "kerncompat.h"
#include "androidcompat.h"

#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <uuid/uuid.h>

static const char * const scrub_cmd_group_usage[] = {
        "btrfs scrub <command> [options] <path>|<device>",

#define SCRUB_DATA_FILE "/var/lib/btrfs/scrub.status"
#define SCRUB_PROGRESS_SOCKET_PATH "/var/lib/btrfs/scrub.progress"
#define SCRUB_FILE_VERSION_PREFIX "scrub status"
#define SCRUB_FILE_VERSION "1"

/* TBD: replace with #include "linux/ioprio.h" in some years */
#if !defined (IOPRIO_H)
#define IOPRIO_WHO_PROCESS 1
#define IOPRIO_CLASS_SHIFT 13
#define IOPRIO_PRIO_VALUE(class, data) \
        (((class) << IOPRIO_CLASS_SHIFT) | (data))
#define IOPRIO_CLASS_IDLE 3
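
/*
 * Per-device scrub state. One instance is handed to each scrub_one_dev()
 * worker thread and is also read by the progress thread; access to the
 * stats is serialized through progress_mutex.
 */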
struct scrub_progress {
        struct btrfs_ioctl_scrub_args scrub_args;
        struct scrub_stats stats;
        struct scrub_file_record *resumed;
        pthread_mutex_t progress_mutex;
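
/*
 * One parsed record from the scrub status file: the fsid plus the saved
 * stats and progress counters of an earlier scrub, used when resuming a
 * scrub and when printing "scrub status".
 */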
struct scrub_file_record {
        u8 fsid[BTRFS_FSID_SIZE];
        struct scrub_stats stats;
        struct btrfs_scrub_progress p;
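
/*
 * Context passed to the scrub_progress_cycle() thread: filesystem info,
 * the working and shared per-device progress arrays and the mutex that
 * guards writes to the status file.
 */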
struct scrub_progress_cycle {
        struct btrfs_ioctl_fs_info_args *fi;
        struct scrub_progress *progress;
        struct scrub_progress *shared_progress;
        pthread_mutex_t *write_mutex;

struct scrub_fs_stat {
        struct btrfs_scrub_progress p;
        struct scrub_stats s;

static void print_scrub_full(struct btrfs_scrub_progress *sp)
        printf("\tdata_extents_scrubbed: %lld\n", sp->data_extents_scrubbed);
        printf("\ttree_extents_scrubbed: %lld\n", sp->tree_extents_scrubbed);
        printf("\tdata_bytes_scrubbed: %lld\n", sp->data_bytes_scrubbed);
        printf("\ttree_bytes_scrubbed: %lld\n", sp->tree_bytes_scrubbed);
        printf("\tread_errors: %lld\n", sp->read_errors);
        printf("\tcsum_errors: %lld\n", sp->csum_errors);
        printf("\tverify_errors: %lld\n", sp->verify_errors);
        printf("\tno_csum: %lld\n", sp->no_csum);
        printf("\tcsum_discards: %lld\n", sp->csum_discards);
        printf("\tsuper_errors: %lld\n", sp->super_errors);
        printf("\tmalloc_errors: %lld\n", sp->malloc_errors);
        printf("\tuncorrectable_errors: %lld\n", sp->uncorrectable_errors);
        printf("\tunverified_errors: %lld\n", sp->unverified_errors);
        printf("\tcorrected_errors: %lld\n", sp->corrected_errors);
        printf("\tlast_physical: %lld\n", sp->last_physical);

#define PRINT_SCRUB_ERROR(test, desc) do {      \
        printf(" %s=%llu", desc, test);         \

static void print_scrub_summary(struct btrfs_scrub_progress *p)
        err_cnt = p->read_errors +
        err_cnt2 = p->corrected_errors + p->uncorrectable_errors;

        if (p->malloc_errors)
                printf("*** WARNING: memory allocation failed while scrubbing. "
                       "results may be inaccurate\n");

        printf("\ttotal bytes scrubbed: %s with %llu errors\n",
               pretty_size(p->data_bytes_scrubbed + p->tree_bytes_scrubbed),
               max(err_cnt, err_cnt2));

        if (err_cnt || err_cnt2) {
                printf("\terror details:");
                PRINT_SCRUB_ERROR(p->read_errors, "read");
                PRINT_SCRUB_ERROR(p->super_errors, "super");
                PRINT_SCRUB_ERROR(p->verify_errors, "verify");
                PRINT_SCRUB_ERROR(p->csum_errors, "csum");
                printf("\tcorrected errors: %llu, uncorrectable errors: %llu, "
                       "unverified errors: %llu\n", p->corrected_errors,
                       p->uncorrectable_errors, p->unverified_errors);
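
/*
 * Accumulation helpers for building a whole-filesystem summary from the
 * per-device counters: _SCRUB_FS_STAT sums a progress field, the MIN
 * variant keeps the smallest value, and the ZMIN/ZMAX variants do the
 * same while treating a zero destination as "not yet initialized".
 */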
#define _SCRUB_FS_STAT(p, name, fs_stat) do {   \
        fs_stat->p.name += p->name;             \

#define _SCRUB_FS_STAT_MIN(ss, name, fs_stat)   \
        if (fs_stat->s.name > ss->name) {       \
                fs_stat->s.name = ss->name;     \

#define _SCRUB_FS_STAT_ZMIN(ss, name, fs_stat)  \
        if (!fs_stat->s.name || fs_stat->s.name > ss->name) {   \
                fs_stat->s.name = ss->name;     \

#define _SCRUB_FS_STAT_ZMAX(ss, name, fs_stat)  \
        if (!(fs_stat)->s.name || (fs_stat)->s.name < (ss)->name) {     \
                (fs_stat)->s.name = (ss)->name; \

static void add_to_fs_stat(struct btrfs_scrub_progress *p,
                           struct scrub_stats *ss,
                           struct scrub_fs_stat *fs_stat)
        _SCRUB_FS_STAT(p, data_extents_scrubbed, fs_stat);
        _SCRUB_FS_STAT(p, tree_extents_scrubbed, fs_stat);
        _SCRUB_FS_STAT(p, data_bytes_scrubbed, fs_stat);
        _SCRUB_FS_STAT(p, tree_bytes_scrubbed, fs_stat);
        _SCRUB_FS_STAT(p, read_errors, fs_stat);
        _SCRUB_FS_STAT(p, csum_errors, fs_stat);
        _SCRUB_FS_STAT(p, verify_errors, fs_stat);
        _SCRUB_FS_STAT(p, no_csum, fs_stat);
        _SCRUB_FS_STAT(p, csum_discards, fs_stat);
        _SCRUB_FS_STAT(p, super_errors, fs_stat);
        _SCRUB_FS_STAT(p, malloc_errors, fs_stat);
        _SCRUB_FS_STAT(p, uncorrectable_errors, fs_stat);
        _SCRUB_FS_STAT(p, corrected_errors, fs_stat);
        _SCRUB_FS_STAT(p, last_physical, fs_stat);
        _SCRUB_FS_STAT_ZMIN(ss, t_start, fs_stat);
        _SCRUB_FS_STAT_ZMIN(ss, t_resumed, fs_stat);
        _SCRUB_FS_STAT_ZMAX(ss, duration, fs_stat);
        _SCRUB_FS_STAT_ZMAX(ss, canceled, fs_stat);
        _SCRUB_FS_STAT_MIN(ss, finished, fs_stat);

static void init_fs_stat(struct scrub_fs_stat *fs_stat)
        memset(fs_stat, 0, sizeof(*fs_stat));
        fs_stat->s.finished = 1;

static void _print_scrub_ss(struct scrub_stats *ss)
        if (!ss || !ss->t_start) {
                printf("\tno stats available\n");
                localtime_r(&ss->t_resumed, &tm);
                strftime(t, sizeof(t), "%c", &tm);
                t[sizeof(t) - 1] = '\0';
                printf("\tscrub resumed at %s", t);
        localtime_r(&ss->t_start, &tm);
        strftime(t, sizeof(t), "%c", &tm);
        t[sizeof(t) - 1] = '\0';
        printf("\tscrub started at %s", t);
        seconds = ss->duration;
        hours = ss->duration / (60 * 60);
        gmtime_r(&seconds, &tm);
        strftime(t, sizeof(t), "%M:%S", &tm);
                printf(", running for %02u:%s\n", hours, t);
        else if (ss->canceled)
                printf(" and was aborted after %02u:%s\n", hours, t);
        else if (ss->finished)
                printf(" and finished after %02u:%s\n", hours, t);
                printf(", interrupted after %02u:%s, not running\n",

static void print_scrub_dev(struct btrfs_ioctl_dev_info_args *di,
                            struct btrfs_scrub_progress *p, int raw,
                            const char *append, struct scrub_stats *ss)
        printf("scrub device %s (id %llu) %s\n", di->path, di->devid,
               append ? append : "");
                print_scrub_summary(p);

static void print_fs_stat(struct scrub_fs_stat *fs_stat, int raw)
        _print_scrub_ss(&fs_stat->s);
                print_scrub_full(&fs_stat->p);
                print_scrub_summary(&fs_stat->p);

static void free_history(struct scrub_file_record **last_scrubs)
        struct scrub_file_record **l = last_scrubs;

/*
 * cancels a running scrub and makes the master process record the current
 * progress status before exiting.
 */
static int cancel_fd = -1;
static void scrub_sigint_record_progress(int signal)
        ret = ioctl(cancel_fd, BTRFS_IOC_SCRUB_CANCEL, NULL);
                perror("Scrub cancel failed");

static int scrub_handle_sigint_parent(void)
        struct sigaction sa = {
                .sa_handler = SIG_IGN,
                .sa_flags = SA_RESTART,
        return sigaction(SIGINT, &sa, NULL);

static int scrub_handle_sigint_child(int fd)
        struct sigaction sa = {
                .sa_handler = fd == -1 ? SIG_DFL : scrub_sigint_record_progress,
        return sigaction(SIGINT, &sa, NULL);
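
/*
 * Compose the name of the per-filesystem status or progress file from
 * fn_base (SCRUB_DATA_FILE or SCRUB_PROGRESS_SOCKET_PATH), the fsid
 * string fn_local and, when given, the temporary suffix fn_tmp, limited
 * to size bytes.
 */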
static int scrub_datafile(const char *fn_base, const char *fn_local,
                          const char *fn_tmp, char *datafile, int size)
        datafile[end + 1] = '\0';
        strncpy(datafile, fn_base, end);
        ret = strlen(datafile);
        strncpy(datafile + ret + 1, fn_local, end - ret - 1);
        ret = strlen(datafile);
        strncpy(datafile + ret + 1, fn_tmp, end - ret - 1);
        ret = strlen(datafile);

static int scrub_open_file(const char *datafile, int m)
        fd = open(datafile, m, 0600);
        ret = flock(fd, LOCK_EX|LOCK_NB);

static int scrub_open_file_r(const char *fn_base, const char *fn_local)
        char datafile[PATH_MAX];
        ret = scrub_datafile(fn_base, fn_local, NULL,
                             datafile, sizeof(datafile));
        return scrub_open_file(datafile, O_RDONLY);

static int scrub_open_file_w(const char *fn_base, const char *fn_local,
        char datafile[PATH_MAX];
        ret = scrub_datafile(fn_base, fn_local, tmp,
                             datafile, sizeof(datafile));
        return scrub_open_file(datafile, O_WRONLY|O_CREAT);

static int scrub_rename_file(const char *fn_base, const char *fn_local,
        char datafile_old[PATH_MAX];
        char datafile_new[PATH_MAX];
        ret = scrub_datafile(fn_base, fn_local, tmp,
                             datafile_old, sizeof(datafile_old));
        ret = scrub_datafile(fn_base, fn_local, NULL,
                             datafile_new, sizeof(datafile_new));
        ret = rename(datafile_old, datafile_new);
        return ret ? -errno : 0;

#define _SCRUB_KVREAD(ret, i, name, avail, l, dest) if (ret == 0) {       \
        ret = scrub_kvread(i, sizeof(#name), avail, l, #name, dest.name); \

/*
 * returns 0 if the key did not match (nothing was read)
 *         1 if the key did match (success)
 *        -1 if the key did match and an error occurred
 */
static int scrub_kvread(int *i, int len, int avail, const char *buf,
                        const char *key, u64 *dest)
        if (*i + len + 1 < avail && strncmp(&buf[*i], key, len - 1) == 0) {
                for (j = 0; isdigit(buf[*i + j]) && *i + j < avail; ++j)
                *dest = atoll(&buf[*i]);

#define _SCRUB_INVALID do {                                             \
                warning("invalid data on line %d pos "                  \
                        "%d state %d (near \"%.*s\") at %s:%d",         \
                        lineno, i, state, 20 > avail ? avail : 20,      \
                        l + i, __FILE__, __LINE__);                     \
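
/*
 * Parse the scrub status file into an array of scrub_file_record.
 * The reader is a small state machine: state 0 checks the version
 * header, states 1-4 find the start of a record and read the fsid and
 * devid, state 5 reads the key:value counters, state 6 handles the
 * separator after a number and state 99 skips to the end of the line.
 * Returns the record array or a negative errno wrapped with ERR_PTR().
 */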
static struct scrub_file_record **scrub_read_file(int fd, int report_errors)
        char empty_uuid[BTRFS_FSID_SIZE] = {0};
        struct scrub_file_record **p = NULL;

                old_avail = avail - i;
                        error("scrub record file corrupted near byte %d", i);
                        return ERR_PTR(-EINVAL);
                memmove(l, l + i, old_avail);
                avail = read(fd, l + old_avail, sizeof(l) - old_avail);
                if (avail == 0 && old_avail == 0) {
                            memcmp(p[curr]->fsid, empty_uuid, BTRFS_FSID_SIZE) == 0) {
                        } else if (curr == -1) {
                                p = ERR_PTR(-ENODATA);
                        return ERR_PTR(-errno);
                case 0: /* start of file */
                        ret = scrub_kvread(&i,
                                        sizeof(SCRUB_FILE_VERSION_PREFIX), avail, l,
                                        SCRUB_FILE_VERSION_PREFIX, &version);
                        if (version != atoll(SCRUB_FILE_VERSION))
                                return ERR_PTR(-ENOTSUP);
                case 1: /* start of line, alloc */
                        /*
                         * this state makes sure we have a complete line in
                         * further processing, so we don't need wrap-tracking
                         */
                        if (!eof && !memchr(l + i, '\n', avail - i))
                        if (curr > -1 && memcmp(p[curr]->fsid, empty_uuid,
                                                BTRFS_FSID_SIZE) == 0) {
                        p = realloc(p, (curr + 2) * sizeof(*p));
                                return ERR_PTR(-errno);
                        p[curr] = malloc(sizeof(**p));
                                return ERR_PTR(-errno);
                        memset(p[curr], 0, sizeof(**p));
                case 2: /* start of line, skip space */
                        while (isspace(l[i]) && i < avail) {
                            (!eof && !memchr(l + i, '\n', avail - i)))
                case 3: /* read fsid */
                        for (j = 0; l[i + j] != ':' && i + j < avail; ++j)
                        if (i + j + 1 >= avail)
                        if (j != BTRFS_UUID_UNPARSED_SIZE - 1)
                        ret = uuid_parse(l + i, p[curr]->fsid);
                case 4: /* read dev id */
                        for (j = 0; isdigit(l[i + j]) && i + j < avail; ++j)
                        if (j == 0 || i + j + 1 >= avail)
                        p[curr]->devid = atoll(&l[i]);
                case 5: /* read key/value pair */
                        _SCRUB_KVREAD(ret, &i, data_extents_scrubbed, avail, l,
                        _SCRUB_KVREAD(ret, &i, data_extents_scrubbed, avail, l,
                        _SCRUB_KVREAD(ret, &i, tree_extents_scrubbed, avail, l,
                        _SCRUB_KVREAD(ret, &i, data_bytes_scrubbed, avail, l,
                        _SCRUB_KVREAD(ret, &i, tree_bytes_scrubbed, avail, l,
                        _SCRUB_KVREAD(ret, &i, read_errors, avail, l,
                        _SCRUB_KVREAD(ret, &i, csum_errors, avail, l,
                        _SCRUB_KVREAD(ret, &i, verify_errors, avail, l,
                        _SCRUB_KVREAD(ret, &i, no_csum, avail, l,
                        _SCRUB_KVREAD(ret, &i, csum_discards, avail, l,
                        _SCRUB_KVREAD(ret, &i, super_errors, avail, l,
                        _SCRUB_KVREAD(ret, &i, malloc_errors, avail, l,
                        _SCRUB_KVREAD(ret, &i, uncorrectable_errors, avail, l,
                        _SCRUB_KVREAD(ret, &i, corrected_errors, avail, l,
                        _SCRUB_KVREAD(ret, &i, last_physical, avail, l,
                        _SCRUB_KVREAD(ret, &i, finished, avail, l,
                        _SCRUB_KVREAD(ret, &i, t_start, avail, l,
                                      (u64 *)&p[curr]->stats);
                        _SCRUB_KVREAD(ret, &i, t_resumed, avail, l,
                                      (u64 *)&p[curr]->stats);
                        _SCRUB_KVREAD(ret, &i, duration, avail, l,
                                      (u64 *)&p[curr]->stats);
                        _SCRUB_KVREAD(ret, &i, canceled, avail, l,
                case 6: /* after number */
                        else if (l[i] == '\n')
                case 99: /* skip rest of line */
                        if (l[i - 1] == '\n') {
        error("internal error: unknown parser state %d near byte %d",
        return ERR_PTR(-EINVAL);

static int scrub_write_buf(int fd, const void *data, int len)
        ret = write(fd, data, len);

static int scrub_writev(int fd, char *buf, int max, const char *fmt, ...)
        __attribute__ ((format (printf, 4, 5)));
static int scrub_writev(int fd, char *buf, int max, const char *fmt, ...)
        ret = vsnprintf(buf, max, fmt, args);
        return scrub_write_buf(fd, buf, ret);

#define _SCRUB_SUM(dest, data, name) dest->scrub_args.progress.name =   \
                data->resumed->p.name + data->scrub_args.progress.name

static struct scrub_progress *scrub_resumed_stats(struct scrub_progress *data,
                                                  struct scrub_progress *dest)
        if (!data->resumed || data->skip)
        _SCRUB_SUM(dest, data, data_extents_scrubbed);
        _SCRUB_SUM(dest, data, tree_extents_scrubbed);
        _SCRUB_SUM(dest, data, data_bytes_scrubbed);
        _SCRUB_SUM(dest, data, tree_bytes_scrubbed);
        _SCRUB_SUM(dest, data, read_errors);
        _SCRUB_SUM(dest, data, csum_errors);
        _SCRUB_SUM(dest, data, verify_errors);
        _SCRUB_SUM(dest, data, no_csum);
        _SCRUB_SUM(dest, data, csum_discards);
        _SCRUB_SUM(dest, data, super_errors);
        _SCRUB_SUM(dest, data, malloc_errors);
        _SCRUB_SUM(dest, data, uncorrectable_errors);
        _SCRUB_SUM(dest, data, corrected_errors);
        _SCRUB_SUM(dest, data, last_physical);
        dest->stats.canceled = data->stats.canceled;
        dest->stats.finished = data->stats.finished;
        dest->stats.t_resumed = data->stats.t_start;
        dest->stats.t_start = data->resumed->stats.t_start;
        dest->stats.duration = data->resumed->stats.duration +
                               data->stats.duration;
        dest->scrub_args.devid = data->scrub_args.devid;

#define _SCRUB_KVWRITE(fd, buf, name, use)              \
        scrub_kvwrite(fd, buf, sizeof(buf), #name,      \
                      use->scrub_args.progress.name)

#define _SCRUB_KVWRITE_STATS(fd, buf, name, use)        \
        scrub_kvwrite(fd, buf, sizeof(buf), #name,      \

static int scrub_kvwrite(int fd, char *buf, int max, const char *key, u64 val)
        return scrub_writev(fd, buf, max, "|%s:%lld", key, val);
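
/*
 * Write the status file: a "scrub status:1" version header followed by
 * one line per device consisting of the fsid, a ':', the devid and the
 * "|key:value" pairs produced by scrub_kvwrite().
 */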
static int scrub_write_file(int fd, const char *fsid,
                            struct scrub_progress *data, int n)
        struct scrub_progress local;
        struct scrub_progress *use;

        /* each -1 is to subtract one \0 byte, the + 2 is for ':' and '\n' */
        ret = scrub_write_buf(fd, SCRUB_FILE_VERSION_PREFIX ":"
                              SCRUB_FILE_VERSION "\n",
                              (sizeof(SCRUB_FILE_VERSION_PREFIX) - 1) +
                              (sizeof(SCRUB_FILE_VERSION) - 1) + 2);
        for (i = 0; i < n; ++i) {
                use = scrub_resumed_stats(&data[i], &local);
                if (scrub_write_buf(fd, fsid, strlen(fsid)) ||
                    scrub_write_buf(fd, ":", 1) ||
                    scrub_writev(fd, buf, sizeof(buf), "%lld",
                                 use->scrub_args.devid) ||
                    scrub_write_buf(fd, buf, ret) ||
                    _SCRUB_KVWRITE(fd, buf, data_extents_scrubbed, use) ||
                    _SCRUB_KVWRITE(fd, buf, tree_extents_scrubbed, use) ||
                    _SCRUB_KVWRITE(fd, buf, data_bytes_scrubbed, use) ||
                    _SCRUB_KVWRITE(fd, buf, tree_bytes_scrubbed, use) ||
                    _SCRUB_KVWRITE(fd, buf, read_errors, use) ||
                    _SCRUB_KVWRITE(fd, buf, csum_errors, use) ||
                    _SCRUB_KVWRITE(fd, buf, verify_errors, use) ||
                    _SCRUB_KVWRITE(fd, buf, no_csum, use) ||
                    _SCRUB_KVWRITE(fd, buf, csum_discards, use) ||
                    _SCRUB_KVWRITE(fd, buf, super_errors, use) ||
                    _SCRUB_KVWRITE(fd, buf, malloc_errors, use) ||
                    _SCRUB_KVWRITE(fd, buf, uncorrectable_errors, use) ||
                    _SCRUB_KVWRITE(fd, buf, corrected_errors, use) ||
                    _SCRUB_KVWRITE(fd, buf, last_physical, use) ||
                    _SCRUB_KVWRITE_STATS(fd, buf, t_start, use) ||
                    _SCRUB_KVWRITE_STATS(fd, buf, t_resumed, use) ||
                    _SCRUB_KVWRITE_STATS(fd, buf, duration, use) ||
                    _SCRUB_KVWRITE_STATS(fd, buf, canceled, use) ||
                    _SCRUB_KVWRITE_STATS(fd, buf, finished, use) ||
                    scrub_write_buf(fd, "\n", 1)) {

static int scrub_write_progress(pthread_mutex_t *m, const char *fsid,
                                struct scrub_progress *data, int n)
        ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
        ret = pthread_mutex_lock(m);
        fd = scrub_open_file_w(SCRUB_DATA_FILE, fsid, "tmp");
        err = scrub_write_file(fd, fsid, data, n);
        err = scrub_rename_file(SCRUB_DATA_FILE, fsid, "tmp");
        ret = pthread_mutex_unlock(m);
        ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old);
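
/*
 * Worker thread, one per device: lower the io priority, start the scrub
 * with BTRFS_IOC_SCRUB and record duration, canceled and finished in the
 * shared scrub_progress under progress_mutex.
 */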
static void *scrub_one_dev(void *ctx)
        struct scrub_progress *sp = ctx;

        sp->stats.canceled = 0;
        sp->stats.duration = 0;
        sp->stats.finished = 0;
        ret = syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
                      IOPRIO_PRIO_VALUE(sp->ioprio_class,
                                        sp->ioprio_classdata));
                warning("setting ioprio failed: %m (ignored)");
        ret = ioctl(sp->fd, BTRFS_IOC_SCRUB, &sp->scrub_args);
        gettimeofday(&tv, NULL);
        sp->stats.duration = tv.tv_sec - sp->stats.t_start;
        sp->stats.canceled = !!ret;
        sp->ioctl_errno = errno;
        ret = pthread_mutex_lock(&sp->progress_mutex);
                return ERR_PTR(-ret);
        sp->stats.finished = 1;
        ret = pthread_mutex_unlock(&sp->progress_mutex);
                return ERR_PTR(-ret);

static void *progress_one_dev(void *ctx)
        struct scrub_progress *sp = ctx;

        sp->ret = ioctl(sp->fd, BTRFS_IOC_SCRUB_PROGRESS, &sp->scrub_args);
        sp->ioctl_errno = errno;
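
/*
 * Progress thread: wait for connections on the progress socket, query
 * every device with BTRFS_IOC_SCRUB_PROGRESS, write the current state
 * back to a connected client and, if recording is enabled, update the
 * status file.
 */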
/* nb: returns a negative errno via ERR_PTR */
static void *scrub_progress_cycle(void *ctx)
        int perr = 0;   /* positive / pthread error returns */
        char fsid[BTRFS_UUID_UNPARSED_SIZE];
        struct scrub_progress *sp;
        struct scrub_progress *sp_last;
        struct scrub_progress *sp_shared;
        struct scrub_progress_cycle *spc = ctx;
        int ndev = spc->fi->num_devices;
        struct pollfd accept_poll_fd = {
        struct pollfd write_poll_fd = {
        struct sockaddr_un peer;
        socklen_t peer_size = sizeof(peer);

        perr = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
        uuid_unparse(spc->fi->fsid, fsid);
        for (i = 0; i < ndev; ++i) {
                sp = &spc->progress[i];
                sp_last = &spc->progress[i + ndev];
                sp_shared = &spc->shared_progress[i];
                sp->scrub_args.devid = sp_last->scrub_args.devid =
                                       sp_shared->scrub_args.devid;
                sp->fd = sp_last->fd = spc->fdmnt;
                sp->stats.t_start = sp_last->stats.t_start =
                                    sp_shared->stats.t_start;
                sp->resumed = sp_last->resumed = sp_shared->resumed;
                sp->skip = sp_last->skip = sp_shared->skip;
                sp->stats.finished = sp_last->stats.finished =
                                     sp_shared->stats.finished;

                ret = poll(&accept_poll_fd, 1, 5 * 1000);
                peer_fd = accept(spc->prg_fd, (struct sockaddr *)&peer,
                gettimeofday(&tv, NULL);
                for (i = 0; i < ndev; ++i) {
                        sp = &spc->progress[this * ndev + i];
                        sp_last = &spc->progress[last * ndev + i];
                        sp_shared = &spc->shared_progress[i];
                        if (sp->stats.finished)
                        progress_one_dev(sp);
                        sp->stats.duration = tv.tv_sec - sp->stats.t_start;
                        if (sp->ioctl_errno != ENOTCONN &&
                            sp->ioctl_errno != ENODEV) {
                                ret = -sp->ioctl_errno;
                        /*
                         * scrub finished or device removed, check the
                         * finished flag. if unset, just use the last
                         * result we got for the current write and go
                         * on. flag should be set on next cycle, then.
                         */
                        perr = pthread_setcancelstate(
                                        PTHREAD_CANCEL_DISABLE, &old);
                        perr = pthread_mutex_lock(&sp_shared->progress_mutex);
                        if (!sp_shared->stats.finished) {
                                perr = pthread_mutex_unlock(
                                                &sp_shared->progress_mutex);
                                perr = pthread_setcancelstate(
                                                PTHREAD_CANCEL_ENABLE, &old);
                                memcpy(sp, sp_last, sizeof(*sp));
                        perr = pthread_mutex_unlock(&sp_shared->progress_mutex);
                        perr = pthread_setcancelstate(
                                        PTHREAD_CANCEL_ENABLE, &old);
                        memcpy(sp, sp_shared, sizeof(*sp));
                        memcpy(sp_last, sp_shared, sizeof(*sp));
                write_poll_fd.fd = peer_fd;
                ret = poll(&write_poll_fd, 1, 0);
                        ret = scrub_write_file(
                                        &spc->progress[this * ndev], ndev);
                if (!spc->do_record)
                ret = scrub_write_progress(spc->write_mutex, fsid,
                                           &spc->progress[this * ndev], ndev);
        return ERR_PTR(ret);

static struct scrub_file_record *last_dev_scrub(
                struct scrub_file_record *const *const past_scrubs, u64 devid)
        if (!past_scrubs || IS_ERR(past_scrubs))
        for (i = 0; past_scrubs[i]; ++i)
                if (past_scrubs[i]->devid == devid)
                        return past_scrubs[i];
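
/* create the directory components of path with mode 0777, ignoring EEXIST */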
static int mkdir_p(char *path)
        for (i = 1; i < strlen(path); ++i) {
                ret = mkdir(path, 0777);
                if (ret && errno != EEXIST)
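
/*
 * Check the status file records: a device record that is neither
 * finished nor canceled indicates that a scrub still appears to be
 * running on this filesystem.
 */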
static int is_scrub_running_on_fs(struct btrfs_ioctl_fs_info_args *fi_args,
                                  struct btrfs_ioctl_dev_info_args *di_args,
                                  struct scrub_file_record **past_scrubs)
        if (!fi_args || !di_args || !past_scrubs)
        for (i = 0; i < fi_args->num_devices; i++) {
                struct scrub_file_record *sfr =
                        last_dev_scrub(past_scrubs, di_args[i].devid);
                if (!(sfr->stats.finished || sfr->stats.canceled))
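
/*
 * Ask the kernel directly: probe each device with BTRFS_IOC_SCRUB_PROGRESS
 * to tell whether a scrub is currently running on the filesystem.
 */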
static int is_scrub_running_in_kernel(int fd,
                struct btrfs_ioctl_dev_info_args *di_args, u64 max_devices)
        struct scrub_progress sp;

        for (i = 0; i < max_devices; i++) {
                memset(&sp, 0, sizeof(sp));
                sp.scrub_args.devid = di_args[i].devid;
                ret = ioctl(fd, BTRFS_IOC_SCRUB_PROGRESS, &sp.scrub_args);

static const char * const cmd_scrub_start_usage[];
static const char * const cmd_scrub_resume_usage[];
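
/*
 * Common implementation of "scrub start" and "scrub resume": parse the
 * options, read any previous status records, refuse to start if a scrub
 * is already running (unless forced), fork into the background, then run
 * one scrub_one_dev() thread per device plus the progress thread and
 * finally print and record the results.
 */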
static int scrub_start(int argc, char **argv, int resume)
        int e_uncorrectable = 0;
        int e_correctable = 0;
        int do_background = 1;
        int do_stats_per_dev = 0;
        int ioprio_class = IOPRIO_CLASS_IDLE;
        int ioprio_classdata = 0;
        struct btrfs_ioctl_fs_info_args fi_args;
        struct btrfs_ioctl_dev_info_args *di_args = NULL;
        struct scrub_progress *sp = NULL;
        struct scrub_fs_stat fs_stat;
        struct sockaddr_un addr = {
                .sun_family = AF_UNIX,
        pthread_t *t_devs = NULL;
        struct scrub_file_record **past_scrubs = NULL;
        struct scrub_file_record *last_scrub = NULL;
        char *datafile = strdup(SCRUB_DATA_FILE);
        char fsid[BTRFS_UUID_UNPARSED_SIZE];
        char sock_path[PATH_MAX] = "";
        struct scrub_progress_cycle spc;
        pthread_mutex_t spc_write_mutex = PTHREAD_MUTEX_INITIALIZER;
        DIR *dirstream = NULL;
        int nothing_to_resume = 0;
        while ((c = getopt(argc, argv, "BdqrRc:n:f")) != -1) {
                        do_stats_per_dev = 1;
                        ioprio_class = (int)strtol(optarg, NULL, 10);
                        ioprio_classdata = (int)strtol(optarg, NULL, 10);
                        usage(resume ? cmd_scrub_resume_usage :
                              cmd_scrub_start_usage);

        /* try to catch most error cases before forking */

        if (check_argc_exact(argc - optind, 1)) {
                usage(resume ? cmd_scrub_resume_usage :
                      cmd_scrub_start_usage);
        spc.progress = NULL;
        if (do_quiet && do_print)

        if (mkdir_p(datafile)) {
                warning_on(!do_quiet,
        "cannot create scrub data file, mkdir %s failed: %m. Status recording disabled",

        path = argv[optind];

        fdmnt = open_path_or_dev_mnt(path, &dirstream, !do_quiet);

        ret = get_fs_info(path, &fi_args, &di_args);
                         "getting dev info for scrub failed: %s",
        if (!fi_args.num_devices) {
                error_on(!do_quiet, "no devices found");

        uuid_unparse(fi_args.fsid, fsid);
        fdres = scrub_open_file_r(SCRUB_DATA_FILE, fsid);
        if (fdres < 0 && fdres != -ENOENT) {
                warning_on(!do_quiet, "failed to open status file: %s",
        } else if (fdres >= 0) {
                past_scrubs = scrub_read_file(fdres, !do_quiet);
                if (IS_ERR(past_scrubs))
                        warning_on(!do_quiet, "failed to read status file: %s",
                                   strerror(-PTR_ERR(past_scrubs)));
        /*
         * Check for stale information in the status file, i.e. if it's
         * canceled=0, finished=0 but no scrub is running.
         */
        if (!is_scrub_running_in_kernel(fdmnt, di_args, fi_args.num_devices))

        /*
         * check whether any involved device is already busy running a
         * scrub. This would cause damaged status messages and the state
         * "aborted" without the explanation that a scrub was already
         * running. Therefore check it first, prevent it and give some
         * feedback to the user if scrub is already running.
         * Note that if scrub is started with a block device as the
         * parameter, only that particular block device is checked. It
         * is a normal mode of operation to start scrub on multiple
         * single devices, there is no reason to prevent this.
         */
        if (!force && is_scrub_running_on_fs(&fi_args, di_args, past_scrubs)) {
                         "Scrub is already running.\n"
                         "To cancel use 'btrfs scrub cancel %s'.\n"
                         "To see the status use 'btrfs scrub status [-d] %s'",
        t_devs = malloc(fi_args.num_devices * sizeof(*t_devs));
        sp = calloc(fi_args.num_devices, sizeof(*sp));
        spc.progress = calloc(fi_args.num_devices * 2, sizeof(*spc.progress));
        if (!t_devs || !sp || !spc.progress) {
                error_on(!do_quiet, "scrub failed: %m");

        for (i = 0; i < fi_args.num_devices; ++i) {
                devid = di_args[i].devid;
                ret = pthread_mutex_init(&sp[i].progress_mutex, NULL);
                        error_on(!do_quiet, "pthread_mutex_init failed: %s",
                last_scrub = last_dev_scrub(past_scrubs, devid);
                sp[i].scrub_args.devid = devid;
                if (resume && last_scrub && (last_scrub->stats.canceled ||
                                             !last_scrub->stats.finished)) {
                        sp[i].scrub_args.start = last_scrub->p.last_physical;
                        sp[i].resumed = last_scrub;
                } else if (resume) {
                        sp[i].resumed = last_scrub;
                        sp[i].scrub_args.start = 0ll;
                        sp[i].resumed = NULL;
                sp[i].scrub_args.end = (u64)-1ll;
                sp[i].scrub_args.flags = readonly ? BTRFS_SCRUB_READONLY : 0;
                sp[i].ioprio_class = ioprio_class;
                sp[i].ioprio_classdata = ioprio_classdata;

        if (!n_start && !n_resume) {
                        printf("scrub: nothing to resume for %s, fsid %s\n",
                nothing_to_resume = 1;
        ret = prg_fd = socket(AF_UNIX, SOCK_STREAM, 0);
        ret = scrub_datafile(SCRUB_PROGRESS_SOCKET_PATH, fsid, NULL,
                             sock_path, sizeof(sock_path));
        /* ignore EOVERFLOW, try using a shorter path for the socket */
        addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
        strncpy(addr.sun_path, sock_path, sizeof(addr.sun_path) - 1);
        ret = bind(prg_fd, (struct sockaddr *)&addr, sizeof(addr));
        if (ret != -1 || errno != EADDRINUSE)

        /*
         * bind failed with EADDRINUSE. so let's see if anyone answers
         * when we make a call to the socket ...
         */
        ret = connect(prg_fd, (struct sockaddr *)&addr, sizeof(addr));
        if (!ret || errno != ECONNREFUSED) {
                /* ... yes, so scrub must be running. error out */
                error("scrub already running");

        /*
         * ... no, this means someone left us alone with an unused
         * socket in the file system. remove it and try again.
         */
        ret = unlink(sock_path);

        ret = listen(prg_fd, 100);
                warning_on(!do_quiet,
        "failed to open the progress status socket at %s: %m. Progress cannot be queried",
                           sock_path[0] ? sock_path :
                           SCRUB_PROGRESS_SOCKET_PATH);
        /* write all-zero progress file for a start */
        ret = scrub_write_progress(&spc_write_mutex, fsid, sp,
                                   fi_args.num_devices);
                warning_on(!do_quiet,
        "failed to write the progress status file: %s. Status recording disabled",

        if (do_background) {
                        error_on(!do_quiet, "cannot scrub, fork failed: %m");

                scrub_handle_sigint_parent();
                        printf("scrub %s on %s, fsid %s (pid=%d)\n",
                               n_start ? "started" : "resumed",
                        error_on(!do_quiet, "wait failed (ret=%d): %m",
                if (!WIFEXITED(stat) || WEXITSTATUS(stat)) {
                        error_on(!do_quiet, "scrub process failed");
                        err = WIFEXITED(stat) ? WEXITSTATUS(stat) : -1;
        scrub_handle_sigint_child(fdmnt);

        for (i = 0; i < fi_args.num_devices; ++i) {
                        sp[i].scrub_args.progress = sp[i].resumed->p;
                        sp[i].stats = sp[i].resumed->stats;
                        sp[i].stats.finished = 1;
                devid = di_args[i].devid;
                gettimeofday(&tv, NULL);
                sp[i].stats.t_start = tv.tv_sec;
                ret = pthread_create(&t_devs[i], NULL,
                                     scrub_one_dev, &sp[i]);
                        error("creating scrub_one_dev[%llu] thread failed: %s",
                              devid, strerror(ret));

        spc.prg_fd = prg_fd;
        spc.do_record = do_record;
        spc.write_mutex = &spc_write_mutex;
        spc.shared_progress = sp;
        ret = pthread_create(&t_prog, NULL, scrub_progress_cycle, &spc);
                error("creating progress thread failed: %s",
        for (i = 0; i < fi_args.num_devices; ++i) {
                devid = di_args[i].devid;
                ret = pthread_join(t_devs[i], NULL);
                        error("pthread_join failed for scrub_one_dev[%llu]: %s",
                              devid, strerror(ret));
                switch (sp[i].ioctl_errno) {
                        warning("device %lld not present",
                        error("scrubbing %s failed for device id %lld: ret=%d, errno=%d (%s)",
                              sp[i].ret, sp[i].ioctl_errno,
                              strerror(sp[i].ioctl_errno));
                if (sp[i].scrub_args.progress.uncorrectable_errors > 0)
                if (sp[i].scrub_args.progress.corrected_errors > 0
                    || sp[i].scrub_args.progress.unverified_errors > 0)
                const char *append = "done";
                if (!do_stats_per_dev)
                        init_fs_stat(&fs_stat);
                for (i = 0; i < fi_args.num_devices; ++i) {
                        if (do_stats_per_dev) {
                                print_scrub_dev(&di_args[i],
                                                &sp[i].scrub_args.progress,
                                                sp[i].ret ? "canceled" : "done",
                                        append = "canceled";
                                add_to_fs_stat(&sp[i].scrub_args.progress,
                                               &sp[i].stats, &fs_stat);
                if (!do_stats_per_dev) {
                        printf("scrub %s for %s\n", append, fsid);
                        print_fs_stat(&fs_stat, print_raw);
        ret = pthread_cancel(t_prog);
                ret = pthread_join(t_prog, &terr);

        /* check for errors from the handling of the progress thread */
        if (do_print && ret) {
                error("progress thread handling failed: %s",

        /* check for errors returned from the progress thread itself */
        if (do_print && terr && terr != PTHREAD_CANCELED)
                error("recording progress failed: %s",
                      strerror(-PTR_ERR(terr)));

                ret = scrub_write_progress(&spc_write_mutex, fsid, sp,
                                           fi_args.num_devices);
                if (ret && do_print)
                        error("failed to record the result: %s",

        scrub_handle_sigint_child(-1);

        free_history(past_scrubs);
        close_file_or_dir(fdmnt, dirstream);

        if (nothing_to_resume)
        if (e_uncorrectable) {
                error_on(!do_quiet, "there are uncorrectable errors");
                warning_on(!do_quiet,
                           "errors detected during scrubbing, corrected");

static const char * const cmd_scrub_start_usage[] = {
        "btrfs scrub start [-BdqrRf] [-c ioprio_class -n ioprio_classdata] <path>|<device>",
        "Start a new scrub. If a scrub is already running, the new one fails.",
        "-B do not background",
        "-d stats per device (-B only)",
        "-r read only mode",
        "-R raw print mode, print full data instead of summary",
        "-c set ioprio class (see ionice(1) manpage)",
        "-n set ioprio classdata (see ionice(1) manpage)",
        "-f force starting new scrub even if a scrub is already running",
        "   this is useful when scrub stats record file is damaged",

static int cmd_scrub_start(int argc, char **argv)
        return scrub_start(argc, argv, 0);

static const char * const cmd_scrub_cancel_usage[] = {
        "btrfs scrub cancel <path>|<device>",
        "Cancel a running scrub",

static int cmd_scrub_cancel(int argc, char **argv)
        DIR *dirstream = NULL;

        clean_args_no_options(argc, argv, cmd_scrub_cancel_usage);

        if (check_argc_exact(argc - optind, 1))
                usage(cmd_scrub_cancel_usage);

        path = argv[optind];

        fdmnt = open_path_or_dev_mnt(path, &dirstream, 1);

        ret = ioctl(fdmnt, BTRFS_IOC_SCRUB_CANCEL, NULL);
                error("scrub cancel failed on %s: %s", path,
                      errno == ENOTCONN ? "not running" : strerror(errno));
                if (errno == ENOTCONN)

        printf("scrub cancelled\n");

        close_file_or_dir(fdmnt, dirstream);

static const char * const cmd_scrub_resume_usage[] = {
        "btrfs scrub resume [-BdqrR] [-c ioprio_class -n ioprio_classdata] <path>|<device>",
        "Resume previously canceled or interrupted scrub",
        "-B do not background",
        "-d stats per device (-B only)",
        "-r read only mode",
        "-R raw print mode, print full data instead of summary",
        "-c set ioprio class (see ionice(1) manpage)",
        "-n set ioprio classdata (see ionice(1) manpage)",

static int cmd_scrub_resume(int argc, char **argv)
        return scrub_start(argc, argv, 1);

static const char * const cmd_scrub_status_usage[] = {
        "btrfs scrub status [-dR] <path>|<device>",
        "Show status of running or finished scrub",
        "-d stats per device",
        "-R print raw stats",

static int cmd_scrub_status(int argc, char **argv)
        struct btrfs_ioctl_fs_info_args fi_args;
        struct btrfs_ioctl_dev_info_args *di_args = NULL;
        struct scrub_file_record **past_scrubs = NULL;
        struct scrub_file_record *last_scrub;
        struct scrub_fs_stat fs_stat;
        struct sockaddr_un addr = {
                .sun_family = AF_UNIX,
        int do_stats_per_dev = 0;
        char fsid[BTRFS_UUID_UNPARSED_SIZE];
        DIR *dirstream = NULL;

        while ((c = getopt(argc, argv, "dR")) != -1) {
                        do_stats_per_dev = 1;
                        usage(cmd_scrub_status_usage);

        if (check_argc_exact(argc - optind, 1))
                usage(cmd_scrub_status_usage);

        path = argv[optind];

        fdmnt = open_path_or_dev_mnt(path, &dirstream, 1);
        ret = get_fs_info(path, &fi_args, &di_args);
                error("getting dev info for scrub failed: %s",
        if (!fi_args.num_devices) {
                error("no devices found");

        uuid_unparse(fi_args.fsid, fsid);

        fdres = socket(AF_UNIX, SOCK_STREAM, 0);
                error("failed to create socket to receive progress information: %m");
        scrub_datafile(SCRUB_PROGRESS_SOCKET_PATH, fsid,
                       NULL, addr.sun_path, sizeof(addr.sun_path));
        /* ignore EOVERFLOW, just use shorter name and hope for the best */
        addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
        ret = connect(fdres, (struct sockaddr *)&addr, sizeof(addr));
                fdres = scrub_open_file_r(SCRUB_DATA_FILE, fsid);
                if (fdres < 0 && fdres != -ENOENT) {
                        warning("failed to open status file: %s",

        past_scrubs = scrub_read_file(fdres, 1);
        if (IS_ERR(past_scrubs))
                warning("failed to read status: %s",
                        strerror(-PTR_ERR(past_scrubs)));
        in_progress = is_scrub_running_in_kernel(fdmnt, di_args, fi_args.num_devices);

        printf("scrub status for %s\n", fsid);

        if (do_stats_per_dev) {
                for (i = 0; i < fi_args.num_devices; ++i) {
                        last_scrub = last_dev_scrub(past_scrubs,
                                print_scrub_dev(&di_args[i], NULL, print_raw,
                        last_scrub->stats.in_progress = in_progress;
                        print_scrub_dev(&di_args[i], &last_scrub->p, print_raw,
                                        last_scrub->stats.finished ?
                                        "history" : "status",
                                        &last_scrub->stats);
                init_fs_stat(&fs_stat);
                fs_stat.s.in_progress = in_progress;
                for (i = 0; i < fi_args.num_devices; ++i) {
                        last_scrub = last_dev_scrub(past_scrubs,
                        add_to_fs_stat(&last_scrub->p, &last_scrub->stats,
                print_fs_stat(&fs_stat, print_raw);

        free_history(past_scrubs);

        close_file_or_dir(fdmnt, dirstream);

static const char scrub_cmd_group_info[] =
"verify checksums of data and metadata";

const struct cmd_group scrub_cmd_group = {
        scrub_cmd_group_usage, scrub_cmd_group_info, {
                { "start", cmd_scrub_start, cmd_scrub_start_usage, NULL, 0 },
                { "cancel", cmd_scrub_cancel, cmd_scrub_cancel_usage, NULL, 0 },
                { "resume", cmd_scrub_resume, cmd_scrub_resume_usage, NULL, 0 },
                { "status", cmd_scrub_status, cmd_scrub_status_usage, NULL, 0 },

int cmd_scrub(int argc, char **argv)
        return handle_command_group(&scrub_cmd_group, argc, argv);