cmds-scrub.c (btrfs-progs-unstable/devel.git)
1 /*
2 * Copyright (C) 2011 STRATO. All rights reserved.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
19 #include "kerncompat.h"
20 #include "androidcompat.h"
22 #include <sys/ioctl.h>
23 #include <sys/wait.h>
24 #include <sys/stat.h>
25 #include <sys/types.h>
26 #include <sys/socket.h>
27 #include <sys/un.h>
28 #include <sys/syscall.h>
29 #include <poll.h>
30 #include <sys/file.h>
31 #include <uuid/uuid.h>
32 #include <fcntl.h>
33 #include <unistd.h>
34 #include <pthread.h>
35 #include <ctype.h>
36 #include <signal.h>
37 #include <stdarg.h>
38 #include <limits.h>
40 #include "ctree.h"
41 #include "ioctl.h"
42 #include "utils.h"
43 #include "volumes.h"
44 #include "disk-io.h"
46 #include "commands.h"
47 #include "help.h"
49 static const char * const scrub_cmd_group_usage[] = {
50 "btrfs scrub <command> [options] <path>|<device>",
51 NULL
54 #define SCRUB_DATA_FILE "/var/lib/btrfs/scrub.status"
55 #define SCRUB_PROGRESS_SOCKET_PATH "/var/lib/btrfs/scrub.progress"
56 #define SCRUB_FILE_VERSION_PREFIX "scrub status"
57 #define SCRUB_FILE_VERSION "1"
59 struct scrub_stats {
60 time_t t_start;
61 time_t t_resumed;
62 u64 duration;
63 u64 finished;
64 u64 canceled;
65 int in_progress;
68 /* TBD: replace with #include "linux/ioprio.h" in some years */
69 #if !defined (IOPRIO_H)
70 #define IOPRIO_WHO_PROCESS 1
71 #define IOPRIO_CLASS_SHIFT 13
72 #define IOPRIO_PRIO_VALUE(class, data) \
73 (((class) << IOPRIO_CLASS_SHIFT) | (data))
74 #define IOPRIO_CLASS_IDLE 3
75 #endif
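/*
 * Per-device scrub state: the ioctl arguments passed to the kernel, the
 * worker thread's result and errno, accumulated statistics, and the record
 * resumed from the status file (if any). progress_mutex serializes access
 * between the scrubbing thread and the progress/recording thread.
 */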
77 struct scrub_progress {
78 struct btrfs_ioctl_scrub_args scrub_args;
79 int fd;
80 int ret;
81 int skip;
82 struct scrub_stats stats;
83 struct scrub_file_record *resumed;
84 int ioctl_errno;
85 pthread_mutex_t progress_mutex;
86 int ioprio_class;
87 int ioprio_classdata;
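/*
 * One record of the on-disk status file: progress and statistics of a
 * single device, keyed by filesystem fsid and device id.
 */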
90 struct scrub_file_record {
91 u8 fsid[BTRFS_FSID_SIZE];
92 u64 devid;
93 struct scrub_stats stats;
94 struct btrfs_scrub_progress p;
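/*
 * Context handed to the progress thread: the mounted filesystem, the
 * listening progress socket, the double-buffered per-device progress it
 * polls into, and the progress shared with the scrubbing threads. Results
 * are written to the status file when do_record is set.
 */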
97 struct scrub_progress_cycle {
98 int fdmnt;
99 int prg_fd;
100 int do_record;
101 struct btrfs_ioctl_fs_info_args *fi;
102 struct scrub_progress *progress;
103 struct scrub_progress *shared_progress;
104 pthread_mutex_t *write_mutex;
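/* Progress and statistics aggregated over all devices of one filesystem */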
107 struct scrub_fs_stat {
108 struct btrfs_scrub_progress p;
109 struct scrub_stats s;
110 int i;
113 static void print_scrub_full(struct btrfs_scrub_progress *sp)
115 printf("\tdata_extents_scrubbed: %lld\n", sp->data_extents_scrubbed);
116 printf("\ttree_extents_scrubbed: %lld\n", sp->tree_extents_scrubbed);
117 printf("\tdata_bytes_scrubbed: %lld\n", sp->data_bytes_scrubbed);
118 printf("\ttree_bytes_scrubbed: %lld\n", sp->tree_bytes_scrubbed);
119 printf("\tread_errors: %lld\n", sp->read_errors);
120 printf("\tcsum_errors: %lld\n", sp->csum_errors);
121 printf("\tverify_errors: %lld\n", sp->verify_errors);
122 printf("\tno_csum: %lld\n", sp->no_csum);
123 printf("\tcsum_discards: %lld\n", sp->csum_discards);
124 printf("\tsuper_errors: %lld\n", sp->super_errors);
125 printf("\tmalloc_errors: %lld\n", sp->malloc_errors);
126 printf("\tuncorrectable_errors: %lld\n", sp->uncorrectable_errors);
127 printf("\tunverified_errors: %lld\n", sp->unverified_errors);
128 printf("\tcorrected_errors: %lld\n", sp->corrected_errors);
129 printf("\tlast_physical: %lld\n", sp->last_physical);
132 #define PRINT_SCRUB_ERROR(test, desc) do { \
133 if (test) \
134 printf(" %s=%llu", desc, test); \
135 } while (0)
137 static void print_scrub_summary(struct btrfs_scrub_progress *p)
139 u64 err_cnt;
140 u64 err_cnt2;
142 err_cnt = p->read_errors +
143 p->csum_errors +
144 p->verify_errors +
145 p->super_errors;
147 err_cnt2 = p->corrected_errors + p->uncorrectable_errors;
149 if (p->malloc_errors)
150 printf("*** WARNING: memory allocation failed while scrubbing. "
151 "results may be inaccurate\n");
153 printf("\ttotal bytes scrubbed: %s with %llu errors\n",
154 pretty_size(p->data_bytes_scrubbed + p->tree_bytes_scrubbed),
155 max(err_cnt, err_cnt2));
157 if (err_cnt || err_cnt2) {
158 printf("\terror details:");
159 PRINT_SCRUB_ERROR(p->read_errors, "read");
160 PRINT_SCRUB_ERROR(p->super_errors, "super");
161 PRINT_SCRUB_ERROR(p->verify_errors, "verify");
162 PRINT_SCRUB_ERROR(p->csum_errors, "csum");
163 printf("\n");
164 printf("\tcorrected errors: %llu, uncorrectable errors: %llu, "
165 "unverified errors: %llu\n", p->corrected_errors,
166 p->uncorrectable_errors, p->unverified_errors);
170 #define _SCRUB_FS_STAT(p, name, fs_stat) do { \
171 fs_stat->p.name += p->name; \
172 } while (0)
174 #define _SCRUB_FS_STAT_MIN(ss, name, fs_stat) \
175 do { \
176 if (fs_stat->s.name > ss->name) { \
177 fs_stat->s.name = ss->name; \
179 } while (0)
181 #define _SCRUB_FS_STAT_ZMIN(ss, name, fs_stat) \
182 do { \
183 if (!fs_stat->s.name || fs_stat->s.name > ss->name) { \
184 fs_stat->s.name = ss->name; \
186 } while (0)
188 #define _SCRUB_FS_STAT_ZMAX(ss, name, fs_stat) \
189 do { \
190 if (!(fs_stat)->s.name || (fs_stat)->s.name < (ss)->name) { \
191 (fs_stat)->s.name = (ss)->name; \
193 } while (0)
195 static void add_to_fs_stat(struct btrfs_scrub_progress *p,
196 struct scrub_stats *ss,
197 struct scrub_fs_stat *fs_stat)
199 _SCRUB_FS_STAT(p, data_extents_scrubbed, fs_stat);
200 _SCRUB_FS_STAT(p, tree_extents_scrubbed, fs_stat);
201 _SCRUB_FS_STAT(p, data_bytes_scrubbed, fs_stat);
202 _SCRUB_FS_STAT(p, tree_bytes_scrubbed, fs_stat);
203 _SCRUB_FS_STAT(p, read_errors, fs_stat);
204 _SCRUB_FS_STAT(p, csum_errors, fs_stat);
205 _SCRUB_FS_STAT(p, verify_errors, fs_stat);
206 _SCRUB_FS_STAT(p, no_csum, fs_stat);
207 _SCRUB_FS_STAT(p, csum_discards, fs_stat);
208 _SCRUB_FS_STAT(p, super_errors, fs_stat);
209 _SCRUB_FS_STAT(p, malloc_errors, fs_stat);
210 _SCRUB_FS_STAT(p, uncorrectable_errors, fs_stat);
211 _SCRUB_FS_STAT(p, corrected_errors, fs_stat);
212 _SCRUB_FS_STAT(p, last_physical, fs_stat);
213 _SCRUB_FS_STAT_ZMIN(ss, t_start, fs_stat);
214 _SCRUB_FS_STAT_ZMIN(ss, t_resumed, fs_stat);
215 _SCRUB_FS_STAT_ZMAX(ss, duration, fs_stat);
216 _SCRUB_FS_STAT_ZMAX(ss, canceled, fs_stat);
217 _SCRUB_FS_STAT_MIN(ss, finished, fs_stat);
220 static void init_fs_stat(struct scrub_fs_stat *fs_stat)
222 memset(fs_stat, 0, sizeof(*fs_stat));
223 fs_stat->s.finished = 1;
226 static void _print_scrub_ss(struct scrub_stats *ss)
228 char t[4096];
229 struct tm tm;
230 time_t seconds;
231 unsigned hours;
233 if (!ss || !ss->t_start) {
234 printf("\tno stats available\n");
235 return;
237 if (ss->t_resumed) {
238 localtime_r(&ss->t_resumed, &tm);
239 strftime(t, sizeof(t), "%c", &tm);
240 t[sizeof(t) - 1] = '\0';
241 printf("\tscrub resumed at %s", t);
242 } else {
243 localtime_r(&ss->t_start, &tm);
244 strftime(t, sizeof(t), "%c", &tm);
245 t[sizeof(t) - 1] = '\0';
246 printf("\tscrub started at %s", t);
249 seconds = ss->duration;
250 hours = ss->duration / (60 * 60);
251 gmtime_r(&seconds, &tm);
252 strftime(t, sizeof(t), "%M:%S", &tm);
253 if (ss->in_progress)
254 printf(", running for %02u:%s\n", hours, t);
255 else if (ss->canceled)
256 printf(" and was aborted after %02u:%s\n", hours, t);
257 else if (ss->finished)
258 printf(" and finished after %02u:%s\n", hours, t);
259 else
260 printf(", interrupted after %02u:%s, not running\n",
261 hours, t);
264 static void print_scrub_dev(struct btrfs_ioctl_dev_info_args *di,
265 struct btrfs_scrub_progress *p, int raw,
266 const char *append, struct scrub_stats *ss)
268 printf("scrub device %s (id %llu) %s\n", di->path, di->devid,
269 append ? append : "");
271 _print_scrub_ss(ss);
273 if (p) {
274 if (raw)
275 print_scrub_full(p);
276 else
277 print_scrub_summary(p);
281 static void print_fs_stat(struct scrub_fs_stat *fs_stat, int raw)
283 _print_scrub_ss(&fs_stat->s);
285 if (raw)
286 print_scrub_full(&fs_stat->p);
287 else
288 print_scrub_summary(&fs_stat->p);
291 static void free_history(struct scrub_file_record **last_scrubs)
293 struct scrub_file_record **l = last_scrubs;
294 if (!l || IS_ERR(l))
295 return;
296 while (*l)
297 free(*l++);
298 free(last_scrubs);
302 * cancels a running scrub and makes the master process record the current
303 * progress status before exiting.
305 static int cancel_fd = -1;
306 static void scrub_sigint_record_progress(int signal)
308 int ret;
310 ret = ioctl(cancel_fd, BTRFS_IOC_SCRUB_CANCEL, NULL);
311 if (ret < 0)
312 perror("Scrub cancel failed");
315 static int scrub_handle_sigint_parent(void)
317 struct sigaction sa = {
318 .sa_handler = SIG_IGN,
319 .sa_flags = SA_RESTART,
322 return sigaction(SIGINT, &sa, NULL);
325 static int scrub_handle_sigint_child(int fd)
327 struct sigaction sa = {
328 .sa_handler = fd == -1 ? SIG_DFL : scrub_sigint_record_progress,
331 cancel_fd = fd;
332 return sigaction(SIGINT, &sa, NULL);
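/*
 * Compose "<fn_base>.<fn_local>" (plus "_<fn_tmp>" if given) into datafile.
 * Returns -EOVERFLOW if the result does not fit into size bytes.
 */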
335 static int scrub_datafile(const char *fn_base, const char *fn_local,
336 const char *fn_tmp, char *datafile, int size)
338 int ret;
339 int end = size - 2;
341 datafile[end + 1] = '\0';
342 strncpy(datafile, fn_base, end);
343 ret = strlen(datafile);
345 if (ret + 1 > end)
346 return -EOVERFLOW;
348 datafile[ret] = '.';
349 strncpy(datafile + ret + 1, fn_local, end - ret - 1);
350 ret = strlen(datafile);
352 if (ret + 1 > end)
353 return -EOVERFLOW;
355 if (fn_tmp) {
356 datafile[ret] = '_';
357 strncpy(datafile + ret + 1, fn_tmp, end - ret - 1);
358 ret = strlen(datafile);
360 if (ret > end)
361 return -EOVERFLOW;
364 return 0;
367 static int scrub_open_file(const char *datafile, int m)
369 int fd;
370 int ret;
372 fd = open(datafile, m, 0600);
373 if (fd < 0)
374 return -errno;
376 ret = flock(fd, LOCK_EX|LOCK_NB);
377 if (ret) {
378 ret = errno;
379 close(fd);
380 return -ret;
383 return fd;
386 static int scrub_open_file_r(const char *fn_base, const char *fn_local)
388 int ret;
389 char datafile[PATH_MAX];
390 ret = scrub_datafile(fn_base, fn_local, NULL,
391 datafile, sizeof(datafile));
392 if (ret < 0)
393 return ret;
394 return scrub_open_file(datafile, O_RDONLY);
397 static int scrub_open_file_w(const char *fn_base, const char *fn_local,
398 const char *tmp)
400 int ret;
401 char datafile[PATH_MAX];
402 ret = scrub_datafile(fn_base, fn_local, tmp,
403 datafile, sizeof(datafile));
404 if (ret < 0)
405 return ret;
406 return scrub_open_file(datafile, O_WRONLY|O_CREAT);
409 static int scrub_rename_file(const char *fn_base, const char *fn_local,
410 const char *tmp)
412 int ret;
413 char datafile_old[PATH_MAX];
414 char datafile_new[PATH_MAX];
415 ret = scrub_datafile(fn_base, fn_local, tmp,
416 datafile_old, sizeof(datafile_old));
417 if (ret < 0)
418 return ret;
419 ret = scrub_datafile(fn_base, fn_local, NULL,
420 datafile_new, sizeof(datafile_new));
421 if (ret < 0)
422 return ret;
423 ret = rename(datafile_old, datafile_new);
424 return ret ? -errno : 0;
427 #define _SCRUB_KVREAD(ret, i, name, avail, l, dest) if (ret == 0) { \
428 ret = scrub_kvread(i, sizeof(#name), avail, l, #name, dest.name); \
432 * returns 0 if the key did not match (nothing was read)
433 * 1 if the key did match (success)
434 * -1 if the key did match and an error occurred
436 static int scrub_kvread(int *i, int len, int avail, const char *buf,
437 const char *key, u64 *dest)
439 int j;
441 if (*i + len + 1 < avail && strncmp(&buf[*i], key, len - 1) == 0) {
442 *i += len - 1;
443 if (buf[*i] != ':')
444 return -1;
445 *i += 1;
446 for (j = 0; *i + j < avail && isdigit(buf[*i + j]); ++j)
448 if (*i + j >= avail)
449 return -1;
450 *dest = atoll(&buf[*i]);
451 *i += j;
452 return 1;
455 return 0;
458 #define _SCRUB_INVALID do { \
459 if (report_errors) \
460 warning("invalid data on line %d pos " \
461 "%d state %d (near \"%.*s\") at %s:%d", \
462 lineno, i, state, 20 > avail ? avail : 20, \
463 l + i, __FILE__, __LINE__); \
464 goto skip; \
465 } while (0)
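/*
 * Parse the status file into a NULL-terminated array of scrub_file_record
 * pointers. Parser states: 0 = file version header, 1 = start of line
 * (allocate a new record), 2 = skip whitespace, 3 = fsid, 4 = devid,
 * 5 = key/value pair, 6 = separator after a value, 99 = skip rest of line.
 * Returns an ERR_PTR on failure.
 */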
467 static struct scrub_file_record **scrub_read_file(int fd, int report_errors)
469 int avail = 0;
470 int old_avail = 0;
471 char l[SZ_16K];
472 int state = 0;
473 int curr = -1;
474 int i = 0;
475 int j;
476 int ret;
477 int eof = 0;
478 int lineno = 0;
479 u64 version;
480 char empty_uuid[BTRFS_FSID_SIZE] = {0};
481 struct scrub_file_record **p = NULL;
483 again:
484 old_avail = avail - i;
485 if (old_avail < 0) {
486 error("scrub record file corrupted near byte %d", i);
487 return ERR_PTR(-EINVAL);
489 if (old_avail)
490 memmove(l, l + i, old_avail);
491 avail = read(fd, l + old_avail, sizeof(l) - old_avail);
492 if (avail == 0)
493 eof = 1;
494 if (avail == 0 && old_avail == 0) {
495 if (curr >= 0 &&
496 memcmp(p[curr]->fsid, empty_uuid, BTRFS_FSID_SIZE) == 0) {
497 p[curr] = NULL;
498 } else if (curr == -1) {
499 p = ERR_PTR(-ENODATA);
501 return p;
503 if (avail == -1) {
504 free_history(p);
505 return ERR_PTR(-errno);
507 avail += old_avail;
509 i = 0;
510 while (i < avail) {
511 void *tmp;
513 switch (state) {
514 case 0: /* start of file */
515 ret = scrub_kvread(&i,
516 sizeof(SCRUB_FILE_VERSION_PREFIX), avail, l,
517 SCRUB_FILE_VERSION_PREFIX, &version);
518 if (ret != 1)
519 _SCRUB_INVALID;
520 if (version != atoll(SCRUB_FILE_VERSION))
521 return ERR_PTR(-ENOTSUP);
522 state = 6;
523 continue;
524 case 1: /* start of line, alloc */
526 * this state makes sure we have a complete line in
527 * further processing, so we don't need wrap-tracking
528 * everywhere.
530 if (!eof && !memchr(l + i, '\n', avail - i))
531 goto again;
532 ++lineno;
533 if (curr > -1 && memcmp(p[curr]->fsid, empty_uuid,
534 BTRFS_FSID_SIZE) == 0) {
535 state = 2;
536 continue;
538 ++curr;
539 tmp = p;
540 p = realloc(p, (curr + 2) * sizeof(*p));
541 if (!p) {
542 free_history(tmp);
543 return ERR_PTR(-errno);
545 p[curr] = malloc(sizeof(**p));
546 if (!p[curr]) {
547 free_history(p);
548 return ERR_PTR(-errno);
550 memset(p[curr], 0, sizeof(**p));
551 p[curr + 1] = NULL;
552 ++state;
553 /* fall through */
554 case 2: /* start of line, skip space */
555 while (i < avail && isspace(l[i])) {
556 if (l[i] == '\n')
557 ++lineno;
558 ++i;
560 if (i >= avail ||
561 (!eof && !memchr(l + i, '\n', avail - i)))
562 goto again;
563 ++state;
564 /* fall through */
565 case 3: /* read fsid */
566 if (i == avail)
567 continue;
568 for (j = 0; i + j < avail && l[i + j] != ':'; ++j)
570 if (i + j + 1 >= avail)
571 _SCRUB_INVALID;
572 if (j != BTRFS_UUID_UNPARSED_SIZE - 1)
573 _SCRUB_INVALID;
574 l[i + j] = '\0';
575 ret = uuid_parse(l + i, p[curr]->fsid);
576 if (ret)
577 _SCRUB_INVALID;
578 i += j + 1;
579 ++state;
580 /* fall through */
581 case 4: /* read dev id */
582 for (j = 0; i + j < avail && isdigit(l[i + j]); ++j)
584 if (j == 0 || i + j + 1 >= avail)
585 _SCRUB_INVALID;
586 p[curr]->devid = atoll(&l[i]);
587 i += j + 1;
588 ++state;
589 /* fall through */
590 case 5: /* read key/value pair */
591 ret = 0;
592 _SCRUB_KVREAD(ret, &i, data_extents_scrubbed, avail, l,
593 &p[curr]->p);
594 _SCRUB_KVREAD(ret, &i, data_extents_scrubbed, avail, l,
595 &p[curr]->p);
596 _SCRUB_KVREAD(ret, &i, tree_extents_scrubbed, avail, l,
597 &p[curr]->p);
598 _SCRUB_KVREAD(ret, &i, data_bytes_scrubbed, avail, l,
599 &p[curr]->p);
600 _SCRUB_KVREAD(ret, &i, tree_bytes_scrubbed, avail, l,
601 &p[curr]->p);
602 _SCRUB_KVREAD(ret, &i, read_errors, avail, l,
603 &p[curr]->p);
604 _SCRUB_KVREAD(ret, &i, csum_errors, avail, l,
605 &p[curr]->p);
606 _SCRUB_KVREAD(ret, &i, verify_errors, avail, l,
607 &p[curr]->p);
608 _SCRUB_KVREAD(ret, &i, no_csum, avail, l,
609 &p[curr]->p);
610 _SCRUB_KVREAD(ret, &i, csum_discards, avail, l,
611 &p[curr]->p);
612 _SCRUB_KVREAD(ret, &i, super_errors, avail, l,
613 &p[curr]->p);
614 _SCRUB_KVREAD(ret, &i, malloc_errors, avail, l,
615 &p[curr]->p);
616 _SCRUB_KVREAD(ret, &i, uncorrectable_errors, avail, l,
617 &p[curr]->p);
618 _SCRUB_KVREAD(ret, &i, corrected_errors, avail, l,
619 &p[curr]->p);
620 _SCRUB_KVREAD(ret, &i, last_physical, avail, l,
621 &p[curr]->p);
622 _SCRUB_KVREAD(ret, &i, finished, avail, l,
623 &p[curr]->stats);
624 _SCRUB_KVREAD(ret, &i, t_start, avail, l,
625 (u64 *)&p[curr]->stats);
626 _SCRUB_KVREAD(ret, &i, t_resumed, avail, l,
627 (u64 *)&p[curr]->stats);
628 _SCRUB_KVREAD(ret, &i, duration, avail, l,
629 (u64 *)&p[curr]->stats);
630 _SCRUB_KVREAD(ret, &i, canceled, avail, l,
631 &p[curr]->stats);
632 if (ret != 1)
633 _SCRUB_INVALID;
634 ++state;
635 /* fall through */
636 case 6: /* after number */
637 if (l[i] == '|')
638 state = 5;
639 else if (l[i] == '\n')
640 state = 1;
641 else
642 _SCRUB_INVALID;
643 ++i;
644 continue;
645 case 99: /* skip rest of line */
646 skip:
647 state = 99;
648 do {
649 ++i;
650 if (l[i - 1] == '\n') {
651 state = 1;
652 break;
654 } while (i < avail);
655 continue;
657 error("internal error: unknown parser state %d near byte %d",
658 state, i);
659 return ERR_PTR(-EINVAL);
661 goto again;
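/* Write len bytes; returns 0 on a complete write, negative otherwise */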
664 static int scrub_write_buf(int fd, const void *data, int len)
666 int ret;
667 ret = write(fd, data, len);
668 return ret - len;
671 static int scrub_writev(int fd, char *buf, int max, const char *fmt, ...)
672 __attribute__ ((format (printf, 4, 5)));
673 static int scrub_writev(int fd, char *buf, int max, const char *fmt, ...)
675 int ret;
676 va_list args;
678 va_start(args, fmt);
679 ret = vsnprintf(buf, max, fmt, args);
680 va_end(args);
681 if (ret >= max)
682 return ret - max;
683 return scrub_write_buf(fd, buf, ret);
686 #define _SCRUB_SUM(dest, data, name) dest->scrub_args.progress.name = \
687 data->resumed->p.name + data->scrub_args.progress.name
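/*
 * If this device resumed an earlier scrub, add the counters recorded in the
 * status file to those of the current run and return the combined result in
 * *dest; otherwise return data unchanged.
 */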
689 static struct scrub_progress *scrub_resumed_stats(struct scrub_progress *data,
690 struct scrub_progress *dest)
692 if (!data->resumed || data->skip)
693 return data;
695 _SCRUB_SUM(dest, data, data_extents_scrubbed);
696 _SCRUB_SUM(dest, data, tree_extents_scrubbed);
697 _SCRUB_SUM(dest, data, data_bytes_scrubbed);
698 _SCRUB_SUM(dest, data, tree_bytes_scrubbed);
699 _SCRUB_SUM(dest, data, read_errors);
700 _SCRUB_SUM(dest, data, csum_errors);
701 _SCRUB_SUM(dest, data, verify_errors);
702 _SCRUB_SUM(dest, data, no_csum);
703 _SCRUB_SUM(dest, data, csum_discards);
704 _SCRUB_SUM(dest, data, super_errors);
705 _SCRUB_SUM(dest, data, malloc_errors);
706 _SCRUB_SUM(dest, data, uncorrectable_errors);
707 _SCRUB_SUM(dest, data, corrected_errors);
708 _SCRUB_SUM(dest, data, last_physical);
709 dest->stats.canceled = data->stats.canceled;
710 dest->stats.finished = data->stats.finished;
711 dest->stats.t_resumed = data->stats.t_start;
712 dest->stats.t_start = data->resumed->stats.t_start;
713 dest->stats.duration = data->resumed->stats.duration +
714 data->stats.duration;
715 dest->scrub_args.devid = data->scrub_args.devid;
716 return dest;
719 #define _SCRUB_KVWRITE(fd, buf, name, use) \
720 scrub_kvwrite(fd, buf, sizeof(buf), #name, \
721 use->scrub_args.progress.name)
723 #define _SCRUB_KVWRITE_STATS(fd, buf, name, use) \
724 scrub_kvwrite(fd, buf, sizeof(buf), #name, \
725 use->stats.name)
727 static int scrub_kvwrite(int fd, char *buf, int max, const char *key, u64 val)
729 return scrub_writev(fd, buf, max, "|%s:%lld", key, val);
732 static int scrub_write_file(int fd, const char *fsid,
733 struct scrub_progress *data, int n)
735 int ret = 0;
736 int i;
737 char buf[1024];
738 struct scrub_progress local;
739 struct scrub_progress *use;
741 if (n < 1)
742 return -EINVAL;
744 /* each -1 is to subtract one \0 byte, the + 2 is for ':' and '\n' */
745 ret = scrub_write_buf(fd, SCRUB_FILE_VERSION_PREFIX ":"
746 SCRUB_FILE_VERSION "\n",
747 (sizeof(SCRUB_FILE_VERSION_PREFIX) - 1) +
748 (sizeof(SCRUB_FILE_VERSION) - 1) + 2);
749 if (ret)
750 return -EOVERFLOW;
752 for (i = 0; i < n; ++i) {
753 use = scrub_resumed_stats(&data[i], &local);
754 if (scrub_write_buf(fd, fsid, strlen(fsid)) ||
755 scrub_write_buf(fd, ":", 1) ||
756 scrub_writev(fd, buf, sizeof(buf), "%lld",
757 use->scrub_args.devid) ||
758 scrub_write_buf(fd, buf, ret) ||
759 _SCRUB_KVWRITE(fd, buf, data_extents_scrubbed, use) ||
760 _SCRUB_KVWRITE(fd, buf, tree_extents_scrubbed, use) ||
761 _SCRUB_KVWRITE(fd, buf, data_bytes_scrubbed, use) ||
762 _SCRUB_KVWRITE(fd, buf, tree_bytes_scrubbed, use) ||
763 _SCRUB_KVWRITE(fd, buf, read_errors, use) ||
764 _SCRUB_KVWRITE(fd, buf, csum_errors, use) ||
765 _SCRUB_KVWRITE(fd, buf, verify_errors, use) ||
766 _SCRUB_KVWRITE(fd, buf, no_csum, use) ||
767 _SCRUB_KVWRITE(fd, buf, csum_discards, use) ||
768 _SCRUB_KVWRITE(fd, buf, super_errors, use) ||
769 _SCRUB_KVWRITE(fd, buf, malloc_errors, use) ||
770 _SCRUB_KVWRITE(fd, buf, uncorrectable_errors, use) ||
771 _SCRUB_KVWRITE(fd, buf, corrected_errors, use) ||
772 _SCRUB_KVWRITE(fd, buf, last_physical, use) ||
773 _SCRUB_KVWRITE_STATS(fd, buf, t_start, use) ||
774 _SCRUB_KVWRITE_STATS(fd, buf, t_resumed, use) ||
775 _SCRUB_KVWRITE_STATS(fd, buf, duration, use) ||
776 _SCRUB_KVWRITE_STATS(fd, buf, canceled, use) ||
777 _SCRUB_KVWRITE_STATS(fd, buf, finished, use) ||
778 scrub_write_buf(fd, "\n", 1)) {
779 return -EOVERFLOW;
783 return 0;
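/*
 * Serialize the progress of all devices to a temporary status file and
 * rename it into place, holding write_mutex and with thread cancellation
 * disabled for the duration.
 */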
786 static int scrub_write_progress(pthread_mutex_t *m, const char *fsid,
787 struct scrub_progress *data, int n)
789 int ret;
790 int err;
791 int fd = -1;
792 int old;
794 ret = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &old);
795 if (ret) {
796 err = -ret;
797 goto out3;
800 ret = pthread_mutex_lock(m);
801 if (ret) {
802 err = -ret;
803 goto out2;
806 fd = scrub_open_file_w(SCRUB_DATA_FILE, fsid, "tmp");
807 if (fd < 0) {
808 err = fd;
809 goto out1;
811 err = scrub_write_file(fd, fsid, data, n);
812 if (err)
813 goto out1;
814 err = scrub_rename_file(SCRUB_DATA_FILE, fsid, "tmp");
815 if (err)
816 goto out1;
818 out1:
819 if (fd >= 0) {
820 ret = close(fd);
821 if (ret)
822 err = -errno;
825 ret = pthread_mutex_unlock(m);
826 if (ret && !err)
827 err = -ret;
829 out2:
830 ret = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old);
831 if (ret && !err)
832 err = -ret;
834 out3:
835 return err;
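/*
 * Thread worker: set the requested io priority and issue the scrub ioctl
 * for one device; the ioctl blocks until the scrub finishes, fails or is
 * canceled.
 */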
838 static void *scrub_one_dev(void *ctx)
840 struct scrub_progress *sp = ctx;
841 int ret;
842 struct timeval tv;
844 sp->stats.canceled = 0;
845 sp->stats.duration = 0;
846 sp->stats.finished = 0;
848 ret = syscall(SYS_ioprio_set, IOPRIO_WHO_PROCESS, 0,
849 IOPRIO_PRIO_VALUE(sp->ioprio_class,
850 sp->ioprio_classdata));
851 if (ret)
852 warning("setting ioprio failed: %m (ignored)");
854 ret = ioctl(sp->fd, BTRFS_IOC_SCRUB, &sp->scrub_args);
855 gettimeofday(&tv, NULL);
856 sp->ret = ret;
857 sp->stats.duration = tv.tv_sec - sp->stats.t_start;
858 sp->stats.canceled = !!ret;
859 sp->ioctl_errno = errno;
860 ret = pthread_mutex_lock(&sp->progress_mutex);
861 if (ret)
862 return ERR_PTR(-ret);
863 sp->stats.finished = 1;
864 ret = pthread_mutex_unlock(&sp->progress_mutex);
865 if (ret)
866 return ERR_PTR(-ret);
868 return NULL;
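/* Query the kernel for the current scrub progress of one device */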
871 static void *progress_one_dev(void *ctx)
873 struct scrub_progress *sp = ctx;
875 sp->ret = ioctl(sp->fd, BTRFS_IOC_SCRUB_PROGRESS, &sp->scrub_args);
876 sp->ioctl_errno = errno;
878 return NULL;
881 /* nb: returns a negative errno via ERR_PTR */
882 static void *scrub_progress_cycle(void *ctx)
884 int ret = 0;
885 int perr = 0; /* positive / pthread error returns */
886 int old;
887 int i;
888 char fsid[BTRFS_UUID_UNPARSED_SIZE];
889 struct scrub_progress *sp;
890 struct scrub_progress *sp_last;
891 struct scrub_progress *sp_shared;
892 struct timeval tv;
893 struct scrub_progress_cycle *spc = ctx;
894 int ndev = spc->fi->num_devices;
895 int this = 1;
896 int last = 0;
897 int peer_fd = -1;
898 struct pollfd accept_poll_fd = {
899 .fd = spc->prg_fd,
900 .events = POLLIN,
901 .revents = 0,
903 struct pollfd write_poll_fd = {
904 .events = POLLOUT,
905 .revents = 0,
907 struct sockaddr_un peer;
908 socklen_t peer_size = sizeof(peer);
910 perr = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old);
911 if (perr)
912 goto out;
914 uuid_unparse(spc->fi->fsid, fsid);
916 for (i = 0; i < ndev; ++i) {
917 sp = &spc->progress[i];
918 sp_last = &spc->progress[i + ndev];
919 sp_shared = &spc->shared_progress[i];
920 sp->scrub_args.devid = sp_last->scrub_args.devid =
921 sp_shared->scrub_args.devid;
922 sp->fd = sp_last->fd = spc->fdmnt;
923 sp->stats.t_start = sp_last->stats.t_start =
924 sp_shared->stats.t_start;
925 sp->resumed = sp_last->resumed = sp_shared->resumed;
926 sp->skip = sp_last->skip = sp_shared->skip;
927 sp->stats.finished = sp_last->stats.finished =
928 sp_shared->stats.finished;
931 while (1) {
932 ret = poll(&accept_poll_fd, 1, 5 * 1000);
933 if (ret == -1) {
934 ret = -errno;
935 goto out;
937 if (ret)
938 peer_fd = accept(spc->prg_fd, (struct sockaddr *)&peer,
939 &peer_size);
940 gettimeofday(&tv, NULL);
941 this = (this + 1)%2;
942 last = (last + 1)%2;
943 for (i = 0; i < ndev; ++i) {
944 sp = &spc->progress[this * ndev + i];
945 sp_last = &spc->progress[last * ndev + i];
946 sp_shared = &spc->shared_progress[i];
947 if (sp->stats.finished)
948 continue;
949 progress_one_dev(sp);
950 sp->stats.duration = tv.tv_sec - sp->stats.t_start;
951 if (!sp->ret)
952 continue;
953 if (sp->ioctl_errno != ENOTCONN &&
954 sp->ioctl_errno != ENODEV) {
955 ret = -sp->ioctl_errno;
956 goto out;
959 * scrub finished or device removed, check the
960 * finished flag. if unset, just use the last
961 * result we got for the current write and go
962 * on. flag should be set on next cycle, then.
964 perr = pthread_setcancelstate(
965 PTHREAD_CANCEL_DISABLE, &old);
966 if (perr)
967 goto out;
968 perr = pthread_mutex_lock(&sp_shared->progress_mutex);
969 if (perr)
970 goto out;
971 if (!sp_shared->stats.finished) {
972 perr = pthread_mutex_unlock(
973 &sp_shared->progress_mutex);
974 if (perr)
975 goto out;
976 perr = pthread_setcancelstate(
977 PTHREAD_CANCEL_ENABLE, &old);
978 if (perr)
979 goto out;
980 memcpy(sp, sp_last, sizeof(*sp));
981 continue;
983 perr = pthread_mutex_unlock(&sp_shared->progress_mutex);
984 if (perr)
985 goto out;
986 perr = pthread_setcancelstate(
987 PTHREAD_CANCEL_ENABLE, &old);
988 if (perr)
989 goto out;
990 memcpy(sp, sp_shared, sizeof(*sp));
991 memcpy(sp_last, sp_shared, sizeof(*sp));
993 if (peer_fd != -1) {
994 write_poll_fd.fd = peer_fd;
995 ret = poll(&write_poll_fd, 1, 0);
996 if (ret == -1) {
997 ret = -errno;
998 goto out;
1000 if (ret) {
1001 ret = scrub_write_file(
1002 peer_fd, fsid,
1003 &spc->progress[this * ndev], ndev);
1004 if (ret)
1005 goto out;
1007 close(peer_fd);
1008 peer_fd = -1;
1010 if (!spc->do_record)
1011 continue;
1012 ret = scrub_write_progress(spc->write_mutex, fsid,
1013 &spc->progress[this * ndev], ndev);
1014 if (ret)
1015 goto out;
1017 out:
1018 if (peer_fd != -1)
1019 close(peer_fd);
1020 if (perr)
1021 ret = -perr;
1022 return ERR_PTR(ret);
1025 static struct scrub_file_record *last_dev_scrub(
1026 struct scrub_file_record *const *const past_scrubs, u64 devid)
1028 int i;
1030 if (!past_scrubs || IS_ERR(past_scrubs))
1031 return NULL;
1033 for (i = 0; past_scrubs[i]; ++i)
1034 if (past_scrubs[i]->devid == devid)
1035 return past_scrubs[i];
1037 return NULL;
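/*
 * Create all missing parent directories of path; the final path component
 * itself is not created.
 */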
1040 static int mkdir_p(char *path)
1042 int i;
1043 int ret;
1045 for (i = 1; i < strlen(path); ++i) {
1046 if (path[i] != '/')
1047 continue;
1048 path[i] = '\0';
1049 ret = mkdir(path, 0777);
1050 if (ret && errno != EEXIST)
1051 return -errno;
1052 path[i] = '/';
1055 return 0;
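/*
 * Check the loaded status records: a device whose last record is neither
 * finished nor canceled indicates an interrupted or still running scrub.
 */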
1058 static int is_scrub_running_on_fs(struct btrfs_ioctl_fs_info_args *fi_args,
1059 struct btrfs_ioctl_dev_info_args *di_args,
1060 struct scrub_file_record **past_scrubs)
1062 int i;
1064 if (!fi_args || !di_args || !past_scrubs)
1065 return 0;
1067 for (i = 0; i < fi_args->num_devices; i++) {
1068 struct scrub_file_record *sfr =
1069 last_dev_scrub(past_scrubs, di_args[i].devid);
1071 if (!sfr)
1072 continue;
1073 if (!(sfr->stats.finished || sfr->stats.canceled))
1074 return 1;
1076 return 0;
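/*
 * Ask the kernel for scrub progress on each device; any successful ioctl
 * means a scrub is currently running on this filesystem.
 */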
1079 static int is_scrub_running_in_kernel(int fd,
1080 struct btrfs_ioctl_dev_info_args *di_args, u64 max_devices)
1082 struct scrub_progress sp;
1083 int i;
1084 int ret;
1086 for (i = 0; i < max_devices; i++) {
1087 memset(&sp, 0, sizeof(sp));
1088 sp.scrub_args.devid = di_args[i].devid;
1089 ret = ioctl(fd, BTRFS_IOC_SCRUB_PROGRESS, &sp.scrub_args);
1090 if (!ret)
1091 return 1;
1094 return 0;
1097 static const char * const cmd_scrub_start_usage[];
1098 static const char * const cmd_scrub_resume_usage[];
1100 static int scrub_start(int argc, char **argv, int resume)
1102 int fdmnt;
1103 int prg_fd = -1;
1104 int fdres = -1;
1105 int ret;
1106 pid_t pid;
1107 int c;
1108 int i;
1109 int err = 0;
1110 int e_uncorrectable = 0;
1111 int e_correctable = 0;
1112 int print_raw = 0;
1113 char *path;
1114 int do_background = 1;
1115 int do_wait = 0;
1116 int do_print = 0;
1117 int do_quiet = 0;
1118 int do_record = 1;
1119 int readonly = 0;
1120 int do_stats_per_dev = 0;
1121 int ioprio_class = IOPRIO_CLASS_IDLE;
1122 int ioprio_classdata = 0;
1123 int n_start = 0;
1124 int n_skip = 0;
1125 int n_resume = 0;
1126 struct btrfs_ioctl_fs_info_args fi_args;
1127 struct btrfs_ioctl_dev_info_args *di_args = NULL;
1128 struct scrub_progress *sp = NULL;
1129 struct scrub_fs_stat fs_stat;
1130 struct timeval tv;
1131 struct sockaddr_un addr = {
1132 .sun_family = AF_UNIX,
1134 pthread_t *t_devs = NULL;
1135 pthread_t t_prog;
1136 struct scrub_file_record **past_scrubs = NULL;
1137 struct scrub_file_record *last_scrub = NULL;
1138 char *datafile = strdup(SCRUB_DATA_FILE);
1139 char fsid[BTRFS_UUID_UNPARSED_SIZE];
1140 char sock_path[PATH_MAX] = "";
1141 struct scrub_progress_cycle spc;
1142 pthread_mutex_t spc_write_mutex = PTHREAD_MUTEX_INITIALIZER;
1143 void *terr;
1144 u64 devid;
1145 DIR *dirstream = NULL;
1146 int force = 0;
1147 int nothing_to_resume = 0;
1149 while ((c = getopt(argc, argv, "BdqrRc:n:f")) != -1) {
1150 switch (c) {
1151 case 'B':
1152 do_background = 0;
1153 do_wait = 1;
1154 do_print = 1;
1155 break;
1156 case 'd':
1157 do_stats_per_dev = 1;
1158 break;
1159 case 'q':
1160 do_quiet = 1;
1161 break;
1162 case 'r':
1163 readonly = 1;
1164 break;
1165 case 'R':
1166 print_raw = 1;
1167 break;
1168 case 'c':
1169 ioprio_class = (int)strtol(optarg, NULL, 10);
1170 break;
1171 case 'n':
1172 ioprio_classdata = (int)strtol(optarg, NULL, 10);
1173 break;
1174 case 'f':
1175 force = 1;
1176 break;
1177 case '?':
1178 default:
1179 usage(resume ? cmd_scrub_resume_usage :
1180 cmd_scrub_start_usage);
1184 /* try to catch most error cases before forking */
1186 if (check_argc_exact(argc - optind, 1)) {
1187 usage(resume ? cmd_scrub_resume_usage :
1188 cmd_scrub_start_usage);
1191 spc.progress = NULL;
1192 if (do_quiet && do_print)
1193 do_print = 0;
1195 if (mkdir_p(datafile)) {
1196 warning_on(!do_quiet,
1197 "cannot create scrub data file, mkdir %s failed: %m. Status recording disabled",
1198 datafile);
1199 do_record = 0;
1201 free(datafile);
1203 path = argv[optind];
1205 fdmnt = open_path_or_dev_mnt(path, &dirstream, !do_quiet);
1206 if (fdmnt < 0)
1207 return 1;
1209 ret = get_fs_info(path, &fi_args, &di_args);
1210 if (ret) {
1211 error_on(!do_quiet,
1212 "getting dev info for scrub failed: %s",
1213 strerror(-ret));
1214 err = 1;
1215 goto out;
1217 if (!fi_args.num_devices) {
1218 error_on(!do_quiet, "no devices found");
1219 err = 1;
1220 goto out;
1223 uuid_unparse(fi_args.fsid, fsid);
1224 fdres = scrub_open_file_r(SCRUB_DATA_FILE, fsid);
1225 if (fdres < 0 && fdres != -ENOENT) {
1226 warning_on(!do_quiet, "failed to open status file: %s",
1227 strerror(-fdres));
1228 } else if (fdres >= 0) {
1229 past_scrubs = scrub_read_file(fdres, !do_quiet);
1230 if (IS_ERR(past_scrubs))
1231 warning_on(!do_quiet, "failed to read status file: %s",
1232 strerror(-PTR_ERR(past_scrubs)));
1233 close(fdres);
1237 * Check for stale information in the status file, i.e. if it's
1238 * canceled=0, finished=0 but no scrub is running.
1240 if (!is_scrub_running_in_kernel(fdmnt, di_args, fi_args.num_devices))
1241 force = 1;
1244 * check whether any involved device is already busy running a
1245 * scrub. This would cause damaged status messages and the state
1246 * "aborted" without the explanation that a scrub was already
1247 * running. Therefore check it first, prevent it and give some
1248 * feedback to the user if scrub is already running.
1249 * Note that if scrub is started with a block device as the
1250 * parameter, only that particular block device is checked. It
1251 * is a normal mode of operation to start scrub on multiple
1252 * single devices, there is no reason to prevent this.
1254 if (!force && is_scrub_running_on_fs(&fi_args, di_args, past_scrubs)) {
1255 error_on(!do_quiet,
1256 "Scrub is already running.\n"
1257 "To cancel use 'btrfs scrub cancel %s'.\n"
1258 "To see the status use 'btrfs scrub status [-d] %s'",
1259 path, path);
1260 err = 1;
1261 goto out;
1264 t_devs = malloc(fi_args.num_devices * sizeof(*t_devs));
1265 sp = calloc(fi_args.num_devices, sizeof(*sp));
1266 spc.progress = calloc(fi_args.num_devices * 2, sizeof(*spc.progress));
1268 if (!t_devs || !sp || !spc.progress) {
1269 error_on(!do_quiet, "scrub failed: %m");
1270 err = 1;
1271 goto out;
1274 for (i = 0; i < fi_args.num_devices; ++i) {
1275 devid = di_args[i].devid;
1276 ret = pthread_mutex_init(&sp[i].progress_mutex, NULL);
1277 if (ret) {
1278 error_on(!do_quiet, "pthread_mutex_init failed: %s",
1279 strerror(ret));
1280 err = 1;
1281 goto out;
1283 last_scrub = last_dev_scrub(past_scrubs, devid);
1284 sp[i].scrub_args.devid = devid;
1285 sp[i].fd = fdmnt;
1286 if (resume && last_scrub && (last_scrub->stats.canceled ||
1287 !last_scrub->stats.finished)) {
1288 ++n_resume;
1289 sp[i].scrub_args.start = last_scrub->p.last_physical;
1290 sp[i].resumed = last_scrub;
1291 } else if (resume) {
1292 ++n_skip;
1293 sp[i].skip = 1;
1294 sp[i].resumed = last_scrub;
1295 continue;
1296 } else {
1297 ++n_start;
1298 sp[i].scrub_args.start = 0ll;
1299 sp[i].resumed = NULL;
1301 sp[i].skip = 0;
1302 sp[i].scrub_args.end = (u64)-1ll;
1303 sp[i].scrub_args.flags = readonly ? BTRFS_SCRUB_READONLY : 0;
1304 sp[i].ioprio_class = ioprio_class;
1305 sp[i].ioprio_classdata = ioprio_classdata;
1308 if (!n_start && !n_resume) {
1309 if (!do_quiet)
1310 printf("scrub: nothing to resume for %s, fsid %s\n",
1311 path, fsid);
1312 nothing_to_resume = 1;
1313 goto out;
1316 ret = prg_fd = socket(AF_UNIX, SOCK_STREAM, 0);
1317 while (ret != -1) {
1318 ret = scrub_datafile(SCRUB_PROGRESS_SOCKET_PATH, fsid, NULL,
1319 sock_path, sizeof(sock_path));
1320 /* ignore EOVERFLOW, try using a shorter path for the socket */
1321 addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
1322 strncpy(addr.sun_path, sock_path, sizeof(addr.sun_path) - 1);
1323 ret = bind(prg_fd, (struct sockaddr *)&addr, sizeof(addr));
1324 if (ret != -1 || errno != EADDRINUSE)
1325 break;
1327 * bind failed with EADDRINUSE. so let's see if anyone answers
1328 * when we make a call to the socket ...
1330 ret = connect(prg_fd, (struct sockaddr *)&addr, sizeof(addr));
1331 if (!ret || errno != ECONNREFUSED) {
1332 /* ... yes, so scrub must be running. error out */
1333 error("scrub already running");
1334 close(prg_fd);
1335 prg_fd = -1;
1336 goto out;
1339 * ... no, this means someone left us alone with an unused
1340 * socket in the file system. remove it and try again.
1342 ret = unlink(sock_path);
1344 if (ret != -1)
1345 ret = listen(prg_fd, 100);
1346 if (ret == -1) {
1347 warning_on(!do_quiet,
1348 "failed to open the progress status socket at %s: %m. Progress cannot be queried",
1349 sock_path[0] ? sock_path :
1350 SCRUB_PROGRESS_SOCKET_PATH);
1351 if (prg_fd != -1) {
1352 close(prg_fd);
1353 prg_fd = -1;
1354 if (sock_path[0])
1355 unlink(sock_path);
1359 if (do_record) {
1360 /* write all-zero progress file for a start */
1361 ret = scrub_write_progress(&spc_write_mutex, fsid, sp,
1362 fi_args.num_devices);
1363 if (ret) {
1364 warning_on(!do_quiet,
1365 "failed to write the progress status file: %s. Status recording disabled",
1366 strerror(-ret));
1367 do_record = 0;
1371 if (do_background) {
1372 pid = fork();
1373 if (pid == -1) {
1374 error_on(!do_quiet, "cannot scrub, fork failed: %m");
1375 err = 1;
1376 goto out;
1379 if (pid) {
1380 int stat;
1381 scrub_handle_sigint_parent();
1382 if (!do_quiet)
1383 printf("scrub %s on %s, fsid %s (pid=%d)\n",
1384 n_start ? "started" : "resumed",
1385 path, fsid, pid);
1386 if (!do_wait) {
1387 err = 0;
1388 goto out;
1390 ret = wait(&stat);
1391 if (ret != pid) {
1392 error_on(!do_quiet, "wait failed (ret=%d): %m",
1393 ret);
1394 err = 1;
1395 goto out;
1397 if (!WIFEXITED(stat) || WEXITSTATUS(stat)) {
1398 error_on(!do_quiet, "scrub process failed");
1399 err = WIFEXITED(stat) ? WEXITSTATUS(stat) : -1;
1400 goto out;
1402 err = 0;
1403 goto out;
1407 scrub_handle_sigint_child(fdmnt);
1409 for (i = 0; i < fi_args.num_devices; ++i) {
1410 if (sp[i].skip) {
1411 sp[i].scrub_args.progress = sp[i].resumed->p;
1412 sp[i].stats = sp[i].resumed->stats;
1413 sp[i].ret = 0;
1414 sp[i].stats.finished = 1;
1415 continue;
1417 devid = di_args[i].devid;
1418 gettimeofday(&tv, NULL);
1419 sp[i].stats.t_start = tv.tv_sec;
1420 ret = pthread_create(&t_devs[i], NULL,
1421 scrub_one_dev, &sp[i]);
1422 if (ret) {
1423 if (do_print)
1424 error("creating scrub_one_dev[%llu] thread failed: %s",
1425 devid, strerror(ret));
1426 err = 1;
1427 goto out;
1431 spc.fdmnt = fdmnt;
1432 spc.prg_fd = prg_fd;
1433 spc.do_record = do_record;
1434 spc.write_mutex = &spc_write_mutex;
1435 spc.shared_progress = sp;
1436 spc.fi = &fi_args;
1437 ret = pthread_create(&t_prog, NULL, scrub_progress_cycle, &spc);
1438 if (ret) {
1439 if (do_print)
1440 error("creating progress thread failed: %s",
1441 strerror(ret));
1442 err = 1;
1443 goto out;
1446 err = 0;
1447 for (i = 0; i < fi_args.num_devices; ++i) {
1448 if (sp[i].skip)
1449 continue;
1450 devid = di_args[i].devid;
1451 ret = pthread_join(t_devs[i], NULL);
1452 if (ret) {
1453 if (do_print)
1454 error("pthread_join failed for scrub_one_dev[%llu]: %s",
1455 devid, strerror(ret));
1456 ++err;
1457 continue;
1459 if (sp[i].ret) {
1460 switch (sp[i].ioctl_errno) {
1461 case ENODEV:
1462 if (do_print)
1463 warning("device %lld not present",
1464 devid);
1465 continue;
1466 case ECANCELED:
1467 ++err;
1468 break;
1469 default:
1470 if (do_print)
1471 error("scrubbing %s failed for device id %lld: ret=%d, errno=%d (%s)",
1472 path, devid,
1473 sp[i].ret, sp[i].ioctl_errno,
1474 strerror(sp[i].ioctl_errno));
1475 ++err;
1476 continue;
1479 if (sp[i].scrub_args.progress.uncorrectable_errors > 0)
1480 e_uncorrectable++;
1481 if (sp[i].scrub_args.progress.corrected_errors > 0
1482 || sp[i].scrub_args.progress.unverified_errors > 0)
1483 e_correctable++;
1486 if (do_print) {
1487 const char *append = "done";
1488 if (!do_stats_per_dev)
1489 init_fs_stat(&fs_stat);
1490 for (i = 0; i < fi_args.num_devices; ++i) {
1491 if (do_stats_per_dev) {
1492 print_scrub_dev(&di_args[i],
1493 &sp[i].scrub_args.progress,
1494 print_raw,
1495 sp[i].ret ? "canceled" : "done",
1496 &sp[i].stats);
1497 } else {
1498 if (sp[i].ret)
1499 append = "canceled";
1500 add_to_fs_stat(&sp[i].scrub_args.progress,
1501 &sp[i].stats, &fs_stat);
1504 if (!do_stats_per_dev) {
1505 printf("scrub %s for %s\n", append, fsid);
1506 print_fs_stat(&fs_stat, print_raw);
1510 ret = pthread_cancel(t_prog);
1511 if (!ret)
1512 ret = pthread_join(t_prog, &terr);
1514 /* check for errors from the handling of the progress thread */
1515 if (do_print && ret) {
1516 error("progress thread handling failed: %s",
1517 strerror(ret));
1520 /* check for errors returned from the progress thread itself */
1521 if (do_print && terr && terr != PTHREAD_CANCELED)
1522 error("recording progress failed: %s",
1523 strerror(-PTR_ERR(terr)));
1525 if (do_record) {
1526 ret = scrub_write_progress(&spc_write_mutex, fsid, sp,
1527 fi_args.num_devices);
1528 if (ret && do_print)
1529 error("failed to record the result: %s",
1530 strerror(-ret));
1533 scrub_handle_sigint_child(-1);
1535 out:
1536 free_history(past_scrubs);
1537 free(di_args);
1538 free(t_devs);
1539 free(sp);
1540 free(spc.progress);
1541 if (prg_fd > -1) {
1542 close(prg_fd);
1543 if (sock_path[0])
1544 unlink(sock_path);
1546 close_file_or_dir(fdmnt, dirstream);
1548 if (err)
1549 return 1;
1550 if (nothing_to_resume)
1551 return 2;
1552 if (e_uncorrectable) {
1553 error_on(!do_quiet, "there are uncorrectable errors");
1554 return 3;
1556 if (e_correctable)
1557 warning_on(!do_quiet,
1558 "errors detected during scrubbing, corrected");
1560 return 0;
1563 static const char * const cmd_scrub_start_usage[] = {
1564 "btrfs scrub start [-BdqrRf] [-c ioprio_class -n ioprio_classdata] <path>|<device>",
1565 "Start a new scrub. If a scrub is already running, the new one fails.",
1567 "-B do not background",
1568 "-d stats per device (-B only)",
1569 "-q be quiet",
1570 "-r read only mode",
1571 "-R raw print mode, print full data instead of summary",
1572 "-c set ioprio class (see ionice(1) manpage)",
1573 "-n set ioprio classdata (see ionice(1) manpage)",
1574 "-f force starting new scrub even if a scrub is already running",
1575 " this is useful when scrub stats record file is damaged",
1576 NULL
1579 static int cmd_scrub_start(int argc, char **argv)
1581 return scrub_start(argc, argv, 0);
1584 static const char * const cmd_scrub_cancel_usage[] = {
1585 "btrfs scrub cancel <path>|<device>",
1586 "Cancel a running scrub",
1587 NULL
1590 static int cmd_scrub_cancel(int argc, char **argv)
1592 char *path;
1593 int ret;
1594 int fdmnt = -1;
1595 DIR *dirstream = NULL;
1597 clean_args_no_options(argc, argv, cmd_scrub_cancel_usage);
1599 if (check_argc_exact(argc - optind, 1))
1600 usage(cmd_scrub_cancel_usage);
1602 path = argv[optind];
1604 fdmnt = open_path_or_dev_mnt(path, &dirstream, 1);
1605 if (fdmnt < 0) {
1606 ret = 1;
1607 goto out;
1610 ret = ioctl(fdmnt, BTRFS_IOC_SCRUB_CANCEL, NULL);
1612 if (ret < 0) {
1613 error("scrub cancel failed on %s: %s", path,
1614 errno == ENOTCONN ? "not running" : strerror(errno));
1615 if (errno == ENOTCONN)
1616 ret = 2;
1617 else
1618 ret = 1;
1619 goto out;
1622 ret = 0;
1623 printf("scrub cancelled\n");
1625 out:
1626 close_file_or_dir(fdmnt, dirstream);
1627 return ret;
1630 static const char * const cmd_scrub_resume_usage[] = {
1631 "btrfs scrub resume [-BdqrR] [-c ioprio_class -n ioprio_classdata] <path>|<device>",
1632 "Resume previously canceled or interrupted scrub",
1634 "-B do not background",
1635 "-d stats per device (-B only)",
1636 "-q be quiet",
1637 "-r read only mode",
1638 "-R raw print mode, print full data instead of summary",
1639 "-c set ioprio class (see ionice(1) manpage)",
1640 "-n set ioprio classdata (see ionice(1) manpage)",
1641 NULL
1644 static int cmd_scrub_resume(int argc, char **argv)
1646 return scrub_start(argc, argv, 1);
1649 static const char * const cmd_scrub_status_usage[] = {
1650 "btrfs scrub status [-dR] <path>|<device>",
1651 "Show status of running or finished scrub",
1653 "-d stats per device",
1654 "-R print raw stats",
1655 NULL
1658 static int cmd_scrub_status(int argc, char **argv)
1660 char *path;
1661 struct btrfs_ioctl_fs_info_args fi_args;
1662 struct btrfs_ioctl_dev_info_args *di_args = NULL;
1663 struct scrub_file_record **past_scrubs = NULL;
1664 struct scrub_file_record *last_scrub;
1665 struct scrub_fs_stat fs_stat;
1666 struct sockaddr_un addr = {
1667 .sun_family = AF_UNIX,
1669 int in_progress;
1670 int ret;
1671 int i;
1672 int fdmnt;
1673 int print_raw = 0;
1674 int do_stats_per_dev = 0;
1675 int c;
1676 char fsid[BTRFS_UUID_UNPARSED_SIZE];
1677 int fdres = -1;
1678 int err = 0;
1679 DIR *dirstream = NULL;
1681 while ((c = getopt(argc, argv, "dR")) != -1) {
1682 switch (c) {
1683 case 'd':
1684 do_stats_per_dev = 1;
1685 break;
1686 case 'R':
1687 print_raw = 1;
1688 break;
1689 case '?':
1690 default:
1691 usage(cmd_scrub_status_usage);
1695 if (check_argc_exact(argc - optind, 1))
1696 usage(cmd_scrub_status_usage);
1698 path = argv[optind];
1700 fdmnt = open_path_or_dev_mnt(path, &dirstream, 1);
1701 if (fdmnt < 0)
1702 return 1;
1704 ret = get_fs_info(path, &fi_args, &di_args);
1705 if (ret) {
1706 error("getting dev info for scrub failed: %s",
1707 strerror(-ret));
1708 err = 1;
1709 goto out;
1711 if (!fi_args.num_devices) {
1712 error("no devices found");
1713 err = 1;
1714 goto out;
1717 uuid_unparse(fi_args.fsid, fsid);
1719 fdres = socket(AF_UNIX, SOCK_STREAM, 0);
1720 if (fdres == -1) {
1721 error("failed to create socket to receive progress information: %m");
1722 err = 1;
1723 goto out;
1725 scrub_datafile(SCRUB_PROGRESS_SOCKET_PATH, fsid,
1726 NULL, addr.sun_path, sizeof(addr.sun_path));
1727 /* ignore EOVERFLOW, just use shorter name and hope for the best */
1728 addr.sun_path[sizeof(addr.sun_path) - 1] = '\0';
1729 ret = connect(fdres, (struct sockaddr *)&addr, sizeof(addr));
1730 if (ret == -1) {
1731 close(fdres);
1732 fdres = scrub_open_file_r(SCRUB_DATA_FILE, fsid);
1733 if (fdres < 0 && fdres != -ENOENT) {
1734 warning("failed to open status file: %s",
1735 strerror(-fdres));
1736 err = 1;
1737 goto out;
1741 if (fdres >= 0) {
1742 past_scrubs = scrub_read_file(fdres, 1);
1743 if (IS_ERR(past_scrubs))
1744 warning("failed to read status: %s",
1745 strerror(-PTR_ERR(past_scrubs)));
1747 in_progress = is_scrub_running_in_kernel(fdmnt, di_args, fi_args.num_devices);
1749 printf("scrub status for %s\n", fsid);
1751 if (do_stats_per_dev) {
1752 for (i = 0; i < fi_args.num_devices; ++i) {
1753 last_scrub = last_dev_scrub(past_scrubs,
1754 di_args[i].devid);
1755 if (!last_scrub) {
1756 print_scrub_dev(&di_args[i], NULL, print_raw,
1757 NULL, NULL);
1758 continue;
1760 last_scrub->stats.in_progress = in_progress;
1761 print_scrub_dev(&di_args[i], &last_scrub->p, print_raw,
1762 last_scrub->stats.finished ?
1763 "history" : "status",
1764 &last_scrub->stats);
1766 } else {
1767 init_fs_stat(&fs_stat);
1768 fs_stat.s.in_progress = in_progress;
1769 for (i = 0; i < fi_args.num_devices; ++i) {
1770 last_scrub = last_dev_scrub(past_scrubs,
1771 di_args[i].devid);
1772 if (!last_scrub)
1773 continue;
1774 add_to_fs_stat(&last_scrub->p, &last_scrub->stats,
1775 &fs_stat);
1777 print_fs_stat(&fs_stat, print_raw);
1780 out:
1781 free_history(past_scrubs);
1782 free(di_args);
1783 if (fdres > -1)
1784 close(fdres);
1785 close_file_or_dir(fdmnt, dirstream);
1787 return !!err;
1790 static const char scrub_cmd_group_info[] =
1791 "verify checksums of data and metadata";
1793 const struct cmd_group scrub_cmd_group = {
1794 scrub_cmd_group_usage, scrub_cmd_group_info, {
1795 { "start", cmd_scrub_start, cmd_scrub_start_usage, NULL, 0 },
1796 { "cancel", cmd_scrub_cancel, cmd_scrub_cancel_usage, NULL, 0 },
1797 { "resume", cmd_scrub_resume, cmd_scrub_resume_usage, NULL, 0 },
1798 { "status", cmd_scrub_status, cmd_scrub_status_usage, NULL, 0 },
1799 NULL_CMD_STRUCT
1803 int cmd_scrub(int argc, char **argv)
1805 return handle_command_group(&scrub_cmd_group, argc, argv);