module/zfs/spa_errlog.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2014, Delphix. All rights reserved.
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2021, 2022, George Amanakis. All rights reserved.
 */
/*
 * Routines to manage the on-disk persistent error log.
 *
 * Each pool stores a log of all logical data errors seen during normal
 * operation. This is actually the union of two distinct logs: the last log,
 * and the current log. All errors seen are logged to the current log. When a
 * scrub completes, the current log becomes the last log, the last log is
 * thrown out, and the current log is reinitialized. This way, if an error is
 * somehow corrected, a new scrub will show that it no longer exists, and it
 * will be deleted from the log when the scrub completes.
 *
 * The log is stored using a ZAP object whose key is a string form of the
 * zbookmark_phys tuple (objset, object, level, blkid), and whose value is an
 * optional 'objset:object' human-readable string describing the data. When an
 * error is first logged, this string will be empty, indicating that no name is
 * known. This prevents us from having to issue a potentially large amount of
 * I/O to discover the object name during an error path. Instead, we do the
 * calculation when the data is requested, storing the result so future queries
 * will be faster.
 *
 * If the head_errlog feature is enabled, a different on-disk format is used.
 * The error log of each head dataset is stored separately in the zap object
 * and keyed by the head id. This enables listing every dataset affected in
 * userland. In order to be able to track whether an error block has been
 * modified or added to snapshots since it was marked as an error, a new tuple
 * is introduced: zbookmark_err_phys_t. It allows the storage of the birth
 * transaction group of an error block on-disk. The birth transaction group is
 * used by check_filesystem() to assess whether this block was freed,
 * re-written or added to a snapshot since its marking as an error.
 *
 * This log is then shipped into an nvlist where the key is the dataset name
 * and the value is the object name. Userland is then responsible for
 * uniquifying this list and displaying it to the user.
 */
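/*
 * For illustration, an assumed example (not taken from a real pool): with
 * the legacy format, a bookmark {objset 0x36, object 0x1b, level 0,
 * blkid 0x40} is stored under the ZAP key "36:1b:0:40" (see
 * bookmark_to_name() below). With head_errlog enabled, the outer ZAP is
 * keyed by the head dataset id, and each per-dataset ZAP uses keys of the
 * form object:level:blkid:birth, e.g. "1b:0:40:12a" (see errphys_to_name()).
 */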
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_objset.h>
#include <sys/dbuf.h>
#include <sys/zfs_znode.h>
#define	NAME_MAX_LEN 64

typedef struct clones {
	uint64_t clone_ds;
	list_node_t node;
} clones_t;
/*
 * spa_upgrade_errlog_limit : A zfs module parameter that controls the number
 * of on-disk error log entries that will be converted to the new
 * format when enabling head_errlog. Defaults to 0 which converts
 * all log entries.
 */
static uint_t spa_upgrade_errlog_limit = 0;
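/*
 * A hedged usage sketch: on Linux this tunable is exposed as a standard
 * module parameter, so converting at most 1000 entries per log would be
 * requested with (path assumes the stock OpenZFS build):
 *
 *	echo 1000 > /sys/module/zfs/parameters/spa_upgrade_errlog_limit
 */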
/*
 * Convert a bookmark to a string.
 */
static void
bookmark_to_name(zbookmark_phys_t *zb, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
	    (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid);
}
/*
 * Convert an err_phys to a string.
 */
static void
errphys_to_name(zbookmark_err_phys_t *zep, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zep->zb_object, (u_longlong_t)zep->zb_level,
	    (u_longlong_t)zep->zb_blkid, (u_longlong_t)zep->zb_birth);
}
/*
 * Convert a string to an err_phys.
 */
void
name_to_errphys(char *buf, zbookmark_err_phys_t *zep)
{
	zep->zb_object = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zep->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zep->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zep->zb_birth = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}
/*
 * Convert a string to a bookmark.
 */
static void
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
{
	zb->zb_objset = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zb->zb_object = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}
void
zep_to_zb(uint64_t dataset, zbookmark_err_phys_t *zep, zbookmark_phys_t *zb)
{
	zb->zb_objset = dataset;
	zb->zb_object = zep->zb_object;
	zb->zb_level = zep->zb_level;
	zb->zb_blkid = zep->zb_blkid;
}
static void
name_to_object(char *buf, uint64_t *obj)
{
	*obj = zfs_strtonum(buf, &buf);
	ASSERT(*buf == '\0');
}
/*
 * Retrieve the head filesystem.
 */
static int get_head_ds(spa_t *spa, uint64_t dsobj, uint64_t *head_ds)
{
	dsl_dataset_t *ds;
	int error = dsl_dataset_hold_obj_flags(spa->spa_dsl_pool,
	    dsobj, DS_HOLD_FLAG_DECRYPT, FTAG, &ds);

	if (error != 0)
		return (error);

	ASSERT(head_ds);
	*head_ds = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	return (error);
}
/*
 * Log an uncorrectable error to the persistent error log. We add it to the
 * spa's list of pending errors. The changes are actually synced out to disk
 * during spa_errlog_sync().
 */
void
spa_log_error(spa_t *spa, const zbookmark_phys_t *zb, const uint64_t *birth)
{
	spa_error_entry_t search;
	spa_error_entry_t *new;
	avl_tree_t *tree;
	avl_index_t where;

	/*
	 * If we are trying to import a pool, ignore any errors, as we won't be
	 * writing to the pool any time soon.
	 */
	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
		return;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * If we have had a request to rotate the log, log it to the next list
	 * instead of the current one.
	 */
	if (spa->spa_scrub_active || spa->spa_scrub_finished)
		tree = &spa->spa_errlist_scrub;
	else
		tree = &spa->spa_errlist_last;

	search.se_bookmark = *zb;
	if (avl_find(tree, &search, &where) != NULL) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
	new->se_bookmark = *zb;

	/*
	 * If the head_errlog feature is enabled, store the birth txg now. In
	 * case the file is deleted before spa_errlog_sync() runs, we will not
	 * be able to retrieve the birth txg.
	 */
	if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		new->se_zep.zb_object = zb->zb_object;
		new->se_zep.zb_level = zb->zb_level;
		new->se_zep.zb_blkid = zb->zb_blkid;

		/*
		 * birth may end up being NULL, e.g. in zio_done(). We
		 * will handle this in process_error_block().
		 */
		if (birth != NULL)
			new->se_zep.zb_birth = *birth;
	}

	avl_insert(tree, new, where);
	mutex_exit(&spa->spa_errlist_lock);
}
static int
find_birth_txg(dsl_dataset_t *ds, zbookmark_err_phys_t *zep,
    uint64_t *birth_txg)
{
	objset_t *os;
	int error = dmu_objset_from_ds(ds, &os);
	if (error != 0)
		return (error);

	dnode_t *dn;
	blkptr_t bp;

	error = dnode_hold(os, zep->zb_object, FTAG, &dn);
	if (error != 0)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	error = dbuf_dnode_findbp(dn, zep->zb_level, zep->zb_blkid, &bp, NULL,
	    NULL);
	if (error == 0 && BP_IS_HOLE(&bp))
		error = SET_ERROR(ENOENT);

	*birth_txg = bp.blk_birth;
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}
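/*
 * Note: find_birth_txg() reports ENOENT when the bookmarked block is now a
 * hole, i.e. it has been freed. Callers such as check_filesystem() compare
 * the returned birth txg against zep->zb_birth to detect rewrites.
 */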
/*
 * This function finds the oldest affected filesystem containing an error
 * block.
 */
static int
find_top_affected_fs(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    uint64_t *top_affected_fs)
{
	uint64_t oldest_dsobj;
	int error = dsl_dataset_oldest_snapshot(spa, head_ds, zep->zb_birth,
	    &oldest_dsobj);
	if (error != 0)
		return (error);

	dsl_dataset_t *ds;
	error = dsl_dataset_hold_obj_flags(spa->spa_dsl_pool, oldest_dsobj,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
	if (error != 0)
		return (error);

	*top_affected_fs =
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	return (0);
}
#ifdef _KERNEL
/*
 * Copy the bookmark to the end of the user-space buffer which starts at
 * uaddr and has *count unused entries, and decrement *count by 1.
 */
static int
copyout_entry(const zbookmark_phys_t *zb, void *uaddr, uint64_t *count)
{
	if (*count == 0)
		return (SET_ERROR(ENOMEM));

	*count -= 1;
	if (copyout(zb, (char *)uaddr + (*count) * sizeof (zbookmark_phys_t),
	    sizeof (zbookmark_phys_t)) != 0)
		return (SET_ERROR(EFAULT));
	return (0);
}
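/*
 * An illustrative walk-through with assumed values: for a 4-entry buffer
 * and *count == 4, three successive calls store bookmarks at indices 3, 2
 * and 1, leaving *count == 1 (one unused slot at the front of the array).
 * A fourth call fills index 0, and any further call fails with ENOMEM.
 */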
/*
 * Each time the error block is referenced by a snapshot or clone, add a
 * zbookmark_phys_t entry to the userspace array at uaddr. The array is
 * filled from the back and the in-out parameter *count is modified to be the
 * number of unused entries at the beginning of the array. The function
 * scrub_filesystem() is modelled after this one.
 */
static int
check_filesystem(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    void *uaddr, uint64_t *count, list_t *clones_list)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj_flags(dp, head_ds,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t latest_txg;
	uint64_t txg_to_consider = spa->spa_syncing_txg;
	boolean_t check_snapshot = B_TRUE;
	error = find_birth_txg(ds, zep, &latest_txg);

	/*
	 * If find_birth_txg() errors out, let txg_to_consider remain equal
	 * to the spa's syncing txg: if check_filesystem() errors out then
	 * affected snapshots or clones will not be checked.
	 */
	if (error == 0 && zep->zb_birth == latest_txg) {
		/* Block neither free nor rewritten. */
		zbookmark_phys_t zb;
		zep_to_zb(head_ds, zep, &zb);
		error = copyout_entry(&zb, uaddr, count);
		if (error != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			return (error);
		}
		check_snapshot = B_FALSE;
	} else if (error == 0) {
		txg_to_consider = latest_txg;
	}

	/*
	 * Retrieve the number of snapshots if the dataset is not a snapshot.
	 */
	uint64_t snap_count = 0;
	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {

		error = zap_count(spa->spa_meta_objset,
		    dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);

		if (error != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			return (error);
		}
	}

	if (snap_count == 0) {
		/* Filesystem without snapshots. */
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
		return (0);
	}

	uint64_t *snap_obj_array = kmem_zalloc(snap_count * sizeof (uint64_t),
	    KM_SLEEP);

	int aff_snap_count = 0;
	uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
	uint64_t zap_clone = dsl_dir_phys(ds->ds_dir)->dd_clones;

	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	/* Check only snapshots created from this file system. */
	while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
	    snap_obj_txg <= txg_to_consider) {

		error = dsl_dataset_hold_obj_flags(dp, snap_obj,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
		if (error != 0)
			goto out;

		if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != head_ds) {
			snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
			snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			continue;
		}

		boolean_t affected = B_TRUE;
		if (check_snapshot) {
			uint64_t blk_txg;
			error = find_birth_txg(ds, zep, &blk_txg);
			affected = (error == 0 && zep->zb_birth == blk_txg);
		}

		/* Report errors in snapshots. */
		if (affected) {
			snap_obj_array[aff_snap_count] = snap_obj;
			aff_snap_count++;

			zbookmark_phys_t zb;
			zep_to_zb(snap_obj, zep, &zb);
			error = copyout_entry(&zb, uaddr, count);
			if (error != 0) {
				dsl_dataset_rele_flags(ds,
				    DS_HOLD_FLAG_DECRYPT, FTAG);
				goto out;
			}
		}
		snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	}

	/* Free snap_obj_array through the out label rather than leaking it. */
	if (zap_clone == 0 || aff_snap_count == 0) {
		error = 0;
		goto out;
	}

	/* Check clones. */
	zap_cursor_t *zc;
	zap_attribute_t *za;

	zc = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);

	for (zap_cursor_init(zc, spa->spa_meta_objset, zap_clone);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {

		dsl_dataset_t *clone;
		error = dsl_dataset_hold_obj_flags(dp, za->za_first_integer,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &clone);

		if (error != 0)
			break;

		/*
		 * Only clones whose origins were affected could also
		 * have affected snapshots.
		 */
		boolean_t found = B_FALSE;
		for (int i = 0; i < snap_count; i++) {
			if (dsl_dir_phys(clone->ds_dir)->dd_origin_obj
			    == snap_obj_array[i])
				found = B_TRUE;
		}
		dsl_dataset_rele_flags(clone, DS_HOLD_FLAG_DECRYPT, FTAG);

		if (!found)
			continue;

		clones_t *ct = kmem_zalloc(sizeof (*ct), KM_SLEEP);
		ct->clone_ds = za->za_first_integer;
		list_insert_tail(clones_list, ct);
	}

	zap_cursor_fini(zc);
	kmem_free(za, sizeof (*za));
	kmem_free(zc, sizeof (*zc));

out:
	kmem_free(snap_obj_array, snap_count * sizeof (uint64_t));
	return (error);
}
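/*
 * Note on traversal: check_filesystem() does not recurse into clones
 * itself. It only appends candidate clones to clones_list; the caller
 * (process_error_block() below) keeps popping that list and re-invoking
 * check_filesystem(), so the clone tree is walked iteratively rather than
 * recursively.
 */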
static int
process_error_block(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    void *uaddr, uint64_t *count)
{
	/*
	 * If zb_birth == 0 or head_ds == 0 it means we failed to retrieve the
	 * birth txg or the head filesystem of the block pointer. This may
	 * happen e.g. when an encrypted filesystem is not mounted or when
	 * the key is not loaded. In this case do not proceed to
	 * check_filesystem(), instead do the accounting here.
	 */
	if (zep->zb_birth == 0 || head_ds == 0) {
		zbookmark_phys_t zb;
		zep_to_zb(head_ds, zep, &zb);
		int error = copyout_entry(&zb, uaddr, count);
		if (error != 0) {
			return (error);
		}
		return (0);
	}

	uint64_t top_affected_fs;
	uint64_t init_count = *count;
	int error = find_top_affected_fs(spa, head_ds, zep, &top_affected_fs);
	if (error == 0) {
		clones_t *ct;
		list_t clones_list;

		list_create(&clones_list, sizeof (clones_t),
		    offsetof(clones_t, node));

		error = check_filesystem(spa, top_affected_fs, zep,
		    uaddr, count, &clones_list);

		while ((ct = list_remove_head(&clones_list)) != NULL) {
			error = check_filesystem(spa, ct->clone_ds, zep,
			    uaddr, count, &clones_list);
			kmem_free(ct, sizeof (*ct));

			if (error) {
				while (!list_is_empty(&clones_list)) {
					ct = list_remove_head(&clones_list);
					kmem_free(ct, sizeof (*ct));
				}
				break;
			}
		}

		list_destroy(&clones_list);
	}
	if (error == 0 && init_count == *count) {
		/*
		 * If we reach this point, no errors have been detected
		 * in the checked filesystems/snapshots. Before returning mark
		 * the error block to be removed from the error lists and logs.
		 */
		zbookmark_phys_t zb;
		zep_to_zb(head_ds, zep, &zb);
		spa_remove_error(spa, &zb, &zep->zb_birth);
	}

	return (error);
}
#endif

/* Return the number of errors in the last on-disk error log */
uint64_t
spa_get_last_errlog_size(spa_t *spa)
{
	uint64_t total = 0, count;
	mutex_enter(&spa->spa_errlog_lock);

	if (spa->spa_errlog_last != 0 &&
	    zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
	    &count) == 0)
		total += count;
	mutex_exit(&spa->spa_errlog_lock);
	return (total);
}
/*
 * If a healed bookmark matches an entry in the error log we stash it in a tree
 * so that we can later remove the related log entries in sync context.
 */
static void
spa_add_healed_error(spa_t *spa, uint64_t obj, zbookmark_phys_t *healed_zb,
    const uint64_t *birth)
{
	char name[NAME_MAX_LEN];

	if (obj == 0)
		return;

	boolean_t held_list = B_FALSE;
	boolean_t held_log = B_FALSE;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		bookmark_to_name(healed_zb, name, sizeof (name));

		if (zap_contains(spa->spa_meta_objset, healed_zb->zb_objset,
		    name) == 0) {
			if (!MUTEX_HELD(&spa->spa_errlog_lock)) {
				mutex_enter(&spa->spa_errlog_lock);
				held_log = B_TRUE;
			}

			/*
			 * Found an error matching healed zb, add zb to our
			 * tree of healed errors
			 */
			avl_tree_t *tree = &spa->spa_errlist_healed;
			spa_error_entry_t search;
			spa_error_entry_t *new;
			avl_index_t where;
			search.se_bookmark = *healed_zb;
			if (!MUTEX_HELD(&spa->spa_errlist_lock)) {
				mutex_enter(&spa->spa_errlist_lock);
				held_list = B_TRUE;
			}
			if (avl_find(tree, &search, &where) != NULL) {
				if (held_list)
					mutex_exit(&spa->spa_errlist_lock);
				if (held_log)
					mutex_exit(&spa->spa_errlog_lock);
				return;
			}
			new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
			new->se_bookmark = *healed_zb;
			avl_insert(tree, new, where);
			if (held_list)
				mutex_exit(&spa->spa_errlist_lock);
			if (held_log)
				mutex_exit(&spa->spa_errlog_lock);
		}
		return;
	}

	zbookmark_err_phys_t healed_zep;
	healed_zep.zb_object = healed_zb->zb_object;
	healed_zep.zb_level = healed_zb->zb_level;
	healed_zep.zb_blkid = healed_zb->zb_blkid;

	if (birth != NULL)
		healed_zep.zb_birth = *birth;
	else
		healed_zep.zb_birth = 0;

	errphys_to_name(&healed_zep, name, sizeof (name));

	zap_cursor_t zc;
	zap_attribute_t za;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa->spa_errlog_last);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
		if (zap_contains(spa->spa_meta_objset, za.za_first_integer,
		    name) == 0) {
			if (!MUTEX_HELD(&spa->spa_errlog_lock)) {
				mutex_enter(&spa->spa_errlog_lock);
				held_log = B_TRUE;
			}

			avl_tree_t *tree = &spa->spa_errlist_healed;
			spa_error_entry_t search;
			spa_error_entry_t *new;
			avl_index_t where;
			search.se_bookmark = *healed_zb;

			if (!MUTEX_HELD(&spa->spa_errlist_lock)) {
				mutex_enter(&spa->spa_errlist_lock);
				held_list = B_TRUE;
			}

			if (avl_find(tree, &search, &where) != NULL) {
				if (held_list)
					mutex_exit(&spa->spa_errlist_lock);
				if (held_log)
					mutex_exit(&spa->spa_errlog_lock);
				continue;
			}
			new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
			new->se_bookmark = *healed_zb;
			new->se_zep = healed_zep;
			avl_insert(tree, new, where);

			if (held_list)
				mutex_exit(&spa->spa_errlist_lock);
			if (held_log)
				mutex_exit(&spa->spa_errlog_lock);
		}
	}
	zap_cursor_fini(&zc);
}
/*
 * If this error exists in the given tree remove it.
 */
static void
remove_error_from_list(spa_t *spa, avl_tree_t *t, const zbookmark_phys_t *zb)
{
	spa_error_entry_t search, *found;
	avl_index_t where;

	mutex_enter(&spa->spa_errlist_lock);
	search.se_bookmark = *zb;
	if ((found = avl_find(t, &search, &where)) != NULL) {
		avl_remove(t, found);
		kmem_free(found, sizeof (spa_error_entry_t));
	}
	mutex_exit(&spa->spa_errlist_lock);
}
/*
 * Removes all of the recv healed errors from both on-disk error logs
 */
static void
spa_remove_healed_errors(spa_t *spa, avl_tree_t *s, avl_tree_t *l, dmu_tx_t *tx)
{
	char name[NAME_MAX_LEN];
	spa_error_entry_t *se;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(&spa->spa_errlog_lock));

	while ((se = avl_destroy_nodes(&spa->spa_errlist_healed,
	    &cookie)) != NULL) {
		remove_error_from_list(spa, s, &se->se_bookmark);
		remove_error_from_list(spa, l, &se->se_bookmark);

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
			bookmark_to_name(&se->se_bookmark, name, sizeof (name));
			(void) zap_remove(spa->spa_meta_objset,
			    spa->spa_errlog_last, name, tx);
			(void) zap_remove(spa->spa_meta_objset,
			    spa->spa_errlog_scrub, name, tx);
		} else {
			errphys_to_name(&se->se_zep, name, sizeof (name));
			zap_cursor_t zc;
			zap_attribute_t za;
			for (zap_cursor_init(&zc, spa->spa_meta_objset,
			    spa->spa_errlog_last);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				zap_remove(spa->spa_meta_objset,
				    za.za_first_integer, name, tx);
			}
			zap_cursor_fini(&zc);

			for (zap_cursor_init(&zc, spa->spa_meta_objset,
			    spa->spa_errlog_scrub);
			    zap_cursor_retrieve(&zc, &za) == 0;
			    zap_cursor_advance(&zc)) {
				zap_remove(spa->spa_meta_objset,
				    za.za_first_integer, name, tx);
			}
			zap_cursor_fini(&zc);
		}
		kmem_free(se, sizeof (spa_error_entry_t));
	}
}
/*
 * Stash away healed bookmarks to remove them from the on-disk error logs
 * later in spa_remove_healed_errors().
 */
void
spa_remove_error(spa_t *spa, zbookmark_phys_t *zb, const uint64_t *birth)
{
	spa_add_healed_error(spa, spa->spa_errlog_last, zb, birth);
	spa_add_healed_error(spa, spa->spa_errlog_scrub, zb, birth);
}
static uint64_t
approx_errlog_size_impl(spa_t *spa, uint64_t spa_err_obj)
{
	if (spa_err_obj == 0)
		return (0);
	uint64_t total = 0;

	zap_cursor_t zc;
	zap_attribute_t za;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
		uint64_t count;
		if (zap_count(spa->spa_meta_objset, za.za_first_integer,
		    &count) == 0)
			total += count;
	}
	zap_cursor_fini(&zc);
	return (total);
}
/*
 * Return the approximate number of errors currently in the error log. This
 * will be nonzero if there are some errors, but otherwise it may be more
 * or less than the number of entries returned by spa_get_errlog().
 */
uint64_t
spa_approx_errlog_size(spa_t *spa)
{
	uint64_t total = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		mutex_enter(&spa->spa_errlog_lock);
		uint64_t count;
		if (spa->spa_errlog_scrub != 0 &&
		    zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub,
		    &count) == 0)
			total += count;

		if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished &&
		    zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
		    &count) == 0)
			total += count;
		mutex_exit(&spa->spa_errlog_lock);

	} else {
		mutex_enter(&spa->spa_errlog_lock);
		total += approx_errlog_size_impl(spa, spa->spa_errlog_last);
		total += approx_errlog_size_impl(spa, spa->spa_errlog_scrub);
		mutex_exit(&spa->spa_errlog_lock);
	}
	mutex_enter(&spa->spa_errlist_lock);
	total += avl_numnodes(&spa->spa_errlist_last);
	total += avl_numnodes(&spa->spa_errlist_scrub);
	mutex_exit(&spa->spa_errlist_lock);
	return (total);
}
/*
 * This function sweeps through an on-disk error log and stores all bookmarks
 * as error bookmarks in a new ZAP object. At the end we discard the old one,
 * and spa_upgrade_errlog() will set the spa's on-disk error log to the new
 * ZAP object.
 */
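/*
 * A hedged example of the conversion (values assumed): a legacy entry
 * keyed "36:1b:0:40" becomes an entry keyed "1b:0:40:<birth txg>" inside
 * the per-head-dataset ZAP that the new outer object maps dataset 0x36's
 * head dataset to. Entries whose block pointer cannot be found are
 * skipped, except that an EACCES from an unloaded encryption key keeps
 * the entry with a zero birth txg.
 */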
static void
sync_upgrade_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t *newobj,
    dmu_tx_t *tx)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	zbookmark_phys_t zb;
	uint64_t count;

	*newobj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
	    DMU_OT_NONE, 0, tx);

	/*
	 * If we cannot perform the upgrade we should clear the old on-disk
	 * error logs.
	 */
	if (zap_count(spa->spa_meta_objset, spa_err_obj, &count) != 0) {
		VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
		return;
	}

	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    zap_cursor_advance(&zc)) {
		if (spa_upgrade_errlog_limit != 0 &&
		    zc.zc_cd == spa_upgrade_errlog_limit)
			break;

		name_to_bookmark(za.za_name, &zb);

		zbookmark_err_phys_t zep;
		zep.zb_object = zb.zb_object;
		zep.zb_level = zb.zb_level;
		zep.zb_blkid = zb.zb_blkid;
		zep.zb_birth = 0;

		/*
		 * In case of an error we should simply continue instead of
		 * returning prematurely. See the next comment.
		 */
		uint64_t head_ds;
		dsl_pool_t *dp = spa->spa_dsl_pool;
		dsl_dataset_t *ds;
		objset_t *os;

		int error = dsl_dataset_hold_obj_flags(dp, zb.zb_objset,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
		if (error != 0)
			continue;

		head_ds = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;

		/*
		 * The objset and the dnode are required for getting the block
		 * pointer, which is used to determine if BP_IS_HOLE(). If
		 * getting the objset or the dnode fails, do not create a
		 * zap entry (presuming we know the dataset) as this may create
		 * spurious errors that we cannot ever resolve. If an error is
		 * truly persistent, it should re-appear after a scan.
		 */
		if (dmu_objset_from_ds(ds, &os) != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			continue;
		}

		dnode_t *dn;
		blkptr_t bp;

		if (dnode_hold(os, zep.zb_object, FTAG, &dn) != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			continue;
		}

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		error = dbuf_dnode_findbp(dn, zep.zb_level, zep.zb_blkid, &bp,
		    NULL, NULL);
		if (error == EACCES)
			error = 0;
		else if (!error)
			zep.zb_birth = bp.blk_birth;

		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

		if (error != 0 || BP_IS_HOLE(&bp))
			continue;

		uint64_t err_obj;
		error = zap_lookup_int_key(spa->spa_meta_objset, *newobj,
		    head_ds, &err_obj);

		if (error == ENOENT) {
			err_obj = zap_create(spa->spa_meta_objset,
			    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

			(void) zap_update_int_key(spa->spa_meta_objset,
			    *newobj, head_ds, err_obj, tx);
		}

		char buf[64];
		errphys_to_name(&zep, buf, sizeof (buf));

		const char *name = "";
		(void) zap_update(spa->spa_meta_objset, err_obj,
		    buf, 1, strlen(name) + 1, name, tx);
	}
	zap_cursor_fini(&zc);

	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
}
void
spa_upgrade_errlog(spa_t *spa, dmu_tx_t *tx)
{
	uint64_t newobj = 0;

	mutex_enter(&spa->spa_errlog_lock);
	if (spa->spa_errlog_last != 0) {
		sync_upgrade_errlog(spa, spa->spa_errlog_last, &newobj, tx);
		spa->spa_errlog_last = newobj;

		(void) zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
		    sizeof (uint64_t), 1, &spa->spa_errlog_last, tx);
	}

	if (spa->spa_errlog_scrub != 0) {
		sync_upgrade_errlog(spa, spa->spa_errlog_scrub, &newobj, tx);
		spa->spa_errlog_scrub = newobj;

		(void) zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
		    sizeof (uint64_t), 1, &spa->spa_errlog_scrub, tx);
	}

	mutex_exit(&spa->spa_errlog_lock);
}
#ifdef _KERNEL
/*
 * If an error block is shared by two datasets it will be counted twice.
 */
static int
process_error_log(spa_t *spa, uint64_t obj, void *uaddr, uint64_t *count)
{
	if (obj == 0)
		return (0);

	zap_cursor_t *zc;
	zap_attribute_t *za;

	zc = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (zap_cursor_init(zc, spa->spa_meta_objset, obj);
		    zap_cursor_retrieve(zc, za) == 0;
		    zap_cursor_advance(zc)) {
			if (*count == 0) {
				zap_cursor_fini(zc);
				kmem_free(zc, sizeof (*zc));
				kmem_free(za, sizeof (*za));
				return (SET_ERROR(ENOMEM));
			}

			zbookmark_phys_t zb;
			name_to_bookmark(za->za_name, &zb);

			int error = copyout_entry(&zb, uaddr, count);
			if (error != 0) {
				zap_cursor_fini(zc);
				kmem_free(zc, sizeof (*zc));
				kmem_free(za, sizeof (*za));
				return (error);
			}
		}
		zap_cursor_fini(zc);
		kmem_free(zc, sizeof (*zc));
		kmem_free(za, sizeof (*za));
		return (0);
	}

	for (zap_cursor_init(zc, spa->spa_meta_objset, obj);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {

		zap_cursor_t *head_ds_cursor;
		zap_attribute_t *head_ds_attr;

		head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
		head_ds_attr = kmem_zalloc(sizeof (zap_attribute_t), KM_SLEEP);

		uint64_t head_ds_err_obj = za->za_first_integer;
		uint64_t head_ds;
		name_to_object(za->za_name, &head_ds);
		for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset,
		    head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor,
		    head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) {

			zbookmark_err_phys_t head_ds_block;
			name_to_errphys(head_ds_attr->za_name, &head_ds_block);
			int error = process_error_block(spa, head_ds,
			    &head_ds_block, uaddr, count);

			if (error != 0) {
				zap_cursor_fini(head_ds_cursor);
				kmem_free(head_ds_cursor,
				    sizeof (*head_ds_cursor));
				kmem_free(head_ds_attr, sizeof (*head_ds_attr));

				zap_cursor_fini(zc);
				kmem_free(za, sizeof (*za));
				kmem_free(zc, sizeof (*zc));
				return (error);
			}
		}
		zap_cursor_fini(head_ds_cursor);
		kmem_free(head_ds_cursor, sizeof (*head_ds_cursor));
		kmem_free(head_ds_attr, sizeof (*head_ds_attr));
	}
	zap_cursor_fini(zc);
	kmem_free(za, sizeof (*za));
	kmem_free(zc, sizeof (*zc));
	return (0);
}
static int
process_error_list(spa_t *spa, avl_tree_t *list, void *uaddr, uint64_t *count)
{
	spa_error_entry_t *se;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (se = avl_first(list); se != NULL;
		    se = AVL_NEXT(list, se)) {
			int error =
			    copyout_entry(&se->se_bookmark, uaddr, count);
			if (error != 0) {
				return (error);
			}
		}
		return (0);
	}

	for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) {
		uint64_t head_ds = 0;
		int error = get_head_ds(spa, se->se_bookmark.zb_objset,
		    &head_ds);

		/*
		 * If get_head_ds() errors out, set the head filesystem
		 * to the filesystem stored in the bookmark of the
		 * error block.
		 */
		if (error != 0)
			head_ds = se->se_bookmark.zb_objset;

		error = process_error_block(spa, head_ds,
		    &se->se_zep, uaddr, count);
		if (error != 0)
			return (error);
	}
	return (0);
}
#endif
/*
 * Copy all known errors to userland as an array of bookmarks. This is
 * actually a union of the on-disk last log and current log, as well as any
 * pending error requests.
 *
 * Because the act of reading the on-disk log could cause errors to be
 * generated, we have two separate locks: one for the error log and one for the
 * in-core error lists. We only need the error list lock to log an error, so
 * we grab the error log lock while we read the on-disk logs, and only pick up
 * the error list lock when we are finished.
 */
int
spa_get_errlog(spa_t *spa, void *uaddr, uint64_t *count)
{
	int ret = 0;

#ifdef _KERNEL
	/*
	 * The pool config lock is needed to hold a dataset_t via (among other
	 * places) process_error_list() -> process_error_block()->
	 * find_top_affected_fs(), and lock ordering requires that we get it
	 * before the spa_errlog_lock.
	 */
	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
	mutex_enter(&spa->spa_errlog_lock);

	ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count);

	if (!ret && !spa->spa_scrub_finished)
		ret = process_error_log(spa, spa->spa_errlog_last, uaddr,
		    count);

	mutex_enter(&spa->spa_errlist_lock);
	if (!ret)
		ret = process_error_list(spa, &spa->spa_errlist_scrub, uaddr,
		    count);
	if (!ret)
		ret = process_error_list(spa, &spa->spa_errlist_last, uaddr,
		    count);
	mutex_exit(&spa->spa_errlist_lock);

	mutex_exit(&spa->spa_errlog_lock);
	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
#else
	(void) spa, (void) uaddr, (void) count;
#endif

	return (ret);
}
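/*
 * Userland reaches spa_get_errlog() through the ZFS_IOC_ERROR_LOG ioctl
 * (zpool_get_errlog() in libzfs), which feeds the error list printed by
 * "zpool status -v". A hedged sketch of the calling convention, mirroring
 * copyout_entry()'s fill-from-the-back layout (buffer sizing assumed):
 *
 *	zfs_cmd_t zc = {"\0"};
 *	zbookmark_phys_t *buf = malloc(count * sizeof (zbookmark_phys_t));
 *	(void) strlcpy(zc.zc_name, "tank", sizeof (zc.zc_name));
 *	zc.zc_nvlist_dst = (uintptr_t)buf;
 *	zc.zc_nvlist_dst_size = count;		(entries, not bytes)
 *	if (zfs_ioctl(hdl, ZFS_IOC_ERROR_LOG, &zc) == 0)
 *		the valid entries are buf[zc.zc_nvlist_dst_size .. count - 1]
 */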
/*
 * Called when a scrub completes. This simply sets a bit which tells which
 * AVL tree new errors should be added to. spa_errlog_sync() is responsible
 * for actually syncing the changes to the underlying objects.
 */
void
spa_errlog_rotate(spa_t *spa)
{
	mutex_enter(&spa->spa_errlist_lock);
	spa->spa_scrub_finished = B_TRUE;
	mutex_exit(&spa->spa_errlist_lock);
}
/*
 * Discard any pending errors from the spa_t. Called when unloading a faulted
 * pool, as the errors encountered during the open cannot be synced to disk.
 */
void
spa_errlog_drain(spa_t *spa)
{
	spa_error_entry_t *se;
	void *cookie;

	mutex_enter(&spa->spa_errlist_lock);

	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_last,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));

	mutex_exit(&spa->spa_errlist_lock);
}
/*
 * Process a list of errors into the current on-disk log.
 */
void
sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
{
	spa_error_entry_t *se;
	char buf[NAME_MAX_LEN];
	void *cookie;

	if (avl_numnodes(t) == 0)
		return;

	/* create log if necessary */
	if (*obj == 0)
		*obj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
		    DMU_OT_NONE, 0, tx);

	/* add errors to the current log */
	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			bookmark_to_name(&se->se_bookmark, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset, *obj, buf, 1,
			    strlen(name) + 1, name, tx);
		}
	} else {
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			zbookmark_err_phys_t zep;
			zep.zb_object = se->se_zep.zb_object;
			zep.zb_level = se->se_zep.zb_level;
			zep.zb_blkid = se->se_zep.zb_blkid;
			zep.zb_birth = se->se_zep.zb_birth;

			uint64_t head_ds = 0;
			int error = get_head_ds(spa, se->se_bookmark.zb_objset,
			    &head_ds);

			/*
			 * If get_head_ds() errors out, set the head filesystem
			 * to the filesystem stored in the bookmark of the
			 * error block.
			 */
			if (error != 0)
				head_ds = se->se_bookmark.zb_objset;

			uint64_t err_obj;
			error = zap_lookup_int_key(spa->spa_meta_objset,
			    *obj, head_ds, &err_obj);

			if (error == ENOENT) {
				err_obj = zap_create(spa->spa_meta_objset,
				    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

				(void) zap_update_int_key(spa->spa_meta_objset,
				    *obj, head_ds, err_obj, tx);
			}
			errphys_to_name(&zep, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset,
			    err_obj, buf, 1, strlen(name) + 1, name, tx);
		}
	}
	/* purge the error list */
	cookie = NULL;
	while ((se = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
}
static void
delete_errlog(spa_t *spa, uint64_t spa_err_obj, dmu_tx_t *tx)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		zap_cursor_t zc;
		zap_attribute_t za;
		for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
		    zap_cursor_retrieve(&zc, &za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za.za_first_integer, tx));
		}
		zap_cursor_fini(&zc);
	}
	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
}
/*
 * Sync the error log out to disk. This is a little tricky because the act of
 * writing the error log requires the spa_errlist_lock. So, we need to lock the
 * error lists, take a copy of the lists, and then reinitialize them. Then, we
 * drop the error list lock and take the error log lock, at which point we
 * do the errlog processing. Then, if we encounter an I/O error during this
 * process, we can successfully add the error to the list. Note that this will
 * result in the perpetual recycling of errors, but it is an unlikely situation
 * and not a performance critical operation.
 */
void
spa_errlog_sync(spa_t *spa, uint64_t txg)
{
	dmu_tx_t *tx;
	avl_tree_t scrub, last;
	int scrub_finished;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Bail out early under normal circumstances.
	 */
	if (avl_numnodes(&spa->spa_errlist_scrub) == 0 &&
	    avl_numnodes(&spa->spa_errlist_last) == 0 &&
	    avl_numnodes(&spa->spa_errlist_healed) == 0 &&
	    !spa->spa_scrub_finished) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	spa_get_errlists(spa, &last, &scrub);
	scrub_finished = spa->spa_scrub_finished;
	spa->spa_scrub_finished = B_FALSE;

	mutex_exit(&spa->spa_errlist_lock);

	/*
	 * The pool config lock is needed to hold a dataset_t via
	 * sync_error_list() -> get_head_ds(), and lock ordering
	 * requires that we get it before the spa_errlog_lock.
	 */
	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
	mutex_enter(&spa->spa_errlog_lock);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	/*
	 * Remove healed errors from errors.
	 */
	spa_remove_healed_errors(spa, &last, &scrub, tx);

	/*
	 * Sync out the current list of errors.
	 */
	sync_error_list(spa, &last, &spa->spa_errlog_last, tx);

	/*
	 * Rotate the log if necessary.
	 */
	if (scrub_finished) {
		if (spa->spa_errlog_last != 0)
			delete_errlog(spa, spa->spa_errlog_last, tx);
		spa->spa_errlog_last = spa->spa_errlog_scrub;
		spa->spa_errlog_scrub = 0;

		sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx);
	}

	/*
	 * Sync out any pending scrub errors.
	 */
	sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx);

	/*
	 * Update the MOS to reflect the new values.
	 */
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1,
	    &spa->spa_errlog_last, tx);
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1,
	    &spa->spa_errlog_scrub, tx);

	dmu_tx_commit(tx);

	mutex_exit(&spa->spa_errlog_lock);
	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
}
static void
delete_dataset_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t ds,
    dmu_tx_t *tx)
{
	if (spa_err_obj == 0)
		return;

	zap_cursor_t zc;
	zap_attribute_t za;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {
		uint64_t head_ds;
		name_to_object(za.za_name, &head_ds);
		if (head_ds == ds) {
			(void) zap_remove(spa->spa_meta_objset, spa_err_obj,
			    za.za_name, tx);
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za.za_first_integer, tx));
			break;
		}
	}
	zap_cursor_fini(&zc);
}
void
spa_delete_dataset_errlog(spa_t *spa, uint64_t ds, dmu_tx_t *tx)
{
	mutex_enter(&spa->spa_errlog_lock);
	delete_dataset_errlog(spa, spa->spa_errlog_scrub, ds, tx);
	delete_dataset_errlog(spa, spa->spa_errlog_last, ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
}
static int
find_txg_ancestor_snapshot(spa_t *spa, uint64_t new_head, uint64_t old_head,
    uint64_t *txg)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj_flags(dp, old_head,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;

	while (prev_obj != 0) {
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
		if ((error = dsl_dataset_hold_obj_flags(dp, prev_obj,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &ds)) == 0 &&
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj == new_head)
			break;

		if (error != 0)
			return (error);

		prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	ASSERT(prev_obj != 0);
	*txg = prev_obj_txg;
	return (0);
}
static void
swap_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t new_head, uint64_t
    old_head, dmu_tx_t *tx)
{
	if (spa_err_obj == 0)
		return;

	uint64_t old_head_errlog;
	int error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj,
	    old_head, &old_head_errlog);

	/* If no error log, then there is nothing to do. */
	if (error != 0)
		return;

	uint64_t txg;
	error = find_txg_ancestor_snapshot(spa, new_head, old_head, &txg);
	if (error != 0)
		return;

	/*
	 * Create an error log if the file system being promoted does not
	 * already have one.
	 */
	uint64_t new_head_errlog;
	error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj, new_head,
	    &new_head_errlog);

	if (error != 0) {
		new_head_errlog = zap_create(spa->spa_meta_objset,
		    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

		(void) zap_update_int_key(spa->spa_meta_objset, spa_err_obj,
		    new_head, new_head_errlog, tx);
	}

	zap_cursor_t zc;
	zap_attribute_t za;
	zbookmark_err_phys_t err_block;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, old_head_errlog);
	    zap_cursor_retrieve(&zc, &za) == 0; zap_cursor_advance(&zc)) {

		const char *name = "";
		name_to_errphys(za.za_name, &err_block);
		if (err_block.zb_birth < txg) {
			(void) zap_update(spa->spa_meta_objset,
			    new_head_errlog, za.za_name, 1,
			    strlen(name) + 1, name, tx);

			(void) zap_remove(spa->spa_meta_objset,
			    old_head_errlog, za.za_name, tx);
		}
	}
	zap_cursor_fini(&zc);
}
void
spa_swap_errlog(spa_t *spa, uint64_t new_head_ds, uint64_t old_head_ds,
    dmu_tx_t *tx)
{
	mutex_enter(&spa->spa_errlog_lock);
	swap_errlog(spa, spa->spa_errlog_scrub, new_head_ds, old_head_ds, tx);
	swap_errlog(spa, spa->spa_errlog_last, new_head_ds, old_head_ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
}
#if defined(_KERNEL)
/* error handling */
EXPORT_SYMBOL(spa_log_error);
EXPORT_SYMBOL(spa_approx_errlog_size);
EXPORT_SYMBOL(spa_get_last_errlog_size);
EXPORT_SYMBOL(spa_get_errlog);
EXPORT_SYMBOL(spa_errlog_rotate);
EXPORT_SYMBOL(spa_errlog_drain);
EXPORT_SYMBOL(spa_errlog_sync);
EXPORT_SYMBOL(spa_get_errlists);
EXPORT_SYMBOL(spa_delete_dataset_errlog);
EXPORT_SYMBOL(spa_swap_errlog);
EXPORT_SYMBOL(sync_error_list);
EXPORT_SYMBOL(spa_upgrade_errlog);
EXPORT_SYMBOL(find_top_affected_fs);
EXPORT_SYMBOL(find_birth_txg);
EXPORT_SYMBOL(zep_to_zb);
EXPORT_SYMBOL(name_to_errphys);
#endif
/* BEGIN CSTYLED */
ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, UINT, ZMOD_RW,
	"Limit the number of errors which will be upgraded to the new "
	"on-disk error log when enabling head_errlog");
/* END CSTYLED */