/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013, 2014, Delphix. All rights reserved.
 * Copyright (c) 2019 Datto Inc.
 * Copyright (c) 2021, 2022, George Amanakis. All rights reserved.
 */
/*
 * Routines to manage the on-disk persistent error log.
 *
 * Each pool stores a log of all logical data errors seen during normal
 * operation. This is actually the union of two distinct logs: the last log,
 * and the current log. All errors seen are logged to the current log. When a
 * scrub completes, the current log becomes the last log, the last log is
 * thrown out, and the current log is reinitialized. This way, if an error is
 * somehow corrected, a new scrub will show that it no longer exists, and will
 * be deleted from the log when the scrub completes.
 *
 * The log is stored using a ZAP object whose key is a string form of the
 * zbookmark_phys tuple (objset, object, level, blkid), and whose contents is
 * an optional 'objset:object' human-readable string describing the data. When
 * an error is first logged, this string will be empty, indicating that no
 * name is known. This prevents us from having to issue a potentially large
 * amount of I/O to discover the object name during an error path. Instead, we
 * do the calculation when the data is requested, storing the result so future
 * queries will be faster.
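 *
 * As an illustration (hypothetical values, not taken from any real pool), an
 * error at objset 0x36, object 0x21, level 0, blkid 0x11 would be keyed by
 * the string "36:21:0:11", initially mapped to the empty name "".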
 *
 * If the head_errlog feature is enabled, a different on-disk format is used.
 * The error log of each head dataset is stored separately in the zap object
 * and keyed by the head id. This enables listing every dataset affected in
 * userland. In order to be able to track whether an error block has been
 * modified or added to snapshots since it was marked as an error, a new tuple
 * is introduced: zbookmark_err_phys_t. It allows the storage of the birth
 * transaction group of an error block on-disk. The birth transaction group is
 * used by check_filesystem() to assess whether this block was freed,
 * re-written or added to a snapshot since its marking as an error.
 *
 * This log is then shipped into an nvlist where the key is the dataset name
 * and the value is the object name. Userland is then responsible for
 * uniquifying this list and displaying it to the user.
 */
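
/*
 * Illustrative sketch of the head_errlog on-disk layout described above,
 * using hypothetical object numbers: the top-level errlog ZAP maps each head
 * dataset id to a per-dataset ZAP of error entries, and each entry is keyed
 * by the zbookmark_err_phys_t tuple (object:level:blkid:birth):
 *
 *	errlog ZAP:	"54"		-> <ZAP object 123>
 *	ZAP object 123:	"21:0:11:5f"	-> ""
 */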
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/zfs_znode.h>
#define	NAME_MAX_LEN	64
typedef struct clones {
	uint64_t clone_ds;
	list_node_t node;
} clones_t;
/*
 * spa_upgrade_errlog_limit : A zfs module parameter that controls the number
 * of on-disk error log entries that will be converted to the new format when
 * enabling head_errlog. Defaults to 0 which converts all log entries.
 */
static uint_t spa_upgrade_errlog_limit = 0;
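
/*
 * For illustration only: on Linux, the ZFS_MODULE_PARAM() declaration at the
 * bottom of this file is expected to expose this knob as
 * /sys/module/zfs/parameters/spa_upgrade_errlog_limit, e.g.:
 *
 *	echo 1000 > /sys/module/zfs/parameters/spa_upgrade_errlog_limit
 */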
/*
 * Convert a bookmark to a string.
 */
static void
bookmark_to_name(zbookmark_phys_t *zb, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zb->zb_objset, (u_longlong_t)zb->zb_object,
	    (u_longlong_t)zb->zb_level, (u_longlong_t)zb->zb_blkid);
}
/*
 * Convert an err_phys to a string.
 */
static void
errphys_to_name(zbookmark_err_phys_t *zep, char *buf, size_t len)
{
	(void) snprintf(buf, len, "%llx:%llx:%llx:%llx",
	    (u_longlong_t)zep->zb_object, (u_longlong_t)zep->zb_level,
	    (u_longlong_t)zep->zb_blkid, (u_longlong_t)zep->zb_birth);
}
/*
 * Convert a string to a err_phys.
 */
void
name_to_errphys(char *buf, zbookmark_err_phys_t *zep)
{
	zep->zb_object = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zep->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zep->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zep->zb_birth = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}
/*
 * Convert a string to a bookmark.
 */
static void
name_to_bookmark(char *buf, zbookmark_phys_t *zb)
{
	zb->zb_objset = zfs_strtonum(buf, &buf);
	ASSERT(*buf == ':');
	zb->zb_object = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_level = (int)zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == ':');
	zb->zb_blkid = zfs_strtonum(buf + 1, &buf);
	ASSERT(*buf == '\0');
}
void
zep_to_zb(uint64_t dataset, zbookmark_err_phys_t *zep, zbookmark_phys_t *zb)
{
	zb->zb_objset = dataset;
	zb->zb_object = zep->zb_object;
	zb->zb_level = zep->zb_level;
	zb->zb_blkid = zep->zb_blkid;
}
static void
name_to_object(char *buf, uint64_t *obj)
{
	*obj = zfs_strtonum(buf, &buf);
	ASSERT(*buf == '\0');
}
/*
 * Retrieve the head filesystem.
 */
static int
get_head_ds(spa_t *spa, uint64_t dsobj, uint64_t *head_ds)
{
	dsl_dataset_t *ds;
	int error = dsl_dataset_hold_obj_flags(spa->spa_dsl_pool,
	    dsobj, DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
	if (error != 0)
		return (error);

	ASSERT(head_ds);
	*head_ds = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	return (error);
}
/*
 * Log an uncorrectable error to the persistent error log. We add it to the
 * spa's list of pending errors. The changes are actually synced out to disk
 * during spa_errlog_sync().
 */
void
spa_log_error(spa_t *spa, const zbookmark_phys_t *zb, const uint64_t birth)
{
	spa_error_entry_t search;
	spa_error_entry_t *new;
	avl_tree_t *tree;
	avl_index_t where;

	/*
	 * If we are trying to import a pool, ignore any errors, as we won't be
	 * writing to the pool any time soon.
	 */
	if (spa_load_state(spa) == SPA_LOAD_TRYIMPORT)
		return;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * If we have had a request to rotate the log, log it to the next list
	 * instead of the current one.
	 */
	if (spa->spa_scrub_active || spa->spa_scrub_finished)
		tree = &spa->spa_errlist_scrub;
	else
		tree = &spa->spa_errlist_last;

	search.se_bookmark = *zb;
	if (avl_find(tree, &search, &where) != NULL) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
	new->se_bookmark = *zb;

	/*
	 * If the head_errlog feature is enabled, store the birth txg now. In
	 * case the file is deleted before spa_errlog_sync() runs, we will not
	 * be able to retrieve the birth txg.
	 */
	if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		new->se_zep.zb_object = zb->zb_object;
		new->se_zep.zb_level = zb->zb_level;
		new->se_zep.zb_blkid = zb->zb_blkid;
		new->se_zep.zb_birth = birth;
	}

	avl_insert(tree, new, where);
	mutex_exit(&spa->spa_errlist_lock);
}
int
find_birth_txg(dsl_dataset_t *ds, zbookmark_err_phys_t *zep,
    uint64_t *birth_txg)
{
	objset_t *os;
	int error = dmu_objset_from_ds(ds, &os);
	if (error != 0)
		return (error);

	dnode_t *dn;
	blkptr_t bp;

	error = dnode_hold(os, zep->zb_object, FTAG, &dn);
	if (error != 0)
		return (error);

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	error = dbuf_dnode_findbp(dn, zep->zb_level, zep->zb_blkid, &bp, NULL,
	    NULL);
	if (error == 0 && BP_IS_HOLE(&bp))
		error = SET_ERROR(ENOENT);

	*birth_txg = BP_GET_LOGICAL_BIRTH(&bp);
	rw_exit(&dn->dn_struct_rwlock);
	dnode_rele(dn, FTAG);
	return (error);
}
/*
 * This function finds the oldest affected filesystem containing an error
 * block.
 */
int
find_top_affected_fs(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    uint64_t *top_affected_fs)
{
	uint64_t oldest_dsobj;
	int error = dsl_dataset_oldest_snapshot(spa, head_ds, zep->zb_birth,
	    &oldest_dsobj);
	if (error != 0)
		return (error);

	dsl_dataset_t *ds;
	error = dsl_dataset_hold_obj_flags(spa->spa_dsl_pool, oldest_dsobj,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
	if (error != 0)
		return (error);

	*top_affected_fs =
	    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	return (0);
}
#ifdef _KERNEL
/*
 * Copy the bookmark to the end of the user-space buffer which starts at
 * uaddr and has *count unused entries, and decrement *count by 1.
 */
static int
copyout_entry(const zbookmark_phys_t *zb, void *uaddr, uint64_t *count)
{
	if (*count == 0)
		return (SET_ERROR(ENOMEM));

	*count -= 1;
	if (copyout(zb, (char *)uaddr + (*count) * sizeof (zbookmark_phys_t),
	    sizeof (zbookmark_phys_t)) != 0)
		return (SET_ERROR(EFAULT));
	return (0);
}
/*
 * Each time the error block is referenced by a snapshot or clone, add a
 * zbookmark_phys_t entry to the userspace array at uaddr. The array is
 * filled from the back and the in-out parameter *count is modified to be the
 * number of unused entries at the beginning of the array. The function
 * scrub_filesystem() is modelled after this one.
 */
static int
check_filesystem(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    void *uaddr, uint64_t *count, list_t *clones_list)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj_flags(dp, head_ds,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t latest_txg;
	uint64_t txg_to_consider = spa->spa_syncing_txg;
	boolean_t check_snapshot = B_TRUE;
	error = find_birth_txg(ds, zep, &latest_txg);

	/*
	 * If find_birth_txg() errors out otherwise, let txg_to_consider be
	 * equal to the spa's syncing txg: if check_filesystem() errors out
	 * then affected snapshots or clones will not be checked.
	 */
	if (error == 0 && zep->zb_birth == latest_txg) {
		/* Block neither free nor rewritten. */
		zbookmark_phys_t zb;
		zep_to_zb(head_ds, zep, &zb);
		error = copyout_entry(&zb, uaddr, count);
		if (error != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			return (error);
		}
		check_snapshot = B_FALSE;
	} else if (error == 0) {
		txg_to_consider = latest_txg;
	}

	/*
	 * Retrieve the number of snapshots if the dataset is not a snapshot.
	 */
	uint64_t snap_count = 0;
	if (dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0) {
		error = zap_count(spa->spa_meta_objset,
		    dsl_dataset_phys(ds)->ds_snapnames_zapobj, &snap_count);
		if (error != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			return (error);
		}
	}

	if (snap_count == 0) {
		/* Filesystem without snapshots. */
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
		return (0);
	}

	uint64_t *snap_obj_array = kmem_zalloc(snap_count * sizeof (uint64_t),
	    KM_SLEEP);

	int aff_snap_count = 0;
	uint64_t snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
	uint64_t zap_clone = dsl_dir_phys(ds->ds_dir)->dd_clones;

	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

	/* Check only snapshots created from this file system. */
	while (snap_obj != 0 && zep->zb_birth < snap_obj_txg &&
	    snap_obj_txg <= txg_to_consider) {

		error = dsl_dataset_hold_obj_flags(dp, snap_obj,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
		if (error != 0)
			goto out;

		if (dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj != head_ds) {
			snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
			snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			continue;
		}

		boolean_t affected = B_TRUE;
		if (check_snapshot) {
			uint64_t blk_txg;
			error = find_birth_txg(ds, zep, &blk_txg);
			affected = (error == 0 && zep->zb_birth == blk_txg);
		}

		/* Report errors in snapshots. */
		if (affected) {
			snap_obj_array[aff_snap_count] = snap_obj;
			aff_snap_count++;

			zbookmark_phys_t zb;
			zep_to_zb(snap_obj, zep, &zb);
			error = copyout_entry(&zb, uaddr, count);
			if (error != 0) {
				dsl_dataset_rele_flags(ds,
				    DS_HOLD_FLAG_DECRYPT, FTAG);
				goto out;
			}
		}
		snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
		snap_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	}

	if (zap_clone == 0 || aff_snap_count == 0) {
		error = 0;
		goto out;
	}

	/* Check clones. */
	zap_cursor_t *zc;
	zap_attribute_t *za;

	zc = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = zap_attribute_alloc();

	for (zap_cursor_init(zc, spa->spa_meta_objset, zap_clone);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {

		dsl_dataset_t *clone;
		error = dsl_dataset_hold_obj_flags(dp, za->za_first_integer,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &clone);
		if (error != 0)
			break;

		/*
		 * Only clones whose origins were affected could also
		 * have affected snapshots.
		 */
		boolean_t found = B_FALSE;
		for (int i = 0; i < snap_count; i++) {
			if (dsl_dir_phys(clone->ds_dir)->dd_origin_obj
			    == snap_obj_array[i])
				found = B_TRUE;
		}
		dsl_dataset_rele_flags(clone, DS_HOLD_FLAG_DECRYPT, FTAG);

		if (!found)
			continue;

		clones_t *ct = kmem_zalloc(sizeof (*ct), KM_SLEEP);
		ct->clone_ds = za->za_first_integer;
		list_insert_tail(clones_list, ct);
	}

	zap_cursor_fini(zc);
	zap_attribute_free(za);
	kmem_free(zc, sizeof (*zc));

out:
	kmem_free(snap_obj_array, snap_count * sizeof (uint64_t));
	return (error);
}
static int
process_error_block(spa_t *spa, uint64_t head_ds, zbookmark_err_phys_t *zep,
    void *uaddr, uint64_t *count)
{
	/*
	 * If zb_birth == 0 or head_ds == 0 it means we failed to retrieve the
	 * birth txg or the head filesystem of the block pointer. This may
	 * happen e.g. when an encrypted filesystem is not mounted or when
	 * the key is not loaded. In this case do not proceed to
	 * check_filesystem(), instead do the accounting here.
	 */
	if (zep->zb_birth == 0 || head_ds == 0) {
		zbookmark_phys_t zb;
		zep_to_zb(head_ds, zep, &zb);
		int error = copyout_entry(&zb, uaddr, count);
		return (error);
	}

	uint64_t top_affected_fs;
	uint64_t init_count = *count;
	int error = find_top_affected_fs(spa, head_ds, zep, &top_affected_fs);
	if (error == 0) {
		clones_t *ct;
		list_t clones_list;

		list_create(&clones_list, sizeof (clones_t),
		    offsetof(clones_t, node));

		error = check_filesystem(spa, top_affected_fs, zep,
		    uaddr, count, &clones_list);

		while ((ct = list_remove_head(&clones_list)) != NULL) {
			error = check_filesystem(spa, ct->clone_ds, zep,
			    uaddr, count, &clones_list);
			kmem_free(ct, sizeof (*ct));

			if (error != 0) {
				while (!list_is_empty(&clones_list)) {
					ct = list_remove_head(&clones_list);
					kmem_free(ct, sizeof (*ct));
				}
				break;
			}
		}

		list_destroy(&clones_list);
	}
	if (error == 0 && init_count == *count) {
		/*
		 * If we reach this point, no errors have been detected
		 * in the checked filesystems/snapshots. Before returning mark
		 * the error block to be removed from the error lists and logs.
		 */
		zbookmark_phys_t zb;
		zep_to_zb(head_ds, zep, &zb);
		spa_remove_error(spa, &zb, zep->zb_birth);
	}

	return (error);
}
#endif
/* Return the number of errors in the error log */
uint64_t
spa_get_last_errlog_size(spa_t *spa)
{
	uint64_t total = 0, count;
	mutex_enter(&spa->spa_errlog_lock);

	if (spa->spa_errlog_last != 0 &&
	    zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
	    &count) == 0)
		total += count;
	mutex_exit(&spa->spa_errlog_lock);
	return (total);
}
/*
 * If a healed bookmark matches an entry in the error log we stash it in a tree
 * so that we can later remove the related log entries in sync context.
 */
static void
spa_add_healed_error(spa_t *spa, uint64_t obj, zbookmark_phys_t *healed_zb,
    const uint64_t birth)
{
	char name[NAME_MAX_LEN];

	if (obj == 0)
		return;

	boolean_t held_list = B_FALSE;
	boolean_t held_log = B_FALSE;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		bookmark_to_name(healed_zb, name, sizeof (name));

		if (zap_contains(spa->spa_meta_objset, obj, name) == 0) {
			if (!MUTEX_HELD(&spa->spa_errlog_lock)) {
				mutex_enter(&spa->spa_errlog_lock);
				held_log = B_TRUE;
			}

			/*
			 * Found an error matching healed zb, add zb to our
			 * tree of healed errors
			 */
			avl_tree_t *tree = &spa->spa_errlist_healed;
			spa_error_entry_t search;
			spa_error_entry_t *new;
			avl_index_t where;
			search.se_bookmark = *healed_zb;
			if (!MUTEX_HELD(&spa->spa_errlist_lock)) {
				mutex_enter(&spa->spa_errlist_lock);
				held_list = B_TRUE;
			}
			if (avl_find(tree, &search, &where) != NULL) {
				if (held_list)
					mutex_exit(&spa->spa_errlist_lock);
				if (held_log)
					mutex_exit(&spa->spa_errlog_lock);
				return;
			}
			new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
			new->se_bookmark = *healed_zb;
			avl_insert(tree, new, where);
			if (held_list)
				mutex_exit(&spa->spa_errlist_lock);
			if (held_log)
				mutex_exit(&spa->spa_errlog_lock);
		}
		return;
	}

	zbookmark_err_phys_t healed_zep;
	healed_zep.zb_object = healed_zb->zb_object;
	healed_zep.zb_level = healed_zb->zb_level;
	healed_zep.zb_blkid = healed_zb->zb_blkid;
	healed_zep.zb_birth = birth;

	errphys_to_name(&healed_zep, name, sizeof (name));

	zap_cursor_t zc;
	zap_attribute_t *za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa->spa_errlog_last);
	    zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) {
		if (zap_contains(spa->spa_meta_objset, za->za_first_integer,
		    name) == 0) {
			if (!MUTEX_HELD(&spa->spa_errlog_lock)) {
				mutex_enter(&spa->spa_errlog_lock);
				held_log = B_TRUE;
			}

			avl_tree_t *tree = &spa->spa_errlist_healed;
			spa_error_entry_t search;
			spa_error_entry_t *new;
			avl_index_t where;
			search.se_bookmark = *healed_zb;

			if (!MUTEX_HELD(&spa->spa_errlist_lock)) {
				mutex_enter(&spa->spa_errlist_lock);
				held_list = B_TRUE;
			}

			if (avl_find(tree, &search, &where) != NULL) {
				if (held_list) {
					mutex_exit(&spa->spa_errlist_lock);
					held_list = B_FALSE;
				}
				if (held_log) {
					mutex_exit(&spa->spa_errlog_lock);
					held_log = B_FALSE;
				}
				continue;
			}
			new = kmem_zalloc(sizeof (spa_error_entry_t), KM_SLEEP);
			new->se_bookmark = *healed_zb;
			new->se_zep = healed_zep;
			avl_insert(tree, new, where);

			if (held_list) {
				mutex_exit(&spa->spa_errlist_lock);
				held_list = B_FALSE;
			}
			if (held_log) {
				mutex_exit(&spa->spa_errlog_lock);
				held_log = B_FALSE;
			}
		}
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
}
/*
 * If this error exists in the given tree remove it.
 */
static void
remove_error_from_list(spa_t *spa, avl_tree_t *t, const zbookmark_phys_t *zb)
{
	spa_error_entry_t search, *found;
	avl_index_t where;

	mutex_enter(&spa->spa_errlist_lock);
	search.se_bookmark = *zb;
	if ((found = avl_find(t, &search, &where)) != NULL) {
		avl_remove(t, found);
		kmem_free(found, sizeof (spa_error_entry_t));
	}
	mutex_exit(&spa->spa_errlist_lock);
}
/*
 * Removes all of the recv healed errors from both on-disk error logs
 */
static void
spa_remove_healed_errors(spa_t *spa, avl_tree_t *s, avl_tree_t *l, dmu_tx_t *tx)
{
	char name[NAME_MAX_LEN];
	spa_error_entry_t *se;
	void *cookie = NULL;

	ASSERT(MUTEX_HELD(&spa->spa_errlog_lock));

	while ((se = avl_destroy_nodes(&spa->spa_errlist_healed,
	    &cookie)) != NULL) {
		remove_error_from_list(spa, s, &se->se_bookmark);
		remove_error_from_list(spa, l, &se->se_bookmark);

		if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
			bookmark_to_name(&se->se_bookmark, name, sizeof (name));
			(void) zap_remove(spa->spa_meta_objset,
			    spa->spa_errlog_last, name, tx);
			(void) zap_remove(spa->spa_meta_objset,
			    spa->spa_errlog_scrub, name, tx);
		} else {
			errphys_to_name(&se->se_zep, name, sizeof (name));
			zap_cursor_t zc;
			zap_attribute_t *za = zap_attribute_alloc();
			for (zap_cursor_init(&zc, spa->spa_meta_objset,
			    spa->spa_errlog_last);
			    zap_cursor_retrieve(&zc, za) == 0;
			    zap_cursor_advance(&zc)) {
				zap_remove(spa->spa_meta_objset,
				    za->za_first_integer, name, tx);
			}
			zap_cursor_fini(&zc);

			for (zap_cursor_init(&zc, spa->spa_meta_objset,
			    spa->spa_errlog_scrub);
			    zap_cursor_retrieve(&zc, za) == 0;
			    zap_cursor_advance(&zc)) {
				zap_remove(spa->spa_meta_objset,
				    za->za_first_integer, name, tx);
			}
			zap_cursor_fini(&zc);
			zap_attribute_free(za);
		}
		kmem_free(se, sizeof (spa_error_entry_t));
	}
}
/*
 * Stash away healed bookmarks to remove them from the on-disk error logs
 * later in spa_remove_healed_errors().
 */
void
spa_remove_error(spa_t *spa, zbookmark_phys_t *zb, uint64_t birth)
{
	spa_add_healed_error(spa, spa->spa_errlog_last, zb, birth);
	spa_add_healed_error(spa, spa->spa_errlog_scrub, zb, birth);
}
static uint64_t
approx_errlog_size_impl(spa_t *spa, uint64_t spa_err_obj)
{
	if (spa_err_obj == 0)
		return (0);
	uint64_t total = 0;

	zap_cursor_t zc;
	zap_attribute_t *za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) {
		uint64_t count;
		if (zap_count(spa->spa_meta_objset, za->za_first_integer,
		    &count) == 0)
			total += count;
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
	return (total);
}
/*
 * Return the approximate number of errors currently in the error log. This
 * will be nonzero if there are some errors, but otherwise it may be more
 * or less than the number of entries returned by spa_get_errlog().
 */
uint64_t
spa_approx_errlog_size(spa_t *spa)
{
	uint64_t total = 0;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		uint64_t count;
		mutex_enter(&spa->spa_errlog_lock);

		if (spa->spa_errlog_scrub != 0 &&
		    zap_count(spa->spa_meta_objset, spa->spa_errlog_scrub,
		    &count) == 0)
			total += count;

		if (spa->spa_errlog_last != 0 && !spa->spa_scrub_finished &&
		    zap_count(spa->spa_meta_objset, spa->spa_errlog_last,
		    &count) == 0)
			total += count;
		mutex_exit(&spa->spa_errlog_lock);
	} else {
		mutex_enter(&spa->spa_errlog_lock);
		total += approx_errlog_size_impl(spa, spa->spa_errlog_last);
		total += approx_errlog_size_impl(spa, spa->spa_errlog_scrub);
		mutex_exit(&spa->spa_errlog_lock);
	}
	mutex_enter(&spa->spa_errlist_lock);
	total += avl_numnodes(&spa->spa_errlist_last);
	total += avl_numnodes(&spa->spa_errlist_scrub);
	mutex_exit(&spa->spa_errlist_lock);

	return (total);
}
/*
 * This function sweeps through an on-disk error log and stores all bookmarks
 * as error bookmarks in a new ZAP object. At the end we discard the old one,
 * and spa_upgrade_errlog() will set the spa's on-disk error log to the new
 * ZAP object.
 */
static void
sync_upgrade_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t *newobj,
    dmu_tx_t *tx)
{
	zap_cursor_t zc;
	zap_attribute_t *za;
	zbookmark_phys_t zb;
	uint64_t count;

	*newobj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
	    DMU_OT_NONE, 0, tx);

	/*
	 * If we cannot perform the upgrade we should clear the old on-disk
	 * error logs.
	 */
	if (zap_count(spa->spa_meta_objset, spa_err_obj, &count) != 0) {
		VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj,
		    tx));
		return;
	}

	za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, za) == 0;
	    zap_cursor_advance(&zc)) {
		if (spa_upgrade_errlog_limit != 0 &&
		    zc.zc_cd == spa_upgrade_errlog_limit)
			break;

		name_to_bookmark(za->za_name, &zb);

		zbookmark_err_phys_t zep;
		zep.zb_object = zb.zb_object;
		zep.zb_level = zb.zb_level;
		zep.zb_blkid = zb.zb_blkid;
		zep.zb_birth = 0;

		/*
		 * In case of an error we should simply continue instead of
		 * returning prematurely. See the next comment.
		 */
		uint64_t head_ds;
		dsl_pool_t *dp = spa->spa_dsl_pool;
		dsl_dataset_t *ds;
		objset_t *os;

		int error = dsl_dataset_hold_obj_flags(dp, zb.zb_objset,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
		if (error != 0)
			continue;

		head_ds = dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj;

		/*
		 * The objset and the dnode are required for getting the block
		 * pointer, which is used to determine if BP_IS_HOLE(). If
		 * getting the objset or the dnode fails, do not create a
		 * zap entry (presuming we know the dataset) as this may create
		 * spurious errors that we cannot ever resolve. If an error is
		 * truly persistent, it should re-appear after a scan.
		 */
		if (dmu_objset_from_ds(ds, &os) != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			continue;
		}

		dnode_t *dn;
		blkptr_t bp;

		if (dnode_hold(os, zep.zb_object, FTAG, &dn) != 0) {
			dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
			continue;
		}

		rw_enter(&dn->dn_struct_rwlock, RW_READER);
		error = dbuf_dnode_findbp(dn, zep.zb_level, zep.zb_blkid, &bp,
		    NULL, NULL);
		if (error == 0)
			zep.zb_birth = BP_GET_LOGICAL_BIRTH(&bp);

		rw_exit(&dn->dn_struct_rwlock);
		dnode_rele(dn, FTAG);
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);

		if (error != 0 || BP_IS_HOLE(&bp))
			continue;

		uint64_t err_obj;
		error = zap_lookup_int_key(spa->spa_meta_objset, *newobj,
		    head_ds, &err_obj);

		if (error == ENOENT) {
			err_obj = zap_create(spa->spa_meta_objset,
			    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

			(void) zap_update_int_key(spa->spa_meta_objset,
			    *newobj, head_ds, err_obj, tx);
		}

		char buf[NAME_MAX_LEN];
		errphys_to_name(&zep, buf, sizeof (buf));

		const char *name = "";
		(void) zap_update(spa->spa_meta_objset, err_obj,
		    buf, 1, strlen(name) + 1, name, tx);
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);

	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
}
void
spa_upgrade_errlog(spa_t *spa, dmu_tx_t *tx)
{
	uint64_t newobj = 0;

	mutex_enter(&spa->spa_errlog_lock);
	if (spa->spa_errlog_last != 0) {
		sync_upgrade_errlog(spa, spa->spa_errlog_last, &newobj, tx);
		spa->spa_errlog_last = newobj;

		(void) zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_LAST,
		    sizeof (uint64_t), 1, &spa->spa_errlog_last, tx);
	}

	if (spa->spa_errlog_scrub != 0) {
		sync_upgrade_errlog(spa, spa->spa_errlog_scrub, &newobj, tx);
		spa->spa_errlog_scrub = newobj;

		(void) zap_update(spa->spa_meta_objset,
		    DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_ERRLOG_SCRUB,
		    sizeof (uint64_t), 1, &spa->spa_errlog_scrub, tx);
	}

	mutex_exit(&spa->spa_errlog_lock);
}
#ifdef _KERNEL
/*
 * If an error block is shared by two datasets it will be counted twice.
 */
static int
process_error_log(spa_t *spa, uint64_t obj, void *uaddr, uint64_t *count)
{
	if (obj == 0)
		return (0);

	zap_cursor_t *zc;
	zap_attribute_t *za;

	zc = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
	za = zap_attribute_alloc();

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (zap_cursor_init(zc, spa->spa_meta_objset, obj);
		    zap_cursor_retrieve(zc, za) == 0;
		    zap_cursor_advance(zc)) {
			if (*count == 0) {
				zap_cursor_fini(zc);
				kmem_free(zc, sizeof (*zc));
				zap_attribute_free(za);
				return (SET_ERROR(ENOMEM));
			}

			zbookmark_phys_t zb;
			name_to_bookmark(za->za_name, &zb);

			int error = copyout_entry(&zb, uaddr, count);
			if (error != 0) {
				zap_cursor_fini(zc);
				kmem_free(zc, sizeof (*zc));
				zap_attribute_free(za);
				return (error);
			}
		}
		zap_cursor_fini(zc);
		kmem_free(zc, sizeof (*zc));
		zap_attribute_free(za);
		return (0);
	}

	for (zap_cursor_init(zc, spa->spa_meta_objset, obj);
	    zap_cursor_retrieve(zc, za) == 0;
	    zap_cursor_advance(zc)) {

		zap_cursor_t *head_ds_cursor;
		zap_attribute_t *head_ds_attr;

		head_ds_cursor = kmem_zalloc(sizeof (zap_cursor_t), KM_SLEEP);
		head_ds_attr = zap_attribute_alloc();

		uint64_t head_ds_err_obj = za->za_first_integer;
		uint64_t head_ds;
		name_to_object(za->za_name, &head_ds);
		for (zap_cursor_init(head_ds_cursor, spa->spa_meta_objset,
		    head_ds_err_obj); zap_cursor_retrieve(head_ds_cursor,
		    head_ds_attr) == 0; zap_cursor_advance(head_ds_cursor)) {

			zbookmark_err_phys_t head_ds_block;
			name_to_errphys(head_ds_attr->za_name, &head_ds_block);
			int error = process_error_block(spa, head_ds,
			    &head_ds_block, uaddr, count);

			if (error != 0) {
				zap_cursor_fini(head_ds_cursor);
				kmem_free(head_ds_cursor,
				    sizeof (*head_ds_cursor));
				zap_attribute_free(head_ds_attr);

				zap_cursor_fini(zc);
				zap_attribute_free(za);
				kmem_free(zc, sizeof (*zc));
				return (error);
			}
		}
		zap_cursor_fini(head_ds_cursor);
		kmem_free(head_ds_cursor, sizeof (*head_ds_cursor));
		zap_attribute_free(head_ds_attr);
	}
	zap_cursor_fini(zc);
	zap_attribute_free(za);
	kmem_free(zc, sizeof (*zc));
	return (0);
}
static int
process_error_list(spa_t *spa, avl_tree_t *list, void *uaddr, uint64_t *count)
{
	spa_error_entry_t *se;

	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (se = avl_first(list); se != NULL;
		    se = AVL_NEXT(list, se)) {
			int error =
			    copyout_entry(&se->se_bookmark, uaddr, count);
			if (error != 0)
				return (error);
		}
		return (0);
	}

	for (se = avl_first(list); se != NULL; se = AVL_NEXT(list, se)) {
		uint64_t head_ds = 0;
		int error = get_head_ds(spa, se->se_bookmark.zb_objset,
		    &head_ds);

		/*
		 * If get_head_ds() errors out, set the head filesystem
		 * to the filesystem stored in the bookmark of the
		 * error block.
		 */
		if (error != 0)
			head_ds = se->se_bookmark.zb_objset;

		error = process_error_block(spa, head_ds,
		    &se->se_zep, uaddr, count);
		if (error != 0)
			return (error);
	}

	return (0);
}
#endif
/*
 * Copy all known errors to userland as an array of bookmarks. This is
 * actually a union of the on-disk last log and current log, as well as any
 * pending error requests.
 *
 * Because the act of reading the on-disk log could cause errors to be
 * generated, we have two separate locks: one for the error log and one for
 * the in-core error lists. We only need the error list lock to log an error,
 * so we grab the error log lock while we read the on-disk logs, and only pick
 * up the error list lock when we are finished.
 */
int
spa_get_errlog(spa_t *spa, void *uaddr, uint64_t *count)
{
	int ret = 0;

#ifdef _KERNEL
	/*
	 * The pool config lock is needed to hold a dataset_t via (among other
	 * places) process_error_list() -> process_error_block()->
	 * find_top_affected_fs(), and lock ordering requires that we get it
	 * before the spa_errlog_lock.
	 */
	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
	mutex_enter(&spa->spa_errlog_lock);

	ret = process_error_log(spa, spa->spa_errlog_scrub, uaddr, count);

	if (!ret && !spa->spa_scrub_finished)
		ret = process_error_log(spa, spa->spa_errlog_last, uaddr,
		    count);

	mutex_enter(&spa->spa_errlist_lock);
	if (!ret)
		ret = process_error_list(spa, &spa->spa_errlist_scrub, uaddr,
		    count);
	if (!ret)
		ret = process_error_list(spa, &spa->spa_errlist_last, uaddr,
		    count);
	mutex_exit(&spa->spa_errlist_lock);

	mutex_exit(&spa->spa_errlog_lock);
	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
#else
	(void) spa, (void) uaddr, (void) count;
#endif

	return (ret);
}
/*
 * Called when a scrub completes. This simply sets a bit which tells which AVL
 * tree to add new errors. spa_errlog_sync() is responsible for actually
 * syncing the changes to the underlying objects.
 */
void
spa_errlog_rotate(spa_t *spa)
{
	mutex_enter(&spa->spa_errlist_lock);
	spa->spa_scrub_finished = B_TRUE;
	mutex_exit(&spa->spa_errlist_lock);
}
/*
 * Discard any pending errors from the spa_t. Called when unloading a faulted
 * pool, as the errors encountered during the open cannot be synced to disk.
 */
void
spa_errlog_drain(spa_t *spa)
{
	spa_error_entry_t *se;
	void *cookie;

	mutex_enter(&spa->spa_errlist_lock);

	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_last,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));

	cookie = NULL;
	while ((se = avl_destroy_nodes(&spa->spa_errlist_scrub,
	    &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));

	mutex_exit(&spa->spa_errlist_lock);
}
/*
 * Process a list of errors into the current on-disk log.
 */
void
sync_error_list(spa_t *spa, avl_tree_t *t, uint64_t *obj, dmu_tx_t *tx)
{
	spa_error_entry_t *se;
	char buf[NAME_MAX_LEN];
	void *cookie;

	if (avl_numnodes(t) == 0)
		return;

	/* create log if necessary */
	if (*obj == 0)
		*obj = zap_create(spa->spa_meta_objset, DMU_OT_ERROR_LOG,
		    DMU_OT_NONE, 0, tx);

	/* add errors to the current log */
	if (!spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			bookmark_to_name(&se->se_bookmark, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset, *obj, buf, 1,
			    strlen(name) + 1, name, tx);
		}
	} else {
		for (se = avl_first(t); se != NULL; se = AVL_NEXT(t, se)) {
			zbookmark_err_phys_t zep;
			zep.zb_object = se->se_zep.zb_object;
			zep.zb_level = se->se_zep.zb_level;
			zep.zb_blkid = se->se_zep.zb_blkid;
			zep.zb_birth = se->se_zep.zb_birth;

			uint64_t head_ds = 0;
			int error = get_head_ds(spa, se->se_bookmark.zb_objset,
			    &head_ds);

			/*
			 * If get_head_ds() errors out, set the head filesystem
			 * to the filesystem stored in the bookmark of the
			 * error block.
			 */
			if (error != 0)
				head_ds = se->se_bookmark.zb_objset;

			uint64_t err_obj;
			error = zap_lookup_int_key(spa->spa_meta_objset,
			    *obj, head_ds, &err_obj);

			if (error == ENOENT) {
				err_obj = zap_create(spa->spa_meta_objset,
				    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

				(void) zap_update_int_key(spa->spa_meta_objset,
				    *obj, head_ds, err_obj, tx);
			}
			errphys_to_name(&zep, buf, sizeof (buf));

			const char *name = se->se_name ? se->se_name : "";
			(void) zap_update(spa->spa_meta_objset,
			    err_obj, buf, 1, strlen(name) + 1, name, tx);
		}
	}
	/* purge the error list */
	cookie = NULL;
	while ((se = avl_destroy_nodes(t, &cookie)) != NULL)
		kmem_free(se, sizeof (spa_error_entry_t));
}
static void
delete_errlog(spa_t *spa, uint64_t spa_err_obj, dmu_tx_t *tx)
{
	if (spa_feature_is_enabled(spa, SPA_FEATURE_HEAD_ERRLOG)) {
		zap_cursor_t zc;
		zap_attribute_t *za = zap_attribute_alloc();
		for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
		    zap_cursor_retrieve(&zc, za) == 0;
		    zap_cursor_advance(&zc)) {
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za->za_first_integer, tx));
		}
		zap_cursor_fini(&zc);
		zap_attribute_free(za);
	}
	VERIFY0(dmu_object_free(spa->spa_meta_objset, spa_err_obj, tx));
}
/*
 * Sync the error log out to disk. This is a little tricky because the act of
 * writing the error log requires the spa_errlist_lock. So, we need to lock
 * the error lists, take a copy of the lists, and then reinitialize them.
 * Then, we drop the error list lock and take the error log lock, at which
 * point we do the errlog processing. Then, if we encounter an I/O error
 * during this process, we can successfully add the error to the list. Note
 * that this will result in the perpetual recycling of errors, but it is an
 * unlikely situation and not a performance critical operation.
 */
void
spa_errlog_sync(spa_t *spa, uint64_t txg)
{
	dmu_tx_t *tx;
	avl_tree_t scrub, last;
	boolean_t scrub_finished;

	mutex_enter(&spa->spa_errlist_lock);

	/*
	 * Bail out early under normal circumstances.
	 */
	if (avl_numnodes(&spa->spa_errlist_scrub) == 0 &&
	    avl_numnodes(&spa->spa_errlist_last) == 0 &&
	    avl_numnodes(&spa->spa_errlist_healed) == 0 &&
	    !spa->spa_scrub_finished) {
		mutex_exit(&spa->spa_errlist_lock);
		return;
	}

	spa_get_errlists(spa, &last, &scrub);
	scrub_finished = spa->spa_scrub_finished;
	spa->spa_scrub_finished = B_FALSE;

	mutex_exit(&spa->spa_errlist_lock);

	/*
	 * The pool config lock is needed to hold a dataset_t via
	 * sync_error_list() -> get_head_ds(), and lock ordering
	 * requires that we get it before the spa_errlog_lock.
	 */
	dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
	mutex_enter(&spa->spa_errlog_lock);

	tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);

	/*
	 * Remove healed errors from errors.
	 */
	spa_remove_healed_errors(spa, &last, &scrub, tx);

	/*
	 * Sync out the current list of errors.
	 */
	sync_error_list(spa, &last, &spa->spa_errlog_last, tx);

	/*
	 * Rotate the log if necessary.
	 */
	if (scrub_finished) {
		if (spa->spa_errlog_last != 0)
			delete_errlog(spa, spa->spa_errlog_last, tx);
		spa->spa_errlog_last = spa->spa_errlog_scrub;
		spa->spa_errlog_scrub = 0;

		sync_error_list(spa, &scrub, &spa->spa_errlog_last, tx);
	}

	/*
	 * Sync out any pending scrub errors.
	 */
	sync_error_list(spa, &scrub, &spa->spa_errlog_scrub, tx);

	/*
	 * Update the MOS to reflect the new values.
	 */
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_LAST, sizeof (uint64_t), 1,
	    &spa->spa_errlog_last, tx);
	(void) zap_update(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_POOL_ERRLOG_SCRUB, sizeof (uint64_t), 1,
	    &spa->spa_errlog_scrub, tx);

	dmu_tx_commit(tx);

	mutex_exit(&spa->spa_errlog_lock);
	dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
}
static void
delete_dataset_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t ds,
    dmu_tx_t *tx)
{
	if (spa_err_obj == 0)
		return;

	zap_cursor_t zc;
	zap_attribute_t *za = zap_attribute_alloc();
	for (zap_cursor_init(&zc, spa->spa_meta_objset, spa_err_obj);
	    zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) {
		uint64_t head_ds;
		name_to_object(za->za_name, &head_ds);
		if (head_ds == ds) {
			(void) zap_remove(spa->spa_meta_objset, spa_err_obj,
			    za->za_name, tx);
			VERIFY0(dmu_object_free(spa->spa_meta_objset,
			    za->za_first_integer, tx));
		}
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
}
void
spa_delete_dataset_errlog(spa_t *spa, uint64_t ds, dmu_tx_t *tx)
{
	mutex_enter(&spa->spa_errlog_lock);
	delete_dataset_errlog(spa, spa->spa_errlog_scrub, ds, tx);
	delete_dataset_errlog(spa, spa->spa_errlog_last, ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
}
static int
find_txg_ancestor_snapshot(spa_t *spa, uint64_t new_head, uint64_t old_head,
    uint64_t *txg)
{
	dsl_dataset_t *ds;
	dsl_pool_t *dp = spa->spa_dsl_pool;

	int error = dsl_dataset_hold_obj_flags(dp, old_head,
	    DS_HOLD_FLAG_DECRYPT, FTAG, &ds);
	if (error != 0)
		return (error);

	uint64_t prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	uint64_t prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;

	while (prev_obj != 0) {
		dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
		if ((error = dsl_dataset_hold_obj_flags(dp, prev_obj,
		    DS_HOLD_FLAG_DECRYPT, FTAG, &ds)) == 0 &&
		    dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj == new_head)
			break;

		if (error != 0)
			return (error);

		prev_obj_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
		prev_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
	}
	dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
	ASSERT(prev_obj != 0);
	*txg = prev_obj_txg;
	return (0);
}
static void
swap_errlog(spa_t *spa, uint64_t spa_err_obj, uint64_t new_head, uint64_t
    old_head, dmu_tx_t *tx)
{
	if (spa_err_obj == 0)
		return;

	uint64_t old_head_errlog;
	int error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj,
	    old_head, &old_head_errlog);

	/* If no error log, then there is nothing to do. */
	if (error != 0)
		return;

	uint64_t txg;
	error = find_txg_ancestor_snapshot(spa, new_head, old_head, &txg);
	if (error != 0)
		return;

	/*
	 * Create an error log if the file system being promoted does not
	 * already have one.
	 */
	uint64_t new_head_errlog;
	error = zap_lookup_int_key(spa->spa_meta_objset, spa_err_obj, new_head,
	    &new_head_errlog);

	if (error != 0) {
		new_head_errlog = zap_create(spa->spa_meta_objset,
		    DMU_OT_ERROR_LOG, DMU_OT_NONE, 0, tx);

		(void) zap_update_int_key(spa->spa_meta_objset, spa_err_obj,
		    new_head, new_head_errlog, tx);
	}

	zap_cursor_t zc;
	zap_attribute_t *za = zap_attribute_alloc();
	zbookmark_err_phys_t err_block;
	for (zap_cursor_init(&zc, spa->spa_meta_objset, old_head_errlog);
	    zap_cursor_retrieve(&zc, za) == 0; zap_cursor_advance(&zc)) {

		const char *name = "";
		name_to_errphys(za->za_name, &err_block);
		if (err_block.zb_birth < txg) {
			(void) zap_update(spa->spa_meta_objset,
			    new_head_errlog, za->za_name, 1,
			    strlen(name) + 1, name, tx);

			(void) zap_remove(spa->spa_meta_objset,
			    old_head_errlog, za->za_name, tx);
		}
	}
	zap_cursor_fini(&zc);
	zap_attribute_free(za);
}
void
spa_swap_errlog(spa_t *spa, uint64_t new_head_ds, uint64_t old_head_ds,
    dmu_tx_t *tx)
{
	mutex_enter(&spa->spa_errlog_lock);
	swap_errlog(spa, spa->spa_errlog_scrub, new_head_ds, old_head_ds, tx);
	swap_errlog(spa, spa->spa_errlog_last, new_head_ds, old_head_ds, tx);
	mutex_exit(&spa->spa_errlog_lock);
}
#if defined(_KERNEL)
/* error handling */
EXPORT_SYMBOL(spa_log_error);
EXPORT_SYMBOL(spa_approx_errlog_size);
EXPORT_SYMBOL(spa_get_last_errlog_size);
EXPORT_SYMBOL(spa_get_errlog);
EXPORT_SYMBOL(spa_errlog_rotate);
EXPORT_SYMBOL(spa_errlog_drain);
EXPORT_SYMBOL(spa_errlog_sync);
EXPORT_SYMBOL(spa_get_errlists);
EXPORT_SYMBOL(spa_delete_dataset_errlog);
EXPORT_SYMBOL(spa_swap_errlog);
EXPORT_SYMBOL(sync_error_list);
EXPORT_SYMBOL(spa_upgrade_errlog);
EXPORT_SYMBOL(find_top_affected_fs);
EXPORT_SYMBOL(find_birth_txg);
EXPORT_SYMBOL(zep_to_zb);
EXPORT_SYMBOL(name_to_errphys);
#endif

ZFS_MODULE_PARAM(zfs_spa, spa_, upgrade_errlog_limit, UINT, ZMOD_RW,
	"Limit the number of errors which will be upgraded to the new "
	"on-disk error log when enabling head_errlog");