 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2016, 2017, Intel Corporation.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 * Copyright (c) 2023, Klara Inc.
 */
/*
 * ZFS syseventd module.
 *
 * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 * 1. Search for any vdevs whose devid matches that of the newly added
 *    device.
 *
 * 2. If no vdevs are found, then search for any vdevs whose udev path
 *    matches that of the new device.
 *
 * 3. If no vdevs match by either method, then ignore the event.
 *
 * 4. Attempt to online the device with a flag to indicate that it should
 *    be unspared when resilvering completes.  If this succeeds, then the
 *    same device was inserted and we should continue normally.
 *
 * 5. If the pool does not have the 'autoreplace' property set, attempt to
 *    online the device again without the unspare flag, which will
 *    generate a FMA fault.
 *
 * 6. If the pool has the 'autoreplace' property set, and the matching vdev
 *    is a whole disk, then label the new disk and attempt a 'zpool
 *    replace'.
 *
 * The module responds to EC_DEV_ADD events.  The special ESC_ZFS_VDEV_CHECK
 * event indicates that a device failed to open during pool load, but the
 * autoreplace property was set.  In this case, we deferred the associated
 * FMA fault until our module had a chance to process the autoreplace logic.
 * If the device could not be replaced, then the second online attempt will
 * trigger the FMA fault that we skipped earlier.
 *
 * On Linux, udev delivers an add event for both the disk and its
 * partition(s), so a single physical insertion may be seen here more
 * than once.
 */
#include <ctype.h>
#include <errno.h>
#include <fcntl.h>
#include <libnvpair.h>
#include <libzfs.h>
#include <libzutil.h>
#include <limits.h>
#include <pthread.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <sys/list.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>
#include <unistd.h>

#include "zfs_agents.h"
#include "../zed_log.h"

#define	DEV_BYID_PATH	"/dev/disk/by-id/"
#define	DEV_BYPATH_PATH	"/dev/disk/by-path/"
#define	DEV_BYVDEV_PATH	"/dev/disk/by-vdev/"
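/*
 * Callback invoked for each (pool, vdev) pair that matches a device event;
 * the boolean argument indicates whether the matched device is already
 * labeled (i.e. the event is for a partition rather than a blank disk).
 */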
typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);
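/*
 * Global state shared by the handlers below.  Events are delivered to this
 * module one at a time (see the note on zfs_slm_deliver_event()).
 */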
libzfs_handle_t *g_zfshdl;
list_t g_pool_list;	/* list of unavailable pools at initialization */
list_t g_device_list;	/* list of disks with asynchronous label request */
tpool_t *g_tpool;	/* thread pool for enabling datasets of recovered pools */
boolean_t g_enumeration_done;
pthread_t g_zfs_tid;	/* zfs_enum_pools() thread */

typedef struct unavailpool {
	zpool_handle_t	*uap_zhp;
	list_node_t	uap_node;
} unavailpool_t;

typedef struct pendingdev {
	char		pd_physpath[128];
	list_node_t	pd_node;
} pendingdev_t;
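/*
 * Return the state of the pool's top-level vdev, taken from the
 * ZPOOL_CONFIG_VDEV_STATS array of its root vdev config.
 */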
static vdev_state_t
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}
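/*
 * zpool_iter() callback used at startup: pools whose top-level vdev is below
 * DEGRADED are remembered on the caller-supplied list so their datasets can
 * be enabled later, once the missing devices reappear.
 */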
static int
zfs_unavail_pool(zpool_handle_t *zhp, void *data)
{
	zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
	    zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));

	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {
		unavailpool_t *uap;
		uap = malloc(sizeof (unavailpool_t));
		if (uap == NULL) {
			perror("malloc");
			exit(EXIT_FAILURE);
		}

		uap->uap_zhp = zhp;
		list_insert_tail((list_t *)data, uap);
	} else {
		zpool_close(zhp);
	}
	return (0);
}

/*
 * Write an array of strings to the zed log
 */
static void lines_to_zed_log_msg(char **lines, int lines_cnt)
{
	int i;
	for (i = 0; i < lines_cnt; i++) {
		zed_log_msg(LOG_INFO, "%s", lines[i]);
	}
}
/*
 * Two-stage replace on Linux.
 *
 * Because we get a separate disk notification for the disk and for its
 * partition, we can wait for the partitioned disk slice to show up.
 *
 * The first stage tags the disk, initiates async partitioning, and returns.
 * The second stage finds the tag and proceeds to ZFS labeling/replace:
 *
 * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
 *
 * 1. physical match with no fs, no partition:
 *	tag it top, partition disk
 *
 * 2. physical match again, see partition and tag:
 *	proceed to label and replace
 */

/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system.  If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk.  This also implies that we should label
 * the disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished).  If this succeeds, then we're done.  If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was
 * opened, but that the label was not what we expected.  If the 'autoreplace'
 * property is enabled, then we relabel the disk (if specified), and attempt a
 * 'zpool replace'.  If the online is successful, but the new state is
 * something else (REMOVED or FAULTED), it indicates that we're out of sync or
 * in some sort of race, and we should avoid attempting to relabel the disk.
 *
 * Also can arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
static void
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)
{
	const char *path;
	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL, faulted = 0ULL;
	uint64_t guid = 0ULL;
	uint64_t is_spare = 0;
	const char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char pathbuf[PATH_MAX];
	int ret;
	int online_flag = ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE;
	boolean_t is_sd = B_FALSE;
	boolean_t is_mpath_wholedisk = B_FALSE;
	char **lines = NULL;
	int lines_cnt = 0;
	vdev_stat_t *vs;
	uint_t c;
	/*
	 * Get the persistent path, typically under the '/dev/disk/by-id' or
	 * '/dev/disk/by-vdev' directories.  Note that this path can change
	 * when a vdev is replaced with a new disk.
	 */
	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);
		return;
	}

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);

	update_vdev_config_dev_sysfs_path(vdev, path,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);

	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_FAULTED, &faulted);

	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_IS_SPARE, &is_spare);

	/*
	 * We've seen times where a disk won't have a ZPOOL_CONFIG_PHYS_PATH
	 * entry in its config.  For example, on this force-faulted disk:
	 *
	 *	children[0]:
	 *	    guid: 14309659774640089719
	 *	    path: '/dev/disk/by-vdev/L28'
	 *	    com.delphix:vdev_zap_leaf: 1161
	 *	    aux_state: 'external'
	 *	children[1]:
	 *	    guid: 16002508084177980912
	 *	    path: '/dev/disk/by-vdev/L29'
	 *	    devid: 'dm-uuid-mpath-35000c500a61d68a3'
	 *	    vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
	 *	    com.delphix:vdev_zap_leaf: 131
	 *
	 * If the disk's path is a /dev/disk/by-vdev/ path, then we can infer
	 * the ZPOOL_CONFIG_PHYS_PATH from the by-vdev disk name.
	 */
	if (physpath == NULL && path != NULL) {
		/* If path begins with "/dev/disk/by-vdev/" ... */
		if (strncmp(path, DEV_BYVDEV_PATH,
		    strlen(DEV_BYVDEV_PATH)) == 0) {
			/* Set physpath to the char after "/dev/disk/by-vdev" */
			physpath = &path[strlen(DEV_BYVDEV_PATH)];
		}
	}

	/*
	 * We don't want to autoreplace offlined disks.  However, we do want
	 * to replace force-faulted disks (`zpool offline -f`).  Force-faulted
	 * disks have both offline=1 and faulted=1 in the nvlist.
	 */
	if (offline && !faulted) {
		zed_log_msg(LOG_INFO, "%s: %s is offline, skip autoreplace",
		    __func__, path);
		return;
	}

	is_mpath_wholedisk = is_mpath_whole_disk(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " %s blank disk, %s mpath blank disk, %s labeled, enc sysfs '%s', "
	    "(guid %llu)",
	    zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL",
	    wholedisk ? "is" : "not",
	    is_mpath_wholedisk ? "is" : "not",
	    labeled ? "is" : "not",
	    enc_sysfs_path,
	    (long long unsigned int)guid);
	/*
	 * The VDEV guid is preferred for identification (gets passed in path)
	 */
	if (guid != 0) {
		(void) snprintf(fullpath, sizeof (fullpath), "%llu",
		    (long long unsigned int)guid);
	} else {
		/*
		 * otherwise use path sans partition suffix for whole disks
		 */
		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (wholedisk) {
			char *spath = zfs_strip_partition(fullpath);
			if (spath == NULL) {
				zed_log_msg(LOG_INFO, "%s: Can't alloc",
				    __func__);
				return;
			}

			(void) strlcpy(fullpath, spath, sizeof (fullpath));
			free(spath);
		}
	}

	if (is_spare)
		online_flag |= ZFS_ONLINE_SPARE;

	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath, online_flag, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO,
		    "  zpool_vdev_online: vdev '%s' ('%s') is "
		    "%s", fullpath, physpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
		return;
	}
	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = B_TRUE;

	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_mpath_wholedisk) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a blank disk for '%s' ('%s')", fullpath,
		    physpath);
		return;
	}

	/*
	 * Convert physical path into its current device node.  Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, pathbuf) == NULL && !is_mpath_wholedisk) {
		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		int err = zpool_vdev_online(zhp, fullpath,
		    ZFS_ONLINE_FORCEFAULT, &newstate);

		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s) "
		    "err %d, new state %d",
		    fullpath, libzfs_error_description(g_zfshdl), err,
		    err ? (int)newstate : 0);
		return;
	}
	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_REMOVED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		zed_log_msg(LOG_INFO, "  not autoreplacing since disk isn't in "
		    "a bad state (currently %llu)", vs->vs_state);
		return;
	}

	nvlist_lookup_string(vdev, "new_devid", &new_devid);

	if (is_mpath_wholedisk) {
		/* Don't label device mapper or multipath disks. */
		zed_log_msg(LOG_INFO,
		    "  it's a multipath wholedisk, don't label");
		if (zpool_prepare_disk(zhp, vdev, "autoreplace", &lines,
		    &lines_cnt) != 0) {
			zed_log_msg(LOG_INFO,
			    "  zpool_prepare_disk: could not "
			    "prepare '%s' (%s)", fullpath,
			    libzfs_error_description(g_zfshdl));
			if (lines_cnt > 0) {
				zed_log_msg(LOG_INFO,
				    "  zfs_prepare_disk output:");
				lines_to_zed_log_msg(lines, lines_cnt);
			}
			libzfs_free_str_array(lines, lines_cnt);
			return;
		}
	} else if (!labeled) {
		/*
		 * we're auto-replacing a raw disk, so label it first
		 */
		char *leafname;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.  Before we can label the disk, we need
		 * to map the physical string that was matched on to the
		 * underlying device node.
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 * replacement).
		 */
		leafname = strrchr(pathbuf, '/') + 1;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.
		 */
		if (zpool_prepare_and_label_disk(g_zfshdl, zhp, leafname,
		    vdev, "autoreplace", &lines, &lines_cnt) != 0) {
			zed_log_msg(LOG_WARNING,
			    "  zpool_prepare_and_label_disk: could not "
			    "label '%s' (%s)", leafname,
			    libzfs_error_description(g_zfshdl));
			if (lines_cnt > 0) {
				zed_log_msg(LOG_INFO,
				    "  zfs_prepare_disk output:");
				lines_to_zed_log_msg(lines, lines_cnt);
			}
			libzfs_free_str_array(lines, lines_cnt);

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		/*
		 * The disk labeling is asynchronous on Linux.  Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		device = malloc(sizeof (pendingdev_t));
		if (device == NULL) {
			perror("malloc");
			exit(EXIT_FAILURE);
		}

		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_NOTICE, "  zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */
	} else /* labeled */ {
		boolean_t found = B_FALSE;
		/*
		 * match up with request above to label the disk
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);
				free(device);
				found = B_TRUE;
				break;
			}
			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);
		}

		if (!found) {
			/* unexpected partition slice encountered */
			zed_log_msg(LOG_WARNING, "labeled disk %s was "
			    "unexpected here", fullpath);
			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
			return;
		}

		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);
	}

	/*
	 * Paths that begin with '/dev/disk/by-id/' will change and so
	 * they must be updated before calling zpool_vdev_attach().
	 */
	if (strncmp(path, DEV_BYID_PATH, strlen(DEV_BYID_PATH)) == 0) {
		(void) snprintf(pathbuf, sizeof (pathbuf), "%s%s",
		    DEV_BYID_PATH, new_devid);
		zed_log_msg(LOG_INFO, "  zpool_label_disk: path '%s' "
		    "replaced by '%s'", path, pathbuf);
		path = pathbuf;
	}

	libzfs_free_str_array(lines, lines_cnt);
	/*
	 * Construct the root vdev to pass to zpool_vdev_attach().  While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		return;
	}
	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");
		nvlist_free(nvroot);
		return;
	}

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    (const nvlist_t **)&newvd, 1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
		nvlist_free(newvd);
		nvlist_free(nvroot);
		return;
	}

	nvlist_free(newvd);

	/*
	 * Wait for udev to verify the links exist, then auto-replace
	 * the leaf disk at same physical location.
	 */
	if (zpool_label_disk_wait(path, DISK_LABEL_WAIT) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: pool '%s', after labeling "
		    "replacement disk, the expected disk partition link '%s' "
		    "is missing after waiting %u ms",
		    zpool_get_name(zhp), path, DISK_LABEL_WAIT);
		nvlist_free(nvroot);
		return;
	}
	/*
	 * Prefer sequential resilvering when supported (mirrors and dRAID),
	 * otherwise fall back to a traditional healing resilver.
	 */
	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);
	if (ret != 0) {
		ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
		    B_TRUE, B_FALSE);
	}

	zed_log_msg(LOG_WARNING, "  zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));

	nvlist_free(nvroot);
}
/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char		*dd_compare;
	const char		*dd_prop;
	zfs_process_func_t	dd_func;
	boolean_t		dd_found;
	boolean_t		dd_islabeled;
	uint64_t		dd_pool_guid;
	uint64_t		dd_vdev_guid;
	uint64_t		dd_new_vdev_guid;
	const char		*dd_new_devid;
	uint64_t		dd_num_spares;
} dev_data_t;
static void
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)
{
	dev_data_t *dp = data;
	const char *path = NULL;
	uint_t c, children;
	nvlist_t **child;
	uint64_t guid = 0;
	uint64_t isspare = 0;

	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}

	/*
	 * Iterate over any spares and cache devices
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}

	/* once a vdev was matched and processed there is nothing left to do */
	if (dp->dd_found && dp->dd_num_spares == 0)
		return;

	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &guid);

	/*
	 * Match by GUID if available; otherwise fall back to devid or
	 * physical path.
	 */
	if (dp->dd_vdev_guid != 0) {
		if (guid != dp->dd_vdev_guid)
			return;
		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched on %llu", guid);
		dp->dd_found = B_TRUE;
	} else if (dp->dd_compare != NULL) {
		/*
		 * NOTE: On Linux there is an event for partition, so unlike
		 * illumos, substring matching is not required to accommodate
		 * the partition suffix.  An exact match will be present in
		 * the dp->dd_compare value.
		 * If the attached disk already contains a vdev GUID, it means
		 * the disk is not clean.  In such a scenario, the physical path
		 * would be a match that makes the disk faulted when trying to
		 * online it.  So, we would only want to proceed if either GUID
		 * matches with the last attached disk or the disk is in clean
		 * state.
		 */
		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strcmp(dp->dd_compare, path) != 0) {
			return;
		}
		if (dp->dd_new_vdev_guid != 0 && dp->dd_new_vdev_guid != guid) {
			zed_log_msg(LOG_INFO, "  %s: no match (GUID:%llu"
			    " != vdev GUID:%llu)", __func__,
			    dp->dd_new_vdev_guid, guid);
			return;
		}

		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched %s on %s",
		    dp->dd_prop, path);
		dp->dd_found = B_TRUE;

		/* pass the new devid for use by auto-replacing code */
		if (dp->dd_new_devid != NULL) {
			(void) nvlist_add_string(nvl, "new_devid",
			    dp->dd_new_devid);
		}
	}

	if (dp->dd_found == B_TRUE && nvlist_lookup_uint64(nvl,
	    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
		dp->dd_num_spares++;

	(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
}
static void
zfs_enable_ds(void *arg)
{
	unavailpool_t *pool = (unavailpool_t *)arg;

	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0, 512);
	zpool_close(pool->uap_zhp);
	free(pool);
}
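/*
 * zpool_iter() callback: apply the dev_data_t search to one pool's vdev tree
 * and, once initial enumeration is done, re-enable the datasets of pools that
 * were unavailable at startup but have since recovered.
 */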
static int
zfs_iter_pool(zpool_handle_t *zhp, void *data)
{
	nvlist_t *config, *nvl;
	dev_data_t *dp = data;
	uint64_t pool_guid;
	unavailpool_t *pool;

	zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
	    zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);

	/*
	 * For each vdev in this pool, look for a match to apply dd_func
	 */
	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	} else {
		zed_log_msg(LOG_INFO, "%s: no config\n", __func__);
	}

	/*
	 * if this pool was originally unavailable,
	 * then enable its datasets asynchronously
	 */
	if (g_enumeration_done) {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {

			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)))
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				/* send to a background thread; keep zhp */
				if (g_tpool == NULL)
					g_tpool = tpool_create(1,
					    sysconf(_SC_NPROCESSORS_ONLN),
					    0, NULL);
				list_remove(&g_pool_list, pool);
				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
				    pool);
			}
			break;
		}
	}

	zpool_close(zhp);

	/* cease iteration after a match */
	return (dp->dd_found && dp->dd_num_spares == 0);
}
/*
 * Given a physical device location, iterate over all
 * (pool, vdev) pairs which correspond to that location.
 */
static boolean_t
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
    boolean_t is_slice, uint64_t new_vdev_guid)
{
	dev_data_t data = { 0 };

	data.dd_compare = physical;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;	/* used by auto replace code */
	data.dd_new_vdev_guid = new_vdev_guid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}
/*
 * Given a device identifier, find any vdevs with a matching by-vdev
 * path.  Normally we shouldn't need this as the comparison would be
 * made earlier in the devphys_iter().  For example, if we were replacing
 * /dev/disk/by-vdev/L28, normally devphys_iter() would match the
 * ZPOOL_CONFIG_PHYS_PATH of "L28" from the old disk config to "L28"
 * of the new disk config.  However, we've seen cases where
 * ZPOOL_CONFIG_PHYS_PATH was not in the config for the old disk.  Here's
 * an example of a real 2-disk mirror pool where one disk was force-faulted:
 *
 *	com.delphix:vdev_zap_top: 129
 *	    children[0]:
 *		guid: 14309659774640089719
 *		path: '/dev/disk/by-vdev/L28'
 *		com.delphix:vdev_zap_leaf: 1161
 *		aux_state: 'external'
 *	    children[1]:
 *		guid: 16002508084177980912
 *		path: '/dev/disk/by-vdev/L29'
 *		devid: 'dm-uuid-mpath-35000c500a61d68a3'
 *		vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
 *		com.delphix:vdev_zap_leaf: 131
 *
 * So in the case above, the only thing we could compare is the path.
 *
 * We can do this because we assume by-vdev paths are authoritative as physical
 * paths.  We could not assume this for normal paths like /dev/sda since the
 * physical location /dev/sda points to could change over time.
 */
static boolean_t
by_vdev_path_iter(const char *by_vdev_path, const char *devid,
    zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = by_vdev_path;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	if (strncmp(by_vdev_path, DEV_BYVDEV_PATH,
	    strlen(DEV_BYVDEV_PATH)) != 0) {
		/* by_vdev_path doesn't start with "/dev/disk/by-vdev/" */
		return (B_FALSE);
	}

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}
/*
 * Given a device identifier, find any vdevs with a matching devid.
 * On Linux we can match devid directly which is always a whole disk.
 */
static boolean_t
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_compare = devid;
	data.dd_func = func;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}

/*
 * Given a device guid, find any vdevs with a matching guid.
 */
static boolean_t
guid_iter(uint64_t pool_guid, uint64_t vdev_guid, const char *devid,
    zfs_process_func_t func, boolean_t is_slice)
{
	dev_data_t data = { 0 };

	data.dd_func = func;
	data.dd_found = B_FALSE;
	data.dd_pool_guid = pool_guid;
	data.dd_vdev_guid = vdev_guid;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
}
/*
 * Handle an EC_DEV_ADD.ESC_DISK event.
 *
 * illumos
 *	Expects: DEV_PHYS_PATH string in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/dsk/c0t1d0s0' (persistent)
 *	devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
 *	phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
 *
 * linux
 *	provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/sdc1' (not persistent)
 *	devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
 *	phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
 */
static int
zfs_deliver_add(nvlist_t *nvl)
{
	const char *devpath = NULL, *devid = NULL;
	uint64_t pool_guid = 0, vdev_guid = 0;
	boolean_t is_slice;

	/*
	 * Expecting a devid string and an optional physical location and guid
	 */
	if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0) {
		zed_log_msg(LOG_INFO, "%s: no dev identifier\n", __func__);
		return (-1);
	}

	(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
	(void) nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &pool_guid);
	(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &vdev_guid);

	is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);

	zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
	    devid, devpath ? devpath : "NULL", is_slice);

	/*
	 * Iterate over all vdevs looking for a match in the following order:
	 * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
	 * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
	 * 3. ZPOOL_CONFIG_GUID (identifies unique vdev).
	 * 4. ZPOOL_CONFIG_PATH for /dev/disk/by-vdev devices only (since
	 *    by-vdev paths represent physical paths).
	 */
	if (devid_iter(devid, zfs_process_add, is_slice))
		return (0);
	if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
	    is_slice, vdev_guid))
		return (0);
	if (vdev_guid != 0)
		(void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,
		    is_slice);

	if (devpath != NULL) {
		/* Can we match a /dev/disk/by-vdev/ path? */
		char by_vdev_path[MAXPATHLEN];
		snprintf(by_vdev_path, sizeof (by_vdev_path),
		    "/dev/disk/by-vdev/%s", devpath);
		if (by_vdev_path_iter(by_vdev_path, devid, zfs_process_add,
		    is_slice))
			return (0);
	}

	return (0);
}
/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool.  In this case, we treat it as if it were an add event.
 */
static int
zfs_deliver_check(nvlist_t *nvl)
{
	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0 ||
	    data.dd_vdev_guid == 0)
		return (0);

	zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
	    data.dd_pool_guid, data.dd_vdev_guid);

	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (0);
}
/*
 * Given a path to a vdev, lookup the vdev's physical size from its
 * config nvlist.
 *
 * Returns the vdev's physical size in bytes on success, 0 on error.
 */
static uint64_t
vdev_size_from_config(zpool_handle_t *zhp, const char *vdev_path)
{
	nvlist_t *nvl = NULL;
	boolean_t avail_spare, l2cache, log;
	vdev_stat_t *vs = NULL;
	uint_t c;

	nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
	if (nvl) {
		verify(nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t **)&vs, &c) == 0);
	} else {
		zed_log_msg(LOG_INFO, "%s: no nvlist for '%s'", __func__,
		    vdev_path);
		return (0);
	}

	return (vs->vs_pspace);
}

/*
 * Given a path to a vdev, lookup if the vdev is a "whole disk" in the
 * config nvlist.  "whole disk" means that ZFS was passed a whole disk
 * at pool creation time, which it partitioned up and has full control over.
 * Thus a partition with wholedisk=1 set tells us that zfs created the
 * partition at creation time.  A partition without whole disk set would have
 * been created externally (for example with fdisk) and passed to ZFS.
 *
 * Returns the whole disk value (either 0 or 1).
 */
static uint64_t
vdev_whole_disk_from_config(zpool_handle_t *zhp, const char *vdev_path)
{
	nvlist_t *nvl = NULL;
	boolean_t avail_spare, l2cache, log;
	uint64_t wholedisk = 0;

	nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);
	if (!nvl)
		return (0);

	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);

	return (wholedisk);
}
/*
 * If the device size grew more than 1% then return true.
 */
#define	DEVICE_GREW(oldsize, newsize) \
	((newsize > oldsize) && \
	((newsize / (newsize - oldsize)) <= 100))
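/*
 * Worked example (sizes in bytes): with oldsize = 990 and newsize = 1000 the
 * difference is 10 and 1000 / 10 = 100 <= 100, so DEVICE_GREW is true; with
 * oldsize = 995 and newsize = 1000 the difference is 5 and 1000 / 5 = 200,
 * so it is false.  The 1% threshold is relative to the new size and the
 * division is integer division.
 */

/*
 * zpool_iter() callback used for ESC_DEV_DLE (disk link event) handling:
 * locate the changed device in this pool and, if the pool has autoexpand set
 * and the device actually grew, online it so the new capacity is picked up.
 */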
static int
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)
{
	boolean_t avail_spare, l2cache;
	nvlist_t *udev_nvl = data;
	nvlist_t *tgt;
	int error;

	const char *tmp_devname;
	char devname[MAXPATHLEN] = "";
	uint64_t guid;

	if (nvlist_lookup_uint64(udev_nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
		sprintf(devname, "%llu", (u_longlong_t)guid);
	} else if (nvlist_lookup_string(udev_nvl, DEV_PHYS_PATH,
	    &tmp_devname) == 0) {
		strlcpy(devname, tmp_devname, MAXPATHLEN);
		zfs_append_partition(devname, MAXPATHLEN);
	} else {
		zed_log_msg(LOG_INFO, "%s: no guid or physpath", __func__);
	}

	zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
	    devname, zpool_get_name(zhp));

	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		const char *path;
		char fullpath[MAXPATHLEN];
		uint64_t wholedisk = 0;

		error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);
		if (error) {
			zpool_close(zhp);
			return (0);
		}

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		if (wholedisk) {
			char *tmp;
			path = strrchr(path, '/');
			if (path == NULL) {
				zpool_close(zhp);
				return (0);
			}

			tmp = zfs_strip_partition(path + 1);
			if (tmp == NULL) {
				zpool_close(zhp);
				return (0);
			}

			(void) strlcpy(fullpath, tmp, sizeof (fullpath));
			free(tmp);

			/*
			 * We need to reopen the pool associated with this
			 * device so that the kernel can update the size of
			 * the expanded device.  When expanding there is no
			 * need to restart the scrub from the beginning.
			 */
			boolean_t scrub_restart = B_FALSE;
			(void) zpool_reopen_one(zhp, &scrub_restart);
		} else {
			(void) strlcpy(fullpath, path, sizeof (fullpath));
		}

		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			vdev_state_t newstate;

			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
				/*
				 * If this disk size has not changed, then
				 * there's no need to do an autoexpand.  To
				 * check we look at the disk's size in its
				 * config, and compare it to the disk size
				 * that udev is reporting.
				 */
				uint64_t udev_size = 0, conf_size = 0,
				    wholedisk = 0, udev_parent_size = 0;

				/*
				 * Get the size of our disk that udev is
				 * reporting.
				 */
				if (nvlist_lookup_uint64(udev_nvl, DEV_SIZE,
				    &udev_size) != 0) {
					udev_size = 0;
				}

				/*
				 * Get the size of our disk's parent device
				 * from udev (where sda1's parent is sda).
				 */
				if (nvlist_lookup_uint64(udev_nvl,
				    DEV_PARENT_SIZE, &udev_parent_size) != 0) {
					udev_parent_size = 0;
				}

				conf_size = vdev_size_from_config(zhp,
				    fullpath);

				wholedisk = vdev_whole_disk_from_config(zhp,
				    fullpath);

				/*
				 * Only attempt an autoexpand if the vdev size
				 * changed.  There are two different cases
				 * to consider.
				 *
				 * 1. wholedisk=1
				 * If you do a 'zpool create' on a whole disk
				 * (like /dev/sda), then zfs will create
				 * partitions on the disk (like /dev/sda1).  In
				 * that case, wholedisk=1 will be set in the
				 * partition's nvlist config.  So zed will need
				 * to see if your parent device (/dev/sda)
				 * expanded in size, and if so, then attempt
				 * the autoexpand.
				 *
				 * 2. wholedisk=0
				 * If you do a 'zpool create' on an existing
				 * partition, or a device that doesn't allow
				 * partitions, then wholedisk=0, and you will
				 * simply need to check if the device itself
				 * expanded in size.
				 */
				if (DEVICE_GREW(conf_size, udev_size) ||
				    (wholedisk && DEVICE_GREW(conf_size,
				    udev_parent_size))) {
					error = zpool_vdev_online(zhp, fullpath,
					    0, &newstate);

					zed_log_msg(LOG_INFO,
					    "%s: autoexpanding '%s' from %llu"
					    " to %llu bytes in pool '%s': %d",
					    __func__, fullpath, conf_size,
					    MAX(udev_size, udev_parent_size),
					    zpool_get_name(zhp), error);
				}
			}
		}
		zpool_close(zhp);
		return (1);
	}
	zpool_close(zhp);
	return (0);
}
/*
 * This function handles the ESC_DEV_DLE device change event.  Use the
 * provided vdev guid when looking up a disk or partition; when the guid
 * is not present, assume the entire disk is owned by ZFS, append the
 * expected -part1 partition suffix, and then look up by physical path.
 */
static int
zfs_deliver_dle(nvlist_t *nvl)
{
	const char *devname;
	char name[MAXPATHLEN];
	uint64_t guid;

	if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
		sprintf(name, "%llu", (u_longlong_t)guid);
	} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
		strlcpy(name, devname, MAXPATHLEN);
		zfs_append_partition(name, MAXPATHLEN);
	} else {
		sprintf(name, "unknown");
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
	}

	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, nvl) != 1) {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
		    "found", name);
		return (1);
	}

	return (0);
}
/*
 * syseventd daemon module event handler
 *
 * Handles syseventd daemon zfs device related events:
 *
 *	EC_DEV_ADD.ESC_DISK
 *	EC_DEV_STATUS.ESC_DEV_DLE
 *	EC_ZFS.ESC_ZFS_VDEV_CHECK
 *
 * Note: assumes only one thread active at a time (not thread safe)
 */
static int
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)
{
	int ret;
	boolean_t is_check = B_FALSE, is_dle = B_FALSE;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/*
		 * We're mainly interested in disk additions, but we also listen
		 * for new loop devices, to allow for simplified testing.
		 */
		if (strcmp(subclass, ESC_DISK) != 0 &&
		    strcmp(subclass, ESC_LOFI) != 0)
			return (0);
	} else if (strcmp(class, EC_ZFS) == 0 &&
	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
		/*
		 * This event signifies that a device failed to open
		 * during pool load, but the 'autoreplace' property was
		 * set, so we should pretend it's just been added.
		 */
		is_check = B_TRUE;
	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
	    strcmp(subclass, ESC_DEV_DLE) == 0) {
		is_dle = B_TRUE;
	} else {
		return (0);
	}

	if (is_dle)
		ret = zfs_deliver_dle(nvl);
	else if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl);

	return (ret);
}
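/*
 * Startup thread: enumerate the imported pools and record any whose top-level
 * vdev is currently unavailable (see zfs_unavail_pool()).
 */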
static void *
zfs_enum_pools(void *arg)
{
	(void) arg;

	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);
	/*
	 * Linux - instead of using a thread pool, each list entry
	 * will spawn a thread when an unavailable pool transitions
	 * to available.  zfs_slm_fini will wait for these threads.
	 */
	g_enumeration_done = B_TRUE;
	return (NULL);
}
/*
 * Called from the zed daemon at startup.  Messages are later delivered to the
 * module from zevents or the udev monitor.
 *
 * For now, each agent has its own libzfs instance.
 */
int
zfs_slm_init(void)
{
	if ((g_zfshdl = libzfs_init()) == NULL)
		return (-1);

	/*
	 * collect a list of unavailable pools (asynchronously,
	 * since this can take a while)
	 */
	list_create(&g_pool_list, sizeof (struct unavailpool),
	    offsetof(struct unavailpool, uap_node));

	if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
		list_destroy(&g_pool_list);
		libzfs_fini(g_zfshdl);
		return (-1);
	}

	pthread_setname_np(g_zfs_tid, "enum-pools");
	list_create(&g_device_list, sizeof (struct pendingdev),
	    offsetof(struct pendingdev, pd_node));

	return (0);
}
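/*
 * Called from the zed daemon at exit: wait for the enumeration thread, drain
 * and destroy the dataset-enable thread pool, and release the pool and
 * pending-device lists created at startup.
 */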
void
zfs_slm_fini(void)
{
	unavailpool_t *pool;
	pendingdev_t *device;

	/* wait for zfs_enum_pools thread to complete */
	(void) pthread_join(g_zfs_tid, NULL);

	/* destroy the thread pool */
	if (g_tpool != NULL) {
		tpool_wait(g_tpool);
		tpool_destroy(g_tpool);
	}

	while ((pool = list_remove_head(&g_pool_list)) != NULL) {
		zpool_close(pool->uap_zhp);
		free(pool);
	}
	list_destroy(&g_pool_list);

	while ((device = list_remove_head(&g_device_list)) != NULL)
		free(device);
	list_destroy(&g_device_list);

	libzfs_fini(g_zfshdl);
}
void
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)
{
	zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
	(void) zfs_slm_deliver_event(class, subclass, nvl);
}