/*
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2016, 2017, Intel Corporation.
 * Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
 */
/*
 * ZFS syseventd module.
 *
 * file origin: openzfs/usr/src/cmd/syseventd/modules/zfs_mod/zfs_mod.c
 *
 * The purpose of this module is to identify when devices are added to the
 * system, and appropriately online or replace the affected vdevs.
 *
 * When a device is added to the system:
 *
 * 1. Search for any vdevs whose devid matches that of the newly added
 *    device.
 *
 * 2. If no vdevs are found, then search for any vdevs whose udev path
 *    matches that of the new device.
 *
 * 3. If no vdevs match by either method, then ignore the event.
 *
 * 4. Attempt to online the device with a flag to indicate that it should
 *    be unspared when resilvering completes. If this succeeds, then the
 *    same device was inserted and we should continue normally.
 *
 * 5. If the pool does not have the 'autoreplace' property set, attempt to
 *    online the device again without the unspare flag, which will
 *    generate a FMA fault.
 *
 * 6. If the pool has the 'autoreplace' property set, and the matching vdev
 *    is a whole disk, then label the new disk and attempt a 'zpool
 *    replace'.
 *
 * The module responds to EC_DEV_ADD events. The special ESC_ZFS_VDEV_CHECK
 * event indicates that a device failed to open during pool load, but the
 * autoreplace property was set. In this case, we deferred the associated
 * FMA fault until our module had a chance to process the autoreplace logic.
 * If the device could not be replaced, then the second online attempt will
 * trigger the FMA fault that we skipped earlier.
 *
 * On Linux udev provides a disk insert for both the disk and the partition.
 */
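/*
 * Condensed sketch of the flow above, in terms of the libzfs calls this
 * module actually makes (summary only; the functions below are
 * authoritative):
 *
 *   zpool_vdev_online(..., ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE, ...)
 *	same disk was re-inserted -> done
 *   zpool_vdev_online(..., ZFS_ONLINE_FORCEFAULT, ...)
 *	autoreplace disabled or not a whole disk -> post the FMA fault
 *   zpool_label_disk() + zpool_vdev_attach()
 *	autoreplace enabled and whole disk -> label, then 'zpool replace'
 */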
#include <libnvpair.h>
#include <sys/sunddi.h>
#include <sys/sysevent/eventdefs.h>
#include <sys/sysevent/dev.h>
#include <thread_pool.h>

#include "zfs_agents.h"
#include "../zed_log.h"

#define	DEV_BYID_PATH	"/dev/disk/by-id/"
#define	DEV_BYPATH_PATH	"/dev/disk/by-path/"
#define	DEV_BYVDEV_PATH	"/dev/disk/by-vdev/"
typedef void (*zfs_process_func_t)(zpool_handle_t *, nvlist_t *, boolean_t);

libzfs_handle_t *g_zfshdl;
list_t g_pool_list;	/* list of unavailable pools at initialization */
list_t g_device_list;	/* list of disks with asynchronous label request */

boolean_t g_enumeration_done;
pthread_t g_zfs_tid;	/* zfs_enum_pools() thread */

typedef struct unavailpool {
	zpool_handle_t	*uap_zhp;
	list_node_t	uap_node;
} unavailpool_t;

typedef struct pendingdev {
	char		pd_physpath[128];
	list_node_t	pd_node;
} pendingdev_t;
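/*
 * Return the state of the given pool's top-level vdev, as recorded in the
 * ZPOOL_CONFIG_VDEV_STATS entry of its config nvlist.
 */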
zfs_toplevel_state(zpool_handle_t *zhp)
{
	nvlist_t *nvroot;
	vdev_stat_t *vs;
	uint_t c;

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
	verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	return (vs->vs_state);
}
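/*
 * zpool_iter() callback used at startup: record pools whose top-level vdev
 * state is below VDEV_STATE_DEGRADED (i.e. unavailable) on the list passed
 * in 'data', so their datasets can be enabled later once they recover.
 */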
zfs_unavail_pool(zpool_handle_t *zhp, void *data)

	zed_log_msg(LOG_INFO, "zfs_unavail_pool: examining '%s' (state %d)",
	    zpool_get_name(zhp), (int)zfs_toplevel_state(zhp));

	if (zfs_toplevel_state(zhp) < VDEV_STATE_DEGRADED) {

		uap = malloc(sizeof (unavailpool_t));

		list_insert_tail((list_t *)data, uap);
/*
 * Two-stage replace on Linux:
 * since we get disk notifications, we can wait for the partitioned disk
 * slice to show up.
 *
 * First stage tags the disk, initiates async partitioning, and returns.
 * Second stage finds the tag and proceeds to ZFS labeling/replace.
 *
 * disk-add --> label-disk + tag-disk --> partition-add --> zpool_vdev_attach
 *
 * 1. physical match with no fs, no partition:
 *	tag it top, partition disk
 *
 * 2. physical match again, see partition and tag
 */
/*
 * The device associated with the given vdev (either by devid or physical path)
 * has been added to the system. If 'isdisk' is set, then we only attempt a
 * replacement if it's a whole disk. This also implies that we should label the
 * disk first.
 *
 * First, we attempt to online the device (making sure to undo any spare
 * operation when finished). If this succeeds, then we're done. If it fails,
 * and the new state is VDEV_CANT_OPEN, it indicates that the device was opened,
 * but that the label was not what we expected. If the 'autoreplace' property
 * is enabled, then we relabel the disk (if specified), and attempt a 'zpool
 * replace'. If the online is successful, but the new state is something else
 * (REMOVED or FAULTED), it indicates that we're out of sync or in some sort of
 * race, and we should avoid attempting to relabel the disk.
 *
 * We can also arrive here from an ESC_ZFS_VDEV_CHECK event.
 */
zfs_process_add(zpool_handle_t *zhp, nvlist_t *vdev, boolean_t labeled)

	vdev_state_t newstate;
	nvlist_t *nvroot, *newvd;
	pendingdev_t *device;
	uint64_t wholedisk = 0ULL;
	uint64_t offline = 0ULL, faulted = 0ULL;
	uint64_t guid = 0ULL;
	uint64_t is_spare = 0;
	const char *physpath = NULL, *new_devid = NULL, *enc_sysfs_path = NULL;
	char rawpath[PATH_MAX], fullpath[PATH_MAX];
	char devpath[PATH_MAX];

	int online_flag = ZFS_ONLINE_CHECKREMOVE | ZFS_ONLINE_UNSPARE;
	boolean_t is_sd = B_FALSE;
	boolean_t is_mpath_wholedisk = B_FALSE;
	if (nvlist_lookup_string(vdev, ZPOOL_CONFIG_PATH, &path) != 0)
		return;

	/* Skip healthy disks */
	verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);
	if (vs->vs_state == VDEV_STATE_HEALTHY) {
		zed_log_msg(LOG_INFO, "%s: %s is already healthy, skip it.",
		    __func__, path);

	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_PHYS_PATH, &physpath);
	(void) nvlist_lookup_string(vdev, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &enc_sysfs_path);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_OFFLINE, &offline);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_FAULTED, &faulted);

	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_GUID, &guid);
	(void) nvlist_lookup_uint64(vdev, ZPOOL_CONFIG_IS_SPARE, &is_spare);
	/*
	 * We've seen times where a disk won't have a ZPOOL_CONFIG_PHYS_PATH
	 * entry in its config. For example, on this force-faulted disk:
	 *
	 *	guid: 14309659774640089719
	 *	path: '/dev/disk/by-vdev/L28'
	 *	com.delphix:vdev_zap_leaf: 1161
	 *	aux_state: 'external'
	 *
	 *	guid: 16002508084177980912
	 *	path: '/dev/disk/by-vdev/L29'
	 *	devid: 'dm-uuid-mpath-35000c500a61d68a3'
	 *	vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
	 *	com.delphix:vdev_zap_leaf: 131
	 *
	 * If the disk's path is a /dev/disk/by-vdev/ path, then we can infer
	 * the ZPOOL_CONFIG_PHYS_PATH from the by-vdev disk name.
	 */
	if (physpath == NULL && path != NULL) {
		/* If path begins with "/dev/disk/by-vdev/" ... */
		if (strncmp(path, DEV_BYVDEV_PATH,
		    strlen(DEV_BYVDEV_PATH)) == 0) {
			/* Set physpath to the char after "/dev/disk/by-vdev" */
			physpath = &path[strlen(DEV_BYVDEV_PATH)];

	/*
	 * We don't want to autoreplace offlined disks. However, we do want to
	 * replace force-faulted disks (`zpool offline -f`). Force-faulted
	 * disks have both offline=1 and faulted=1 in the nvlist.
	 */
	if (offline && !faulted) {
		zed_log_msg(LOG_INFO, "%s: %s is offline, skip autoreplace",
		    __func__, path);
	is_mpath_wholedisk = is_mpath_whole_disk(path);
	zed_log_msg(LOG_INFO, "zfs_process_add: pool '%s' vdev '%s', phys '%s'"
	    " %s blank disk, %s mpath blank disk, %s labeled, enc sysfs '%s', "
	    zpool_get_name(zhp), path,
	    physpath ? physpath : "NULL",
	    wholedisk ? "is" : "not",
	    is_mpath_wholedisk ? "is" : "not",
	    labeled ? "is" : "not",
	    (long long unsigned int)guid);

	/*
	 * The VDEV guid is preferred for identification (gets passed in path)
	 */
	(void) snprintf(fullpath, sizeof (fullpath), "%llu",
	    (long long unsigned int)guid);
	/*
	 * otherwise use path sans partition suffix for whole disks
	 */
	(void) strlcpy(fullpath, path, sizeof (fullpath));

	char *spath = zfs_strip_partition(fullpath);

	zed_log_msg(LOG_INFO, "%s: Can't alloc",
	    __func__);

	(void) strlcpy(fullpath, spath, sizeof (fullpath));

	online_flag |= ZFS_ONLINE_SPARE;
	/*
	 * Attempt to online the device.
	 */
	if (zpool_vdev_online(zhp, fullpath, online_flag, &newstate) == 0 &&
	    (newstate == VDEV_STATE_HEALTHY ||
	    newstate == VDEV_STATE_DEGRADED)) {
		zed_log_msg(LOG_INFO,
		    "  zpool_vdev_online: vdev '%s' ('%s') is "
		    "%s", fullpath, physpath, (newstate == VDEV_STATE_HEALTHY) ?
		    "HEALTHY" : "DEGRADED");
	/*
	 * vdev_id alias rule for using scsi_debug devices (FMA automated
	 * testing)
	 */
	if (physpath != NULL && strcmp("scsidebug", physpath) == 0)
		is_sd = B_TRUE;
	/*
	 * If the pool doesn't have the autoreplace property set, then use
	 * vdev online to trigger a FMA fault by posting an ereport.
	 */
	if (!zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOREPLACE, NULL) ||
	    !(wholedisk || is_mpath_wholedisk) || (physpath == NULL)) {
		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);
		zed_log_msg(LOG_INFO, "Pool's autoreplace is not enabled or "
		    "not a blank disk for '%s' ('%s')", fullpath,
		    physpath);
	/*
	 * Convert physical path into its current device node. Rawpath
	 * needs to be /dev/disk/by-vdev for a scsi_debug device since
	 * /dev/disk/by-path will not be present.
	 */
	(void) snprintf(rawpath, sizeof (rawpath), "%s%s",
	    is_sd ? DEV_BYVDEV_PATH : DEV_BYPATH_PATH, physpath);

	if (realpath(rawpath, devpath) == NULL && !is_mpath_wholedisk) {
		zed_log_msg(LOG_INFO, "  realpath: %s failed (%s)",
		    rawpath, strerror(errno));

		(void) zpool_vdev_online(zhp, fullpath, ZFS_ONLINE_FORCEFAULT,
		    &newstate);

		zed_log_msg(LOG_INFO, "  zpool_vdev_online: %s FORCEFAULT (%s)",
		    fullpath, libzfs_error_description(g_zfshdl));
	/* Only autoreplace bad disks */
	if ((vs->vs_state != VDEV_STATE_DEGRADED) &&
	    (vs->vs_state != VDEV_STATE_FAULTED) &&
	    (vs->vs_state != VDEV_STATE_CANT_OPEN)) {
		zed_log_msg(LOG_INFO, "  not autoreplacing since disk isn't in "
		    "a bad state (currently %llu)", vs->vs_state);

	nvlist_lookup_string(vdev, "new_devid", &new_devid);
	if (is_mpath_wholedisk) {
		/* Don't label device mapper or multipath disks. */
	} else if (!labeled) {
		/*
		 * we're auto-replacing a raw disk, so label it first
		 */

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label. Before we can label the disk, we need
		 * to map the physical string that was matched on to the
		 * underlying device node.
		 *
		 * If any part of this process fails, then do a force online
		 * to trigger a ZFS fault for the device (and any hot spare
		 */
		leafname = strrchr(devpath, '/') + 1;

		/*
		 * If this is a request to label a whole disk, then attempt to
		 * write out the label.
		 */
		if (zpool_label_disk(g_zfshdl, zhp, leafname) != 0) {
			zed_log_msg(LOG_INFO, "  zpool_label_disk: could not "
			    "label '%s' (%s)", leafname,
			    libzfs_error_description(g_zfshdl));

			(void) zpool_vdev_online(zhp, fullpath,
			    ZFS_ONLINE_FORCEFAULT, &newstate);
		/*
		 * The disk labeling is asynchronous on Linux. Just record
		 * this label request and return as there will be another
		 * disk add event for the partition after the labeling is
		 * completed.
		 */
		device = malloc(sizeof (pendingdev_t));
		if (device == NULL) {

		(void) strlcpy(device->pd_physpath, physpath,
		    sizeof (device->pd_physpath));
		list_insert_tail(&g_device_list, device);

		zed_log_msg(LOG_INFO, "  zpool_label_disk: async '%s' (%llu)",
		    leafname, (u_longlong_t)guid);

		return;	/* resumes at EC_DEV_ADD.ESC_DISK for partition */
	} else /* labeled */ {
		boolean_t found = B_FALSE;
		/*
		 * match up with request above to label the disk
		 */
		for (device = list_head(&g_device_list); device != NULL;
		    device = list_next(&g_device_list, device)) {
			if (strcmp(physpath, device->pd_physpath) == 0) {
				list_remove(&g_device_list, device);

			zed_log_msg(LOG_INFO, "zpool_label_disk: %s != %s",
			    physpath, device->pd_physpath);

		/* unexpected partition slice encountered */
		zed_log_msg(LOG_INFO, "labeled disk %s unexpected here",

		(void) zpool_vdev_online(zhp, fullpath,
		    ZFS_ONLINE_FORCEFAULT, &newstate);

		zed_log_msg(LOG_INFO, "  zpool_label_disk: resume '%s' (%llu)",
		    physpath, (u_longlong_t)guid);

		(void) snprintf(devpath, sizeof (devpath), "%s%s",
		    DEV_BYID_PATH, new_devid);
	/*
	 * Construct the root vdev to pass to zpool_vdev_attach(). While adding
	 * the entire vdev structure is harmless, we construct a reduced set of
	 * path/physpath/wholedisk to keep it simple.
	 */
	if (nvlist_alloc(&nvroot, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");

	if (nvlist_alloc(&newvd, NV_UNIQUE_NAME, 0) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: nvlist_alloc out of memory");

	if (nvlist_add_string(newvd, ZPOOL_CONFIG_TYPE, VDEV_TYPE_DISK) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_PATH, path) != 0 ||
	    nvlist_add_string(newvd, ZPOOL_CONFIG_DEVID, new_devid) != 0 ||
	    (physpath != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_PHYS_PATH, physpath) != 0) ||
	    (enc_sysfs_path != NULL && nvlist_add_string(newvd,
	    ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH, enc_sysfs_path) != 0) ||
	    nvlist_add_uint64(newvd, ZPOOL_CONFIG_WHOLE_DISK, wholedisk) != 0 ||
	    nvlist_add_string(nvroot, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) != 0 ||
	    nvlist_add_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    (const nvlist_t **)&newvd, 1) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: unable to add nvlist pairs");
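	/*
	 * At this point the replacement request handed to zpool_vdev_attach()
	 * below is a single-child root nvlist, roughly:
	 *
	 *	root (ZPOOL_CONFIG_TYPE = VDEV_TYPE_ROOT)
	 *	    children[0] (VDEV_TYPE_DISK):
	 *		path, devid, phys_path (optional),
	 *		vdev_enc_sysfs_path (optional), whole_disk
	 */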
	/*
	 * Wait for udev to verify the links exist, then auto-replace
	 * the leaf disk at same physical location.
	 */
	if (zpool_label_disk_wait(path, 3000) != 0) {
		zed_log_msg(LOG_WARNING, "zfs_mod: expected replacement "
		    "disk %s is missing", path);
	/*
	 * Prefer sequential resilvering when supported (mirrors and dRAID),
	 * otherwise fall back to a traditional healing resilver.
	 */
	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot, B_TRUE, B_TRUE);

	ret = zpool_vdev_attach(zhp, fullpath, path, nvroot,
	    B_TRUE, B_FALSE);

	zed_log_msg(LOG_INFO, "  zpool_vdev_replace: %s with %s (%s)",
	    fullpath, path, (ret == 0) ? "no errors" :
	    libzfs_error_description(g_zfshdl));
/*
 * Utility functions to find a vdev matching given criteria.
 */
typedef struct dev_data {
	const char		*dd_compare;
	const char		*dd_prop;
	zfs_process_func_t	dd_func;
	boolean_t		dd_found;
	boolean_t		dd_islabeled;
	uint64_t		dd_pool_guid;
	uint64_t		dd_vdev_guid;
	uint64_t		dd_new_vdev_guid;
	const char		*dd_new_devid;
	uint64_t		dd_num_spares;
} dev_data_t;
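/*
 * Recursively walk a vdev tree (children, spares, and L2 cache devices)
 * looking for the vdev described by 'data' (a dev_data_t). On a match,
 * record the result and invoke dd_func on the matching vdev.
 */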
zfs_iter_vdev(zpool_handle_t *zhp, nvlist_t *nvl, void *data)

	dev_data_t *dp = data;
	const char *path = NULL;

	uint64_t isspare = 0;

	/*
	 * First iterate over any children.
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	/*
	 * Iterate over any spares and cache devices
	 */
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_SPARES,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}
	if (nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_L2CACHE,
	    &child, &children) == 0) {
		for (c = 0; c < children; c++)
			zfs_iter_vdev(zhp, child[c], data);
	}

	/* once a vdev was matched and processed there is nothing left to do */
	if (dp->dd_found && dp->dd_num_spares == 0)
		return;

	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_GUID, &guid);
	/*
	 * Match by GUID if available; otherwise fall back to devid or
	 * physical path.
	 */
	if (dp->dd_vdev_guid != 0) {
		if (guid != dp->dd_vdev_guid)
			return;
		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched on %llu", guid);
		dp->dd_found = B_TRUE;
	} else if (dp->dd_compare != NULL) {
		/*
		 * NOTE: On Linux there is an event for partition, so unlike
		 * illumos, substring matching is not required to accommodate
		 * the partition suffix. An exact match will be present in
		 * the dp->dd_compare value.
		 * If the attached disk already contains a vdev GUID, it means
		 * the disk is not clean. In such a scenario, the physical path
		 * would be a match that makes the disk faulted when trying to
		 * online it. So, we would only want to proceed if either GUID
		 * matches with the last attached disk or the disk is in a
		 * clean state.
		 */
		if (nvlist_lookup_string(nvl, dp->dd_prop, &path) != 0 ||
		    strcmp(dp->dd_compare, path) != 0) {
			return;
		}
		if (dp->dd_new_vdev_guid != 0 && dp->dd_new_vdev_guid != guid) {
			zed_log_msg(LOG_INFO, "  %s: no match (GUID:%llu"
			    " != vdev GUID:%llu)", __func__,
			    dp->dd_new_vdev_guid, guid);
			return;
		}

		zed_log_msg(LOG_INFO, "  zfs_iter_vdev: matched %s on %s",
		    dp->dd_prop, path);
		dp->dd_found = B_TRUE;

		/* pass the new devid for use by replacing code */
		if (dp->dd_new_devid != NULL) {
			(void) nvlist_add_string(nvl, "new_devid",
			    dp->dd_new_devid);
		}
	}

	if (dp->dd_found == B_TRUE && nvlist_lookup_uint64(nvl,
	    ZPOOL_CONFIG_IS_SPARE, &isspare) == 0 && isspare)
		dp->dd_num_spares++;

	(dp->dd_func)(zhp, nvl, dp->dd_islabeled);
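/*
 * Thread-pool callback: enable (mount/share) the datasets of a pool that
 * has become available again, then close its handle.
 */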
zfs_enable_ds(void *arg)

	unavailpool_t *pool = (unavailpool_t *)arg;

	(void) zpool_enable_datasets(pool->uap_zhp, NULL, 0);
	zpool_close(pool->uap_zhp);
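/*
 * zpool_iter() callback: look through one pool's vdev tree for a match to
 * the criteria in 'data' (a dev_data_t). Once startup enumeration has
 * completed, also dispatch any previously-unavailable pool that has
 * recovered to the thread pool so its datasets can be enabled.
 */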
zfs_iter_pool(zpool_handle_t *zhp, void *data)

	nvlist_t *config, *nvl;
	dev_data_t *dp = data;

	zed_log_msg(LOG_INFO, "zfs_iter_pool: evaluating vdevs on %s (by %s)",
	    zpool_get_name(zhp), dp->dd_vdev_guid ? "GUID" : dp->dd_prop);

	/*
	 * For each vdev in this pool, look for a match to apply dd_func
	 */
	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		if (dp->dd_pool_guid == 0 ||
		    (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
		    &pool_guid) == 0 && pool_guid == dp->dd_pool_guid)) {
			(void) nvlist_lookup_nvlist(config,
			    ZPOOL_CONFIG_VDEV_TREE, &nvl);
			zfs_iter_vdev(zhp, nvl, data);
		}
	} else {
		zed_log_msg(LOG_INFO, "%s: no config\n", __func__);
	}

	/*
	 * if this pool was originally unavailable,
	 * then enable its datasets asynchronously
	 */
	if (g_enumeration_done) {
		for (pool = list_head(&g_pool_list); pool != NULL;
		    pool = list_next(&g_pool_list, pool)) {

			if (strcmp(zpool_get_name(zhp),
			    zpool_get_name(pool->uap_zhp)))
				continue;
			if (zfs_toplevel_state(zhp) >= VDEV_STATE_DEGRADED) {
				list_remove(&g_pool_list, pool);
				(void) tpool_dispatch(g_tpool, zfs_enable_ds,
				    pool);
			}
		}
	}

	/* cease iteration after a match */
	return (dp->dd_found && dp->dd_num_spares == 0);
/*
 * Given a physical device location, iterate over all
 * (pool, vdev) pairs which correspond to that location.
 */
devphys_iter(const char *physical, const char *devid, zfs_process_func_t func,
    boolean_t is_slice, uint64_t new_vdev_guid)

	dev_data_t data = { 0 };

	data.dd_compare = physical;
	data.dd_prop = ZPOOL_CONFIG_PHYS_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;	/* used by auto replace code */
	data.dd_new_vdev_guid = new_vdev_guid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
/*
 * Given a device identifier, find any vdevs with a matching by-vdev
 * path. Normally we shouldn't need this as the comparison would be
 * made earlier in the devphys_iter(). For example, if we were replacing
 * /dev/disk/by-vdev/L28, normally devphys_iter() would match the
 * ZPOOL_CONFIG_PHYS_PATH of "L28" from the old disk config to "L28"
 * of the new disk config. However, we've seen cases where
 * ZPOOL_CONFIG_PHYS_PATH was not in the config for the old disk. Here's
 * an example of a real 2-disk mirror pool where one disk was force
 * faulted:
 *
 *	com.delphix:vdev_zap_top: 129
 *	    guid: 14309659774640089719
 *	    path: '/dev/disk/by-vdev/L28'
 *	    com.delphix:vdev_zap_leaf: 1161
 *	    aux_state: 'external'
 *
 *	    guid: 16002508084177980912
 *	    path: '/dev/disk/by-vdev/L29'
 *	    devid: 'dm-uuid-mpath-35000c500a61d68a3'
 *	    vdev_enc_sysfs_path: '/sys/class/enclosure/0:0:1:0/SLOT 30 32'
 *	    com.delphix:vdev_zap_leaf: 131
 *
 * So in the case above, the only thing we could compare is the path.
 *
 * We can do this because we assume by-vdev paths are authoritative as physical
 * paths. We could not assume this for normal paths like /dev/sda since the
 * physical location /dev/sda points to could change over time.
 */
by_vdev_path_iter(const char *by_vdev_path, const char *devid,
    zfs_process_func_t func, boolean_t is_slice)

	dev_data_t data = { 0 };

	data.dd_compare = by_vdev_path;
	data.dd_prop = ZPOOL_CONFIG_PATH;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	if (strncmp(by_vdev_path, DEV_BYVDEV_PATH,
	    strlen(DEV_BYVDEV_PATH)) != 0) {
		/* by_vdev_path doesn't start with "/dev/disk/by-vdev/" */
		return (B_FALSE);
	}

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
/*
 * Given a device identifier, find any vdevs with a matching devid.
 * On Linux we can match devid directly which is always a whole disk.
 */
devid_iter(const char *devid, zfs_process_func_t func, boolean_t is_slice)

	dev_data_t data = { 0 };

	data.dd_compare = devid;
	data.dd_prop = ZPOOL_CONFIG_DEVID;
	data.dd_found = B_FALSE;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
/*
 * Given a device guid, find any vdevs with a matching guid.
 */
guid_iter(uint64_t pool_guid, uint64_t vdev_guid, const char *devid,
    zfs_process_func_t func, boolean_t is_slice)

	dev_data_t data = { 0 };

	data.dd_found = B_FALSE;
	data.dd_pool_guid = pool_guid;
	data.dd_vdev_guid = vdev_guid;
	data.dd_islabeled = is_slice;
	data.dd_new_devid = devid;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);

	return (data.dd_found);
/*
 * Handle an EC_DEV_ADD.ESC_DISK event.
 *
 * illumos
 *	Expects: DEV_PHYS_PATH string in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/dsk/c0t1d0s0' (persistent)
 *	devid: 'id1,sd@SATA_____Hitachi_HDS72101______JP2940HZ3H74MC/a'
 *	phys_path: '/pci@0,0/pci103c,1609@11/disk@1,0:a'
 *
 * linux
 *	provides: DEV_PHYS_PATH and DEV_IDENTIFIER strings in schema
 *	Matches: vdev's ZPOOL_CONFIG_PHYS_PATH or ZPOOL_CONFIG_DEVID
 *
 *	path: '/dev/sdc1' (not persistent)
 *	devid: 'ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1'
 *	phys_path: 'pci-0000:04:00.0-sas-0x4433221106000000-lun-0'
 */
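/*
 * Illustrative sketch only (not part of this module's control flow): a
 * Linux disk-add event of the shape described above could be built and fed
 * to zfs_deliver_add() like this, reusing the example devid and phys_path
 * strings from the comment.
 */
#if 0
	nvlist_t *nvl = fnvlist_alloc();
	fnvlist_add_string(nvl, DEV_IDENTIFIER,
	    "ata-SAMSUNG_HD204UI_S2HGJD2Z805891-part1");
	fnvlist_add_string(nvl, DEV_PHYS_PATH,
	    "pci-0000:04:00.0-sas-0x4433221106000000-lun-0");
	/* DEV_IS_PART would also be set for a partition event */
	zfs_deliver_add(nvl);
	fnvlist_free(nvl);
#endif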
zfs_deliver_add(nvlist_t *nvl)

	const char *devpath = NULL, *devid = NULL;
	uint64_t pool_guid = 0, vdev_guid = 0;

	/*
	 * Expecting a devid string and an optional physical location and guid
	 */
	if (nvlist_lookup_string(nvl, DEV_IDENTIFIER, &devid) != 0) {
		zed_log_msg(LOG_INFO, "%s: no dev identifier\n", __func__);

	(void) nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devpath);
	(void) nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID, &pool_guid);
	(void) nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &vdev_guid);

	is_slice = (nvlist_lookup_boolean(nvl, DEV_IS_PART) == 0);

	zed_log_msg(LOG_INFO, "zfs_deliver_add: adding %s (%s) (is_slice %d)",
	    devid, devpath ? devpath : "NULL", is_slice);
	/*
	 * Iterate over all vdevs looking for a match in the following order:
	 * 1. ZPOOL_CONFIG_DEVID (identifies the unique disk)
	 * 2. ZPOOL_CONFIG_PHYS_PATH (identifies disk physical location).
	 * 3. ZPOOL_CONFIG_GUID (identifies unique vdev).
	 * 4. ZPOOL_CONFIG_PATH for /dev/disk/by-vdev devices only (since
	 *    by-vdev paths represent physical paths).
	 */
	if (devid_iter(devid, zfs_process_add, is_slice))
		return;
	if (devpath != NULL && devphys_iter(devpath, devid, zfs_process_add,
	    is_slice, vdev_guid))
		return;

	(void) guid_iter(pool_guid, vdev_guid, devid, zfs_process_add,
	    is_slice);

	if (devpath != NULL) {
		/* Can we match a /dev/disk/by-vdev/ path? */
		char by_vdev_path[MAXPATHLEN];
		snprintf(by_vdev_path, sizeof (by_vdev_path),
		    "/dev/disk/by-vdev/%s", devpath);
		if (by_vdev_path_iter(by_vdev_path, devid, zfs_process_add,
		    is_slice))
			return;
	}
/*
 * Called when we receive a VDEV_CHECK event, which indicates a device could not
 * be opened during initial pool open, but the autoreplace property was set on
 * the pool. In this case, we treat it as if it were an add event.
 */
zfs_deliver_check(nvlist_t *nvl)

	dev_data_t data = { 0 };

	if (nvlist_lookup_uint64(nvl, ZFS_EV_POOL_GUID,
	    &data.dd_pool_guid) != 0 ||
	    nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID,
	    &data.dd_vdev_guid) != 0 ||
	    data.dd_vdev_guid == 0)

	zed_log_msg(LOG_INFO, "zfs_deliver_check: pool '%llu', vdev %llu",
	    data.dd_pool_guid, data.dd_vdev_guid);

	data.dd_func = zfs_process_add;

	(void) zpool_iter(g_zfshdl, zfs_iter_pool, &data);
/*
 * Given a path to a vdev, look up the vdev's physical size from its
 * config nvlist.
 *
 * Returns the vdev's physical size in bytes on success, 0 on error.
 */
vdev_size_from_config(zpool_handle_t *zhp, const char *vdev_path)

	nvlist_t *nvl = NULL;
	boolean_t avail_spare, l2cache, log;
	vdev_stat_t *vs = NULL;

	nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);

	verify(nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_VDEV_STATS,
	    (uint64_t **)&vs, &c) == 0);

	zed_log_msg(LOG_INFO, "%s: no nvlist for '%s'", __func__,
	    vdev_path);

	return (vs->vs_pspace);
/*
 * Given a path to a vdev, look up whether the vdev is a "whole disk" in the
 * config nvlist. "whole disk" means that ZFS was passed a whole disk
 * at pool creation time, which it partitioned up and has full control over.
 * Thus a partition with wholedisk=1 set tells us that zfs created the
 * partition at creation time. A partition without whole disk set would have
 * been created externally (for example with fdisk) and passed to ZFS.
 *
 * Returns the whole disk value (either 0 or 1).
 */
vdev_whole_disk_from_config(zpool_handle_t *zhp, const char *vdev_path)

	nvlist_t *nvl = NULL;
	boolean_t avail_spare, l2cache, log;
	uint64_t wholedisk = 0;

	nvl = zpool_find_vdev(zhp, vdev_path, &avail_spare, &l2cache, &log);

	(void) nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_WHOLE_DISK, &wholedisk);
/*
 * If the device size grew more than 1% then return true.
 */
#define	DEVICE_GREW(oldsize, newsize) \
		    ((newsize > oldsize) && \
		    ((newsize / (newsize - oldsize)) <= 100))
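/*
 * Worked example (illustrative numbers only): with oldsize = 1000 GiB and
 * newsize = 1005 GiB, newsize / (newsize - oldsize) = 1005 / 5 = 201 > 100,
 * so DEVICE_GREW() is false; with newsize = 1011 GiB the ratio is
 * 1011 / 11 = 91 <= 100, so roughly 1% of growth or more reports true.
 */

/*
 * zpool_iter() callback for ESC_DEV_DLE (device link event) handling:
 * locate the named vdev in this pool and, when the 'autoexpand' property
 * is set and the device has actually grown, online it so the pool can use
 * the new capacity.
 */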
zfsdle_vdev_online(zpool_handle_t *zhp, void *data)

	boolean_t avail_spare, l2cache;
	nvlist_t *udev_nvl = data;

	const char *tmp_devname;
	char devname[MAXPATHLEN] = "";

	if (nvlist_lookup_uint64(udev_nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
		sprintf(devname, "%llu", (u_longlong_t)guid);
	} else if (nvlist_lookup_string(udev_nvl, DEV_PHYS_PATH,
	    &tmp_devname) == 0) {
		strlcpy(devname, tmp_devname, MAXPATHLEN);
		zfs_append_partition(devname, MAXPATHLEN);
	} else {
		zed_log_msg(LOG_INFO, "%s: no guid or physpath", __func__);
	}

	zed_log_msg(LOG_INFO, "zfsdle_vdev_online: searching for '%s' in '%s'",
	    devname, zpool_get_name(zhp));
	if ((tgt = zpool_find_vdev_by_physpath(zhp, devname,
	    &avail_spare, &l2cache, NULL)) != NULL) {
		char fullpath[MAXPATHLEN];
		uint64_t wholedisk = 0;

		error = nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &path);

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);

		path = strrchr(path, '/');

		tmp = zfs_strip_partition(path + 1);

		(void) strlcpy(fullpath, tmp, sizeof (fullpath));

		/*
		 * We need to reopen the pool associated with this
		 * device so that the kernel can update the size of
		 * the expanded device. When expanding there is no
		 * need to restart the scrub from the beginning.
		 */
		boolean_t scrub_restart = B_FALSE;
		(void) zpool_reopen_one(zhp, &scrub_restart);

		(void) strlcpy(fullpath, path, sizeof (fullpath));
		if (zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
			vdev_state_t newstate;

			if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL) {
				/*
				 * If this disk size has not changed, then
				 * there's no need to do an autoexpand. To
				 * check we look at the disk's size in its
				 * config, and compare it to the disk size
				 * that udev is reporting.
				 */
				uint64_t udev_size = 0, conf_size = 0,
				    wholedisk = 0, udev_parent_size = 0;
				/*
				 * Get the size of our disk that udev is
				 * reporting.
				 */
				if (nvlist_lookup_uint64(udev_nvl, DEV_SIZE,
				    &udev_size) != 0) {
					udev_size = 0;
				}

				/*
				 * Get the size of our disk's parent device
				 * from udev (where sda1's parent is sda).
				 */
				if (nvlist_lookup_uint64(udev_nvl,
				    DEV_PARENT_SIZE, &udev_parent_size) != 0) {
					udev_parent_size = 0;
				}

				conf_size = vdev_size_from_config(zhp,
				    fullpath);

				wholedisk = vdev_whole_disk_from_config(zhp,
				    fullpath);
				/*
				 * Only attempt an autoexpand if the vdev size
				 * changed. There are two different cases
				 * to consider:
				 *
				 * If you do a 'zpool create' on a whole disk
				 * (like /dev/sda), then zfs will create
				 * partitions on the disk (like /dev/sda1). In
				 * that case, wholedisk=1 will be set in the
				 * partition's nvlist config. So zed will need
				 * to see if your parent device (/dev/sda)
				 * expanded in size, and if so, then attempt
				 * the autoexpand.
				 *
				 * If you do a 'zpool create' on an existing
				 * partition, or a device that doesn't allow
				 * partitions, then wholedisk=0, and you will
				 * simply need to check if the device itself
				 * expanded in size.
				 */
				if (DEVICE_GREW(conf_size, udev_size) ||
				    (wholedisk && DEVICE_GREW(conf_size,
				    udev_parent_size))) {
					error = zpool_vdev_online(zhp, fullpath,

					zed_log_msg(LOG_INFO,
					    "%s: autoexpanding '%s' from %llu"
					    " to %llu bytes in pool '%s': %d",
					    __func__, fullpath, conf_size,
					    MAX(udev_size, udev_parent_size),
					    zpool_get_name(zhp), error);
/*
 * This function handles the ESC_DEV_DLE device change event. Use the
 * provided vdev guid when looking up a disk or partition; when the guid
 * is not present, assume the entire disk is owned by ZFS and append the
 * expected -part1 partition information, then look up by physical path.
 */
zfs_deliver_dle(nvlist_t *nvl)

	const char *devname;
	char name[MAXPATHLEN];

	if (nvlist_lookup_uint64(nvl, ZFS_EV_VDEV_GUID, &guid) == 0) {
		sprintf(name, "%llu", (u_longlong_t)guid);
	} else if (nvlist_lookup_string(nvl, DEV_PHYS_PATH, &devname) == 0) {
		strlcpy(name, devname, MAXPATHLEN);
		zfs_append_partition(name, MAXPATHLEN);
	} else {
		sprintf(name, "unknown");
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: no guid or physpath");
	}

	if (zpool_iter(g_zfshdl, zfsdle_vdev_online, nvl) != 1) {
		zed_log_msg(LOG_INFO, "zfs_deliver_dle: device '%s' not "
/*
 * syseventd daemon module event handler
 *
 * Handles syseventd daemon zfs device related events:
 *
 *	EC_DEV_ADD.ESC_DISK
 *	EC_DEV_STATUS.ESC_DEV_DLE
 *	EC_ZFS.ESC_ZFS_VDEV_CHECK
 *
 * Note: assumes only one thread active at a time (not thread safe)
 */
zfs_slm_deliver_event(const char *class, const char *subclass, nvlist_t *nvl)

	boolean_t is_check = B_FALSE, is_dle = B_FALSE;

	if (strcmp(class, EC_DEV_ADD) == 0) {
		/*
		 * We're mainly interested in disk additions, but we also listen
		 * for new loop devices, to allow for simplified testing.
		 */
		if (strcmp(subclass, ESC_DISK) != 0 &&
		    strcmp(subclass, ESC_LOFI) != 0)

	} else if (strcmp(class, EC_ZFS) == 0 &&
	    strcmp(subclass, ESC_ZFS_VDEV_CHECK) == 0) {
		/*
		 * This event signifies that a device failed to open
		 * during pool load, but the 'autoreplace' property was
		 * set, so we should pretend it's just been added.
		 */
	} else if (strcmp(class, EC_DEV_STATUS) == 0 &&
	    strcmp(subclass, ESC_DEV_DLE) == 0) {

	if (is_dle)
		ret = zfs_deliver_dle(nvl);
	else if (is_check)
		ret = zfs_deliver_check(nvl);
	else
		ret = zfs_deliver_add(nvl);
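/*
 * Thread entry point started from zfs_slm_init(): enumerate all imported
 * pools and record the ones that are currently unavailable on g_pool_list,
 * so their datasets can be enabled once they come back online.
 */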
zfs_enum_pools(void *arg)

	(void) zpool_iter(g_zfshdl, zfs_unavail_pool, (void *)&g_pool_list);

	/*
	 * Linux - instead of using a thread pool, each list entry
	 * will spawn a thread when an unavailable pool transitions
	 * to available. zfs_slm_fini will wait for these threads.
	 */
	g_enumeration_done = B_TRUE;
/*
 * called from zed daemon at startup
 *
 * sent messages from zevents or udev monitor
 *
 * For now, each agent has its own libzfs instance
 */

	if ((g_zfshdl = libzfs_init()) == NULL)
		return (-1);

	/*
	 * collect a list of unavailable pools (asynchronously,
	 * since this can take a while)
	 */
	list_create(&g_pool_list, sizeof (struct unavailpool),
	    offsetof(struct unavailpool, uap_node));

	if (pthread_create(&g_zfs_tid, NULL, zfs_enum_pools, NULL) != 0) {
		list_destroy(&g_pool_list);
		libzfs_fini(g_zfshdl);
		return (-1);
	}

	pthread_setname_np(g_zfs_tid, "enum-pools");
	list_create(&g_device_list, sizeof (struct pendingdev),
	    offsetof(struct pendingdev, pd_node));
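/*
 * Called when the agent shuts down: wait for the zfs_enum_pools() thread,
 * drain and destroy the thread pool, and release any pool handles and
 * pending-device records still queued on the global lists.
 */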
	unavailpool_t *pool;
	pendingdev_t *device;

	/* wait for zfs_enum_pools thread to complete */
	(void) pthread_join(g_zfs_tid, NULL);
	/* destroy the thread pool */
	if (g_tpool != NULL) {
		tpool_wait(g_tpool);
		tpool_destroy(g_tpool);
	}

	while ((pool = list_remove_head(&g_pool_list)) != NULL) {
		zpool_close(pool->uap_zhp);
	}
	list_destroy(&g_pool_list);

	while ((device = list_remove_head(&g_device_list)) != NULL)
		free(device);
	list_destroy(&g_device_list);

	libzfs_fini(g_zfshdl);
zfs_slm_event(const char *class, const char *subclass, nvlist_t *nvl)

	zed_log_msg(LOG_INFO, "zfs_slm_event: %s.%s", class, subclass);
	(void) zfs_slm_deliver_event(class, subclass, nvl);