/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or https://opensource.org/licenses/CDDL-1.0.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011, 2020 by Delphix. All rights reserved.
 * Copyright 2017 Joyent, Inc.
 * Copyright (c) 2021, Colm Buckley <colm@tuatha.org>
 */

#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/nvpair.h>
#include <sys/fs/zfs.h>
#include <sys/vdev_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/systeminfo.h>
#include <sys/sunddi.h>
#include <sys/zfeature.h>
#include <sys/zfs_file.h>
#include <sys/zfs_context.h>

/*
 * Pool configuration repository.
 *
 * Pool configuration is stored as a packed nvlist on the filesystem. By
 * default, all pools are stored in /etc/zfs/zpool.cache and loaded on boot
 * (when the ZFS module is loaded). Pools can also have the 'cachefile'
 * property set that allows them to be stored in an alternate location under
 * the control of external software.
 *
 * For each cache file, we have a single nvlist which holds all the
 * configuration information. When the module loads, we read this information
 * from /etc/zfs/zpool.cache and populate the SPA namespace. This namespace is
 * maintained independently in spa.c. Whenever the namespace is modified, or
 * the configuration of a pool is changed, we call spa_write_cachefile(), which
 * walks through all the active pools and writes the configuration to disk.
 */

static uint64_t spa_config_generation = 1;

/*
 * This can be overridden in userland to preserve an alternate namespace for
 * userland pools when doing testing.
 */
char *spa_config_path = (char *)ZPOOL_CACHE;
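
/*
 * When set, the cache file is not read at module load time and pools must
 * be imported explicitly (see spa_config_load() below).
 */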
static int zfs_autoimport_disable = B_TRUE;

/*
 * Called when the module is first loaded, this routine loads the configuration
 * file into the SPA namespace. It does not actually open or load the pools; it
 * only populates the namespace.
 */
void
spa_config_load(void)
{
	char *buf = NULL;
	nvlist_t *nvlist, *child;
	nvpair_t *nvpair = NULL;
	char *pathname;
	zfs_file_t *fp;
	zfs_file_attr_t zfa;
	uint64_t fsize;
	int err;

	if (zfs_autoimport_disable)
		return;

	/*
	 * Open the configuration file.
	 */
	pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	(void) snprintf(pathname, MAXPATHLEN, "%s", spa_config_path);

	err = zfs_file_open(pathname, O_RDONLY, 0, &fp);
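
	/*
	 * If the primary cache file cannot be opened, fall back to the
	 * boot-time cache file.
	 */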
#ifdef __FreeBSD__
	if (err)
		err = zfs_file_open(ZPOOL_CACHE_BOOT, O_RDONLY, 0, &fp);
#endif
	kmem_free(pathname, MAXPATHLEN);

	if (err)
		return;

	if (zfs_file_getattr(fp, &zfa))
		goto out;

	fsize = zfa.zfa_size;
	buf = kmem_alloc(fsize, KM_SLEEP);

	/*
	 * Read the nvlist from the file.
	 */
	if (zfs_file_read(fp, buf, fsize, NULL) < 0)
		goto out;

	/*
	 * Unpack the nvlist.
	 */
	if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
		goto out;

	/*
	 * Iterate over all elements in the nvlist, creating a new spa_t for
	 * each one with the specified configuration.
	 */
	mutex_enter(&spa_namespace_lock);
	while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
		if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
			continue;

		child = fnvpair_value_nvlist(nvpair);

		if (spa_lookup(nvpair_name(nvpair)) != NULL)
			continue;
		(void) spa_add(nvpair_name(nvpair), child, NULL);
	}
	mutex_exit(&spa_namespace_lock);

	nvlist_free(nvlist);
out:
	if (buf != NULL)
		kmem_free(buf, fsize);

	zfs_file_close(fp);
}

static int
spa_config_remove(spa_config_dirent_t *dp)
{
	int error = 0;

	/*
	 * Remove the cache file. If zfs_file_unlink() is not supported by the
	 * platform, fall back to truncating the file, which is functionally
	 * equivalent.
	 */
	error = zfs_file_unlink(dp->scd_path);
	if (error == EOPNOTSUPP) {
		int flags = O_RDWR | O_TRUNC;
		zfs_file_t *fp;

		error = zfs_file_open(dp->scd_path, flags, 0644, &fp);
		if (error == 0) {
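			/*
			 * Flush the now-empty file so a stale cache file
			 * cannot reappear after a crash.
			 */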
			(void) zfs_file_fsync(fp, O_SYNC);
			(void) zfs_file_close(fp);
		}
	}

	return (error);
}

static int
spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
{
	zfs_file_t *fp;
	size_t buflen;
	char *buf;
	int oflags = O_RDWR | O_TRUNC | O_CREAT | O_LARGEFILE;
	char *temp;
	int err;

	/*
	 * If the nvlist is empty (NULL), then remove the old cachefile.
	 */
	if (nvl == NULL) {
		err = spa_config_remove(dp);
		if (err == ENOENT)
			err = 0;

		return (err);
	}

	/*
	 * Pack the configuration into a buffer.
	 */
	buf = fnvlist_pack(nvl, &buflen);
	temp = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

	/*
	 * Write the configuration to disk. Due to the complexity involved
	 * in performing a rename and remove from within the kernel the file
	 * is instead truncated and overwritten in place. This way we always
	 * have a consistent view of the data or a zero length file.
	 */
	err = zfs_file_open(dp->scd_path, oflags, 0644, &fp);
	if (err == 0) {
		err = zfs_file_write(fp, buf, buflen, NULL);
		if (err == 0)
			err = zfs_file_fsync(fp, O_SYNC);

		zfs_file_close(fp);
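
		/*
		 * Don't leave a partially written cache file behind if the
		 * write or fsync failed.
		 */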
		if (err)
			(void) spa_config_remove(dp);
	}
	fnvlist_pack_free(buf, buflen);
	kmem_free(temp, MAXPATHLEN);

	return (err);
}

/*
 * Synchronize pool configuration to disk. This must be called with the
 * namespace lock held. Synchronizing the pool cache is typically done after
 * the configuration has been synced to the MOS. This exposes a window where
 * the MOS config will have been updated but the cache file has not. If
 * the system were to crash at that instant then the cached config may not
 * contain the correct information to open the pool and an explicit import
 * would be required.
 */
void
spa_write_cachefile(spa_t *target, boolean_t removing, boolean_t postsysevent,
    boolean_t postblkidevent)
{
	spa_config_dirent_t *dp, *tdp;
	nvlist_t *nvl;
	const char *pool_name;
	boolean_t ccw_failure;
	int error = 0;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	if (!(spa_mode_global & SPA_MODE_WRITE))
		return;

	/*
	 * Iterate over all cachefiles for the pool, past or present. When the
	 * cachefile is changed, the new one is pushed onto this list, allowing
	 * us to update previous cachefiles that no longer contain this pool.
	 */
	ccw_failure = B_FALSE;
	for (dp = list_head(&target->spa_config_list); dp != NULL;
	    dp = list_next(&target->spa_config_list, dp)) {
		spa_t *spa = NULL;

		if (dp->scd_path == NULL)
			continue;

		/*
		 * Iterate over all pools, adding any matching pools to 'nvl'.
		 */
		nvl = NULL;
		while ((spa = spa_next(spa)) != NULL) {
			/*
			 * Skip over our own pool if we're about to remove
			 * ourselves from the spa namespace or any pool that
			 * is readonly. Since we cannot guarantee that a
			 * readonly pool would successfully import upon reboot,
			 * we don't allow them to be written to the cache file.
			 */
			if ((spa == target && removing) ||
			    !spa_writeable(spa))
				continue;

			mutex_enter(&spa->spa_props_lock);
			tdp = list_head(&spa->spa_config_list);
			if (spa->spa_config == NULL ||
			    tdp == NULL ||
			    tdp->scd_path == NULL ||
			    strcmp(tdp->scd_path, dp->scd_path) != 0) {
				mutex_exit(&spa->spa_props_lock);
				continue;
			}

			if (nvl == NULL)
				nvl = fnvlist_alloc();

			if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME)
				pool_name = fnvlist_lookup_string(
				    spa->spa_config, ZPOOL_CONFIG_POOL_NAME);
			else
				pool_name = spa_name(spa);

			fnvlist_add_nvlist(nvl, pool_name, spa->spa_config);
			mutex_exit(&spa->spa_props_lock);
		}

		error = spa_config_write(dp, nvl);
		if (error != 0)
			ccw_failure = B_TRUE;
		nvlist_free(nvl);
	}

	if (ccw_failure) {
		/*
		 * Keep trying so that configuration data is
		 * written if/when any temporary filesystem
		 * resource issues are resolved.
		 */
		if (target->spa_ccw_fail_time == 0) {
			(void) zfs_ereport_post(
			    FM_EREPORT_ZFS_CONFIG_CACHE_WRITE,
			    target, NULL, NULL, NULL, 0);
		}
		target->spa_ccw_fail_time = gethrtime();
		spa_async_request(target, SPA_ASYNC_CONFIG_UPDATE);
	} else {
		/*
		 * Do not rate limit future attempts to update
		 * the config cache.
		 */
		target->spa_ccw_fail_time = 0;
	}

	/*
	 * Remove any config entries older than the current one.
	 */
	dp = list_head(&target->spa_config_list);
	while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
		list_remove(&target->spa_config_list, tdp);
		if (tdp->scd_path != NULL)
			spa_strfree(tdp->scd_path);
		kmem_free(tdp, sizeof (spa_config_dirent_t));
	}

	spa_config_generation++;
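
	/*
	 * Tell interested listeners (e.g. the ZFS event daemon) that the
	 * configuration cache has been synced.
	 */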
	if (postsysevent)
		spa_event_notify(target, NULL, NULL, ESC_ZFS_CONFIG_SYNC);

	/*
	 * Post udev event to sync blkid information if the pool is created
	 * or a new vdev is added to the pool.
	 */
	if ((target->spa_root_vdev) && postblkidevent) {
		vdev_post_kobj_evt(target->spa_root_vdev);
		for (int i = 0; i < target->spa_l2cache.sav_count; i++)
			vdev_post_kobj_evt(target->spa_l2cache.sav_vdevs[i]);
		for (int i = 0; i < target->spa_spares.sav_count; i++)
			vdev_post_kobj_evt(target->spa_spares.sav_vdevs[i]);
	}
}

/*
 * Sigh. Inside a local zone, we don't have access to /etc/zfs/zpool.cache,
 * and we don't want to allow the local zone to see all the pools anyway.
 * So we have to invent the ZFS_IOC_CONFIG ioctl to grab the configuration
 * information for all pools visible within the zone.
 */
int
spa_all_configs(uint64_t *generation, nvlist_t **pools)
{
	spa_t *spa = NULL;
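
	/*
	 * If the caller's generation matches ours, its cached view of the
	 * pool configurations is already current.
	 */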
	if (*generation == spa_config_generation)
		return (SET_ERROR(EEXIST));

	int error = mutex_enter_interruptible(&spa_namespace_lock);
	if (error)
		return (SET_ERROR(EINTR));

	*pools = fnvlist_alloc();
	while ((spa = spa_next(spa)) != NULL) {
		if (INGLOBALZONE(curproc) ||
		    zone_dataset_visible(spa_name(spa), NULL)) {
			mutex_enter(&spa->spa_props_lock);
			fnvlist_add_nvlist(*pools, spa_name(spa),
			    spa->spa_config);
			mutex_exit(&spa->spa_props_lock);
		}
	}
	*generation = spa_config_generation;
	mutex_exit(&spa_namespace_lock);

	return (0);
}
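
/*
 * Install a new configuration nvlist for the pool, freeing any previous
 * config; the spa takes ownership of the passed-in nvlist.
 */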
void
spa_config_set(spa_t *spa, nvlist_t *config)
{
	mutex_enter(&spa->spa_props_lock);
	if (spa->spa_config != NULL && spa->spa_config != config)
		nvlist_free(spa->spa_config);
	spa->spa_config = config;
	mutex_exit(&spa->spa_props_lock);
}

/*
 * Generate the pool's configuration based on the current in-core state.
 *
 * We infer whether to generate a complete config or just one top-level config
 * based on whether vd is the root vdev.
 */
nvlist_t *
spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
{
	nvlist_t *config, *nvroot;
	vdev_t *rvd = spa->spa_root_vdev;
	unsigned long hostid = 0;
	boolean_t locked = B_FALSE;
	uint64_t split_guid;
	const char *pool_name;
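
	/*
	 * If no vdev was specified, generate the complete pool config and
	 * take the config locks ourselves.
	 */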
	if (vd == NULL) {
		vd = rvd;
		locked = B_TRUE;
		spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
	}

	ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
	    (SCL_CONFIG | SCL_STATE));

	/*
	 * If txg is -1, report the current value of spa->spa_config_txg.
	 */
	if (txg == -1ULL)
		txg = spa->spa_config_txg;

	/*
	 * Originally, users had to handle spa namespace collisions by either
	 * exporting the already imported pool or by specifying a new name for
	 * the pool with a conflicting name. In the case of root pools from
	 * virtual guests, neither approach to collision resolution is
	 * reasonable. This is addressed by extending the new name syntax with
	 * an option to specify that the new name is temporary. When specified,
	 * ZFS_IMPORT_TEMP_NAME will be set in spa->spa_import_flags to tell us
	 * to use the previous name, which we do below.
	 */
	if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
		VERIFY0(nvlist_lookup_string(spa->spa_config,
		    ZPOOL_CONFIG_POOL_NAME, &pool_name));
	} else
		pool_name = spa_name(spa);

	config = fnvlist_alloc();

	fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
	fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, pool_name);
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, spa_state(spa));
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, txg);
	fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
	fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA, spa->spa_errata);
	if (spa->spa_comment != NULL)
		fnvlist_add_string(config, ZPOOL_CONFIG_COMMENT,
		    spa->spa_comment);
	if (spa->spa_compatibility != NULL)
		fnvlist_add_string(config, ZPOOL_CONFIG_COMPATIBILITY,
		    spa->spa_compatibility);

	hostid = spa_get_hostid(spa);
	if (hostid != 0)
		fnvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID, hostid);
	fnvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME, utsname()->nodename);

	int config_gen_flags = 0;
	if (vd != rvd) {
		fnvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
		    vd->vdev_top->vdev_guid);
		fnvlist_add_uint64(config, ZPOOL_CONFIG_GUID,
		    vd->vdev_guid);
		if (vd->vdev_isspare)
			fnvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_SPARE, 1ULL);
		if (vd->vdev_islog)
			fnvlist_add_uint64(config,
			    ZPOOL_CONFIG_IS_LOG, 1ULL);
		vd = vd->vdev_top;		/* label contains top config */
	} else {
		/*
		 * Only add the (potentially large) split information
		 * in the mos config, and not in the vdev labels
		 */
		if (spa->spa_config_splitting != NULL)
			fnvlist_add_nvlist(config, ZPOOL_CONFIG_SPLIT,
			    spa->spa_config_splitting);

		fnvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS);

		config_gen_flags |= VDEV_CONFIG_MOS;
	}

	/*
	 * Add the top-level config. We even add this on pools which
	 * don't support holes in the namespace.
	 */
	vdev_top_config_generate(spa, config);

	/*
	 * If we're splitting, record the original pool's guid.
	 */
	if (spa->spa_config_splitting != NULL &&
	    nvlist_lookup_uint64(spa->spa_config_splitting,
	    ZPOOL_CONFIG_SPLIT_GUID, &split_guid) == 0) {
		fnvlist_add_uint64(config, ZPOOL_CONFIG_SPLIT_GUID, split_guid);
	}

	nvroot = vdev_config_generate(spa, vd, getstats, config_gen_flags);
	fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot);
	nvlist_free(nvroot);

	/*
	 * Store what's necessary for reading the MOS in the label.
	 */
	fnvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    spa->spa_label_features);
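
	/*
	 * Include dedup (DDT) statistics when the caller asked for stats and
	 * the pool is not in the middle of being loaded.
	 */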
	if (getstats && spa_load_state(spa) == SPA_LOAD_NONE) {
		ddt_histogram_t *ddh;
		ddt_object_t *ddo;
		ddt_stat_t *dds;
= kmem_zalloc(sizeof (ddt_histogram_t
), KM_SLEEP
);
533 ddt_get_dedup_histogram(spa
, ddh
);
534 fnvlist_add_uint64_array(config
,
535 ZPOOL_CONFIG_DDT_HISTOGRAM
,
536 (uint64_t *)ddh
, sizeof (*ddh
) / sizeof (uint64_t));
537 kmem_free(ddh
, sizeof (ddt_histogram_t
));

		ddo = kmem_zalloc(sizeof (ddt_object_t), KM_SLEEP);
		ddt_get_dedup_object_stats(spa, ddo);
		fnvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_OBJ_STATS,
		    (uint64_t *)ddo, sizeof (*ddo) / sizeof (uint64_t));
		kmem_free(ddo, sizeof (ddt_object_t));

		dds = kmem_zalloc(sizeof (ddt_stat_t), KM_SLEEP);
		ddt_get_dedup_stats(spa, dds);
		fnvlist_add_uint64_array(config,
		    ZPOOL_CONFIG_DDT_STATS,
		    (uint64_t *)dds, sizeof (*dds) / sizeof (uint64_t));
		kmem_free(dds, sizeof (ddt_stat_t));
	}

	if (locked)
		spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);

	return (config);
}

/*
 * Update all disk labels, generate a fresh config based on the current
 * in-core state, and sync the global config cache (do not sync the config
 * cache if this is a booting rootpool).
 */
void
spa_config_update(spa_t *spa, int what)
{
	vdev_t *rvd = spa->spa_root_vdev;
	uint64_t txg;
	int c;

	ASSERT(MUTEX_HELD(&spa_namespace_lock));

	spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
	txg = spa_last_synced_txg(spa) + 1;
	if (what == SPA_CONFIG_UPDATE_POOL) {
		vdev_config_dirty(rvd);
	} else {
		/*
		 * If we have top-level vdevs that were added but have
		 * not yet been prepared for allocation, do that now.
		 * (It's safe now because the config cache is up to date,
		 * so it will be able to translate the new DVAs.)
		 * See comments in spa_vdev_add() for full details.
		 */
		for (c = 0; c < rvd->vdev_children; c++) {
			vdev_t *tvd = rvd->vdev_child[c];

			/*
			 * Explicitly skip vdevs that are indirect or
			 * log vdevs that are being removed. The reason
			 * is that both of those can have vdev_ms_array
			 * set to 0 and we wouldn't want to change their
			 * metaslab size nor call vdev_expand() on them.
			 */
			if (!vdev_is_concrete(tvd) ||
			    (tvd->vdev_islog && tvd->vdev_removing))
				continue;

			if (tvd->vdev_ms_array == 0)
				vdev_metaslab_set_size(tvd);
			vdev_expand(tvd, txg);
		}
	}
	spa_config_exit(spa, SCL_ALL, FTAG);

	/*
	 * Wait for the mosconfig to be regenerated and synced.
	 */
	txg_wait_synced(spa->spa_dsl_pool, txg);

	/*
	 * Update the global config cache to reflect the new mosconfig.
	 */
	if (!spa->spa_is_root) {
		spa_write_cachefile(spa, B_FALSE,
		    what != SPA_CONFIG_UPDATE_POOL,
		    what != SPA_CONFIG_UPDATE_POOL);
	}
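
	/*
	 * After a pool-level update, make a second pass to update the
	 * vdevs as well.
	 */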
	if (what == SPA_CONFIG_UPDATE_POOL)
		spa_config_update(spa, SPA_CONFIG_UPDATE_VDEVS);
}

EXPORT_SYMBOL(spa_config_load);
EXPORT_SYMBOL(spa_all_configs);
EXPORT_SYMBOL(spa_config_set);
EXPORT_SYMBOL(spa_config_generate);
EXPORT_SYMBOL(spa_config_update);

#ifdef __linux__
/* string sysctls require a char array on FreeBSD */
ZFS_MODULE_PARAM(zfs_spa, spa_, config_path, STRING, ZMOD_RD,
	"SPA config file (/etc/zfs/zpool.cache)");
#endif

ZFS_MODULE_PARAM(zfs, zfs_, autoimport_disable, INT, ZMOD_RW,
	"Disable pool import at module load");