/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
 */

#include <libintl.h>
#include <libuutil.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>

#include <libzfs.h>
#include <sys/zfs_context.h>

#include "zpool_util.h"
/*
 * Private interface for iterating over pools specified on the command line.
 * Most consumers will call for_each_pool, but in order to support iostat, we
 * allow fine-grained control through the zpool_list_t interface.
 */

typedef struct zpool_node {
	zpool_handle_t	*zn_handle;
	uu_avl_node_t	zn_avlnode;
	int		zn_mark;
} zpool_node_t;

struct zpool_list {
	boolean_t	zl_findall;
	uu_avl_t	*zl_avl;
	uu_avl_pool_t	*zl_pool;
	zprop_list_t	**zl_proplist;
};
/* ARGSUSED */
static int
zpool_compare(const void *larg, const void *rarg, void *unused)
{
	zpool_handle_t *l = ((zpool_node_t *)larg)->zn_handle;
	zpool_handle_t *r = ((zpool_node_t *)rarg)->zn_handle;
	const char *lname = zpool_get_name(l);
	const char *rname = zpool_get_name(r);

	return (strcmp(lname, rname));
}

/*
 * Callback function for pool_list_get().  Adds the given pool to the AVL tree
 * of known pools.
 */
static int
add_pool(zpool_handle_t *zhp, void *data)
{
	zpool_list_t *zlp = data;
	zpool_node_t *node = safe_malloc(sizeof (zpool_node_t));
	uu_avl_index_t idx;

	node->zn_handle = zhp;
	uu_avl_node_init(node, &node->zn_avlnode, zlp->zl_pool);
	if (uu_avl_find(zlp->zl_avl, node, NULL, &idx) == NULL) {
		if (zlp->zl_proplist &&
		    zpool_expand_proplist(zhp, zlp->zl_proplist) != 0) {
			zpool_close(zhp);
			free(node);
			return (-1);
		}
		uu_avl_insert(zlp->zl_avl, node, idx);
	} else {
		zpool_close(zhp);
		free(node);
		return (-1);
	}

	return (0);
}
/*
 * Create a list of pools based on the given arguments.  If we're given no
 * arguments, then iterate over all pools in the system and add them to the
 * AVL tree.  Otherwise, add only those pools explicitly specified on the
 * command line.
 */
zpool_list_t *
pool_list_get(int argc, char **argv, zprop_list_t **proplist, int *err)
{
	zpool_list_t *zlp;

	zlp = safe_malloc(sizeof (zpool_list_t));

	zlp->zl_pool = uu_avl_pool_create("zfs_pool", sizeof (zpool_node_t),
	    offsetof(zpool_node_t, zn_avlnode), zpool_compare, UU_DEFAULT);

	if (zlp->zl_pool == NULL)
		zpool_no_memory();

	if ((zlp->zl_avl = uu_avl_create(zlp->zl_pool, NULL,
	    UU_DEFAULT)) == NULL)
		zpool_no_memory();

	zlp->zl_proplist = proplist;

	if (argc == 0) {
		(void) zpool_iter(g_zfs, add_pool, zlp);
		zlp->zl_findall = B_TRUE;
	} else {
		int i;

		for (i = 0; i < argc; i++) {
			zpool_handle_t *zhp;

			if ((zhp = zpool_open_canfail(g_zfs, argv[i])) !=
			    NULL) {
				if (add_pool(zhp, zlp) != 0)
					*err = B_TRUE;
			} else {
				*err = B_TRUE;
			}
		}
	}

	return (zlp);
}
/*
 * Search for any new pools, adding them to the list.  We only add pools when
 * no options were given on the command line.  Otherwise, we keep the list
 * fixed as those that were explicitly specified.
 */
void
pool_list_update(zpool_list_t *zlp)
{
	if (zlp->zl_findall)
		(void) zpool_iter(g_zfs, add_pool, zlp);
}

/*
 * Iterate over all pools in the list, executing the callback for each
 */
int
pool_list_iter(zpool_list_t *zlp, int unavail, zpool_iter_f func,
    void *data)
{
	zpool_node_t *node, *next_node;
	int ret = 0;

	for (node = uu_avl_first(zlp->zl_avl); node != NULL; node = next_node) {
		next_node = uu_avl_next(zlp->zl_avl, node);
		if (zpool_get_state(node->zn_handle) != POOL_STATE_UNAVAIL ||
		    unavail)
			ret |= func(node->zn_handle, data);
	}

	return (ret);
}
/*
 * Remove the given pool from the list.  When running iostat, we want to
 * remove those pools that no longer exist.
 */
void
pool_list_remove(zpool_list_t *zlp, zpool_handle_t *zhp)
{
	zpool_node_t search, *node;

	search.zn_handle = zhp;
	if ((node = uu_avl_find(zlp->zl_avl, &search, NULL, NULL)) != NULL) {
		uu_avl_remove(zlp->zl_avl, node);
		zpool_close(node->zn_handle);
		free(node);
	}
}

/*
 * Free all the handles associated with this list.
 */
void
pool_list_free(zpool_list_t *zlp)
{
	uu_avl_walk_t *walk;
	zpool_node_t *node;

	if ((walk = uu_avl_walk_start(zlp->zl_avl, UU_WALK_ROBUST)) == NULL) {
		(void) fprintf(stderr,
		    gettext("internal error: out of memory"));
		exit(1);
	}

	while ((node = uu_avl_walk_next(walk)) != NULL) {
		uu_avl_remove(zlp->zl_avl, node);
		zpool_close(node->zn_handle);
		free(node);
	}

	uu_avl_walk_end(walk);
	uu_avl_destroy(zlp->zl_avl);
	uu_avl_pool_destroy(zlp->zl_pool);

	free(zlp);
}
/*
 * Returns the number of elements in the pool list.
 */
int
pool_list_count(zpool_list_t *zlp)
{
	return (uu_avl_numnodes(zlp->zl_avl));
}
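/*
 * Minimal sketch of how a consumer such as 'zpool iostat' might drive the
 * fine-grained zpool_list_t interface above instead of for_each_pool():
 * build the list once, refresh it each pass, and walk it with a callback.
 * The example_* names below are hypothetical illustrations; only the
 * pool_list_*() functions and the libzfs calls they use are real.
 */
/* ARGSUSED */
static int
example_print_pool_cb(zpool_handle_t *zhp, void *data)
{
	(void) printf("%s\n", zpool_get_name(zhp));
	return (0);
}

static void
example_pool_list_loop(int argc, char **argv)
{
	int err = 0;
	zpool_list_t *list = pool_list_get(argc, argv, NULL, &err);
	int pass;

	for (pass = 0; pass < 3; pass++) {
		pool_list_update(list);	/* pick up newly imported pools */
		(void) pool_list_iter(list, B_FALSE, example_print_pool_cb,
		    NULL);
	}
	pool_list_free(list);
}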
/*
 * High level function which iterates over all pools given on the command
 * line, using the pool_list_* interfaces.
 */
int
for_each_pool(int argc, char **argv, boolean_t unavail,
    zprop_list_t **proplist, zpool_iter_f func, void *data)
{
	zpool_list_t *list;
	int ret = 0;

	if ((list = pool_list_get(argc, argv, proplist, &ret)) == NULL)
		return (1);

	if (pool_list_iter(list, unavail, func, data) != 0)
		ret = 1;

	pool_list_free(list);

	return (ret);
}
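/*
 * Hypothetical one-shot use of for_each_pool() with the callback sketched
 * above: visit every pool named in argv (or all pools if argc == 0) without
 * keeping a list around, e.g.
 *
 *	ret = for_each_pool(argc, argv, B_FALSE, NULL,
 *	    example_print_pool_cb, NULL);
 */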
static int
for_each_vdev_cb(zpool_handle_t *zhp, nvlist_t *nv, pool_vdev_iter_f func,
    void *data)
{
	nvlist_t **child;
	uint_t c, children;
	int ret = 0;
	int i;
	char *type;

	const char *list[] = {
	    ZPOOL_CONFIG_SPARES,
	    ZPOOL_CONFIG_L2CACHE,
	    ZPOOL_CONFIG_CHILDREN
	};

	for (i = 0; i < ARRAY_SIZE(list); i++) {
		if (nvlist_lookup_nvlist_array(nv, list[i], &child,
		    &children) == 0) {
			for (c = 0; c < children; c++) {
				uint64_t ishole = 0;

				(void) nvlist_lookup_uint64(child[c],
				    ZPOOL_CONFIG_IS_HOLE, &ishole);

				if (ishole)
					continue;

				ret |= for_each_vdev_cb(zhp, child[c], func,
				    data);
			}
		}
	}

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (ret);

	/* Don't run our function on root vdevs */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0) {
		ret |= func(zhp, nv, data);
	}

	return (ret);
}
/*
 * This is the equivalent of for_each_pool() for vdevs.  It iterates through
 * all vdevs in the pool, ignoring root vdevs and holes, calling func() on
 * each one.
 *
 * @zhp:	Zpool handle
 * @func:	Function to call on each vdev
 * @data:	Custom data to pass to the function
 */
int
for_each_vdev(zpool_handle_t *zhp, pool_vdev_iter_f func, void *data)
{
	nvlist_t *config, *nvroot = NULL;

	if ((config = zpool_get_config(zhp, NULL)) != NULL) {
		verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
		    &nvroot) == 0);
	}
	return (for_each_vdev_cb(zhp, nvroot, func, data));
}
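/*
 * Minimal sketch of a pool_vdev_iter_f callback for for_each_vdev().  Each
 * vdev is handed to the callback as its config nvlist; leaf vdevs normally
 * carry ZPOOL_CONFIG_PATH, while interior vdevs (mirror, raidz, ...) do not.
 * The example_* name is hypothetical; the nvlist and libzfs calls are the
 * same ones used elsewhere in this file.
 */
/* ARGSUSED */
static int
example_print_vdev_cb(zpool_handle_t *zhp, nvlist_t *nv, void *data)
{
	char *path = NULL;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0)
		(void) printf("%s: %s\n", zpool_get_name(zhp), path);
	return (0);
}

/* e.g. (void) for_each_vdev(zhp, example_print_vdev_cb, NULL); */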
/* Thread function run for each vdev */
static void
vdev_run_cmd_thread(void *cb_cmd_data)
{
	vdev_cmd_data_t *data = cb_cmd_data;
	char *pos = NULL;
	FILE *fp;
	size_t len = 0;
	char cmd[_POSIX_ARG_MAX];

	/* Set our VDEV_PATH and VDEV_UPATH env vars and run command */
	if (snprintf(cmd, sizeof (cmd), "VDEV_PATH=%s && VDEV_UPATH=\"%s\" && "
	    "VDEV_ENC_SYSFS_PATH=\"%s\" && %s", data->path ? data->path : "",
	    data->upath ? data->upath : "",
	    data->vdev_enc_sysfs_path ? data->vdev_enc_sysfs_path : "",
	    data->cmd) >= sizeof (cmd)) {
		/* Our string was truncated */
		return;
	}

	fp = popen(cmd, "r");
	if (fp == NULL)
		return;

	data->line = NULL;

	/* Save the first line of output from the command */
	if (getline(&data->line, &len, fp) != -1) {
		/* Success.  Remove newline from the end, if necessary. */
		if ((pos = strchr(data->line, '\n')) != NULL)
			*pos = '\0';
	} else {
		data->line = NULL;
	}
	pclose(fp);
}
/* For each vdev in the pool run a command */
static int
for_each_vdev_run_cb(zpool_handle_t *zhp, nvlist_t *nv, void *cb_vcdl)
{
	vdev_cmd_data_list_t *vcdl = cb_vcdl;
	vdev_cmd_data_t *data;
	char *path = NULL;
	char *vname = NULL;
	char *vdev_enc_sysfs_path = NULL;
	int i, match = 0;

	/* Only leaf vdevs with a device path are of interest here */
	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) != 0)
		return (1);

	/* The enclosure sysfs path is optional, so ignore lookup failures */
	nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
	    &vdev_enc_sysfs_path);

	/* Spares can show up more than once if they're in use, so skip dups */
	for (i = 0; i < vcdl->count; i++) {
		if ((strcmp(vcdl->data[i].path, path) == 0) &&
		    (strcmp(vcdl->data[i].pool, zpool_get_name(zhp)) == 0)) {
			/* vdev already exists, skip it */
			return (0);
		}
	}

	/* Check for whitelisted vdevs here, if any */
	for (i = 0; i < vcdl->vdev_names_count; i++) {
		vname = zpool_vdev_name(g_zfs, zhp, nv, vcdl->cb_name_flags);
		if (strcmp(vcdl->vdev_names[i], vname) == 0) {
			free(vname);
			match = 1;
			break;	/* match */
		}
		free(vname);
	}

	/* If we whitelisted vdevs, and this isn't one of them, then bail out */
	if (!match && vcdl->vdev_names_count)
		return (0);

	/*
	 * Resize our array and add in the new element.
	 */
	if (!(vcdl->data = realloc(vcdl->data,
	    sizeof (*vcdl->data) * (vcdl->count + 1))))
		return (ENOMEM);	/* couldn't realloc */

	data = &vcdl->data[vcdl->count];

	data->pool = strdup(zpool_get_name(zhp));
	data->path = strdup(path);
	data->upath = zfs_get_underlying_path(path);
	data->cmd = vcdl->cmd;
	if (vdev_enc_sysfs_path)
		data->vdev_enc_sysfs_path = strdup(vdev_enc_sysfs_path);
	else
		data->vdev_enc_sysfs_path = NULL;

	vcdl->count++;

	return (0);
}
/* Get the names and count of the vdevs */
static int
all_pools_for_each_vdev_gather_cb(zpool_handle_t *zhp, void *cb_vcdl)
{
	return (for_each_vdev(zhp, for_each_vdev_run_cb, cb_vcdl));
}

/*
 * Now that vcdl is populated with our complete list of vdevs, spawn
 * off the commands.
 */
static void
all_pools_for_each_vdev_run_vcdl(vdev_cmd_data_list_t *vcdl)
{
	taskq_t *t;
	int i;
	/* 5 * boot_ncpus selfishly chosen since it works best on LLNL's HW */
	int max_threads = 5 * boot_ncpus;

	/*
	 * Under Linux we use a taskq to parallelize running a command
	 * on each vdev.  It is therefore necessary to initialize this
	 * functionality for the duration of the threads.
	 */
	thread_init();

	t = taskq_create("z_pool_cmd", max_threads, defclsyspri, max_threads,
	    INT_MAX, 0);
	if (t == NULL)
		return;

	/* Spawn off the command for each vdev */
	for (i = 0; i < vcdl->count; i++) {
		(void) taskq_dispatch(t, vdev_run_cmd_thread,
		    (void *) &vcdl->data[i], TQ_SLEEP);
	}

	/* Wait for threads to finish */
	taskq_wait(t);
	taskq_destroy(t);
	thread_fini();
}
/*
 * Run command 'cmd' on all vdevs in all pools in argv.  Saves the first line
 * of output from the command in vcdl->data[].line for all vdevs.  If you want
 * to run the command on only certain vdevs, fill in g_zfs, vdev_names,
 * vdev_names_count, and cb_name_flags.  Otherwise leave them as zero.
 *
 * Returns a vdev_cmd_data_list_t that must be freed with
 * free_vdev_cmd_data_list().
 */
vdev_cmd_data_list_t *
all_pools_for_each_vdev_run(int argc, char **argv, char *cmd,
    libzfs_handle_t *g_zfs, char **vdev_names, int vdev_names_count,
    int cb_name_flags)
{
	vdev_cmd_data_list_t *vcdl;

	vcdl = safe_malloc(sizeof (vdev_cmd_data_list_t));
	vcdl->cmd = cmd;

	vcdl->vdev_names = vdev_names;
	vcdl->vdev_names_count = vdev_names_count;
	vcdl->cb_name_flags = cb_name_flags;
	vcdl->g_zfs = g_zfs;

	/* Gather our list of all vdevs in all pools */
	for_each_pool(argc, argv, B_TRUE, NULL,
	    all_pools_for_each_vdev_gather_cb, vcdl);

	/* Run command on all vdevs in all pools */
	all_pools_for_each_vdev_run_vcdl(vcdl);

	return (vcdl);
}
/*
 * Free the vdev_cmd_data_list_t created by all_pools_for_each_vdev_run()
 */
void
free_vdev_cmd_data_list(vdev_cmd_data_list_t *vcdl)
{
	int i;

	for (i = 0; i < vcdl->count; i++) {
		free(vcdl->data[i].path);
		free(vcdl->data[i].pool);
		free(vcdl->data[i].upath);
		free(vcdl->data[i].line);
		free(vcdl->data[i].vdev_enc_sysfs_path);
	}
	free(vcdl->data);
	free(vcdl);
}
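/*
 * Minimal sketch of the all_pools_for_each_vdev_run() /
 * free_vdev_cmd_data_list() life cycle, in the spirit of lighting enclosure
 * LEDs: run a command once per vdev, read back the first line of output, then
 * free the list.  The example_* name and the "echo" command are hypothetical;
 * the structure fields printed are the same ones freed above.
 */
static void
example_run_cmd_on_all_vdevs(int argc, char **argv, libzfs_handle_t *hdl)
{
	vdev_cmd_data_list_t *vcdl;
	int i;

	vcdl = all_pools_for_each_vdev_run(argc, argv,
	    "echo $VDEV_ENC_SYSFS_PATH", hdl, NULL, 0, 0);

	for (i = 0; i < vcdl->count; i++) {
		(void) printf("%s %s: %s\n", vcdl->data[i].pool,
		    vcdl->data[i].path,
		    vcdl->data[i].line ? vcdl->data[i].line : "-");
	}
	free_vdev_cmd_data_list(vcdl);
}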