From 185a0bdc99e760c627834404665bd7106b5c62d3 Mon Sep 17 00:00:00 2001 From: Richard Yao Date: Sun, 29 Dec 2013 13:40:46 -0500 Subject: [PATCH] Add option to zpool status to print guids The use of vdev GUIDs is a necessary workaround in edge cases where the names provided by `zpool status` are not accepted by the zpool detach/offline/remove/replace commands. The current method of obtaining them uses zdb, but this does not work in all cases (see zfsonlinux/zfs#1530). This provides a method of obtaining vdev GUIDs that is more reliable and straightforward than zdb. It would be better to fix all edge cases that require the use of GUIDs as a workaround, but Linux's /dev design makes it difficult to anticipate such edge cases, which makes this option necessary. Note that this adds a new boolean parameter to `zpool_vdev_name`, which changes the libzfs interface. Closes zfsonlinux/zfs#2011 Signed-off-by: Richard Yao --- cmd/zpool/zpool_main.c | 123 ++++++++++++++++++++++----------------- include/libzfs.h | 2 +- lib/libzfs/libzfs_pool.c | 12 ++-- man/man8/zpool.8 | 15 ++++- 4 files changed, 88 insertions(+), 64 deletions(-) diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c index a856fd49e85c..79cd8295fae3 100644 --- a/cmd/zpool/zpool_main.c +++ b/cmd/zpool/zpool_main.c @@ -256,7 +256,7 @@ get_usage(zpool_help_t idx) { case HELP_SCRUB: return (gettext("\tscrub [-s] ...\n")); case HELP_STATUS: - return (gettext("\tstatus [-vxD] [-T d|u] [pool] ... [interval " + return (gettext("\tstatus [-gvxD] [-T d|u] [pool] ... 
[interval " "[count]]\n")); case HELP_UPGRADE: return (gettext("\tupgrade\n" @@ -369,7 +369,7 @@ usage(boolean_t requested) void print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent, - boolean_t print_logs) + boolean_t print_logs, boolean_t print_guid) { nvlist_t **child; uint_t c, children; @@ -390,9 +390,9 @@ print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent, if ((is_log && !print_logs) || (!is_log && print_logs)) continue; - vname = zpool_vdev_name(g_zfs, zhp, child[c], B_FALSE); + vname = zpool_vdev_name(g_zfs, zhp, child[c], B_FALSE, print_guid); print_vdev_tree(zhp, vname, child[c], indent + 2, - B_FALSE); + B_FALSE, print_guid); free(vname); } } @@ -585,15 +585,15 @@ zpool_do_add(int argc, char **argv) "configuration:\n"), zpool_get_name(zhp)); /* print original main pool and new tree */ - print_vdev_tree(zhp, poolname, poolnvroot, 0, B_FALSE); - print_vdev_tree(zhp, NULL, nvroot, 0, B_FALSE); + print_vdev_tree(zhp, poolname, poolnvroot, 0, B_FALSE, B_FALSE); + print_vdev_tree(zhp, NULL, nvroot, 0, B_FALSE, B_FALSE); /* Do the same for the logs */ if (num_logs(poolnvroot) > 0) { - print_vdev_tree(zhp, "logs", poolnvroot, 0, B_TRUE); - print_vdev_tree(zhp, NULL, nvroot, 0, B_TRUE); + print_vdev_tree(zhp, "logs", poolnvroot, 0, B_TRUE, B_FALSE); + print_vdev_tree(zhp, NULL, nvroot, 0, B_TRUE, B_FALSE); } else if (num_logs(nvroot) > 0) { - print_vdev_tree(zhp, "logs", nvroot, 0, B_TRUE); + print_vdev_tree(zhp, "logs", nvroot, 0, B_TRUE, B_FALSE); } ret = 0; @@ -1019,9 +1019,9 @@ zpool_do_create(int argc, char **argv) (void) printf(gettext("would create '%s' with the " "following layout:\n\n"), poolname); - print_vdev_tree(NULL, poolname, nvroot, 0, B_FALSE); + print_vdev_tree(NULL, poolname, nvroot, 0, B_FALSE, B_FALSE); if (num_logs(nvroot) > 0) - print_vdev_tree(NULL, "logs", nvroot, 0, B_TRUE); + print_vdev_tree(NULL, "logs", nvroot, 0, B_TRUE, B_FALSE); ret = 0; } else { @@ -1228,9 +1228,9 @@ 
zpool_do_export(int argc, char **argv) * name column. */ static int -max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max) +max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max, boolean_t print_guid) { - char *name = zpool_vdev_name(g_zfs, zhp, nv, B_TRUE); + char *name = zpool_vdev_name(g_zfs, zhp, nv, B_TRUE, print_guid); nvlist_t **child; uint_t c, children; int ret; @@ -1244,7 +1244,7 @@ max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max) &child, &children) == 0) { for (c = 0; c < children; c++) if ((ret = max_width(zhp, child[c], depth + 2, - max)) > max) + max, print_guid)) > max) max = ret; } @@ -1252,7 +1252,7 @@ max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max) &child, &children) == 0) { for (c = 0; c < children; c++) if ((ret = max_width(zhp, child[c], depth + 2, - max)) > max) + max, print_guid)) > max) max = ret; } @@ -1260,7 +1260,7 @@ max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max) &child, &children) == 0) { for (c = 0; c < children; c++) if ((ret = max_width(zhp, child[c], depth + 2, - max)) > max) + max, print_guid)) > max) max = ret; } @@ -1318,7 +1318,7 @@ find_spare(zpool_handle_t *zhp, void *data) */ void print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv, - int namewidth, int depth, boolean_t isspare) + int namewidth, int depth, boolean_t isspare, boolean_t print_guid) { nvlist_t **child; uint_t c, children; @@ -1454,9 +1454,9 @@ print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv, &ishole); if (islog || ishole) continue; - vname = zpool_vdev_name(g_zfs, zhp, child[c], B_TRUE); + vname = zpool_vdev_name(g_zfs, zhp, child[c], B_TRUE, print_guid); print_status_config(zhp, vname, child[c], - namewidth, depth + 2, isspare); + namewidth, depth + 2, isspare, print_guid); free(vname); } } @@ -1467,7 +1467,8 @@ print_status_config(zpool_handle_t *zhp, const char *name, nvlist_t *nv, * pool, printing out the name and status for each one. 
*/ void -print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth) +print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth, + boolean_t print_guid) { nvlist_t **child; uint_t c, children; @@ -1532,8 +1533,8 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth) if (is_log) continue; - vname = zpool_vdev_name(g_zfs, NULL, child[c], B_TRUE); - print_import_config(vname, child[c], namewidth, depth + 2); + vname = zpool_vdev_name(g_zfs, NULL, child[c], B_TRUE, print_guid); + print_import_config(vname, child[c], namewidth, depth + 2, print_guid); free(vname); } @@ -1541,7 +1542,8 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth) &child, &children) == 0) { (void) printf(gettext("\tcache\n")); for (c = 0; c < children; c++) { - vname = zpool_vdev_name(g_zfs, NULL, child[c], B_FALSE); + vname = zpool_vdev_name(g_zfs, NULL, child[c], B_FALSE, + print_guid); (void) printf("\t %s\n", vname); free(vname); } @@ -1551,7 +1553,8 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth) &child, &children) == 0) { (void) printf(gettext("\tspares\n")); for (c = 0; c < children; c++) { - vname = zpool_vdev_name(g_zfs, NULL, child[c], B_FALSE); + vname = zpool_vdev_name(g_zfs, NULL, child[c], B_FALSE, + print_guid); (void) printf("\t %s\n", vname); free(vname); } @@ -1567,7 +1570,8 @@ print_import_config(const char *name, nvlist_t *nv, int namewidth, int depth) * works because only the top level vdev is marked "is_log" */ static void -print_logs(zpool_handle_t *zhp, nvlist_t *nv, int namewidth, boolean_t verbose) +print_logs(zpool_handle_t *zhp, nvlist_t *nv, int namewidth, boolean_t verbose, + boolean_t print_guid) { uint_t c, children; nvlist_t **child; @@ -1586,12 +1590,12 @@ print_logs(zpool_handle_t *zhp, nvlist_t *nv, int namewidth, boolean_t verbose) &is_log); if (!is_log) continue; - name = zpool_vdev_name(g_zfs, zhp, child[c], B_TRUE); + name = 
zpool_vdev_name(g_zfs, zhp, child[c], B_TRUE, print_guid); if (verbose) print_status_config(zhp, name, child[c], namewidth, - 2, B_FALSE); + 2, B_FALSE, print_guid); else - print_import_config(name, child[c], namewidth, 2); + print_import_config(name, child[c], namewidth, 2, print_guid); free(name); } } @@ -1806,13 +1810,13 @@ show_import(nvlist_t *config) (void) printf(gettext(" config:\n\n")); - namewidth = max_width(NULL, nvroot, 0, 0); + namewidth = max_width(NULL, nvroot, 0, 0, B_FALSE); if (namewidth < 10) namewidth = 10; - print_import_config(name, nvroot, namewidth, 0); + print_import_config(name, nvroot, namewidth, 0, B_FALSE); if (num_logs(nvroot) > 0) - print_logs(NULL, nvroot, namewidth, B_FALSE); + print_logs(NULL, nvroot, namewidth, B_FALSE, B_FALSE); if (reason == ZPOOL_STATUS_BAD_GUID_SUM) { (void) printf(gettext("\n\tAdditional devices are known to " @@ -2323,6 +2327,7 @@ zpool_do_import(int argc, char **argv) typedef struct iostat_cbdata { boolean_t cb_verbose; + boolean_t cb_print_guid; int cb_namewidth; int cb_iteration; zpool_list_t *cb_list; @@ -2367,7 +2372,7 @@ print_one_stat(uint64_t value) */ void print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv, - nvlist_t *newnv, iostat_cbdata_t *cb, int depth) + nvlist_t *newnv, iostat_cbdata_t *cb, int depth, boolean_t print_guid) { nvlist_t **oldchild, **newchild; uint_t c, children; @@ -2445,9 +2450,9 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv, if (ishole || islog) continue; - vname = zpool_vdev_name(g_zfs, zhp, newchild[c], B_FALSE); + vname = zpool_vdev_name(g_zfs, zhp, newchild[c], B_FALSE, print_guid); print_vdev_stats(zhp, vname, oldnv ? 
oldchild[c] : NULL, - newchild[c], cb, depth + 2); + newchild[c], cb, depth + 2, print_guid); free(vname); } @@ -2466,10 +2471,10 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv, if (islog) { vname = zpool_vdev_name(g_zfs, zhp, newchild[c], - B_FALSE); + B_FALSE, print_guid); print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, newchild[c], - cb, depth + 2); + cb, depth + 2, print_guid); free(vname); } } @@ -2492,9 +2497,9 @@ print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv, "-\n", cb->cb_namewidth, "cache"); for (c = 0; c < children; c++) { vname = zpool_vdev_name(g_zfs, zhp, newchild[c], - B_FALSE); + B_FALSE, print_guid); print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL, - newchild[c], cb, depth + 2); + newchild[c], cb, depth + 2, print_guid); free(vname); } } @@ -2545,7 +2550,7 @@ print_iostat(zpool_handle_t *zhp, void *data) /* * Print out the statistics for the pool. */ - print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot, cb, 0); + print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot, cb, 0, B_FALSE); if (cb->cb_verbose) print_iostat_separator(cb); @@ -2585,7 +2590,7 @@ get_namewidth(zpool_handle_t *zhp, void *data) cb->cb_namewidth = strlen(zpool_get_name(zhp)); else cb->cb_namewidth = max_width(zhp, nvroot, 0, - cb->cb_namewidth); + cb->cb_namewidth, cb->cb_print_guid); } /* @@ -2705,6 +2710,7 @@ zpool_do_iostat(int argc, char **argv) unsigned long interval = 0, count = 0; zpool_list_t *list; boolean_t verbose = B_FALSE; + boolean_t print_guid = B_FALSE; iostat_cbdata_t cb; /* check options */ @@ -2751,6 +2757,7 @@ zpool_do_iostat(int argc, char **argv) */ cb.cb_list = list; cb.cb_verbose = verbose; + cb.cb_print_guid = print_guid; cb.cb_iteration = 0; cb.cb_namewidth = 0; @@ -2823,6 +2830,7 @@ zpool_do_iostat(int argc, char **argv) typedef struct list_cbdata { boolean_t cb_verbose; + boolean_t cb_print_guid; int cb_namewidth; boolean_t cb_scripted; zprop_list_t 
*cb_proplist; @@ -3022,7 +3030,7 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole) continue; - vname = zpool_vdev_name(g_zfs, zhp, child[c], B_FALSE); + vname = zpool_vdev_name(g_zfs, zhp, child[c], B_FALSE, cb->cb_print_guid); print_list_stats(zhp, vname, child[c], cb, depth + 2); free(vname); } @@ -3039,7 +3047,7 @@ print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv, "-\n", cb->cb_namewidth, "cache"); for (c = 0; c < children; c++) { vname = zpool_vdev_name(g_zfs, zhp, child[c], - B_FALSE); + B_FALSE, cb->cb_print_guid); print_list_stats(zhp, vname, child[c], cb, depth + 2); free(vname); } @@ -3468,7 +3476,7 @@ zpool_do_split(int argc, char **argv) if (flags.dryrun) { (void) printf(gettext("would create '%s' with the " "following layout:\n\n"), newpool); - print_vdev_tree(NULL, newpool, config, 0, B_FALSE); + print_vdev_tree(NULL, newpool, config, 0, B_FALSE, B_FALSE); } nvlist_free(config); } @@ -3876,6 +3884,7 @@ typedef struct status_cbdata { int cb_count; boolean_t cb_allpools; boolean_t cb_verbose; + boolean_t cb_print_guid; boolean_t cb_explain; boolean_t cb_first; boolean_t cb_dedup_stats; @@ -4030,7 +4039,7 @@ print_error_log(zpool_handle_t *zhp) static void print_spares(zpool_handle_t *zhp, nvlist_t **spares, uint_t nspares, - int namewidth) + int namewidth, boolean_t print_guid) { uint_t i; char *name; @@ -4041,16 +4050,16 @@ print_spares(zpool_handle_t *zhp, nvlist_t **spares, uint_t nspares, (void) printf(gettext("\tspares\n")); for (i = 0; i < nspares; i++) { - name = zpool_vdev_name(g_zfs, zhp, spares[i], B_FALSE); + name = zpool_vdev_name(g_zfs, zhp, spares[i], B_FALSE, print_guid); print_status_config(zhp, name, spares[i], - namewidth, 2, B_TRUE); + namewidth, 2, B_TRUE, print_guid); free(name); } } static void print_l2cache(zpool_handle_t *zhp, nvlist_t **l2cache, uint_t nl2cache, - int namewidth) + int namewidth, boolean_t print_guid) { uint_t i; char *name; 
@@ -4061,9 +4070,9 @@ print_l2cache(zpool_handle_t *zhp, nvlist_t **l2cache, uint_t nl2cache, (void) printf(gettext("\tcache\n")); for (i = 0; i < nl2cache; i++) { - name = zpool_vdev_name(g_zfs, zhp, l2cache[i], B_FALSE); + name = zpool_vdev_name(g_zfs, zhp, l2cache[i], B_FALSE, print_guid); print_status_config(zhp, name, l2cache[i], - namewidth, 2, B_FALSE); + namewidth, 2, B_FALSE, print_guid); free(name); } } @@ -4371,7 +4380,7 @@ status_callback(zpool_handle_t *zhp, void *data) ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &c); print_scan_status(ps); - namewidth = max_width(zhp, nvroot, 0, 0); + namewidth = max_width(zhp, nvroot, 0, 0, cbp->cb_print_guid); if (namewidth < 10) namewidth = 10; @@ -4379,17 +4388,18 @@ status_callback(zpool_handle_t *zhp, void *data) (void) printf(gettext("\t%-*s %-8s %5s %5s %5s\n"), namewidth, "NAME", "STATE", "READ", "WRITE", "CKSUM"); print_status_config(zhp, zpool_get_name(zhp), nvroot, - namewidth, 0, B_FALSE); + namewidth, 0, B_FALSE, cbp->cb_print_guid); if (num_logs(nvroot) > 0) - print_logs(zhp, nvroot, namewidth, B_TRUE); + print_logs(zhp, nvroot, namewidth, B_TRUE, cbp->cb_print_guid); if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0) - print_l2cache(zhp, l2cache, nl2cache, namewidth); + print_l2cache(zhp, l2cache, nl2cache, namewidth, + cbp->cb_print_guid); if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) - print_spares(zhp, spares, nspares, namewidth); + print_spares(zhp, spares, nspares, namewidth, cbp->cb_print_guid); if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT, &nerr) == 0) { @@ -4455,8 +4465,11 @@ zpool_do_status(int argc, char **argv) status_cbdata_t cb = { 0 }; /* check options */ - while ((c = getopt(argc, argv, "vxDT:")) != -1) { + while ((c = getopt(argc, argv, "gvxDT:")) != -1) { switch (c) { + case 'g': + cb.cb_print_guid = B_TRUE; + break; case 'v': cb.cb_verbose = B_TRUE; break; diff --git a/include/libzfs.h 
b/include/libzfs.h index 742f39f944e9..653dd5a4630b 100644 --- a/include/libzfs.h +++ b/include/libzfs.h @@ -400,7 +400,7 @@ struct zfs_cmd; extern const char *zfs_history_event_names[]; extern char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *, - boolean_t verbose); + boolean_t verbose, boolean_t guid); extern int zpool_upgrade(zpool_handle_t *, uint64_t); extern int zpool_get_history(zpool_handle_t *, nvlist_t **); extern int zpool_history_unpack(char *, uint64_t, uint64_t *, diff --git a/lib/libzfs/libzfs_pool.c b/lib/libzfs/libzfs_pool.c index b822ace688c9..9ed1d25ada33 100644 --- a/lib/libzfs/libzfs_pool.c +++ b/lib/libzfs/libzfs_pool.c @@ -1672,7 +1672,7 @@ print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv, return; for (c = 0; c < children; c++) { - vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE); + vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE, B_FALSE); print_vdev_tree(hdl, vname, child[c], indent + 2); free(vname); } @@ -2660,7 +2660,7 @@ zpool_vdev_attach(zpool_handle_t *zhp, verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL), ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0); - if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL) + if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE, B_FALSE)) == NULL) return (-1); /* @@ -2850,11 +2850,11 @@ find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren, for (mc = 0; mc < mchildren; mc++) { uint_t sc; char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp, - mchild[mc], B_FALSE); + mchild[mc], B_FALSE, B_FALSE); for (sc = 0; sc < schildren; sc++) { char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp, - schild[sc], B_FALSE); + schild[sc], B_FALSE, B_FALSE); boolean_t result = (strcmp(mpath, spath) == 0); free(spath); @@ -3392,7 +3392,7 @@ strip_partition(libzfs_handle_t *hdl, char *path) */ char * zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, - boolean_t verbose) + boolean_t verbose, boolean_t guid) { 
char *path, *devid, *type; uint64_t value; @@ -3402,7 +3402,7 @@ zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv, uint_t vsc; if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, - &value) == 0) { + &value) == 0 || guid) { verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value) == 0); (void) snprintf(buf, sizeof (buf), "%llu", diff --git a/man/man8/zpool.8 b/man/man8/zpool.8 index 63037760a045..90ffb9eb6285 100644 --- a/man/man8/zpool.8 +++ b/man/man8/zpool.8 @@ -150,7 +150,7 @@ zpool \- configures ZFS storage pools .LP .nf -\fBzpool status\fR [\fB-xvD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]] +\fBzpool status\fR [\fB-gxvD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]] .fi .LP @@ -1724,13 +1724,24 @@ Sets the specified property for \fInewpool\fR. See the “Properties” section .ne 2 .mk .na -\fBzpool status\fR [\fB-xvD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]] +\fBzpool status\fR [\fB-gxvD\fR] [\fB-T\fR d | u] [\fIpool\fR] ... [\fIinterval\fR [\fIcount\fR]] .ad .sp .6 .RS 4n Displays the detailed health status for the given pools. If no \fIpool\fR is specified, then the status of each pool in the system is displayed. For more information on pool and device health, see the "Device Failure and Recovery" section. .sp If a scrub or resilver is in progress, this command reports the percentage done and the estimated time to completion. Both of these are only approximate, because the amount of data in the pool and the other workloads on the system can change. +.sp +.ne 2 +.mk +.na +\fB\fB-g\fR\fR +.ad +.RS 12n +.rt +Display vdev GUIDs instead of the normal short/long device names. These GUIDs can be used in-place of device names for the zpool detach/offline/remove/replace commands. +.RE + .sp .ne 2 .mk