1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2012 by Delphix. All rights reserved.
26 */
27
28 #include <ctype.h>
29 #include <errno.h>
30 #include <devid.h>
31 #include <fcntl.h>
32 #include <libintl.h>
33 #include <stdio.h>
34 #include <stdlib.h>
35 #include <strings.h>
36 #include <unistd.h>
37 #include <libgen.h>
38 #include <sys/efi_partition.h>
39 #include <sys/vtoc.h>
40 #include <sys/zfs_ioctl.h>
41 #include <dlfcn.h>
42
43 #include "zfs_namecheck.h"
44 #include "zfs_prop.h"
45 #include "libzfs_impl.h"
46 #include "zfs_comutil.h"
47 #include "zfeature_common.h"
48
49 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
50
51 #define DISK_ROOT "/dev/dsk"
52 #define RDISK_ROOT "/dev/rdsk"
53 #define BACKUP_SLICE "s2"
54
55 typedef struct prop_flags {
56 int create:1; /* Validate property on creation */
57 int import:1; /* Validate property on import */
58 } prop_flags_t;
59
60 /*
61 * ====================================================================
62 * zpool property functions
63 * ====================================================================
64 */
65
66 static int
67 zpool_get_all_props(zpool_handle_t *zhp)
68 {
69 zfs_cmd_t zc = { 0 };
70 libzfs_handle_t *hdl = zhp->zpool_hdl;
71
72 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
73
74 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
75 return (-1);
76
77 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
78 if (errno == ENOMEM) {
79 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
80 zcmd_free_nvlists(&zc);
81 return (-1);
82 }
83 } else {
84 zcmd_free_nvlists(&zc);
85 return (-1);
86 }
87 }
88
89 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
90 zcmd_free_nvlists(&zc);
91 return (-1);
92 }
93
94 zcmd_free_nvlists(&zc);
95
96 return (0);
97 }
98
99 static int
100 zpool_props_refresh(zpool_handle_t *zhp)
101 {
102 nvlist_t *old_props;
103
104 old_props = zhp->zpool_props;
105
106 if (zpool_get_all_props(zhp) != 0)
107 return (-1);
108
109 nvlist_free(old_props);
110 return (0);
111 }
112
113 static char *
114 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
115 zprop_source_t *src)
116 {
117 nvlist_t *nv, *nvl;
118 uint64_t ival;
119 char *value;
120 zprop_source_t source;
121
122 nvl = zhp->zpool_props;
123 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
124 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
125 source = ival;
126 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
127 } else {
128 source = ZPROP_SRC_DEFAULT;
129 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
130 value = "-";
131 }
132
133 if (src)
134 *src = source;
135
136 return (value);
137 }
138
139 uint64_t
140 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
141 {
142 nvlist_t *nv, *nvl;
143 uint64_t value;
144 zprop_source_t source;
145
146 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
147 /*
148 * zpool_get_all_props() has most likely failed because
149 * the pool is faulted, but if all we need is the top level
150 * vdev's guid then get it from the zhp config nvlist.
151 */
152 if ((prop == ZPOOL_PROP_GUID) &&
153 (nvlist_lookup_nvlist(zhp->zpool_config,
154 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
155 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
156 == 0)) {
157 return (value);
158 }
159 return (zpool_prop_default_numeric(prop));
160 }
161
162 nvl = zhp->zpool_props;
163 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
164 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
165 source = value;
166 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
167 } else {
168 source = ZPROP_SRC_DEFAULT;
169 value = zpool_prop_default_numeric(prop);
170 }
171
172 if (src)
173 *src = source;
174
175 return (value);
176 }
177
178 /*
179 * Map VDEV STATE to printed strings.
180 */
181 char *
182 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
183 {
184 switch (state) {
185 case VDEV_STATE_CLOSED:
186 case VDEV_STATE_OFFLINE:
187 return (gettext("OFFLINE"));
188 case VDEV_STATE_REMOVED:
189 return (gettext("REMOVED"));
190 case VDEV_STATE_CANT_OPEN:
191 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
192 return (gettext("FAULTED"));
193 else if (aux == VDEV_AUX_SPLIT_POOL)
194 return (gettext("SPLIT"));
195 else
196 return (gettext("UNAVAIL"));
197 case VDEV_STATE_FAULTED:
198 return (gettext("FAULTED"));
199 case VDEV_STATE_DEGRADED:
200 return (gettext("DEGRADED"));
201 case VDEV_STATE_HEALTHY:
202 return (gettext("ONLINE"));
203 }
204
205 return (gettext("UNKNOWN"));
206 }
207
208 /*
209 * Get a zpool property value for 'prop' and return the value in
210 * a pre-allocated buffer.
211 */
212 int
213 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
214 zprop_source_t *srctype)
215 {
216 uint64_t intval;
217 const char *strval;
218 zprop_source_t src = ZPROP_SRC_NONE;
219 nvlist_t *nvroot;
220 vdev_stat_t *vs;
221 uint_t vsc;
222
223 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
224 switch (prop) {
225 case ZPOOL_PROP_NAME:
226 (void) strlcpy(buf, zpool_get_name(zhp), len);
227 break;
228
229 case ZPOOL_PROP_HEALTH:
230 (void) strlcpy(buf, "FAULTED", len);
231 break;
232
233 case ZPOOL_PROP_GUID:
234 intval = zpool_get_prop_int(zhp, prop, &src);
235 (void) snprintf(buf, len, "%llu", intval);
236 break;
237
238 case ZPOOL_PROP_ALTROOT:
239 case ZPOOL_PROP_CACHEFILE:
240 case ZPOOL_PROP_COMMENT:
241 if (zhp->zpool_props != NULL ||
242 zpool_get_all_props(zhp) == 0) {
243 (void) strlcpy(buf,
244 zpool_get_prop_string(zhp, prop, &src),
245 len);
246 if (srctype != NULL)
247 *srctype = src;
248 return (0);
249 }
250 /* FALLTHROUGH */
251 default:
252 (void) strlcpy(buf, "-", len);
253 break;
254 }
255
256 if (srctype != NULL)
257 *srctype = src;
258 return (0);
259 }
260
261 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
262 prop != ZPOOL_PROP_NAME)
263 return (-1);
264
265 switch (zpool_prop_get_type(prop)) {
266 case PROP_TYPE_STRING:
267 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
268 len);
269 break;
270
271 case PROP_TYPE_NUMBER:
272 intval = zpool_get_prop_int(zhp, prop, &src);
273
274 switch (prop) {
275 case ZPOOL_PROP_SIZE:
276 case ZPOOL_PROP_ALLOCATED:
277 case ZPOOL_PROP_FREE:
278 case ZPOOL_PROP_FREEING:
279 case ZPOOL_PROP_EXPANDSZ:
280 (void) zfs_nicenum(intval, buf, len);
281 break;
282
283 case ZPOOL_PROP_CAPACITY:
284 (void) snprintf(buf, len, "%llu%%",
285 (u_longlong_t)intval);
286 break;
287
288 case ZPOOL_PROP_DEDUPRATIO:
289 (void) snprintf(buf, len, "%llu.%02llux",
290 (u_longlong_t)(intval / 100),
291 (u_longlong_t)(intval % 100));
292 break;
293
294 case ZPOOL_PROP_HEALTH:
295 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
296 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
297 verify(nvlist_lookup_uint64_array(nvroot,
298 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
299 == 0);
300
301 (void) strlcpy(buf, zpool_state_to_name(intval,
302 vs->vs_aux), len);
303 break;
304 case ZPOOL_PROP_VERSION:
305 if (intval >= SPA_VERSION_FEATURES) {
306 (void) snprintf(buf, len, "-");
307 break;
308 }
309 /* FALLTHROUGH */
310 default:
311 (void) snprintf(buf, len, "%llu", intval);
312 }
313 break;
314
315 case PROP_TYPE_INDEX:
316 intval = zpool_get_prop_int(zhp, prop, &src);
317 if (zpool_prop_index_to_string(prop, intval, &strval)
318 != 0)
319 return (-1);
320 (void) strlcpy(buf, strval, len);
321 break;
322
323 default:
324 abort();
325 }
326
327 if (srctype)
328 *srctype = src;
329
330 return (0);
331 }
332
333 /*
334 * Check if the bootfs name has the same pool name as it is set to.
335 * Assuming bootfs is a valid dataset name.
336 */
337 static boolean_t
338 bootfs_name_valid(const char *pool, char *bootfs)
339 {
340 int len = strlen(pool);
341
342 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
343 return (B_FALSE);
344
345 if (strncmp(pool, bootfs, len) == 0 &&
346 (bootfs[len] == '/' || bootfs[len] == '\0'))
347 return (B_TRUE);
348
349 return (B_FALSE);
350 }
351
352 /*
353 * Inspect the configuration to determine if any of the devices contain
354 * an EFI label.
355 */
356 static boolean_t
357 pool_uses_efi(nvlist_t *config)
358 {
359 nvlist_t **child;
360 uint_t c, children;
361
362 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
363 &child, &children) != 0)
364 return (read_efi_label(config, NULL) >= 0);
365
366 for (c = 0; c < children; c++) {
367 if (pool_uses_efi(child[c]))
368 return (B_TRUE);
369 }
370 return (B_FALSE);
371 }
372
373 boolean_t
374 zpool_is_bootable(zpool_handle_t *zhp)
375 {
376 char bootfs[ZPOOL_MAXNAMELEN];
377
378 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
379 sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
380 sizeof (bootfs)) != 0);
381 }
382
383
384 /*
385 * Given an nvlist of zpool properties to be set, validate that they are
386 * correct, and parse any numeric properties (index, boolean, etc) if they are
387 * specified as strings.
388 */
389 static nvlist_t *
390 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
391 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
392 {
393 nvpair_t *elem;
394 nvlist_t *retprops;
395 zpool_prop_t prop;
396 char *strval;
397 uint64_t intval;
398 char *slash, *check;
399 struct stat64 statbuf;
400 zpool_handle_t *zhp;
401 nvlist_t *nvroot;
402
403 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
404 (void) no_memory(hdl);
405 return (NULL);
406 }
407
408 elem = NULL;
409 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
410 const char *propname = nvpair_name(elem);
411
412 prop = zpool_name_to_prop(propname);
413 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
414 int err;
415 zfeature_info_t *feature;
416 char *fname = strchr(propname, '@') + 1;
417
418 err = zfeature_lookup_name(fname, &feature);
419 if (err != 0) {
420 ASSERT3U(err, ==, ENOENT);
421 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
422 "invalid feature '%s'"), fname);
423 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
424 goto error;
425 }
426
427 if (nvpair_type(elem) != DATA_TYPE_STRING) {
428 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
429 "'%s' must be a string"), propname);
430 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
431 goto error;
432 }
433
434 (void) nvpair_value_string(elem, &strval);
435 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
436 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
437 "property '%s' can only be set to "
438 "'enabled'"), propname);
439 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
440 goto error;
441 }
442
443 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
444 (void) no_memory(hdl);
445 goto error;
446 }
447 continue;
448 }
449
450 /*
451 * Make sure this property is valid and applies to this type.
452 */
453 if (prop == ZPROP_INVAL) {
454 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
455 "invalid property '%s'"), propname);
456 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
457 goto error;
458 }
459
460 if (zpool_prop_readonly(prop)) {
461 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
462 "is readonly"), propname);
463 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
464 goto error;
465 }
466
467 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
468 &strval, &intval, errbuf) != 0)
469 goto error;
470
471 /*
472 * Perform additional checking for specific properties.
473 */
474 switch (prop) {
475 case ZPOOL_PROP_VERSION:
476 if (intval < version ||
477 !SPA_VERSION_IS_SUPPORTED(intval)) {
478 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
479 "property '%s' number %d is invalid."),
480 propname, intval);
481 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
482 goto error;
483 }
484 break;
485
486 case ZPOOL_PROP_BOOTFS:
487 if (flags.create || flags.import) {
488 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
489 "property '%s' cannot be set at creation "
490 "or import time"), propname);
491 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
492 goto error;
493 }
494
495 if (version < SPA_VERSION_BOOTFS) {
496 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
497 "pool must be upgraded to support "
498 "'%s' property"), propname);
499 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
500 goto error;
501 }
502
503 /*
504 * bootfs property value has to be a dataset name and
505 * the dataset has to be in the same pool as it sets to.
506 */
507 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
508 strval)) {
509 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
510 "is an invalid name"), strval);
511 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
512 goto error;
513 }
514
515 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
516 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
517 "could not open pool '%s'"), poolname);
518 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
519 goto error;
520 }
521 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
522 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
523
524 /*
525 * bootfs property cannot be set on a disk which has
526 * been EFI labeled.
527 */
528 if (pool_uses_efi(nvroot)) {
529 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
530 "property '%s' not supported on "
531 "EFI labeled devices"), propname);
532 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
533 zpool_close(zhp);
534 goto error;
535 }
536 zpool_close(zhp);
537 break;
538
539 case ZPOOL_PROP_ALTROOT:
540 if (!flags.create && !flags.import) {
541 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
542 "property '%s' can only be set during pool "
543 "creation or import"), propname);
544 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
545 goto error;
546 }
547
548 if (strval[0] != '/') {
549 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
550 "bad alternate root '%s'"), strval);
551 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
552 goto error;
553 }
554 break;
555
556 case ZPOOL_PROP_CACHEFILE:
557 if (strval[0] == '\0')
558 break;
559
560 if (strcmp(strval, "none") == 0)
561 break;
562
563 if (strval[0] != '/') {
564 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
565 "property '%s' must be empty, an "
566 "absolute path, or 'none'"), propname);
567 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
568 goto error;
569 }
570
571 slash = strrchr(strval, '/');
572
573 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
574 strcmp(slash, "/..") == 0) {
575 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
576 "'%s' is not a valid file"), strval);
577 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
578 goto error;
579 }
580
581 *slash = '\0';
582
583 if (strval[0] != '\0' &&
584 (stat64(strval, &statbuf) != 0 ||
585 !S_ISDIR(statbuf.st_mode))) {
586 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
587 "'%s' is not a valid directory"),
588 strval);
589 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
590 goto error;
591 }
592
593 *slash = '/';
594 break;
595
596 case ZPOOL_PROP_COMMENT:
597 for (check = strval; *check != '\0'; check++) {
598 if (!isprint(*check)) {
599 zfs_error_aux(hdl,
600 dgettext(TEXT_DOMAIN,
601 "comment may only have printable "
602 "characters"));
603 (void) zfs_error(hdl, EZFS_BADPROP,
604 errbuf);
605 goto error;
606 }
607 }
608 if (strlen(strval) > ZPROP_MAX_COMMENT) {
609 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
610 "comment must not exceed %d characters"),
611 ZPROP_MAX_COMMENT);
612 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
613 goto error;
614 }
615 break;
616 case ZPOOL_PROP_READONLY:
617 if (!flags.import) {
618 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
619 "property '%s' can only be set at "
620 "import time"), propname);
621 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
622 goto error;
623 }
624 break;
625 }
626 }
627
628 return (retprops);
629 error:
630 nvlist_free(retprops);
631 return (NULL);
632 }
633
634 /*
635 * Set zpool property : propname=propval.
636 */
637 int
638 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
639 {
640 zfs_cmd_t zc = { 0 };
641 int ret = -1;
642 char errbuf[1024];
643 nvlist_t *nvl = NULL;
644 nvlist_t *realprops;
645 uint64_t version;
646 prop_flags_t flags = { 0 };
647
648 (void) snprintf(errbuf, sizeof (errbuf),
649 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
650 zhp->zpool_name);
651
652 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
653 return (no_memory(zhp->zpool_hdl));
654
655 if (nvlist_add_string(nvl, propname, propval) != 0) {
656 nvlist_free(nvl);
657 return (no_memory(zhp->zpool_hdl));
658 }
659
660 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
661 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
662 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
663 nvlist_free(nvl);
664 return (-1);
665 }
666
667 nvlist_free(nvl);
668 nvl = realprops;
669
670 /*
671 * Execute the corresponding ioctl() to set this property.
672 */
673 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
674
675 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
676 nvlist_free(nvl);
677 return (-1);
678 }
679
680 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
681
682 zcmd_free_nvlists(&zc);
683 nvlist_free(nvl);
684
685 if (ret)
686 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
687 else
688 (void) zpool_props_refresh(zhp);
689
690 return (ret);
691 }
692
693 int
694 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
695 {
696 libzfs_handle_t *hdl = zhp->zpool_hdl;
697 zprop_list_t *entry;
698 char buf[ZFS_MAXPROPLEN];
699 nvlist_t *features = NULL;
700 zprop_list_t **last;
701 boolean_t firstexpand = (NULL == *plp);
702
703 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
704 return (-1);
705
706 last = plp;
707 while (*last != NULL)
708 last = &(*last)->pl_next;
709
710 if ((*plp)->pl_all)
711 features = zpool_get_features(zhp);
712
713 if ((*plp)->pl_all && firstexpand) {
714 for (int i = 0; i < SPA_FEATURES; i++) {
715 zprop_list_t *entry = zfs_alloc(hdl,
716 sizeof (zprop_list_t));
717 entry->pl_prop = ZPROP_INVAL;
718 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
719 spa_feature_table[i].fi_uname);
720 entry->pl_width = strlen(entry->pl_user_prop);
721 entry->pl_all = B_TRUE;
722
723 *last = entry;
724 last = &entry->pl_next;
725 }
726 }
727
728 /* add any unsupported features */
729 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
730 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
731 char *propname;
732 boolean_t found;
733 zprop_list_t *entry;
734
735 if (zfeature_is_supported(nvpair_name(nvp)))
736 continue;
737
738 propname = zfs_asprintf(hdl, "unsupported@%s",
739 nvpair_name(nvp));
740
741 /*
742 * Before adding the property to the list make sure that no
743 * other pool already added the same property.
744 */
745 found = B_FALSE;
746 entry = *plp;
747 while (entry != NULL) {
748 if (entry->pl_user_prop != NULL &&
749 strcmp(propname, entry->pl_user_prop) == 0) {
750 found = B_TRUE;
751 break;
752 }
753 entry = entry->pl_next;
754 }
755 if (found) {
756 free(propname);
757 continue;
758 }
759
760 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
761 entry->pl_prop = ZPROP_INVAL;
762 entry->pl_user_prop = propname;
763 entry->pl_width = strlen(entry->pl_user_prop);
764 entry->pl_all = B_TRUE;
765
766 *last = entry;
767 last = &entry->pl_next;
768 }
769
770 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
771
772 if (entry->pl_fixed)
773 continue;
774
775 if (entry->pl_prop != ZPROP_INVAL &&
776 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
777 NULL) == 0) {
778 if (strlen(buf) > entry->pl_width)
779 entry->pl_width = strlen(buf);
780 }
781 }
782
783 return (0);
784 }
785
786 /*
787 * Get the state for the given feature on the given ZFS pool.
788 */
789 int
790 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
791 size_t len)
792 {
793 uint64_t refcount;
794 boolean_t found = B_FALSE;
795 nvlist_t *features = zpool_get_features(zhp);
796 boolean_t supported;
797 const char *feature = strchr(propname, '@') + 1;
798
799 supported = zpool_prop_feature(propname);
800 ASSERT(supported || zfs_prop_unsupported(propname));
801
802 /*
803 * Convert from feature name to feature guid. This conversion is
804 * unecessary for unsupported@... properties because they already
805 * use guids.
806 */
807 if (supported) {
808 int ret;
809 zfeature_info_t *fi;
810
811 ret = zfeature_lookup_name(feature, &fi);
812 if (ret != 0) {
813 (void) strlcpy(buf, "-", len);
814 return (ENOTSUP);
815 }
816 feature = fi->fi_guid;
817 }
818
819 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
820 found = B_TRUE;
821
822 if (supported) {
823 if (!found) {
824 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
825 } else {
826 if (refcount == 0)
827 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
828 else
829 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
830 }
831 } else {
832 if (found) {
833 if (refcount == 0) {
834 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
835 } else {
836 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
837 }
838 } else {
839 (void) strlcpy(buf, "-", len);
840 return (ENOTSUP);
841 }
842 }
843
844 return (0);
845 }
846
847 /*
848 * Don't start the slice at the default block of 34; many storage
849 * devices will use a stripe width of 128k, so start there instead.
850 */
851 #define NEW_START_BLOCK 256
852
853 /*
854 * Validate the given pool name, optionally putting an extended error message in
855 * 'buf'.
856 */
857 boolean_t
858 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
859 {
860 namecheck_err_t why;
861 char what;
862 int ret;
863
864 ret = pool_namecheck(pool, &why, &what);
865
866 /*
867 * The rules for reserved pool names were extended at a later point.
868 * But we need to support users with existing pools that may now be
869 * invalid. So we only check for this expanded set of names during a
870 * create (or import), and only in userland.
871 */
872 if (ret == 0 && !isopen &&
873 (strncmp(pool, "mirror", 6) == 0 ||
874 strncmp(pool, "raidz", 5) == 0 ||
875 strncmp(pool, "spare", 5) == 0 ||
876 strcmp(pool, "log") == 0)) {
877 if (hdl != NULL)
878 zfs_error_aux(hdl,
879 dgettext(TEXT_DOMAIN, "name is reserved"));
880 return (B_FALSE);
881 }
882
883
884 if (ret != 0) {
885 if (hdl != NULL) {
886 switch (why) {
887 case NAME_ERR_TOOLONG:
888 zfs_error_aux(hdl,
889 dgettext(TEXT_DOMAIN, "name is too long"));
890 break;
891
892 case NAME_ERR_INVALCHAR:
893 zfs_error_aux(hdl,
894 dgettext(TEXT_DOMAIN, "invalid character "
895 "'%c' in pool name"), what);
896 break;
897
898 case NAME_ERR_NOLETTER:
899 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
900 "name must begin with a letter"));
901 break;
902
903 case NAME_ERR_RESERVED:
904 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
905 "name is reserved"));
906 break;
907
908 case NAME_ERR_DISKLIKE:
909 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
910 "pool name is reserved"));
911 break;
912
913 case NAME_ERR_LEADING_SLASH:
914 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
915 "leading slash in name"));
916 break;
917
918 case NAME_ERR_EMPTY_COMPONENT:
919 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
920 "empty component in name"));
921 break;
922
923 case NAME_ERR_TRAILING_SLASH:
924 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
925 "trailing slash in name"));
926 break;
927
928 case NAME_ERR_MULTIPLE_AT:
929 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
930 "multiple '@' delimiters in name"));
931 break;
932
933 }
934 }
935 return (B_FALSE);
936 }
937
938 return (B_TRUE);
939 }
940
941 /*
942 * Open a handle to the given pool, even if the pool is currently in the FAULTED
943 * state.
944 */
945 zpool_handle_t *
946 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
947 {
948 zpool_handle_t *zhp;
949 boolean_t missing;
950
951 /*
952 * Make sure the pool name is valid.
953 */
954 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
955 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
956 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
957 pool);
958 return (NULL);
959 }
960
961 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
962 return (NULL);
963
964 zhp->zpool_hdl = hdl;
965 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
966
967 if (zpool_refresh_stats(zhp, &missing) != 0) {
968 zpool_close(zhp);
969 return (NULL);
970 }
971
972 if (missing) {
973 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
974 (void) zfs_error_fmt(hdl, EZFS_NOENT,
975 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
976 zpool_close(zhp);
977 return (NULL);
978 }
979
980 return (zhp);
981 }
982
983 /*
984 * Like the above, but silent on error. Used when iterating over pools (because
985 * the configuration cache may be out of date).
986 */
987 int
988 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
989 {
990 zpool_handle_t *zhp;
991 boolean_t missing;
992
993 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
994 return (-1);
995
996 zhp->zpool_hdl = hdl;
997 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
998
999 if (zpool_refresh_stats(zhp, &missing) != 0) {
1000 zpool_close(zhp);
1001 return (-1);
1002 }
1003
1004 if (missing) {
1005 zpool_close(zhp);
1006 *ret = NULL;
1007 return (0);
1008 }
1009
1010 *ret = zhp;
1011 return (0);
1012 }
1013
1014 /*
1015 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1016 * state.
1017 */
1018 zpool_handle_t *
1019 zpool_open(libzfs_handle_t *hdl, const char *pool)
1020 {
1021 zpool_handle_t *zhp;
1022
1023 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1024 return (NULL);
1025
1026 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1027 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1028 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1029 zpool_close(zhp);
1030 return (NULL);
1031 }
1032
1033 return (zhp);
1034 }
1035
1036 /*
1037 * Close the handle. Simply frees the memory associated with the handle.
1038 */
1039 void
1040 zpool_close(zpool_handle_t *zhp)
1041 {
1042 if (zhp->zpool_config)
1043 nvlist_free(zhp->zpool_config);
1044 if (zhp->zpool_old_config)
1045 nvlist_free(zhp->zpool_old_config);
1046 if (zhp->zpool_props)
1047 nvlist_free(zhp->zpool_props);
1048 free(zhp);
1049 }
1050
1051 /*
1052 * Return the name of the pool.
1053 */
1054 const char *
1055 zpool_get_name(zpool_handle_t *zhp)
1056 {
1057 return (zhp->zpool_name);
1058 }
1059
1060
1061 /*
1062 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1063 */
1064 int
1065 zpool_get_state(zpool_handle_t *zhp)
1066 {
1067 return (zhp->zpool_state);
1068 }
1069
1070 /*
1071 * Create the named pool, using the provided vdev list. It is assumed
1072 * that the consumer has already validated the contents of the nvlist, so we
1073 * don't have to worry about error semantics.
1074 */
1075 int
1076 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1077 nvlist_t *props, nvlist_t *fsprops)
1078 {
1079 zfs_cmd_t zc = { 0 };
1080 nvlist_t *zc_fsprops = NULL;
1081 nvlist_t *zc_props = NULL;
1082 char msg[1024];
1083 char *altroot;
1084 int ret = -1;
1085
1086 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1087 "cannot create '%s'"), pool);
1088
1089 if (!zpool_name_valid(hdl, B_FALSE, pool))
1090 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1091
1092 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1093 return (-1);
1094
1095 if (props) {
1096 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1097
1098 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1099 SPA_VERSION_1, flags, msg)) == NULL) {
1100 goto create_failed;
1101 }
1102 }
1103
1104 if (fsprops) {
1105 uint64_t zoned;
1106 char *zonestr;
1107
1108 zoned = ((nvlist_lookup_string(fsprops,
1109 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1110 strcmp(zonestr, "on") == 0);
1111
1112 if ((zc_fsprops = zfs_valid_proplist(hdl,
1113 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
1114 goto create_failed;
1115 }
1116 if (!zc_props &&
1117 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1118 goto create_failed;
1119 }
1120 if (nvlist_add_nvlist(zc_props,
1121 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1122 goto create_failed;
1123 }
1124 }
1125
1126 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1127 goto create_failed;
1128
1129 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1130
1131 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1132
1133 zcmd_free_nvlists(&zc);
1134 nvlist_free(zc_props);
1135 nvlist_free(zc_fsprops);
1136
1137 switch (errno) {
1138 case EBUSY:
1139 /*
1140 * This can happen if the user has specified the same
1141 * device multiple times. We can't reliably detect this
1142 * until we try to add it and see we already have a
1143 * label.
1144 */
1145 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1146 "one or more vdevs refer to the same device"));
1147 return (zfs_error(hdl, EZFS_BADDEV, msg));
1148
1149 case EOVERFLOW:
1150 /*
1151 * This occurs when one of the devices is below
1152 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1153 * device was the problem device since there's no
1154 * reliable way to determine device size from userland.
1155 */
1156 {
1157 char buf[64];
1158
1159 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1160
1161 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1162 "one or more devices is less than the "
1163 "minimum size (%s)"), buf);
1164 }
1165 return (zfs_error(hdl, EZFS_BADDEV, msg));
1166
1167 case ENOSPC:
1168 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1169 "one or more devices is out of space"));
1170 return (zfs_error(hdl, EZFS_BADDEV, msg));
1171
1172 case ENOTBLK:
1173 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1174 "cache device must be a disk or disk slice"));
1175 return (zfs_error(hdl, EZFS_BADDEV, msg));
1176
1177 default:
1178 return (zpool_standard_error(hdl, errno, msg));
1179 }
1180 }
1181
1182 /*
1183 * If this is an alternate root pool, then we automatically set the
1184 * mountpoint of the root dataset to be '/'.
1185 */
1186 if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
1187 &altroot) == 0) {
1188 zfs_handle_t *zhp;
1189
1190 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
1191 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1192 "/") == 0);
1193
1194 zfs_close(zhp);
1195 }
1196
1197 create_failed:
1198 zcmd_free_nvlists(&zc);
1199 nvlist_free(zc_props);
1200 nvlist_free(zc_fsprops);
1201 return (ret);
1202 }
1203
1204 /*
1205 * Destroy the given pool. It is up to the caller to ensure that there are no
1206 * datasets left in the pool.
1207 */
1208 int
1209 zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1210 {
1211 zfs_cmd_t zc = { 0 };
1212 zfs_handle_t *zfp = NULL;
1213 libzfs_handle_t *hdl = zhp->zpool_hdl;
1214 char msg[1024];
1215
1216 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1217 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1218 return (-1);
1219
1220 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1221 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1222
1223 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1224 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1225 "cannot destroy '%s'"), zhp->zpool_name);
1226
1227 if (errno == EROFS) {
1228 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1229 "one or more devices is read only"));
1230 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1231 } else {
1232 (void) zpool_standard_error(hdl, errno, msg);
1233 }
1234
1235 if (zfp)
1236 zfs_close(zfp);
1237 return (-1);
1238 }
1239
1240 if (zfp) {
1241 remove_mountpoint(zfp);
1242 zfs_close(zfp);
1243 }
1244
1245 return (0);
1246 }
1247
1248 /*
1249 * Add the given vdevs to the pool. The caller must have already performed the
1250 * necessary verification to ensure that the vdev specification is well-formed.
1251 */
1252 int
1253 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1254 {
1255 zfs_cmd_t zc = { 0 };
1256 int ret;
1257 libzfs_handle_t *hdl = zhp->zpool_hdl;
1258 char msg[1024];
1259 nvlist_t **spares, **l2cache;
1260 uint_t nspares, nl2cache;
1261
1262 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1263 "cannot add to '%s'"), zhp->zpool_name);
1264
1265 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1266 SPA_VERSION_SPARES &&
1267 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1268 &spares, &nspares) == 0) {
1269 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1270 "upgraded to add hot spares"));
1271 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1272 }
1273
1274 if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1275 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1276 uint64_t s;
1277
1278 for (s = 0; s < nspares; s++) {
1279 char *path;
1280
1281 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1282 &path) == 0 && pool_uses_efi(spares[s])) {
1283 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1284 "device '%s' contains an EFI label and "
1285 "cannot be used on root pools."),
1286 zpool_vdev_name(hdl, NULL, spares[s],
1287 B_FALSE));
1288 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1289 }
1290 }
1291 }
1292
1293 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1294 SPA_VERSION_L2CACHE &&
1295 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1296 &l2cache, &nl2cache) == 0) {
1297 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1298 "upgraded to add cache devices"));
1299 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1300 }
1301
1302 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1303 return (-1);
1304 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1305
1306 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1307 switch (errno) {
1308 case EBUSY:
1309 /*
1310 * This can happen if the user has specified the same
1311 * device multiple times. We can't reliably detect this
1312 * until we try to add it and see we already have a
1313 * label.
1314 */
1315 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1316 "one or more vdevs refer to the same device"));
1317 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1318 break;
1319
1320 case EOVERFLOW:
1321 /*
1322 * This occurrs when one of the devices is below
1323 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1324 * device was the problem device since there's no
1325 * reliable way to determine device size from userland.
1326 */
1327 {
1328 char buf[64];
1329
1330 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1331
1332 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1333 "device is less than the minimum "
1334 "size (%s)"), buf);
1335 }
1336 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1337 break;
1338
1339 case ENOTSUP:
1340 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1341 "pool must be upgraded to add these vdevs"));
1342 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1343 break;
1344
1345 case EDOM:
1346 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1347 "root pool can not have multiple vdevs"
1348 " or separate logs"));
1349 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1350 break;
1351
1352 case ENOTBLK:
1353 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1354 "cache device must be a disk or disk slice"));
1355 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1356 break;
1357
1358 default:
1359 (void) zpool_standard_error(hdl, errno, msg);
1360 }
1361
1362 ret = -1;
1363 } else {
1364 ret = 0;
1365 }
1366
1367 zcmd_free_nvlists(&zc);
1368
1369 return (ret);
1370 }
1371
1372 /*
1373 * Exports the pool from the system. The caller must ensure that there are no
1374 * mounted datasets in the pool.
1375 */
1376 static int
1377 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1378 const char *log_str)
1379 {
1380 zfs_cmd_t zc = { 0 };
1381 char msg[1024];
1382
1383 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1384 "cannot export '%s'"), zhp->zpool_name);
1385
1386 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1387 zc.zc_cookie = force;
1388 zc.zc_guid = hardforce;
1389 zc.zc_history = (uint64_t)(uintptr_t)log_str;
1390
1391 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1392 switch (errno) {
1393 case EXDEV:
1394 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1395 "use '-f' to override the following errors:\n"
1396 "'%s' has an active shared spare which could be"
1397 " used by other pools once '%s' is exported."),
1398 zhp->zpool_name, zhp->zpool_name);
1399 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1400 msg));
1401 default:
1402 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1403 msg));
1404 }
1405 }
1406
1407 return (0);
1408 }
1409
1410 int
1411 zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1412 {
1413 return (zpool_export_common(zhp, force, B_FALSE, log_str));
1414 }
1415
1416 int
1417 zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1418 {
1419 return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1420 }
1421
1422 static void
1423 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1424 nvlist_t *config)
1425 {
1426 nvlist_t *nv = NULL;
1427 uint64_t rewindto;
1428 int64_t loss = -1;
1429 struct tm t;
1430 char timestr[128];
1431
1432 if (!hdl->libzfs_printerr || config == NULL)
1433 return;
1434
1435 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1436 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1437 return;
1438 }
1439
1440 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1441 return;
1442 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1443
1444 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1445 strftime(timestr, 128, 0, &t) != 0) {
1446 if (dryrun) {
1447 (void) printf(dgettext(TEXT_DOMAIN,
1448 "Would be able to return %s "
1449 "to its state as of %s.\n"),
1450 name, timestr);
1451 } else {
1452 (void) printf(dgettext(TEXT_DOMAIN,
1453 "Pool %s returned to its state as of %s.\n"),
1454 name, timestr);
1455 }
1456 if (loss > 120) {
1457 (void) printf(dgettext(TEXT_DOMAIN,
1458 "%s approximately %lld "),
1459 dryrun ? "Would discard" : "Discarded",
1460 (loss + 30) / 60);
1461 (void) printf(dgettext(TEXT_DOMAIN,
1462 "minutes of transactions.\n"));
1463 } else if (loss > 0) {
1464 (void) printf(dgettext(TEXT_DOMAIN,
1465 "%s approximately %lld "),
1466 dryrun ? "Would discard" : "Discarded", loss);
1467 (void) printf(dgettext(TEXT_DOMAIN,
1468 "seconds of transactions.\n"));
1469 }
1470 }
1471 }
1472
1473 void
1474 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1475 nvlist_t *config)
1476 {
1477 nvlist_t *nv = NULL;
1478 int64_t loss = -1;
1479 uint64_t edata = UINT64_MAX;
1480 uint64_t rewindto;
1481 struct tm t;
1482 char timestr[128];
1483
1484 if (!hdl->libzfs_printerr)
1485 return;
1486
1487 if (reason >= 0)
1488 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1489 else
1490 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1491
1492 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1493 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1494 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1495 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1496 goto no_info;
1497
1498 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1499 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1500 &edata);
1501
1502 (void) printf(dgettext(TEXT_DOMAIN,
1503 "Recovery is possible, but will result in some data loss.\n"));
1504
1505 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1506 strftime(timestr, 128, 0, &t) != 0) {
1507 (void) printf(dgettext(TEXT_DOMAIN,
1508 "\tReturning the pool to its state as of %s\n"
1509 "\tshould correct the problem. "),
1510 timestr);
1511 } else {
1512 (void) printf(dgettext(TEXT_DOMAIN,
1513 "\tReverting the pool to an earlier state "
1514 "should correct the problem.\n\t"));
1515 }
1516
1517 if (loss > 120) {
1518 (void) printf(dgettext(TEXT_DOMAIN,
1519 "Approximately %lld minutes of data\n"
1520 "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
1521 } else if (loss > 0) {
1522 (void) printf(dgettext(TEXT_DOMAIN,
1523 "Approximately %lld seconds of data\n"
1524 "\tmust be discarded, irreversibly. "), loss);
1525 }
1526 if (edata != 0 && edata != UINT64_MAX) {
1527 if (edata == 1) {
1528 (void) printf(dgettext(TEXT_DOMAIN,
1529 "After rewind, at least\n"
1530 "\tone persistent user-data error will remain. "));
1531 } else {
1532 (void) printf(dgettext(TEXT_DOMAIN,
1533 "After rewind, several\n"
1534 "\tpersistent user-data errors will remain. "));
1535 }
1536 }
1537 (void) printf(dgettext(TEXT_DOMAIN,
1538 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1539 reason >= 0 ? "clear" : "import", name);
1540
1541 (void) printf(dgettext(TEXT_DOMAIN,
1542 "A scrub of the pool\n"
1543 "\tis strongly recommended after recovery.\n"));
1544 return;
1545
1546 no_info:
1547 (void) printf(dgettext(TEXT_DOMAIN,
1548 "Destroy and re-create the pool from\n\ta backup source.\n"));
1549 }
1550
1551 /*
1552 * zpool_import() is a contracted interface. Should be kept the same
1553 * if possible.
1554 *
1555 * Applications should use zpool_import_props() to import a pool with
1556 * new properties value to be set.
1557 */
1558 int
1559 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1560 char *altroot)
1561 {
1562 nvlist_t *props = NULL;
1563 int ret;
1564
1565 if (altroot != NULL) {
1566 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1567 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1568 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1569 newname));
1570 }
1571
1572 if (nvlist_add_string(props,
1573 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1574 nvlist_add_string(props,
1575 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1576 nvlist_free(props);
1577 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1578 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1579 newname));
1580 }
1581 }
1582
1583 ret = zpool_import_props(hdl, config, newname, props,
1584 ZFS_IMPORT_NORMAL);
1585 if (props)
1586 nvlist_free(props);
1587 return (ret);
1588 }
1589
1590 static void
1591 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1592 int indent)
1593 {
1594 nvlist_t **child;
1595 uint_t c, children;
1596 char *vname;
1597 uint64_t is_log = 0;
1598
1599 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1600 &is_log);
1601
1602 if (name != NULL)
1603 (void) printf("\t%*s%s%s\n", indent, "", name,
1604 is_log ? " [log]" : "");
1605
1606 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1607 &child, &children) != 0)
1608 return;
1609
1610 for (c = 0; c < children; c++) {
1611 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
1612 print_vdev_tree(hdl, vname, child[c], indent + 2);
1613 free(vname);
1614 }
1615 }
1616
1617 void
1618 zpool_print_unsup_feat(nvlist_t *config)
1619 {
1620 nvlist_t *nvinfo, *unsup_feat;
1621
1622 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1623 0);
1624 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1625 &unsup_feat) == 0);
1626
1627 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1628 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1629 char *desc;
1630
1631 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1632 verify(nvpair_value_string(nvp, &desc) == 0);
1633
1634 if (strlen(desc) > 0)
1635 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1636 else
1637 (void) printf("\t%s\n", nvpair_name(nvp));
1638 }
1639 }
1640
1641 /*
1642 * Import the given pool using the known configuration and a list of
1643 * properties to be set. The configuration should have come from
1644 * zpool_find_import(). The 'newname' parameters control whether the pool
1645 * is imported with a different name.
1646 */
1647 int
1648 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1649 nvlist_t *props, int flags)
1650 {
1651 zfs_cmd_t zc = { 0 };
1652 zpool_rewind_policy_t policy;
1653 nvlist_t *nv = NULL;
1654 nvlist_t *nvinfo = NULL;
1655 nvlist_t *missing = NULL;
1656 char *thename;
1657 char *origname;
1658 int ret;
1659 int error = 0;
1660 char errbuf[1024];
1661
1662 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1663 &origname) == 0);
1664
1665 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1666 "cannot import pool '%s'"), origname);
1667
1668 if (newname != NULL) {
1669 if (!zpool_name_valid(hdl, B_FALSE, newname))
1670 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1671 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1672 newname));
1673 thename = (char *)newname;
1674 } else {
1675 thename = origname;
1676 }
1677
1678 if (props) {
1679 uint64_t version;
1680 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1681
1682 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1683 &version) == 0);
1684
1685 if ((props = zpool_valid_proplist(hdl, origname,
1686 props, version, flags, errbuf)) == NULL) {
1687 return (-1);
1688 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1689 nvlist_free(props);
1690 return (-1);
1691 }
1692 }
1693
1694 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1695
1696 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1697 &zc.zc_guid) == 0);
1698
1699 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1700 nvlist_free(props);
1701 return (-1);
1702 }
1703 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1704 nvlist_free(props);
1705 return (-1);
1706 }
1707
1708 zc.zc_cookie = flags;
1709 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1710 errno == ENOMEM) {
1711 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1712 zcmd_free_nvlists(&zc);
1713 return (-1);
1714 }
1715 }
1716 if (ret != 0)
1717 error = errno;
1718
1719 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1720 zpool_get_rewind_policy(config, &policy);
1721
1722 if (error) {
1723 char desc[1024];
1724
1725 /*
1726 * Dry-run failed, but we print out what success
1727 * looks like if we found a best txg
1728 */
1729 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1730 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1731 B_TRUE, nv);
1732 nvlist_free(nv);
1733 return (-1);
1734 }
1735
1736 if (newname == NULL)
1737 (void) snprintf(desc, sizeof (desc),
1738 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1739 thename);
1740 else
1741 (void) snprintf(desc, sizeof (desc),
1742 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1743 origname, thename);
1744
1745 switch (error) {
1746 case ENOTSUP:
1747 if (nv != NULL && nvlist_lookup_nvlist(nv,
1748 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1749 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1750 (void) printf(dgettext(TEXT_DOMAIN, "This "
1751 "pool uses the following feature(s) not "
1752 "supported by this system:\n"));
1753 zpool_print_unsup_feat(nv);
1754 if (nvlist_exists(nvinfo,
1755 ZPOOL_CONFIG_CAN_RDONLY)) {
1756 (void) printf(dgettext(TEXT_DOMAIN,
1757 "All unsupported features are only "
1758 "required for writing to the pool."
1759 "\nThe pool can be imported using "
1760 "'-o readonly=on'.\n"));
1761 }
1762 }
1763 /*
1764 * Unsupported version.
1765 */
1766 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1767 break;
1768
1769 case EINVAL:
1770 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1771 break;
1772
1773 case EROFS:
1774 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1775 "one or more devices is read only"));
1776 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1777 break;
1778
1779 case ENXIO:
1780 if (nv && nvlist_lookup_nvlist(nv,
1781 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1782 nvlist_lookup_nvlist(nvinfo,
1783 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1784 (void) printf(dgettext(TEXT_DOMAIN,
1785 "The devices below are missing, use "
1786 "'-m' to import the pool anyway:\n"));
1787 print_vdev_tree(hdl, NULL, missing, 2);
1788 (void) printf("\n");
1789 }
1790 (void) zpool_standard_error(hdl, error, desc);
1791 break;
1792
1793 case EEXIST:
1794 (void) zpool_standard_error(hdl, error, desc);
1795 break;
1796
1797 default:
1798 (void) zpool_standard_error(hdl, error, desc);
1799 zpool_explain_recover(hdl,
1800 newname ? origname : thename, -error, nv);
1801 break;
1802 }
1803
1804 nvlist_free(nv);
1805 ret = -1;
1806 } else {
1807 zpool_handle_t *zhp;
1808
1809 /*
1810 * This should never fail, but play it safe anyway.
1811 */
1812 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1813 ret = -1;
1814 else if (zhp != NULL)
1815 zpool_close(zhp);
1816 if (policy.zrp_request &
1817 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1818 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1819 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1820 }
1821 nvlist_free(nv);
1822 return (0);
1823 }
1824
1825 zcmd_free_nvlists(&zc);
1826 nvlist_free(props);
1827
1828 return (ret);
1829 }
1830
1831 /*
1832 * Scan the pool.
1833 */
1834 int
1835 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1836 {
1837 zfs_cmd_t zc = { 0 };
1838 char msg[1024];
1839 libzfs_handle_t *hdl = zhp->zpool_hdl;
1840
1841 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1842 zc.zc_cookie = func;
1843
1844 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1845 (errno == ENOENT && func != POOL_SCAN_NONE))
1846 return (0);
1847
1848 if (func == POOL_SCAN_SCRUB) {
1849 (void) snprintf(msg, sizeof (msg),
1850 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1851 } else if (func == POOL_SCAN_NONE) {
1852 (void) snprintf(msg, sizeof (msg),
1853 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1854 zc.zc_name);
1855 } else {
1856 assert(!"unexpected result");
1857 }
1858
1859 if (errno == EBUSY) {
1860 nvlist_t *nvroot;
1861 pool_scan_stat_t *ps = NULL;
1862 uint_t psc;
1863
1864 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1865 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1866 (void) nvlist_lookup_uint64_array(nvroot,
1867 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1868 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1869 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1870 else
1871 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1872 } else if (errno == ENOENT) {
1873 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1874 } else {
1875 return (zpool_standard_error(hdl, errno, msg));
1876 }
1877 }
1878
1879 /*
1880 * This provides a very minimal check whether a given string is likely a
1881 * c#t#d# style string. Users of this are expected to do their own
1882 * verification of the s# part.
1883 */
1884 #define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1]))
1885
1886 /*
1887 * More elaborate version for ones which may start with "/dev/dsk/"
1888 * and the like.
1889 */
1890 static int
1891 ctd_check_path(char *str) {
1892 /*
1893 * If it starts with a slash, check the last component.
1894 */
1895 if (str && str[0] == '/') {
1896 char *tmp = strrchr(str, '/');
1897
1898 /*
1899 * If it ends in "/old", check the second-to-last
1900 * component of the string instead.
1901 */
1902 if (tmp != str && strcmp(tmp, "/old") == 0) {
1903 for (tmp--; *tmp != '/'; tmp--)
1904 ;
1905 }
1906 str = tmp + 1;
1907 }
1908 return (CTD_CHECK(str));
1909 }
1910
1911 /*
1912 * Find a vdev that matches the search criteria specified. We use the
1913 * the nvpair name to determine how we should look for the device.
1914 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1915 * spare; but FALSE if its an INUSE spare.
1916 */
1917 static nvlist_t *
1918 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1919 boolean_t *l2cache, boolean_t *log)
1920 {
1921 uint_t c, children;
1922 nvlist_t **child;
1923 nvlist_t *ret;
1924 uint64_t is_log;
1925 char *srchkey;
1926 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1927
1928 /* Nothing to look for */
1929 if (search == NULL || pair == NULL)
1930 return (NULL);
1931
1932 /* Obtain the key we will use to search */
1933 srchkey = nvpair_name(pair);
1934
1935 switch (nvpair_type(pair)) {
1936 case DATA_TYPE_UINT64:
1937 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1938 uint64_t srchval, theguid;
1939
1940 verify(nvpair_value_uint64(pair, &srchval) == 0);
1941 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1942 &theguid) == 0);
1943 if (theguid == srchval)
1944 return (nv);
1945 }
1946 break;
1947
1948 case DATA_TYPE_STRING: {
1949 char *srchval, *val;
1950
1951 verify(nvpair_value_string(pair, &srchval) == 0);
1952 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1953 break;
1954
1955 /*
1956 * Search for the requested value. Special cases:
1957 *
1958 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
1959 * "s0" or "s0/old". The "s0" part is hidden from the user,
1960 * but included in the string, so this matches around it.
1961 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1962 *
1963 * Otherwise, all other searches are simple string compares.
1964 */
1965 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
1966 ctd_check_path(val)) {
1967 uint64_t wholedisk = 0;
1968
1969 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1970 &wholedisk);
1971 if (wholedisk) {
1972 int slen = strlen(srchval);
1973 int vlen = strlen(val);
1974
1975 if (slen != vlen - 2)
1976 break;
1977
1978 /*
1979 * make_leaf_vdev() should only set
1980 * wholedisk for ZPOOL_CONFIG_PATHs which
1981 * will include "/dev/dsk/", giving plenty of
1982 * room for the indices used next.
1983 */
1984 ASSERT(vlen >= 6);
1985
1986 /*
1987 * strings identical except trailing "s0"
1988 */
1989 if (strcmp(&val[vlen - 2], "s0") == 0 &&
1990 strncmp(srchval, val, slen) == 0)
1991 return (nv);
1992
1993 /*
1994 * strings identical except trailing "s0/old"
1995 */
1996 if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
1997 strcmp(&srchval[slen - 4], "/old") == 0 &&
1998 strncmp(srchval, val, slen - 4) == 0)
1999 return (nv);
2000
2001 break;
2002 }
2003 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2004 char *type, *idx, *end, *p;
2005 uint64_t id, vdev_id;
2006
2007 /*
2008 * Determine our vdev type, keeping in mind
2009 * that the srchval is composed of a type and
2010 * vdev id pair (i.e. mirror-4).
2011 */
2012 if ((type = strdup(srchval)) == NULL)
2013 return (NULL);
2014
2015 if ((p = strrchr(type, '-')) == NULL) {
2016 free(type);
2017 break;
2018 }
2019 idx = p + 1;
2020 *p = '\0';
2021
2022 /*
2023 * If the types don't match then keep looking.
2024 */
2025 if (strncmp(val, type, strlen(val)) != 0) {
2026 free(type);
2027 break;
2028 }
2029
2030 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2031 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2032 strncmp(type, VDEV_TYPE_MIRROR,
2033 strlen(VDEV_TYPE_MIRROR)) == 0);
2034 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2035 &id) == 0);
2036
2037 errno = 0;
2038 vdev_id = strtoull(idx, &end, 10);
2039
2040 free(type);
2041 if (errno != 0)
2042 return (NULL);
2043
2044 /*
2045 * Now verify that we have the correct vdev id.
2046 */
2047 if (vdev_id == id)
2048 return (nv);
2049 }
2050
2051 /*
2052 * Common case
2053 */
2054 if (strcmp(srchval, val) == 0)
2055 return (nv);
2056 break;
2057 }
2058
2059 default:
2060 break;
2061 }
2062
2063 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2064 &child, &children) != 0)
2065 return (NULL);
2066
2067 for (c = 0; c < children; c++) {
2068 if ((ret = vdev_to_nvlist_iter(child[c], search,
2069 avail_spare, l2cache, NULL)) != NULL) {
2070 /*
2071 * The 'is_log' value is only set for the toplevel
2072 * vdev, not the leaf vdevs. So we always lookup the
2073 * log device from the root of the vdev tree (where
2074 * 'log' is non-NULL).
2075 */
2076 if (log != NULL &&
2077 nvlist_lookup_uint64(child[c],
2078 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2079 is_log) {
2080 *log = B_TRUE;
2081 }
2082 return (ret);
2083 }
2084 }
2085
2086 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2087 &child, &children) == 0) {
2088 for (c = 0; c < children; c++) {
2089 if ((ret = vdev_to_nvlist_iter(child[c], search,
2090 avail_spare, l2cache, NULL)) != NULL) {
2091 *avail_spare = B_TRUE;
2092 return (ret);
2093 }
2094 }
2095 }
2096
2097 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2098 &child, &children) == 0) {
2099 for (c = 0; c < children; c++) {
2100 if ((ret = vdev_to_nvlist_iter(child[c], search,
2101 avail_spare, l2cache, NULL)) != NULL) {
2102 *l2cache = B_TRUE;
2103 return (ret);
2104 }
2105 }
2106 }
2107
2108 return (NULL);
2109 }
2110
2111 /*
2112 * Given a physical path (minus the "/devices" prefix), find the
2113 * associated vdev.
2114 */
2115 nvlist_t *
2116 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2117 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2118 {
2119 nvlist_t *search, *nvroot, *ret;
2120
2121 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2122 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2123
2124 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2125 &nvroot) == 0);
2126
2127 *avail_spare = B_FALSE;
2128 *l2cache = B_FALSE;
2129 if (log != NULL)
2130 *log = B_FALSE;
2131 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2132 nvlist_free(search);
2133
2134 return (ret);
2135 }
2136
2137 /*
2138 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
2139 */
2140 boolean_t
2141 zpool_vdev_is_interior(const char *name)
2142 {
2143 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2144 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2145 return (B_TRUE);
2146 return (B_FALSE);
2147 }
2148
2149 nvlist_t *
2150 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2151 boolean_t *l2cache, boolean_t *log)
2152 {
2153 char buf[MAXPATHLEN];
2154 char *end;
2155 nvlist_t *nvroot, *search, *ret;
2156 uint64_t guid;
2157
2158 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2159
2160 guid = strtoull(path, &end, 10);
2161 if (guid != 0 && *end == '\0') {
2162 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2163 } else if (zpool_vdev_is_interior(path)) {
2164 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2165 } else if (path[0] != '/') {
2166 (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
2167 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
2168 } else {
2169 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2170 }
2171
2172 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2173 &nvroot) == 0);
2174
2175 *avail_spare = B_FALSE;
2176 *l2cache = B_FALSE;
2177 if (log != NULL)
2178 *log = B_FALSE;
2179 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2180 nvlist_free(search);
2181
2182 return (ret);
2183 }
2184
2185 static int
2186 vdev_online(nvlist_t *nv)
2187 {
2188 uint64_t ival;
2189
2190 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2191 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2192 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2193 return (0);
2194
2195 return (1);
2196 }
2197
2198 /*
2199 * Helper function for zpool_get_physpaths().
2200 */
2201 static int
2202 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2203 size_t *bytes_written)
2204 {
2205 size_t bytes_left, pos, rsz;
2206 char *tmppath;
2207 const char *format;
2208
2209 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2210 &tmppath) != 0)
2211 return (EZFS_NODEVICE);
2212
2213 pos = *bytes_written;
2214 bytes_left = physpath_size - pos;
2215 format = (pos == 0) ? "%s" : " %s";
2216
2217 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2218 *bytes_written += rsz;
2219
2220 if (rsz >= bytes_left) {
2221 /* if physpath was not copied properly, clear it */
2222 if (bytes_left != 0) {
2223 physpath[pos] = 0;
2224 }
2225 return (EZFS_NOSPC);
2226 }
2227 return (0);
2228 }
2229
2230 static int
2231 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2232 size_t *rsz, boolean_t is_spare)
2233 {
2234 char *type;
2235 int ret;
2236
2237 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2238 return (EZFS_INVALCONFIG);
2239
2240 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2241 /*
2242 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2243 * For a spare vdev, we only want to boot from the active
2244 * spare device.
2245 */
2246 if (is_spare) {
2247 uint64_t spare = 0;
2248 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2249 &spare);
2250 if (!spare)
2251 return (EZFS_INVALCONFIG);
2252 }
2253
2254 if (vdev_online(nv)) {
2255 if ((ret = vdev_get_one_physpath(nv, physpath,
2256 phypath_size, rsz)) != 0)
2257 return (ret);
2258 }
2259 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2260 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2261 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2262 nvlist_t **child;
2263 uint_t count;
2264 int i, ret;
2265
2266 if (nvlist_lookup_nvlist_array(nv,
2267 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2268 return (EZFS_INVALCONFIG);
2269
2270 for (i = 0; i < count; i++) {
2271 ret = vdev_get_physpaths(child[i], physpath,
2272 phypath_size, rsz, is_spare);
2273 if (ret == EZFS_NOSPC)
2274 return (ret);
2275 }
2276 }
2277
2278 return (EZFS_POOL_INVALARG);
2279 }
2280
2281 /*
2282 * Get phys_path for a root pool config.
2283 * Return 0 on success; non-zero on failure.
2284 */
2285 static int
2286 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2287 {
2288 size_t rsz;
2289 nvlist_t *vdev_root;
2290 nvlist_t **child;
2291 uint_t count;
2292 char *type;
2293
2294 rsz = 0;
2295
2296 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2297 &vdev_root) != 0)
2298 return (EZFS_INVALCONFIG);
2299
2300 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2301 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2302 &child, &count) != 0)
2303 return (EZFS_INVALCONFIG);
2304
2305 /*
2306 * root pool can not have EFI labeled disks and can only have
2307 * a single top-level vdev.
2308 */
2309 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2310 pool_uses_efi(vdev_root))
2311 return (EZFS_POOL_INVALARG);
2312
2313 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2314 B_FALSE);
2315
2316 /* No online devices */
2317 if (rsz == 0)
2318 return (EZFS_NODEVICE);
2319
2320 return (0);
2321 }
2322
2323 /*
2324 * Get phys_path for a root pool
2325 * Return 0 on success; non-zero on failure.
2326 */
2327 int
2328 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2329 {
2330 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2331 phypath_size));
2332 }
2333
2334 /*
2335 * If the device has being dynamically expanded then we need to relabel
2336 * the disk to use the new unallocated space.
2337 */
2338 static int
2339 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2340 {
2341 char path[MAXPATHLEN];
2342 char errbuf[1024];
2343 int fd, error;
2344 int (*_efi_use_whole_disk)(int);
2345
2346 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2347 "efi_use_whole_disk")) == NULL)
2348 return (-1);
2349
2350 (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
2351
2352 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2353 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2354 "relabel '%s': unable to open device"), name);
2355 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2356 }
2357
2358 /*
2359 * It's possible that we might encounter an error if the device
2360 * does not have any unallocated space left. If so, we simply
2361 * ignore that error and continue on.
2362 */
2363 error = _efi_use_whole_disk(fd);
2364 (void) close(fd);
2365 if (error && error != VT_ENOSPC) {
2366 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2367 "relabel '%s': unable to read disk capacity"), name);
2368 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2369 }
2370 return (0);
2371 }
2372
2373 /*
2374 * Bring the specified vdev online. The 'flags' parameter is a set of the
2375 * ZFS_ONLINE_* flags.
2376 */
2377 int
2378 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2379 vdev_state_t *newstate)
2380 {
2381 zfs_cmd_t zc = { 0 };
2382 char msg[1024];
2383 nvlist_t *tgt;
2384 boolean_t avail_spare, l2cache, islog;
2385 libzfs_handle_t *hdl = zhp->zpool_hdl;
2386
2387 if (flags & ZFS_ONLINE_EXPAND) {
2388 (void) snprintf(msg, sizeof (msg),
2389 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2390 } else {
2391 (void) snprintf(msg, sizeof (msg),
2392 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2393 }
2394
2395 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2396 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2397 &islog)) == NULL)
2398 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2399
2400 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2401
2402 if (avail_spare)
2403 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2404
2405 if (flags & ZFS_ONLINE_EXPAND ||
2406 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2407 char *pathname = NULL;
2408 uint64_t wholedisk = 0;
2409
2410 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2411 &wholedisk);
2412 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2413 &pathname) == 0);
2414
2415 /*
2416 * XXX - L2ARC 1.0 devices can't support expansion.
2417 */
2418 if (l2cache) {
2419 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2420 "cannot expand cache devices"));
2421 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2422 }
2423
2424 if (wholedisk) {
2425 pathname += strlen(DISK_ROOT) + 1;
2426 (void) zpool_relabel_disk(hdl, pathname);
2427 }
2428 }
2429
2430 zc.zc_cookie = VDEV_STATE_ONLINE;
2431 zc.zc_obj = flags;
2432
2433 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2434 if (errno == EINVAL) {
2435 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2436 "from this pool into a new one. Use '%s' "
2437 "instead"), "zpool detach");
2438 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2439 }
2440 return (zpool_standard_error(hdl, errno, msg));
2441 }
2442
2443 *newstate = zc.zc_cookie;
2444 return (0);
2445 }
2446
2447 /*
2448 * Take the specified vdev offline
2449 */
2450 int
2451 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2452 {
2453 zfs_cmd_t zc = { 0 };
2454 char msg[1024];
2455 nvlist_t *tgt;
2456 boolean_t avail_spare, l2cache;
2457 libzfs_handle_t *hdl = zhp->zpool_hdl;
2458
2459 (void) snprintf(msg, sizeof (msg),
2460 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2461
2462 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2463 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2464 NULL)) == NULL)
2465 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2466
2467 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2468
2469 if (avail_spare)
2470 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2471
2472 zc.zc_cookie = VDEV_STATE_OFFLINE;
2473 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2474
2475 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2476 return (0);
2477
2478 switch (errno) {
2479 case EBUSY:
2480
2481 /*
2482 * There are no other replicas of this device.
2483 */
2484 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2485
2486 case EEXIST:
2487 /*
2488 * The log device has unplayed logs
2489 */
2490 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2491
2492 default:
2493 return (zpool_standard_error(hdl, errno, msg));
2494 }
2495 }
2496
2497 /*
2498 * Mark the given vdev faulted.
2499 */
2500 int
2501 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2502 {
2503 zfs_cmd_t zc = { 0 };
2504 char msg[1024];
2505 libzfs_handle_t *hdl = zhp->zpool_hdl;
2506
2507 (void) snprintf(msg, sizeof (msg),
2508 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
2509
2510 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2511 zc.zc_guid = guid;
2512 zc.zc_cookie = VDEV_STATE_FAULTED;
2513 zc.zc_obj = aux;
2514
2515 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2516 return (0);
2517
2518 switch (errno) {
2519 case EBUSY:
2520
2521 /*
2522 * There are no other replicas of this device.
2523 */
2524 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2525
2526 default:
2527 return (zpool_standard_error(hdl, errno, msg));
2528 }
2529
2530 }
2531
2532 /*
2533 * Mark the given vdev degraded.
2534 */
2535 int
2536 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2537 {
2538 zfs_cmd_t zc = { 0 };
2539 char msg[1024];
2540 libzfs_handle_t *hdl = zhp->zpool_hdl;
2541
2542 (void) snprintf(msg, sizeof (msg),
2543 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2544
2545 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2546 zc.zc_guid = guid;
2547 zc.zc_cookie = VDEV_STATE_DEGRADED;
2548 zc.zc_obj = aux;
2549
2550 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2551 return (0);
2552
2553 return (zpool_standard_error(hdl, errno, msg));
2554 }
2555
2556 /*
2557 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2558 * a hot spare.
2559 */
2560 static boolean_t
2561 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2562 {
2563 nvlist_t **child;
2564 uint_t c, children;
2565 char *type;
2566
2567 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2568 &children) == 0) {
2569 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2570 &type) == 0);
2571
2572 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2573 children == 2 && child[which] == tgt)
2574 return (B_TRUE);
2575
2576 for (c = 0; c < children; c++)
2577 if (is_replacing_spare(child[c], tgt, which))
2578 return (B_TRUE);
2579 }
2580
2581 return (B_FALSE);
2582 }
2583
2584 /*
2585 * Attach new_disk (fully described by nvroot) to old_disk.
2586 * If 'replacing' is specified, the new disk will replace the old one.
2587 */
2588 int
2589 zpool_vdev_attach(zpool_handle_t *zhp,
2590 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2591 {
2592 zfs_cmd_t zc = { 0 };
2593 char msg[1024];
2594 int ret;
2595 nvlist_t *tgt;
2596 boolean_t avail_spare, l2cache, islog;
2597 uint64_t val;
2598 char *newname;
2599 nvlist_t **child;
2600 uint_t children;
2601 nvlist_t *config_root;
2602 libzfs_handle_t *hdl = zhp->zpool_hdl;
2603 boolean_t rootpool = zpool_is_bootable(zhp);
2604
2605 if (replacing)
2606 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2607 "cannot replace %s with %s"), old_disk, new_disk);
2608 else
2609 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2610 "cannot attach %s to %s"), new_disk, old_disk);
2611
2612 /*
2613 * If this is a root pool, make sure that we're not attaching an
2614 * EFI labeled device.
2615 */
2616 if (rootpool && pool_uses_efi(nvroot)) {
2617 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2618 "EFI labeled devices are not supported on root pools."));
2619 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2620 }
2621
2622 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2623 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2624 &islog)) == 0)
2625 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2626
2627 if (avail_spare)
2628 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2629
2630 if (l2cache)
2631 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2632
2633 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2634 zc.zc_cookie = replacing;
2635
2636 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2637 &child, &children) != 0 || children != 1) {
2638 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2639 "new device must be a single disk"));
2640 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2641 }
2642
2643 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2644 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2645
2646 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2647 return (-1);
2648
2649 /*
2650 * If the target is a hot spare that has been swapped in, we can only
2651 * replace it with another hot spare.
2652 */
2653 if (replacing &&
2654 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2655 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2656 NULL) == NULL || !avail_spare) &&
2657 is_replacing_spare(config_root, tgt, 1)) {
2658 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2659 "can only be replaced by another hot spare"));
2660 free(newname);
2661 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2662 }
2663
2664 free(newname);
2665
2666 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2667 return (-1);
2668
2669 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2670
2671 zcmd_free_nvlists(&zc);
2672
2673 if (ret == 0) {
2674 if (rootpool) {
2675 /*
2676 * XXX need a better way to prevent user from
2677 * booting up a half-baked vdev.
2678 */
2679 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2680 "sure to wait until resilver is done "
2681 "before rebooting.\n"));
2682 }
2683 return (0);
2684 }
2685
2686 switch (errno) {
2687 case ENOTSUP:
2688 /*
2689 * Can't attach to or replace this type of vdev.
2690 */
2691 if (replacing) {
2692 uint64_t version = zpool_get_prop_int(zhp,
2693 ZPOOL_PROP_VERSION, NULL);
2694
2695 if (islog)
2696 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2697 "cannot replace a log with a spare"));
2698 else if (version >= SPA_VERSION_MULTI_REPLACE)
2699 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2700 "already in replacing/spare config; wait "
2701 "for completion or use 'zpool detach'"));
2702 else
2703 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2704 "cannot replace a replacing device"));
2705 } else {
2706 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2707 "can only attach to mirrors and top-level "
2708 "disks"));
2709 }
2710 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2711 break;
2712
2713 case EINVAL:
2714 /*
2715 * The new device must be a single disk.
2716 */
2717 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2718 "new device must be a single disk"));
2719 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2720 break;
2721
2722 case EBUSY:
2723 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2724 new_disk);
2725 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2726 break;
2727
2728 case EOVERFLOW:
2729 /*
2730 * The new device is too small.
2731 */
2732 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2733 "device is too small"));
2734 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2735 break;
2736
2737 case EDOM:
2738 /*
2739 * The new device has a different alignment requirement.
2740 */
2741 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2742 "devices have different sector alignment"));
2743 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2744 break;
2745
2746 case ENAMETOOLONG:
2747 /*
2748 * The resulting top-level vdev spec won't fit in the label.
2749 */
2750 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2751 break;
2752
2753 default:
2754 (void) zpool_standard_error(hdl, errno, msg);
2755 }
2756
2757 return (-1);
2758 }
2759
2760 /*
2761 * Detach the specified device.
2762 */
2763 int
2764 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2765 {
2766 zfs_cmd_t zc = { 0 };
2767 char msg[1024];
2768 nvlist_t *tgt;
2769 boolean_t avail_spare, l2cache;
2770 libzfs_handle_t *hdl = zhp->zpool_hdl;
2771
2772 (void) snprintf(msg, sizeof (msg),
2773 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2774
2775 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2776 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2777 NULL)) == 0)
2778 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2779
2780 if (avail_spare)
2781 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2782
2783 if (l2cache)
2784 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2785
2786 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2787
2788 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2789 return (0);
2790
2791 switch (errno) {
2792
2793 case ENOTSUP:
2794 /*
2795 * Can't detach from this type of vdev.
2796 */
2797 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2798 "applicable to mirror and replacing vdevs"));
2799 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2800 break;
2801
2802 case EBUSY:
2803 /*
2804 * There are no other replicas of this device.
2805 */
2806 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2807 break;
2808
2809 default:
2810 (void) zpool_standard_error(hdl, errno, msg);
2811 }
2812
2813 return (-1);
2814 }
2815
2816 /*
2817 * Find a mirror vdev in the source nvlist.
2818 *
2819 * The mchild array contains a list of disks in one of the top-level mirrors
2820 * of the source pool. The schild array contains a list of disks that the
2821 * user specified on the command line. We loop over the mchild array to
2822 * see if any entry in the schild array matches.
2823 *
2824 * If a disk in the mchild array is found in the schild array, we return
2825 * the index of that entry. Otherwise we return -1.
2826 */
2827 static int
2828 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2829 nvlist_t **schild, uint_t schildren)
2830 {
2831 uint_t mc;
2832
2833 for (mc = 0; mc < mchildren; mc++) {
2834 uint_t sc;
2835 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2836 mchild[mc], B_FALSE);
2837
2838 for (sc = 0; sc < schildren; sc++) {
2839 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2840 schild[sc], B_FALSE);
2841 boolean_t result = (strcmp(mpath, spath) == 0);
2842
2843 free(spath);
2844 if (result) {
2845 free(mpath);
2846 return (mc);
2847 }
2848 }
2849
2850 free(mpath);
2851 }
2852
2853 return (-1);
2854 }
2855
2856 /*
2857 * Split a mirror pool. If newroot points to null, then a new nvlist
2858 * is generated and it is the responsibility of the caller to free it.
2859 */
2860 int
2861 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2862 nvlist_t *props, splitflags_t flags)
2863 {
2864 zfs_cmd_t zc = { 0 };
2865 char msg[1024];
2866 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2867 nvlist_t **varray = NULL, *zc_props = NULL;
2868 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2869 libzfs_handle_t *hdl = zhp->zpool_hdl;
2870 uint64_t vers;
2871 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2872 int retval = 0;
2873
2874 (void) snprintf(msg, sizeof (msg),
2875 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2876
2877 if (!zpool_name_valid(hdl, B_FALSE, newname))
2878 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2879
2880 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2881 (void) fprintf(stderr, gettext("Internal error: unable to "
2882 "retrieve pool configuration\n"));
2883 return (-1);
2884 }
2885
2886 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2887 == 0);
2888 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2889
2890 if (props) {
2891 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2892 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2893 props, vers, flags, msg)) == NULL)
2894 return (-1);
2895 }
2896
2897 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2898 &children) != 0) {
2899 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2900 "Source pool is missing vdev tree"));
2901 if (zc_props)
2902 nvlist_free(zc_props);
2903 return (-1);
2904 }
2905
2906 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2907 vcount = 0;
2908
2909 if (*newroot == NULL ||
2910 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2911 &newchild, &newchildren) != 0)
2912 newchildren = 0;
2913
2914 for (c = 0; c < children; c++) {
2915 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2916 char *type;
2917 nvlist_t **mchild, *vdev;
2918 uint_t mchildren;
2919 int entry;
2920
2921 /*
2922 * Unlike cache & spares, slogs are stored in the
2923 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2924 */
2925 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2926 &is_log);
2927 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2928 &is_hole);
2929 if (is_log || is_hole) {
2930 /*
2931 * Create a hole vdev and put it in the config.
2932 */
2933 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2934 goto out;
2935 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2936 VDEV_TYPE_HOLE) != 0)
2937 goto out;
2938 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2939 1) != 0)
2940 goto out;
2941 if (lastlog == 0)
2942 lastlog = vcount;
2943 varray[vcount++] = vdev;
2944 continue;
2945 }
2946 lastlog = 0;
2947 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2948 == 0);
2949 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2950 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2951 "Source pool must be composed only of mirrors\n"));
2952 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2953 goto out;
2954 }
2955
2956 verify(nvlist_lookup_nvlist_array(child[c],
2957 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2958
2959 /* find or add an entry for this top-level vdev */
2960 if (newchildren > 0 &&
2961 (entry = find_vdev_entry(zhp, mchild, mchildren,
2962 newchild, newchildren)) >= 0) {
2963 /* We found a disk that the user specified. */
2964 vdev = mchild[entry];
2965 ++found;
2966 } else {
2967 /* User didn't specify a disk for this vdev. */
2968 vdev = mchild[mchildren - 1];
2969 }
2970
2971 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2972 goto out;
2973 }
2974
2975 /* did we find every disk the user specified? */
2976 if (found != newchildren) {
2977 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2978 "include at most one disk from each mirror"));
2979 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2980 goto out;
2981 }
2982
2983 /* Prepare the nvlist for populating. */
2984 if (*newroot == NULL) {
2985 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2986 goto out;
2987 freelist = B_TRUE;
2988 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2989 VDEV_TYPE_ROOT) != 0)
2990 goto out;
2991 } else {
2992 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2993 }
2994
2995 /* Add all the children we found */
2996 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2997 lastlog == 0 ? vcount : lastlog) != 0)
2998 goto out;
2999
3000 /*
3001 * If we're just doing a dry run, exit now with success.
3002 */
3003 if (flags.dryrun) {
3004 memory_err = B_FALSE;
3005 freelist = B_FALSE;
3006 goto out;
3007 }
3008
3009 /* now build up the config list & call the ioctl */
3010 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3011 goto out;
3012
3013 if (nvlist_add_nvlist(newconfig,
3014 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3015 nvlist_add_string(newconfig,
3016 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3017 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3018 goto out;
3019
3020 /*
3021 * The new pool is automatically part of the namespace unless we
3022 * explicitly export it.
3023 */
3024 if (!flags.import)
3025 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3026 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3027 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3028 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3029 goto out;
3030 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3031 goto out;
3032
3033 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3034 retval = zpool_standard_error(hdl, errno, msg);
3035 goto out;
3036 }
3037
3038 freelist = B_FALSE;
3039 memory_err = B_FALSE;
3040
3041 out:
3042 if (varray != NULL) {
3043 int v;
3044
3045 for (v = 0; v < vcount; v++)
3046 nvlist_free(varray[v]);
3047 free(varray);
3048 }
3049 zcmd_free_nvlists(&zc);
3050 if (zc_props)
3051 nvlist_free(zc_props);
3052 if (newconfig)
3053 nvlist_free(newconfig);
3054 if (freelist) {
3055 nvlist_free(*newroot);
3056 *newroot = NULL;
3057 }
3058
3059 if (retval != 0)
3060 return (retval);
3061
3062 if (memory_err)
3063 return (no_memory(hdl));
3064
3065 return (0);
3066 }
3067
3068 /*
3069 * Remove the given device. Currently, this is supported only for hot spares
3070 * and level 2 cache devices.
3071 */
3072 int
3073 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3074 {
3075 zfs_cmd_t zc = { 0 };
3076 char msg[1024];
3077 nvlist_t *tgt;
3078 boolean_t avail_spare, l2cache, islog;
3079 libzfs_handle_t *hdl = zhp->zpool_hdl;
3080 uint64_t version;
3081
3082 (void) snprintf(msg, sizeof (msg),
3083 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3084
3085 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3086 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3087 &islog)) == 0)
3088 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3089 /*
3090 * XXX - this should just go away.
3091 */
3092 if (!avail_spare && !l2cache && !islog) {
3093 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3094 "only inactive hot spares, cache, top-level, "
3095 "or log devices can be removed"));
3096 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3097 }
3098
3099 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3100 if (islog && version < SPA_VERSION_HOLES) {
3101 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3102 "pool must be upgrade to support log removal"));
3103 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3104 }
3105
3106 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3107
3108 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3109 return (0);
3110
3111 return (zpool_standard_error(hdl, errno, msg));
3112 }
3113
3114 /*
3115 * Clear the errors for the pool, or the particular device if specified.
3116 */
3117 int
3118 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3119 {
3120 zfs_cmd_t zc = { 0 };
3121 char msg[1024];
3122 nvlist_t *tgt;
3123 zpool_rewind_policy_t policy;
3124 boolean_t avail_spare, l2cache;
3125 libzfs_handle_t *hdl = zhp->zpool_hdl;
3126 nvlist_t *nvi = NULL;
3127 int error;
3128
3129 if (path)
3130 (void) snprintf(msg, sizeof (msg),
3131 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3132 path);
3133 else
3134 (void) snprintf(msg, sizeof (msg),
3135 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3136 zhp->zpool_name);
3137
3138 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3139 if (path) {
3140 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3141 &l2cache, NULL)) == 0)
3142 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3143
3144 /*
3145 * Don't allow error clearing for hot spares. Do allow
3146 * error clearing for l2cache devices.
3147 */
3148 if (avail_spare)
3149 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3150
3151 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3152 &zc.zc_guid) == 0);
3153 }
3154
3155 zpool_get_rewind_policy(rewindnvl, &policy);
3156 zc.zc_cookie = policy.zrp_request;
3157
3158 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3159 return (-1);
3160
3161 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3162 return (-1);
3163
3164 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3165 errno == ENOMEM) {
3166 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3167 zcmd_free_nvlists(&zc);
3168 return (-1);
3169 }
3170 }
3171
3172 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3173 errno != EPERM && errno != EACCES)) {
3174 if (policy.zrp_request &
3175 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3176 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3177 zpool_rewind_exclaim(hdl, zc.zc_name,
3178 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3179 nvi);
3180 nvlist_free(nvi);
3181 }
3182 zcmd_free_nvlists(&zc);
3183 return (0);
3184 }
3185
3186 zcmd_free_nvlists(&zc);
3187 return (zpool_standard_error(hdl, errno, msg));
3188 }
3189
3190 /*
3191 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3192 */
3193 int
3194 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3195 {
3196 zfs_cmd_t zc = { 0 };
3197 char msg[1024];
3198 libzfs_handle_t *hdl = zhp->zpool_hdl;
3199
3200 (void) snprintf(msg, sizeof (msg),
3201 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3202 guid);
3203
3204 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3205 zc.zc_guid = guid;
3206 zc.zc_cookie = ZPOOL_NO_REWIND;
3207
3208 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3209 return (0);
3210
3211 return (zpool_standard_error(hdl, errno, msg));
3212 }
3213
3214 /*
3215 * Change the GUID for a pool.
3216 */
3217 int
3218 zpool_reguid(zpool_handle_t *zhp)
3219 {
3220 char msg[1024];
3221 libzfs_handle_t *hdl = zhp->zpool_hdl;
3222 zfs_cmd_t zc = { 0 };
3223
3224 (void) snprintf(msg, sizeof (msg),
3225 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3226
3227 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3228 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3229 return (0);
3230
3231 return (zpool_standard_error(hdl, errno, msg));
3232 }
3233
3234 /*
3235 * Reopen the pool.
3236 */
3237 int
3238 zpool_reopen(zpool_handle_t *zhp)
3239 {
3240 zfs_cmd_t zc = { 0 };
3241 char msg[1024];
3242 libzfs_handle_t *hdl = zhp->zpool_hdl;
3243
3244 (void) snprintf(msg, sizeof (msg),
3245 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3246 zhp->zpool_name);
3247
3248 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3249 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3250 return (0);
3251 return (zpool_standard_error(hdl, errno, msg));
3252 }
3253
3254 /*
3255 * Convert from a devid string to a path.
3256 */
3257 static char *
3258 devid_to_path(char *devid_str)
3259 {
3260 ddi_devid_t devid;
3261 char *minor;
3262 char *path;
3263 devid_nmlist_t *list = NULL;
3264 int ret;
3265
3266 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3267 return (NULL);
3268
3269 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3270
3271 devid_str_free(minor);
3272 devid_free(devid);
3273
3274 if (ret != 0)
3275 return (NULL);
3276
3277 if ((path = strdup(list[0].devname)) == NULL)
3278 return (NULL);
3279
3280 devid_free_nmlist(list);
3281
3282 return (path);
3283 }
3284
3285 /*
3286 * Convert from a path to a devid string.
3287 */
3288 static char *
3289 path_to_devid(const char *path)
3290 {
3291 int fd;
3292 ddi_devid_t devid;
3293 char *minor, *ret;
3294
3295 if ((fd = open(path, O_RDONLY)) < 0)
3296 return (NULL);
3297
3298 minor = NULL;
3299 ret = NULL;
3300 if (devid_get(fd, &devid) == 0) {
3301 if (devid_get_minor_name(fd, &minor) == 0)
3302 ret = devid_str_encode(devid, minor);
3303 if (minor != NULL)
3304 devid_str_free(minor);
3305 devid_free(devid);
3306 }
3307 (void) close(fd);
3308
3309 return (ret);
3310 }
3311
3312 /*
3313 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3314 * ignore any failure here, since a common case is for an unprivileged user to
3315 * type 'zpool status', and we'll display the correct information anyway.
3316 */
3317 static void
3318 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3319 {
3320 zfs_cmd_t zc = { 0 };
3321
3322 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3323 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3324 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3325 &zc.zc_guid) == 0);
3326
3327 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3328 }
3329
3330 /*
3331 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3332 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3333 * We also check if this is a whole disk, in which case we strip off the
3334 * trailing 's0' slice name.
3335 *
3336 * This routine is also responsible for identifying when disks have been
3337 * reconfigured in a new location. The kernel will have opened the device by
3338 * devid, but the path will still refer to the old location. To catch this, we
3339 * first do a path -> devid translation (which is fast for the common case). If
3340 * the devid matches, we're done. If not, we do a reverse devid -> path
3341 * translation and issue the appropriate ioctl() to update the path of the vdev.
3342 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3343 * of these checks.
3344 */
3345 char *
3346 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3347 boolean_t verbose)
3348 {
3349 char *path, *devid;
3350 uint64_t value;
3351 char buf[64];
3352 vdev_stat_t *vs;
3353 uint_t vsc;
3354
3355 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3356 &value) == 0) {
3357 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3358 &value) == 0);
3359 (void) snprintf(buf, sizeof (buf), "%llu",
3360 (u_longlong_t)value);
3361 path = buf;
3362 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3363
3364 /*
3365 * If the device is dead (faulted, offline, etc) then don't
3366 * bother opening it. Otherwise we may be forcing the user to
3367 * open a misbehaving device, which can have undesirable
3368 * effects.
3369 */
3370 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3371 (uint64_t **)&vs, &vsc) != 0 ||
3372 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3373 zhp != NULL &&
3374 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3375 /*
3376 * Determine if the current path is correct.
3377 */
3378 char *newdevid = path_to_devid(path);
3379
3380 if (newdevid == NULL ||
3381 strcmp(devid, newdevid) != 0) {
3382 char *newpath;
3383
3384 if ((newpath = devid_to_path(devid)) != NULL) {
3385 /*
3386 * Update the path appropriately.
3387 */
3388 set_path(zhp, nv, newpath);
3389 if (nvlist_add_string(nv,
3390 ZPOOL_CONFIG_PATH, newpath) == 0)
3391 verify(nvlist_lookup_string(nv,
3392 ZPOOL_CONFIG_PATH,
3393 &path) == 0);
3394 free(newpath);
3395 }
3396 }
3397
3398 if (newdevid)
3399 devid_str_free(newdevid);
3400 }
3401
3402 if (strncmp(path, "/dev/dsk/", 9) == 0)
3403 path += 9;
3404
3405 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3406 &value) == 0 && value) {
3407 int pathlen = strlen(path);
3408 char *tmp = zfs_strdup(hdl, path);
3409
3410 /*
3411 * If it starts with c#, and ends with "s0", chop
3412 * the "s0" off, or if it ends with "s0/old", remove
3413 * the "s0" from the middle.
3414 */
3415 if (CTD_CHECK(tmp)) {
3416 if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
3417 tmp[pathlen - 2] = '\0';
3418 } else if (pathlen > 6 &&
3419 strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
3420 (void) strcpy(&tmp[pathlen - 6],
3421 "/old");
3422 }
3423 }
3424 return (tmp);
3425 }
3426 } else {
3427 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3428
3429 /*
3430 * If it's a raidz device, we need to stick in the parity level.
3431 */
3432 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3433 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3434 &value) == 0);
3435 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3436 (u_longlong_t)value);
3437 path = buf;
3438 }
3439
3440 /*
3441 * We identify each top-level vdev by using a <type-id>
3442 * naming convention.
3443 */
3444 if (verbose) {
3445 uint64_t id;
3446
3447 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3448 &id) == 0);
3449 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3450 (u_longlong_t)id);
3451 path = buf;
3452 }
3453 }
3454
3455 return (zfs_strdup(hdl, path));
3456 }
3457
3458 static int
3459 zbookmark_compare(const void *a, const void *b)
3460 {
3461 return (memcmp(a, b, sizeof (zbookmark_t)));
3462 }
3463
3464 /*
3465 * Retrieve the persistent error log, uniquify the members, and return to the
3466 * caller.
3467 */
3468 int
3469 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3470 {
3471 zfs_cmd_t zc = { 0 };
3472 uint64_t count;
3473 zbookmark_t *zb = NULL;
3474 int i;
3475
3476 /*
3477 * Retrieve the raw error list from the kernel. If the number of errors
3478 * has increased, allocate more space and continue until we get the
3479 * entire list.
3480 */
3481 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3482 &count) == 0);
3483 if (count == 0)
3484 return (0);
3485 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3486 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3487 return (-1);
3488 zc.zc_nvlist_dst_size = count;
3489 (void) strcpy(zc.zc_name, zhp->zpool_name);
3490 for (;;) {
3491 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3492 &zc) != 0) {
3493 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3494 if (errno == ENOMEM) {
3495 count = zc.zc_nvlist_dst_size;
3496 if ((zc.zc_nvlist_dst = (uintptr_t)
3497 zfs_alloc(zhp->zpool_hdl, count *
3498 sizeof (zbookmark_t))) == (uintptr_t)NULL)
3499 return (-1);
3500 } else {
3501 return (-1);
3502 }
3503 } else {
3504 break;
3505 }
3506 }
3507
3508 /*
3509 * Sort the resulting bookmarks. This is a little confusing due to the
3510 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3511 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
3512 * _not_ copied as part of the process. So we point the start of our
3513 * array appropriate and decrement the total number of elements.
3514 */
3515 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3516 zc.zc_nvlist_dst_size;
3517 count -= zc.zc_nvlist_dst_size;
3518
3519 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3520
3521 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3522
3523 /*
3524 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3525 */
3526 for (i = 0; i < count; i++) {
3527 nvlist_t *nv;
3528
3529 /* ignoring zb_blkid and zb_level for now */
3530 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3531 zb[i-1].zb_object == zb[i].zb_object)
3532 continue;
3533
3534 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3535 goto nomem;
3536 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3537 zb[i].zb_objset) != 0) {
3538 nvlist_free(nv);
3539 goto nomem;
3540 }
3541 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3542 zb[i].zb_object) != 0) {
3543 nvlist_free(nv);
3544 goto nomem;
3545 }
3546 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3547 nvlist_free(nv);
3548 goto nomem;
3549 }
3550 nvlist_free(nv);
3551 }
3552
3553 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3554 return (0);
3555
3556 nomem:
3557 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3558 return (no_memory(zhp->zpool_hdl));
3559 }
3560
3561 /*
3562 * Upgrade a ZFS pool to the latest on-disk version.
3563 */
3564 int
3565 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3566 {
3567 zfs_cmd_t zc = { 0 };
3568 libzfs_handle_t *hdl = zhp->zpool_hdl;
3569
3570 (void) strcpy(zc.zc_name, zhp->zpool_name);
3571 zc.zc_cookie = new_version;
3572
3573 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3574 return (zpool_standard_error_fmt(hdl, errno,
3575 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3576 zhp->zpool_name));
3577 return (0);
3578 }
3579
3580 void
3581 zfs_save_arguments(int argc, char **argv, char *string, int len)
3582 {
3583 (void) strlcpy(string, basename(argv[0]), len);
3584 for (int i = 1; i < argc; i++) {
3585 (void) strlcat(string, " ", len);
3586 (void) strlcat(string, argv[i], len);
3587 }
3588 }
3589
3590 int
3591 zpool_log_history(libzfs_handle_t *hdl, const char *message)
3592 {
3593 zfs_cmd_t zc = { 0 };
3594 nvlist_t *args;
3595 int err;
3596
3597 args = fnvlist_alloc();
3598 fnvlist_add_string(args, "message", message);
3599 err = zcmd_write_src_nvlist(hdl, &zc, args);
3600 if (err == 0)
3601 err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3602 nvlist_free(args);
3603 zcmd_free_nvlists(&zc);
3604 return (err);
3605 }
3606
3607 /*
3608 * Perform ioctl to get some command history of a pool.
3609 *
3610 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3611 * logical offset of the history buffer to start reading from.
3612 *
3613 * Upon return, 'off' is the next logical offset to read from and
3614 * 'len' is the actual amount of bytes read into 'buf'.
3615 */
3616 static int
3617 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3618 {
3619 zfs_cmd_t zc = { 0 };
3620 libzfs_handle_t *hdl = zhp->zpool_hdl;
3621
3622 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3623
3624 zc.zc_history = (uint64_t)(uintptr_t)buf;
3625 zc.zc_history_len = *len;
3626 zc.zc_history_offset = *off;
3627
3628 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3629 switch (errno) {
3630 case EPERM:
3631 return (zfs_error_fmt(hdl, EZFS_PERM,
3632 dgettext(TEXT_DOMAIN,
3633 "cannot show history for pool '%s'"),
3634 zhp->zpool_name));
3635 case ENOENT:
3636 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3637 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3638 "'%s'"), zhp->zpool_name));
3639 case ENOTSUP:
3640 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3641 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3642 "'%s', pool must be upgraded"), zhp->zpool_name));
3643 default:
3644 return (zpool_standard_error_fmt(hdl, errno,
3645 dgettext(TEXT_DOMAIN,
3646 "cannot get history for '%s'"), zhp->zpool_name));
3647 }
3648 }
3649
3650 *len = zc.zc_history_len;
3651 *off = zc.zc_history_offset;
3652
3653 return (0);
3654 }
3655
3656 /*
3657 * Process the buffer of nvlists, unpacking and storing each nvlist record
3658 * into 'records'. 'leftover' is set to the number of bytes that weren't
3659 * processed as there wasn't a complete record.
3660 */
3661 int
3662 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3663 nvlist_t ***records, uint_t *numrecords)
3664 {
3665 uint64_t reclen;
3666 nvlist_t *nv;
3667 int i;
3668
3669 while (bytes_read > sizeof (reclen)) {
3670
3671 /* get length of packed record (stored as little endian) */
3672 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3673 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3674
3675 if (bytes_read < sizeof (reclen) + reclen)
3676 break;
3677
3678 /* unpack record */
3679 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3680 return (ENOMEM);
3681 bytes_read -= sizeof (reclen) + reclen;
3682 buf += sizeof (reclen) + reclen;
3683
3684 /* add record to nvlist array */
3685 (*numrecords)++;
3686 if (ISP2(*numrecords + 1)) {
3687 *records = realloc(*records,
3688 *numrecords * 2 * sizeof (nvlist_t *));
3689 }
3690 (*records)[*numrecords - 1] = nv;
3691 }
3692
3693 *leftover = bytes_read;
3694 return (0);
3695 }
3696
3697 #define HIS_BUF_LEN (128*1024)
3698
3699 /*
3700 * Retrieve the command history of a pool.
3701 */
3702 int
3703 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3704 {
3705 char buf[HIS_BUF_LEN];
3706 uint64_t off = 0;
3707 nvlist_t **records = NULL;
3708 uint_t numrecords = 0;
3709 int err, i;
3710
3711 do {
3712 uint64_t bytes_read = sizeof (buf);
3713 uint64_t leftover;
3714
3715 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3716 break;
3717
3718 /* if nothing else was read in, we're at EOF, just return */
3719 if (!bytes_read)
3720 break;
3721
3722 if ((err = zpool_history_unpack(buf, bytes_read,
3723 &leftover, &records, &numrecords)) != 0)
3724 break;
3725 off -= leftover;
3726
3727 /* CONSTCOND */
3728 } while (1);
3729
3730 if (!err) {
3731 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3732 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3733 records, numrecords) == 0);
3734 }
3735 for (i = 0; i < numrecords; i++)
3736 nvlist_free(records[i]);
3737 free(records);
3738
3739 return (err);
3740 }
3741
3742 void
3743 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3744 char *pathname, size_t len)
3745 {
3746 zfs_cmd_t zc = { 0 };
3747 boolean_t mounted = B_FALSE;
3748 char *mntpnt = NULL;
3749 char dsname[MAXNAMELEN];
3750
3751 if (dsobj == 0) {
3752 /* special case for the MOS */
3753 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3754 return;
3755 }
3756
3757 /* get the dataset's name */
3758 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3759 zc.zc_obj = dsobj;
3760 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3761 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3762 /* just write out a path of two object numbers */
3763 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3764 dsobj, obj);
3765 return;
3766 }
3767 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3768
3769 /* find out if the dataset is mounted */
3770 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3771
3772 /* get the corrupted object's path */
3773 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3774 zc.zc_obj = obj;
3775 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3776 &zc) == 0) {
3777 if (mounted) {
3778 (void) snprintf(pathname, len, "%s%s", mntpnt,
3779 zc.zc_value);
3780 } else {
3781 (void) snprintf(pathname, len, "%s:%s",
3782 dsname, zc.zc_value);
3783 }
3784 } else {
3785 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
3786 }
3787 free(mntpnt);
3788 }
3789
3790 /*
3791 * Read the EFI label from the config, if a label does not exist then
3792 * pass back the error to the caller. If the caller has passed a non-NULL
3793 * diskaddr argument then we set it to the starting address of the EFI
3794 * partition.
3795 */
3796 static int
3797 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3798 {
3799 char *path;
3800 int fd;
3801 char diskname[MAXPATHLEN];
3802 int err = -1;
3803
3804 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3805 return (err);
3806
3807 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3808 strrchr(path, '/'));
3809 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3810 struct dk_gpt *vtoc;
3811
3812 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3813 if (sb != NULL)
3814 *sb = vtoc->efi_parts[0].p_start;
3815 efi_free(vtoc);
3816 }
3817 (void) close(fd);
3818 }
3819 return (err);
3820 }
3821
3822 /*
3823 * determine where a partition starts on a disk in the current
3824 * configuration
3825 */
3826 static diskaddr_t
3827 find_start_block(nvlist_t *config)
3828 {
3829 nvlist_t **child;
3830 uint_t c, children;
3831 diskaddr_t sb = MAXOFFSET_T;
3832 uint64_t wholedisk;
3833
3834 if (nvlist_lookup_nvlist_array(config,
3835 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3836 if (nvlist_lookup_uint64(config,
3837 ZPOOL_CONFIG_WHOLE_DISK,
3838 &wholedisk) != 0 || !wholedisk) {
3839 return (MAXOFFSET_T);
3840 }
3841 if (read_efi_label(config, &sb) < 0)
3842 sb = MAXOFFSET_T;
3843 return (sb);
3844 }
3845
3846 for (c = 0; c < children; c++) {
3847 sb = find_start_block(child[c]);
3848 if (sb != MAXOFFSET_T) {
3849 return (sb);
3850 }
3851 }
3852 return (MAXOFFSET_T);
3853 }
3854
3855 /*
3856 * Label an individual disk. The name provided is the short name,
3857 * stripped of any leading /dev path.
3858 */
3859 int
3860 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3861 {
3862 char path[MAXPATHLEN];
3863 struct dk_gpt *vtoc;
3864 int fd;
3865 size_t resv = EFI_MIN_RESV_SIZE;
3866 uint64_t slice_size;
3867 diskaddr_t start_block;
3868 char errbuf[1024];
3869
3870 /* prepare an error message just in case */
3871 (void) snprintf(errbuf, sizeof (errbuf),
3872 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3873
3874 if (zhp) {
3875 nvlist_t *nvroot;
3876
3877 if (zpool_is_bootable(zhp)) {
3878 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3879 "EFI labeled devices are not supported on root "
3880 "pools."));
3881 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3882 }
3883
3884 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3885 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3886
3887 if (zhp->zpool_start_block == 0)
3888 start_block = find_start_block(nvroot);
3889 else
3890 start_block = zhp->zpool_start_block;
3891 zhp->zpool_start_block = start_block;
3892 } else {
3893 /* new pool */
3894 start_block = NEW_START_BLOCK;
3895 }
3896
3897 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3898 BACKUP_SLICE);
3899
3900 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3901 /*
3902 * This shouldn't happen. We've long since verified that this
3903 * is a valid device.
3904 */
3905 zfs_error_aux(hdl,
3906 dgettext(TEXT_DOMAIN, "unable to open device"));
3907 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3908 }
3909
3910 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3911 /*
3912 * The only way this can fail is if we run out of memory, or we
3913 * were unable to read the disk's capacity
3914 */
3915 if (errno == ENOMEM)
3916 (void) no_memory(hdl);
3917
3918 (void) close(fd);
3919 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3920 "unable to read disk capacity"), name);
3921
3922 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3923 }
3924
3925 slice_size = vtoc->efi_last_u_lba + 1;
3926 slice_size -= EFI_MIN_RESV_SIZE;
3927 if (start_block == MAXOFFSET_T)
3928 start_block = NEW_START_BLOCK;
3929 slice_size -= start_block;
3930
3931 vtoc->efi_parts[0].p_start = start_block;
3932 vtoc->efi_parts[0].p_size = slice_size;
3933
3934 /*
3935 * Why we use V_USR: V_BACKUP confuses users, and is considered
3936 * disposable by some EFI utilities (since EFI doesn't have a backup
3937 * slice). V_UNASSIGNED is supposed to be used only for zero size
3938 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
3939 * etc. were all pretty specific. V_USR is as close to reality as we
3940 * can get, in the absence of V_OTHER.
3941 */
3942 vtoc->efi_parts[0].p_tag = V_USR;
3943 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3944
3945 vtoc->efi_parts[8].p_start = slice_size + start_block;
3946 vtoc->efi_parts[8].p_size = resv;
3947 vtoc->efi_parts[8].p_tag = V_RESERVED;
3948
3949 if (efi_write(fd, vtoc) != 0) {
3950 /*
3951 * Some block drivers (like pcata) may not support EFI
3952 * GPT labels. Print out a helpful error message dir-
3953 * ecting the user to manually label the disk and give
3954 * a specific slice.
3955 */
3956 (void) close(fd);
3957 efi_free(vtoc);
3958
3959 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3960 "try using fdisk(1M) and then provide a specific slice"));
3961 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3962 }
3963
3964 (void) close(fd);
3965 efi_free(vtoc);
3966 return (0);
3967 }
3968
3969 static boolean_t
3970 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3971 {
3972 char *type;
3973 nvlist_t **child;
3974 uint_t children, c;
3975
3976 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3977 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
3978 strcmp(type, VDEV_TYPE_FILE) == 0 ||
3979 strcmp(type, VDEV_TYPE_LOG) == 0 ||
3980 strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3981 strcmp(type, VDEV_TYPE_MISSING) == 0) {
3982 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3983 "vdev type '%s' is not supported"), type);
3984 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3985 return (B_FALSE);
3986 }
3987 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3988 &child, &children) == 0) {
3989 for (c = 0; c < children; c++) {
3990 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3991 return (B_FALSE);
3992 }
3993 }
3994 return (B_TRUE);
3995 }
3996
3997 /*
3998 * check if this zvol is allowable for use as a dump device; zero if
3999 * it is, > 0 if it isn't, < 0 if it isn't a zvol
4000 */
4001 int
4002 zvol_check_dump_config(char *arg)
4003 {
4004 zpool_handle_t *zhp = NULL;
4005 nvlist_t *config, *nvroot;
4006 char *p, *volname;
4007 nvlist_t **top;
4008 uint_t toplevels;
4009 libzfs_handle_t *hdl;
4010 char errbuf[1024];
4011 char poolname[ZPOOL_MAXNAMELEN];
4012 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
4013 int ret = 1;
4014
4015 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
4016 return (-1);
4017 }
4018
4019 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
4020 "dump is not supported on device '%s'"), arg);
4021
4022 if ((hdl = libzfs_init()) == NULL)
4023 return (1);
4024 libzfs_print_on_error(hdl, B_TRUE);
4025
4026 volname = arg + pathlen;
4027
4028 /* check the configuration of the pool */
4029 if ((p = strchr(volname, '/')) == NULL) {
4030 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4031 "malformed dataset name"));
4032 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
4033 return (1);
4034 } else if (p - volname >= ZFS_MAXNAMELEN) {
4035 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4036 "dataset name is too long"));
4037 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
4038 return (1);
4039 } else {
4040 (void) strncpy(poolname, volname, p - volname);
4041 poolname[p - volname] = '\0';
4042 }
4043
4044 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
4045 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4046 "could not open pool '%s'"), poolname);
4047 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
4048 goto out;
4049 }
4050 config = zpool_get_config(zhp, NULL);
4051 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4052 &nvroot) != 0) {
4053 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4054 "could not obtain vdev configuration for '%s'"), poolname);
4055 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4056 goto out;
4057 }
4058
4059 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4060 &top, &toplevels) == 0);
4061 if (toplevels != 1) {
4062 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4063 "'%s' has multiple top level vdevs"), poolname);
4064 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
4065 goto out;
4066 }
4067
4068 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
4069 goto out;
4070 }
4071 ret = 0;
4072
4073 out:
4074 if (zhp)
4075 zpool_close(zhp);
4076 libzfs_fini(hdl);
4077 return (ret);
4078 }