1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 * Copyright (c) 2012 by Delphix. All rights reserved.
26 */
27
28 #include <ctype.h>
29 #include <errno.h>
30 #include <devid.h>
31 #include <fcntl.h>
32 #include <libintl.h>
33 #include <stdio.h>
34 #include <stdlib.h>
35 #include <strings.h>
36 #include <unistd.h>
37 #include <sys/efi_partition.h>
38 #include <sys/vtoc.h>
39 #include <sys/zfs_ioctl.h>
40 #include <dlfcn.h>
41
42 #include "zfs_namecheck.h"
43 #include "zfs_prop.h"
44 #include "libzfs_impl.h"
45 #include "zfs_comutil.h"
46 #include "zfeature_common.h"
47
48 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
49
50 #define DISK_ROOT "/dev/dsk"
51 #define RDISK_ROOT "/dev/rdsk"
52 #define BACKUP_SLICE "s2"
53
54 typedef struct prop_flags {
55 int create:1; /* Validate property on creation */
56 int import:1; /* Validate property on import */
57 } prop_flags_t;
58
59 /*
60 * ====================================================================
61 * zpool property functions
62 * ====================================================================
63 */
64
65 static int
66 zpool_get_all_props(zpool_handle_t *zhp)
67 {
68 zfs_cmd_t zc = { 0 };
69 libzfs_handle_t *hdl = zhp->zpool_hdl;
70
71 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
72
73 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
74 return (-1);
75
76 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
77 if (errno == ENOMEM) {
78 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
79 zcmd_free_nvlists(&zc);
80 return (-1);
81 }
82 } else {
83 zcmd_free_nvlists(&zc);
84 return (-1);
85 }
86 }
87
88 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
89 zcmd_free_nvlists(&zc);
90 return (-1);
91 }
92
93 zcmd_free_nvlists(&zc);
94
95 return (0);
96 }
97
98 static int
99 zpool_props_refresh(zpool_handle_t *zhp)
100 {
101 nvlist_t *old_props;
102
103 old_props = zhp->zpool_props;
104
105 if (zpool_get_all_props(zhp) != 0)
106 return (-1);
107
108 nvlist_free(old_props);
109 return (0);
110 }
111
112 static char *
113 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
114 zprop_source_t *src)
115 {
116 nvlist_t *nv, *nvl;
117 uint64_t ival;
118 char *value;
119 zprop_source_t source;
120
121 nvl = zhp->zpool_props;
122 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
123 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
124 source = ival;
125 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
126 } else {
127 source = ZPROP_SRC_DEFAULT;
128 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
129 value = "-";
130 }
131
132 if (src)
133 *src = source;
134
135 return (value);
136 }
137
138 uint64_t
139 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
140 {
141 nvlist_t *nv, *nvl;
142 uint64_t value;
143 zprop_source_t source;
144
145 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
146 /*
147 * zpool_get_all_props() has most likely failed because
148 * the pool is faulted, but if all we need is the top level
149 * vdev's guid then get it from the zhp config nvlist.
150 */
151 if ((prop == ZPOOL_PROP_GUID) &&
152 (nvlist_lookup_nvlist(zhp->zpool_config,
153 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
154 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
155 == 0)) {
156 return (value);
157 }
158 return (zpool_prop_default_numeric(prop));
159 }
160
161 nvl = zhp->zpool_props;
162 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
163 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
164 source = value;
165 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
166 } else {
167 source = ZPROP_SRC_DEFAULT;
168 value = zpool_prop_default_numeric(prop);
169 }
170
171 if (src)
172 *src = source;
173
174 return (value);
175 }
176
177 /*
178 * Map VDEV STATE to printed strings.
179 */
180 char *
181 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
182 {
183 switch (state) {
184 case VDEV_STATE_CLOSED:
185 case VDEV_STATE_OFFLINE:
186 return (gettext("OFFLINE"));
187 case VDEV_STATE_REMOVED:
188 return (gettext("REMOVED"));
189 case VDEV_STATE_CANT_OPEN:
190 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
191 return (gettext("FAULTED"));
192 else if (aux == VDEV_AUX_SPLIT_POOL)
193 return (gettext("SPLIT"));
194 else
195 return (gettext("UNAVAIL"));
196 case VDEV_STATE_FAULTED:
197 return (gettext("FAULTED"));
198 case VDEV_STATE_DEGRADED:
199 return (gettext("DEGRADED"));
200 case VDEV_STATE_HEALTHY:
201 return (gettext("ONLINE"));
202 }
203
204 return (gettext("UNKNOWN"));
205 }
206
207 /*
208 * Get a zpool property value for 'prop' and return the value in
209 * a pre-allocated buffer.
210 */
211 int
212 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
213 zprop_source_t *srctype)
214 {
215 uint64_t intval;
216 const char *strval;
217 zprop_source_t src = ZPROP_SRC_NONE;
218 nvlist_t *nvroot;
219 vdev_stat_t *vs;
220 uint_t vsc;
221
222 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
223 switch (prop) {
224 case ZPOOL_PROP_NAME:
225 (void) strlcpy(buf, zpool_get_name(zhp), len);
226 break;
227
228 case ZPOOL_PROP_HEALTH:
229 (void) strlcpy(buf, "FAULTED", len);
230 break;
231
232 case ZPOOL_PROP_GUID:
233 intval = zpool_get_prop_int(zhp, prop, &src);
234 (void) snprintf(buf, len, "%llu", intval);
235 break;
236
237 case ZPOOL_PROP_ALTROOT:
238 case ZPOOL_PROP_CACHEFILE:
239 case ZPOOL_PROP_COMMENT:
240 if (zhp->zpool_props != NULL ||
241 zpool_get_all_props(zhp) == 0) {
242 (void) strlcpy(buf,
243 zpool_get_prop_string(zhp, prop, &src),
244 len);
245 if (srctype != NULL)
246 *srctype = src;
247 return (0);
248 }
249 /* FALLTHROUGH */
250 default:
251 (void) strlcpy(buf, "-", len);
252 break;
253 }
254
255 if (srctype != NULL)
256 *srctype = src;
257 return (0);
258 }
259
260 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
261 prop != ZPOOL_PROP_NAME)
262 return (-1);
263
264 switch (zpool_prop_get_type(prop)) {
265 case PROP_TYPE_STRING:
266 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
267 len);
268 break;
269
270 case PROP_TYPE_NUMBER:
271 intval = zpool_get_prop_int(zhp, prop, &src);
272
273 switch (prop) {
274 case ZPOOL_PROP_SIZE:
275 case ZPOOL_PROP_ALLOCATED:
276 case ZPOOL_PROP_FREE:
277 case ZPOOL_PROP_FREEING:
278 case ZPOOL_PROP_EXPANDSZ:
279 (void) zfs_nicenum(intval, buf, len);
280 break;
281
282 case ZPOOL_PROP_CAPACITY:
283 (void) snprintf(buf, len, "%llu%%",
284 (u_longlong_t)intval);
285 break;
286
287 case ZPOOL_PROP_DEDUPRATIO:
288 (void) snprintf(buf, len, "%llu.%02llux",
289 (u_longlong_t)(intval / 100),
290 (u_longlong_t)(intval % 100));
291 break;
292
293 case ZPOOL_PROP_HEALTH:
294 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
295 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
296 verify(nvlist_lookup_uint64_array(nvroot,
297 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
298 == 0);
299
300 (void) strlcpy(buf, zpool_state_to_name(intval,
301 vs->vs_aux), len);
302 break;
303 case ZPOOL_PROP_VERSION:
304 if (intval >= SPA_VERSION_FEATURES) {
305 (void) snprintf(buf, len, "-");
306 break;
307 }
308 /* FALLTHROUGH */
309 default:
310 (void) snprintf(buf, len, "%llu", intval);
311 }
312 break;
313
314 case PROP_TYPE_INDEX:
315 intval = zpool_get_prop_int(zhp, prop, &src);
316 if (zpool_prop_index_to_string(prop, intval, &strval)
317 != 0)
318 return (-1);
319 (void) strlcpy(buf, strval, len);
320 break;
321
322 default:
323 abort();
324 }
325
326 if (srctype)
327 *srctype = src;
328
329 return (0);
330 }
331
332 /*
333 * Check if the bootfs name has the same pool name as it is set to.
334 * Assuming bootfs is a valid dataset name.
335 */
336 static boolean_t
337 bootfs_name_valid(const char *pool, char *bootfs)
338 {
339 int len = strlen(pool);
340
341 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
342 return (B_FALSE);
343
344 if (strncmp(pool, bootfs, len) == 0 &&
345 (bootfs[len] == '/' || bootfs[len] == '\0'))
346 return (B_TRUE);
347
348 return (B_FALSE);
349 }
350
351 /*
352 * Inspect the configuration to determine if any of the devices contain
353 * an EFI label.
354 */
355 static boolean_t
356 pool_uses_efi(nvlist_t *config)
357 {
358 nvlist_t **child;
359 uint_t c, children;
360
361 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
362 &child, &children) != 0)
363 return (read_efi_label(config, NULL) >= 0);
364
365 for (c = 0; c < children; c++) {
366 if (pool_uses_efi(child[c]))
367 return (B_TRUE);
368 }
369 return (B_FALSE);
370 }
371
372 boolean_t
373 zpool_is_bootable(zpool_handle_t *zhp)
374 {
375 char bootfs[ZPOOL_MAXNAMELEN];
376
377 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
378 sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
379 sizeof (bootfs)) != 0);
380 }
381
382
383 /*
384 * Given an nvlist of zpool properties to be set, validate that they are
385 * correct, and parse any numeric properties (index, boolean, etc) if they are
386 * specified as strings.
387 */
388 static nvlist_t *
389 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
390 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
391 {
392 nvpair_t *elem;
393 nvlist_t *retprops;
394 zpool_prop_t prop;
395 char *strval;
396 uint64_t intval;
397 char *slash, *check;
398 struct stat64 statbuf;
399 zpool_handle_t *zhp;
400 nvlist_t *nvroot;
401
402 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
403 (void) no_memory(hdl);
404 return (NULL);
405 }
406
407 elem = NULL;
408 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
409 const char *propname = nvpair_name(elem);
410
411 prop = zpool_name_to_prop(propname);
412 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
413 int err;
414 zfeature_info_t *feature;
415 char *fname = strchr(propname, '@') + 1;
416
417 err = zfeature_lookup_name(fname, &feature);
418 if (err != 0) {
419 ASSERT3U(err, ==, ENOENT);
420 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
421 "invalid feature '%s'"), fname);
422 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
423 goto error;
424 }
425
426 if (nvpair_type(elem) != DATA_TYPE_STRING) {
427 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
428 "'%s' must be a string"), propname);
429 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
430 goto error;
431 }
432
433 (void) nvpair_value_string(elem, &strval);
434 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
435 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
436 "property '%s' can only be set to "
437 "'enabled'"), propname);
438 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
439 goto error;
440 }
441
442 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
443 (void) no_memory(hdl);
444 goto error;
445 }
446 continue;
447 }
448
449 /*
450 * Make sure this property is valid and applies to this type.
451 */
452 if (prop == ZPROP_INVAL) {
453 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
454 "invalid property '%s'"), propname);
455 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
456 goto error;
457 }
458
459 if (zpool_prop_readonly(prop)) {
460 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
461 "is readonly"), propname);
462 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
463 goto error;
464 }
465
466 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
467 &strval, &intval, errbuf) != 0)
468 goto error;
469
470 /*
471 * Perform additional checking for specific properties.
472 */
473 switch (prop) {
474 case ZPOOL_PROP_VERSION:
475 if (intval < version ||
476 !SPA_VERSION_IS_SUPPORTED(intval)) {
477 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
478 "property '%s' number %d is invalid."),
479 propname, intval);
480 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
481 goto error;
482 }
483 break;
484
485 case ZPOOL_PROP_BOOTFS:
486 if (flags.create || flags.import) {
487 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
488 "property '%s' cannot be set at creation "
489 "or import time"), propname);
490 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
491 goto error;
492 }
493
494 if (version < SPA_VERSION_BOOTFS) {
495 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
496 "pool must be upgraded to support "
497 "'%s' property"), propname);
498 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
499 goto error;
500 }
501
502 /*
503 * bootfs property value has to be a dataset name and
504 * the dataset has to be in the same pool as it sets to.
505 */
506 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
507 strval)) {
508 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
509 "is an invalid name"), strval);
510 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
511 goto error;
512 }
513
514 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
515 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
516 "could not open pool '%s'"), poolname);
517 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
518 goto error;
519 }
520 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
521 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
522
523 /*
524 * bootfs property cannot be set on a disk which has
525 * been EFI labeled.
526 */
527 if (pool_uses_efi(nvroot)) {
528 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
529 "property '%s' not supported on "
530 "EFI labeled devices"), propname);
531 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
532 zpool_close(zhp);
533 goto error;
534 }
535 zpool_close(zhp);
536 break;
537
538 case ZPOOL_PROP_ALTROOT:
539 if (!flags.create && !flags.import) {
540 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
541 "property '%s' can only be set during pool "
542 "creation or import"), propname);
543 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
544 goto error;
545 }
546
547 if (strval[0] != '/') {
548 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
549 "bad alternate root '%s'"), strval);
550 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
551 goto error;
552 }
553 break;
554
555 case ZPOOL_PROP_CACHEFILE:
556 if (strval[0] == '\0')
557 break;
558
559 if (strcmp(strval, "none") == 0)
560 break;
561
562 if (strval[0] != '/') {
563 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
564 "property '%s' must be empty, an "
565 "absolute path, or 'none'"), propname);
566 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
567 goto error;
568 }
569
570 slash = strrchr(strval, '/');
571
572 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
573 strcmp(slash, "/..") == 0) {
574 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
575 "'%s' is not a valid file"), strval);
576 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
577 goto error;
578 }
579
580 *slash = '\0';
581
582 if (strval[0] != '\0' &&
583 (stat64(strval, &statbuf) != 0 ||
584 !S_ISDIR(statbuf.st_mode))) {
585 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
586 "'%s' is not a valid directory"),
587 strval);
588 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
589 goto error;
590 }
591
592 *slash = '/';
593 break;
594
595 case ZPOOL_PROP_COMMENT:
596 for (check = strval; *check != '\0'; check++) {
597 if (!isprint(*check)) {
598 zfs_error_aux(hdl,
599 dgettext(TEXT_DOMAIN,
600 "comment may only have printable "
601 "characters"));
602 (void) zfs_error(hdl, EZFS_BADPROP,
603 errbuf);
604 goto error;
605 }
606 }
607 if (strlen(strval) > ZPROP_MAX_COMMENT) {
608 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
609 "comment must not exceed %d characters"),
610 ZPROP_MAX_COMMENT);
611 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
612 goto error;
613 }
614 break;
615 case ZPOOL_PROP_READONLY:
616 if (!flags.import) {
617 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
618 "property '%s' can only be set at "
619 "import time"), propname);
620 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
621 goto error;
622 }
623 break;
624 }
625 }
626
627 return (retprops);
628 error:
629 nvlist_free(retprops);
630 return (NULL);
631 }
632
633 /*
634 * Set zpool property : propname=propval.
635 */
636 int
637 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
638 {
639 zfs_cmd_t zc = { 0 };
640 int ret = -1;
641 char errbuf[1024];
642 nvlist_t *nvl = NULL;
643 nvlist_t *realprops;
644 uint64_t version;
645 prop_flags_t flags = { 0 };
646
647 (void) snprintf(errbuf, sizeof (errbuf),
648 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
649 zhp->zpool_name);
650
651 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
652 return (no_memory(zhp->zpool_hdl));
653
654 if (nvlist_add_string(nvl, propname, propval) != 0) {
655 nvlist_free(nvl);
656 return (no_memory(zhp->zpool_hdl));
657 }
658
659 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
660 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
661 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
662 nvlist_free(nvl);
663 return (-1);
664 }
665
666 nvlist_free(nvl);
667 nvl = realprops;
668
669 /*
670 * Execute the corresponding ioctl() to set this property.
671 */
672 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
673
674 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
675 nvlist_free(nvl);
676 return (-1);
677 }
678
679 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
680
681 zcmd_free_nvlists(&zc);
682 nvlist_free(nvl);
683
684 if (ret)
685 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
686 else
687 (void) zpool_props_refresh(zhp);
688
689 return (ret);
690 }
691
692 int
693 zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
694 {
695 libzfs_handle_t *hdl = zhp->zpool_hdl;
696 zprop_list_t *entry;
697 char buf[ZFS_MAXPROPLEN];
698 nvlist_t *features = NULL;
699 zprop_list_t **last;
700 boolean_t firstexpand = (NULL == *plp);
701
702 if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
703 return (-1);
704
705 last = plp;
706 while (*last != NULL)
707 last = &(*last)->pl_next;
708
709 if ((*plp)->pl_all)
710 features = zpool_get_features(zhp);
711
712 if ((*plp)->pl_all && firstexpand) {
713 for (int i = 0; i < SPA_FEATURES; i++) {
714 zprop_list_t *entry = zfs_alloc(hdl,
715 sizeof (zprop_list_t));
716 entry->pl_prop = ZPROP_INVAL;
717 entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
718 spa_feature_table[i].fi_uname);
719 entry->pl_width = strlen(entry->pl_user_prop);
720 entry->pl_all = B_TRUE;
721
722 *last = entry;
723 last = &entry->pl_next;
724 }
725 }
726
727 /* add any unsupported features */
728 for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
729 nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
730 char *propname;
731 boolean_t found;
732 zprop_list_t *entry;
733
734 if (zfeature_is_supported(nvpair_name(nvp)))
735 continue;
736
737 propname = zfs_asprintf(hdl, "unsupported@%s",
738 nvpair_name(nvp));
739
740 /*
741 * Before adding the property to the list make sure that no
742 * other pool already added the same property.
743 */
744 found = B_FALSE;
745 entry = *plp;
746 while (entry != NULL) {
747 if (entry->pl_user_prop != NULL &&
748 strcmp(propname, entry->pl_user_prop) == 0) {
749 found = B_TRUE;
750 break;
751 }
752 entry = entry->pl_next;
753 }
754 if (found) {
755 free(propname);
756 continue;
757 }
758
759 entry = zfs_alloc(hdl, sizeof (zprop_list_t));
760 entry->pl_prop = ZPROP_INVAL;
761 entry->pl_user_prop = propname;
762 entry->pl_width = strlen(entry->pl_user_prop);
763 entry->pl_all = B_TRUE;
764
765 *last = entry;
766 last = &entry->pl_next;
767 }
768
769 for (entry = *plp; entry != NULL; entry = entry->pl_next) {
770
771 if (entry->pl_fixed)
772 continue;
773
774 if (entry->pl_prop != ZPROP_INVAL &&
775 zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
776 NULL) == 0) {
777 if (strlen(buf) > entry->pl_width)
778 entry->pl_width = strlen(buf);
779 }
780 }
781
782 return (0);
783 }
784
785 /*
786 * Get the state for the given feature on the given ZFS pool.
787 */
788 int
789 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
790 size_t len)
791 {
792 uint64_t refcount;
793 boolean_t found = B_FALSE;
794 nvlist_t *features = zpool_get_features(zhp);
795 boolean_t supported;
796 const char *feature = strchr(propname, '@') + 1;
797
798 supported = zpool_prop_feature(propname);
799 ASSERT(supported || zfs_prop_unsupported(propname));
800
801 /*
802 * Convert from feature name to feature guid. This conversion is
803 * unecessary for unsupported@... properties because they already
804 * use guids.
805 */
806 if (supported) {
807 int ret;
808 zfeature_info_t *fi;
809
810 ret = zfeature_lookup_name(feature, &fi);
811 if (ret != 0) {
812 (void) strlcpy(buf, "-", len);
813 return (ENOTSUP);
814 }
815 feature = fi->fi_guid;
816 }
817
818 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
819 found = B_TRUE;
820
821 if (supported) {
822 if (!found) {
823 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
824 } else {
825 if (refcount == 0)
826 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
827 else
828 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
829 }
830 } else {
831 if (found) {
832 if (refcount == 0) {
833 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
834 } else {
835 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
836 }
837 } else {
838 (void) strlcpy(buf, "-", len);
839 return (ENOTSUP);
840 }
841 }
842
843 return (0);
844 }
845
846 /*
847 * Don't start the slice at the default block of 34; many storage
848 * devices will use a stripe width of 128k, so start there instead.
849 */
850 #define NEW_START_BLOCK 256
851
852 /*
853 * Validate the given pool name, optionally putting an extended error message in
854 * 'buf'.
855 */
856 boolean_t
857 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
858 {
859 namecheck_err_t why;
860 char what;
861 int ret;
862
863 ret = pool_namecheck(pool, &why, &what);
864
865 /*
866 * The rules for reserved pool names were extended at a later point.
867 * But we need to support users with existing pools that may now be
868 * invalid. So we only check for this expanded set of names during a
869 * create (or import), and only in userland.
870 */
871 if (ret == 0 && !isopen &&
872 (strncmp(pool, "mirror", 6) == 0 ||
873 strncmp(pool, "raidz", 5) == 0 ||
874 strncmp(pool, "spare", 5) == 0 ||
875 strcmp(pool, "log") == 0)) {
876 if (hdl != NULL)
877 zfs_error_aux(hdl,
878 dgettext(TEXT_DOMAIN, "name is reserved"));
879 return (B_FALSE);
880 }
881
882
883 if (ret != 0) {
884 if (hdl != NULL) {
885 switch (why) {
886 case NAME_ERR_TOOLONG:
887 zfs_error_aux(hdl,
888 dgettext(TEXT_DOMAIN, "name is too long"));
889 break;
890
891 case NAME_ERR_INVALCHAR:
892 zfs_error_aux(hdl,
893 dgettext(TEXT_DOMAIN, "invalid character "
894 "'%c' in pool name"), what);
895 break;
896
897 case NAME_ERR_NOLETTER:
898 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
899 "name must begin with a letter"));
900 break;
901
902 case NAME_ERR_RESERVED:
903 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
904 "name is reserved"));
905 break;
906
907 case NAME_ERR_DISKLIKE:
908 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
909 "pool name is reserved"));
910 break;
911
912 case NAME_ERR_LEADING_SLASH:
913 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
914 "leading slash in name"));
915 break;
916
917 case NAME_ERR_EMPTY_COMPONENT:
918 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
919 "empty component in name"));
920 break;
921
922 case NAME_ERR_TRAILING_SLASH:
923 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
924 "trailing slash in name"));
925 break;
926
927 case NAME_ERR_MULTIPLE_AT:
928 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
929 "multiple '@' delimiters in name"));
930 break;
931
932 }
933 }
934 return (B_FALSE);
935 }
936
937 return (B_TRUE);
938 }
939
940 /*
941 * Open a handle to the given pool, even if the pool is currently in the FAULTED
942 * state.
943 */
944 zpool_handle_t *
945 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
946 {
947 zpool_handle_t *zhp;
948 boolean_t missing;
949
950 /*
951 * Make sure the pool name is valid.
952 */
953 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
954 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
955 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
956 pool);
957 return (NULL);
958 }
959
960 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
961 return (NULL);
962
963 zhp->zpool_hdl = hdl;
964 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
965
966 if (zpool_refresh_stats(zhp, &missing) != 0) {
967 zpool_close(zhp);
968 return (NULL);
969 }
970
971 if (missing) {
972 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
973 (void) zfs_error_fmt(hdl, EZFS_NOENT,
974 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
975 zpool_close(zhp);
976 return (NULL);
977 }
978
979 return (zhp);
980 }
981
982 /*
983 * Like the above, but silent on error. Used when iterating over pools (because
984 * the configuration cache may be out of date).
985 */
986 int
987 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
988 {
989 zpool_handle_t *zhp;
990 boolean_t missing;
991
992 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
993 return (-1);
994
995 zhp->zpool_hdl = hdl;
996 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
997
998 if (zpool_refresh_stats(zhp, &missing) != 0) {
999 zpool_close(zhp);
1000 return (-1);
1001 }
1002
1003 if (missing) {
1004 zpool_close(zhp);
1005 *ret = NULL;
1006 return (0);
1007 }
1008
1009 *ret = zhp;
1010 return (0);
1011 }
1012
1013 /*
1014 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1015 * state.
1016 */
1017 zpool_handle_t *
1018 zpool_open(libzfs_handle_t *hdl, const char *pool)
1019 {
1020 zpool_handle_t *zhp;
1021
1022 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1023 return (NULL);
1024
1025 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1026 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1027 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1028 zpool_close(zhp);
1029 return (NULL);
1030 }
1031
1032 return (zhp);
1033 }
1034
1035 /*
1036 * Close the handle. Simply frees the memory associated with the handle.
1037 */
1038 void
1039 zpool_close(zpool_handle_t *zhp)
1040 {
1041 if (zhp->zpool_config)
1042 nvlist_free(zhp->zpool_config);
1043 if (zhp->zpool_old_config)
1044 nvlist_free(zhp->zpool_old_config);
1045 if (zhp->zpool_props)
1046 nvlist_free(zhp->zpool_props);
1047 free(zhp);
1048 }
1049
1050 /*
1051 * Return the name of the pool.
1052 */
1053 const char *
1054 zpool_get_name(zpool_handle_t *zhp)
1055 {
1056 return (zhp->zpool_name);
1057 }
1058
1059
1060 /*
1061 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1062 */
1063 int
1064 zpool_get_state(zpool_handle_t *zhp)
1065 {
1066 return (zhp->zpool_state);
1067 }
1068
1069 /*
1070 * Create the named pool, using the provided vdev list. It is assumed
1071 * that the consumer has already validated the contents of the nvlist, so we
1072 * don't have to worry about error semantics.
1073 */
1074 int
1075 zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
1076 nvlist_t *props, nvlist_t *fsprops)
1077 {
1078 zfs_cmd_t zc = { 0 };
1079 nvlist_t *zc_fsprops = NULL;
1080 nvlist_t *zc_props = NULL;
1081 char msg[1024];
1082 char *altroot;
1083 int ret = -1;
1084
1085 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1086 "cannot create '%s'"), pool);
1087
1088 if (!zpool_name_valid(hdl, B_FALSE, pool))
1089 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
1090
1091 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1092 return (-1);
1093
1094 if (props) {
1095 prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
1096
1097 if ((zc_props = zpool_valid_proplist(hdl, pool, props,
1098 SPA_VERSION_1, flags, msg)) == NULL) {
1099 goto create_failed;
1100 }
1101 }
1102
1103 if (fsprops) {
1104 uint64_t zoned;
1105 char *zonestr;
1106
1107 zoned = ((nvlist_lookup_string(fsprops,
1108 zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
1109 strcmp(zonestr, "on") == 0);
1110
1111 if ((zc_fsprops = zfs_valid_proplist(hdl,
1112 ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
1113 goto create_failed;
1114 }
1115 if (!zc_props &&
1116 (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
1117 goto create_failed;
1118 }
1119 if (nvlist_add_nvlist(zc_props,
1120 ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
1121 goto create_failed;
1122 }
1123 }
1124
1125 if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
1126 goto create_failed;
1127
1128 (void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
1129
1130 if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
1131
1132 zcmd_free_nvlists(&zc);
1133 nvlist_free(zc_props);
1134 nvlist_free(zc_fsprops);
1135
1136 switch (errno) {
1137 case EBUSY:
1138 /*
1139 * This can happen if the user has specified the same
1140 * device multiple times. We can't reliably detect this
1141 * until we try to add it and see we already have a
1142 * label.
1143 */
1144 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1145 "one or more vdevs refer to the same device"));
1146 return (zfs_error(hdl, EZFS_BADDEV, msg));
1147
1148 case EOVERFLOW:
1149 /*
1150 * This occurs when one of the devices is below
1151 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1152 * device was the problem device since there's no
1153 * reliable way to determine device size from userland.
1154 */
1155 {
1156 char buf[64];
1157
1158 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1159
1160 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1161 "one or more devices is less than the "
1162 "minimum size (%s)"), buf);
1163 }
1164 return (zfs_error(hdl, EZFS_BADDEV, msg));
1165
1166 case ENOSPC:
1167 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1168 "one or more devices is out of space"));
1169 return (zfs_error(hdl, EZFS_BADDEV, msg));
1170
1171 case ENOTBLK:
1172 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1173 "cache device must be a disk or disk slice"));
1174 return (zfs_error(hdl, EZFS_BADDEV, msg));
1175
1176 default:
1177 return (zpool_standard_error(hdl, errno, msg));
1178 }
1179 }
1180
1181 /*
1182 * If this is an alternate root pool, then we automatically set the
1183 * mountpoint of the root dataset to be '/'.
1184 */
1185 if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
1186 &altroot) == 0) {
1187 zfs_handle_t *zhp;
1188
1189 verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
1190 verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
1191 "/") == 0);
1192
1193 zfs_close(zhp);
1194 }
1195
1196 create_failed:
1197 zcmd_free_nvlists(&zc);
1198 nvlist_free(zc_props);
1199 nvlist_free(zc_fsprops);
1200 return (ret);
1201 }
1202
1203 /*
1204 * Destroy the given pool. It is up to the caller to ensure that there are no
1205 * datasets left in the pool.
1206 */
1207 int
1208 zpool_destroy(zpool_handle_t *zhp)
1209 {
1210 zfs_cmd_t zc = { 0 };
1211 zfs_handle_t *zfp = NULL;
1212 libzfs_handle_t *hdl = zhp->zpool_hdl;
1213 char msg[1024];
1214
1215 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1216 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1217 return (-1);
1218
1219 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1220
1221 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1222 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1223 "cannot destroy '%s'"), zhp->zpool_name);
1224
1225 if (errno == EROFS) {
1226 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1227 "one or more devices is read only"));
1228 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1229 } else {
1230 (void) zpool_standard_error(hdl, errno, msg);
1231 }
1232
1233 if (zfp)
1234 zfs_close(zfp);
1235 return (-1);
1236 }
1237
1238 if (zfp) {
1239 remove_mountpoint(zfp);
1240 zfs_close(zfp);
1241 }
1242
1243 return (0);
1244 }
1245
1246 /*
1247 * Add the given vdevs to the pool. The caller must have already performed the
1248 * necessary verification to ensure that the vdev specification is well-formed.
1249 */
1250 int
1251 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1252 {
1253 zfs_cmd_t zc = { 0 };
1254 int ret;
1255 libzfs_handle_t *hdl = zhp->zpool_hdl;
1256 char msg[1024];
1257 nvlist_t **spares, **l2cache;
1258 uint_t nspares, nl2cache;
1259
1260 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1261 "cannot add to '%s'"), zhp->zpool_name);
1262
1263 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1264 SPA_VERSION_SPARES &&
1265 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1266 &spares, &nspares) == 0) {
1267 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1268 "upgraded to add hot spares"));
1269 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1270 }
1271
1272 if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1273 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1274 uint64_t s;
1275
1276 for (s = 0; s < nspares; s++) {
1277 char *path;
1278
1279 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1280 &path) == 0 && pool_uses_efi(spares[s])) {
1281 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1282 "device '%s' contains an EFI label and "
1283 "cannot be used on root pools."),
1284 zpool_vdev_name(hdl, NULL, spares[s],
1285 B_FALSE));
1286 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1287 }
1288 }
1289 }
1290
1291 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1292 SPA_VERSION_L2CACHE &&
1293 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1294 &l2cache, &nl2cache) == 0) {
1295 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1296 "upgraded to add cache devices"));
1297 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1298 }
1299
1300 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1301 return (-1);
1302 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1303
1304 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1305 switch (errno) {
1306 case EBUSY:
1307 /*
1308 * This can happen if the user has specified the same
1309 * device multiple times. We can't reliably detect this
1310 * until we try to add it and see we already have a
1311 * label.
1312 */
1313 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1314 "one or more vdevs refer to the same device"));
1315 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1316 break;
1317
1318 case EOVERFLOW:
1319 /*
1320 * This occurrs when one of the devices is below
1321 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1322 * device was the problem device since there's no
1323 * reliable way to determine device size from userland.
1324 */
1325 {
1326 char buf[64];
1327
1328 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1329
1330 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1331 "device is less than the minimum "
1332 "size (%s)"), buf);
1333 }
1334 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1335 break;
1336
1337 case ENOTSUP:
1338 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1339 "pool must be upgraded to add these vdevs"));
1340 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1341 break;
1342
1343 case EDOM:
1344 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1345 "root pool can not have multiple vdevs"
1346 " or separate logs"));
1347 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1348 break;
1349
1350 case ENOTBLK:
1351 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1352 "cache device must be a disk or disk slice"));
1353 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1354 break;
1355
1356 default:
1357 (void) zpool_standard_error(hdl, errno, msg);
1358 }
1359
1360 ret = -1;
1361 } else {
1362 ret = 0;
1363 }
1364
1365 zcmd_free_nvlists(&zc);
1366
1367 return (ret);
1368 }
1369
1370 /*
1371 * Exports the pool from the system. The caller must ensure that there are no
1372 * mounted datasets in the pool.
1373 */
1374 int
1375 zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
1376 {
1377 zfs_cmd_t zc = { 0 };
1378 char msg[1024];
1379
1380 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1381 "cannot export '%s'"), zhp->zpool_name);
1382
1383 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1384 zc.zc_cookie = force;
1385 zc.zc_guid = hardforce;
1386
1387 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1388 switch (errno) {
1389 case EXDEV:
1390 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1391 "use '-f' to override the following errors:\n"
1392 "'%s' has an active shared spare which could be"
1393 " used by other pools once '%s' is exported."),
1394 zhp->zpool_name, zhp->zpool_name);
1395 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1396 msg));
1397 default:
1398 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1399 msg));
1400 }
1401 }
1402
1403 return (0);
1404 }
1405
1406 int
1407 zpool_export(zpool_handle_t *zhp, boolean_t force)
1408 {
1409 return (zpool_export_common(zhp, force, B_FALSE));
1410 }
1411
1412 int
1413 zpool_export_force(zpool_handle_t *zhp)
1414 {
1415 return (zpool_export_common(zhp, B_TRUE, B_TRUE));
1416 }
1417
1418 static void
1419 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1420 nvlist_t *config)
1421 {
1422 nvlist_t *nv = NULL;
1423 uint64_t rewindto;
1424 int64_t loss = -1;
1425 struct tm t;
1426 char timestr[128];
1427
1428 if (!hdl->libzfs_printerr || config == NULL)
1429 return;
1430
1431 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1432 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1433 return;
1434 }
1435
1436 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1437 return;
1438 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1439
1440 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1441 strftime(timestr, 128, 0, &t) != 0) {
1442 if (dryrun) {
1443 (void) printf(dgettext(TEXT_DOMAIN,
1444 "Would be able to return %s "
1445 "to its state as of %s.\n"),
1446 name, timestr);
1447 } else {
1448 (void) printf(dgettext(TEXT_DOMAIN,
1449 "Pool %s returned to its state as of %s.\n"),
1450 name, timestr);
1451 }
1452 if (loss > 120) {
1453 (void) printf(dgettext(TEXT_DOMAIN,
1454 "%s approximately %lld "),
1455 dryrun ? "Would discard" : "Discarded",
1456 (loss + 30) / 60);
1457 (void) printf(dgettext(TEXT_DOMAIN,
1458 "minutes of transactions.\n"));
1459 } else if (loss > 0) {
1460 (void) printf(dgettext(TEXT_DOMAIN,
1461 "%s approximately %lld "),
1462 dryrun ? "Would discard" : "Discarded", loss);
1463 (void) printf(dgettext(TEXT_DOMAIN,
1464 "seconds of transactions.\n"));
1465 }
1466 }
1467 }
1468
1469 void
1470 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1471 nvlist_t *config)
1472 {
1473 nvlist_t *nv = NULL;
1474 int64_t loss = -1;
1475 uint64_t edata = UINT64_MAX;
1476 uint64_t rewindto;
1477 struct tm t;
1478 char timestr[128];
1479
1480 if (!hdl->libzfs_printerr)
1481 return;
1482
1483 if (reason >= 0)
1484 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1485 else
1486 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1487
1488 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1489 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1490 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1491 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1492 goto no_info;
1493
1494 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1495 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1496 &edata);
1497
1498 (void) printf(dgettext(TEXT_DOMAIN,
1499 "Recovery is possible, but will result in some data loss.\n"));
1500
1501 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1502 strftime(timestr, 128, 0, &t) != 0) {
1503 (void) printf(dgettext(TEXT_DOMAIN,
1504 "\tReturning the pool to its state as of %s\n"
1505 "\tshould correct the problem. "),
1506 timestr);
1507 } else {
1508 (void) printf(dgettext(TEXT_DOMAIN,
1509 "\tReverting the pool to an earlier state "
1510 "should correct the problem.\n\t"));
1511 }
1512
1513 if (loss > 120) {
1514 (void) printf(dgettext(TEXT_DOMAIN,
1515 "Approximately %lld minutes of data\n"
1516 "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
1517 } else if (loss > 0) {
1518 (void) printf(dgettext(TEXT_DOMAIN,
1519 "Approximately %lld seconds of data\n"
1520 "\tmust be discarded, irreversibly. "), loss);
1521 }
1522 if (edata != 0 && edata != UINT64_MAX) {
1523 if (edata == 1) {
1524 (void) printf(dgettext(TEXT_DOMAIN,
1525 "After rewind, at least\n"
1526 "\tone persistent user-data error will remain. "));
1527 } else {
1528 (void) printf(dgettext(TEXT_DOMAIN,
1529 "After rewind, several\n"
1530 "\tpersistent user-data errors will remain. "));
1531 }
1532 }
1533 (void) printf(dgettext(TEXT_DOMAIN,
1534 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1535 reason >= 0 ? "clear" : "import", name);
1536
1537 (void) printf(dgettext(TEXT_DOMAIN,
1538 "A scrub of the pool\n"
1539 "\tis strongly recommended after recovery.\n"));
1540 return;
1541
1542 no_info:
1543 (void) printf(dgettext(TEXT_DOMAIN,
1544 "Destroy and re-create the pool from\n\ta backup source.\n"));
1545 }
1546
1547 /*
1548 * zpool_import() is a contracted interface. Should be kept the same
1549 * if possible.
1550 *
1551 * Applications should use zpool_import_props() to import a pool with
1552 * new properties value to be set.
1553 */
1554 int
1555 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1556 char *altroot)
1557 {
1558 nvlist_t *props = NULL;
1559 int ret;
1560
1561 if (altroot != NULL) {
1562 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1563 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1564 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1565 newname));
1566 }
1567
1568 if (nvlist_add_string(props,
1569 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1570 nvlist_add_string(props,
1571 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1572 nvlist_free(props);
1573 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1574 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1575 newname));
1576 }
1577 }
1578
1579 ret = zpool_import_props(hdl, config, newname, props,
1580 ZFS_IMPORT_NORMAL);
1581 if (props)
1582 nvlist_free(props);
1583 return (ret);
1584 }
1585
1586 static void
1587 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1588 int indent)
1589 {
1590 nvlist_t **child;
1591 uint_t c, children;
1592 char *vname;
1593 uint64_t is_log = 0;
1594
1595 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1596 &is_log);
1597
1598 if (name != NULL)
1599 (void) printf("\t%*s%s%s\n", indent, "", name,
1600 is_log ? " [log]" : "");
1601
1602 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1603 &child, &children) != 0)
1604 return;
1605
1606 for (c = 0; c < children; c++) {
1607 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
1608 print_vdev_tree(hdl, vname, child[c], indent + 2);
1609 free(vname);
1610 }
1611 }
1612
1613 void
1614 zpool_print_unsup_feat(nvlist_t *config)
1615 {
1616 nvlist_t *nvinfo, *unsup_feat;
1617
1618 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1619 0);
1620 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1621 &unsup_feat) == 0);
1622
1623 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1624 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1625 char *desc;
1626
1627 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1628 verify(nvpair_value_string(nvp, &desc) == 0);
1629
1630 if (strlen(desc) > 0)
1631 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1632 else
1633 (void) printf("\t%s\n", nvpair_name(nvp));
1634 }
1635 }
1636
1637 /*
1638 * Import the given pool using the known configuration and a list of
1639 * properties to be set. The configuration should have come from
1640 * zpool_find_import(). The 'newname' parameters control whether the pool
1641 * is imported with a different name.
1642 */
1643 int
1644 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1645 nvlist_t *props, int flags)
1646 {
1647 zfs_cmd_t zc = { 0 };
1648 zpool_rewind_policy_t policy;
1649 nvlist_t *nv = NULL;
1650 nvlist_t *nvinfo = NULL;
1651 nvlist_t *missing = NULL;
1652 char *thename;
1653 char *origname;
1654 int ret;
1655 int error = 0;
1656 char errbuf[1024];
1657
1658 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1659 &origname) == 0);
1660
1661 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1662 "cannot import pool '%s'"), origname);
1663
1664 if (newname != NULL) {
1665 if (!zpool_name_valid(hdl, B_FALSE, newname))
1666 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1667 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1668 newname));
1669 thename = (char *)newname;
1670 } else {
1671 thename = origname;
1672 }
1673
1674 if (props) {
1675 uint64_t version;
1676 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1677
1678 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1679 &version) == 0);
1680
1681 if ((props = zpool_valid_proplist(hdl, origname,
1682 props, version, flags, errbuf)) == NULL) {
1683 return (-1);
1684 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1685 nvlist_free(props);
1686 return (-1);
1687 }
1688 }
1689
1690 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1691
1692 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1693 &zc.zc_guid) == 0);
1694
1695 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1696 nvlist_free(props);
1697 return (-1);
1698 }
1699 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1700 nvlist_free(props);
1701 return (-1);
1702 }
1703
1704 zc.zc_cookie = flags;
1705 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1706 errno == ENOMEM) {
1707 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1708 zcmd_free_nvlists(&zc);
1709 return (-1);
1710 }
1711 }
1712 if (ret != 0)
1713 error = errno;
1714
1715 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1716 zpool_get_rewind_policy(config, &policy);
1717
1718 if (error) {
1719 char desc[1024];
1720
1721 /*
1722 * Dry-run failed, but we print out what success
1723 * looks like if we found a best txg
1724 */
1725 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1726 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1727 B_TRUE, nv);
1728 nvlist_free(nv);
1729 return (-1);
1730 }
1731
1732 if (newname == NULL)
1733 (void) snprintf(desc, sizeof (desc),
1734 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1735 thename);
1736 else
1737 (void) snprintf(desc, sizeof (desc),
1738 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1739 origname, thename);
1740
1741 switch (error) {
1742 case ENOTSUP:
1743 if (nv != NULL && nvlist_lookup_nvlist(nv,
1744 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1745 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1746 (void) printf(dgettext(TEXT_DOMAIN, "This "
1747 "pool uses the following feature(s) not "
1748 "supported by this system:\n"));
1749 zpool_print_unsup_feat(nv);
1750 if (nvlist_exists(nvinfo,
1751 ZPOOL_CONFIG_CAN_RDONLY)) {
1752 (void) printf(dgettext(TEXT_DOMAIN,
1753 "All unsupported features are only "
1754 "required for writing to the pool."
1755 "\nThe pool can be imported using "
1756 "'-o readonly=on'.\n"));
1757 }
1758 }
1759 /*
1760 * Unsupported version.
1761 */
1762 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1763 break;
1764
1765 case EINVAL:
1766 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1767 break;
1768
1769 case EROFS:
1770 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1771 "one or more devices is read only"));
1772 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1773 break;
1774
1775 case ENXIO:
1776 if (nv && nvlist_lookup_nvlist(nv,
1777 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1778 nvlist_lookup_nvlist(nvinfo,
1779 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1780 (void) printf(dgettext(TEXT_DOMAIN,
1781 "The devices below are missing, use "
1782 "'-m' to import the pool anyway:\n"));
1783 print_vdev_tree(hdl, NULL, missing, 2);
1784 (void) printf("\n");
1785 }
1786 (void) zpool_standard_error(hdl, error, desc);
1787 break;
1788
1789 case EEXIST:
1790 (void) zpool_standard_error(hdl, error, desc);
1791 break;
1792
1793 default:
1794 (void) zpool_standard_error(hdl, error, desc);
1795 zpool_explain_recover(hdl,
1796 newname ? origname : thename, -error, nv);
1797 break;
1798 }
1799
1800 nvlist_free(nv);
1801 ret = -1;
1802 } else {
1803 zpool_handle_t *zhp;
1804
1805 /*
1806 * This should never fail, but play it safe anyway.
1807 */
1808 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1809 ret = -1;
1810 else if (zhp != NULL)
1811 zpool_close(zhp);
1812 if (policy.zrp_request &
1813 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1814 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1815 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1816 }
1817 nvlist_free(nv);
1818 return (0);
1819 }
1820
1821 zcmd_free_nvlists(&zc);
1822 nvlist_free(props);
1823
1824 return (ret);
1825 }
1826
1827 /*
1828 * Scan the pool.
1829 */
1830 int
1831 zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
1832 {
1833 zfs_cmd_t zc = { 0 };
1834 char msg[1024];
1835 libzfs_handle_t *hdl = zhp->zpool_hdl;
1836
1837 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1838 zc.zc_cookie = func;
1839
1840 if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
1841 (errno == ENOENT && func != POOL_SCAN_NONE))
1842 return (0);
1843
1844 if (func == POOL_SCAN_SCRUB) {
1845 (void) snprintf(msg, sizeof (msg),
1846 dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
1847 } else if (func == POOL_SCAN_NONE) {
1848 (void) snprintf(msg, sizeof (msg),
1849 dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
1850 zc.zc_name);
1851 } else {
1852 assert(!"unexpected result");
1853 }
1854
1855 if (errno == EBUSY) {
1856 nvlist_t *nvroot;
1857 pool_scan_stat_t *ps = NULL;
1858 uint_t psc;
1859
1860 verify(nvlist_lookup_nvlist(zhp->zpool_config,
1861 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
1862 (void) nvlist_lookup_uint64_array(nvroot,
1863 ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
1864 if (ps && ps->pss_func == POOL_SCAN_SCRUB)
1865 return (zfs_error(hdl, EZFS_SCRUBBING, msg));
1866 else
1867 return (zfs_error(hdl, EZFS_RESILVERING, msg));
1868 } else if (errno == ENOENT) {
1869 return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
1870 } else {
1871 return (zpool_standard_error(hdl, errno, msg));
1872 }
1873 }
1874
1875 /*
1876 * This provides a very minimal check whether a given string is likely a
1877 * c#t#d# style string. Users of this are expected to do their own
1878 * verification of the s# part.
1879 */
1880 #define CTD_CHECK(str) (str && str[0] == 'c' && isdigit(str[1]))
1881
1882 /*
1883 * More elaborate version for ones which may start with "/dev/dsk/"
1884 * and the like.
1885 */
1886 static int
1887 ctd_check_path(char *str) {
1888 /*
1889 * If it starts with a slash, check the last component.
1890 */
1891 if (str && str[0] == '/') {
1892 char *tmp = strrchr(str, '/');
1893
1894 /*
1895 * If it ends in "/old", check the second-to-last
1896 * component of the string instead.
1897 */
1898 if (tmp != str && strcmp(tmp, "/old") == 0) {
1899 for (tmp--; *tmp != '/'; tmp--)
1900 ;
1901 }
1902 str = tmp + 1;
1903 }
1904 return (CTD_CHECK(str));
1905 }
1906
1907 /*
1908 * Find a vdev that matches the search criteria specified. We use the
1909 * the nvpair name to determine how we should look for the device.
1910 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1911 * spare; but FALSE if its an INUSE spare.
1912 */
1913 static nvlist_t *
1914 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1915 boolean_t *l2cache, boolean_t *log)
1916 {
1917 uint_t c, children;
1918 nvlist_t **child;
1919 nvlist_t *ret;
1920 uint64_t is_log;
1921 char *srchkey;
1922 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1923
1924 /* Nothing to look for */
1925 if (search == NULL || pair == NULL)
1926 return (NULL);
1927
1928 /* Obtain the key we will use to search */
1929 srchkey = nvpair_name(pair);
1930
1931 switch (nvpair_type(pair)) {
1932 case DATA_TYPE_UINT64:
1933 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1934 uint64_t srchval, theguid;
1935
1936 verify(nvpair_value_uint64(pair, &srchval) == 0);
1937 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1938 &theguid) == 0);
1939 if (theguid == srchval)
1940 return (nv);
1941 }
1942 break;
1943
1944 case DATA_TYPE_STRING: {
1945 char *srchval, *val;
1946
1947 verify(nvpair_value_string(pair, &srchval) == 0);
1948 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1949 break;
1950
1951 /*
1952 * Search for the requested value. Special cases:
1953 *
1954 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
1955 * "s0" or "s0/old". The "s0" part is hidden from the user,
1956 * but included in the string, so this matches around it.
1957 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1958 *
1959 * Otherwise, all other searches are simple string compares.
1960 */
1961 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
1962 ctd_check_path(val)) {
1963 uint64_t wholedisk = 0;
1964
1965 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1966 &wholedisk);
1967 if (wholedisk) {
1968 int slen = strlen(srchval);
1969 int vlen = strlen(val);
1970
1971 if (slen != vlen - 2)
1972 break;
1973
1974 /*
1975 * make_leaf_vdev() should only set
1976 * wholedisk for ZPOOL_CONFIG_PATHs which
1977 * will include "/dev/dsk/", giving plenty of
1978 * room for the indices used next.
1979 */
1980 ASSERT(vlen >= 6);
1981
1982 /*
1983 * strings identical except trailing "s0"
1984 */
1985 if (strcmp(&val[vlen - 2], "s0") == 0 &&
1986 strncmp(srchval, val, slen) == 0)
1987 return (nv);
1988
1989 /*
1990 * strings identical except trailing "s0/old"
1991 */
1992 if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
1993 strcmp(&srchval[slen - 4], "/old") == 0 &&
1994 strncmp(srchval, val, slen - 4) == 0)
1995 return (nv);
1996
1997 break;
1998 }
1999 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2000 char *type, *idx, *end, *p;
2001 uint64_t id, vdev_id;
2002
2003 /*
2004 * Determine our vdev type, keeping in mind
2005 * that the srchval is composed of a type and
2006 * vdev id pair (i.e. mirror-4).
2007 */
2008 if ((type = strdup(srchval)) == NULL)
2009 return (NULL);
2010
2011 if ((p = strrchr(type, '-')) == NULL) {
2012 free(type);
2013 break;
2014 }
2015 idx = p + 1;
2016 *p = '\0';
2017
2018 /*
2019 * If the types don't match then keep looking.
2020 */
2021 if (strncmp(val, type, strlen(val)) != 0) {
2022 free(type);
2023 break;
2024 }
2025
2026 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2027 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2028 strncmp(type, VDEV_TYPE_MIRROR,
2029 strlen(VDEV_TYPE_MIRROR)) == 0);
2030 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2031 &id) == 0);
2032
2033 errno = 0;
2034 vdev_id = strtoull(idx, &end, 10);
2035
2036 free(type);
2037 if (errno != 0)
2038 return (NULL);
2039
2040 /*
2041 * Now verify that we have the correct vdev id.
2042 */
2043 if (vdev_id == id)
2044 return (nv);
2045 }
2046
2047 /*
2048 * Common case
2049 */
2050 if (strcmp(srchval, val) == 0)
2051 return (nv);
2052 break;
2053 }
2054
2055 default:
2056 break;
2057 }
2058
2059 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2060 &child, &children) != 0)
2061 return (NULL);
2062
2063 for (c = 0; c < children; c++) {
2064 if ((ret = vdev_to_nvlist_iter(child[c], search,
2065 avail_spare, l2cache, NULL)) != NULL) {
2066 /*
2067 * The 'is_log' value is only set for the toplevel
2068 * vdev, not the leaf vdevs. So we always lookup the
2069 * log device from the root of the vdev tree (where
2070 * 'log' is non-NULL).
2071 */
2072 if (log != NULL &&
2073 nvlist_lookup_uint64(child[c],
2074 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2075 is_log) {
2076 *log = B_TRUE;
2077 }
2078 return (ret);
2079 }
2080 }
2081
2082 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2083 &child, &children) == 0) {
2084 for (c = 0; c < children; c++) {
2085 if ((ret = vdev_to_nvlist_iter(child[c], search,
2086 avail_spare, l2cache, NULL)) != NULL) {
2087 *avail_spare = B_TRUE;
2088 return (ret);
2089 }
2090 }
2091 }
2092
2093 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2094 &child, &children) == 0) {
2095 for (c = 0; c < children; c++) {
2096 if ((ret = vdev_to_nvlist_iter(child[c], search,
2097 avail_spare, l2cache, NULL)) != NULL) {
2098 *l2cache = B_TRUE;
2099 return (ret);
2100 }
2101 }
2102 }
2103
2104 return (NULL);
2105 }
2106
2107 /*
2108 * Given a physical path (minus the "/devices" prefix), find the
2109 * associated vdev.
2110 */
2111 nvlist_t *
2112 zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
2113 boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
2114 {
2115 nvlist_t *search, *nvroot, *ret;
2116
2117 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2118 verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
2119
2120 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2121 &nvroot) == 0);
2122
2123 *avail_spare = B_FALSE;
2124 *l2cache = B_FALSE;
2125 if (log != NULL)
2126 *log = B_FALSE;
2127 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2128 nvlist_free(search);
2129
2130 return (ret);
2131 }
2132
2133 /*
2134 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
2135 */
2136 boolean_t
2137 zpool_vdev_is_interior(const char *name)
2138 {
2139 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2140 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2141 return (B_TRUE);
2142 return (B_FALSE);
2143 }
2144
2145 nvlist_t *
2146 zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
2147 boolean_t *l2cache, boolean_t *log)
2148 {
2149 char buf[MAXPATHLEN];
2150 char *end;
2151 nvlist_t *nvroot, *search, *ret;
2152 uint64_t guid;
2153
2154 verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2155
2156 guid = strtoull(path, &end, 10);
2157 if (guid != 0 && *end == '\0') {
2158 verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
2159 } else if (zpool_vdev_is_interior(path)) {
2160 verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
2161 } else if (path[0] != '/') {
2162 (void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
2163 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
2164 } else {
2165 verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
2166 }
2167
2168 verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
2169 &nvroot) == 0);
2170
2171 *avail_spare = B_FALSE;
2172 *l2cache = B_FALSE;
2173 if (log != NULL)
2174 *log = B_FALSE;
2175 ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
2176 nvlist_free(search);
2177
2178 return (ret);
2179 }
2180
2181 static int
2182 vdev_online(nvlist_t *nv)
2183 {
2184 uint64_t ival;
2185
2186 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2187 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2188 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2189 return (0);
2190
2191 return (1);
2192 }
2193
2194 /*
2195 * Helper function for zpool_get_physpaths().
2196 */
2197 static int
2198 vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
2199 size_t *bytes_written)
2200 {
2201 size_t bytes_left, pos, rsz;
2202 char *tmppath;
2203 const char *format;
2204
2205 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
2206 &tmppath) != 0)
2207 return (EZFS_NODEVICE);
2208
2209 pos = *bytes_written;
2210 bytes_left = physpath_size - pos;
2211 format = (pos == 0) ? "%s" : " %s";
2212
2213 rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
2214 *bytes_written += rsz;
2215
2216 if (rsz >= bytes_left) {
2217 /* if physpath was not copied properly, clear it */
2218 if (bytes_left != 0) {
2219 physpath[pos] = 0;
2220 }
2221 return (EZFS_NOSPC);
2222 }
2223 return (0);
2224 }
2225
2226 static int
2227 vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
2228 size_t *rsz, boolean_t is_spare)
2229 {
2230 char *type;
2231 int ret;
2232
2233 if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
2234 return (EZFS_INVALCONFIG);
2235
2236 if (strcmp(type, VDEV_TYPE_DISK) == 0) {
2237 /*
2238 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
2239 * For a spare vdev, we only want to boot from the active
2240 * spare device.
2241 */
2242 if (is_spare) {
2243 uint64_t spare = 0;
2244 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
2245 &spare);
2246 if (!spare)
2247 return (EZFS_INVALCONFIG);
2248 }
2249
2250 if (vdev_online(nv)) {
2251 if ((ret = vdev_get_one_physpath(nv, physpath,
2252 phypath_size, rsz)) != 0)
2253 return (ret);
2254 }
2255 } else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
2256 strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
2257 (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
2258 nvlist_t **child;
2259 uint_t count;
2260 int i, ret;
2261
2262 if (nvlist_lookup_nvlist_array(nv,
2263 ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
2264 return (EZFS_INVALCONFIG);
2265
2266 for (i = 0; i < count; i++) {
2267 ret = vdev_get_physpaths(child[i], physpath,
2268 phypath_size, rsz, is_spare);
2269 if (ret == EZFS_NOSPC)
2270 return (ret);
2271 }
2272 }
2273
2274 return (EZFS_POOL_INVALARG);
2275 }
2276
2277 /*
2278 * Get phys_path for a root pool config.
2279 * Return 0 on success; non-zero on failure.
2280 */
2281 static int
2282 zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
2283 {
2284 size_t rsz;
2285 nvlist_t *vdev_root;
2286 nvlist_t **child;
2287 uint_t count;
2288 char *type;
2289
2290 rsz = 0;
2291
2292 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
2293 &vdev_root) != 0)
2294 return (EZFS_INVALCONFIG);
2295
2296 if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
2297 nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
2298 &child, &count) != 0)
2299 return (EZFS_INVALCONFIG);
2300
2301 /*
2302 * root pool can not have EFI labeled disks and can only have
2303 * a single top-level vdev.
2304 */
2305 if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
2306 pool_uses_efi(vdev_root))
2307 return (EZFS_POOL_INVALARG);
2308
2309 (void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
2310 B_FALSE);
2311
2312 /* No online devices */
2313 if (rsz == 0)
2314 return (EZFS_NODEVICE);
2315
2316 return (0);
2317 }
2318
2319 /*
2320 * Get phys_path for a root pool
2321 * Return 0 on success; non-zero on failure.
2322 */
2323 int
2324 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2325 {
2326 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2327 phypath_size));
2328 }
2329
2330 /*
2331 * If the device has being dynamically expanded then we need to relabel
2332 * the disk to use the new unallocated space.
2333 */
2334 static int
2335 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2336 {
2337 char path[MAXPATHLEN];
2338 char errbuf[1024];
2339 int fd, error;
2340 int (*_efi_use_whole_disk)(int);
2341
2342 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2343 "efi_use_whole_disk")) == NULL)
2344 return (-1);
2345
2346 (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
2347
2348 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2349 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2350 "relabel '%s': unable to open device"), name);
2351 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2352 }
2353
2354 /*
2355 * It's possible that we might encounter an error if the device
2356 * does not have any unallocated space left. If so, we simply
2357 * ignore that error and continue on.
2358 */
2359 error = _efi_use_whole_disk(fd);
2360 (void) close(fd);
2361 if (error && error != VT_ENOSPC) {
2362 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2363 "relabel '%s': unable to read disk capacity"), name);
2364 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2365 }
2366 return (0);
2367 }
2368
2369 /*
2370 * Bring the specified vdev online. The 'flags' parameter is a set of the
2371 * ZFS_ONLINE_* flags.
2372 */
2373 int
2374 zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
2375 vdev_state_t *newstate)
2376 {
2377 zfs_cmd_t zc = { 0 };
2378 char msg[1024];
2379 nvlist_t *tgt;
2380 boolean_t avail_spare, l2cache, islog;
2381 libzfs_handle_t *hdl = zhp->zpool_hdl;
2382
2383 if (flags & ZFS_ONLINE_EXPAND) {
2384 (void) snprintf(msg, sizeof (msg),
2385 dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
2386 } else {
2387 (void) snprintf(msg, sizeof (msg),
2388 dgettext(TEXT_DOMAIN, "cannot online %s"), path);
2389 }
2390
2391 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2392 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2393 &islog)) == NULL)
2394 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2395
2396 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2397
2398 if (avail_spare)
2399 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2400
2401 if (flags & ZFS_ONLINE_EXPAND ||
2402 zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
2403 char *pathname = NULL;
2404 uint64_t wholedisk = 0;
2405
2406 (void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
2407 &wholedisk);
2408 verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
2409 &pathname) == 0);
2410
2411 /*
2412 * XXX - L2ARC 1.0 devices can't support expansion.
2413 */
2414 if (l2cache) {
2415 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2416 "cannot expand cache devices"));
2417 return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
2418 }
2419
2420 if (wholedisk) {
2421 pathname += strlen(DISK_ROOT) + 1;
2422 (void) zpool_relabel_disk(hdl, pathname);
2423 }
2424 }
2425
2426 zc.zc_cookie = VDEV_STATE_ONLINE;
2427 zc.zc_obj = flags;
2428
2429 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
2430 if (errno == EINVAL) {
2431 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
2432 "from this pool into a new one. Use '%s' "
2433 "instead"), "zpool detach");
2434 return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
2435 }
2436 return (zpool_standard_error(hdl, errno, msg));
2437 }
2438
2439 *newstate = zc.zc_cookie;
2440 return (0);
2441 }
2442
2443 /*
2444 * Take the specified vdev offline
2445 */
2446 int
2447 zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
2448 {
2449 zfs_cmd_t zc = { 0 };
2450 char msg[1024];
2451 nvlist_t *tgt;
2452 boolean_t avail_spare, l2cache;
2453 libzfs_handle_t *hdl = zhp->zpool_hdl;
2454
2455 (void) snprintf(msg, sizeof (msg),
2456 dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
2457
2458 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2459 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2460 NULL)) == NULL)
2461 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2462
2463 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2464
2465 if (avail_spare)
2466 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2467
2468 zc.zc_cookie = VDEV_STATE_OFFLINE;
2469 zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
2470
2471 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2472 return (0);
2473
2474 switch (errno) {
2475 case EBUSY:
2476
2477 /*
2478 * There are no other replicas of this device.
2479 */
2480 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2481
2482 case EEXIST:
2483 /*
2484 * The log device has unplayed logs
2485 */
2486 return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
2487
2488 default:
2489 return (zpool_standard_error(hdl, errno, msg));
2490 }
2491 }
2492
2493 /*
2494 * Mark the given vdev faulted.
2495 */
2496 int
2497 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2498 {
2499 zfs_cmd_t zc = { 0 };
2500 char msg[1024];
2501 libzfs_handle_t *hdl = zhp->zpool_hdl;
2502
2503 (void) snprintf(msg, sizeof (msg),
2504 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
2505
2506 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2507 zc.zc_guid = guid;
2508 zc.zc_cookie = VDEV_STATE_FAULTED;
2509 zc.zc_obj = aux;
2510
2511 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2512 return (0);
2513
2514 switch (errno) {
2515 case EBUSY:
2516
2517 /*
2518 * There are no other replicas of this device.
2519 */
2520 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2521
2522 default:
2523 return (zpool_standard_error(hdl, errno, msg));
2524 }
2525
2526 }
2527
2528 /*
2529 * Mark the given vdev degraded.
2530 */
2531 int
2532 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2533 {
2534 zfs_cmd_t zc = { 0 };
2535 char msg[1024];
2536 libzfs_handle_t *hdl = zhp->zpool_hdl;
2537
2538 (void) snprintf(msg, sizeof (msg),
2539 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2540
2541 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2542 zc.zc_guid = guid;
2543 zc.zc_cookie = VDEV_STATE_DEGRADED;
2544 zc.zc_obj = aux;
2545
2546 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2547 return (0);
2548
2549 return (zpool_standard_error(hdl, errno, msg));
2550 }
2551
2552 /*
2553 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2554 * a hot spare.
2555 */
2556 static boolean_t
2557 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2558 {
2559 nvlist_t **child;
2560 uint_t c, children;
2561 char *type;
2562
2563 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2564 &children) == 0) {
2565 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2566 &type) == 0);
2567
2568 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2569 children == 2 && child[which] == tgt)
2570 return (B_TRUE);
2571
2572 for (c = 0; c < children; c++)
2573 if (is_replacing_spare(child[c], tgt, which))
2574 return (B_TRUE);
2575 }
2576
2577 return (B_FALSE);
2578 }
2579
2580 /*
2581 * Attach new_disk (fully described by nvroot) to old_disk.
2582 * If 'replacing' is specified, the new disk will replace the old one.
2583 */
2584 int
2585 zpool_vdev_attach(zpool_handle_t *zhp,
2586 const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
2587 {
2588 zfs_cmd_t zc = { 0 };
2589 char msg[1024];
2590 int ret;
2591 nvlist_t *tgt;
2592 boolean_t avail_spare, l2cache, islog;
2593 uint64_t val;
2594 char *newname;
2595 nvlist_t **child;
2596 uint_t children;
2597 nvlist_t *config_root;
2598 libzfs_handle_t *hdl = zhp->zpool_hdl;
2599 boolean_t rootpool = zpool_is_bootable(zhp);
2600
2601 if (replacing)
2602 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2603 "cannot replace %s with %s"), old_disk, new_disk);
2604 else
2605 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
2606 "cannot attach %s to %s"), new_disk, old_disk);
2607
2608 /*
2609 * If this is a root pool, make sure that we're not attaching an
2610 * EFI labeled device.
2611 */
2612 if (rootpool && pool_uses_efi(nvroot)) {
2613 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2614 "EFI labeled devices are not supported on root pools."));
2615 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
2616 }
2617
2618 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2619 if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
2620 &islog)) == 0)
2621 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2622
2623 if (avail_spare)
2624 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2625
2626 if (l2cache)
2627 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2628
2629 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2630 zc.zc_cookie = replacing;
2631
2632 if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
2633 &child, &children) != 0 || children != 1) {
2634 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2635 "new device must be a single disk"));
2636 return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
2637 }
2638
2639 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
2640 ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
2641
2642 if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
2643 return (-1);
2644
2645 /*
2646 * If the target is a hot spare that has been swapped in, we can only
2647 * replace it with another hot spare.
2648 */
2649 if (replacing &&
2650 nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
2651 (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
2652 NULL) == NULL || !avail_spare) &&
2653 is_replacing_spare(config_root, tgt, 1)) {
2654 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2655 "can only be replaced by another hot spare"));
2656 free(newname);
2657 return (zfs_error(hdl, EZFS_BADTARGET, msg));
2658 }
2659
2660 free(newname);
2661
2662 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
2663 return (-1);
2664
2665 ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
2666
2667 zcmd_free_nvlists(&zc);
2668
2669 if (ret == 0) {
2670 if (rootpool) {
2671 /*
2672 * XXX need a better way to prevent user from
2673 * booting up a half-baked vdev.
2674 */
2675 (void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
2676 "sure to wait until resilver is done "
2677 "before rebooting.\n"));
2678 }
2679 return (0);
2680 }
2681
2682 switch (errno) {
2683 case ENOTSUP:
2684 /*
2685 * Can't attach to or replace this type of vdev.
2686 */
2687 if (replacing) {
2688 uint64_t version = zpool_get_prop_int(zhp,
2689 ZPOOL_PROP_VERSION, NULL);
2690
2691 if (islog)
2692 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2693 "cannot replace a log with a spare"));
2694 else if (version >= SPA_VERSION_MULTI_REPLACE)
2695 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2696 "already in replacing/spare config; wait "
2697 "for completion or use 'zpool detach'"));
2698 else
2699 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2700 "cannot replace a replacing device"));
2701 } else {
2702 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2703 "can only attach to mirrors and top-level "
2704 "disks"));
2705 }
2706 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2707 break;
2708
2709 case EINVAL:
2710 /*
2711 * The new device must be a single disk.
2712 */
2713 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2714 "new device must be a single disk"));
2715 (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
2716 break;
2717
2718 case EBUSY:
2719 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
2720 new_disk);
2721 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2722 break;
2723
2724 case EOVERFLOW:
2725 /*
2726 * The new device is too small.
2727 */
2728 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2729 "device is too small"));
2730 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2731 break;
2732
2733 case EDOM:
2734 /*
2735 * The new device has a different alignment requirement.
2736 */
2737 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2738 "devices have different sector alignment"));
2739 (void) zfs_error(hdl, EZFS_BADDEV, msg);
2740 break;
2741
2742 case ENAMETOOLONG:
2743 /*
2744 * The resulting top-level vdev spec won't fit in the label.
2745 */
2746 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
2747 break;
2748
2749 default:
2750 (void) zpool_standard_error(hdl, errno, msg);
2751 }
2752
2753 return (-1);
2754 }
2755
2756 /*
2757 * Detach the specified device.
2758 */
2759 int
2760 zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
2761 {
2762 zfs_cmd_t zc = { 0 };
2763 char msg[1024];
2764 nvlist_t *tgt;
2765 boolean_t avail_spare, l2cache;
2766 libzfs_handle_t *hdl = zhp->zpool_hdl;
2767
2768 (void) snprintf(msg, sizeof (msg),
2769 dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
2770
2771 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2772 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
2773 NULL)) == 0)
2774 return (zfs_error(hdl, EZFS_NODEVICE, msg));
2775
2776 if (avail_spare)
2777 return (zfs_error(hdl, EZFS_ISSPARE, msg));
2778
2779 if (l2cache)
2780 return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
2781
2782 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
2783
2784 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
2785 return (0);
2786
2787 switch (errno) {
2788
2789 case ENOTSUP:
2790 /*
2791 * Can't detach from this type of vdev.
2792 */
2793 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
2794 "applicable to mirror and replacing vdevs"));
2795 (void) zfs_error(hdl, EZFS_BADTARGET, msg);
2796 break;
2797
2798 case EBUSY:
2799 /*
2800 * There are no other replicas of this device.
2801 */
2802 (void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
2803 break;
2804
2805 default:
2806 (void) zpool_standard_error(hdl, errno, msg);
2807 }
2808
2809 return (-1);
2810 }
2811
2812 /*
2813 * Find a mirror vdev in the source nvlist.
2814 *
2815 * The mchild array contains a list of disks in one of the top-level mirrors
2816 * of the source pool. The schild array contains a list of disks that the
2817 * user specified on the command line. We loop over the mchild array to
2818 * see if any entry in the schild array matches.
2819 *
2820 * If a disk in the mchild array is found in the schild array, we return
2821 * the index of that entry. Otherwise we return -1.
2822 */
2823 static int
2824 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2825 nvlist_t **schild, uint_t schildren)
2826 {
2827 uint_t mc;
2828
2829 for (mc = 0; mc < mchildren; mc++) {
2830 uint_t sc;
2831 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2832 mchild[mc], B_FALSE);
2833
2834 for (sc = 0; sc < schildren; sc++) {
2835 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2836 schild[sc], B_FALSE);
2837 boolean_t result = (strcmp(mpath, spath) == 0);
2838
2839 free(spath);
2840 if (result) {
2841 free(mpath);
2842 return (mc);
2843 }
2844 }
2845
2846 free(mpath);
2847 }
2848
2849 return (-1);
2850 }
2851
2852 /*
2853 * Split a mirror pool. If newroot points to null, then a new nvlist
2854 * is generated and it is the responsibility of the caller to free it.
2855 */
2856 int
2857 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2858 nvlist_t *props, splitflags_t flags)
2859 {
2860 zfs_cmd_t zc = { 0 };
2861 char msg[1024];
2862 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2863 nvlist_t **varray = NULL, *zc_props = NULL;
2864 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2865 libzfs_handle_t *hdl = zhp->zpool_hdl;
2866 uint64_t vers;
2867 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2868 int retval = 0;
2869
2870 (void) snprintf(msg, sizeof (msg),
2871 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2872
2873 if (!zpool_name_valid(hdl, B_FALSE, newname))
2874 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2875
2876 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2877 (void) fprintf(stderr, gettext("Internal error: unable to "
2878 "retrieve pool configuration\n"));
2879 return (-1);
2880 }
2881
2882 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2883 == 0);
2884 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2885
2886 if (props) {
2887 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2888 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2889 props, vers, flags, msg)) == NULL)
2890 return (-1);
2891 }
2892
2893 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2894 &children) != 0) {
2895 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2896 "Source pool is missing vdev tree"));
2897 if (zc_props)
2898 nvlist_free(zc_props);
2899 return (-1);
2900 }
2901
2902 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2903 vcount = 0;
2904
2905 if (*newroot == NULL ||
2906 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2907 &newchild, &newchildren) != 0)
2908 newchildren = 0;
2909
2910 for (c = 0; c < children; c++) {
2911 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2912 char *type;
2913 nvlist_t **mchild, *vdev;
2914 uint_t mchildren;
2915 int entry;
2916
2917 /*
2918 * Unlike cache & spares, slogs are stored in the
2919 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2920 */
2921 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2922 &is_log);
2923 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2924 &is_hole);
2925 if (is_log || is_hole) {
2926 /*
2927 * Create a hole vdev and put it in the config.
2928 */
2929 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2930 goto out;
2931 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2932 VDEV_TYPE_HOLE) != 0)
2933 goto out;
2934 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2935 1) != 0)
2936 goto out;
2937 if (lastlog == 0)
2938 lastlog = vcount;
2939 varray[vcount++] = vdev;
2940 continue;
2941 }
2942 lastlog = 0;
2943 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2944 == 0);
2945 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2946 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2947 "Source pool must be composed only of mirrors\n"));
2948 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2949 goto out;
2950 }
2951
2952 verify(nvlist_lookup_nvlist_array(child[c],
2953 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2954
2955 /* find or add an entry for this top-level vdev */
2956 if (newchildren > 0 &&
2957 (entry = find_vdev_entry(zhp, mchild, mchildren,
2958 newchild, newchildren)) >= 0) {
2959 /* We found a disk that the user specified. */
2960 vdev = mchild[entry];
2961 ++found;
2962 } else {
2963 /* User didn't specify a disk for this vdev. */
2964 vdev = mchild[mchildren - 1];
2965 }
2966
2967 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2968 goto out;
2969 }
2970
2971 /* did we find every disk the user specified? */
2972 if (found != newchildren) {
2973 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2974 "include at most one disk from each mirror"));
2975 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2976 goto out;
2977 }
2978
2979 /* Prepare the nvlist for populating. */
2980 if (*newroot == NULL) {
2981 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2982 goto out;
2983 freelist = B_TRUE;
2984 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2985 VDEV_TYPE_ROOT) != 0)
2986 goto out;
2987 } else {
2988 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2989 }
2990
2991 /* Add all the children we found */
2992 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2993 lastlog == 0 ? vcount : lastlog) != 0)
2994 goto out;
2995
2996 /*
2997 * If we're just doing a dry run, exit now with success.
2998 */
2999 if (flags.dryrun) {
3000 memory_err = B_FALSE;
3001 freelist = B_FALSE;
3002 goto out;
3003 }
3004
3005 /* now build up the config list & call the ioctl */
3006 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3007 goto out;
3008
3009 if (nvlist_add_nvlist(newconfig,
3010 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3011 nvlist_add_string(newconfig,
3012 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3013 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3014 goto out;
3015
3016 /*
3017 * The new pool is automatically part of the namespace unless we
3018 * explicitly export it.
3019 */
3020 if (!flags.import)
3021 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3022 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3023 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3024 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3025 goto out;
3026 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3027 goto out;
3028
3029 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3030 retval = zpool_standard_error(hdl, errno, msg);
3031 goto out;
3032 }
3033
3034 freelist = B_FALSE;
3035 memory_err = B_FALSE;
3036
3037 out:
3038 if (varray != NULL) {
3039 int v;
3040
3041 for (v = 0; v < vcount; v++)
3042 nvlist_free(varray[v]);
3043 free(varray);
3044 }
3045 zcmd_free_nvlists(&zc);
3046 if (zc_props)
3047 nvlist_free(zc_props);
3048 if (newconfig)
3049 nvlist_free(newconfig);
3050 if (freelist) {
3051 nvlist_free(*newroot);
3052 *newroot = NULL;
3053 }
3054
3055 if (retval != 0)
3056 return (retval);
3057
3058 if (memory_err)
3059 return (no_memory(hdl));
3060
3061 return (0);
3062 }
3063
3064 /*
3065 * Remove the given device. Currently, this is supported only for hot spares
3066 * and level 2 cache devices.
3067 */
3068 int
3069 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3070 {
3071 zfs_cmd_t zc = { 0 };
3072 char msg[1024];
3073 nvlist_t *tgt;
3074 boolean_t avail_spare, l2cache, islog;
3075 libzfs_handle_t *hdl = zhp->zpool_hdl;
3076 uint64_t version;
3077
3078 (void) snprintf(msg, sizeof (msg),
3079 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3080
3081 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3082 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3083 &islog)) == 0)
3084 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3085 /*
3086 * XXX - this should just go away.
3087 */
3088 if (!avail_spare && !l2cache && !islog) {
3089 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3090 "only inactive hot spares, cache, top-level, "
3091 "or log devices can be removed"));
3092 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3093 }
3094
3095 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3096 if (islog && version < SPA_VERSION_HOLES) {
3097 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3098 "pool must be upgrade to support log removal"));
3099 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3100 }
3101
3102 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3103
3104 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3105 return (0);
3106
3107 return (zpool_standard_error(hdl, errno, msg));
3108 }
3109
3110 /*
3111 * Clear the errors for the pool, or the particular device if specified.
3112 */
3113 int
3114 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3115 {
3116 zfs_cmd_t zc = { 0 };
3117 char msg[1024];
3118 nvlist_t *tgt;
3119 zpool_rewind_policy_t policy;
3120 boolean_t avail_spare, l2cache;
3121 libzfs_handle_t *hdl = zhp->zpool_hdl;
3122 nvlist_t *nvi = NULL;
3123 int error;
3124
3125 if (path)
3126 (void) snprintf(msg, sizeof (msg),
3127 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3128 path);
3129 else
3130 (void) snprintf(msg, sizeof (msg),
3131 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3132 zhp->zpool_name);
3133
3134 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3135 if (path) {
3136 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3137 &l2cache, NULL)) == 0)
3138 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3139
3140 /*
3141 * Don't allow error clearing for hot spares. Do allow
3142 * error clearing for l2cache devices.
3143 */
3144 if (avail_spare)
3145 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3146
3147 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3148 &zc.zc_guid) == 0);
3149 }
3150
3151 zpool_get_rewind_policy(rewindnvl, &policy);
3152 zc.zc_cookie = policy.zrp_request;
3153
3154 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3155 return (-1);
3156
3157 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3158 return (-1);
3159
3160 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3161 errno == ENOMEM) {
3162 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3163 zcmd_free_nvlists(&zc);
3164 return (-1);
3165 }
3166 }
3167
3168 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3169 errno != EPERM && errno != EACCES)) {
3170 if (policy.zrp_request &
3171 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3172 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3173 zpool_rewind_exclaim(hdl, zc.zc_name,
3174 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3175 nvi);
3176 nvlist_free(nvi);
3177 }
3178 zcmd_free_nvlists(&zc);
3179 return (0);
3180 }
3181
3182 zcmd_free_nvlists(&zc);
3183 return (zpool_standard_error(hdl, errno, msg));
3184 }
3185
3186 /*
3187 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3188 */
3189 int
3190 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3191 {
3192 zfs_cmd_t zc = { 0 };
3193 char msg[1024];
3194 libzfs_handle_t *hdl = zhp->zpool_hdl;
3195
3196 (void) snprintf(msg, sizeof (msg),
3197 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3198 guid);
3199
3200 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3201 zc.zc_guid = guid;
3202 zc.zc_cookie = ZPOOL_NO_REWIND;
3203
3204 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3205 return (0);
3206
3207 return (zpool_standard_error(hdl, errno, msg));
3208 }
3209
3210 /*
3211 * Change the GUID for a pool.
3212 */
3213 int
3214 zpool_reguid(zpool_handle_t *zhp)
3215 {
3216 char msg[1024];
3217 libzfs_handle_t *hdl = zhp->zpool_hdl;
3218 zfs_cmd_t zc = { 0 };
3219
3220 (void) snprintf(msg, sizeof (msg),
3221 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3222
3223 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3224 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3225 return (0);
3226
3227 return (zpool_standard_error(hdl, errno, msg));
3228 }
3229
3230 /*
3231 * Reopen the pool.
3232 */
3233 int
3234 zpool_reopen(zpool_handle_t *zhp)
3235 {
3236 zfs_cmd_t zc = { 0 };
3237 char msg[1024];
3238 libzfs_handle_t *hdl = zhp->zpool_hdl;
3239
3240 (void) snprintf(msg, sizeof (msg),
3241 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3242 zhp->zpool_name);
3243
3244 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3245 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3246 return (0);
3247 return (zpool_standard_error(hdl, errno, msg));
3248 }
3249
3250 /*
3251 * Convert from a devid string to a path.
3252 */
3253 static char *
3254 devid_to_path(char *devid_str)
3255 {
3256 ddi_devid_t devid;
3257 char *minor;
3258 char *path;
3259 devid_nmlist_t *list = NULL;
3260 int ret;
3261
3262 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3263 return (NULL);
3264
3265 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3266
3267 devid_str_free(minor);
3268 devid_free(devid);
3269
3270 if (ret != 0)
3271 return (NULL);
3272
3273 if ((path = strdup(list[0].devname)) == NULL)
3274 return (NULL);
3275
3276 devid_free_nmlist(list);
3277
3278 return (path);
3279 }
3280
3281 /*
3282 * Convert from a path to a devid string.
3283 */
3284 static char *
3285 path_to_devid(const char *path)
3286 {
3287 int fd;
3288 ddi_devid_t devid;
3289 char *minor, *ret;
3290
3291 if ((fd = open(path, O_RDONLY)) < 0)
3292 return (NULL);
3293
3294 minor = NULL;
3295 ret = NULL;
3296 if (devid_get(fd, &devid) == 0) {
3297 if (devid_get_minor_name(fd, &minor) == 0)
3298 ret = devid_str_encode(devid, minor);
3299 if (minor != NULL)
3300 devid_str_free(minor);
3301 devid_free(devid);
3302 }
3303 (void) close(fd);
3304
3305 return (ret);
3306 }
3307
3308 /*
3309 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3310 * ignore any failure here, since a common case is for an unprivileged user to
3311 * type 'zpool status', and we'll display the correct information anyway.
3312 */
3313 static void
3314 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3315 {
3316 zfs_cmd_t zc = { 0 };
3317
3318 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3319 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3320 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3321 &zc.zc_guid) == 0);
3322
3323 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3324 }
3325
3326 /*
3327 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3328 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3329 * We also check if this is a whole disk, in which case we strip off the
3330 * trailing 's0' slice name.
3331 *
3332 * This routine is also responsible for identifying when disks have been
3333 * reconfigured in a new location. The kernel will have opened the device by
3334 * devid, but the path will still refer to the old location. To catch this, we
3335 * first do a path -> devid translation (which is fast for the common case). If
3336 * the devid matches, we're done. If not, we do a reverse devid -> path
3337 * translation and issue the appropriate ioctl() to update the path of the vdev.
3338 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3339 * of these checks.
3340 */
3341 char *
3342 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3343 boolean_t verbose)
3344 {
3345 char *path, *devid;
3346 uint64_t value;
3347 char buf[64];
3348 vdev_stat_t *vs;
3349 uint_t vsc;
3350
3351 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3352 &value) == 0) {
3353 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3354 &value) == 0);
3355 (void) snprintf(buf, sizeof (buf), "%llu",
3356 (u_longlong_t)value);
3357 path = buf;
3358 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3359
3360 /*
3361 * If the device is dead (faulted, offline, etc) then don't
3362 * bother opening it. Otherwise we may be forcing the user to
3363 * open a misbehaving device, which can have undesirable
3364 * effects.
3365 */
3366 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3367 (uint64_t **)&vs, &vsc) != 0 ||
3368 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3369 zhp != NULL &&
3370 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3371 /*
3372 * Determine if the current path is correct.
3373 */
3374 char *newdevid = path_to_devid(path);
3375
3376 if (newdevid == NULL ||
3377 strcmp(devid, newdevid) != 0) {
3378 char *newpath;
3379
3380 if ((newpath = devid_to_path(devid)) != NULL) {
3381 /*
3382 * Update the path appropriately.
3383 */
3384 set_path(zhp, nv, newpath);
3385 if (nvlist_add_string(nv,
3386 ZPOOL_CONFIG_PATH, newpath) == 0)
3387 verify(nvlist_lookup_string(nv,
3388 ZPOOL_CONFIG_PATH,
3389 &path) == 0);
3390 free(newpath);
3391 }
3392 }
3393
3394 if (newdevid)
3395 devid_str_free(newdevid);
3396 }
3397
3398 if (strncmp(path, "/dev/dsk/", 9) == 0)
3399 path += 9;
3400
3401 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3402 &value) == 0 && value) {
3403 int pathlen = strlen(path);
3404 char *tmp = zfs_strdup(hdl, path);
3405
3406 /*
3407 * If it starts with c#, and ends with "s0", chop
3408 * the "s0" off, or if it ends with "s0/old", remove
3409 * the "s0" from the middle.
3410 */
3411 if (CTD_CHECK(tmp)) {
3412 if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
3413 tmp[pathlen - 2] = '\0';
3414 } else if (pathlen > 6 &&
3415 strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
3416 (void) strcpy(&tmp[pathlen - 6],
3417 "/old");
3418 }
3419 }
3420 return (tmp);
3421 }
3422 } else {
3423 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3424
3425 /*
3426 * If it's a raidz device, we need to stick in the parity level.
3427 */
3428 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3429 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3430 &value) == 0);
3431 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3432 (u_longlong_t)value);
3433 path = buf;
3434 }
3435
3436 /*
3437 * We identify each top-level vdev by using a <type-id>
3438 * naming convention.
3439 */
3440 if (verbose) {
3441 uint64_t id;
3442
3443 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3444 &id) == 0);
3445 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3446 (u_longlong_t)id);
3447 path = buf;
3448 }
3449 }
3450
3451 return (zfs_strdup(hdl, path));
3452 }
3453
3454 static int
3455 zbookmark_compare(const void *a, const void *b)
3456 {
3457 return (memcmp(a, b, sizeof (zbookmark_t)));
3458 }
3459
3460 /*
3461 * Retrieve the persistent error log, uniquify the members, and return to the
3462 * caller.
3463 */
3464 int
3465 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3466 {
3467 zfs_cmd_t zc = { 0 };
3468 uint64_t count;
3469 zbookmark_t *zb = NULL;
3470 int i;
3471
3472 /*
3473 * Retrieve the raw error list from the kernel. If the number of errors
3474 * has increased, allocate more space and continue until we get the
3475 * entire list.
3476 */
3477 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3478 &count) == 0);
3479 if (count == 0)
3480 return (0);
3481 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3482 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3483 return (-1);
3484 zc.zc_nvlist_dst_size = count;
3485 (void) strcpy(zc.zc_name, zhp->zpool_name);
3486 for (;;) {
3487 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3488 &zc) != 0) {
3489 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3490 if (errno == ENOMEM) {
3491 count = zc.zc_nvlist_dst_size;
3492 if ((zc.zc_nvlist_dst = (uintptr_t)
3493 zfs_alloc(zhp->zpool_hdl, count *
3494 sizeof (zbookmark_t))) == (uintptr_t)NULL)
3495 return (-1);
3496 } else {
3497 return (-1);
3498 }
3499 } else {
3500 break;
3501 }
3502 }
3503
3504 /*
3505 * Sort the resulting bookmarks. This is a little confusing due to the
3506 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3507 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
3508 * _not_ copied as part of the process. So we point the start of our
3509 * array appropriate and decrement the total number of elements.
3510 */
3511 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3512 zc.zc_nvlist_dst_size;
3513 count -= zc.zc_nvlist_dst_size;
3514
3515 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3516
3517 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3518
3519 /*
3520 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3521 */
3522 for (i = 0; i < count; i++) {
3523 nvlist_t *nv;
3524
3525 /* ignoring zb_blkid and zb_level for now */
3526 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3527 zb[i-1].zb_object == zb[i].zb_object)
3528 continue;
3529
3530 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3531 goto nomem;
3532 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3533 zb[i].zb_objset) != 0) {
3534 nvlist_free(nv);
3535 goto nomem;
3536 }
3537 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3538 zb[i].zb_object) != 0) {
3539 nvlist_free(nv);
3540 goto nomem;
3541 }
3542 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3543 nvlist_free(nv);
3544 goto nomem;
3545 }
3546 nvlist_free(nv);
3547 }
3548
3549 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3550 return (0);
3551
3552 nomem:
3553 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3554 return (no_memory(zhp->zpool_hdl));
3555 }
3556
3557 /*
3558 * Upgrade a ZFS pool to the latest on-disk version.
3559 */
3560 int
3561 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3562 {
3563 zfs_cmd_t zc = { 0 };
3564 libzfs_handle_t *hdl = zhp->zpool_hdl;
3565
3566 (void) strcpy(zc.zc_name, zhp->zpool_name);
3567 zc.zc_cookie = new_version;
3568
3569 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3570 return (zpool_standard_error_fmt(hdl, errno,
3571 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3572 zhp->zpool_name));
3573 return (0);
3574 }
3575
3576 void
3577 zpool_set_history_str(const char *subcommand, int argc, char **argv,
3578 char *history_str)
3579 {
3580 int i;
3581
3582 (void) strlcpy(history_str, subcommand, HIS_MAX_RECORD_LEN);
3583 for (i = 1; i < argc; i++) {
3584 if (strlen(history_str) + 1 + strlen(argv[i]) >
3585 HIS_MAX_RECORD_LEN)
3586 break;
3587 (void) strlcat(history_str, " ", HIS_MAX_RECORD_LEN);
3588 (void) strlcat(history_str, argv[i], HIS_MAX_RECORD_LEN);
3589 }
3590 }
3591
3592 /*
3593 * Stage command history for logging.
3594 */
3595 int
3596 zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
3597 {
3598 if (history_str == NULL)
3599 return (EINVAL);
3600
3601 if (strlen(history_str) > HIS_MAX_RECORD_LEN)
3602 return (EINVAL);
3603
3604 if (hdl->libzfs_log_str != NULL)
3605 free(hdl->libzfs_log_str);
3606
3607 if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
3608 return (no_memory(hdl));
3609
3610 return (0);
3611 }
3612
3613 /*
3614 * Perform ioctl to get some command history of a pool.
3615 *
3616 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3617 * logical offset of the history buffer to start reading from.
3618 *
3619 * Upon return, 'off' is the next logical offset to read from and
3620 * 'len' is the actual amount of bytes read into 'buf'.
3621 */
3622 static int
3623 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3624 {
3625 zfs_cmd_t zc = { 0 };
3626 libzfs_handle_t *hdl = zhp->zpool_hdl;
3627
3628 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3629
3630 zc.zc_history = (uint64_t)(uintptr_t)buf;
3631 zc.zc_history_len = *len;
3632 zc.zc_history_offset = *off;
3633
3634 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3635 switch (errno) {
3636 case EPERM:
3637 return (zfs_error_fmt(hdl, EZFS_PERM,
3638 dgettext(TEXT_DOMAIN,
3639 "cannot show history for pool '%s'"),
3640 zhp->zpool_name));
3641 case ENOENT:
3642 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3643 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3644 "'%s'"), zhp->zpool_name));
3645 case ENOTSUP:
3646 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3647 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3648 "'%s', pool must be upgraded"), zhp->zpool_name));
3649 default:
3650 return (zpool_standard_error_fmt(hdl, errno,
3651 dgettext(TEXT_DOMAIN,
3652 "cannot get history for '%s'"), zhp->zpool_name));
3653 }
3654 }
3655
3656 *len = zc.zc_history_len;
3657 *off = zc.zc_history_offset;
3658
3659 return (0);
3660 }
3661
3662 /*
3663 * Process the buffer of nvlists, unpacking and storing each nvlist record
3664 * into 'records'. 'leftover' is set to the number of bytes that weren't
3665 * processed as there wasn't a complete record.
3666 */
3667 int
3668 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3669 nvlist_t ***records, uint_t *numrecords)
3670 {
3671 uint64_t reclen;
3672 nvlist_t *nv;
3673 int i;
3674
3675 while (bytes_read > sizeof (reclen)) {
3676
3677 /* get length of packed record (stored as little endian) */
3678 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3679 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3680
3681 if (bytes_read < sizeof (reclen) + reclen)
3682 break;
3683
3684 /* unpack record */
3685 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3686 return (ENOMEM);
3687 bytes_read -= sizeof (reclen) + reclen;
3688 buf += sizeof (reclen) + reclen;
3689
3690 /* add record to nvlist array */
3691 (*numrecords)++;
3692 if (ISP2(*numrecords + 1)) {
3693 *records = realloc(*records,
3694 *numrecords * 2 * sizeof (nvlist_t *));
3695 }
3696 (*records)[*numrecords - 1] = nv;
3697 }
3698
3699 *leftover = bytes_read;
3700 return (0);
3701 }
3702
3703 #define HIS_BUF_LEN (128*1024)
3704
3705 /*
3706 * Retrieve the command history of a pool.
3707 */
3708 int
3709 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3710 {
3711 char buf[HIS_BUF_LEN];
3712 uint64_t off = 0;
3713 nvlist_t **records = NULL;
3714 uint_t numrecords = 0;
3715 int err, i;
3716
3717 do {
3718 uint64_t bytes_read = sizeof (buf);
3719 uint64_t leftover;
3720
3721 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3722 break;
3723
3724 /* if nothing else was read in, we're at EOF, just return */
3725 if (!bytes_read)
3726 break;
3727
3728 if ((err = zpool_history_unpack(buf, bytes_read,
3729 &leftover, &records, &numrecords)) != 0)
3730 break;
3731 off -= leftover;
3732
3733 /* CONSTCOND */
3734 } while (1);
3735
3736 if (!err) {
3737 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3738 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3739 records, numrecords) == 0);
3740 }
3741 for (i = 0; i < numrecords; i++)
3742 nvlist_free(records[i]);
3743 free(records);
3744
3745 return (err);
3746 }
3747
3748 void
3749 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3750 char *pathname, size_t len)
3751 {
3752 zfs_cmd_t zc = { 0 };
3753 boolean_t mounted = B_FALSE;
3754 char *mntpnt = NULL;
3755 char dsname[MAXNAMELEN];
3756
3757 if (dsobj == 0) {
3758 /* special case for the MOS */
3759 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3760 return;
3761 }
3762
3763 /* get the dataset's name */
3764 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3765 zc.zc_obj = dsobj;
3766 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3767 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3768 /* just write out a path of two object numbers */
3769 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3770 dsobj, obj);
3771 return;
3772 }
3773 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3774
3775 /* find out if the dataset is mounted */
3776 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3777
3778 /* get the corrupted object's path */
3779 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3780 zc.zc_obj = obj;
3781 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3782 &zc) == 0) {
3783 if (mounted) {
3784 (void) snprintf(pathname, len, "%s%s", mntpnt,
3785 zc.zc_value);
3786 } else {
3787 (void) snprintf(pathname, len, "%s:%s",
3788 dsname, zc.zc_value);
3789 }
3790 } else {
3791 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
3792 }
3793 free(mntpnt);
3794 }
3795
3796 /*
3797 * Read the EFI label from the config, if a label does not exist then
3798 * pass back the error to the caller. If the caller has passed a non-NULL
3799 * diskaddr argument then we set it to the starting address of the EFI
3800 * partition.
3801 */
3802 static int
3803 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3804 {
3805 char *path;
3806 int fd;
3807 char diskname[MAXPATHLEN];
3808 int err = -1;
3809
3810 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3811 return (err);
3812
3813 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3814 strrchr(path, '/'));
3815 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3816 struct dk_gpt *vtoc;
3817
3818 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3819 if (sb != NULL)
3820 *sb = vtoc->efi_parts[0].p_start;
3821 efi_free(vtoc);
3822 }
3823 (void) close(fd);
3824 }
3825 return (err);
3826 }
3827
3828 /*
3829 * determine where a partition starts on a disk in the current
3830 * configuration
3831 */
3832 static diskaddr_t
3833 find_start_block(nvlist_t *config)
3834 {
3835 nvlist_t **child;
3836 uint_t c, children;
3837 diskaddr_t sb = MAXOFFSET_T;
3838 uint64_t wholedisk;
3839
3840 if (nvlist_lookup_nvlist_array(config,
3841 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3842 if (nvlist_lookup_uint64(config,
3843 ZPOOL_CONFIG_WHOLE_DISK,
3844 &wholedisk) != 0 || !wholedisk) {
3845 return (MAXOFFSET_T);
3846 }
3847 if (read_efi_label(config, &sb) < 0)
3848 sb = MAXOFFSET_T;
3849 return (sb);
3850 }
3851
3852 for (c = 0; c < children; c++) {
3853 sb = find_start_block(child[c]);
3854 if (sb != MAXOFFSET_T) {
3855 return (sb);
3856 }
3857 }
3858 return (MAXOFFSET_T);
3859 }
3860
3861 /*
3862 * Label an individual disk. The name provided is the short name,
3863 * stripped of any leading /dev path.
3864 */
3865 int
3866 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3867 {
3868 char path[MAXPATHLEN];
3869 struct dk_gpt *vtoc;
3870 int fd;
3871 size_t resv = EFI_MIN_RESV_SIZE;
3872 uint64_t slice_size;
3873 diskaddr_t start_block;
3874 char errbuf[1024];
3875
3876 /* prepare an error message just in case */
3877 (void) snprintf(errbuf, sizeof (errbuf),
3878 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3879
3880 if (zhp) {
3881 nvlist_t *nvroot;
3882
3883 if (zpool_is_bootable(zhp)) {
3884 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3885 "EFI labeled devices are not supported on root "
3886 "pools."));
3887 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3888 }
3889
3890 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3891 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3892
3893 if (zhp->zpool_start_block == 0)
3894 start_block = find_start_block(nvroot);
3895 else
3896 start_block = zhp->zpool_start_block;
3897 zhp->zpool_start_block = start_block;
3898 } else {
3899 /* new pool */
3900 start_block = NEW_START_BLOCK;
3901 }
3902
3903 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3904 BACKUP_SLICE);
3905
3906 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3907 /*
3908 * This shouldn't happen. We've long since verified that this
3909 * is a valid device.
3910 */
3911 zfs_error_aux(hdl,
3912 dgettext(TEXT_DOMAIN, "unable to open device"));
3913 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3914 }
3915
3916 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3917 /*
3918 * The only way this can fail is if we run out of memory, or we
3919 * were unable to read the disk's capacity
3920 */
3921 if (errno == ENOMEM)
3922 (void) no_memory(hdl);
3923
3924 (void) close(fd);
3925 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3926 "unable to read disk capacity"), name);
3927
3928 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3929 }
3930
3931 slice_size = vtoc->efi_last_u_lba + 1;
3932 slice_size -= EFI_MIN_RESV_SIZE;
3933 if (start_block == MAXOFFSET_T)
3934 start_block = NEW_START_BLOCK;
3935 slice_size -= start_block;
3936
3937 vtoc->efi_parts[0].p_start = start_block;
3938 vtoc->efi_parts[0].p_size = slice_size;
3939
3940 /*
3941 * Why we use V_USR: V_BACKUP confuses users, and is considered
3942 * disposable by some EFI utilities (since EFI doesn't have a backup
3943 * slice). V_UNASSIGNED is supposed to be used only for zero size
3944 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
3945 * etc. were all pretty specific. V_USR is as close to reality as we
3946 * can get, in the absence of V_OTHER.
3947 */
3948 vtoc->efi_parts[0].p_tag = V_USR;
3949 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3950
3951 vtoc->efi_parts[8].p_start = slice_size + start_block;
3952 vtoc->efi_parts[8].p_size = resv;
3953 vtoc->efi_parts[8].p_tag = V_RESERVED;
3954
3955 if (efi_write(fd, vtoc) != 0) {
3956 /*
3957 * Some block drivers (like pcata) may not support EFI
3958 * GPT labels. Print out a helpful error message dir-
3959 * ecting the user to manually label the disk and give
3960 * a specific slice.
3961 */
3962 (void) close(fd);
3963 efi_free(vtoc);
3964
3965 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3966 "try using fdisk(1M) and then provide a specific slice"));
3967 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3968 }
3969
3970 (void) close(fd);
3971 efi_free(vtoc);
3972 return (0);
3973 }
3974
3975 static boolean_t
3976 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3977 {
3978 char *type;
3979 nvlist_t **child;
3980 uint_t children, c;
3981
3982 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3983 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
3984 strcmp(type, VDEV_TYPE_FILE) == 0 ||
3985 strcmp(type, VDEV_TYPE_LOG) == 0 ||
3986 strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3987 strcmp(type, VDEV_TYPE_MISSING) == 0) {
3988 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3989 "vdev type '%s' is not supported"), type);
3990 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3991 return (B_FALSE);
3992 }
3993 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3994 &child, &children) == 0) {
3995 for (c = 0; c < children; c++) {
3996 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3997 return (B_FALSE);
3998 }
3999 }
4000 return (B_TRUE);
4001 }
4002
4003 /*
4004 * check if this zvol is allowable for use as a dump device; zero if
4005 * it is, > 0 if it isn't, < 0 if it isn't a zvol
4006 */
4007 int
4008 zvol_check_dump_config(char *arg)
4009 {
4010 zpool_handle_t *zhp = NULL;
4011 nvlist_t *config, *nvroot;
4012 char *p, *volname;
4013 nvlist_t **top;
4014 uint_t toplevels;
4015 libzfs_handle_t *hdl;
4016 char errbuf[1024];
4017 char poolname[ZPOOL_MAXNAMELEN];
4018 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
4019 int ret = 1;
4020
4021 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
4022 return (-1);
4023 }
4024
4025 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
4026 "dump is not supported on device '%s'"), arg);
4027
4028 if ((hdl = libzfs_init()) == NULL)
4029 return (1);
4030 libzfs_print_on_error(hdl, B_TRUE);
4031
4032 volname = arg + pathlen;
4033
4034 /* check the configuration of the pool */
4035 if ((p = strchr(volname, '/')) == NULL) {
4036 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4037 "malformed dataset name"));
4038 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
4039 return (1);
4040 } else if (p - volname >= ZFS_MAXNAMELEN) {
4041 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4042 "dataset name is too long"));
4043 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
4044 return (1);
4045 } else {
4046 (void) strncpy(poolname, volname, p - volname);
4047 poolname[p - volname] = '\0';
4048 }
4049
4050 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
4051 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4052 "could not open pool '%s'"), poolname);
4053 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
4054 goto out;
4055 }
4056 config = zpool_get_config(zhp, NULL);
4057 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4058 &nvroot) != 0) {
4059 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4060 "could not obtain vdev configuration for '%s'"), poolname);
4061 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4062 goto out;
4063 }
4064
4065 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4066 &top, &toplevels) == 0);
4067 if (toplevels != 1) {
4068 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4069 "'%s' has multiple top level vdevs"), poolname);
4070 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
4071 goto out;
4072 }
4073
4074 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
4075 goto out;
4076 }
4077 ret = 0;
4078
4079 out:
4080 if (zhp)
4081 zpool_close(zhp);
4082 libzfs_fini(hdl);
4083 return (ret);
4084 }