Print this page
2882 implement libzfs_core
2883 changing "canmount" property to "on" should not always remount dataset
2900 "zfs snapshot" should be able to create multiple, arbitrary snapshots at once
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Chris Siden <christopher.siden@delphix.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Reviewed by: Bill Pijewski <wdp@joyent.com>
Reviewed by: Dan Kruchinin <dan.kruchinin@gmail.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/lib/libzfs/common/libzfs_pool.c
+++ new/usr/src/lib/libzfs/common/libzfs_pool.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 25 * Copyright (c) 2012 by Delphix. All rights reserved.
26 26 */
↓ open down ↓ |
26 lines elided |
↑ open up ↑ |
27 27
28 28 #include <ctype.h>
29 29 #include <errno.h>
30 30 #include <devid.h>
31 31 #include <fcntl.h>
32 32 #include <libintl.h>
33 33 #include <stdio.h>
34 34 #include <stdlib.h>
35 35 #include <strings.h>
36 36 #include <unistd.h>
37 +#include <libgen.h>
37 38 #include <sys/efi_partition.h>
38 39 #include <sys/vtoc.h>
39 40 #include <sys/zfs_ioctl.h>
40 41 #include <dlfcn.h>
41 42
42 43 #include "zfs_namecheck.h"
43 44 #include "zfs_prop.h"
44 45 #include "libzfs_impl.h"
45 46 #include "zfs_comutil.h"
46 47 #include "zfeature_common.h"
47 48
48 49 static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
49 50
50 51 #define DISK_ROOT "/dev/dsk"
51 52 #define RDISK_ROOT "/dev/rdsk"
52 53 #define BACKUP_SLICE "s2"
53 54
54 55 typedef struct prop_flags {
55 56 int create:1; /* Validate property on creation */
56 57 int import:1; /* Validate property on import */
57 58 } prop_flags_t;
58 59
59 60 /*
60 61 * ====================================================================
61 62 * zpool property functions
62 63 * ====================================================================
63 64 */
64 65
65 66 static int
66 67 zpool_get_all_props(zpool_handle_t *zhp)
67 68 {
68 69 zfs_cmd_t zc = { 0 };
69 70 libzfs_handle_t *hdl = zhp->zpool_hdl;
70 71
71 72 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
72 73
73 74 if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
74 75 return (-1);
75 76
76 77 while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
77 78 if (errno == ENOMEM) {
78 79 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
79 80 zcmd_free_nvlists(&zc);
80 81 return (-1);
81 82 }
82 83 } else {
83 84 zcmd_free_nvlists(&zc);
84 85 return (-1);
85 86 }
86 87 }
87 88
88 89 if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
89 90 zcmd_free_nvlists(&zc);
90 91 return (-1);
91 92 }
92 93
93 94 zcmd_free_nvlists(&zc);
94 95
95 96 return (0);
96 97 }
97 98
98 99 static int
99 100 zpool_props_refresh(zpool_handle_t *zhp)
100 101 {
101 102 nvlist_t *old_props;
102 103
103 104 old_props = zhp->zpool_props;
104 105
105 106 if (zpool_get_all_props(zhp) != 0)
106 107 return (-1);
107 108
108 109 nvlist_free(old_props);
109 110 return (0);
110 111 }
111 112
112 113 static char *
113 114 zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
114 115 zprop_source_t *src)
115 116 {
116 117 nvlist_t *nv, *nvl;
117 118 uint64_t ival;
118 119 char *value;
119 120 zprop_source_t source;
120 121
121 122 nvl = zhp->zpool_props;
122 123 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
123 124 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
124 125 source = ival;
125 126 verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
126 127 } else {
127 128 source = ZPROP_SRC_DEFAULT;
128 129 if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
129 130 value = "-";
130 131 }
131 132
132 133 if (src)
133 134 *src = source;
134 135
135 136 return (value);
136 137 }
137 138
138 139 uint64_t
139 140 zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
140 141 {
141 142 nvlist_t *nv, *nvl;
142 143 uint64_t value;
143 144 zprop_source_t source;
144 145
145 146 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
146 147 /*
147 148 * zpool_get_all_props() has most likely failed because
148 149 * the pool is faulted, but if all we need is the top level
149 150 * vdev's guid then get it from the zhp config nvlist.
150 151 */
151 152 if ((prop == ZPOOL_PROP_GUID) &&
152 153 (nvlist_lookup_nvlist(zhp->zpool_config,
153 154 ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
154 155 (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
155 156 == 0)) {
156 157 return (value);
157 158 }
158 159 return (zpool_prop_default_numeric(prop));
159 160 }
160 161
161 162 nvl = zhp->zpool_props;
162 163 if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
163 164 verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
164 165 source = value;
165 166 verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
166 167 } else {
167 168 source = ZPROP_SRC_DEFAULT;
168 169 value = zpool_prop_default_numeric(prop);
169 170 }
170 171
171 172 if (src)
172 173 *src = source;
173 174
174 175 return (value);
175 176 }
176 177
177 178 /*
178 179 * Map VDEV STATE to printed strings.
179 180 */
180 181 char *
181 182 zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
182 183 {
183 184 switch (state) {
184 185 case VDEV_STATE_CLOSED:
185 186 case VDEV_STATE_OFFLINE:
186 187 return (gettext("OFFLINE"));
187 188 case VDEV_STATE_REMOVED:
188 189 return (gettext("REMOVED"));
189 190 case VDEV_STATE_CANT_OPEN:
190 191 if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
191 192 return (gettext("FAULTED"));
192 193 else if (aux == VDEV_AUX_SPLIT_POOL)
193 194 return (gettext("SPLIT"));
194 195 else
195 196 return (gettext("UNAVAIL"));
196 197 case VDEV_STATE_FAULTED:
197 198 return (gettext("FAULTED"));
198 199 case VDEV_STATE_DEGRADED:
199 200 return (gettext("DEGRADED"));
200 201 case VDEV_STATE_HEALTHY:
201 202 return (gettext("ONLINE"));
202 203 }
203 204
204 205 return (gettext("UNKNOWN"));
205 206 }
206 207
207 208 /*
208 209 * Get a zpool property value for 'prop' and return the value in
209 210 * a pre-allocated buffer.
210 211 */
211 212 int
212 213 zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf, size_t len,
213 214 zprop_source_t *srctype)
214 215 {
215 216 uint64_t intval;
216 217 const char *strval;
217 218 zprop_source_t src = ZPROP_SRC_NONE;
218 219 nvlist_t *nvroot;
219 220 vdev_stat_t *vs;
220 221 uint_t vsc;
221 222
222 223 if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
223 224 switch (prop) {
224 225 case ZPOOL_PROP_NAME:
225 226 (void) strlcpy(buf, zpool_get_name(zhp), len);
226 227 break;
227 228
228 229 case ZPOOL_PROP_HEALTH:
229 230 (void) strlcpy(buf, "FAULTED", len);
230 231 break;
231 232
232 233 case ZPOOL_PROP_GUID:
233 234 intval = zpool_get_prop_int(zhp, prop, &src);
234 235 (void) snprintf(buf, len, "%llu", intval);
235 236 break;
236 237
237 238 case ZPOOL_PROP_ALTROOT:
238 239 case ZPOOL_PROP_CACHEFILE:
239 240 case ZPOOL_PROP_COMMENT:
240 241 if (zhp->zpool_props != NULL ||
241 242 zpool_get_all_props(zhp) == 0) {
242 243 (void) strlcpy(buf,
243 244 zpool_get_prop_string(zhp, prop, &src),
244 245 len);
245 246 if (srctype != NULL)
246 247 *srctype = src;
247 248 return (0);
248 249 }
249 250 /* FALLTHROUGH */
250 251 default:
251 252 (void) strlcpy(buf, "-", len);
252 253 break;
253 254 }
254 255
255 256 if (srctype != NULL)
256 257 *srctype = src;
257 258 return (0);
258 259 }
259 260
260 261 if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
261 262 prop != ZPOOL_PROP_NAME)
262 263 return (-1);
263 264
264 265 switch (zpool_prop_get_type(prop)) {
265 266 case PROP_TYPE_STRING:
266 267 (void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
267 268 len);
268 269 break;
269 270
270 271 case PROP_TYPE_NUMBER:
271 272 intval = zpool_get_prop_int(zhp, prop, &src);
272 273
273 274 switch (prop) {
274 275 case ZPOOL_PROP_SIZE:
275 276 case ZPOOL_PROP_ALLOCATED:
276 277 case ZPOOL_PROP_FREE:
277 278 case ZPOOL_PROP_FREEING:
278 279 case ZPOOL_PROP_EXPANDSZ:
279 280 (void) zfs_nicenum(intval, buf, len);
280 281 break;
281 282
282 283 case ZPOOL_PROP_CAPACITY:
283 284 (void) snprintf(buf, len, "%llu%%",
284 285 (u_longlong_t)intval);
285 286 break;
286 287
287 288 case ZPOOL_PROP_DEDUPRATIO:
288 289 (void) snprintf(buf, len, "%llu.%02llux",
289 290 (u_longlong_t)(intval / 100),
290 291 (u_longlong_t)(intval % 100));
291 292 break;
292 293
293 294 case ZPOOL_PROP_HEALTH:
294 295 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
295 296 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
296 297 verify(nvlist_lookup_uint64_array(nvroot,
297 298 ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
298 299 == 0);
299 300
300 301 (void) strlcpy(buf, zpool_state_to_name(intval,
301 302 vs->vs_aux), len);
302 303 break;
303 304 case ZPOOL_PROP_VERSION:
304 305 if (intval >= SPA_VERSION_FEATURES) {
305 306 (void) snprintf(buf, len, "-");
306 307 break;
307 308 }
308 309 /* FALLTHROUGH */
309 310 default:
310 311 (void) snprintf(buf, len, "%llu", intval);
311 312 }
312 313 break;
313 314
314 315 case PROP_TYPE_INDEX:
315 316 intval = zpool_get_prop_int(zhp, prop, &src);
316 317 if (zpool_prop_index_to_string(prop, intval, &strval)
317 318 != 0)
318 319 return (-1);
319 320 (void) strlcpy(buf, strval, len);
320 321 break;
321 322
322 323 default:
323 324 abort();
324 325 }
325 326
326 327 if (srctype)
327 328 *srctype = src;
328 329
329 330 return (0);
330 331 }
331 332
332 333 /*
333 334 * Check if the bootfs name has the same pool name as it is set to.
334 335 * Assuming bootfs is a valid dataset name.
335 336 */
336 337 static boolean_t
337 338 bootfs_name_valid(const char *pool, char *bootfs)
338 339 {
339 340 int len = strlen(pool);
340 341
341 342 if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
342 343 return (B_FALSE);
343 344
344 345 if (strncmp(pool, bootfs, len) == 0 &&
345 346 (bootfs[len] == '/' || bootfs[len] == '\0'))
346 347 return (B_TRUE);
347 348
348 349 return (B_FALSE);
349 350 }
350 351
351 352 /*
352 353 * Inspect the configuration to determine if any of the devices contain
353 354 * an EFI label.
354 355 */
355 356 static boolean_t
356 357 pool_uses_efi(nvlist_t *config)
357 358 {
358 359 nvlist_t **child;
359 360 uint_t c, children;
360 361
361 362 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
362 363 &child, &children) != 0)
363 364 return (read_efi_label(config, NULL) >= 0);
364 365
365 366 for (c = 0; c < children; c++) {
366 367 if (pool_uses_efi(child[c]))
367 368 return (B_TRUE);
368 369 }
369 370 return (B_FALSE);
370 371 }
371 372
372 373 boolean_t
373 374 zpool_is_bootable(zpool_handle_t *zhp)
374 375 {
375 376 char bootfs[ZPOOL_MAXNAMELEN];
376 377
377 378 return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
378 379 sizeof (bootfs), NULL) == 0 && strncmp(bootfs, "-",
379 380 sizeof (bootfs)) != 0);
380 381 }
381 382
382 383
383 384 /*
384 385 * Given an nvlist of zpool properties to be set, validate that they are
385 386 * correct, and parse any numeric properties (index, boolean, etc) if they are
386 387 * specified as strings.
387 388 */
388 389 static nvlist_t *
389 390 zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
390 391 nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
391 392 {
392 393 nvpair_t *elem;
393 394 nvlist_t *retprops;
394 395 zpool_prop_t prop;
395 396 char *strval;
396 397 uint64_t intval;
397 398 char *slash, *check;
398 399 struct stat64 statbuf;
399 400 zpool_handle_t *zhp;
400 401 nvlist_t *nvroot;
401 402
402 403 if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
403 404 (void) no_memory(hdl);
404 405 return (NULL);
405 406 }
406 407
407 408 elem = NULL;
408 409 while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
409 410 const char *propname = nvpair_name(elem);
410 411
411 412 prop = zpool_name_to_prop(propname);
412 413 if (prop == ZPROP_INVAL && zpool_prop_feature(propname)) {
413 414 int err;
414 415 zfeature_info_t *feature;
415 416 char *fname = strchr(propname, '@') + 1;
416 417
417 418 err = zfeature_lookup_name(fname, &feature);
418 419 if (err != 0) {
419 420 ASSERT3U(err, ==, ENOENT);
420 421 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
421 422 "invalid feature '%s'"), fname);
422 423 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
423 424 goto error;
424 425 }
425 426
426 427 if (nvpair_type(elem) != DATA_TYPE_STRING) {
427 428 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
428 429 "'%s' must be a string"), propname);
429 430 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
430 431 goto error;
431 432 }
432 433
433 434 (void) nvpair_value_string(elem, &strval);
434 435 if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0) {
435 436 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
436 437 "property '%s' can only be set to "
437 438 "'enabled'"), propname);
438 439 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
439 440 goto error;
440 441 }
441 442
442 443 if (nvlist_add_uint64(retprops, propname, 0) != 0) {
443 444 (void) no_memory(hdl);
444 445 goto error;
445 446 }
446 447 continue;
447 448 }
448 449
449 450 /*
450 451 * Make sure this property is valid and applies to this type.
451 452 */
452 453 if (prop == ZPROP_INVAL) {
453 454 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
454 455 "invalid property '%s'"), propname);
455 456 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
456 457 goto error;
457 458 }
458 459
459 460 if (zpool_prop_readonly(prop)) {
460 461 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
461 462 "is readonly"), propname);
462 463 (void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
463 464 goto error;
464 465 }
465 466
466 467 if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
467 468 &strval, &intval, errbuf) != 0)
468 469 goto error;
469 470
470 471 /*
471 472 * Perform additional checking for specific properties.
472 473 */
473 474 switch (prop) {
474 475 case ZPOOL_PROP_VERSION:
475 476 if (intval < version ||
476 477 !SPA_VERSION_IS_SUPPORTED(intval)) {
477 478 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
478 479 "property '%s' number %d is invalid."),
479 480 propname, intval);
480 481 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
481 482 goto error;
482 483 }
483 484 break;
484 485
485 486 case ZPOOL_PROP_BOOTFS:
486 487 if (flags.create || flags.import) {
487 488 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
488 489 "property '%s' cannot be set at creation "
489 490 "or import time"), propname);
490 491 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
491 492 goto error;
492 493 }
493 494
494 495 if (version < SPA_VERSION_BOOTFS) {
495 496 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
496 497 "pool must be upgraded to support "
497 498 "'%s' property"), propname);
498 499 (void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
499 500 goto error;
500 501 }
501 502
502 503 /*
503 504 * bootfs property value has to be a dataset name and
504 505 * the dataset has to be in the same pool as it sets to.
505 506 */
506 507 if (strval[0] != '\0' && !bootfs_name_valid(poolname,
507 508 strval)) {
508 509 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
509 510 "is an invalid name"), strval);
510 511 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
511 512 goto error;
512 513 }
513 514
514 515 if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
515 516 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
516 517 "could not open pool '%s'"), poolname);
517 518 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
518 519 goto error;
519 520 }
520 521 verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
521 522 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
522 523
523 524 /*
524 525 * bootfs property cannot be set on a disk which has
525 526 * been EFI labeled.
526 527 */
527 528 if (pool_uses_efi(nvroot)) {
528 529 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
529 530 "property '%s' not supported on "
530 531 "EFI labeled devices"), propname);
531 532 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf);
532 533 zpool_close(zhp);
533 534 goto error;
534 535 }
535 536 zpool_close(zhp);
536 537 break;
537 538
538 539 case ZPOOL_PROP_ALTROOT:
539 540 if (!flags.create && !flags.import) {
540 541 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
541 542 "property '%s' can only be set during pool "
542 543 "creation or import"), propname);
543 544 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
544 545 goto error;
545 546 }
546 547
547 548 if (strval[0] != '/') {
548 549 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
549 550 "bad alternate root '%s'"), strval);
550 551 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
551 552 goto error;
552 553 }
553 554 break;
554 555
555 556 case ZPOOL_PROP_CACHEFILE:
556 557 if (strval[0] == '\0')
557 558 break;
558 559
559 560 if (strcmp(strval, "none") == 0)
560 561 break;
561 562
562 563 if (strval[0] != '/') {
563 564 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
564 565 "property '%s' must be empty, an "
565 566 "absolute path, or 'none'"), propname);
566 567 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
567 568 goto error;
568 569 }
569 570
570 571 slash = strrchr(strval, '/');
571 572
572 573 if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
573 574 strcmp(slash, "/..") == 0) {
574 575 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
575 576 "'%s' is not a valid file"), strval);
576 577 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
577 578 goto error;
578 579 }
579 580
580 581 *slash = '\0';
581 582
582 583 if (strval[0] != '\0' &&
583 584 (stat64(strval, &statbuf) != 0 ||
584 585 !S_ISDIR(statbuf.st_mode))) {
585 586 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
586 587 "'%s' is not a valid directory"),
587 588 strval);
588 589 (void) zfs_error(hdl, EZFS_BADPATH, errbuf);
589 590 goto error;
590 591 }
591 592
592 593 *slash = '/';
593 594 break;
594 595
595 596 case ZPOOL_PROP_COMMENT:
596 597 for (check = strval; *check != '\0'; check++) {
597 598 if (!isprint(*check)) {
598 599 zfs_error_aux(hdl,
599 600 dgettext(TEXT_DOMAIN,
600 601 "comment may only have printable "
601 602 "characters"));
602 603 (void) zfs_error(hdl, EZFS_BADPROP,
603 604 errbuf);
604 605 goto error;
605 606 }
606 607 }
607 608 if (strlen(strval) > ZPROP_MAX_COMMENT) {
608 609 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
609 610 "comment must not exceed %d characters"),
610 611 ZPROP_MAX_COMMENT);
611 612 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
612 613 goto error;
613 614 }
614 615 break;
615 616 case ZPOOL_PROP_READONLY:
616 617 if (!flags.import) {
617 618 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
618 619 "property '%s' can only be set at "
619 620 "import time"), propname);
620 621 (void) zfs_error(hdl, EZFS_BADPROP, errbuf);
621 622 goto error;
622 623 }
623 624 break;
624 625 }
625 626 }
626 627
627 628 return (retprops);
628 629 error:
629 630 nvlist_free(retprops);
630 631 return (NULL);
631 632 }
632 633
633 634 /*
634 635 * Set zpool property : propname=propval.
635 636 */
636 637 int
637 638 zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
638 639 {
639 640 zfs_cmd_t zc = { 0 };
640 641 int ret = -1;
641 642 char errbuf[1024];
642 643 nvlist_t *nvl = NULL;
643 644 nvlist_t *realprops;
644 645 uint64_t version;
645 646 prop_flags_t flags = { 0 };
646 647
647 648 (void) snprintf(errbuf, sizeof (errbuf),
648 649 dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
649 650 zhp->zpool_name);
650 651
651 652 if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
652 653 return (no_memory(zhp->zpool_hdl));
653 654
654 655 if (nvlist_add_string(nvl, propname, propval) != 0) {
655 656 nvlist_free(nvl);
656 657 return (no_memory(zhp->zpool_hdl));
657 658 }
658 659
659 660 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
660 661 if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
661 662 zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
662 663 nvlist_free(nvl);
663 664 return (-1);
664 665 }
665 666
666 667 nvlist_free(nvl);
667 668 nvl = realprops;
668 669
669 670 /*
670 671 * Execute the corresponding ioctl() to set this property.
671 672 */
672 673 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
673 674
674 675 if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
675 676 nvlist_free(nvl);
676 677 return (-1);
677 678 }
678 679
679 680 ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
680 681
681 682 zcmd_free_nvlists(&zc);
682 683 nvlist_free(nvl);
683 684
684 685 if (ret)
685 686 (void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
686 687 else
687 688 (void) zpool_props_refresh(zhp);
688 689
689 690 return (ret);
690 691 }
691 692
/*
 * Expand 'plp' (a possibly empty property list) to cover every pool
 * property, every known SPA feature, and any unsupported features
 * present on this pool, widening column widths as a side effect.
 * Returns 0 on success, -1 on failure.
 */
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	zprop_list_t *entry;
	char buf[ZFS_MAXPROPLEN];
	nvlist_t *features = NULL;
	zprop_list_t **last;
	/* True the first time the caller expands this (still empty) list. */
	boolean_t firstexpand = (NULL == *plp);

	if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
		return (-1);

	/* Find the tail of the list so new entries can be appended. */
	last = plp;
	while (*last != NULL)
		last = &(*last)->pl_next;

	if ((*plp)->pl_all)
		features = zpool_get_features(zhp);

	/*
	 * On the first "all properties" expansion, append a feature@<name>
	 * entry for each feature in the SPA feature table.
	 */
	if ((*plp)->pl_all && firstexpand) {
		for (int i = 0; i < SPA_FEATURES; i++) {
			zprop_list_t *entry = zfs_alloc(hdl,
			    sizeof (zprop_list_t));
			entry->pl_prop = ZPROP_INVAL;
			entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
			    spa_feature_table[i].fi_uname);
			entry->pl_width = strlen(entry->pl_user_prop);
			entry->pl_all = B_TRUE;

			*last = entry;
			last = &entry->pl_next;
		}
	}

	/* add any unsupported features */
	for (nvpair_t *nvp = nvlist_next_nvpair(features, NULL);
	    nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
		char *propname;
		boolean_t found;
		zprop_list_t *entry;

		if (zfeature_is_supported(nvpair_name(nvp)))
			continue;

		propname = zfs_asprintf(hdl, "unsupported@%s",
		    nvpair_name(nvp));

		/*
		 * Before adding the property to the list make sure that no
		 * other pool already added the same property.
		 */
		found = B_FALSE;
		entry = *plp;
		while (entry != NULL) {
			if (entry->pl_user_prop != NULL &&
			    strcmp(propname, entry->pl_user_prop) == 0) {
				found = B_TRUE;
				break;
			}
			entry = entry->pl_next;
		}
		if (found) {
			/* Duplicate: drop the freshly formatted name. */
			free(propname);
			continue;
		}

		/* New unsupported feature: append it; list owns propname. */
		entry = zfs_alloc(hdl, sizeof (zprop_list_t));
		entry->pl_prop = ZPROP_INVAL;
		entry->pl_user_prop = propname;
		entry->pl_width = strlen(entry->pl_user_prop);
		entry->pl_all = B_TRUE;

		*last = entry;
		last = &entry->pl_next;
	}

	/* Widen each non-fixed column to fit this pool's printed values. */
	for (entry = *plp; entry != NULL; entry = entry->pl_next) {

		if (entry->pl_fixed)
			continue;

		if (entry->pl_prop != ZPROP_INVAL &&
		    zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
		    NULL) == 0) {
			if (strlen(buf) > entry->pl_width)
				entry->pl_width = strlen(buf);
		}
	}

	return (0);
}
784 785
785 786 /*
786 787 * Get the state for the given feature on the given ZFS pool.
787 788 */
788 789 int
789 790 zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
790 791 size_t len)
791 792 {
792 793 uint64_t refcount;
793 794 boolean_t found = B_FALSE;
794 795 nvlist_t *features = zpool_get_features(zhp);
795 796 boolean_t supported;
796 797 const char *feature = strchr(propname, '@') + 1;
797 798
798 799 supported = zpool_prop_feature(propname);
799 800 ASSERT(supported || zfs_prop_unsupported(propname));
800 801
801 802 /*
802 803 * Convert from feature name to feature guid. This conversion is
803 804 * unecessary for unsupported@... properties because they already
804 805 * use guids.
805 806 */
806 807 if (supported) {
807 808 int ret;
808 809 zfeature_info_t *fi;
809 810
810 811 ret = zfeature_lookup_name(feature, &fi);
811 812 if (ret != 0) {
812 813 (void) strlcpy(buf, "-", len);
813 814 return (ENOTSUP);
814 815 }
815 816 feature = fi->fi_guid;
816 817 }
817 818
818 819 if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
819 820 found = B_TRUE;
820 821
821 822 if (supported) {
822 823 if (!found) {
823 824 (void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
824 825 } else {
825 826 if (refcount == 0)
826 827 (void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
827 828 else
828 829 (void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
829 830 }
830 831 } else {
831 832 if (found) {
832 833 if (refcount == 0) {
833 834 (void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
834 835 } else {
835 836 (void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
836 837 }
837 838 } else {
838 839 (void) strlcpy(buf, "-", len);
839 840 return (ENOTSUP);
840 841 }
841 842 }
842 843
843 844 return (0);
844 845 }
845 846
846 847 /*
847 848 * Don't start the slice at the default block of 34; many storage
848 849 * devices will use a stripe width of 128k, so start there instead.
849 850 */
850 851 #define NEW_START_BLOCK 256
851 852
852 853 /*
853 854 * Validate the given pool name, optionally putting an extended error message in
854 855 * 'buf'.
855 856 */
856 857 boolean_t
857 858 zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
858 859 {
859 860 namecheck_err_t why;
860 861 char what;
861 862 int ret;
862 863
863 864 ret = pool_namecheck(pool, &why, &what);
864 865
865 866 /*
866 867 * The rules for reserved pool names were extended at a later point.
867 868 * But we need to support users with existing pools that may now be
868 869 * invalid. So we only check for this expanded set of names during a
869 870 * create (or import), and only in userland.
870 871 */
871 872 if (ret == 0 && !isopen &&
872 873 (strncmp(pool, "mirror", 6) == 0 ||
873 874 strncmp(pool, "raidz", 5) == 0 ||
874 875 strncmp(pool, "spare", 5) == 0 ||
875 876 strcmp(pool, "log") == 0)) {
876 877 if (hdl != NULL)
877 878 zfs_error_aux(hdl,
878 879 dgettext(TEXT_DOMAIN, "name is reserved"));
879 880 return (B_FALSE);
880 881 }
881 882
882 883
883 884 if (ret != 0) {
884 885 if (hdl != NULL) {
885 886 switch (why) {
886 887 case NAME_ERR_TOOLONG:
887 888 zfs_error_aux(hdl,
888 889 dgettext(TEXT_DOMAIN, "name is too long"));
889 890 break;
890 891
891 892 case NAME_ERR_INVALCHAR:
892 893 zfs_error_aux(hdl,
893 894 dgettext(TEXT_DOMAIN, "invalid character "
894 895 "'%c' in pool name"), what);
895 896 break;
896 897
897 898 case NAME_ERR_NOLETTER:
898 899 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
899 900 "name must begin with a letter"));
900 901 break;
901 902
902 903 case NAME_ERR_RESERVED:
903 904 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
904 905 "name is reserved"));
905 906 break;
906 907
907 908 case NAME_ERR_DISKLIKE:
908 909 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
909 910 "pool name is reserved"));
910 911 break;
911 912
912 913 case NAME_ERR_LEADING_SLASH:
913 914 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
914 915 "leading slash in name"));
915 916 break;
916 917
917 918 case NAME_ERR_EMPTY_COMPONENT:
918 919 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
919 920 "empty component in name"));
920 921 break;
921 922
922 923 case NAME_ERR_TRAILING_SLASH:
923 924 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
924 925 "trailing slash in name"));
925 926 break;
926 927
927 928 case NAME_ERR_MULTIPLE_AT:
928 929 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
929 930 "multiple '@' delimiters in name"));
930 931 break;
931 932
932 933 }
933 934 }
934 935 return (B_FALSE);
935 936 }
936 937
937 938 return (B_TRUE);
938 939 }
939 940
940 941 /*
941 942 * Open a handle to the given pool, even if the pool is currently in the FAULTED
942 943 * state.
943 944 */
944 945 zpool_handle_t *
945 946 zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
946 947 {
947 948 zpool_handle_t *zhp;
948 949 boolean_t missing;
949 950
950 951 /*
951 952 * Make sure the pool name is valid.
952 953 */
953 954 if (!zpool_name_valid(hdl, B_TRUE, pool)) {
954 955 (void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
955 956 dgettext(TEXT_DOMAIN, "cannot open '%s'"),
956 957 pool);
957 958 return (NULL);
958 959 }
959 960
960 961 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
961 962 return (NULL);
962 963
963 964 zhp->zpool_hdl = hdl;
964 965 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
965 966
966 967 if (zpool_refresh_stats(zhp, &missing) != 0) {
967 968 zpool_close(zhp);
968 969 return (NULL);
969 970 }
970 971
971 972 if (missing) {
972 973 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
973 974 (void) zfs_error_fmt(hdl, EZFS_NOENT,
974 975 dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
975 976 zpool_close(zhp);
976 977 return (NULL);
977 978 }
978 979
979 980 return (zhp);
980 981 }
981 982
982 983 /*
983 984 * Like the above, but silent on error. Used when iterating over pools (because
984 985 * the configuration cache may be out of date).
985 986 */
986 987 int
987 988 zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
988 989 {
989 990 zpool_handle_t *zhp;
990 991 boolean_t missing;
991 992
992 993 if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
993 994 return (-1);
994 995
995 996 zhp->zpool_hdl = hdl;
996 997 (void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
997 998
998 999 if (zpool_refresh_stats(zhp, &missing) != 0) {
999 1000 zpool_close(zhp);
1000 1001 return (-1);
1001 1002 }
1002 1003
1003 1004 if (missing) {
1004 1005 zpool_close(zhp);
1005 1006 *ret = NULL;
1006 1007 return (0);
1007 1008 }
1008 1009
1009 1010 *ret = zhp;
1010 1011 return (0);
1011 1012 }
1012 1013
1013 1014 /*
1014 1015 * Similar to zpool_open_canfail(), but refuses to open pools in the faulted
1015 1016 * state.
1016 1017 */
1017 1018 zpool_handle_t *
1018 1019 zpool_open(libzfs_handle_t *hdl, const char *pool)
1019 1020 {
1020 1021 zpool_handle_t *zhp;
1021 1022
1022 1023 if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
1023 1024 return (NULL);
1024 1025
1025 1026 if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
1026 1027 (void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
1027 1028 dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
1028 1029 zpool_close(zhp);
1029 1030 return (NULL);
1030 1031 }
1031 1032
1032 1033 return (zhp);
1033 1034 }
1034 1035
1035 1036 /*
1036 1037 * Close the handle. Simply frees the memory associated with the handle.
1037 1038 */
1038 1039 void
1039 1040 zpool_close(zpool_handle_t *zhp)
1040 1041 {
1041 1042 if (zhp->zpool_config)
1042 1043 nvlist_free(zhp->zpool_config);
1043 1044 if (zhp->zpool_old_config)
1044 1045 nvlist_free(zhp->zpool_old_config);
1045 1046 if (zhp->zpool_props)
1046 1047 nvlist_free(zhp->zpool_props);
1047 1048 free(zhp);
1048 1049 }
1049 1050
/*
 * Return the name of the pool.  The returned pointer is owned by the
 * handle and remains valid only until zpool_close() is called.
 */
const char *
zpool_get_name(zpool_handle_t *zhp)
{
	return (zhp->zpool_name);
}
1058 1059
1059 1060
1060 1061 /*
1061 1062 * Return the state of the pool (ACTIVE or UNAVAILABLE)
1062 1063 */
1063 1064 int
1064 1065 zpool_get_state(zpool_handle_t *zhp)
1065 1066 {
1066 1067 return (zhp->zpool_state);
1067 1068 }
1068 1069
/*
 * Create the named pool, using the provided vdev list.  It is assumed
 * that the consumer has already validated the contents of the nvlist, so we
 * don't have to worry about error semantics.
 *
 * 'props' are pool-level properties, 'fsprops' are properties for the
 * pool's root dataset.  Returns 0 on success, -1 on failure (with the
 * error recorded on 'hdl').
 */
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
    nvlist_t *props, nvlist_t *fsprops)
{
	zfs_cmd_t zc = { 0 };
	nvlist_t *zc_fsprops = NULL;
	nvlist_t *zc_props = NULL;
	char msg[1024];
	char *altroot;
	int ret = -1;

	(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
	    "cannot create '%s'"), pool);

	if (!zpool_name_valid(hdl, B_FALSE, pool))
		return (zfs_error(hdl, EZFS_INVALIDNAME, msg));

	/* Pack the vdev configuration into the ioctl request. */
	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	if (props) {
		prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };

		/* Validate pool properties against the initial SPA version. */
		if ((zc_props = zpool_valid_proplist(hdl, pool, props,
		    SPA_VERSION_1, flags, msg)) == NULL) {
			goto create_failed;
		}
	}

	if (fsprops) {
		uint64_t zoned;
		char *zonestr;

		/* "zoned=on" changes which dataset properties are legal. */
		zoned = ((nvlist_lookup_string(fsprops,
		    zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
		    strcmp(zonestr, "on") == 0);

		if ((zc_fsprops = zfs_valid_proplist(hdl,
		    ZFS_TYPE_FILESYSTEM, fsprops, zoned, NULL, msg)) == NULL) {
			goto create_failed;
		}
		if (!zc_props &&
		    (nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
			goto create_failed;
		}
		/*
		 * The root dataset's properties ride along inside the
		 * pool property nvlist under ZPOOL_ROOTFS_PROPS.
		 */
		if (nvlist_add_nvlist(zc_props,
		    ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
			goto create_failed;
		}
	}

	if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
		goto create_failed;

	(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));

	if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {

		/*
		 * These error paths return directly (rather than via the
		 * create_failed label), so free everything here first.
		 */
		zcmd_free_nvlists(&zc);
		nvlist_free(zc_props);
		nvlist_free(zc_fsprops);

		switch (errno) {
		case EBUSY:
			/*
			 * This can happen if the user has specified the same
			 * device multiple times.  We can't reliably detect this
			 * until we try to add it and see we already have a
			 * label.
			 */
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more vdevs refer to the same device"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case EOVERFLOW:
			/*
			 * This occurs when one of the devices is below
			 * SPA_MINDEVSIZE.  Unfortunately, we can't detect which
			 * device was the problem device since there's no
			 * reliable way to determine device size from userland.
			 */
			{
				char buf[64];

				zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));

				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "one or more devices is less than the "
				    "minimum size (%s)"), buf);
			}
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOSPC:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "one or more devices is out of space"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		case ENOTBLK:
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cache device must be a disk or disk slice"));
			return (zfs_error(hdl, EZFS_BADDEV, msg));

		default:
			return (zpool_standard_error(hdl, errno, msg));
		}
	}

	/*
	 * If this is an alternate root pool, then we automatically set the
	 * mountpoint of the root dataset to be '/'.
	 */
	if (nvlist_lookup_string(props, zpool_prop_to_name(ZPOOL_PROP_ALTROOT),
	    &altroot) == 0) {
		zfs_handle_t *zhp;

		verify((zhp = zfs_open(hdl, pool, ZFS_TYPE_DATASET)) != NULL);
		verify(zfs_prop_set(zhp, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
		    "/") == 0);

		zfs_close(zhp);
	}

	/*
	 * NOTE: create_failed is the shared cleanup path for both success
	 * (ret == 0, falling through from above) and failure (ret == -1).
	 */
create_failed:
	zcmd_free_nvlists(&zc);
	nvlist_free(zc_props);
	nvlist_free(zc_fsprops);
	return (ret);
}
1202 1203
1203 1204 /*
1204 1205 * Destroy the given pool. It is up to the caller to ensure that there are no
1205 1206 * datasets left in the pool.
1206 1207 */
1207 1208 int
1208 -zpool_destroy(zpool_handle_t *zhp)
1209 +zpool_destroy(zpool_handle_t *zhp, const char *log_str)
1209 1210 {
1210 1211 zfs_cmd_t zc = { 0 };
1211 1212 zfs_handle_t *zfp = NULL;
1212 1213 libzfs_handle_t *hdl = zhp->zpool_hdl;
1213 1214 char msg[1024];
1214 1215
1215 1216 if (zhp->zpool_state == POOL_STATE_ACTIVE &&
1216 1217 (zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
1217 1218 return (-1);
1218 1219
1219 1220 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1221 + zc.zc_history = (uint64_t)(uintptr_t)log_str;
1220 1222
1221 1223 if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
1222 1224 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1223 1225 "cannot destroy '%s'"), zhp->zpool_name);
1224 1226
1225 1227 if (errno == EROFS) {
1226 1228 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1227 1229 "one or more devices is read only"));
1228 1230 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1229 1231 } else {
1230 1232 (void) zpool_standard_error(hdl, errno, msg);
1231 1233 }
1232 1234
1233 1235 if (zfp)
1234 1236 zfs_close(zfp);
1235 1237 return (-1);
1236 1238 }
1237 1239
1238 1240 if (zfp) {
1239 1241 remove_mountpoint(zfp);
1240 1242 zfs_close(zfp);
1241 1243 }
1242 1244
1243 1245 return (0);
1244 1246 }
1245 1247
1246 1248 /*
1247 1249 * Add the given vdevs to the pool. The caller must have already performed the
1248 1250 * necessary verification to ensure that the vdev specification is well-formed.
1249 1251 */
1250 1252 int
1251 1253 zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
1252 1254 {
1253 1255 zfs_cmd_t zc = { 0 };
1254 1256 int ret;
1255 1257 libzfs_handle_t *hdl = zhp->zpool_hdl;
1256 1258 char msg[1024];
1257 1259 nvlist_t **spares, **l2cache;
1258 1260 uint_t nspares, nl2cache;
1259 1261
1260 1262 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1261 1263 "cannot add to '%s'"), zhp->zpool_name);
1262 1264
1263 1265 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1264 1266 SPA_VERSION_SPARES &&
1265 1267 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
1266 1268 &spares, &nspares) == 0) {
1267 1269 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1268 1270 "upgraded to add hot spares"));
1269 1271 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1270 1272 }
1271 1273
1272 1274 if (zpool_is_bootable(zhp) && nvlist_lookup_nvlist_array(nvroot,
1273 1275 ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0) {
1274 1276 uint64_t s;
1275 1277
1276 1278 for (s = 0; s < nspares; s++) {
1277 1279 char *path;
1278 1280
1279 1281 if (nvlist_lookup_string(spares[s], ZPOOL_CONFIG_PATH,
1280 1282 &path) == 0 && pool_uses_efi(spares[s])) {
1281 1283 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1282 1284 "device '%s' contains an EFI label and "
1283 1285 "cannot be used on root pools."),
1284 1286 zpool_vdev_name(hdl, NULL, spares[s],
1285 1287 B_FALSE));
1286 1288 return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
1287 1289 }
1288 1290 }
1289 1291 }
1290 1292
1291 1293 if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
1292 1294 SPA_VERSION_L2CACHE &&
1293 1295 nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
1294 1296 &l2cache, &nl2cache) == 0) {
1295 1297 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
1296 1298 "upgraded to add cache devices"));
1297 1299 return (zfs_error(hdl, EZFS_BADVERSION, msg));
1298 1300 }
1299 1301
1300 1302 if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
1301 1303 return (-1);
1302 1304 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1303 1305
1304 1306 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
1305 1307 switch (errno) {
1306 1308 case EBUSY:
1307 1309 /*
1308 1310 * This can happen if the user has specified the same
1309 1311 * device multiple times. We can't reliably detect this
1310 1312 * until we try to add it and see we already have a
1311 1313 * label.
1312 1314 */
1313 1315 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1314 1316 "one or more vdevs refer to the same device"));
1315 1317 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1316 1318 break;
1317 1319
1318 1320 case EOVERFLOW:
1319 1321 /*
1320 1322 * This occurrs when one of the devices is below
1321 1323 * SPA_MINDEVSIZE. Unfortunately, we can't detect which
1322 1324 * device was the problem device since there's no
1323 1325 * reliable way to determine device size from userland.
1324 1326 */
1325 1327 {
1326 1328 char buf[64];
1327 1329
1328 1330 zfs_nicenum(SPA_MINDEVSIZE, buf, sizeof (buf));
1329 1331
1330 1332 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1331 1333 "device is less than the minimum "
1332 1334 "size (%s)"), buf);
1333 1335 }
1334 1336 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1335 1337 break;
1336 1338
1337 1339 case ENOTSUP:
1338 1340 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1339 1341 "pool must be upgraded to add these vdevs"));
1340 1342 (void) zfs_error(hdl, EZFS_BADVERSION, msg);
1341 1343 break;
1342 1344
1343 1345 case EDOM:
1344 1346 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1345 1347 "root pool can not have multiple vdevs"
1346 1348 " or separate logs"));
1347 1349 (void) zfs_error(hdl, EZFS_POOL_NOTSUP, msg);
1348 1350 break;
1349 1351
1350 1352 case ENOTBLK:
1351 1353 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1352 1354 "cache device must be a disk or disk slice"));
1353 1355 (void) zfs_error(hdl, EZFS_BADDEV, msg);
1354 1356 break;
1355 1357
1356 1358 default:
1357 1359 (void) zpool_standard_error(hdl, errno, msg);
1358 1360 }
1359 1361
1360 1362 ret = -1;
1361 1363 } else {
1362 1364 ret = 0;
1363 1365 }
↓ open down ↓ |
134 lines elided |
↑ open up ↑ |
1364 1366
1365 1367 zcmd_free_nvlists(&zc);
1366 1368
1367 1369 return (ret);
1368 1370 }
1369 1371
1370 1372 /*
1371 1373 * Exports the pool from the system. The caller must ensure that there are no
1372 1374 * mounted datasets in the pool.
1373 1375 */
1374 -int
1375 -zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce)
1376 +static int
1377 +zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
1378 + const char *log_str)
1376 1379 {
1377 1380 zfs_cmd_t zc = { 0 };
1378 1381 char msg[1024];
1379 1382
1380 1383 (void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
1381 1384 "cannot export '%s'"), zhp->zpool_name);
1382 1385
1383 1386 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
1384 1387 zc.zc_cookie = force;
1385 1388 zc.zc_guid = hardforce;
1389 + zc.zc_history = (uint64_t)(uintptr_t)log_str;
1386 1390
1387 1391 if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
1388 1392 switch (errno) {
1389 1393 case EXDEV:
1390 1394 zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
1391 1395 "use '-f' to override the following errors:\n"
1392 1396 "'%s' has an active shared spare which could be"
1393 1397 " used by other pools once '%s' is exported."),
1394 1398 zhp->zpool_name, zhp->zpool_name);
1395 1399 return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
1396 1400 msg));
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
1397 1401 default:
1398 1402 return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
1399 1403 msg));
1400 1404 }
1401 1405 }
1402 1406
1403 1407 return (0);
1404 1408 }
1405 1409
1406 1410 int
1407 -zpool_export(zpool_handle_t *zhp, boolean_t force)
1411 +zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
1408 1412 {
1409 - return (zpool_export_common(zhp, force, B_FALSE));
1413 + return (zpool_export_common(zhp, force, B_FALSE, log_str));
1410 1414 }
1411 1415
1412 1416 int
1413 -zpool_export_force(zpool_handle_t *zhp)
1417 +zpool_export_force(zpool_handle_t *zhp, const char *log_str)
1414 1418 {
1415 - return (zpool_export_common(zhp, B_TRUE, B_TRUE));
1419 + return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
1416 1420 }
1417 1421
1418 1422 static void
1419 1423 zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
1420 1424 nvlist_t *config)
1421 1425 {
1422 1426 nvlist_t *nv = NULL;
1423 1427 uint64_t rewindto;
1424 1428 int64_t loss = -1;
1425 1429 struct tm t;
1426 1430 char timestr[128];
1427 1431
1428 1432 if (!hdl->libzfs_printerr || config == NULL)
1429 1433 return;
1430 1434
1431 1435 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1432 1436 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
1433 1437 return;
1434 1438 }
1435 1439
1436 1440 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1437 1441 return;
1438 1442 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1439 1443
1440 1444 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1441 1445 strftime(timestr, 128, 0, &t) != 0) {
1442 1446 if (dryrun) {
1443 1447 (void) printf(dgettext(TEXT_DOMAIN,
1444 1448 "Would be able to return %s "
1445 1449 "to its state as of %s.\n"),
1446 1450 name, timestr);
1447 1451 } else {
1448 1452 (void) printf(dgettext(TEXT_DOMAIN,
1449 1453 "Pool %s returned to its state as of %s.\n"),
1450 1454 name, timestr);
1451 1455 }
1452 1456 if (loss > 120) {
1453 1457 (void) printf(dgettext(TEXT_DOMAIN,
1454 1458 "%s approximately %lld "),
1455 1459 dryrun ? "Would discard" : "Discarded",
1456 1460 (loss + 30) / 60);
1457 1461 (void) printf(dgettext(TEXT_DOMAIN,
1458 1462 "minutes of transactions.\n"));
1459 1463 } else if (loss > 0) {
1460 1464 (void) printf(dgettext(TEXT_DOMAIN,
1461 1465 "%s approximately %lld "),
1462 1466 dryrun ? "Would discard" : "Discarded", loss);
1463 1467 (void) printf(dgettext(TEXT_DOMAIN,
1464 1468 "seconds of transactions.\n"));
1465 1469 }
1466 1470 }
1467 1471 }
1468 1472
1469 1473 void
1470 1474 zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
1471 1475 nvlist_t *config)
1472 1476 {
1473 1477 nvlist_t *nv = NULL;
1474 1478 int64_t loss = -1;
1475 1479 uint64_t edata = UINT64_MAX;
1476 1480 uint64_t rewindto;
1477 1481 struct tm t;
1478 1482 char timestr[128];
1479 1483
1480 1484 if (!hdl->libzfs_printerr)
1481 1485 return;
1482 1486
1483 1487 if (reason >= 0)
1484 1488 (void) printf(dgettext(TEXT_DOMAIN, "action: "));
1485 1489 else
1486 1490 (void) printf(dgettext(TEXT_DOMAIN, "\t"));
1487 1491
1488 1492 /* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
1489 1493 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
1490 1494 nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
1491 1495 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
1492 1496 goto no_info;
1493 1497
1494 1498 (void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
1495 1499 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
1496 1500 &edata);
1497 1501
1498 1502 (void) printf(dgettext(TEXT_DOMAIN,
1499 1503 "Recovery is possible, but will result in some data loss.\n"));
1500 1504
1501 1505 if (localtime_r((time_t *)&rewindto, &t) != NULL &&
1502 1506 strftime(timestr, 128, 0, &t) != 0) {
1503 1507 (void) printf(dgettext(TEXT_DOMAIN,
1504 1508 "\tReturning the pool to its state as of %s\n"
1505 1509 "\tshould correct the problem. "),
1506 1510 timestr);
1507 1511 } else {
1508 1512 (void) printf(dgettext(TEXT_DOMAIN,
1509 1513 "\tReverting the pool to an earlier state "
1510 1514 "should correct the problem.\n\t"));
1511 1515 }
1512 1516
1513 1517 if (loss > 120) {
1514 1518 (void) printf(dgettext(TEXT_DOMAIN,
1515 1519 "Approximately %lld minutes of data\n"
1516 1520 "\tmust be discarded, irreversibly. "), (loss + 30) / 60);
1517 1521 } else if (loss > 0) {
1518 1522 (void) printf(dgettext(TEXT_DOMAIN,
1519 1523 "Approximately %lld seconds of data\n"
1520 1524 "\tmust be discarded, irreversibly. "), loss);
1521 1525 }
1522 1526 if (edata != 0 && edata != UINT64_MAX) {
1523 1527 if (edata == 1) {
1524 1528 (void) printf(dgettext(TEXT_DOMAIN,
1525 1529 "After rewind, at least\n"
1526 1530 "\tone persistent user-data error will remain. "));
1527 1531 } else {
1528 1532 (void) printf(dgettext(TEXT_DOMAIN,
1529 1533 "After rewind, several\n"
1530 1534 "\tpersistent user-data errors will remain. "));
1531 1535 }
1532 1536 }
1533 1537 (void) printf(dgettext(TEXT_DOMAIN,
1534 1538 "Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
1535 1539 reason >= 0 ? "clear" : "import", name);
1536 1540
1537 1541 (void) printf(dgettext(TEXT_DOMAIN,
1538 1542 "A scrub of the pool\n"
1539 1543 "\tis strongly recommended after recovery.\n"));
1540 1544 return;
1541 1545
1542 1546 no_info:
1543 1547 (void) printf(dgettext(TEXT_DOMAIN,
1544 1548 "Destroy and re-create the pool from\n\ta backup source.\n"));
1545 1549 }
1546 1550
1547 1551 /*
1548 1552 * zpool_import() is a contracted interface. Should be kept the same
1549 1553 * if possible.
1550 1554 *
1551 1555 * Applications should use zpool_import_props() to import a pool with
1552 1556 * new properties value to be set.
1553 1557 */
1554 1558 int
1555 1559 zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1556 1560 char *altroot)
1557 1561 {
1558 1562 nvlist_t *props = NULL;
1559 1563 int ret;
1560 1564
1561 1565 if (altroot != NULL) {
1562 1566 if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
1563 1567 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1564 1568 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1565 1569 newname));
1566 1570 }
1567 1571
1568 1572 if (nvlist_add_string(props,
1569 1573 zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
1570 1574 nvlist_add_string(props,
1571 1575 zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
1572 1576 nvlist_free(props);
1573 1577 return (zfs_error_fmt(hdl, EZFS_NOMEM,
1574 1578 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1575 1579 newname));
1576 1580 }
1577 1581 }
1578 1582
1579 1583 ret = zpool_import_props(hdl, config, newname, props,
1580 1584 ZFS_IMPORT_NORMAL);
1581 1585 if (props)
1582 1586 nvlist_free(props);
1583 1587 return (ret);
1584 1588 }
1585 1589
1586 1590 static void
1587 1591 print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
1588 1592 int indent)
1589 1593 {
1590 1594 nvlist_t **child;
1591 1595 uint_t c, children;
1592 1596 char *vname;
1593 1597 uint64_t is_log = 0;
1594 1598
1595 1599 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
1596 1600 &is_log);
1597 1601
1598 1602 if (name != NULL)
1599 1603 (void) printf("\t%*s%s%s\n", indent, "", name,
1600 1604 is_log ? " [log]" : "");
1601 1605
1602 1606 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
1603 1607 &child, &children) != 0)
1604 1608 return;
1605 1609
1606 1610 for (c = 0; c < children; c++) {
1607 1611 vname = zpool_vdev_name(hdl, NULL, child[c], B_TRUE);
1608 1612 print_vdev_tree(hdl, vname, child[c], indent + 2);
1609 1613 free(vname);
1610 1614 }
1611 1615 }
1612 1616
1613 1617 void
1614 1618 zpool_print_unsup_feat(nvlist_t *config)
1615 1619 {
1616 1620 nvlist_t *nvinfo, *unsup_feat;
1617 1621
1618 1622 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
1619 1623 0);
1620 1624 verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
1621 1625 &unsup_feat) == 0);
1622 1626
1623 1627 for (nvpair_t *nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
1624 1628 nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
1625 1629 char *desc;
1626 1630
1627 1631 verify(nvpair_type(nvp) == DATA_TYPE_STRING);
1628 1632 verify(nvpair_value_string(nvp, &desc) == 0);
1629 1633
1630 1634 if (strlen(desc) > 0)
1631 1635 (void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
1632 1636 else
1633 1637 (void) printf("\t%s\n", nvpair_name(nvp));
1634 1638 }
1635 1639 }
1636 1640
1637 1641 /*
1638 1642 * Import the given pool using the known configuration and a list of
1639 1643 * properties to be set. The configuration should have come from
1640 1644 * zpool_find_import(). The 'newname' parameters control whether the pool
1641 1645 * is imported with a different name.
1642 1646 */
1643 1647 int
1644 1648 zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
1645 1649 nvlist_t *props, int flags)
1646 1650 {
1647 1651 zfs_cmd_t zc = { 0 };
1648 1652 zpool_rewind_policy_t policy;
1649 1653 nvlist_t *nv = NULL;
1650 1654 nvlist_t *nvinfo = NULL;
1651 1655 nvlist_t *missing = NULL;
1652 1656 char *thename;
1653 1657 char *origname;
1654 1658 int ret;
1655 1659 int error = 0;
1656 1660 char errbuf[1024];
1657 1661
1658 1662 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
1659 1663 &origname) == 0);
1660 1664
1661 1665 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
1662 1666 "cannot import pool '%s'"), origname);
1663 1667
1664 1668 if (newname != NULL) {
1665 1669 if (!zpool_name_valid(hdl, B_FALSE, newname))
1666 1670 return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
1667 1671 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1668 1672 newname));
1669 1673 thename = (char *)newname;
1670 1674 } else {
1671 1675 thename = origname;
1672 1676 }
1673 1677
1674 1678 if (props) {
1675 1679 uint64_t version;
1676 1680 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
1677 1681
1678 1682 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
1679 1683 &version) == 0);
1680 1684
1681 1685 if ((props = zpool_valid_proplist(hdl, origname,
1682 1686 props, version, flags, errbuf)) == NULL) {
1683 1687 return (-1);
1684 1688 } else if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
1685 1689 nvlist_free(props);
1686 1690 return (-1);
1687 1691 }
1688 1692 }
1689 1693
1690 1694 (void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
1691 1695
1692 1696 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
1693 1697 &zc.zc_guid) == 0);
1694 1698
1695 1699 if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
1696 1700 nvlist_free(props);
1697 1701 return (-1);
1698 1702 }
1699 1703 if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
1700 1704 nvlist_free(props);
1701 1705 return (-1);
1702 1706 }
1703 1707
1704 1708 zc.zc_cookie = flags;
1705 1709 while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
1706 1710 errno == ENOMEM) {
1707 1711 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
1708 1712 zcmd_free_nvlists(&zc);
1709 1713 return (-1);
1710 1714 }
1711 1715 }
1712 1716 if (ret != 0)
1713 1717 error = errno;
1714 1718
1715 1719 (void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
1716 1720 zpool_get_rewind_policy(config, &policy);
1717 1721
1718 1722 if (error) {
1719 1723 char desc[1024];
1720 1724
1721 1725 /*
1722 1726 * Dry-run failed, but we print out what success
1723 1727 * looks like if we found a best txg
1724 1728 */
1725 1729 if (policy.zrp_request & ZPOOL_TRY_REWIND) {
1726 1730 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1727 1731 B_TRUE, nv);
1728 1732 nvlist_free(nv);
1729 1733 return (-1);
1730 1734 }
1731 1735
1732 1736 if (newname == NULL)
1733 1737 (void) snprintf(desc, sizeof (desc),
1734 1738 dgettext(TEXT_DOMAIN, "cannot import '%s'"),
1735 1739 thename);
1736 1740 else
1737 1741 (void) snprintf(desc, sizeof (desc),
1738 1742 dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
1739 1743 origname, thename);
1740 1744
1741 1745 switch (error) {
1742 1746 case ENOTSUP:
1743 1747 if (nv != NULL && nvlist_lookup_nvlist(nv,
1744 1748 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1745 1749 nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
1746 1750 (void) printf(dgettext(TEXT_DOMAIN, "This "
1747 1751 "pool uses the following feature(s) not "
1748 1752 "supported by this system:\n"));
1749 1753 zpool_print_unsup_feat(nv);
1750 1754 if (nvlist_exists(nvinfo,
1751 1755 ZPOOL_CONFIG_CAN_RDONLY)) {
1752 1756 (void) printf(dgettext(TEXT_DOMAIN,
1753 1757 "All unsupported features are only "
1754 1758 "required for writing to the pool."
1755 1759 "\nThe pool can be imported using "
1756 1760 "'-o readonly=on'.\n"));
1757 1761 }
1758 1762 }
1759 1763 /*
1760 1764 * Unsupported version.
1761 1765 */
1762 1766 (void) zfs_error(hdl, EZFS_BADVERSION, desc);
1763 1767 break;
1764 1768
1765 1769 case EINVAL:
1766 1770 (void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
1767 1771 break;
1768 1772
1769 1773 case EROFS:
1770 1774 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
1771 1775 "one or more devices is read only"));
1772 1776 (void) zfs_error(hdl, EZFS_BADDEV, desc);
1773 1777 break;
1774 1778
1775 1779 case ENXIO:
1776 1780 if (nv && nvlist_lookup_nvlist(nv,
1777 1781 ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
1778 1782 nvlist_lookup_nvlist(nvinfo,
1779 1783 ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
1780 1784 (void) printf(dgettext(TEXT_DOMAIN,
1781 1785 "The devices below are missing, use "
1782 1786 "'-m' to import the pool anyway:\n"));
1783 1787 print_vdev_tree(hdl, NULL, missing, 2);
1784 1788 (void) printf("\n");
1785 1789 }
1786 1790 (void) zpool_standard_error(hdl, error, desc);
1787 1791 break;
1788 1792
1789 1793 case EEXIST:
1790 1794 (void) zpool_standard_error(hdl, error, desc);
1791 1795 break;
1792 1796
1793 1797 default:
1794 1798 (void) zpool_standard_error(hdl, error, desc);
1795 1799 zpool_explain_recover(hdl,
1796 1800 newname ? origname : thename, -error, nv);
1797 1801 break;
1798 1802 }
1799 1803
1800 1804 nvlist_free(nv);
1801 1805 ret = -1;
1802 1806 } else {
1803 1807 zpool_handle_t *zhp;
1804 1808
1805 1809 /*
1806 1810 * This should never fail, but play it safe anyway.
1807 1811 */
1808 1812 if (zpool_open_silent(hdl, thename, &zhp) != 0)
1809 1813 ret = -1;
1810 1814 else if (zhp != NULL)
1811 1815 zpool_close(zhp);
1812 1816 if (policy.zrp_request &
1813 1817 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
1814 1818 zpool_rewind_exclaim(hdl, newname ? origname : thename,
1815 1819 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
1816 1820 }
1817 1821 nvlist_free(nv);
1818 1822 return (0);
1819 1823 }
1820 1824
1821 1825 zcmd_free_nvlists(&zc);
1822 1826 nvlist_free(props);
1823 1827
1824 1828 return (ret);
1825 1829 }
1826 1830
/*
 * Scan the pool.
 *
 * 'func' selects the operation: POOL_SCAN_SCRUB starts a scrub,
 * POOL_SCAN_NONE cancels one.  Returns 0 on success, otherwise an
 * error is recorded on the pool's libzfs handle.
 */
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	zc.zc_cookie = func;

	/*
	 * ENOENT is tolerated for non-cancel requests; for a cancel
	 * (POOL_SCAN_NONE) it means there was no scrub to cancel and is
	 * reported as an error below.
	 */
	if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0 ||
	    (errno == ENOENT && func != POOL_SCAN_NONE))
		return (0);

	/*
	 * NOTE(review): errno is read again after these snprintf() calls;
	 * this relies on snprintf not clobbering errno here -- confirm.
	 */
	if (func == POOL_SCAN_SCRUB) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot scrub %s"), zc.zc_name);
	} else if (func == POOL_SCAN_NONE) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
		    zc.zc_name);
	} else {
		assert(!"unexpected result");
	}

	if (errno == EBUSY) {
		nvlist_t *nvroot;
		pool_scan_stat_t *ps = NULL;
		uint_t psc;

		/*
		 * EBUSY means a scan is already in progress; check the
		 * scan stats to report whether it is a scrub or a resilver.
		 */
		verify(nvlist_lookup_nvlist(zhp->zpool_config,
		    ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
		(void) nvlist_lookup_uint64_array(nvroot,
		    ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
		if (ps && ps->pss_func == POOL_SCAN_SCRUB)
			return (zfs_error(hdl, EZFS_SCRUBBING, msg));
		else
			return (zfs_error(hdl, EZFS_RESILVERING, msg));
	} else if (errno == ENOENT) {
		return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
	} else {
		return (zpool_standard_error(hdl, errno, msg));
	}
}
1874 1878
/*
 * This provides a very minimal check whether a given string is likely a
 * c#t#d# style string. Users of this are expected to do their own
 * verification of the s# part.
 *
 * The argument is fully parenthesized, and the isdigit() operand is cast
 * to unsigned char: passing a plain char that may be negative to the
 * <ctype.h> classifiers is undefined behavior (CERT STR37-C).  The
 * argument may be evaluated more than once, so pass only simple lvalues.
 */
#define	CTD_CHECK(str)	\
	((str) && (str)[0] == 'c' && isdigit((unsigned char)(str)[1]))
1881 1885
/*
 * More elaborate version for ones which may start with "/dev/dsk/"
 * and the like.
 */
static int
ctd_check_path(char *str) {
	/*
	 * If it starts with a slash, check the last component.
	 */
	if (str != NULL && *str == '/') {
		char *comp = strrchr(str, '/');

		/*
		 * If it ends in "/old", check the second-to-last
		 * component of the string instead.
		 */
		if (comp != str && strcmp(comp, "/old") == 0) {
			do {
				comp--;
			} while (*comp != '/');
		}
		str = comp + 1;
	}
	return (CTD_CHECK(str));
}
1906 1910
1907 1911 /*
1908 1912 * Find a vdev that matches the search criteria specified. We use the
1909 1913 * the nvpair name to determine how we should look for the device.
1910 1914 * 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
1911 1915 * spare; but FALSE if its an INUSE spare.
1912 1916 */
1913 1917 static nvlist_t *
1914 1918 vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
1915 1919 boolean_t *l2cache, boolean_t *log)
1916 1920 {
1917 1921 uint_t c, children;
1918 1922 nvlist_t **child;
1919 1923 nvlist_t *ret;
1920 1924 uint64_t is_log;
1921 1925 char *srchkey;
1922 1926 nvpair_t *pair = nvlist_next_nvpair(search, NULL);
1923 1927
1924 1928 /* Nothing to look for */
1925 1929 if (search == NULL || pair == NULL)
1926 1930 return (NULL);
1927 1931
1928 1932 /* Obtain the key we will use to search */
1929 1933 srchkey = nvpair_name(pair);
1930 1934
1931 1935 switch (nvpair_type(pair)) {
1932 1936 case DATA_TYPE_UINT64:
1933 1937 if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
1934 1938 uint64_t srchval, theguid;
1935 1939
1936 1940 verify(nvpair_value_uint64(pair, &srchval) == 0);
1937 1941 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
1938 1942 &theguid) == 0);
1939 1943 if (theguid == srchval)
1940 1944 return (nv);
1941 1945 }
1942 1946 break;
1943 1947
1944 1948 case DATA_TYPE_STRING: {
1945 1949 char *srchval, *val;
1946 1950
1947 1951 verify(nvpair_value_string(pair, &srchval) == 0);
1948 1952 if (nvlist_lookup_string(nv, srchkey, &val) != 0)
1949 1953 break;
1950 1954
1951 1955 /*
1952 1956 * Search for the requested value. Special cases:
1953 1957 *
1954 1958 * - ZPOOL_CONFIG_PATH for whole disk entries. These end in
1955 1959 * "s0" or "s0/old". The "s0" part is hidden from the user,
1956 1960 * but included in the string, so this matches around it.
1957 1961 * - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
1958 1962 *
1959 1963 * Otherwise, all other searches are simple string compares.
1960 1964 */
1961 1965 if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0 &&
1962 1966 ctd_check_path(val)) {
1963 1967 uint64_t wholedisk = 0;
1964 1968
1965 1969 (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
1966 1970 &wholedisk);
1967 1971 if (wholedisk) {
1968 1972 int slen = strlen(srchval);
1969 1973 int vlen = strlen(val);
1970 1974
1971 1975 if (slen != vlen - 2)
1972 1976 break;
1973 1977
1974 1978 /*
1975 1979 * make_leaf_vdev() should only set
1976 1980 * wholedisk for ZPOOL_CONFIG_PATHs which
1977 1981 * will include "/dev/dsk/", giving plenty of
1978 1982 * room for the indices used next.
1979 1983 */
1980 1984 ASSERT(vlen >= 6);
1981 1985
1982 1986 /*
1983 1987 * strings identical except trailing "s0"
1984 1988 */
1985 1989 if (strcmp(&val[vlen - 2], "s0") == 0 &&
1986 1990 strncmp(srchval, val, slen) == 0)
1987 1991 return (nv);
1988 1992
1989 1993 /*
1990 1994 * strings identical except trailing "s0/old"
1991 1995 */
1992 1996 if (strcmp(&val[vlen - 6], "s0/old") == 0 &&
1993 1997 strcmp(&srchval[slen - 4], "/old") == 0 &&
1994 1998 strncmp(srchval, val, slen - 4) == 0)
1995 1999 return (nv);
1996 2000
1997 2001 break;
1998 2002 }
1999 2003 } else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
2000 2004 char *type, *idx, *end, *p;
2001 2005 uint64_t id, vdev_id;
2002 2006
2003 2007 /*
2004 2008 * Determine our vdev type, keeping in mind
2005 2009 * that the srchval is composed of a type and
2006 2010 * vdev id pair (i.e. mirror-4).
2007 2011 */
2008 2012 if ((type = strdup(srchval)) == NULL)
2009 2013 return (NULL);
2010 2014
2011 2015 if ((p = strrchr(type, '-')) == NULL) {
2012 2016 free(type);
2013 2017 break;
2014 2018 }
2015 2019 idx = p + 1;
2016 2020 *p = '\0';
2017 2021
2018 2022 /*
2019 2023 * If the types don't match then keep looking.
2020 2024 */
2021 2025 if (strncmp(val, type, strlen(val)) != 0) {
2022 2026 free(type);
2023 2027 break;
2024 2028 }
2025 2029
2026 2030 verify(strncmp(type, VDEV_TYPE_RAIDZ,
2027 2031 strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2028 2032 strncmp(type, VDEV_TYPE_MIRROR,
2029 2033 strlen(VDEV_TYPE_MIRROR)) == 0);
2030 2034 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
2031 2035 &id) == 0);
2032 2036
2033 2037 errno = 0;
2034 2038 vdev_id = strtoull(idx, &end, 10);
2035 2039
2036 2040 free(type);
2037 2041 if (errno != 0)
2038 2042 return (NULL);
2039 2043
2040 2044 /*
2041 2045 * Now verify that we have the correct vdev id.
2042 2046 */
2043 2047 if (vdev_id == id)
2044 2048 return (nv);
2045 2049 }
2046 2050
2047 2051 /*
2048 2052 * Common case
2049 2053 */
2050 2054 if (strcmp(srchval, val) == 0)
2051 2055 return (nv);
2052 2056 break;
2053 2057 }
2054 2058
2055 2059 default:
2056 2060 break;
2057 2061 }
2058 2062
2059 2063 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
2060 2064 &child, &children) != 0)
2061 2065 return (NULL);
2062 2066
2063 2067 for (c = 0; c < children; c++) {
2064 2068 if ((ret = vdev_to_nvlist_iter(child[c], search,
2065 2069 avail_spare, l2cache, NULL)) != NULL) {
2066 2070 /*
2067 2071 * The 'is_log' value is only set for the toplevel
2068 2072 * vdev, not the leaf vdevs. So we always lookup the
2069 2073 * log device from the root of the vdev tree (where
2070 2074 * 'log' is non-NULL).
2071 2075 */
2072 2076 if (log != NULL &&
2073 2077 nvlist_lookup_uint64(child[c],
2074 2078 ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
2075 2079 is_log) {
2076 2080 *log = B_TRUE;
2077 2081 }
2078 2082 return (ret);
2079 2083 }
2080 2084 }
2081 2085
2082 2086 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
2083 2087 &child, &children) == 0) {
2084 2088 for (c = 0; c < children; c++) {
2085 2089 if ((ret = vdev_to_nvlist_iter(child[c], search,
2086 2090 avail_spare, l2cache, NULL)) != NULL) {
2087 2091 *avail_spare = B_TRUE;
2088 2092 return (ret);
2089 2093 }
2090 2094 }
2091 2095 }
2092 2096
2093 2097 if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
2094 2098 &child, &children) == 0) {
2095 2099 for (c = 0; c < children; c++) {
2096 2100 if ((ret = vdev_to_nvlist_iter(child[c], search,
2097 2101 avail_spare, l2cache, NULL)) != NULL) {
2098 2102 *l2cache = B_TRUE;
2099 2103 return (ret);
2100 2104 }
2101 2105 }
2102 2106 }
2103 2107
2104 2108 return (NULL);
2105 2109 }
2106 2110
/*
 * Given a physical path (minus the "/devices" prefix), find the
 * associated vdev.  Returns the matching vdev's config nvlist, or NULL
 * if no vdev has that ZPOOL_CONFIG_PHYS_PATH.  *avail_spare and
 * *l2cache (and *log, if non-NULL) report which class of vdev matched.
 */
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
    boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
	nvlist_t *search, *nvroot, *ret;

	/*
	 * Build a single-entry search list keyed on the physical path;
	 * vdev_to_nvlist_iter() compares it against each vdev.
	 * NOTE(review): KM_SLEEP is a kernel kmem flag, while userland
	 * nvlist_alloc() takes malloc-style flags -- presumably this
	 * relies on KM_SLEEP being 0; confirm against sys/kmem.h.
	 */
	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
	verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	/* Clear the out-parameters; the iterator sets them on a match. */
	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
2132 2136
2133 2137 /*
2134 2138 * Determine if we have an "interior" top-level vdev (i.e mirror/raidz).
2135 2139 */
2136 2140 boolean_t
2137 2141 zpool_vdev_is_interior(const char *name)
2138 2142 {
2139 2143 if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
2140 2144 strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
2141 2145 return (B_TRUE);
2142 2146 return (B_FALSE);
2143 2147 }
2144 2148
/*
 * Locate a vdev by user-supplied name.  The name may be a numeric guid,
 * an interior vdev name ("mirror-N"/"raidz-N"), a device name relative
 * to /dev/dsk, or an absolute path.  Returns the vdev's config nvlist
 * or NULL; *avail_spare, *l2cache, and (if non-NULL) *log report where
 * the match was found.
 */
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
    boolean_t *l2cache, boolean_t *log)
{
	char buf[MAXPATHLEN];
	char *end;
	nvlist_t *nvroot, *search, *ret;
	uint64_t guid;

	verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	/*
	 * A string that parses completely as a non-zero decimal number is
	 * treated as a guid; anything else falls through to name matching.
	 */
	guid = strtoull(path, &end, 10);
	if (guid != 0 && *end == '\0') {
		verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
	} else if (zpool_vdev_is_interior(path)) {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
	} else if (path[0] != '/') {
		/* Relative device names are resolved under /dev/dsk. */
		(void) snprintf(buf, sizeof (buf), "%s%s", "/dev/dsk/", path);
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, buf) == 0);
	} else {
		verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
	}

	verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
	    &nvroot) == 0);

	/* Clear the out-parameters; the iterator sets them on a match. */
	*avail_spare = B_FALSE;
	*l2cache = B_FALSE;
	if (log != NULL)
		*log = B_FALSE;
	ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
	nvlist_free(search);

	return (ret);
}
2180 2184
2181 2185 static int
2182 2186 vdev_online(nvlist_t *nv)
2183 2187 {
2184 2188 uint64_t ival;
2185 2189
2186 2190 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
2187 2191 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
2188 2192 nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
2189 2193 return (0);
2190 2194
2191 2195 return (1);
2192 2196 }
2193 2197
/*
 * Helper function for zpool_get_physpaths().
 *
 * Append this vdev's ZPOOL_CONFIG_PHYS_PATH to the space-separated list
 * accumulating in 'physpath' (total capacity 'physpath_size').
 * *bytes_written is the amount of the buffer already used and is
 * advanced by the formatted length.  Returns 0 on success,
 * EZFS_NODEVICE if the vdev has no physical path, or EZFS_NOSPC if the
 * buffer is too small to hold the new entry.
 */
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
    size_t *bytes_written)
{
	size_t bytes_left, pos, rsz;
	char *tmppath;
	const char *format;

	if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
	    &tmppath) != 0)
		return (EZFS_NODEVICE);

	pos = *bytes_written;
	bytes_left = physpath_size - pos;
	/* Entries after the first are separated by a single space. */
	format = (pos == 0) ? "%s" : " %s";

	/* snprintf returns the would-be length, so rsz >= bytes_left
	 * detects truncation. */
	rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
	*bytes_written += rsz;

	if (rsz >= bytes_left) {
		/* if physpath was not copied properly, clear it */
		if (bytes_left != 0) {
			physpath[pos] = 0;
		}
		return (EZFS_NOSPC);
	}
	return (0);
}
2225 2229
/*
 * Recursively collect the physical paths of all online disks under 'nv'
 * into 'physpath' via vdev_get_one_physpath().  'is_spare' limits disk
 * collection beneath a spare vdev to the active spare device.
 *
 * NOTE(review): the fall-through return is EZFS_POOL_INVALARG even when
 * paths were gathered successfully; the caller ignores the return value
 * and judges success by *rsz instead -- confirm before relying on the
 * return code.
 */
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
    size_t *rsz, boolean_t is_spare)
{
	char *type;
	int ret;

	if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
		return (EZFS_INVALCONFIG);

	if (strcmp(type, VDEV_TYPE_DISK) == 0) {
		/*
		 * An active spare device has ZPOOL_CONFIG_IS_SPARE set.
		 * For a spare vdev, we only want to boot from the active
		 * spare device.
		 */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare);
			if (!spare)
				return (EZFS_INVALCONFIG);
		}

		if (vdev_online(nv)) {
			if ((ret = vdev_get_one_physpath(nv, physpath,
			    phypath_size, rsz)) != 0)
				return (ret);
		}
	} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		/*
		 * The assignment to is_spare in the condition above is
		 * intentional: it flags the recursion below when this
		 * level is a spare vdev.
		 */
		nvlist_t **child;
		uint_t count;
		int i, ret;

		if (nvlist_lookup_nvlist_array(nv,
		    ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
			return (EZFS_INVALCONFIG);

		for (i = 0; i < count; i++) {
			/* Only buffer exhaustion aborts the walk early. */
			ret = vdev_get_physpaths(child[i], physpath,
			    phypath_size, rsz, is_spare);
			if (ret == EZFS_NOSPC)
				return (ret);
		}
	}

	return (EZFS_POOL_INVALARG);
}
2276 2280
/*
 * Get phys_path for a root pool config.
 * Return 0 on success; non-zero on failure.
 */
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
	size_t rsz;
	nvlist_t *vdev_root;
	nvlist_t **child;
	uint_t count;
	char *type;

	rsz = 0;

	if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
	    &vdev_root) != 0)
		return (EZFS_INVALCONFIG);

	if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
	    nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
	    &child, &count) != 0)
		return (EZFS_INVALCONFIG);

	/*
	 * root pool can not have EFI labeled disks and can only have
	 * a single top-level vdev.
	 */
	if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1 ||
	    pool_uses_efi(vdev_root))
		return (EZFS_POOL_INVALARG);

	/*
	 * The helper's return value is deliberately discarded; success is
	 * judged by how many bytes it wrote (rsz).
	 */
	(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
	    B_FALSE);

	/* No online devices */
	if (rsz == 0)
		return (EZFS_NODEVICE);

	return (0);
}
2318 2322
2319 2323 /*
2320 2324 * Get phys_path for a root pool
2321 2325 * Return 0 on success; non-zero on failure.
2322 2326 */
2323 2327 int
2324 2328 zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
2325 2329 {
2326 2330 return (zpool_get_config_physpath(zhp->zpool_config, physpath,
2327 2331 phypath_size));
2328 2332 }
2329 2333
2330 2334 /*
2331 2335 * If the device has being dynamically expanded then we need to relabel
2332 2336 * the disk to use the new unallocated space.
2333 2337 */
2334 2338 static int
2335 2339 zpool_relabel_disk(libzfs_handle_t *hdl, const char *name)
2336 2340 {
2337 2341 char path[MAXPATHLEN];
2338 2342 char errbuf[1024];
2339 2343 int fd, error;
2340 2344 int (*_efi_use_whole_disk)(int);
2341 2345
2342 2346 if ((_efi_use_whole_disk = (int (*)(int))dlsym(RTLD_DEFAULT,
2343 2347 "efi_use_whole_disk")) == NULL)
2344 2348 return (-1);
2345 2349
2346 2350 (void) snprintf(path, sizeof (path), "%s/%s", RDISK_ROOT, name);
2347 2351
2348 2352 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
2349 2353 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2350 2354 "relabel '%s': unable to open device"), name);
2351 2355 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
2352 2356 }
2353 2357
2354 2358 /*
2355 2359 * It's possible that we might encounter an error if the device
2356 2360 * does not have any unallocated space left. If so, we simply
2357 2361 * ignore that error and continue on.
2358 2362 */
2359 2363 error = _efi_use_whole_disk(fd);
2360 2364 (void) close(fd);
2361 2365 if (error && error != VT_ENOSPC) {
2362 2366 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
2363 2367 "relabel '%s': unable to read disk capacity"), name);
2364 2368 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
2365 2369 }
2366 2370 return (0);
2367 2371 }
2368 2372
/*
 * Bring the specified vdev online. The 'flags' parameter is a set of the
 * ZFS_ONLINE_* flags.  On success *newstate holds the resulting vdev
 * state as reported by the kernel.
 */
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
    vdev_state_t *newstate)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	/* The error prefix differs between expand and plain online. */
	if (flags & ZFS_ONLINE_EXPAND) {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
	} else {
		(void) snprintf(msg, sizeof (msg),
		    dgettext(TEXT_DOMAIN, "cannot online %s"), path);
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    &islog)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	/*
	 * When expanding (explicitly, or implicitly via the autoexpand
	 * pool property), whole disks must be relabeled to expose any
	 * newly available capacity before the state change.
	 */
	if (flags & ZFS_ONLINE_EXPAND ||
	    zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) {
		char *pathname = NULL;
		uint64_t wholedisk = 0;

		(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
		    &wholedisk);
		verify(nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH,
		    &pathname) == 0);

		/*
		 * XXX - L2ARC 1.0 devices can't support expansion.
		 */
		if (l2cache) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "cannot expand cache devices"));
			return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
		}

		if (wholedisk) {
			/* Skip past the DISK_ROOT prefix and separator. */
			pathname += strlen(DISK_ROOT) + 1;
			(void) zpool_relabel_disk(hdl, pathname);
		}
	}

	zc.zc_cookie = VDEV_STATE_ONLINE;
	zc.zc_obj = flags;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
		/* EINVAL here indicates the device now lives in a split. */
		if (errno == EINVAL) {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
			    "from this pool into a new one. Use '%s' "
			    "instead"), "zpool detach");
			return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
		}
		return (zpool_standard_error(hdl, errno, msg));
	}

	/* The kernel returns the resulting state in zc_cookie. */
	*newstate = zc.zc_cookie;
	return (0);
}
2442 2446
/*
 * Take the specified vdev offline.  'istmp' requests a temporary
 * offline via ZFS_OFFLINE_TEMPORARY.
 */
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot offline %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == NULL)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	/* Available spares cannot be taken offline. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	zc.zc_cookie = VDEV_STATE_OFFLINE;
	zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
		return (0);

	/* Map the kernel errno to a descriptive libzfs error. */
	switch (errno) {
	case EBUSY:

		/*
		 * There are no other replicas of this device.
		 */
		return (zfs_error(hdl, EZFS_NOREPLICAS, msg));

	case EEXIST:
		/*
		 * The log device has unplayed logs
		 */
		return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));

	default:
		return (zpool_standard_error(hdl, errno, msg));
	}
}
2492 2496
2493 2497 /*
2494 2498 * Mark the given vdev faulted.
2495 2499 */
2496 2500 int
2497 2501 zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2498 2502 {
2499 2503 zfs_cmd_t zc = { 0 };
2500 2504 char msg[1024];
2501 2505 libzfs_handle_t *hdl = zhp->zpool_hdl;
2502 2506
2503 2507 (void) snprintf(msg, sizeof (msg),
2504 2508 dgettext(TEXT_DOMAIN, "cannot fault %llu"), guid);
2505 2509
2506 2510 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2507 2511 zc.zc_guid = guid;
2508 2512 zc.zc_cookie = VDEV_STATE_FAULTED;
2509 2513 zc.zc_obj = aux;
2510 2514
2511 2515 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2512 2516 return (0);
2513 2517
2514 2518 switch (errno) {
2515 2519 case EBUSY:
2516 2520
2517 2521 /*
2518 2522 * There are no other replicas of this device.
2519 2523 */
2520 2524 return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
2521 2525
2522 2526 default:
2523 2527 return (zpool_standard_error(hdl, errno, msg));
2524 2528 }
2525 2529
2526 2530 }
2527 2531
2528 2532 /*
2529 2533 * Mark the given vdev degraded.
2530 2534 */
2531 2535 int
2532 2536 zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
2533 2537 {
2534 2538 zfs_cmd_t zc = { 0 };
2535 2539 char msg[1024];
2536 2540 libzfs_handle_t *hdl = zhp->zpool_hdl;
2537 2541
2538 2542 (void) snprintf(msg, sizeof (msg),
2539 2543 dgettext(TEXT_DOMAIN, "cannot degrade %llu"), guid);
2540 2544
2541 2545 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
2542 2546 zc.zc_guid = guid;
2543 2547 zc.zc_cookie = VDEV_STATE_DEGRADED;
2544 2548 zc.zc_obj = aux;
2545 2549
2546 2550 if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
2547 2551 return (0);
2548 2552
2549 2553 return (zpool_standard_error(hdl, errno, msg));
2550 2554 }
2551 2555
2552 2556 /*
2553 2557 * Returns TRUE if the given nvlist is a vdev that was originally swapped in as
2554 2558 * a hot spare.
2555 2559 */
2556 2560 static boolean_t
2557 2561 is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
2558 2562 {
2559 2563 nvlist_t **child;
2560 2564 uint_t c, children;
2561 2565 char *type;
2562 2566
2563 2567 if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
2564 2568 &children) == 0) {
2565 2569 verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
2566 2570 &type) == 0);
2567 2571
2568 2572 if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
2569 2573 children == 2 && child[which] == tgt)
2570 2574 return (B_TRUE);
2571 2575
2572 2576 for (c = 0; c < children; c++)
2573 2577 if (is_replacing_spare(child[c], tgt, which))
2574 2578 return (B_TRUE);
2575 2579 }
2576 2580
2577 2581 return (B_FALSE);
2578 2582 }
2579 2583
/*
 * Attach new_disk (fully described by nvroot) to old_disk.
 * If 'replacing' is specified, the new disk will replace the old one.
 * Returns 0 on success, -1 on failure (with the libzfs error set).
 */
int
zpool_vdev_attach(zpool_handle_t *zhp,
    const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	int ret;
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache, islog;
	uint64_t val;
	char *newname;
	nvlist_t **child;
	uint_t children;
	nvlist_t *config_root;
	libzfs_handle_t *hdl = zhp->zpool_hdl;
	boolean_t rootpool = zpool_is_bootable(zhp);

	/* The error prefix differs between replace and attach. */
	if (replacing)
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot replace %s with %s"), old_disk, new_disk);
	else
		(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
		    "cannot attach %s to %s"), new_disk, old_disk);

	/*
	 * If this is a root pool, make sure that we're not attaching an
	 * EFI labeled device.
	 */
	if (rootpool && pool_uses_efi(nvroot)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "EFI labeled devices are not supported on root pools."));
		return (zfs_error(hdl, EZFS_POOL_NOTSUP, msg));
	}

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
	    &islog)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Can't attach to or replace a spare or cache device directly. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
	zc.zc_cookie = replacing;

	if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
	    &child, &children) != 0 || children != 1) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
	}

	verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
	    ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);

	if ((newname = zpool_vdev_name(NULL, NULL, child[0], B_FALSE)) == NULL)
		return (-1);

	/*
	 * If the target is a hot spare that has been swapped in, we can only
	 * replace it with another hot spare.
	 */
	if (replacing &&
	    nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
	    (zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
	    NULL) == NULL || !avail_spare) &&
	    is_replacing_spare(config_root, tgt, 1)) {
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "can only be replaced by another hot spare"));
		free(newname);
		return (zfs_error(hdl, EZFS_BADTARGET, msg));
	}

	free(newname);

	if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
		return (-1);

	ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);

	zcmd_free_nvlists(&zc);

	if (ret == 0) {
		if (rootpool) {
			/*
			 * XXX need a better way to prevent user from
			 * booting up a half-baked vdev.
			 */
			(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
			    "sure to wait until resilver is done "
			    "before rebooting.\n"));
		}
		return (0);
	}

	/* Translate the kernel errno into a descriptive libzfs error. */
	switch (errno) {
	case ENOTSUP:
		/*
		 * Can't attach to or replace this type of vdev.
		 */
		if (replacing) {
			uint64_t version = zpool_get_prop_int(zhp,
			    ZPOOL_PROP_VERSION, NULL);

			if (islog)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a log with a spare"));
			else if (version >= SPA_VERSION_MULTI_REPLACE)
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "already in replacing/spare config; wait "
				    "for completion or use 'zpool detach'"));
			else
				zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
				    "cannot replace a replacing device"));
		} else {
			zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
			    "can only attach to mirrors and top-level "
			    "disks"));
		}
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EINVAL:
		/*
		 * The new device must be a single disk.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "new device must be a single disk"));
		(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
		break;

	case EBUSY:
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
		    new_disk);
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EOVERFLOW:
		/*
		 * The new device is too small.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "device is too small"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case EDOM:
		/*
		 * The new device has a different alignment requirement.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
		    "devices have different sector alignment"));
		(void) zfs_error(hdl, EZFS_BADDEV, msg);
		break;

	case ENAMETOOLONG:
		/*
		 * The resulting top-level vdev spec won't fit in the label.
		 */
		(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
2755 2759
/*
 * Detach the specified device.  Returns 0 on success, -1 on failure
 * (with the libzfs error set).
 */
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
	zfs_cmd_t zc = { 0 };
	char msg[1024];
	nvlist_t *tgt;
	boolean_t avail_spare, l2cache;
	libzfs_handle_t *hdl = zhp->zpool_hdl;

	(void) snprintf(msg, sizeof (msg),
	    dgettext(TEXT_DOMAIN, "cannot detach %s"), path);

	(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
	if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
	    NULL)) == 0)
		return (zfs_error(hdl, EZFS_NODEVICE, msg));

	/* Spares and cache devices are removed, not detached. */
	if (avail_spare)
		return (zfs_error(hdl, EZFS_ISSPARE, msg));

	if (l2cache)
		return (zfs_error(hdl, EZFS_ISL2CACHE, msg));

	verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);

	if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
		return (0);

	/* Map the kernel errno to a descriptive libzfs error. */
	switch (errno) {

	case ENOTSUP:
		/*
		 * Can't detach from this type of vdev.
		 */
		zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
		    "applicable to mirror and replacing vdevs"));
		(void) zfs_error(hdl, EZFS_BADTARGET, msg);
		break;

	case EBUSY:
		/*
		 * There are no other replicas of this device.
		 */
		(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
		break;

	default:
		(void) zpool_standard_error(hdl, errno, msg);
	}

	return (-1);
}
2811 2815
2812 2816 /*
2813 2817 * Find a mirror vdev in the source nvlist.
2814 2818 *
2815 2819 * The mchild array contains a list of disks in one of the top-level mirrors
2816 2820 * of the source pool. The schild array contains a list of disks that the
2817 2821 * user specified on the command line. We loop over the mchild array to
2818 2822 * see if any entry in the schild array matches.
2819 2823 *
2820 2824 * If a disk in the mchild array is found in the schild array, we return
2821 2825 * the index of that entry. Otherwise we return -1.
2822 2826 */
2823 2827 static int
2824 2828 find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
2825 2829 nvlist_t **schild, uint_t schildren)
2826 2830 {
2827 2831 uint_t mc;
2828 2832
2829 2833 for (mc = 0; mc < mchildren; mc++) {
2830 2834 uint_t sc;
2831 2835 char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2832 2836 mchild[mc], B_FALSE);
2833 2837
2834 2838 for (sc = 0; sc < schildren; sc++) {
2835 2839 char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
2836 2840 schild[sc], B_FALSE);
2837 2841 boolean_t result = (strcmp(mpath, spath) == 0);
2838 2842
2839 2843 free(spath);
2840 2844 if (result) {
2841 2845 free(mpath);
2842 2846 return (mc);
2843 2847 }
2844 2848 }
2845 2849
2846 2850 free(mpath);
2847 2851 }
2848 2852
2849 2853 return (-1);
2850 2854 }
2851 2855
2852 2856 /*
2853 2857 * Split a mirror pool. If newroot points to null, then a new nvlist
2854 2858 * is generated and it is the responsibility of the caller to free it.
2855 2859 */
2856 2860 int
2857 2861 zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
2858 2862 nvlist_t *props, splitflags_t flags)
2859 2863 {
2860 2864 zfs_cmd_t zc = { 0 };
2861 2865 char msg[1024];
2862 2866 nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
2863 2867 nvlist_t **varray = NULL, *zc_props = NULL;
2864 2868 uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
2865 2869 libzfs_handle_t *hdl = zhp->zpool_hdl;
2866 2870 uint64_t vers;
2867 2871 boolean_t freelist = B_FALSE, memory_err = B_TRUE;
2868 2872 int retval = 0;
2869 2873
2870 2874 (void) snprintf(msg, sizeof (msg),
2871 2875 dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
2872 2876
2873 2877 if (!zpool_name_valid(hdl, B_FALSE, newname))
2874 2878 return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
2875 2879
2876 2880 if ((config = zpool_get_config(zhp, NULL)) == NULL) {
2877 2881 (void) fprintf(stderr, gettext("Internal error: unable to "
2878 2882 "retrieve pool configuration\n"));
2879 2883 return (-1);
2880 2884 }
2881 2885
2882 2886 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
2883 2887 == 0);
2884 2888 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
2885 2889
2886 2890 if (props) {
2887 2891 prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
2888 2892 if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
2889 2893 props, vers, flags, msg)) == NULL)
2890 2894 return (-1);
2891 2895 }
2892 2896
2893 2897 if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
2894 2898 &children) != 0) {
2895 2899 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2896 2900 "Source pool is missing vdev tree"));
2897 2901 if (zc_props)
2898 2902 nvlist_free(zc_props);
2899 2903 return (-1);
2900 2904 }
2901 2905
2902 2906 varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
2903 2907 vcount = 0;
2904 2908
2905 2909 if (*newroot == NULL ||
2906 2910 nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
2907 2911 &newchild, &newchildren) != 0)
2908 2912 newchildren = 0;
2909 2913
2910 2914 for (c = 0; c < children; c++) {
2911 2915 uint64_t is_log = B_FALSE, is_hole = B_FALSE;
2912 2916 char *type;
2913 2917 nvlist_t **mchild, *vdev;
2914 2918 uint_t mchildren;
2915 2919 int entry;
2916 2920
2917 2921 /*
2918 2922 * Unlike cache & spares, slogs are stored in the
2919 2923 * ZPOOL_CONFIG_CHILDREN array. We filter them out here.
2920 2924 */
2921 2925 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
2922 2926 &is_log);
2923 2927 (void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
2924 2928 &is_hole);
2925 2929 if (is_log || is_hole) {
2926 2930 /*
2927 2931 * Create a hole vdev and put it in the config.
2928 2932 */
2929 2933 if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
2930 2934 goto out;
2931 2935 if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
2932 2936 VDEV_TYPE_HOLE) != 0)
2933 2937 goto out;
2934 2938 if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
2935 2939 1) != 0)
2936 2940 goto out;
2937 2941 if (lastlog == 0)
2938 2942 lastlog = vcount;
2939 2943 varray[vcount++] = vdev;
2940 2944 continue;
2941 2945 }
2942 2946 lastlog = 0;
2943 2947 verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
2944 2948 == 0);
2945 2949 if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
2946 2950 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
2947 2951 "Source pool must be composed only of mirrors\n"));
2948 2952 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2949 2953 goto out;
2950 2954 }
2951 2955
2952 2956 verify(nvlist_lookup_nvlist_array(child[c],
2953 2957 ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
2954 2958
2955 2959 /* find or add an entry for this top-level vdev */
2956 2960 if (newchildren > 0 &&
2957 2961 (entry = find_vdev_entry(zhp, mchild, mchildren,
2958 2962 newchild, newchildren)) >= 0) {
2959 2963 /* We found a disk that the user specified. */
2960 2964 vdev = mchild[entry];
2961 2965 ++found;
2962 2966 } else {
2963 2967 /* User didn't specify a disk for this vdev. */
2964 2968 vdev = mchild[mchildren - 1];
2965 2969 }
2966 2970
2967 2971 if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
2968 2972 goto out;
2969 2973 }
2970 2974
2971 2975 /* did we find every disk the user specified? */
2972 2976 if (found != newchildren) {
2973 2977 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
2974 2978 "include at most one disk from each mirror"));
2975 2979 retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
2976 2980 goto out;
2977 2981 }
2978 2982
2979 2983 /* Prepare the nvlist for populating. */
2980 2984 if (*newroot == NULL) {
2981 2985 if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
2982 2986 goto out;
2983 2987 freelist = B_TRUE;
2984 2988 if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
2985 2989 VDEV_TYPE_ROOT) != 0)
2986 2990 goto out;
2987 2991 } else {
2988 2992 verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
2989 2993 }
2990 2994
2991 2995 /* Add all the children we found */
2992 2996 if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
2993 2997 lastlog == 0 ? vcount : lastlog) != 0)
2994 2998 goto out;
2995 2999
2996 3000 /*
2997 3001 * If we're just doing a dry run, exit now with success.
2998 3002 */
2999 3003 if (flags.dryrun) {
3000 3004 memory_err = B_FALSE;
3001 3005 freelist = B_FALSE;
3002 3006 goto out;
3003 3007 }
3004 3008
3005 3009 /* now build up the config list & call the ioctl */
3006 3010 if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
3007 3011 goto out;
3008 3012
3009 3013 if (nvlist_add_nvlist(newconfig,
3010 3014 ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
3011 3015 nvlist_add_string(newconfig,
3012 3016 ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
3013 3017 nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
3014 3018 goto out;
3015 3019
3016 3020 /*
3017 3021 * The new pool is automatically part of the namespace unless we
3018 3022 * explicitly export it.
3019 3023 */
3020 3024 if (!flags.import)
3021 3025 zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
3022 3026 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3023 3027 (void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
3024 3028 if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
3025 3029 goto out;
3026 3030 if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
3027 3031 goto out;
3028 3032
3029 3033 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
3030 3034 retval = zpool_standard_error(hdl, errno, msg);
3031 3035 goto out;
3032 3036 }
3033 3037
3034 3038 freelist = B_FALSE;
3035 3039 memory_err = B_FALSE;
3036 3040
3037 3041 out:
3038 3042 if (varray != NULL) {
3039 3043 int v;
3040 3044
3041 3045 for (v = 0; v < vcount; v++)
3042 3046 nvlist_free(varray[v]);
3043 3047 free(varray);
3044 3048 }
3045 3049 zcmd_free_nvlists(&zc);
3046 3050 if (zc_props)
3047 3051 nvlist_free(zc_props);
3048 3052 if (newconfig)
3049 3053 nvlist_free(newconfig);
3050 3054 if (freelist) {
3051 3055 nvlist_free(*newroot);
3052 3056 *newroot = NULL;
3053 3057 }
3054 3058
3055 3059 if (retval != 0)
3056 3060 return (retval);
3057 3061
3058 3062 if (memory_err)
3059 3063 return (no_memory(hdl));
3060 3064
3061 3065 return (0);
3062 3066 }
3063 3067
3064 3068 /*
3065 3069 * Remove the given device. Currently, this is supported only for hot spares
3066 3070 * and level 2 cache devices.
3067 3071 */
3068 3072 int
3069 3073 zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
3070 3074 {
3071 3075 zfs_cmd_t zc = { 0 };
3072 3076 char msg[1024];
3073 3077 nvlist_t *tgt;
3074 3078 boolean_t avail_spare, l2cache, islog;
3075 3079 libzfs_handle_t *hdl = zhp->zpool_hdl;
3076 3080 uint64_t version;
3077 3081
3078 3082 (void) snprintf(msg, sizeof (msg),
3079 3083 dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
3080 3084
3081 3085 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3082 3086 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
3083 3087 &islog)) == 0)
3084 3088 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3085 3089 /*
3086 3090 * XXX - this should just go away.
3087 3091 */
3088 3092 if (!avail_spare && !l2cache && !islog) {
3089 3093 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3090 3094 "only inactive hot spares, cache, top-level, "
3091 3095 "or log devices can be removed"));
3092 3096 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3093 3097 }
3094 3098
3095 3099 version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
3096 3100 if (islog && version < SPA_VERSION_HOLES) {
3097 3101 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3098 3102 "pool must be upgrade to support log removal"));
3099 3103 return (zfs_error(hdl, EZFS_BADVERSION, msg));
3100 3104 }
3101 3105
3102 3106 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
3103 3107
3104 3108 if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
3105 3109 return (0);
3106 3110
3107 3111 return (zpool_standard_error(hdl, errno, msg));
3108 3112 }
3109 3113
3110 3114 /*
3111 3115 * Clear the errors for the pool, or the particular device if specified.
3112 3116 */
3113 3117 int
3114 3118 zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
3115 3119 {
3116 3120 zfs_cmd_t zc = { 0 };
3117 3121 char msg[1024];
3118 3122 nvlist_t *tgt;
3119 3123 zpool_rewind_policy_t policy;
3120 3124 boolean_t avail_spare, l2cache;
3121 3125 libzfs_handle_t *hdl = zhp->zpool_hdl;
3122 3126 nvlist_t *nvi = NULL;
3123 3127 int error;
3124 3128
3125 3129 if (path)
3126 3130 (void) snprintf(msg, sizeof (msg),
3127 3131 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3128 3132 path);
3129 3133 else
3130 3134 (void) snprintf(msg, sizeof (msg),
3131 3135 dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
3132 3136 zhp->zpool_name);
3133 3137
3134 3138 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3135 3139 if (path) {
3136 3140 if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
3137 3141 &l2cache, NULL)) == 0)
3138 3142 return (zfs_error(hdl, EZFS_NODEVICE, msg));
3139 3143
3140 3144 /*
3141 3145 * Don't allow error clearing for hot spares. Do allow
3142 3146 * error clearing for l2cache devices.
3143 3147 */
3144 3148 if (avail_spare)
3145 3149 return (zfs_error(hdl, EZFS_ISSPARE, msg));
3146 3150
3147 3151 verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
3148 3152 &zc.zc_guid) == 0);
3149 3153 }
3150 3154
3151 3155 zpool_get_rewind_policy(rewindnvl, &policy);
3152 3156 zc.zc_cookie = policy.zrp_request;
3153 3157
3154 3158 if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
3155 3159 return (-1);
3156 3160
3157 3161 if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
3158 3162 return (-1);
3159 3163
3160 3164 while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
3161 3165 errno == ENOMEM) {
3162 3166 if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
3163 3167 zcmd_free_nvlists(&zc);
3164 3168 return (-1);
3165 3169 }
3166 3170 }
3167 3171
3168 3172 if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
3169 3173 errno != EPERM && errno != EACCES)) {
3170 3174 if (policy.zrp_request &
3171 3175 (ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
3172 3176 (void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
3173 3177 zpool_rewind_exclaim(hdl, zc.zc_name,
3174 3178 ((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
3175 3179 nvi);
3176 3180 nvlist_free(nvi);
3177 3181 }
3178 3182 zcmd_free_nvlists(&zc);
3179 3183 return (0);
3180 3184 }
3181 3185
3182 3186 zcmd_free_nvlists(&zc);
3183 3187 return (zpool_standard_error(hdl, errno, msg));
3184 3188 }
3185 3189
3186 3190 /*
3187 3191 * Similar to zpool_clear(), but takes a GUID (used by fmd).
3188 3192 */
3189 3193 int
3190 3194 zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
3191 3195 {
3192 3196 zfs_cmd_t zc = { 0 };
3193 3197 char msg[1024];
3194 3198 libzfs_handle_t *hdl = zhp->zpool_hdl;
3195 3199
3196 3200 (void) snprintf(msg, sizeof (msg),
3197 3201 dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
3198 3202 guid);
3199 3203
3200 3204 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3201 3205 zc.zc_guid = guid;
3202 3206 zc.zc_cookie = ZPOOL_NO_REWIND;
3203 3207
3204 3208 if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
3205 3209 return (0);
3206 3210
3207 3211 return (zpool_standard_error(hdl, errno, msg));
3208 3212 }
3209 3213
3210 3214 /*
3211 3215 * Change the GUID for a pool.
3212 3216 */
3213 3217 int
3214 3218 zpool_reguid(zpool_handle_t *zhp)
3215 3219 {
3216 3220 char msg[1024];
3217 3221 libzfs_handle_t *hdl = zhp->zpool_hdl;
3218 3222 zfs_cmd_t zc = { 0 };
3219 3223
3220 3224 (void) snprintf(msg, sizeof (msg),
3221 3225 dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
3222 3226
3223 3227 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3224 3228 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
3225 3229 return (0);
3226 3230
3227 3231 return (zpool_standard_error(hdl, errno, msg));
3228 3232 }
3229 3233
3230 3234 /*
3231 3235 * Reopen the pool.
3232 3236 */
3233 3237 int
3234 3238 zpool_reopen(zpool_handle_t *zhp)
3235 3239 {
3236 3240 zfs_cmd_t zc = { 0 };
3237 3241 char msg[1024];
3238 3242 libzfs_handle_t *hdl = zhp->zpool_hdl;
3239 3243
3240 3244 (void) snprintf(msg, sizeof (msg),
3241 3245 dgettext(TEXT_DOMAIN, "cannot reopen '%s'"),
3242 3246 zhp->zpool_name);
3243 3247
3244 3248 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3245 3249 if (zfs_ioctl(hdl, ZFS_IOC_POOL_REOPEN, &zc) == 0)
3246 3250 return (0);
3247 3251 return (zpool_standard_error(hdl, errno, msg));
3248 3252 }
3249 3253
3250 3254 /*
3251 3255 * Convert from a devid string to a path.
3252 3256 */
3253 3257 static char *
3254 3258 devid_to_path(char *devid_str)
3255 3259 {
3256 3260 ddi_devid_t devid;
3257 3261 char *minor;
3258 3262 char *path;
3259 3263 devid_nmlist_t *list = NULL;
3260 3264 int ret;
3261 3265
3262 3266 if (devid_str_decode(devid_str, &devid, &minor) != 0)
3263 3267 return (NULL);
3264 3268
3265 3269 ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
3266 3270
3267 3271 devid_str_free(minor);
3268 3272 devid_free(devid);
3269 3273
3270 3274 if (ret != 0)
3271 3275 return (NULL);
3272 3276
3273 3277 if ((path = strdup(list[0].devname)) == NULL)
3274 3278 return (NULL);
3275 3279
3276 3280 devid_free_nmlist(list);
3277 3281
3278 3282 return (path);
3279 3283 }
3280 3284
3281 3285 /*
3282 3286 * Convert from a path to a devid string.
3283 3287 */
3284 3288 static char *
3285 3289 path_to_devid(const char *path)
3286 3290 {
3287 3291 int fd;
3288 3292 ddi_devid_t devid;
3289 3293 char *minor, *ret;
3290 3294
3291 3295 if ((fd = open(path, O_RDONLY)) < 0)
3292 3296 return (NULL);
3293 3297
3294 3298 minor = NULL;
3295 3299 ret = NULL;
3296 3300 if (devid_get(fd, &devid) == 0) {
3297 3301 if (devid_get_minor_name(fd, &minor) == 0)
3298 3302 ret = devid_str_encode(devid, minor);
3299 3303 if (minor != NULL)
3300 3304 devid_str_free(minor);
3301 3305 devid_free(devid);
3302 3306 }
3303 3307 (void) close(fd);
3304 3308
3305 3309 return (ret);
3306 3310 }
3307 3311
3308 3312 /*
3309 3313 * Issue the necessary ioctl() to update the stored path value for the vdev. We
3310 3314 * ignore any failure here, since a common case is for an unprivileged user to
3311 3315 * type 'zpool status', and we'll display the correct information anyway.
3312 3316 */
3313 3317 static void
3314 3318 set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
3315 3319 {
3316 3320 zfs_cmd_t zc = { 0 };
3317 3321
3318 3322 (void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3319 3323 (void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
3320 3324 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3321 3325 &zc.zc_guid) == 0);
3322 3326
3323 3327 (void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
3324 3328 }
3325 3329
3326 3330 /*
3327 3331 * Given a vdev, return the name to display in iostat. If the vdev has a path,
3328 3332 * we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
3329 3333 * We also check if this is a whole disk, in which case we strip off the
3330 3334 * trailing 's0' slice name.
3331 3335 *
3332 3336 * This routine is also responsible for identifying when disks have been
3333 3337 * reconfigured in a new location. The kernel will have opened the device by
3334 3338 * devid, but the path will still refer to the old location. To catch this, we
3335 3339 * first do a path -> devid translation (which is fast for the common case). If
3336 3340 * the devid matches, we're done. If not, we do a reverse devid -> path
3337 3341 * translation and issue the appropriate ioctl() to update the path of the vdev.
3338 3342 * If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
3339 3343 * of these checks.
3340 3344 */
3341 3345 char *
3342 3346 zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
3343 3347 boolean_t verbose)
3344 3348 {
3345 3349 char *path, *devid;
3346 3350 uint64_t value;
3347 3351 char buf[64];
3348 3352 vdev_stat_t *vs;
3349 3353 uint_t vsc;
3350 3354
3351 3355 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
3352 3356 &value) == 0) {
3353 3357 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
3354 3358 &value) == 0);
3355 3359 (void) snprintf(buf, sizeof (buf), "%llu",
3356 3360 (u_longlong_t)value);
3357 3361 path = buf;
3358 3362 } else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
3359 3363
3360 3364 /*
3361 3365 * If the device is dead (faulted, offline, etc) then don't
3362 3366 * bother opening it. Otherwise we may be forcing the user to
3363 3367 * open a misbehaving device, which can have undesirable
3364 3368 * effects.
3365 3369 */
3366 3370 if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
3367 3371 (uint64_t **)&vs, &vsc) != 0 ||
3368 3372 vs->vs_state >= VDEV_STATE_DEGRADED) &&
3369 3373 zhp != NULL &&
3370 3374 nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
3371 3375 /*
3372 3376 * Determine if the current path is correct.
3373 3377 */
3374 3378 char *newdevid = path_to_devid(path);
3375 3379
3376 3380 if (newdevid == NULL ||
3377 3381 strcmp(devid, newdevid) != 0) {
3378 3382 char *newpath;
3379 3383
3380 3384 if ((newpath = devid_to_path(devid)) != NULL) {
3381 3385 /*
3382 3386 * Update the path appropriately.
3383 3387 */
3384 3388 set_path(zhp, nv, newpath);
3385 3389 if (nvlist_add_string(nv,
3386 3390 ZPOOL_CONFIG_PATH, newpath) == 0)
3387 3391 verify(nvlist_lookup_string(nv,
3388 3392 ZPOOL_CONFIG_PATH,
3389 3393 &path) == 0);
3390 3394 free(newpath);
3391 3395 }
3392 3396 }
3393 3397
3394 3398 if (newdevid)
3395 3399 devid_str_free(newdevid);
3396 3400 }
3397 3401
3398 3402 if (strncmp(path, "/dev/dsk/", 9) == 0)
3399 3403 path += 9;
3400 3404
3401 3405 if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
3402 3406 &value) == 0 && value) {
3403 3407 int pathlen = strlen(path);
3404 3408 char *tmp = zfs_strdup(hdl, path);
3405 3409
3406 3410 /*
3407 3411 * If it starts with c#, and ends with "s0", chop
3408 3412 * the "s0" off, or if it ends with "s0/old", remove
3409 3413 * the "s0" from the middle.
3410 3414 */
3411 3415 if (CTD_CHECK(tmp)) {
3412 3416 if (strcmp(&tmp[pathlen - 2], "s0") == 0) {
3413 3417 tmp[pathlen - 2] = '\0';
3414 3418 } else if (pathlen > 6 &&
3415 3419 strcmp(&tmp[pathlen - 6], "s0/old") == 0) {
3416 3420 (void) strcpy(&tmp[pathlen - 6],
3417 3421 "/old");
3418 3422 }
3419 3423 }
3420 3424 return (tmp);
3421 3425 }
3422 3426 } else {
3423 3427 verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &path) == 0);
3424 3428
3425 3429 /*
3426 3430 * If it's a raidz device, we need to stick in the parity level.
3427 3431 */
3428 3432 if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
3429 3433 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
3430 3434 &value) == 0);
3431 3435 (void) snprintf(buf, sizeof (buf), "%s%llu", path,
3432 3436 (u_longlong_t)value);
3433 3437 path = buf;
3434 3438 }
3435 3439
3436 3440 /*
3437 3441 * We identify each top-level vdev by using a <type-id>
3438 3442 * naming convention.
3439 3443 */
3440 3444 if (verbose) {
3441 3445 uint64_t id;
3442 3446
3443 3447 verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
3444 3448 &id) == 0);
3445 3449 (void) snprintf(buf, sizeof (buf), "%s-%llu", path,
3446 3450 (u_longlong_t)id);
3447 3451 path = buf;
3448 3452 }
3449 3453 }
3450 3454
3451 3455 return (zfs_strdup(hdl, path));
3452 3456 }
3453 3457
3454 3458 static int
3455 3459 zbookmark_compare(const void *a, const void *b)
3456 3460 {
3457 3461 return (memcmp(a, b, sizeof (zbookmark_t)));
3458 3462 }
3459 3463
3460 3464 /*
3461 3465 * Retrieve the persistent error log, uniquify the members, and return to the
3462 3466 * caller.
3463 3467 */
3464 3468 int
3465 3469 zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
3466 3470 {
3467 3471 zfs_cmd_t zc = { 0 };
3468 3472 uint64_t count;
3469 3473 zbookmark_t *zb = NULL;
3470 3474 int i;
3471 3475
3472 3476 /*
3473 3477 * Retrieve the raw error list from the kernel. If the number of errors
3474 3478 * has increased, allocate more space and continue until we get the
3475 3479 * entire list.
3476 3480 */
3477 3481 verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
3478 3482 &count) == 0);
3479 3483 if (count == 0)
3480 3484 return (0);
3481 3485 if ((zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
3482 3486 count * sizeof (zbookmark_t))) == (uintptr_t)NULL)
3483 3487 return (-1);
3484 3488 zc.zc_nvlist_dst_size = count;
3485 3489 (void) strcpy(zc.zc_name, zhp->zpool_name);
3486 3490 for (;;) {
3487 3491 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
3488 3492 &zc) != 0) {
3489 3493 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3490 3494 if (errno == ENOMEM) {
3491 3495 count = zc.zc_nvlist_dst_size;
3492 3496 if ((zc.zc_nvlist_dst = (uintptr_t)
3493 3497 zfs_alloc(zhp->zpool_hdl, count *
3494 3498 sizeof (zbookmark_t))) == (uintptr_t)NULL)
3495 3499 return (-1);
3496 3500 } else {
3497 3501 return (-1);
3498 3502 }
3499 3503 } else {
3500 3504 break;
3501 3505 }
3502 3506 }
3503 3507
3504 3508 /*
3505 3509 * Sort the resulting bookmarks. This is a little confusing due to the
3506 3510 * implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
3507 3511 * to first, and 'zc_nvlist_dst_size' indicates the number of boomarks
3508 3512 * _not_ copied as part of the process. So we point the start of our
3509 3513 * array appropriate and decrement the total number of elements.
3510 3514 */
3511 3515 zb = ((zbookmark_t *)(uintptr_t)zc.zc_nvlist_dst) +
3512 3516 zc.zc_nvlist_dst_size;
3513 3517 count -= zc.zc_nvlist_dst_size;
3514 3518
3515 3519 qsort(zb, count, sizeof (zbookmark_t), zbookmark_compare);
3516 3520
3517 3521 verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
3518 3522
3519 3523 /*
3520 3524 * Fill in the nverrlistp with nvlist's of dataset and object numbers.
3521 3525 */
3522 3526 for (i = 0; i < count; i++) {
3523 3527 nvlist_t *nv;
3524 3528
3525 3529 /* ignoring zb_blkid and zb_level for now */
3526 3530 if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
3527 3531 zb[i-1].zb_object == zb[i].zb_object)
3528 3532 continue;
3529 3533
3530 3534 if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
3531 3535 goto nomem;
3532 3536 if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
3533 3537 zb[i].zb_objset) != 0) {
3534 3538 nvlist_free(nv);
3535 3539 goto nomem;
3536 3540 }
3537 3541 if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
3538 3542 zb[i].zb_object) != 0) {
3539 3543 nvlist_free(nv);
3540 3544 goto nomem;
3541 3545 }
3542 3546 if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
3543 3547 nvlist_free(nv);
3544 3548 goto nomem;
3545 3549 }
3546 3550 nvlist_free(nv);
3547 3551 }
3548 3552
3549 3553 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3550 3554 return (0);
3551 3555
3552 3556 nomem:
3553 3557 free((void *)(uintptr_t)zc.zc_nvlist_dst);
3554 3558 return (no_memory(zhp->zpool_hdl));
3555 3559 }
3556 3560
3557 3561 /*
3558 3562 * Upgrade a ZFS pool to the latest on-disk version.
3559 3563 */
3560 3564 int
3561 3565 zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
3562 3566 {
3563 3567 zfs_cmd_t zc = { 0 };
3564 3568 libzfs_handle_t *hdl = zhp->zpool_hdl;
3565 3569
3566 3570 (void) strcpy(zc.zc_name, zhp->zpool_name);
↓ open down ↓ |
2141 lines elided |
↑ open up ↑ |
3567 3571 zc.zc_cookie = new_version;
3568 3572
3569 3573 if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
3570 3574 return (zpool_standard_error_fmt(hdl, errno,
3571 3575 dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
3572 3576 zhp->zpool_name));
3573 3577 return (0);
3574 3578 }
3575 3579
/*
 * Flatten a command line into 'string': the basename of argv[0] followed by
 * the remaining arguments, space-separated.  Output longer than 'len' is
 * silently truncated by strlcat().
 */
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
	int i;

	/* NOTE(review): basename() may modify its argument per POSIX */
	(void) strlcpy(string, basename(argv[0]), len);
	for (i = 1; i < argc; i++) {
		(void) strlcat(string, " ", len);
		(void) strlcat(string, argv[i], len);
	}
}
3591 3589
3592 -/*
3593 - * Stage command history for logging.
3594 - */
3595 3590 int
3596 -zpool_stage_history(libzfs_handle_t *hdl, const char *history_str)
3591 +zpool_log_history(libzfs_handle_t *hdl, const char *message)
3597 3592 {
3598 - if (history_str == NULL)
3599 - return (EINVAL);
3600 -
3601 - if (strlen(history_str) > HIS_MAX_RECORD_LEN)
3602 - return (EINVAL);
3603 -
3604 - if (hdl->libzfs_log_str != NULL)
3605 - free(hdl->libzfs_log_str);
3606 -
3607 - if ((hdl->libzfs_log_str = strdup(history_str)) == NULL)
3608 - return (no_memory(hdl));
3593 + zfs_cmd_t zc = { 0 };
3594 + nvlist_t *args;
3595 + int err;
3609 3596
3610 - return (0);
3597 + args = fnvlist_alloc();
3598 + fnvlist_add_string(args, "message", message);
3599 + err = zcmd_write_src_nvlist(hdl, &zc, args);
3600 + if (err == 0)
3601 + err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
3602 + nvlist_free(args);
3603 + zcmd_free_nvlists(&zc);
3604 + return (err);
3611 3605 }
3612 3606
3613 3607 /*
3614 3608 * Perform ioctl to get some command history of a pool.
3615 3609 *
3616 3610 * 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
3617 3611 * logical offset of the history buffer to start reading from.
3618 3612 *
3619 3613 * Upon return, 'off' is the next logical offset to read from and
3620 3614 * 'len' is the actual amount of bytes read into 'buf'.
3621 3615 */
3622 3616 static int
3623 3617 get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
3624 3618 {
3625 3619 zfs_cmd_t zc = { 0 };
3626 3620 libzfs_handle_t *hdl = zhp->zpool_hdl;
3627 3621
3628 3622 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3629 3623
3630 3624 zc.zc_history = (uint64_t)(uintptr_t)buf;
3631 3625 zc.zc_history_len = *len;
3632 3626 zc.zc_history_offset = *off;
3633 3627
3634 3628 if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
3635 3629 switch (errno) {
3636 3630 case EPERM:
3637 3631 return (zfs_error_fmt(hdl, EZFS_PERM,
3638 3632 dgettext(TEXT_DOMAIN,
3639 3633 "cannot show history for pool '%s'"),
3640 3634 zhp->zpool_name));
3641 3635 case ENOENT:
3642 3636 return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
3643 3637 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3644 3638 "'%s'"), zhp->zpool_name));
3645 3639 case ENOTSUP:
3646 3640 return (zfs_error_fmt(hdl, EZFS_BADVERSION,
3647 3641 dgettext(TEXT_DOMAIN, "cannot get history for pool "
3648 3642 "'%s', pool must be upgraded"), zhp->zpool_name));
3649 3643 default:
3650 3644 return (zpool_standard_error_fmt(hdl, errno,
3651 3645 dgettext(TEXT_DOMAIN,
3652 3646 "cannot get history for '%s'"), zhp->zpool_name));
3653 3647 }
3654 3648 }
3655 3649
3656 3650 *len = zc.zc_history_len;
3657 3651 *off = zc.zc_history_offset;
3658 3652
3659 3653 return (0);
3660 3654 }
3661 3655
3662 3656 /*
3663 3657 * Process the buffer of nvlists, unpacking and storing each nvlist record
3664 3658 * into 'records'. 'leftover' is set to the number of bytes that weren't
3665 3659 * processed as there wasn't a complete record.
3666 3660 */
3667 3661 int
3668 3662 zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
3669 3663 nvlist_t ***records, uint_t *numrecords)
3670 3664 {
3671 3665 uint64_t reclen;
3672 3666 nvlist_t *nv;
3673 3667 int i;
3674 3668
3675 3669 while (bytes_read > sizeof (reclen)) {
3676 3670
3677 3671 /* get length of packed record (stored as little endian) */
3678 3672 for (i = 0, reclen = 0; i < sizeof (reclen); i++)
3679 3673 reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
3680 3674
3681 3675 if (bytes_read < sizeof (reclen) + reclen)
3682 3676 break;
3683 3677
3684 3678 /* unpack record */
3685 3679 if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
3686 3680 return (ENOMEM);
3687 3681 bytes_read -= sizeof (reclen) + reclen;
3688 3682 buf += sizeof (reclen) + reclen;
3689 3683
3690 3684 /* add record to nvlist array */
3691 3685 (*numrecords)++;
3692 3686 if (ISP2(*numrecords + 1)) {
3693 3687 *records = realloc(*records,
3694 3688 *numrecords * 2 * sizeof (nvlist_t *));
3695 3689 }
3696 3690 (*records)[*numrecords - 1] = nv;
3697 3691 }
3698 3692
3699 3693 *leftover = bytes_read;
3700 3694 return (0);
3701 3695 }
3702 3696
3703 3697 #define HIS_BUF_LEN (128*1024)
3704 3698
3705 3699 /*
3706 3700 * Retrieve the command history of a pool.
3707 3701 */
3708 3702 int
3709 3703 zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
3710 3704 {
3711 3705 char buf[HIS_BUF_LEN];
3712 3706 uint64_t off = 0;
3713 3707 nvlist_t **records = NULL;
3714 3708 uint_t numrecords = 0;
3715 3709 int err, i;
3716 3710
3717 3711 do {
3718 3712 uint64_t bytes_read = sizeof (buf);
3719 3713 uint64_t leftover;
3720 3714
3721 3715 if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
3722 3716 break;
3723 3717
3724 3718 /* if nothing else was read in, we're at EOF, just return */
3725 3719 if (!bytes_read)
3726 3720 break;
3727 3721
3728 3722 if ((err = zpool_history_unpack(buf, bytes_read,
3729 3723 &leftover, &records, &numrecords)) != 0)
3730 3724 break;
3731 3725 off -= leftover;
3732 3726
3733 3727 /* CONSTCOND */
3734 3728 } while (1);
3735 3729
3736 3730 if (!err) {
3737 3731 verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
3738 3732 verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
3739 3733 records, numrecords) == 0);
3740 3734 }
3741 3735 for (i = 0; i < numrecords; i++)
3742 3736 nvlist_free(records[i]);
3743 3737 free(records);
3744 3738
3745 3739 return (err);
3746 3740 }
3747 3741
3748 3742 void
3749 3743 zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
3750 3744 char *pathname, size_t len)
3751 3745 {
3752 3746 zfs_cmd_t zc = { 0 };
3753 3747 boolean_t mounted = B_FALSE;
3754 3748 char *mntpnt = NULL;
3755 3749 char dsname[MAXNAMELEN];
3756 3750
3757 3751 if (dsobj == 0) {
3758 3752 /* special case for the MOS */
3759 3753 (void) snprintf(pathname, len, "<metadata>:<0x%llx>", obj);
3760 3754 return;
3761 3755 }
3762 3756
3763 3757 /* get the dataset's name */
3764 3758 (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
3765 3759 zc.zc_obj = dsobj;
3766 3760 if (ioctl(zhp->zpool_hdl->libzfs_fd,
3767 3761 ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
3768 3762 /* just write out a path of two object numbers */
3769 3763 (void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
3770 3764 dsobj, obj);
3771 3765 return;
3772 3766 }
3773 3767 (void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
3774 3768
3775 3769 /* find out if the dataset is mounted */
3776 3770 mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
3777 3771
3778 3772 /* get the corrupted object's path */
3779 3773 (void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
3780 3774 zc.zc_obj = obj;
3781 3775 if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
3782 3776 &zc) == 0) {
3783 3777 if (mounted) {
3784 3778 (void) snprintf(pathname, len, "%s%s", mntpnt,
3785 3779 zc.zc_value);
3786 3780 } else {
3787 3781 (void) snprintf(pathname, len, "%s:%s",
3788 3782 dsname, zc.zc_value);
3789 3783 }
3790 3784 } else {
3791 3785 (void) snprintf(pathname, len, "%s:<0x%llx>", dsname, obj);
3792 3786 }
3793 3787 free(mntpnt);
3794 3788 }
3795 3789
3796 3790 /*
3797 3791 * Read the EFI label from the config, if a label does not exist then
3798 3792 * pass back the error to the caller. If the caller has passed a non-NULL
3799 3793 * diskaddr argument then we set it to the starting address of the EFI
3800 3794 * partition.
3801 3795 */
3802 3796 static int
3803 3797 read_efi_label(nvlist_t *config, diskaddr_t *sb)
3804 3798 {
3805 3799 char *path;
3806 3800 int fd;
3807 3801 char diskname[MAXPATHLEN];
3808 3802 int err = -1;
3809 3803
3810 3804 if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
3811 3805 return (err);
3812 3806
3813 3807 (void) snprintf(diskname, sizeof (diskname), "%s%s", RDISK_ROOT,
3814 3808 strrchr(path, '/'));
3815 3809 if ((fd = open(diskname, O_RDONLY|O_NDELAY)) >= 0) {
3816 3810 struct dk_gpt *vtoc;
3817 3811
3818 3812 if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
3819 3813 if (sb != NULL)
3820 3814 *sb = vtoc->efi_parts[0].p_start;
3821 3815 efi_free(vtoc);
3822 3816 }
3823 3817 (void) close(fd);
3824 3818 }
3825 3819 return (err);
3826 3820 }
3827 3821
3828 3822 /*
3829 3823 * determine where a partition starts on a disk in the current
3830 3824 * configuration
3831 3825 */
3832 3826 static diskaddr_t
3833 3827 find_start_block(nvlist_t *config)
3834 3828 {
3835 3829 nvlist_t **child;
3836 3830 uint_t c, children;
3837 3831 diskaddr_t sb = MAXOFFSET_T;
3838 3832 uint64_t wholedisk;
3839 3833
3840 3834 if (nvlist_lookup_nvlist_array(config,
3841 3835 ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
3842 3836 if (nvlist_lookup_uint64(config,
3843 3837 ZPOOL_CONFIG_WHOLE_DISK,
3844 3838 &wholedisk) != 0 || !wholedisk) {
3845 3839 return (MAXOFFSET_T);
3846 3840 }
3847 3841 if (read_efi_label(config, &sb) < 0)
3848 3842 sb = MAXOFFSET_T;
3849 3843 return (sb);
3850 3844 }
3851 3845
3852 3846 for (c = 0; c < children; c++) {
3853 3847 sb = find_start_block(child[c]);
3854 3848 if (sb != MAXOFFSET_T) {
3855 3849 return (sb);
3856 3850 }
3857 3851 }
3858 3852 return (MAXOFFSET_T);
3859 3853 }
3860 3854
3861 3855 /*
3862 3856 * Label an individual disk. The name provided is the short name,
3863 3857 * stripped of any leading /dev path.
3864 3858 */
3865 3859 int
3866 3860 zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
3867 3861 {
3868 3862 char path[MAXPATHLEN];
3869 3863 struct dk_gpt *vtoc;
3870 3864 int fd;
3871 3865 size_t resv = EFI_MIN_RESV_SIZE;
3872 3866 uint64_t slice_size;
3873 3867 diskaddr_t start_block;
3874 3868 char errbuf[1024];
3875 3869
3876 3870 /* prepare an error message just in case */
3877 3871 (void) snprintf(errbuf, sizeof (errbuf),
3878 3872 dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
3879 3873
3880 3874 if (zhp) {
3881 3875 nvlist_t *nvroot;
3882 3876
3883 3877 if (zpool_is_bootable(zhp)) {
3884 3878 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3885 3879 "EFI labeled devices are not supported on root "
3886 3880 "pools."));
3887 3881 return (zfs_error(hdl, EZFS_POOL_NOTSUP, errbuf));
3888 3882 }
3889 3883
3890 3884 verify(nvlist_lookup_nvlist(zhp->zpool_config,
3891 3885 ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
3892 3886
3893 3887 if (zhp->zpool_start_block == 0)
3894 3888 start_block = find_start_block(nvroot);
3895 3889 else
3896 3890 start_block = zhp->zpool_start_block;
3897 3891 zhp->zpool_start_block = start_block;
3898 3892 } else {
3899 3893 /* new pool */
3900 3894 start_block = NEW_START_BLOCK;
3901 3895 }
3902 3896
3903 3897 (void) snprintf(path, sizeof (path), "%s/%s%s", RDISK_ROOT, name,
3904 3898 BACKUP_SLICE);
3905 3899
3906 3900 if ((fd = open(path, O_RDWR | O_NDELAY)) < 0) {
3907 3901 /*
3908 3902 * This shouldn't happen. We've long since verified that this
3909 3903 * is a valid device.
3910 3904 */
3911 3905 zfs_error_aux(hdl,
3912 3906 dgettext(TEXT_DOMAIN, "unable to open device"));
3913 3907 return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
3914 3908 }
3915 3909
3916 3910 if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
3917 3911 /*
3918 3912 * The only way this can fail is if we run out of memory, or we
3919 3913 * were unable to read the disk's capacity
3920 3914 */
3921 3915 if (errno == ENOMEM)
3922 3916 (void) no_memory(hdl);
3923 3917
3924 3918 (void) close(fd);
3925 3919 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3926 3920 "unable to read disk capacity"), name);
3927 3921
3928 3922 return (zfs_error(hdl, EZFS_NOCAP, errbuf));
3929 3923 }
3930 3924
3931 3925 slice_size = vtoc->efi_last_u_lba + 1;
3932 3926 slice_size -= EFI_MIN_RESV_SIZE;
3933 3927 if (start_block == MAXOFFSET_T)
3934 3928 start_block = NEW_START_BLOCK;
3935 3929 slice_size -= start_block;
3936 3930
3937 3931 vtoc->efi_parts[0].p_start = start_block;
3938 3932 vtoc->efi_parts[0].p_size = slice_size;
3939 3933
3940 3934 /*
3941 3935 * Why we use V_USR: V_BACKUP confuses users, and is considered
3942 3936 * disposable by some EFI utilities (since EFI doesn't have a backup
3943 3937 * slice). V_UNASSIGNED is supposed to be used only for zero size
3944 3938 * partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
3945 3939 * etc. were all pretty specific. V_USR is as close to reality as we
3946 3940 * can get, in the absence of V_OTHER.
3947 3941 */
3948 3942 vtoc->efi_parts[0].p_tag = V_USR;
3949 3943 (void) strcpy(vtoc->efi_parts[0].p_name, "zfs");
3950 3944
3951 3945 vtoc->efi_parts[8].p_start = slice_size + start_block;
3952 3946 vtoc->efi_parts[8].p_size = resv;
3953 3947 vtoc->efi_parts[8].p_tag = V_RESERVED;
3954 3948
3955 3949 if (efi_write(fd, vtoc) != 0) {
3956 3950 /*
3957 3951 * Some block drivers (like pcata) may not support EFI
3958 3952 * GPT labels. Print out a helpful error message dir-
3959 3953 * ecting the user to manually label the disk and give
3960 3954 * a specific slice.
3961 3955 */
3962 3956 (void) close(fd);
3963 3957 efi_free(vtoc);
3964 3958
3965 3959 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3966 3960 "try using fdisk(1M) and then provide a specific slice"));
3967 3961 return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
3968 3962 }
3969 3963
3970 3964 (void) close(fd);
3971 3965 efi_free(vtoc);
3972 3966 return (0);
3973 3967 }
3974 3968
3975 3969 static boolean_t
3976 3970 supported_dump_vdev_type(libzfs_handle_t *hdl, nvlist_t *config, char *errbuf)
3977 3971 {
3978 3972 char *type;
3979 3973 nvlist_t **child;
3980 3974 uint_t children, c;
3981 3975
3982 3976 verify(nvlist_lookup_string(config, ZPOOL_CONFIG_TYPE, &type) == 0);
3983 3977 if (strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
3984 3978 strcmp(type, VDEV_TYPE_FILE) == 0 ||
3985 3979 strcmp(type, VDEV_TYPE_LOG) == 0 ||
3986 3980 strcmp(type, VDEV_TYPE_HOLE) == 0 ||
3987 3981 strcmp(type, VDEV_TYPE_MISSING) == 0) {
3988 3982 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
3989 3983 "vdev type '%s' is not supported"), type);
3990 3984 (void) zfs_error(hdl, EZFS_VDEVNOTSUP, errbuf);
3991 3985 return (B_FALSE);
3992 3986 }
3993 3987 if (nvlist_lookup_nvlist_array(config, ZPOOL_CONFIG_CHILDREN,
3994 3988 &child, &children) == 0) {
3995 3989 for (c = 0; c < children; c++) {
3996 3990 if (!supported_dump_vdev_type(hdl, child[c], errbuf))
3997 3991 return (B_FALSE);
3998 3992 }
3999 3993 }
4000 3994 return (B_TRUE);
4001 3995 }
4002 3996
4003 3997 /*
4004 3998 * check if this zvol is allowable for use as a dump device; zero if
4005 3999 * it is, > 0 if it isn't, < 0 if it isn't a zvol
4006 4000 */
4007 4001 int
4008 4002 zvol_check_dump_config(char *arg)
4009 4003 {
4010 4004 zpool_handle_t *zhp = NULL;
4011 4005 nvlist_t *config, *nvroot;
4012 4006 char *p, *volname;
4013 4007 nvlist_t **top;
4014 4008 uint_t toplevels;
4015 4009 libzfs_handle_t *hdl;
4016 4010 char errbuf[1024];
4017 4011 char poolname[ZPOOL_MAXNAMELEN];
4018 4012 int pathlen = strlen(ZVOL_FULL_DEV_DIR);
4019 4013 int ret = 1;
4020 4014
4021 4015 if (strncmp(arg, ZVOL_FULL_DEV_DIR, pathlen)) {
4022 4016 return (-1);
4023 4017 }
4024 4018
4025 4019 (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
4026 4020 "dump is not supported on device '%s'"), arg);
4027 4021
4028 4022 if ((hdl = libzfs_init()) == NULL)
4029 4023 return (1);
4030 4024 libzfs_print_on_error(hdl, B_TRUE);
4031 4025
4032 4026 volname = arg + pathlen;
4033 4027
4034 4028 /* check the configuration of the pool */
4035 4029 if ((p = strchr(volname, '/')) == NULL) {
4036 4030 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4037 4031 "malformed dataset name"));
4038 4032 (void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
4039 4033 return (1);
4040 4034 } else if (p - volname >= ZFS_MAXNAMELEN) {
4041 4035 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4042 4036 "dataset name is too long"));
4043 4037 (void) zfs_error(hdl, EZFS_NAMETOOLONG, errbuf);
4044 4038 return (1);
4045 4039 } else {
4046 4040 (void) strncpy(poolname, volname, p - volname);
4047 4041 poolname[p - volname] = '\0';
4048 4042 }
4049 4043
4050 4044 if ((zhp = zpool_open(hdl, poolname)) == NULL) {
4051 4045 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4052 4046 "could not open pool '%s'"), poolname);
4053 4047 (void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
4054 4048 goto out;
4055 4049 }
4056 4050 config = zpool_get_config(zhp, NULL);
4057 4051 if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
4058 4052 &nvroot) != 0) {
4059 4053 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4060 4054 "could not obtain vdev configuration for '%s'"), poolname);
4061 4055 (void) zfs_error(hdl, EZFS_INVALCONFIG, errbuf);
4062 4056 goto out;
4063 4057 }
4064 4058
4065 4059 verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
4066 4060 &top, &toplevels) == 0);
4067 4061 if (toplevels != 1) {
4068 4062 zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
4069 4063 "'%s' has multiple top level vdevs"), poolname);
4070 4064 (void) zfs_error(hdl, EZFS_DEVOVERFLOW, errbuf);
4071 4065 goto out;
4072 4066 }
4073 4067
4074 4068 if (!supported_dump_vdev_type(hdl, top[0], errbuf)) {
4075 4069 goto out;
4076 4070 }
4077 4071 ret = 0;
4078 4072
4079 4073 out:
4080 4074 if (zhp)
4081 4075 zpool_close(zhp);
4082 4076 libzfs_fini(hdl);
4083 4077 return (ret);
4084 4078 }
↓ open down ↓ |
464 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX