/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
 * Copyright 2012 Nexenta Systems, Inc. All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/zfs_zone.h>
#include <sys/spa_impl.h>
#include <sys/refcount.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>
#include <sys/efi_partition.h>
#include <sys/fm/fs/zfs.h>

/*
 * Virtual device vector for disks.
 */

extern ldi_ident_t zfs_li;

static void vdev_disk_close(vdev_t *);

typedef struct vdev_disk_buf {
        buf_t   vdb_buf;
        zio_t   *vdb_io;
} vdev_disk_buf_t;

typedef struct vdev_disk_ldi_cb {
        list_node_t             lcb_next;
        ldi_callback_id_t       lcb_id;
} vdev_disk_ldi_cb_t;

static void
vdev_disk_alloc(vdev_t *vd)
{
        vdev_disk_t *dvd;

        dvd = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);
        /*
         * Create the LDI event callback list.
         */
        list_create(&dvd->vd_ldi_cbs, sizeof (vdev_disk_ldi_cb_t),
            offsetof(vdev_disk_ldi_cb_t, lcb_next));
}

static void
vdev_disk_free(vdev_t *vd)
{
        vdev_disk_t *dvd = vd->vdev_tsd;
        vdev_disk_ldi_cb_t *lcb;

        /*
         * We have already closed the LDI handle. Clean up the LDI event
         * callbacks and free vd->vdev_tsd.
         */
        while ((lcb = list_head(&dvd->vd_ldi_cbs)) != NULL) {
                list_remove(&dvd->vd_ldi_cbs, lcb);
                (void) ldi_ev_remove_callbacks(lcb->lcb_id);
                kmem_free(lcb, sizeof (vdev_disk_ldi_cb_t));
        }
        list_destroy(&dvd->vd_ldi_cbs);
        kmem_free(dvd, sizeof (vdev_disk_t));
        vd->vdev_tsd = NULL;
}
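
/*
 * LDI offline events are delivered in two phases. The notify callback
 * below runs before the offline is attempted and must close our LDI
 * handle for the state change to be allowed to proceed; the finalize
 * callback runs once the attempt has completed and either finishes the
 * teardown or, if the offline failed, schedules a probe so the vdev
 * can be reopened.
 */
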
/* ARGSUSED */
static int
vdev_disk_off_notify(ldi_handle_t lh, ldi_ev_cookie_t ecookie, void *arg,
    void *ev_data)
{
        vdev_t *vd = (vdev_t *)arg;
        vdev_disk_t *dvd = vd->vdev_tsd;

        /*
         * Ignore events other than offline.
         */
        if (strcmp(ldi_ev_get_type(ecookie), LDI_EV_OFFLINE) != 0)
                return (LDI_EV_SUCCESS);

        /*
         * All LDI handles must be closed for the state change to succeed, so
         * call vdev_disk_close() to do this.
         *
         * We inform vdev_disk_close() that it is being called from offline
         * notify context so it will defer cleanup of the LDI event callbacks
         * and the freeing of vd->vdev_tsd to the offline finalize or a
         * reopen.
         */
        dvd->vd_ldi_offline = B_TRUE;
        vdev_disk_close(vd);

        /*
         * Now that the device is closed, request that the spa_async_thread
         * mark the device as REMOVED and notify FMA of the removal.
         */
        zfs_post_remove(vd->vdev_spa, vd);
        vd->vdev_remove_wanted = B_TRUE;
        spa_async_request(vd->vdev_spa, SPA_ASYNC_REMOVE);

        return (LDI_EV_SUCCESS);
}

/* ARGSUSED */
static void
vdev_disk_off_finalize(ldi_handle_t lh, ldi_ev_cookie_t ecookie,
    int ldi_result, void *arg, void *ev_data)
{
        vdev_t *vd = (vdev_t *)arg;

        /*
         * Ignore events other than offline.
         */
        if (strcmp(ldi_ev_get_type(ecookie), LDI_EV_OFFLINE) != 0)
                return;

        /*
         * We have already closed the LDI handle in notify.
         * Clean up the LDI event callbacks and free vd->vdev_tsd.
         */
        vdev_disk_free(vd);

        /*
         * Request that the vdev be reopened if the offline state change was
         * unsuccessful.
         */
        if (ldi_result != LDI_EV_SUCCESS) {
                vd->vdev_probe_wanted = B_TRUE;
                spa_async_request(vd->vdev_spa, SPA_ASYNC_PROBE);
        }
}

static ldi_ev_callback_t vdev_disk_off_callb = {
        .cb_vers = LDI_EV_CB_VERS,
        .cb_notify = vdev_disk_off_notify,
        .cb_finalize = vdev_disk_off_finalize
};

/* ARGSUSED */
static void
vdev_disk_dgrd_finalize(ldi_handle_t lh, ldi_ev_cookie_t ecookie,
    int ldi_result, void *arg, void *ev_data)
{
        vdev_t *vd = (vdev_t *)arg;

        /*
         * Ignore events other than degrade.
         */
        if (strcmp(ldi_ev_get_type(ecookie), LDI_EV_DEGRADE) != 0)
                return;

        /*
         * Degrade events always succeed. Mark the vdev as degraded.
         * This status is purely informative for the user.
         */
        (void) vdev_degrade(vd->vdev_spa, vd->vdev_guid, 0);
}

static ldi_ev_callback_t vdev_disk_dgrd_callb = {
        .cb_vers = LDI_EV_CB_VERS,
        .cb_notify = NULL,
        .cb_finalize = vdev_disk_dgrd_finalize
};
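
/*
 * The callback tables above are registered against each LDI handle as
 * it is opened in vdev_disk_open() (via ldi_ev_get_cookie() and
 * ldi_ev_register_callbacks()) and are unregistered again in
 * vdev_disk_free() via ldi_ev_remove_callbacks().
 */
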
static void
vdev_disk_hold(vdev_t *vd)
{
        ddi_devid_t devid;
        char *minor;

        ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

        /*
         * We must have a pathname, and it must be absolute.
         */
        if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
                return;

        /*
         * Only prefetch path and devid info if the device has
         * never been opened.
         */
        if (vd->vdev_tsd != NULL)
                return;

        if (vd->vdev_wholedisk == -1ULL) {
                size_t len = strlen(vd->vdev_path) + 3;
                char *buf = kmem_alloc(len, KM_SLEEP);

                (void) snprintf(buf, len, "%ss0", vd->vdev_path);

                (void) ldi_vp_from_name(buf, &vd->vdev_name_vp);
                kmem_free(buf, len);
        }

        if (vd->vdev_name_vp == NULL)
                (void) ldi_vp_from_name(vd->vdev_path, &vd->vdev_name_vp);

        if (vd->vdev_devid != NULL &&
            ddi_devid_str_decode(vd->vdev_devid, &devid, &minor) == 0) {
                (void) ldi_vp_from_devid(devid, minor, &vd->vdev_devid_vp);
                ddi_devid_str_free(minor);
                ddi_devid_free(devid);
        }
}

static void
vdev_disk_rele(vdev_t *vd)
{
        ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));

        if (vd->vdev_name_vp) {
                VN_RELE_ASYNC(vd->vdev_name_vp,
                    dsl_pool_vnrele_taskq(vd->vdev_spa->spa_dsl_pool));
                vd->vdev_name_vp = NULL;
        }
        if (vd->vdev_devid_vp) {
                VN_RELE_ASYNC(vd->vdev_devid_vp,
                    dsl_pool_vnrele_taskq(vd->vdev_spa->spa_dsl_pool));
                vd->vdev_devid_vp = NULL;
        }
}

static uint64_t
vdev_disk_get_space(vdev_t *vd, uint64_t capacity, uint_t blksz)
{
        ASSERT(vd->vdev_wholedisk);

        vdev_disk_t *dvd = vd->vdev_tsd;
        dk_efi_t dk_ioc;
        efi_gpt_t *efi;
        uint64_t avail_space = 0;
        int efisize = EFI_LABEL_SIZE * 2;

        dk_ioc.dki_data = kmem_alloc(efisize, KM_SLEEP);
        dk_ioc.dki_lba = 1;
        dk_ioc.dki_length = efisize;
        dk_ioc.dki_data_64 = (uint64_t)(uintptr_t)dk_ioc.dki_data;
        efi = dk_ioc.dki_data;

        if (ldi_ioctl(dvd->vd_lh, DKIOCGETEFI, (intptr_t)&dk_ioc,
            FKIOCTL, kcred, NULL) == 0) {
                uint64_t efi_altern_lba = LE_64(efi->efi_gpt_AlternateLBA);

                zfs_dbgmsg("vdev %s, capacity %llu, altern lba %llu",
                    vd->vdev_path, capacity, efi_altern_lba);
                if (capacity > efi_altern_lba)
                        avail_space = (capacity - efi_altern_lba) * blksz;
        }
        kmem_free(dk_ioc.dki_data, efisize);
        return (avail_space);
}
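
/*
 * Worked example with illustrative figures: if a whole-disk LUN that
 * was labeled while 100GB in size is later grown to 200GB, the backup
 * GPT (and hence efi_gpt_AlternateLBA) still sits near the old 100GB
 * boundary while the reported capacity reflects the new size, so
 * (capacity - efi_altern_lba) * blksz yields roughly 100GB of
 * additional space available for expansion.
 */
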
static int
vdev_disk_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
        spa_t *spa = vd->vdev_spa;
        vdev_disk_t *dvd = vd->vdev_tsd;
        struct dk_minfo_ext dkmext;
        ldi_ev_cookie_t ecookie;
        vdev_disk_ldi_cb_t *lcb;
        int error;
        dev_t dev;
        int otyp;

        /*
         * We must have a pathname, and it must be absolute.
         */
        if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
                vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                return (EINVAL);
        }

        /*
         * Reopen the device if it's not currently open. Otherwise,
         * just update the physical size of the device.
         */
        if (dvd != NULL) {
                if (dvd->vd_ldi_offline && dvd->vd_lh == NULL) {
                        /*
                         * If we are opening a device in its offline notify
                         * context, the LDI handle was just closed. Clean
                         * up the LDI event callbacks and free vd->vdev_tsd.
                         */
                        vdev_disk_free(vd);
                } else {
                        VERIFY(vd->vdev_reopening);
                        goto skip_open;
                }
        }

        /*
         * Create vd->vdev_tsd.
         */
        vdev_disk_alloc(vd);
        dvd = vd->vdev_tsd;

        /*
         * When opening a disk device, we want to preserve the user's original
         * intent. We always want to open the device by the path the user gave
         * us, even if it is one of multiple paths to the same device. But we
         * also want to be able to survive disks being removed/recabled.
         * Therefore the sequence of opening devices is:
         *
         * 1. Try opening the device by path.  For legacy pools without the
         *    'whole_disk' property, attempt to fix the path by appending
         *    's0'.
         *
         * 2. If the devid of the device matches the stored value, return
         *    success.
         *
         * 3. Otherwise, the device may have moved. Try opening the device
         *    by the devid instead.
         */
        if (vd->vdev_devid != NULL) {
                if (ddi_devid_str_decode(vd->vdev_devid, &dvd->vd_devid,
                    &dvd->vd_minor) != 0) {
                        vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
                        return (EINVAL);
                }
        }

        error = EINVAL;         /* presume failure */

        if (vd->vdev_path != NULL) {
                ddi_devid_t devid;

                if (vd->vdev_wholedisk == -1ULL) {
                        size_t len = strlen(vd->vdev_path) + 3;
                        char *buf = kmem_alloc(len, KM_SLEEP);

                        (void) snprintf(buf, len, "%ss0", vd->vdev_path);

                        error = ldi_open_by_name(buf, spa_mode(spa), kcred,
                            &dvd->vd_lh, zfs_li);
                        if (error == 0) {
                                spa_strfree(vd->vdev_path);
                                vd->vdev_path = buf;
                                vd->vdev_wholedisk = 1ULL;
                        } else {
                                kmem_free(buf, len);
                        }
                }

                /*
                 * If we have not yet opened the device, try to open it by the
                 * specified path.
                 */
                if (error != 0) {
                        error = ldi_open_by_name(vd->vdev_path, spa_mode(spa),
                            kcred, &dvd->vd_lh, zfs_li);
                }

                /*
                 * Compare the devid to the stored value.
                 */
                if (error == 0 && vd->vdev_devid != NULL &&
                    ldi_get_devid(dvd->vd_lh, &devid) == 0) {
                        if (ddi_devid_compare(devid, dvd->vd_devid) != 0) {
                                error = EINVAL;
                                (void) ldi_close(dvd->vd_lh, spa_mode(spa),
                                    kcred);
                                dvd->vd_lh = NULL;
                        }
                        ddi_devid_free(devid);
                }

                /*
                 * If we succeeded in opening the device, but 'vdev_wholedisk'
                 * is not yet set, then this must be a slice.
                 */
                if (error == 0 && vd->vdev_wholedisk == -1ULL)
                        vd->vdev_wholedisk = 0;
        }

        /*
         * If we were unable to open by path, or the devid check fails, open
         * by devid instead.
         */
        if (error != 0 && vd->vdev_devid != NULL)
                error = ldi_open_by_devid(dvd->vd_devid, dvd->vd_minor,
                    spa_mode(spa), kcred, &dvd->vd_lh, zfs_li);

        /*
         * If all else fails, then try opening by physical path (if available)
         * or the logical path (if we failed due to the devid check). While
         * not as reliable as the devid, this will give us something, and the
         * higher level vdev validation will prevent us from opening the wrong
         * device.
         */
        if (error) {
                if (vd->vdev_physpath != NULL &&
                    (dev = ddi_pathname_to_dev_t(vd->vdev_physpath)) != NODEV)
                        error = ldi_open_by_dev(&dev, OTYP_BLK, spa_mode(spa),
                            kcred, &dvd->vd_lh, zfs_li);

                /*
                 * Note that we don't support the legacy auto-wholedisk
                 * support as above. This hasn't been used in a very long time
                 * and we don't need to propagate its oddities to this edge
                 * condition.
                 */
                if (error && vd->vdev_path != NULL)
                        error = ldi_open_by_name(vd->vdev_path, spa_mode(spa),
                            kcred, &dvd->vd_lh, zfs_li);
        }

        if (error) {
                vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
                return (error);
        }

        /*
         * Once a device is opened, verify that the physical device path (if
         * available) is up to date.
         */
        if (ldi_get_dev(dvd->vd_lh, &dev) == 0 &&
            ldi_get_otyp(dvd->vd_lh, &otyp) == 0) {
                char *physpath, *minorname;

                physpath = kmem_alloc(MAXPATHLEN, KM_SLEEP);
                minorname = NULL;
                if (ddi_dev_pathname(dev, otyp, physpath) == 0 &&
                    ldi_get_minor_name(dvd->vd_lh, &minorname) == 0 &&
                    (vd->vdev_physpath == NULL ||
                    strcmp(vd->vdev_physpath, physpath) != 0)) {
                        if (vd->vdev_physpath)
                                spa_strfree(vd->vdev_physpath);
                        (void) strlcat(physpath, ":", MAXPATHLEN);
                        (void) strlcat(physpath, minorname, MAXPATHLEN);
                        vd->vdev_physpath = spa_strdup(physpath);
                }
                if (minorname)
                        kmem_free(minorname, strlen(minorname) + 1);
                kmem_free(physpath, MAXPATHLEN);
        }

        /*
         * Register callbacks for the LDI offline event.
         */
        if (ldi_ev_get_cookie(dvd->vd_lh, LDI_EV_OFFLINE, &ecookie) ==
            LDI_EV_SUCCESS) {
                lcb = kmem_zalloc(sizeof (vdev_disk_ldi_cb_t), KM_SLEEP);
                list_insert_tail(&dvd->vd_ldi_cbs, lcb);
                (void) ldi_ev_register_callbacks(dvd->vd_lh, ecookie,
                    &vdev_disk_off_callb, (void *) vd, &lcb->lcb_id);
        }

        /*
         * Register callbacks for the LDI degrade event.
         */
        if (ldi_ev_get_cookie(dvd->vd_lh, LDI_EV_DEGRADE, &ecookie) ==
            LDI_EV_SUCCESS) {
                lcb = kmem_zalloc(sizeof (vdev_disk_ldi_cb_t), KM_SLEEP);
                list_insert_tail(&dvd->vd_ldi_cbs, lcb);
                (void) ldi_ev_register_callbacks(dvd->vd_lh, ecookie,
                    &vdev_disk_dgrd_callb, (void *) vd, &lcb->lcb_id);
        }

skip_open:
        /*
         * Determine the actual size of the device.
         */
        if (ldi_get_size(dvd->vd_lh, psize) != 0) {
                vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
                return (EINVAL);
        }

        /*
         * Determine the device's minimum transfer size.
         * If the ioctl isn't supported, assume DEV_BSIZE.
         */
        if (ldi_ioctl(dvd->vd_lh, DKIOCGMEDIAINFOEXT, (intptr_t)&dkmext,
            FKIOCTL, kcred, NULL) != 0)
                dkmext.dki_pbsize = DEV_BSIZE;

        *ashift = highbit(MAX(dkmext.dki_pbsize, SPA_MINBLOCKSIZE)) - 1;

        if (vd->vdev_wholedisk == 1) {
                uint64_t capacity = dkmext.dki_capacity - 1;
                uint64_t blksz = dkmext.dki_lbsize;
                int wce = 1;

                /*
                 * If we own the whole disk, try to enable disk write caching.
                 * We ignore errors because it's OK if we can't do it.
                 */
                (void) ldi_ioctl(dvd->vd_lh, DKIOCSETWCE, (intptr_t)&wce,
                    FKIOCTL, kcred, NULL);

                *max_psize = *psize + vdev_disk_get_space(vd, capacity, blksz);
                zfs_dbgmsg("capacity change: vdev %s, psize %llu, "
                    "max_psize %llu", vd->vdev_path, *psize, *max_psize);
        } else {
                *max_psize = *psize;
        }

        /*
         * Clear the nowritecache bit, so that on a vdev_reopen() we will
         * try again.
         */
        vd->vdev_nowritecache = B_FALSE;

        return (0);
}
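
/*
 * Note on the ashift computation above: highbit() returns the one-based
 * position of the highest bit set, so the default 512-byte sector size
 * yields an ashift of 9, while a 4KB physical-sector ("advanced format")
 * disk reporting dki_pbsize == 4096 yields an ashift of 12.
 */
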
static void
vdev_disk_close(vdev_t *vd)
{
        vdev_disk_t *dvd = vd->vdev_tsd;

        if (vd->vdev_reopening || dvd == NULL)
                return;

        if (dvd->vd_minor != NULL) {
                ddi_devid_str_free(dvd->vd_minor);
                dvd->vd_minor = NULL;
        }

        if (dvd->vd_devid != NULL) {
                ddi_devid_free(dvd->vd_devid);
                dvd->vd_devid = NULL;
        }

        if (dvd->vd_lh != NULL) {
                (void) ldi_close(dvd->vd_lh, spa_mode(vd->vdev_spa), kcred);
                dvd->vd_lh = NULL;
        }

        vd->vdev_delayed_close = B_FALSE;

        /*
         * If we closed the LDI handle due to an offline notify from LDI,
         * don't free vd->vdev_tsd or unregister the callbacks here;
         * the offline finalize callback or a reopen will take care of it.
         */
        if (dvd->vd_ldi_offline)
                return;

        vdev_disk_free(vd);
}

int
vdev_disk_physio(vdev_t *vd, caddr_t data,
    size_t size, uint64_t offset, int flags)
{
        vdev_disk_t *dvd = vd->vdev_tsd;

        /*
         * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
         * Nothing to be done here but return failure.
         */
        if (dvd == NULL || (dvd->vd_ldi_offline && dvd->vd_lh == NULL))
                return (EIO);

        ASSERT(vd->vdev_ops == &vdev_disk_ops);
        return (vdev_disk_ldi_physio(dvd->vd_lh, data, size, offset, flags));
}

int
vdev_disk_ldi_physio(ldi_handle_t vd_lh, caddr_t data,
    size_t size, uint64_t offset, int flags)
{
        buf_t *bp;
        int error = 0;

        if (vd_lh == NULL)
                return (EINVAL);

        ASSERT(flags & B_READ || flags & B_WRITE);

        bp = getrbuf(KM_SLEEP);
        bp->b_flags = flags | B_BUSY | B_NOCACHE | B_FAILFAST;
        bp->b_bcount = size;
        bp->b_un.b_addr = (void *)data;
        bp->b_lblkno = lbtodb(offset);
        bp->b_bufsize = size;

        error = ldi_strategy(vd_lh, bp);
        ASSERT(error == 0);
        if ((error = biowait(bp)) == 0 && bp->b_resid != 0)
                error = EIO;
        freerbuf(bp);

        return (error);
}
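
/*
 * Example (taken from vdev_disk_read_rootlabel() below): label data can
 * be read through a bare LDI handle, with no vdev_t attached, by calling
 * vdev_disk_ldi_physio() directly:
 *
 *      error = vdev_disk_ldi_physio(vd_lh, (caddr_t)label,
 *          VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, B_READ);
 */
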
static void
vdev_disk_io_intr(buf_t *bp)
{
        vdev_disk_buf_t *vdb = (vdev_disk_buf_t *)bp;
        zio_t *zio = vdb->vdb_io;

        /*
         * The rest of the zio stack only deals with EIO, ECKSUM, and ENXIO.
         * Rather than teach the rest of the stack about other error
         * possibilities (EFAULT, etc), we normalize the error value here.
         */
        zio->io_error = (geterror(bp) != 0 ? EIO : 0);

        if (zio->io_error == 0 && bp->b_resid != 0)
                zio->io_error = EIO;

        kmem_free(vdb, sizeof (vdev_disk_buf_t));

        zio_interrupt(zio);
}

static void
vdev_disk_ioctl_free(zio_t *zio)
{
        kmem_free(zio->io_vsd, sizeof (struct dk_callback));
}

static const zio_vsd_ops_t vdev_disk_vsd_ops = {
        vdev_disk_ioctl_free,
        zio_vsd_default_cksum_report
};

static void
vdev_disk_ioctl_done(void *zio_arg, int error)
{
        zio_t *zio = zio_arg;

        zio->io_error = error;

        zio_interrupt(zio);
}

static int
vdev_disk_io_start(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;
        vdev_disk_t *dvd = vd->vdev_tsd;
        vdev_disk_buf_t *vdb;
        struct dk_callback *dkc;
        buf_t *bp;
        int error;

        /*
         * If the vdev is closed, it's likely in the REMOVED or FAULTED state.
         * Nothing to be done here but return failure.
         */
        if (dvd == NULL || (dvd->vd_ldi_offline && dvd->vd_lh == NULL)) {
                zio->io_error = ENXIO;
                return (ZIO_PIPELINE_CONTINUE);
        }

        if (zio->io_type == ZIO_TYPE_IOCTL) {
                /* XXPOLICY */
                if (!vdev_readable(vd)) {
                        zio->io_error = ENXIO;
                        return (ZIO_PIPELINE_CONTINUE);
                }

                switch (zio->io_cmd) {

                case DKIOCFLUSHWRITECACHE:

                        if (zfs_nocacheflush)
                                break;

                        if (vd->vdev_nowritecache) {
                                zio->io_error = ENOTSUP;
                                break;
                        }

                        zio->io_vsd = dkc = kmem_alloc(sizeof (*dkc),
                            KM_SLEEP);
                        zio->io_vsd_ops = &vdev_disk_vsd_ops;

                        dkc->dkc_callback = vdev_disk_ioctl_done;
                        dkc->dkc_flag = FLUSH_VOLATILE;
                        dkc->dkc_cookie = zio;

                        error = ldi_ioctl(dvd->vd_lh, zio->io_cmd,
                            (uintptr_t)dkc, FKIOCTL, kcred, NULL);

                        if (error == 0) {
                                /*
                                 * The ioctl will be done asynchronously,
                                 * and will call vdev_disk_ioctl_done()
                                 * upon completion.
                                 */
                                return (ZIO_PIPELINE_STOP);
                        }

                        if (error == ENOTSUP || error == ENOTTY) {
                                /*
                                 * If we get ENOTSUP or ENOTTY, we know that
                                 * no future attempts will ever succeed.
                                 * In this case we set a persistent bit so
                                 * that we don't bother with the ioctl in the
                                 * future.
                                 */
                                vd->vdev_nowritecache = B_TRUE;
                        }
                        zio->io_error = error;

                        break;

                default:
                        zio->io_error = ENOTSUP;
                }

                return (ZIO_PIPELINE_CONTINUE);
        }

        vdb = kmem_alloc(sizeof (vdev_disk_buf_t), KM_SLEEP);

        vdb->vdb_io = zio;
        bp = &vdb->vdb_buf;

        bioinit(bp);
        bp->b_flags = B_BUSY | B_NOCACHE |
            (zio->io_type == ZIO_TYPE_READ ? B_READ : B_WRITE);
        if (!(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
                bp->b_flags |= B_FAILFAST;
        bp->b_bcount = zio->io_size;
        bp->b_un.b_addr = zio->io_data;
        bp->b_lblkno = lbtodb(zio->io_offset);
        bp->b_bufsize = zio->io_size;
        bp->b_iodone = (int (*)())vdev_disk_io_intr;

        zfs_zone_zio_start(zio);

        /* ldi_strategy() will return non-zero only on programming errors */
        VERIFY(ldi_strategy(dvd->vd_lh, bp) == 0);

        return (ZIO_PIPELINE_STOP);
}
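
/*
 * Note on the return convention above: vdev_disk_io_start() returns
 * ZIO_PIPELINE_STOP whenever completion is asynchronous (either
 * vdev_disk_io_intr() or vdev_disk_ioctl_done() will call
 * zio_interrupt() later) and ZIO_PIPELINE_CONTINUE when zio->io_error
 * has already been set and the pipeline can advance immediately.
 */
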
static void
vdev_disk_io_done(zio_t *zio)
{
        vdev_t *vd = zio->io_vd;

        zfs_zone_zio_done(zio);

        /*
         * If the device returned EIO, then attempt a DKIOCSTATE ioctl to see
         * if the device has been removed. If this is the case, then we
         * trigger an asynchronous removal of the device. Otherwise, probe
         * the device and make sure it's still accessible.
         */
        if (zio->io_error == EIO && !vd->vdev_remove_wanted) {
                vdev_disk_t *dvd = vd->vdev_tsd;
                int state = DKIO_NONE;

                if (ldi_ioctl(dvd->vd_lh, DKIOCSTATE, (intptr_t)&state,
                    FKIOCTL, kcred, NULL) == 0 && state != DKIO_INSERTED) {
                        /*
                         * We post the resource as soon as possible, instead
                         * of when the async removal actually happens, because
                         * the DE is using this information to discard
                         * previous I/O errors.
                         */
                        zfs_post_remove(zio->io_spa, vd);
                        vd->vdev_remove_wanted = B_TRUE;
                        spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
                } else if (!vd->vdev_delayed_close) {
                        vd->vdev_delayed_close = B_TRUE;
                }
        }
}

vdev_ops_t vdev_disk_ops = {
        vdev_disk_open,
        vdev_disk_close,
        vdev_default_asize,
        vdev_disk_io_start,
        vdev_disk_io_done,
        NULL,
        vdev_disk_hold,
        vdev_disk_rele,
        VDEV_TYPE_DISK,         /* name of this vdev type */
        B_TRUE                  /* leaf vdev */
};
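
/*
 * The positional initializers above fill in vdev_op_open, vdev_op_close,
 * vdev_op_asize, vdev_op_io_start, vdev_op_io_done, vdev_op_state_change
 * (unused by disk vdevs, hence NULL), vdev_op_hold and vdev_op_rele,
 * followed by the vdev type name and the leaf flag.
 */
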
/*
 * Given the root disk device devid or pathname, read the label from
 * the device, and construct a configuration nvlist.
 */
int
vdev_disk_read_rootlabel(char *devpath, char *devid, nvlist_t **config)
{
        ldi_handle_t vd_lh;
        vdev_label_t *label;
        uint64_t s, size;
        int l;
        ddi_devid_t tmpdevid;
        int error = -1;
        char *minor_name;

        /*
         * Read the device label and build the nvlist.
         */
        if (devid != NULL && ddi_devid_str_decode(devid, &tmpdevid,
            &minor_name) == 0) {
                error = ldi_open_by_devid(tmpdevid, minor_name,
                    FREAD, kcred, &vd_lh, zfs_li);
                ddi_devid_free(tmpdevid);
                ddi_devid_str_free(minor_name);
        }

        if (error && (error = ldi_open_by_name(devpath, FREAD, kcred, &vd_lh,
            zfs_li)))
                return (error);

        if (ldi_get_size(vd_lh, &s)) {
                (void) ldi_close(vd_lh, FREAD, kcred);
                return (EIO);
        }

        size = P2ALIGN_TYPED(s, sizeof (vdev_label_t), uint64_t);
        label = kmem_alloc(sizeof (vdev_label_t), KM_SLEEP);

        *config = NULL;
        for (l = 0; l < VDEV_LABELS; l++) {
                uint64_t offset, state, txg = 0;

                /* read vdev label */
                offset = vdev_label_offset(size, l, 0);
                if (vdev_disk_ldi_physio(vd_lh, (caddr_t)label,
                    VDEV_SKIP_SIZE + VDEV_PHYS_SIZE, offset, B_READ) != 0)
                        continue;

                if (nvlist_unpack(label->vl_vdev_phys.vp_nvlist,
                    sizeof (label->vl_vdev_phys.vp_nvlist), config, 0) != 0) {
                        *config = NULL;
                        continue;
                }

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_STATE,
                    &state) != 0 || state >= POOL_STATE_DESTROYED) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                if (nvlist_lookup_uint64(*config, ZPOOL_CONFIG_POOL_TXG,
                    &txg) != 0 || txg == 0) {
                        nvlist_free(*config);
                        *config = NULL;
                        continue;
                }

                break;
        }

        kmem_free(label, sizeof (vdev_label_t));
        (void) ldi_close(vd_lh, FREAD, kcred);
        if (*config == NULL)
                error = EIDRM;

        return (error);
}
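
/*
 * Usage sketch (illustrative only; the device path is hypothetical and
 * error handling is elided): a caller that wants the root pool
 * configuration passes the boot device path and/or devid and, on
 * success, receives an unpacked nvlist that it must eventually free:
 *
 *      nvlist_t *config;
 *
 *      if (vdev_disk_read_rootlabel("/dev/dsk/c0t0d0s0", NULL,
 *          &config) == 0) {
 *              (use config ...)
 *              nvlist_free(config);
 *      }
 */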