7127 remove -Wno-missing-braces from Makefile.uts
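
Review note: with -Wno-missing-braces removed from Makefile.uts, gcc's -Wmissing-braces warning is no longer suppressed for kernel builds, so initializers of an aggregate whose first member is itself an aggregate must be fully braced. The only change in this file is in zvol_getefi(): the on-stack efi_gpe_t starts with an aggregate member (the partition-type GUID), so "{ 0 }" draws the warning while the fully braced "{ {0} }" does not; both forms zero the entire object. The efi_gpt_t initializer in the same function is untouched by this change.

A minimal sketch of the warning, assuming a compiler that flags the "{ 0 }" idiom for nested aggregates (some newer compilers special-case "{ 0 }" and stay quiet); the types below are illustrative only, not the actual efi_partition.h definitions:

    /* hypothetical stand-ins for struct uuid and efi_gpe_t */
    struct guid_ish { unsigned int time_low; unsigned short time_mid; };
    struct gpe_ish  { struct guid_ish ge_type; unsigned long long ge_start_lba; };

    struct gpe_ish a = { 0 };       /* -Wmissing-braces: braces missing around 'ge_type' initializer */
    struct gpe_ish b = { { 0 } };   /* fully braced; no warning, same all-zero result */
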
--- old/usr/src/uts/common/fs/zfs/zvol.c
+++ new/usr/src/uts/common/fs/zfs/zvol.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 *
24 24 * Portions Copyright 2010 Robert Milkowski
25 25 *
26 26 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
27 27 * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
28 28 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
29 29 * Copyright (c) 2014 Integros [integros.com]
30 30 */
31 31
32 32 /*
33 33 * ZFS volume emulation driver.
34 34 *
35 35 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
36 36 * Volumes are accessed through the symbolic links named:
37 37 *
38 38 * /dev/zvol/dsk/<pool_name>/<dataset_name>
39 39 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
40 40 *
41 41 * These links are created by the /dev filesystem (sdev_zvolops.c).
42 42 * Volumes are persistent through reboot. No user command needs to be
43 43 * run before opening and using a device.
44 44 */
45 45
46 46 #include <sys/types.h>
47 47 #include <sys/param.h>
48 48 #include <sys/errno.h>
49 49 #include <sys/uio.h>
50 50 #include <sys/buf.h>
51 51 #include <sys/modctl.h>
52 52 #include <sys/open.h>
53 53 #include <sys/kmem.h>
54 54 #include <sys/conf.h>
55 55 #include <sys/cmn_err.h>
56 56 #include <sys/stat.h>
57 57 #include <sys/zap.h>
58 58 #include <sys/spa.h>
59 59 #include <sys/spa_impl.h>
60 60 #include <sys/zio.h>
61 61 #include <sys/dmu_traverse.h>
62 62 #include <sys/dnode.h>
63 63 #include <sys/dsl_dataset.h>
64 64 #include <sys/dsl_prop.h>
65 65 #include <sys/dkio.h>
66 66 #include <sys/efi_partition.h>
67 67 #include <sys/byteorder.h>
68 68 #include <sys/pathname.h>
69 69 #include <sys/ddi.h>
70 70 #include <sys/sunddi.h>
71 71 #include <sys/crc32.h>
72 72 #include <sys/dirent.h>
73 73 #include <sys/policy.h>
74 74 #include <sys/fs/zfs.h>
75 75 #include <sys/zfs_ioctl.h>
76 76 #include <sys/mkdev.h>
77 77 #include <sys/zil.h>
78 78 #include <sys/refcount.h>
79 79 #include <sys/zfs_znode.h>
80 80 #include <sys/zfs_rlock.h>
81 81 #include <sys/vdev_disk.h>
82 82 #include <sys/vdev_impl.h>
83 83 #include <sys/vdev_raidz.h>
84 84 #include <sys/zvol.h>
85 85 #include <sys/dumphdr.h>
86 86 #include <sys/zil_impl.h>
87 87 #include <sys/dbuf.h>
88 88 #include <sys/dmu_tx.h>
89 89 #include <sys/zfeature.h>
90 90 #include <sys/zio_checksum.h>
91 91
92 92 #include "zfs_namecheck.h"
93 93
94 94 void *zfsdev_state;
95 95 static char *zvol_tag = "zvol_tag";
96 96
97 97 #define ZVOL_DUMPSIZE "dumpsize"
98 98
99 99 /*
100 100 * This lock protects the zfsdev_state structure from being modified
101 101 * while it's being used, e.g. an open that comes in before a create
102 102 * finishes. It also protects temporary opens of the dataset so that,
103 103 * e.g., an open doesn't get a spurious EBUSY.
104 104 */
105 105 kmutex_t zfsdev_state_lock;
106 106 static uint32_t zvol_minors;
107 107
108 108 typedef struct zvol_extent {
109 109 list_node_t ze_node;
110 110 dva_t ze_dva; /* dva associated with this extent */
111 111 uint64_t ze_nblks; /* number of blocks in extent */
112 112 } zvol_extent_t;
113 113
114 114 /*
115 115 * The in-core state of each volume.
116 116 */
117 117 typedef struct zvol_state {
118 118 char zv_name[MAXPATHLEN]; /* pool/dd name */
119 119 uint64_t zv_volsize; /* amount of space we advertise */
120 120 uint64_t zv_volblocksize; /* volume block size */
121 121 minor_t zv_minor; /* minor number */
122 122 uint8_t zv_min_bs; /* minimum addressable block shift */
123 123 uint8_t zv_flags; /* readonly, dumpified, etc. */
124 124 objset_t *zv_objset; /* objset handle */
125 125 uint32_t zv_open_count[OTYPCNT]; /* open counts */
126 126 uint32_t zv_total_opens; /* total open count */
127 127 zilog_t *zv_zilog; /* ZIL handle */
128 128 list_t zv_extents; /* List of extents for dump */
129 129 znode_t zv_znode; /* for range locking */
130 130 dmu_buf_t *zv_dbuf; /* bonus handle */
131 131 } zvol_state_t;
132 132
133 133 /*
134 134 * zvol specific flags
135 135 */
136 136 #define ZVOL_RDONLY 0x1
137 137 #define ZVOL_DUMPIFIED 0x2
138 138 #define ZVOL_EXCL 0x4
139 139 #define ZVOL_WCE 0x8
140 140
141 141 /*
142 142 * zvol maximum transfer in one DMU tx.
143 143 */
144 144 int zvol_maxphys = DMU_MAX_ACCESS/2;
145 145
146 146 /*
147 147 * Toggle unmap functionality.
148 148 */
149 149 boolean_t zvol_unmap_enabled = B_TRUE;
150 150
151 151 extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
152 152 nvlist_t *, nvlist_t *);
153 153 static int zvol_remove_zv(zvol_state_t *);
154 154 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
155 155 static int zvol_dumpify(zvol_state_t *zv);
156 156 static int zvol_dump_fini(zvol_state_t *zv);
157 157 static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
158 158
159 159 static void
160 160 zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
161 161 {
162 162 dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
163 163
164 164 zv->zv_volsize = volsize;
165 165 VERIFY(ddi_prop_update_int64(dev, zfs_dip,
166 166 "Size", volsize) == DDI_SUCCESS);
167 167 VERIFY(ddi_prop_update_int64(dev, zfs_dip,
168 168 "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
169 169
170 170 /* Notify specfs to invalidate the cached size */
171 171 spec_size_invalidate(dev, VBLK);
172 172 spec_size_invalidate(dev, VCHR);
173 173 }
174 174
175 175 int
176 176 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
177 177 {
178 178 if (volsize == 0)
179 179 return (SET_ERROR(EINVAL));
180 180
181 181 if (volsize % blocksize != 0)
182 182 return (SET_ERROR(EINVAL));
183 183
184 184 #ifdef _ILP32
185 185 if (volsize - 1 > SPEC_MAXOFFSET_T)
186 186 return (SET_ERROR(EOVERFLOW));
187 187 #endif
188 188 return (0);
189 189 }
190 190
191 191 int
192 192 zvol_check_volblocksize(uint64_t volblocksize)
193 193 {
194 194 if (volblocksize < SPA_MINBLOCKSIZE ||
195 195 volblocksize > SPA_OLD_MAXBLOCKSIZE ||
196 196 !ISP2(volblocksize))
197 197 return (SET_ERROR(EDOM));
198 198
199 199 return (0);
200 200 }
201 201
202 202 int
203 203 zvol_get_stats(objset_t *os, nvlist_t *nv)
204 204 {
205 205 int error;
206 206 dmu_object_info_t doi;
207 207 uint64_t val;
208 208
209 209 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
210 210 if (error)
211 211 return (error);
212 212
213 213 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
214 214
215 215 error = dmu_object_info(os, ZVOL_OBJ, &doi);
216 216
217 217 if (error == 0) {
218 218 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
219 219 doi.doi_data_block_size);
220 220 }
221 221
222 222 return (error);
223 223 }
224 224
225 225 static zvol_state_t *
226 226 zvol_minor_lookup(const char *name)
227 227 {
228 228 minor_t minor;
229 229 zvol_state_t *zv;
230 230
231 231 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
232 232
233 233 for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
234 234 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
235 235 if (zv == NULL)
236 236 continue;
237 237 if (strcmp(zv->zv_name, name) == 0)
238 238 return (zv);
239 239 }
240 240
241 241 return (NULL);
242 242 }
243 243
244 244 /* extent mapping arg */
245 245 struct maparg {
246 246 zvol_state_t *ma_zv;
247 247 uint64_t ma_blks;
248 248 };
249 249
250 250 /*ARGSUSED*/
251 251 static int
252 252 zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
253 253 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
254 254 {
255 255 struct maparg *ma = arg;
256 256 zvol_extent_t *ze;
257 257 int bs = ma->ma_zv->zv_volblocksize;
258 258
259 259 if (bp == NULL || BP_IS_HOLE(bp) ||
260 260 zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
261 261 return (0);
262 262
263 263 VERIFY(!BP_IS_EMBEDDED(bp));
264 264
265 265 VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
266 266 ma->ma_blks++;
267 267
268 268 /* Abort immediately if we have encountered gang blocks */
269 269 if (BP_IS_GANG(bp))
270 270 return (SET_ERROR(EFRAGS));
271 271
272 272 /*
273 273 * See if the block is at the end of the previous extent.
274 274 */
275 275 ze = list_tail(&ma->ma_zv->zv_extents);
276 276 if (ze &&
277 277 DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
278 278 DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
279 279 DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
280 280 ze->ze_nblks++;
281 281 return (0);
282 282 }
283 283
284 284 dprintf_bp(bp, "%s", "next blkptr:");
285 285
286 286 /* start a new extent */
287 287 ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
288 288 ze->ze_dva = bp->blk_dva[0]; /* structure assignment */
289 289 ze->ze_nblks = 1;
290 290 list_insert_tail(&ma->ma_zv->zv_extents, ze);
291 291 return (0);
292 292 }
293 293
294 294 static void
295 295 zvol_free_extents(zvol_state_t *zv)
296 296 {
297 297 zvol_extent_t *ze;
298 298
299 299 while (ze = list_head(&zv->zv_extents)) {
300 300 list_remove(&zv->zv_extents, ze);
301 301 kmem_free(ze, sizeof (zvol_extent_t));
302 302 }
303 303 }
304 304
305 305 static int
306 306 zvol_get_lbas(zvol_state_t *zv)
307 307 {
308 308 objset_t *os = zv->zv_objset;
309 309 struct maparg ma;
310 310 int err;
311 311
312 312 ma.ma_zv = zv;
313 313 ma.ma_blks = 0;
314 314 zvol_free_extents(zv);
315 315
316 316 /* commit any in-flight changes before traversing the dataset */
317 317 txg_wait_synced(dmu_objset_pool(os), 0);
318 318 err = traverse_dataset(dmu_objset_ds(os), 0,
319 319 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
320 320 if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
321 321 zvol_free_extents(zv);
322 322 return (err ? err : EIO);
323 323 }
324 324
325 325 return (0);
326 326 }
327 327
328 328 /* ARGSUSED */
329 329 void
330 330 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
331 331 {
332 332 zfs_creat_t *zct = arg;
333 333 nvlist_t *nvprops = zct->zct_props;
334 334 int error;
335 335 uint64_t volblocksize, volsize;
336 336
337 337 VERIFY(nvlist_lookup_uint64(nvprops,
338 338 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
339 339 if (nvlist_lookup_uint64(nvprops,
340 340 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
341 341 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
342 342
343 343 /*
344 344 * These properties must be removed from the list so the generic
345 345 * property setting step won't apply to them.
346 346 */
347 347 VERIFY(nvlist_remove_all(nvprops,
348 348 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
349 349 (void) nvlist_remove_all(nvprops,
350 350 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
351 351
352 352 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
353 353 DMU_OT_NONE, 0, tx);
354 354 ASSERT(error == 0);
355 355
356 356 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
357 357 DMU_OT_NONE, 0, tx);
358 358 ASSERT(error == 0);
359 359
360 360 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
361 361 ASSERT(error == 0);
362 362 }
363 363
364 364 /*
365 365 * Replay a TX_TRUNCATE ZIL transaction if asked. TX_TRUNCATE is how we
366 366 * implement DKIOCFREE/free-long-range.
367 367 */
368 368 static int
369 369 zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
370 370 {
371 371 uint64_t offset, length;
372 372
373 373 if (byteswap)
374 374 byteswap_uint64_array(lr, sizeof (*lr));
375 375
376 376 offset = lr->lr_offset;
377 377 length = lr->lr_length;
378 378
379 379 return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
380 380 }
381 381
382 382 /*
383 383 * Replay a TX_WRITE ZIL transaction that didn't get committed
384 384 * after a system failure
385 385 */
386 386 static int
387 387 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
388 388 {
389 389 objset_t *os = zv->zv_objset;
390 390 char *data = (char *)(lr + 1); /* data follows lr_write_t */
391 391 uint64_t offset, length;
392 392 dmu_tx_t *tx;
393 393 int error;
394 394
395 395 if (byteswap)
396 396 byteswap_uint64_array(lr, sizeof (*lr));
397 397
398 398 offset = lr->lr_offset;
399 399 length = lr->lr_length;
400 400
401 401 /* If it's a dmu_sync() block, write the whole block */
402 402 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
403 403 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
404 404 if (length < blocksize) {
405 405 offset -= offset % blocksize;
406 406 length = blocksize;
407 407 }
408 408 }
409 409
410 410 tx = dmu_tx_create(os);
411 411 dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
412 412 error = dmu_tx_assign(tx, TXG_WAIT);
413 413 if (error) {
414 414 dmu_tx_abort(tx);
415 415 } else {
416 416 dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
417 417 dmu_tx_commit(tx);
418 418 }
419 419
420 420 return (error);
421 421 }
422 422
423 423 /* ARGSUSED */
424 424 static int
425 425 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
426 426 {
427 427 return (SET_ERROR(ENOTSUP));
428 428 }
429 429
430 430 /*
431 431 * Callback vectors for replaying records.
432 432 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
433 433 */
434 434 zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
435 435 zvol_replay_err, /* 0 no such transaction type */
436 436 zvol_replay_err, /* TX_CREATE */
437 437 zvol_replay_err, /* TX_MKDIR */
438 438 zvol_replay_err, /* TX_MKXATTR */
439 439 zvol_replay_err, /* TX_SYMLINK */
440 440 zvol_replay_err, /* TX_REMOVE */
441 441 zvol_replay_err, /* TX_RMDIR */
442 442 zvol_replay_err, /* TX_LINK */
443 443 zvol_replay_err, /* TX_RENAME */
444 444 zvol_replay_write, /* TX_WRITE */
445 445 zvol_replay_truncate, /* TX_TRUNCATE */
446 446 zvol_replay_err, /* TX_SETATTR */
447 447 zvol_replay_err, /* TX_ACL */
448 448 zvol_replay_err, /* TX_CREATE_ACL */
449 449 zvol_replay_err, /* TX_CREATE_ATTR */
450 450 zvol_replay_err, /* TX_CREATE_ACL_ATTR */
451 451 zvol_replay_err, /* TX_MKDIR_ACL */
452 452 zvol_replay_err, /* TX_MKDIR_ATTR */
453 453 zvol_replay_err, /* TX_MKDIR_ACL_ATTR */
454 454 zvol_replay_err, /* TX_WRITE2 */
455 455 };
456 456
457 457 int
458 458 zvol_name2minor(const char *name, minor_t *minor)
459 459 {
460 460 zvol_state_t *zv;
461 461
462 462 mutex_enter(&zfsdev_state_lock);
463 463 zv = zvol_minor_lookup(name);
464 464 if (minor && zv)
465 465 *minor = zv->zv_minor;
466 466 mutex_exit(&zfsdev_state_lock);
467 467 return (zv ? 0 : -1);
468 468 }
469 469
470 470 /*
471 471 * Create a minor node (plus a whole lot more) for the specified volume.
472 472 */
473 473 int
474 474 zvol_create_minor(const char *name)
475 475 {
476 476 zfs_soft_state_t *zs;
477 477 zvol_state_t *zv;
478 478 objset_t *os;
479 479 dmu_object_info_t doi;
480 480 minor_t minor = 0;
481 481 char chrbuf[30], blkbuf[30];
482 482 int error;
483 483
484 484 mutex_enter(&zfsdev_state_lock);
485 485
486 486 if (zvol_minor_lookup(name) != NULL) {
487 487 mutex_exit(&zfsdev_state_lock);
488 488 return (SET_ERROR(EEXIST));
489 489 }
490 490
491 491 /* lie and say we're read-only */
492 492 error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
493 493
494 494 if (error) {
495 495 mutex_exit(&zfsdev_state_lock);
496 496 return (error);
497 497 }
498 498
499 499 if ((minor = zfsdev_minor_alloc()) == 0) {
500 500 dmu_objset_disown(os, FTAG);
501 501 mutex_exit(&zfsdev_state_lock);
502 502 return (SET_ERROR(ENXIO));
503 503 }
504 504
505 505 if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
506 506 dmu_objset_disown(os, FTAG);
507 507 mutex_exit(&zfsdev_state_lock);
508 508 return (SET_ERROR(EAGAIN));
509 509 }
510 510 (void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
511 511 (char *)name);
512 512
513 513 (void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
514 514
515 515 if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
516 516 minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
517 517 ddi_soft_state_free(zfsdev_state, minor);
518 518 dmu_objset_disown(os, FTAG);
519 519 mutex_exit(&zfsdev_state_lock);
520 520 return (SET_ERROR(EAGAIN));
521 521 }
522 522
523 523 (void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
524 524
525 525 if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
526 526 minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
527 527 ddi_remove_minor_node(zfs_dip, chrbuf);
528 528 ddi_soft_state_free(zfsdev_state, minor);
529 529 dmu_objset_disown(os, FTAG);
530 530 mutex_exit(&zfsdev_state_lock);
531 531 return (SET_ERROR(EAGAIN));
532 532 }
533 533
534 534 zs = ddi_get_soft_state(zfsdev_state, minor);
535 535 zs->zss_type = ZSST_ZVOL;
536 536 zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
537 537 (void) strlcpy(zv->zv_name, name, MAXPATHLEN);
538 538 zv->zv_min_bs = DEV_BSHIFT;
539 539 zv->zv_minor = minor;
540 540 zv->zv_objset = os;
541 541 if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
542 542 zv->zv_flags |= ZVOL_RDONLY;
543 543 mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
544 544 avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
545 545 sizeof (rl_t), offsetof(rl_t, r_node));
546 546 list_create(&zv->zv_extents, sizeof (zvol_extent_t),
547 547 offsetof(zvol_extent_t, ze_node));
548 548 /* get and cache the blocksize */
549 549 error = dmu_object_info(os, ZVOL_OBJ, &doi);
550 550 ASSERT(error == 0);
551 551 zv->zv_volblocksize = doi.doi_data_block_size;
552 552
553 553 if (spa_writeable(dmu_objset_spa(os))) {
554 554 if (zil_replay_disable)
555 555 zil_destroy(dmu_objset_zil(os), B_FALSE);
556 556 else
557 557 zil_replay(os, zv, zvol_replay_vector);
558 558 }
559 559 dmu_objset_disown(os, FTAG);
560 560 zv->zv_objset = NULL;
561 561
562 562 zvol_minors++;
563 563
564 564 mutex_exit(&zfsdev_state_lock);
565 565
566 566 return (0);
567 567 }
568 568
569 569 /*
570 570 * Remove minor node for the specified volume.
571 571 */
572 572 static int
573 573 zvol_remove_zv(zvol_state_t *zv)
574 574 {
575 575 char nmbuf[20];
576 576 minor_t minor = zv->zv_minor;
577 577
578 578 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
579 579 if (zv->zv_total_opens != 0)
580 580 return (SET_ERROR(EBUSY));
581 581
582 582 (void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
583 583 ddi_remove_minor_node(zfs_dip, nmbuf);
584 584
585 585 (void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
586 586 ddi_remove_minor_node(zfs_dip, nmbuf);
587 587
588 588 avl_destroy(&zv->zv_znode.z_range_avl);
589 589 mutex_destroy(&zv->zv_znode.z_range_lock);
590 590
591 591 kmem_free(zv, sizeof (zvol_state_t));
592 592
593 593 ddi_soft_state_free(zfsdev_state, minor);
594 594
595 595 zvol_minors--;
596 596 return (0);
597 597 }
598 598
599 599 int
600 600 zvol_remove_minor(const char *name)
601 601 {
602 602 zvol_state_t *zv;
603 603 int rc;
604 604
605 605 mutex_enter(&zfsdev_state_lock);
606 606 if ((zv = zvol_minor_lookup(name)) == NULL) {
607 607 mutex_exit(&zfsdev_state_lock);
608 608 return (SET_ERROR(ENXIO));
609 609 }
610 610 rc = zvol_remove_zv(zv);
611 611 mutex_exit(&zfsdev_state_lock);
612 612 return (rc);
613 613 }
614 614
615 615 int
616 616 zvol_first_open(zvol_state_t *zv)
617 617 {
618 618 objset_t *os;
619 619 uint64_t volsize;
620 620 int error;
621 621 uint64_t readonly;
622 622
623 623 /* lie and say we're read-only */
624 624 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
625 625 zvol_tag, &os);
626 626 if (error)
627 627 return (error);
628 628
629 629 zv->zv_objset = os;
630 630 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
631 631 if (error) {
632 632 ASSERT(error == 0);
633 633 dmu_objset_disown(os, zvol_tag);
634 634 return (error);
635 635 }
636 636
637 637 error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
638 638 if (error) {
639 639 dmu_objset_disown(os, zvol_tag);
640 640 return (error);
641 641 }
642 642
643 643 zvol_size_changed(zv, volsize);
644 644 zv->zv_zilog = zil_open(os, zvol_get_data);
645 645
646 646 VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
647 647 NULL) == 0);
648 648 if (readonly || dmu_objset_is_snapshot(os) ||
649 649 !spa_writeable(dmu_objset_spa(os)))
650 650 zv->zv_flags |= ZVOL_RDONLY;
651 651 else
652 652 zv->zv_flags &= ~ZVOL_RDONLY;
653 653 return (error);
654 654 }
655 655
656 656 void
657 657 zvol_last_close(zvol_state_t *zv)
658 658 {
659 659 zil_close(zv->zv_zilog);
660 660 zv->zv_zilog = NULL;
661 661
662 662 dmu_buf_rele(zv->zv_dbuf, zvol_tag);
663 663 zv->zv_dbuf = NULL;
664 664
665 665 /*
666 666 * Evict cached data
667 667 */
668 668 if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
669 669 !(zv->zv_flags & ZVOL_RDONLY))
670 670 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
671 671 dmu_objset_evict_dbufs(zv->zv_objset);
672 672
673 673 dmu_objset_disown(zv->zv_objset, zvol_tag);
674 674 zv->zv_objset = NULL;
675 675 }
676 676
677 677 int
678 678 zvol_prealloc(zvol_state_t *zv)
679 679 {
680 680 objset_t *os = zv->zv_objset;
681 681 dmu_tx_t *tx;
682 682 uint64_t refd, avail, usedobjs, availobjs;
683 683 uint64_t resid = zv->zv_volsize;
684 684 uint64_t off = 0;
685 685
686 686 /* Check the space usage before attempting to allocate the space */
687 687 dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
688 688 if (avail < zv->zv_volsize)
689 689 return (SET_ERROR(ENOSPC));
690 690
691 691 /* Free old extents if they exist */
692 692 zvol_free_extents(zv);
693 693
694 694 while (resid != 0) {
695 695 int error;
696 696 uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
697 697
698 698 tx = dmu_tx_create(os);
699 699 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
700 700 error = dmu_tx_assign(tx, TXG_WAIT);
701 701 if (error) {
702 702 dmu_tx_abort(tx);
703 703 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
704 704 return (error);
705 705 }
706 706 dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
707 707 dmu_tx_commit(tx);
708 708 off += bytes;
709 709 resid -= bytes;
710 710 }
711 711 txg_wait_synced(dmu_objset_pool(os), 0);
712 712
713 713 return (0);
714 714 }
715 715
716 716 static int
717 717 zvol_update_volsize(objset_t *os, uint64_t volsize)
718 718 {
719 719 dmu_tx_t *tx;
720 720 int error;
721 721
722 722 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
723 723
724 724 tx = dmu_tx_create(os);
725 725 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
726 726 dmu_tx_mark_netfree(tx);
727 727 error = dmu_tx_assign(tx, TXG_WAIT);
728 728 if (error) {
729 729 dmu_tx_abort(tx);
730 730 return (error);
731 731 }
732 732
733 733 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
734 734 &volsize, tx);
735 735 dmu_tx_commit(tx);
736 736
737 737 if (error == 0)
738 738 error = dmu_free_long_range(os,
739 739 ZVOL_OBJ, volsize, DMU_OBJECT_END);
740 740 return (error);
741 741 }
742 742
743 743 void
744 744 zvol_remove_minors(const char *name)
745 745 {
746 746 zvol_state_t *zv;
747 747 char *namebuf;
748 748 minor_t minor;
749 749
750 750 namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
751 751 (void) strncpy(namebuf, name, strlen(name));
752 752 (void) strcat(namebuf, "/");
753 753 mutex_enter(&zfsdev_state_lock);
754 754 for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
755 755
756 756 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
757 757 if (zv == NULL)
758 758 continue;
759 759 if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
760 760 (void) zvol_remove_zv(zv);
761 761 }
762 762 kmem_free(namebuf, strlen(name) + 2);
763 763
764 764 mutex_exit(&zfsdev_state_lock);
765 765 }
766 766
767 767 static int
768 768 zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
769 769 {
770 770 uint64_t old_volsize = 0ULL;
771 771 int error = 0;
772 772
773 773 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
774 774
775 775 /*
776 776 * Reinitialize the dump area to the new size. If we
777 777 * failed to resize the dump area then restore it back to
778 778 * its original size. We must set the new volsize prior
779 779 * to calling dumpvp_resize() to ensure that the devices'
 780  780 	 * size(9P) is not visible to the dump subsystem.
781 781 */
782 782 old_volsize = zv->zv_volsize;
783 783 zvol_size_changed(zv, volsize);
784 784
785 785 if (zv->zv_flags & ZVOL_DUMPIFIED) {
786 786 if ((error = zvol_dumpify(zv)) != 0 ||
787 787 (error = dumpvp_resize()) != 0) {
788 788 int dumpify_error;
789 789
790 790 (void) zvol_update_volsize(zv->zv_objset, old_volsize);
791 791 zvol_size_changed(zv, old_volsize);
792 792 dumpify_error = zvol_dumpify(zv);
793 793 error = dumpify_error ? dumpify_error : error;
794 794 }
795 795 }
796 796
797 797 /*
798 798 * Generate a LUN expansion event.
799 799 */
800 800 if (error == 0) {
801 801 sysevent_id_t eid;
802 802 nvlist_t *attr;
803 803 char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
804 804
805 805 (void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
806 806 zv->zv_minor);
807 807
808 808 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
809 809 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
810 810
811 811 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
812 812 ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
813 813
814 814 nvlist_free(attr);
815 815 kmem_free(physpath, MAXPATHLEN);
816 816 }
817 817 return (error);
818 818 }
819 819
820 820 int
821 821 zvol_set_volsize(const char *name, uint64_t volsize)
822 822 {
823 823 zvol_state_t *zv = NULL;
824 824 objset_t *os;
825 825 int error;
826 826 dmu_object_info_t doi;
827 827 uint64_t readonly;
828 828 boolean_t owned = B_FALSE;
829 829
830 830 error = dsl_prop_get_integer(name,
831 831 zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
832 832 if (error != 0)
833 833 return (error);
834 834 if (readonly)
835 835 return (SET_ERROR(EROFS));
836 836
837 837 mutex_enter(&zfsdev_state_lock);
838 838 zv = zvol_minor_lookup(name);
839 839
840 840 if (zv == NULL || zv->zv_objset == NULL) {
841 841 if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
842 842 FTAG, &os)) != 0) {
843 843 mutex_exit(&zfsdev_state_lock);
844 844 return (error);
845 845 }
846 846 owned = B_TRUE;
847 847 if (zv != NULL)
848 848 zv->zv_objset = os;
849 849 } else {
850 850 os = zv->zv_objset;
851 851 }
852 852
853 853 if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
854 854 (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
855 855 goto out;
856 856
857 857 error = zvol_update_volsize(os, volsize);
858 858
859 859 if (error == 0 && zv != NULL)
860 860 error = zvol_update_live_volsize(zv, volsize);
861 861 out:
862 862 if (owned) {
863 863 dmu_objset_disown(os, FTAG);
864 864 if (zv != NULL)
865 865 zv->zv_objset = NULL;
866 866 }
867 867 mutex_exit(&zfsdev_state_lock);
868 868 return (error);
869 869 }
870 870
871 871 /*ARGSUSED*/
872 872 int
873 873 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
874 874 {
875 875 zvol_state_t *zv;
876 876 int err = 0;
877 877
878 878 mutex_enter(&zfsdev_state_lock);
879 879
880 880 zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
881 881 if (zv == NULL) {
882 882 mutex_exit(&zfsdev_state_lock);
883 883 return (SET_ERROR(ENXIO));
884 884 }
885 885
886 886 if (zv->zv_total_opens == 0)
887 887 err = zvol_first_open(zv);
888 888 if (err) {
889 889 mutex_exit(&zfsdev_state_lock);
890 890 return (err);
891 891 }
892 892 if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
893 893 err = SET_ERROR(EROFS);
894 894 goto out;
895 895 }
896 896 if (zv->zv_flags & ZVOL_EXCL) {
897 897 err = SET_ERROR(EBUSY);
898 898 goto out;
899 899 }
900 900 if (flag & FEXCL) {
901 901 if (zv->zv_total_opens != 0) {
902 902 err = SET_ERROR(EBUSY);
903 903 goto out;
904 904 }
905 905 zv->zv_flags |= ZVOL_EXCL;
906 906 }
907 907
908 908 if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
909 909 zv->zv_open_count[otyp]++;
910 910 zv->zv_total_opens++;
911 911 }
912 912 mutex_exit(&zfsdev_state_lock);
913 913
914 914 return (err);
915 915 out:
916 916 if (zv->zv_total_opens == 0)
917 917 zvol_last_close(zv);
918 918 mutex_exit(&zfsdev_state_lock);
919 919 return (err);
920 920 }
921 921
922 922 /*ARGSUSED*/
923 923 int
924 924 zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
925 925 {
926 926 minor_t minor = getminor(dev);
927 927 zvol_state_t *zv;
928 928 int error = 0;
929 929
930 930 mutex_enter(&zfsdev_state_lock);
931 931
932 932 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
933 933 if (zv == NULL) {
934 934 mutex_exit(&zfsdev_state_lock);
935 935 return (SET_ERROR(ENXIO));
936 936 }
937 937
938 938 if (zv->zv_flags & ZVOL_EXCL) {
939 939 ASSERT(zv->zv_total_opens == 1);
940 940 zv->zv_flags &= ~ZVOL_EXCL;
941 941 }
942 942
943 943 /*
944 944 * If the open count is zero, this is a spurious close.
945 945 * That indicates a bug in the kernel / DDI framework.
946 946 */
947 947 ASSERT(zv->zv_open_count[otyp] != 0);
948 948 ASSERT(zv->zv_total_opens != 0);
949 949
950 950 /*
951 951 * You may get multiple opens, but only one close.
952 952 */
953 953 zv->zv_open_count[otyp]--;
954 954 zv->zv_total_opens--;
955 955
956 956 if (zv->zv_total_opens == 0)
957 957 zvol_last_close(zv);
958 958
959 959 mutex_exit(&zfsdev_state_lock);
960 960 return (error);
961 961 }
962 962
963 963 static void
964 964 zvol_get_done(zgd_t *zgd, int error)
965 965 {
966 966 if (zgd->zgd_db)
967 967 dmu_buf_rele(zgd->zgd_db, zgd);
968 968
969 969 zfs_range_unlock(zgd->zgd_rl);
970 970
971 971 if (error == 0 && zgd->zgd_bp)
972 972 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
973 973
974 974 kmem_free(zgd, sizeof (zgd_t));
975 975 }
976 976
977 977 /*
978 978 * Get data to generate a TX_WRITE intent log record.
979 979 */
980 980 static int
981 981 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
982 982 {
983 983 zvol_state_t *zv = arg;
984 984 objset_t *os = zv->zv_objset;
985 985 uint64_t object = ZVOL_OBJ;
986 986 uint64_t offset = lr->lr_offset;
987 987 uint64_t size = lr->lr_length; /* length of user data */
988 988 blkptr_t *bp = &lr->lr_blkptr;
989 989 dmu_buf_t *db;
990 990 zgd_t *zgd;
991 991 int error;
992 992
993 993 ASSERT(zio != NULL);
994 994 ASSERT(size != 0);
995 995
996 996 zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
997 997 zgd->zgd_zilog = zv->zv_zilog;
998 998 zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
999 999
1000 1000 /*
1001 1001 * Write records come in two flavors: immediate and indirect.
1002 1002 * For small writes it's cheaper to store the data with the
1003 1003 * log record (immediate); for large writes it's cheaper to
1004 1004 * sync the data and get a pointer to it (indirect) so that
1005 1005 * we don't have to write the data twice.
1006 1006 */
1007 1007 if (buf != NULL) { /* immediate write */
1008 1008 error = dmu_read(os, object, offset, size, buf,
1009 1009 DMU_READ_NO_PREFETCH);
1010 1010 } else {
1011 1011 size = zv->zv_volblocksize;
1012 1012 offset = P2ALIGN(offset, size);
1013 1013 error = dmu_buf_hold(os, object, offset, zgd, &db,
1014 1014 DMU_READ_NO_PREFETCH);
1015 1015 if (error == 0) {
1016 1016 blkptr_t *obp = dmu_buf_get_blkptr(db);
1017 1017 if (obp) {
1018 1018 ASSERT(BP_IS_HOLE(bp));
1019 1019 *bp = *obp;
1020 1020 }
1021 1021
1022 1022 zgd->zgd_db = db;
1023 1023 zgd->zgd_bp = bp;
1024 1024
1025 1025 ASSERT(db->db_offset == offset);
1026 1026 ASSERT(db->db_size == size);
1027 1027
1028 1028 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1029 1029 zvol_get_done, zgd);
1030 1030
1031 1031 if (error == 0)
1032 1032 return (0);
1033 1033 }
1034 1034 }
1035 1035
1036 1036 zvol_get_done(zgd, error);
1037 1037
1038 1038 return (error);
1039 1039 }
1040 1040
1041 1041 /*
1042 1042 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1043 1043 *
1044 1044 * We store data in the log buffers if it's small enough.
1045 1045 * Otherwise we will later flush the data out via dmu_sync().
1046 1046 */
1047 1047 ssize_t zvol_immediate_write_sz = 32768;
1048 1048
1049 1049 static void
1050 1050 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1051 1051 boolean_t sync)
1052 1052 {
1053 1053 uint32_t blocksize = zv->zv_volblocksize;
1054 1054 zilog_t *zilog = zv->zv_zilog;
1055 1055 boolean_t slogging;
1056 1056 ssize_t immediate_write_sz;
1057 1057
1058 1058 if (zil_replaying(zilog, tx))
1059 1059 return;
1060 1060
1061 1061 immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1062 1062 ? 0 : zvol_immediate_write_sz;
1063 1063
1064 1064 slogging = spa_has_slogs(zilog->zl_spa) &&
1065 1065 (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
1066 1066
1067 1067 while (resid) {
1068 1068 itx_t *itx;
1069 1069 lr_write_t *lr;
1070 1070 ssize_t len;
1071 1071 itx_wr_state_t write_state;
1072 1072
1073 1073 /*
1074 1074 * Unlike zfs_log_write() we can be called with
1075 1075 	 * up to DMU_MAX_ACCESS/2 (5MB) writes.
1076 1076 */
1077 1077 if (blocksize > immediate_write_sz && !slogging &&
1078 1078 resid >= blocksize && off % blocksize == 0) {
1079 1079 write_state = WR_INDIRECT; /* uses dmu_sync */
1080 1080 len = blocksize;
1081 1081 } else if (sync) {
1082 1082 write_state = WR_COPIED;
1083 1083 len = MIN(ZIL_MAX_LOG_DATA, resid);
1084 1084 } else {
1085 1085 write_state = WR_NEED_COPY;
1086 1086 len = MIN(ZIL_MAX_LOG_DATA, resid);
1087 1087 }
1088 1088
1089 1089 itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1090 1090 (write_state == WR_COPIED ? len : 0));
1091 1091 lr = (lr_write_t *)&itx->itx_lr;
1092 1092 if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
1093 1093 ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1094 1094 zil_itx_destroy(itx);
1095 1095 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1096 1096 lr = (lr_write_t *)&itx->itx_lr;
1097 1097 write_state = WR_NEED_COPY;
1098 1098 }
1099 1099
1100 1100 itx->itx_wr_state = write_state;
1101 1101 if (write_state == WR_NEED_COPY)
1102 1102 itx->itx_sod += len;
1103 1103 lr->lr_foid = ZVOL_OBJ;
1104 1104 lr->lr_offset = off;
1105 1105 lr->lr_length = len;
1106 1106 lr->lr_blkoff = 0;
1107 1107 BP_ZERO(&lr->lr_blkptr);
1108 1108
1109 1109 itx->itx_private = zv;
1110 1110 itx->itx_sync = sync;
1111 1111
1112 1112 zil_itx_assign(zilog, itx, tx);
1113 1113
1114 1114 off += len;
1115 1115 resid -= len;
1116 1116 }
1117 1117 }
1118 1118
1119 1119 static int
1120 1120 zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1121 1121 uint64_t size, boolean_t doread, boolean_t isdump)
1122 1122 {
1123 1123 vdev_disk_t *dvd;
1124 1124 int c;
1125 1125 int numerrors = 0;
1126 1126
1127 1127 if (vd->vdev_ops == &vdev_mirror_ops ||
1128 1128 vd->vdev_ops == &vdev_replacing_ops ||
1129 1129 vd->vdev_ops == &vdev_spare_ops) {
1130 1130 for (c = 0; c < vd->vdev_children; c++) {
1131 1131 int err = zvol_dumpio_vdev(vd->vdev_child[c],
1132 1132 addr, offset, origoffset, size, doread, isdump);
1133 1133 if (err != 0) {
1134 1134 numerrors++;
1135 1135 } else if (doread) {
1136 1136 break;
1137 1137 }
1138 1138 }
1139 1139 }
1140 1140
1141 1141 if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1142 1142 return (numerrors < vd->vdev_children ? 0 : EIO);
1143 1143
1144 1144 if (doread && !vdev_readable(vd))
1145 1145 return (SET_ERROR(EIO));
1146 1146 else if (!doread && !vdev_writeable(vd))
1147 1147 return (SET_ERROR(EIO));
1148 1148
1149 1149 if (vd->vdev_ops == &vdev_raidz_ops) {
1150 1150 return (vdev_raidz_physio(vd,
1151 1151 addr, size, offset, origoffset, doread, isdump));
1152 1152 }
1153 1153
1154 1154 offset += VDEV_LABEL_START_SIZE;
1155 1155
1156 1156 if (ddi_in_panic() || isdump) {
1157 1157 ASSERT(!doread);
1158 1158 if (doread)
1159 1159 return (SET_ERROR(EIO));
1160 1160 dvd = vd->vdev_tsd;
1161 1161 ASSERT3P(dvd, !=, NULL);
1162 1162 return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1163 1163 lbtodb(size)));
1164 1164 } else {
1165 1165 dvd = vd->vdev_tsd;
1166 1166 ASSERT3P(dvd, !=, NULL);
1167 1167 return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1168 1168 offset, doread ? B_READ : B_WRITE));
1169 1169 }
1170 1170 }
1171 1171
1172 1172 static int
1173 1173 zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1174 1174 boolean_t doread, boolean_t isdump)
1175 1175 {
1176 1176 vdev_t *vd;
1177 1177 int error;
1178 1178 zvol_extent_t *ze;
1179 1179 spa_t *spa = dmu_objset_spa(zv->zv_objset);
1180 1180
1181 1181 	/* Must be sector aligned, and not straddle a block boundary. */
1182 1182 if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1183 1183 P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1184 1184 return (SET_ERROR(EINVAL));
1185 1185 }
1186 1186 ASSERT(size <= zv->zv_volblocksize);
1187 1187
1188 1188 /* Locate the extent this belongs to */
1189 1189 ze = list_head(&zv->zv_extents);
1190 1190 while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
1191 1191 offset -= ze->ze_nblks * zv->zv_volblocksize;
1192 1192 ze = list_next(&zv->zv_extents, ze);
1193 1193 }
1194 1194
1195 1195 if (ze == NULL)
1196 1196 return (SET_ERROR(EINVAL));
1197 1197
1198 1198 if (!ddi_in_panic())
1199 1199 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1200 1200
1201 1201 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1202 1202 offset += DVA_GET_OFFSET(&ze->ze_dva);
1203 1203 error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1204 1204 size, doread, isdump);
1205 1205
1206 1206 if (!ddi_in_panic())
1207 1207 spa_config_exit(spa, SCL_STATE, FTAG);
1208 1208
1209 1209 return (error);
1210 1210 }
1211 1211
1212 1212 int
1213 1213 zvol_strategy(buf_t *bp)
1214 1214 {
1215 1215 zfs_soft_state_t *zs = NULL;
1216 1216 zvol_state_t *zv;
1217 1217 uint64_t off, volsize;
1218 1218 size_t resid;
1219 1219 char *addr;
1220 1220 objset_t *os;
1221 1221 rl_t *rl;
1222 1222 int error = 0;
1223 1223 boolean_t doread = bp->b_flags & B_READ;
1224 1224 boolean_t is_dumpified;
1225 1225 boolean_t sync;
1226 1226
1227 1227 if (getminor(bp->b_edev) == 0) {
1228 1228 error = SET_ERROR(EINVAL);
1229 1229 } else {
1230 1230 zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1231 1231 if (zs == NULL)
1232 1232 error = SET_ERROR(ENXIO);
1233 1233 else if (zs->zss_type != ZSST_ZVOL)
1234 1234 error = SET_ERROR(EINVAL);
1235 1235 }
1236 1236
1237 1237 if (error) {
1238 1238 bioerror(bp, error);
1239 1239 biodone(bp);
1240 1240 return (0);
1241 1241 }
1242 1242
1243 1243 zv = zs->zss_data;
1244 1244
1245 1245 if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1246 1246 bioerror(bp, EROFS);
1247 1247 biodone(bp);
1248 1248 return (0);
1249 1249 }
1250 1250
1251 1251 off = ldbtob(bp->b_blkno);
1252 1252 volsize = zv->zv_volsize;
1253 1253
1254 1254 os = zv->zv_objset;
1255 1255 ASSERT(os != NULL);
1256 1256
1257 1257 bp_mapin(bp);
1258 1258 addr = bp->b_un.b_addr;
1259 1259 resid = bp->b_bcount;
1260 1260
1261 1261 if (resid > 0 && (off < 0 || off >= volsize)) {
1262 1262 bioerror(bp, EIO);
1263 1263 biodone(bp);
1264 1264 return (0);
1265 1265 }
1266 1266
1267 1267 is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
1268 1268 sync = ((!(bp->b_flags & B_ASYNC) &&
1269 1269 !(zv->zv_flags & ZVOL_WCE)) ||
1270 1270 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1271 1271 !doread && !is_dumpified;
1272 1272
1273 1273 /*
1274 1274 * There must be no buffer changes when doing a dmu_sync() because
1275 1275 * we can't change the data whilst calculating the checksum.
1276 1276 */
1277 1277 rl = zfs_range_lock(&zv->zv_znode, off, resid,
1278 1278 doread ? RL_READER : RL_WRITER);
1279 1279
1280 1280 while (resid != 0 && off < volsize) {
1281 1281 size_t size = MIN(resid, zvol_maxphys);
1282 1282 if (is_dumpified) {
1283 1283 size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1284 1284 error = zvol_dumpio(zv, addr, off, size,
1285 1285 doread, B_FALSE);
1286 1286 } else if (doread) {
1287 1287 error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1288 1288 DMU_READ_PREFETCH);
1289 1289 } else {
1290 1290 dmu_tx_t *tx = dmu_tx_create(os);
1291 1291 dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1292 1292 error = dmu_tx_assign(tx, TXG_WAIT);
1293 1293 if (error) {
1294 1294 dmu_tx_abort(tx);
1295 1295 } else {
1296 1296 dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1297 1297 zvol_log_write(zv, tx, off, size, sync);
1298 1298 dmu_tx_commit(tx);
1299 1299 }
1300 1300 }
1301 1301 if (error) {
1302 1302 /* convert checksum errors into IO errors */
1303 1303 if (error == ECKSUM)
1304 1304 error = SET_ERROR(EIO);
1305 1305 break;
1306 1306 }
1307 1307 off += size;
1308 1308 addr += size;
1309 1309 resid -= size;
1310 1310 }
1311 1311 zfs_range_unlock(rl);
1312 1312
1313 1313 if ((bp->b_resid = resid) == bp->b_bcount)
1314 1314 bioerror(bp, off > volsize ? EINVAL : error);
1315 1315
1316 1316 if (sync)
1317 1317 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1318 1318 biodone(bp);
1319 1319
1320 1320 return (0);
1321 1321 }
1322 1322
1323 1323 /*
1324 1324 * Set the buffer count to the zvol maximum transfer.
1325 1325 * Using our own routine instead of the default minphys()
1326 1326 * means that for larger writes we write bigger buffers on X86
1327 1327 * (128K instead of 56K) and flush the disk write cache less often
1328 1328 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
1329 1329 * 56K on X86 and 128K on sparc).
1330 1330 */
1331 1331 void
1332 1332 zvol_minphys(struct buf *bp)
1333 1333 {
1334 1334 if (bp->b_bcount > zvol_maxphys)
1335 1335 bp->b_bcount = zvol_maxphys;
1336 1336 }
1337 1337
1338 1338 int
1339 1339 zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1340 1340 {
1341 1341 minor_t minor = getminor(dev);
1342 1342 zvol_state_t *zv;
1343 1343 int error = 0;
1344 1344 uint64_t size;
1345 1345 uint64_t boff;
1346 1346 uint64_t resid;
1347 1347
1348 1348 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1349 1349 if (zv == NULL)
1350 1350 return (SET_ERROR(ENXIO));
1351 1351
1352 1352 if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1353 1353 return (SET_ERROR(EINVAL));
1354 1354
1355 1355 boff = ldbtob(blkno);
1356 1356 resid = ldbtob(nblocks);
1357 1357
1358 1358 VERIFY3U(boff + resid, <=, zv->zv_volsize);
1359 1359
1360 1360 while (resid) {
1361 1361 size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1362 1362 error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1363 1363 if (error)
1364 1364 break;
1365 1365 boff += size;
1366 1366 addr += size;
1367 1367 resid -= size;
1368 1368 }
1369 1369
1370 1370 return (error);
1371 1371 }
1372 1372
1373 1373 /*ARGSUSED*/
1374 1374 int
1375 1375 zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1376 1376 {
1377 1377 minor_t minor = getminor(dev);
1378 1378 zvol_state_t *zv;
1379 1379 uint64_t volsize;
1380 1380 rl_t *rl;
1381 1381 int error = 0;
1382 1382
1383 1383 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1384 1384 if (zv == NULL)
1385 1385 return (SET_ERROR(ENXIO));
1386 1386
1387 1387 volsize = zv->zv_volsize;
1388 1388 if (uio->uio_resid > 0 &&
1389 1389 (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1390 1390 return (SET_ERROR(EIO));
1391 1391
1392 1392 if (zv->zv_flags & ZVOL_DUMPIFIED) {
1393 1393 error = physio(zvol_strategy, NULL, dev, B_READ,
1394 1394 zvol_minphys, uio);
1395 1395 return (error);
1396 1396 }
1397 1397
1398 1398 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1399 1399 RL_READER);
1400 1400 while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1401 1401 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1402 1402
1403 1403 /* don't read past the end */
1404 1404 if (bytes > volsize - uio->uio_loffset)
1405 1405 bytes = volsize - uio->uio_loffset;
1406 1406
1407 1407 error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1408 1408 if (error) {
1409 1409 /* convert checksum errors into IO errors */
1410 1410 if (error == ECKSUM)
1411 1411 error = SET_ERROR(EIO);
1412 1412 break;
1413 1413 }
1414 1414 }
1415 1415 zfs_range_unlock(rl);
1416 1416 return (error);
1417 1417 }
1418 1418
1419 1419 /*ARGSUSED*/
1420 1420 int
1421 1421 zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1422 1422 {
1423 1423 minor_t minor = getminor(dev);
1424 1424 zvol_state_t *zv;
1425 1425 uint64_t volsize;
1426 1426 rl_t *rl;
1427 1427 int error = 0;
1428 1428 boolean_t sync;
1429 1429
1430 1430 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1431 1431 if (zv == NULL)
1432 1432 return (SET_ERROR(ENXIO));
1433 1433
1434 1434 volsize = zv->zv_volsize;
1435 1435 if (uio->uio_resid > 0 &&
1436 1436 (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1437 1437 return (SET_ERROR(EIO));
1438 1438
1439 1439 if (zv->zv_flags & ZVOL_DUMPIFIED) {
1440 1440 error = physio(zvol_strategy, NULL, dev, B_WRITE,
1441 1441 zvol_minphys, uio);
1442 1442 return (error);
1443 1443 }
1444 1444
1445 1445 sync = !(zv->zv_flags & ZVOL_WCE) ||
1446 1446 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1447 1447
1448 1448 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1449 1449 RL_WRITER);
1450 1450 while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1451 1451 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1452 1452 uint64_t off = uio->uio_loffset;
1453 1453 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1454 1454
1455 1455 if (bytes > volsize - off) /* don't write past the end */
1456 1456 bytes = volsize - off;
1457 1457
1458 1458 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1459 1459 error = dmu_tx_assign(tx, TXG_WAIT);
1460 1460 if (error) {
1461 1461 dmu_tx_abort(tx);
1462 1462 break;
1463 1463 }
1464 1464 error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
1465 1465 if (error == 0)
1466 1466 zvol_log_write(zv, tx, off, bytes, sync);
1467 1467 dmu_tx_commit(tx);
1468 1468
1469 1469 if (error)
1470 1470 break;
1471 1471 }
1472 1472 zfs_range_unlock(rl);
1473 1473 if (sync)
1474 1474 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1475 1475 return (error);
1476 1476 }
1477 1477
1478 1478 int
1479 1479 zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1480 1480 {
1481 1481 struct uuid uuid = EFI_RESERVED;
1482 - efi_gpe_t gpe = { 0 };
1482 + efi_gpe_t gpe = { {0} };
1483 1483 uint32_t crc;
1484 1484 dk_efi_t efi;
1485 1485 int length;
1486 1486 char *ptr;
1487 1487
1488 1488 if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1489 1489 return (SET_ERROR(EFAULT));
1490 1490 ptr = (char *)(uintptr_t)efi.dki_data_64;
1491 1491 length = efi.dki_length;
1492 1492 /*
1493 1493 * Some clients may attempt to request a PMBR for the
1494 1494 * zvol. Currently this interface will return EINVAL to
1495 1495 * such requests. These requests could be supported by
1496 1496 * adding a check for lba == 0 and consing up an appropriate
1497 1497 * PMBR.
1498 1498 */
1499 1499 if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1500 1500 return (SET_ERROR(EINVAL));
1501 1501
1502 1502 gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1503 1503 gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1504 1504 UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1505 1505
1506 1506 if (efi.dki_lba == 1) {
1507 1507 efi_gpt_t gpt = { 0 };
1508 1508
1509 1509 gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1510 1510 gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1511 1511 gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
1512 1512 gpt.efi_gpt_MyLBA = LE_64(1ULL);
1513 1513 gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1514 1514 gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1515 1515 gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1516 1516 gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1517 1517 gpt.efi_gpt_SizeOfPartitionEntry =
1518 1518 LE_32(sizeof (efi_gpe_t));
1519 1519 CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1520 1520 gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1521 1521 CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
1522 1522 gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1523 1523 if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1524 1524 flag))
1525 1525 return (SET_ERROR(EFAULT));
1526 1526 ptr += sizeof (gpt);
1527 1527 length -= sizeof (gpt);
1528 1528 }
1529 1529 if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1530 1530 length), flag))
1531 1531 return (SET_ERROR(EFAULT));
1532 1532 return (0);
1533 1533 }
1534 1534
1535 1535 /*
1536 1536 * BEGIN entry points to allow external callers access to the volume.
1537 1537 */
1538 1538 /*
1539 1539 * Return the volume parameters needed for access from an external caller.
1540 1540 * These values are invariant as long as the volume is held open.
1541 1541 */
1542 1542 int
1543 1543 zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1544 1544 uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1545 1545 void **rl_hdl, void **bonus_hdl)
1546 1546 {
1547 1547 zvol_state_t *zv;
1548 1548
1549 1549 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1550 1550 if (zv == NULL)
1551 1551 return (SET_ERROR(ENXIO));
1552 1552 if (zv->zv_flags & ZVOL_DUMPIFIED)
1553 1553 return (SET_ERROR(ENXIO));
1554 1554
1555 1555 ASSERT(blksize && max_xfer_len && minor_hdl &&
1556 1556 objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
1557 1557
1558 1558 *blksize = zv->zv_volblocksize;
1559 1559 *max_xfer_len = (uint64_t)zvol_maxphys;
1560 1560 *minor_hdl = zv;
1561 1561 *objset_hdl = zv->zv_objset;
1562 1562 *zil_hdl = zv->zv_zilog;
1563 1563 *rl_hdl = &zv->zv_znode;
1564 1564 *bonus_hdl = zv->zv_dbuf;
1565 1565 return (0);
1566 1566 }
1567 1567
1568 1568 /*
1569 1569 * Return the current volume size to an external caller.
1570 1570 * The size can change while the volume is open.
1571 1571 */
1572 1572 uint64_t
1573 1573 zvol_get_volume_size(void *minor_hdl)
1574 1574 {
1575 1575 zvol_state_t *zv = minor_hdl;
1576 1576
1577 1577 return (zv->zv_volsize);
1578 1578 }
1579 1579
1580 1580 /*
1581 1581 * Return the current WCE setting to an external caller.
1582 1582 * The WCE setting can change while the volume is open.
1583 1583 */
1584 1584 int
1585 1585 zvol_get_volume_wce(void *minor_hdl)
1586 1586 {
1587 1587 zvol_state_t *zv = minor_hdl;
1588 1588
1589 1589 return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
1590 1590 }
1591 1591
1592 1592 /*
1593 1593 * Entry point for external callers to zvol_log_write
1594 1594 */
1595 1595 void
1596 1596 zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
1597 1597 boolean_t sync)
1598 1598 {
1599 1599 zvol_state_t *zv = minor_hdl;
1600 1600
1601 1601 zvol_log_write(zv, tx, off, resid, sync);
1602 1602 }
1603 1603 /*
1604 1604 * END entry points to allow external callers access to the volume.
1605 1605 */
1606 1606
1607 1607 /*
1608 1608 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1609 1609 */
1610 1610 static void
1611 1611 zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
1612 1612 boolean_t sync)
1613 1613 {
1614 1614 itx_t *itx;
1615 1615 lr_truncate_t *lr;
1616 1616 zilog_t *zilog = zv->zv_zilog;
1617 1617
1618 1618 if (zil_replaying(zilog, tx))
1619 1619 return;
1620 1620
1621 1621 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1622 1622 lr = (lr_truncate_t *)&itx->itx_lr;
1623 1623 lr->lr_foid = ZVOL_OBJ;
1624 1624 lr->lr_offset = off;
1625 1625 lr->lr_length = len;
1626 1626
1627 1627 itx->itx_sync = sync;
1628 1628 zil_itx_assign(zilog, itx, tx);
1629 1629 }
1630 1630
1631 1631 /*
1632 1632 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems. See dkio(7I).
1633 1633 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1634 1634 */
1635 1635 /*ARGSUSED*/
1636 1636 int
1637 1637 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1638 1638 {
1639 1639 zvol_state_t *zv;
1640 1640 struct dk_callback *dkc;
1641 1641 int error = 0;
1642 1642 rl_t *rl;
1643 1643
1644 1644 mutex_enter(&zfsdev_state_lock);
1645 1645
1646 1646 zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1647 1647
1648 1648 if (zv == NULL) {
1649 1649 mutex_exit(&zfsdev_state_lock);
1650 1650 return (SET_ERROR(ENXIO));
1651 1651 }
1652 1652 ASSERT(zv->zv_total_opens > 0);
1653 1653
1654 1654 switch (cmd) {
1655 1655
1656 1656 case DKIOCINFO:
1657 1657 {
1658 1658 struct dk_cinfo dki;
1659 1659
1660 1660 bzero(&dki, sizeof (dki));
1661 1661 (void) strcpy(dki.dki_cname, "zvol");
1662 1662 (void) strcpy(dki.dki_dname, "zvol");
1663 1663 dki.dki_ctype = DKC_UNKNOWN;
1664 1664 dki.dki_unit = getminor(dev);
1665 1665 dki.dki_maxtransfer =
1666 1666 1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
1667 1667 mutex_exit(&zfsdev_state_lock);
1668 1668 if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1669 1669 error = SET_ERROR(EFAULT);
1670 1670 return (error);
1671 1671 }
1672 1672
1673 1673 case DKIOCGMEDIAINFO:
1674 1674 {
1675 1675 struct dk_minfo dkm;
1676 1676
1677 1677 bzero(&dkm, sizeof (dkm));
1678 1678 dkm.dki_lbsize = 1U << zv->zv_min_bs;
1679 1679 dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1680 1680 dkm.dki_media_type = DK_UNKNOWN;
1681 1681 mutex_exit(&zfsdev_state_lock);
1682 1682 if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1683 1683 error = SET_ERROR(EFAULT);
1684 1684 return (error);
1685 1685 }
1686 1686
1687 1687 case DKIOCGMEDIAINFOEXT:
1688 1688 {
1689 1689 struct dk_minfo_ext dkmext;
1690 1690
1691 1691 bzero(&dkmext, sizeof (dkmext));
1692 1692 dkmext.dki_lbsize = 1U << zv->zv_min_bs;
1693 1693 dkmext.dki_pbsize = zv->zv_volblocksize;
1694 1694 dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1695 1695 dkmext.dki_media_type = DK_UNKNOWN;
1696 1696 mutex_exit(&zfsdev_state_lock);
1697 1697 if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
1698 1698 error = SET_ERROR(EFAULT);
1699 1699 return (error);
1700 1700 }
1701 1701
1702 1702 case DKIOCGETEFI:
1703 1703 {
1704 1704 uint64_t vs = zv->zv_volsize;
1705 1705 uint8_t bs = zv->zv_min_bs;
1706 1706
1707 1707 mutex_exit(&zfsdev_state_lock);
1708 1708 error = zvol_getefi((void *)arg, flag, vs, bs);
1709 1709 return (error);
1710 1710 }
1711 1711
1712 1712 case DKIOCFLUSHWRITECACHE:
1713 1713 dkc = (struct dk_callback *)arg;
1714 1714 mutex_exit(&zfsdev_state_lock);
1715 1715 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1716 1716 if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1717 1717 (*dkc->dkc_callback)(dkc->dkc_cookie, error);
1718 1718 error = 0;
1719 1719 }
1720 1720 return (error);
1721 1721
1722 1722 case DKIOCGETWCE:
1723 1723 {
1724 1724 int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1725 1725 if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1726 1726 flag))
1727 1727 error = SET_ERROR(EFAULT);
1728 1728 break;
1729 1729 }
1730 1730 case DKIOCSETWCE:
1731 1731 {
1732 1732 int wce;
1733 1733 if (ddi_copyin((void *)arg, &wce, sizeof (int),
1734 1734 flag)) {
1735 1735 error = SET_ERROR(EFAULT);
1736 1736 break;
1737 1737 }
1738 1738 if (wce) {
1739 1739 zv->zv_flags |= ZVOL_WCE;
1740 1740 mutex_exit(&zfsdev_state_lock);
1741 1741 } else {
1742 1742 zv->zv_flags &= ~ZVOL_WCE;
1743 1743 mutex_exit(&zfsdev_state_lock);
1744 1744 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1745 1745 }
1746 1746 return (0);
1747 1747 }
1748 1748
1749 1749 case DKIOCGGEOM:
1750 1750 case DKIOCGVTOC:
1751 1751 /*
1752 1752 * commands using these (like prtvtoc) expect ENOTSUP
1753 1753 * since we're emulating an EFI label
1754 1754 */
1755 1755 error = SET_ERROR(ENOTSUP);
1756 1756 break;
1757 1757
1758 1758 case DKIOCDUMPINIT:
1759 1759 rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1760 1760 RL_WRITER);
1761 1761 error = zvol_dumpify(zv);
1762 1762 zfs_range_unlock(rl);
1763 1763 break;
1764 1764
1765 1765 case DKIOCDUMPFINI:
1766 1766 if (!(zv->zv_flags & ZVOL_DUMPIFIED))
1767 1767 break;
1768 1768 rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1769 1769 RL_WRITER);
1770 1770 error = zvol_dump_fini(zv);
1771 1771 zfs_range_unlock(rl);
1772 1772 break;
1773 1773
1774 1774 case DKIOCFREE:
1775 1775 {
1776 1776 dkioc_free_t df;
1777 1777 dmu_tx_t *tx;
1778 1778
1779 1779 if (!zvol_unmap_enabled)
1780 1780 break;
1781 1781
1782 1782 if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
1783 1783 error = SET_ERROR(EFAULT);
1784 1784 break;
1785 1785 }
1786 1786
1787 1787 /*
1788 1788 	 * Apply Postel's Law to length-checking.  If the caller overshoots,
1789 1789 	 * just blank out until the end of the volume, if there is a need
1790 1790 	 * to blank out anything.
1791 1791 */
1792 1792 if (df.df_start >= zv->zv_volsize)
1793 1793 break; /* No need to do anything... */
1794 1794
1795 1795 mutex_exit(&zfsdev_state_lock);
1796 1796
1797 1797 rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
1798 1798 RL_WRITER);
1799 1799 tx = dmu_tx_create(zv->zv_objset);
1800 1800 dmu_tx_mark_netfree(tx);
1801 1801 error = dmu_tx_assign(tx, TXG_WAIT);
1802 1802 if (error != 0) {
1803 1803 dmu_tx_abort(tx);
1804 1804 } else {
1805 1805 zvol_log_truncate(zv, tx, df.df_start,
1806 1806 df.df_length, B_TRUE);
1807 1807 dmu_tx_commit(tx);
1808 1808 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
1809 1809 df.df_start, df.df_length);
1810 1810 }
1811 1811
1812 1812 zfs_range_unlock(rl);
1813 1813
1814 1814 if (error == 0) {
1815 1815 /*
1816 1816 * If the write-cache is disabled or 'sync' property
1817 1817 * is set to 'always' then treat this as a synchronous
1818 1818 * operation (i.e. commit to zil).
1819 1819 */
1820 1820 if (!(zv->zv_flags & ZVOL_WCE) ||
1821 1821 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
1822 1822 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1823 1823
1824 1824 /*
1825 1825 * If the caller really wants synchronous writes, and
1826 1826 * can't wait for them, don't return until the write
1827 1827 * is done.
1828 1828 */
1829 1829 if (df.df_flags & DF_WAIT_SYNC) {
1830 1830 txg_wait_synced(
1831 1831 dmu_objset_pool(zv->zv_objset), 0);
1832 1832 }
1833 1833 }
1834 1834 return (error);
1835 1835 }
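
A caller reaches this path by passing a dkioc_free_t describing the range to unmap; a minimal sketch (the helper name and range are hypothetical, and only the df_start/df_length/df_flags members used above are assumed):

	#include <sys/types.h>
	#include <sys/dkio.h>
	#include <string.h>
	#include <stropts.h>
	#include <unistd.h>

	/* hypothetical helper: unmap "len" bytes at "off" on an open zvol fd */
	static int
	zvol_unmap_range(int fd, uint64_t off, uint64_t len)
	{
		dkioc_free_t df;

		(void) memset(&df, 0, sizeof (df));
		df.df_flags = DF_WAIT_SYNC;	/* wait for the free to reach disk */
		df.df_start = off;
		df.df_length = len;
		return (ioctl(fd, DKIOCFREE, &df));
	}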
1836 1836
1837 1837 default:
1838 1838 error = SET_ERROR(ENOTTY);
1839 1839 break;
1840 1840
1841 1841 }
1842 1842 mutex_exit(&zfsdev_state_lock);
1843 1843 return (error);
1844 1844 }
1845 1845
1846 1846 int
1847 1847 zvol_busy(void)
1848 1848 {
1849 1849 return (zvol_minors != 0);
1850 1850 }
1851 1851
1852 1852 void
1853 1853 zvol_init(void)
1854 1854 {
1855 1855 VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
1856 1856 1) == 0);
1857 1857 mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
1858 1858 }
1859 1859
1860 1860 void
1861 1861 zvol_fini(void)
1862 1862 {
1863 1863 mutex_destroy(&zfsdev_state_lock);
1864 1864 ddi_soft_state_fini(&zfsdev_state);
1865 1865 }
1866 1866
1867 1867 /*ARGSUSED*/
1868 1868 static int
1869 1869 zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
1870 1870 {
1871 1871 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1872 1872
1873 1873 if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1874 1874 return (1);
1875 1875 return (0);
1876 1876 }
1877 1877
1878 1878 /*ARGSUSED*/
1879 1879 static void
1880 1880 zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
1881 1881 {
1882 1882 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1883 1883
1884 1884 spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
1885 1885 }
1886 1886
1887 1887 static int
1888 1888 zvol_dump_init(zvol_state_t *zv, boolean_t resize)
1889 1889 {
1890 1890 dmu_tx_t *tx;
1891 1891 int error;
1892 1892 objset_t *os = zv->zv_objset;
1893 1893 spa_t *spa = dmu_objset_spa(os);
1894 1894 vdev_t *vd = spa->spa_root_vdev;
1895 1895 nvlist_t *nv = NULL;
1896 1896 uint64_t version = spa_version(spa);
1897 1897 uint64_t checksum, compress, refresrv, vbs, dedup;
1898 1898
1899 1899 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1900 1900 ASSERT(vd->vdev_ops == &vdev_root_ops);
1901 1901
1902 1902 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
1903 1903 DMU_OBJECT_END);
1904 1904 if (error != 0)
1905 1905 return (error);
1906 1906 /* wait for dmu_free_long_range to actually free the blocks */
1907 1907 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1908 1908
1909 1909 /*
1910 1910 * If the pool on which the dump device is being initialized has more
1911 1911 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
1912 1912 * enabled. If so, bump that feature's counter to indicate that the
1913 1913 * feature is active. We also check the vdev type to handle the
1914 1914 * following case:
1915 1915 * # zpool create test raidz disk1 disk2 disk3
1916 1916 	 * Now spa_root_vdev->vdev_children == 1 (the raidz vdev), but
1917 1917 	 * the raidz vdev itself has 3 children.
1918 1918 */
1919 1919 if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
1920 1920 if (!spa_feature_is_enabled(spa,
1921 1921 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1922 1922 return (SET_ERROR(ENOTSUP));
1923 1923 (void) dsl_sync_task(spa_name(spa),
1924 1924 zfs_mvdev_dump_feature_check,
1925 1925 zfs_mvdev_dump_activate_feature_sync, NULL,
1926 1926 2, ZFS_SPACE_CHECK_RESERVED);
1927 1927 }
1928 1928
1929 1929 if (!resize) {
1930 1930 error = dsl_prop_get_integer(zv->zv_name,
1931 1931 zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
1932 1932 if (error == 0) {
1933 1933 error = dsl_prop_get_integer(zv->zv_name,
1934 1934 zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
1935 1935 NULL);
1936 1936 }
1937 1937 if (error == 0) {
1938 1938 error = dsl_prop_get_integer(zv->zv_name,
1939 1939 zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
1940 1940 &refresrv, NULL);
1941 1941 }
1942 1942 if (error == 0) {
1943 1943 error = dsl_prop_get_integer(zv->zv_name,
1944 1944 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
1945 1945 NULL);
1946 1946 }
1947 1947 if (version >= SPA_VERSION_DEDUP && error == 0) {
1948 1948 error = dsl_prop_get_integer(zv->zv_name,
1949 1949 zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
1950 1950 }
1951 1951 }
1952 1952 if (error != 0)
1953 1953 return (error);
1954 1954
1955 1955 tx = dmu_tx_create(os);
1956 1956 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1957 1957 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
1958 1958 error = dmu_tx_assign(tx, TXG_WAIT);
1959 1959 if (error != 0) {
1960 1960 dmu_tx_abort(tx);
1961 1961 return (error);
1962 1962 }
1963 1963
1964 1964 /*
1965 1965 * If we are resizing the dump device then we only need to
1966 1966 * update the refreservation to match the newly updated
1967 1967 * zvolsize. Otherwise, we save off the original state of the
1968 1968 	 * zvol so that we can restore it if the zvol is ever undumpified.
1969 1969 */
1970 1970 if (resize) {
1971 1971 error = zap_update(os, ZVOL_ZAP_OBJ,
1972 1972 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1973 1973 &zv->zv_volsize, tx);
1974 1974 } else {
1975 1975 error = zap_update(os, ZVOL_ZAP_OBJ,
1976 1976 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
1977 1977 &compress, tx);
1978 1978 if (error == 0) {
1979 1979 error = zap_update(os, ZVOL_ZAP_OBJ,
1980 1980 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
1981 1981 &checksum, tx);
1982 1982 }
1983 1983 if (error == 0) {
1984 1984 error = zap_update(os, ZVOL_ZAP_OBJ,
1985 1985 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1986 1986 &refresrv, tx);
1987 1987 }
1988 1988 if (error == 0) {
1989 1989 error = zap_update(os, ZVOL_ZAP_OBJ,
1990 1990 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
1991 1991 &vbs, tx);
1992 1992 }
1993 1993 if (error == 0) {
1994 1994 error = dmu_object_set_blocksize(
1995 1995 os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
1996 1996 }
1997 1997 if (version >= SPA_VERSION_DEDUP && error == 0) {
1998 1998 error = zap_update(os, ZVOL_ZAP_OBJ,
1999 1999 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2000 2000 &dedup, tx);
2001 2001 }
2002 2002 if (error == 0)
2003 2003 zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2004 2004 }
2005 2005 dmu_tx_commit(tx);
2006 2006
2007 2007 /*
2008 2008 	 * We only need to update the zvol's properties if we are initializing
2009 2009 * the dump area for the first time.
2010 2010 */
2011 2011 if (error == 0 && !resize) {
2012 2012 /*
2013 2013 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2014 2014 * function. Otherwise, use the old default -- OFF.
2015 2015 */
2016 2016 checksum = spa_feature_is_active(spa,
2017 2017 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2018 2018 ZIO_CHECKSUM_OFF;
2019 2019
2020 2020 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2021 2021 VERIFY(nvlist_add_uint64(nv,
2022 2022 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2023 2023 VERIFY(nvlist_add_uint64(nv,
2024 2024 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2025 2025 ZIO_COMPRESS_OFF) == 0);
2026 2026 VERIFY(nvlist_add_uint64(nv,
2027 2027 zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2028 2028 checksum) == 0);
2029 2029 if (version >= SPA_VERSION_DEDUP) {
2030 2030 VERIFY(nvlist_add_uint64(nv,
2031 2031 zfs_prop_to_name(ZFS_PROP_DEDUP),
2032 2032 ZIO_CHECKSUM_OFF) == 0);
2033 2033 }
2034 2034
2035 2035 error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2036 2036 nv, NULL);
2037 2037 nvlist_free(nv);
2038 2038 }
2039 2039
2040 2040 /* Allocate the space for the dump */
2041 2041 if (error == 0)
2042 2042 error = zvol_prealloc(zv);
2043 2043 return (error);
2044 2044 }
2045 2045
2046 2046 static int
2047 2047 zvol_dumpify(zvol_state_t *zv)
2048 2048 {
2049 2049 int error = 0;
2050 2050 uint64_t dumpsize = 0;
2051 2051 dmu_tx_t *tx;
2052 2052 objset_t *os = zv->zv_objset;
2053 2053
2054 2054 if (zv->zv_flags & ZVOL_RDONLY)
2055 2055 return (SET_ERROR(EROFS));
2056 2056
2057 2057 if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2058 2058 8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2059 2059 boolean_t resize = (dumpsize > 0);
2060 2060
2061 2061 if ((error = zvol_dump_init(zv, resize)) != 0) {
2062 2062 (void) zvol_dump_fini(zv);
2063 2063 return (error);
2064 2064 }
2065 2065 }
2066 2066
2067 2067 /*
2068 2068 * Build up our lba mapping.
2069 2069 */
2070 2070 error = zvol_get_lbas(zv);
2071 2071 if (error) {
2072 2072 (void) zvol_dump_fini(zv);
2073 2073 return (error);
2074 2074 }
2075 2075
2076 2076 tx = dmu_tx_create(os);
2077 2077 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2078 2078 error = dmu_tx_assign(tx, TXG_WAIT);
2079 2079 if (error) {
2080 2080 dmu_tx_abort(tx);
2081 2081 (void) zvol_dump_fini(zv);
2082 2082 return (error);
2083 2083 }
2084 2084
2085 2085 zv->zv_flags |= ZVOL_DUMPIFIED;
2086 2086 error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2087 2087 &zv->zv_volsize, tx);
2088 2088 dmu_tx_commit(tx);
2089 2089
2090 2090 if (error) {
2091 2091 (void) zvol_dump_fini(zv);
2092 2092 return (error);
2093 2093 }
2094 2094
2095 2095 txg_wait_synced(dmu_objset_pool(os), 0);
2096 2096 return (0);
2097 2097 }
2098 2098
2099 2099 static int
2100 2100 zvol_dump_fini(zvol_state_t *zv)
2101 2101 {
2102 2102 dmu_tx_t *tx;
2103 2103 objset_t *os = zv->zv_objset;
2104 2104 nvlist_t *nv;
2105 2105 int error = 0;
2106 2106 uint64_t checksum, compress, refresrv, vbs, dedup;
2107 2107 uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2108 2108
2109 2109 /*
2110 2110 * Attempt to restore the zvol back to its pre-dumpified state.
2111 2111 * This is a best-effort attempt as it's possible that not all
2112 2112 * of these properties were initialized during the dumpify process
2113 2113 * (i.e. error during zvol_dump_init).
2114 2114 */
2115 2115
2116 2116 tx = dmu_tx_create(os);
2117 2117 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2118 2118 error = dmu_tx_assign(tx, TXG_WAIT);
2119 2119 if (error) {
2120 2120 dmu_tx_abort(tx);
2121 2121 return (error);
2122 2122 }
2123 2123 (void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2124 2124 dmu_tx_commit(tx);
2125 2125
2126 2126 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2127 2127 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2128 2128 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2129 2129 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2130 2130 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2131 2131 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2132 2132 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2133 2133 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2134 2134
2135 2135 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2136 2136 (void) nvlist_add_uint64(nv,
2137 2137 zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2138 2138 (void) nvlist_add_uint64(nv,
2139 2139 zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2140 2140 (void) nvlist_add_uint64(nv,
2141 2141 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2142 2142 if (version >= SPA_VERSION_DEDUP &&
2143 2143 zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2144 2144 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2145 2145 (void) nvlist_add_uint64(nv,
2146 2146 zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2147 2147 }
2148 2148 (void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2149 2149 nv, NULL);
2150 2150 nvlist_free(nv);
2151 2151
2152 2152 zvol_free_extents(zv);
2153 2153 zv->zv_flags &= ~ZVOL_DUMPIFIED;
2154 2154 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2155 2155 /* wait for dmu_free_long_range to actually free the blocks */
2156 2156 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2157 2157 tx = dmu_tx_create(os);
2158 2158 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2159 2159 error = dmu_tx_assign(tx, TXG_WAIT);
2160 2160 if (error) {
2161 2161 dmu_tx_abort(tx);
2162 2162 return (error);
2163 2163 }
2164 2164 if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2165 2165 zv->zv_volblocksize = vbs;
2166 2166 dmu_tx_commit(tx);
2167 2167
2168 2168 return (0);
2169 2169 }