OS-7125 Need mitigation of L1TF (CVE-2018-3646)
Reviewed by: Robert Mustacchi <rm@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
--- old/usr/src/uts/common/fs/zfs/zvol.c
+++ new/usr/src/uts/common/fs/zfs/zvol.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 *
24 24 * Portions Copyright 2010 Robert Milkowski
25 25 *
26 26 * Copyright 2017 Nexenta Systems, Inc. All rights reserved.
27 27 * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
28 - * Copyright (c) 2013, Joyent, Inc. All rights reserved.
29 28 * Copyright (c) 2014 Integros [integros.com]
30 29 * Copyright (c) 2019, Joyent, Inc.
31 30 */
32 31
33 32 /*
34 33 * ZFS volume emulation driver.
35 34 *
36 35 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
37 36 * Volumes are accessed through the symbolic links named:
38 37 *
39 38 * /dev/zvol/dsk/<pool_name>/<dataset_name>
40 39 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
41 40 *
42 41 * These links are created by the /dev filesystem (sdev_zvolops.c).
43 42 * Volumes are persistent through reboot. No user command needs to be
44 43 * run before opening and using a device.
45 44 */
46 45
47 46 #include <sys/types.h>
48 47 #include <sys/param.h>
49 48 #include <sys/errno.h>
50 49 #include <sys/uio.h>
51 50 #include <sys/buf.h>
52 51 #include <sys/modctl.h>
53 52 #include <sys/open.h>
54 53 #include <sys/kmem.h>
55 54 #include <sys/conf.h>
56 55 #include <sys/cmn_err.h>
57 56 #include <sys/stat.h>
58 57 #include <sys/zap.h>
59 58 #include <sys/spa.h>
60 59 #include <sys/spa_impl.h>
61 60 #include <sys/zio.h>
62 61 #include <sys/dmu_traverse.h>
63 62 #include <sys/dnode.h>
64 63 #include <sys/dsl_dataset.h>
65 64 #include <sys/dsl_prop.h>
66 65 #include <sys/dkio.h>
67 66 #include <sys/efi_partition.h>
68 67 #include <sys/byteorder.h>
69 68 #include <sys/pathname.h>
70 69 #include <sys/ddi.h>
71 70 #include <sys/sunddi.h>
72 71 #include <sys/crc32.h>
73 72 #include <sys/dirent.h>
74 73 #include <sys/policy.h>
75 74 #include <sys/fs/zfs.h>
76 75 #include <sys/zfs_ioctl.h>
77 76 #include <sys/mkdev.h>
78 77 #include <sys/zil.h>
79 78 #include <sys/refcount.h>
80 79 #include <sys/zfs_znode.h>
81 80 #include <sys/zfs_rlock.h>
82 81 #include <sys/vdev_disk.h>
83 82 #include <sys/vdev_impl.h>
84 83 #include <sys/vdev_raidz.h>
85 84 #include <sys/zvol.h>
86 85 #include <sys/dumphdr.h>
87 86 #include <sys/zil_impl.h>
88 87 #include <sys/dbuf.h>
89 88 #include <sys/dmu_tx.h>
90 89 #include <sys/zfeature.h>
91 90 #include <sys/zio_checksum.h>
92 91 #include <sys/zil_impl.h>
92 +#include <sys/ht.h>
93 93 #include <sys/dkioc_free_util.h>
94 94 #include <sys/zfs_rlock.h>
95 95
96 96 #include "zfs_namecheck.h"
97 97
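A note on the new <sys/ht.h> include above: throughout this file, the patch brackets the I/O paths with ht_begin_unsafe()/ht_end_unsafe(), which presumably mark a region as unsafe to run concurrently with untrusted code on the sibling hyperthread (the substance of the L1TF mitigation; the implementation lives elsewhere). A minimal sketch of the pattern as applied below — illustrative only, assuming just the pairing visible at the call sites, with a placeholder body:

	/*
	 * Sketch only, not part of the diff: ht_begin_unsafe() opens a
	 * region that must not share a core with untrusted code on the
	 * sibling hyperthread; ht_end_unsafe() closes it.
	 */
	static int
	example_bracketed_io(void)
	{
		int error;

		ht_begin_unsafe();

		/* ... range locking, DMU I/O, zil_commit(), etc. ... */
		error = 0;

		ht_end_unsafe();

		return (error);
	}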
98 98 void *zfsdev_state;
99 99 static char *zvol_tag = "zvol_tag";
100 100
101 101 #define ZVOL_DUMPSIZE "dumpsize"
102 102
103 103 /*
104 104 * This lock protects the zfsdev_state structure from being modified
105 105 * while it's being used, e.g. an open that comes in before a create
106 106 * finishes. It also protects temporary opens of the dataset so that,
107 107 * e.g., an open doesn't get a spurious EBUSY.
108 108 */
109 109 kmutex_t zfsdev_state_lock;
110 110 static uint32_t zvol_minors;
111 111
112 112 typedef struct zvol_extent {
113 113 list_node_t ze_node;
114 114 dva_t ze_dva; /* dva associated with this extent */
115 115 uint64_t ze_nblks; /* number of blocks in extent */
116 116 } zvol_extent_t;
117 117
118 118 /*
119 119 * The in-core state of each volume.
120 120 */
121 121 typedef struct zvol_state {
122 122 char zv_name[MAXPATHLEN]; /* pool/dd name */
123 123 uint64_t zv_volsize; /* amount of space we advertise */
124 124 uint64_t zv_volblocksize; /* volume block size */
125 125 minor_t zv_minor; /* minor number */
126 126 uint8_t zv_min_bs; /* minimum addressable block shift */
127 127 uint8_t zv_flags; /* readonly, dumpified, etc. */
128 128 objset_t *zv_objset; /* objset handle */
129 129 uint32_t zv_open_count[OTYPCNT]; /* open counts */
130 130 uint32_t zv_total_opens; /* total open count */
131 131 zilog_t *zv_zilog; /* ZIL handle */
132 132 list_t zv_extents; /* List of extents for dump */
133 133 rangelock_t zv_rangelock;
134 134 dnode_t *zv_dn; /* dnode hold */
135 135 } zvol_state_t;
136 136
137 137 /*
138 138 * zvol specific flags
139 139 */
140 140 #define ZVOL_RDONLY 0x1
141 141 #define ZVOL_DUMPIFIED 0x2
142 142 #define ZVOL_EXCL 0x4
143 143 #define ZVOL_WCE 0x8
144 144
145 145 /*
146 146 * zvol maximum transfer in one DMU tx.
147 147 */
148 148 int zvol_maxphys = DMU_MAX_ACCESS/2;
149 149
150 150 /*
151 151 * Toggle unmap functionality.
152 152 */
153 153 boolean_t zvol_unmap_enabled = B_TRUE;
154 154
155 155 /*
156 156 * If true, unmaps requested as synchronous are executed synchronously,
157 157 * otherwise all unmaps are asynchronous.
158 158 */
159 159 boolean_t zvol_unmap_sync_enabled = B_FALSE;
160 160
161 161 extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
162 162 nvlist_t *, nvlist_t *);
163 163 static int zvol_remove_zv(zvol_state_t *);
164 164 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf,
165 165 struct lwb *lwb, zio_t *zio);
166 166 static int zvol_dumpify(zvol_state_t *zv);
167 167 static int zvol_dump_fini(zvol_state_t *zv);
168 168 static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
169 169
170 170 static void
171 171 zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
172 172 {
173 173 dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
174 174
175 175 zv->zv_volsize = volsize;
176 176 VERIFY(ddi_prop_update_int64(dev, zfs_dip,
177 177 "Size", volsize) == DDI_SUCCESS);
178 178 VERIFY(ddi_prop_update_int64(dev, zfs_dip,
179 179 "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
180 180
181 181 /* Notify specfs to invalidate the cached size */
182 182 spec_size_invalidate(dev, VBLK);
183 183 spec_size_invalidate(dev, VCHR);
184 184 }
185 185
186 186 int
187 187 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
188 188 {
189 189 if (volsize == 0)
190 190 return (SET_ERROR(EINVAL));
191 191
192 192 if (volsize % blocksize != 0)
193 193 return (SET_ERROR(EINVAL));
194 194
195 195 #ifdef _ILP32
196 196 if (volsize - 1 > SPEC_MAXOFFSET_T)
197 197 return (SET_ERROR(EOVERFLOW));
198 198 #endif
199 199 return (0);
200 200 }
201 201
202 202 int
203 203 zvol_check_volblocksize(uint64_t volblocksize)
204 204 {
205 205 if (volblocksize < SPA_MINBLOCKSIZE ||
206 206 volblocksize > SPA_OLD_MAXBLOCKSIZE ||
207 207 !ISP2(volblocksize))
208 208 return (SET_ERROR(EDOM));
209 209
210 210 return (0);
211 211 }
212 212
213 213 int
214 214 zvol_get_stats(objset_t *os, nvlist_t *nv)
215 215 {
216 216 int error;
217 217 dmu_object_info_t doi;
218 218 uint64_t val;
219 219
220 220 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
221 221 if (error)
222 222 return (error);
223 223
224 224 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
225 225
226 226 error = dmu_object_info(os, ZVOL_OBJ, &doi);
227 227
228 228 if (error == 0) {
229 229 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
230 230 doi.doi_data_block_size);
231 231 }
232 232
233 233 return (error);
234 234 }
235 235
236 236 static zvol_state_t *
237 237 zvol_minor_lookup(const char *name)
238 238 {
239 239 minor_t minor;
240 240 zvol_state_t *zv;
241 241
242 242 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
243 243
244 244 for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
245 245 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
246 246 if (zv == NULL)
247 247 continue;
248 248 if (strcmp(zv->zv_name, name) == 0)
249 249 return (zv);
250 250 }
251 251
252 252 return (NULL);
253 253 }
254 254
255 255 /* extent mapping arg */
256 256 struct maparg {
257 257 zvol_state_t *ma_zv;
258 258 uint64_t ma_blks;
259 259 };
260 260
261 261 /*ARGSUSED*/
262 262 static int
263 263 zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
264 264 const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
265 265 {
266 266 struct maparg *ma = arg;
267 267 zvol_extent_t *ze;
268 268 int bs = ma->ma_zv->zv_volblocksize;
269 269
270 270 if (bp == NULL || BP_IS_HOLE(bp) ||
271 271 zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
272 272 return (0);
273 273
274 274 VERIFY(!BP_IS_EMBEDDED(bp));
275 275
276 276 VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
277 277 ma->ma_blks++;
278 278
279 279 /* Abort immediately if we have encountered gang blocks */
280 280 if (BP_IS_GANG(bp))
281 281 return (SET_ERROR(EFRAGS));
282 282
283 283 /*
284 284 * See if the block is at the end of the previous extent.
285 285 */
286 286 ze = list_tail(&ma->ma_zv->zv_extents);
287 287 if (ze &&
288 288 DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
289 289 DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
290 290 DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
291 291 ze->ze_nblks++;
292 292 return (0);
293 293 }
294 294
295 295 dprintf_bp(bp, "%s", "next blkptr:");
296 296
297 297 /* start a new extent */
298 298 ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
299 299 ze->ze_dva = bp->blk_dva[0]; /* structure assignment */
300 300 ze->ze_nblks = 1;
301 301 list_insert_tail(&ma->ma_zv->zv_extents, ze);
302 302 return (0);
303 303 }
304 304
305 305 static void
306 306 zvol_free_extents(zvol_state_t *zv)
307 307 {
308 308 zvol_extent_t *ze;
309 309
310 310 while (ze = list_head(&zv->zv_extents)) {
311 311 list_remove(&zv->zv_extents, ze);
312 312 kmem_free(ze, sizeof (zvol_extent_t));
313 313 }
314 314 }
315 315
316 316 static int
317 317 zvol_get_lbas(zvol_state_t *zv)
318 318 {
319 319 objset_t *os = zv->zv_objset;
320 320 struct maparg ma;
321 321 int err;
322 322
323 323 ma.ma_zv = zv;
324 324 ma.ma_blks = 0;
325 325 zvol_free_extents(zv);
326 326
327 327 /* commit any in-flight changes before traversing the dataset */
328 328 txg_wait_synced(dmu_objset_pool(os), 0);
329 329 err = traverse_dataset(dmu_objset_ds(os), 0,
330 330 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
331 331 if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
332 332 zvol_free_extents(zv);
333 333 return (err ? err : EIO);
334 334 }
335 335
336 336 return (0);
337 337 }
338 338
339 339 /* ARGSUSED */
340 340 void
341 341 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
342 342 {
343 343 zfs_creat_t *zct = arg;
344 344 nvlist_t *nvprops = zct->zct_props;
345 345 int error;
346 346 uint64_t volblocksize, volsize;
347 347
348 348 VERIFY(nvlist_lookup_uint64(nvprops,
349 349 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
350 350 if (nvlist_lookup_uint64(nvprops,
351 351 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
352 352 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
353 353
354 354 /*
355 355 * These properties must be removed from the list so the generic
356 356 * property setting step won't apply to them.
357 357 */
358 358 VERIFY(nvlist_remove_all(nvprops,
359 359 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
360 360 (void) nvlist_remove_all(nvprops,
361 361 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
362 362
363 363 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
364 364 DMU_OT_NONE, 0, tx);
365 365 ASSERT(error == 0);
366 366
367 367 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
368 368 DMU_OT_NONE, 0, tx);
369 369 ASSERT(error == 0);
370 370
371 371 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
372 372 ASSERT(error == 0);
373 373 }
374 374
375 375 /*
376 376 * Replay a TX_TRUNCATE ZIL transaction if asked. TX_TRUNCATE is how we
377 377 * implement DKIOCFREE/free-long-range.
378 378 */
379 379 static int
380 380 zvol_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
381 381 {
382 382 zvol_state_t *zv = arg1;
383 383 lr_truncate_t *lr = arg2;
384 384 uint64_t offset, length;
385 385
386 386 if (byteswap)
387 387 byteswap_uint64_array(lr, sizeof (*lr));
388 388
389 389 offset = lr->lr_offset;
390 390 length = lr->lr_length;
391 391
392 392 return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
393 393 }
394 394
395 395 /*
396 396 * Replay a TX_WRITE ZIL transaction that didn't get committed
397 397 * after a system failure
398 398 */
399 399 static int
400 400 zvol_replay_write(void *arg1, void *arg2, boolean_t byteswap)
401 401 {
402 402 zvol_state_t *zv = arg1;
403 403 lr_write_t *lr = arg2;
404 404 objset_t *os = zv->zv_objset;
405 405 char *data = (char *)(lr + 1); /* data follows lr_write_t */
406 406 uint64_t offset, length;
407 407 dmu_tx_t *tx;
408 408 int error;
409 409
410 410 if (byteswap)
411 411 byteswap_uint64_array(lr, sizeof (*lr));
412 412
413 413 offset = lr->lr_offset;
414 414 length = lr->lr_length;
415 415
416 416 /* If it's a dmu_sync() block, write the whole block */
417 417 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
418 418 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
419 419 if (length < blocksize) {
420 420 offset -= offset % blocksize;
421 421 length = blocksize;
422 422 }
423 423 }
424 424
425 425 tx = dmu_tx_create(os);
426 426 dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
427 427 error = dmu_tx_assign(tx, TXG_WAIT);
428 428 if (error) {
429 429 dmu_tx_abort(tx);
430 430 } else {
431 431 dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
432 432 dmu_tx_commit(tx);
433 433 }
434 434
435 435 return (error);
436 436 }
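A worked example of the whole-block rewrite above, with assumed numbers (not part of the diff): replaying a dmu_sync()ed record whose block size is 8192, where the original write was 512 bytes at offset 10240. The data referenced by lr_blkptr exists only as a full block on disk, so the replay is widened:

	offset -= offset % blocksize;	/* 10240 - 2048 = 8192 */
	length = blocksize;		/* 8192: rewrite the full block */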
437 437
438 438 /* ARGSUSED */
439 439 static int
440 440 zvol_replay_err(void *arg1, void *arg2, boolean_t byteswap)
441 441 {
442 442 return (SET_ERROR(ENOTSUP));
443 443 }
444 444
445 445 /*
446 446 * Callback vectors for replaying records.
447 447 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
448 448 */
449 449 zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
450 450 zvol_replay_err, /* 0 no such transaction type */
451 451 zvol_replay_err, /* TX_CREATE */
452 452 zvol_replay_err, /* TX_MKDIR */
453 453 zvol_replay_err, /* TX_MKXATTR */
454 454 zvol_replay_err, /* TX_SYMLINK */
455 455 zvol_replay_err, /* TX_REMOVE */
456 456 zvol_replay_err, /* TX_RMDIR */
457 457 zvol_replay_err, /* TX_LINK */
458 458 zvol_replay_err, /* TX_RENAME */
459 459 zvol_replay_write, /* TX_WRITE */
460 460 zvol_replay_truncate, /* TX_TRUNCATE */
461 461 zvol_replay_err, /* TX_SETATTR */
462 462 zvol_replay_err, /* TX_ACL */
463 463 zvol_replay_err, /* TX_CREATE_ACL */
464 464 zvol_replay_err, /* TX_CREATE_ATTR */
465 465 zvol_replay_err, /* TX_CREATE_ACL_ATTR */
466 466 zvol_replay_err, /* TX_MKDIR_ACL */
467 467 zvol_replay_err, /* TX_MKDIR_ATTR */
468 468 zvol_replay_err, /* TX_MKDIR_ACL_ATTR */
469 469 zvol_replay_err, /* TX_WRITE2 */
470 470 };
471 471
472 472 int
473 473 zvol_name2minor(const char *name, minor_t *minor)
474 474 {
475 475 zvol_state_t *zv;
476 476
477 477 mutex_enter(&zfsdev_state_lock);
478 478 zv = zvol_minor_lookup(name);
479 479 if (minor && zv)
480 480 *minor = zv->zv_minor;
481 481 mutex_exit(&zfsdev_state_lock);
482 482 return (zv ? 0 : -1);
483 483 }
484 484
485 485 /*
486 486 * Create a minor node (plus a whole lot more) for the specified volume.
487 487 */
488 488 int
489 489 zvol_create_minor(const char *name)
490 490 {
491 491 zfs_soft_state_t *zs;
492 492 zvol_state_t *zv;
493 493 objset_t *os;
494 494 dmu_object_info_t doi;
495 495 minor_t minor = 0;
496 496 char chrbuf[30], blkbuf[30];
497 497 int error;
498 498
499 499 mutex_enter(&zfsdev_state_lock);
500 500
501 501 if (zvol_minor_lookup(name) != NULL) {
502 502 mutex_exit(&zfsdev_state_lock);
503 503 return (SET_ERROR(EEXIST));
504 504 }
505 505
506 506 /* lie and say we're read-only */
507 507 error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
508 508
509 509 if (error) {
510 510 mutex_exit(&zfsdev_state_lock);
511 511 return (error);
512 512 }
513 513
514 514 if ((minor = zfsdev_minor_alloc()) == 0) {
515 515 dmu_objset_disown(os, FTAG);
516 516 mutex_exit(&zfsdev_state_lock);
517 517 return (SET_ERROR(ENXIO));
518 518 }
519 519
520 520 if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
521 521 dmu_objset_disown(os, FTAG);
522 522 mutex_exit(&zfsdev_state_lock);
523 523 return (SET_ERROR(EAGAIN));
524 524 }
525 525 (void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
526 526 (char *)name);
527 527
528 528 (void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
529 529
530 530 if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
531 531 minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
532 532 ddi_soft_state_free(zfsdev_state, minor);
533 533 dmu_objset_disown(os, FTAG);
534 534 mutex_exit(&zfsdev_state_lock);
535 535 return (SET_ERROR(EAGAIN));
536 536 }
537 537
538 538 (void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
539 539
540 540 if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
541 541 minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
542 542 ddi_remove_minor_node(zfs_dip, chrbuf);
543 543 ddi_soft_state_free(zfsdev_state, minor);
544 544 dmu_objset_disown(os, FTAG);
545 545 mutex_exit(&zfsdev_state_lock);
546 546 return (SET_ERROR(EAGAIN));
547 547 }
548 548
549 549 zs = ddi_get_soft_state(zfsdev_state, minor);
550 550 zs->zss_type = ZSST_ZVOL;
551 551 zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
552 552 (void) strlcpy(zv->zv_name, name, MAXPATHLEN);
553 553 zv->zv_min_bs = DEV_BSHIFT;
554 554 zv->zv_minor = minor;
555 555 zv->zv_objset = os;
556 556 if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
557 557 zv->zv_flags |= ZVOL_RDONLY;
558 558 rangelock_init(&zv->zv_rangelock, NULL, NULL);
559 559 list_create(&zv->zv_extents, sizeof (zvol_extent_t),
560 560 offsetof(zvol_extent_t, ze_node));
561 561 /* get and cache the blocksize */
562 562 error = dmu_object_info(os, ZVOL_OBJ, &doi);
563 563 ASSERT(error == 0);
564 564 zv->zv_volblocksize = doi.doi_data_block_size;
565 565
566 566 if (spa_writeable(dmu_objset_spa(os))) {
567 567 if (zil_replay_disable)
568 568 zil_destroy(dmu_objset_zil(os), B_FALSE);
569 569 else
570 570 zil_replay(os, zv, zvol_replay_vector);
571 571 }
572 572 dmu_objset_disown(os, FTAG);
573 573 zv->zv_objset = NULL;
574 574
575 575 zvol_minors++;
576 576
577 577 mutex_exit(&zfsdev_state_lock);
578 578
579 579 return (0);
580 580 }
581 581
582 582 /*
583 583 * Remove minor node for the specified volume.
584 584 */
585 585 static int
586 586 zvol_remove_zv(zvol_state_t *zv)
587 587 {
588 588 char nmbuf[20];
589 589 minor_t minor = zv->zv_minor;
590 590
591 591 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
592 592 if (zv->zv_total_opens != 0)
593 593 return (SET_ERROR(EBUSY));
594 594
595 595 (void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
596 596 ddi_remove_minor_node(zfs_dip, nmbuf);
597 597
598 598 (void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
599 599 ddi_remove_minor_node(zfs_dip, nmbuf);
600 600
601 601 rangelock_fini(&zv->zv_rangelock);
602 602
603 603 kmem_free(zv, sizeof (zvol_state_t));
604 604
605 605 ddi_soft_state_free(zfsdev_state, minor);
606 606
607 607 zvol_minors--;
608 608 return (0);
609 609 }
610 610
611 611 int
612 612 zvol_remove_minor(const char *name)
613 613 {
614 614 zvol_state_t *zv;
615 615 int rc;
616 616
617 617 mutex_enter(&zfsdev_state_lock);
618 618 if ((zv = zvol_minor_lookup(name)) == NULL) {
619 619 mutex_exit(&zfsdev_state_lock);
620 620 return (SET_ERROR(ENXIO));
621 621 }
622 622 rc = zvol_remove_zv(zv);
623 623 mutex_exit(&zfsdev_state_lock);
624 624 return (rc);
625 625 }
626 626
627 627 int
628 628 zvol_first_open(zvol_state_t *zv)
629 629 {
630 630 objset_t *os;
631 631 uint64_t volsize;
632 632 int error;
633 633 uint64_t readonly;
634 634
635 635 /* lie and say we're read-only */
636 636 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
637 637 zvol_tag, &os);
638 638 if (error)
639 639 return (error);
640 640
641 641 zv->zv_objset = os;
642 642 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
643 643 if (error) {
644 644 ASSERT(error == 0);
645 645 dmu_objset_disown(os, zvol_tag);
646 646 return (error);
647 647 }
648 648
649 649 error = dnode_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dn);
650 650 if (error) {
651 651 dmu_objset_disown(os, zvol_tag);
652 652 return (error);
653 653 }
654 654
655 655 zvol_size_changed(zv, volsize);
656 656 zv->zv_zilog = zil_open(os, zvol_get_data);
657 657
658 658 VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
659 659 NULL) == 0);
660 660 if (readonly || dmu_objset_is_snapshot(os) ||
661 661 !spa_writeable(dmu_objset_spa(os)))
662 662 zv->zv_flags |= ZVOL_RDONLY;
663 663 else
664 664 zv->zv_flags &= ~ZVOL_RDONLY;
665 665 return (error);
666 666 }
667 667
668 668 void
669 669 zvol_last_close(zvol_state_t *zv)
670 670 {
671 671 zil_close(zv->zv_zilog);
672 672 zv->zv_zilog = NULL;
673 673
674 674 dnode_rele(zv->zv_dn, zvol_tag);
675 675 zv->zv_dn = NULL;
676 676
677 677 /*
678 678 * Evict cached data
679 679 */
680 680 if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
681 681 !(zv->zv_flags & ZVOL_RDONLY))
682 682 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
683 683 dmu_objset_evict_dbufs(zv->zv_objset);
684 684
685 685 dmu_objset_disown(zv->zv_objset, zvol_tag);
686 686 zv->zv_objset = NULL;
687 687 }
688 688
689 689 int
690 690 zvol_prealloc(zvol_state_t *zv)
691 691 {
692 692 objset_t *os = zv->zv_objset;
693 693 dmu_tx_t *tx;
694 694 uint64_t refd, avail, usedobjs, availobjs;
695 695 uint64_t resid = zv->zv_volsize;
696 696 uint64_t off = 0;
697 697
698 698 /* Check the space usage before attempting to allocate the space */
699 699 dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
700 700 if (avail < zv->zv_volsize)
701 701 return (SET_ERROR(ENOSPC));
702 702
703 703 /* Free old extents if they exist */
704 704 zvol_free_extents(zv);
705 705
706 706 while (resid != 0) {
707 707 int error;
708 708 uint64_t bytes = MIN(resid, SPA_OLD_MAXBLOCKSIZE);
709 709
710 710 tx = dmu_tx_create(os);
711 711 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
712 712 error = dmu_tx_assign(tx, TXG_WAIT);
713 713 if (error) {
714 714 dmu_tx_abort(tx);
715 715 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
716 716 return (error);
717 717 }
718 718 dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
719 719 dmu_tx_commit(tx);
720 720 off += bytes;
721 721 resid -= bytes;
722 722 }
723 723 txg_wait_synced(dmu_objset_pool(os), 0);
724 724
725 725 return (0);
726 726 }
727 727
728 728 static int
729 729 zvol_update_volsize(objset_t *os, uint64_t volsize)
730 730 {
731 731 dmu_tx_t *tx;
732 732 int error;
733 733
734 734 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
735 735
736 736 tx = dmu_tx_create(os);
737 737 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
738 738 dmu_tx_mark_netfree(tx);
739 739 error = dmu_tx_assign(tx, TXG_WAIT);
740 740 if (error) {
741 741 dmu_tx_abort(tx);
742 742 return (error);
743 743 }
744 744
745 745 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
746 746 &volsize, tx);
747 747 dmu_tx_commit(tx);
748 748
749 749 if (error == 0)
750 750 error = dmu_free_long_range(os,
751 751 ZVOL_OBJ, volsize, DMU_OBJECT_END);
752 752 return (error);
753 753 }
754 754
755 755 void
756 756 zvol_remove_minors(const char *name)
757 757 {
758 758 zvol_state_t *zv;
759 759 char *namebuf;
760 760 minor_t minor;
761 761
762 762 namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
763 763 (void) strncpy(namebuf, name, strlen(name));
764 764 (void) strcat(namebuf, "/");
765 765 mutex_enter(&zfsdev_state_lock);
766 766 for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
767 767
768 768 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
769 769 if (zv == NULL)
770 770 continue;
771 771 if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
772 772 (void) zvol_remove_zv(zv);
773 773 }
774 774 kmem_free(namebuf, strlen(name) + 2);
775 775
776 776 mutex_exit(&zfsdev_state_lock);
777 777 }
778 778
779 779 static int
780 780 zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
781 781 {
782 782 uint64_t old_volsize = 0ULL;
783 783 int error = 0;
784 784
785 785 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
786 786
787 787 /*
788 788 * Reinitialize the dump area to the new size. If we
789 789 * failed to resize the dump area then restore it back to
790 790 * its original size. We must set the new volsize prior
791 791 * to calling dumpvp_resize() to ensure that the devices'
792 792 * size(9P) is not visible by the dump subsystem.
793 793 */
794 794 old_volsize = zv->zv_volsize;
795 795 zvol_size_changed(zv, volsize);
796 796
797 797 if (zv->zv_flags & ZVOL_DUMPIFIED) {
798 798 if ((error = zvol_dumpify(zv)) != 0 ||
799 799 (error = dumpvp_resize()) != 0) {
800 800 int dumpify_error;
801 801
802 802 (void) zvol_update_volsize(zv->zv_objset, old_volsize);
803 803 zvol_size_changed(zv, old_volsize);
804 804 dumpify_error = zvol_dumpify(zv);
805 805 error = dumpify_error ? dumpify_error : error;
806 806 }
807 807 }
808 808
809 809 /*
810 810 * Generate a LUN expansion event.
811 811 */
812 812 if (error == 0) {
813 813 sysevent_id_t eid;
814 814 nvlist_t *attr;
815 815 char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
816 816
817 817 (void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
818 818 zv->zv_minor);
819 819
820 820 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
821 821 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
822 822
823 823 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
824 824 ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
825 825
826 826 nvlist_free(attr);
827 827 kmem_free(physpath, MAXPATHLEN);
828 828 }
829 829 return (error);
830 830 }
831 831
832 832 int
833 833 zvol_set_volsize(const char *name, uint64_t volsize)
834 834 {
835 835 zvol_state_t *zv = NULL;
836 836 objset_t *os;
837 837 int error;
838 838 dmu_object_info_t doi;
839 839 uint64_t readonly;
840 840 boolean_t owned = B_FALSE;
841 841
842 842 error = dsl_prop_get_integer(name,
843 843 zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
844 844 if (error != 0)
845 845 return (error);
846 846 if (readonly)
847 847 return (SET_ERROR(EROFS));
848 848
849 849 mutex_enter(&zfsdev_state_lock);
850 850 zv = zvol_minor_lookup(name);
851 851
852 852 if (zv == NULL || zv->zv_objset == NULL) {
853 853 if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
854 854 FTAG, &os)) != 0) {
855 855 mutex_exit(&zfsdev_state_lock);
856 856 return (error);
857 857 }
858 858 owned = B_TRUE;
859 859 if (zv != NULL)
860 860 zv->zv_objset = os;
861 861 } else {
862 862 os = zv->zv_objset;
863 863 }
864 864
865 865 if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
866 866 (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
867 867 goto out;
868 868
869 869 error = zvol_update_volsize(os, volsize);
870 870
871 871 if (error == 0 && zv != NULL)
872 872 error = zvol_update_live_volsize(zv, volsize);
873 873 out:
874 874 if (owned) {
875 875 dmu_objset_disown(os, FTAG);
876 876 if (zv != NULL)
877 877 zv->zv_objset = NULL;
878 878 }
879 879 mutex_exit(&zfsdev_state_lock);
880 880 return (error);
881 881 }
882 882
883 883 /*ARGSUSED*/
884 884 int
885 885 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
886 886 {
887 887 zvol_state_t *zv;
888 888 int err = 0;
889 889
890 890 mutex_enter(&zfsdev_state_lock);
891 891
892 892 zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
893 893 if (zv == NULL) {
894 894 mutex_exit(&zfsdev_state_lock);
895 895 return (SET_ERROR(ENXIO));
896 896 }
897 897
898 898 if (zv->zv_total_opens == 0)
899 899 err = zvol_first_open(zv);
900 900 if (err) {
901 901 mutex_exit(&zfsdev_state_lock);
902 902 return (err);
903 903 }
904 904 if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
905 905 err = SET_ERROR(EROFS);
906 906 goto out;
907 907 }
908 908 if (zv->zv_flags & ZVOL_EXCL) {
909 909 err = SET_ERROR(EBUSY);
910 910 goto out;
911 911 }
912 912 if (flag & FEXCL) {
913 913 if (zv->zv_total_opens != 0) {
914 914 err = SET_ERROR(EBUSY);
915 915 goto out;
916 916 }
917 917 zv->zv_flags |= ZVOL_EXCL;
918 918 }
919 919
920 920 if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
921 921 zv->zv_open_count[otyp]++;
922 922 zv->zv_total_opens++;
923 923 }
924 924 mutex_exit(&zfsdev_state_lock);
925 925
926 926 return (err);
927 927 out:
928 928 if (zv->zv_total_opens == 0)
929 929 zvol_last_close(zv);
930 930 mutex_exit(&zfsdev_state_lock);
931 931 return (err);
932 932 }
933 933
934 934 /*ARGSUSED*/
935 935 int
936 936 zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
937 937 {
938 938 minor_t minor = getminor(dev);
939 939 zvol_state_t *zv;
940 940 int error = 0;
941 941
942 942 mutex_enter(&zfsdev_state_lock);
943 943
944 944 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
945 945 if (zv == NULL) {
946 946 mutex_exit(&zfsdev_state_lock);
947 947 return (SET_ERROR(ENXIO));
948 948 }
949 949
950 950 if (zv->zv_flags & ZVOL_EXCL) {
951 951 ASSERT(zv->zv_total_opens == 1);
952 952 zv->zv_flags &= ~ZVOL_EXCL;
953 953 }
954 954
955 955 /*
956 956 * If the open count is zero, this is a spurious close.
957 957 * That indicates a bug in the kernel / DDI framework.
958 958 */
959 959 ASSERT(zv->zv_open_count[otyp] != 0);
960 960 ASSERT(zv->zv_total_opens != 0);
961 961
962 962 /*
963 963 * You may get multiple opens, but only one close.
964 964 */
965 965 zv->zv_open_count[otyp]--;
966 966 zv->zv_total_opens--;
967 967
968 968 if (zv->zv_total_opens == 0)
969 969 zvol_last_close(zv);
970 970
971 971 mutex_exit(&zfsdev_state_lock);
972 972 return (error);
973 973 }
974 974
975 975 /* ARGSUSED */
976 976 static void
977 977 zvol_get_done(zgd_t *zgd, int error)
978 978 {
979 979 if (zgd->zgd_db)
980 980 dmu_buf_rele(zgd->zgd_db, zgd);
981 981
982 982 rangelock_exit(zgd->zgd_lr);
983 983
984 984 kmem_free(zgd, sizeof (zgd_t));
985 985 }
986 986
987 987 /*
988 988 * Get data to generate a TX_WRITE intent log record.
989 989 */
990 990 static int
991 991 zvol_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
992 992 {
993 993 zvol_state_t *zv = arg;
994 994 uint64_t offset = lr->lr_offset;
995 995 uint64_t size = lr->lr_length; /* length of user data */
996 996 dmu_buf_t *db;
997 997 zgd_t *zgd;
998 998 int error;
999 999
1000 1000 ASSERT3P(lwb, !=, NULL);
1001 1001 ASSERT3P(zio, !=, NULL);
1002 1002 ASSERT3U(size, !=, 0);
1003 1003
1004 1004 zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
1005 1005 zgd->zgd_lwb = lwb;
1006 1006
1007 1007 /*
1008 1008 * Write records come in two flavors: immediate and indirect.
1009 1009 * For small writes it's cheaper to store the data with the
1010 1010 * log record (immediate); for large writes it's cheaper to
1011 1011 * sync the data and get a pointer to it (indirect) so that
1012 1012 * we don't have to write the data twice.
1013 1013 */
1014 1014 if (buf != NULL) { /* immediate write */
1015 1015 zgd->zgd_lr = rangelock_enter(&zv->zv_rangelock, offset, size,
1016 1016 RL_READER);
1017 1017 error = dmu_read_by_dnode(zv->zv_dn, offset, size, buf,
1018 1018 DMU_READ_NO_PREFETCH);
1019 1019 } else { /* indirect write */
1020 1020 /*
1021 1021 * Have to lock the whole block to ensure when it's written out
1022 1022 * and its checksum is being calculated that no one can change
1023 1023 * the data. Contrary to zfs_get_data, we need not re-check
1024 1024 * blocksize after we get the lock because it cannot be changed.
1025 1025 */
1026 1026 size = zv->zv_volblocksize;
1027 1027 offset = P2ALIGN(offset, size);
1028 1028 zgd->zgd_lr = rangelock_enter(&zv->zv_rangelock, offset, size,
1029 1029 RL_READER);
1030 1030 error = dmu_buf_hold_by_dnode(zv->zv_dn, offset, zgd, &db,
1031 1031 DMU_READ_NO_PREFETCH);
1032 1032 if (error == 0) {
1033 1033 blkptr_t *bp = &lr->lr_blkptr;
1034 1034
1035 1035 zgd->zgd_db = db;
1036 1036 zgd->zgd_bp = bp;
1037 1037
1038 1038 ASSERT(db->db_offset == offset);
1039 1039 ASSERT(db->db_size == size);
1040 1040
1041 1041 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1042 1042 zvol_get_done, zgd);
1043 1043
1044 1044 if (error == 0)
1045 1045 return (0);
1046 1046 }
1047 1047 }
1048 1048
1049 1049 zvol_get_done(zgd, error);
1050 1050
1051 1051 return (error);
1052 1052 }
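For the indirect path above, the range lock is widened to the full volume block before dmu_sync() so the checksum is computed over stable data. A worked example with an assumed 128K volblocksize (illustrative, not part of the diff):

	uint64_t size = 131072;				/* zv_volblocksize */
	uint64_t offset = P2ALIGN(200000, 131072);	/* rounds down to 131072 */
	/* The READER range lock then covers [131072, 262144). */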
1053 1053
1054 1054 /*
1055 1055 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1056 1056 *
1057 1057 * We store data in the log buffers if it's small enough.
1058 1058 * Otherwise we will later flush the data out via dmu_sync().
1059 1059 */
1060 1060 ssize_t zvol_immediate_write_sz = 32768;
1061 1061
1062 1062 static void
1063 1063 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1064 1064 boolean_t sync)
1065 1065 {
1066 1066 uint32_t blocksize = zv->zv_volblocksize;
1067 1067 zilog_t *zilog = zv->zv_zilog;
1068 1068 itx_wr_state_t write_state;
1069 1069
1070 1070 if (zil_replaying(zilog, tx))
1071 1071 return;
1072 1072
1073 1073 if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1074 1074 write_state = WR_INDIRECT;
1075 1075 else if (!spa_has_slogs(zilog->zl_spa) &&
1076 1076 resid >= blocksize && blocksize > zvol_immediate_write_sz)
1077 1077 write_state = WR_INDIRECT;
1078 1078 else if (sync)
1079 1079 write_state = WR_COPIED;
1080 1080 else
1081 1081 write_state = WR_NEED_COPY;
1082 1082
1083 1083 while (resid) {
1084 1084 itx_t *itx;
1085 1085 lr_write_t *lr;
1086 1086 itx_wr_state_t wr_state = write_state;
1087 1087 ssize_t len = resid;
1088 1088
1089 1089 if (wr_state == WR_COPIED && resid > ZIL_MAX_COPIED_DATA)
1090 1090 wr_state = WR_NEED_COPY;
1091 1091 else if (wr_state == WR_INDIRECT)
1092 1092 len = MIN(blocksize - P2PHASE(off, blocksize), resid);
1093 1093
1094 1094 itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1095 1095 (wr_state == WR_COPIED ? len : 0));
1096 1096 lr = (lr_write_t *)&itx->itx_lr;
1097 1097 if (wr_state == WR_COPIED && dmu_read_by_dnode(zv->zv_dn,
1098 1098 off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1099 1099 zil_itx_destroy(itx);
1100 1100 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1101 1101 lr = (lr_write_t *)&itx->itx_lr;
1102 1102 wr_state = WR_NEED_COPY;
1103 1103 }
1104 1104
1105 1105 itx->itx_wr_state = wr_state;
1106 1106 lr->lr_foid = ZVOL_OBJ;
1107 1107 lr->lr_offset = off;
1108 1108 lr->lr_length = len;
1109 1109 lr->lr_blkoff = 0;
1110 1110 BP_ZERO(&lr->lr_blkptr);
1111 1111
1112 1112 itx->itx_private = zv;
1113 1113 itx->itx_sync = sync;
1114 1114
1115 1115 zil_itx_assign(zilog, itx, tx);
1116 1116
1117 1117 off += len;
1118 1118 resid -= len;
1119 1119 }
1120 1120 }
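The write-state selection at the top of zvol_log_write() condenses to the following decision, shown here as a standalone sketch. The helper is hypothetical and not part of the diff; it merely mirrors the conditions above:

	/* Hypothetical helper, not in the diff: mirrors the selection above. */
	static itx_wr_state_t
	example_wr_state(zilog_t *zilog, ssize_t resid, uint32_t blocksize,
	    boolean_t sync)
	{
		if (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
			return (WR_INDIRECT);	/* always write via the pool */
		if (!spa_has_slogs(zilog->zl_spa) &&
		    resid >= blocksize && blocksize > zvol_immediate_write_sz)
			return (WR_INDIRECT);	/* large write, no slog device */
		return (sync ? WR_COPIED : WR_NEED_COPY);
	}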
1121 1121
1122 1122 static int
1123 1123 zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1124 1124 uint64_t size, boolean_t doread, boolean_t isdump)
1125 1125 {
1126 1126 vdev_disk_t *dvd;
1127 1127 int c;
1128 1128 int numerrors = 0;
1129 1129
1130 1130 if (vd->vdev_ops == &vdev_mirror_ops ||
1131 1131 vd->vdev_ops == &vdev_replacing_ops ||
1132 1132 vd->vdev_ops == &vdev_spare_ops) {
1133 1133 for (c = 0; c < vd->vdev_children; c++) {
1134 1134 int err = zvol_dumpio_vdev(vd->vdev_child[c],
1135 1135 addr, offset, origoffset, size, doread, isdump);
1136 1136 if (err != 0) {
1137 1137 numerrors++;
1138 1138 } else if (doread) {
1139 1139 break;
1140 1140 }
1141 1141 }
1142 1142 }
1143 1143
1144 1144 if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1145 1145 return (numerrors < vd->vdev_children ? 0 : EIO);
1146 1146
1147 1147 if (doread && !vdev_readable(vd))
1148 1148 return (SET_ERROR(EIO));
1149 1149 else if (!doread && !vdev_writeable(vd))
1150 1150 return (SET_ERROR(EIO));
1151 1151
1152 1152 if (vd->vdev_ops == &vdev_raidz_ops) {
1153 1153 return (vdev_raidz_physio(vd,
1154 1154 addr, size, offset, origoffset, doread, isdump));
1155 1155 }
1156 1156
1157 1157 offset += VDEV_LABEL_START_SIZE;
1158 1158
1159 1159 if (ddi_in_panic() || isdump) {
1160 1160 ASSERT(!doread);
1161 1161 if (doread)
1162 1162 return (SET_ERROR(EIO));
1163 1163 dvd = vd->vdev_tsd;
1164 1164 ASSERT3P(dvd, !=, NULL);
1165 1165 return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1166 1166 lbtodb(size)));
1167 1167 } else {
1168 1168 dvd = vd->vdev_tsd;
1169 1169 ASSERT3P(dvd, !=, NULL);
1170 1170 return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1171 1171 offset, doread ? B_READ : B_WRITE));
1172 1172 }
1173 1173 }
1174 1174
1175 1175 static int
1176 1176 zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1177 1177 boolean_t doread, boolean_t isdump)
1178 1178 {
1179 1179 vdev_t *vd;
1180 1180 int error;
1181 1181 zvol_extent_t *ze;
1182 1182 spa_t *spa = dmu_objset_spa(zv->zv_objset);
1183 1183
1184 1184 /* Must be sector aligned, and not straddle a block boundary. */
1185 1185 if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1186 1186 P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1187 1187 return (SET_ERROR(EINVAL));
1188 1188 }
1189 1189 ASSERT(size <= zv->zv_volblocksize);
1190 1190
1191 1191 /* Locate the extent this belongs to */
1192 1192 ze = list_head(&zv->zv_extents);
1193 1193 while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
1194 1194 offset -= ze->ze_nblks * zv->zv_volblocksize;
1195 1195 ze = list_next(&zv->zv_extents, ze);
1196 1196 }
1197 1197
1198 1198 if (ze == NULL)
1199 1199 return (SET_ERROR(EINVAL));
1200 1200
1201 1201 if (!ddi_in_panic())
1202 1202 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1203 1203
1204 1204 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1205 1205 offset += DVA_GET_OFFSET(&ze->ze_dva);
1206 1206 error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1207 1207 size, doread, isdump);
1208 1208
1209 1209 if (!ddi_in_panic())
1210 1210 spa_config_exit(spa, SCL_STATE, FTAG);
1211 1211
1212 1212 return (error);
1213 1213 }
1214 1214
1215 1215 int
1216 1216 zvol_strategy(buf_t *bp)
1217 1217 {
1218 1218 zfs_soft_state_t *zs = NULL;
1219 1219 zvol_state_t *zv;
1220 1220 uint64_t off, volsize;
1221 1221 size_t resid;
1222 1222 char *addr;
1223 1223 objset_t *os;
1224 1224 int error = 0;
1225 1225 boolean_t doread = bp->b_flags & B_READ;
1226 1226 boolean_t is_dumpified;
1227 1227 boolean_t sync;
1228 1228
1229 1229 if (getminor(bp->b_edev) == 0) {
1230 1230 error = SET_ERROR(EINVAL);
1231 1231 } else {
1232 1232 zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1233 1233 if (zs == NULL)
1234 1234 error = SET_ERROR(ENXIO);
1235 1235 else if (zs->zss_type != ZSST_ZVOL)
1236 1236 error = SET_ERROR(EINVAL);
1237 1237 }
1238 1238
1239 1239 if (error) {
1240 1240 bioerror(bp, error);
1241 1241 biodone(bp);
1242 1242 return (0);
1243 1243 }
1244 1244
1245 1245 zv = zs->zss_data;
1246 1246
1247 1247 if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1248 1248 bioerror(bp, EROFS);
1249 1249 biodone(bp);
1250 1250 return (0);
1251 1251 }
1252 1252
1253 1253 off = ldbtob(bp->b_blkno);
1254 1254 volsize = zv->zv_volsize;
1255 1255
1256 1256 os = zv->zv_objset;
1257 1257 ASSERT(os != NULL);
1258 1258
1259 1259 bp_mapin(bp);
1260 1260 addr = bp->b_un.b_addr;
1261 1261 resid = bp->b_bcount;
1262 1262
1263 1263 if (resid > 0 && (off < 0 || off >= volsize)) {
1264 1264 bioerror(bp, EIO);
1265 1265 biodone(bp);
1266 1266 return (0);
1267 1267 }
1268 1268
1269 1269 is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
1270 1270 sync = ((!(bp->b_flags & B_ASYNC) &&
1271 1271 !(zv->zv_flags & ZVOL_WCE)) ||
1272 1272 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1273 1273 !doread && !is_dumpified;
1274 1274
1275 + ht_begin_unsafe();
1276 +
1275 1277 /*
1276 1278 * There must be no buffer changes when doing a dmu_sync() because
1277 1279 * we can't change the data whilst calculating the checksum.
1278 1280 */
1279 1281 locked_range_t *lr = rangelock_enter(&zv->zv_rangelock, off, resid,
1280 1282 doread ? RL_READER : RL_WRITER);
1281 1283
1282 1284 while (resid != 0 && off < volsize) {
1283 1285 size_t size = MIN(resid, zvol_maxphys);
1284 1286 if (is_dumpified) {
1285 1287 size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1286 1288 error = zvol_dumpio(zv, addr, off, size,
1287 1289 doread, B_FALSE);
1288 1290 } else if (doread) {
1289 1291 error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1290 1292 DMU_READ_PREFETCH);
1291 1293 } else {
1292 1294 dmu_tx_t *tx = dmu_tx_create(os);
1293 1295 dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1294 1296 error = dmu_tx_assign(tx, TXG_WAIT);
1295 1297 if (error) {
1296 1298 dmu_tx_abort(tx);
1297 1299 } else {
1298 1300 dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1299 1301 zvol_log_write(zv, tx, off, size, sync);
1300 1302 dmu_tx_commit(tx);
1301 1303 }
1302 1304 }
1303 1305 if (error) {
1304 1306 /* convert checksum errors into IO errors */
1305 1307 if (error == ECKSUM)
1306 1308 error = SET_ERROR(EIO);
1307 1309 break;
1308 1310 }
1309 1311 off += size;
1310 1312 addr += size;
1311 1313 resid -= size;
1312 1314 }
1313 1315 rangelock_exit(lr);
1314 1316
1315 1317 if ((bp->b_resid = resid) == bp->b_bcount)
1316 1318 bioerror(bp, off > volsize ? EINVAL : error);
1317 1319
1318 1320 if (sync)
1319 1321 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1320 1322 biodone(bp);
1321 1323
1324 + ht_end_unsafe();
1325 +
1322 1326 return (0);
1323 1327 }
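Note where the bracketing lands in zvol_strategy(): the parameter-validation failures return via biodone() before ht_begin_unsafe(), and ht_end_unsafe() runs only after the final zil_commit()/biodone(), so each entry into the unsafe region is matched by exactly one exit. The same shape recurs in zvol_read(), zvol_write(), and the DKIOC* paths below. A skeleton of that control flow (not part of the diff):

	/*
	 *	if (bad minor / read-only violation / out-of-range offset)
	 *		return (0);		-- before the bracket opens
	 *	ht_begin_unsafe();
	 *	rangelock_enter(); ... DMU loop ...; rangelock_exit();
	 *	zil_commit(); biodone();
	 *	ht_end_unsafe();
	 *	return (0);
	 */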
1324 1328
1325 1329 /*
1326 1330 * Set the buffer count to the zvol maximum transfer.
1327 1331 * Using our own routine instead of the default minphys()
1328 1332 * means that for larger writes we write bigger buffers on X86
1329 1333 * (128K instead of 56K) and flush the disk write cache less often
1330 1334 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
1331 1335 * 56K on X86 and 128K on sparc).
1332 1336 */
1333 1337 void
1334 1338 zvol_minphys(struct buf *bp)
1335 1339 {
1336 1340 if (bp->b_bcount > zvol_maxphys)
1337 1341 bp->b_bcount = zvol_maxphys;
1338 1342 }
1339 1343
1340 1344 int
1341 1345 zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1342 1346 {
1343 1347 minor_t minor = getminor(dev);
1344 1348 zvol_state_t *zv;
1345 1349 int error = 0;
1346 1350 uint64_t size;
1347 1351 uint64_t boff;
1348 1352 uint64_t resid;
1349 1353
1350 1354 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1351 1355 if (zv == NULL)
1352 1356 return (SET_ERROR(ENXIO));
1353 1357
1354 1358 if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1355 1359 return (SET_ERROR(EINVAL));
1356 1360
1357 1361 boff = ldbtob(blkno);
1358 1362 resid = ldbtob(nblocks);
1359 1363
1360 1364 VERIFY3U(boff + resid, <=, zv->zv_volsize);
1361 1365
1362 1366 while (resid) {
1363 1367 size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1364 1368 error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1365 1369 if (error)
1366 1370 break;
1367 1371 boff += size;
1368 1372 addr += size;
1369 1373 resid -= size;
1370 1374 }
1371 1375
1372 1376 return (error);
1373 1377 }
1374 1378
1375 1379 /*ARGSUSED*/
1376 1380 int
1377 1381 zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1378 1382 {
1379 1383 minor_t minor = getminor(dev);
1380 1384 zvol_state_t *zv;
1381 1385 uint64_t volsize;
1382 1386 int error = 0;
1383 1387
1384 1388 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1385 1389 if (zv == NULL)
1386 1390 return (SET_ERROR(ENXIO));
1387 1391
1388 1392 volsize = zv->zv_volsize;
1389 1393 if (uio->uio_resid > 0 &&
1390 1394 (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1391 1395 return (SET_ERROR(EIO));
1392 1396
1393 1397 if (zv->zv_flags & ZVOL_DUMPIFIED) {
1394 1398 error = physio(zvol_strategy, NULL, dev, B_READ,
1395 1399 zvol_minphys, uio);
1396 1400 return (error);
1397 1401 }
1398 1402
1403 + ht_begin_unsafe();
1404 +
1399 1405 locked_range_t *lr = rangelock_enter(&zv->zv_rangelock,
1400 1406 uio->uio_loffset, uio->uio_resid, RL_READER);
1401 1407 while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1402 1408 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1403 1409
1404 1410 /* don't read past the end */
1405 1411 if (bytes > volsize - uio->uio_loffset)
1406 1412 bytes = volsize - uio->uio_loffset;
1407 1413
1408 1414 error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1409 1415 if (error) {
1410 1416 /* convert checksum errors into IO errors */
1411 1417 if (error == ECKSUM)
1412 1418 error = SET_ERROR(EIO);
1413 1419 break;
1414 1420 }
1415 1421 }
1416 1422 rangelock_exit(lr);
1417 1423
1424 + ht_end_unsafe();
1425 +
1418 1426 return (error);
1419 1427 }
1420 1428
1421 1429 /*ARGSUSED*/
1422 1430 int
1423 1431 zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1424 1432 {
1425 1433 minor_t minor = getminor(dev);
1426 1434 zvol_state_t *zv;
1427 1435 uint64_t volsize;
1428 1436 int error = 0;
1429 1437 boolean_t sync;
1430 1438
1431 1439 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1432 1440 if (zv == NULL)
1433 1441 return (SET_ERROR(ENXIO));
1434 1442
1435 1443 volsize = zv->zv_volsize;
1436 1444 if (uio->uio_resid > 0 &&
1437 1445 (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1438 1446 return (SET_ERROR(EIO));
1439 1447
1440 1448 if (zv->zv_flags & ZVOL_DUMPIFIED) {
1441 1449 error = physio(zvol_strategy, NULL, dev, B_WRITE,
1442 1450 zvol_minphys, uio);
1443 1451 return (error);
1444 1452 }
1445 1453
1454 + ht_begin_unsafe();
1455 +
1446 1456 sync = !(zv->zv_flags & ZVOL_WCE) ||
1447 1457 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1448 1458
1449 1459 locked_range_t *lr = rangelock_enter(&zv->zv_rangelock,
1450 1460 uio->uio_loffset, uio->uio_resid, RL_WRITER);
1451 1461 while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1452 1462 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1453 1463 uint64_t off = uio->uio_loffset;
1454 1464 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1455 1465
1456 1466 if (bytes > volsize - off) /* don't write past the end */
1457 1467 bytes = volsize - off;
1458 1468
1459 1469 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1460 1470 error = dmu_tx_assign(tx, TXG_WAIT);
1461 1471 if (error) {
1462 1472 dmu_tx_abort(tx);
1463 1473 break;
1464 1474 }
1465 1475 error = dmu_write_uio_dnode(zv->zv_dn, uio, bytes, tx);
1466 1476 if (error == 0)
1467 1477 zvol_log_write(zv, tx, off, bytes, sync);
1468 1478 dmu_tx_commit(tx);
1469 1479
1470 1480 if (error)
1471 1481 break;
1472 1482 }
1473 1483 rangelock_exit(lr);
1474 1484
1475 1485 if (sync)
1476 1486 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1487 +
1488 + ht_end_unsafe();
1489 +
1477 1490 return (error);
1478 1491 }
1479 1492
1480 1493 int
1481 1494 zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1482 1495 {
1483 1496 struct uuid uuid = EFI_RESERVED;
1484 1497 efi_gpe_t gpe = { 0 };
1485 1498 uint32_t crc;
1486 1499 dk_efi_t efi;
1487 1500 int length;
1488 1501 char *ptr;
1489 1502
1490 1503 if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1491 1504 return (SET_ERROR(EFAULT));
1492 1505 ptr = (char *)(uintptr_t)efi.dki_data_64;
1493 1506 length = efi.dki_length;
1494 1507 /*
1495 1508 * Some clients may attempt to request a PMBR for the
1496 1509 * zvol. Currently this interface will return EINVAL to
1497 1510 * such requests. These requests could be supported by
1498 1511 * adding a check for lba == 0 and consing up an appropriate
1499 1512 * PMBR.
1500 1513 */
1501 1514 if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1502 1515 return (SET_ERROR(EINVAL));
1503 1516
1504 1517 gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1505 1518 gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1506 1519 UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1507 1520
1508 1521 if (efi.dki_lba == 1) {
1509 1522 efi_gpt_t gpt = { 0 };
1510 1523
1511 1524 gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1512 1525 gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1513 1526 gpt.efi_gpt_HeaderSize = LE_32(EFI_HEADER_SIZE);
1514 1527 gpt.efi_gpt_MyLBA = LE_64(1ULL);
1515 1528 gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1516 1529 gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1517 1530 gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1518 1531 gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1519 1532 gpt.efi_gpt_SizeOfPartitionEntry =
1520 1533 LE_32(sizeof (efi_gpe_t));
1521 1534 CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1522 1535 gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1523 1536 CRC32(crc, &gpt, EFI_HEADER_SIZE, -1U, crc32_table);
1524 1537 gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1525 1538 if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1526 1539 flag))
1527 1540 return (SET_ERROR(EFAULT));
1528 1541 ptr += sizeof (gpt);
1529 1542 length -= sizeof (gpt);
1530 1543 }
1531 1544 if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1532 1545 length), flag))
1533 1546 return (SET_ERROR(EFAULT));
1534 1547 return (0);
1535 1548 }
1536 1549
1537 1550 /*
1538 1551 * BEGIN entry points to allow external callers access to the volume.
1539 1552 */
1540 1553 /*
1541 1554 * Return the volume parameters needed for access from an external caller.
1542 1555 * These values are invariant as long as the volume is held open.
1543 1556 */
1544 1557 int
1545 1558 zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1546 1559 uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1547 1560 void **rl_hdl, void **dnode_hdl)
1548 1561 {
1549 1562 zvol_state_t *zv;
1550 1563
1551 1564 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1552 1565 if (zv == NULL)
1553 1566 return (SET_ERROR(ENXIO));
1554 1567 if (zv->zv_flags & ZVOL_DUMPIFIED)
1555 1568 return (SET_ERROR(ENXIO));
1556 1569
1557 1570 ASSERT(blksize && max_xfer_len && minor_hdl &&
1558 1571 objset_hdl && zil_hdl && rl_hdl && dnode_hdl);
1559 1572
1560 1573 *blksize = zv->zv_volblocksize;
1561 1574 *max_xfer_len = (uint64_t)zvol_maxphys;
1562 1575 *minor_hdl = zv;
1563 1576 *objset_hdl = zv->zv_objset;
1564 1577 *zil_hdl = zv->zv_zilog;
1565 1578 *rl_hdl = &zv->zv_rangelock;
1566 1579 *dnode_hdl = zv->zv_dn;
1567 1580 return (0);
1568 1581 }
1569 1582
1570 1583 /*
1571 1584 * Return the current volume size to an external caller.
1572 1585 * The size can change while the volume is open.
1573 1586 */
1574 1587 uint64_t
1575 1588 zvol_get_volume_size(void *minor_hdl)
1576 1589 {
1577 1590 zvol_state_t *zv = minor_hdl;
1578 1591
1579 1592 return (zv->zv_volsize);
1580 1593 }
1581 1594
1582 1595 /*
1583 1596 * Return the current WCE setting to an external caller.
1584 1597 * The WCE setting can change while the volume is open.
1585 1598 */
1586 1599 int
1587 1600 zvol_get_volume_wce(void *minor_hdl)
1588 1601 {
1589 1602 zvol_state_t *zv = minor_hdl;
1590 1603
1591 1604 return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
1592 1605 }
1593 1606
1594 1607 /*
1595 1608 * Entry point for external callers to zvol_log_write
1596 1609 */
1597 1610 void
1598 1611 zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
1599 1612 boolean_t sync)
1600 1613 {
1601 1614 zvol_state_t *zv = minor_hdl;
1602 1615
1603 1616 zvol_log_write(zv, tx, off, resid, sync);
1604 1617 }
1605 1618 /*
1606 1619 * END entry points to allow external callers access to the volume.
1607 1620 */
1608 1621
1609 1622 /*
1610 1623 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1611 1624 */
1612 1625 static void
1613 1626 zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
1614 1627 boolean_t sync)
1615 1628 {
1616 1629 itx_t *itx;
1617 1630 lr_truncate_t *lr;
1618 1631 zilog_t *zilog = zv->zv_zilog;
1619 1632
1620 1633 if (zil_replaying(zilog, tx))
1621 1634 return;
1622 1635
1623 1636 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1624 1637 lr = (lr_truncate_t *)&itx->itx_lr;
1625 1638 lr->lr_foid = ZVOL_OBJ;
1626 1639 lr->lr_offset = off;
1627 1640 lr->lr_length = len;
1628 1641
1629 1642 itx->itx_sync = sync;
1630 1643 zil_itx_assign(zilog, itx, tx);
1631 1644 }
1632 1645
1633 1646 /*
1634 1647 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems. See dkio(7I).
1635 1648 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1636 1649 */
1637 1650 /*ARGSUSED*/
1638 1651 int
1639 1652 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1640 1653 {
1641 1654 zvol_state_t *zv;
1642 1655 struct dk_callback *dkc;
1643 1656 int error = 0;
1644 1657 locked_range_t *lr;
1645 1658
1646 1659 mutex_enter(&zfsdev_state_lock);
1647 1660
1648 1661 zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1649 1662
1650 1663 if (zv == NULL) {
1651 1664 mutex_exit(&zfsdev_state_lock);
1652 1665 return (SET_ERROR(ENXIO));
1653 1666 }
1654 1667 ASSERT(zv->zv_total_opens > 0);
1655 1668
1656 1669 switch (cmd) {
1657 1670
1658 1671 case DKIOCINFO:
1659 1672 {
1660 1673 struct dk_cinfo dki;
1661 1674
1662 1675 bzero(&dki, sizeof (dki));
1663 1676 (void) strcpy(dki.dki_cname, "zvol");
1664 1677 (void) strcpy(dki.dki_dname, "zvol");
1665 1678 dki.dki_ctype = DKC_UNKNOWN;
1666 1679 dki.dki_unit = getminor(dev);
1667 1680 dki.dki_maxtransfer =
1668 1681 1 << (SPA_OLD_MAXBLOCKSHIFT - zv->zv_min_bs);
1669 1682 mutex_exit(&zfsdev_state_lock);
1670 1683 if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1671 1684 error = SET_ERROR(EFAULT);
1672 1685 return (error);
1673 1686 }
1674 1687
1675 1688 case DKIOCGMEDIAINFO:
1676 1689 {
1677 1690 struct dk_minfo dkm;
1678 1691
1679 1692 bzero(&dkm, sizeof (dkm));
1680 1693 dkm.dki_lbsize = 1U << zv->zv_min_bs;
1681 1694 dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1682 1695 dkm.dki_media_type = DK_UNKNOWN;
1683 1696 mutex_exit(&zfsdev_state_lock);
1684 1697 if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1685 1698 error = SET_ERROR(EFAULT);
1686 1699 return (error);
1687 1700 }
1688 1701
1689 1702 case DKIOCGMEDIAINFOEXT:
1690 1703 {
1691 1704 struct dk_minfo_ext dkmext;
1692 1705
1693 1706 bzero(&dkmext, sizeof (dkmext));
1694 1707 dkmext.dki_lbsize = 1U << zv->zv_min_bs;
1695 1708 dkmext.dki_pbsize = zv->zv_volblocksize;
1696 1709 dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1697 1710 dkmext.dki_media_type = DK_UNKNOWN;
1698 1711 mutex_exit(&zfsdev_state_lock);
1699 1712 if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
1700 1713 error = SET_ERROR(EFAULT);
1701 1714 return (error);
1702 1715 }
1703 1716
1704 1717 case DKIOCGETEFI:
1705 1718 {
1706 1719 uint64_t vs = zv->zv_volsize;
1707 1720 uint8_t bs = zv->zv_min_bs;
1708 1721
1709 1722 mutex_exit(&zfsdev_state_lock);
1710 1723 error = zvol_getefi((void *)arg, flag, vs, bs);
1711 1724 return (error);
1712 1725 }
1713 1726
1714 1727 case DKIOCFLUSHWRITECACHE:
1715 1728 dkc = (struct dk_callback *)arg;
1716 1729 mutex_exit(&zfsdev_state_lock);
1730 +
1731 + ht_begin_unsafe();
1732 +
1717 1733 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1718 1734 if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1719 1735 (*dkc->dkc_callback)(dkc->dkc_cookie, error);
1720 1736 error = 0;
1721 1737 }
1738 +
1739 + ht_end_unsafe();
1740 +
1722 1741 return (error);
1723 1742
1724 1743 case DKIOCGETWCE:
1725 1744 {
1726 1745 int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1727 1746 if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1728 1747 flag))
1729 1748 error = SET_ERROR(EFAULT);
1730 1749 break;
1731 1750 }
1732 1751 case DKIOCSETWCE:
1733 1752 {
1734 1753 int wce;
1735 1754 if (ddi_copyin((void *)arg, &wce, sizeof (int),
1736 1755 flag)) {
1737 1756 error = SET_ERROR(EFAULT);
1738 1757 break;
1739 1758 }
1740 1759 if (wce) {
1741 1760 zv->zv_flags |= ZVOL_WCE;
1742 1761 mutex_exit(&zfsdev_state_lock);
1743 1762 } else {
1744 1763 zv->zv_flags &= ~ZVOL_WCE;
1745 1764 mutex_exit(&zfsdev_state_lock);
1765 + ht_begin_unsafe();
1746 1766 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1767 + ht_end_unsafe();
1747 1768 }
1748 1769 return (0);
1749 1770 }
1750 1771
1751 1772 case DKIOCGGEOM:
1752 1773 case DKIOCGVTOC:
1753 1774 		/*
1754 1775 		 * Commands using these (like prtvtoc) expect ENOTSUP,
1755 1776 		 * since we're emulating an EFI label.
1756 1777 		 */
1757 1778 error = SET_ERROR(ENOTSUP);
1758 1779 break;
1759 1780
1760 1781 case DKIOCDUMPINIT:
1761 1782 lr = rangelock_enter(&zv->zv_rangelock, 0, zv->zv_volsize,
1762 1783 RL_WRITER);
1763 1784 error = zvol_dumpify(zv);
1764 1785 rangelock_exit(lr);
1765 1786 break;
1766 1787
1767 1788 case DKIOCDUMPFINI:
1768 1789 if (!(zv->zv_flags & ZVOL_DUMPIFIED))
1769 1790 break;
1770 1791 lr = rangelock_enter(&zv->zv_rangelock, 0, zv->zv_volsize,
1771 1792 RL_WRITER);
1772 1793 error = zvol_dump_fini(zv);
1773 1794 rangelock_exit(lr);
1774 1795 break;
1775 1796
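      Note that DKIOCDUMPINIT and DKIOCDUMPFINI above take the range
      lock across the entire volume as RL_WRITER: (un)dumpifying
      rewrites every block of the zvol, so all concurrent I/O has to be
      excluded, whereas ordinary I/O locks only the bytes it touches. A
      sketch of the contrast (off/len hypothetical):

          locked_range_t *lr;

          /* Ordinary I/O: lock just the range being written. */
          lr = rangelock_enter(&zv->zv_rangelock, off, len, RL_WRITER);
          /* ... dmu_write(), etc ... */
          rangelock_exit(lr);

          /* Dumpify: quiesce the whole volume first. */
          lr = rangelock_enter(&zv->zv_rangelock, 0, zv->zv_volsize,
              RL_WRITER);
          error = zvol_dumpify(zv);
          rangelock_exit(lr);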
1776 1797 case DKIOCFREE:
1777 1798 {
1778 1799 dkioc_free_list_t *dfl;
1779 1800 dmu_tx_t *tx;
1780 1801
1781 1802 if (!zvol_unmap_enabled)
1782 1803 break;
1783 1804
1784 1805 if (!(flag & FKIOCTL)) {
1785 1806 error = dfl_copyin((void *)arg, &dfl, flag, KM_SLEEP);
1786 1807 if (error != 0)
1787 1808 break;
1788 1809 } else {
[ 32 lines elided ]
1789 1810 dfl = (dkioc_free_list_t *)arg;
1790 1811 ASSERT3U(dfl->dfl_num_exts, <=, DFL_COPYIN_MAX_EXTS);
1791 1812 if (dfl->dfl_num_exts > DFL_COPYIN_MAX_EXTS) {
1792 1813 error = SET_ERROR(EINVAL);
1793 1814 break;
1794 1815 }
1795 1816 }
1796 1817
1797 1818 mutex_exit(&zfsdev_state_lock);
1798 1819
1820 + ht_begin_unsafe();
1821 +
1799 1822 for (int i = 0; i < dfl->dfl_num_exts; i++) {
1800 1823 uint64_t start = dfl->dfl_exts[i].dfle_start,
1801 1824 length = dfl->dfl_exts[i].dfle_length,
1802 1825 end = start + length;
1803 1826
1804 1827 			/*
1805 1828 			 * Apply Postel's Law to length-checking: if the
1806 1829 			 * caller overshoots, just free out to the end of
1807 1830 			 * the volume, if there is anything left to free.
1808 1831 			 */
1809 1832 if (start >= zv->zv_volsize)
1810 1833 continue; /* No need to do anything... */
1811 1834 if (end > zv->zv_volsize) {
1812 1835 end = DMU_OBJECT_END;
1813 1836 length = end - start;
1814 1837 }
1815 1838
1816 1839 lr = rangelock_enter(&zv->zv_rangelock, start, length,
1817 1840 RL_WRITER);
1818 1841 tx = dmu_tx_create(zv->zv_objset);
1819 1842 error = dmu_tx_assign(tx, TXG_WAIT);
1820 1843 if (error != 0) {
1821 1844 dmu_tx_abort(tx);
1822 1845 } else {
1823 1846 zvol_log_truncate(zv, tx, start, length,
1824 1847 B_TRUE);
1825 1848 dmu_tx_commit(tx);
1826 1849 error = dmu_free_long_range(zv->zv_objset,
1827 1850 ZVOL_OBJ, start, length);
1828 1851 }
1829 1852
1830 1853 rangelock_exit(lr);
1831 1854
1832 1855 if (error != 0)
1833 1856 break;
1834 1857 }
1835 1858
1836 1859 		/*
1837 1860 		 * If the write cache is disabled, the 'sync' property is
1838 1861 		 * set to 'always', or the caller asked for a synchronous
1839 1862 		 * free, commit this operation to the ZIL. This syncs any
1840 1863 		 * previously uncommitted writes to the zvol object. The
1841 1864 		 * commit as a whole is gated by the zvol_unmap_sync_enabled
1842 1865 		 * tunable.
1843 1866 		 */
[ 35 lines elided ]
1844 1867 if ((error == 0) && zvol_unmap_sync_enabled &&
1845 1868 (!(zv->zv_flags & ZVOL_WCE) ||
1846 1869 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS) ||
1847 1870 (dfl->dfl_flags & DF_WAIT_SYNC))) {
1848 1871 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1849 1872 }
1850 1873
1851 1874 if (!(flag & FKIOCTL))
1852 1875 dfl_free(dfl);
1853 1876
1877 + ht_end_unsafe();
1878 +
1854 1879 return (error);
1855 1880 }
1856 1881
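      For reference, the whole DKIOCFREE path above can be driven from
      userland; a hedged sketch (device path hypothetical, and the
      dkioc_free_list_t layout, DFL_SZ(), and the dfl_offset contract
      are from memory of <sys/dkio.h> -- verify against the header):

          #include <sys/types.h>
          #include <sys/dkio.h>
          #include <fcntl.h>
          #include <stddef.h>
          #include <stdio.h>
          #include <stdlib.h>
          #include <stropts.h>
          #include <unistd.h>

          int
          main(void)
          {
                  dkioc_free_list_t *dfl;
                  int fd;

                  fd = open("/dev/zvol/rdsk/tank/vol0", O_RDWR);
                  if (fd < 0) {
                          perror("open");
                          return (1);
                  }
                  /* One extent: free the first 1 MiB of the volume. */
                  dfl = calloc(1, DFL_SZ(1));
                  dfl->dfl_num_exts = 1;
                  /* Where the extents sit relative to the struct (assumed). */
                  dfl->dfl_offset = offsetof(dkioc_free_list_t, dfl_exts);
                  dfl->dfl_flags = DF_WAIT_SYNC;  /* force the zil_commit above */
                  dfl->dfl_exts[0].dfle_start = 0;
                  dfl->dfl_exts[0].dfle_length = 1024 * 1024;
                  if (ioctl(fd, DKIOCFREE, dfl) != 0)
                          perror("DKIOCFREE");
                  free(dfl);
                  (void) close(fd);
                  return (0);
          }

      Per the handler above, extents past zv_volsize are tolerated
      (Postel's Law), and the free is committed to the ZIL when
      DF_WAIT_SYNC is set, the write cache is off, or sync=always.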
1857 1882 default:
1858 1883 error = SET_ERROR(ENOTTY);
1859 1884 break;
1860 1885
1861 1886 }
1862 1887 mutex_exit(&zfsdev_state_lock);
1863 1888 return (error);
1864 1889 }
1865 1890
1866 1891 int
1867 1892 zvol_busy(void)
1868 1893 {
1869 1894 return (zvol_minors != 0);
1870 1895 }
1871 1896
1872 1897 void
1873 1898 zvol_init(void)
1874 1899 {
1875 1900 VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
1876 1901 1) == 0);
1877 1902 mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
1878 1903 }
1879 1904
1880 1905 void
1881 1906 zvol_fini(void)
1882 1907 {
1883 1908 mutex_destroy(&zfsdev_state_lock);
1884 1909 ddi_soft_state_fini(&zfsdev_state);
1885 1910 }
1886 1911
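      zvol_init()/zvol_fini() above manage the zfsdev_state soft-state
      array and its lock. A hedged sketch of how a per-minor entry is
      typically allocated and found with the DDI soft-state routines
      (minor value hypothetical; this mirrors usage elsewhere in this
      file rather than quoting it):

          zfs_soft_state_t *zs;

          if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS)
                  return (SET_ERROR(EAGAIN));
          zs = ddi_get_soft_state(zfsdev_state, minor);
          /* ... initialize zs, publish the minor node ... */

          /* Teardown for one minor: */
          ddi_soft_state_free(zfsdev_state, minor);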
1887 1912 /*ARGSUSED*/
1888 1913 static int
1889 1914 zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
1890 1915 {
1891 1916 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1892 1917
1893 1918 if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1894 1919 return (1);
1895 1920 return (0);
1896 1921 }
1897 1922
1898 1923 /*ARGSUSED*/
1899 1924 static void
1900 1925 zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
1901 1926 {
1902 1927 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1903 1928
1904 1929 spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
1905 1930 }
1906 1931
1907 1932 static int
1908 1933 zvol_dump_init(zvol_state_t *zv, boolean_t resize)
1909 1934 {
1910 1935 dmu_tx_t *tx;
1911 1936 int error;
1912 1937 objset_t *os = zv->zv_objset;
1913 1938 spa_t *spa = dmu_objset_spa(os);
1914 1939 vdev_t *vd = spa->spa_root_vdev;
1915 1940 nvlist_t *nv = NULL;
1916 1941 uint64_t version = spa_version(spa);
1917 1942 uint64_t checksum, compress, refresrv, vbs, dedup;
1918 1943
1919 1944 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1920 1945 ASSERT(vd->vdev_ops == &vdev_root_ops);
1921 1946
1922 1947 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
1923 1948 DMU_OBJECT_END);
1924 1949 if (error != 0)
1925 1950 return (error);
1926 1951 /* wait for dmu_free_long_range to actually free the blocks */
1927 1952 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1928 1953
1929 1954 	/*
1930 1955 	 * If the pool on which the dump device is being initialized has more
1931 1956 	 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature
1932 1957 	 * is enabled, and bump that feature's counter to mark it active; the
1933 1958 	 * sync task's check fails once the feature is active, so the counter
1934 1959 	 * is bumped at most once. We also check the vdev type to handle:
1935 1960 	 *   # zpool create test raidz disk1 disk2 disk3
1936 1961 	 * Here spa_root_vdev->vdev_children == 1 (the raidz vdev), but the
1937 1962 	 * raidz vdev itself has 3 children.
1938 1963 	 */
1939 1964 if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
1940 1965 if (!spa_feature_is_enabled(spa,
1941 1966 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1942 1967 return (SET_ERROR(ENOTSUP));
1943 1968 (void) dsl_sync_task(spa_name(spa),
1944 1969 zfs_mvdev_dump_feature_check,
1945 1970 zfs_mvdev_dump_activate_feature_sync, NULL,
1946 1971 2, ZFS_SPACE_CHECK_RESERVED);
1947 1972 }
1948 1973
1949 1974 if (!resize) {
1950 1975 error = dsl_prop_get_integer(zv->zv_name,
1951 1976 zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
1952 1977 if (error == 0) {
1953 1978 error = dsl_prop_get_integer(zv->zv_name,
1954 1979 zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum,
1955 1980 NULL);
1956 1981 }
1957 1982 if (error == 0) {
1958 1983 error = dsl_prop_get_integer(zv->zv_name,
1959 1984 zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
1960 1985 &refresrv, NULL);
1961 1986 }
1962 1987 if (error == 0) {
1963 1988 error = dsl_prop_get_integer(zv->zv_name,
1964 1989 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs,
1965 1990 NULL);
1966 1991 }
1967 1992 if (version >= SPA_VERSION_DEDUP && error == 0) {
1968 1993 error = dsl_prop_get_integer(zv->zv_name,
1969 1994 zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
1970 1995 }
1971 1996 }
1972 1997 if (error != 0)
1973 1998 return (error);
1974 1999
1975 2000 tx = dmu_tx_create(os);
1976 2001 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1977 2002 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
1978 2003 error = dmu_tx_assign(tx, TXG_WAIT);
1979 2004 if (error != 0) {
1980 2005 dmu_tx_abort(tx);
1981 2006 return (error);
1982 2007 }
1983 2008
1984 2009 	/*
1985 2010 	 * If we are resizing the dump device, we only need to update
1986 2011 	 * the refreservation to match the new volume size. Otherwise,
1987 2012 	 * we save off the zvol's original properties so that they can
1988 2013 	 * be restored if the zvol is ever undumpified.
1989 2014 	 */
1990 2015 if (resize) {
1991 2016 error = zap_update(os, ZVOL_ZAP_OBJ,
1992 2017 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1993 2018 &zv->zv_volsize, tx);
1994 2019 } else {
1995 2020 error = zap_update(os, ZVOL_ZAP_OBJ,
1996 2021 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
1997 2022 &compress, tx);
1998 2023 if (error == 0) {
1999 2024 error = zap_update(os, ZVOL_ZAP_OBJ,
2000 2025 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1,
2001 2026 &checksum, tx);
2002 2027 }
2003 2028 if (error == 0) {
2004 2029 error = zap_update(os, ZVOL_ZAP_OBJ,
2005 2030 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
2006 2031 &refresrv, tx);
2007 2032 }
2008 2033 if (error == 0) {
2009 2034 error = zap_update(os, ZVOL_ZAP_OBJ,
2010 2035 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
2011 2036 &vbs, tx);
2012 2037 }
2013 2038 if (error == 0) {
2014 2039 error = dmu_object_set_blocksize(
2015 2040 os, ZVOL_OBJ, SPA_OLD_MAXBLOCKSIZE, 0, tx);
2016 2041 }
2017 2042 if (version >= SPA_VERSION_DEDUP && error == 0) {
2018 2043 error = zap_update(os, ZVOL_ZAP_OBJ,
2019 2044 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
2020 2045 &dedup, tx);
2021 2046 }
2022 2047 if (error == 0)
2023 2048 zv->zv_volblocksize = SPA_OLD_MAXBLOCKSIZE;
2024 2049 }
2025 2050 dmu_tx_commit(tx);
2026 2051
2027 2052 	/*
2028 2053 	 * We only need to update the zvol's properties if we are
2029 2054 	 * initializing the dump area for the first time.
2030 2055 	 */
2031 2056 if (error == 0 && !resize) {
2032 2057 /*
2033 2058 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
2034 2059 * function. Otherwise, use the old default -- OFF.
2035 2060 */
2036 2061 checksum = spa_feature_is_active(spa,
2037 2062 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
2038 2063 ZIO_CHECKSUM_OFF;
2039 2064
2040 2065 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2041 2066 VERIFY(nvlist_add_uint64(nv,
2042 2067 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
2043 2068 VERIFY(nvlist_add_uint64(nv,
2044 2069 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
2045 2070 ZIO_COMPRESS_OFF) == 0);
2046 2071 VERIFY(nvlist_add_uint64(nv,
2047 2072 zfs_prop_to_name(ZFS_PROP_CHECKSUM),
2048 2073 checksum) == 0);
2049 2074 if (version >= SPA_VERSION_DEDUP) {
2050 2075 VERIFY(nvlist_add_uint64(nv,
2051 2076 zfs_prop_to_name(ZFS_PROP_DEDUP),
2052 2077 ZIO_CHECKSUM_OFF) == 0);
2053 2078 }
2054 2079
2055 2080 error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2056 2081 nv, NULL);
2057 2082 nvlist_free(nv);
2058 2083 }
2059 2084
2060 2085 /* Allocate the space for the dump */
2061 2086 if (error == 0)
2062 2087 error = zvol_prealloc(zv);
2063 2088 return (error);
2064 2089 }
2065 2090
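      The reason zvol_dump_init() forces compression, checksum, and
      dedup off, zeroes the refreservation, and pins the block size: at
      panic time the dump path writes the preallocated blocks directly
      by LBA (hence the "Build up our LBA mapping" step in
      zvol_dumpify() below), bypassing normal DMU processing, so no data
      transform or dynamic allocation can be applied. The original
      values are stashed in the zvol's private ZAP object so that
      zvol_dump_fini() can reapply them; the round-trip for one property
      looks like:

          /* zvol_dump_init(): remember the original value. */
          error = zap_update(os, ZVOL_ZAP_OBJ,
              zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress, tx);

          /* zvol_dump_fini(): read it back and restore it. */
          (void) zap_lookup(os, ZVOL_ZAP_OBJ,
              zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);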
2066 2091 static int
2067 2092 zvol_dumpify(zvol_state_t *zv)
2068 2093 {
2069 2094 int error = 0;
2070 2095 uint64_t dumpsize = 0;
2071 2096 dmu_tx_t *tx;
2072 2097 objset_t *os = zv->zv_objset;
2073 2098
2074 2099 if (zv->zv_flags & ZVOL_RDONLY)
2075 2100 return (SET_ERROR(EROFS));
2076 2101
2077 2102 if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2078 2103 8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2079 2104 boolean_t resize = (dumpsize > 0);
2080 2105
2081 2106 if ((error = zvol_dump_init(zv, resize)) != 0) {
2082 2107 (void) zvol_dump_fini(zv);
2083 2108 return (error);
2084 2109 }
2085 2110 }
2086 2111
2087 2112 	/*
2088 2113 	 * Build up our LBA mapping.
2089 2114 	 */
2090 2115 error = zvol_get_lbas(zv);
2091 2116 if (error) {
2092 2117 (void) zvol_dump_fini(zv);
2093 2118 return (error);
2094 2119 }
2095 2120
2096 2121 tx = dmu_tx_create(os);
2097 2122 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2098 2123 error = dmu_tx_assign(tx, TXG_WAIT);
2099 2124 if (error) {
2100 2125 dmu_tx_abort(tx);
2101 2126 (void) zvol_dump_fini(zv);
2102 2127 return (error);
2103 2128 }
2104 2129
2105 2130 zv->zv_flags |= ZVOL_DUMPIFIED;
2106 2131 error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2107 2132 &zv->zv_volsize, tx);
2108 2133 dmu_tx_commit(tx);
2109 2134
2110 2135 if (error) {
2111 2136 (void) zvol_dump_fini(zv);
2112 2137 return (error);
2113 2138 }
2114 2139
2115 2140 txg_wait_synced(dmu_objset_pool(os), 0);
2116 2141 return (0);
2117 2142 }
2118 2143
2119 2144 static int
2120 2145 zvol_dump_fini(zvol_state_t *zv)
2121 2146 {
2122 2147 dmu_tx_t *tx;
2123 2148 objset_t *os = zv->zv_objset;
2124 2149 nvlist_t *nv;
2125 2150 int error = 0;
2126 2151 uint64_t checksum, compress, refresrv, vbs, dedup;
2127 2152 uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2128 2153
2129 2154 	/*
2130 2155 	 * Attempt to restore the zvol back to its pre-dumpified state.
2131 2156 	 * This is best-effort, since not all of these properties may
2132 2157 	 * have been initialized during the dumpify process (e.g. if
2133 2158 	 * zvol_dump_init() failed partway through).
2134 2159 	 */
2135 2160
2136 2161 tx = dmu_tx_create(os);
2137 2162 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2138 2163 error = dmu_tx_assign(tx, TXG_WAIT);
2139 2164 if (error) {
2140 2165 dmu_tx_abort(tx);
2141 2166 return (error);
2142 2167 }
2143 2168 (void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2144 2169 dmu_tx_commit(tx);
2145 2170
2146 2171 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2147 2172 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2148 2173 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2149 2174 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2150 2175 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2151 2176 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2152 2177 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2153 2178 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2154 2179
2155 2180 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2156 2181 (void) nvlist_add_uint64(nv,
2157 2182 zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2158 2183 (void) nvlist_add_uint64(nv,
2159 2184 zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2160 2185 (void) nvlist_add_uint64(nv,
2161 2186 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2162 2187 if (version >= SPA_VERSION_DEDUP &&
2163 2188 zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2164 2189 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2165 2190 (void) nvlist_add_uint64(nv,
2166 2191 zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2167 2192 }
2168 2193 (void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2169 2194 nv, NULL);
2170 2195 nvlist_free(nv);
2171 2196
2172 2197 zvol_free_extents(zv);
2173 2198 zv->zv_flags &= ~ZVOL_DUMPIFIED;
2174 2199 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2175 2200 /* wait for dmu_free_long_range to actually free the blocks */
2176 2201 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2177 2202 tx = dmu_tx_create(os);
2178 2203 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2179 2204 error = dmu_tx_assign(tx, TXG_WAIT);
2180 2205 if (error) {
2181 2206 dmu_tx_abort(tx);
2182 2207 return (error);
2183 2208 }
2184 2209 if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2185 2210 zv->zv_volblocksize = vbs;
2186 2211 dmu_tx_commit(tx);
2187 2212
2188 2213 return (0);
2189 2214 }
[ 326 lines elided ]