2882 implement libzfs_core
2883 changing "canmount" property to "on" should not always remount dataset
2900 "zfs snapshot" should be able to create multiple, arbitrary snapshots at once
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Chris Siden <christopher.siden@delphix.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Reviewed by: Bill Pijewski <wdp@joyent.com>
Reviewed by: Dan Kruchinin <dan.kruchinin@gmail.com>
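
Issue 2882 adds libzfs_core, a small committed C library layered over the ZFS ioctls, and issue 2900 uses it to create many snapshots in a single atomic call. A minimal userland sketch, assuming the lzc_snapshot() entry point this work introduces; the dataset names are illustrative:

    #include <libzfs_core.h>
    #include <libnvpair.h>
    #include <stdio.h>

    int
    main(void)
    {
            nvlist_t *snaps, *errlist = NULL;
            int err;

            if (libzfs_core_init() != 0)
                    return (1);

            /* every snapshot in one call must be in the same pool */
            (void) nvlist_alloc(&snaps, NV_UNIQUE_NAME, 0);
            (void) nvlist_add_boolean(snaps, "rpool/fs1@backup");
            (void) nvlist_add_boolean(snaps, "rpool/fs2@backup");

            err = lzc_snapshot(snaps, NULL, &errlist);      /* all or none */
            if (err != 0)
                    (void) fprintf(stderr, "snapshot failed: %d\n", err);

            nvlist_free(snaps);
            nvlist_free(errlist);
            libzfs_core_fini();
            return (err != 0);
    }

On failure, errlist maps each snapshot that could not be created to its errno, which is what lets "zfs snapshot" report per-name errors.
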
--- old/usr/src/uts/common/fs/zfs/zvol.c
+++ new/usr/src/uts/common/fs/zfs/zvol.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 *
24 24 * Portions Copyright 2010 Robert Milkowski
25 25 *
26 26 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
27 + * Copyright (c) 2012 by Delphix. All rights reserved.
27 28 */
28 29
29 30 /*
30 31 * ZFS volume emulation driver.
31 32 *
32 33 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
33 34 * Volumes are accessed through the symbolic links named:
34 35 *
35 36 * /dev/zvol/dsk/<pool_name>/<dataset_name>
36 37 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
37 38 *
38 39 * These links are created by the /dev filesystem (sdev_zvolops.c).
39 40 * Volumes are persistent through reboot. No user command needs to be
40 41 * run before opening and using a device.
41 42 */
42 43
43 44 #include <sys/types.h>
44 45 #include <sys/param.h>
45 46 #include <sys/errno.h>
46 47 #include <sys/uio.h>
47 48 #include <sys/buf.h>
48 49 #include <sys/modctl.h>
49 50 #include <sys/open.h>
50 51 #include <sys/kmem.h>
51 52 #include <sys/conf.h>
52 53 #include <sys/cmn_err.h>
53 54 #include <sys/stat.h>
54 55 #include <sys/zap.h>
55 56 #include <sys/spa.h>
56 57 #include <sys/zio.h>
57 58 #include <sys/dmu_traverse.h>
58 59 #include <sys/dnode.h>
59 60 #include <sys/dsl_dataset.h>
60 61 #include <sys/dsl_prop.h>
61 62 #include <sys/dkio.h>
62 63 #include <sys/efi_partition.h>
63 64 #include <sys/byteorder.h>
64 65 #include <sys/pathname.h>
65 66 #include <sys/ddi.h>
66 67 #include <sys/sunddi.h>
67 68 #include <sys/crc32.h>
68 69 #include <sys/dirent.h>
69 70 #include <sys/policy.h>
70 71 #include <sys/fs/zfs.h>
71 72 #include <sys/zfs_ioctl.h>
72 73 #include <sys/mkdev.h>
73 74 #include <sys/zil.h>
74 75 #include <sys/refcount.h>
75 76 #include <sys/zfs_znode.h>
76 77 #include <sys/zfs_rlock.h>
77 78 #include <sys/vdev_disk.h>
78 79 #include <sys/vdev_impl.h>
79 80 #include <sys/zvol.h>
80 81 #include <sys/dumphdr.h>
81 82 #include <sys/zil_impl.h>
82 83
83 84 #include "zfs_namecheck.h"
84 85
85 86 void *zfsdev_state;
86 87 static char *zvol_tag = "zvol_tag";
87 88
88 89 #define ZVOL_DUMPSIZE "dumpsize"
89 90
90 91 /*
91 92 * This lock protects the zfsdev_state structure from being modified
92 93 * while it's being used, e.g. an open that comes in before a create
93 94 * finishes. It also protects temporary opens of the dataset so that,
94 95 * e.g., an open doesn't get a spurious EBUSY.
95 96 */
96 97 kmutex_t zfsdev_state_lock;
97 98 static uint32_t zvol_minors;
98 99
99 100 typedef struct zvol_extent {
100 101 list_node_t ze_node;
101 102 dva_t ze_dva; /* dva associated with this extent */
102 103 uint64_t ze_nblks; /* number of blocks in extent */
103 104 } zvol_extent_t;
104 105
105 106 /*
106 107 * The in-core state of each volume.
107 108 */
108 109 typedef struct zvol_state {
109 110 char zv_name[MAXPATHLEN]; /* pool/dd name */
110 111 uint64_t zv_volsize; /* amount of space we advertise */
111 112 uint64_t zv_volblocksize; /* volume block size */
112 113 minor_t zv_minor; /* minor number */
113 114 uint8_t zv_min_bs; /* minimum addressable block shift */
114 115 uint8_t zv_flags; /* readonly, dumpified, etc. */
115 116 objset_t *zv_objset; /* objset handle */
116 117 uint32_t zv_open_count[OTYPCNT]; /* open counts */
117 118 uint32_t zv_total_opens; /* total open count */
118 119 zilog_t *zv_zilog; /* ZIL handle */
119 120 list_t zv_extents; /* List of extents for dump */
120 121 znode_t zv_znode; /* for range locking */
121 122 dmu_buf_t *zv_dbuf; /* bonus handle */
122 123 } zvol_state_t;
123 124
124 125 /*
125 126 * zvol specific flags
126 127 */
127 128 #define ZVOL_RDONLY 0x1
128 129 #define ZVOL_DUMPIFIED 0x2
129 130 #define ZVOL_EXCL 0x4
130 131 #define ZVOL_WCE 0x8
131 132
132 133 /*
133 134 * zvol maximum transfer in one DMU tx.
134 135 */
135 136 int zvol_maxphys = DMU_MAX_ACCESS/2;
136 137
137 138 extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
138 - nvlist_t *, nvlist_t **);
139 + nvlist_t *, nvlist_t *);
139 140 static int zvol_remove_zv(zvol_state_t *);
140 141 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
141 142 static int zvol_dumpify(zvol_state_t *zv);
142 143 static int zvol_dump_fini(zvol_state_t *zv);
143 144 static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
144 145
145 146 static void
146 147 zvol_size_changed(uint64_t volsize, major_t maj, minor_t min)
147 148 {
148 149 dev_t dev = makedevice(maj, min);
149 150
150 151 VERIFY(ddi_prop_update_int64(dev, zfs_dip,
151 152 "Size", volsize) == DDI_SUCCESS);
152 153 VERIFY(ddi_prop_update_int64(dev, zfs_dip,
153 154 "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
154 155
155 156 /* Notify specfs to invalidate the cached size */
156 157 spec_size_invalidate(dev, VBLK);
157 158 spec_size_invalidate(dev, VCHR);
158 159 }
159 160
160 161 int
161 162 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
162 163 {
163 164 if (volsize == 0)
164 165 return (EINVAL);
165 166
166 167 if (volsize % blocksize != 0)
167 168 return (EINVAL);
168 169
169 170 #ifdef _ILP32
170 171 if (volsize - 1 > SPEC_MAXOFFSET_T)
171 172 return (EOVERFLOW);
172 173 #endif
173 174 return (0);
174 175 }
175 176
176 177 int
177 178 zvol_check_volblocksize(uint64_t volblocksize)
178 179 {
179 180 if (volblocksize < SPA_MINBLOCKSIZE ||
180 181 volblocksize > SPA_MAXBLOCKSIZE ||
181 182 !ISP2(volblocksize))
182 183 return (EDOM);
183 184
184 185 return (0);
185 186 }
186 187
187 188 int
188 189 zvol_get_stats(objset_t *os, nvlist_t *nv)
189 190 {
190 191 int error;
191 192 dmu_object_info_t doi;
192 193 uint64_t val;
193 194
194 195 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
195 196 if (error)
196 197 return (error);
197 198
198 199 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
199 200
200 201 error = dmu_object_info(os, ZVOL_OBJ, &doi);
201 202
202 203 if (error == 0) {
203 204 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
204 205 doi.doi_data_block_size);
205 206 }
206 207
207 208 return (error);
208 209 }
209 210
210 211 static zvol_state_t *
211 212 zvol_minor_lookup(const char *name)
212 213 {
213 214 minor_t minor;
214 215 zvol_state_t *zv;
215 216
216 217 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
217 218
218 219 for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
219 220 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
220 221 if (zv == NULL)
221 222 continue;
222 223 if (strcmp(zv->zv_name, name) == 0)
223 224 return (zv);
224 225 }
225 226
226 227 return (NULL);
227 228 }
228 229
229 230 /* extent mapping arg */
230 231 struct maparg {
231 232 zvol_state_t *ma_zv;
232 233 uint64_t ma_blks;
233 234 };
234 235
235 236 /*ARGSUSED*/
236 237 static int
237 238 zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp, arc_buf_t *pbuf,
238 239 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
239 240 {
240 241 struct maparg *ma = arg;
241 242 zvol_extent_t *ze;
242 243 int bs = ma->ma_zv->zv_volblocksize;
243 244
244 245 if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
245 246 return (0);
246 247
247 248 VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
248 249 ma->ma_blks++;
249 250
250 251 /* Abort immediately if we have encountered gang blocks */
251 252 if (BP_IS_GANG(bp))
252 253 return (EFRAGS);
253 254
254 255 /*
255 256 * See if the block is at the end of the previous extent.
256 257 */
257 258 ze = list_tail(&ma->ma_zv->zv_extents);
258 259 if (ze &&
259 260 DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
260 261 DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
261 262 DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
262 263 ze->ze_nblks++;
263 264 return (0);
264 265 }
265 266
266 267 dprintf_bp(bp, "%s", "next blkptr:");
267 268
268 269 /* start a new extent */
269 270 ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
270 271 ze->ze_dva = bp->blk_dva[0]; /* structure assignment */
271 272 ze->ze_nblks = 1;
272 273 list_insert_tail(&ma->ma_zv->zv_extents, ze);
273 274 return (0);
274 275 }
275 276
276 277 static void
277 278 zvol_free_extents(zvol_state_t *zv)
278 279 {
279 280 zvol_extent_t *ze;
280 281
281 282 while (ze = list_head(&zv->zv_extents)) {
282 283 list_remove(&zv->zv_extents, ze);
283 284 kmem_free(ze, sizeof (zvol_extent_t));
284 285 }
285 286 }
286 287
287 288 static int
288 289 zvol_get_lbas(zvol_state_t *zv)
289 290 {
290 291 objset_t *os = zv->zv_objset;
291 292 struct maparg ma;
292 293 int err;
293 294
294 295 ma.ma_zv = zv;
295 296 ma.ma_blks = 0;
296 297 zvol_free_extents(zv);
297 298
298 299 /* commit any in-flight changes before traversing the dataset */
299 300 txg_wait_synced(dmu_objset_pool(os), 0);
300 301 err = traverse_dataset(dmu_objset_ds(os), 0,
301 302 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
302 303 if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
303 304 zvol_free_extents(zv);
304 305 return (err ? err : EIO);
305 306 }
306 307
307 308 return (0);
308 309 }
309 310
310 311 /* ARGSUSED */
311 312 void
312 313 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
313 314 {
314 315 zfs_creat_t *zct = arg;
315 316 nvlist_t *nvprops = zct->zct_props;
316 317 int error;
317 318 uint64_t volblocksize, volsize;
318 319
319 320 VERIFY(nvlist_lookup_uint64(nvprops,
320 321 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
321 322 if (nvlist_lookup_uint64(nvprops,
322 323 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
323 324 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
324 325
325 326 /*
326 327 * These properties must be removed from the list so the generic
327 328 * property setting step won't apply to them.
328 329 */
329 330 VERIFY(nvlist_remove_all(nvprops,
330 331 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
331 332 (void) nvlist_remove_all(nvprops,
332 333 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
333 334
334 335 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
335 336 DMU_OT_NONE, 0, tx);
336 337 ASSERT(error == 0);
337 338
338 339 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
339 340 DMU_OT_NONE, 0, tx);
340 341 ASSERT(error == 0);
341 342
342 343 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
343 344 ASSERT(error == 0);
344 345 }
345 346
346 347 /*
347 348 * Replay a TX_TRUNCATE ZIL transaction if asked. TX_TRUNCATE is how we
348 349 * implement DKIOCFREE/free-long-range.
349 350 */
350 351 static int
351 352 zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
352 353 {
353 354 uint64_t offset, length;
354 355
355 356 if (byteswap)
356 357 byteswap_uint64_array(lr, sizeof (*lr));
357 358
358 359 offset = lr->lr_offset;
359 360 length = lr->lr_length;
360 361
361 362 return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
362 363 }
363 364
364 365 /*
365 366 * Replay a TX_WRITE ZIL transaction that didn't get committed
366 367 * after a system failure
367 368 */
368 369 static int
369 370 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
370 371 {
371 372 objset_t *os = zv->zv_objset;
372 373 char *data = (char *)(lr + 1); /* data follows lr_write_t */
373 374 uint64_t offset, length;
374 375 dmu_tx_t *tx;
375 376 int error;
376 377
377 378 if (byteswap)
378 379 byteswap_uint64_array(lr, sizeof (*lr));
379 380
380 381 offset = lr->lr_offset;
381 382 length = lr->lr_length;
382 383
383 384 /* If it's a dmu_sync() block, write the whole block */
384 385 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
385 386 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
386 387 if (length < blocksize) {
387 388 offset -= offset % blocksize;
388 389 length = blocksize;
389 390 }
390 391 }
391 392
392 393 tx = dmu_tx_create(os);
393 394 dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
394 395 error = dmu_tx_assign(tx, TXG_WAIT);
395 396 if (error) {
396 397 dmu_tx_abort(tx);
397 398 } else {
398 399 dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
399 400 dmu_tx_commit(tx);
400 401 }
401 402
402 403 return (error);
403 404 }
404 405
405 406 /* ARGSUSED */
406 407 static int
407 408 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
408 409 {
409 410 return (ENOTSUP);
410 411 }
411 412
412 413 /*
413 414 * Callback vectors for replaying records.
414 415 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
415 416 */
416 417 zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
417 418 zvol_replay_err, /* 0 no such transaction type */
418 419 zvol_replay_err, /* TX_CREATE */
419 420 zvol_replay_err, /* TX_MKDIR */
420 421 zvol_replay_err, /* TX_MKXATTR */
421 422 zvol_replay_err, /* TX_SYMLINK */
422 423 zvol_replay_err, /* TX_REMOVE */
423 424 zvol_replay_err, /* TX_RMDIR */
424 425 zvol_replay_err, /* TX_LINK */
425 426 zvol_replay_err, /* TX_RENAME */
426 427 zvol_replay_write, /* TX_WRITE */
427 428 zvol_replay_truncate, /* TX_TRUNCATE */
428 429 zvol_replay_err, /* TX_SETATTR */
429 430 zvol_replay_err, /* TX_ACL */
430 431 zvol_replay_err, /* TX_CREATE_ACL */
431 432 zvol_replay_err, /* TX_CREATE_ATTR */
432 433 zvol_replay_err, /* TX_CREATE_ACL_ATTR */
433 434 zvol_replay_err, /* TX_MKDIR_ACL */
434 435 zvol_replay_err, /* TX_MKDIR_ATTR */
435 436 zvol_replay_err, /* TX_MKDIR_ACL_ATTR */
436 437 zvol_replay_err, /* TX_WRITE2 */
437 438 };
438 439
439 440 int
440 441 zvol_name2minor(const char *name, minor_t *minor)
441 442 {
442 443 zvol_state_t *zv;
443 444
444 445 mutex_enter(&zfsdev_state_lock);
445 446 zv = zvol_minor_lookup(name);
446 447 if (minor && zv)
447 448 *minor = zv->zv_minor;
448 449 mutex_exit(&zfsdev_state_lock);
449 450 return (zv ? 0 : -1);
450 451 }
451 452
452 453 /*
453 454 * Create a minor node (plus a whole lot more) for the specified volume.
454 455 */
455 456 int
456 457 zvol_create_minor(const char *name)
457 458 {
458 459 zfs_soft_state_t *zs;
459 460 zvol_state_t *zv;
460 461 objset_t *os;
461 462 dmu_object_info_t doi;
462 463 minor_t minor = 0;
463 464 char chrbuf[30], blkbuf[30];
464 465 int error;
465 466
466 467 mutex_enter(&zfsdev_state_lock);
467 468
468 469 if (zvol_minor_lookup(name) != NULL) {
469 470 mutex_exit(&zfsdev_state_lock);
470 471 return (EEXIST);
471 472 }
472 473
473 474 /* lie and say we're read-only */
474 475 error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
475 476
476 477 if (error) {
477 478 mutex_exit(&zfsdev_state_lock);
478 479 return (error);
479 480 }
480 481
481 482 if ((minor = zfsdev_minor_alloc()) == 0) {
482 483 dmu_objset_disown(os, FTAG);
483 484 mutex_exit(&zfsdev_state_lock);
484 485 return (ENXIO);
485 486 }
486 487
487 488 if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
488 489 dmu_objset_disown(os, FTAG);
489 490 mutex_exit(&zfsdev_state_lock);
490 491 return (EAGAIN);
491 492 }
492 493 (void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
493 494 (char *)name);
494 495
495 496 (void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
496 497
497 498 if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
498 499 minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
499 500 ddi_soft_state_free(zfsdev_state, minor);
500 501 dmu_objset_disown(os, FTAG);
501 502 mutex_exit(&zfsdev_state_lock);
502 503 return (EAGAIN);
503 504 }
504 505
505 506 (void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
506 507
507 508 if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
508 509 minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
509 510 ddi_remove_minor_node(zfs_dip, chrbuf);
510 511 ddi_soft_state_free(zfsdev_state, minor);
511 512 dmu_objset_disown(os, FTAG);
512 513 mutex_exit(&zfsdev_state_lock);
513 514 return (EAGAIN);
514 515 }
515 516
516 517 zs = ddi_get_soft_state(zfsdev_state, minor);
517 518 zs->zss_type = ZSST_ZVOL;
518 519 zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
519 520 (void) strlcpy(zv->zv_name, name, MAXPATHLEN);
520 521 zv->zv_min_bs = DEV_BSHIFT;
521 522 zv->zv_minor = minor;
522 523 zv->zv_objset = os;
523 524 if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
524 525 zv->zv_flags |= ZVOL_RDONLY;
525 526 mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
526 527 avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
527 528 sizeof (rl_t), offsetof(rl_t, r_node));
528 529 list_create(&zv->zv_extents, sizeof (zvol_extent_t),
529 530 offsetof(zvol_extent_t, ze_node));
530 531 /* get and cache the blocksize */
531 532 error = dmu_object_info(os, ZVOL_OBJ, &doi);
532 533 ASSERT(error == 0);
533 534 zv->zv_volblocksize = doi.doi_data_block_size;
534 535
535 536 if (spa_writeable(dmu_objset_spa(os))) {
536 537 if (zil_replay_disable)
537 538 zil_destroy(dmu_objset_zil(os), B_FALSE);
538 539 else
539 540 zil_replay(os, zv, zvol_replay_vector);
540 541 }
541 542 dmu_objset_disown(os, FTAG);
542 543 zv->zv_objset = NULL;
543 544
544 545 zvol_minors++;
545 546
546 547 mutex_exit(&zfsdev_state_lock);
547 548
548 549 return (0);
549 550 }
550 551
551 552 /*
552 553 * Remove minor node for the specified volume.
553 554 */
554 555 static int
555 556 zvol_remove_zv(zvol_state_t *zv)
556 557 {
557 558 char nmbuf[20];
558 559 minor_t minor = zv->zv_minor;
559 560
560 561 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
561 562 if (zv->zv_total_opens != 0)
562 563 return (EBUSY);
563 564
564 565 (void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
565 566 ddi_remove_minor_node(zfs_dip, nmbuf);
566 567
567 568 (void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
568 569 ddi_remove_minor_node(zfs_dip, nmbuf);
569 570
570 571 avl_destroy(&zv->zv_znode.z_range_avl);
571 572 mutex_destroy(&zv->zv_znode.z_range_lock);
572 573
573 574 kmem_free(zv, sizeof (zvol_state_t));
574 575
575 576 ddi_soft_state_free(zfsdev_state, minor);
576 577
577 578 zvol_minors--;
578 579 return (0);
579 580 }
580 581
581 582 int
582 583 zvol_remove_minor(const char *name)
583 584 {
584 585 zvol_state_t *zv;
585 586 int rc;
586 587
587 588 mutex_enter(&zfsdev_state_lock);
588 589 if ((zv = zvol_minor_lookup(name)) == NULL) {
589 590 mutex_exit(&zfsdev_state_lock);
590 591 return (ENXIO);
591 592 }
592 593 rc = zvol_remove_zv(zv);
593 594 mutex_exit(&zfsdev_state_lock);
594 595 return (rc);
595 596 }
596 597
597 598 int
598 599 zvol_first_open(zvol_state_t *zv)
599 600 {
600 601 objset_t *os;
601 602 uint64_t volsize;
602 603 int error;
603 604 uint64_t readonly;
604 605
605 606 /* lie and say we're read-only */
606 607 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
607 608 zvol_tag, &os);
608 609 if (error)
609 610 return (error);
610 611
611 612 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
612 613 if (error) {
613 614 ASSERT(error == 0);
614 615 dmu_objset_disown(os, zvol_tag);
615 616 return (error);
616 617 }
617 618 zv->zv_objset = os;
618 619 error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
619 620 if (error) {
620 621 dmu_objset_disown(os, zvol_tag);
621 622 return (error);
622 623 }
623 624 zv->zv_volsize = volsize;
624 625 zv->zv_zilog = zil_open(os, zvol_get_data);
625 626 zvol_size_changed(zv->zv_volsize, ddi_driver_major(zfs_dip),
626 627 zv->zv_minor);
627 628
628 629 VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
629 630 NULL) == 0);
630 631 if (readonly || dmu_objset_is_snapshot(os) ||
631 632 !spa_writeable(dmu_objset_spa(os)))
632 633 zv->zv_flags |= ZVOL_RDONLY;
633 634 else
634 635 zv->zv_flags &= ~ZVOL_RDONLY;
635 636 return (error);
636 637 }
637 638
638 639 void
639 640 zvol_last_close(zvol_state_t *zv)
640 641 {
641 642 zil_close(zv->zv_zilog);
642 643 zv->zv_zilog = NULL;
643 644 dmu_buf_rele(zv->zv_dbuf, zvol_tag);
644 645 zv->zv_dbuf = NULL;
645 646 dmu_objset_disown(zv->zv_objset, zvol_tag);
646 647 zv->zv_objset = NULL;
647 648 }
648 649
649 650 int
650 651 zvol_prealloc(zvol_state_t *zv)
651 652 {
652 653 objset_t *os = zv->zv_objset;
653 654 dmu_tx_t *tx;
654 655 uint64_t refd, avail, usedobjs, availobjs;
655 656 uint64_t resid = zv->zv_volsize;
656 657 uint64_t off = 0;
657 658
658 659 /* Check the space usage before attempting to allocate the space */
659 660 dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
660 661 if (avail < zv->zv_volsize)
661 662 return (ENOSPC);
662 663
663 664 /* Free old extents if they exist */
664 665 zvol_free_extents(zv);
665 666
666 667 while (resid != 0) {
667 668 int error;
668 669 uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);
669 670
670 671 tx = dmu_tx_create(os);
671 672 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
672 673 error = dmu_tx_assign(tx, TXG_WAIT);
673 674 if (error) {
674 675 dmu_tx_abort(tx);
675 676 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
676 677 return (error);
677 678 }
678 679 dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
679 680 dmu_tx_commit(tx);
680 681 off += bytes;
681 682 resid -= bytes;
682 683 }
683 684 txg_wait_synced(dmu_objset_pool(os), 0);
684 685
685 686 return (0);
686 687 }
687 688
688 689 int
689 690 zvol_update_volsize(objset_t *os, uint64_t volsize)
690 691 {
691 692 dmu_tx_t *tx;
692 693 int error;
693 694
694 695 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
695 696
696 697 tx = dmu_tx_create(os);
697 698 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
698 699 error = dmu_tx_assign(tx, TXG_WAIT);
699 700 if (error) {
700 701 dmu_tx_abort(tx);
701 702 return (error);
702 703 }
703 704
704 705 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
705 706 &volsize, tx);
706 707 dmu_tx_commit(tx);
707 708
708 709 if (error == 0)
709 710 error = dmu_free_long_range(os,
710 711 ZVOL_OBJ, volsize, DMU_OBJECT_END);
711 712 return (error);
712 713 }
713 714
714 715 void
715 716 zvol_remove_minors(const char *name)
716 717 {
717 718 zvol_state_t *zv;
718 719 char *namebuf;
719 720 minor_t minor;
720 721
721 722 namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
722 723 (void) strncpy(namebuf, name, strlen(name));
723 724 (void) strcat(namebuf, "/");
724 725 mutex_enter(&zfsdev_state_lock);
725 726 for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
726 727
727 728 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
728 729 if (zv == NULL)
729 730 continue;
730 731 if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
731 732 (void) zvol_remove_zv(zv);
732 733 }
733 734 kmem_free(namebuf, strlen(name) + 2);
734 735
735 736 mutex_exit(&zfsdev_state_lock);
736 737 }
737 738
738 739 int
739 740 zvol_set_volsize(const char *name, major_t maj, uint64_t volsize)
740 741 {
741 742 zvol_state_t *zv = NULL;
742 743 objset_t *os;
743 744 int error;
744 745 dmu_object_info_t doi;
745 746 uint64_t old_volsize = 0ULL;
746 747 uint64_t readonly;
747 748
748 749 mutex_enter(&zfsdev_state_lock);
749 750 zv = zvol_minor_lookup(name);
750 751 if ((error = dmu_objset_hold(name, FTAG, &os)) != 0) {
751 752 mutex_exit(&zfsdev_state_lock);
752 753 return (error);
753 754 }
754 755
755 756 if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
756 757 (error = zvol_check_volsize(volsize,
757 758 doi.doi_data_block_size)) != 0)
758 759 goto out;
759 760
760 761 VERIFY(dsl_prop_get_integer(name, "readonly", &readonly,
761 762 NULL) == 0);
762 763 if (readonly) {
763 764 error = EROFS;
764 765 goto out;
765 766 }
766 767
767 768 error = zvol_update_volsize(os, volsize);
768 769 /*
769 770 * Reinitialize the dump area to the new size. If we
770 771 * failed to resize the dump area then restore it back to
771 772 * its original size.
772 773 */
773 774 if (zv && error == 0) {
774 775 if (zv->zv_flags & ZVOL_DUMPIFIED) {
775 776 old_volsize = zv->zv_volsize;
776 777 zv->zv_volsize = volsize;
777 778 if ((error = zvol_dumpify(zv)) != 0 ||
778 779 (error = dumpvp_resize()) != 0) {
779 780 (void) zvol_update_volsize(os, old_volsize);
780 781 zv->zv_volsize = old_volsize;
781 782 error = zvol_dumpify(zv);
782 783 }
783 784 }
784 785 if (error == 0) {
785 786 zv->zv_volsize = volsize;
786 787 zvol_size_changed(volsize, maj, zv->zv_minor);
787 788 }
788 789 }
789 790
790 791 /*
791 792 * Generate a LUN expansion event.
792 793 */
793 794 if (zv && error == 0) {
794 795 sysevent_id_t eid;
795 796 nvlist_t *attr;
796 797 char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
797 798
798 799 (void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
799 800 zv->zv_minor);
800 801
801 802 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
802 803 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
803 804
804 805 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
805 806 ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
806 807
807 808 nvlist_free(attr);
808 809 kmem_free(physpath, MAXPATHLEN);
809 810 }
810 811
811 812 out:
812 813 dmu_objset_rele(os, FTAG);
813 814
814 815 mutex_exit(&zfsdev_state_lock);
815 816
816 817 return (error);
817 818 }
818 819
819 820 /*ARGSUSED*/
820 821 int
821 822 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
822 823 {
823 824 zvol_state_t *zv;
824 825 int err = 0;
825 826
826 827 mutex_enter(&zfsdev_state_lock);
827 828
828 829 zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
829 830 if (zv == NULL) {
830 831 mutex_exit(&zfsdev_state_lock);
831 832 return (ENXIO);
832 833 }
833 834
834 835 if (zv->zv_total_opens == 0)
835 836 err = zvol_first_open(zv);
836 837 if (err) {
837 838 mutex_exit(&zfsdev_state_lock);
838 839 return (err);
839 840 }
840 841 if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
841 842 err = EROFS;
842 843 goto out;
843 844 }
844 845 if (zv->zv_flags & ZVOL_EXCL) {
845 846 err = EBUSY;
846 847 goto out;
847 848 }
848 849 if (flag & FEXCL) {
849 850 if (zv->zv_total_opens != 0) {
850 851 err = EBUSY;
851 852 goto out;
852 853 }
853 854 zv->zv_flags |= ZVOL_EXCL;
854 855 }
855 856
856 857 if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
857 858 zv->zv_open_count[otyp]++;
858 859 zv->zv_total_opens++;
859 860 }
860 861 mutex_exit(&zfsdev_state_lock);
861 862
862 863 return (err);
863 864 out:
864 865 if (zv->zv_total_opens == 0)
865 866 zvol_last_close(zv);
866 867 mutex_exit(&zfsdev_state_lock);
867 868 return (err);
868 869 }
869 870
870 871 /*ARGSUSED*/
871 872 int
872 873 zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
873 874 {
874 875 minor_t minor = getminor(dev);
875 876 zvol_state_t *zv;
876 877 int error = 0;
877 878
878 879 mutex_enter(&zfsdev_state_lock);
879 880
880 881 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
881 882 if (zv == NULL) {
882 883 mutex_exit(&zfsdev_state_lock);
883 884 return (ENXIO);
884 885 }
885 886
886 887 if (zv->zv_flags & ZVOL_EXCL) {
887 888 ASSERT(zv->zv_total_opens == 1);
888 889 zv->zv_flags &= ~ZVOL_EXCL;
889 890 }
890 891
891 892 /*
892 893 * If the open count is zero, this is a spurious close.
893 894 * That indicates a bug in the kernel / DDI framework.
894 895 */
895 896 ASSERT(zv->zv_open_count[otyp] != 0);
896 897 ASSERT(zv->zv_total_opens != 0);
897 898
898 899 /*
899 900 * You may get multiple opens, but only one close.
900 901 */
901 902 zv->zv_open_count[otyp]--;
902 903 zv->zv_total_opens--;
903 904
904 905 if (zv->zv_total_opens == 0)
905 906 zvol_last_close(zv);
906 907
907 908 mutex_exit(&zfsdev_state_lock);
908 909 return (error);
909 910 }
910 911
911 912 static void
912 913 zvol_get_done(zgd_t *zgd, int error)
913 914 {
914 915 if (zgd->zgd_db)
915 916 dmu_buf_rele(zgd->zgd_db, zgd);
916 917
917 918 zfs_range_unlock(zgd->zgd_rl);
918 919
919 920 if (error == 0 && zgd->zgd_bp)
920 921 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
921 922
922 923 kmem_free(zgd, sizeof (zgd_t));
923 924 }
924 925
925 926 /*
926 927 * Get data to generate a TX_WRITE intent log record.
927 928 */
928 929 static int
929 930 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
930 931 {
931 932 zvol_state_t *zv = arg;
932 933 objset_t *os = zv->zv_objset;
933 934 uint64_t object = ZVOL_OBJ;
934 935 uint64_t offset = lr->lr_offset;
935 936 uint64_t size = lr->lr_length; /* length of user data */
936 937 blkptr_t *bp = &lr->lr_blkptr;
937 938 dmu_buf_t *db;
938 939 zgd_t *zgd;
939 940 int error;
940 941
941 942 ASSERT(zio != NULL);
942 943 ASSERT(size != 0);
943 944
944 945 zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
945 946 zgd->zgd_zilog = zv->zv_zilog;
946 947 zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
947 948
948 949 /*
949 950 * Write records come in two flavors: immediate and indirect.
950 951 * For small writes it's cheaper to store the data with the
951 952 * log record (immediate); for large writes it's cheaper to
952 953 * sync the data and get a pointer to it (indirect) so that
953 954 * we don't have to write the data twice.
954 955 */
955 956 if (buf != NULL) { /* immediate write */
956 957 error = dmu_read(os, object, offset, size, buf,
957 958 DMU_READ_NO_PREFETCH);
958 959 } else {
959 960 size = zv->zv_volblocksize;
960 961 offset = P2ALIGN(offset, size);
961 962 error = dmu_buf_hold(os, object, offset, zgd, &db,
962 963 DMU_READ_NO_PREFETCH);
963 964 if (error == 0) {
964 965 zgd->zgd_db = db;
965 966 zgd->zgd_bp = bp;
966 967
967 968 ASSERT(db->db_offset == offset);
968 969 ASSERT(db->db_size == size);
969 970
970 971 error = dmu_sync(zio, lr->lr_common.lrc_txg,
971 972 zvol_get_done, zgd);
972 973
973 974 if (error == 0)
974 975 return (0);
975 976 }
976 977 }
977 978
978 979 zvol_get_done(zgd, error);
979 980
980 981 return (error);
981 982 }
982 983
983 984 /*
984 985 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
985 986 *
986 987 * We store data in the log buffers if it's small enough.
987 988 * Otherwise we will later flush the data out via dmu_sync().
988 989 */
989 990 ssize_t zvol_immediate_write_sz = 32768;
990 991
991 992 static void
992 993 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
993 994 boolean_t sync)
994 995 {
995 996 uint32_t blocksize = zv->zv_volblocksize;
996 997 zilog_t *zilog = zv->zv_zilog;
997 998 boolean_t slogging;
998 999 ssize_t immediate_write_sz;
999 1000
1000 1001 if (zil_replaying(zilog, tx))
1001 1002 return;
1002 1003
1003 1004 immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1004 1005 ? 0 : zvol_immediate_write_sz;
1005 1006
1006 1007 slogging = spa_has_slogs(zilog->zl_spa) &&
1007 1008 (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
1008 1009
1009 1010 while (resid) {
1010 1011 itx_t *itx;
1011 1012 lr_write_t *lr;
1012 1013 ssize_t len;
1013 1014 itx_wr_state_t write_state;
1014 1015
1015 1016 /*
1016 1017 * Unlike zfs_log_write() we can be called with
1017 1018 * up to DMU_MAX_ACCESS/2 (5MB) writes.
1018 1019 */
1019 1020 if (blocksize > immediate_write_sz && !slogging &&
1020 1021 resid >= blocksize && off % blocksize == 0) {
1021 1022 write_state = WR_INDIRECT; /* uses dmu_sync */
1022 1023 len = blocksize;
1023 1024 } else if (sync) {
1024 1025 write_state = WR_COPIED;
1025 1026 len = MIN(ZIL_MAX_LOG_DATA, resid);
1026 1027 } else {
1027 1028 write_state = WR_NEED_COPY;
1028 1029 len = MIN(ZIL_MAX_LOG_DATA, resid);
1029 1030 }
1030 1031
1031 1032 itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1032 1033 (write_state == WR_COPIED ? len : 0));
1033 1034 lr = (lr_write_t *)&itx->itx_lr;
1034 1035 if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
1035 1036 ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1036 1037 zil_itx_destroy(itx);
1037 1038 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1038 1039 lr = (lr_write_t *)&itx->itx_lr;
1039 1040 write_state = WR_NEED_COPY;
1040 1041 }
1041 1042
1042 1043 itx->itx_wr_state = write_state;
1043 1044 if (write_state == WR_NEED_COPY)
1044 1045 itx->itx_sod += len;
1045 1046 lr->lr_foid = ZVOL_OBJ;
1046 1047 lr->lr_offset = off;
1047 1048 lr->lr_length = len;
1048 1049 lr->lr_blkoff = 0;
1049 1050 BP_ZERO(&lr->lr_blkptr);
1050 1051
1051 1052 itx->itx_private = zv;
1052 1053 itx->itx_sync = sync;
1053 1054
1054 1055 zil_itx_assign(zilog, itx, tx);
1055 1056
1056 1057 off += len;
1057 1058 resid -= len;
1058 1059 }
1059 1060 }
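
To make the state selection above concrete, here is a hedged standalone distillation of the decision; the names and thresholds mirror zvol_log_write(), but this helper is a sketch rather than part of the driver:

    /* Sketch: choose a ZIL write state the way zvol_log_write() does. */
    static itx_wr_state_t
    zvol_pick_write_state(ssize_t blocksize, ssize_t immediate_write_sz,
        boolean_t slogging, ssize_t resid, offset_t off, boolean_t sync)
    {
            if (blocksize > immediate_write_sz && !slogging &&
                resid >= blocksize && off % blocksize == 0)
                    return (WR_INDIRECT);   /* sync full blocks via dmu_sync() */
            return (sync ? WR_COPIED : WR_NEED_COPY);
    }

WR_COPIED embeds the data in the log record right away, while WR_NEED_COPY defers the copy until the itx is committed; both are limited to ZIL_MAX_LOG_DATA bytes per record.
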
1060 1061
1061 1062 static int
1062 1063 zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t size,
1063 1064 boolean_t doread, boolean_t isdump)
1064 1065 {
1065 1066 vdev_disk_t *dvd;
1066 1067 int c;
1067 1068 int numerrors = 0;
1068 1069
1069 1070 for (c = 0; c < vd->vdev_children; c++) {
1070 1071 ASSERT(vd->vdev_ops == &vdev_mirror_ops ||
1071 1072 vd->vdev_ops == &vdev_replacing_ops ||
1072 1073 vd->vdev_ops == &vdev_spare_ops);
1073 1074 int err = zvol_dumpio_vdev(vd->vdev_child[c],
1074 1075 addr, offset, size, doread, isdump);
1075 1076 if (err != 0) {
1076 1077 numerrors++;
1077 1078 } else if (doread) {
1078 1079 break;
1079 1080 }
1080 1081 }
1081 1082
1082 1083 if (!vd->vdev_ops->vdev_op_leaf)
1083 1084 return (numerrors < vd->vdev_children ? 0 : EIO);
1084 1085
1085 1086 if (doread && !vdev_readable(vd))
1086 1087 return (EIO);
1087 1088 else if (!doread && !vdev_writeable(vd))
1088 1089 return (EIO);
1089 1090
1090 1091 dvd = vd->vdev_tsd;
1091 1092 ASSERT3P(dvd, !=, NULL);
1092 1093 offset += VDEV_LABEL_START_SIZE;
1093 1094
1094 1095 if (ddi_in_panic() || isdump) {
1095 1096 ASSERT(!doread);
1096 1097 if (doread)
1097 1098 return (EIO);
1098 1099 return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1099 1100 lbtodb(size)));
1100 1101 } else {
1101 1102 return (vdev_disk_physio(dvd->vd_lh, addr, size, offset,
1102 1103 doread ? B_READ : B_WRITE));
1103 1104 }
1104 1105 }
1105 1106
1106 1107 static int
1107 1108 zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1108 1109 boolean_t doread, boolean_t isdump)
1109 1110 {
1110 1111 vdev_t *vd;
1111 1112 int error;
1112 1113 zvol_extent_t *ze;
1113 1114 spa_t *spa = dmu_objset_spa(zv->zv_objset);
1114 1115
1115 1116 /* Must be sector aligned, and not straddle a block boundary. */
1116 1117 if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1117 1118 P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1118 1119 return (EINVAL);
1119 1120 }
1120 1121 ASSERT(size <= zv->zv_volblocksize);
1121 1122
1122 1123 /* Locate the extent this belongs to */
1123 1124 ze = list_head(&zv->zv_extents);
1124 1125 while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
1125 1126 offset -= ze->ze_nblks * zv->zv_volblocksize;
1126 1127 ze = list_next(&zv->zv_extents, ze);
1127 1128 }
1128 1129
1129 1130 if (!ddi_in_panic())
1130 1131 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1131 1132
1132 1133 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1133 1134 offset += DVA_GET_OFFSET(&ze->ze_dva);
1134 1135 error = zvol_dumpio_vdev(vd, addr, offset, size, doread, isdump);
1135 1136
1136 1137 if (!ddi_in_panic())
1137 1138 spa_config_exit(spa, SCL_STATE, FTAG);
1138 1139
1139 1140 return (error);
1140 1141 }
1141 1142
1142 1143 int
1143 1144 zvol_strategy(buf_t *bp)
1144 1145 {
1145 1146 zfs_soft_state_t *zs = NULL;
1146 1147 zvol_state_t *zv;
1147 1148 uint64_t off, volsize;
1148 1149 size_t resid;
1149 1150 char *addr;
1150 1151 objset_t *os;
1151 1152 rl_t *rl;
1152 1153 int error = 0;
1153 1154 boolean_t doread = bp->b_flags & B_READ;
1154 1155 boolean_t is_dump;
1155 1156 boolean_t sync;
1156 1157
1157 1158 if (getminor(bp->b_edev) == 0) {
1158 1159 error = EINVAL;
1159 1160 } else {
1160 1161 zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1161 1162 if (zs == NULL)
1162 1163 error = ENXIO;
1163 1164 else if (zs->zss_type != ZSST_ZVOL)
1164 1165 error = EINVAL;
1165 1166 }
1166 1167
1167 1168 if (error) {
1168 1169 bioerror(bp, error);
1169 1170 biodone(bp);
1170 1171 return (0);
1171 1172 }
1172 1173
1173 1174 zv = zs->zss_data;
1174 1175
1175 1176 if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1176 1177 bioerror(bp, EROFS);
1177 1178 biodone(bp);
1178 1179 return (0);
1179 1180 }
1180 1181
1181 1182 off = ldbtob(bp->b_blkno);
1182 1183 volsize = zv->zv_volsize;
1183 1184
1184 1185 os = zv->zv_objset;
1185 1186 ASSERT(os != NULL);
1186 1187
1187 1188 bp_mapin(bp);
1188 1189 addr = bp->b_un.b_addr;
1189 1190 resid = bp->b_bcount;
1190 1191
1191 1192 if (resid > 0 && (off < 0 || off >= volsize)) {
1192 1193 bioerror(bp, EIO);
1193 1194 biodone(bp);
1194 1195 return (0);
1195 1196 }
1196 1197
1197 1198 is_dump = zv->zv_flags & ZVOL_DUMPIFIED;
1198 1199 sync = ((!(bp->b_flags & B_ASYNC) &&
1199 1200 !(zv->zv_flags & ZVOL_WCE)) ||
1200 1201 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1201 1202 !doread && !is_dump;
1202 1203
1203 1204 /*
1204 1205 * There must be no buffer changes when doing a dmu_sync() because
1205 1206 * we can't change the data whilst calculating the checksum.
1206 1207 */
1207 1208 rl = zfs_range_lock(&zv->zv_znode, off, resid,
1208 1209 doread ? RL_READER : RL_WRITER);
1209 1210
1210 1211 while (resid != 0 && off < volsize) {
1211 1212 size_t size = MIN(resid, zvol_maxphys);
1212 1213 if (is_dump) {
1213 1214 size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1214 1215 error = zvol_dumpio(zv, addr, off, size,
1215 1216 doread, B_FALSE);
1216 1217 } else if (doread) {
1217 1218 error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1218 1219 DMU_READ_PREFETCH);
1219 1220 } else {
1220 1221 dmu_tx_t *tx = dmu_tx_create(os);
1221 1222 dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1222 1223 error = dmu_tx_assign(tx, TXG_WAIT);
1223 1224 if (error) {
1224 1225 dmu_tx_abort(tx);
1225 1226 } else {
1226 1227 dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1227 1228 zvol_log_write(zv, tx, off, size, sync);
1228 1229 dmu_tx_commit(tx);
1229 1230 }
1230 1231 }
1231 1232 if (error) {
1232 1233 /* convert checksum errors into IO errors */
1233 1234 if (error == ECKSUM)
1234 1235 error = EIO;
1235 1236 break;
1236 1237 }
1237 1238 off += size;
1238 1239 addr += size;
1239 1240 resid -= size;
1240 1241 }
1241 1242 zfs_range_unlock(rl);
1242 1243
1243 1244 if ((bp->b_resid = resid) == bp->b_bcount)
1244 1245 bioerror(bp, off > volsize ? EINVAL : error);
1245 1246
1246 1247 if (sync)
1247 1248 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1248 1249 biodone(bp);
1249 1250
1250 1251 return (0);
1251 1252 }
1252 1253
1253 1254 /*
1254 1255 * Set the buffer count to the zvol maximum transfer.
1255 1256 * Using our own routine instead of the default minphys()
1256 1257 * means that for larger writes we write bigger buffers on X86
1257 1258 * (128K instead of 56K) and flush the disk write cache less often
1258 1259 * (every zvol_maxphys - currently 1MB) instead of minphys (currently
1259 1260 * 56K on X86 and 128K on sparc).
1260 1261 */
1261 1262 void
1262 1263 zvol_minphys(struct buf *bp)
1263 1264 {
1264 1265 if (bp->b_bcount > zvol_maxphys)
1265 1266 bp->b_bcount = zvol_maxphys;
1266 1267 }
1267 1268
1268 1269 int
1269 1270 zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1270 1271 {
1271 1272 minor_t minor = getminor(dev);
1272 1273 zvol_state_t *zv;
1273 1274 int error = 0;
1274 1275 uint64_t size;
1275 1276 uint64_t boff;
1276 1277 uint64_t resid;
1277 1278
1278 1279 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1279 1280 if (zv == NULL)
1280 1281 return (ENXIO);
1281 1282
1282 1283 boff = ldbtob(blkno);
1283 1284 resid = ldbtob(nblocks);
1284 1285
1285 1286 VERIFY3U(boff + resid, <=, zv->zv_volsize);
1286 1287
1287 1288 while (resid) {
1288 1289 size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1289 1290 error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1290 1291 if (error)
1291 1292 break;
1292 1293 boff += size;
1293 1294 addr += size;
1294 1295 resid -= size;
1295 1296 }
1296 1297
1297 1298 return (error);
1298 1299 }
1299 1300
1300 1301 /*ARGSUSED*/
1301 1302 int
1302 1303 zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1303 1304 {
1304 1305 minor_t minor = getminor(dev);
1305 1306 zvol_state_t *zv;
1306 1307 uint64_t volsize;
1307 1308 rl_t *rl;
1308 1309 int error = 0;
1309 1310
1310 1311 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1311 1312 if (zv == NULL)
1312 1313 return (ENXIO);
1313 1314
1314 1315 volsize = zv->zv_volsize;
1315 1316 if (uio->uio_resid > 0 &&
1316 1317 (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1317 1318 return (EIO);
1318 1319
1319 1320 if (zv->zv_flags & ZVOL_DUMPIFIED) {
1320 1321 error = physio(zvol_strategy, NULL, dev, B_READ,
1321 1322 zvol_minphys, uio);
1322 1323 return (error);
1323 1324 }
1324 1325
1325 1326 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1326 1327 RL_READER);
1327 1328 while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1328 1329 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1329 1330
1330 1331 /* don't read past the end */
1331 1332 if (bytes > volsize - uio->uio_loffset)
1332 1333 bytes = volsize - uio->uio_loffset;
1333 1334
1334 1335 error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1335 1336 if (error) {
1336 1337 /* convert checksum errors into IO errors */
1337 1338 if (error == ECKSUM)
1338 1339 error = EIO;
1339 1340 break;
1340 1341 }
1341 1342 }
1342 1343 zfs_range_unlock(rl);
1343 1344 return (error);
1344 1345 }
1345 1346
1346 1347 /*ARGSUSED*/
1347 1348 int
1348 1349 zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1349 1350 {
1350 1351 minor_t minor = getminor(dev);
1351 1352 zvol_state_t *zv;
1352 1353 uint64_t volsize;
1353 1354 rl_t *rl;
1354 1355 int error = 0;
1355 1356 boolean_t sync;
1356 1357
1357 1358 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1358 1359 if (zv == NULL)
1359 1360 return (ENXIO);
1360 1361
1361 1362 volsize = zv->zv_volsize;
1362 1363 if (uio->uio_resid > 0 &&
1363 1364 (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1364 1365 return (EIO);
1365 1366
1366 1367 if (zv->zv_flags & ZVOL_DUMPIFIED) {
1367 1368 error = physio(zvol_strategy, NULL, dev, B_WRITE,
1368 1369 zvol_minphys, uio);
1369 1370 return (error);
1370 1371 }
1371 1372
1372 1373 sync = !(zv->zv_flags & ZVOL_WCE) ||
1373 1374 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1374 1375
1375 1376 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1376 1377 RL_WRITER);
1377 1378 while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1378 1379 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1379 1380 uint64_t off = uio->uio_loffset;
1380 1381 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1381 1382
1382 1383 if (bytes > volsize - off) /* don't write past the end */
1383 1384 bytes = volsize - off;
1384 1385
1385 1386 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1386 1387 error = dmu_tx_assign(tx, TXG_WAIT);
1387 1388 if (error) {
1388 1389 dmu_tx_abort(tx);
1389 1390 break;
1390 1391 }
1391 1392 error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
1392 1393 if (error == 0)
1393 1394 zvol_log_write(zv, tx, off, bytes, sync);
1394 1395 dmu_tx_commit(tx);
1395 1396
1396 1397 if (error)
1397 1398 break;
1398 1399 }
1399 1400 zfs_range_unlock(rl);
1400 1401 if (sync)
1401 1402 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1402 1403 return (error);
1403 1404 }
1404 1405
1405 1406 int
1406 1407 zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1407 1408 {
1408 1409 struct uuid uuid = EFI_RESERVED;
1409 1410 efi_gpe_t gpe = { 0 };
1410 1411 uint32_t crc;
1411 1412 dk_efi_t efi;
1412 1413 int length;
1413 1414 char *ptr;
1414 1415
1415 1416 if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1416 1417 return (EFAULT);
1417 1418 ptr = (char *)(uintptr_t)efi.dki_data_64;
1418 1419 length = efi.dki_length;
1419 1420 /*
1420 1421 * Some clients may attempt to request a PMBR for the
1421 1422 * zvol. Currently this interface will return EINVAL to
1422 1423 * such requests. These requests could be supported by
1423 1424 * adding a check for lba == 0 and consing up an appropriate
1424 1425 * PMBR.
1425 1426 */
1426 1427 if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1427 1428 return (EINVAL);
1428 1429
1429 1430 gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1430 1431 gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1431 1432 UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1432 1433
1433 1434 if (efi.dki_lba == 1) {
1434 1435 efi_gpt_t gpt = { 0 };
1435 1436
1436 1437 gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1437 1438 gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1438 1439 gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
1439 1440 gpt.efi_gpt_MyLBA = LE_64(1ULL);
1440 1441 gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1441 1442 gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1442 1443 gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1443 1444 gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1444 1445 gpt.efi_gpt_SizeOfPartitionEntry =
1445 1446 LE_32(sizeof (efi_gpe_t));
1446 1447 CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1447 1448 gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1448 1449 CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
1449 1450 gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1450 1451 if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1451 1452 flag))
1452 1453 return (EFAULT);
1453 1454 ptr += sizeof (gpt);
1454 1455 length -= sizeof (gpt);
1455 1456 }
1456 1457 if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1457 1458 length), flag))
1458 1459 return (EFAULT);
1459 1460 return (0);
1460 1461 }
1461 1462
1462 1463 /*
1463 1464 * BEGIN entry points to allow external callers access to the volume.
1464 1465 */
1465 1466 /*
1466 1467 * Return the volume parameters needed for access from an external caller.
1467 1468 * These values are invariant as long as the volume is held open.
1468 1469 */
1469 1470 int
1470 1471 zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1471 1472 uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1472 1473 void **rl_hdl, void **bonus_hdl)
1473 1474 {
1474 1475 zvol_state_t *zv;
1475 1476
1476 1477 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1477 1478 if (zv == NULL)
1478 1479 return (ENXIO);
1479 1480 if (zv->zv_flags & ZVOL_DUMPIFIED)
1480 1481 return (ENXIO);
1481 1482
1482 1483 ASSERT(blksize && max_xfer_len && minor_hdl &&
1483 1484 objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
1484 1485
1485 1486 *blksize = zv->zv_volblocksize;
1486 1487 *max_xfer_len = (uint64_t)zvol_maxphys;
1487 1488 *minor_hdl = zv;
1488 1489 *objset_hdl = zv->zv_objset;
1489 1490 *zil_hdl = zv->zv_zilog;
1490 1491 *rl_hdl = &zv->zv_znode;
1491 1492 *bonus_hdl = zv->zv_dbuf;
1492 1493 return (0);
1493 1494 }
1494 1495
1495 1496 /*
1496 1497 * Return the current volume size to an external caller.
1497 1498 * The size can change while the volume is open.
1498 1499 */
1499 1500 uint64_t
1500 1501 zvol_get_volume_size(void *minor_hdl)
1501 1502 {
1502 1503 zvol_state_t *zv = minor_hdl;
1503 1504
1504 1505 return (zv->zv_volsize);
1505 1506 }
1506 1507
1507 1508 /*
1508 1509 * Return the current WCE setting to an external caller.
1509 1510 * The WCE setting can change while the volume is open.
1510 1511 */
1511 1512 int
1512 1513 zvol_get_volume_wce(void *minor_hdl)
1513 1514 {
1514 1515 zvol_state_t *zv = minor_hdl;
1515 1516
1516 1517 return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
1517 1518 }
1518 1519
1519 1520 /*
1520 1521 * Entry point for external callers to zvol_log_write
1521 1522 */
1522 1523 void
1523 1524 zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
1524 1525 boolean_t sync)
1525 1526 {
1526 1527 zvol_state_t *zv = minor_hdl;
1527 1528
1528 1529 zvol_log_write(zv, tx, off, resid, sync);
1529 1530 }
1530 1531 /*
1531 1532 * END entry points to allow external callers access to the volume.
1532 1533 */
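
As a reading aid, a hedged sketch of how an in-kernel consumer (a COMSTAR-style block backend, say) might use the entry points above; the minor number and error handling are illustrative:

    uint64_t blksize, max_xfer_len, volsize;
    void *minor_hdl, *objset_hdl, *zil_hdl, *rl_hdl, *bonus_hdl;
    int wce;

    /* invariant while the volume is held open */
    if (zvol_get_volume_params(minor, &blksize, &max_xfer_len,
        &minor_hdl, &objset_hdl, &zil_hdl, &rl_hdl, &bonus_hdl) != 0)
            return (ENXIO);

    /* these can change while open, so re-read them as needed */
    volsize = zvol_get_volume_size(minor_hdl);
    wce = zvol_get_volume_wce(minor_hdl);
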
1533 1534
1534 1535 /*
1535 1536 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1536 1537 */
1537 1538 static void
1538 1539 zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
1539 1540 boolean_t sync)
1540 1541 {
1541 1542 itx_t *itx;
1542 1543 lr_truncate_t *lr;
1543 1544 zilog_t *zilog = zv->zv_zilog;
1544 1545
1545 1546 if (zil_replaying(zilog, tx))
1546 1547 return;
1547 1548
1548 1549 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1549 1550 lr = (lr_truncate_t *)&itx->itx_lr;
1550 1551 lr->lr_foid = ZVOL_OBJ;
1551 1552 lr->lr_offset = off;
1552 1553 lr->lr_length = len;
1553 1554
1554 1555 itx->itx_sync = sync;
1555 1556 zil_itx_assign(zilog, itx, tx);
1556 1557 }
1557 1558
1558 1559 /*
1559 1560 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems. See dkio(7I).
1560 1561 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1561 1562 */
1562 1563 /*ARGSUSED*/
1563 1564 int
1564 1565 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1565 1566 {
1566 1567 zvol_state_t *zv;
1567 1568 struct dk_cinfo dki;
1568 1569 struct dk_minfo dkm;
1569 1570 struct dk_callback *dkc;
1570 1571 int error = 0;
1571 1572 rl_t *rl;
1572 1573
1573 1574 mutex_enter(&zfsdev_state_lock);
1574 1575
1575 1576 zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1576 1577
1577 1578 if (zv == NULL) {
1578 1579 mutex_exit(&zfsdev_state_lock);
1579 1580 return (ENXIO);
1580 1581 }
1581 1582 ASSERT(zv->zv_total_opens > 0);
1582 1583
1583 1584 switch (cmd) {
1584 1585
1585 1586 case DKIOCINFO:
1586 1587 bzero(&dki, sizeof (dki));
1587 1588 (void) strcpy(dki.dki_cname, "zvol");
1588 1589 (void) strcpy(dki.dki_dname, "zvol");
1589 1590 dki.dki_ctype = DKC_UNKNOWN;
1590 1591 dki.dki_unit = getminor(dev);
1591 1592 dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
1592 1593 mutex_exit(&zfsdev_state_lock);
1593 1594 if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1594 1595 error = EFAULT;
1595 1596 return (error);
1596 1597
1597 1598 case DKIOCGMEDIAINFO:
1598 1599 bzero(&dkm, sizeof (dkm));
1599 1600 dkm.dki_lbsize = 1U << zv->zv_min_bs;
1600 1601 dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1601 1602 dkm.dki_media_type = DK_UNKNOWN;
1602 1603 mutex_exit(&zfsdev_state_lock);
1603 1604 if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1604 1605 error = EFAULT;
1605 1606 return (error);
1606 1607
1607 1608 case DKIOCGETEFI:
1608 1609 {
1609 1610 uint64_t vs = zv->zv_volsize;
1610 1611 uint8_t bs = zv->zv_min_bs;
1611 1612
1612 1613 mutex_exit(&zfsdev_state_lock);
1613 1614 error = zvol_getefi((void *)arg, flag, vs, bs);
1614 1615 return (error);
1615 1616 }
1616 1617
1617 1618 case DKIOCFLUSHWRITECACHE:
1618 1619 dkc = (struct dk_callback *)arg;
1619 1620 mutex_exit(&zfsdev_state_lock);
1620 1621 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1621 1622 if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1622 1623 (*dkc->dkc_callback)(dkc->dkc_cookie, error);
1623 1624 error = 0;
1624 1625 }
1625 1626 return (error);
1626 1627
1627 1628 case DKIOCGETWCE:
1628 1629 {
1629 1630 int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1630 1631 if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1631 1632 flag))
1632 1633 error = EFAULT;
1633 1634 break;
1634 1635 }
1635 1636 case DKIOCSETWCE:
1636 1637 {
1637 1638 int wce;
1638 1639 if (ddi_copyin((void *)arg, &wce, sizeof (int),
1639 1640 flag)) {
1640 1641 error = EFAULT;
1641 1642 break;
1642 1643 }
1643 1644 if (wce) {
1644 1645 zv->zv_flags |= ZVOL_WCE;
1645 1646 mutex_exit(&zfsdev_state_lock);
1646 1647 } else {
1647 1648 zv->zv_flags &= ~ZVOL_WCE;
1648 1649 mutex_exit(&zfsdev_state_lock);
1649 1650 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1650 1651 }
1651 1652 return (0);
1652 1653 }
1653 1654
1654 1655 case DKIOCGGEOM:
1655 1656 case DKIOCGVTOC:
1656 1657 /*
1657 1658 * commands using these (like prtvtoc) expect ENOTSUP
1658 1659 * since we're emulating an EFI label
1659 1660 */
1660 1661 error = ENOTSUP;
1661 1662 break;
1662 1663
1663 1664 case DKIOCDUMPINIT:
1664 1665 rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1665 1666 RL_WRITER);
1666 1667 error = zvol_dumpify(zv);
1667 1668 zfs_range_unlock(rl);
1668 1669 break;
1669 1670
1670 1671 case DKIOCDUMPFINI:
1671 1672 if (!(zv->zv_flags & ZVOL_DUMPIFIED))
1672 1673 break;
1673 1674 rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1674 1675 RL_WRITER);
1675 1676 error = zvol_dump_fini(zv);
1676 1677 zfs_range_unlock(rl);
1677 1678 break;
1678 1679
1679 1680 case DKIOCFREE:
1680 1681 {
1681 1682 dkioc_free_t df;
1682 1683 dmu_tx_t *tx;
1683 1684
1684 1685 if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
1685 1686 error = EFAULT;
1686 1687 break;
1687 1688 }
1688 1689
1689 1690 /*
1690 1691 * Apply Postel's Law to length-checking. If they overshoot,
1691 1692 * just blank out until the end, if there's a need to blank
1692 1693 * out anything.
1693 1694 */
1694 1695 if (df.df_start >= zv->zv_volsize)
1695 1696 break; /* No need to do anything... */
1696 1697 if (df.df_start + df.df_length > zv->zv_volsize)
1697 1698 df.df_length = DMU_OBJECT_END;
1698 1699
1699 1700 rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
1700 1701 RL_WRITER);
1701 1702 tx = dmu_tx_create(zv->zv_objset);
1702 1703 error = dmu_tx_assign(tx, TXG_WAIT);
1703 1704 if (error != 0) {
1704 1705 dmu_tx_abort(tx);
1705 1706 } else {
1706 1707 zvol_log_truncate(zv, tx, df.df_start,
1707 1708 df.df_length, B_TRUE);
1708 1709 dmu_tx_commit(tx);
1709 1710 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
1710 1711 df.df_start, df.df_length);
1711 1712 }
1712 1713
1713 1714 zfs_range_unlock(rl);
1714 1715
1715 1716 if (error == 0) {
1716 1717 /*
1717 1718 * If the write-cache is disabled or the 'sync' property
1718 1719 * is set to 'always' then treat this as a synchronous
1719 1720 * operation (i.e. commit to zil).
1720 1721 */
1721 1722 if (!(zv->zv_flags & ZVOL_WCE) ||
1722 1723 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
1723 1724 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1724 1725
1725 1726 /*
1726 1727 * If the caller really wants synchronous writes, and
1727 1728 * can't wait for them, don't return until the write
1728 1729 * is done.
1729 1730 */
1730 1731 if (df.df_flags & DF_WAIT_SYNC) {
1731 1732 txg_wait_synced(
1732 1733 dmu_objset_pool(zv->zv_objset), 0);
1733 1734 }
1734 1735 }
1735 1736 break;
1736 1737 }
1737 1738
1738 1739 default:
1739 1740 error = ENOTTY;
1740 1741 break;
1741 1742
1742 1743 }
1743 1744 mutex_exit(&zfsdev_state_lock);
1744 1745 return (error);
1745 1746 }
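
The DKIOCFREE case above is reached from userland through an ordinary ioctl. A hedged sketch of such a caller, reusing the dkioc_free_t layout handled by the driver; the device path is illustrative:

    #include <sys/types.h>
    #include <sys/dkio.h>
    #include <fcntl.h>
    #include <stropts.h>
    #include <unistd.h>

    /* Free (unmap) a byte range on a zvol; returns -1 with errno on error. */
    static int
    zvol_free_range(const char *dev, uint64_t start, uint64_t len)
    {
            dkioc_free_t df = { 0 };
            int fd, rv;

            if ((fd = open(dev, O_RDWR)) == -1)
                    return (-1);
            df.df_flags = DF_WAIT_SYNC;     /* wait for the free to sync */
            df.df_start = start;
            df.df_length = len;
            rv = ioctl(fd, DKIOCFREE, &df);
            (void) close(fd);
            return (rv);
    }

Per the handler above, a length that overshoots the end of the volume is simply clamped, so callers need not size the request exactly.
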
1746 1747
1747 1748 int
1748 1749 zvol_busy(void)
1749 1750 {
1750 1751 return (zvol_minors != 0);
1751 1752 }
1752 1753
1753 1754 void
1754 1755 zvol_init(void)
1755 1756 {
1756 1757 VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
1757 1758 1) == 0);
1758 1759 mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
1759 1760 }
1760 1761
1761 1762 void
1762 1763 zvol_fini(void)
1763 1764 {
1764 1765 mutex_destroy(&zfsdev_state_lock);
1765 1766 ddi_soft_state_fini(&zfsdev_state);
1766 1767 }
1767 1768
1768 1769 static int
1769 1770 zvol_dump_init(zvol_state_t *zv, boolean_t resize)
1770 1771 {
1771 1772 dmu_tx_t *tx;
1772 1773 int error = 0;
1773 1774 objset_t *os = zv->zv_objset;
1774 1775 nvlist_t *nv = NULL;
1775 1776 uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
1776 1777
1777 1778 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1778 1779 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
1779 1780 DMU_OBJECT_END);
1780 1781 /* wait for dmu_free_long_range to actually free the blocks */
1781 1782 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1782 1783
1783 1784 tx = dmu_tx_create(os);
1784 1785 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1785 1786 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
1786 1787 error = dmu_tx_assign(tx, TXG_WAIT);
1787 1788 if (error) {
1788 1789 dmu_tx_abort(tx);
1789 1790 return (error);
1790 1791 }
1791 1792
1792 1793 	/*
1793 1794 	 * If we are resizing the dump device then we only need to
1794 1795 	 * update the refreservation to match the newly updated
1795 1796 	 * volume size. Otherwise, we save off the zvol's original
1796 1797 	 * state so that we can restore it if the zvol is ever undumpified.
1797 1798 	 */
1798 1799 if (resize) {
1799 1800 error = zap_update(os, ZVOL_ZAP_OBJ,
1800 1801 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1801 1802 &zv->zv_volsize, tx);
1802 1803 } else {
1803 1804 uint64_t checksum, compress, refresrv, vbs, dedup;
1804 1805
1805 1806 error = dsl_prop_get_integer(zv->zv_name,
1806 1807 zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
1807 1808 error = error ? error : dsl_prop_get_integer(zv->zv_name,
1808 1809 zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
1809 1810 error = error ? error : dsl_prop_get_integer(zv->zv_name,
1810 1811 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
1811 1812 error = error ? error : dsl_prop_get_integer(zv->zv_name,
1812 1813 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
1813 1814 if (version >= SPA_VERSION_DEDUP) {
1814 1815 error = error ? error :
1815 1816 dsl_prop_get_integer(zv->zv_name,
1816 1817 zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
1817 1818 }
1818 1819
1819 1820 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1820 1821 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
1821 1822 &compress, tx);
1822 1823 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1823 1824 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
1824 1825 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1825 1826 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1826 1827 &refresrv, tx);
1827 1828 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1828 1829 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
1829 1830 &vbs, tx);
1830 1831 error = error ? error : dmu_object_set_blocksize(
1831 1832 os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
1832 1833 if (version >= SPA_VERSION_DEDUP) {
1833 1834 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1834 1835 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
1835 1836 &dedup, tx);
1836 1837 }
1837 1838 if (error == 0)
1838 1839 zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
1839 1840 }
1840 1841 dmu_tx_commit(tx);
1841 1842
1842 1843 	/*
1843 1844 	 * We only need to update the zvol's properties if we are
1844 1845 	 * initializing the dump area for the first time.
1845 1846 	 */
1846 1847 if (!resize) {
1847 1848 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1848 1849 VERIFY(nvlist_add_uint64(nv,
1849 1850 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
1850 1851 VERIFY(nvlist_add_uint64(nv,
1851 1852 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
1852 1853 ZIO_COMPRESS_OFF) == 0);
1853 1854 VERIFY(nvlist_add_uint64(nv,
1854 1855 zfs_prop_to_name(ZFS_PROP_CHECKSUM),
1855 1856 ZIO_CHECKSUM_OFF) == 0);
1856 1857 if (version >= SPA_VERSION_DEDUP) {
1857 1858 VERIFY(nvlist_add_uint64(nv,
1858 1859 zfs_prop_to_name(ZFS_PROP_DEDUP),
1859 1860 ZIO_CHECKSUM_OFF) == 0);
1860 1861 }
1861 1862
1862 1863 error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
1863 1864 nv, NULL);
1864 1865 nvlist_free(nv);
1865 1866
1866 1867 if (error)
1867 1868 return (error);
1868 1869 }
1869 1870
1870 1871 /* Allocate the space for the dump */
1871 1872 error = zvol_prealloc(zv);
1872 1873 return (error);
1873 1874 }
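
zvol_dump_init() chains its fallible property reads and ZAP updates with the "error = error ? error : f()" idiom: because the conditional operator evaluates only the chosen branch, once any call fails the later calls are never made and the first error is the one returned. A standalone sketch of the idiom, with hypothetical step functions standing in for the dsl_prop/zap calls:

	#include <stdio.h>

	/* Hypothetical fallible steps; nonzero means failure. */
	static int step1(void) { return (0); }
	static int step2(void) { return (5); }
	static int step3(void) { return (0); }

	int
	main(void)
	{
		int error = 0;

		error = step1();
		error = error ? error : step2();	/* fails: error = 5 */
		error = error ? error : step3();	/* step3 never called */
		(void) printf("first error: %d\n", error);	/* prints 5 */
		return (0);
	}
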
1874 1875
1875 1876 static int
1876 1877 zvol_dumpify(zvol_state_t *zv)
1877 1878 {
1878 1879 int error = 0;
1879 1880 uint64_t dumpsize = 0;
1880 1881 dmu_tx_t *tx;
1881 1882 objset_t *os = zv->zv_objset;
1882 1883
1883 1884 if (zv->zv_flags & ZVOL_RDONLY)
1884 1885 return (EROFS);
1885 1886
1886 1887 if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
1887 1888 8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
1888 - boolean_t resize = (dumpsize > 0) ? B_TRUE : B_FALSE;
1889 + boolean_t resize = (dumpsize > 0);
1889 1890
1890 1891 if ((error = zvol_dump_init(zv, resize)) != 0) {
1891 1892 (void) zvol_dump_fini(zv);
1892 1893 return (error);
1893 1894 }
1894 1895 }
1895 1896
1896 1897 /*
1897 1898 	 * Build up our LBA mapping.
1898 1899 */
1899 1900 error = zvol_get_lbas(zv);
1900 1901 if (error) {
1901 1902 (void) zvol_dump_fini(zv);
1902 1903 return (error);
1903 1904 }
1904 1905
1905 1906 tx = dmu_tx_create(os);
1906 1907 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1907 1908 error = dmu_tx_assign(tx, TXG_WAIT);
1908 1909 if (error) {
1909 1910 dmu_tx_abort(tx);
1910 1911 (void) zvol_dump_fini(zv);
1911 1912 return (error);
1912 1913 }
1913 1914
1914 1915 zv->zv_flags |= ZVOL_DUMPIFIED;
1915 1916 error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
1916 1917 &zv->zv_volsize, tx);
1917 1918 dmu_tx_commit(tx);
1918 1919
1919 1920 if (error) {
1920 1921 (void) zvol_dump_fini(zv);
1921 1922 return (error);
1922 1923 }
1923 1924
1924 1925 txg_wait_synced(dmu_objset_pool(os), 0);
1925 1926 return (0);
1926 1927 }
1927 1928
1928 1929 static int
1929 1930 zvol_dump_fini(zvol_state_t *zv)
1930 1931 {
1931 1932 dmu_tx_t *tx;
1932 1933 objset_t *os = zv->zv_objset;
1933 1934 nvlist_t *nv;
1934 1935 int error = 0;
1935 1936 uint64_t checksum, compress, refresrv, vbs, dedup;
1936 1937 uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
1937 1938
1938 1939 	/*
1939 1940 	 * Attempt to restore the zvol to its pre-dumpified state.
1940 1941 	 * This is best-effort, as it is possible that not all of
1941 1942 	 * these properties were initialized during the dumpify process
1942 1943 	 * (e.g. if zvol_dump_init failed partway through).
1943 1944 	 */
1944 1945
1945 1946 tx = dmu_tx_create(os);
1946 1947 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1947 1948 error = dmu_tx_assign(tx, TXG_WAIT);
1948 1949 if (error) {
1949 1950 dmu_tx_abort(tx);
1950 1951 return (error);
1951 1952 }
1952 1953 (void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
1953 1954 dmu_tx_commit(tx);
1954 1955
1955 1956 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
1956 1957 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
1957 1958 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
1958 1959 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
1959 1960 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
1960 1961 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
1961 1962 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
1962 1963 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
1963 1964
1964 1965 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1965 1966 (void) nvlist_add_uint64(nv,
1966 1967 zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
1967 1968 (void) nvlist_add_uint64(nv,
1968 1969 zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
1969 1970 (void) nvlist_add_uint64(nv,
1970 1971 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
1971 1972 if (version >= SPA_VERSION_DEDUP &&
1972 1973 zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
1973 1974 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
1974 1975 (void) nvlist_add_uint64(nv,
1975 1976 zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
1976 1977 }
1977 1978 (void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
1978 1979 nv, NULL);
1979 1980 nvlist_free(nv);
1980 1981
1981 1982 zvol_free_extents(zv);
1982 1983 zv->zv_flags &= ~ZVOL_DUMPIFIED;
1983 1984 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
1984 1985 /* wait for dmu_free_long_range to actually free the blocks */
1985 1986 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1986 1987 tx = dmu_tx_create(os);
1987 1988 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
1988 1989 error = dmu_tx_assign(tx, TXG_WAIT);
1989 1990 if (error) {
1990 1991 dmu_tx_abort(tx);
1991 1992 return (error);
1992 1993 }
1993 1994 if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
1994 1995 zv->zv_volblocksize = vbs;
1995 1996 dmu_tx_commit(tx);
1996 1997
1997 1998 return (0);
1998 1999 }
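
Taken together, zvol_dumpify() and zvol_dump_fini() bracket a zvol's life as a dump device: dumpify stashes the pre-dump property values (checksum, compression, refreservation, volblocksize, and dedup where supported) in the zvol's ZAP and records ZVOL_DUMPSIZE, while fini restores those properties on a best-effort basis and removes the marker. In practice this path is presumably exercised when an administrator configures or unconfigures the zvol as the system dump device (on illumos, via dumpadm(1M)); both functions are static and are not reachable directly from userland.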