3580 Want zvols to return volblocksize when queried for physical block size
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: Dan Kimmel <dan.kimmel@delphix.com>
Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Christopher Siden <christopher.siden@delphix.com>
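
With this change, querying a zvol with the DKIOCGMEDIAINFOEXT ioctl reports the
volume's volblocksize as the physical block size (dki_pbsize), while the
logical block size stays at the 512-byte sector. A minimal userland sketch of
the query follows; the device path is a made-up example and error handling is
abbreviated:

#include <sys/types.h>
#include <sys/dkio.h>
#include <fcntl.h>
#include <stdio.h>
#include <stropts.h>
#include <unistd.h>

int
main(void)
{
	struct dk_minfo_ext dkmext;
	int fd;

	/* hypothetical zvol; substitute a real pool/volume name */
	fd = open("/dev/zvol/rdsk/tank/vol", O_RDONLY);
	if (fd == -1) {
		perror("open");
		return (1);
	}
	if (ioctl(fd, DKIOCGMEDIAINFOEXT, &dkmext) == -1) {
		perror("DKIOCGMEDIAINFOEXT");
		(void) close(fd);
		return (1);
	}
	/* dki_lbsize is the 512-byte sector; dki_pbsize is volblocksize */
	(void) printf("lbsize=%u pbsize=%u\n",
	    dkmext.dki_lbsize, dkmext.dki_pbsize);
	(void) close(fd);
	return (0);
}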
--- old/usr/src/uts/common/fs/zfs/zvol.c
+++ new/usr/src/uts/common/fs/zfs/zvol.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 *
24 24 * Portions Copyright 2010 Robert Milkowski
25 25 *
26 26 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
27 27 * Copyright (c) 2013 by Delphix. All rights reserved.
28 28 * Copyright (c) 2013, Joyent, Inc. All rights reserved.
29 29 */
30 30
31 31 /*
32 32 * ZFS volume emulation driver.
33 33 *
34 34 * Makes a DMU object look like a volume of arbitrary size, up to 2^64 bytes.
35 35 * Volumes are accessed through the symbolic links named:
36 36 *
37 37 * /dev/zvol/dsk/<pool_name>/<dataset_name>
38 38 * /dev/zvol/rdsk/<pool_name>/<dataset_name>
39 39 *
40 40 * These links are created by the /dev filesystem (sdev_zvolops.c).
41 41 * Volumes are persistent through reboot. No user command needs to be
42 42 * run before opening and using a device.
43 43 */
44 44
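
As the comment above notes, no setup step is needed; once the dataset exists,
a program can open the device node directly. A small hedged illustration (the
pool and volume names are hypothetical):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[512];	/* one DEV_BSIZE sector */
	int fd;

	/* created beforehand with, e.g., "zfs create -V 1g tank/vol" */
	fd = open("/dev/zvol/rdsk/tank/vol", O_RDONLY);
	if (fd == -1) {
		perror("open");
		return (1);
	}
	/* raw-device I/O must be sector aligned (see zvol_strategy()) */
	if (pread(fd, buf, sizeof (buf), 0) == -1) {
		perror("pread");
		(void) close(fd);
		return (1);
	}
	(void) close(fd);
	return (0);
}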
45 45 #include <sys/types.h>
46 46 #include <sys/param.h>
47 47 #include <sys/errno.h>
48 48 #include <sys/uio.h>
49 49 #include <sys/buf.h>
50 50 #include <sys/modctl.h>
51 51 #include <sys/open.h>
52 52 #include <sys/kmem.h>
53 53 #include <sys/conf.h>
54 54 #include <sys/cmn_err.h>
55 55 #include <sys/stat.h>
56 56 #include <sys/zap.h>
57 57 #include <sys/spa.h>
58 58 #include <sys/spa_impl.h>
59 59 #include <sys/zio.h>
60 60 #include <sys/dmu_traverse.h>
61 61 #include <sys/dnode.h>
62 62 #include <sys/dsl_dataset.h>
63 63 #include <sys/dsl_prop.h>
64 64 #include <sys/dkio.h>
65 65 #include <sys/efi_partition.h>
66 66 #include <sys/byteorder.h>
67 67 #include <sys/pathname.h>
68 68 #include <sys/ddi.h>
69 69 #include <sys/sunddi.h>
70 70 #include <sys/crc32.h>
71 71 #include <sys/dirent.h>
72 72 #include <sys/policy.h>
73 73 #include <sys/fs/zfs.h>
74 74 #include <sys/zfs_ioctl.h>
75 75 #include <sys/mkdev.h>
76 76 #include <sys/zil.h>
77 77 #include <sys/refcount.h>
78 78 #include <sys/zfs_znode.h>
79 79 #include <sys/zfs_rlock.h>
80 80 #include <sys/vdev_disk.h>
81 81 #include <sys/vdev_impl.h>
82 82 #include <sys/vdev_raidz.h>
83 83 #include <sys/zvol.h>
84 84 #include <sys/dumphdr.h>
85 85 #include <sys/zil_impl.h>
86 86 #include <sys/dbuf.h>
87 87 #include <sys/dmu_tx.h>
88 88 #include <sys/zfeature.h>
89 89 #include <sys/zio_checksum.h>
90 90
91 91 #include "zfs_namecheck.h"
92 92
93 93 void *zfsdev_state;
94 94 static char *zvol_tag = "zvol_tag";
95 95
96 96 #define ZVOL_DUMPSIZE "dumpsize"
97 97
98 98 /*
99 99 * This lock protects the zfsdev_state structure from being modified
100 100 * while it's being used, e.g. an open that comes in before a create
101 101 * finishes. It also protects temporary opens of the dataset so that,
102 102 * e.g., an open doesn't get a spurious EBUSY.
103 103 */
104 104 kmutex_t zfsdev_state_lock;
105 105 static uint32_t zvol_minors;
106 106
107 107 typedef struct zvol_extent {
108 108 list_node_t ze_node;
109 109 dva_t ze_dva; /* dva associated with this extent */
110 110 uint64_t ze_nblks; /* number of blocks in extent */
111 111 } zvol_extent_t;
112 112
113 113 /*
114 114 * The in-core state of each volume.
115 115 */
116 116 typedef struct zvol_state {
117 117 char zv_name[MAXPATHLEN]; /* pool/dd name */
118 118 uint64_t zv_volsize; /* amount of space we advertise */
119 119 uint64_t zv_volblocksize; /* volume block size */
120 120 minor_t zv_minor; /* minor number */
121 121 uint8_t zv_min_bs; /* minimum addressable block shift */
122 122 uint8_t zv_flags; /* readonly, dumpified, etc. */
123 123 objset_t *zv_objset; /* objset handle */
124 124 uint32_t zv_open_count[OTYPCNT]; /* open counts */
125 125 uint32_t zv_total_opens; /* total open count */
126 126 zilog_t *zv_zilog; /* ZIL handle */
127 127 list_t zv_extents; /* List of extents for dump */
128 128 znode_t zv_znode; /* for range locking */
129 129 dmu_buf_t *zv_dbuf; /* bonus handle */
130 130 } zvol_state_t;
131 131
132 132 /*
133 133 * zvol specific flags
134 134 */
135 135 #define ZVOL_RDONLY 0x1
136 136 #define ZVOL_DUMPIFIED 0x2
137 137 #define ZVOL_EXCL 0x4
138 138 #define ZVOL_WCE 0x8
139 139
140 140 /*
141 141 * zvol maximum transfer in one DMU tx.
142 142 */
143 143 int zvol_maxphys = DMU_MAX_ACCESS/2;
144 144
145 145 extern int zfs_set_prop_nvlist(const char *, zprop_source_t,
146 146 nvlist_t *, nvlist_t *);
147 147 static int zvol_remove_zv(zvol_state_t *);
148 148 static int zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio);
149 149 static int zvol_dumpify(zvol_state_t *zv);
150 150 static int zvol_dump_fini(zvol_state_t *zv);
151 151 static int zvol_dump_init(zvol_state_t *zv, boolean_t resize);
152 152
153 153 static void
154 154 zvol_size_changed(zvol_state_t *zv, uint64_t volsize)
155 155 {
156 156 dev_t dev = makedevice(ddi_driver_major(zfs_dip), zv->zv_minor);
157 157
158 158 zv->zv_volsize = volsize;
159 159 VERIFY(ddi_prop_update_int64(dev, zfs_dip,
160 160 "Size", volsize) == DDI_SUCCESS);
161 161 VERIFY(ddi_prop_update_int64(dev, zfs_dip,
162 162 "Nblocks", lbtodb(volsize)) == DDI_SUCCESS);
163 163
164 164 /* Notify specfs to invalidate the cached size */
165 165 spec_size_invalidate(dev, VBLK);
166 166 spec_size_invalidate(dev, VCHR);
167 167 }
168 168
169 169 int
170 170 zvol_check_volsize(uint64_t volsize, uint64_t blocksize)
171 171 {
172 172 if (volsize == 0)
173 173 return (SET_ERROR(EINVAL));
174 174
175 175 if (volsize % blocksize != 0)
176 176 return (SET_ERROR(EINVAL));
177 177
178 178 #ifdef _ILP32
179 179 if (volsize - 1 > SPEC_MAXOFFSET_T)
180 180 return (SET_ERROR(EOVERFLOW));
181 181 #endif
182 182 return (0);
183 183 }
184 184
185 185 int
186 186 zvol_check_volblocksize(uint64_t volblocksize)
187 187 {
188 188 if (volblocksize < SPA_MINBLOCKSIZE ||
189 189 volblocksize > SPA_MAXBLOCKSIZE ||
190 190 !ISP2(volblocksize))
191 191 return (SET_ERROR(EDOM));
192 192
193 193 return (0);
194 194 }
195 195
196 196 int
197 197 zvol_get_stats(objset_t *os, nvlist_t *nv)
198 198 {
199 199 int error;
200 200 dmu_object_info_t doi;
201 201 uint64_t val;
202 202
203 203 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &val);
204 204 if (error)
205 205 return (error);
206 206
207 207 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLSIZE, val);
208 208
209 209 error = dmu_object_info(os, ZVOL_OBJ, &doi);
210 210
211 211 if (error == 0) {
212 212 dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_VOLBLOCKSIZE,
213 213 doi.doi_data_block_size);
214 214 }
215 215
216 216 return (error);
217 217 }
218 218
219 219 static zvol_state_t *
220 220 zvol_minor_lookup(const char *name)
221 221 {
222 222 minor_t minor;
223 223 zvol_state_t *zv;
224 224
225 225 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
226 226
227 227 for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
228 228 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
229 229 if (zv == NULL)
230 230 continue;
231 231 if (strcmp(zv->zv_name, name) == 0)
232 232 return (zv);
233 233 }
234 234
235 235 return (NULL);
236 236 }
237 237
238 238 /* extent mapping arg */
239 239 struct maparg {
240 240 zvol_state_t *ma_zv;
241 241 uint64_t ma_blks;
242 242 };
243 243
244 244 /*ARGSUSED*/
245 245 static int
246 246 zvol_map_block(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
247 247 const zbookmark_t *zb, const dnode_phys_t *dnp, void *arg)
248 248 {
249 249 struct maparg *ma = arg;
250 250 zvol_extent_t *ze;
251 251 int bs = ma->ma_zv->zv_volblocksize;
252 252
253 253 if (bp == NULL || zb->zb_object != ZVOL_OBJ || zb->zb_level != 0)
254 254 return (0);
255 255
256 256 VERIFY3U(ma->ma_blks, ==, zb->zb_blkid);
257 257 ma->ma_blks++;
258 258
259 259 /* Abort immediately if we have encountered gang blocks */
260 260 if (BP_IS_GANG(bp))
261 261 return (SET_ERROR(EFRAGS));
262 262
263 263 /*
264 264 * See if the block is at the end of the previous extent.
265 265 */
266 266 ze = list_tail(&ma->ma_zv->zv_extents);
267 267 if (ze &&
268 268 DVA_GET_VDEV(BP_IDENTITY(bp)) == DVA_GET_VDEV(&ze->ze_dva) &&
269 269 DVA_GET_OFFSET(BP_IDENTITY(bp)) ==
270 270 DVA_GET_OFFSET(&ze->ze_dva) + ze->ze_nblks * bs) {
271 271 ze->ze_nblks++;
272 272 return (0);
273 273 }
274 274
275 275 dprintf_bp(bp, "%s", "next blkptr:");
276 276
277 277 /* start a new extent */
278 278 ze = kmem_zalloc(sizeof (zvol_extent_t), KM_SLEEP);
279 279 ze->ze_dva = bp->blk_dva[0]; /* structure assignment */
280 280 ze->ze_nblks = 1;
281 281 list_insert_tail(&ma->ma_zv->zv_extents, ze);
282 282 return (0);
283 283 }
284 284
285 285 static void
286 286 zvol_free_extents(zvol_state_t *zv)
287 287 {
288 288 zvol_extent_t *ze;
289 289
290 290 while (ze = list_head(&zv->zv_extents)) {
291 291 list_remove(&zv->zv_extents, ze);
292 292 kmem_free(ze, sizeof (zvol_extent_t));
293 293 }
294 294 }
295 295
296 296 static int
297 297 zvol_get_lbas(zvol_state_t *zv)
298 298 {
299 299 objset_t *os = zv->zv_objset;
300 300 struct maparg ma;
301 301 int err;
302 302
303 303 ma.ma_zv = zv;
304 304 ma.ma_blks = 0;
305 305 zvol_free_extents(zv);
306 306
307 307 /* commit any in-flight changes before traversing the dataset */
308 308 txg_wait_synced(dmu_objset_pool(os), 0);
309 309 err = traverse_dataset(dmu_objset_ds(os), 0,
310 310 TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA, zvol_map_block, &ma);
311 311 if (err || ma.ma_blks != (zv->zv_volsize / zv->zv_volblocksize)) {
312 312 zvol_free_extents(zv);
313 313 return (err ? err : EIO);
314 314 }
315 315
316 316 return (0);
317 317 }
318 318
319 319 /* ARGSUSED */
320 320 void
321 321 zvol_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
322 322 {
323 323 zfs_creat_t *zct = arg;
324 324 nvlist_t *nvprops = zct->zct_props;
325 325 int error;
326 326 uint64_t volblocksize, volsize;
327 327
328 328 VERIFY(nvlist_lookup_uint64(nvprops,
329 329 zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) == 0);
330 330 if (nvlist_lookup_uint64(nvprops,
331 331 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &volblocksize) != 0)
332 332 volblocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
333 333
334 334 /*
335 335 * These properties must be removed from the list so the generic
336 336 * property setting step won't apply to them.
337 337 */
338 338 VERIFY(nvlist_remove_all(nvprops,
339 339 zfs_prop_to_name(ZFS_PROP_VOLSIZE)) == 0);
340 340 (void) nvlist_remove_all(nvprops,
341 341 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE));
342 342
343 343 error = dmu_object_claim(os, ZVOL_OBJ, DMU_OT_ZVOL, volblocksize,
344 344 DMU_OT_NONE, 0, tx);
345 345 ASSERT(error == 0);
346 346
347 347 error = zap_create_claim(os, ZVOL_ZAP_OBJ, DMU_OT_ZVOL_PROP,
348 348 DMU_OT_NONE, 0, tx);
349 349 ASSERT(error == 0);
350 350
351 351 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize, tx);
352 352 ASSERT(error == 0);
353 353 }
354 354
355 355 /*
356 356 * Replay a TX_TRUNCATE ZIL transaction if asked. TX_TRUNCATE is how we
357 357 * implement DKIOCFREE/free-long-range.
358 358 */
359 359 static int
360 360 zvol_replay_truncate(zvol_state_t *zv, lr_truncate_t *lr, boolean_t byteswap)
361 361 {
362 362 uint64_t offset, length;
363 363
364 364 if (byteswap)
365 365 byteswap_uint64_array(lr, sizeof (*lr));
366 366
367 367 offset = lr->lr_offset;
368 368 length = lr->lr_length;
369 369
370 370 return (dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, offset, length));
371 371 }
372 372
373 373 /*
374 374 * Replay a TX_WRITE ZIL transaction that didn't get committed
375 375 * after a system failure
376 376 */
377 377 static int
378 378 zvol_replay_write(zvol_state_t *zv, lr_write_t *lr, boolean_t byteswap)
379 379 {
380 380 objset_t *os = zv->zv_objset;
381 381 char *data = (char *)(lr + 1); /* data follows lr_write_t */
382 382 uint64_t offset, length;
383 383 dmu_tx_t *tx;
384 384 int error;
385 385
386 386 if (byteswap)
387 387 byteswap_uint64_array(lr, sizeof (*lr));
388 388
389 389 offset = lr->lr_offset;
390 390 length = lr->lr_length;
391 391
392 392 /* If it's a dmu_sync() block, write the whole block */
393 393 if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
394 394 uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
395 395 if (length < blocksize) {
396 396 offset -= offset % blocksize;
397 397 length = blocksize;
398 398 }
399 399 }
400 400
401 401 tx = dmu_tx_create(os);
402 402 dmu_tx_hold_write(tx, ZVOL_OBJ, offset, length);
403 403 error = dmu_tx_assign(tx, TXG_WAIT);
404 404 if (error) {
405 405 dmu_tx_abort(tx);
406 406 } else {
407 407 dmu_write(os, ZVOL_OBJ, offset, length, data, tx);
408 408 dmu_tx_commit(tx);
409 409 }
410 410
411 411 return (error);
412 412 }
413 413
414 414 /* ARGSUSED */
415 415 static int
416 416 zvol_replay_err(zvol_state_t *zv, lr_t *lr, boolean_t byteswap)
417 417 {
418 418 return (SET_ERROR(ENOTSUP));
419 419 }
420 420
421 421 /*
422 422 * Callback vectors for replaying records.
423 423 * Only TX_WRITE and TX_TRUNCATE are needed for zvol.
424 424 */
425 425 zil_replay_func_t *zvol_replay_vector[TX_MAX_TYPE] = {
426 426 zvol_replay_err, /* 0 no such transaction type */
427 427 zvol_replay_err, /* TX_CREATE */
428 428 zvol_replay_err, /* TX_MKDIR */
429 429 zvol_replay_err, /* TX_MKXATTR */
430 430 zvol_replay_err, /* TX_SYMLINK */
431 431 zvol_replay_err, /* TX_REMOVE */
432 432 zvol_replay_err, /* TX_RMDIR */
433 433 zvol_replay_err, /* TX_LINK */
434 434 zvol_replay_err, /* TX_RENAME */
435 435 zvol_replay_write, /* TX_WRITE */
436 436 zvol_replay_truncate, /* TX_TRUNCATE */
437 437 zvol_replay_err, /* TX_SETATTR */
438 438 zvol_replay_err, /* TX_ACL */
439 439 zvol_replay_err, /* TX_CREATE_ACL */
440 440 zvol_replay_err, /* TX_CREATE_ATTR */
441 441 zvol_replay_err, /* TX_CREATE_ACL_ATTR */
442 442 zvol_replay_err, /* TX_MKDIR_ACL */
443 443 zvol_replay_err, /* TX_MKDIR_ATTR */
444 444 zvol_replay_err, /* TX_MKDIR_ACL_ATTR */
445 445 zvol_replay_err, /* TX_WRITE2 */
446 446 };
447 447
448 448 int
449 449 zvol_name2minor(const char *name, minor_t *minor)
450 450 {
451 451 zvol_state_t *zv;
452 452
453 453 mutex_enter(&zfsdev_state_lock);
454 454 zv = zvol_minor_lookup(name);
455 455 if (minor && zv)
456 456 *minor = zv->zv_minor;
457 457 mutex_exit(&zfsdev_state_lock);
458 458 return (zv ? 0 : -1);
459 459 }
460 460
461 461 /*
462 462 * Create a minor node (plus a whole lot more) for the specified volume.
463 463 */
464 464 int
465 465 zvol_create_minor(const char *name)
466 466 {
467 467 zfs_soft_state_t *zs;
468 468 zvol_state_t *zv;
469 469 objset_t *os;
470 470 dmu_object_info_t doi;
471 471 minor_t minor = 0;
472 472 char chrbuf[30], blkbuf[30];
473 473 int error;
474 474
475 475 mutex_enter(&zfsdev_state_lock);
476 476
477 477 if (zvol_minor_lookup(name) != NULL) {
478 478 mutex_exit(&zfsdev_state_lock);
479 479 return (SET_ERROR(EEXIST));
480 480 }
481 481
482 482 /* lie and say we're read-only */
483 483 error = dmu_objset_own(name, DMU_OST_ZVOL, B_TRUE, FTAG, &os);
484 484
485 485 if (error) {
486 486 mutex_exit(&zfsdev_state_lock);
487 487 return (error);
488 488 }
489 489
490 490 if ((minor = zfsdev_minor_alloc()) == 0) {
491 491 dmu_objset_disown(os, FTAG);
492 492 mutex_exit(&zfsdev_state_lock);
493 493 return (SET_ERROR(ENXIO));
494 494 }
495 495
496 496 if (ddi_soft_state_zalloc(zfsdev_state, minor) != DDI_SUCCESS) {
497 497 dmu_objset_disown(os, FTAG);
498 498 mutex_exit(&zfsdev_state_lock);
499 499 return (SET_ERROR(EAGAIN));
500 500 }
501 501 (void) ddi_prop_update_string(minor, zfs_dip, ZVOL_PROP_NAME,
502 502 (char *)name);
503 503
504 504 (void) snprintf(chrbuf, sizeof (chrbuf), "%u,raw", minor);
505 505
506 506 if (ddi_create_minor_node(zfs_dip, chrbuf, S_IFCHR,
507 507 minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
508 508 ddi_soft_state_free(zfsdev_state, minor);
509 509 dmu_objset_disown(os, FTAG);
510 510 mutex_exit(&zfsdev_state_lock);
511 511 return (SET_ERROR(EAGAIN));
512 512 }
513 513
514 514 (void) snprintf(blkbuf, sizeof (blkbuf), "%u", minor);
515 515
516 516 if (ddi_create_minor_node(zfs_dip, blkbuf, S_IFBLK,
517 517 minor, DDI_PSEUDO, 0) == DDI_FAILURE) {
518 518 ddi_remove_minor_node(zfs_dip, chrbuf);
519 519 ddi_soft_state_free(zfsdev_state, minor);
520 520 dmu_objset_disown(os, FTAG);
521 521 mutex_exit(&zfsdev_state_lock);
522 522 return (SET_ERROR(EAGAIN));
523 523 }
524 524
525 525 zs = ddi_get_soft_state(zfsdev_state, minor);
526 526 zs->zss_type = ZSST_ZVOL;
527 527 zv = zs->zss_data = kmem_zalloc(sizeof (zvol_state_t), KM_SLEEP);
528 528 (void) strlcpy(zv->zv_name, name, MAXPATHLEN);
529 529 zv->zv_min_bs = DEV_BSHIFT;
530 530 zv->zv_minor = minor;
531 531 zv->zv_objset = os;
532 532 if (dmu_objset_is_snapshot(os) || !spa_writeable(dmu_objset_spa(os)))
533 533 zv->zv_flags |= ZVOL_RDONLY;
534 534 mutex_init(&zv->zv_znode.z_range_lock, NULL, MUTEX_DEFAULT, NULL);
535 535 avl_create(&zv->zv_znode.z_range_avl, zfs_range_compare,
536 536 sizeof (rl_t), offsetof(rl_t, r_node));
537 537 list_create(&zv->zv_extents, sizeof (zvol_extent_t),
538 538 offsetof(zvol_extent_t, ze_node));
539 539 /* get and cache the blocksize */
540 540 error = dmu_object_info(os, ZVOL_OBJ, &doi);
541 541 ASSERT(error == 0);
542 542 zv->zv_volblocksize = doi.doi_data_block_size;
543 543
544 544 if (spa_writeable(dmu_objset_spa(os))) {
545 545 if (zil_replay_disable)
546 546 zil_destroy(dmu_objset_zil(os), B_FALSE);
547 547 else
548 548 zil_replay(os, zv, zvol_replay_vector);
549 549 }
550 550 dmu_objset_disown(os, FTAG);
551 551 zv->zv_objset = NULL;
552 552
553 553 zvol_minors++;
554 554
555 555 mutex_exit(&zfsdev_state_lock);
556 556
557 557 return (0);
558 558 }
559 559
560 560 /*
561 561 * Remove minor node for the specified volume.
562 562 */
563 563 static int
564 564 zvol_remove_zv(zvol_state_t *zv)
565 565 {
566 566 char nmbuf[20];
567 567 minor_t minor = zv->zv_minor;
568 568
569 569 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
570 570 if (zv->zv_total_opens != 0)
571 571 return (SET_ERROR(EBUSY));
572 572
573 573 (void) snprintf(nmbuf, sizeof (nmbuf), "%u,raw", minor);
574 574 ddi_remove_minor_node(zfs_dip, nmbuf);
575 575
576 576 (void) snprintf(nmbuf, sizeof (nmbuf), "%u", minor);
577 577 ddi_remove_minor_node(zfs_dip, nmbuf);
578 578
579 579 avl_destroy(&zv->zv_znode.z_range_avl);
580 580 mutex_destroy(&zv->zv_znode.z_range_lock);
581 581
582 582 kmem_free(zv, sizeof (zvol_state_t));
583 583
584 584 ddi_soft_state_free(zfsdev_state, minor);
585 585
586 586 zvol_minors--;
587 587 return (0);
588 588 }
589 589
590 590 int
591 591 zvol_remove_minor(const char *name)
592 592 {
593 593 zvol_state_t *zv;
594 594 int rc;
595 595
596 596 mutex_enter(&zfsdev_state_lock);
597 597 if ((zv = zvol_minor_lookup(name)) == NULL) {
598 598 mutex_exit(&zfsdev_state_lock);
599 599 return (SET_ERROR(ENXIO));
600 600 }
601 601 rc = zvol_remove_zv(zv);
602 602 mutex_exit(&zfsdev_state_lock);
603 603 return (rc);
604 604 }
605 605
606 606 int
607 607 zvol_first_open(zvol_state_t *zv)
608 608 {
609 609 objset_t *os;
610 610 uint64_t volsize;
611 611 int error;
612 612 uint64_t readonly;
613 613
614 614 /* lie and say we're read-only */
615 615 error = dmu_objset_own(zv->zv_name, DMU_OST_ZVOL, B_TRUE,
616 616 zvol_tag, &os);
617 617 if (error)
618 618 return (error);
619 619
620 620 zv->zv_objset = os;
621 621 error = zap_lookup(os, ZVOL_ZAP_OBJ, "size", 8, 1, &volsize);
622 622 if (error) {
623 623 ASSERT(error == 0);
624 624 dmu_objset_disown(os, zvol_tag);
625 625 return (error);
626 626 }
627 627
628 628 error = dmu_bonus_hold(os, ZVOL_OBJ, zvol_tag, &zv->zv_dbuf);
629 629 if (error) {
630 630 dmu_objset_disown(os, zvol_tag);
631 631 return (error);
632 632 }
633 633
634 634 zvol_size_changed(zv, volsize);
635 635 zv->zv_zilog = zil_open(os, zvol_get_data);
636 636
637 637 VERIFY(dsl_prop_get_integer(zv->zv_name, "readonly", &readonly,
638 638 NULL) == 0);
639 639 if (readonly || dmu_objset_is_snapshot(os) ||
640 640 !spa_writeable(dmu_objset_spa(os)))
641 641 zv->zv_flags |= ZVOL_RDONLY;
642 642 else
643 643 zv->zv_flags &= ~ZVOL_RDONLY;
644 644 return (error);
645 645 }
646 646
647 647 void
648 648 zvol_last_close(zvol_state_t *zv)
649 649 {
650 650 zil_close(zv->zv_zilog);
651 651 zv->zv_zilog = NULL;
652 652
653 653 dmu_buf_rele(zv->zv_dbuf, zvol_tag);
654 654 zv->zv_dbuf = NULL;
655 655
656 656 /*
657 657 * Evict cached data
658 658 */
659 659 if (dsl_dataset_is_dirty(dmu_objset_ds(zv->zv_objset)) &&
660 660 !(zv->zv_flags & ZVOL_RDONLY))
661 661 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
662 662 dmu_objset_evict_dbufs(zv->zv_objset);
663 663
664 664 dmu_objset_disown(zv->zv_objset, zvol_tag);
665 665 zv->zv_objset = NULL;
666 666 }
667 667
668 668 int
669 669 zvol_prealloc(zvol_state_t *zv)
670 670 {
671 671 objset_t *os = zv->zv_objset;
672 672 dmu_tx_t *tx;
673 673 uint64_t refd, avail, usedobjs, availobjs;
674 674 uint64_t resid = zv->zv_volsize;
675 675 uint64_t off = 0;
676 676
677 677 /* Check the space usage before attempting to allocate the space */
678 678 dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
679 679 if (avail < zv->zv_volsize)
680 680 return (SET_ERROR(ENOSPC));
681 681
682 682 /* Free old extents if they exist */
683 683 zvol_free_extents(zv);
684 684
685 685 while (resid != 0) {
686 686 int error;
687 687 uint64_t bytes = MIN(resid, SPA_MAXBLOCKSIZE);
688 688
689 689 tx = dmu_tx_create(os);
690 690 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
691 691 error = dmu_tx_assign(tx, TXG_WAIT);
692 692 if (error) {
693 693 dmu_tx_abort(tx);
694 694 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, off);
695 695 return (error);
696 696 }
697 697 dmu_prealloc(os, ZVOL_OBJ, off, bytes, tx);
698 698 dmu_tx_commit(tx);
699 699 off += bytes;
700 700 resid -= bytes;
701 701 }
702 702 txg_wait_synced(dmu_objset_pool(os), 0);
703 703
704 704 return (0);
705 705 }
706 706
707 707 static int
708 708 zvol_update_volsize(objset_t *os, uint64_t volsize)
709 709 {
710 710 dmu_tx_t *tx;
711 711 int error;
712 712
713 713 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
714 714
715 715 tx = dmu_tx_create(os);
716 716 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
717 717 error = dmu_tx_assign(tx, TXG_WAIT);
718 718 if (error) {
719 719 dmu_tx_abort(tx);
720 720 return (error);
721 721 }
722 722
723 723 error = zap_update(os, ZVOL_ZAP_OBJ, "size", 8, 1,
724 724 &volsize, tx);
725 725 dmu_tx_commit(tx);
726 726
727 727 if (error == 0)
728 728 error = dmu_free_long_range(os,
729 729 ZVOL_OBJ, volsize, DMU_OBJECT_END);
730 730 return (error);
731 731 }
732 732
733 733 void
734 734 zvol_remove_minors(const char *name)
735 735 {
736 736 zvol_state_t *zv;
737 737 char *namebuf;
738 738 minor_t minor;
739 739
740 740 namebuf = kmem_zalloc(strlen(name) + 2, KM_SLEEP);
741 741 (void) strncpy(namebuf, name, strlen(name));
742 742 (void) strcat(namebuf, "/");
743 743 mutex_enter(&zfsdev_state_lock);
744 744 for (minor = 1; minor <= ZFSDEV_MAX_MINOR; minor++) {
745 745
746 746 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
747 747 if (zv == NULL)
748 748 continue;
749 749 if (strncmp(namebuf, zv->zv_name, strlen(namebuf)) == 0)
750 750 (void) zvol_remove_zv(zv);
751 751 }
752 752 kmem_free(namebuf, strlen(name) + 2);
753 753
754 754 mutex_exit(&zfsdev_state_lock);
755 755 }
756 756
757 757 static int
758 758 zvol_update_live_volsize(zvol_state_t *zv, uint64_t volsize)
759 759 {
760 760 uint64_t old_volsize = 0ULL;
761 761 int error = 0;
762 762
763 763 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
764 764
765 765 /*
766 766 * Reinitialize the dump area to the new size. If we
767 767 * failed to resize the dump area then restore it back to
768 768 * its original size. We must set the new volsize prior
769 769 * to calling dumpvp_resize() to ensure that the devices'
770 770 * size(9P) is not visible by the dump subsystem.
771 771 */
772 772 old_volsize = zv->zv_volsize;
773 773 zvol_size_changed(zv, volsize);
774 774
775 775 if (zv->zv_flags & ZVOL_DUMPIFIED) {
776 776 if ((error = zvol_dumpify(zv)) != 0 ||
777 777 (error = dumpvp_resize()) != 0) {
778 778 int dumpify_error;
779 779
780 780 (void) zvol_update_volsize(zv->zv_objset, old_volsize);
781 781 zvol_size_changed(zv, old_volsize);
782 782 dumpify_error = zvol_dumpify(zv);
783 783 error = dumpify_error ? dumpify_error : error;
784 784 }
785 785 }
786 786
787 787 /*
788 788 * Generate a LUN expansion event.
789 789 */
790 790 if (error == 0) {
791 791 sysevent_id_t eid;
792 792 nvlist_t *attr;
793 793 char *physpath = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
794 794
795 795 (void) snprintf(physpath, MAXPATHLEN, "%s%u", ZVOL_PSEUDO_DEV,
796 796 zv->zv_minor);
797 797
798 798 VERIFY(nvlist_alloc(&attr, NV_UNIQUE_NAME, KM_SLEEP) == 0);
799 799 VERIFY(nvlist_add_string(attr, DEV_PHYS_PATH, physpath) == 0);
800 800
801 801 (void) ddi_log_sysevent(zfs_dip, SUNW_VENDOR, EC_DEV_STATUS,
802 802 ESC_DEV_DLE, attr, &eid, DDI_SLEEP);
803 803
804 804 nvlist_free(attr);
805 805 kmem_free(physpath, MAXPATHLEN);
806 806 }
807 807 return (error);
808 808 }
809 809
810 810 int
811 811 zvol_set_volsize(const char *name, uint64_t volsize)
812 812 {
813 813 zvol_state_t *zv = NULL;
814 814 objset_t *os;
815 815 int error;
816 816 dmu_object_info_t doi;
817 817 uint64_t readonly;
818 818 boolean_t owned = B_FALSE;
819 819
820 820 error = dsl_prop_get_integer(name,
821 821 zfs_prop_to_name(ZFS_PROP_READONLY), &readonly, NULL);
822 822 if (error != 0)
823 823 return (error);
824 824 if (readonly)
825 825 return (SET_ERROR(EROFS));
826 826
827 827 mutex_enter(&zfsdev_state_lock);
828 828 zv = zvol_minor_lookup(name);
829 829
830 830 if (zv == NULL || zv->zv_objset == NULL) {
831 831 if ((error = dmu_objset_own(name, DMU_OST_ZVOL, B_FALSE,
832 832 FTAG, &os)) != 0) {
833 833 mutex_exit(&zfsdev_state_lock);
834 834 return (error);
835 835 }
836 836 owned = B_TRUE;
837 837 if (zv != NULL)
838 838 zv->zv_objset = os;
839 839 } else {
840 840 os = zv->zv_objset;
841 841 }
842 842
843 843 if ((error = dmu_object_info(os, ZVOL_OBJ, &doi)) != 0 ||
844 844 (error = zvol_check_volsize(volsize, doi.doi_data_block_size)) != 0)
845 845 goto out;
846 846
847 847 error = zvol_update_volsize(os, volsize);
848 848
849 849 if (error == 0 && zv != NULL)
850 850 error = zvol_update_live_volsize(zv, volsize);
851 851 out:
852 852 if (owned) {
853 853 dmu_objset_disown(os, FTAG);
854 854 if (zv != NULL)
855 855 zv->zv_objset = NULL;
856 856 }
857 857 mutex_exit(&zfsdev_state_lock);
858 858 return (error);
859 859 }
860 860
861 861 /*ARGSUSED*/
862 862 int
863 863 zvol_open(dev_t *devp, int flag, int otyp, cred_t *cr)
864 864 {
865 865 zvol_state_t *zv;
866 866 int err = 0;
867 867
868 868 mutex_enter(&zfsdev_state_lock);
869 869
870 870 zv = zfsdev_get_soft_state(getminor(*devp), ZSST_ZVOL);
871 871 if (zv == NULL) {
872 872 mutex_exit(&zfsdev_state_lock);
873 873 return (SET_ERROR(ENXIO));
874 874 }
875 875
876 876 if (zv->zv_total_opens == 0)
877 877 err = zvol_first_open(zv);
878 878 if (err) {
879 879 mutex_exit(&zfsdev_state_lock);
880 880 return (err);
881 881 }
882 882 if ((flag & FWRITE) && (zv->zv_flags & ZVOL_RDONLY)) {
883 883 err = SET_ERROR(EROFS);
884 884 goto out;
885 885 }
886 886 if (zv->zv_flags & ZVOL_EXCL) {
887 887 err = SET_ERROR(EBUSY);
888 888 goto out;
889 889 }
890 890 if (flag & FEXCL) {
891 891 if (zv->zv_total_opens != 0) {
892 892 err = SET_ERROR(EBUSY);
893 893 goto out;
894 894 }
895 895 zv->zv_flags |= ZVOL_EXCL;
896 896 }
897 897
898 898 if (zv->zv_open_count[otyp] == 0 || otyp == OTYP_LYR) {
899 899 zv->zv_open_count[otyp]++;
900 900 zv->zv_total_opens++;
901 901 }
902 902 mutex_exit(&zfsdev_state_lock);
903 903
904 904 return (err);
905 905 out:
906 906 if (zv->zv_total_opens == 0)
907 907 zvol_last_close(zv);
908 908 mutex_exit(&zfsdev_state_lock);
909 909 return (err);
910 910 }
911 911
912 912 /*ARGSUSED*/
913 913 int
914 914 zvol_close(dev_t dev, int flag, int otyp, cred_t *cr)
915 915 {
916 916 minor_t minor = getminor(dev);
917 917 zvol_state_t *zv;
918 918 int error = 0;
919 919
920 920 mutex_enter(&zfsdev_state_lock);
921 921
922 922 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
923 923 if (zv == NULL) {
924 924 mutex_exit(&zfsdev_state_lock);
925 925 return (SET_ERROR(ENXIO));
926 926 }
927 927
928 928 if (zv->zv_flags & ZVOL_EXCL) {
929 929 ASSERT(zv->zv_total_opens == 1);
930 930 zv->zv_flags &= ~ZVOL_EXCL;
931 931 }
932 932
933 933 /*
934 934 * If the open count is zero, this is a spurious close.
935 935 * That indicates a bug in the kernel / DDI framework.
936 936 */
937 937 ASSERT(zv->zv_open_count[otyp] != 0);
938 938 ASSERT(zv->zv_total_opens != 0);
939 939
940 940 /*
941 941 * You may get multiple opens, but only one close.
942 942 */
943 943 zv->zv_open_count[otyp]--;
944 944 zv->zv_total_opens--;
945 945
946 946 if (zv->zv_total_opens == 0)
947 947 zvol_last_close(zv);
948 948
949 949 mutex_exit(&zfsdev_state_lock);
950 950 return (error);
951 951 }
952 952
953 953 static void
954 954 zvol_get_done(zgd_t *zgd, int error)
955 955 {
956 956 if (zgd->zgd_db)
957 957 dmu_buf_rele(zgd->zgd_db, zgd);
958 958
959 959 zfs_range_unlock(zgd->zgd_rl);
960 960
961 961 if (error == 0 && zgd->zgd_bp)
962 962 zil_add_block(zgd->zgd_zilog, zgd->zgd_bp);
963 963
964 964 kmem_free(zgd, sizeof (zgd_t));
965 965 }
966 966
967 967 /*
968 968 * Get data to generate a TX_WRITE intent log record.
969 969 */
970 970 static int
971 971 zvol_get_data(void *arg, lr_write_t *lr, char *buf, zio_t *zio)
972 972 {
973 973 zvol_state_t *zv = arg;
974 974 objset_t *os = zv->zv_objset;
975 975 uint64_t object = ZVOL_OBJ;
976 976 uint64_t offset = lr->lr_offset;
977 977 uint64_t size = lr->lr_length; /* length of user data */
978 978 blkptr_t *bp = &lr->lr_blkptr;
979 979 dmu_buf_t *db;
980 980 zgd_t *zgd;
981 981 int error;
982 982
983 983 ASSERT(zio != NULL);
984 984 ASSERT(size != 0);
985 985
986 986 zgd = kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
987 987 zgd->zgd_zilog = zv->zv_zilog;
988 988 zgd->zgd_rl = zfs_range_lock(&zv->zv_znode, offset, size, RL_READER);
989 989
990 990 /*
991 991 * Write records come in two flavors: immediate and indirect.
992 992 * For small writes it's cheaper to store the data with the
993 993 * log record (immediate); for large writes it's cheaper to
994 994 * sync the data and get a pointer to it (indirect) so that
995 995 * we don't have to write the data twice.
996 996 */
997 997 if (buf != NULL) { /* immediate write */
998 998 error = dmu_read(os, object, offset, size, buf,
999 999 DMU_READ_NO_PREFETCH);
1000 1000 } else {
1001 1001 size = zv->zv_volblocksize;
1002 1002 offset = P2ALIGN(offset, size);
1003 1003 error = dmu_buf_hold(os, object, offset, zgd, &db,
1004 1004 DMU_READ_NO_PREFETCH);
1005 1005 if (error == 0) {
1006 1006 blkptr_t *obp = dmu_buf_get_blkptr(db);
1007 1007 if (obp) {
1008 1008 ASSERT(BP_IS_HOLE(bp));
1009 1009 *bp = *obp;
1010 1010 }
1011 1011
1012 1012 zgd->zgd_db = db;
1013 1013 zgd->zgd_bp = bp;
1014 1014
1015 1015 ASSERT(db->db_offset == offset);
1016 1016 ASSERT(db->db_size == size);
1017 1017
1018 1018 error = dmu_sync(zio, lr->lr_common.lrc_txg,
1019 1019 zvol_get_done, zgd);
1020 1020
1021 1021 if (error == 0)
1022 1022 return (0);
1023 1023 }
1024 1024 }
1025 1025
1026 1026 zvol_get_done(zgd, error);
1027 1027
1028 1028 return (error);
1029 1029 }
1030 1030
1031 1031 /*
1032 1032 * zvol_log_write() handles synchronous writes using TX_WRITE ZIL transactions.
1033 1033 *
1034 1034 * We store data in the log buffers if it's small enough.
1035 1035 * Otherwise we will later flush the data out via dmu_sync().
1036 1036 */
1037 1037 ssize_t zvol_immediate_write_sz = 32768;
1038 1038
1039 1039 static void
1040 1040 zvol_log_write(zvol_state_t *zv, dmu_tx_t *tx, offset_t off, ssize_t resid,
1041 1041 boolean_t sync)
1042 1042 {
1043 1043 uint32_t blocksize = zv->zv_volblocksize;
1044 1044 zilog_t *zilog = zv->zv_zilog;
1045 1045 boolean_t slogging;
1046 1046 ssize_t immediate_write_sz;
1047 1047
1048 1048 if (zil_replaying(zilog, tx))
1049 1049 return;
1050 1050
1051 1051 immediate_write_sz = (zilog->zl_logbias == ZFS_LOGBIAS_THROUGHPUT)
1052 1052 ? 0 : zvol_immediate_write_sz;
1053 1053
1054 1054 slogging = spa_has_slogs(zilog->zl_spa) &&
1055 1055 (zilog->zl_logbias == ZFS_LOGBIAS_LATENCY);
1056 1056
1057 1057 while (resid) {
1058 1058 itx_t *itx;
1059 1059 lr_write_t *lr;
1060 1060 ssize_t len;
1061 1061 itx_wr_state_t write_state;
1062 1062
1063 1063 /*
1064 1064 * Unlike zfs_log_write() we can be called with
1065 1065 		 * up to DMU_MAX_ACCESS/2 (5MB) writes.
1066 1066 */
1067 1067 if (blocksize > immediate_write_sz && !slogging &&
1068 1068 resid >= blocksize && off % blocksize == 0) {
1069 1069 write_state = WR_INDIRECT; /* uses dmu_sync */
1070 1070 len = blocksize;
1071 1071 } else if (sync) {
1072 1072 write_state = WR_COPIED;
1073 1073 len = MIN(ZIL_MAX_LOG_DATA, resid);
1074 1074 } else {
1075 1075 write_state = WR_NEED_COPY;
1076 1076 len = MIN(ZIL_MAX_LOG_DATA, resid);
1077 1077 }
1078 1078
1079 1079 itx = zil_itx_create(TX_WRITE, sizeof (*lr) +
1080 1080 (write_state == WR_COPIED ? len : 0));
1081 1081 lr = (lr_write_t *)&itx->itx_lr;
1082 1082 if (write_state == WR_COPIED && dmu_read(zv->zv_objset,
1083 1083 ZVOL_OBJ, off, len, lr + 1, DMU_READ_NO_PREFETCH) != 0) {
1084 1084 zil_itx_destroy(itx);
1085 1085 itx = zil_itx_create(TX_WRITE, sizeof (*lr));
1086 1086 lr = (lr_write_t *)&itx->itx_lr;
1087 1087 write_state = WR_NEED_COPY;
1088 1088 }
1089 1089
1090 1090 itx->itx_wr_state = write_state;
1091 1091 if (write_state == WR_NEED_COPY)
1092 1092 itx->itx_sod += len;
1093 1093 lr->lr_foid = ZVOL_OBJ;
1094 1094 lr->lr_offset = off;
1095 1095 lr->lr_length = len;
1096 1096 lr->lr_blkoff = 0;
1097 1097 BP_ZERO(&lr->lr_blkptr);
1098 1098
1099 1099 itx->itx_private = zv;
1100 1100 itx->itx_sync = sync;
1101 1101
1102 1102 zil_itx_assign(zilog, itx, tx);
1103 1103
1104 1104 off += len;
1105 1105 resid -= len;
1106 1106 }
1107 1107 }
1108 1108
1109 1109 static int
1110 1110 zvol_dumpio_vdev(vdev_t *vd, void *addr, uint64_t offset, uint64_t origoffset,
1111 1111 uint64_t size, boolean_t doread, boolean_t isdump)
1112 1112 {
1113 1113 vdev_disk_t *dvd;
1114 1114 int c;
1115 1115 int numerrors = 0;
1116 1116
1117 1117 if (vd->vdev_ops == &vdev_mirror_ops ||
1118 1118 vd->vdev_ops == &vdev_replacing_ops ||
1119 1119 vd->vdev_ops == &vdev_spare_ops) {
1120 1120 for (c = 0; c < vd->vdev_children; c++) {
1121 1121 int err = zvol_dumpio_vdev(vd->vdev_child[c],
1122 1122 addr, offset, origoffset, size, doread, isdump);
1123 1123 if (err != 0) {
1124 1124 numerrors++;
1125 1125 } else if (doread) {
1126 1126 break;
1127 1127 }
1128 1128 }
1129 1129 }
1130 1130
1131 1131 if (!vd->vdev_ops->vdev_op_leaf && vd->vdev_ops != &vdev_raidz_ops)
1132 1132 return (numerrors < vd->vdev_children ? 0 : EIO);
1133 1133
1134 1134 if (doread && !vdev_readable(vd))
1135 1135 return (SET_ERROR(EIO));
1136 1136 else if (!doread && !vdev_writeable(vd))
1137 1137 return (SET_ERROR(EIO));
1138 1138
1139 1139 if (vd->vdev_ops == &vdev_raidz_ops) {
1140 1140 return (vdev_raidz_physio(vd,
1141 1141 addr, size, offset, origoffset, doread, isdump));
1142 1142 }
1143 1143
1144 1144 offset += VDEV_LABEL_START_SIZE;
1145 1145
1146 1146 if (ddi_in_panic() || isdump) {
1147 1147 ASSERT(!doread);
1148 1148 if (doread)
1149 1149 return (SET_ERROR(EIO));
1150 1150 dvd = vd->vdev_tsd;
1151 1151 ASSERT3P(dvd, !=, NULL);
1152 1152 return (ldi_dump(dvd->vd_lh, addr, lbtodb(offset),
1153 1153 lbtodb(size)));
1154 1154 } else {
1155 1155 dvd = vd->vdev_tsd;
1156 1156 ASSERT3P(dvd, !=, NULL);
1157 1157 return (vdev_disk_ldi_physio(dvd->vd_lh, addr, size,
1158 1158 offset, doread ? B_READ : B_WRITE));
1159 1159 }
1160 1160 }
1161 1161
1162 1162 static int
1163 1163 zvol_dumpio(zvol_state_t *zv, void *addr, uint64_t offset, uint64_t size,
1164 1164 boolean_t doread, boolean_t isdump)
1165 1165 {
1166 1166 vdev_t *vd;
1167 1167 int error;
1168 1168 zvol_extent_t *ze;
1169 1169 spa_t *spa = dmu_objset_spa(zv->zv_objset);
1170 1170
1171 1171 	/* Must be sector aligned, and not straddle a block boundary. */
1172 1172 if (P2PHASE(offset, DEV_BSIZE) || P2PHASE(size, DEV_BSIZE) ||
1173 1173 P2BOUNDARY(offset, size, zv->zv_volblocksize)) {
1174 1174 return (SET_ERROR(EINVAL));
1175 1175 }
1176 1176 ASSERT(size <= zv->zv_volblocksize);
1177 1177
1178 1178 /* Locate the extent this belongs to */
1179 1179 ze = list_head(&zv->zv_extents);
1180 1180 while (offset >= ze->ze_nblks * zv->zv_volblocksize) {
1181 1181 offset -= ze->ze_nblks * zv->zv_volblocksize;
1182 1182 ze = list_next(&zv->zv_extents, ze);
1183 1183 }
1184 1184
1185 1185 if (ze == NULL)
1186 1186 return (SET_ERROR(EINVAL));
1187 1187
1188 1188 if (!ddi_in_panic())
1189 1189 spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
1190 1190
1191 1191 vd = vdev_lookup_top(spa, DVA_GET_VDEV(&ze->ze_dva));
1192 1192 offset += DVA_GET_OFFSET(&ze->ze_dva);
1193 1193 error = zvol_dumpio_vdev(vd, addr, offset, DVA_GET_OFFSET(&ze->ze_dva),
1194 1194 size, doread, isdump);
1195 1195
1196 1196 if (!ddi_in_panic())
1197 1197 spa_config_exit(spa, SCL_STATE, FTAG);
1198 1198
1199 1199 return (error);
1200 1200 }
1201 1201
1202 1202 int
1203 1203 zvol_strategy(buf_t *bp)
1204 1204 {
1205 1205 zfs_soft_state_t *zs = NULL;
1206 1206 zvol_state_t *zv;
1207 1207 uint64_t off, volsize;
1208 1208 size_t resid;
1209 1209 char *addr;
1210 1210 objset_t *os;
1211 1211 rl_t *rl;
1212 1212 int error = 0;
1213 1213 boolean_t doread = bp->b_flags & B_READ;
1214 1214 boolean_t is_dumpified;
1215 1215 boolean_t sync;
1216 1216
1217 1217 if (getminor(bp->b_edev) == 0) {
1218 1218 error = SET_ERROR(EINVAL);
1219 1219 } else {
1220 1220 zs = ddi_get_soft_state(zfsdev_state, getminor(bp->b_edev));
1221 1221 if (zs == NULL)
1222 1222 error = SET_ERROR(ENXIO);
1223 1223 else if (zs->zss_type != ZSST_ZVOL)
1224 1224 error = SET_ERROR(EINVAL);
1225 1225 }
1226 1226
1227 1227 if (error) {
1228 1228 bioerror(bp, error);
1229 1229 biodone(bp);
1230 1230 return (0);
1231 1231 }
1232 1232
1233 1233 zv = zs->zss_data;
1234 1234
1235 1235 if (!(bp->b_flags & B_READ) && (zv->zv_flags & ZVOL_RDONLY)) {
1236 1236 bioerror(bp, EROFS);
1237 1237 biodone(bp);
1238 1238 return (0);
1239 1239 }
1240 1240
1241 1241 off = ldbtob(bp->b_blkno);
1242 1242 volsize = zv->zv_volsize;
1243 1243
1244 1244 os = zv->zv_objset;
1245 1245 ASSERT(os != NULL);
1246 1246
1247 1247 bp_mapin(bp);
1248 1248 addr = bp->b_un.b_addr;
1249 1249 resid = bp->b_bcount;
1250 1250
1251 1251 if (resid > 0 && (off < 0 || off >= volsize)) {
1252 1252 bioerror(bp, EIO);
1253 1253 biodone(bp);
1254 1254 return (0);
1255 1255 }
1256 1256
1257 1257 is_dumpified = zv->zv_flags & ZVOL_DUMPIFIED;
1258 1258 sync = ((!(bp->b_flags & B_ASYNC) &&
1259 1259 !(zv->zv_flags & ZVOL_WCE)) ||
1260 1260 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS)) &&
1261 1261 !doread && !is_dumpified;
1262 1262
1263 1263 /*
1264 1264 * There must be no buffer changes when doing a dmu_sync() because
1265 1265 * we can't change the data whilst calculating the checksum.
1266 1266 */
1267 1267 rl = zfs_range_lock(&zv->zv_znode, off, resid,
1268 1268 doread ? RL_READER : RL_WRITER);
1269 1269
1270 1270 while (resid != 0 && off < volsize) {
1271 1271 size_t size = MIN(resid, zvol_maxphys);
1272 1272 if (is_dumpified) {
1273 1273 size = MIN(size, P2END(off, zv->zv_volblocksize) - off);
1274 1274 error = zvol_dumpio(zv, addr, off, size,
1275 1275 doread, B_FALSE);
1276 1276 } else if (doread) {
1277 1277 error = dmu_read(os, ZVOL_OBJ, off, size, addr,
1278 1278 DMU_READ_PREFETCH);
1279 1279 } else {
1280 1280 dmu_tx_t *tx = dmu_tx_create(os);
1281 1281 dmu_tx_hold_write(tx, ZVOL_OBJ, off, size);
1282 1282 error = dmu_tx_assign(tx, TXG_WAIT);
1283 1283 if (error) {
1284 1284 dmu_tx_abort(tx);
1285 1285 } else {
1286 1286 dmu_write(os, ZVOL_OBJ, off, size, addr, tx);
1287 1287 zvol_log_write(zv, tx, off, size, sync);
1288 1288 dmu_tx_commit(tx);
1289 1289 }
1290 1290 }
1291 1291 if (error) {
1292 1292 /* convert checksum errors into IO errors */
1293 1293 if (error == ECKSUM)
1294 1294 error = SET_ERROR(EIO);
1295 1295 break;
1296 1296 }
1297 1297 off += size;
1298 1298 addr += size;
1299 1299 resid -= size;
1300 1300 }
1301 1301 zfs_range_unlock(rl);
1302 1302
1303 1303 if ((bp->b_resid = resid) == bp->b_bcount)
1304 1304 bioerror(bp, off > volsize ? EINVAL : error);
1305 1305
1306 1306 if (sync)
1307 1307 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1308 1308 biodone(bp);
1309 1309
1310 1310 return (0);
1311 1311 }
1312 1312
1313 1313 /*
1314 1314 * Set the buffer count to the zvol maximum transfer.
1315 1315 * Using our own routine instead of the default minphys()
1316 1316 * means that for larger writes we write bigger buffers on X86
1317 1317 * (128K instead of 56K) and flush the disk write cache less often
1318 1318  * (every zvol_maxphys - currently 5MB) instead of minphys (currently
1319 1319 * 56K on X86 and 128K on sparc).
1320 1320 */
1321 1321 void
1322 1322 zvol_minphys(struct buf *bp)
1323 1323 {
1324 1324 if (bp->b_bcount > zvol_maxphys)
1325 1325 bp->b_bcount = zvol_maxphys;
1326 1326 }
1327 1327
1328 1328 int
1329 1329 zvol_dump(dev_t dev, caddr_t addr, daddr_t blkno, int nblocks)
1330 1330 {
1331 1331 minor_t minor = getminor(dev);
1332 1332 zvol_state_t *zv;
1333 1333 int error = 0;
1334 1334 uint64_t size;
1335 1335 uint64_t boff;
1336 1336 uint64_t resid;
1337 1337
1338 1338 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1339 1339 if (zv == NULL)
1340 1340 return (SET_ERROR(ENXIO));
1341 1341
1342 1342 if ((zv->zv_flags & ZVOL_DUMPIFIED) == 0)
1343 1343 return (SET_ERROR(EINVAL));
1344 1344
1345 1345 boff = ldbtob(blkno);
1346 1346 resid = ldbtob(nblocks);
1347 1347
1348 1348 VERIFY3U(boff + resid, <=, zv->zv_volsize);
1349 1349
1350 1350 while (resid) {
1351 1351 size = MIN(resid, P2END(boff, zv->zv_volblocksize) - boff);
1352 1352 error = zvol_dumpio(zv, addr, boff, size, B_FALSE, B_TRUE);
1353 1353 if (error)
1354 1354 break;
1355 1355 boff += size;
1356 1356 addr += size;
1357 1357 resid -= size;
1358 1358 }
1359 1359
1360 1360 return (error);
1361 1361 }
1362 1362
1363 1363 /*ARGSUSED*/
1364 1364 int
1365 1365 zvol_read(dev_t dev, uio_t *uio, cred_t *cr)
1366 1366 {
1367 1367 minor_t minor = getminor(dev);
1368 1368 zvol_state_t *zv;
1369 1369 uint64_t volsize;
1370 1370 rl_t *rl;
1371 1371 int error = 0;
1372 1372
1373 1373 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1374 1374 if (zv == NULL)
1375 1375 return (SET_ERROR(ENXIO));
1376 1376
1377 1377 volsize = zv->zv_volsize;
1378 1378 if (uio->uio_resid > 0 &&
1379 1379 (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1380 1380 return (SET_ERROR(EIO));
1381 1381
1382 1382 if (zv->zv_flags & ZVOL_DUMPIFIED) {
1383 1383 error = physio(zvol_strategy, NULL, dev, B_READ,
1384 1384 zvol_minphys, uio);
1385 1385 return (error);
1386 1386 }
1387 1387
1388 1388 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1389 1389 RL_READER);
1390 1390 while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1391 1391 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1392 1392
1393 1393 /* don't read past the end */
1394 1394 if (bytes > volsize - uio->uio_loffset)
1395 1395 bytes = volsize - uio->uio_loffset;
1396 1396
1397 1397 error = dmu_read_uio(zv->zv_objset, ZVOL_OBJ, uio, bytes);
1398 1398 if (error) {
1399 1399 /* convert checksum errors into IO errors */
1400 1400 if (error == ECKSUM)
1401 1401 error = SET_ERROR(EIO);
1402 1402 break;
1403 1403 }
1404 1404 }
1405 1405 zfs_range_unlock(rl);
1406 1406 return (error);
1407 1407 }
1408 1408
1409 1409 /*ARGSUSED*/
1410 1410 int
1411 1411 zvol_write(dev_t dev, uio_t *uio, cred_t *cr)
1412 1412 {
1413 1413 minor_t minor = getminor(dev);
1414 1414 zvol_state_t *zv;
1415 1415 uint64_t volsize;
1416 1416 rl_t *rl;
1417 1417 int error = 0;
1418 1418 boolean_t sync;
1419 1419
1420 1420 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1421 1421 if (zv == NULL)
1422 1422 return (SET_ERROR(ENXIO));
1423 1423
1424 1424 volsize = zv->zv_volsize;
1425 1425 if (uio->uio_resid > 0 &&
1426 1426 (uio->uio_loffset < 0 || uio->uio_loffset >= volsize))
1427 1427 return (SET_ERROR(EIO));
1428 1428
1429 1429 if (zv->zv_flags & ZVOL_DUMPIFIED) {
1430 1430 error = physio(zvol_strategy, NULL, dev, B_WRITE,
1431 1431 zvol_minphys, uio);
1432 1432 return (error);
1433 1433 }
1434 1434
1435 1435 sync = !(zv->zv_flags & ZVOL_WCE) ||
1436 1436 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS);
1437 1437
1438 1438 rl = zfs_range_lock(&zv->zv_znode, uio->uio_loffset, uio->uio_resid,
1439 1439 RL_WRITER);
1440 1440 while (uio->uio_resid > 0 && uio->uio_loffset < volsize) {
1441 1441 uint64_t bytes = MIN(uio->uio_resid, DMU_MAX_ACCESS >> 1);
1442 1442 uint64_t off = uio->uio_loffset;
1443 1443 dmu_tx_t *tx = dmu_tx_create(zv->zv_objset);
1444 1444
1445 1445 if (bytes > volsize - off) /* don't write past the end */
1446 1446 bytes = volsize - off;
1447 1447
1448 1448 dmu_tx_hold_write(tx, ZVOL_OBJ, off, bytes);
1449 1449 error = dmu_tx_assign(tx, TXG_WAIT);
1450 1450 if (error) {
1451 1451 dmu_tx_abort(tx);
1452 1452 break;
1453 1453 }
1454 1454 error = dmu_write_uio_dbuf(zv->zv_dbuf, uio, bytes, tx);
1455 1455 if (error == 0)
1456 1456 zvol_log_write(zv, tx, off, bytes, sync);
1457 1457 dmu_tx_commit(tx);
1458 1458
1459 1459 if (error)
1460 1460 break;
1461 1461 }
1462 1462 zfs_range_unlock(rl);
1463 1463 if (sync)
1464 1464 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1465 1465 return (error);
1466 1466 }
1467 1467
1468 1468 int
1469 1469 zvol_getefi(void *arg, int flag, uint64_t vs, uint8_t bs)
1470 1470 {
1471 1471 struct uuid uuid = EFI_RESERVED;
1472 1472 efi_gpe_t gpe = { 0 };
1473 1473 uint32_t crc;
1474 1474 dk_efi_t efi;
1475 1475 int length;
1476 1476 char *ptr;
1477 1477
1478 1478 if (ddi_copyin(arg, &efi, sizeof (dk_efi_t), flag))
1479 1479 return (SET_ERROR(EFAULT));
1480 1480 ptr = (char *)(uintptr_t)efi.dki_data_64;
1481 1481 length = efi.dki_length;
1482 1482 /*
1483 1483 * Some clients may attempt to request a PMBR for the
1484 1484 * zvol. Currently this interface will return EINVAL to
1485 1485 * such requests. These requests could be supported by
1486 1486 * adding a check for lba == 0 and consing up an appropriate
1487 1487 * PMBR.
1488 1488 */
1489 1489 if (efi.dki_lba < 1 || efi.dki_lba > 2 || length <= 0)
1490 1490 return (SET_ERROR(EINVAL));
1491 1491
1492 1492 gpe.efi_gpe_StartingLBA = LE_64(34ULL);
1493 1493 gpe.efi_gpe_EndingLBA = LE_64((vs >> bs) - 1);
1494 1494 UUID_LE_CONVERT(gpe.efi_gpe_PartitionTypeGUID, uuid);
1495 1495
1496 1496 if (efi.dki_lba == 1) {
1497 1497 efi_gpt_t gpt = { 0 };
1498 1498
1499 1499 gpt.efi_gpt_Signature = LE_64(EFI_SIGNATURE);
1500 1500 gpt.efi_gpt_Revision = LE_32(EFI_VERSION_CURRENT);
1501 1501 gpt.efi_gpt_HeaderSize = LE_32(sizeof (gpt));
1502 1502 gpt.efi_gpt_MyLBA = LE_64(1ULL);
1503 1503 gpt.efi_gpt_FirstUsableLBA = LE_64(34ULL);
1504 1504 gpt.efi_gpt_LastUsableLBA = LE_64((vs >> bs) - 1);
1505 1505 gpt.efi_gpt_PartitionEntryLBA = LE_64(2ULL);
1506 1506 gpt.efi_gpt_NumberOfPartitionEntries = LE_32(1);
1507 1507 gpt.efi_gpt_SizeOfPartitionEntry =
1508 1508 LE_32(sizeof (efi_gpe_t));
1509 1509 CRC32(crc, &gpe, sizeof (gpe), -1U, crc32_table);
1510 1510 gpt.efi_gpt_PartitionEntryArrayCRC32 = LE_32(~crc);
1511 1511 CRC32(crc, &gpt, sizeof (gpt), -1U, crc32_table);
1512 1512 gpt.efi_gpt_HeaderCRC32 = LE_32(~crc);
1513 1513 if (ddi_copyout(&gpt, ptr, MIN(sizeof (gpt), length),
1514 1514 flag))
1515 1515 return (SET_ERROR(EFAULT));
1516 1516 ptr += sizeof (gpt);
1517 1517 length -= sizeof (gpt);
1518 1518 }
1519 1519 if (length > 0 && ddi_copyout(&gpe, ptr, MIN(sizeof (gpe),
1520 1520 length), flag))
1521 1521 return (SET_ERROR(EFAULT));
1522 1522 return (0);
1523 1523 }
1524 1524
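
To illustrate the emulated label, a userland caller can fetch the synthesized
GPT header at LBA 1. A sketch, assuming an LP64 process (so that dki_data
aliases dki_data_64) and a hypothetical device path:

#include <sys/types.h>
#include <sys/dkio.h>
#include <sys/efi_partition.h>
#include <sys/byteorder.h>
#include <fcntl.h>
#include <stdio.h>
#include <stropts.h>
#include <unistd.h>

int
main(void)
{
	efi_gpt_t gpt;
	dk_efi_t efi;
	int fd;

	fd = open("/dev/zvol/rdsk/tank/vol", O_RDONLY);
	if (fd == -1) {
		perror("open");
		return (1);
	}
	efi.dki_lba = 1;		/* the GPT header */
	efi.dki_length = sizeof (gpt);
	efi.dki_data = &gpt;
	if (ioctl(fd, DKIOCGETEFI, &efi) == -1) {
		perror("DKIOCGETEFI");
		(void) close(fd);
		return (1);
	}
	/* on-disk GPT fields are little-endian */
	(void) printf("last usable LBA %llu\n",
	    (u_longlong_t)LE_64(gpt.efi_gpt_LastUsableLBA));
	(void) close(fd);
	return (0);
}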
1525 1525 /*
1526 1526 * BEGIN entry points to allow external callers access to the volume.
1527 1527 */
1528 1528 /*
1529 1529 * Return the volume parameters needed for access from an external caller.
1530 1530 * These values are invariant as long as the volume is held open.
1531 1531 */
1532 1532 int
1533 1533 zvol_get_volume_params(minor_t minor, uint64_t *blksize,
1534 1534 uint64_t *max_xfer_len, void **minor_hdl, void **objset_hdl, void **zil_hdl,
1535 1535 void **rl_hdl, void **bonus_hdl)
1536 1536 {
1537 1537 zvol_state_t *zv;
1538 1538
1539 1539 zv = zfsdev_get_soft_state(minor, ZSST_ZVOL);
1540 1540 if (zv == NULL)
1541 1541 return (SET_ERROR(ENXIO));
1542 1542 if (zv->zv_flags & ZVOL_DUMPIFIED)
1543 1543 return (SET_ERROR(ENXIO));
1544 1544
1545 1545 ASSERT(blksize && max_xfer_len && minor_hdl &&
1546 1546 objset_hdl && zil_hdl && rl_hdl && bonus_hdl);
1547 1547
1548 1548 *blksize = zv->zv_volblocksize;
1549 1549 *max_xfer_len = (uint64_t)zvol_maxphys;
1550 1550 *minor_hdl = zv;
1551 1551 *objset_hdl = zv->zv_objset;
1552 1552 *zil_hdl = zv->zv_zilog;
1553 1553 *rl_hdl = &zv->zv_znode;
1554 1554 *bonus_hdl = zv->zv_dbuf;
1555 1555 return (0);
1556 1556 }
1557 1557
1558 1558 /*
1559 1559 * Return the current volume size to an external caller.
1560 1560 * The size can change while the volume is open.
1561 1561 */
1562 1562 uint64_t
1563 1563 zvol_get_volume_size(void *minor_hdl)
1564 1564 {
1565 1565 zvol_state_t *zv = minor_hdl;
1566 1566
1567 1567 return (zv->zv_volsize);
1568 1568 }
1569 1569
1570 1570 /*
1571 1571 * Return the current WCE setting to an external caller.
1572 1572 * The WCE setting can change while the volume is open.
1573 1573 */
1574 1574 int
1575 1575 zvol_get_volume_wce(void *minor_hdl)
1576 1576 {
1577 1577 zvol_state_t *zv = minor_hdl;
1578 1578
1579 1579 return ((zv->zv_flags & ZVOL_WCE) ? 1 : 0);
1580 1580 }
1581 1581
1582 1582 /*
1583 1583 * Entry point for external callers to zvol_log_write
1584 1584 */
1585 1585 void
1586 1586 zvol_log_write_minor(void *minor_hdl, dmu_tx_t *tx, offset_t off, ssize_t resid,
1587 1587 boolean_t sync)
1588 1588 {
1589 1589 zvol_state_t *zv = minor_hdl;
1590 1590
1591 1591 zvol_log_write(zv, tx, off, resid, sync);
1592 1592 }
1593 1593 /*
1594 1594 * END entry points to allow external callers access to the volume.
1595 1595 */
1596 1596
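
As a sketch of how an external in-kernel consumer (for example a SCSI-target
backend) might use these entry points; the function name below is hypothetical
and error handling is abbreviated:

static int
zvol_demo_cache_params(minor_t minor)
{
	uint64_t blksize, max_xfer_len;
	void *minor_hdl, *objset_hdl, *zil_hdl, *rl_hdl, *bonus_hdl;
	int error;

	/* these are invariant for as long as the volume is held open */
	error = zvol_get_volume_params(minor, &blksize, &max_xfer_len,
	    &minor_hdl, &objset_hdl, &zil_hdl, &rl_hdl, &bonus_hdl);
	if (error != 0)
		return (error);

	/* volsize can change while open; re-query it on each use */
	(void) zvol_get_volume_size(minor_hdl);
	if (zvol_get_volume_wce(minor_hdl) == 0) {
		/* write cache disabled: commit writes synchronously */
	}
	return (0);
}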
1597 1597 /*
1598 1598 * Log a DKIOCFREE/free-long-range to the ZIL with TX_TRUNCATE.
1599 1599 */
1600 1600 static void
1601 1601 zvol_log_truncate(zvol_state_t *zv, dmu_tx_t *tx, uint64_t off, uint64_t len,
1602 1602 boolean_t sync)
1603 1603 {
1604 1604 itx_t *itx;
1605 1605 lr_truncate_t *lr;
1606 1606 zilog_t *zilog = zv->zv_zilog;
1607 1607
1608 1608 if (zil_replaying(zilog, tx))
1609 1609 return;
1610 1610
1611 1611 itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
1612 1612 lr = (lr_truncate_t *)&itx->itx_lr;
1613 1613 lr->lr_foid = ZVOL_OBJ;
1614 1614 lr->lr_offset = off;
1615 1615 lr->lr_length = len;
1616 1616
1617 1617 itx->itx_sync = sync;
1618 1618 zil_itx_assign(zilog, itx, tx);
1619 1619 }
1620 1620
1621 1621 /*
1622 1622 * Dirtbag ioctls to support mkfs(1M) for UFS filesystems. See dkio(7I).
1623 1623 * Also a dirtbag dkio ioctl for unmap/free-block functionality.
1624 1624 */
1625 1625 /*ARGSUSED*/
1626 1626 int
1627 1627 zvol_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cr, int *rvalp)
1628 1628 {
1629 1629 zvol_state_t *zv;
1630 - struct dk_cinfo dki;
1631 - struct dk_minfo dkm;
1632 1630 struct dk_callback *dkc;
1633 1631 int error = 0;
1634 1632 rl_t *rl;
1635 1633
1636 1634 mutex_enter(&zfsdev_state_lock);
1637 1635
1638 1636 zv = zfsdev_get_soft_state(getminor(dev), ZSST_ZVOL);
1639 1637
1640 1638 if (zv == NULL) {
1641 1639 mutex_exit(&zfsdev_state_lock);
1642 1640 return (SET_ERROR(ENXIO));
1643 1641 }
1644 1642 ASSERT(zv->zv_total_opens > 0);
1645 1643
1646 1644 switch (cmd) {
1647 1645
1648 1646 case DKIOCINFO:
1647 + {
1648 + struct dk_cinfo dki;
1649 +
1649 1650 bzero(&dki, sizeof (dki));
1650 1651 (void) strcpy(dki.dki_cname, "zvol");
1651 1652 (void) strcpy(dki.dki_dname, "zvol");
1652 1653 dki.dki_ctype = DKC_UNKNOWN;
1653 1654 dki.dki_unit = getminor(dev);
1654 1655 dki.dki_maxtransfer = 1 << (SPA_MAXBLOCKSHIFT - zv->zv_min_bs);
1655 1656 mutex_exit(&zfsdev_state_lock);
1656 1657 if (ddi_copyout(&dki, (void *)arg, sizeof (dki), flag))
1657 1658 error = SET_ERROR(EFAULT);
1658 1659 return (error);
1660 + }
1659 1661
1660 1662 case DKIOCGMEDIAINFO:
1663 + {
1664 + struct dk_minfo dkm;
1665 +
1661 1666 bzero(&dkm, sizeof (dkm));
1662 1667 dkm.dki_lbsize = 1U << zv->zv_min_bs;
1663 1668 dkm.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1664 1669 dkm.dki_media_type = DK_UNKNOWN;
1665 1670 mutex_exit(&zfsdev_state_lock);
1666 1671 if (ddi_copyout(&dkm, (void *)arg, sizeof (dkm), flag))
1667 1672 error = SET_ERROR(EFAULT);
1668 1673 return (error);
1674 + }
1669 1675
1676 + case DKIOCGMEDIAINFOEXT:
1677 + {
1678 + struct dk_minfo_ext dkmext;
1679 +
1680 + bzero(&dkmext, sizeof (dkmext));
1681 + dkmext.dki_lbsize = 1U << zv->zv_min_bs;
1682 + dkmext.dki_pbsize = zv->zv_volblocksize;
1683 + dkmext.dki_capacity = zv->zv_volsize >> zv->zv_min_bs;
1684 + dkmext.dki_media_type = DK_UNKNOWN;
1685 + mutex_exit(&zfsdev_state_lock);
1686 + if (ddi_copyout(&dkmext, (void *)arg, sizeof (dkmext), flag))
1687 + error = SET_ERROR(EFAULT);
1688 + return (error);
1689 + }
1690 +
1670 1691 case DKIOCGETEFI:
1671 - {
1672 - uint64_t vs = zv->zv_volsize;
1673 - uint8_t bs = zv->zv_min_bs;
1692 + {
1693 + uint64_t vs = zv->zv_volsize;
1694 + uint8_t bs = zv->zv_min_bs;
1674 1695
1675 - mutex_exit(&zfsdev_state_lock);
1676 - error = zvol_getefi((void *)arg, flag, vs, bs);
1677 - return (error);
1678 - }
1696 + mutex_exit(&zfsdev_state_lock);
1697 + error = zvol_getefi((void *)arg, flag, vs, bs);
1698 + return (error);
1699 + }
1679 1700
1680 1701 case DKIOCFLUSHWRITECACHE:
1681 1702 dkc = (struct dk_callback *)arg;
1682 1703 mutex_exit(&zfsdev_state_lock);
1683 1704 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1684 1705 if ((flag & FKIOCTL) && dkc != NULL && dkc->dkc_callback) {
1685 1706 (*dkc->dkc_callback)(dkc->dkc_cookie, error);
1686 1707 error = 0;
1687 1708 }
1688 1709 return (error);
1689 1710
1690 1711 case DKIOCGETWCE:
1691 - {
1692 - int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1693 - if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1694 - flag))
1695 - error = SET_ERROR(EFAULT);
1712 + {
1713 + int wce = (zv->zv_flags & ZVOL_WCE) ? 1 : 0;
1714 + if (ddi_copyout(&wce, (void *)arg, sizeof (int),
1715 + flag))
1716 + error = SET_ERROR(EFAULT);
1717 + break;
1718 + }
1719 + case DKIOCSETWCE:
1720 + {
1721 + int wce;
1722 + if (ddi_copyin((void *)arg, &wce, sizeof (int),
1723 + flag)) {
1724 + error = SET_ERROR(EFAULT);
1696 1725 break;
1697 1726 }
1698 - case DKIOCSETWCE:
1699 - {
1700 - int wce;
1701 - if (ddi_copyin((void *)arg, &wce, sizeof (int),
1702 - flag)) {
1703 - error = SET_ERROR(EFAULT);
1704 - break;
1705 - }
1706 - if (wce) {
1707 - zv->zv_flags |= ZVOL_WCE;
1708 - mutex_exit(&zfsdev_state_lock);
1709 - } else {
1710 - zv->zv_flags &= ~ZVOL_WCE;
1711 - mutex_exit(&zfsdev_state_lock);
1712 - zil_commit(zv->zv_zilog, ZVOL_OBJ);
1713 - }
1714 - return (0);
1727 + if (wce) {
1728 + zv->zv_flags |= ZVOL_WCE;
1729 + mutex_exit(&zfsdev_state_lock);
1730 + } else {
1731 + zv->zv_flags &= ~ZVOL_WCE;
1732 + mutex_exit(&zfsdev_state_lock);
1733 + zil_commit(zv->zv_zilog, ZVOL_OBJ);
1715 1734 }
1735 + return (0);
1736 + }
1716 1737
1717 1738 case DKIOCGGEOM:
1718 1739 case DKIOCGVTOC:
1719 1740 /*
1720 1741 * commands using these (like prtvtoc) expect ENOTSUP
1721 1742 * since we're emulating an EFI label
1722 1743 */
1723 1744 error = SET_ERROR(ENOTSUP);
1724 1745 break;
1725 1746
1726 1747 case DKIOCDUMPINIT:
1727 1748 rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1728 1749 RL_WRITER);
1729 1750 error = zvol_dumpify(zv);
1730 1751 zfs_range_unlock(rl);
1731 1752 break;
1732 1753
1733 1754 case DKIOCDUMPFINI:
1734 1755 if (!(zv->zv_flags & ZVOL_DUMPIFIED))
1735 1756 break;
1736 1757 rl = zfs_range_lock(&zv->zv_znode, 0, zv->zv_volsize,
1737 1758 RL_WRITER);
1738 1759 error = zvol_dump_fini(zv);
1739 1760 zfs_range_unlock(rl);
1740 1761 break;
1741 1762
1742 1763 case DKIOCFREE:
1743 1764 {
1744 1765 dkioc_free_t df;
1745 1766 dmu_tx_t *tx;
1746 1767
1747 1768 if (ddi_copyin((void *)arg, &df, sizeof (df), flag)) {
1748 1769 error = SET_ERROR(EFAULT);
1749 1770 break;
1750 1771 }
1751 1772
1752 1773 /*
1753 1774 * Apply Postel's Law to length-checking. If the caller
1754 1775 * overshoots, just blank out to the end of the volume,
1755 1776 * if there's anything to blank out at all.
1756 1777 */
1757 1778 if (df.df_start >= zv->zv_volsize)
1758 1779 break; /* No need to do anything... */
1759 1780 if (df.df_start + df.df_length > zv->zv_volsize)
1760 1781 df.df_length = DMU_OBJECT_END;
1761 1782
1762 1783 rl = zfs_range_lock(&zv->zv_znode, df.df_start, df.df_length,
1763 1784 RL_WRITER);
1764 1785 tx = dmu_tx_create(zv->zv_objset);
1765 1786 error = dmu_tx_assign(tx, TXG_WAIT);
1766 1787 if (error != 0) {
1767 1788 dmu_tx_abort(tx);
1768 1789 } else {
1769 1790 zvol_log_truncate(zv, tx, df.df_start,
1770 1791 df.df_length, B_TRUE);
1771 1792 dmu_tx_commit(tx);
1772 1793 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ,
1773 1794 df.df_start, df.df_length);
1774 1795 }
1775 1796
1776 1797 zfs_range_unlock(rl);
1777 1798
1778 1799 if (error == 0) {
1779 1800 /*
1780 1801 * If the write-cache is disabled, or the 'sync' property
1781 1802 * is set to 'always', then treat this as a synchronous
1782 1803 * operation (i.e., commit to the ZIL).
1783 1804 */
1784 1805 if (!(zv->zv_flags & ZVOL_WCE) ||
1785 1806 (zv->zv_objset->os_sync == ZFS_SYNC_ALWAYS))
1786 1807 zil_commit(zv->zv_zilog, ZVOL_OBJ);
1787 1808
1788 1809 /*
1789 1810 * If the caller really wants this free to be synchronous
1790 1811 * and can't wait for it to happen later, don't return
1791 1812 * until the change has been synced out.
1792 1813 */
1793 1814 if (df.df_flags & DF_WAIT_SYNC) {
1794 1815 txg_wait_synced(
1795 1816 dmu_objset_pool(zv->zv_objset), 0);
1796 1817 }
1797 1818 }
1798 1819 break;
1799 1820 }
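The DKIOCFREE handler punches holes in the volume (TRIM/UNMAP-style). Per the Postel's Law comment, a df_start at or beyond the end of the volume is silently ignored, and a range running past the end is clamped: on a 1 GiB zvol, a df_start of 512 MiB with a df_length of 1 GiB simply frees from 512 MiB to the end. A sketch of issuing it from userland; dkioc_free_t and DF_WAIT_SYNC are from <sys/dkio.h>, and the device path is hypothetical:

#include <sys/types.h>
#include <sys/dkio.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int
main(void)
{
	int fd = open("/dev/zvol/rdsk/tank/vol1", O_RDWR);	/* hypothetical */
	dkioc_free_t df;

	if (fd == -1) {
		perror("open");
		return (1);
	}
	(void) memset(&df, 0, sizeof (df));
	df.df_start = 0;		/* byte offset into the volume */
	df.df_length = 1024 * 1024;	/* free the first 1 MiB */
	df.df_flags = DF_WAIT_SYNC;	/* force txg_wait_synced() before return */
	if (ioctl(fd, DKIOCFREE, &df) == -1)
		perror("DKIOCFREE");
	(void) close(fd);
	return (0);
}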
1800 1821
1801 1822 default:
1802 1823 error = SET_ERROR(ENOTTY);
1803 1824 break;
1804 1825
1805 1826 }
1806 1827 mutex_exit(&zfsdev_state_lock);
1807 1828 return (error);
1808 1829 }
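A pattern worth noting across zvol_ioctl() as a whole: zfsdev_state_lock is held when the switch is entered, so every case that returns directly must call mutex_exit() itself, while cases that break fall through to the single mutex_exit() just before the final return. A generic pthreads sketch of that shape (illustrative only, not illumos code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirror of the ioctl switch shape: the lock is held on entry to the switch. */
static int
ioctl_like(int cmd)
{
	int error = 0;

	(void) pthread_mutex_lock(&state_lock);
	switch (cmd) {
	case 0:
		/* Early-return path: must unlock before leaving. */
		(void) pthread_mutex_unlock(&state_lock);
		return (error);
	default:
		error = 25;	/* stand-in for ENOTTY */
		break;		/* falls through to the common unlock */
	}
	(void) pthread_mutex_unlock(&state_lock);
	return (error);
}

int
main(void)
{
	(void) printf("%d %d\n", ioctl_like(0), ioctl_like(1));
	return (0);
}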
1809 1830
1810 1831 int
1811 1832 zvol_busy(void)
1812 1833 {
1813 1834 return (zvol_minors != 0);
1814 1835 }
1815 1836
1816 1837 void
1817 1838 zvol_init(void)
1818 1839 {
1819 1840 VERIFY(ddi_soft_state_init(&zfsdev_state, sizeof (zfs_soft_state_t),
1820 1841 1) == 0);
1821 1842 mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
1822 1843 }
1823 1844
1824 1845 void
1825 1846 zvol_fini(void)
1826 1847 {
1827 1848 mutex_destroy(&zfsdev_state_lock);
1828 1849 ddi_soft_state_fini(&zfsdev_state);
1829 1850 }
1830 1851
1831 1852 /*ARGSUSED*/
1832 1853 static int
1833 1854 zfs_mvdev_dump_feature_check(void *arg, dmu_tx_t *tx)
1834 1855 {
1835 1856 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1836 1857
1837 1858 if (spa_feature_is_active(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1838 1859 return (1);
1839 1860 return (0);
1840 1861 }
1841 1862
1842 1863 /*ARGSUSED*/
1843 1864 static void
1844 1865 zfs_mvdev_dump_activate_feature_sync(void *arg, dmu_tx_t *tx)
1845 1866 {
1846 1867 spa_t *spa = dmu_tx_pool(tx)->dp_spa;
1847 1868
1848 1869 spa_feature_incr(spa, SPA_FEATURE_MULTI_VDEV_CRASH_DUMP, tx);
1849 1870 }
1850 1871
1851 1872 static int
1852 1873 zvol_dump_init(zvol_state_t *zv, boolean_t resize)
1853 1874 {
1854 1875 dmu_tx_t *tx;
1855 1876 int error;
1856 1877 objset_t *os = zv->zv_objset;
1857 1878 spa_t *spa = dmu_objset_spa(os);
1858 1879 vdev_t *vd = spa->spa_root_vdev;
1859 1880 nvlist_t *nv = NULL;
1860 1881 uint64_t version = spa_version(spa);
1861 1882 enum zio_checksum checksum;
1862 1883
1863 1884 ASSERT(MUTEX_HELD(&zfsdev_state_lock));
1864 1885 ASSERT(vd->vdev_ops == &vdev_root_ops);
1865 1886
1866 1887 error = dmu_free_long_range(zv->zv_objset, ZVOL_OBJ, 0,
1867 1888 DMU_OBJECT_END);
1868 1889 /* wait for dmu_free_long_range to actually free the blocks */
1869 1890 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
1870 1891
1871 1892 /*
1872 1893 * If the pool on which the dump device is being initialized has more
1873 1894 * than one child vdev, check that the MULTI_VDEV_CRASH_DUMP feature is
1874 1895 * enabled. If so, bump that feature's counter to indicate that the
1875 1896 * feature is active. We also check the vdev type to handle the
1876 1897 * following case:
1877 1898 * # zpool create test raidz disk1 disk2 disk3
1878 1899 * Here spa_root_vdev->vdev_children == 1 (the raidz vdev),
1879 1900 * but the raidz vdev itself has 3 children.
1880 1901 */
1881 1902 if (vd->vdev_children > 1 || vd->vdev_ops == &vdev_raidz_ops) {
1882 1903 if (!spa_feature_is_enabled(spa,
1883 1904 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP))
1884 1905 return (SET_ERROR(ENOTSUP));
1885 1906 (void) dsl_sync_task(spa_name(spa),
1886 1907 zfs_mvdev_dump_feature_check,
1887 1908 zfs_mvdev_dump_activate_feature_sync, NULL, 2);
1888 1909 }
1889 1910
1890 1911 tx = dmu_tx_create(os);
1891 1912 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
1892 1913 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
1893 1914 error = dmu_tx_assign(tx, TXG_WAIT);
1894 1915 if (error) {
1895 1916 dmu_tx_abort(tx);
1896 1917 return (error);
1897 1918 }
1898 1919
1899 1920 /*
1900 1921 * If MULTI_VDEV_CRASH_DUMP is active, use the NOPARITY checksum
1901 1922 * function. Otherwise, use the old default -- OFF.
1902 1923 */
1903 1924 checksum = spa_feature_is_active(spa,
1904 1925 SPA_FEATURE_MULTI_VDEV_CRASH_DUMP) ? ZIO_CHECKSUM_NOPARITY :
1905 1926 ZIO_CHECKSUM_OFF;
1906 1927
1907 1928 /*
1908 1929 * If we are resizing the dump device, then we only need to
1909 1930 * update the refreservation to match the new zvol size.
1910 1931 * Otherwise, we save off the zvol's original property settings
1911 1932 * so that we can restore them if the zvol is ever undumpified.
1912 1933 */
1913 1934 if (resize) {
1914 1935 error = zap_update(os, ZVOL_ZAP_OBJ,
1915 1936 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1916 1937 &zv->zv_volsize, tx);
1917 1938 } else {
1918 1939 uint64_t checksum, compress, refresrv, vbs, dedup;
1919 1940
1920 1941 error = dsl_prop_get_integer(zv->zv_name,
1921 1942 zfs_prop_to_name(ZFS_PROP_COMPRESSION), &compress, NULL);
1922 1943 error = error ? error : dsl_prop_get_integer(zv->zv_name,
1923 1944 zfs_prop_to_name(ZFS_PROP_CHECKSUM), &checksum, NULL);
1924 1945 error = error ? error : dsl_prop_get_integer(zv->zv_name,
1925 1946 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &refresrv, NULL);
1926 1947 error = error ? error : dsl_prop_get_integer(zv->zv_name,
1927 1948 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), &vbs, NULL);
1928 1949 if (version >= SPA_VERSION_DEDUP) {
1929 1950 error = error ? error :
1930 1951 dsl_prop_get_integer(zv->zv_name,
1931 1952 zfs_prop_to_name(ZFS_PROP_DEDUP), &dedup, NULL);
1932 1953 }
1933 1954
1934 1955 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1935 1956 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1,
1936 1957 &compress, tx);
1937 1958 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1938 1959 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum, tx);
1939 1960 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1940 1961 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1,
1941 1962 &refresrv, tx);
1942 1963 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1943 1964 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1,
1944 1965 &vbs, tx);
1945 1966 error = error ? error : dmu_object_set_blocksize(
1946 1967 os, ZVOL_OBJ, SPA_MAXBLOCKSIZE, 0, tx);
1947 1968 if (version >= SPA_VERSION_DEDUP) {
1948 1969 error = error ? error : zap_update(os, ZVOL_ZAP_OBJ,
1949 1970 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1,
1950 1971 &dedup, tx);
1951 1972 }
1952 1973 if (error == 0)
1953 1974 zv->zv_volblocksize = SPA_MAXBLOCKSIZE;
1954 1975 }
1955 1976 dmu_tx_commit(tx);
1956 1977
1957 1978 /*
1958 1979 * We only need to update the zvol's properties if we are
1959 1980 * initializing the dump area for the first time.
1960 1981 */
1961 1982 if (!resize) {
1962 1983 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
1963 1984 VERIFY(nvlist_add_uint64(nv,
1964 1985 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 0) == 0);
1965 1986 VERIFY(nvlist_add_uint64(nv,
1966 1987 zfs_prop_to_name(ZFS_PROP_COMPRESSION),
1967 1988 ZIO_COMPRESS_OFF) == 0);
1968 1989 VERIFY(nvlist_add_uint64(nv,
1969 1990 zfs_prop_to_name(ZFS_PROP_CHECKSUM),
1970 1991 checksum) == 0);
1971 1992 if (version >= SPA_VERSION_DEDUP) {
1972 1993 VERIFY(nvlist_add_uint64(nv,
1973 1994 zfs_prop_to_name(ZFS_PROP_DEDUP),
1974 1995 ZIO_CHECKSUM_OFF) == 0);
1975 1996 }
1976 1997
1977 1998 error = zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
1978 1999 nv, NULL);
1979 2000 nvlist_free(nv);
1980 2001
1981 2002 if (error)
1982 2003 return (error);
1983 2004 }
1984 2005
1985 2006 /* Allocate the space for the dump */
1986 2007 error = zvol_prealloc(zv);
1987 2008 return (error);
1988 2009 }
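The long "error = error ? error : ..." chains in zvol_dump_init() above are a first-error-wins idiom: the ternary short-circuits, so once any step fails, no later call is evaluated and the first errno survives to the single check at the end. A self-contained sketch of the pattern (the step functions are made up for illustration):

#include <stdio.h>

static int step_one(void)   { return (0); }	/* succeeds */
static int step_two(void)   { return (22); }	/* fails (EINVAL on illumos) */
static int step_three(void) { return (0); }	/* never evaluated after a failure */

int
main(void)
{
	int error;

	error = step_one();
	error = error ? error : step_two();	/* runs; error becomes 22 */
	error = error ? error : step_three();	/* short-circuits: not called */
	(void) printf("error = %d\n", error);	/* prints "error = 22" */
	return (error != 0);
}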
1989 2010
1990 2011 static int
1991 2012 zvol_dumpify(zvol_state_t *zv)
1992 2013 {
1993 2014 int error = 0;
1994 2015 uint64_t dumpsize = 0;
1995 2016 dmu_tx_t *tx;
1996 2017 objset_t *os = zv->zv_objset;
1997 2018
1998 2019 if (zv->zv_flags & ZVOL_RDONLY)
1999 2020 return (SET_ERROR(EROFS));
2000 2021
2001 2022 if (zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE,
2002 2023 8, 1, &dumpsize) != 0 || dumpsize != zv->zv_volsize) {
2003 2024 boolean_t resize = (dumpsize > 0);
2004 2025
2005 2026 if ((error = zvol_dump_init(zv, resize)) != 0) {
2006 2027 (void) zvol_dump_fini(zv);
2007 2028 return (error);
2008 2029 }
2009 2030 }
2010 2031
2011 2032 /*
2012 2033 * Build up our LBA (logical block address) mapping.
2013 2034 */
2014 2035 error = zvol_get_lbas(zv);
2015 2036 if (error) {
2016 2037 (void) zvol_dump_fini(zv);
2017 2038 return (error);
2018 2039 }
2019 2040
2020 2041 tx = dmu_tx_create(os);
2021 2042 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2022 2043 error = dmu_tx_assign(tx, TXG_WAIT);
2023 2044 if (error) {
2024 2045 dmu_tx_abort(tx);
2025 2046 (void) zvol_dump_fini(zv);
2026 2047 return (error);
2027 2048 }
2028 2049
2029 2050 zv->zv_flags |= ZVOL_DUMPIFIED;
2030 2051 error = zap_update(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, 8, 1,
2031 2052 &zv->zv_volsize, tx);
2032 2053 dmu_tx_commit(tx);
2033 2054
2034 2055 if (error) {
2035 2056 (void) zvol_dump_fini(zv);
2036 2057 return (error);
2037 2058 }
2038 2059
2039 2060 txg_wait_synced(dmu_objset_pool(os), 0);
2040 2061 return (0);
2041 2062 }
2042 2063
2043 2064 static int
2044 2065 zvol_dump_fini(zvol_state_t *zv)
2045 2066 {
2046 2067 dmu_tx_t *tx;
2047 2068 objset_t *os = zv->zv_objset;
2048 2069 nvlist_t *nv;
2049 2070 int error = 0;
2050 2071 uint64_t checksum, compress, refresrv, vbs, dedup;
2051 2072 uint64_t version = spa_version(dmu_objset_spa(zv->zv_objset));
2052 2073
2053 2074 /*
2054 2075 * Attempt to restore the zvol back to its pre-dumpified state.
2055 2076 * This is a best-effort attempt, as it's possible that not all
2056 2077 * of these properties were initialized during the dumpify process
2057 2078 * (e.g., an error occurred during zvol_dump_init).
2058 2079 */
2059 2080
2060 2081 tx = dmu_tx_create(os);
2061 2082 dmu_tx_hold_zap(tx, ZVOL_ZAP_OBJ, TRUE, NULL);
2062 2083 error = dmu_tx_assign(tx, TXG_WAIT);
2063 2084 if (error) {
2064 2085 dmu_tx_abort(tx);
2065 2086 return (error);
2066 2087 }
2067 2088 (void) zap_remove(os, ZVOL_ZAP_OBJ, ZVOL_DUMPSIZE, tx);
2068 2089 dmu_tx_commit(tx);
2069 2090
2070 2091 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2071 2092 zfs_prop_to_name(ZFS_PROP_CHECKSUM), 8, 1, &checksum);
2072 2093 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2073 2094 zfs_prop_to_name(ZFS_PROP_COMPRESSION), 8, 1, &compress);
2074 2095 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2075 2096 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), 8, 1, &refresrv);
2076 2097 (void) zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2077 2098 zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE), 8, 1, &vbs);
2078 2099
2079 2100 VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
2080 2101 (void) nvlist_add_uint64(nv,
2081 2102 zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum);
2082 2103 (void) nvlist_add_uint64(nv,
2083 2104 zfs_prop_to_name(ZFS_PROP_COMPRESSION), compress);
2084 2105 (void) nvlist_add_uint64(nv,
2085 2106 zfs_prop_to_name(ZFS_PROP_REFRESERVATION), refresrv);
2086 2107 if (version >= SPA_VERSION_DEDUP &&
2087 2108 zap_lookup(zv->zv_objset, ZVOL_ZAP_OBJ,
2088 2109 zfs_prop_to_name(ZFS_PROP_DEDUP), 8, 1, &dedup) == 0) {
2089 2110 (void) nvlist_add_uint64(nv,
2090 2111 zfs_prop_to_name(ZFS_PROP_DEDUP), dedup);
2091 2112 }
2092 2113 (void) zfs_set_prop_nvlist(zv->zv_name, ZPROP_SRC_LOCAL,
2093 2114 nv, NULL);
2094 2115 nvlist_free(nv);
2095 2116
2096 2117 zvol_free_extents(zv);
2097 2118 zv->zv_flags &= ~ZVOL_DUMPIFIED;
2098 2119 (void) dmu_free_long_range(os, ZVOL_OBJ, 0, DMU_OBJECT_END);
2099 2120 /* wait for dmu_free_long_range to actually free the blocks */
2100 2121 txg_wait_synced(dmu_objset_pool(zv->zv_objset), 0);
2101 2122 tx = dmu_tx_create(os);
2102 2123 dmu_tx_hold_bonus(tx, ZVOL_OBJ);
2103 2124 error = dmu_tx_assign(tx, TXG_WAIT);
2104 2125 if (error) {
2105 2126 dmu_tx_abort(tx);
2106 2127 return (error);
2107 2128 }
2108 2129 if (dmu_object_set_blocksize(os, ZVOL_OBJ, vbs, 0, tx) == 0)
2109 2130 zv->zv_volblocksize = vbs;
2110 2131 dmu_tx_commit(tx);
2111 2132
2112 2133 return (0);
2113 2134 }
388 lines elided