/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2012 by Delphix. All rights reserved.
 */

#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>	/* for dsl_dataset_block_freeable() */
#include <sys/dsl_dir.h>	/* for dsl_dir_tempreserve_*() */
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>	/* for fzap_default_block_shift */
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>

typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
    uint64_t arg1, uint64_t arg2);


dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
	dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
	tx->tx_dir = dd;
	if (dd != NULL)
		tx->tx_pool = dd->dd_pool;
	list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
	    offsetof(dmu_tx_hold_t, txh_node));
	list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
	    offsetof(dmu_tx_callback_t, dcb_node));
#ifdef ZFS_DEBUG
	refcount_create(&tx->tx_space_written);
	refcount_create(&tx->tx_space_freed);
#endif
	return (tx);
}

dmu_tx_t *
dmu_tx_create(objset_t *os)
{
	dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
	tx->tx_objset = os;
	tx->tx_lastsnap_txg = dsl_dataset_prev_snap_txg(os->os_dsl_dataset);
	return (tx);
}

dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
	dmu_tx_t *tx = dmu_tx_create_dd(NULL);

	ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
	tx->tx_pool = dp;
	tx->tx_txg = txg;
	tx->tx_anyobj = TRUE;

	return (tx);
}

int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

int
dmu_tx_private_ok(dmu_tx_t *tx)
{
	return (tx->tx_anyobj);
}

static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
    enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn = NULL;
	int err;

	if (object != DMU_NEW_OBJECT) {
		err = dnode_hold(os, object, tx, &dn);
		if (err) {
			tx->tx_err = err;
			return (NULL);
		}

		if (err == 0 && tx->tx_txg != 0) {
			mutex_enter(&dn->dn_mtx);
			/*
			 * dn->dn_assigned_txg == tx->tx_txg doesn't pose a
			 * problem, but there's no way for it to happen (for
			 * now, at least).
			 */
			ASSERT(dn->dn_assigned_txg == 0);
			dn->dn_assigned_txg = tx->tx_txg;
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
	}

	txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
	txh->txh_tx = tx;
	txh->txh_dnode = dn;
#ifdef ZFS_DEBUG
	txh->txh_type = type;
	txh->txh_arg1 = arg1;
	txh->txh_arg2 = arg2;
#endif
	list_insert_tail(&tx->tx_holds, txh);

	return (txh);
}

void
dmu_tx_add_new_object(dmu_tx_t *tx, objset_t *os, uint64_t object)
{
	/*
	 * If we're syncing, the caller can manipulate any object anyhow,
	 * and the hold on the dnode_t can cause problems.
	 */
	if (!dmu_tx_is_syncing(tx)) {
		(void) dmu_tx_hold_object_impl(tx, os,
		    object, THT_NEWOBJECT, 0, 0);
	}
}

static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
	int err;
	dmu_buf_impl_t *db;

	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	db = dbuf_hold_level(dn, level, blkid, FTAG);
	rw_exit(&dn->dn_struct_rwlock);
	if (db == NULL)
		return (EIO);
	err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
	dbuf_rele(db, FTAG);
	return (err);
}

static void
dmu_tx_count_twig(dmu_tx_hold_t *txh, dnode_t *dn, dmu_buf_impl_t *db,
    int level, uint64_t blkid, boolean_t freeable, uint64_t *history)
{
	objset_t *os = dn->dn_objset;
	dsl_dataset_t *ds = os->os_dsl_dataset;
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	dmu_buf_impl_t *parent = NULL;
	blkptr_t *bp = NULL;
	uint64_t space;

	if (level >= dn->dn_nlevels || history[level] == blkid)
		return;

	history[level] = blkid;

	space = (level == 0) ? dn->dn_datablksz : (1ULL << dn->dn_indblkshift);

	if (db == NULL || db == dn->dn_dbuf) {
		ASSERT(level != 0);
		db = NULL;
	} else {
		ASSERT(DB_DNODE(db) == dn);
		ASSERT(db->db_level == level);
		ASSERT(db->db.db_size == space);
		ASSERT(db->db_blkid == blkid);
		bp = db->db_blkptr;
		parent = db->db_parent;
	}

	freeable = (bp && (freeable ||
	    dsl_dataset_block_freeable(ds, bp, bp->blk_birth)));

	if (freeable)
		txh->txh_space_tooverwrite += space;
	else
		txh->txh_space_towrite += space;
	if (bp)
		txh->txh_space_tounref += bp_get_dsize(os->os_spa, bp);

	dmu_tx_count_twig(txh, dn, parent, level + 1,
	    blkid >> epbs, freeable, history);
}
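
/*
 * Illustrative walk-through (not from the original source): for a dnode
 * with 128K data blocks and 16K indirect blocks (epbs = 14 - 7 = 7),
 * counting the twig for level-0 blkid 1000 charges the 128K data block,
 * then recurses to blkid 1000 >> 7 = 7 at level 1 and blkid 0 at level 2,
 * charging 16K per indirect, each to either tooverwrite or towrite
 * depending on whether the existing block is freeable.  The history[]
 * array makes a later call for blkid 1001 stop at level 1, since it maps
 * into the same indirect blocks that were already counted.
 */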

/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	dnode_t *dn = txh->txh_dnode;
	uint64_t start, end, i;
	int min_bs, max_bs, min_ibs, max_ibs, epbs, bits;
	int err = 0;

	if (len == 0)
		return;

	min_bs = SPA_MINBLOCKSHIFT;
	max_bs = SPA_MAXBLOCKSHIFT;
	min_ibs = DN_MIN_INDBLKSHIFT;
	max_ibs = DN_MAX_INDBLKSHIFT;

	if (dn) {
		uint64_t history[DN_MAX_LEVELS];
		int nlvls = dn->dn_nlevels;
		int delta;

		/*
		 * For i/o error checking, read the first and last level-0
		 * blocks (if they are not aligned), and all the level-1
		 * blocks.
		 */
		if (dn->dn_maxblkid == 0) {
			delta = dn->dn_datablksz;
			start = (off < dn->dn_datablksz) ? 0 : 1;
			end = (off+len <= dn->dn_datablksz) ? 0 : 1;
			if (start == 0 && (off > 0 || len < dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
				if (err)
					goto out;
				delta -= off;
			}
		} else {
			zio_t *zio = zio_root(dn->dn_objset->os_spa,
			    NULL, NULL, ZIO_FLAG_CANFAIL);

			/* first level-0 block */
			start = off >> dn->dn_datablkshift;
			if (P2PHASE(off, dn->dn_datablksz) ||
			    len < dn->dn_datablksz) {
				err = dmu_tx_check_ioerr(zio, dn, 0, start);
				if (err)
					goto out;
			}

			/* last level-0 block */
			end = (off+len-1) >> dn->dn_datablkshift;
			if (end != start && end <= dn->dn_maxblkid &&
			    P2PHASE(off+len, dn->dn_datablksz)) {
				err = dmu_tx_check_ioerr(zio, dn, 0, end);
				if (err)
					goto out;
			}

			/* level-1 blocks */
			if (nlvls > 1) {
				int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
				for (i = (start>>shft)+1; i < end>>shft; i++) {
					err = dmu_tx_check_ioerr(zio, dn, 1, i);
					if (err)
						goto out;
				}
			}

			err = zio_wait(zio);
			if (err)
				goto out;
			delta = P2NPHASE(off, dn->dn_datablksz);
		}

		if (dn->dn_maxblkid > 0) {
			/*
			 * The blocksize can't change,
			 * so we can make a more precise estimate.
			 */
			ASSERT(dn->dn_datablkshift != 0);
			min_bs = max_bs = dn->dn_datablkshift;
			min_ibs = max_ibs = dn->dn_indblkshift;
		} else if (dn->dn_indblkshift > max_ibs) {
			/*
			 * This ensures that if we reduce DN_MAX_INDBLKSHIFT,
			 * the code will still work correctly on older pools.
			 */
			min_ibs = max_ibs = dn->dn_indblkshift;
		}

		/*
		 * If this write is not off the end of the file
		 * we need to account for overwrites/unref.
		 */
		if (start <= dn->dn_maxblkid) {
			for (int l = 0; l < DN_MAX_LEVELS; l++)
				history[l] = -1ULL;
		}
		while (start <= dn->dn_maxblkid) {
			dmu_buf_impl_t *db;

			rw_enter(&dn->dn_struct_rwlock, RW_READER);
			err = dbuf_hold_impl(dn, 0, start, FALSE, FTAG, &db);
			rw_exit(&dn->dn_struct_rwlock);

			if (err) {
				txh->txh_tx->tx_err = err;
				return;
			}

			dmu_tx_count_twig(txh, dn, db, 0, start, B_FALSE,
			    history);
			dbuf_rele(db, FTAG);
			if (++start > end) {
				/*
				 * Account for new indirects appearing
				 * before this IO gets assigned into a txg.
				 */
				bits = 64 - min_bs;
				epbs = min_ibs - SPA_BLKPTRSHIFT;
				for (bits -= epbs * (nlvls - 1);
				    bits >= 0; bits -= epbs)
					txh->txh_fudge += 1ULL << max_ibs;
				goto out;
			}
			off += delta;
			if (len >= delta)
				len -= delta;
			delta = dn->dn_datablksz;
		}
	}

	/*
	 * 'end' is the last thing we will access, not one past.
	 * This way we won't overflow when accessing the last byte.
	 */
	start = P2ALIGN(off, 1ULL << max_bs);
	end = P2ROUNDUP(off + len, 1ULL << max_bs) - 1;
	txh->txh_space_towrite += end - start + 1;

	start >>= min_bs;
	end >>= min_bs;

	epbs = min_ibs - SPA_BLKPTRSHIFT;

	/*
	 * The object contains at most 2^(64 - min_bs) blocks,
	 * and each indirect level maps 2^epbs.
	 */
	for (bits = 64 - min_bs; bits >= 0; bits -= epbs) {
		start >>= epbs;
		end >>= epbs;
		ASSERT3U(end, >=, start);
		txh->txh_space_towrite += (end - start + 1) << max_ibs;
		if (start != 0) {
			/*
			 * We also need a new blkid=0 indirect block
			 * to reference any existing file data.
			 */
			txh->txh_space_towrite += 1ULL << max_ibs;
		}
	}
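
	/*
	 * Worked example (illustrative, not from the original source):
	 * consider a dnode with a fixed 128K block size (min_bs = max_bs
	 * = 17) and 16K indirects (min_ibs = max_ibs = 14, epbs = 7),
	 * and a write entirely past EOF at off = 10 * 128K, len = 5000.
	 * The level-0 charge above is one full 128K block; the loop then
	 * runs for bits = 47, 40, ..., 5, and since start and end both
	 * collapse to 0 after the first shift, each of the 7 iterations
	 * charges a single 16K indirect and never takes the extra
	 * blkid=0 branch, for roughly 128K + 112K of towrite in total.
	 */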

out:
	if (txh->txh_space_towrite + txh->txh_space_tooverwrite >
	    2 * DMU_MAX_ACCESS)
		err = EFBIG;

	if (err)
		txh->txh_tx->tx_err = err;
}

static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
	dnode_t *dn = txh->txh_dnode;
	dnode_t *mdn = DMU_META_DNODE(txh->txh_tx->tx_objset);
	uint64_t space = mdn->dn_datablksz +
	    ((mdn->dn_nlevels-1) << mdn->dn_indblkshift);

	if (dn && dn->dn_dbuf->db_blkptr &&
	    dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
	    dn->dn_dbuf->db_blkptr, dn->dn_dbuf->db_blkptr->blk_birth)) {
		txh->txh_space_tooverwrite += space;
		txh->txh_space_tounref += space;
	} else {
		txh->txh_space_towrite += space;
		if (dn && dn->dn_dbuf->db_blkptr)
			txh->txh_space_tounref += space;
	}
}

void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);
	ASSERT(len < DMU_MAX_ACCESS);
	ASSERT(len == 0 || UINT64_MAX - off >= len - 1);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_WRITE, off, len);
	if (txh == NULL)
		return;

	dmu_tx_count_write(txh, off, len);
	dmu_tx_count_dnode(txh);
}

static void
dmu_tx_count_free(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
	uint64_t blkid, nblks, lastblk;
	uint64_t space = 0, unref = 0, skipped = 0;
	dnode_t *dn = txh->txh_dnode;
	dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
	spa_t *spa = txh->txh_tx->tx_pool->dp_spa;
	int epbs;

	if (dn->dn_nlevels == 0)
		return;

	/*
	 * The struct_rwlock protects us against dn_nlevels
	 * changing, in case (against all odds) we manage to dirty &
	 * sync out the changes after we check for being dirty.
	 * Also, dbuf_hold_impl() wants us to have the struct_rwlock.
	 */
	rw_enter(&dn->dn_struct_rwlock, RW_READER);
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	if (dn->dn_maxblkid == 0) {
		if (off == 0 && len >= dn->dn_datablksz) {
			blkid = 0;
			nblks = 1;
		} else {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
	} else {
		blkid = off >> dn->dn_datablkshift;
		nblks = (len + dn->dn_datablksz - 1) >> dn->dn_datablkshift;

		if (blkid >= dn->dn_maxblkid) {
			rw_exit(&dn->dn_struct_rwlock);
			return;
		}
		if (blkid + nblks > dn->dn_maxblkid)
			nblks = dn->dn_maxblkid - blkid;
	}
	if (dn->dn_nlevels == 1) {
		int i;
		for (i = 0; i < nblks; i++) {
			blkptr_t *bp = dn->dn_phys->dn_blkptr;
			ASSERT3U(blkid + i, <, dn->dn_nblkptr);
			bp += blkid + i;
			if (dsl_dataset_block_freeable(ds, bp, bp->blk_birth)) {
				dprintf_bp(bp, "can free old%s", "");
				space += bp_get_dsize(spa, bp);
			}
			unref += BP_GET_ASIZE(bp);
		}
		nblks = 0;
	}

	/*
	 * Add in memory requirements of higher-level indirects.
	 * This assumes a worst-possible scenario for dn_nlevels.
	 */
	{
		uint64_t blkcnt = 1 + ((nblks >> epbs) >> epbs);
		int level = (dn->dn_nlevels > 1) ? 2 : 1;

		while (level++ < DN_MAX_LEVELS) {
			txh->txh_memory_tohold += blkcnt << dn->dn_indblkshift;
			blkcnt = 1 + (blkcnt >> epbs);
		}
		ASSERT(blkcnt <= dn->dn_nblkptr);
	}
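
	/*
	 * Illustrative numbers (not from the original source, and assuming
	 * DN_MAX_LEVELS == 6): with epbs = 7 and nblks = 100000, blkcnt
	 * starts at 1 + ((100000 >> 7) >> 7) = 7 level-2 indirects.  For
	 * dn_nlevels > 1 the loop above then holds 7, 1, 1, and 1 indirect
	 * blocks' worth of memory for levels 2 through 5, i.e.
	 * 10 << dn_indblkshift bytes in total.
	 */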

	lastblk = blkid + nblks - 1;
	while (nblks) {
		dmu_buf_impl_t *dbuf;
		uint64_t ibyte, new_blkid;
		int epb = 1 << epbs;
		int err, i, blkoff, tochk;
		blkptr_t *bp;

		ibyte = blkid << dn->dn_datablkshift;
		err = dnode_next_offset(dn,
		    DNODE_FIND_HAVELOCK, &ibyte, 2, 1, 0);
		new_blkid = ibyte >> dn->dn_datablkshift;
		if (err == ESRCH) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}
		if (new_blkid > lastblk) {
			skipped += (lastblk >> epbs) - (blkid >> epbs) + 1;
			break;
		}

		if (new_blkid > blkid) {
			ASSERT((new_blkid >> epbs) > (blkid >> epbs));
			skipped += (new_blkid >> epbs) - (blkid >> epbs) - 1;
			nblks -= new_blkid - blkid;
			blkid = new_blkid;
		}
		blkoff = P2PHASE(blkid, epb);
		tochk = MIN(epb - blkoff, nblks);

		err = dbuf_hold_impl(dn, 1, blkid >> epbs, FALSE, FTAG, &dbuf);
		if (err) {
			txh->txh_tx->tx_err = err;
			break;
		}

		txh->txh_memory_tohold += dbuf->db.db_size;

		/*
		 * We don't check memory_tohold against DMU_MAX_ACCESS because
		 * memory_tohold is an over-estimation (especially the >L1
		 * indirect blocks), so it could fail.  Callers should have
		 * already verified that they will not be holding too much
		 * memory.
		 */

		err = dbuf_read(dbuf, NULL, DB_RF_HAVESTRUCT | DB_RF_CANFAIL);
		if (err != 0) {
			txh->txh_tx->tx_err = err;
			dbuf_rele(dbuf, FTAG);
			break;
		}

		bp = dbuf->db.db_data;
		bp += blkoff;

		for (i = 0; i < tochk; i++) {
			if (dsl_dataset_block_freeable(ds, &bp[i],
			    bp[i].blk_birth)) {
				dprintf_bp(&bp[i], "can free old%s", "");
				space += bp_get_dsize(spa, &bp[i]);
			}
			unref += BP_GET_ASIZE(&bp[i]);
		}
		dbuf_rele(dbuf, FTAG);

		blkid += tochk;
		nblks -= tochk;
	}
	rw_exit(&dn->dn_struct_rwlock);

	/* account for new level 1 indirect blocks that might show up */
	if (skipped > 0) {
		txh->txh_fudge += skipped << dn->dn_indblkshift;
		skipped = MIN(skipped, DMU_MAX_DELETEBLKCNT >> epbs);
		txh->txh_memory_tohold += skipped << dn->dn_indblkshift;
	}
	txh->txh_space_tofree += space;
	txh->txh_space_tounref += unref;
}

void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t start, end, i;
	int err, shift;
	zio_t *zio;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_FREE, off, len);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	/* first block */
	if (off != 0)
		dmu_tx_count_write(txh, off, 1);
	/* last block */
	if (len != DMU_OBJECT_END)
		dmu_tx_count_write(txh, off+len, 1);

	dmu_tx_count_dnode(txh);

	if (off >= (dn->dn_maxblkid+1) * dn->dn_datablksz)
		return;
	if (len == DMU_OBJECT_END)
		len = (dn->dn_maxblkid+1) * dn->dn_datablksz - off;

	/*
	 * For i/o error checking, read the first and last level-0
	 * blocks, and all the level-1 blocks.  The calls to
	 * dmu_tx_count_write() above have already taken care of
	 * the level-0 blocks.
	 */
	if (dn->dn_nlevels > 1) {
		shift = dn->dn_datablkshift + dn->dn_indblkshift -
		    SPA_BLKPTRSHIFT;
		start = off >> shift;
		end = dn->dn_datablkshift ? ((off+len) >> shift) : 0;

		zio = zio_root(tx->tx_pool->dp_spa,
		    NULL, NULL, ZIO_FLAG_CANFAIL);
		for (i = start; i <= end; i++) {
			uint64_t ibyte = i << shift;
			err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
			i = ibyte >> shift;
			if (err == ESRCH)
				break;
			if (err) {
				tx->tx_err = err;
				return;
			}

			err = dmu_tx_check_ioerr(zio, dn, 1, i);
			if (err) {
				tx->tx_err = err;
				return;
			}
		}
		err = zio_wait(zio);
		if (err) {
			tx->tx_err = err;
			return;
		}
	}

	dmu_tx_count_free(txh, off, len);
}

void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
	dmu_tx_hold_t *txh;
	dnode_t *dn;
	uint64_t nblocks;
	int epbs, err;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_ZAP, add, (uintptr_t)name);
	if (txh == NULL)
		return;
	dn = txh->txh_dnode;

	dmu_tx_count_dnode(txh);

	if (dn == NULL) {
		/*
		 * We will be able to fit a new object's entries into one leaf
		 * block.  So there will be at most 2 blocks total,
		 * including the header block.
		 */
		dmu_tx_count_write(txh, 0, 2 << fzap_default_block_shift);
		return;
	}

	ASSERT3P(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);

	if (dn->dn_maxblkid == 0 && !add) {
		blkptr_t *bp;

		/*
		 * If there is only one block (i.e. this is a micro-zap)
		 * and we are not adding anything, the accounting is simple.
		 */
		err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
		if (err) {
			tx->tx_err = err;
			return;
		}

		/*
		 * Use max block size here, since we don't know how much
		 * the size will change between now and the dbuf dirty call.
		 */
		bp = &dn->dn_phys->dn_blkptr[0];
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
		return;
	}

	if (dn->dn_maxblkid > 0 && name) {
		/*
		 * Access the name in this fat-zap so that we'll check
		 * for i/o errors to the leaf blocks, etc.
		 */
		err = zap_lookup(dn->dn_objset, dn->dn_object, name,
		    8, 0, NULL);
		if (err == EIO) {
			tx->tx_err = err;
			return;
		}
	}

	err = zap_count_write(dn->dn_objset, dn->dn_object, name, add,
	    &txh->txh_space_towrite, &txh->txh_space_tooverwrite);

	/*
	 * If the modified blocks are scattered to the four winds,
	 * we'll have to modify an indirect twig for each.
	 */
	epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	for (nblocks = dn->dn_maxblkid >> epbs; nblocks != 0; nblocks >>= epbs)
		if (dn->dn_objset->os_dsl_dataset->ds_phys->ds_prev_snap_obj)
			txh->txh_space_towrite += 3 << dn->dn_indblkshift;
		else
			txh->txh_space_tooverwrite += 3 << dn->dn_indblkshift;
}
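
/*
 * Illustrative sketch (not from the original source; the object number
 * and entry name are hypothetical): a caller adding a ZAP entry holds
 * the object before assigning the tx, then modifies it:
 *
 *	dmu_tx_hold_zap(tx, zapobj, TRUE, "myentry");
 *	error = dmu_tx_assign(tx, TXG_WAIT);
 *	...
 *	error = zap_add(os, zapobj, "myentry", 8, 1, &value, tx);
 */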

void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    object, THT_BONUS, 0, 0);
	if (txh)
		dmu_tx_count_dnode(txh);
}

void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
	dmu_tx_hold_t *txh;
	ASSERT(tx->tx_txg == 0);

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
	    DMU_NEW_OBJECT, THT_SPACE, space, 0);

	txh->txh_space_towrite += space;
}

int
dmu_tx_holds(dmu_tx_t *tx, uint64_t object)
{
	dmu_tx_hold_t *txh;
	int holds = 0;

	/*
	 * By asserting that the tx is assigned, we're counting the
	 * number of dn_tx_holds, which is the same as the number of
	 * dn_holds.  Otherwise, we'd be counting dn_holds, but
	 * dn_tx_holds could be 0.
	 */
	ASSERT(tx->tx_txg != 0);

	/* if (tx->tx_anyobj == TRUE) */
		/* return (0); */

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		if (txh->txh_dnode && txh->txh_dnode->dn_object == object)
			holds++;
	}

	return (holds);
}

#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
	dmu_tx_hold_t *txh;
	int match_object = FALSE, match_offset = FALSE;
	dnode_t *dn;

	DB_DNODE_ENTER(db);
	dn = DB_DNODE(db);
	ASSERT(tx->tx_txg != 0);
	ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
	ASSERT3U(dn->dn_object, ==, db->db.db_object);

	if (tx->tx_anyobj) {
		DB_DNODE_EXIT(db);
		return;
	}

	/* XXX No checking on the meta dnode for now */
	if (db->db.db_object == DMU_META_DNODE_OBJECT) {
		DB_DNODE_EXIT(db);
		return;
	}

	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		ASSERT(dn == NULL || dn->dn_assigned_txg == tx->tx_txg);
		if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
			match_object = TRUE;
		if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
			int datablkshift = dn->dn_datablkshift ?
			    dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
			int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
			int shift = datablkshift + epbs * db->db_level;
			uint64_t beginblk = shift >= 64 ? 0 :
			    (txh->txh_arg1 >> shift);
			uint64_t endblk = shift >= 64 ? 0 :
			    ((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
			uint64_t blkid = db->db_blkid;

			/* XXX txh_arg2 better not be zero... */

			dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
			    txh->txh_type, beginblk, endblk);

			switch (txh->txh_type) {
			case THT_WRITE:
				if (blkid >= beginblk && blkid <= endblk)
					match_offset = TRUE;
				/*
				 * We will let this hold work for the bonus
				 * or spill buffer so that we don't need to
				 * hold it when creating a new object.
				 */
				if (blkid == DMU_BONUS_BLKID ||
				    blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				/*
				 * They might have to increase nlevels,
				 * thus dirtying the new TLIBs.  Or they
				 * might have to change the block size,
				 * thus dirtying the new lvl=0 blk=0.
				 */
				if (blkid == 0)
					match_offset = TRUE;
				break;
			case THT_FREE:
				/*
				 * We will dirty all the level 1 blocks in
				 * the free range and perhaps the first and
				 * last level 0 block.
				 */
				if (blkid >= beginblk && (blkid <= endblk ||
				    txh->txh_arg2 == DMU_OBJECT_END))
					match_offset = TRUE;
				break;
			case THT_SPILL:
				if (blkid == DMU_SPILL_BLKID)
					match_offset = TRUE;
				break;
			case THT_BONUS:
				if (blkid == DMU_BONUS_BLKID)
					match_offset = TRUE;
				break;
			case THT_ZAP:
				match_offset = TRUE;
				break;
			case THT_NEWOBJECT:
				match_object = TRUE;
				break;
			default:
				ASSERT(!"bad txh_type");
			}
		}
		if (match_object && match_offset) {
			DB_DNODE_EXIT(db);
			return;
		}
	}
	DB_DNODE_EXIT(db);
	panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
	    (u_longlong_t)db->db.db_object, db->db_level,
	    (u_longlong_t)db->db_blkid);
}
#endif

static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	dmu_tx_hold_t *txh;
	spa_t *spa = tx->tx_pool->dp_spa;
	uint64_t memory, asize, fsize, usize;
	uint64_t towrite, tofree, tooverwrite, tounref, tohold, fudge;

	ASSERT0(tx->tx_txg);

	if (tx->tx_err)
		return (tx->tx_err);

	if (spa_suspended(spa)) {
		/*
		 * If the user has indicated a blocking failure mode
		 * then return ERESTART which will block in dmu_tx_wait().
		 * Otherwise, return EIO so that an error can get
		 * propagated back to the VOP calls.
		 *
		 * Note that we always honor the txg_how flag regardless
		 * of the failuremode setting.
		 */
		if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
		    txg_how != TXG_WAIT)
			return (EIO);

		return (ERESTART);
	}

	tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
	tx->tx_needassign_txh = NULL;

	/*
	 * NB: No error returns are allowed after txg_hold_open, but
	 * before processing the dnode holds, due to the
	 * dmu_tx_unassign() logic.
	 */

	towrite = tofree = tooverwrite = tounref = tohold = fudge = 0;
	for (txh = list_head(&tx->tx_holds); txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;
		if (dn != NULL) {
			mutex_enter(&dn->dn_mtx);
			if (dn->dn_assigned_txg == tx->tx_txg - 1) {
				mutex_exit(&dn->dn_mtx);
				tx->tx_needassign_txh = txh;
				return (ERESTART);
			}
			if (dn->dn_assigned_txg == 0)
				dn->dn_assigned_txg = tx->tx_txg;
			ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
			(void) refcount_add(&dn->dn_tx_holds, tx);
			mutex_exit(&dn->dn_mtx);
		}
		towrite += txh->txh_space_towrite;
		tofree += txh->txh_space_tofree;
		tooverwrite += txh->txh_space_tooverwrite;
		tounref += txh->txh_space_tounref;
		tohold += txh->txh_memory_tohold;
		fudge += txh->txh_fudge;
	}

	/*
	 * NB: This check must be after we've held the dnodes, so that
	 * the dmu_tx_unassign() logic will work properly
	 */
	if (txg_how >= TXG_INITIAL && txg_how != tx->tx_txg)
		return (ERESTART);

	/*
	 * If a snapshot has been taken since we made our estimates,
	 * assume that we won't be able to free or overwrite anything.
	 */
	if (tx->tx_objset &&
	    dsl_dataset_prev_snap_txg(tx->tx_objset->os_dsl_dataset) >
	    tx->tx_lastsnap_txg) {
		towrite += tooverwrite;
		tooverwrite = tofree = 0;
	}

	/* needed allocation: worst-case estimate of write space */
	asize = spa_get_asize(tx->tx_pool->dp_spa, towrite + tooverwrite);
	/* freed space estimate: worst-case overwrite + free estimate */
	fsize = spa_get_asize(tx->tx_pool->dp_spa, tooverwrite) + tofree;
	/* convert unrefd space to worst-case estimate */
	usize = spa_get_asize(tx->tx_pool->dp_spa, tounref);
	/* calculate memory footprint estimate */
	memory = towrite + tooverwrite + tohold;

#ifdef ZFS_DEBUG
	/*
	 * Add in 'tohold' to account for our dirty holds on this memory
	 * XXX - the "fudge" factor is to account for skipped blocks that
	 * we missed because dnode_next_offset() misses in-core-only blocks.
	 */
	tx->tx_space_towrite = asize +
	    spa_get_asize(tx->tx_pool->dp_spa, tohold + fudge);
	tx->tx_space_tofree = tofree;
	tx->tx_space_tooverwrite = tooverwrite;
	tx->tx_space_tounref = tounref;
#endif

	if (tx->tx_dir && asize != 0) {
		int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
		    asize, fsize, usize, &tx->tx_tempreserve_cookie, tx);
		if (err)
			return (err);
	}

	return (0);
}

static void
dmu_tx_unassign(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	if (tx->tx_txg == 0)
		return;

	txg_rele_to_quiesce(&tx->tx_txgh);

	for (txh = list_head(&tx->tx_holds); txh != tx->tx_needassign_txh;
	    txh = list_next(&tx->tx_holds, txh)) {
		dnode_t *dn = txh->txh_dnode;

		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
	}

	txg_rele_to_sync(&tx->tx_txgh);

	tx->tx_lasttried_txg = tx->tx_txg;
	tx->tx_txg = 0;
}

/*
 * Assign tx to a transaction group.  txg_how can be one of:
 *
 * (1)	TXG_WAIT.  If the current open txg is full, waits until there's
 *	a new one.  This should be used when you're not holding locks.
 *	It will only fail if we're truly out of space (or over quota).
 *
 * (2)	TXG_NOWAIT.  If we can't assign into the current open txg without
 *	blocking, returns immediately with ERESTART.  This should be used
 *	whenever you're holding locks.  On an ERESTART error, the caller
 *	should drop locks, do a dmu_tx_wait(tx), and try again.
 *
 * (3)	A specific txg.  Use this if you need to ensure that multiple
 *	transactions all sync in the same txg.  Like TXG_NOWAIT, it
 *	returns ERESTART if it can't assign you into the requested txg.
 */
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
	int err;

	ASSERT(tx->tx_txg == 0);
	ASSERT(txg_how != 0);
	ASSERT(!dsl_pool_sync_context(tx->tx_pool));

	while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
		dmu_tx_unassign(tx);

		if (err != ERESTART || txg_how != TXG_WAIT)
			return (err);

		dmu_tx_wait(tx);
	}

	txg_rele_to_quiesce(&tx->tx_txgh);

	return (0);
}
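
/*
 * Typical usage, sketched for illustration (the object number, offset,
 * and length are hypothetical):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	error = dmu_tx_assign(tx, TXG_NOWAIT);
 *	if (error == ERESTART) {
 *		dmu_tx_wait(tx);
 *		dmu_tx_abort(tx);
 *		...drop locks and retry...
 *	} else if (error) {
 *		dmu_tx_abort(tx);
 *		return (error);
 *	}
 *	dmu_write(os, object, off, len, buf, tx);
 *	dmu_tx_commit(tx);
 */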

void
dmu_tx_wait(dmu_tx_t *tx)
{
	spa_t *spa = tx->tx_pool->dp_spa;

	ASSERT(tx->tx_txg == 0);

	/*
	 * It's possible that the pool has become active after this thread
	 * has tried to obtain a tx.  If that's the case then its
	 * tx_lasttried_txg would not have been assigned.
	 */
	if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
		txg_wait_synced(tx->tx_pool, spa_last_synced_txg(spa) + 1);
	} else if (tx->tx_needassign_txh) {
		dnode_t *dn = tx->tx_needassign_txh->txh_dnode;

		mutex_enter(&dn->dn_mtx);
		while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
			cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
		mutex_exit(&dn->dn_mtx);
		tx->tx_needassign_txh = NULL;
	} else {
		txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
	}
}

void
dmu_tx_willuse_space(dmu_tx_t *tx, int64_t delta)
{
#ifdef ZFS_DEBUG
	if (tx->tx_dir == NULL || delta == 0)
		return;

	if (delta > 0) {
		ASSERT3U(refcount_count(&tx->tx_space_written) + delta, <=,
		    tx->tx_space_towrite);
		(void) refcount_add_many(&tx->tx_space_written, delta, NULL);
	} else {
		(void) refcount_add_many(&tx->tx_space_freed, -delta, NULL);
	}
#endif
}

void
dmu_tx_commit(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg != 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn == NULL)
			continue;
		mutex_enter(&dn->dn_mtx);
		ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);

		if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
			dn->dn_assigned_txg = 0;
			cv_broadcast(&dn->dn_notxholds);
		}
		mutex_exit(&dn->dn_mtx);
		dnode_rele(dn, tx);
	}

	if (tx->tx_tempreserve_cookie)
		dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);

	if (!list_is_empty(&tx->tx_callbacks))
		txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);

	if (tx->tx_anyobj == FALSE)
		txg_rele_to_sync(&tx->tx_txgh);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	dprintf("towrite=%llu written=%llu tofree=%llu freed=%llu\n",
	    tx->tx_space_towrite, refcount_count(&tx->tx_space_written),
	    tx->tx_space_tofree, refcount_count(&tx->tx_space_freed));
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

void
dmu_tx_abort(dmu_tx_t *tx)
{
	dmu_tx_hold_t *txh;

	ASSERT(tx->tx_txg == 0);

	while (txh = list_head(&tx->tx_holds)) {
		dnode_t *dn = txh->txh_dnode;

		list_remove(&tx->tx_holds, txh);
		kmem_free(txh, sizeof (dmu_tx_hold_t));
		if (dn != NULL)
			dnode_rele(dn, tx);
	}

	/*
	 * Call any registered callbacks with an error code.
	 */
	if (!list_is_empty(&tx->tx_callbacks))
		dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);

	list_destroy(&tx->tx_callbacks);
	list_destroy(&tx->tx_holds);
#ifdef ZFS_DEBUG
	refcount_destroy_many(&tx->tx_space_written,
	    refcount_count(&tx->tx_space_written));
	refcount_destroy_many(&tx->tx_space_freed,
	    refcount_count(&tx->tx_space_freed));
#endif
	kmem_free(tx, sizeof (dmu_tx_t));
}

uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
	ASSERT(tx->tx_txg != 0);
	return (tx->tx_txg);
}

void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
	dmu_tx_callback_t *dcb;

	dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);

	dcb->dcb_func = func;
	dcb->dcb_data = data;

	list_insert_tail(&tx->tx_callbacks, dcb);
}
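
/*
 * Illustrative sketch (not from the original source; my_done_cb and its
 * argument are hypothetical): a commit callback fires once the txg that
 * the tx was assigned to has synced (error == 0), or with ECANCELED if
 * the tx is aborted:
 *
 *	static void
 *	my_done_cb(void *arg, int error)
 *	{
 *		...
 *	}
 *
 *	dmu_tx_callback_register(tx, my_done_cb, arg);
 */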

/*
 * Call all the commit callbacks on a list, with a given error code.
 */
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
	dmu_tx_callback_t *dcb;

	while (dcb = list_head(cb_list)) {
		list_remove(cb_list, dcb);
		dcb->dcb_func(dcb->dcb_data, error);
		kmem_free(dcb, sizeof (dmu_tx_callback_t));
	}
}

/*
 * Interface to hold a bunch of attributes.  Used for creating new files;
 * attrsize is the total size of all attributes to be added during object
 * creation.
 *
 * For updating/adding a single attribute dmu_tx_hold_sa() should be used.
 */

/*
 * Hold the necessary attribute name for attribute registration.
 * This should be a very rare case where it is needed.  If it does
 * happen it would only happen on the first write to the file system.
 */
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
	int i;

	if (!sa->sa_need_attr_registration)
		return;

	for (i = 0; i != sa->sa_num_attrs; i++) {
		if (!sa->sa_attr_table[i].sa_registered) {
			if (sa->sa_reg_attr_obj)
				dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
			else
				dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
				    B_TRUE, sa->sa_attr_table[i].sa_name);
		}
	}
}


void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
	dnode_t *dn;
	dmu_tx_hold_t *txh;

	txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
	    THT_SPILL, 0, 0);

	dn = txh->txh_dnode;

	if (dn == NULL)
		return;

	/* If blkptr doesn't exist then add space to towrite */
	if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
		txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
	} else {
		blkptr_t *bp;

		bp = &dn->dn_phys->dn_spill;
		if (dsl_dataset_block_freeable(dn->dn_objset->os_dsl_dataset,
		    bp, bp->blk_birth))
			txh->txh_space_tooverwrite += SPA_MAXBLOCKSIZE;
		else
			txh->txh_space_towrite += SPA_MAXBLOCKSIZE;
		if (!BP_IS_HOLE(bp))
			txh->txh_space_tounref += SPA_MAXBLOCKSIZE;
	}
}

void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
	sa_os_t *sa = tx->tx_objset->os_sa;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
	else {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (attrsize <= DN_MAX_BONUSLEN && !sa->sa_force_spill)
		return;

	(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
	    THT_SPILL, 0, 0);
}

/*
 * Hold SA attribute
 *
 * dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
 *
 * may_grow indicates that the caller may add to or enlarge the object's
 * variable-sized attributes, which can change the SA layout and require
 * a spill block to be allocated or rewritten.
 */
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
	uint64_t object;
	sa_os_t *sa = tx->tx_objset->os_sa;

	ASSERT(hdl != NULL);

	object = sa_handle_object(hdl);

	dmu_tx_hold_bonus(tx, object);

	if (tx->tx_objset->os_sa->sa_master_obj == 0)
		return;

	if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
	    tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
		dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
		dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
	}

	dmu_tx_sa_registration_hold(sa, tx);

	if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
		dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);

	if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
		ASSERT(tx->tx_txg == 0);
		dmu_tx_hold_spill(tx, object);
	} else {
		dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
		dnode_t *dn;

		DB_DNODE_ENTER(db);
		dn = DB_DNODE(db);
		if (dn->dn_have_spill) {
			ASSERT(tx->tx_txg == 0);
			dmu_tx_hold_spill(tx, object);
		}
		DB_DNODE_EXIT(db);
	}
}