 * The buckets (aka "leaf nodes") are implemented in zap_leaf.c.
 *
 * The pointer table holds a power-of-2 number of pointers
 * (1 << zap_f.zap_phys->zap_ptrtbl.zt_shift).  The bucket pointed to
 * by the pointer at index i in the table holds entries whose hash value
 * has a zt_shift-bit prefix equal to i.
 */
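/*
 * For example (values chosen for illustration): with a 10-bit prefix the
 * table has 1<<10 pointers, and ZAP_HASH_IDX(h, 10) takes the top 10 bits
 * of the 64-bit hash, so h = 0xC000000000000000ULL falls in bucket 0x300.
 */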

#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zfs_context.h>
#include <sys/zfs_znode.h>
#include <sys/fs/zfs.h>
#include <sys/zap.h>
#include <sys/refcount.h>
#include <sys/zap_impl.h>
#include <sys/zap_leaf.h>

int fzap_default_block_shift = 14; /* 16k blocksize */

static void zap_leaf_pageout(dmu_buf_t *db, void *vl);
static uint64_t zap_allocate_blocks(zap_t *zap, int nblocks);

void
fzap_byteswap(void *vbuf, size_t size)
{
	uint64_t block_type;

	block_type = *(uint64_t *)vbuf;

	if (block_type == ZBT_LEAF || block_type == BSWAP_64(ZBT_LEAF)) {
		zap_leaf_byteswap(vbuf, size);
	} else {
		/* it's a ptrtbl block */
		byteswap_uint64_array(vbuf, size);
	}
}

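/*
 * Convert a microzap object into a fatzap: install the fatzap eviction
 * callback, reformat the header block in place, point every embedded
 * pointer-table entry at block 1, and initialize block 1 as the first
 * (empty) leaf.
 */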
void
fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
{
	dmu_buf_t *db;
	zap_leaf_t *l;
	int i;
	zap_phys_t *zp;

	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
	zap->zap_ismicro = FALSE;

	(void) dmu_buf_update_user(zap->zap_dbuf, zap, zap,
	    &zap->zap_f.zap_phys, zap_evict);

	mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0);
	zap->zap_f.zap_block_shift = highbit(zap->zap_dbuf->db_size) - 1;

	zp = zap->zap_f.zap_phys;
	/*
	 * explicitly zero it since it might be coming from an
	 * initialized microzap
	 */
	bzero(zap->zap_dbuf->db_data, zap->zap_dbuf->db_size);
	zp->zap_block_type = ZBT_HEADER;
	zp->zap_magic = ZAP_MAGIC;

	zp->zap_ptrtbl.zt_shift = ZAP_EMBEDDED_PTRTBL_SHIFT(zap);

	zp->zap_freeblk = 2;		/* block 1 will be the first leaf */
	zp->zap_num_leafs = 1;
	zp->zap_num_entries = 0;
	zp->zap_salt = zap->zap_salt;
	zp->zap_normflags = zap->zap_normflags;
	zp->zap_flags = flags;

	/* block 1 will be the first leaf */
	for (i = 0; i < (1<<zp->zap_ptrtbl.zt_shift); i++)
		ZAP_EMBEDDED_PTRTBL_ENT(zap, i) = 1;

	/*
	 * set up block 1 - the first leaf
	 */
	VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object,
	    1<<FZAP_BLOCK_SHIFT(zap), FTAG, &db, DMU_READ_NO_PREFETCH));
	dmu_buf_will_dirty(db, tx);

	l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
	l->l_dbuf = db;
	l->l_phys = db->db_data;

	zap_leaf_init(l, zp->zap_normflags != 0);

	kmem_free(l, sizeof (zap_leaf_t));
	dmu_buf_rele(db, FTAG);
}

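/*
 * Return nonzero if the zap is locked as writer (either because it
 * already was, or because a lock upgrade succeeded); the caller must
 * drop and retake the lock itself if this returns 0.
 */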
static int
zap_tryupgradedir(zap_t *zap, dmu_tx_t *tx)
{
	if (RW_WRITE_HELD(&zap->zap_rwlock))
		return (1);
	if (rw_tryupgrade(&zap->zap_rwlock)) {
		dmu_buf_will_dirty(zap->zap_dbuf, tx);
		return (1);
	}
	return (0);
}

/*
 * Generic routines for dealing with the pointer & cookie tables.
 */

/* ... (zap_table_grow(), zap_table_store(), and zap_table_load() elided) ... */
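/*
 * Copy each pointer-table entry of the half-sized source into two
 * adjacent entries of the destination, preserving the invariant that a
 * leaf with an n-bit prefix is referenced by 2^(zt_shift - n)
 * consecutive pointers.
 */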
static void
zap_ptrtbl_transfer(const uint64_t *src, uint64_t *dst, int n)
{
	int i;
	for (i = 0; i < n; i++) {
		uint64_t lb = src[i];
		dst[2*i+0] = lb;
		dst[2*i+1] = lb;
	}
}

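/*
 * Double the size of the pointer table: either evacuate the embedded
 * table into its own external block, or (for an already-external table)
 * defer to zap_table_grow() (elided above), passing it
 * zap_ptrtbl_transfer() to copy the entries.
 */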
static int
zap_grow_ptrtbl(zap_t *zap, dmu_tx_t *tx)
{
	/*
	 * The pointer table should never use more hash bits than we
	 * have (otherwise we'd be using useless zero bits to index it).
	 * If we are within 2 bits of running out, stop growing, since
	 * this is already an aberrant condition.
	 */
	if (zap->zap_f.zap_phys->zap_ptrtbl.zt_shift >= zap_hashbits(zap) - 2)
		return (SET_ERROR(ENOSPC));

	if (zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks == 0) {
		/*
		 * We are outgrowing the "embedded" ptrtbl (the one
		 * stored in the header block).  Give it its own entire
		 * block, which will double the size of the ptrtbl.
		 */
		uint64_t newblk;
		dmu_buf_t *db_new;
		int err;

		ASSERT3U(zap->zap_f.zap_phys->zap_ptrtbl.zt_shift, ==,
		    ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
		ASSERT0(zap->zap_f.zap_phys->zap_ptrtbl.zt_blk);

		newblk = zap_allocate_blocks(zap, 1);
		err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
		    newblk << FZAP_BLOCK_SHIFT(zap), FTAG, &db_new,
		    DMU_READ_NO_PREFETCH);
		if (err)
			return (err);
		dmu_buf_will_dirty(db_new, tx);
		zap_ptrtbl_transfer(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
		    db_new->db_data, 1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
		dmu_buf_rele(db_new, FTAG);

		zap->zap_f.zap_phys->zap_ptrtbl.zt_blk = newblk;
		zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks = 1;
		zap->zap_f.zap_phys->zap_ptrtbl.zt_shift++;

		ASSERT3U(1ULL << zap->zap_f.zap_phys->zap_ptrtbl.zt_shift, ==,
		    zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks <<
		    (FZAP_BLOCK_SHIFT(zap)-3));

		return (0);
	} else {
		return (zap_table_grow(zap, &zap->zap_f.zap_phys->zap_ptrtbl,
		    zap_ptrtbl_transfer, tx));
	}
}

static void
zap_increment_num_entries(zap_t *zap, int delta, dmu_tx_t *tx)
{
	dmu_buf_will_dirty(zap->zap_dbuf, tx);
	mutex_enter(&zap->zap_f.zap_num_entries_mtx);
	ASSERT(delta > 0 || zap->zap_f.zap_phys->zap_num_entries >= -delta);
	zap->zap_f.zap_phys->zap_num_entries += delta;
	mutex_exit(&zap->zap_f.zap_num_entries_mtx);
}

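/*
 * Allocate "nblocks" consecutive object blocks with a simple bump
 * allocator; fatzap blocks are never freed back, so zap_freeblk only
 * grows.
 */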
static uint64_t
zap_allocate_blocks(zap_t *zap, int nblocks)
{
	uint64_t newblk;
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
	newblk = zap->zap_f.zap_phys->zap_freeblk;
	zap->zap_f.zap_phys->zap_freeblk += nblocks;
	return (newblk);
}

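/*
 * Allocate a new leaf block, attach an in-core zap_leaf_t to it as the
 * dbuf's user data, and return it write-locked and dirtied.
 */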
static zap_leaf_t *
zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
{
	void *winner;
	zap_leaf_t *l = kmem_alloc(sizeof (zap_leaf_t), KM_SLEEP);

	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	rw_init(&l->l_rwlock, 0, 0, 0);
	rw_enter(&l->l_rwlock, RW_WRITER);
	l->l_blkid = zap_allocate_blocks(zap, 1);
	l->l_dbuf = NULL;
	l->l_phys = NULL;

	VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object,
	    l->l_blkid << FZAP_BLOCK_SHIFT(zap), NULL, &l->l_dbuf,
	    DMU_READ_NO_PREFETCH));
	winner = dmu_buf_set_user(l->l_dbuf, l, &l->l_phys, zap_leaf_pageout);
	ASSERT(winner == NULL);
	dmu_buf_will_dirty(l->l_dbuf, tx);

	zap_leaf_init(l, zap->zap_normflags != 0);

	zap->zap_f.zap_phys->zap_num_leafs++;

	return (l);
}

int
fzap_count(zap_t *zap, uint64_t *count)
{
	ASSERT(!zap->zap_ismicro);
	mutex_enter(&zap->zap_f.zap_num_entries_mtx);	/* unnecessary */
	*count = zap->zap_f.zap_phys->zap_num_entries;
	mutex_exit(&zap->zap_f.zap_num_entries_mtx);
	return (0);
}

/*
 * Routines for obtaining zap_leaf_t's
 */

void
zap_put_leaf(zap_leaf_t *l)
{
	rw_exit(&l->l_rwlock);
	dmu_buf_rele(l->l_dbuf, NULL);
}

_NOTE(ARGSUSED(0))
static void
zap_leaf_pageout(dmu_buf_t *db, void *vl)
{
	zap_leaf_t *l = vl;

	rw_destroy(&l->l_rwlock);
	kmem_free(l, sizeof (zap_leaf_t));
}

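/*
 * Construct a zap_leaf_t for an existing leaf block and try to install
 * it as the dbuf's user data; if another thread won the race, discard
 * ours and use the winner's.
 */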
static zap_leaf_t *
zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
{
	zap_leaf_t *l, *winner;

	ASSERT(blkid != 0);

	l = kmem_alloc(sizeof (zap_leaf_t), KM_SLEEP);
	rw_init(&l->l_rwlock, 0, 0, 0);
	rw_enter(&l->l_rwlock, RW_WRITER);
	l->l_blkid = blkid;
	l->l_bs = highbit(db->db_size)-1;
	l->l_dbuf = db;
	l->l_phys = NULL;

	winner = dmu_buf_set_user(db, l, &l->l_phys, zap_leaf_pageout);

	rw_exit(&l->l_rwlock);
	if (winner != NULL) {
		/* someone else set it first */
		zap_leaf_pageout(NULL, l);
		l = winner;
	}

	/*
	 * lh_pad1 (once called lhr_pad) was previously used for the next
	 * leaf in the leaf chain.  There should be no chained leaves (we
	 * have removed support for them).
	 */
	ASSERT0(l->l_phys->l_hdr.lh_pad1);

	/*
	 * There should be more hash table entries than the maximum
	 * number of entries that can fit in the leaf (each entry
	 * consumes at least three chunks).
	 */
	ASSERT3U(ZAP_LEAF_HASH_NUMENTRIES(l), >, ZAP_LEAF_NUMCHUNKS(l) / 3);

	/* The chunks should begin at the end of the hash table */
	ASSERT3P(&ZAP_LEAF_CHUNK(l, 0), ==,
	    &l->l_phys->l_hash[ZAP_LEAF_HASH_NUMENTRIES(l)]);

	return (l);
}

static int
zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_t *tx, krw_t lt,
    zap_leaf_t **lp)
{
	dmu_buf_t *db;
	zap_leaf_t *l;
	int bs = FZAP_BLOCK_SHIFT(zap);
	int err;

	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
	    blkid << bs, NULL, &db, DMU_READ_NO_PREFETCH);
	if (err)
		return (err);

	ASSERT3U(db->db_object, ==, zap->zap_object);
	ASSERT3U(db->db_offset, ==, blkid << bs);
	ASSERT3U(db->db_size, ==, 1 << bs);
	ASSERT(blkid != 0);

	l = dmu_buf_get_user(db);

	if (l == NULL)
		l = zap_open_leaf(blkid, db);

	rw_enter(&l->l_rwlock, lt);
	/*
	 * Must lock before dirtying, otherwise l->l_phys could change,
	 * causing ASSERT below to fail.
	 */
	if (lt == RW_WRITER)
		dmu_buf_will_dirty(db, tx);
	ASSERT3U(l->l_blkid, ==, blkid);
	ASSERT3P(l->l_dbuf, ==, db);
	ASSERT3P(l->l_phys, ==, l->l_dbuf->db_data);
	ASSERT3U(l->l_phys->l_hdr.lh_block_type, ==, ZBT_LEAF);
	ASSERT3U(l->l_phys->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);

	*lp = l;
	return (0);
}

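/*
 * Read the block id for pointer-table index "idx", from the embedded
 * table if it still fits in the header block, otherwise from the
 * external table.
 */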
static int
zap_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t *valp)
{
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	if (zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks == 0) {
		ASSERT3U(idx, <,
		    (1ULL << zap->zap_f.zap_phys->zap_ptrtbl.zt_shift));
		*valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx);
		return (0);
	} else {
		return (zap_table_load(zap, &zap->zap_f.zap_phys->zap_ptrtbl,
		    idx, valp));
	}
}

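/*
 * Point pointer-table index "idx" at leaf block "blk" (in the embedded
 * or external table, as appropriate).
 */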
static int
zap_set_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t blk, dmu_tx_t *tx)
{
	ASSERT(tx != NULL);
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));

	if (zap->zap_f.zap_phys->zap_ptrtbl.zt_blk == 0) {
		ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) = blk;
		return (0);
	} else {
		return (zap_table_store(zap, &zap->zap_f.zap_phys->zap_ptrtbl,
		    idx, blk, tx));
	}
}

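/*
 * Map hash value "h" to its leaf: take the top zt_shift bits as the
 * pointer-table index, look up the block id, and grab the leaf with
 * lock type "lt".
 */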
static int
zap_deref_leaf(zap_t *zap, uint64_t h, dmu_tx_t *tx, krw_t lt, zap_leaf_t **lp)
{
	uint64_t idx, blk;
	int err;

	ASSERT(zap->zap_dbuf == NULL ||
	    zap->zap_f.zap_phys == zap->zap_dbuf->db_data);
	ASSERT3U(zap->zap_f.zap_phys->zap_magic, ==, ZAP_MAGIC);
	idx = ZAP_HASH_IDX(h, zap->zap_f.zap_phys->zap_ptrtbl.zt_shift);
	err = zap_idx_to_blk(zap, idx, &blk);
	if (err != 0)
		return (err);
	err = zap_get_leaf_byblk(zap, blk, tx, lt, lp);

	ASSERT(err || ZAP_HASH_IDX(h, (*lp)->l_phys->l_hdr.lh_prefix_len) ==
	    (*lp)->l_phys->l_hdr.lh_prefix);
	return (err);
}

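/*
 * Split leaf "l" because it is too full to take another entry.  This
 * may require upgrading to the directory write lock and growing the
 * pointer table first; *lp is set to whichever half of the split
 * covers zn's hash.
 */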
static int
zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx, zap_leaf_t **lp)
{
	zap_t *zap = zn->zn_zap;
	uint64_t hash = zn->zn_hash;
	zap_leaf_t *nl;
	int prefix_diff, i, err;
	uint64_t sibling;
	int old_prefix_len = l->l_phys->l_hdr.lh_prefix_len;

	ASSERT3U(old_prefix_len, <=, zap->zap_f.zap_phys->zap_ptrtbl.zt_shift);
	ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));

	ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
	    l->l_phys->l_hdr.lh_prefix);

	if (zap_tryupgradedir(zap, tx) == 0 ||
	    old_prefix_len == zap->zap_f.zap_phys->zap_ptrtbl.zt_shift) {
		/* We failed to upgrade, or need to grow the pointer table */
		objset_t *os = zap->zap_objset;
		uint64_t object = zap->zap_object;

		zap_put_leaf(l);
		zap_unlockdir(zap);
		err = zap_lockdir(os, object, tx, RW_WRITER,
		    FALSE, FALSE, &zn->zn_zap);
		zap = zn->zn_zap;
		if (err)
			return (err);
		ASSERT(!zap->zap_ismicro);

		while (old_prefix_len ==
		    zap->zap_f.zap_phys->zap_ptrtbl.zt_shift) {
			err = zap_grow_ptrtbl(zap, tx);
			if (err)
				return (err);
		}

		err = zap_deref_leaf(zap, hash, tx, RW_WRITER, &l);
		if (err)
			return (err);

		if (l->l_phys->l_hdr.lh_prefix_len != old_prefix_len) {
			/* it split while our locks were down */
			*lp = l;
			return (0);
		}
	}
	ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
	ASSERT3U(old_prefix_len, <, zap->zap_f.zap_phys->zap_ptrtbl.zt_shift);
	ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
	    l->l_phys->l_hdr.lh_prefix);

	prefix_diff = zap->zap_f.zap_phys->zap_ptrtbl.zt_shift -
	    (old_prefix_len + 1);
	sibling = (ZAP_HASH_IDX(hash, old_prefix_len + 1) | 1) << prefix_diff;

	/* check for i/o errors before doing zap_leaf_split */
	for (i = 0; i < (1ULL<<prefix_diff); i++) {
		uint64_t blk;
		err = zap_idx_to_blk(zap, sibling+i, &blk);
		if (err)
			return (err);
		ASSERT3U(blk, ==, l->l_blkid);
	}

	nl = zap_create_leaf(zap, tx);
	zap_leaf_split(l, nl, zap->zap_normflags != 0);

	/* set sibling pointers */
	for (i = 0; i < (1ULL << prefix_diff); i++) {
		err = zap_set_idx_to_blk(zap, sibling+i, nl->l_blkid, tx);
		ASSERT0(err); /* we checked for i/o errors above */
	}

	if (hash & (1ULL << (64 - l->l_phys->l_hdr.lh_prefix_len))) {
		/* we want the sibling */
		zap_put_leaf(l);
		*lp = nl;
	} else {
		zap_put_leaf(nl);
		*lp = l;
	}

	return (0);
}

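/*
 * Release the leaf, then opportunistically grow the pointer table if
 * this leaf is nearly full at the maximum prefix length, or if a grow
 * is already in progress (zt_nextblk != 0).
 */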
static void
zap_put_leaf_maybe_grow_ptrtbl(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
{
	zap_t *zap = zn->zn_zap;
	int shift = zap->zap_f.zap_phys->zap_ptrtbl.zt_shift;
	int leaffull = (l->l_phys->l_hdr.lh_prefix_len == shift &&
	    l->l_phys->l_hdr.lh_nfree < ZAP_LEAF_LOW_WATER);

	zap_put_leaf(l);

	if (leaffull || zap->zap_f.zap_phys->zap_ptrtbl.zt_nextblk) {
		int err;

		/*
		 * We are in the middle of growing the pointer table, or
		 * this leaf will soon make us grow it.
		 */
		if (zap_tryupgradedir(zap, tx) == 0) {
			objset_t *os = zap->zap_objset;
			uint64_t zapobj = zap->zap_object;

			zap_unlockdir(zap);
			err = zap_lockdir(os, zapobj, tx,
			    RW_WRITER, FALSE, FALSE, &zn->zn_zap);
			zap = zn->zn_zap;
			if (err)
				return;
		}

		/* could have finished growing while our locks were down */
		if (zap->zap_f.zap_phys->zap_ptrtbl.zt_shift == shift)
			(void) zap_grow_ptrtbl(zap, tx);
	}
}

static int
fzap_checkname(zap_name_t *zn)
{
	if (zn->zn_key_orig_numints * zn->zn_key_intlen > ZAP_MAXNAMELEN)
		return (SET_ERROR(ENAMETOOLONG));
	return (0);
}

static int
fzap_checksize(uint64_t integer_size, uint64_t num_integers)
{
	/* Only integer sizes supported by C */
	switch (integer_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return (SET_ERROR(EINVAL));
	}

	if (integer_size * num_integers > ZAP_MAXVALUELEN)
		return (SET_ERROR(E2BIG));

	return (0);
}

/* ... */

int
fzap_remove(zap_name_t *zn, dmu_tx_t *tx)
{
	zap_leaf_t *l;
	int err;
	zap_entry_handle_t zeh;

	err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, tx, RW_WRITER, &l);
	if (err != 0)
		return (err);
	err = zap_leaf_lookup(l, zn, &zeh);
	if (err == 0) {
		zap_entry_remove(&zeh);
		zap_increment_num_entries(zn->zn_zap, -1, tx);
	}
	zap_put_leaf(l);
	return (err);
}

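/*
 * Issue an advisory read-ahead of the leaf block that zn's hash maps
 * to, so that a subsequent lookup finds it cached; any lookup error is
 * silently ignored, since this is only a hint.
 */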
void
fzap_prefetch(zap_name_t *zn)
{
	uint64_t idx, blk;
	zap_t *zap = zn->zn_zap;
	int bs;

	idx = ZAP_HASH_IDX(zn->zn_hash,
	    zap->zap_f.zap_phys->zap_ptrtbl.zt_shift);
	if (zap_idx_to_blk(zap, idx, &blk) != 0)
		return;
	bs = FZAP_BLOCK_SHIFT(zap);
	dmu_prefetch(zap->zap_objset, zap->zap_object, blk << bs, 1 << bs);
}

/*
 * Helper functions for consumers.
 */

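/*
 * Example use (hypothetical caller): create a child zap object and
 * record it under its parent in the same transaction:
 *
 *	uint64_t obj = zap_create_link(os, DMU_OT_DIRECTORY_CONTENTS,
 *	    parent_obj, "subdir", tx);
 */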
uint64_t
zap_create_link(objset_t *os, dmu_object_type_t ot, uint64_t parent_obj,
    const char *name, dmu_tx_t *tx)
{
	uint64_t new_obj;

	VERIFY((new_obj = zap_create(os, ot, DMU_OT_NONE, 0, tx)) > 0);
	VERIFY(zap_add(os, parent_obj, name, sizeof (uint64_t), 1, &new_obj,
	    tx) == 0);

	return (new_obj);
}

/* ... (tail of fzap_cursor_move_to_key() follows; its start is elided) ... */
	err = zap_leaf_lookup(l, zn, &zeh);
	if (err != 0)
		return (err);

	zc->zc_leaf = l;
	zc->zc_hash = zeh.zeh_hash;
	zc->zc_cd = zeh.zeh_cd;

	return (err);
}

void
fzap_get_stats(zap_t *zap, zap_stats_t *zs)
{
	int bs = FZAP_BLOCK_SHIFT(zap);
	zs->zs_blocksize = 1ULL << bs;

	/*
	 * Set zap_phys_t fields
	 */
	zs->zs_num_leafs = zap->zap_f.zap_phys->zap_num_leafs;
	zs->zs_num_entries = zap->zap_f.zap_phys->zap_num_entries;
	zs->zs_num_blocks = zap->zap_f.zap_phys->zap_freeblk;
	zs->zs_block_type = zap->zap_f.zap_phys->zap_block_type;
	zs->zs_magic = zap->zap_f.zap_phys->zap_magic;
	zs->zs_salt = zap->zap_f.zap_phys->zap_salt;

	/*
	 * Set zap_ptrtbl fields
	 */
	zs->zs_ptrtbl_len = 1ULL << zap->zap_f.zap_phys->zap_ptrtbl.zt_shift;
	zs->zs_ptrtbl_nextblk = zap->zap_f.zap_phys->zap_ptrtbl.zt_nextblk;
	zs->zs_ptrtbl_blks_copied =
	    zap->zap_f.zap_phys->zap_ptrtbl.zt_blks_copied;
	zs->zs_ptrtbl_zt_blk = zap->zap_f.zap_phys->zap_ptrtbl.zt_blk;
	zs->zs_ptrtbl_zt_numblks = zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks;
	zs->zs_ptrtbl_zt_shift = zap->zap_f.zap_phys->zap_ptrtbl.zt_shift;

	if (zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks == 0) {
		/* the ptrtbl is entirely in the header block. */
		zap_stats_ptrtbl(zap, &ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
		    1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap), zs);
	} else {
		int b;

		dmu_prefetch(zap->zap_objset, zap->zap_object,
		    zap->zap_f.zap_phys->zap_ptrtbl.zt_blk << bs,
		    zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks << bs);

		for (b = 0; b < zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks;
		    b++) {
			dmu_buf_t *db;
			int err;

			err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
			    (zap->zap_f.zap_phys->zap_ptrtbl.zt_blk + b) << bs,
			    FTAG, &db, DMU_READ_NO_PREFETCH);
			if (err == 0) {
				zap_stats_ptrtbl(zap, db->db_data,
				    1<<(bs-3), zs);
				dmu_buf_rele(db, FTAG);
			}
		}
	}
}

int
fzap_count_write(zap_name_t *zn, int add, uint64_t *towrite,
    uint64_t *tooverwrite)
{
	zap_t *zap = zn->zn_zap;
	zap_leaf_t *l;
	int err;

	/*
	 * Account for the header block of the fatzap.
	 */
	if (!add && dmu_buf_freeable(zap->zap_dbuf)) {
		*tooverwrite += zap->zap_dbuf->db_size;
	} else {
		*towrite += zap->zap_dbuf->db_size;
	}

	/*
	 * Account for the pointer table blocks.
	 * If we are adding, we need to account for the following cases:
	 * - If the pointer table is embedded, this operation could force
	 *   an external pointer table.
	 * - If there already is an external pointer table, this operation
	 *   could extend the table.
	 */
	if (add) {
		if (zap->zap_f.zap_phys->zap_ptrtbl.zt_blk == 0)
			*towrite += zap->zap_dbuf->db_size;
		else
			*towrite += (zap->zap_dbuf->db_size * 3);
	}

	/*
	 * Now, check if the block containing the leaf is freeable
	 * and account accordingly.
	 */
	err = zap_deref_leaf(zap, zn->zn_hash, NULL, RW_READER, &l);
	if (err != 0) {
		return (err);
	}

	if (!add && dmu_buf_freeable(l->l_dbuf)) {
		*tooverwrite += l->l_dbuf->db_size;
	} else {
		/*
		 * If this is an add operation, the leaf block could split.
		 * Hence, we need to account for an additional leaf block.
		 */
		*towrite += add ? 2 * l->l_dbuf->db_size : l->l_dbuf->db_size;
	}

	zap_put_leaf(l);
	return (0);
}