3743 zfs needs a refcount audit
Submitted by: Will Andrews <willa@spectralogic.com>
Submitted by: Justin Gibbs <justing@spectralogic.com>
Reviewed by: Matthew Ahrens <mahrens@delphix.com>
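The zap.c portion of this audit tightens two error paths: zap_table_load() now releases its dbuf only when dmu_buf_hold() actually succeeded, and the zap_join(), zap_join_key(), and zap_join_increment() loops now break out on error so zap_cursor_fini() always runs before the error is returned. A minimal sketch of the hold/release rule the first hunk enforces (read_one_block() is a hypothetical helper for illustration; dmu_buf_hold()/dmu_buf_rele() are the DMU interfaces used in the diff below):

/*
 * Sketch only: a dbuf obtained via dmu_buf_hold() is valid, and may be
 * released with dmu_buf_rele(), only when the hold returned 0.
 */
static int
read_one_block(objset_t *os, uint64_t object, uint64_t offset)
{
	dmu_buf_t *db;
	int err;

	err = dmu_buf_hold(os, object, offset, FTAG, &db,
	    DMU_READ_NO_PREFETCH);
	if (err == 0)
		dmu_buf_rele(db, FTAG);	/* release only a successful hold */
	return (err);
}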
--- old/usr/src/uts/common/fs/zfs/zap.c
+++ new/usr/src/uts/common/fs/zfs/zap.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 * Copyright (c) 2013 by Delphix. All rights reserved.
24 24 */
25 25
26 26 /*
27 27 * This file contains the top half of the zfs directory structure
28 28 * implementation. The bottom half is in zap_leaf.c.
29 29 *
30 30 * The zdir is an extendable hash data structure. There is a table of
31 31 * pointers to buckets (zap_t->zd_data->zd_leafs). The buckets are
32 32 * each a constant size and hold a variable number of directory entries.
33 33 * The buckets (aka "leaf nodes") are implemented in zap_leaf.c.
34 34 *
35 35 * The pointer table holds a power of 2 number of pointers.
36 36 * (1<<zap_t->zd_data->zd_phys->zd_prefix_len). The bucket pointed to
37 37 * by the pointer at index i in the table holds entries whose hash value
38 38 * has a zd_prefix_len - bit prefix
39 39 */
40 40
41 41 #include <sys/spa.h>
42 42 #include <sys/dmu.h>
43 43 #include <sys/zfs_context.h>
44 44 #include <sys/zfs_znode.h>
45 45 #include <sys/fs/zfs.h>
46 46 #include <sys/zap.h>
47 47 #include <sys/refcount.h>
48 48 #include <sys/zap_impl.h>
49 49 #include <sys/zap_leaf.h>
50 50
51 51 int fzap_default_block_shift = 14; /* 16k blocksize */
52 52
53 53 static void zap_leaf_pageout(dmu_buf_t *db, void *vl);
54 54 static uint64_t zap_allocate_blocks(zap_t *zap, int nblocks);
55 55
56 56
57 57 void
58 58 fzap_byteswap(void *vbuf, size_t size)
59 59 {
60 60 uint64_t block_type;
61 61
62 62 block_type = *(uint64_t *)vbuf;
63 63
64 64 if (block_type == ZBT_LEAF || block_type == BSWAP_64(ZBT_LEAF))
65 65 zap_leaf_byteswap(vbuf, size);
66 66 else {
67 67 /* it's a ptrtbl block */
68 68 byteswap_uint64_array(vbuf, size);
69 69 }
70 70 }
71 71
72 72 void
73 73 fzap_upgrade(zap_t *zap, dmu_tx_t *tx, zap_flags_t flags)
74 74 {
75 75 dmu_buf_t *db;
76 76 zap_leaf_t *l;
77 77 int i;
78 78 zap_phys_t *zp;
79 79
80 80 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
81 81 zap->zap_ismicro = FALSE;
82 82
83 83 (void) dmu_buf_update_user(zap->zap_dbuf, zap, zap,
84 84 &zap->zap_f.zap_phys, zap_evict);
85 85
86 86 mutex_init(&zap->zap_f.zap_num_entries_mtx, 0, 0, 0);
87 87 zap->zap_f.zap_block_shift = highbit(zap->zap_dbuf->db_size) - 1;
88 88
89 89 zp = zap->zap_f.zap_phys;
90 90 /*
91 91 * explicitly zero it since it might be coming from an
92 92 * initialized microzap
93 93 */
94 94 bzero(zap->zap_dbuf->db_data, zap->zap_dbuf->db_size);
95 95 zp->zap_block_type = ZBT_HEADER;
96 96 zp->zap_magic = ZAP_MAGIC;
97 97
98 98 zp->zap_ptrtbl.zt_shift = ZAP_EMBEDDED_PTRTBL_SHIFT(zap);
99 99
100 100 zp->zap_freeblk = 2; /* block 1 will be the first leaf */
101 101 zp->zap_num_leafs = 1;
102 102 zp->zap_num_entries = 0;
103 103 zp->zap_salt = zap->zap_salt;
104 104 zp->zap_normflags = zap->zap_normflags;
105 105 zp->zap_flags = flags;
106 106
107 107 /* block 1 will be the first leaf */
108 108 for (i = 0; i < (1<<zp->zap_ptrtbl.zt_shift); i++)
109 109 ZAP_EMBEDDED_PTRTBL_ENT(zap, i) = 1;
110 110
111 111 /*
112 112 * set up block 1 - the first leaf
113 113 */
114 114 VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object,
115 115 1<<FZAP_BLOCK_SHIFT(zap), FTAG, &db, DMU_READ_NO_PREFETCH));
116 116 dmu_buf_will_dirty(db, tx);
117 117
118 118 l = kmem_zalloc(sizeof (zap_leaf_t), KM_SLEEP);
119 119 l->l_dbuf = db;
120 120 l->l_phys = db->db_data;
121 121
122 122 zap_leaf_init(l, zp->zap_normflags != 0);
123 123
124 124 kmem_free(l, sizeof (zap_leaf_t));
125 125 dmu_buf_rele(db, FTAG);
126 126 }
127 127
128 128 static int
129 129 zap_tryupgradedir(zap_t *zap, dmu_tx_t *tx)
130 130 {
131 131 if (RW_WRITE_HELD(&zap->zap_rwlock))
132 132 return (1);
133 133 if (rw_tryupgrade(&zap->zap_rwlock)) {
134 134 dmu_buf_will_dirty(zap->zap_dbuf, tx);
135 135 return (1);
136 136 }
137 137 return (0);
138 138 }
139 139
140 140 /*
141 141 * Generic routines for dealing with the pointer & cookie tables.
142 142 */
143 143
144 144 static int
145 145 zap_table_grow(zap_t *zap, zap_table_phys_t *tbl,
146 146 void (*transfer_func)(const uint64_t *src, uint64_t *dst, int n),
147 147 dmu_tx_t *tx)
148 148 {
149 149 uint64_t b, newblk;
150 150 dmu_buf_t *db_old, *db_new;
151 151 int err;
152 152 int bs = FZAP_BLOCK_SHIFT(zap);
153 153 int hepb = 1<<(bs-4);
154 154 /* hepb = half the number of entries in a block */
155 155
156 156 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
157 157 ASSERT(tbl->zt_blk != 0);
158 158 ASSERT(tbl->zt_numblks > 0);
159 159
160 160 if (tbl->zt_nextblk != 0) {
161 161 newblk = tbl->zt_nextblk;
162 162 } else {
163 163 newblk = zap_allocate_blocks(zap, tbl->zt_numblks * 2);
164 164 tbl->zt_nextblk = newblk;
165 165 ASSERT0(tbl->zt_blks_copied);
166 166 dmu_prefetch(zap->zap_objset, zap->zap_object,
167 167 tbl->zt_blk << bs, tbl->zt_numblks << bs);
168 168 }
169 169
170 170 /*
171 171 * Copy the ptrtbl from the old to new location.
172 172 */
173 173
174 174 b = tbl->zt_blks_copied;
175 175 err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
176 176 (tbl->zt_blk + b) << bs, FTAG, &db_old, DMU_READ_NO_PREFETCH);
177 177 if (err)
178 178 return (err);
179 179
180 180 /* first half of entries in old[b] go to new[2*b+0] */
181 181 VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object,
182 182 (newblk + 2*b+0) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH));
183 183 dmu_buf_will_dirty(db_new, tx);
184 184 transfer_func(db_old->db_data, db_new->db_data, hepb);
185 185 dmu_buf_rele(db_new, FTAG);
186 186
187 187 /* second half of entries in old[b] go to new[2*b+1] */
188 188 VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object,
189 189 (newblk + 2*b+1) << bs, FTAG, &db_new, DMU_READ_NO_PREFETCH));
190 190 dmu_buf_will_dirty(db_new, tx);
191 191 transfer_func((uint64_t *)db_old->db_data + hepb,
192 192 db_new->db_data, hepb);
193 193 dmu_buf_rele(db_new, FTAG);
194 194
195 195 dmu_buf_rele(db_old, FTAG);
196 196
197 197 tbl->zt_blks_copied++;
198 198
199 199 dprintf("copied block %llu of %llu\n",
200 200 tbl->zt_blks_copied, tbl->zt_numblks);
201 201
202 202 if (tbl->zt_blks_copied == tbl->zt_numblks) {
203 203 (void) dmu_free_range(zap->zap_objset, zap->zap_object,
204 204 tbl->zt_blk << bs, tbl->zt_numblks << bs, tx);
205 205
206 206 tbl->zt_blk = newblk;
207 207 tbl->zt_numblks *= 2;
208 208 tbl->zt_shift++;
209 209 tbl->zt_nextblk = 0;
210 210 tbl->zt_blks_copied = 0;
211 211
212 212 dprintf("finished; numblocks now %llu (%lluk entries)\n",
213 213 tbl->zt_numblks, 1<<(tbl->zt_shift-10));
214 214 }
215 215
216 216 return (0);
217 217 }
218 218
219 219 static int
220 220 zap_table_store(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t val,
221 221 dmu_tx_t *tx)
222 222 {
223 223 int err;
224 224 uint64_t blk, off;
225 225 int bs = FZAP_BLOCK_SHIFT(zap);
226 226 dmu_buf_t *db;
227 227
228 228 ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
229 229 ASSERT(tbl->zt_blk != 0);
230 230
231 231 dprintf("storing %llx at index %llx\n", val, idx);
232 232
233 233 blk = idx >> (bs-3);
234 234 off = idx & ((1<<(bs-3))-1);
235 235
236 236 err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
237 237 (tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH);
238 238 if (err)
239 239 return (err);
240 240 dmu_buf_will_dirty(db, tx);
241 241
242 242 if (tbl->zt_nextblk != 0) {
243 243 uint64_t idx2 = idx * 2;
244 244 uint64_t blk2 = idx2 >> (bs-3);
245 245 uint64_t off2 = idx2 & ((1<<(bs-3))-1);
246 246 dmu_buf_t *db2;
247 247
248 248 err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
249 249 (tbl->zt_nextblk + blk2) << bs, FTAG, &db2,
250 250 DMU_READ_NO_PREFETCH);
251 251 if (err) {
252 252 dmu_buf_rele(db, FTAG);
253 253 return (err);
254 254 }
255 255 dmu_buf_will_dirty(db2, tx);
256 256 ((uint64_t *)db2->db_data)[off2] = val;
257 257 ((uint64_t *)db2->db_data)[off2+1] = val;
258 258 dmu_buf_rele(db2, FTAG);
259 259 }
260 260
261 261 ((uint64_t *)db->db_data)[off] = val;
262 262 dmu_buf_rele(db, FTAG);
263 263
264 264 return (0);
265 265 }
266 266
267 267 static int
268 268 zap_table_load(zap_t *zap, zap_table_phys_t *tbl, uint64_t idx, uint64_t *valp)
269 269 {
270 270 uint64_t blk, off;
271 271 int err;
272 272 dmu_buf_t *db;
273 273 int bs = FZAP_BLOCK_SHIFT(zap);
274 274
275 275 ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
276 276
277 277 blk = idx >> (bs-3);
278 278 off = idx & ((1<<(bs-3))-1);
279 279
280 280 err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
281 281 (tbl->zt_blk + blk) << bs, FTAG, &db, DMU_READ_NO_PREFETCH);
282 282 if (err)
283 283 return (err);
284 284 *valp = ((uint64_t *)db->db_data)[off];
285 285 dmu_buf_rele(db, FTAG);
286 286
287 287 if (tbl->zt_nextblk != 0) {
288 288 /*
289 289 * read the nextblk for the sake of i/o error checking,
290 290 * so that zap_table_load() will catch errors for
291 291 * zap_table_store.
292 292 */
293 293 blk = (idx*2) >> (bs-3);
294 294
295 295 err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
296 296 (tbl->zt_nextblk + blk) << bs, FTAG, &db,
297 297 DMU_READ_NO_PREFETCH);
298 - dmu_buf_rele(db, FTAG);
298 + if (err == 0)
299 + dmu_buf_rele(db, FTAG);
299 300 }
300 301 return (err);
301 302 }
302 303
303 304 /*
304 305 * Routines for growing the ptrtbl.
305 306 */
306 307
307 308 static void
308 309 zap_ptrtbl_transfer(const uint64_t *src, uint64_t *dst, int n)
309 310 {
310 311 int i;
311 312 for (i = 0; i < n; i++) {
312 313 uint64_t lb = src[i];
313 314 dst[2*i+0] = lb;
314 315 dst[2*i+1] = lb;
315 316 }
316 317 }
317 318
318 319 static int
319 320 zap_grow_ptrtbl(zap_t *zap, dmu_tx_t *tx)
320 321 {
321 322 /*
322 323 * The pointer table should never use more hash bits than we
323 324 * have (otherwise we'd be using useless zero bits to index it).
324 325 * If we are within 2 bits of running out, stop growing, since
325 326 * this is already an aberrant condition.
326 327 */
327 328 if (zap->zap_f.zap_phys->zap_ptrtbl.zt_shift >= zap_hashbits(zap) - 2)
328 329 return (SET_ERROR(ENOSPC));
329 330
330 331 if (zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks == 0) {
331 332 /*
332 333 * We are outgrowing the "embedded" ptrtbl (the one
333 334 * stored in the header block). Give it its own entire
334 335 * block, which will double the size of the ptrtbl.
335 336 */
336 337 uint64_t newblk;
337 338 dmu_buf_t *db_new;
338 339 int err;
339 340
340 341 ASSERT3U(zap->zap_f.zap_phys->zap_ptrtbl.zt_shift, ==,
341 342 ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
342 343 ASSERT0(zap->zap_f.zap_phys->zap_ptrtbl.zt_blk);
343 344
344 345 newblk = zap_allocate_blocks(zap, 1);
345 346 err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
346 347 newblk << FZAP_BLOCK_SHIFT(zap), FTAG, &db_new,
347 348 DMU_READ_NO_PREFETCH);
348 349 if (err)
349 350 return (err);
350 351 dmu_buf_will_dirty(db_new, tx);
351 352 zap_ptrtbl_transfer(&ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
352 353 db_new->db_data, 1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap));
353 354 dmu_buf_rele(db_new, FTAG);
354 355
355 356 zap->zap_f.zap_phys->zap_ptrtbl.zt_blk = newblk;
356 357 zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks = 1;
357 358 zap->zap_f.zap_phys->zap_ptrtbl.zt_shift++;
358 359
359 360 ASSERT3U(1ULL << zap->zap_f.zap_phys->zap_ptrtbl.zt_shift, ==,
360 361 zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks <<
361 362 (FZAP_BLOCK_SHIFT(zap)-3));
362 363
363 364 return (0);
364 365 } else {
365 366 return (zap_table_grow(zap, &zap->zap_f.zap_phys->zap_ptrtbl,
366 367 zap_ptrtbl_transfer, tx));
367 368 }
368 369 }
369 370
370 371 static void
371 372 zap_increment_num_entries(zap_t *zap, int delta, dmu_tx_t *tx)
372 373 {
373 374 dmu_buf_will_dirty(zap->zap_dbuf, tx);
374 375 mutex_enter(&zap->zap_f.zap_num_entries_mtx);
375 376 ASSERT(delta > 0 || zap->zap_f.zap_phys->zap_num_entries >= -delta);
376 377 zap->zap_f.zap_phys->zap_num_entries += delta;
377 378 mutex_exit(&zap->zap_f.zap_num_entries_mtx);
378 379 }
379 380
380 381 static uint64_t
381 382 zap_allocate_blocks(zap_t *zap, int nblocks)
382 383 {
383 384 uint64_t newblk;
384 385 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
385 386 newblk = zap->zap_f.zap_phys->zap_freeblk;
386 387 zap->zap_f.zap_phys->zap_freeblk += nblocks;
387 388 return (newblk);
388 389 }
389 390
390 391 static zap_leaf_t *
391 392 zap_create_leaf(zap_t *zap, dmu_tx_t *tx)
392 393 {
393 394 void *winner;
394 395 zap_leaf_t *l = kmem_alloc(sizeof (zap_leaf_t), KM_SLEEP);
395 396
396 397 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
397 398
398 399 rw_init(&l->l_rwlock, 0, 0, 0);
399 400 rw_enter(&l->l_rwlock, RW_WRITER);
400 401 l->l_blkid = zap_allocate_blocks(zap, 1);
401 402 l->l_dbuf = NULL;
402 403 l->l_phys = NULL;
403 404
404 405 VERIFY(0 == dmu_buf_hold(zap->zap_objset, zap->zap_object,
405 406 l->l_blkid << FZAP_BLOCK_SHIFT(zap), NULL, &l->l_dbuf,
406 407 DMU_READ_NO_PREFETCH));
407 408 winner = dmu_buf_set_user(l->l_dbuf, l, &l->l_phys, zap_leaf_pageout);
408 409 ASSERT(winner == NULL);
409 410 dmu_buf_will_dirty(l->l_dbuf, tx);
410 411
411 412 zap_leaf_init(l, zap->zap_normflags != 0);
412 413
413 414 zap->zap_f.zap_phys->zap_num_leafs++;
414 415
415 416 return (l);
416 417 }
417 418
418 419 int
419 420 fzap_count(zap_t *zap, uint64_t *count)
420 421 {
421 422 ASSERT(!zap->zap_ismicro);
422 423 mutex_enter(&zap->zap_f.zap_num_entries_mtx); /* unnecessary */
423 424 *count = zap->zap_f.zap_phys->zap_num_entries;
424 425 mutex_exit(&zap->zap_f.zap_num_entries_mtx);
425 426 return (0);
426 427 }
427 428
428 429 /*
429 430 * Routines for obtaining zap_leaf_t's
430 431 */
431 432
432 433 void
433 434 zap_put_leaf(zap_leaf_t *l)
434 435 {
435 436 rw_exit(&l->l_rwlock);
436 437 dmu_buf_rele(l->l_dbuf, NULL);
437 438 }
438 439
439 440 _NOTE(ARGSUSED(0))
440 441 static void
441 442 zap_leaf_pageout(dmu_buf_t *db, void *vl)
442 443 {
443 444 zap_leaf_t *l = vl;
444 445
445 446 rw_destroy(&l->l_rwlock);
446 447 kmem_free(l, sizeof (zap_leaf_t));
447 448 }
448 449
449 450 static zap_leaf_t *
450 451 zap_open_leaf(uint64_t blkid, dmu_buf_t *db)
451 452 {
452 453 zap_leaf_t *l, *winner;
453 454
454 455 ASSERT(blkid != 0);
455 456
456 457 l = kmem_alloc(sizeof (zap_leaf_t), KM_SLEEP);
457 458 rw_init(&l->l_rwlock, 0, 0, 0);
458 459 rw_enter(&l->l_rwlock, RW_WRITER);
459 460 l->l_blkid = blkid;
460 461 l->l_bs = highbit(db->db_size)-1;
461 462 l->l_dbuf = db;
462 463 l->l_phys = NULL;
463 464
464 465 winner = dmu_buf_set_user(db, l, &l->l_phys, zap_leaf_pageout);
465 466
466 467 rw_exit(&l->l_rwlock);
467 468 if (winner != NULL) {
468 469 /* someone else set it first */
469 470 zap_leaf_pageout(NULL, l);
470 471 l = winner;
471 472 }
472 473
473 474 /*
474 475 * lhr_pad was previously used for the next leaf in the leaf
475 476 * chain. There should be no chained leafs (as we have removed
476 477 * support for them).
477 478 */
478 479 ASSERT0(l->l_phys->l_hdr.lh_pad1);
479 480
480 481 /*
481 482 * There should be more hash entries than there can be
482 483 * chunks to put in the hash table
483 484 */
484 485 ASSERT3U(ZAP_LEAF_HASH_NUMENTRIES(l), >, ZAP_LEAF_NUMCHUNKS(l) / 3);
485 486
486 487 /* The chunks should begin at the end of the hash table */
487 488 ASSERT3P(&ZAP_LEAF_CHUNK(l, 0), ==,
488 489 &l->l_phys->l_hash[ZAP_LEAF_HASH_NUMENTRIES(l)]);
489 490
490 491 /* The chunks should end at the end of the block */
491 492 ASSERT3U((uintptr_t)&ZAP_LEAF_CHUNK(l, ZAP_LEAF_NUMCHUNKS(l)) -
492 493 (uintptr_t)l->l_phys, ==, l->l_dbuf->db_size);
493 494
494 495 return (l);
495 496 }
496 497
497 498 static int
498 499 zap_get_leaf_byblk(zap_t *zap, uint64_t blkid, dmu_tx_t *tx, krw_t lt,
499 500 zap_leaf_t **lp)
500 501 {
501 502 dmu_buf_t *db;
502 503 zap_leaf_t *l;
503 504 int bs = FZAP_BLOCK_SHIFT(zap);
504 505 int err;
505 506
506 507 ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
507 508
508 509 err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
509 510 blkid << bs, NULL, &db, DMU_READ_NO_PREFETCH);
510 511 if (err)
511 512 return (err);
512 513
513 514 ASSERT3U(db->db_object, ==, zap->zap_object);
514 515 ASSERT3U(db->db_offset, ==, blkid << bs);
515 516 ASSERT3U(db->db_size, ==, 1 << bs);
516 517 ASSERT(blkid != 0);
517 518
518 519 l = dmu_buf_get_user(db);
519 520
520 521 if (l == NULL)
521 522 l = zap_open_leaf(blkid, db);
522 523
523 524 rw_enter(&l->l_rwlock, lt);
524 525 /*
525 526 * Must lock before dirtying, otherwise l->l_phys could change,
526 527 * causing ASSERT below to fail.
527 528 */
528 529 if (lt == RW_WRITER)
529 530 dmu_buf_will_dirty(db, tx);
530 531 ASSERT3U(l->l_blkid, ==, blkid);
531 532 ASSERT3P(l->l_dbuf, ==, db);
532 533 ASSERT3P(l->l_phys, ==, l->l_dbuf->db_data);
533 534 ASSERT3U(l->l_phys->l_hdr.lh_block_type, ==, ZBT_LEAF);
534 535 ASSERT3U(l->l_phys->l_hdr.lh_magic, ==, ZAP_LEAF_MAGIC);
535 536
536 537 *lp = l;
537 538 return (0);
538 539 }
539 540
540 541 static int
541 542 zap_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t *valp)
542 543 {
543 544 ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
544 545
545 546 if (zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks == 0) {
546 547 ASSERT3U(idx, <,
547 548 (1ULL << zap->zap_f.zap_phys->zap_ptrtbl.zt_shift));
548 549 *valp = ZAP_EMBEDDED_PTRTBL_ENT(zap, idx);
549 550 return (0);
550 551 } else {
551 552 return (zap_table_load(zap, &zap->zap_f.zap_phys->zap_ptrtbl,
552 553 idx, valp));
553 554 }
554 555 }
555 556
556 557 static int
557 558 zap_set_idx_to_blk(zap_t *zap, uint64_t idx, uint64_t blk, dmu_tx_t *tx)
558 559 {
559 560 ASSERT(tx != NULL);
560 561 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
561 562
562 563 if (zap->zap_f.zap_phys->zap_ptrtbl.zt_blk == 0) {
563 564 ZAP_EMBEDDED_PTRTBL_ENT(zap, idx) = blk;
564 565 return (0);
565 566 } else {
566 567 return (zap_table_store(zap, &zap->zap_f.zap_phys->zap_ptrtbl,
567 568 idx, blk, tx));
568 569 }
569 570 }
570 571
571 572 static int
572 573 zap_deref_leaf(zap_t *zap, uint64_t h, dmu_tx_t *tx, krw_t lt, zap_leaf_t **lp)
573 574 {
574 575 uint64_t idx, blk;
575 576 int err;
576 577
577 578 ASSERT(zap->zap_dbuf == NULL ||
578 579 zap->zap_f.zap_phys == zap->zap_dbuf->db_data);
579 580 ASSERT3U(zap->zap_f.zap_phys->zap_magic, ==, ZAP_MAGIC);
580 581 idx = ZAP_HASH_IDX(h, zap->zap_f.zap_phys->zap_ptrtbl.zt_shift);
581 582 err = zap_idx_to_blk(zap, idx, &blk);
582 583 if (err != 0)
583 584 return (err);
584 585 err = zap_get_leaf_byblk(zap, blk, tx, lt, lp);
585 586
586 587 ASSERT(err || ZAP_HASH_IDX(h, (*lp)->l_phys->l_hdr.lh_prefix_len) ==
587 588 (*lp)->l_phys->l_hdr.lh_prefix);
588 589 return (err);
589 590 }
590 591
591 592 static int
592 593 zap_expand_leaf(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx, zap_leaf_t **lp)
593 594 {
594 595 zap_t *zap = zn->zn_zap;
595 596 uint64_t hash = zn->zn_hash;
596 597 zap_leaf_t *nl;
597 598 int prefix_diff, i, err;
598 599 uint64_t sibling;
599 600 int old_prefix_len = l->l_phys->l_hdr.lh_prefix_len;
600 601
601 602 ASSERT3U(old_prefix_len, <=, zap->zap_f.zap_phys->zap_ptrtbl.zt_shift);
602 603 ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
603 604
604 605 ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
605 606 l->l_phys->l_hdr.lh_prefix);
606 607
607 608 if (zap_tryupgradedir(zap, tx) == 0 ||
608 609 old_prefix_len == zap->zap_f.zap_phys->zap_ptrtbl.zt_shift) {
609 610 /* We failed to upgrade, or need to grow the pointer table */
610 611 objset_t *os = zap->zap_objset;
611 612 uint64_t object = zap->zap_object;
612 613
613 614 zap_put_leaf(l);
614 615 zap_unlockdir(zap);
615 616 err = zap_lockdir(os, object, tx, RW_WRITER,
616 617 FALSE, FALSE, &zn->zn_zap);
617 618 zap = zn->zn_zap;
618 619 if (err)
619 620 return (err);
620 621 ASSERT(!zap->zap_ismicro);
621 622
622 623 while (old_prefix_len ==
623 624 zap->zap_f.zap_phys->zap_ptrtbl.zt_shift) {
624 625 err = zap_grow_ptrtbl(zap, tx);
625 626 if (err)
626 627 return (err);
627 628 }
628 629
629 630 err = zap_deref_leaf(zap, hash, tx, RW_WRITER, &l);
630 631 if (err)
631 632 return (err);
632 633
633 634 if (l->l_phys->l_hdr.lh_prefix_len != old_prefix_len) {
634 635 /* it split while our locks were down */
635 636 *lp = l;
636 637 return (0);
637 638 }
638 639 }
639 640 ASSERT(RW_WRITE_HELD(&zap->zap_rwlock));
640 641 ASSERT3U(old_prefix_len, <, zap->zap_f.zap_phys->zap_ptrtbl.zt_shift);
641 642 ASSERT3U(ZAP_HASH_IDX(hash, old_prefix_len), ==,
642 643 l->l_phys->l_hdr.lh_prefix);
643 644
644 645 prefix_diff = zap->zap_f.zap_phys->zap_ptrtbl.zt_shift -
645 646 (old_prefix_len + 1);
646 647 sibling = (ZAP_HASH_IDX(hash, old_prefix_len + 1) | 1) << prefix_diff;
647 648
648 649 /* check for i/o errors before doing zap_leaf_split */
649 650 for (i = 0; i < (1ULL<<prefix_diff); i++) {
650 651 uint64_t blk;
651 652 err = zap_idx_to_blk(zap, sibling+i, &blk);
652 653 if (err)
653 654 return (err);
654 655 ASSERT3U(blk, ==, l->l_blkid);
655 656 }
656 657
657 658 nl = zap_create_leaf(zap, tx);
658 659 zap_leaf_split(l, nl, zap->zap_normflags != 0);
659 660
660 661 /* set sibling pointers */
661 662 for (i = 0; i < (1ULL << prefix_diff); i++) {
662 663 err = zap_set_idx_to_blk(zap, sibling+i, nl->l_blkid, tx);
663 664 ASSERT0(err); /* we checked for i/o errors above */
664 665 }
665 666
666 667 if (hash & (1ULL << (64 - l->l_phys->l_hdr.lh_prefix_len))) {
667 668 /* we want the sibling */
668 669 zap_put_leaf(l);
669 670 *lp = nl;
670 671 } else {
671 672 zap_put_leaf(nl);
672 673 *lp = l;
673 674 }
674 675
675 676 return (0);
676 677 }
677 678
678 679 static void
679 680 zap_put_leaf_maybe_grow_ptrtbl(zap_name_t *zn, zap_leaf_t *l, dmu_tx_t *tx)
680 681 {
681 682 zap_t *zap = zn->zn_zap;
682 683 int shift = zap->zap_f.zap_phys->zap_ptrtbl.zt_shift;
683 684 int leaffull = (l->l_phys->l_hdr.lh_prefix_len == shift &&
684 685 l->l_phys->l_hdr.lh_nfree < ZAP_LEAF_LOW_WATER);
685 686
686 687 zap_put_leaf(l);
687 688
688 689 if (leaffull || zap->zap_f.zap_phys->zap_ptrtbl.zt_nextblk) {
689 690 int err;
690 691
691 692 /*
692 693 * We are in the middle of growing the pointer table, or
693 694 * this leaf will soon make us grow it.
694 695 */
695 696 if (zap_tryupgradedir(zap, tx) == 0) {
696 697 objset_t *os = zap->zap_objset;
697 698 uint64_t zapobj = zap->zap_object;
698 699
699 700 zap_unlockdir(zap);
700 701 err = zap_lockdir(os, zapobj, tx,
701 702 RW_WRITER, FALSE, FALSE, &zn->zn_zap);
702 703 zap = zn->zn_zap;
703 704 if (err)
704 705 return;
705 706 }
706 707
707 708 /* could have finished growing while our locks were down */
708 709 if (zap->zap_f.zap_phys->zap_ptrtbl.zt_shift == shift)
709 710 (void) zap_grow_ptrtbl(zap, tx);
710 711 }
711 712 }
712 713
713 714 static int
714 715 fzap_checkname(zap_name_t *zn)
715 716 {
716 717 if (zn->zn_key_orig_numints * zn->zn_key_intlen > ZAP_MAXNAMELEN)
717 718 return (SET_ERROR(ENAMETOOLONG));
718 719 return (0);
719 720 }
720 721
721 722 static int
722 723 fzap_checksize(uint64_t integer_size, uint64_t num_integers)
723 724 {
724 725 /* Only integer sizes supported by C */
725 726 switch (integer_size) {
726 727 case 1:
727 728 case 2:
728 729 case 4:
729 730 case 8:
730 731 break;
731 732 default:
732 733 return (SET_ERROR(EINVAL));
733 734 }
734 735
735 736 if (integer_size * num_integers > ZAP_MAXVALUELEN)
736 737 return (E2BIG);
737 738
738 739 return (0);
739 740 }
740 741
741 742 static int
742 743 fzap_check(zap_name_t *zn, uint64_t integer_size, uint64_t num_integers)
743 744 {
744 745 int err;
745 746
746 747 if ((err = fzap_checkname(zn)) != 0)
747 748 return (err);
748 749 return (fzap_checksize(integer_size, num_integers));
749 750 }
750 751
751 752 /*
752 753 * Routines for manipulating attributes.
753 754 */
754 755 int
755 756 fzap_lookup(zap_name_t *zn,
756 757 uint64_t integer_size, uint64_t num_integers, void *buf,
757 758 char *realname, int rn_len, boolean_t *ncp)
758 759 {
759 760 zap_leaf_t *l;
760 761 int err;
761 762 zap_entry_handle_t zeh;
762 763
763 764 if ((err = fzap_checkname(zn)) != 0)
764 765 return (err);
765 766
766 767 err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l);
767 768 if (err != 0)
768 769 return (err);
769 770 err = zap_leaf_lookup(l, zn, &zeh);
770 771 if (err == 0) {
771 772 if ((err = fzap_checksize(integer_size, num_integers)) != 0) {
772 773 zap_put_leaf(l);
773 774 return (err);
774 775 }
775 776
776 777 err = zap_entry_read(&zeh, integer_size, num_integers, buf);
777 778 (void) zap_entry_read_name(zn->zn_zap, &zeh, rn_len, realname);
778 779 if (ncp) {
779 780 *ncp = zap_entry_normalization_conflict(&zeh,
780 781 zn, NULL, zn->zn_zap);
781 782 }
782 783 }
783 784
784 785 zap_put_leaf(l);
785 786 return (err);
786 787 }
787 788
788 789 int
789 790 fzap_add_cd(zap_name_t *zn,
790 791 uint64_t integer_size, uint64_t num_integers,
791 792 const void *val, uint32_t cd, dmu_tx_t *tx)
792 793 {
793 794 zap_leaf_t *l;
794 795 int err;
795 796 zap_entry_handle_t zeh;
796 797 zap_t *zap = zn->zn_zap;
797 798
798 799 ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
799 800 ASSERT(!zap->zap_ismicro);
800 801 ASSERT(fzap_check(zn, integer_size, num_integers) == 0);
801 802
802 803 err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
803 804 if (err != 0)
804 805 return (err);
805 806 retry:
806 807 err = zap_leaf_lookup(l, zn, &zeh);
807 808 if (err == 0) {
808 809 err = SET_ERROR(EEXIST);
809 810 goto out;
810 811 }
811 812 if (err != ENOENT)
812 813 goto out;
813 814
814 815 err = zap_entry_create(l, zn, cd,
815 816 integer_size, num_integers, val, &zeh);
816 817
817 818 if (err == 0) {
818 819 zap_increment_num_entries(zap, 1, tx);
819 820 } else if (err == EAGAIN) {
820 821 err = zap_expand_leaf(zn, l, tx, &l);
821 822 zap = zn->zn_zap; /* zap_expand_leaf() may change zap */
822 823 if (err == 0)
823 824 goto retry;
824 825 }
825 826
826 827 out:
827 828 if (zap != NULL)
828 829 zap_put_leaf_maybe_grow_ptrtbl(zn, l, tx);
829 830 return (err);
830 831 }
831 832
832 833 int
833 834 fzap_add(zap_name_t *zn,
834 835 uint64_t integer_size, uint64_t num_integers,
835 836 const void *val, dmu_tx_t *tx)
836 837 {
837 838 int err = fzap_check(zn, integer_size, num_integers);
838 839 if (err != 0)
839 840 return (err);
840 841
841 842 return (fzap_add_cd(zn, integer_size, num_integers,
842 843 val, ZAP_NEED_CD, tx));
843 844 }
844 845
845 846 int
846 847 fzap_update(zap_name_t *zn,
847 848 int integer_size, uint64_t num_integers, const void *val, dmu_tx_t *tx)
848 849 {
849 850 zap_leaf_t *l;
850 851 int err, create;
851 852 zap_entry_handle_t zeh;
852 853 zap_t *zap = zn->zn_zap;
853 854
854 855 ASSERT(RW_LOCK_HELD(&zap->zap_rwlock));
855 856 err = fzap_check(zn, integer_size, num_integers);
856 857 if (err != 0)
857 858 return (err);
858 859
859 860 err = zap_deref_leaf(zap, zn->zn_hash, tx, RW_WRITER, &l);
860 861 if (err != 0)
861 862 return (err);
862 863 retry:
863 864 err = zap_leaf_lookup(l, zn, &zeh);
864 865 create = (err == ENOENT);
865 866 ASSERT(err == 0 || err == ENOENT);
866 867
867 868 if (create) {
868 869 err = zap_entry_create(l, zn, ZAP_NEED_CD,
869 870 integer_size, num_integers, val, &zeh);
870 871 if (err == 0)
871 872 zap_increment_num_entries(zap, 1, tx);
872 873 } else {
873 874 err = zap_entry_update(&zeh, integer_size, num_integers, val);
874 875 }
875 876
876 877 if (err == EAGAIN) {
877 878 err = zap_expand_leaf(zn, l, tx, &l);
878 879 zap = zn->zn_zap; /* zap_expand_leaf() may change zap */
879 880 if (err == 0)
880 881 goto retry;
881 882 }
882 883
883 884 if (zap != NULL)
884 885 zap_put_leaf_maybe_grow_ptrtbl(zn, l, tx);
885 886 return (err);
886 887 }
887 888
888 889 int
889 890 fzap_length(zap_name_t *zn,
890 891 uint64_t *integer_size, uint64_t *num_integers)
891 892 {
892 893 zap_leaf_t *l;
893 894 int err;
894 895 zap_entry_handle_t zeh;
895 896
896 897 err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, NULL, RW_READER, &l);
897 898 if (err != 0)
898 899 return (err);
899 900 err = zap_leaf_lookup(l, zn, &zeh);
900 901 if (err != 0)
901 902 goto out;
902 903
903 904 if (integer_size)
904 905 *integer_size = zeh.zeh_integer_size;
905 906 if (num_integers)
906 907 *num_integers = zeh.zeh_num_integers;
907 908 out:
908 909 zap_put_leaf(l);
909 910 return (err);
910 911 }
911 912
912 913 int
913 914 fzap_remove(zap_name_t *zn, dmu_tx_t *tx)
914 915 {
915 916 zap_leaf_t *l;
916 917 int err;
917 918 zap_entry_handle_t zeh;
918 919
919 920 err = zap_deref_leaf(zn->zn_zap, zn->zn_hash, tx, RW_WRITER, &l);
920 921 if (err != 0)
921 922 return (err);
922 923 err = zap_leaf_lookup(l, zn, &zeh);
923 924 if (err == 0) {
924 925 zap_entry_remove(&zeh);
925 926 zap_increment_num_entries(zn->zn_zap, -1, tx);
926 927 }
927 928 zap_put_leaf(l);
928 929 return (err);
929 930 }
930 931
931 932 void
932 933 fzap_prefetch(zap_name_t *zn)
933 934 {
934 935 uint64_t idx, blk;
935 936 zap_t *zap = zn->zn_zap;
936 937 int bs;
937 938
938 939 idx = ZAP_HASH_IDX(zn->zn_hash,
939 940 zap->zap_f.zap_phys->zap_ptrtbl.zt_shift);
940 941 if (zap_idx_to_blk(zap, idx, &blk) != 0)
941 942 return;
942 943 bs = FZAP_BLOCK_SHIFT(zap);
943 944 dmu_prefetch(zap->zap_objset, zap->zap_object, blk << bs, 1 << bs);
944 945 }
945 946
946 947 /*
947 948 * Helper functions for consumers.
948 949 */
949 950
950 951 uint64_t
951 952 zap_create_link(objset_t *os, dmu_object_type_t ot, uint64_t parent_obj,
952 953 const char *name, dmu_tx_t *tx)
953 954 {
954 955 uint64_t new_obj;
955 956
956 957 VERIFY((new_obj = zap_create(os, ot, DMU_OT_NONE, 0, tx)) > 0);
957 958 VERIFY(zap_add(os, parent_obj, name, sizeof (uint64_t), 1, &new_obj,
958 959 tx) == 0);
959 960
960 961 return (new_obj);
961 962 }
962 963
963 964 int
964 965 zap_value_search(objset_t *os, uint64_t zapobj, uint64_t value, uint64_t mask,
965 966 char *name)
966 967 {
967 968 zap_cursor_t zc;
968 969 zap_attribute_t *za;
969 970 int err;
970 971
971 972 if (mask == 0)
972 973 mask = -1ULL;
973 974
974 975 za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
975 976 for (zap_cursor_init(&zc, os, zapobj);
976 977 (err = zap_cursor_retrieve(&zc, za)) == 0;
977 978 zap_cursor_advance(&zc)) {
978 979 if ((za->za_first_integer & mask) == (value & mask)) {
979 980 (void) strcpy(name, za->za_name);
980 981 break;
981 982 }
982 983 }
983 984 zap_cursor_fini(&zc);
984 985 kmem_free(za, sizeof (zap_attribute_t));
985 986 return (err);
986 987 }
987 988
988 989 int
989 990 zap_join(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx)
990 991 {
991 992 zap_cursor_t zc;
992 993 zap_attribute_t za;
993 994 int err;
994 995
996 + err = 0;
995 997 for (zap_cursor_init(&zc, os, fromobj);
996 998 zap_cursor_retrieve(&zc, &za) == 0;
997 999 (void) zap_cursor_advance(&zc)) {
998 - if (za.za_integer_length != 8 || za.za_num_integers != 1)
999 - return (SET_ERROR(EINVAL));
1000 + if (za.za_integer_length != 8 || za.za_num_integers != 1) {
1001 + err = SET_ERROR(EINVAL);
1002 + break;
1003 + }
1000 1004 err = zap_add(os, intoobj, za.za_name,
1001 1005 8, 1, &za.za_first_integer, tx);
1002 1006 if (err)
1003 - return (err);
1007 + break;
1004 1008 }
1005 1009 zap_cursor_fini(&zc);
1006 - return (0);
1010 + return (err);
1007 1011 }
1008 1012
1009 1013 int
1010 1014 zap_join_key(objset_t *os, uint64_t fromobj, uint64_t intoobj,
1011 1015 uint64_t value, dmu_tx_t *tx)
1012 1016 {
1013 1017 zap_cursor_t zc;
1014 1018 zap_attribute_t za;
1015 1019 int err;
1016 1020
1021 + err = 0;
1017 1022 for (zap_cursor_init(&zc, os, fromobj);
1018 1023 zap_cursor_retrieve(&zc, &za) == 0;
1019 1024 (void) zap_cursor_advance(&zc)) {
1020 - if (za.za_integer_length != 8 || za.za_num_integers != 1)
1021 - return (SET_ERROR(EINVAL));
1025 + if (za.za_integer_length != 8 || za.za_num_integers != 1) {
1026 + err = SET_ERROR(EINVAL);
1027 + break;
1028 + }
1022 1029 err = zap_add(os, intoobj, za.za_name,
1023 1030 8, 1, &value, tx);
1024 1031 if (err)
1025 - return (err);
1032 + break;
1026 1033 }
1027 1034 zap_cursor_fini(&zc);
1028 - return (0);
1035 + return (err);
1029 1036 }
1030 1037
1031 1038 int
1032 1039 zap_join_increment(objset_t *os, uint64_t fromobj, uint64_t intoobj,
1033 1040 dmu_tx_t *tx)
1034 1041 {
1035 1042 zap_cursor_t zc;
1036 1043 zap_attribute_t za;
1037 1044 int err;
1038 1045
1046 + err = 0;
1039 1047 for (zap_cursor_init(&zc, os, fromobj);
1040 1048 zap_cursor_retrieve(&zc, &za) == 0;
1041 1049 (void) zap_cursor_advance(&zc)) {
1042 1050 uint64_t delta = 0;
1043 1051
1044 - if (za.za_integer_length != 8 || za.za_num_integers != 1)
1045 - return (SET_ERROR(EINVAL));
1052 + if (za.za_integer_length != 8 || za.za_num_integers != 1) {
1053 + err = SET_ERROR(EINVAL);
1054 + break;
1055 + }
1046 1056
1047 1057 err = zap_lookup(os, intoobj, za.za_name, 8, 1, &delta);
1048 1058 if (err != 0 && err != ENOENT)
1049 - return (err);
1059 + break;
1050 1060 delta += za.za_first_integer;
1051 1061 err = zap_update(os, intoobj, za.za_name, 8, 1, &delta, tx);
1052 1062 if (err)
1053 - return (err);
1063 + break;
1054 1064 }
1055 1065 zap_cursor_fini(&zc);
1056 - return (0);
1066 + return (err);
1057 1067 }
1058 1068
1059 1069 int
1060 1070 zap_add_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx)
1061 1071 {
1062 1072 char name[20];
1063 1073
1064 1074 (void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
1065 1075 return (zap_add(os, obj, name, 8, 1, &value, tx));
1066 1076 }
1067 1077
1068 1078 int
1069 1079 zap_remove_int(objset_t *os, uint64_t obj, uint64_t value, dmu_tx_t *tx)
1070 1080 {
1071 1081 char name[20];
1072 1082
1073 1083 (void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
1074 1084 return (zap_remove(os, obj, name, tx));
1075 1085 }
1076 1086
1077 1087 int
1078 1088 zap_lookup_int(objset_t *os, uint64_t obj, uint64_t value)
1079 1089 {
1080 1090 char name[20];
1081 1091
1082 1092 (void) snprintf(name, sizeof (name), "%llx", (longlong_t)value);
1083 1093 return (zap_lookup(os, obj, name, 8, 1, &value));
1084 1094 }
1085 1095
1086 1096 int
1087 1097 zap_add_int_key(objset_t *os, uint64_t obj,
1088 1098 uint64_t key, uint64_t value, dmu_tx_t *tx)
1089 1099 {
1090 1100 char name[20];
1091 1101
1092 1102 (void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
1093 1103 return (zap_add(os, obj, name, 8, 1, &value, tx));
1094 1104 }
1095 1105
1096 1106 int
1097 1107 zap_update_int_key(objset_t *os, uint64_t obj,
1098 1108 uint64_t key, uint64_t value, dmu_tx_t *tx)
1099 1109 {
1100 1110 char name[20];
1101 1111
1102 1112 (void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
1103 1113 return (zap_update(os, obj, name, 8, 1, &value, tx));
1104 1114 }
1105 1115
1106 1116 int
1107 1117 zap_lookup_int_key(objset_t *os, uint64_t obj, uint64_t key, uint64_t *valuep)
1108 1118 {
1109 1119 char name[20];
1110 1120
1111 1121 (void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
1112 1122 return (zap_lookup(os, obj, name, 8, 1, valuep));
1113 1123 }
1114 1124
1115 1125 int
1116 1126 zap_increment(objset_t *os, uint64_t obj, const char *name, int64_t delta,
1117 1127 dmu_tx_t *tx)
1118 1128 {
1119 1129 uint64_t value = 0;
1120 1130 int err;
1121 1131
1122 1132 if (delta == 0)
1123 1133 return (0);
1124 1134
1125 1135 err = zap_lookup(os, obj, name, 8, 1, &value);
1126 1136 if (err != 0 && err != ENOENT)
1127 1137 return (err);
1128 1138 value += delta;
1129 1139 if (value == 0)
1130 1140 err = zap_remove(os, obj, name, tx);
1131 1141 else
1132 1142 err = zap_update(os, obj, name, 8, 1, &value, tx);
1133 1143 return (err);
1134 1144 }
1135 1145
1136 1146 int
1137 1147 zap_increment_int(objset_t *os, uint64_t obj, uint64_t key, int64_t delta,
1138 1148 dmu_tx_t *tx)
1139 1149 {
1140 1150 char name[20];
1141 1151
1142 1152 (void) snprintf(name, sizeof (name), "%llx", (longlong_t)key);
1143 1153 return (zap_increment(os, obj, name, delta, tx));
1144 1154 }
1145 1155
1146 1156 /*
1147 1157 * Routines for iterating over the attributes.
1148 1158 */
1149 1159
1150 1160 int
1151 1161 fzap_cursor_retrieve(zap_t *zap, zap_cursor_t *zc, zap_attribute_t *za)
1152 1162 {
1153 1163 int err = ENOENT;
1154 1164 zap_entry_handle_t zeh;
1155 1165 zap_leaf_t *l;
1156 1166
1157 1167 /* retrieve the next entry at or after zc_hash/zc_cd */
1158 1168 /* if no entry, return ENOENT */
1159 1169
1160 1170 if (zc->zc_leaf &&
1161 1171 (ZAP_HASH_IDX(zc->zc_hash,
1162 1172 zc->zc_leaf->l_phys->l_hdr.lh_prefix_len) !=
1163 1173 zc->zc_leaf->l_phys->l_hdr.lh_prefix)) {
1164 1174 rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);
1165 1175 zap_put_leaf(zc->zc_leaf);
1166 1176 zc->zc_leaf = NULL;
1167 1177 }
1168 1178
1169 1179 again:
1170 1180 if (zc->zc_leaf == NULL) {
1171 1181 err = zap_deref_leaf(zap, zc->zc_hash, NULL, RW_READER,
1172 1182 &zc->zc_leaf);
1173 1183 if (err != 0)
1174 1184 return (err);
1175 1185 } else {
1176 1186 rw_enter(&zc->zc_leaf->l_rwlock, RW_READER);
1177 1187 }
1178 1188 l = zc->zc_leaf;
1179 1189
1180 1190 err = zap_leaf_lookup_closest(l, zc->zc_hash, zc->zc_cd, &zeh);
1181 1191
1182 1192 if (err == ENOENT) {
1183 1193 uint64_t nocare =
1184 1194 (1ULL << (64 - l->l_phys->l_hdr.lh_prefix_len)) - 1;
1185 1195 zc->zc_hash = (zc->zc_hash & ~nocare) + nocare + 1;
1186 1196 zc->zc_cd = 0;
1187 1197 if (l->l_phys->l_hdr.lh_prefix_len == 0 || zc->zc_hash == 0) {
1188 1198 zc->zc_hash = -1ULL;
1189 1199 } else {
1190 1200 zap_put_leaf(zc->zc_leaf);
1191 1201 zc->zc_leaf = NULL;
1192 1202 goto again;
1193 1203 }
1194 1204 }
1195 1205
1196 1206 if (err == 0) {
1197 1207 zc->zc_hash = zeh.zeh_hash;
1198 1208 zc->zc_cd = zeh.zeh_cd;
1199 1209 za->za_integer_length = zeh.zeh_integer_size;
1200 1210 za->za_num_integers = zeh.zeh_num_integers;
1201 1211 if (zeh.zeh_num_integers == 0) {
1202 1212 za->za_first_integer = 0;
1203 1213 } else {
1204 1214 err = zap_entry_read(&zeh, 8, 1, &za->za_first_integer);
1205 1215 ASSERT(err == 0 || err == EOVERFLOW);
1206 1216 }
1207 1217 err = zap_entry_read_name(zap, &zeh,
1208 1218 sizeof (za->za_name), za->za_name);
1209 1219 ASSERT(err == 0);
1210 1220
1211 1221 za->za_normalization_conflict =
1212 1222 zap_entry_normalization_conflict(&zeh,
1213 1223 NULL, za->za_name, zap);
1214 1224 }
1215 1225 rw_exit(&zc->zc_leaf->l_rwlock);
1216 1226 return (err);
1217 1227 }
1218 1228
1219 1229 static void
1220 1230 zap_stats_ptrtbl(zap_t *zap, uint64_t *tbl, int len, zap_stats_t *zs)
1221 1231 {
1222 1232 int i, err;
1223 1233 uint64_t lastblk = 0;
1224 1234
1225 1235 /*
1226 1236 * NB: if a leaf has more pointers than an entire ptrtbl block
1227 1237 * can hold, then it'll be accounted for more than once, since
1228 1238 * we won't have lastblk.
1229 1239 */
1230 1240 for (i = 0; i < len; i++) {
1231 1241 zap_leaf_t *l;
1232 1242
1233 1243 if (tbl[i] == lastblk)
1234 1244 continue;
1235 1245 lastblk = tbl[i];
1236 1246
1237 1247 err = zap_get_leaf_byblk(zap, tbl[i], NULL, RW_READER, &l);
1238 1248 if (err == 0) {
1239 1249 zap_leaf_stats(zap, l, zs);
1240 1250 zap_put_leaf(l);
1241 1251 }
1242 1252 }
1243 1253 }
1244 1254
1245 1255 int
1246 1256 fzap_cursor_move_to_key(zap_cursor_t *zc, zap_name_t *zn)
1247 1257 {
1248 1258 int err;
1249 1259 zap_leaf_t *l;
1250 1260 zap_entry_handle_t zeh;
1251 1261
1252 1262 if (zn->zn_key_orig_numints * zn->zn_key_intlen > ZAP_MAXNAMELEN)
1253 1263 return (SET_ERROR(ENAMETOOLONG));
1254 1264
1255 1265 err = zap_deref_leaf(zc->zc_zap, zn->zn_hash, NULL, RW_READER, &l);
1256 1266 if (err != 0)
1257 1267 return (err);
1258 1268
1259 1269 err = zap_leaf_lookup(l, zn, &zeh);
1260 1270 if (err != 0)
1261 1271 return (err);
1262 1272
1263 1273 zc->zc_leaf = l;
1264 1274 zc->zc_hash = zeh.zeh_hash;
1265 1275 zc->zc_cd = zeh.zeh_cd;
1266 1276
1267 1277 return (err);
1268 1278 }
1269 1279
1270 1280 void
1271 1281 fzap_get_stats(zap_t *zap, zap_stats_t *zs)
1272 1282 {
1273 1283 int bs = FZAP_BLOCK_SHIFT(zap);
1274 1284 zs->zs_blocksize = 1ULL << bs;
1275 1285
1276 1286 /*
1277 1287 * Set zap_phys_t fields
1278 1288 */
1279 1289 zs->zs_num_leafs = zap->zap_f.zap_phys->zap_num_leafs;
1280 1290 zs->zs_num_entries = zap->zap_f.zap_phys->zap_num_entries;
1281 1291 zs->zs_num_blocks = zap->zap_f.zap_phys->zap_freeblk;
1282 1292 zs->zs_block_type = zap->zap_f.zap_phys->zap_block_type;
1283 1293 zs->zs_magic = zap->zap_f.zap_phys->zap_magic;
1284 1294 zs->zs_salt = zap->zap_f.zap_phys->zap_salt;
1285 1295
1286 1296 /*
1287 1297 * Set zap_ptrtbl fields
1288 1298 */
1289 1299 zs->zs_ptrtbl_len = 1ULL << zap->zap_f.zap_phys->zap_ptrtbl.zt_shift;
1290 1300 zs->zs_ptrtbl_nextblk = zap->zap_f.zap_phys->zap_ptrtbl.zt_nextblk;
1291 1301 zs->zs_ptrtbl_blks_copied =
1292 1302 zap->zap_f.zap_phys->zap_ptrtbl.zt_blks_copied;
1293 1303 zs->zs_ptrtbl_zt_blk = zap->zap_f.zap_phys->zap_ptrtbl.zt_blk;
1294 1304 zs->zs_ptrtbl_zt_numblks = zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks;
1295 1305 zs->zs_ptrtbl_zt_shift = zap->zap_f.zap_phys->zap_ptrtbl.zt_shift;
1296 1306
1297 1307 if (zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks == 0) {
1298 1308 /* the ptrtbl is entirely in the header block. */
1299 1309 zap_stats_ptrtbl(zap, &ZAP_EMBEDDED_PTRTBL_ENT(zap, 0),
1300 1310 1 << ZAP_EMBEDDED_PTRTBL_SHIFT(zap), zs);
1301 1311 } else {
1302 1312 int b;
1303 1313
1304 1314 dmu_prefetch(zap->zap_objset, zap->zap_object,
1305 1315 zap->zap_f.zap_phys->zap_ptrtbl.zt_blk << bs,
1306 1316 zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks << bs);
1307 1317
1308 1318 for (b = 0; b < zap->zap_f.zap_phys->zap_ptrtbl.zt_numblks;
1309 1319 b++) {
1310 1320 dmu_buf_t *db;
1311 1321 int err;
1312 1322
1313 1323 err = dmu_buf_hold(zap->zap_objset, zap->zap_object,
1314 1324 (zap->zap_f.zap_phys->zap_ptrtbl.zt_blk + b) << bs,
1315 1325 FTAG, &db, DMU_READ_NO_PREFETCH);
1316 1326 if (err == 0) {
1317 1327 zap_stats_ptrtbl(zap, db->db_data,
1318 1328 1<<(bs-3), zs);
1319 1329 dmu_buf_rele(db, FTAG);
1320 1330 }
1321 1331 }
1322 1332 }
1323 1333 }
1324 1334
1325 1335 int
1326 1336 fzap_count_write(zap_name_t *zn, int add, uint64_t *towrite,
1327 1337 uint64_t *tooverwrite)
1328 1338 {
1329 1339 zap_t *zap = zn->zn_zap;
1330 1340 zap_leaf_t *l;
1331 1341 int err;
1332 1342
1333 1343 /*
1334 1344 * Account for the header block of the fatzap.
1335 1345 */
1336 1346 if (!add && dmu_buf_freeable(zap->zap_dbuf)) {
1337 1347 *tooverwrite += zap->zap_dbuf->db_size;
1338 1348 } else {
1339 1349 *towrite += zap->zap_dbuf->db_size;
1340 1350 }
1341 1351
1342 1352 /*
1343 1353 * Account for the pointer table blocks.
1344 1354 * If we are adding we need to account for the following cases :
1345 1355 * - If the pointer table is embedded, this operation could force an
1346 1356 * external pointer table.
1347 1357 * - If this already has an external pointer table this operation
1348 1358 * could extend the table.
1349 1359 */
1350 1360 if (add) {
1351 1361 if (zap->zap_f.zap_phys->zap_ptrtbl.zt_blk == 0)
1352 1362 *towrite += zap->zap_dbuf->db_size;
1353 1363 else
1354 1364 *towrite += (zap->zap_dbuf->db_size * 3);
1355 1365 }
1356 1366
1357 1367 /*
1358 1368 * Now, check if the block containing leaf is freeable
1359 1369 * and account accordingly.
1360 1370 */
1361 1371 err = zap_deref_leaf(zap, zn->zn_hash, NULL, RW_READER, &l);
1362 1372 if (err != 0) {
1363 1373 return (err);
1364 1374 }
1365 1375
1366 1376 if (!add && dmu_buf_freeable(l->l_dbuf)) {
1367 1377 *tooverwrite += l->l_dbuf->db_size;
1368 1378 } else {
1369 1379 /*
1370 1380 * If this an add operation, the leaf block could split.
1371 1381 * Hence, we need to account for an additional leaf block.
1372 1382 */
1373 1383 *towrite += (add ? 2 : 1) * l->l_dbuf->db_size;
1374 1384 }
1375 1385
1376 1386 zap_put_leaf(l);
1377 1387 return (0);
1378 1388 }
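The zap_join() family above now shares one loop shape: validate and add inside the cursor loop, break on any error, and fall through so zap_cursor_fini() runs before the saved error is returned. A minimal sketch of that shape (iterate_and_copy() is a hypothetical wrapper for illustration; the cursor and zap_add() calls are the interfaces used in the hunks above):

/*
 * Sketch only: never return from inside the cursor loop; break so that
 * zap_cursor_fini() runs on every path, then return the saved error.
 */
static int
iterate_and_copy(objset_t *os, uint64_t fromobj, uint64_t intoobj, dmu_tx_t *tx)
{
	zap_cursor_t zc;
	zap_attribute_t za;
	int err = 0;

	for (zap_cursor_init(&zc, os, fromobj);
	    zap_cursor_retrieve(&zc, &za) == 0;
	    (void) zap_cursor_advance(&zc)) {
		if (za.za_integer_length != 8 || za.za_num_integers != 1) {
			err = SET_ERROR(EINVAL);
			break;		/* don't return with the cursor open */
		}
		err = zap_add(os, intoobj, za.za_name,
		    8, 1, &za.za_first_integer, tx);
		if (err)
			break;
	}
	zap_cursor_fini(&zc);
	return (err);
}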