Print this page
Integrated Edon-R hash function.
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/grub/grub-0.97/stage2/fsys_zfs.c
+++ new/usr/src/grub/grub-0.97/stage2/fsys_zfs.c
1 1 /*
2 2 * GRUB -- GRand Unified Bootloader
3 3 * Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
4 4 *
5 5 * This program is free software; you can redistribute it and/or modify
6 6 * it under the terms of the GNU General Public License as published by
7 7 * the Free Software Foundation; either version 2 of the License, or
8 8 * (at your option) any later version.
9 9 *
10 10 * This program is distributed in the hope that it will be useful,
11 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 13 * GNU General Public License for more details.
14 14 *
15 15 * You should have received a copy of the GNU General Public License
16 16 * along with this program; if not, write to the Free Software
17 17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 18 */
19 19
20 20 /*
21 21 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
22 22 * Use is subject to license terms.
23 23 */
24 24
25 25 /*
26 26 * Copyright (c) 2012 by Delphix. All rights reserved.
27 27 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
28 28 */
29 29
30 30 /*
31 31 * The zfs plug-in routines for GRUB are:
32 32 *
33 33 * zfs_mount() - locates a valid uberblock of the root pool and reads
34 34 * in its MOS at the memory address MOS.
35 35 *
36 36 * zfs_open() - locates a plain file object by following the MOS
37 37 * and places its dnode at the memory address DNODE.
38 38 *
39 39 * zfs_read() - read in the data blocks pointed by the DNODE.
40 40 *
41 41 * ZFS_SCRATCH is used as a working area.
42 42 *
43 43 * (memory addr) MOS DNODE ZFS_SCRATCH
44 44 * | | |
45 45 * +-------V---------V----------V---------------+
46 46 * memory | | dnode | dnode | scratch |
47 47 * | | 512B | 512B | area |
48 48 * +--------------------------------------------+
49 49 */
50 50
51 51 #ifdef FSYS_ZFS
52 52
53 53 #include "shared.h"
54 54 #include "filesys.h"
55 55 #include "fsys_zfs.h"
56 56
/* cache for a file block of the currently zfs_open()-ed file */
static void *file_buf = NULL;
static uint64_t file_start = 0;	/* first byte offset covered by file_buf */
static uint64_t file_end = 0;	/* one past the last byte covered */

/* cache for a dnode block (filled lazily by dnode_get()) */
static dnode_phys_t *dnode_buf = NULL;
static dnode_phys_t *dnode_mdn = NULL;	/* metadnode the cache belongs to */
static uint64_t dnode_start = 0;	/* first object number cached */
static uint64_t dnode_end = 0;		/* one past the last object cached */

/* identity of the pool we mounted, captured by zfs_mount() */
static uint64_t pool_guid = 0;
static uberblock_t current_uberblock;
static char *stackbase;		/* base of the ZFS_SCRATCH working area */
71 71
/*
 * Decompression dispatch table, indexed by the ZIO_COMPRESS_* value
 * stored in a block pointer.  A NULL decomp_func means the algorithm
 * is not supported by this GRUB implementation (zio_read() rejects
 * such blocks).  "on" maps to lzjb, the historical default.
 */
decomp_entry_t decomp_table[ZIO_COMPRESS_FUNCTIONS] =
{
	{"inherit", 0},			/* ZIO_COMPRESS_INHERIT */
	{"on", lzjb_decompress},	/* ZIO_COMPRESS_ON */
	{"off", 0},			/* ZIO_COMPRESS_OFF */
	{"lzjb", lzjb_decompress},	/* ZIO_COMPRESS_LZJB */
	{"empty", 0},			/* ZIO_COMPRESS_EMPTY */
	{"gzip-1", 0},			/* ZIO_COMPRESS_GZIP_1 */
	{"gzip-2", 0},			/* ZIO_COMPRESS_GZIP_2 */
	{"gzip-3", 0},			/* ZIO_COMPRESS_GZIP_3 */
	{"gzip-4", 0},			/* ZIO_COMPRESS_GZIP_4 */
	{"gzip-5", 0},			/* ZIO_COMPRESS_GZIP_5 */
	{"gzip-6", 0},			/* ZIO_COMPRESS_GZIP_6 */
	{"gzip-7", 0},			/* ZIO_COMPRESS_GZIP_7 */
	{"gzip-8", 0},			/* ZIO_COMPRESS_GZIP_8 */
	{"gzip-9", 0},			/* ZIO_COMPRESS_GZIP_9 */
	{"zle", 0},			/* ZIO_COMPRESS_ZLE */
	{"lz4", lz4_decompress}		/* ZIO_COMPRESS_LZ4 */
};
91 91
92 92 static int zio_read_data(blkptr_t *bp, void *buf, char *stack);
93 93
/*
 * Our own version of bcmp(): returns 0 when the first n bytes of the
 * two buffers match, 1 on the first mismatch.  Comparing a region with
 * itself, or a zero-length region, always matches.
 */
static int
zfs_bcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *a = s1;
	const unsigned char *b = s2;
	size_t i;

	if (s1 == s2)
		return (0);

	for (i = 0; i < n; i++) {
		if (a[i] != b[i])
			return (1);
	}

	return (0);
}
112 112
/*
 * Our own version of log2(): index of the highest set bit, i.e.
 * highbit()-1.  Returns 0 for inputs of 0 and 1.
 */
static int
zfs_log2(uint64_t num)
{
	int bit;

	for (bit = 0; (num >> 1) != 0; num >>= 1)
		bit++;

	return (bit);
}
128 128
/* Checksum Functions */

/*
 * Checksum function used for ZIO_CHECKSUM_OFF: writes an all-zero
 * checksum into *zcp regardless of the buffer contents, so the
 * subsequent word-by-word comparison in zio_checksum_verify()
 * trivially succeeds against a zeroed expected checksum.
 */
static void
zio_checksum_off(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}
135 135
/* Checksum Table and Values */

/*
 * Dispatch table indexed by the ZIO_CHECKSUM_* value from a block
 * pointer.  Each entry holds the {native, byteswapped} checksum
 * functions (NULL when this GRUB does not support the algorithm),
 * two flag fields (the first is tested as ci_eck, "embedded
 * checksum", by zio_checksum_verify(); the second's semantics live
 * in zio_checksum_info_t — presumably a dedup flag, confirm against
 * fsys_zfs.h), and the algorithm name.
 */
zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
	{{NULL, NULL}, 0, 0, "inherit"},
	{{NULL, NULL}, 0, 0, "on"},
	{{zio_checksum_off, zio_checksum_off}, 0, 0, "off"},
	{{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 1, "label"},
	{{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 1, "gang_header"},
	{{NULL, NULL}, 0, 0, "zilog"},
	{{fletcher_2_native, fletcher_2_byteswap}, 0, 0, "fletcher2"},
	{{fletcher_4_native, fletcher_4_byteswap}, 1, 0, "fletcher4"},
	{{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 0, "SHA256"},
	{{NULL, NULL}, 0, 0, "zilog2"},
	{{zio_checksum_EdonR512_256, zio_checksum_EdonR512_256_byteswap},
	    1, 0, "edonr512/256"}
};
149 151
150 152 /*
151 153 * zio_checksum_verify: Provides support for checksum verification.
152 154 *
153 155 * Fletcher2, Fletcher4, and SHA256 are supported.
154 156 *
155 157 * Return:
156 158 * -1 = Failure
157 159 * 0 = Success
158 160 */
159 161 static int
160 162 zio_checksum_verify(blkptr_t *bp, char *data, int size)
161 163 {
162 164 zio_cksum_t zc = bp->blk_cksum;
163 165 uint32_t checksum = BP_GET_CHECKSUM(bp);
164 166 int byteswap = BP_SHOULD_BYTESWAP(bp);
165 167 zio_eck_t *zec = (zio_eck_t *)(data + size) - 1;
166 168 zio_checksum_info_t *ci = &zio_checksum_table[checksum];
167 169 zio_cksum_t actual_cksum, expected_cksum;
168 170
169 171 /* byteswap is not supported */
170 172 if (byteswap)
171 173 return (-1);
172 174
173 175 if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
174 176 return (-1);
175 177
176 178 if (ci->ci_eck) {
177 179 expected_cksum = zec->zec_cksum;
178 180 zec->zec_cksum = zc;
179 181 ci->ci_func[0](data, size, &actual_cksum);
180 182 zec->zec_cksum = expected_cksum;
181 183 zc = expected_cksum;
182 184
183 185 } else {
184 186 ci->ci_func[byteswap](data, size, &actual_cksum);
185 187 }
186 188
187 189 if ((actual_cksum.zc_word[0] - zc.zc_word[0]) |
188 190 (actual_cksum.zc_word[1] - zc.zc_word[1]) |
189 191 (actual_cksum.zc_word[2] - zc.zc_word[2]) |
190 192 (actual_cksum.zc_word[3] - zc.zc_word[3]))
191 193 return (-1);
192 194
193 195 return (0);
194 196 }
195 197
196 198 /*
197 199 * vdev_label_start returns the physical disk offset (in bytes) of
198 200 * label "l".
199 201 */
200 202 static uint64_t
201 203 vdev_label_start(uint64_t psize, int l)
202 204 {
203 205 return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
204 206 0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
205 207 }
206 208
/*
 * vdev_uberblock_compare takes two uberblock structures and returns an integer
 * indicating the more recent of the two.
 *	Return Value = 1 if ub1 is more recent
 *	Return Value = -1 if ub2 is more recent
 * (Note: the body below returns 1 when ub1 has the larger txg/timestamp,
 * and find_bestub() keeps the first argument on a positive result.)
 * The most recent uberblock is determined using its transaction number and
 * timestamp.  The uberblock with the highest transaction number is
 * considered "newer".  If the transaction numbers of the two blocks match, the
 * timestamps are compared to determine the "newer" of the two.
 */
static int
vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
{
	if (ub1->ub_txg < ub2->ub_txg)
		return (-1);
	if (ub1->ub_txg > ub2->ub_txg)
		return (1);

	if (ub1->ub_timestamp < ub2->ub_timestamp)
		return (-1);
	if (ub1->ub_timestamp > ub2->ub_timestamp)
		return (1);

	return (0);
}
232 234
/*
 * Three pieces of information are needed to verify an uberblock: the magic
 * number, the version number, and the checksum.
 *
 * The checksum is verified by building a fake block pointer that
 * carries the ZIO_CHECKSUM_LABEL algorithm and the uberblock's disk
 * offset as the checksum seed, mirroring how the label was written.
 *
 * Return:
 *	0 - Success
 *	-1 - Failure
 */
static int
uberblock_verify(uberblock_t *uber, uint64_t ub_size, uint64_t offset)
{
	blkptr_t bp;

	BP_ZERO(&bp);
	BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
	BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
	ZIO_SET_CHECKSUM(&bp.blk_cksum, offset, 0, 0, 0);

	if (zio_checksum_verify(&bp, (char *)uber, ub_size) != 0)
		return (-1);

	/* magic and a supported SPA version are both required */
	if (uber->ub_magic == UBERBLOCK_MAGIC &&
	    SPA_VERSION_IS_SUPPORTED(uber->ub_version))
		return (0);

	return (-1);
}
260 262
261 263 /*
262 264 * Find the best uberblock.
263 265 * Return:
264 266 * Success - Pointer to the best uberblock.
265 267 * Failure - NULL
266 268 */
267 269 static uberblock_t *
268 270 find_bestub(char *ub_array, uint64_t ashift, uint64_t sector)
269 271 {
270 272 uberblock_t *ubbest = NULL;
271 273 uberblock_t *ubnext;
272 274 uint64_t offset, ub_size;
273 275 int i;
274 276
275 277 ub_size = VDEV_UBERBLOCK_SIZE(ashift);
276 278
277 279 for (i = 0; i < VDEV_UBERBLOCK_COUNT(ashift); i++) {
278 280 ubnext = (uberblock_t *)ub_array;
279 281 ub_array += ub_size;
280 282 offset = (sector << SPA_MINBLOCKSHIFT) +
281 283 VDEV_UBERBLOCK_OFFSET(ashift, i);
282 284
283 285 if (uberblock_verify(ubnext, ub_size, offset) != 0)
284 286 continue;
285 287
286 288 if (ubbest == NULL ||
287 289 vdev_uberblock_compare(ubnext, ubbest) > 0)
288 290 ubbest = ubnext;
289 291 }
290 292
291 293 return (ubbest);
292 294 }
293 295
294 296 /*
295 297 * Read a block of data based on the gang block address dva,
296 298 * and put its data in buf.
297 299 *
298 300 * Return:
299 301 * 0 - success
300 302 * 1 - failure
301 303 */
302 304 static int
303 305 zio_read_gang(blkptr_t *bp, dva_t *dva, void *buf, char *stack)
304 306 {
305 307 zio_gbh_phys_t *zio_gb;
306 308 uint64_t offset, sector;
307 309 blkptr_t tmpbp;
308 310 int i;
309 311
310 312 zio_gb = (zio_gbh_phys_t *)stack;
311 313 stack += SPA_GANGBLOCKSIZE;
312 314 offset = DVA_GET_OFFSET(dva);
313 315 sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
314 316
315 317 /* read in the gang block header */
316 318 if (devread(sector, 0, SPA_GANGBLOCKSIZE, (char *)zio_gb) == 0) {
317 319 grub_printf("failed to read in a gang block header\n");
318 320 return (1);
319 321 }
320 322
321 323 /* self checksuming the gang block header */
322 324 BP_ZERO(&tmpbp);
323 325 BP_SET_CHECKSUM(&tmpbp, ZIO_CHECKSUM_GANG_HEADER);
324 326 BP_SET_BYTEORDER(&tmpbp, ZFS_HOST_BYTEORDER);
325 327 ZIO_SET_CHECKSUM(&tmpbp.blk_cksum, DVA_GET_VDEV(dva),
326 328 DVA_GET_OFFSET(dva), bp->blk_birth, 0);
327 329 if (zio_checksum_verify(&tmpbp, (char *)zio_gb, SPA_GANGBLOCKSIZE)) {
328 330 grub_printf("failed to checksum a gang block header\n");
329 331 return (1);
330 332 }
331 333
332 334 for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
333 335 if (zio_gb->zg_blkptr[i].blk_birth == 0)
334 336 continue;
335 337
336 338 if (zio_read_data(&zio_gb->zg_blkptr[i], buf, stack))
337 339 return (1);
338 340 buf += BP_GET_PSIZE(&zio_gb->zg_blkptr[i]);
339 341 }
340 342
341 343 return (0);
342 344 }
343 345
344 346 /*
345 347 * Read in a block of raw data to buf.
346 348 *
347 349 * Return:
348 350 * 0 - success
349 351 * 1 - failure
350 352 */
351 353 static int
352 354 zio_read_data(blkptr_t *bp, void *buf, char *stack)
353 355 {
354 356 int i, psize;
355 357
356 358 psize = BP_GET_PSIZE(bp);
357 359
358 360 /* pick a good dva from the block pointer */
359 361 for (i = 0; i < SPA_DVAS_PER_BP; i++) {
360 362 uint64_t offset, sector;
361 363
362 364 if (bp->blk_dva[i].dva_word[0] == 0 &&
363 365 bp->blk_dva[i].dva_word[1] == 0)
364 366 continue;
365 367
366 368 if (DVA_GET_GANG(&bp->blk_dva[i])) {
367 369 if (zio_read_gang(bp, &bp->blk_dva[i], buf, stack) == 0)
368 370 return (0);
369 371 } else {
370 372 /* read in a data block */
371 373 offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
372 374 sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
373 375 if (devread(sector, 0, psize, buf) != 0)
374 376 return (0);
375 377 }
376 378 }
377 379
378 380 return (1);
379 381 }
380 382
/*
 * Read in a block of data, verify its checksum, decompress if needed,
 * and put the uncompressed data in buf.
 *
 * Compressed data is first read into scratch space carved off the top
 * of 'stack' and only decompressed into the caller's buffer after the
 * checksum (computed over the physical/compressed bytes) verifies.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
zio_read(blkptr_t *bp, void *buf, char *stack)
{
	int lsize, psize, comp;
	char *retbuf;

	comp = BP_GET_COMPRESS(bp);
	lsize = BP_GET_LSIZE(bp);	/* logical (uncompressed) size */
	psize = BP_GET_PSIZE(bp);	/* physical (on-disk) size */

	/* reject unknown algorithms and known-but-unsupported ones */
	if ((unsigned int)comp >= ZIO_COMPRESS_FUNCTIONS ||
	    (comp != ZIO_COMPRESS_OFF &&
	    decomp_table[comp].decomp_func == NULL)) {
		grub_printf("compression algorithm not supported\n");
		return (ERR_FSYS_CORRUPT);
	}

	/* the output buffer must not overlap the scratch area */
	if ((char *)buf < stack && ((char *)buf) + lsize > stack) {
		grub_printf("not enough memory allocated\n");
		return (ERR_WONT_FIT);
	}

	/* remember where the caller wants the final data */
	retbuf = buf;
	if (comp != ZIO_COMPRESS_OFF) {
		/* stage the compressed bytes in scratch space instead */
		buf = stack;
		stack += psize;
	}

	if (zio_read_data(bp, buf, stack) != 0) {
		grub_printf("zio_read_data failed\n");
		return (ERR_FSYS_CORRUPT);
	}

	/* checksum covers the physical bytes as read from disk */
	if (zio_checksum_verify(bp, buf, psize) != 0) {
		grub_printf("checksum verification failed\n");
		return (ERR_FSYS_CORRUPT);
	}

	if (comp != ZIO_COMPRESS_OFF) {
		if (decomp_table[comp].decomp_func(buf, retbuf, psize,
		    lsize) != 0) {
			grub_printf("zio_read decompression failed\n");
			return (ERR_FSYS_CORRUPT);
		}
	}

	return (0);
}
437 439
438 440 /*
439 441 * Get the block from a block id.
440 442 * push the block onto the stack.
441 443 *
442 444 * Return:
443 445 * 0 - success
444 446 * errnum - failure
445 447 */
446 448 static int
447 449 dmu_read(dnode_phys_t *dn, uint64_t blkid, void *buf, char *stack)
448 450 {
449 451 int idx, level;
450 452 blkptr_t *bp_array = dn->dn_blkptr;
451 453 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
452 454 blkptr_t *bp, *tmpbuf;
453 455
454 456 bp = (blkptr_t *)stack;
455 457 stack += sizeof (blkptr_t);
456 458
457 459 tmpbuf = (blkptr_t *)stack;
458 460 stack += 1<<dn->dn_indblkshift;
459 461
460 462 for (level = dn->dn_nlevels - 1; level >= 0; level--) {
461 463 idx = (blkid >> (epbs * level)) & ((1<<epbs)-1);
462 464 *bp = bp_array[idx];
463 465 if (level == 0)
464 466 tmpbuf = buf;
465 467 if (BP_IS_HOLE(bp)) {
466 468 grub_memset(buf, 0,
467 469 dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
468 470 break;
469 471 } else if (errnum = zio_read(bp, tmpbuf, stack)) {
470 472 return (errnum);
471 473 }
472 474
473 475 bp_array = tmpbuf;
474 476 }
475 477
476 478 return (0);
477 479 }
478 480
479 481 /*
480 482 * mzap_lookup: Looks up property described by "name" and returns the value
481 483 * in "value".
482 484 *
483 485 * Return:
484 486 * 0 - success
485 487 * errnum - failure
486 488 */
487 489 static int
488 490 mzap_lookup(mzap_phys_t *zapobj, int objsize, const char *name,
489 491 uint64_t *value)
490 492 {
491 493 int i, chunks;
492 494 mzap_ent_phys_t *mzap_ent = zapobj->mz_chunk;
493 495
494 496 chunks = objsize / MZAP_ENT_LEN - 1;
495 497 for (i = 0; i < chunks; i++) {
496 498 if (grub_strcmp(mzap_ent[i].mze_name, name) == 0) {
497 499 *value = mzap_ent[i].mze_value;
498 500 return (0);
499 501 }
500 502 }
501 503
502 504 return (ERR_FSYS_CORRUPT);
503 505 }
504 506
/*
 * Compute the 28-bit ZAP hash of "name", seeded with the zap object's
 * salt, using a lazily-initialized CRC-64 table.  On bad input
 * (zero salt, or a corrupted/uninitialized table) sets errnum to
 * ERR_FSYS_CORRUPT and returns 0.
 */
static uint64_t
zap_hash(uint64_t salt, const char *name)
{
	static uint64_t table[256];
	const uint8_t *cp;
	uint8_t c;
	uint64_t crc = salt;

	/* build the CRC-64 table on first use (table[128] != 0 afterwards) */
	if (table[128] == 0) {
		uint64_t *ct;
		int i, j;
		for (i = 0; i < 256; i++) {
			for (ct = table + i, *ct = i, j = 8; j > 0; j--)
				*ct = (*ct >> 1) ^ (-(*ct & 1) &
				    ZFS_CRC64_POLY);
		}
	}

	/* a zero salt is invalid; table[128] sanity-checks the init */
	if (crc == 0 || table[128] != ZFS_CRC64_POLY) {
		errnum = ERR_FSYS_CORRUPT;
		return (0);
	}

	for (cp = (const uint8_t *)name; (c = *cp) != '\0'; cp++)
		crc = (crc >> 8) ^ table[(crc ^ c) & 0xFF];

	/*
	 * Only use 28 bits, since we need 4 bits in the cookie for the
	 * collision differentiator.  We MUST use the high bits, since
	 * those are the ones that we first pay attention to when
	 * choosing the bucket.
	 */
	crc &= ~((1ULL << (64 - 28)) - 1);

	return (crc);
}
541 543
/*
 * Compare the name stored across a chain of leaf array chunks with buf.
 * Only to be used on 8-bit arrays.
 * array_len is actual len in bytes (not encoded le_value_length).
 * buf is null-terminated.
 *
 * Returns nonzero when all array_len bytes match, 0 otherwise
 * (including when the chunk chain runs out of bounds).
 */
static int
zap_leaf_array_equal(zap_leaf_phys_t *l, int blksft, int chunk,
    int array_len, const char *buf)
{
	int bseen = 0;

	while (bseen < array_len) {
		struct zap_leaf_array *la =
		    &ZAP_LEAF_CHUNK(l, blksft, chunk).l_array;
		/* each chunk holds at most ZAP_LEAF_ARRAY_BYTES bytes */
		int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES);

		/* bounds-check the chunk index before dereferencing */
		if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
			return (0);

		if (zfs_bcmp(la->la_array, buf + bseen, toread) != 0)
			break;
		chunk = la->la_next;	/* follow the chunk chain */
		bseen += toread;
	}
	return (bseen == array_len);
}
568 570
/*
 * Given a zap_leaf_phys_t, walk thru the zap leaf chunks to get the
 * value for the property "name".
 *
 * The entry chain for the hash bucket is followed via le_next; each
 * candidate's full hash must match, then its stored name is compared
 * byte-for-byte.  Only single 64-bit integer values are supported.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
zap_leaf_lookup(zap_leaf_phys_t *l, int blksft, uint64_t h,
    const char *name, uint64_t *value)
{
	uint16_t chunk;
	struct zap_leaf_entry *le;

	/* Verify if this is a valid leaf block */
	if (l->l_hdr.lh_block_type != ZBT_LEAF)
		return (ERR_FSYS_CORRUPT);
	if (l->l_hdr.lh_magic != ZAP_LEAF_MAGIC)
		return (ERR_FSYS_CORRUPT);

	/* walk the hash bucket's entry chain */
	for (chunk = l->l_hash[LEAF_HASH(blksft, h)];
	    chunk != CHAIN_END; chunk = le->le_next) {

		if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
			return (ERR_FSYS_CORRUPT);

		le = ZAP_LEAF_ENTRY(l, blksft, chunk);

		/* Verify the chunk entry */
		if (le->le_type != ZAP_CHUNK_ENTRY)
			return (ERR_FSYS_CORRUPT);

		/* cheap hash comparison before the name comparison */
		if (le->le_hash != h)
			continue;

		if (zap_leaf_array_equal(l, blksft, le->le_name_chunk,
		    le->le_name_length, name)) {

			struct zap_leaf_array *la;
			uint8_t *ip;

			/* only a single 64-bit integer value is supported */
			if (le->le_int_size != 8 || le->le_value_length != 1)
				return (ERR_FSYS_CORRUPT);

			/* get the uint64_t property value */
			la = &ZAP_LEAF_CHUNK(l, blksft,
			    le->le_value_chunk).l_array;
			ip = la->la_array;

			/* values are stored big-endian; assemble bytewise */
			*value = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 |
			    (uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 |
			    (uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 |
			    (uint64_t)ip[6] << 8 | (uint64_t)ip[7];

			return (0);
		}
	}

	return (ERR_FSYS_CORRUPT);
}
630 632
631 633 /*
632 634 * Fat ZAP lookup
633 635 *
634 636 * Return:
635 637 * 0 - success
636 638 * errnum - failure
637 639 */
638 640 static int
639 641 fzap_lookup(dnode_phys_t *zap_dnode, zap_phys_t *zap,
640 642 const char *name, uint64_t *value, char *stack)
641 643 {
642 644 zap_leaf_phys_t *l;
643 645 uint64_t hash, idx, blkid;
644 646 int blksft = zfs_log2(zap_dnode->dn_datablkszsec << DNODE_SHIFT);
645 647
646 648 /* Verify if this is a fat zap header block */
647 649 if (zap->zap_magic != (uint64_t)ZAP_MAGIC ||
648 650 zap->zap_flags != 0)
649 651 return (ERR_FSYS_CORRUPT);
650 652
651 653 hash = zap_hash(zap->zap_salt, name);
652 654 if (errnum)
653 655 return (errnum);
654 656
655 657 /* get block id from index */
656 658 if (zap->zap_ptrtbl.zt_numblks != 0) {
657 659 /* external pointer tables not supported */
658 660 return (ERR_FSYS_CORRUPT);
659 661 }
660 662 idx = ZAP_HASH_IDX(hash, zap->zap_ptrtbl.zt_shift);
661 663 blkid = ((uint64_t *)zap)[idx + (1<<(blksft-3-1))];
662 664
663 665 /* Get the leaf block */
664 666 l = (zap_leaf_phys_t *)stack;
665 667 stack += 1<<blksft;
666 668 if ((1<<blksft) < sizeof (zap_leaf_phys_t))
667 669 return (ERR_FSYS_CORRUPT);
668 670 if (errnum = dmu_read(zap_dnode, blkid, l, stack))
669 671 return (errnum);
670 672
671 673 return (zap_leaf_lookup(l, blksft, hash, name, value));
672 674 }
673 675
674 676 /*
675 677 * Read in the data of a zap object and find the value for a matching
676 678 * property name.
677 679 *
678 680 * Return:
679 681 * 0 - success
680 682 * errnum - failure
681 683 */
682 684 static int
683 685 zap_lookup(dnode_phys_t *zap_dnode, const char *name, uint64_t *val,
684 686 char *stack)
685 687 {
686 688 uint64_t block_type;
687 689 int size;
688 690 void *zapbuf;
689 691
690 692 /* Read in the first block of the zap object data. */
691 693 zapbuf = stack;
692 694 size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
693 695 stack += size;
694 696
695 697 if ((errnum = dmu_read(zap_dnode, 0, zapbuf, stack)) != 0)
696 698 return (errnum);
697 699
698 700 block_type = *((uint64_t *)zapbuf);
699 701
700 702 if (block_type == ZBT_MICRO) {
701 703 return (mzap_lookup(zapbuf, size, name, val));
702 704 } else if (block_type == ZBT_HEADER) {
703 705 /* this is a fat zap */
704 706 return (fzap_lookup(zap_dnode, zapbuf, name,
705 707 val, stack));
706 708 }
707 709
708 710 return (ERR_FSYS_CORRUPT);
709 711 }
710 712
/*
 * In-memory view of one zap entry handed to zap_iterate() callbacks.
 */
typedef struct zap_attribute {
	int za_integer_length;		/* width of each integer, in bytes */
	uint64_t za_num_integers;	/* number of integers in the value */
	uint64_t za_first_integer;	/* first (often only) integer value */
	char *za_name;			/* entry name (points into the zap) */
} zap_attribute_t;
719 721
720 722 static int
721 723 zap_iterate(dnode_phys_t *zap_dnode, zap_cb_t *cb, void *arg, char *stack)
722 724 {
723 725 uint32_t size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
724 726 zap_attribute_t za;
725 727 int i;
726 728 mzap_phys_t *mzp = (mzap_phys_t *)stack;
727 729 stack += size;
728 730
729 731 if ((errnum = dmu_read(zap_dnode, 0, mzp, stack)) != 0)
730 732 return (errnum);
731 733
732 734 /*
733 735 * Iteration over fatzap objects has not yet been implemented.
734 736 * If we encounter a pool in which there are more features for
735 737 * read than can fit inside a microzap (i.e., more than 2048
736 738 * features for read), we can add support for fatzap iteration.
737 739 * For now, fail.
738 740 */
739 741 if (mzp->mz_block_type != ZBT_MICRO) {
740 742 grub_printf("feature information stored in fatzap, pool "
741 743 "version not supported\n");
742 744 return (1);
743 745 }
744 746
745 747 za.za_integer_length = 8;
746 748 za.za_num_integers = 1;
747 749 for (i = 0; i < size / MZAP_ENT_LEN - 1; i++) {
748 750 mzap_ent_phys_t *mzep = &mzp->mz_chunk[i];
749 751 int err;
750 752
751 753 za.za_first_integer = mzep->mze_value;
752 754 za.za_name = mzep->mze_name;
753 755 err = cb(&za, arg, stack);
754 756 if (err != 0)
755 757 return (err);
756 758 }
757 759
758 760 return (0);
759 761 }
760 762
761 763 /*
762 764 * Get the dnode of an object number from the metadnode of an object set.
763 765 *
764 766 * Input
765 767 * mdn - metadnode to get the object dnode
766 768 * objnum - object number for the object dnode
767 769 * buf - data buffer that holds the returning dnode
768 770 * stack - scratch area
769 771 *
770 772 * Return:
771 773 * 0 - success
772 774 * errnum - failure
773 775 */
774 776 static int
775 777 dnode_get(dnode_phys_t *mdn, uint64_t objnum, uint8_t type, dnode_phys_t *buf,
776 778 char *stack)
777 779 {
778 780 uint64_t blkid, blksz; /* the block id this object dnode is in */
779 781 int epbs; /* shift of number of dnodes in a block */
780 782 int idx; /* index within a block */
781 783 dnode_phys_t *dnbuf;
782 784
783 785 blksz = mdn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
784 786 epbs = zfs_log2(blksz) - DNODE_SHIFT;
785 787 blkid = objnum >> epbs;
786 788 idx = objnum & ((1<<epbs)-1);
787 789
788 790 if (dnode_buf != NULL && dnode_mdn == mdn &&
789 791 objnum >= dnode_start && objnum < dnode_end) {
790 792 grub_memmove(buf, &dnode_buf[idx], DNODE_SIZE);
791 793 VERIFY_DN_TYPE(buf, type);
792 794 return (0);
793 795 }
794 796
795 797 if (dnode_buf && blksz == 1<<DNODE_BLOCK_SHIFT) {
796 798 dnbuf = dnode_buf;
797 799 dnode_mdn = mdn;
798 800 dnode_start = blkid << epbs;
799 801 dnode_end = (blkid + 1) << epbs;
800 802 } else {
801 803 dnbuf = (dnode_phys_t *)stack;
802 804 stack += blksz;
803 805 }
804 806
805 807 if (errnum = dmu_read(mdn, blkid, (char *)dnbuf, stack))
806 808 return (errnum);
807 809
808 810 grub_memmove(buf, &dnbuf[idx], DNODE_SIZE);
809 811 VERIFY_DN_TYPE(buf, type);
810 812
811 813 return (0);
812 814 }
813 815
814 816 /*
815 817 * Check if this is a special file that resides at the top
816 818 * dataset of the pool. Currently this is the GRUB menu,
817 819 * boot signature and boot signature backup.
818 820 * str starts with '/'.
819 821 */
820 822 static int
821 823 is_top_dataset_file(char *str)
822 824 {
823 825 char *tptr;
824 826
825 827 if ((tptr = grub_strstr(str, "menu.lst")) &&
826 828 (tptr[8] == '\0' || tptr[8] == ' ') &&
827 829 *(tptr-1) == '/')
828 830 return (1);
829 831
830 832 if (grub_strncmp(str, BOOTSIGN_DIR"/",
831 833 grub_strlen(BOOTSIGN_DIR) + 1) == 0)
832 834 return (1);
833 835
834 836 if (grub_strcmp(str, BOOTSIGN_BACKUP) == 0)
835 837 return (1);
836 838
837 839 return (0);
838 840 }
839 841
840 842 static int
841 843 check_feature(zap_attribute_t *za, void *arg, char *stack)
842 844 {
843 845 const char **names = arg;
844 846 int i;
845 847
846 848 if (za->za_first_integer == 0)
847 849 return (0);
848 850
849 851 for (i = 0; names[i] != NULL; i++) {
850 852 if (grub_strcmp(za->za_name, names[i]) == 0) {
851 853 return (0);
852 854 }
853 855 }
854 856 grub_printf("missing feature for read '%s'\n", za->za_name);
855 857 return (ERR_NEWER_VERSION);
856 858 }
857 859
858 860 /*
859 861 * Get the file dnode for a given file name where mdn is the meta dnode
860 862 * for this ZFS object set. When found, place the file dnode in dn.
861 863 * The 'path' argument will be mangled.
862 864 *
863 865 * Return:
864 866 * 0 - success
865 867 * errnum - failure
866 868 */
867 869 static int
868 870 dnode_get_path(dnode_phys_t *mdn, char *path, dnode_phys_t *dn,
869 871 char *stack)
870 872 {
871 873 uint64_t objnum, version;
872 874 char *cname, ch;
873 875
874 876 if (errnum = dnode_get(mdn, MASTER_NODE_OBJ, DMU_OT_MASTER_NODE,
875 877 dn, stack))
876 878 return (errnum);
877 879
878 880 if (errnum = zap_lookup(dn, ZPL_VERSION_STR, &version, stack))
879 881 return (errnum);
880 882 if (version > ZPL_VERSION)
881 883 return (-1);
882 884
883 885 if (errnum = zap_lookup(dn, ZFS_ROOT_OBJ, &objnum, stack))
884 886 return (errnum);
885 887
886 888 if (errnum = dnode_get(mdn, objnum, DMU_OT_DIRECTORY_CONTENTS,
887 889 dn, stack))
888 890 return (errnum);
889 891
890 892 /* skip leading slashes */
891 893 while (*path == '/')
892 894 path++;
893 895
894 896 while (*path && !grub_isspace(*path)) {
895 897
896 898 /* get the next component name */
897 899 cname = path;
898 900 while (*path && !grub_isspace(*path) && *path != '/')
899 901 path++;
900 902 ch = *path;
901 903 *path = 0; /* ensure null termination */
902 904
903 905 if (errnum = zap_lookup(dn, cname, &objnum, stack))
904 906 return (errnum);
905 907
906 908 objnum = ZFS_DIRENT_OBJ(objnum);
907 909 if (errnum = dnode_get(mdn, objnum, 0, dn, stack))
908 910 return (errnum);
909 911
910 912 *path = ch;
911 913 while (*path == '/')
912 914 path++;
913 915 }
914 916
915 917 /* We found the dnode for this file. Verify if it is a plain file. */
916 918 VERIFY_DN_TYPE(dn, DMU_OT_PLAIN_FILE_CONTENTS);
917 919
918 920 return (0);
919 921 }
920 922
921 923 /*
922 924 * Get the default 'bootfs' property value from the rootpool.
923 925 *
924 926 * Return:
925 927 * 0 - success
926 928 * errnum -failure
927 929 */
928 930 static int
929 931 get_default_bootfsobj(dnode_phys_t *mosmdn, uint64_t *obj, char *stack)
930 932 {
931 933 uint64_t objnum = 0;
932 934 dnode_phys_t *dn = (dnode_phys_t *)stack;
933 935 stack += DNODE_SIZE;
934 936
935 937 if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
936 938 DMU_OT_OBJECT_DIRECTORY, dn, stack))
937 939 return (errnum);
938 940
939 941 /*
940 942 * find the object number for 'pool_props', and get the dnode
941 943 * of the 'pool_props'.
942 944 */
943 945 if (zap_lookup(dn, DMU_POOL_PROPS, &objnum, stack))
944 946 return (ERR_FILESYSTEM_NOT_FOUND);
945 947
946 948 if (errnum = dnode_get(mosmdn, objnum, DMU_OT_POOL_PROPS, dn, stack))
947 949 return (errnum);
948 950
949 951 if (zap_lookup(dn, ZPOOL_PROP_BOOTFS, &objnum, stack))
950 952 return (ERR_FILESYSTEM_NOT_FOUND);
951 953
952 954 if (!objnum)
953 955 return (ERR_FILESYSTEM_NOT_FOUND);
954 956
955 957 *obj = objnum;
↓ open down ↓ |
798 lines elided |
↑ open up ↑ |
956 958 return (0);
957 959 }
958 960
/*
 * List of pool features that the grub implementation of ZFS supports for
 * read.  Note that features that are only required for write do not need
 * to be listed here since grub opens pools in read-only mode.
 *
 * check_feature() fails the mount if the pool has any active
 * read-required feature not named here.  NULL-terminated.
 */
static const char *spa_feature_names[] = {
	"org.illumos:lz4_compress",
	"org.illumos:edonr_cksum",
	NULL
};
968 971
969 972 /*
970 973 * Checks whether the MOS features that are active are supported by this
971 974 * (GRUB's) implementation of ZFS.
972 975 *
973 976 * Return:
974 977 * 0: Success.
975 978 * errnum: Failure.
976 979 */
977 980 static int
978 981 check_mos_features(dnode_phys_t *mosmdn, char *stack)
979 982 {
980 983 uint64_t objnum;
981 984 dnode_phys_t *dn;
982 985 uint8_t error = 0;
983 986
984 987 dn = (dnode_phys_t *)stack;
985 988 stack += DNODE_SIZE;
986 989
987 990 if ((errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
988 991 DMU_OT_OBJECT_DIRECTORY, dn, stack)) != 0)
989 992 return (errnum);
990 993
991 994 /*
992 995 * Find the object number for 'features_for_read' and retrieve its
993 996 * corresponding dnode. Note that we don't check features_for_write
994 997 * because GRUB is not opening the pool for write.
995 998 */
996 999 if ((errnum = zap_lookup(dn, DMU_POOL_FEATURES_FOR_READ, &objnum,
997 1000 stack)) != 0)
998 1001 return (errnum);
999 1002
1000 1003 if ((errnum = dnode_get(mosmdn, objnum, DMU_OTN_ZAP_METADATA,
1001 1004 dn, stack)) != 0)
1002 1005 return (errnum);
1003 1006
1004 1007 return (zap_iterate(dn, check_feature, spa_feature_names, stack));
1005 1008 }
1006 1009
/*
 * Given a MOS metadnode, get the metadnode of a given filesystem name (fsname),
 * e.g. pool/rootfs, or a given object number (obj), e.g. the object number
 * of pool/rootfs.
 *
 * If no fsname and no obj are given, return the DSL_DIR metadnode.
 * If fsname is given, return its metadnode and its matching object number.
 * If only obj is given, return the metadnode for this object number.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
get_objset_mdn(dnode_phys_t *mosmdn, char *fsname, uint64_t *obj,
    dnode_phys_t *mdn, char *stack)
{
	uint64_t objnum, headobj;
	char *cname, ch;
	blkptr_t *bp;
	objset_phys_t *osp;
	int issnapshot = 0;
	char *snapname;

	/* caller supplied an object number directly; skip the name walk */
	if (fsname == NULL && obj) {
		headobj = *obj;
		goto skip;
	}

	/* start at the MOS object directory ... */
	if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, mdn, stack))
		return (errnum);

	/* ... and look up the root dataset's DSL directory */
	if (errnum = zap_lookup(mdn, DMU_POOL_ROOT_DATASET, &objnum,
	    stack))
		return (errnum);

	if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR, mdn, stack))
		return (errnum);

	/* no name given: hand back the root DSL dir's head dataset */
	if (fsname == NULL) {
		headobj =
		    ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
		goto skip;
	}

	/* take out the pool name */
	while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
		fsname++;

	/* descend one '/'-separated path component per iteration */
	while (*fsname && !grub_isspace(*fsname)) {
		uint64_t childobj;

		while (*fsname == '/')
			fsname++;

		/* isolate the current component in-place (restored below) */
		cname = fsname;
		while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
			fsname++;
		ch = *fsname;
		*fsname = 0;

		/* a trailing '@snap' marks a snapshot; cut it off for now */
		snapname = cname;
		while (*snapname && !grub_isspace(*snapname) && *snapname !=
		    '@')
			snapname++;
		if (*snapname == '@') {
			issnapshot = 1;
			*snapname = 0;
		}
		childobj =
		    ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_child_dir_zapobj;
		if (errnum = dnode_get(mosmdn, childobj,
		    DMU_OT_DSL_DIR_CHILD_MAP, mdn, stack))
			return (errnum);

		/* look this component up in the child-dir ZAP */
		if (zap_lookup(mdn, cname, &objnum, stack))
			return (ERR_FILESYSTEM_NOT_FOUND);

		if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR,
		    mdn, stack))
			return (errnum);

		/* restore the characters we zeroed above */
		*fsname = ch;
		if (issnapshot)
			*snapname = '@';
	}
	headobj = ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
	if (obj)
		*obj = headobj;

skip:
	if (errnum = dnode_get(mosmdn, headobj, DMU_OT_DSL_DATASET, mdn, stack))
		return (errnum);
	if (issnapshot) {
		uint64_t snapobj;

		/* resolve the snapshot name in the dataset's snap-name ZAP */
		snapobj = ((dsl_dataset_phys_t *)DN_BONUS(mdn))->
		    ds_snapnames_zapobj;

		if (errnum = dnode_get(mosmdn, snapobj,
		    DMU_OT_DSL_DS_SNAP_MAP, mdn, stack))
			return (errnum);
		if (zap_lookup(mdn, snapname + 1, &headobj, stack))
			return (ERR_FILESYSTEM_NOT_FOUND);
		if (errnum = dnode_get(mosmdn, headobj,
		    DMU_OT_DSL_DATASET, mdn, stack))
			return (errnum);
		if (obj)
			*obj = headobj;
	}

	/* read the dataset's objset and copy out its meta dnode */
	bp = &((dsl_dataset_phys_t *)DN_BONUS(mdn))->ds_bp;
	osp = (objset_phys_t *)stack;
	stack += sizeof (objset_phys_t);
	if (errnum = zio_read(bp, osp, stack))
		return (errnum);

	grub_memmove((char *)mdn, (char *)&osp->os_meta_dnode, DNODE_SIZE);

	return (0);
}
1129 1132
1130 1133 /*
1131 1134 * For a given XDR packed nvlist, verify the first 4 bytes and move on.
1132 1135 *
1133 1136 * An XDR packed nvlist is encoded as (comments from nvs_xdr_create) :
1134 1137 *
1135 1138 * encoding method/host endian (4 bytes)
1136 1139 * nvl_version (4 bytes)
1137 1140 * nvl_nvflag (4 bytes)
1138 1141 * encoded nvpairs:
1139 1142 * encoded size of the nvpair (4 bytes)
1140 1143 * decoded size of the nvpair (4 bytes)
1141 1144 * name string size (4 bytes)
1142 1145 * name string data (sizeof(NV_ALIGN4(string))
1143 1146 * data type (4 bytes)
1144 1147 * # of elements in the nvpair (4 bytes)
1145 1148 * data
1146 1149 * 2 zero's for the last nvpair
1147 1150 * (end of the entire list) (8 bytes)
1148 1151 *
1149 1152 * Return:
1150 1153 * 0 - success
1151 1154 * 1 - failure
1152 1155 */
1153 1156 static int
1154 1157 nvlist_unpack(char *nvlist, char **out)
1155 1158 {
1156 1159 /* Verify if the 1st and 2nd byte in the nvlist are valid. */
1157 1160 if (nvlist[0] != NV_ENCODE_XDR || nvlist[1] != HOST_ENDIAN)
1158 1161 return (1);
1159 1162
1160 1163 *out = nvlist + 4;
1161 1164 return (0);
1162 1165 }
1163 1166
/*
 * Return a pointer to the index-th nvlist in a packed array of nvlists
 * by walking past the preceding lists.
 */
static char *
nvlist_array(char *nvlist, int index)
{
	int skipped;

	for (skipped = 0; skipped < index; skipped++) {
		int esize;

		/* skip the header, nvl_version, and nvl_nvflag */
		nvlist += 4 * 2;

		/* walk nvpairs until the terminating zero encode size */
		for (;;) {
			esize = BSWAP_32(*(uint32_t *)nvlist);
			if (esize == 0)
				break;
			nvlist += esize;	/* goto the next nvpair */
		}

		/* skip the ending 2 zeros - 8 bytes */
		nvlist += 4 * 2;
	}

	return (nvlist);
}
1181 1184
1182 1185 /*
1183 1186 * The nvlist_next_nvpair() function returns a handle to the next nvpair in the
1184 1187 * list following nvpair. If nvpair is NULL, the first pair is returned. If
1185 1188 * nvpair is the last pair in the nvlist, NULL is returned.
1186 1189 */
1187 1190 static char *
1188 1191 nvlist_next_nvpair(char *nvl, char *nvpair)
1189 1192 {
1190 1193 char *cur, *prev;
1191 1194 int encode_size;
1192 1195
1193 1196 if (nvl == NULL)
1194 1197 return (NULL);
1195 1198
1196 1199 if (nvpair == NULL) {
1197 1200 /* skip over nvl_version and nvl_nvflag */
1198 1201 nvpair = nvl + 4 * 2;
1199 1202 } else {
1200 1203 /* skip to the next nvpair */
1201 1204 encode_size = BSWAP_32(*(uint32_t *)nvpair);
1202 1205 nvpair += encode_size;
1203 1206 }
1204 1207
1205 1208 /* 8 bytes of 0 marks the end of the list */
1206 1209 if (*(uint64_t *)nvpair == 0)
1207 1210 return (NULL);
1208 1211
1209 1212 return (nvpair);
1210 1213 }
1211 1214
/*
 * This function returns 0 on success and 1 on failure. On success, a string
 * containing the name of nvpair is saved in buf.
 */
static int
nvpair_name(char *nvp, char *buf, int buflen)
{
	int namelen;

	/* skip over encode/decode size */
	nvp += 4 * 2;

	/* the name length precedes the name data */
	namelen = BSWAP_32(*(uint32_t *)nvp);

	/* need room for the name plus a terminating NUL */
	if (namelen + 1 > buflen)
		return (1);

	grub_memmove(buf, nvp + 4, namelen);
	buf[namelen] = '\0';

	return (0);
}
1233 1236
/*
 * This function retrieves the value of the nvpair in the form of enumerated
 * type data_type_t. This is used to determine the appropriate type to pass to
 * nvpair_value().
 */
static int
nvpair_type(char *nvp)
{
	int namelen;

	/* skip over encode/decode size */
	nvp += 4 * 2;

	/* read the name length, then step over it (4-byte aligned) */
	namelen = BSWAP_32(*(uint32_t *)nvp);
	nvp += 4;
	nvp += (namelen + 3) & ~3;

	/* the data type immediately follows the padded name */
	return (BSWAP_32(*(uint32_t *)nvp));
}
1258 1261
/*
 * Extract the value of an nvpair into 'val', which must already be of the
 * expected XDR type 'valtype'. For NVLIST_ARRAY values, *nelmp (if non-NULL)
 * receives the element count.
 *
 * Return:
 *	0 - success
 *	1 - failure (type mismatch, empty non-boolean value, or a
 *	    valtype this parser does not handle)
 */
static int
nvpair_value(char *nvp, void *val, int valtype, int *nelmp)
{
	int name_len, type, slen;
	char *strval = val;
	uint64_t *intval = val;

	/* skip over encode/decode size */
	nvp += 4 * 2;

	/* skip over name_len */
	name_len = BSWAP_32(*(uint32_t *)nvp);
	nvp += 4;

	/* skip over name */
	nvp = nvp + ((name_len + 3) & ~3); /* align */

	/* skip over type */
	type = BSWAP_32(*(uint32_t *)nvp);
	nvp += 4;

	if (type == valtype) {
		int nelm;

		/* element count; booleans legitimately carry zero elements */
		nelm = BSWAP_32(*(uint32_t *)nvp);
		if (valtype != DATA_TYPE_BOOLEAN && nelm < 1)
			return (1);
		nvp += 4;

		switch (valtype) {
		case DATA_TYPE_BOOLEAN:
			/* presence of the pair is the value itself */
			return (0);

		case DATA_TYPE_STRING:
			/* copy out and NUL-terminate; caller sizes buffer */
			slen = BSWAP_32(*(uint32_t *)nvp);
			nvp += 4;
			grub_memmove(strval, nvp, slen);
			strval[slen] = '\0';
			return (0);

		case DATA_TYPE_UINT64:
			*intval = BSWAP_64(*(uint64_t *)nvp);
			return (0);

		case DATA_TYPE_NVLIST:
			/* return a pointer into the packed list, no copy */
			*(void **)val = (void *)nvp;
			return (0);

		case DATA_TYPE_NVLIST_ARRAY:
			*(void **)val = (void *)nvp;
			if (nelmp)
				*nelmp = nelm;
			return (0);
		}
	}

	return (1);
}
1317 1320
/*
 * Look up 'name' in the packed nvlist and, if the stored type matches
 * 'valtype', fetch its value via nvpair_value().
 *
 * Return:
 *	0 - success
 *	1 - name not found or type mismatch
 */
static int
nvlist_lookup_value(char *nvlist, char *name, void *val, int valtype,
    int *nelmp)
{
	char *nvpair;

	for (nvpair = nvlist_next_nvpair(nvlist, NULL);
	    nvpair != NULL;
	    nvpair = nvlist_next_nvpair(nvlist, nvpair)) {
		int name_len = BSWAP_32(*(uint32_t *)(nvpair + 4 * 2));
		char *nvp_name = nvpair + 4 * 3;

		/*
		 * NOTE(review): only name_len bytes are compared, so a
		 * stored name that is a prefix of 'name' compares equal --
		 * presumably the fixed ZPOOL_CONFIG_* keys never prefix
		 * one another; confirm before adding new lookups.
		 */
		if ((grub_strncmp(nvp_name, name, name_len) == 0) &&
		    nvpair_type(nvpair) == valtype) {
			return (nvpair_value(nvpair, val, valtype, nelmp));
		}
	}
	return (1);
}
1337 1340
1338 1341 /*
1339 1342 * Check if this vdev is online and is in a good state.
1340 1343 */
1341 1344 static int
1342 1345 vdev_validate(char *nv)
1343 1346 {
1344 1347 uint64_t ival;
1345 1348
1346 1349 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_OFFLINE, &ival,
1347 1350 DATA_TYPE_UINT64, NULL) == 0 ||
1348 1351 nvlist_lookup_value(nv, ZPOOL_CONFIG_FAULTED, &ival,
1349 1352 DATA_TYPE_UINT64, NULL) == 0 ||
1350 1353 nvlist_lookup_value(nv, ZPOOL_CONFIG_REMOVED, &ival,
1351 1354 DATA_TYPE_UINT64, NULL) == 0)
1352 1355 return (ERR_DEV_VALUES);
1353 1356
1354 1357 return (0);
1355 1358 }
1356 1359
/*
 * Get a valid vdev pathname/devid from the boot device.
 * The caller should already allocate MAXPATHLEN memory for bootpath and devid.
 *
 * Return:
 *	0 - success
 *	ERR_FSYS_CORRUPT / ERR_NO_BOOTPATH / ERR_WONT_FIT - failure
 */
static int
vdev_get_bootpath(char *nv, uint64_t inguid, char *devid, char *bootpath,
    int is_spare)
{
	char type[16];

	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_TYPE, &type, DATA_TYPE_STRING,
	    NULL))
		return (ERR_FSYS_CORRUPT);

	if (grub_strcmp(type, VDEV_TYPE_DISK) == 0) {
		uint64_t guid;

		/* reject offline/faulted/removed disks */
		if (vdev_validate(nv) != 0)
			return (ERR_NO_BOOTPATH);

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_GUID,
		    &guid, DATA_TYPE_UINT64, NULL) != 0)
			return (ERR_NO_BOOTPATH);

		/* only the leaf matching the requested guid qualifies */
		if (guid != inguid)
			return (ERR_NO_BOOTPATH);

		/* for a spare vdev, pick the disk labeled with "is_spare" */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_value(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare, DATA_TYPE_UINT64, NULL);
			if (!spare)
				return (ERR_NO_BOOTPATH);
		}

		/* a missing phys_path or devid is tolerated: empty string */
		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_PHYS_PATH,
		    bootpath, DATA_TYPE_STRING, NULL) != 0)
			bootpath[0] = '\0';

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_DEVID,
		    devid, DATA_TYPE_STRING, NULL) != 0)
			devid[0] = '\0';

		if (grub_strlen(bootpath) >= MAXPATHLEN ||
		    grub_strlen(devid) >= MAXPATHLEN)
			return (ERR_WONT_FIT);

		return (0);

	} else if (grub_strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    grub_strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (grub_strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		/*
		 * Interior vdev (mirror/replacing/spare): recurse into each
		 * child. Note the deliberate assignment above: is_spare is
		 * (re)set iff this level is a "spare" vdev.
		 */
		int nelm, i;
		char *child;

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_CHILDREN, &child,
		    DATA_TYPE_NVLIST_ARRAY, &nelm))
			return (ERR_FSYS_CORRUPT);

		for (i = 0; i < nelm; i++) {
			char *child_i;

			child_i = nvlist_array(child, i);
			if (vdev_get_bootpath(child_i, inguid, devid,
			    bootpath, is_spare) == 0)
				return (0);
		}
	}

	return (ERR_NO_BOOTPATH);
}
1429 1432
/*
 * Check the disk label information and retrieve needed vdev name-value pairs.
 *
 * Return:
 *	0 - success
 *	ERR_* - failure
 */
static int
check_pool_label(uint64_t sector, char *stack, char *outdevid,
    char *outpath, uint64_t *outguid, uint64_t *outashift, uint64_t *outversion)
{
	vdev_phys_t *vdev;
	uint64_t pool_state, txg = 0;
	char *nvlist, *nv, *features;
	uint64_t diskguid;

	/* the packed nvlist sits past the label's skip area */
	sector += (VDEV_SKIP_SIZE >> SPA_MINBLOCKSHIFT);

	/* Read in the vdev name-value pair list (112K). */
	if (devread(sector, 0, VDEV_PHYS_SIZE, stack) == 0)
		return (ERR_READ);

	vdev = (vdev_phys_t *)stack;
	stack += sizeof (vdev_phys_t);

	if (nvlist_unpack(vdev->vp_nvlist, &nvlist))
		return (ERR_FSYS_CORRUPT);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_STATE, &pool_state,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	if (pool_state == POOL_STATE_DESTROYED)
		return (ERR_FILESYSTEM_NOT_FOUND);

	/* record the pool name in the global for later use */
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_NAME,
	    current_rootpool, DATA_TYPE_STRING, NULL))
		return (ERR_FSYS_CORRUPT);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_TXG, &txg,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	/* not an active device */
	if (txg == 0)
		return (ERR_NO_BOOTPATH);

	/* refuse pools whose on-disk version we don't understand */
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VERSION, outversion,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);
	if (!SPA_VERSION_IS_SUPPORTED(*outversion))
		return (ERR_NEWER_VERSION);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VDEV_TREE, &nv,
	    DATA_TYPE_NVLIST, NULL))
		return (ERR_FSYS_CORRUPT);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_GUID, &diskguid,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);
	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_ASHIFT, outashift,
	    DATA_TYPE_UINT64, NULL) != 0)
		return (ERR_FSYS_CORRUPT);
	/* locate this disk's path/devid within the vdev tree */
	if (vdev_get_bootpath(nv, diskguid, outdevid, outpath, 0))
		return (ERR_NO_BOOTPATH);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_GUID, outguid,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	/*
	 * If the label carries a features_for_read list, every feature in
	 * it must be one this implementation knows how to read.
	 */
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    &features, DATA_TYPE_NVLIST, NULL) == 0) {
		char *nvp;
		char *name = stack;
		stack += MAXNAMELEN;

		for (nvp = nvlist_next_nvpair(features, NULL);
		    nvp != NULL;
		    nvp = nvlist_next_nvpair(features, nvp)) {
			zap_attribute_t za;

			if (nvpair_name(nvp, name, MAXNAMELEN) != 0)
				return (ERR_FSYS_CORRUPT);

			/* synthesize a zap attribute for check_feature() */
			za.za_integer_length = 8;
			za.za_num_integers = 1;
			za.za_first_integer = 1;
			za.za_name = name;
			if (check_feature(&za, spa_feature_names, stack) != 0)
				return (ERR_NEWER_VERSION);
		}
	}

	return (0);
}
1522 1525
/*
 * zfs_mount() locates a valid uberblock of the root pool and read in its MOS
 * to the memory address MOS.
 *
 * Return:
 *	1 - success
 *	0 - failure
 */
int
zfs_mount(void)
{
	char *stack, *ub_array;
	int label = 0;
	uberblock_t *ubbest;
	objset_phys_t *osp;
	char tmp_bootpath[MAXNAMELEN];
	char tmp_devid[MAXNAMELEN];
	uint64_t tmp_guid, ashift, version;
	uint64_t adjpl = (uint64_t)part_length << SPA_MINBLOCKSHIFT;
	int err = errnum; /* preserve previous errnum state */

	/* if it's our first time here, zero the best uberblock out */
	if (best_drive == 0 && best_part == 0 && find_best_root) {
		grub_memset(&current_uberblock, 0, sizeof (uberblock_t));
		pool_guid = 0;
	}

	/* carve working buffers out of the scratch area */
	stackbase = ZFS_SCRATCH;
	stack = stackbase;
	ub_array = stack;
	stack += VDEV_UBERBLOCK_RING;

	osp = (objset_phys_t *)stack;
	stack += sizeof (objset_phys_t);
	adjpl = P2ALIGN(adjpl, (uint64_t)sizeof (vdev_label_t));

	for (label = 0; label < VDEV_LABELS; label++) {

		/*
		 * some eltorito stacks don't give us a size and
		 * we end up setting the size to MAXUINT, further
		 * some of these devices stop working once a single
		 * read past the end has been issued. Checking
		 * for a maximum part_length and skipping the backup
		 * labels at the end of the slice/partition/device
		 * avoids breaking down on such devices.
		 */
		if (part_length == MAXUINT && label == 2)
			break;

		uint64_t sector = vdev_label_start(adjpl,
		    label) >> SPA_MINBLOCKSHIFT;

		/* Read in the uberblock ring (128K). */
		if (devread(sector +
		    ((VDEV_SKIP_SIZE + VDEV_PHYS_SIZE) >> SPA_MINBLOCKSHIFT),
		    0, VDEV_UBERBLOCK_RING, ub_array) == 0)
			continue;

		/* validate this label and pull out device identity info */
		if (check_pool_label(sector, stack, tmp_devid,
		    tmp_bootpath, &tmp_guid, &ashift, &version))
			continue;

		if (pool_guid == 0)
			pool_guid = tmp_guid;

		/* pick the newest valid uberblock and read the MOS root */
		if ((ubbest = find_bestub(ub_array, ashift, sector)) == NULL ||
		    zio_read(&ubbest->ub_rootbp, osp, stack) != 0)
			continue;

		VERIFY_OS_TYPE(osp, DMU_OST_META);

		/* feature-flag pools must only use features we can read */
		if (version >= SPA_VERSION_FEATURES &&
		    check_mos_features(&osp->os_meta_dnode, stack) != 0)
			continue;

		/* keep scanning unless this beats the best uberblock so far */
		if (find_best_root && ((pool_guid != tmp_guid) ||
		    vdev_uberblock_compare(ubbest, &(current_uberblock)) <= 0))
			continue;

		/* Got the MOS. Save it at the memory addr MOS. */
		grub_memmove(MOS, &osp->os_meta_dnode, DNODE_SIZE);
		grub_memmove(&current_uberblock, ubbest, sizeof (uberblock_t));
		grub_memmove(current_bootpath, tmp_bootpath, MAXNAMELEN);
		grub_memmove(current_devid, tmp_devid, grub_strlen(tmp_devid));
		is_zfs_mount = 1;
		return (1);
	}

	/*
	 * While some fs impls. (tftp) rely on setting and keeping
	 * global errnums set, others won't reset it and will break
	 * when issuing rawreads. The goal here is to simply not
	 * have zfs mount attempts impact the previous state.
	 */
	errnum = err;
	return (0);
}
1621 1624
/*
 * zfs_open() locates a file in the rootpool by following the
 * MOS and places the dnode of the file in the memory address DNODE.
 *
 * Return:
 *	1 - success
 *	0 - failure
 */
int
zfs_open(char *filename)
{
	char *stack;
	dnode_phys_t *mdn;

	/* reset per-file state and carve buffers from the scratch area */
	file_buf = NULL;
	stackbase = ZFS_SCRATCH;
	stack = stackbase;

	mdn = (dnode_phys_t *)stack;
	stack += sizeof (dnode_phys_t);

	dnode_mdn = NULL;
	dnode_buf = (dnode_phys_t *)stack;
	stack += 1<<DNODE_BLOCK_SHIFT;

	/*
	 * menu.lst is placed at the root pool filesystem level,
	 * do not goto 'current_bootfs'.
	 */
	if (is_top_dataset_file(filename)) {
		if (errnum = get_objset_mdn(MOS, NULL, NULL, mdn, stack))
			return (0);

		current_bootfs_obj = 0;
	} else {
		if (current_bootfs[0] == '\0') {
			/* Get the default root filesystem object number */
			if (errnum = get_default_bootfsobj(MOS,
			    &current_bootfs_obj, stack))
				return (0);

			if (errnum = get_objset_mdn(MOS, NULL,
			    &current_bootfs_obj, mdn, stack))
				return (0);
		} else {
			if (errnum = get_objset_mdn(MOS, current_bootfs,
			    &current_bootfs_obj, mdn, stack)) {
				/* forget a bad bootfs name for next time */
				grub_memset(current_bootfs, 0, MAXNAMELEN);
				return (0);
			}
		}
	}

	if (dnode_get_path(mdn, filename, DNODE, stack)) {
		errnum = ERR_FILE_NOT_FOUND;
		return (0);
	}

	/* get the file size and set the file position to 0 */

	/*
	 * For DMU_OT_SA we will need to locate the SIZE attribute,
	 * which could be either in the bonus buffer
	 * or the "spill" block.
	 */
	if (DNODE->dn_bonustype == DMU_OT_SA) {
		sa_hdr_phys_t *sahdrp;
		int hdrsize;

		if (DNODE->dn_bonuslen != 0) {
			sahdrp = (sa_hdr_phys_t *)DN_BONUS(DNODE);
		} else {
			if (DNODE->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
				blkptr_t *bp = &DNODE->dn_spill;
				void *buf;

				buf = (void *)stack;
				stack += BP_GET_LSIZE(bp);

				/* reset errnum to rawread() failure */
				errnum = 0;
				if (zio_read(bp, buf, stack) != 0) {
					return (0);
				}
				sahdrp = buf;
			} else {
				errnum = ERR_FSYS_CORRUPT;
				return (0);
			}
		}
		hdrsize = SA_HDR_SIZE(sahdrp);
		filemax = *(uint64_t *)((char *)sahdrp + hdrsize +
		    SA_SIZE_OFFSET);
	} else {
		filemax = ((znode_phys_t *)DN_BONUS(DNODE))->zp_size;
	}
	filepos = 0;

	dnode_buf = NULL;
	return (1);
}
1723 1726
/*
 * zfs_read reads in the data blocks pointed by the DNODE.
 *
 * Return:
 *	len - the length successfully read in to the buffer
 *	0 - failure
 */
int
zfs_read(char *buf, int len)
{
	char *stack;
	int blksz, length, movesize;

	/* lazily carve a one-block file cache out of the scratch area */
	if (file_buf == NULL) {
		file_buf = stackbase;
		stackbase += SPA_MAXBLOCKSIZE;
		file_start = file_end = 0;
	}
	stack = stackbase;

	/*
	 * If offset is in memory, move it into the buffer provided and return.
	 */
	if (filepos >= file_start && filepos+len <= file_end) {
		grub_memmove(buf, file_buf + filepos - file_start, len);
		filepos += len;
		return (len);
	}

	blksz = DNODE->dn_datablkszsec << SPA_MINBLOCKSHIFT;

	/*
	 * Entire Dnode is too big to fit into the space available. We
	 * will need to read it in chunks. This could be optimized to
	 * read in as large a chunk as there is space available, but for
	 * now, this only reads in one data block at a time.
	 */
	length = len;
	while (length) {
		/*
		 * Find requested blkid and the offset within that block.
		 */
		uint64_t blkid = filepos / blksz;

		if (errnum = dmu_read(DNODE, blkid, file_buf, stack))
			return (0);

		/* the cache window now covers exactly this block */
		file_start = blkid * blksz;
		file_end = file_start + blksz;

		/* copy up to the block end or the request end */
		movesize = MIN(length, file_end - filepos);

		grub_memmove(buf, file_buf + filepos - file_start,
		    movesize);
		buf += movesize;
		length -= movesize;
		filepos += movesize;
	}

	return (len);
}
1785 1788
/*
 * No-Op
 *
 * Placeholder embed hook: unconditionally reports success; both
 * parameters are unused.
 */
int
zfs_embed(int *start_sector, int needed_sectors)
{
	return (1);
}
1794 1797
1795 1798 #endif /* FSYS_ZFS */
↓ open down ↓ |
820 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX