4185 New hash algorithm support
--- old/usr/src/grub/grub-0.97/stage2/fsys_zfs.c
+++ new/usr/src/grub/grub-0.97/stage2/fsys_zfs.c
1 1 /*
2 2 * GRUB -- GRand Unified Bootloader
3 3 * Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
4 4 *
5 5 * This program is free software; you can redistribute it and/or modify
6 6 * it under the terms of the GNU General Public License as published by
7 7 * the Free Software Foundation; either version 2 of the License, or
8 8 * (at your option) any later version.
9 9 *
10 10 * This program is distributed in the hope that it will be useful,
11 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 13 * GNU General Public License for more details.
14 14 *
15 15 * You should have received a copy of the GNU General Public License
16 16 * along with this program; if not, write to the Free Software
17 17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 18 */
19 19
20 20 /*
21 21 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
22 22 * Use is subject to license terms.
23 23 */
24 24
25 25 /*
26 26 * Copyright (c) 2012 by Delphix. All rights reserved.
27 27 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
28 28 */
29 29
30 30 /*
31 31 * The zfs plug-in routines for GRUB are:
32 32 *
33 33 * zfs_mount() - locates a valid uberblock of the root pool and reads
34 34 * in its MOS at the memory address MOS.
35 35 *
36 36 * zfs_open() - locates a plain file object by following the MOS
37 37 * and places its dnode at the memory address DNODE.
38 38 *
39 39 * zfs_read() - reads in the data blocks pointed to by the DNODE.
40 40 *
41 41 * ZFS_SCRATCH is used as a working area.
42 42 *
43 43 * (memory addr) MOS DNODE ZFS_SCRATCH
44 44 * | | |
45 45 * +-------V---------V----------V---------------+
46 46 * memory | | dnode | dnode | scratch |
47 47 * | | 512B | 512B | area |
48 48 * +--------------------------------------------+
49 49 */
50 50
51 51 #ifdef FSYS_ZFS
52 52
53 53 #include "shared.h"
54 54 #include "filesys.h"
55 55 #include "fsys_zfs.h"
56 56
57 57 /* cache for a file block of the currently zfs_open()-ed file */
58 58 static void *file_buf = NULL;
59 59 static uint64_t file_start = 0;
60 60 static uint64_t file_end = 0;
61 61
62 62 /* cache for a dnode block */
63 63 static dnode_phys_t *dnode_buf = NULL;
64 64 static dnode_phys_t *dnode_mdn = NULL;
65 65 static uint64_t dnode_start = 0;
66 66 static uint64_t dnode_end = 0;
67 67
68 68 static uint64_t pool_guid = 0;
69 69 static uberblock_t current_uberblock;
70 70 static char *stackbase;
71 71
72 72 decomp_entry_t decomp_table[ZIO_COMPRESS_FUNCTIONS] =
73 73 {
74 74 {"inherit", 0}, /* ZIO_COMPRESS_INHERIT */
75 75 {"on", lzjb_decompress}, /* ZIO_COMPRESS_ON */
76 76 {"off", 0}, /* ZIO_COMPRESS_OFF */
77 77 {"lzjb", lzjb_decompress}, /* ZIO_COMPRESS_LZJB */
78 78 {"empty", 0}, /* ZIO_COMPRESS_EMPTY */
79 79 {"gzip-1", 0}, /* ZIO_COMPRESS_GZIP_1 */
80 80 {"gzip-2", 0}, /* ZIO_COMPRESS_GZIP_2 */
81 81 {"gzip-3", 0}, /* ZIO_COMPRESS_GZIP_3 */
82 82 {"gzip-4", 0}, /* ZIO_COMPRESS_GZIP_4 */
83 83 {"gzip-5", 0}, /* ZIO_COMPRESS_GZIP_5 */
84 84 {"gzip-6", 0}, /* ZIO_COMPRESS_GZIP_6 */
85 85 {"gzip-7", 0}, /* ZIO_COMPRESS_GZIP_7 */
86 86 {"gzip-8", 0}, /* ZIO_COMPRESS_GZIP_8 */
87 87 {"gzip-9", 0}, /* ZIO_COMPRESS_GZIP_9 */
88 88 {"zle", 0}, /* ZIO_COMPRESS_ZLE */
89 89 {"lz4", lz4_decompress} /* ZIO_COMPRESS_LZ4 */
90 90 };
91 91
92 92 static int zio_read_data(blkptr_t *bp, void *buf, char *stack);
93 93
94 94 /*
95 95 * Our own version of bcmp().
96 96 */
97 97 static int
98 98 zfs_bcmp(const void *s1, const void *s2, size_t n)
99 99 {
100 100 const uchar_t *ps1 = s1;
101 101 const uchar_t *ps2 = s2;
102 102
103 103 if (s1 != s2 && n != 0) {
104 104 do {
105 105 if (*ps1++ != *ps2++)
106 106 return (1);
107 107 } while (--n != 0);
108 108 }
109 109
110 110 return (0);
111 111 }
112 112
113 113 /*
114 114 * Our own version of log2(). Same thing as highbit()-1.
115 115 */
116 116 static int
117 117 zfs_log2(uint64_t num)
118 118 {
119 119 int i = 0;
120 120
121 121 while (num > 1) {
122 122 i++;
123 123 num = num >> 1;
124 124 }
125 125
126 126 return (i);
127 127 }
128 128
129 129 /* Checksum Functions */
130 130 static void
131 131 zio_checksum_off(const void *buf, uint64_t size, zio_cksum_t *zcp)
132 132 {
133 133 ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
134 134 }
135 135
136 136 /* Checksum Table and Values */
137 137 zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
138 138 {{NULL, NULL}, 0, 0, "inherit"},
139 139 {{NULL, NULL}, 0, 0, "on"},
140 140 {{zio_checksum_off, zio_checksum_off}, 0, 0, "off"},
141 141 {{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 1, "label"},
142 142 {{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 1, "gang_header"},
143 143 {{NULL, NULL}, 0, 0, "zilog"},
144 144 {{fletcher_2_native, fletcher_2_byteswap}, 0, 0, "fletcher2"},
145 145 {{fletcher_4_native, fletcher_4_byteswap}, 1, 0, "fletcher4"},
146 146 {{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 0, "SHA256"},
147 147 {{NULL, NULL}, 0, 0, "zilog2"},
148 + {{zio_checksum_SHA512, NULL}, 0, 0, "SHA512"}
148 149 };
149 150
150 151 /*
151 152 * zio_checksum_verify: Provides support for checksum verification.
152 153 *
153 - * Fletcher2, Fletcher4, and SHA256 are supported.
154 + * Fletcher2, Fletcher4, SHA-256 and SHA-512/256 are supported.
154 155 *
155 156 * Return:
156 157 * -1 = Failure
157 158 * 0 = Success
158 159 */
159 160 static int
160 161 zio_checksum_verify(blkptr_t *bp, char *data, int size)
161 162 {
162 163 zio_cksum_t zc = bp->blk_cksum;
163 164 uint32_t checksum = BP_GET_CHECKSUM(bp);
164 165 int byteswap = BP_SHOULD_BYTESWAP(bp);
165 166 zio_eck_t *zec = (zio_eck_t *)(data + size) - 1;
166 167 zio_checksum_info_t *ci = &zio_checksum_table[checksum];
167 168 zio_cksum_t actual_cksum, expected_cksum;
168 169
169 170 /* byteswap is not supported */
170 171 if (byteswap)
171 172 return (-1);
172 173
173 174 if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
174 175 return (-1);
175 176
176 177 if (ci->ci_eck) {
177 178 expected_cksum = zec->zec_cksum;
178 179 zec->zec_cksum = zc;
179 180 ci->ci_func[0](data, size, &actual_cksum);
180 181 zec->zec_cksum = expected_cksum;
181 182 zc = expected_cksum;
182 183
183 184 } else {
184 185 ci->ci_func[byteswap](data, size, &actual_cksum);
185 186 }
186 187
187 188 if ((actual_cksum.zc_word[0] - zc.zc_word[0]) |
188 189 (actual_cksum.zc_word[1] - zc.zc_word[1]) |
189 190 (actual_cksum.zc_word[2] - zc.zc_word[2]) |
190 191 (actual_cksum.zc_word[3] - zc.zc_word[3]))
191 192 return (-1);
192 193
193 194 return (0);
194 195 }
195 196
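For readers tracing the new SHA-512/256 entry: BP_GET_CHECKSUM() selects a slot of zio_checksum_table, and because byteswapped blocks are rejected above, only ci_func[0] (the native-endian function) is ever invoked, which is why the byteswap slot of the new "SHA512" entry can be left NULL. A minimal, illustrative-only sketch of that dispatch (it assumes the types from fsys_zfs.h and omits the embedded-checksum and bounds handling done by the real function; it is not part of this change):

	static int
	cksum_dispatch_sketch(blkptr_t *bp, char *data, int size)
	{
		zio_checksum_info_t *ci = &zio_checksum_table[BP_GET_CHECKSUM(bp)];
		zio_cksum_t actual;

		if (ci->ci_func[0] == NULL)
			return (-1);	/* algorithm not readable by this GRUB */
		ci->ci_func[0](data, size, &actual);

		/* compare all four 64-bit words against the block pointer */
		return ((actual.zc_word[0] == bp->blk_cksum.zc_word[0] &&
		    actual.zc_word[1] == bp->blk_cksum.zc_word[1] &&
		    actual.zc_word[2] == bp->blk_cksum.zc_word[2] &&
		    actual.zc_word[3] == bp->blk_cksum.zc_word[3]) ? 0 : -1);
	}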
196 197 /*
197 198 * vdev_label_start returns the physical disk offset (in bytes) of
198 199 * label "l".
199 200 */
200 201 static uint64_t
201 202 vdev_label_start(uint64_t psize, int l)
202 203 {
203 204 return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
204 205 0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
205 206 }
206 207
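ZFS keeps VDEV_LABELS (four) label copies per vdev: the first two at the front of the device, the last two at the back. Assuming the conventional 256 KiB vdev_label_t (a skipped header area, the 112 KiB name-value area read by check_pool_label(), and the 128 KiB uberblock ring), the formula above works out to, with psize the device size in bytes:

	label 0: 0 * 256 KiB                          = 0
	label 1: 1 * 256 KiB                          = 256 KiB
	label 2: 2 * 256 KiB + (psize - 4 * 256 KiB)  = psize - 512 KiB
	label 3: 3 * 256 KiB + (psize - 4 * 256 KiB)  = psize - 256 KiB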
207 208 /*
208 209 * vdev_uberblock_compare takes two uberblock structures and returns an integer
209 210 * indicating the more recent of the two.
210 211 * Return Value = 1 if ub2 is more recent
211 212 * Return Value = -1 if ub1 is more recent
212 213 * The most recent uberblock is determined using its transaction number and
213 214 * timestamp. The uberblock with the highest transaction number is
214 215 * considered "newer". If the transaction numbers of the two blocks match, the
215 216 * timestamps are compared to determine the "newer" of the two.
216 217 */
217 218 static int
218 219 vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
219 220 {
220 221 if (ub1->ub_txg < ub2->ub_txg)
221 222 return (-1);
222 223 if (ub1->ub_txg > ub2->ub_txg)
223 224 return (1);
224 225
225 226 if (ub1->ub_timestamp < ub2->ub_timestamp)
226 227 return (-1);
227 228 if (ub1->ub_timestamp > ub2->ub_timestamp)
228 229 return (1);
229 230
230 231 return (0);
231 232 }
232 233
233 234 /*
234 235 * Three pieces of information are needed to verify an uberblock: the magic
235 236 * number, the version number, and the checksum.
236 237 *
237 238 * Return:
238 239 * 0 - Success
239 240 * -1 - Failure
240 241 */
241 242 static int
242 243 uberblock_verify(uberblock_t *uber, uint64_t ub_size, uint64_t offset)
243 244 {
244 245 blkptr_t bp;
245 246
246 247 BP_ZERO(&bp);
247 248 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
248 249 BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
249 250 ZIO_SET_CHECKSUM(&bp.blk_cksum, offset, 0, 0, 0);
250 251
251 252 if (zio_checksum_verify(&bp, (char *)uber, ub_size) != 0)
252 253 return (-1);
253 254
254 255 if (uber->ub_magic == UBERBLOCK_MAGIC &&
255 256 SPA_VERSION_IS_SUPPORTED(uber->ub_version))
256 257 return (0);
257 258
258 259 return (-1);
259 260 }
260 261
261 262 /*
262 263 * Find the best uberblock.
263 264 * Return:
264 265 * Success - Pointer to the best uberblock.
265 266 * Failure - NULL
266 267 */
267 268 static uberblock_t *
268 269 find_bestub(char *ub_array, uint64_t ashift, uint64_t sector)
269 270 {
270 271 uberblock_t *ubbest = NULL;
271 272 uberblock_t *ubnext;
272 273 uint64_t offset, ub_size;
273 274 int i;
274 275
275 276 ub_size = VDEV_UBERBLOCK_SIZE(ashift);
276 277
277 278 for (i = 0; i < VDEV_UBERBLOCK_COUNT(ashift); i++) {
278 279 ubnext = (uberblock_t *)ub_array;
279 280 ub_array += ub_size;
280 281 offset = (sector << SPA_MINBLOCKSHIFT) +
281 282 VDEV_UBERBLOCK_OFFSET(ashift, i);
282 283
283 284 if (uberblock_verify(ubnext, ub_size, offset) != 0)
284 285 continue;
285 286
286 287 if (ubbest == NULL ||
287 288 vdev_uberblock_compare(ubnext, ubbest) > 0)
288 289 ubbest = ubnext;
289 290 }
290 291
291 292 return (ubbest);
292 293 }
293 294
294 295 /*
295 296 * Read a block of data based on the gang block address dva,
296 297 * and put its data in buf.
297 298 *
298 299 * Return:
299 300 * 0 - success
300 301 * 1 - failure
301 302 */
302 303 static int
303 304 zio_read_gang(blkptr_t *bp, dva_t *dva, void *buf, char *stack)
304 305 {
305 306 zio_gbh_phys_t *zio_gb;
306 307 uint64_t offset, sector;
307 308 blkptr_t tmpbp;
308 309 int i;
309 310
310 311 zio_gb = (zio_gbh_phys_t *)stack;
311 312 stack += SPA_GANGBLOCKSIZE;
312 313 offset = DVA_GET_OFFSET(dva);
313 314 sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
314 315
315 316 /* read in the gang block header */
316 317 if (devread(sector, 0, SPA_GANGBLOCKSIZE, (char *)zio_gb) == 0) {
317 318 grub_printf("failed to read in a gang block header\n");
318 319 return (1);
319 320 }
320 321
321 322 /* self checksumming the gang block header */
322 323 BP_ZERO(&tmpbp);
323 324 BP_SET_CHECKSUM(&tmpbp, ZIO_CHECKSUM_GANG_HEADER);
324 325 BP_SET_BYTEORDER(&tmpbp, ZFS_HOST_BYTEORDER);
325 326 ZIO_SET_CHECKSUM(&tmpbp.blk_cksum, DVA_GET_VDEV(dva),
326 327 DVA_GET_OFFSET(dva), bp->blk_birth, 0);
327 328 if (zio_checksum_verify(&tmpbp, (char *)zio_gb, SPA_GANGBLOCKSIZE)) {
328 329 grub_printf("failed to checksum a gang block header\n");
329 330 return (1);
330 331 }
331 332
332 333 for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
333 334 if (zio_gb->zg_blkptr[i].blk_birth == 0)
334 335 continue;
335 336
336 337 if (zio_read_data(&zio_gb->zg_blkptr[i], buf, stack))
337 338 return (1);
338 339 buf += BP_GET_PSIZE(&zio_gb->zg_blkptr[i]);
339 340 }
340 341
341 342 return (0);
342 343 }
343 344
344 345 /*
345 346 * Read in a block of raw data to buf.
346 347 *
347 348 * Return:
348 349 * 0 - success
349 350 * 1 - failure
350 351 */
351 352 static int
352 353 zio_read_data(blkptr_t *bp, void *buf, char *stack)
353 354 {
354 355 int i, psize;
355 356
356 357 psize = BP_GET_PSIZE(bp);
357 358
358 359 /* pick a good dva from the block pointer */
359 360 for (i = 0; i < SPA_DVAS_PER_BP; i++) {
360 361 uint64_t offset, sector;
361 362
362 363 if (bp->blk_dva[i].dva_word[0] == 0 &&
363 364 bp->blk_dva[i].dva_word[1] == 0)
364 365 continue;
365 366
366 367 if (DVA_GET_GANG(&bp->blk_dva[i])) {
367 368 if (zio_read_gang(bp, &bp->blk_dva[i], buf, stack) == 0)
368 369 return (0);
369 370 } else {
370 371 /* read in a data block */
371 372 offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
372 373 sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
373 374 if (devread(sector, 0, psize, buf) != 0)
374 375 return (0);
375 376 }
376 377 }
377 378
378 379 return (1);
379 380 }
380 381
381 382 /*
382 383 * Read in a block of data, verify its checksum, decompress if needed,
383 384 * and put the uncompressed data in buf.
384 385 *
385 386 * Return:
386 387 * 0 - success
387 388 * errnum - failure
388 389 */
389 390 static int
390 391 zio_read(blkptr_t *bp, void *buf, char *stack)
391 392 {
392 393 int lsize, psize, comp;
393 394 char *retbuf;
394 395
395 396 comp = BP_GET_COMPRESS(bp);
396 397 lsize = BP_GET_LSIZE(bp);
397 398 psize = BP_GET_PSIZE(bp);
398 399
399 400 if ((unsigned int)comp >= ZIO_COMPRESS_FUNCTIONS ||
400 401 (comp != ZIO_COMPRESS_OFF &&
401 402 decomp_table[comp].decomp_func == NULL)) {
402 403 grub_printf("compression algorithm not supported\n");
403 404 return (ERR_FSYS_CORRUPT);
404 405 }
405 406
406 407 if ((char *)buf < stack && ((char *)buf) + lsize > stack) {
407 408 grub_printf("not enough memory allocated\n");
408 409 return (ERR_WONT_FIT);
409 410 }
410 411
411 412 retbuf = buf;
412 413 if (comp != ZIO_COMPRESS_OFF) {
413 414 buf = stack;
414 415 stack += psize;
415 416 }
416 417
417 418 if (zio_read_data(bp, buf, stack) != 0) {
418 419 grub_printf("zio_read_data failed\n");
419 420 return (ERR_FSYS_CORRUPT);
420 421 }
421 422
422 423 if (zio_checksum_verify(bp, buf, psize) != 0) {
423 424 grub_printf("checksum verification failed\n");
424 425 return (ERR_FSYS_CORRUPT);
425 426 }
426 427
427 428 if (comp != ZIO_COMPRESS_OFF) {
428 429 if (decomp_table[comp].decomp_func(buf, retbuf, psize,
429 430 lsize) != 0) {
430 431 grub_printf("zio_read decompression failed\n");
431 432 return (ERR_FSYS_CORRUPT);
432 433 }
433 434 }
434 435
435 436 return (0);
436 437 }
437 438
438 439 /*
439 440 * Get the block from a block id.
440 441 * push the block onto the stack.
441 442 *
442 443 * Return:
443 444 * 0 - success
444 445 * errnum - failure
445 446 */
446 447 static int
447 448 dmu_read(dnode_phys_t *dn, uint64_t blkid, void *buf, char *stack)
448 449 {
449 450 int idx, level;
450 451 blkptr_t *bp_array = dn->dn_blkptr;
451 452 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
452 453 blkptr_t *bp, *tmpbuf;
453 454
454 455 bp = (blkptr_t *)stack;
455 456 stack += sizeof (blkptr_t);
456 457
457 458 tmpbuf = (blkptr_t *)stack;
458 459 stack += 1<<dn->dn_indblkshift;
459 460
460 461 for (level = dn->dn_nlevels - 1; level >= 0; level--) {
461 462 idx = (blkid >> (epbs * level)) & ((1<<epbs)-1);
462 463 *bp = bp_array[idx];
463 464 if (level == 0)
464 465 tmpbuf = buf;
465 466 if (BP_IS_HOLE(bp)) {
466 467 grub_memset(buf, 0,
467 468 dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
468 469 break;
469 470 } else if (errnum = zio_read(bp, tmpbuf, stack)) {
470 471 return (errnum);
471 472 }
472 473
473 474 bp_array = tmpbuf;
474 475 }
475 476
476 477 return (0);
477 478 }
478 479
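dmu_read() walks the indirect-block tree from the top level down; epbs is log2 of the number of block pointers per indirect block, and the index taken at each level is just a bit-slice of blkid. A worked example (the block sizes are illustrative, not something this code requires): with dn_indblkshift = 17 (128 KiB indirect blocks) and SPA_BLKPTRSHIFT = 7 (128-byte blkptr_t), epbs = 10, so each indirect block holds 1024 pointers. For blkid = 0x345678 and dn_nlevels = 3 the loop visits:

	level 2: (0x345678 >> 20) & 0x3ff = 0x3
	level 1: (0x345678 >> 10) & 0x3ff = 0x115
	level 0:  0x345678        & 0x3ff = 0x278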
479 480 /*
480 481 * mzap_lookup: Looks up property described by "name" and returns the value
481 482 * in "value".
482 483 *
483 484 * Return:
484 485 * 0 - success
485 486 * errnum - failure
486 487 */
487 488 static int
488 489 mzap_lookup(mzap_phys_t *zapobj, int objsize, const char *name,
489 490 uint64_t *value)
490 491 {
491 492 int i, chunks;
492 493 mzap_ent_phys_t *mzap_ent = zapobj->mz_chunk;
493 494
494 495 chunks = objsize / MZAP_ENT_LEN - 1;
495 496 for (i = 0; i < chunks; i++) {
496 497 if (grub_strcmp(mzap_ent[i].mze_name, name) == 0) {
497 498 *value = mzap_ent[i].mze_value;
498 499 return (0);
499 500 }
500 501 }
501 502
502 503 return (ERR_FSYS_CORRUPT);
503 504 }
504 505
505 506 static uint64_t
506 507 zap_hash(uint64_t salt, const char *name)
507 508 {
508 509 static uint64_t table[256];
509 510 const uint8_t *cp;
510 511 uint8_t c;
511 512 uint64_t crc = salt;
512 513
513 514 if (table[128] == 0) {
514 515 uint64_t *ct;
515 516 int i, j;
516 517 for (i = 0; i < 256; i++) {
517 518 for (ct = table + i, *ct = i, j = 8; j > 0; j--)
518 519 *ct = (*ct >> 1) ^ (-(*ct & 1) &
519 520 ZFS_CRC64_POLY);
520 521 }
521 522 }
522 523
523 524 if (crc == 0 || table[128] != ZFS_CRC64_POLY) {
524 525 errnum = ERR_FSYS_CORRUPT;
525 526 return (0);
526 527 }
527 528
528 529 for (cp = (const uint8_t *)name; (c = *cp) != '\0'; cp++)
529 530 crc = (crc >> 8) ^ table[(crc ^ c) & 0xFF];
530 531
531 532 /*
532 533 * Only use 28 bits, since we need 4 bits in the cookie for the
533 534 * collision differentiator. We MUST use the high bits, since
534 535 * those are the ones that we first pay attention to when
535 536 * choosing the bucket.
536 537 */
537 538 crc &= ~((1ULL << (64 - 28)) - 1);
538 539
539 540 return (crc);
540 541 }
541 542
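A quick illustration of the masking above (the ZAP_HASH_IDX() macro body is assumed here to be the usual take-the-top-bits shift used by fzap_lookup() below): only the 28 high-order bits of the CRC survive, and the bucket index is then read from the very top of those bits.

	crc                       = 0xABCDEF1234567890
	crc &= ~((1ULL<<36) - 1)  : 0xABCDEF1000000000   (top 28 bits kept)
	bucket for zt_shift = 10  : crc >> (64 - 10) = 0x2AF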
542 543 /*
543 544 * Only to be used on 8-bit arrays.
544 545 * array_len is actual len in bytes (not encoded le_value_length).
545 546 * buf is null-terminated.
546 547 */
547 548 static int
548 549 zap_leaf_array_equal(zap_leaf_phys_t *l, int blksft, int chunk,
549 550 int array_len, const char *buf)
550 551 {
551 552 int bseen = 0;
552 553
553 554 while (bseen < array_len) {
554 555 struct zap_leaf_array *la =
555 556 &ZAP_LEAF_CHUNK(l, blksft, chunk).l_array;
556 557 int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES);
557 558
558 559 if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
559 560 return (0);
560 561
561 562 if (zfs_bcmp(la->la_array, buf + bseen, toread) != 0)
562 563 break;
563 564 chunk = la->la_next;
564 565 bseen += toread;
565 566 }
566 567 return (bseen == array_len);
567 568 }
568 569
569 570 /*
570 571 * Given a zap_leaf_phys_t, walk thru the zap leaf chunks to get the
571 572 * value for the property "name".
572 573 *
573 574 * Return:
574 575 * 0 - success
575 576 * errnum - failure
576 577 */
577 578 static int
578 579 zap_leaf_lookup(zap_leaf_phys_t *l, int blksft, uint64_t h,
579 580 const char *name, uint64_t *value)
580 581 {
581 582 uint16_t chunk;
582 583 struct zap_leaf_entry *le;
583 584
584 585 /* Verify if this is a valid leaf block */
585 586 if (l->l_hdr.lh_block_type != ZBT_LEAF)
586 587 return (ERR_FSYS_CORRUPT);
587 588 if (l->l_hdr.lh_magic != ZAP_LEAF_MAGIC)
588 589 return (ERR_FSYS_CORRUPT);
589 590
590 591 for (chunk = l->l_hash[LEAF_HASH(blksft, h)];
591 592 chunk != CHAIN_END; chunk = le->le_next) {
592 593
593 594 if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
594 595 return (ERR_FSYS_CORRUPT);
595 596
596 597 le = ZAP_LEAF_ENTRY(l, blksft, chunk);
597 598
598 599 /* Verify the chunk entry */
599 600 if (le->le_type != ZAP_CHUNK_ENTRY)
600 601 return (ERR_FSYS_CORRUPT);
601 602
602 603 if (le->le_hash != h)
603 604 continue;
604 605
605 606 if (zap_leaf_array_equal(l, blksft, le->le_name_chunk,
606 607 le->le_name_length, name)) {
607 608
608 609 struct zap_leaf_array *la;
609 610 uint8_t *ip;
610 611
611 612 if (le->le_int_size != 8 || le->le_value_length != 1)
612 613 return (ERR_FSYS_CORRUPT);
613 614
614 615 /* get the uint64_t property value */
615 616 la = &ZAP_LEAF_CHUNK(l, blksft,
616 617 le->le_value_chunk).l_array;
617 618 ip = la->la_array;
618 619
619 620 *value = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 |
620 621 (uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 |
621 622 (uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 |
622 623 (uint64_t)ip[6] << 8 | (uint64_t)ip[7];
623 624
624 625 return (0);
625 626 }
626 627 }
627 628
628 629 return (ERR_FSYS_CORRUPT);
629 630 }
630 631
631 632 /*
632 633 * Fat ZAP lookup
633 634 *
634 635 * Return:
635 636 * 0 - success
636 637 * errnum - failure
637 638 */
638 639 static int
639 640 fzap_lookup(dnode_phys_t *zap_dnode, zap_phys_t *zap,
640 641 const char *name, uint64_t *value, char *stack)
641 642 {
642 643 zap_leaf_phys_t *l;
643 644 uint64_t hash, idx, blkid;
644 645 int blksft = zfs_log2(zap_dnode->dn_datablkszsec << DNODE_SHIFT);
645 646
646 647 /* Verify if this is a fat zap header block */
647 648 if (zap->zap_magic != (uint64_t)ZAP_MAGIC ||
648 649 zap->zap_flags != 0)
649 650 return (ERR_FSYS_CORRUPT);
650 651
651 652 hash = zap_hash(zap->zap_salt, name);
652 653 if (errnum)
653 654 return (errnum);
654 655
655 656 /* get block id from index */
656 657 if (zap->zap_ptrtbl.zt_numblks != 0) {
657 658 /* external pointer tables not supported */
658 659 return (ERR_FSYS_CORRUPT);
659 660 }
660 661 idx = ZAP_HASH_IDX(hash, zap->zap_ptrtbl.zt_shift);
661 662 blkid = ((uint64_t *)zap)[idx + (1<<(blksft-3-1))];
662 663
663 664 /* Get the leaf block */
664 665 l = (zap_leaf_phys_t *)stack;
665 666 stack += 1<<blksft;
666 667 if ((1<<blksft) < sizeof (zap_leaf_phys_t))
667 668 return (ERR_FSYS_CORRUPT);
668 669 if (errnum = dmu_read(zap_dnode, blkid, l, stack))
669 670 return (errnum);
670 671
671 672 return (zap_leaf_lookup(l, blksft, hash, name, value));
672 673 }
673 674
674 675 /*
675 676 * Read in the data of a zap object and find the value for a matching
676 677 * property name.
677 678 *
678 679 * Return:
679 680 * 0 - success
680 681 * errnum - failure
681 682 */
682 683 static int
683 684 zap_lookup(dnode_phys_t *zap_dnode, const char *name, uint64_t *val,
684 685 char *stack)
685 686 {
686 687 uint64_t block_type;
687 688 int size;
688 689 void *zapbuf;
689 690
690 691 /* Read in the first block of the zap object data. */
691 692 zapbuf = stack;
692 693 size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
693 694 stack += size;
694 695
695 696 if ((errnum = dmu_read(zap_dnode, 0, zapbuf, stack)) != 0)
696 697 return (errnum);
697 698
698 699 block_type = *((uint64_t *)zapbuf);
699 700
700 701 if (block_type == ZBT_MICRO) {
701 702 return (mzap_lookup(zapbuf, size, name, val));
702 703 } else if (block_type == ZBT_HEADER) {
703 704 /* this is a fat zap */
704 705 return (fzap_lookup(zap_dnode, zapbuf, name,
705 706 val, stack));
706 707 }
707 708
708 709 return (ERR_FSYS_CORRUPT);
709 710 }
710 711
711 712 typedef struct zap_attribute {
712 713 int za_integer_length;
713 714 uint64_t za_num_integers;
714 715 uint64_t za_first_integer;
715 716 char *za_name;
716 717 } zap_attribute_t;
717 718
718 719 typedef int (zap_cb_t)(zap_attribute_t *za, void *arg, char *stack);
719 720
720 721 static int
721 722 zap_iterate(dnode_phys_t *zap_dnode, zap_cb_t *cb, void *arg, char *stack)
722 723 {
723 724 uint32_t size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
724 725 zap_attribute_t za;
725 726 int i;
726 727 mzap_phys_t *mzp = (mzap_phys_t *)stack;
727 728 stack += size;
728 729
729 730 if ((errnum = dmu_read(zap_dnode, 0, mzp, stack)) != 0)
730 731 return (errnum);
731 732
732 733 /*
733 734 * Iteration over fatzap objects has not yet been implemented.
734 735 * If we encounter a pool in which there are more features for
735 736 * read than can fit inside a microzap (i.e., more than 2048
736 737 * features for read), we can add support for fatzap iteration.
737 738 * For now, fail.
738 739 */
739 740 if (mzp->mz_block_type != ZBT_MICRO) {
740 741 grub_printf("feature information stored in fatzap, pool "
741 742 "version not supported\n");
742 743 return (1);
743 744 }
744 745
745 746 za.za_integer_length = 8;
746 747 za.za_num_integers = 1;
747 748 for (i = 0; i < size / MZAP_ENT_LEN - 1; i++) {
748 749 mzap_ent_phys_t *mzep = &mzp->mz_chunk[i];
749 750 int err;
750 751
751 752 za.za_first_integer = mzep->mze_value;
752 753 za.za_name = mzep->mze_name;
753 754 err = cb(&za, arg, stack);
754 755 if (err != 0)
755 756 return (err);
756 757 }
757 758
758 759 return (0);
759 760 }
760 761
761 762 /*
762 763 * Get the dnode of an object number from the metadnode of an object set.
763 764 *
764 765 * Input
765 766 * mdn - metadnode to get the object dnode
766 767 * objnum - object number for the object dnode
767 768 * buf - data buffer that holds the returning dnode
768 769 * stack - scratch area
769 770 *
770 771 * Return:
771 772 * 0 - success
772 773 * errnum - failure
773 774 */
774 775 static int
775 776 dnode_get(dnode_phys_t *mdn, uint64_t objnum, uint8_t type, dnode_phys_t *buf,
776 777 char *stack)
777 778 {
778 779 uint64_t blkid, blksz; /* the block id this object dnode is in */
779 780 int epbs; /* shift of number of dnodes in a block */
780 781 int idx; /* index within a block */
781 782 dnode_phys_t *dnbuf;
782 783
783 784 blksz = mdn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
784 785 epbs = zfs_log2(blksz) - DNODE_SHIFT;
785 786 blkid = objnum >> epbs;
786 787 idx = objnum & ((1<<epbs)-1);
787 788
788 789 if (dnode_buf != NULL && dnode_mdn == mdn &&
789 790 objnum >= dnode_start && objnum < dnode_end) {
790 791 grub_memmove(buf, &dnode_buf[idx], DNODE_SIZE);
791 792 VERIFY_DN_TYPE(buf, type);
792 793 return (0);
793 794 }
794 795
795 796 if (dnode_buf && blksz == 1<<DNODE_BLOCK_SHIFT) {
796 797 dnbuf = dnode_buf;
797 798 dnode_mdn = mdn;
798 799 dnode_start = blkid << epbs;
799 800 dnode_end = (blkid + 1) << epbs;
800 801 } else {
801 802 dnbuf = (dnode_phys_t *)stack;
802 803 stack += blksz;
803 804 }
804 805
805 806 if (errnum = dmu_read(mdn, blkid, (char *)dnbuf, stack))
806 807 return (errnum);
807 808
808 809 grub_memmove(buf, &dnbuf[idx], DNODE_SIZE);
809 810 VERIFY_DN_TYPE(buf, type);
810 811
811 812 return (0);
812 813 }
813 814
814 815 /*
815 816 * Check if this is a special file that resides at the top
816 817 * dataset of the pool. Currently this is the GRUB menu,
817 818 * boot signature and boot signature backup.
818 819 * str starts with '/'.
819 820 */
820 821 static int
821 822 is_top_dataset_file(char *str)
822 823 {
823 824 char *tptr;
824 825
825 826 if ((tptr = grub_strstr(str, "menu.lst")) &&
826 827 (tptr[8] == '\0' || tptr[8] == ' ') &&
827 828 *(tptr-1) == '/')
828 829 return (1);
829 830
830 831 if (grub_strncmp(str, BOOTSIGN_DIR"/",
831 832 grub_strlen(BOOTSIGN_DIR) + 1) == 0)
832 833 return (1);
833 834
834 835 if (grub_strcmp(str, BOOTSIGN_BACKUP) == 0)
835 836 return (1);
836 837
837 838 return (0);
838 839 }
839 840
840 841 static int
841 842 check_feature(zap_attribute_t *za, void *arg, char *stack)
842 843 {
843 844 const char **names = arg;
844 845 int i;
845 846
846 847 if (za->za_first_integer == 0)
847 848 return (0);
848 849
849 850 for (i = 0; names[i] != NULL; i++) {
850 851 if (grub_strcmp(za->za_name, names[i]) == 0) {
851 852 return (0);
852 853 }
853 854 }
854 855 grub_printf("missing feature for read '%s'\n", za->za_name);
855 856 return (ERR_NEWER_VERSION);
856 857 }
857 858
858 859 /*
859 860 * Get the file dnode for a given file name where mdn is the meta dnode
860 861 * for this ZFS object set. When found, place the file dnode in dn.
861 862 * The 'path' argument will be mangled.
862 863 *
863 864 * Return:
864 865 * 0 - success
865 866 * errnum - failure
866 867 */
867 868 static int
868 869 dnode_get_path(dnode_phys_t *mdn, char *path, dnode_phys_t *dn,
869 870 char *stack)
870 871 {
871 872 uint64_t objnum, version;
872 873 char *cname, ch;
873 874
874 875 if (errnum = dnode_get(mdn, MASTER_NODE_OBJ, DMU_OT_MASTER_NODE,
875 876 dn, stack))
876 877 return (errnum);
877 878
878 879 if (errnum = zap_lookup(dn, ZPL_VERSION_STR, &version, stack))
879 880 return (errnum);
880 881 if (version > ZPL_VERSION)
881 882 return (-1);
882 883
883 884 if (errnum = zap_lookup(dn, ZFS_ROOT_OBJ, &objnum, stack))
884 885 return (errnum);
885 886
886 887 if (errnum = dnode_get(mdn, objnum, DMU_OT_DIRECTORY_CONTENTS,
887 888 dn, stack))
888 889 return (errnum);
889 890
890 891 /* skip leading slashes */
891 892 while (*path == '/')
892 893 path++;
893 894
894 895 while (*path && !grub_isspace(*path)) {
895 896
896 897 /* get the next component name */
897 898 cname = path;
898 899 while (*path && !grub_isspace(*path) && *path != '/')
899 900 path++;
900 901 ch = *path;
901 902 *path = 0; /* ensure null termination */
902 903
903 904 if (errnum = zap_lookup(dn, cname, &objnum, stack))
904 905 return (errnum);
905 906
906 907 objnum = ZFS_DIRENT_OBJ(objnum);
907 908 if (errnum = dnode_get(mdn, objnum, 0, dn, stack))
908 909 return (errnum);
909 910
910 911 *path = ch;
911 912 while (*path == '/')
912 913 path++;
913 914 }
914 915
915 916 /* We found the dnode for this file. Verify if it is a plain file. */
916 917 VERIFY_DN_TYPE(dn, DMU_OT_PLAIN_FILE_CONTENTS);
917 918
918 919 return (0);
919 920 }
920 921
921 922 /*
922 923 * Get the default 'bootfs' property value from the rootpool.
923 924 *
924 925 * Return:
925 926 * 0 - success
926 927 * errnum - failure
927 928 */
928 929 static int
929 930 get_default_bootfsobj(dnode_phys_t *mosmdn, uint64_t *obj, char *stack)
930 931 {
931 932 uint64_t objnum = 0;
932 933 dnode_phys_t *dn = (dnode_phys_t *)stack;
933 934 stack += DNODE_SIZE;
934 935
935 936 if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
936 937 DMU_OT_OBJECT_DIRECTORY, dn, stack))
937 938 return (errnum);
938 939
939 940 /*
940 941 * find the object number for 'pool_props', and get the dnode
941 942 * of the 'pool_props'.
942 943 */
943 944 if (zap_lookup(dn, DMU_POOL_PROPS, &objnum, stack))
944 945 return (ERR_FILESYSTEM_NOT_FOUND);
945 946
946 947 if (errnum = dnode_get(mosmdn, objnum, DMU_OT_POOL_PROPS, dn, stack))
947 948 return (errnum);
948 949
949 950 if (zap_lookup(dn, ZPOOL_PROP_BOOTFS, &objnum, stack))
950 951 return (ERR_FILESYSTEM_NOT_FOUND);
951 952
952 953 if (!objnum)
953 954 return (ERR_FILESYSTEM_NOT_FOUND);
954 955
955 956 *obj = objnum;
956 957 return (0);
957 958 }
958 959
959 960 /*
960 961 * List of pool features that the grub implementation of ZFS supports for
961 962 * read. Note that features that are only required for write do not need
962 963 * to be listed here since grub opens pools in read-only mode.
963 964 *
964 965 * When this list is updated the version number in usr/src/grub/capability
965 966 * must be incremented to ensure the new grub gets installed.
966 967 */
967 968 static const char *spa_feature_names[] = {
968 969 "org.illumos:lz4_compress",
970 + "org.illumos:sha512",
969 971 NULL
970 972 };
971 973
972 974 /*
973 975 * Checks whether the MOS features that are active are supported by this
974 976 * (GRUB's) implementation of ZFS.
975 977 *
976 978 * Return:
977 979 * 0: Success.
978 980 * errnum: Failure.
979 981 */
980 982 static int
981 983 check_mos_features(dnode_phys_t *mosmdn, char *stack)
982 984 {
983 985 uint64_t objnum;
984 986 dnode_phys_t *dn;
985 987 uint8_t error = 0;
986 988
987 989 dn = (dnode_phys_t *)stack;
988 990 stack += DNODE_SIZE;
989 991
990 992 if ((errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
991 993 DMU_OT_OBJECT_DIRECTORY, dn, stack)) != 0)
992 994 return (errnum);
993 995
994 996 /*
995 997 * Find the object number for 'features_for_read' and retrieve its
996 998 * corresponding dnode. Note that we don't check features_for_write
997 999 * because GRUB is not opening the pool for write.
998 1000 */
999 1001 if ((errnum = zap_lookup(dn, DMU_POOL_FEATURES_FOR_READ, &objnum,
1000 1002 stack)) != 0)
1001 1003 return (errnum);
1002 1004
1003 1005 if ((errnum = dnode_get(mosmdn, objnum, DMU_OTN_ZAP_METADATA,
1004 1006 dn, stack)) != 0)
1005 1007 return (errnum);
1006 1008
1007 1009 return (zap_iterate(dn, check_feature, spa_feature_names, stack));
1008 1010 }
1009 1011
1010 1012 /*
1011 1013 * Given a MOS metadnode, get the metadnode of a given filesystem name (fsname),
1012 1014 * e.g. pool/rootfs, or a given object number (obj), e.g. the object number
1013 1015 * of pool/rootfs.
1014 1016 *
1015 1017 * If no fsname and no obj are given, return the DSL_DIR metadnode.
1016 1018 * If fsname is given, return its metadnode and its matching object number.
1017 1019 * If only obj is given, return the metadnode for this object number.
1018 1020 *
1019 1021 * Return:
1020 1022 * 0 - success
1021 1023 * errnum - failure
1022 1024 */
1023 1025 static int
1024 1026 get_objset_mdn(dnode_phys_t *mosmdn, char *fsname, uint64_t *obj,
1025 1027 dnode_phys_t *mdn, char *stack)
1026 1028 {
1027 1029 uint64_t objnum, headobj;
1028 1030 char *cname, ch;
1029 1031 blkptr_t *bp;
1030 1032 objset_phys_t *osp;
1031 1033 int issnapshot = 0;
1032 1034 char *snapname;
1033 1035
1034 1036 if (fsname == NULL && obj) {
1035 1037 headobj = *obj;
1036 1038 goto skip;
1037 1039 }
1038 1040
1039 1041 if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
1040 1042 DMU_OT_OBJECT_DIRECTORY, mdn, stack))
1041 1043 return (errnum);
1042 1044
1043 1045 if (errnum = zap_lookup(mdn, DMU_POOL_ROOT_DATASET, &objnum,
1044 1046 stack))
1045 1047 return (errnum);
1046 1048
1047 1049 if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR, mdn, stack))
1048 1050 return (errnum);
1049 1051
1050 1052 if (fsname == NULL) {
1051 1053 headobj =
1052 1054 ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
1053 1055 goto skip;
1054 1056 }
1055 1057
1056 1058 /* take out the pool name */
1057 1059 while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
1058 1060 fsname++;
1059 1061
1060 1062 while (*fsname && !grub_isspace(*fsname)) {
1061 1063 uint64_t childobj;
1062 1064
1063 1065 while (*fsname == '/')
1064 1066 fsname++;
1065 1067
1066 1068 cname = fsname;
1067 1069 while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
1068 1070 fsname++;
1069 1071 ch = *fsname;
1070 1072 *fsname = 0;
1071 1073
1072 1074 snapname = cname;
1073 1075 while (*snapname && !grub_isspace(*snapname) && *snapname !=
1074 1076 '@')
1075 1077 snapname++;
1076 1078 if (*snapname == '@') {
1077 1079 issnapshot = 1;
1078 1080 *snapname = 0;
1079 1081 }
1080 1082 childobj =
1081 1083 ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_child_dir_zapobj;
1082 1084 if (errnum = dnode_get(mosmdn, childobj,
1083 1085 DMU_OT_DSL_DIR_CHILD_MAP, mdn, stack))
1084 1086 return (errnum);
1085 1087
1086 1088 if (zap_lookup(mdn, cname, &objnum, stack))
1087 1089 return (ERR_FILESYSTEM_NOT_FOUND);
1088 1090
1089 1091 if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR,
1090 1092 mdn, stack))
1091 1093 return (errnum);
1092 1094
1093 1095 *fsname = ch;
1094 1096 if (issnapshot)
1095 1097 *snapname = '@';
1096 1098 }
1097 1099 headobj = ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
1098 1100 if (obj)
1099 1101 *obj = headobj;
1100 1102
1101 1103 skip:
1102 1104 if (errnum = dnode_get(mosmdn, headobj, DMU_OT_DSL_DATASET, mdn, stack))
1103 1105 return (errnum);
1104 1106 if (issnapshot) {
1105 1107 uint64_t snapobj;
1106 1108
1107 1109 snapobj = ((dsl_dataset_phys_t *)DN_BONUS(mdn))->
1108 1110 ds_snapnames_zapobj;
1109 1111
1110 1112 if (errnum = dnode_get(mosmdn, snapobj,
1111 1113 DMU_OT_DSL_DS_SNAP_MAP, mdn, stack))
1112 1114 return (errnum);
1113 1115 if (zap_lookup(mdn, snapname + 1, &headobj, stack))
1114 1116 return (ERR_FILESYSTEM_NOT_FOUND);
1115 1117 if (errnum = dnode_get(mosmdn, headobj,
1116 1118 DMU_OT_DSL_DATASET, mdn, stack))
1117 1119 return (errnum);
1118 1120 if (obj)
1119 1121 *obj = headobj;
1120 1122 }
1121 1123
1122 1124 bp = &((dsl_dataset_phys_t *)DN_BONUS(mdn))->ds_bp;
1123 1125 osp = (objset_phys_t *)stack;
1124 1126 stack += sizeof (objset_phys_t);
1125 1127 if (errnum = zio_read(bp, osp, stack))
1126 1128 return (errnum);
1127 1129
1128 1130 grub_memmove((char *)mdn, (char *)&osp->os_meta_dnode, DNODE_SIZE);
1129 1131
1130 1132 return (0);
1131 1133 }
1132 1134
1133 1135 /*
1134 1136 * For a given XDR packed nvlist, verify the first 4 bytes and move on.
1135 1137 *
1136 1138 * An XDR packed nvlist is encoded as (comments from nvs_xdr_create) :
1137 1139 *
1138 1140 * encoding method/host endian (4 bytes)
1139 1141 * nvl_version (4 bytes)
1140 1142 * nvl_nvflag (4 bytes)
1141 1143 * encoded nvpairs:
1142 1144 * encoded size of the nvpair (4 bytes)
1143 1145 * decoded size of the nvpair (4 bytes)
1144 1146 * name string size (4 bytes)
1145 1147 * name string data (sizeof(NV_ALIGN4(string))
1146 1148 * data type (4 bytes)
1147 1149 * # of elements in the nvpair (4 bytes)
1148 1150 * data
1149 1151 * 2 zeros for the last nvpair
1150 1152 * (end of the entire list) (8 bytes)
1151 1153 *
1152 1154 * Return:
1153 1155 * 0 - success
1154 1156 * 1 - failure
1155 1157 */
1156 1158 static int
1157 1159 nvlist_unpack(char *nvlist, char **out)
1158 1160 {
1159 1161 /* Verify if the 1st and 2nd byte in the nvlist are valid. */
1160 1162 if (nvlist[0] != NV_ENCODE_XDR || nvlist[1] != HOST_ENDIAN)
1161 1163 return (1);
1162 1164
1163 1165 *out = nvlist + 4;
1164 1166 return (0);
1165 1167 }
1166 1168
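As a concrete illustration of the layout documented above (XDR data is big-endian, which is why the helpers below BSWAP the fields on x86), a single UINT64 nvpair named e.g. "txg" is laid out as:

	+0   encoded size of this nvpair
	+4   decoded size of this nvpair
	+8   name length (3)
	+12  name bytes, zero-padded to a 4-byte boundary ("txg\0")
	+16  data type (DATA_TYPE_UINT64)
	+20  element count (1)
	+24  the 8-byte value

nvpair_name(), nvpair_type() and nvpair_value() below each step over these fields in exactly this order, which is why they all begin by skipping the two 4-byte size words.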
1167 1169 static char *
1168 1170 nvlist_array(char *nvlist, int index)
1169 1171 {
1170 1172 int i, encode_size;
1171 1173
1172 1174 for (i = 0; i < index; i++) {
1173 1175 /* skip the header, nvl_version, and nvl_nvflag */
1174 1176 nvlist = nvlist + 4 * 2;
1175 1177
1176 1178 while (encode_size = BSWAP_32(*(uint32_t *)nvlist))
1177 1179 nvlist += encode_size; /* goto the next nvpair */
1178 1180
1179 1181 nvlist = nvlist + 4 * 2; /* skip the ending 2 zeros - 8 bytes */
1180 1182 }
1181 1183
1182 1184 return (nvlist);
1183 1185 }
1184 1186
1185 1187 /*
1186 1188 * The nvlist_next_nvpair() function returns a handle to the next nvpair in the
1187 1189 * list following nvpair. If nvpair is NULL, the first pair is returned. If
1188 1190 * nvpair is the last pair in the nvlist, NULL is returned.
1189 1191 */
1190 1192 static char *
1191 1193 nvlist_next_nvpair(char *nvl, char *nvpair)
1192 1194 {
1193 1195 char *cur, *prev;
1194 1196 int encode_size;
1195 1197
1196 1198 if (nvl == NULL)
1197 1199 return (NULL);
1198 1200
1199 1201 if (nvpair == NULL) {
1200 1202 /* skip over nvl_version and nvl_nvflag */
1201 1203 nvpair = nvl + 4 * 2;
1202 1204 } else {
1203 1205 /* skip to the next nvpair */
1204 1206 encode_size = BSWAP_32(*(uint32_t *)nvpair);
1205 1207 nvpair += encode_size;
1206 1208 }
1207 1209
1208 1210 /* 8 bytes of 0 marks the end of the list */
1209 1211 if (*(uint64_t *)nvpair == 0)
1210 1212 return (NULL);
1211 1213
1212 1214 return (nvpair);
1213 1215 }
1214 1216
1215 1217 /*
1216 1218 * This function returns 0 on success and 1 on failure. On success, a string
1217 1219 * containing the name of nvpair is saved in buf.
1218 1220 */
1219 1221 static int
1220 1222 nvpair_name(char *nvp, char *buf, int buflen)
1221 1223 {
1222 1224 int len;
1223 1225
1224 1226 /* skip over encode/decode size */
1225 1227 nvp += 4 * 2;
1226 1228
1227 1229 len = BSWAP_32(*(uint32_t *)nvp);
1228 1230 if (buflen < len + 1)
1229 1231 return (1);
1230 1232
1231 1233 grub_memmove(buf, nvp + 4, len);
1232 1234 buf[len] = '\0';
1233 1235
1234 1236 return (0);
1235 1237 }
1236 1238
1237 1239 /*
1238 1240 * This function retrieves the value of the nvpair in the form of enumerated
1239 1241 * type data_type_t. This is used to determine the appropriate type to pass to
1240 1242 * nvpair_value().
1241 1243 */
1242 1244 static int
1243 1245 nvpair_type(char *nvp)
1244 1246 {
1245 1247 int name_len, type;
1246 1248
1247 1249 /* skip over encode/decode size */
1248 1250 nvp += 4 * 2;
1249 1251
1250 1252 /* skip over name_len */
1251 1253 name_len = BSWAP_32(*(uint32_t *)nvp);
1252 1254 nvp += 4;
1253 1255
1254 1256 /* skip over name */
1255 1257 nvp = nvp + ((name_len + 3) & ~3); /* align */
1256 1258
1257 1259 type = BSWAP_32(*(uint32_t *)nvp);
1258 1260
1259 1261 return (type);
1260 1262 }
1261 1263
1262 1264 static int
1263 1265 nvpair_value(char *nvp, void *val, int valtype, int *nelmp)
1264 1266 {
1265 1267 int name_len, type, slen;
1266 1268 char *strval = val;
1267 1269 uint64_t *intval = val;
1268 1270
1269 1271 /* skip over encode/decode size */
1270 1272 nvp += 4 * 2;
1271 1273
1272 1274 /* skip over name_len */
1273 1275 name_len = BSWAP_32(*(uint32_t *)nvp);
1274 1276 nvp += 4;
1275 1277
1276 1278 /* skip over name */
1277 1279 nvp = nvp + ((name_len + 3) & ~3); /* align */
1278 1280
1279 1281 /* skip over type */
1280 1282 type = BSWAP_32(*(uint32_t *)nvp);
1281 1283 nvp += 4;
1282 1284
1283 1285 if (type == valtype) {
1284 1286 int nelm;
1285 1287
1286 1288 nelm = BSWAP_32(*(uint32_t *)nvp);
1287 1289 if (valtype != DATA_TYPE_BOOLEAN && nelm < 1)
1288 1290 return (1);
1289 1291 nvp += 4;
1290 1292
1291 1293 switch (valtype) {
1292 1294 case DATA_TYPE_BOOLEAN:
1293 1295 return (0);
1294 1296
1295 1297 case DATA_TYPE_STRING:
1296 1298 slen = BSWAP_32(*(uint32_t *)nvp);
1297 1299 nvp += 4;
1298 1300 grub_memmove(strval, nvp, slen);
1299 1301 strval[slen] = '\0';
1300 1302 return (0);
1301 1303
1302 1304 case DATA_TYPE_UINT64:
1303 1305 *intval = BSWAP_64(*(uint64_t *)nvp);
1304 1306 return (0);
1305 1307
1306 1308 case DATA_TYPE_NVLIST:
1307 1309 *(void **)val = (void *)nvp;
1308 1310 return (0);
1309 1311
1310 1312 case DATA_TYPE_NVLIST_ARRAY:
1311 1313 *(void **)val = (void *)nvp;
1312 1314 if (nelmp)
1313 1315 *nelmp = nelm;
1314 1316 return (0);
1315 1317 }
1316 1318 }
1317 1319
1318 1320 return (1);
1319 1321 }
1320 1322
1321 1323 static int
1322 1324 nvlist_lookup_value(char *nvlist, char *name, void *val, int valtype,
1323 1325 int *nelmp)
1324 1326 {
1325 1327 char *nvpair;
1326 1328
1327 1329 for (nvpair = nvlist_next_nvpair(nvlist, NULL);
1328 1330 nvpair != NULL;
1329 1331 nvpair = nvlist_next_nvpair(nvlist, nvpair)) {
1330 1332 int name_len = BSWAP_32(*(uint32_t *)(nvpair + 4 * 2));
1331 1333 char *nvp_name = nvpair + 4 * 3;
1332 1334
1333 1335 if ((grub_strncmp(nvp_name, name, name_len) == 0) &&
1334 1336 nvpair_type(nvpair) == valtype) {
1335 1337 return (nvpair_value(nvpair, val, valtype, nelmp));
1336 1338 }
1337 1339 }
1338 1340 return (1);
1339 1341 }
1340 1342
1341 1343 /*
1342 1344 * Check if this vdev is online and is in a good state.
1343 1345 */
1344 1346 static int
1345 1347 vdev_validate(char *nv)
1346 1348 {
1347 1349 uint64_t ival;
1348 1350
1349 1351 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_OFFLINE, &ival,
1350 1352 DATA_TYPE_UINT64, NULL) == 0 ||
1351 1353 nvlist_lookup_value(nv, ZPOOL_CONFIG_FAULTED, &ival,
1352 1354 DATA_TYPE_UINT64, NULL) == 0 ||
1353 1355 nvlist_lookup_value(nv, ZPOOL_CONFIG_REMOVED, &ival,
1354 1356 DATA_TYPE_UINT64, NULL) == 0)
1355 1357 return (ERR_DEV_VALUES);
1356 1358
1357 1359 return (0);
1358 1360 }
1359 1361
1360 1362 /*
1361 1363 * Get a valid vdev pathname/devid from the boot device.
1362 1364 * The caller should already allocate MAXPATHLEN memory for bootpath and devid.
1363 1365 */
1364 1366 static int
1365 1367 vdev_get_bootpath(char *nv, uint64_t inguid, char *devid, char *bootpath,
1366 1368 int is_spare)
1367 1369 {
1368 1370 char type[16];
1369 1371
1370 1372 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_TYPE, &type, DATA_TYPE_STRING,
1371 1373 NULL))
1372 1374 return (ERR_FSYS_CORRUPT);
1373 1375
1374 1376 if (grub_strcmp(type, VDEV_TYPE_DISK) == 0) {
1375 1377 uint64_t guid;
1376 1378
1377 1379 if (vdev_validate(nv) != 0)
1378 1380 return (ERR_NO_BOOTPATH);
1379 1381
1380 1382 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_GUID,
1381 1383 &guid, DATA_TYPE_UINT64, NULL) != 0)
1382 1384 return (ERR_NO_BOOTPATH);
1383 1385
1384 1386 if (guid != inguid)
1385 1387 return (ERR_NO_BOOTPATH);
1386 1388
1387 1389 /* for a spare vdev, pick the disk labeled with "is_spare" */
1388 1390 if (is_spare) {
1389 1391 uint64_t spare = 0;
1390 1392 (void) nvlist_lookup_value(nv, ZPOOL_CONFIG_IS_SPARE,
1391 1393 &spare, DATA_TYPE_UINT64, NULL);
1392 1394 if (!spare)
1393 1395 return (ERR_NO_BOOTPATH);
1394 1396 }
1395 1397
1396 1398 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_PHYS_PATH,
1397 1399 bootpath, DATA_TYPE_STRING, NULL) != 0)
1398 1400 bootpath[0] = '\0';
1399 1401
1400 1402 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_DEVID,
1401 1403 devid, DATA_TYPE_STRING, NULL) != 0)
1402 1404 devid[0] = '\0';
1403 1405
1404 1406 if (grub_strlen(bootpath) >= MAXPATHLEN ||
1405 1407 grub_strlen(devid) >= MAXPATHLEN)
1406 1408 return (ERR_WONT_FIT);
1407 1409
1408 1410 return (0);
1409 1411
1410 1412 } else if (grub_strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
1411 1413 grub_strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
1412 1414 (is_spare = (grub_strcmp(type, VDEV_TYPE_SPARE) == 0))) {
1413 1415 int nelm, i;
1414 1416 char *child;
1415 1417
1416 1418 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_CHILDREN, &child,
1417 1419 DATA_TYPE_NVLIST_ARRAY, &nelm))
1418 1420 return (ERR_FSYS_CORRUPT);
1419 1421
1420 1422 for (i = 0; i < nelm; i++) {
1421 1423 char *child_i;
1422 1424
1423 1425 child_i = nvlist_array(child, i);
1424 1426 if (vdev_get_bootpath(child_i, inguid, devid,
1425 1427 bootpath, is_spare) == 0)
1426 1428 return (0);
1427 1429 }
1428 1430 }
1429 1431
1430 1432 return (ERR_NO_BOOTPATH);
1431 1433 }
1432 1434
1433 1435 /*
1434 1436 * Check the disk label information and retrieve needed vdev name-value pairs.
1435 1437 *
1436 1438 * Return:
1437 1439 * 0 - success
1438 1440 * ERR_* - failure
1439 1441 */
1440 1442 static int
1441 1443 check_pool_label(uint64_t sector, char *stack, char *outdevid,
1442 1444 char *outpath, uint64_t *outguid, uint64_t *outashift, uint64_t *outversion)
1443 1445 {
1444 1446 vdev_phys_t *vdev;
1445 1447 uint64_t pool_state, txg = 0;
1446 1448 char *nvlist, *nv, *features;
1447 1449 uint64_t diskguid;
1448 1450
1449 1451 sector += (VDEV_SKIP_SIZE >> SPA_MINBLOCKSHIFT);
1450 1452
1451 1453 /* Read in the vdev name-value pair list (112K). */
1452 1454 if (devread(sector, 0, VDEV_PHYS_SIZE, stack) == 0)
1453 1455 return (ERR_READ);
1454 1456
1455 1457 vdev = (vdev_phys_t *)stack;
1456 1458 stack += sizeof (vdev_phys_t);
1457 1459
1458 1460 if (nvlist_unpack(vdev->vp_nvlist, &nvlist))
1459 1461 return (ERR_FSYS_CORRUPT);
1460 1462
1461 1463 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_STATE, &pool_state,
1462 1464 DATA_TYPE_UINT64, NULL))
1463 1465 return (ERR_FSYS_CORRUPT);
1464 1466
1465 1467 if (pool_state == POOL_STATE_DESTROYED)
1466 1468 return (ERR_FILESYSTEM_NOT_FOUND);
1467 1469
1468 1470 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_NAME,
1469 1471 current_rootpool, DATA_TYPE_STRING, NULL))
1470 1472 return (ERR_FSYS_CORRUPT);
1471 1473
1472 1474 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_TXG, &txg,
1473 1475 DATA_TYPE_UINT64, NULL))
1474 1476 return (ERR_FSYS_CORRUPT);
1475 1477
1476 1478 /* not an active device */
1477 1479 if (txg == 0)
1478 1480 return (ERR_NO_BOOTPATH);
1479 1481
1480 1482 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VERSION, outversion,
1481 1483 DATA_TYPE_UINT64, NULL))
1482 1484 return (ERR_FSYS_CORRUPT);
1483 1485 if (!SPA_VERSION_IS_SUPPORTED(*outversion))
1484 1486 return (ERR_NEWER_VERSION);
1485 1487 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VDEV_TREE, &nv,
1486 1488 DATA_TYPE_NVLIST, NULL))
1487 1489 return (ERR_FSYS_CORRUPT);
1488 1490 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_GUID, &diskguid,
1489 1491 DATA_TYPE_UINT64, NULL))
1490 1492 return (ERR_FSYS_CORRUPT);
1491 1493 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_ASHIFT, outashift,
1492 1494 DATA_TYPE_UINT64, NULL) != 0)
1493 1495 return (ERR_FSYS_CORRUPT);
1494 1496 if (vdev_get_bootpath(nv, diskguid, outdevid, outpath, 0))
1495 1497 return (ERR_NO_BOOTPATH);
1496 1498 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_GUID, outguid,
1497 1499 DATA_TYPE_UINT64, NULL))
1498 1500 return (ERR_FSYS_CORRUPT);
1499 1501
1500 1502 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_FEATURES_FOR_READ,
1501 1503 &features, DATA_TYPE_NVLIST, NULL) == 0) {
1502 1504 char *nvp;
1503 1505 char *name = stack;
1504 1506 stack += MAXNAMELEN;
1505 1507
1506 1508 for (nvp = nvlist_next_nvpair(features, NULL);
1507 1509 nvp != NULL;
1508 1510 nvp = nvlist_next_nvpair(features, nvp)) {
1509 1511 zap_attribute_t za;
1510 1512
1511 1513 if (nvpair_name(nvp, name, MAXNAMELEN) != 0)
1512 1514 return (ERR_FSYS_CORRUPT);
1513 1515
1514 1516 za.za_integer_length = 8;
1515 1517 za.za_num_integers = 1;
1516 1518 za.za_first_integer = 1;
1517 1519 za.za_name = name;
1518 1520 if (check_feature(&za, spa_feature_names, stack) != 0)
1519 1521 return (ERR_NEWER_VERSION);
1520 1522 }
1521 1523 }
1522 1524
1523 1525 return (0);
1524 1526 }
1525 1527
1526 1528 /*
1527 1529 * zfs_mount() locates a valid uberblock of the root pool and reads in its MOS
1528 1530 * to the memory address MOS.
1529 1531 *
1530 1532 * Return:
1531 1533 * 1 - success
1532 1534 * 0 - failure
1533 1535 */
1534 1536 int
1535 1537 zfs_mount(void)
1536 1538 {
1537 1539 char *stack, *ub_array;
1538 1540 int label = 0;
1539 1541 uberblock_t *ubbest;
1540 1542 objset_phys_t *osp;
1541 1543 char tmp_bootpath[MAXNAMELEN];
1542 1544 char tmp_devid[MAXNAMELEN];
1543 1545 uint64_t tmp_guid, ashift, version;
1544 1546 uint64_t adjpl = (uint64_t)part_length << SPA_MINBLOCKSHIFT;
1545 1547 int err = errnum; /* preserve previous errnum state */
1546 1548
1547 1549 /* if it's our first time here, zero the best uberblock out */
1548 1550 if (best_drive == 0 && best_part == 0 && find_best_root) {
1549 1551 grub_memset(&current_uberblock, 0, sizeof (uberblock_t));
1550 1552 pool_guid = 0;
1551 1553 }
1552 1554
1553 1555 stackbase = ZFS_SCRATCH;
1554 1556 stack = stackbase;
1555 1557 ub_array = stack;
1556 1558 stack += VDEV_UBERBLOCK_RING;
1557 1559
1558 1560 osp = (objset_phys_t *)stack;
1559 1561 stack += sizeof (objset_phys_t);
1560 1562 adjpl = P2ALIGN(adjpl, (uint64_t)sizeof (vdev_label_t));
1561 1563
1562 1564 for (label = 0; label < VDEV_LABELS; label++) {
1563 1565
1564 1566 /*
1565 1567 * some eltorito stacks don't give us a size and
1566 1568 * we end up setting the size to MAXUINT, further
1567 1569 * some of these devices stop working once a single
1568 1570 * read past the end has been issued. Checking
1569 1571 * for a maximum part_length and skipping the backup
1570 1572 * labels at the end of the slice/partition/device
1571 1573 * avoids breaking down on such devices.
1572 1574 */
1573 1575 if (part_length == MAXUINT && label == 2)
1574 1576 break;
1575 1577
1576 1578 uint64_t sector = vdev_label_start(adjpl,
1577 1579 label) >> SPA_MINBLOCKSHIFT;
1578 1580
1579 1581 /* Read in the uberblock ring (128K). */
1580 1582 if (devread(sector +
1581 1583 ((VDEV_SKIP_SIZE + VDEV_PHYS_SIZE) >> SPA_MINBLOCKSHIFT),
1582 1584 0, VDEV_UBERBLOCK_RING, ub_array) == 0)
1583 1585 continue;
1584 1586
1585 1587 if (check_pool_label(sector, stack, tmp_devid,
1586 1588 tmp_bootpath, &tmp_guid, &ashift, &version))
1587 1589 continue;
1588 1590
1589 1591 if (pool_guid == 0)
1590 1592 pool_guid = tmp_guid;
1591 1593
1592 1594 if ((ubbest = find_bestub(ub_array, ashift, sector)) == NULL ||
1593 1595 zio_read(&ubbest->ub_rootbp, osp, stack) != 0)
1594 1596 continue;
1595 1597
1596 1598 VERIFY_OS_TYPE(osp, DMU_OST_META);
1597 1599
1598 1600 if (version >= SPA_VERSION_FEATURES &&
1599 1601 check_mos_features(&osp->os_meta_dnode, stack) != 0)
1600 1602 continue;
1601 1603
1602 1604 if (find_best_root && ((pool_guid != tmp_guid) ||
1603 1605 vdev_uberblock_compare(ubbest, &(current_uberblock)) <= 0))
1604 1606 continue;
1605 1607
1606 1608 /* Got the MOS. Save it at the memory addr MOS. */
1607 1609 grub_memmove(MOS, &osp->os_meta_dnode, DNODE_SIZE);
1608 1610 grub_memmove(&current_uberblock, ubbest, sizeof (uberblock_t));
1609 1611 grub_memmove(current_bootpath, tmp_bootpath, MAXNAMELEN);
1610 1612 grub_memmove(current_devid, tmp_devid, grub_strlen(tmp_devid));
1611 1613 is_zfs_mount = 1;
1612 1614 return (1);
1613 1615 }
1614 1616
1615 1617 /*
1616 1618 * While some fs impls. (tftp) rely on setting and keeping
1617 1619 * global errnums set, others won't reset it and will break
1618 1620 * when issuing rawreads. The goal here is to simply not
1619 1621 * have zfs mount attempts impact the previous state.
1620 1622 */
1621 1623 errnum = err;
1622 1624 return (0);
1623 1625 }
1624 1626
1625 1627 /*
1626 1628 * zfs_open() locates a file in the rootpool by following the
1627 1629 * MOS and places the dnode of the file in the memory address DNODE.
1628 1630 *
1629 1631 * Return:
1630 1632 * 1 - success
1631 1633 * 0 - failure
1632 1634 */
1633 1635 int
1634 1636 zfs_open(char *filename)
1635 1637 {
1636 1638 char *stack;
1637 1639 dnode_phys_t *mdn;
1638 1640
1639 1641 file_buf = NULL;
1640 1642 stackbase = ZFS_SCRATCH;
1641 1643 stack = stackbase;
1642 1644
1643 1645 mdn = (dnode_phys_t *)stack;
1644 1646 stack += sizeof (dnode_phys_t);
1645 1647
1646 1648 dnode_mdn = NULL;
1647 1649 dnode_buf = (dnode_phys_t *)stack;
1648 1650 stack += 1<<DNODE_BLOCK_SHIFT;
1649 1651
1650 1652 /*
1651 1653 * menu.lst is placed at the root pool filesystem level,
1652 1654 * do not goto 'current_bootfs'.
1653 1655 */
1654 1656 if (is_top_dataset_file(filename)) {
1655 1657 if (errnum = get_objset_mdn(MOS, NULL, NULL, mdn, stack))
1656 1658 return (0);
1657 1659
1658 1660 current_bootfs_obj = 0;
1659 1661 } else {
1660 1662 if (current_bootfs[0] == '\0') {
1661 1663 /* Get the default root filesystem object number */
1662 1664 if (errnum = get_default_bootfsobj(MOS,
1663 1665 &current_bootfs_obj, stack))
1664 1666 return (0);
1665 1667
1666 1668 if (errnum = get_objset_mdn(MOS, NULL,
1667 1669 &current_bootfs_obj, mdn, stack))
1668 1670 return (0);
1669 1671 } else {
1670 1672 if (errnum = get_objset_mdn(MOS, current_bootfs,
1671 1673 &current_bootfs_obj, mdn, stack)) {
1672 1674 grub_memset(current_bootfs, 0, MAXNAMELEN);
1673 1675 return (0);
1674 1676 }
1675 1677 }
1676 1678 }
1677 1679
1678 1680 if (dnode_get_path(mdn, filename, DNODE, stack)) {
1679 1681 errnum = ERR_FILE_NOT_FOUND;
1680 1682 return (0);
1681 1683 }
1682 1684
1683 1685 /* get the file size and set the file position to 0 */
1684 1686
1685 1687 /*
1686 1688 * For DMU_OT_SA we will need to locate the SIZE attribute,
1687 1689 * which could be either in the bonus buffer
1688 1690 * or the "spill" block.
1689 1691 */
1690 1692 if (DNODE->dn_bonustype == DMU_OT_SA) {
1691 1693 sa_hdr_phys_t *sahdrp;
1692 1694 int hdrsize;
1693 1695
1694 1696 if (DNODE->dn_bonuslen != 0) {
1695 1697 sahdrp = (sa_hdr_phys_t *)DN_BONUS(DNODE);
1696 1698 } else {
1697 1699 if (DNODE->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1698 1700 blkptr_t *bp = &DNODE->dn_spill;
1699 1701 void *buf;
1700 1702
1701 1703 buf = (void *)stack;
1702 1704 stack += BP_GET_LSIZE(bp);
1703 1705
1704 1706 /* reset errnum to rawread() failure */
1705 1707 errnum = 0;
1706 1708 if (zio_read(bp, buf, stack) != 0) {
1707 1709 return (0);
1708 1710 }
1709 1711 sahdrp = buf;
1710 1712 } else {
1711 1713 errnum = ERR_FSYS_CORRUPT;
1712 1714 return (0);
1713 1715 }
1714 1716 }
1715 1717 hdrsize = SA_HDR_SIZE(sahdrp);
1716 1718 filemax = *(uint64_t *)((char *)sahdrp + hdrsize +
1717 1719 SA_SIZE_OFFSET);
1718 1720 } else {
1719 1721 filemax = ((znode_phys_t *)DN_BONUS(DNODE))->zp_size;
1720 1722 }
1721 1723 filepos = 0;
1722 1724
1723 1725 dnode_buf = NULL;
1724 1726 return (1);
1725 1727 }
1726 1728
1727 1729 /*
1728 1730 * zfs_read reads in the data blocks pointed by the DNODE.
1729 1731 *
1730 1732 * Return:
1731 1733 * len - the length successfully read in to the buffer
1732 1734 * 0 - failure
1733 1735 */
1734 1736 int
1735 1737 zfs_read(char *buf, int len)
1736 1738 {
1737 1739 char *stack;
1738 1740 int blksz, length, movesize;
1739 1741
1740 1742 if (file_buf == NULL) {
1741 1743 file_buf = stackbase;
1742 1744 stackbase += SPA_MAXBLOCKSIZE;
1743 1745 file_start = file_end = 0;
1744 1746 }
1745 1747 stack = stackbase;
1746 1748
1747 1749 /*
1748 1750 * If offset is in memory, move it into the buffer provided and return.
1749 1751 */
1750 1752 if (filepos >= file_start && filepos+len <= file_end) {
1751 1753 grub_memmove(buf, file_buf + filepos - file_start, len);
1752 1754 filepos += len;
1753 1755 return (len);
1754 1756 }
1755 1757
1756 1758 blksz = DNODE->dn_datablkszsec << SPA_MINBLOCKSHIFT;
1757 1759
1758 1760 /*
1759 1761 * Entire Dnode is too big to fit into the space available. We
1760 1762 * will need to read it in chunks. This could be optimized to
1761 1763 * read in as large a chunk as there is space available, but for
1762 1764 * now, this only reads in one data block at a time.
1763 1765 */
1764 1766 length = len;
1765 1767 while (length) {
1766 1768 /*
1767 1769 * Find requested blkid and the offset within that block.
1768 1770 */
1769 1771 uint64_t blkid = filepos / blksz;
1770 1772
1771 1773 if (errnum = dmu_read(DNODE, blkid, file_buf, stack))
1772 1774 return (0);
1773 1775
1774 1776 file_start = blkid * blksz;
1775 1777 file_end = file_start + blksz;
1776 1778
1777 1779 movesize = MIN(length, file_end - filepos);
1778 1780
1779 1781 grub_memmove(buf, file_buf + filepos - file_start,
1780 1782 movesize);
1781 1783 buf += movesize;
1782 1784 length -= movesize;
1783 1785 filepos += movesize;
1784 1786 }
1785 1787
1786 1788 return (len);
1787 1789 }
1788 1790
1789 1791 /*
1790 1792 * No-Op
1791 1793 */
1792 1794 int
1793 1795 zfs_embed(int *start_sector, int needed_sectors)
1794 1796 {
1795 1797 return (1);
1796 1798 }
1797 1799
1798 1800 #endif /* FSYS_ZFS */