Print this page
3966 zfs lz4 compression (etc) should have bumped grub capability VERSION
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/grub/grub-0.97/stage2/fsys_zfs.c
+++ new/usr/src/grub/grub-0.97/stage2/fsys_zfs.c
1 1 /*
2 2 * GRUB -- GRand Unified Bootloader
3 3 * Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
4 4 *
5 5 * This program is free software; you can redistribute it and/or modify
6 6 * it under the terms of the GNU General Public License as published by
7 7 * the Free Software Foundation; either version 2 of the License, or
8 8 * (at your option) any later version.
9 9 *
10 10 * This program is distributed in the hope that it will be useful,
11 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 13 * GNU General Public License for more details.
14 14 *
15 15 * You should have received a copy of the GNU General Public License
16 16 * along with this program; if not, write to the Free Software
17 17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 18 */
19 19
20 20 /*
21 21 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
22 22 * Use is subject to license terms.
23 23 */
24 24
25 25 /*
26 26 * Copyright (c) 2012 by Delphix. All rights reserved.
27 27 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
28 28 */
29 29
30 30 /*
31 31 * The zfs plug-in routines for GRUB are:
32 32 *
33 33 * zfs_mount() - locates a valid uberblock of the root pool and reads
34 34 * in its MOS at the memory address MOS.
35 35 *
36 36 * zfs_open() - locates a plain file object by following the MOS
37 37 * and places its dnode at the memory address DNODE.
38 38 *
39 39 * zfs_read() - read in the data blocks pointed by the DNODE.
40 40 *
41 41 * ZFS_SCRATCH is used as a working area.
42 42 *
43 43 * (memory addr) MOS DNODE ZFS_SCRATCH
44 44 * | | |
45 45 * +-------V---------V----------V---------------+
46 46 * memory | | dnode | dnode | scratch |
47 47 * | | 512B | 512B | area |
48 48 * +--------------------------------------------+
49 49 */
50 50
51 51 #ifdef FSYS_ZFS
52 52
53 53 #include "shared.h"
54 54 #include "filesys.h"
55 55 #include "fsys_zfs.h"
56 56
/* cache for a file block of the currently zfs_open()-ed file */
static void *file_buf = NULL;
static uint64_t file_start = 0;	/* presumably first cached byte offset — used by zfs_read(), not visible here */
static uint64_t file_end = 0;	/* presumably one past last cached byte — TODO confirm in zfs_read() */

/* cache for a dnode block */
static dnode_phys_t *dnode_buf = NULL;	/* cached block of dnodes */
static dnode_phys_t *dnode_mdn = NULL;	/* metadnode the cached block was read from */
static uint64_t dnode_start = 0;	/* first object number covered by dnode_buf */
static uint64_t dnode_end = 0;		/* one past the last object number covered */

static uint64_t pool_guid = 0;		/* NOTE(review): presumably the root pool's guid — set outside this chunk */
static uberblock_t current_uberblock;	/* NOTE(review): presumably the active uberblock — set outside this chunk */
static char *stackbase;			/* NOTE(review): presumably base of the ZFS_SCRATCH working area */
71 71
/*
 * Decompression dispatch table, indexed by the ZIO_COMPRESS_* value
 * stored in a block pointer.  A 0 (NULL) decomp_func means GRUB cannot
 * decompress that algorithm; zio_read() rejects such blocks.  "on"
 * maps to lzjb, the historical default compression.
 */
decomp_entry_t decomp_table[ZIO_COMPRESS_FUNCTIONS] =
{
	{"inherit", 0},			/* ZIO_COMPRESS_INHERIT */
	{"on", lzjb_decompress},	/* ZIO_COMPRESS_ON */
	{"off", 0},			/* ZIO_COMPRESS_OFF */
	{"lzjb", lzjb_decompress},	/* ZIO_COMPRESS_LZJB */
	{"empty", 0},			/* ZIO_COMPRESS_EMPTY */
	{"gzip-1", 0},			/* ZIO_COMPRESS_GZIP_1 */
	{"gzip-2", 0},			/* ZIO_COMPRESS_GZIP_2 */
	{"gzip-3", 0},			/* ZIO_COMPRESS_GZIP_3 */
	{"gzip-4", 0},			/* ZIO_COMPRESS_GZIP_4 */
	{"gzip-5", 0},			/* ZIO_COMPRESS_GZIP_5 */
	{"gzip-6", 0},			/* ZIO_COMPRESS_GZIP_6 */
	{"gzip-7", 0},			/* ZIO_COMPRESS_GZIP_7 */
	{"gzip-8", 0},			/* ZIO_COMPRESS_GZIP_8 */
	{"gzip-9", 0},			/* ZIO_COMPRESS_GZIP_9 */
	{"zle", 0},			/* ZIO_COMPRESS_ZLE */
	{"lz4", lz4_decompress}		/* ZIO_COMPRESS_LZ4 */
};
91 91
92 92 static int zio_read_data(blkptr_t *bp, void *buf, char *stack);
93 93
/*
 * Our own version of bcmp(): returns 0 when the first n bytes of s1
 * and s2 are equal (or n is 0, or both pointers alias each other),
 * and 1 at the first differing byte.
 */
static int
zfs_bcmp(const void *s1, const void *s2, size_t n)
{
	const unsigned char *p1 = s1;
	const unsigned char *p2 = s2;
	size_t i;

	if (s1 == s2)
		return (0);

	for (i = 0; i < n; i++) {
		if (p1[i] != p2[i])
			return (1);
	}

	return (0);
}
112 112
/*
 * Our own version of log2(): floor(log2(num)), i.e. highbit()-1.
 * Returns 0 for inputs of 0 and 1.
 */
static int
zfs_log2(uint64_t num)
{
	int shift;

	/* count how many times num can be halved before reaching <= 1 */
	for (shift = 0; (num >> 1) != 0; num >>= 1)
		shift++;

	return (shift);
}
128 128
/* Checksum Functions */

/*
 * "off" checksum: unconditionally produces an all-zero checksum.
 * buf and size are ignored.
 */
static void
zio_checksum_off(const void *buf, uint64_t size, zio_cksum_t *zcp)
{
	ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
}
135 135
/*
 * Checksum Table and Values, indexed by ZIO_CHECKSUM_* value.
 * Each row holds the {native, byteswap} checksum function pair, two
 * flag fields, and the algorithm name.  zio_checksum_verify() reads
 * only ci_func[] and the second flag (ci_eck — checksum embedded in
 * the data block itself); the first flag is not consulted in this
 * file (presumably "correctable" — confirm against fsys_zfs.h).
 */
zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
	{{NULL, NULL}, 0, 0, "inherit"},
	{{NULL, NULL}, 0, 0, "on"},
	{{zio_checksum_off, zio_checksum_off}, 0, 0, "off"},
	{{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 1, "label"},
	{{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 1, "gang_header"},
	{{NULL, NULL}, 0, 0, "zilog"},
	{{fletcher_2_native, fletcher_2_byteswap}, 0, 0, "fletcher2"},
	{{fletcher_4_native, fletcher_4_byteswap}, 1, 0, "fletcher4"},
	{{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 0, "SHA256"},
	{{NULL, NULL}, 0, 0, "zilog2"},
};
149 149
/*
 * zio_checksum_verify: Provides support for checksum verification.
 *
 * Fletcher2, Fletcher4, and SHA256 are supported.
 *
 * Return:
 *	-1 = Failure
 *	0 = Success
 */
static int
zio_checksum_verify(blkptr_t *bp, char *data, int size)
{
	zio_cksum_t zc = bp->blk_cksum;
	uint32_t checksum = BP_GET_CHECKSUM(bp);
	int byteswap = BP_SHOULD_BYTESWAP(bp);
	/* embedded-checksum trailer occupying the last bytes of the block */
	zio_eck_t *zec = (zio_eck_t *)(data + size) - 1;
	/*
	 * NOTE(review): ci is computed before the bounds check on
	 * "checksum" below; the short-circuit || prevents the actual
	 * dereference, but the pointer arithmetic itself precedes the
	 * range test.
	 */
	zio_checksum_info_t *ci = &zio_checksum_table[checksum];
	zio_cksum_t actual_cksum, expected_cksum;

	/* byteswap is not supported */
	if (byteswap)
		return (-1);

	if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
		return (-1);

	if (ci->ci_eck) {
		/*
		 * The checksum lives inside the block itself: swap the
		 * seed (zc) into the trailer, hash, then restore the
		 * on-disk value and compare against it.
		 */
		expected_cksum = zec->zec_cksum;
		zec->zec_cksum = zc;
		ci->ci_func[0](data, size, &actual_cksum);
		zec->zec_cksum = expected_cksum;
		zc = expected_cksum;

	} else {
		/* byteswap is known 0 here, so this is the native func */
		ci->ci_func[byteswap](data, size, &actual_cksum);
	}

	/* fail if any of the four 64-bit checksum words differ */
	if ((actual_cksum.zc_word[0] - zc.zc_word[0]) |
	    (actual_cksum.zc_word[1] - zc.zc_word[1]) |
	    (actual_cksum.zc_word[2] - zc.zc_word[2]) |
	    (actual_cksum.zc_word[3] - zc.zc_word[3]))
		return (-1);

	return (0);
}
195 195
196 196 /*
197 197 * vdev_label_start returns the physical disk offset (in bytes) of
198 198 * label "l".
199 199 */
200 200 static uint64_t
201 201 vdev_label_start(uint64_t psize, int l)
202 202 {
203 203 return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
204 204 0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
205 205 }
206 206
207 207 /*
208 208 * vdev_uberblock_compare takes two uberblock structures and returns an integer
209 209 * indicating the more recent of the two.
210 210 * Return Value = 1 if ub2 is more recent
211 211 * Return Value = -1 if ub1 is more recent
212 212 * The most recent uberblock is determined using its transaction number and
213 213 * timestamp. The uberblock with the highest transaction number is
214 214 * considered "newer". If the transaction numbers of the two blocks match, the
215 215 * timestamps are compared to determine the "newer" of the two.
216 216 */
217 217 static int
218 218 vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
219 219 {
220 220 if (ub1->ub_txg < ub2->ub_txg)
221 221 return (-1);
222 222 if (ub1->ub_txg > ub2->ub_txg)
223 223 return (1);
224 224
225 225 if (ub1->ub_timestamp < ub2->ub_timestamp)
226 226 return (-1);
227 227 if (ub1->ub_timestamp > ub2->ub_timestamp)
228 228 return (1);
229 229
230 230 return (0);
231 231 }
232 232
/*
 * Three pieces of information are needed to verify an uberblock: the magic
 * number, the version number, and the checksum.
 *
 * Return:
 *	0 - Success
 *	-1 - Failure
 */
static int
uberblock_verify(uberblock_t *uber, uint64_t ub_size, uint64_t offset)
{
	blkptr_t bp;

	/*
	 * Build a template block pointer describing a label checksum;
	 * the uberblock's physical disk offset seeds the expected
	 * checksum (standard ZFS label self-checksumming).
	 */
	BP_ZERO(&bp);
	BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
	BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
	ZIO_SET_CHECKSUM(&bp.blk_cksum, offset, 0, 0, 0);

	if (zio_checksum_verify(&bp, (char *)uber, ub_size) != 0)
		return (-1);

	/* checksum is good; now validate magic number and version */
	if (uber->ub_magic == UBERBLOCK_MAGIC &&
	    SPA_VERSION_IS_SUPPORTED(uber->ub_version))
		return (0);

	return (-1);
}
260 260
261 261 /*
262 262 * Find the best uberblock.
263 263 * Return:
264 264 * Success - Pointer to the best uberblock.
265 265 * Failure - NULL
266 266 */
267 267 static uberblock_t *
268 268 find_bestub(char *ub_array, uint64_t ashift, uint64_t sector)
269 269 {
270 270 uberblock_t *ubbest = NULL;
271 271 uberblock_t *ubnext;
272 272 uint64_t offset, ub_size;
273 273 int i;
274 274
275 275 ub_size = VDEV_UBERBLOCK_SIZE(ashift);
276 276
277 277 for (i = 0; i < VDEV_UBERBLOCK_COUNT(ashift); i++) {
278 278 ubnext = (uberblock_t *)ub_array;
279 279 ub_array += ub_size;
280 280 offset = (sector << SPA_MINBLOCKSHIFT) +
281 281 VDEV_UBERBLOCK_OFFSET(ashift, i);
282 282
283 283 if (uberblock_verify(ubnext, ub_size, offset) != 0)
284 284 continue;
285 285
286 286 if (ubbest == NULL ||
287 287 vdev_uberblock_compare(ubnext, ubbest) > 0)
288 288 ubbest = ubnext;
289 289 }
290 290
291 291 return (ubbest);
292 292 }
293 293
/*
 * Read a block of data based on the gang block address dva,
 * and put its data in buf.
 *
 * Return:
 *	0 - success
 *	1 - failure
 */
static int
zio_read_gang(blkptr_t *bp, dva_t *dva, void *buf, char *stack)
{
	zio_gbh_phys_t *zio_gb;
	uint64_t offset, sector;
	blkptr_t tmpbp;
	int i;

	/* carve the gang block header out of the scratch area */
	zio_gb = (zio_gbh_phys_t *)stack;
	stack += SPA_GANGBLOCKSIZE;
	offset = DVA_GET_OFFSET(dva);
	sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);

	/* read in the gang block header (devread returns 0 on failure) */
	if (devread(sector, 0, SPA_GANGBLOCKSIZE, (char *)zio_gb) == 0) {
		grub_printf("failed to read in a gang block header\n");
		return (1);
	}

	/* self checksuming the gang block header */
	BP_ZERO(&tmpbp);
	BP_SET_CHECKSUM(&tmpbp, ZIO_CHECKSUM_GANG_HEADER);
	BP_SET_BYTEORDER(&tmpbp, ZFS_HOST_BYTEORDER);
	/* expected checksum is seeded from the header's own identity */
	ZIO_SET_CHECKSUM(&tmpbp.blk_cksum, DVA_GET_VDEV(dva),
	    DVA_GET_OFFSET(dva), bp->blk_birth, 0);
	if (zio_checksum_verify(&tmpbp, (char *)zio_gb, SPA_GANGBLOCKSIZE)) {
		grub_printf("failed to checksum a gang block header\n");
		return (1);
	}

	/* read each constituent block, appending its bytes to buf */
	for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
		if (zio_gb->zg_blkptr[i].blk_birth == 0)
			continue;	/* unused slot */

		if (zio_read_data(&zio_gb->zg_blkptr[i], buf, stack))
			return (1);
		buf += BP_GET_PSIZE(&zio_gb->zg_blkptr[i]);
	}

	return (0);
}
343 343
/*
 * Read in a block of raw (possibly still compressed, unverified)
 * data to buf.
 *
 * Return:
 *	0 - success
 *	1 - failure
 */
static int
zio_read_data(blkptr_t *bp, void *buf, char *stack)
{
	int i, psize;

	psize = BP_GET_PSIZE(bp);

	/* pick a good dva from the block pointer */
	for (i = 0; i < SPA_DVAS_PER_BP; i++) {
		uint64_t offset, sector;

		/* skip unused/empty dva slots */
		if (bp->blk_dva[i].dva_word[0] == 0 &&
		    bp->blk_dva[i].dva_word[1] == 0)
			continue;

		if (DVA_GET_GANG(&bp->blk_dva[i])) {
			/* gang block: reassemble from its constituents */
			if (zio_read_gang(bp, &bp->blk_dva[i], buf, stack) == 0)
				return (0);
		} else {
			/* read in a data block */
			offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
			sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
			/* devread() returns nonzero on success */
			if (devread(sector, 0, psize, buf) != 0)
				return (0);
		}
	}

	/* every dva failed to read */
	return (1);
}
380 380
/*
 * Read in a block of data, verify its checksum, decompress if needed,
 * and put the uncompressed data in buf.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
zio_read(blkptr_t *bp, void *buf, char *stack)
{
	int lsize, psize, comp;
	char *retbuf;

	comp = BP_GET_COMPRESS(bp);
	lsize = BP_GET_LSIZE(bp);	/* logical (uncompressed) size */
	psize = BP_GET_PSIZE(bp);	/* physical (on-disk) size */

	/* reject compression algorithms we have no decompressor for */
	if ((unsigned int)comp >= ZIO_COMPRESS_FUNCTIONS ||
	    (comp != ZIO_COMPRESS_OFF &&
	    decomp_table[comp].decomp_func == NULL)) {
		grub_printf("compression algorithm not supported\n");
		return (ERR_FSYS_CORRUPT);
	}

	/* the caller's buffer must not overlap our scratch area */
	if ((char *)buf < stack && ((char *)buf) + lsize > stack) {
		grub_printf("not enough memory allocated\n");
		return (ERR_WONT_FIT);
	}

	/*
	 * For compressed blocks, land the physical bytes in the scratch
	 * area first and decompress into the caller's buffer afterward.
	 */
	retbuf = buf;
	if (comp != ZIO_COMPRESS_OFF) {
		buf = stack;
		stack += psize;
	}

	if (zio_read_data(bp, buf, stack) != 0) {
		grub_printf("zio_read_data failed\n");
		return (ERR_FSYS_CORRUPT);
	}

	/* checksum covers the physical (possibly compressed) bytes */
	if (zio_checksum_verify(bp, buf, psize) != 0) {
		grub_printf("checksum verification failed\n");
		return (ERR_FSYS_CORRUPT);
	}

	if (comp != ZIO_COMPRESS_OFF) {
		if (decomp_table[comp].decomp_func(buf, retbuf, psize,
		    lsize) != 0) {
			grub_printf("zio_read decompression failed\n");
			return (ERR_FSYS_CORRUPT);
		}
	}

	return (0);
}
437 437
/*
 * Get the block from a block id.
 * push the block onto the stack.
 *
 * Walks the dnode's indirect-block tree from the top level down to
 * level 0, reading one indirect block per level, until the data block
 * for blkid lands in buf.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
dmu_read(dnode_phys_t *dn, uint64_t blkid, void *buf, char *stack)
{
	int idx, level;
	blkptr_t *bp_array = dn->dn_blkptr;
	/* log2 of the number of block pointers per indirect block */
	int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
	blkptr_t *bp, *tmpbuf;

	bp = (blkptr_t *)stack;
	stack += sizeof (blkptr_t);

	/* scratch buffer big enough for one indirect block */
	tmpbuf = (blkptr_t *)stack;
	stack += 1<<dn->dn_indblkshift;

	for (level = dn->dn_nlevels - 1; level >= 0; level--) {
		/* index of blkid's ancestor within this level's block */
		idx = (blkid >> (epbs * level)) & ((1<<epbs)-1);
		*bp = bp_array[idx];
		if (level == 0)
			tmpbuf = buf;	/* final read goes to the caller */
		if (BP_IS_HOLE(bp)) {
			/* a hole reads back as a block of zeros */
			grub_memset(buf, 0,
			    dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
			break;
		} else if (errnum = zio_read(bp, tmpbuf, stack)) {
			return (errnum);
		}

		/* descend: the block just read is the next pointer array */
		bp_array = tmpbuf;
	}

	return (0);
}
478 478
479 479 /*
480 480 * mzap_lookup: Looks up property described by "name" and returns the value
481 481 * in "value".
482 482 *
483 483 * Return:
484 484 * 0 - success
485 485 * errnum - failure
486 486 */
487 487 static int
488 488 mzap_lookup(mzap_phys_t *zapobj, int objsize, const char *name,
489 489 uint64_t *value)
490 490 {
491 491 int i, chunks;
492 492 mzap_ent_phys_t *mzap_ent = zapobj->mz_chunk;
493 493
494 494 chunks = objsize / MZAP_ENT_LEN - 1;
495 495 for (i = 0; i < chunks; i++) {
496 496 if (grub_strcmp(mzap_ent[i].mze_name, name) == 0) {
497 497 *value = mzap_ent[i].mze_value;
498 498 return (0);
499 499 }
500 500 }
501 501
502 502 return (ERR_FSYS_CORRUPT);
503 503 }
504 504
/*
 * Compute the 64-bit ZAP hash of "name", seeded with the zap object's
 * salt, using a lazily built CRC-64 table (ZFS_CRC64_POLY).
 * On failure sets errnum to ERR_FSYS_CORRUPT and returns 0.
 */
static uint64_t
zap_hash(uint64_t salt, const char *name)
{
	static uint64_t table[256];
	const uint8_t *cp;
	uint8_t c;
	uint64_t crc = salt;

	/* build the CRC-64 table on first use (table[128] == 0 until then) */
	if (table[128] == 0) {
		uint64_t *ct;
		int i, j;
		for (i = 0; i < 256; i++) {
			for (ct = table + i, *ct = i, j = 8; j > 0; j--)
				*ct = (*ct >> 1) ^ (-(*ct & 1) &
				    ZFS_CRC64_POLY);
		}
	}

	/* a zero salt or a wrongly built table indicates corruption */
	if (crc == 0 || table[128] != ZFS_CRC64_POLY) {
		errnum = ERR_FSYS_CORRUPT;
		return (0);
	}

	for (cp = (const uint8_t *)name; (c = *cp) != '\0'; cp++)
		crc = (crc >> 8) ^ table[(crc ^ c) & 0xFF];

	/*
	 * Only use 28 bits, since we need 4 bits in the cookie for the
	 * collision differentiator.  We MUST use the high bits, since
	 * those are the ones that we first pay attention to when
	 * choosing the bucket.
	 */
	crc &= ~((1ULL << (64 - 28)) - 1);

	return (crc);
}
541 541
/*
 * Compare the name stored in a chain of leaf-array chunks against buf.
 *
 * Only to be used on 8-bit arrays.
 * array_len is actual len in bytes (not encoded le_value_length).
 * buf is null-terminated.
 *
 * Returns nonzero when the full array matches buf, 0 otherwise
 * (including when a chunk index runs out of range).
 */
static int
zap_leaf_array_equal(zap_leaf_phys_t *l, int blksft, int chunk,
    int array_len, const char *buf)
{
	int bseen = 0;

	while (bseen < array_len) {
		struct zap_leaf_array *la =
		    &ZAP_LEAF_CHUNK(l, blksft, chunk).l_array;
		/* each chunk carries at most ZAP_LEAF_ARRAY_BYTES bytes */
		int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES);

		if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
			return (0);

		if (zfs_bcmp(la->la_array, buf + bseen, toread) != 0)
			break;
		chunk = la->la_next;	/* follow the chunk chain */
		bseen += toread;
	}
	return (bseen == array_len);
}
568 568
/*
 * Given a zap_leaf_phys_t, walk thru the zap leaf chunks to get the
 * value for the property "name".
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
zap_leaf_lookup(zap_leaf_phys_t *l, int blksft, uint64_t h,
    const char *name, uint64_t *value)
{
	uint16_t chunk;
	struct zap_leaf_entry *le;

	/* Verify if this is a valid leaf block */
	if (l->l_hdr.lh_block_type != ZBT_LEAF)
		return (ERR_FSYS_CORRUPT);
	if (l->l_hdr.lh_magic != ZAP_LEAF_MAGIC)
		return (ERR_FSYS_CORRUPT);

	/* walk the hash-bucket chain for hash h */
	for (chunk = l->l_hash[LEAF_HASH(blksft, h)];
	    chunk != CHAIN_END; chunk = le->le_next) {

		if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
			return (ERR_FSYS_CORRUPT);

		le = ZAP_LEAF_ENTRY(l, blksft, chunk);

		/* Verify the chunk entry */
		if (le->le_type != ZAP_CHUNK_ENTRY)
			return (ERR_FSYS_CORRUPT);

		/* cheap hash compare before the full name compare */
		if (le->le_hash != h)
			continue;

		if (zap_leaf_array_equal(l, blksft, le->le_name_chunk,
		    le->le_name_length, name)) {

			struct zap_leaf_array *la;
			uint8_t *ip;

			/* only a single 8-byte integer value is supported */
			if (le->le_int_size != 8 || le->le_value_length != 1)
				return (ERR_FSYS_CORRUPT);

			/* get the uint64_t property value */
			la = &ZAP_LEAF_CHUNK(l, blksft,
			    le->le_value_chunk).l_array;
			ip = la->la_array;

			/* assemble big-endian bytes into a host uint64_t */
			*value = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 |
			    (uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 |
			    (uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 |
			    (uint64_t)ip[6] << 8 | (uint64_t)ip[7];

			return (0);
		}
	}

	return (ERR_FSYS_CORRUPT);
}
630 630
/*
 * Fat ZAP lookup
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
fzap_lookup(dnode_phys_t *zap_dnode, zap_phys_t *zap,
    const char *name, uint64_t *value, char *stack)
{
	zap_leaf_phys_t *l;
	uint64_t hash, idx, blkid;
	int blksft = zfs_log2(zap_dnode->dn_datablkszsec << DNODE_SHIFT);

	/* Verify if this is a fat zap header block */
	if (zap->zap_magic != (uint64_t)ZAP_MAGIC ||
	    zap->zap_flags != 0)
		return (ERR_FSYS_CORRUPT);

	/* zap_hash() reports failure via the global errnum */
	hash = zap_hash(zap->zap_salt, name);
	if (errnum)
		return (errnum);

	/* get block id from index */
	if (zap->zap_ptrtbl.zt_numblks != 0) {
		/* external pointer tables not supported */
		return (ERR_FSYS_CORRUPT);
	}
	idx = ZAP_HASH_IDX(hash, zap->zap_ptrtbl.zt_shift);
	/* embedded pointer table occupies the second half of the block */
	blkid = ((uint64_t *)zap)[idx + (1<<(blksft-3-1))];

	/* Get the leaf block */
	l = (zap_leaf_phys_t *)stack;
	stack += 1<<blksft;
	if ((1<<blksft) < sizeof (zap_leaf_phys_t))
		return (ERR_FSYS_CORRUPT);
	if (errnum = dmu_read(zap_dnode, blkid, l, stack))
		return (errnum);

	return (zap_leaf_lookup(l, blksft, hash, name, value));
}
673 673
/*
 * Read in the data of a zap object and find the value for a matching
 * property name.  Dispatches to the micro-zap or fat-zap lookup based
 * on the block type stamped in the first 8 bytes of the object.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
zap_lookup(dnode_phys_t *zap_dnode, const char *name, uint64_t *val,
    char *stack)
{
	uint64_t block_type;
	int size;
	void *zapbuf;

	/* Read in the first block of the zap object data. */
	zapbuf = stack;
	size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	stack += size;

	if ((errnum = dmu_read(zap_dnode, 0, zapbuf, stack)) != 0)
		return (errnum);

	block_type = *((uint64_t *)zapbuf);

	if (block_type == ZBT_MICRO) {
		return (mzap_lookup(zapbuf, size, name, val));
	} else if (block_type == ZBT_HEADER) {
		/* this is a fat zap */
		return (fzap_lookup(zap_dnode, zapbuf, name,
		    val, stack));
	}

	return (ERR_FSYS_CORRUPT);
}
710 710
/*
 * In-memory form of one zap entry handed to zap_iterate() callbacks.
 */
typedef struct zap_attribute {
	int za_integer_length;		/* width of each integer, in bytes */
	uint64_t za_num_integers;	/* number of integers in the value */
	uint64_t za_first_integer;	/* first (often only) integer value */
	char *za_name;			/* attribute (property) name */
} zap_attribute_t;

/* Per-entry callback; a nonzero return aborts the iteration. */
typedef int (zap_cb_t)(zap_attribute_t *za, void *arg, char *stack);
719 719
/*
 * Invoke cb on every entry of a micro zap object, passing a filled-in
 * zap_attribute_t.  A nonzero return from cb stops the iteration and
 * is returned to the caller.  Fat zap objects are rejected.
 */
static int
zap_iterate(dnode_phys_t *zap_dnode, zap_cb_t *cb, void *arg, char *stack)
{
	uint32_t size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	zap_attribute_t za;
	int i;
	mzap_phys_t *mzp = (mzap_phys_t *)stack;
	stack += size;

	if ((errnum = dmu_read(zap_dnode, 0, mzp, stack)) != 0)
		return (errnum);

	/*
	 * Iteration over fatzap objects has not yet been implemented.
	 * If we encounter a pool in which there are more features for
	 * read than can fit inside a microzap (i.e., more than 2048
	 * features for read), we can add support for fatzap iteration.
	 * For now, fail.
	 */
	if (mzp->mz_block_type != ZBT_MICRO) {
		grub_printf("feature information stored in fatzap, pool "
		    "version not supported\n");
		return (1);
	}

	/* microzap values are always single 8-byte integers */
	za.za_integer_length = 8;
	za.za_num_integers = 1;
	for (i = 0; i < size / MZAP_ENT_LEN - 1; i++) {
		mzap_ent_phys_t *mzep = &mzp->mz_chunk[i];
		int err;

		za.za_first_integer = mzep->mze_value;
		za.za_name = mzep->mze_name;
		err = cb(&za, arg, stack);
		if (err != 0)
			return (err);
	}

	return (0);
}
760 760
/*
 * Get the dnode of an object number from the metadnode of an object set.
 *
 * Input
 *	mdn - metadnode to get the object dnode
 *	objnum - object number for the object dnode
 *	type - dnode type passed to VERIFY_DN_TYPE (callers pass 0 when
 *	       no check is wanted — confirm macro semantics in header)
 *	buf - data buffer that holds the returning dnode
 *	stack - scratch area
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
dnode_get(dnode_phys_t *mdn, uint64_t objnum, uint8_t type, dnode_phys_t *buf,
    char *stack)
{
	uint64_t blkid, blksz;	/* the block id this object dnode is in */
	int epbs;		/* shift of number of dnodes in a block */
	int idx;		/* index within a block */
	dnode_phys_t *dnbuf;

	blksz = mdn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
	epbs = zfs_log2(blksz) - DNODE_SHIFT;
	blkid = objnum >> epbs;
	idx = objnum & ((1<<epbs)-1);

	/* serve the dnode from the cache when it already covers objnum */
	if (dnode_buf != NULL && dnode_mdn == mdn &&
	    objnum >= dnode_start && objnum < dnode_end) {
		grub_memmove(buf, &dnode_buf[idx], DNODE_SIZE);
		VERIFY_DN_TYPE(buf, type);
		return (0);
	}

	/*
	 * Read into the cache when the block has the standard dnode
	 * block size; otherwise fall back to the scratch area.
	 */
	if (dnode_buf && blksz == 1<<DNODE_BLOCK_SHIFT) {
		dnbuf = dnode_buf;
		dnode_mdn = mdn;
		dnode_start = blkid << epbs;
		dnode_end = (blkid + 1) << epbs;
	} else {
		dnbuf = (dnode_phys_t *)stack;
		stack += blksz;
	}

	if (errnum = dmu_read(mdn, blkid, (char *)dnbuf, stack))
		return (errnum);

	grub_memmove(buf, &dnbuf[idx], DNODE_SIZE);
	VERIFY_DN_TYPE(buf, type);

	return (0);
}
813 813
814 814 /*
815 815 * Check if this is a special file that resides at the top
816 816 * dataset of the pool. Currently this is the GRUB menu,
817 817 * boot signature and boot signature backup.
818 818 * str starts with '/'.
819 819 */
820 820 static int
821 821 is_top_dataset_file(char *str)
822 822 {
823 823 char *tptr;
824 824
825 825 if ((tptr = grub_strstr(str, "menu.lst")) &&
826 826 (tptr[8] == '\0' || tptr[8] == ' ') &&
827 827 *(tptr-1) == '/')
828 828 return (1);
829 829
830 830 if (grub_strncmp(str, BOOTSIGN_DIR"/",
831 831 grub_strlen(BOOTSIGN_DIR) + 1) == 0)
832 832 return (1);
833 833
834 834 if (grub_strcmp(str, BOOTSIGN_BACKUP) == 0)
835 835 return (1);
836 836
837 837 return (0);
838 838 }
839 839
840 840 static int
841 841 check_feature(zap_attribute_t *za, void *arg, char *stack)
842 842 {
843 843 const char **names = arg;
844 844 int i;
845 845
846 846 if (za->za_first_integer == 0)
847 847 return (0);
848 848
849 849 for (i = 0; names[i] != NULL; i++) {
850 850 if (grub_strcmp(za->za_name, names[i]) == 0) {
851 851 return (0);
852 852 }
853 853 }
854 854 grub_printf("missing feature for read '%s'\n", za->za_name);
855 855 return (ERR_NEWER_VERSION);
856 856 }
857 857
/*
 * Get the file dnode for a given file name where mdn is the meta dnode
 * for this ZFS object set. When found, place the file dnode in dn.
 * The 'path' argument will be mangled.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
dnode_get_path(dnode_phys_t *mdn, char *path, dnode_phys_t *dn,
    char *stack)
{
	uint64_t objnum, version;
	char *cname, ch;

	/* start from the master node of the filesystem */
	if (errnum = dnode_get(mdn, MASTER_NODE_OBJ, DMU_OT_MASTER_NODE,
	    dn, stack))
		return (errnum);

	/* refuse ZPL versions newer than this implementation knows */
	if (errnum = zap_lookup(dn, ZPL_VERSION_STR, &version, stack))
		return (errnum);
	if (version > ZPL_VERSION)
		return (-1);

	/* look up the root directory of the filesystem */
	if (errnum = zap_lookup(dn, ZFS_ROOT_OBJ, &objnum, stack))
		return (errnum);

	if (errnum = dnode_get(mdn, objnum, DMU_OT_DIRECTORY_CONTENTS,
	    dn, stack))
		return (errnum);

	/* skip leading slashes */
	while (*path == '/')
		path++;

	/* descend the path one component at a time */
	while (*path && !grub_isspace(*path)) {

		/* get the next component name */
		cname = path;
		while (*path && !grub_isspace(*path) && *path != '/')
			path++;
		ch = *path;
		*path = 0;	/* ensure null termination */

		if (errnum = zap_lookup(dn, cname, &objnum, stack))
			return (errnum);

		/* extract the object number from the dirent value */
		objnum = ZFS_DIRENT_OBJ(objnum);
		if (errnum = dnode_get(mdn, objnum, 0, dn, stack))
			return (errnum);

		*path = ch;	/* restore the clobbered separator */
		while (*path == '/')
			path++;
	}

	/* We found the dnode for this file. Verify if it is a plain file. */
	VERIFY_DN_TYPE(dn, DMU_OT_PLAIN_FILE_CONTENTS);

	return (0);
}
920 920
921 921 /*
922 922 * Get the default 'bootfs' property value from the rootpool.
923 923 *
924 924 * Return:
925 925 * 0 - success
926 926 * errnum -failure
927 927 */
928 928 static int
929 929 get_default_bootfsobj(dnode_phys_t *mosmdn, uint64_t *obj, char *stack)
930 930 {
931 931 uint64_t objnum = 0;
932 932 dnode_phys_t *dn = (dnode_phys_t *)stack;
933 933 stack += DNODE_SIZE;
934 934
935 935 if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
936 936 DMU_OT_OBJECT_DIRECTORY, dn, stack))
937 937 return (errnum);
938 938
939 939 /*
940 940 * find the object number for 'pool_props', and get the dnode
941 941 * of the 'pool_props'.
942 942 */
943 943 if (zap_lookup(dn, DMU_POOL_PROPS, &objnum, stack))
944 944 return (ERR_FILESYSTEM_NOT_FOUND);
945 945
946 946 if (errnum = dnode_get(mosmdn, objnum, DMU_OT_POOL_PROPS, dn, stack))
947 947 return (errnum);
948 948
949 949 if (zap_lookup(dn, ZPOOL_PROP_BOOTFS, &objnum, stack))
950 950 return (ERR_FILESYSTEM_NOT_FOUND);
951 951
952 952 if (!objnum)
↓ open down ↓ |
952 lines elided |
↑ open up ↑ |
953 953 return (ERR_FILESYSTEM_NOT_FOUND);
954 954
955 955 *obj = objnum;
956 956 return (0);
957 957 }
958 958
/*
 * List of pool features that the grub implementation of ZFS supports for
 * read. Note that features that are only required for write do not need
 * to be listed here since grub opens pools in read-only mode.
 *
 * When this list is updated the version number in usr/src/grub/capability
 * must be incremented to ensure the new grub gets installed.
 */
static const char *spa_feature_names[] = {
	"org.illumos:lz4_compress",
	NULL	/* sentinel: check_feature() scans until NULL */
};
968 971
969 972 /*
970 973 * Checks whether the MOS features that are active are supported by this
971 974 * (GRUB's) implementation of ZFS.
972 975 *
973 976 * Return:
974 977 * 0: Success.
975 978 * errnum: Failure.
976 979 */
977 980 static int
978 981 check_mos_features(dnode_phys_t *mosmdn, char *stack)
979 982 {
980 983 uint64_t objnum;
981 984 dnode_phys_t *dn;
982 985 uint8_t error = 0;
983 986
984 987 dn = (dnode_phys_t *)stack;
985 988 stack += DNODE_SIZE;
986 989
987 990 if ((errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
988 991 DMU_OT_OBJECT_DIRECTORY, dn, stack)) != 0)
989 992 return (errnum);
990 993
991 994 /*
992 995 * Find the object number for 'features_for_read' and retrieve its
993 996 * corresponding dnode. Note that we don't check features_for_write
994 997 * because GRUB is not opening the pool for write.
995 998 */
996 999 if ((errnum = zap_lookup(dn, DMU_POOL_FEATURES_FOR_READ, &objnum,
997 1000 stack)) != 0)
998 1001 return (errnum);
999 1002
1000 1003 if ((errnum = dnode_get(mosmdn, objnum, DMU_OTN_ZAP_METADATA,
1001 1004 dn, stack)) != 0)
1002 1005 return (errnum);
1003 1006
1004 1007 return (zap_iterate(dn, check_feature, spa_feature_names, stack));
1005 1008 }
1006 1009
/*
 * Given a MOS metadnode, get the metadnode of a given filesystem name (fsname),
 * e.g. pool/rootfs, or a given object number (obj), e.g. the object number
 * of pool/rootfs.
 *
 * If no fsname and no obj are given, return the DSL_DIR metadnode.
 * If fsname is given, return its metadnode and its matching object number.
 * If only obj is given, return the metadnode for this object number.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
get_objset_mdn(dnode_phys_t *mosmdn, char *fsname, uint64_t *obj,
    dnode_phys_t *mdn, char *stack)
{
	uint64_t objnum, headobj;
	char *cname, ch;
	blkptr_t *bp;
	objset_phys_t *osp;
	int issnapshot = 0;
	char *snapname;

	/* caller supplied an object number directly; skip the name walk */
	if (fsname == NULL && obj) {
		headobj = *obj;
		goto skip;
	}

	/* start at the MOS object directory and locate the root dataset */
	if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, mdn, stack))
		return (errnum);

	if (errnum = zap_lookup(mdn, DMU_POOL_ROOT_DATASET, &objnum,
	    stack))
		return (errnum);

	if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR, mdn, stack))
		return (errnum);

	/* no name given: return the head dataset of the root DSL dir */
	if (fsname == NULL) {
		headobj =
		    ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
		goto skip;
	}

	/* take out the pool name */
	while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
		fsname++;

	/*
	 * Walk one '/'-separated component at a time through the DSL
	 * child-dir ZAPs, e.g. "pool/a/b" visits "a" then "b".
	 */
	while (*fsname && !grub_isspace(*fsname)) {
		uint64_t childobj;

		while (*fsname == '/')
			fsname++;

		cname = fsname;
		while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
			fsname++;
		ch = *fsname;
		*fsname = 0;	/* temporarily NUL-terminate this component */

		/* an '@' inside the last component names a snapshot */
		snapname = cname;
		while (*snapname && !grub_isspace(*snapname) && *snapname !=
		    '@')
			snapname++;
		if (*snapname == '@') {
			issnapshot = 1;
			*snapname = 0;
		}
		childobj =
		    ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_child_dir_zapobj;
		if (errnum = dnode_get(mosmdn, childobj,
		    DMU_OT_DSL_DIR_CHILD_MAP, mdn, stack))
			return (errnum);

		if (zap_lookup(mdn, cname, &objnum, stack))
			return (ERR_FILESYSTEM_NOT_FOUND);

		if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR,
		    mdn, stack))
			return (errnum);

		/* restore the separators mutated above */
		*fsname = ch;
		if (issnapshot)
			*snapname = '@';
	}
	headobj = ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
	if (obj)
		*obj = headobj;

skip:
	if (errnum = dnode_get(mosmdn, headobj, DMU_OT_DSL_DATASET, mdn, stack))
		return (errnum);
	/* for a snapshot, redirect headobj through the snapnames ZAP */
	if (issnapshot) {
		uint64_t snapobj;

		snapobj = ((dsl_dataset_phys_t *)DN_BONUS(mdn))->
		    ds_snapnames_zapobj;

		if (errnum = dnode_get(mosmdn, snapobj,
		    DMU_OT_DSL_DS_SNAP_MAP, mdn, stack))
			return (errnum);
		if (zap_lookup(mdn, snapname + 1, &headobj, stack))
			return (ERR_FILESYSTEM_NOT_FOUND);
		if (errnum = dnode_get(mosmdn, headobj,
		    DMU_OT_DSL_DATASET, mdn, stack))
			return (errnum);
		if (obj)
			*obj = headobj;
	}

	/* read the dataset's objset and hand back its meta-dnode */
	bp = &((dsl_dataset_phys_t *)DN_BONUS(mdn))->ds_bp;
	osp = (objset_phys_t *)stack;
	stack += sizeof (objset_phys_t);
	if (errnum = zio_read(bp, osp, stack))
		return (errnum);

	grub_memmove((char *)mdn, (char *)&osp->os_meta_dnode, DNODE_SIZE);

	return (0);
}
1129 1132
1130 1133 /*
1131 1134 * For a given XDR packed nvlist, verify the first 4 bytes and move on.
1132 1135 *
1133 1136 * An XDR packed nvlist is encoded as (comments from nvs_xdr_create) :
1134 1137 *
1135 1138 * encoding method/host endian (4 bytes)
1136 1139 * nvl_version (4 bytes)
1137 1140 * nvl_nvflag (4 bytes)
1138 1141 * encoded nvpairs:
1139 1142 * encoded size of the nvpair (4 bytes)
1140 1143 * decoded size of the nvpair (4 bytes)
1141 1144 * name string size (4 bytes)
1142 1145 * name string data (sizeof(NV_ALIGN4(string))
1143 1146 * data type (4 bytes)
1144 1147 * # of elements in the nvpair (4 bytes)
1145 1148 * data
1146 1149 * 2 zero's for the last nvpair
1147 1150 * (end of the entire list) (8 bytes)
1148 1151 *
1149 1152 * Return:
1150 1153 * 0 - success
1151 1154 * 1 - failure
1152 1155 */
1153 1156 static int
1154 1157 nvlist_unpack(char *nvlist, char **out)
1155 1158 {
1156 1159 /* Verify if the 1st and 2nd byte in the nvlist are valid. */
1157 1160 if (nvlist[0] != NV_ENCODE_XDR || nvlist[1] != HOST_ENDIAN)
1158 1161 return (1);
1159 1162
1160 1163 *out = nvlist + 4;
1161 1164 return (0);
1162 1165 }
1163 1166
1164 1167 static char *
1165 1168 nvlist_array(char *nvlist, int index)
1166 1169 {
1167 1170 int i, encode_size;
1168 1171
1169 1172 for (i = 0; i < index; i++) {
1170 1173 /* skip the header, nvl_version, and nvl_nvflag */
1171 1174 nvlist = nvlist + 4 * 2;
1172 1175
1173 1176 while (encode_size = BSWAP_32(*(uint32_t *)nvlist))
1174 1177 nvlist += encode_size; /* goto the next nvpair */
1175 1178
1176 1179 nvlist = nvlist + 4 * 2; /* skip the ending 2 zeros - 8 bytes */
1177 1180 }
1178 1181
1179 1182 return (nvlist);
1180 1183 }
1181 1184
1182 1185 /*
1183 1186 * The nvlist_next_nvpair() function returns a handle to the next nvpair in the
1184 1187 * list following nvpair. If nvpair is NULL, the first pair is returned. If
1185 1188 * nvpair is the last pair in the nvlist, NULL is returned.
1186 1189 */
1187 1190 static char *
1188 1191 nvlist_next_nvpair(char *nvl, char *nvpair)
1189 1192 {
1190 1193 char *cur, *prev;
1191 1194 int encode_size;
1192 1195
1193 1196 if (nvl == NULL)
1194 1197 return (NULL);
1195 1198
1196 1199 if (nvpair == NULL) {
1197 1200 /* skip over nvl_version and nvl_nvflag */
1198 1201 nvpair = nvl + 4 * 2;
1199 1202 } else {
1200 1203 /* skip to the next nvpair */
1201 1204 encode_size = BSWAP_32(*(uint32_t *)nvpair);
1202 1205 nvpair += encode_size;
1203 1206 }
1204 1207
1205 1208 /* 8 bytes of 0 marks the end of the list */
1206 1209 if (*(uint64_t *)nvpair == 0)
1207 1210 return (NULL);
1208 1211
1209 1212 return (nvpair);
1210 1213 }
1211 1214
1212 1215 /*
1213 1216 * This function returns 0 on success and 1 on failure. On success, a string
1214 1217 * containing the name of nvpair is saved in buf.
1215 1218 */
1216 1219 static int
1217 1220 nvpair_name(char *nvp, char *buf, int buflen)
1218 1221 {
1219 1222 int len;
1220 1223
1221 1224 /* skip over encode/decode size */
1222 1225 nvp += 4 * 2;
1223 1226
1224 1227 len = BSWAP_32(*(uint32_t *)nvp);
1225 1228 if (buflen < len + 1)
1226 1229 return (1);
1227 1230
1228 1231 grub_memmove(buf, nvp + 4, len);
1229 1232 buf[len] = '\0';
1230 1233
1231 1234 return (0);
1232 1235 }
1233 1236
1234 1237 /*
1235 1238 * This function retrieves the value of the nvpair in the form of enumerated
1236 1239 * type data_type_t. This is used to determine the appropriate type to pass to
1237 1240 * nvpair_value().
1238 1241 */
1239 1242 static int
1240 1243 nvpair_type(char *nvp)
1241 1244 {
1242 1245 int name_len, type;
1243 1246
1244 1247 /* skip over encode/decode size */
1245 1248 nvp += 4 * 2;
1246 1249
1247 1250 /* skip over name_len */
1248 1251 name_len = BSWAP_32(*(uint32_t *)nvp);
1249 1252 nvp += 4;
1250 1253
1251 1254 /* skip over name */
1252 1255 nvp = nvp + ((name_len + 3) & ~3); /* align */
1253 1256
1254 1257 type = BSWAP_32(*(uint32_t *)nvp);
1255 1258
1256 1259 return (type);
1257 1260 }
1258 1261
1259 1262 static int
1260 1263 nvpair_value(char *nvp, void *val, int valtype, int *nelmp)
1261 1264 {
1262 1265 int name_len, type, slen;
1263 1266 char *strval = val;
1264 1267 uint64_t *intval = val;
1265 1268
1266 1269 /* skip over encode/decode size */
1267 1270 nvp += 4 * 2;
1268 1271
1269 1272 /* skip over name_len */
1270 1273 name_len = BSWAP_32(*(uint32_t *)nvp);
1271 1274 nvp += 4;
1272 1275
1273 1276 /* skip over name */
1274 1277 nvp = nvp + ((name_len + 3) & ~3); /* align */
1275 1278
1276 1279 /* skip over type */
1277 1280 type = BSWAP_32(*(uint32_t *)nvp);
1278 1281 nvp += 4;
1279 1282
1280 1283 if (type == valtype) {
1281 1284 int nelm;
1282 1285
1283 1286 nelm = BSWAP_32(*(uint32_t *)nvp);
1284 1287 if (valtype != DATA_TYPE_BOOLEAN && nelm < 1)
1285 1288 return (1);
1286 1289 nvp += 4;
1287 1290
1288 1291 switch (valtype) {
1289 1292 case DATA_TYPE_BOOLEAN:
1290 1293 return (0);
1291 1294
1292 1295 case DATA_TYPE_STRING:
1293 1296 slen = BSWAP_32(*(uint32_t *)nvp);
1294 1297 nvp += 4;
1295 1298 grub_memmove(strval, nvp, slen);
1296 1299 strval[slen] = '\0';
1297 1300 return (0);
1298 1301
1299 1302 case DATA_TYPE_UINT64:
1300 1303 *intval = BSWAP_64(*(uint64_t *)nvp);
1301 1304 return (0);
1302 1305
1303 1306 case DATA_TYPE_NVLIST:
1304 1307 *(void **)val = (void *)nvp;
1305 1308 return (0);
1306 1309
1307 1310 case DATA_TYPE_NVLIST_ARRAY:
1308 1311 *(void **)val = (void *)nvp;
1309 1312 if (nelmp)
1310 1313 *nelmp = nelm;
1311 1314 return (0);
1312 1315 }
1313 1316 }
1314 1317
1315 1318 return (1);
1316 1319 }
1317 1320
1318 1321 static int
1319 1322 nvlist_lookup_value(char *nvlist, char *name, void *val, int valtype,
1320 1323 int *nelmp)
1321 1324 {
1322 1325 char *nvpair;
1323 1326
1324 1327 for (nvpair = nvlist_next_nvpair(nvlist, NULL);
1325 1328 nvpair != NULL;
1326 1329 nvpair = nvlist_next_nvpair(nvlist, nvpair)) {
1327 1330 int name_len = BSWAP_32(*(uint32_t *)(nvpair + 4 * 2));
1328 1331 char *nvp_name = nvpair + 4 * 3;
1329 1332
1330 1333 if ((grub_strncmp(nvp_name, name, name_len) == 0) &&
1331 1334 nvpair_type(nvpair) == valtype) {
1332 1335 return (nvpair_value(nvpair, val, valtype, nelmp));
1333 1336 }
1334 1337 }
1335 1338 return (1);
1336 1339 }
1337 1340
1338 1341 /*
1339 1342 * Check if this vdev is online and is in a good state.
1340 1343 */
1341 1344 static int
1342 1345 vdev_validate(char *nv)
1343 1346 {
1344 1347 uint64_t ival;
1345 1348
1346 1349 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_OFFLINE, &ival,
1347 1350 DATA_TYPE_UINT64, NULL) == 0 ||
1348 1351 nvlist_lookup_value(nv, ZPOOL_CONFIG_FAULTED, &ival,
1349 1352 DATA_TYPE_UINT64, NULL) == 0 ||
1350 1353 nvlist_lookup_value(nv, ZPOOL_CONFIG_REMOVED, &ival,
1351 1354 DATA_TYPE_UINT64, NULL) == 0)
1352 1355 return (ERR_DEV_VALUES);
1353 1356
1354 1357 return (0);
1355 1358 }
1356 1359
/*
 * Get a valid vdev pathname/devid from the boot device.
 * The caller should already allocate MAXPATHLEN memory for bootpath and devid.
 *
 * Recurses through mirror/replacing/spare vdevs until it reaches the leaf
 * disk whose guid matches inguid.
 *
 * Return:
 *	0 - success
 *	ERR_* - failure
 */
static int
vdev_get_bootpath(char *nv, uint64_t inguid, char *devid, char *bootpath,
    int is_spare)
{
	/* NOTE(review): assumes every vdev type string fits in 16 bytes */
	char type[16];

	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_TYPE, &type, DATA_TYPE_STRING,
	    NULL))
		return (ERR_FSYS_CORRUPT);

	if (grub_strcmp(type, VDEV_TYPE_DISK) == 0) {
		uint64_t guid;

		/* reject offline/faulted/removed disks */
		if (vdev_validate(nv) != 0)
			return (ERR_NO_BOOTPATH);

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_GUID,
		    &guid, DATA_TYPE_UINT64, NULL) != 0)
			return (ERR_NO_BOOTPATH);

		/* only the disk we booted from is acceptable */
		if (guid != inguid)
			return (ERR_NO_BOOTPATH);

		/* for a spare vdev, pick the disk labeled with "is_spare" */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_value(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare, DATA_TYPE_UINT64, NULL);
			if (!spare)
				return (ERR_NO_BOOTPATH);
		}

		/* phys_path and devid are optional; empty string if absent */
		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_PHYS_PATH,
		    bootpath, DATA_TYPE_STRING, NULL) != 0)
			bootpath[0] = '\0';

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_DEVID,
		    devid, DATA_TYPE_STRING, NULL) != 0)
			devid[0] = '\0';

		if (grub_strlen(bootpath) >= MAXPATHLEN ||
		    grub_strlen(devid) >= MAXPATHLEN)
			return (ERR_WONT_FIT);

		return (0);

	} else if (grub_strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    grub_strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (grub_strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		int nelm, i;
		char *child;

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_CHILDREN, &child,
		    DATA_TYPE_NVLIST_ARRAY, &nelm))
			return (ERR_FSYS_CORRUPT);

		/* try each child until one yields a usable bootpath */
		for (i = 0; i < nelm; i++) {
			char *child_i;

			child_i = nvlist_array(child, i);
			if (vdev_get_bootpath(child_i, inguid, devid,
			    bootpath, is_spare) == 0)
				return (0);
		}
	}

	return (ERR_NO_BOOTPATH);
}
1429 1432
/*
 * Check the disk label information and retrieve needed vdev name-value pairs.
 *
 * On success the out parameters hold the boot device's devid and physical
 * path, plus the pool guid, vdev ashift and pool version.  Also fills in
 * the global current_rootpool as a side effect.
 *
 * Return:
 *	0 - success
 *	ERR_* - failure
 */
static int
check_pool_label(uint64_t sector, char *stack, char *outdevid,
    char *outpath, uint64_t *outguid, uint64_t *outashift, uint64_t *outversion)
{
	vdev_phys_t *vdev;
	uint64_t pool_state, txg = 0;
	char *nvlist, *nv, *features;
	uint64_t diskguid;

	/* skip the boot-block region at the front of the label */
	sector += (VDEV_SKIP_SIZE >> SPA_MINBLOCKSHIFT);

	/* Read in the vdev name-value pair list (112K). */
	if (devread(sector, 0, VDEV_PHYS_SIZE, stack) == 0)
		return (ERR_READ);

	vdev = (vdev_phys_t *)stack;
	stack += sizeof (vdev_phys_t);

	if (nvlist_unpack(vdev->vp_nvlist, &nvlist))
		return (ERR_FSYS_CORRUPT);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_STATE, &pool_state,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	if (pool_state == POOL_STATE_DESTROYED)
		return (ERR_FILESYSTEM_NOT_FOUND);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_NAME,
	    current_rootpool, DATA_TYPE_STRING, NULL))
		return (ERR_FSYS_CORRUPT);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_TXG, &txg,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	/* not an active device */
	if (txg == 0)
		return (ERR_NO_BOOTPATH);

	/* refuse pools whose on-disk version we do not understand */
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VERSION, outversion,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);
	if (!SPA_VERSION_IS_SUPPORTED(*outversion))
		return (ERR_NEWER_VERSION);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VDEV_TREE, &nv,
	    DATA_TYPE_NVLIST, NULL))
		return (ERR_FSYS_CORRUPT);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_GUID, &diskguid,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);
	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_ASHIFT, outashift,
	    DATA_TYPE_UINT64, NULL) != 0)
		return (ERR_FSYS_CORRUPT);
	if (vdev_get_bootpath(nv, diskguid, outdevid, outpath, 0))
		return (ERR_NO_BOOTPATH);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_GUID, outguid,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	/*
	 * If the label advertises read-required features, verify each one
	 * against spa_feature_names; an unrecognized feature means the
	 * pool is too new for this GRUB to read.
	 */
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    &features, DATA_TYPE_NVLIST, NULL) == 0) {
		char *nvp;
		char *name = stack;
		stack += MAXNAMELEN;

		for (nvp = nvlist_next_nvpair(features, NULL);
		    nvp != NULL;
		    nvp = nvlist_next_nvpair(features, nvp)) {
			zap_attribute_t za;

			if (nvpair_name(nvp, name, MAXNAMELEN) != 0)
				return (ERR_FSYS_CORRUPT);

			/* fake up a zap attribute for check_feature() */
			za.za_integer_length = 8;
			za.za_num_integers = 1;
			za.za_first_integer = 1;
			za.za_name = name;
			if (check_feature(&za, spa_feature_names, stack) != 0)
				return (ERR_NEWER_VERSION);
		}
	}

	return (0);
}
1522 1525
1523 1526 /*
1524 1527 * zfs_mount() locates a valid uberblock of the root pool and read in its MOS
1525 1528 * to the memory address MOS.
1526 1529 *
1527 1530 * Return:
1528 1531 * 1 - success
1529 1532 * 0 - failure
1530 1533 */
1531 1534 int
1532 1535 zfs_mount(void)
1533 1536 {
1534 1537 char *stack, *ub_array;
1535 1538 int label = 0;
1536 1539 uberblock_t *ubbest;
1537 1540 objset_phys_t *osp;
1538 1541 char tmp_bootpath[MAXNAMELEN];
1539 1542 char tmp_devid[MAXNAMELEN];
1540 1543 uint64_t tmp_guid, ashift, version;
1541 1544 uint64_t adjpl = (uint64_t)part_length << SPA_MINBLOCKSHIFT;
1542 1545 int err = errnum; /* preserve previous errnum state */
1543 1546
1544 1547 /* if it's our first time here, zero the best uberblock out */
1545 1548 if (best_drive == 0 && best_part == 0 && find_best_root) {
1546 1549 grub_memset(¤t_uberblock, 0, sizeof (uberblock_t));
1547 1550 pool_guid = 0;
1548 1551 }
1549 1552
1550 1553 stackbase = ZFS_SCRATCH;
1551 1554 stack = stackbase;
1552 1555 ub_array = stack;
1553 1556 stack += VDEV_UBERBLOCK_RING;
1554 1557
1555 1558 osp = (objset_phys_t *)stack;
1556 1559 stack += sizeof (objset_phys_t);
1557 1560 adjpl = P2ALIGN(adjpl, (uint64_t)sizeof (vdev_label_t));
1558 1561
1559 1562 for (label = 0; label < VDEV_LABELS; label++) {
1560 1563
1561 1564 /*
1562 1565 * some eltorito stacks don't give us a size and
1563 1566 * we end up setting the size to MAXUINT, further
1564 1567 * some of these devices stop working once a single
1565 1568 * read past the end has been issued. Checking
1566 1569 * for a maximum part_length and skipping the backup
1567 1570 * labels at the end of the slice/partition/device
1568 1571 * avoids breaking down on such devices.
1569 1572 */
1570 1573 if (part_length == MAXUINT && label == 2)
1571 1574 break;
1572 1575
1573 1576 uint64_t sector = vdev_label_start(adjpl,
1574 1577 label) >> SPA_MINBLOCKSHIFT;
1575 1578
1576 1579 /* Read in the uberblock ring (128K). */
1577 1580 if (devread(sector +
1578 1581 ((VDEV_SKIP_SIZE + VDEV_PHYS_SIZE) >> SPA_MINBLOCKSHIFT),
1579 1582 0, VDEV_UBERBLOCK_RING, ub_array) == 0)
1580 1583 continue;
1581 1584
1582 1585 if (check_pool_label(sector, stack, tmp_devid,
1583 1586 tmp_bootpath, &tmp_guid, &ashift, &version))
1584 1587 continue;
1585 1588
1586 1589 if (pool_guid == 0)
1587 1590 pool_guid = tmp_guid;
1588 1591
1589 1592 if ((ubbest = find_bestub(ub_array, ashift, sector)) == NULL ||
1590 1593 zio_read(&ubbest->ub_rootbp, osp, stack) != 0)
1591 1594 continue;
1592 1595
1593 1596 VERIFY_OS_TYPE(osp, DMU_OST_META);
1594 1597
1595 1598 if (version >= SPA_VERSION_FEATURES &&
1596 1599 check_mos_features(&osp->os_meta_dnode, stack) != 0)
1597 1600 continue;
1598 1601
1599 1602 if (find_best_root && ((pool_guid != tmp_guid) ||
1600 1603 vdev_uberblock_compare(ubbest, &(current_uberblock)) <= 0))
1601 1604 continue;
1602 1605
1603 1606 /* Got the MOS. Save it at the memory addr MOS. */
1604 1607 grub_memmove(MOS, &osp->os_meta_dnode, DNODE_SIZE);
1605 1608 grub_memmove(¤t_uberblock, ubbest, sizeof (uberblock_t));
1606 1609 grub_memmove(current_bootpath, tmp_bootpath, MAXNAMELEN);
1607 1610 grub_memmove(current_devid, tmp_devid, grub_strlen(tmp_devid));
1608 1611 is_zfs_mount = 1;
1609 1612 return (1);
1610 1613 }
1611 1614
1612 1615 /*
1613 1616 * While some fs impls. (tftp) rely on setting and keeping
1614 1617 * global errnums set, others won't reset it and will break
1615 1618 * when issuing rawreads. The goal here is to simply not
1616 1619 * have zfs mount attempts impact the previous state.
1617 1620 */
1618 1621 errnum = err;
1619 1622 return (0);
1620 1623 }
1621 1624
1622 1625 /*
1623 1626 * zfs_open() locates a file in the rootpool by following the
1624 1627 * MOS and places the dnode of the file in the memory address DNODE.
1625 1628 *
1626 1629 * Return:
1627 1630 * 1 - success
1628 1631 * 0 - failure
1629 1632 */
1630 1633 int
1631 1634 zfs_open(char *filename)
1632 1635 {
1633 1636 char *stack;
1634 1637 dnode_phys_t *mdn;
1635 1638
1636 1639 file_buf = NULL;
1637 1640 stackbase = ZFS_SCRATCH;
1638 1641 stack = stackbase;
1639 1642
1640 1643 mdn = (dnode_phys_t *)stack;
1641 1644 stack += sizeof (dnode_phys_t);
1642 1645
1643 1646 dnode_mdn = NULL;
1644 1647 dnode_buf = (dnode_phys_t *)stack;
1645 1648 stack += 1<<DNODE_BLOCK_SHIFT;
1646 1649
1647 1650 /*
1648 1651 * menu.lst is placed at the root pool filesystem level,
1649 1652 * do not goto 'current_bootfs'.
1650 1653 */
1651 1654 if (is_top_dataset_file(filename)) {
1652 1655 if (errnum = get_objset_mdn(MOS, NULL, NULL, mdn, stack))
1653 1656 return (0);
1654 1657
1655 1658 current_bootfs_obj = 0;
1656 1659 } else {
1657 1660 if (current_bootfs[0] == '\0') {
1658 1661 /* Get the default root filesystem object number */
1659 1662 if (errnum = get_default_bootfsobj(MOS,
1660 1663 ¤t_bootfs_obj, stack))
1661 1664 return (0);
1662 1665
1663 1666 if (errnum = get_objset_mdn(MOS, NULL,
1664 1667 ¤t_bootfs_obj, mdn, stack))
1665 1668 return (0);
1666 1669 } else {
1667 1670 if (errnum = get_objset_mdn(MOS, current_bootfs,
1668 1671 ¤t_bootfs_obj, mdn, stack)) {
1669 1672 grub_memset(current_bootfs, 0, MAXNAMELEN);
1670 1673 return (0);
1671 1674 }
1672 1675 }
1673 1676 }
1674 1677
1675 1678 if (dnode_get_path(mdn, filename, DNODE, stack)) {
1676 1679 errnum = ERR_FILE_NOT_FOUND;
1677 1680 return (0);
1678 1681 }
1679 1682
1680 1683 /* get the file size and set the file position to 0 */
1681 1684
1682 1685 /*
1683 1686 * For DMU_OT_SA we will need to locate the SIZE attribute
1684 1687 * attribute, which could be either in the bonus buffer
1685 1688 * or the "spill" block.
1686 1689 */
1687 1690 if (DNODE->dn_bonustype == DMU_OT_SA) {
1688 1691 sa_hdr_phys_t *sahdrp;
1689 1692 int hdrsize;
1690 1693
1691 1694 if (DNODE->dn_bonuslen != 0) {
1692 1695 sahdrp = (sa_hdr_phys_t *)DN_BONUS(DNODE);
1693 1696 } else {
1694 1697 if (DNODE->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1695 1698 blkptr_t *bp = &DNODE->dn_spill;
1696 1699 void *buf;
1697 1700
1698 1701 buf = (void *)stack;
1699 1702 stack += BP_GET_LSIZE(bp);
1700 1703
1701 1704 /* reset errnum to rawread() failure */
1702 1705 errnum = 0;
1703 1706 if (zio_read(bp, buf, stack) != 0) {
1704 1707 return (0);
1705 1708 }
1706 1709 sahdrp = buf;
1707 1710 } else {
1708 1711 errnum = ERR_FSYS_CORRUPT;
1709 1712 return (0);
1710 1713 }
1711 1714 }
1712 1715 hdrsize = SA_HDR_SIZE(sahdrp);
1713 1716 filemax = *(uint64_t *)((char *)sahdrp + hdrsize +
1714 1717 SA_SIZE_OFFSET);
1715 1718 } else {
1716 1719 filemax = ((znode_phys_t *)DN_BONUS(DNODE))->zp_size;
1717 1720 }
1718 1721 filepos = 0;
1719 1722
1720 1723 dnode_buf = NULL;
1721 1724 return (1);
1722 1725 }
1723 1726
/*
 * zfs_read reads in the data blocks pointed by the DNODE.
 *
 * Uses the global file_buf as a one-block cache between calls;
 * file_start/file_end record which byte range of the file it holds.
 *
 * Return:
 *	len - the length successfully read in to the buffer
 *	0 - failure
 */
int
zfs_read(char *buf, int len)
{
	char *stack;
	int blksz, length, movesize;

	/* first read for this file: carve the block cache from scratch */
	if (file_buf == NULL) {
		file_buf = stackbase;
		stackbase += SPA_MAXBLOCKSIZE;
		file_start = file_end = 0;
	}
	stack = stackbase;

	/*
	 * If offset is in memory, move it into the buffer provided and return.
	 */
	if (filepos >= file_start && filepos+len <= file_end) {
		grub_memmove(buf, file_buf + filepos - file_start, len);
		filepos += len;
		return (len);
	}

	blksz = DNODE->dn_datablkszsec << SPA_MINBLOCKSHIFT;

	/*
	 * Entire Dnode is too big to fit into the space available. We
	 * will need to read it in chunks. This could be optimized to
	 * read in as large a chunk as there is space available, but for
	 * now, this only reads in one data block at a time.
	 */
	length = len;
	while (length) {
		/*
		 * Find requested blkid and the offset within that block.
		 */
		uint64_t blkid = filepos / blksz;

		if (errnum = dmu_read(DNODE, blkid, file_buf, stack))
			return (0);

		file_start = blkid * blksz;
		file_end = file_start + blksz;

		/* copy out the part of this block the caller asked for */
		movesize = MIN(length, file_end - filepos);

		grub_memmove(buf, file_buf + filepos - file_start,
		    movesize);
		buf += movesize;
		length -= movesize;
		filepos += movesize;
	}

	return (len);
}
1785 1788
/*
 * No-Op
 *
 * Filesystem embed hook; the ZFS implementation has nothing to do and
 * always reports success.
 */
int
zfs_embed(int *start_sector, int needed_sectors)
{
	return (1);
}
1794 1797
1795 1798 #endif /* FSYS_ZFS */
↓ open down ↓ |
823 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX