1 /*
2 * GRUB -- GRand Unified Bootloader
3 * Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation; either version 2 of the License, or
8 * (at your option) any later version.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 */
19
20 /*
21 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
22 * Use is subject to license terms.
23 */
24
25 /*
26 * Copyright (c) 2012 by Delphix. All rights reserved.
27 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
28 */
29
30 /*
31 * The zfs plug-in routines for GRUB are:
32 *
33 * zfs_mount() - locates a valid uberblock of the root pool and reads
34 * in its MOS at the memory address MOS.
35 *
36 * zfs_open() - locates a plain file object by following the MOS
37 * and places its dnode at the memory address DNODE.
38 *
39 * zfs_read() - reads in the data blocks pointed to by the DNODE.
40 *
41 * ZFS_SCRATCH is used as a working area.
42 *
43 * (memory addr) MOS DNODE ZFS_SCRATCH
44 * | | |
45 * +-------V---------V----------V---------------+
46 * memory | | dnode | dnode | scratch |
47 * | | 512B | 512B | area |
48 * +--------------------------------------------+
49 */
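/*
 * Editor's sketch (illustrative, not part of the original driver): the GRUB
 * filesystem layer is expected to invoke the three entry points above
 * roughly as follows.  The path shown is only an example, and note that the
 * path buffer is modified ("mangled") during the lookup.
 *
 *	char path[] = "/boot/grub/menu.lst";
 *	int n;
 *
 *	if (zfs_mount() && zfs_open(path))
 *		n = zfs_read(buffer, length);
 *
 * Any file reachable from the 'bootfs' dataset, or from the top dataset for
 * the special files handled by is_top_dataset_file(), can be opened this way.
 */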
50
51 #ifdef FSYS_ZFS
52
53 #include "shared.h"
54 #include "filesys.h"
55 #include "fsys_zfs.h"
56
57 /* cache for a file block of the currently zfs_open()-ed file */
58 static void *file_buf = NULL;
59 static uint64_t file_start = 0;
60 static uint64_t file_end = 0;
61
62 /* cache for a dnode block */
63 static dnode_phys_t *dnode_buf = NULL;
64 static dnode_phys_t *dnode_mdn = NULL;
65 static uint64_t dnode_start = 0;
66 static uint64_t dnode_end = 0;
67
68 static uint64_t pool_guid = 0;
69 static uberblock_t current_uberblock;
70 static char *stackbase;
71
72 decomp_entry_t decomp_table[ZIO_COMPRESS_FUNCTIONS] =
73 {
74 {"inherit", 0}, /* ZIO_COMPRESS_INHERIT */
75 {"on", lzjb_decompress}, /* ZIO_COMPRESS_ON */
76 {"off", 0}, /* ZIO_COMPRESS_OFF */
77 {"lzjb", lzjb_decompress}, /* ZIO_COMPRESS_LZJB */
78 {"empty", 0}, /* ZIO_COMPRESS_EMPTY */
79 {"gzip-1", 0}, /* ZIO_COMPRESS_GZIP_1 */
80 {"gzip-2", 0}, /* ZIO_COMPRESS_GZIP_2 */
81 {"gzip-3", 0}, /* ZIO_COMPRESS_GZIP_3 */
82 {"gzip-4", 0}, /* ZIO_COMPRESS_GZIP_4 */
83 {"gzip-5", 0}, /* ZIO_COMPRESS_GZIP_5 */
84 {"gzip-6", 0}, /* ZIO_COMPRESS_GZIP_6 */
85 {"gzip-7", 0}, /* ZIO_COMPRESS_GZIP_7 */
86 {"gzip-8", 0}, /* ZIO_COMPRESS_GZIP_8 */
87 {"gzip-9", 0}, /* ZIO_COMPRESS_GZIP_9 */
88 {"zle", 0}, /* ZIO_COMPRESS_ZLE */
89 {"lz4", lz4_decompress} /* ZIO_COMPRESS_LZ4 */
90 };
91
92 static int zio_read_data(blkptr_t *bp, void *buf, char *stack);
93
94 /*
95 * Our own version of bcmp().
96 */
97 static int
98 zfs_bcmp(const void *s1, const void *s2, size_t n)
99 {
100 const uchar_t *ps1 = s1;
101 const uchar_t *ps2 = s2;
102
103 if (s1 != s2 && n != 0) {
104 do {
105 if (*ps1++ != *ps2++)
106 return (1);
107 } while (--n != 0);
108 }
109
110 return (0);
111 }
112
113 /*
114 * Our own version of log2(). Same thing as highbit()-1.
115 */
116 static int
117 zfs_log2(uint64_t num)
118 {
119 int i = 0;
120
121 while (num > 1) {
122 i++;
123 num = num >> 1;
124 }
125
126 return (i);
127 }
128
129 /* Checksum Functions */
130 static void
131 zio_checksum_off(const void *buf, uint64_t size, zio_cksum_t *zcp)
132 {
133 ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
134 }
135
136 /* Checksum Table and Values */
137 zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
138 {{NULL, NULL}, 0, 0, "inherit"},
139 {{NULL, NULL}, 0, 0, "on"},
140 {{zio_checksum_off, zio_checksum_off}, 0, 0, "off"},
141 {{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 1, "label"},
142 {{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 1, "gang_header"},
143 {{NULL, NULL}, 0, 0, "zilog"},
144 {{fletcher_2_native, fletcher_2_byteswap}, 0, 0, "fletcher2"},
145 {{fletcher_4_native, fletcher_4_byteswap}, 1, 0, "fletcher4"},
146 {{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 0, "SHA256"},
147 {{NULL, NULL}, 0, 0, "zilog2"},
148 };
149
150 /*
151 * zio_checksum_verify: Provides support for checksum verification.
152 *
153 * Fletcher2, Fletcher4, and SHA256 are supported.
154 *
155 * Return:
156 * -1 = Failure
157 * 0 = Success
158 */
159 static int
160 zio_checksum_verify(blkptr_t *bp, char *data, int size)
161 {
162 zio_cksum_t zc = bp->blk_cksum;
163 uint32_t checksum = BP_GET_CHECKSUM(bp);
164 int byteswap = BP_SHOULD_BYTESWAP(bp);
165 zio_eck_t *zec = (zio_eck_t *)(data + size) - 1;
166 zio_checksum_info_t *ci = &zio_checksum_table[checksum];
167 zio_cksum_t actual_cksum, expected_cksum;
168
169 /* byteswap is not supported */
170 if (byteswap)
171 return (-1);
172
173 if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
174 return (-1);
175
176 if (ci->ci_eck) {
177 expected_cksum = zec->zec_cksum;
178 zec->zec_cksum = zc;
179 ci->ci_func[0](data, size, &actual_cksum);
180 zec->zec_cksum = expected_cksum;
181 zc = expected_cksum;
182
183 } else {
184 ci->ci_func[byteswap](data, size, &actual_cksum);
185 }
186
187 if ((actual_cksum.zc_word[0] - zc.zc_word[0]) |
188 (actual_cksum.zc_word[1] - zc.zc_word[1]) |
189 (actual_cksum.zc_word[2] - zc.zc_word[2]) |
190 (actual_cksum.zc_word[3] - zc.zc_word[3]))
191 return (-1);
192
193 return (0);
194 }
195
196 /*
197 * vdev_label_start returns the physical disk offset (in bytes) of
198 * label "l".
199 */
200 static uint64_t
201 vdev_label_start(uint64_t psize, int l)
202 {
203 return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
204 0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
205 }
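/*
 * Editor's note (illustrative, assuming the standard 256K vdev_label_t and
 * VDEV_LABELS == 4): vdev_label_start() places labels 0 and 1 at byte
 * offsets 0 and 256K from the start of the device, and the two backup
 * labels 2 and 3 at psize - 512K and psize - 256K, i.e. two copies at each
 * end of the vdev.
 */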
206
207 /*
208 * vdev_uberblock_compare takes two uberblock structures and returns an integer
209 * indicating the more recent of the two.
210 * Return Value = 1 if ub1 is more recent
211 * Return Value = -1 if ub2 is more recent
212 * The most recent uberblock is determined using its transaction number and
213 * timestamp. The uberblock with the highest transaction number is
214 * considered "newer". If the transaction numbers of the two blocks match, the
215 * timestamps are compared to determine the "newer" of the two.
216 */
217 static int
218 vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
219 {
220 if (ub1->ub_txg < ub2->ub_txg)
221 return (-1);
222 if (ub1->ub_txg > ub2->ub_txg)
223 return (1);
224
225 if (ub1->ub_timestamp < ub2->ub_timestamp)
226 return (-1);
227 if (ub1->ub_timestamp > ub2->ub_timestamp)
228 return (1);
229
230 return (0);
231 }
232
233 /*
234 * Three pieces of information are needed to verify an uberblock: the magic
235 * number, the version number, and the checksum.
236 *
237 * Return:
238 * 0 - Success
239 * -1 - Failure
240 */
241 static int
242 uberblock_verify(uberblock_t *uber, uint64_t ub_size, uint64_t offset)
243 {
244 blkptr_t bp;
245
246 BP_ZERO(&bp);
247 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
248 BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
249 ZIO_SET_CHECKSUM(&bp.blk_cksum, offset, 0, 0, 0);
250
251 if (zio_checksum_verify(&bp, (char *)uber, ub_size) != 0)
252 return (-1);
253
254 if (uber->ub_magic == UBERBLOCK_MAGIC &&
255 SPA_VERSION_IS_SUPPORTED(uber->ub_version))
256 return (0);
257
258 return (-1);
259 }
260
261 /*
262 * Find the best uberblock.
263 * Return:
264 * Success - Pointer to the best uberblock.
265 * Failure - NULL
266 */
267 static uberblock_t *
268 find_bestub(char *ub_array, uint64_t ashift, uint64_t sector)
269 {
270 uberblock_t *ubbest = NULL;
271 uberblock_t *ubnext;
272 uint64_t offset, ub_size;
273 int i;
274
275 ub_size = VDEV_UBERBLOCK_SIZE(ashift);
276
277 for (i = 0; i < VDEV_UBERBLOCK_COUNT(ashift); i++) {
278 ubnext = (uberblock_t *)ub_array;
279 ub_array += ub_size;
280 offset = (sector << SPA_MINBLOCKSHIFT) +
281 VDEV_UBERBLOCK_OFFSET(ashift, i);
282
283 if (uberblock_verify(ubnext, ub_size, offset) != 0)
284 continue;
285
286 if (ubbest == NULL ||
287 vdev_uberblock_compare(ubnext, ubbest) > 0)
288 ubbest = ubnext;
289 }
290
291 return (ubbest);
292 }
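/*
 * Editor's note (illustrative, assuming the usual on-disk constants: a 128K
 * uberblock ring and a 1K minimum uberblock slot): for a pool created with
 * ashift 9, VDEV_UBERBLOCK_SIZE(9) is 1K and the loop above scans 128 slots;
 * with ashift 12 the slots grow to 4K and only 32 are scanned.  Whichever
 * slot verifies and carries the highest txg (then timestamp) wins.
 */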
293
294 /*
295 * Read a block of data based on the gang block address dva,
296 * and put its data in buf.
297 *
298 * Return:
299 * 0 - success
300 * 1 - failure
301 */
302 static int
303 zio_read_gang(blkptr_t *bp, dva_t *dva, void *buf, char *stack)
304 {
305 zio_gbh_phys_t *zio_gb;
306 uint64_t offset, sector;
307 blkptr_t tmpbp;
308 int i;
309
310 zio_gb = (zio_gbh_phys_t *)stack;
311 stack += SPA_GANGBLOCKSIZE;
312 offset = DVA_GET_OFFSET(dva);
313 sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
314
315 /* read in the gang block header */
316 if (devread(sector, 0, SPA_GANGBLOCKSIZE, (char *)zio_gb) == 0) {
317 grub_printf("failed to read in a gang block header\n");
318 return (1);
319 }
320
321 /* self-checksumming the gang block header */
322 BP_ZERO(&tmpbp);
323 BP_SET_CHECKSUM(&tmpbp, ZIO_CHECKSUM_GANG_HEADER);
324 BP_SET_BYTEORDER(&tmpbp, ZFS_HOST_BYTEORDER);
325 ZIO_SET_CHECKSUM(&tmpbp.blk_cksum, DVA_GET_VDEV(dva),
326 DVA_GET_OFFSET(dva), bp->blk_birth, 0);
327 if (zio_checksum_verify(&tmpbp, (char *)zio_gb, SPA_GANGBLOCKSIZE)) {
328 grub_printf("failed to checksum a gang block header\n");
329 return (1);
330 }
331
332 for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
333 if (zio_gb->zg_blkptr[i].blk_birth == 0)
334 continue;
335
336 if (zio_read_data(&zio_gb->zg_blkptr[i], buf, stack))
337 return (1);
338 buf += BP_GET_PSIZE(&zio_gb->zg_blkptr[i]);
339 }
340
341 return (0);
342 }
343
344 /*
345 * Read in a block of raw data to buf.
346 *
347 * Return:
348 * 0 - success
349 * 1 - failure
350 */
351 static int
352 zio_read_data(blkptr_t *bp, void *buf, char *stack)
353 {
354 int i, psize;
355
356 psize = BP_GET_PSIZE(bp);
357
358 /* pick a good dva from the block pointer */
359 for (i = 0; i < SPA_DVAS_PER_BP; i++) {
360 uint64_t offset, sector;
361
362 if (bp->blk_dva[i].dva_word[0] == 0 &&
363 bp->blk_dva[i].dva_word[1] == 0)
364 continue;
365
366 if (DVA_GET_GANG(&bp->blk_dva[i])) {
367 if (zio_read_gang(bp, &bp->blk_dva[i], buf, stack) == 0)
368 return (0);
369 } else {
370 /* read in a data block */
371 offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
372 sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
373 if (devread(sector, 0, psize, buf) != 0)
374 return (0);
375 }
376 }
377
378 return (1);
379 }
380
381 /*
382 * Read in a block of data, verify its checksum, decompress if needed,
383 * and put the uncompressed data in buf.
384 *
385 * Return:
386 * 0 - success
387 * errnum - failure
388 */
389 static int
390 zio_read(blkptr_t *bp, void *buf, char *stack)
391 {
392 int lsize, psize, comp;
393 char *retbuf;
394
395 comp = BP_GET_COMPRESS(bp);
396 lsize = BP_GET_LSIZE(bp);
397 psize = BP_GET_PSIZE(bp);
398
399 if ((unsigned int)comp >= ZIO_COMPRESS_FUNCTIONS ||
400 (comp != ZIO_COMPRESS_OFF &&
401 decomp_table[comp].decomp_func == NULL)) {
402 grub_printf("compression algorithm not supported\n");
403 return (ERR_FSYS_CORRUPT);
404 }
405
406 if ((char *)buf < stack && ((char *)buf) + lsize > stack) {
407 grub_printf("not enough memory allocated\n");
408 return (ERR_WONT_FIT);
409 }
410
411 retbuf = buf;
412 if (comp != ZIO_COMPRESS_OFF) {
413 buf = stack;
414 stack += psize;
415 }
416
417 if (zio_read_data(bp, buf, stack) != 0) {
418 grub_printf("zio_read_data failed\n");
419 return (ERR_FSYS_CORRUPT);
420 }
421
422 if (zio_checksum_verify(bp, buf, psize) != 0) {
423 grub_printf("checksum verification failed\n");
424 return (ERR_FSYS_CORRUPT);
425 }
426
427 if (comp != ZIO_COMPRESS_OFF) {
428 if (decomp_table[comp].decomp_func(buf, retbuf, psize,
429 lsize) != 0) {
430 grub_printf("zio_read decompression failed\n");
431 return (ERR_FSYS_CORRUPT);
432 }
433 }
434
435 return (0);
436 }
437
438 /*
439 * Get the data block for a given block id, reading any indirect
440 * blocks onto the scratch stack on the way down.
441 *
442 * Return:
443 * 0 - success
444 * errnum - failure
445 */
446 static int
447 dmu_read(dnode_phys_t *dn, uint64_t blkid, void *buf, char *stack)
448 {
449 int idx, level;
450 blkptr_t *bp_array = dn->dn_blkptr;
451 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
452 blkptr_t *bp, *tmpbuf;
453
454 bp = (blkptr_t *)stack;
455 stack += sizeof (blkptr_t);
456
457 tmpbuf = (blkptr_t *)stack;
458 stack += 1<<dn->dn_indblkshift;
459
460 for (level = dn->dn_nlevels - 1; level >= 0; level--) {
461 idx = (blkid >> (epbs * level)) & ((1<<epbs)-1);
462 *bp = bp_array[idx];
463 if (level == 0)
464 tmpbuf = buf;
465 if (BP_IS_HOLE(bp)) {
466 grub_memset(buf, 0,
467 dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
468 break;
469 } else if (errnum = zio_read(bp, tmpbuf, stack)) {
470 return (errnum);
471 }
472
473 bp_array = tmpbuf;
474 }
475
476 return (0);
477 }
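/*
 * Editor's worked example (illustrative only): with SPA_BLKPTRSHIFT of 7
 * (128-byte block pointers) and, say, dn_indblkshift of 14 (16K indirect
 * blocks), epbs is 7, so each indirect block holds 128 block pointers.
 * For a two-level dnode with three top-level block pointers (e.g. an object
 * set's metadnode), block id 300 resolves to dn_blkptr[300 >> 7] ==
 * dn_blkptr[2] at level 1 and then to entry 300 & 127 == 44 of that
 * indirect block at level 0.
 */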
478
479 /*
480 * mzap_lookup: Looks up property described by "name" and returns the value
481 * in "value".
482 *
483 * Return:
484 * 0 - success
485 * errnum - failure
486 */
487 static int
488 mzap_lookup(mzap_phys_t *zapobj, int objsize, const char *name,
489 uint64_t *value)
490 {
491 int i, chunks;
492 mzap_ent_phys_t *mzap_ent = zapobj->mz_chunk;
493
494 chunks = objsize / MZAP_ENT_LEN - 1;
495 for (i = 0; i < chunks; i++) {
496 if (grub_strcmp(mzap_ent[i].mze_name, name) == 0) {
497 *value = mzap_ent[i].mze_value;
498 return (0);
499 }
500 }
501
502 return (ERR_FSYS_CORRUPT);
503 }
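/*
 * Editor's note (illustrative): a microzap block is just a 64-byte header
 * followed by fixed 64-byte (MZAP_ENT_LEN) name/value chunks, which is why
 * chunks == objsize / MZAP_ENT_LEN - 1 above.  For example, a 4K microzap
 * object holds at most 63 entries.
 */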
504
505 static uint64_t
506 zap_hash(uint64_t salt, const char *name)
507 {
508 static uint64_t table[256];
509 const uint8_t *cp;
510 uint8_t c;
511 uint64_t crc = salt;
512
513 if (table[128] == 0) {
514 uint64_t *ct;
515 int i, j;
516 for (i = 0; i < 256; i++) {
517 for (ct = table + i, *ct = i, j = 8; j > 0; j--)
518 *ct = (*ct >> 1) ^ (-(*ct & 1) &
519 ZFS_CRC64_POLY);
520 }
521 }
522
523 if (crc == 0 || table[128] != ZFS_CRC64_POLY) {
524 errnum = ERR_FSYS_CORRUPT;
525 return (0);
526 }
527
528 for (cp = (const uint8_t *)name; (c = *cp) != '\0'; cp++)
529 crc = (crc >> 8) ^ table[(crc ^ c) & 0xFF];
530
531 /*
532 * Only use 28 bits, since we need 4 bits in the cookie for the
533 * collision differentiator. We MUST use the high bits, since
534 * those are the ones that we first pay attention to when
535 * choosing the bucket.
536 */
537 crc &= ~((1ULL << (64 - 28)) - 1);
538
539 return (crc);
540 }
541
542 /*
543 * Only to be used on 8-bit arrays.
544 * array_len is actual len in bytes (not encoded le_value_length).
545 * buf is null-terminated.
546 */
547 static int
548 zap_leaf_array_equal(zap_leaf_phys_t *l, int blksft, int chunk,
549 int array_len, const char *buf)
550 {
551 int bseen = 0;
552
553 while (bseen < array_len) {
554 struct zap_leaf_array *la =
555 &ZAP_LEAF_CHUNK(l, blksft, chunk).l_array;
556 int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES);
557
558 if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
559 return (0);
560
561 if (zfs_bcmp(la->la_array, buf + bseen, toread) != 0)
562 break;
563 chunk = la->la_next;
564 bseen += toread;
565 }
566 return (bseen == array_len);
567 }
568
569 /*
570 * Given a zap_leaf_phys_t, walk through the zap leaf chunks to get the
571 * value for the property "name".
572 *
573 * Return:
574 * 0 - success
575 * errnum - failure
576 */
577 static int
578 zap_leaf_lookup(zap_leaf_phys_t *l, int blksft, uint64_t h,
579 const char *name, uint64_t *value)
580 {
581 uint16_t chunk;
582 struct zap_leaf_entry *le;
583
584 /* Verify if this is a valid leaf block */
585 if (l->l_hdr.lh_block_type != ZBT_LEAF)
586 return (ERR_FSYS_CORRUPT);
587 if (l->l_hdr.lh_magic != ZAP_LEAF_MAGIC)
588 return (ERR_FSYS_CORRUPT);
589
590 for (chunk = l->l_hash[LEAF_HASH(blksft, h)];
591 chunk != CHAIN_END; chunk = le->le_next) {
592
593 if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
594 return (ERR_FSYS_CORRUPT);
595
596 le = ZAP_LEAF_ENTRY(l, blksft, chunk);
597
598 /* Verify the chunk entry */
599 if (le->le_type != ZAP_CHUNK_ENTRY)
600 return (ERR_FSYS_CORRUPT);
601
602 if (le->le_hash != h)
603 continue;
604
605 if (zap_leaf_array_equal(l, blksft, le->le_name_chunk,
606 le->le_name_length, name)) {
607
608 struct zap_leaf_array *la;
609 uint8_t *ip;
610
611 if (le->le_int_size != 8 || le->le_value_length != 1)
612 return (ERR_FSYS_CORRUPT);
613
614 /* get the uint64_t property value */
615 la = &ZAP_LEAF_CHUNK(l, blksft,
616 le->le_value_chunk).l_array;
617 ip = la->la_array;
618
619 *value = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 |
620 (uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 |
621 (uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 |
622 (uint64_t)ip[6] << 8 | (uint64_t)ip[7];
623
624 return (0);
625 }
626 }
627
628 return (ERR_FSYS_CORRUPT);
629 }
630
631 /*
632 * Fat ZAP lookup
633 *
634 * Return:
635 * 0 - success
636 * errnum - failure
637 */
638 static int
639 fzap_lookup(dnode_phys_t *zap_dnode, zap_phys_t *zap,
640 const char *name, uint64_t *value, char *stack)
641 {
642 zap_leaf_phys_t *l;
643 uint64_t hash, idx, blkid;
644 int blksft = zfs_log2(zap_dnode->dn_datablkszsec << DNODE_SHIFT);
645
646 /* Verify if this is a fat zap header block */
647 if (zap->zap_magic != (uint64_t)ZAP_MAGIC ||
648 zap->zap_flags != 0)
649 return (ERR_FSYS_CORRUPT);
650
651 hash = zap_hash(zap->zap_salt, name);
652 if (errnum)
653 return (errnum);
654
655 /* get block id from index */
656 if (zap->zap_ptrtbl.zt_numblks != 0) {
657 /* external pointer tables not supported */
658 return (ERR_FSYS_CORRUPT);
659 }
660 idx = ZAP_HASH_IDX(hash, zap->zap_ptrtbl.zt_shift);
661 blkid = ((uint64_t *)zap)[idx + (1<<(blksft-3-1))];
662
663 /* Get the leaf block */
664 l = (zap_leaf_phys_t *)stack;
665 stack += 1<<blksft;
666 if ((1<<blksft) < sizeof (zap_leaf_phys_t))
667 return (ERR_FSYS_CORRUPT);
668 if (errnum = dmu_read(zap_dnode, blkid, l, stack))
669 return (errnum);
670
671 return (zap_leaf_lookup(l, blksft, hash, name, value));
672 }
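/*
 * Editor's note (illustrative): only the pointer table embedded in the ZAP
 * header block is supported above (zt_numblks must be 0).  That table
 * occupies the second half of the header block, which is what the
 * expression idx + (1 << (blksft - 3 - 1)) indexes: for a 16K ZAP block
 * (blksft == 14) the table starts at 64-bit word 1024, i.e. byte offset 8K.
 * idx itself is taken from the top zt_shift bits of the 64-bit hash.
 */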
673
674 /*
675 * Read in the data of a zap object and find the value for a matching
676 * property name.
677 *
678 * Return:
679 * 0 - success
680 * errnum - failure
681 */
682 static int
683 zap_lookup(dnode_phys_t *zap_dnode, const char *name, uint64_t *val,
684 char *stack)
685 {
686 uint64_t block_type;
687 int size;
688 void *zapbuf;
689
690 /* Read in the first block of the zap object data. */
691 zapbuf = stack;
692 size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
693 stack += size;
694
695 if ((errnum = dmu_read(zap_dnode, 0, zapbuf, stack)) != 0)
696 return (errnum);
697
698 block_type = *((uint64_t *)zapbuf);
699
700 if (block_type == ZBT_MICRO) {
701 return (mzap_lookup(zapbuf, size, name, val));
702 } else if (block_type == ZBT_HEADER) {
703 /* this is a fat zap */
704 return (fzap_lookup(zap_dnode, zapbuf, name,
705 val, stack));
706 }
707
708 return (ERR_FSYS_CORRUPT);
709 }
710
711 typedef struct zap_attribute {
712 int za_integer_length;
713 uint64_t za_num_integers;
714 uint64_t za_first_integer;
715 char *za_name;
716 } zap_attribute_t;
717
718 typedef int (zap_cb_t)(zap_attribute_t *za, void *arg, char *stack);
719
720 static int
721 zap_iterate(dnode_phys_t *zap_dnode, zap_cb_t *cb, void *arg, char *stack)
722 {
723 uint32_t size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
724 zap_attribute_t za;
725 int i;
726 mzap_phys_t *mzp = (mzap_phys_t *)stack;
727 stack += size;
728
729 if ((errnum = dmu_read(zap_dnode, 0, mzp, stack)) != 0)
730 return (errnum);
731
732 /*
733 * Iteration over fatzap objects has not yet been implemented.
734 * If we encounter a pool in which there are more features for
735 * read than can fit inside a microzap (i.e., more than 2048
736 * features for read), we can add support for fatzap iteration.
737 * For now, fail.
738 */
739 if (mzp->mz_block_type != ZBT_MICRO) {
740 grub_printf("feature information stored in fatzap, pool "
741 "version not supported\n");
742 return (1);
743 }
744
745 za.za_integer_length = 8;
746 za.za_num_integers = 1;
747 for (i = 0; i < size / MZAP_ENT_LEN - 1; i++) {
748 mzap_ent_phys_t *mzep = &mzp->mz_chunk[i];
749 int err;
750
751 za.za_first_integer = mzep->mze_value;
752 za.za_name = mzep->mze_name;
753 err = cb(&za, arg, stack);
754 if (err != 0)
755 return (err);
756 }
757
758 return (0);
759 }
760
761 /*
762 * Get the dnode of an object number from the metadnode of an object set.
763 *
764 * Input
765 * mdn - metadnode to get the object dnode
766 * objnum - object number for the object dnode
767 * buf - data buffer that holds the returning dnode
768 * stack - scratch area
769 *
770 * Return:
771 * 0 - success
772 * errnum - failure
773 */
774 static int
775 dnode_get(dnode_phys_t *mdn, uint64_t objnum, uint8_t type, dnode_phys_t *buf,
776 char *stack)
777 {
778 uint64_t blkid, blksz; /* the block id this object dnode is in */
779 int epbs; /* shift of number of dnodes in a block */
780 int idx; /* index within a block */
781 dnode_phys_t *dnbuf;
782
783 blksz = mdn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
784 epbs = zfs_log2(blksz) - DNODE_SHIFT;
785 blkid = objnum >> epbs;
786 idx = objnum & ((1<<epbs)-1);
787
788 if (dnode_buf != NULL && dnode_mdn == mdn &&
789 objnum >= dnode_start && objnum < dnode_end) {
790 grub_memmove(buf, &dnode_buf[idx], DNODE_SIZE);
791 VERIFY_DN_TYPE(buf, type);
792 return (0);
793 }
794
795 if (dnode_buf && blksz == 1<<DNODE_BLOCK_SHIFT) {
796 dnbuf = dnode_buf;
797 dnode_mdn = mdn;
798 dnode_start = blkid << epbs;
799 dnode_end = (blkid + 1) << epbs;
800 } else {
801 dnbuf = (dnode_phys_t *)stack;
802 stack += blksz;
803 }
804
805 if (errnum = dmu_read(mdn, blkid, (char *)dnbuf, stack))
806 return (errnum);
807
808 grub_memmove(buf, &dnbuf[idx], DNODE_SIZE);
809 VERIFY_DN_TYPE(buf, type);
810
811 return (0);
812 }
813
814 /*
815 * Check if this is a special file that resides at the top
816 * dataset of the pool. Currently this is the GRUB menu,
817 * boot signature and boot signature backup.
818 * str starts with '/'.
819 */
820 static int
821 is_top_dataset_file(char *str)
822 {
823 char *tptr;
824
825 if ((tptr = grub_strstr(str, "menu.lst")) &&
826 (tptr[8] == '\0' || tptr[8] == ' ') &&
827 *(tptr-1) == '/')
828 return (1);
829
830 if (grub_strncmp(str, BOOTSIGN_DIR"/",
831 grub_strlen(BOOTSIGN_DIR) + 1) == 0)
832 return (1);
833
834 if (grub_strcmp(str, BOOTSIGN_BACKUP) == 0)
835 return (1);
836
837 return (0);
838 }
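/*
 * Editor's example (illustrative): for "/boot/grub/menu.lst" the
 * grub_strstr() match above leaves tptr pointing at "menu.lst", tptr[8] is
 * '\0' and the preceding character is '/', so the file is treated as a
 * top-dataset file and looked up in the root pool filesystem rather than in
 * the 'bootfs' dataset.
 */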
839
840 static int
841 check_feature(zap_attribute_t *za, void *arg, char *stack)
842 {
843 const char **names = arg;
844 int i;
845
846 if (za->za_first_integer == 0)
847 return (0);
848
849 for (i = 0; names[i] != NULL; i++) {
850 if (grub_strcmp(za->za_name, names[i]) == 0) {
851 return (0);
852 }
853 }
854 grub_printf("missing feature for read '%s'\n", za->za_name);
855 return (ERR_NEWER_VERSION);
856 }
857
858 /*
859 * Get the file dnode for a given file name where mdn is the meta dnode
860 * for this ZFS object set. When found, place the file dnode in dn.
861 * The 'path' argument will be mangled.
862 *
863 * Return:
864 * 0 - success
865 * errnum - failure
866 */
867 static int
868 dnode_get_path(dnode_phys_t *mdn, char *path, dnode_phys_t *dn,
869 char *stack)
870 {
871 uint64_t objnum, version;
872 char *cname, ch;
873
874 if (errnum = dnode_get(mdn, MASTER_NODE_OBJ, DMU_OT_MASTER_NODE,
875 dn, stack))
876 return (errnum);
877
878 if (errnum = zap_lookup(dn, ZPL_VERSION_STR, &version, stack))
879 return (errnum);
880 if (version > ZPL_VERSION)
881 return (-1);
882
883 if (errnum = zap_lookup(dn, ZFS_ROOT_OBJ, &objnum, stack))
884 return (errnum);
885
886 if (errnum = dnode_get(mdn, objnum, DMU_OT_DIRECTORY_CONTENTS,
887 dn, stack))
888 return (errnum);
889
890 /* skip leading slashes */
891 while (*path == '/')
892 path++;
893
894 while (*path && !grub_isspace(*path)) {
895
896 /* get the next component name */
897 cname = path;
898 while (*path && !grub_isspace(*path) && *path != '/')
899 path++;
900 ch = *path;
901 *path = 0; /* ensure null termination */
902
903 if (errnum = zap_lookup(dn, cname, &objnum, stack))
904 return (errnum);
905
906 objnum = ZFS_DIRENT_OBJ(objnum);
907 if (errnum = dnode_get(mdn, objnum, 0, dn, stack))
908 return (errnum);
909
910 *path = ch;
911 while (*path == '/')
912 path++;
913 }
914
915 /* We found the dnode for this file. Verify if it is a plain file. */
916 VERIFY_DN_TYPE(dn, DMU_OT_PLAIN_FILE_CONTENTS);
917
918 return (0);
919 }
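/*
 * Editor's example (illustrative, using a hypothetical path): opening
 * "/etc/system" first fetches the master node and the root directory dnode,
 * then does one zap_lookup()/dnode_get() round per component: "etc" yields
 * the directory's object number (with the directory-entry type bits
 * stripped by ZFS_DIRENT_OBJ()), and "system" yields the object number of
 * the plain file whose dnode is finally verified.
 */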
920
921 /*
922 * Get the default 'bootfs' property value from the rootpool.
923 *
924 * Return:
925 * 0 - success
926 * errnum - failure
927 */
928 static int
929 get_default_bootfsobj(dnode_phys_t *mosmdn, uint64_t *obj, char *stack)
930 {
931 uint64_t objnum = 0;
932 dnode_phys_t *dn = (dnode_phys_t *)stack;
933 stack += DNODE_SIZE;
934
935 if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
936 DMU_OT_OBJECT_DIRECTORY, dn, stack))
937 return (errnum);
938
939 /*
940 * find the object number for 'pool_props', and get the dnode
941 * of the 'pool_props'.
942 */
943 if (zap_lookup(dn, DMU_POOL_PROPS, &objnum, stack))
944 return (ERR_FILESYSTEM_NOT_FOUND);
945
946 if (errnum = dnode_get(mosmdn, objnum, DMU_OT_POOL_PROPS, dn, stack))
947 return (errnum);
948
949 if (zap_lookup(dn, ZPOOL_PROP_BOOTFS, &objnum, stack))
950 return (ERR_FILESYSTEM_NOT_FOUND);
951
952 if (!objnum)
953 return (ERR_FILESYSTEM_NOT_FOUND);
954
955 *obj = objnum;
956 return (0);
957 }
958
959 /*
960 * List of pool features that the grub implementation of ZFS supports for
961 * read. Note that features that are only required for write do not need
962 * to be listed here since grub opens pools in read-only mode.
963 *
964 * When this list is updated the version number in usr/src/grub/capability
965 * must be incremented to ensure the new grub gets installed.
966 */
967 static const char *spa_feature_names[] = {
968 "org.illumos:lz4_compress",
969 NULL
970 };
971
972 /*
973 * Checks whether the MOS features that are active are supported by this
974 * (GRUB's) implementation of ZFS.
975 *
976 * Return:
977 * 0: Success.
978 * errnum: Failure.
979 */
980 static int
981 check_mos_features(dnode_phys_t *mosmdn, char *stack)
982 {
983 uint64_t objnum;
984 dnode_phys_t *dn;
985 uint8_t error = 0;
986
987 dn = (dnode_phys_t *)stack;
988 stack += DNODE_SIZE;
989
990 if ((errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
991 DMU_OT_OBJECT_DIRECTORY, dn, stack)) != 0)
992 return (errnum);
993
994 /*
995 * Find the object number for 'features_for_read' and retrieve its
996 * corresponding dnode. Note that we don't check features_for_write
997 * because GRUB is not opening the pool for write.
998 */
999 if ((errnum = zap_lookup(dn, DMU_POOL_FEATURES_FOR_READ, &objnum,
1000 stack)) != 0)
1001 return (errnum);
1002
1003 if ((errnum = dnode_get(mosmdn, objnum, DMU_OTN_ZAP_METADATA,
1004 dn, stack)) != 0)
1005 return (errnum);
1006
1007 return (zap_iterate(dn, check_feature, spa_feature_names, stack));
1008 }
1009
1010 /*
1011 * Given a MOS metadnode, get the metadnode of a given filesystem name (fsname),
1012 * e.g. pool/rootfs, or a given object number (obj), e.g. the object number
1013 * of pool/rootfs.
1014 *
1015 * If no fsname and no obj are given, return the DSL_DIR metadnode.
1016 * If fsname is given, return its metadnode and its matching object number.
1017 * If only obj is given, return the metadnode for this object number.
1018 *
1019 * Return:
1020 * 0 - success
1021 * errnum - failure
1022 */
1023 static int
1024 get_objset_mdn(dnode_phys_t *mosmdn, char *fsname, uint64_t *obj,
1025 dnode_phys_t *mdn, char *stack)
1026 {
1027 uint64_t objnum, headobj;
1028 char *cname, ch;
1029 blkptr_t *bp;
1030 objset_phys_t *osp;
1031 int issnapshot = 0;
1032 char *snapname;
1033
1034 if (fsname == NULL && obj) {
1035 headobj = *obj;
1036 goto skip;
1037 }
1038
1039 if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
1040 DMU_OT_OBJECT_DIRECTORY, mdn, stack))
1041 return (errnum);
1042
1043 if (errnum = zap_lookup(mdn, DMU_POOL_ROOT_DATASET, &objnum,
1044 stack))
1045 return (errnum);
1046
1047 if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR, mdn, stack))
1048 return (errnum);
1049
1050 if (fsname == NULL) {
1051 headobj =
1052 ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
1053 goto skip;
1054 }
1055
1056 /* take out the pool name */
1057 while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
1058 fsname++;
1059
1060 while (*fsname && !grub_isspace(*fsname)) {
1061 uint64_t childobj;
1062
1063 while (*fsname == '/')
1064 fsname++;
1065
1066 cname = fsname;
1067 while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
1068 fsname++;
1069 ch = *fsname;
1070 *fsname = 0;
1071
1072 snapname = cname;
1073 while (*snapname && !grub_isspace(*snapname) && *snapname !=
1074 '@')
1075 snapname++;
1076 if (*snapname == '@') {
1077 issnapshot = 1;
1078 *snapname = 0;
1079 }
1080 childobj =
1081 ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_child_dir_zapobj;
1082 if (errnum = dnode_get(mosmdn, childobj,
1083 DMU_OT_DSL_DIR_CHILD_MAP, mdn, stack))
1084 return (errnum);
1085
1086 if (zap_lookup(mdn, cname, &objnum, stack))
1087 return (ERR_FILESYSTEM_NOT_FOUND);
1088
1089 if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR,
1090 mdn, stack))
1091 return (errnum);
1092
1093 *fsname = ch;
1094 if (issnapshot)
1095 *snapname = '@';
1096 }
1097 headobj = ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
1098 if (obj)
1099 *obj = headobj;
1100
1101 skip:
1102 if (errnum = dnode_get(mosmdn, headobj, DMU_OT_DSL_DATASET, mdn, stack))
1103 return (errnum);
1104 if (issnapshot) {
1105 uint64_t snapobj;
1106
1107 snapobj = ((dsl_dataset_phys_t *)DN_BONUS(mdn))->
1108 ds_snapnames_zapobj;
1109
1110 if (errnum = dnode_get(mosmdn, snapobj,
1111 DMU_OT_DSL_DS_SNAP_MAP, mdn, stack))
1112 return (errnum);
1113 if (zap_lookup(mdn, snapname + 1, &headobj, stack))
1114 return (ERR_FILESYSTEM_NOT_FOUND);
1115 if (errnum = dnode_get(mosmdn, headobj,
1116 DMU_OT_DSL_DATASET, mdn, stack))
1117 return (errnum);
1118 if (obj)
1119 *obj = headobj;
1120 }
1121
1122 bp = &((dsl_dataset_phys_t *)DN_BONUS(mdn))->ds_bp;
1123 osp = (objset_phys_t *)stack;
1124 stack += sizeof (objset_phys_t);
1125 if (errnum = zio_read(bp, osp, stack))
1126 return (errnum);
1127
1128 grub_memmove((char *)mdn, (char *)&osp->os_meta_dnode, DNODE_SIZE);
1129
1130 return (0);
1131 }
1132
1133 /*
1134 * For a given XDR packed nvlist, verify the first 4 bytes and move on.
1135 *
1136 * An XDR packed nvlist is encoded as (comments from nvs_xdr_create) :
1137 *
1138 * encoding method/host endian (4 bytes)
1139 * nvl_version (4 bytes)
1140 * nvl_nvflag (4 bytes)
1141 * encoded nvpairs:
1142 * encoded size of the nvpair (4 bytes)
1143 * decoded size of the nvpair (4 bytes)
1144 * name string size (4 bytes)
1145 * name string data (sizeof (NV_ALIGN4(string)))
1146 * data type (4 bytes)
1147 * # of elements in the nvpair (4 bytes)
1148 * data
1149 * 2 zeros for the last nvpair
1150 * (end of the entire list) (8 bytes)
1151 *
1152 * Return:
1153 * 0 - success
1154 * 1 - failure
1155 */
1156 static int
1157 nvlist_unpack(char *nvlist, char **out)
1158 {
1159 /* Verify that the first two bytes of the nvlist are valid. */
1160 if (nvlist[0] != NV_ENCODE_XDR || nvlist[1] != HOST_ENDIAN)
1161 return (1);
1162
1163 *out = nvlist + 4;
1164 return (0);
1165 }
1166
1167 static char *
1168 nvlist_array(char *nvlist, int index)
1169 {
1170 int i, encode_size;
1171
1172 for (i = 0; i < index; i++) {
1173 /* skip the header, nvl_version, and nvl_nvflag */
1174 nvlist = nvlist + 4 * 2;
1175
1176 while (encode_size = BSWAP_32(*(uint32_t *)nvlist))
1177 nvlist += encode_size; /* goto the next nvpair */
1178
1179 nvlist = nvlist + 4 * 2; /* skip the ending 2 zeros - 8 bytes */
1180 }
1181
1182 return (nvlist);
1183 }
1184
1185 /*
1186 * The nvlist_next_nvpair() function returns a handle to the next nvpair in the
1187 * list following nvpair. If nvpair is NULL, the first pair is returned. If
1188 * nvpair is the last pair in the nvlist, NULL is returned.
1189 */
1190 static char *
1191 nvlist_next_nvpair(char *nvl, char *nvpair)
1192 {
1193 char *cur, *prev;
1194 int encode_size;
1195
1196 if (nvl == NULL)
1197 return (NULL);
1198
1199 if (nvpair == NULL) {
1200 /* skip over nvl_version and nvl_nvflag */
1201 nvpair = nvl + 4 * 2;
1202 } else {
1203 /* skip to the next nvpair */
1204 encode_size = BSWAP_32(*(uint32_t *)nvpair);
1205 nvpair += encode_size;
1206 }
1207
1208 /* 8 bytes of 0 marks the end of the list */
1209 if (*(uint64_t *)nvpair == 0)
1210 return (NULL);
1211
1212 return (nvpair);
1213 }
1214
1215 /*
1216 * This function returns 0 on success and 1 on failure. On success, a string
1217 * containing the name of nvpair is saved in buf.
1218 */
1219 static int
1220 nvpair_name(char *nvp, char *buf, int buflen)
1221 {
1222 int len;
1223
1224 /* skip over encode/decode size */
1225 nvp += 4 * 2;
1226
1227 len = BSWAP_32(*(uint32_t *)nvp);
1228 if (buflen < len + 1)
1229 return (1);
1230
1231 grub_memmove(buf, nvp + 4, len);
1232 buf[len] = '\0';
1233
1234 return (0);
1235 }
1236
1237 /*
1238 * This function retrieves the value of the nvpair in the form of enumerated
1239 * type data_type_t. This is used to determine the appropriate type to pass to
1240 * nvpair_value().
1241 */
1242 static int
1243 nvpair_type(char *nvp)
1244 {
1245 int name_len, type;
1246
1247 /* skip over encode/decode size */
1248 nvp += 4 * 2;
1249
1250 /* skip over name_len */
1251 name_len = BSWAP_32(*(uint32_t *)nvp);
1252 nvp += 4;
1253
1254 /* skip over name */
1255 nvp = nvp + ((name_len + 3) & ~3); /* align */
1256
1257 type = BSWAP_32(*(uint32_t *)nvp);
1258
1259 return (type);
1260 }
1261
1262 static int
1263 nvpair_value(char *nvp, void *val, int valtype, int *nelmp)
1264 {
1265 int name_len, type, slen;
1266 char *strval = val;
1267 uint64_t *intval = val;
1268
1269 /* skip over encode/decode size */
1270 nvp += 4 * 2;
1271
1272 /* skip over name_len */
1273 name_len = BSWAP_32(*(uint32_t *)nvp);
1274 nvp += 4;
1275
1276 /* skip over name */
1277 nvp = nvp + ((name_len + 3) & ~3); /* align */
1278
1279 /* skip over type */
1280 type = BSWAP_32(*(uint32_t *)nvp);
1281 nvp += 4;
1282
1283 if (type == valtype) {
1284 int nelm;
1285
1286 nelm = BSWAP_32(*(uint32_t *)nvp);
1287 if (valtype != DATA_TYPE_BOOLEAN && nelm < 1)
1288 return (1);
1289 nvp += 4;
1290
1291 switch (valtype) {
1292 case DATA_TYPE_BOOLEAN:
1293 return (0);
1294
1295 case DATA_TYPE_STRING:
1296 slen = BSWAP_32(*(uint32_t *)nvp);
1297 nvp += 4;
1298 grub_memmove(strval, nvp, slen);
1299 strval[slen] = '\0';
1300 return (0);
1301
1302 case DATA_TYPE_UINT64:
1303 *intval = BSWAP_64(*(uint64_t *)nvp);
1304 return (0);
1305
1306 case DATA_TYPE_NVLIST:
1307 *(void **)val = (void *)nvp;
1308 return (0);
1309
1310 case DATA_TYPE_NVLIST_ARRAY:
1311 *(void **)val = (void *)nvp;
1312 if (nelmp)
1313 *nelmp = nelm;
1314 return (0);
1315 }
1316 }
1317
1318 return (1);
1319 }
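/*
 * Editor's worked example (illustrative): an XDR-encoded nvpair named
 * "ashift" holding a single uint64 is laid out as
 *
 *	encoded size (4) | decoded size (4) | name length = 6 (4) |
 *	"ashift" padded to a 4-byte boundary (8) | data type (4) |
 *	element count = 1 (4) | value (8)
 *
 * nvpair_name() skips the two sizes and copies 6 bytes; nvpair_type() and
 * nvpair_value() skip the name with (6 + 3) & ~3 == 8.  XDR data is
 * big-endian, hence the BSWAP_32()/BSWAP_64() calls on this little-endian
 * (x86) boot path.
 */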
1320
1321 static int
1322 nvlist_lookup_value(char *nvlist, char *name, void *val, int valtype,
1323 int *nelmp)
1324 {
1325 char *nvpair;
1326
1327 for (nvpair = nvlist_next_nvpair(nvlist, NULL);
1328 nvpair != NULL;
1329 nvpair = nvlist_next_nvpair(nvlist, nvpair)) {
1330 int name_len = BSWAP_32(*(uint32_t *)(nvpair + 4 * 2));
1331 char *nvp_name = nvpair + 4 * 3;
1332
1333 if ((grub_strncmp(nvp_name, name, name_len) == 0) &&
1334 nvpair_type(nvpair) == valtype) {
1335 return (nvpair_value(nvpair, val, valtype, nelmp));
1336 }
1337 }
1338 return (1);
1339 }
1340
1341 /*
1342 * Check if this vdev is online and is in a good state.
1343 */
1344 static int
1345 vdev_validate(char *nv)
1346 {
1347 uint64_t ival;
1348
1349 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_OFFLINE, &ival,
1350 DATA_TYPE_UINT64, NULL) == 0 ||
1351 nvlist_lookup_value(nv, ZPOOL_CONFIG_FAULTED, &ival,
1352 DATA_TYPE_UINT64, NULL) == 0 ||
1353 nvlist_lookup_value(nv, ZPOOL_CONFIG_REMOVED, &ival,
1354 DATA_TYPE_UINT64, NULL) == 0)
1355 return (ERR_DEV_VALUES);
1356
1357 return (0);
1358 }
1359
1360 /*
1361 * Get a valid vdev pathname/devid from the boot device.
1362 * The caller should already allocate MAXPATHLEN memory for bootpath and devid.
1363 */
1364 static int
1365 vdev_get_bootpath(char *nv, uint64_t inguid, char *devid, char *bootpath,
1366 int is_spare)
1367 {
1368 char type[16];
1369
1370 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_TYPE, &type, DATA_TYPE_STRING,
1371 NULL))
1372 return (ERR_FSYS_CORRUPT);
1373
1374 if (grub_strcmp(type, VDEV_TYPE_DISK) == 0) {
1375 uint64_t guid;
1376
1377 if (vdev_validate(nv) != 0)
1378 return (ERR_NO_BOOTPATH);
1379
1380 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_GUID,
1381 &guid, DATA_TYPE_UINT64, NULL) != 0)
1382 return (ERR_NO_BOOTPATH);
1383
1384 if (guid != inguid)
1385 return (ERR_NO_BOOTPATH);
1386
1387 /* for a spare vdev, pick the disk labeled with "is_spare" */
1388 if (is_spare) {
1389 uint64_t spare = 0;
1390 (void) nvlist_lookup_value(nv, ZPOOL_CONFIG_IS_SPARE,
1391 &spare, DATA_TYPE_UINT64, NULL);
1392 if (!spare)
1393 return (ERR_NO_BOOTPATH);
1394 }
1395
1396 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_PHYS_PATH,
1397 bootpath, DATA_TYPE_STRING, NULL) != 0)
1398 bootpath[0] = '\0';
1399
1400 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_DEVID,
1401 devid, DATA_TYPE_STRING, NULL) != 0)
1402 devid[0] = '\0';
1403
1404 if (grub_strlen(bootpath) >= MAXPATHLEN ||
1405 grub_strlen(devid) >= MAXPATHLEN)
1406 return (ERR_WONT_FIT);
1407
1408 return (0);
1409
1410 } else if (grub_strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
1411 grub_strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
1412 (is_spare = (grub_strcmp(type, VDEV_TYPE_SPARE) == 0))) {
1413 int nelm, i;
1414 char *child;
1415
1416 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_CHILDREN, &child,
1417 DATA_TYPE_NVLIST_ARRAY, &nelm))
1418 return (ERR_FSYS_CORRUPT);
1419
1420 for (i = 0; i < nelm; i++) {
1421 char *child_i;
1422
1423 child_i = nvlist_array(child, i);
1424 if (vdev_get_bootpath(child_i, inguid, devid,
1425 bootpath, is_spare) == 0)
1426 return (0);
1427 }
1428 }
1429
1430 return (ERR_NO_BOOTPATH);
1431 }
1432
1433 /*
1434 * Check the disk label information and retrieve needed vdev name-value pairs.
1435 *
1436 * Return:
1437 * 0 - success
1438 * ERR_* - failure
1439 */
1440 static int
1441 check_pool_label(uint64_t sector, char *stack, char *outdevid,
1442 char *outpath, uint64_t *outguid, uint64_t *outashift, uint64_t *outversion)
1443 {
1444 vdev_phys_t *vdev;
1445 uint64_t pool_state, txg = 0;
1446 char *nvlist, *nv, *features;
1447 uint64_t diskguid;
1448
1449 sector += (VDEV_SKIP_SIZE >> SPA_MINBLOCKSHIFT);
1450
1451 /* Read in the vdev name-value pair list (112K). */
1452 if (devread(sector, 0, VDEV_PHYS_SIZE, stack) == 0)
1453 return (ERR_READ);
1454
1455 vdev = (vdev_phys_t *)stack;
1456 stack += sizeof (vdev_phys_t);
1457
1458 if (nvlist_unpack(vdev->vp_nvlist, &nvlist))
1459 return (ERR_FSYS_CORRUPT);
1460
1461 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_STATE, &pool_state,
1462 DATA_TYPE_UINT64, NULL))
1463 return (ERR_FSYS_CORRUPT);
1464
1465 if (pool_state == POOL_STATE_DESTROYED)
1466 return (ERR_FILESYSTEM_NOT_FOUND);
1467
1468 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_NAME,
1469 current_rootpool, DATA_TYPE_STRING, NULL))
1470 return (ERR_FSYS_CORRUPT);
1471
1472 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_TXG, &txg,
1473 DATA_TYPE_UINT64, NULL))
1474 return (ERR_FSYS_CORRUPT);
1475
1476 /* not an active device */
1477 if (txg == 0)
1478 return (ERR_NO_BOOTPATH);
1479
1480 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VERSION, outversion,
1481 DATA_TYPE_UINT64, NULL))
1482 return (ERR_FSYS_CORRUPT);
1483 if (!SPA_VERSION_IS_SUPPORTED(*outversion))
1484 return (ERR_NEWER_VERSION);
1485 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VDEV_TREE, &nv,
1486 DATA_TYPE_NVLIST, NULL))
1487 return (ERR_FSYS_CORRUPT);
1488 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_GUID, &diskguid,
1489 DATA_TYPE_UINT64, NULL))
1490 return (ERR_FSYS_CORRUPT);
1491 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_ASHIFT, outashift,
1492 DATA_TYPE_UINT64, NULL) != 0)
1493 return (ERR_FSYS_CORRUPT);
1494 if (vdev_get_bootpath(nv, diskguid, outdevid, outpath, 0))
1495 return (ERR_NO_BOOTPATH);
1496 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_GUID, outguid,
1497 DATA_TYPE_UINT64, NULL))
1498 return (ERR_FSYS_CORRUPT);
1499
1500 if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_FEATURES_FOR_READ,
1501 &features, DATA_TYPE_NVLIST, NULL) == 0) {
1502 char *nvp;
1503 char *name = stack;
1504 stack += MAXNAMELEN;
1505
1506 for (nvp = nvlist_next_nvpair(features, NULL);
1507 nvp != NULL;
1508 nvp = nvlist_next_nvpair(features, nvp)) {
1509 zap_attribute_t za;
1510
1511 if (nvpair_name(nvp, name, MAXNAMELEN) != 0)
1512 return (ERR_FSYS_CORRUPT);
1513
1514 za.za_integer_length = 8;
1515 za.za_num_integers = 1;
1516 za.za_first_integer = 1;
1517 za.za_name = name;
1518 if (check_feature(&za, spa_feature_names, stack) != 0)
1519 return (ERR_NEWER_VERSION);
1520 }
1521 }
1522
1523 return (0);
1524 }
1525
1526 /*
1527 * zfs_mount() locates a valid uberblock of the root pool and reads in its MOS
1528 * to the memory address MOS.
1529 *
1530 * Return:
1531 * 1 - success
1532 * 0 - failure
1533 */
1534 int
1535 zfs_mount(void)
1536 {
1537 char *stack, *ub_array;
1538 int label = 0;
1539 uberblock_t *ubbest;
1540 objset_phys_t *osp;
1541 char tmp_bootpath[MAXNAMELEN];
1542 char tmp_devid[MAXNAMELEN];
1543 uint64_t tmp_guid, ashift, version;
1544 uint64_t adjpl = (uint64_t)part_length << SPA_MINBLOCKSHIFT;
1545 int err = errnum; /* preserve previous errnum state */
1546
1547 /* if it's our first time here, zero the best uberblock out */
1548 if (best_drive == 0 && best_part == 0 && find_best_root) {
1549 grub_memset(&current_uberblock, 0, sizeof (uberblock_t));
1550 pool_guid = 0;
1551 }
1552
1553 stackbase = ZFS_SCRATCH;
1554 stack = stackbase;
1555 ub_array = stack;
1556 stack += VDEV_UBERBLOCK_RING;
1557
1558 osp = (objset_phys_t *)stack;
1559 stack += sizeof (objset_phys_t);
1560 adjpl = P2ALIGN(adjpl, (uint64_t)sizeof (vdev_label_t));
1561
1562 for (label = 0; label < VDEV_LABELS; label++) {
1563
1564 /*
1565 * Some El Torito stacks don't give us a size, so
1566 * we end up setting the size to MAXUINT.  Further,
1567 * some of these devices stop working once a single
1568 * read past the end has been issued.  Checking
1569 * for a maximum part_length and skipping the backup
1570 * labels at the end of the slice/partition/device
1571 * avoids breaking down on such devices.
1572 */
1573 if (part_length == MAXUINT && label == 2)
1574 break;
1575
1576 uint64_t sector = vdev_label_start(adjpl,
1577 label) >> SPA_MINBLOCKSHIFT;
1578
1579 /* Read in the uberblock ring (128K). */
1580 if (devread(sector +
1581 ((VDEV_SKIP_SIZE + VDEV_PHYS_SIZE) >> SPA_MINBLOCKSHIFT),
1582 0, VDEV_UBERBLOCK_RING, ub_array) == 0)
1583 continue;
1584
1585 if (check_pool_label(sector, stack, tmp_devid,
1586 tmp_bootpath, &tmp_guid, &ashift, &version))
1587 continue;
1588
1589 if (pool_guid == 0)
1590 pool_guid = tmp_guid;
1591
1592 if ((ubbest = find_bestub(ub_array, ashift, sector)) == NULL ||
1593 zio_read(&ubbest->ub_rootbp, osp, stack) != 0)
1594 continue;
1595
1596 VERIFY_OS_TYPE(osp, DMU_OST_META);
1597
1598 if (version >= SPA_VERSION_FEATURES &&
1599 check_mos_features(&osp->os_meta_dnode, stack) != 0)
1600 continue;
1601
1602 if (find_best_root && ((pool_guid != tmp_guid) ||
1603 vdev_uberblock_compare(ubbest, &(current_uberblock)) <= 0))
1604 continue;
1605
1606 /* Got the MOS. Save it at the memory addr MOS. */
1607 grub_memmove(MOS, &osp->os_meta_dnode, DNODE_SIZE);
1608 grub_memmove(&current_uberblock, ubbest, sizeof (uberblock_t));
1609 grub_memmove(current_bootpath, tmp_bootpath, MAXNAMELEN);
1610 grub_memmove(current_devid, tmp_devid, grub_strlen(tmp_devid));
1611 is_zfs_mount = 1;
1612 return (1);
1613 }
1614
1615 /*
1616 * While some filesystem implementations (e.g. tftp) rely on
1617 * setting and keeping a global errnum, others won't reset it
1618 * and will break when issuing raw reads.  The goal here is simply
1619 * to keep zfs mount attempts from disturbing the previous state.
1620 */
1621 errnum = err;
1622 return (0);
1623 }
1624
1625 /*
1626 * zfs_open() locates a file in the rootpool by following the
1627 * MOS and places the dnode of the file in the memory address DNODE.
1628 *
1629 * Return:
1630 * 1 - success
1631 * 0 - failure
1632 */
1633 int
1634 zfs_open(char *filename)
1635 {
1636 char *stack;
1637 dnode_phys_t *mdn;
1638
1639 file_buf = NULL;
1640 stackbase = ZFS_SCRATCH;
1641 stack = stackbase;
1642
1643 mdn = (dnode_phys_t *)stack;
1644 stack += sizeof (dnode_phys_t);
1645
1646 dnode_mdn = NULL;
1647 dnode_buf = (dnode_phys_t *)stack;
1648 stack += 1<<DNODE_BLOCK_SHIFT;
1649
1650 /*
1651 * menu.lst is placed at the root pool filesystem level;
1652 * do not descend into 'current_bootfs'.
1653 */
1654 if (is_top_dataset_file(filename)) {
1655 if (errnum = get_objset_mdn(MOS, NULL, NULL, mdn, stack))
1656 return (0);
1657
1658 current_bootfs_obj = 0;
1659 } else {
1660 if (current_bootfs[0] == '\0') {
1661 /* Get the default root filesystem object number */
1662 if (errnum = get_default_bootfsobj(MOS,
1663 &current_bootfs_obj, stack))
1664 return (0);
1665
1666 if (errnum = get_objset_mdn(MOS, NULL,
1667 &current_bootfs_obj, mdn, stack))
1668 return (0);
1669 } else {
1670 if (errnum = get_objset_mdn(MOS, current_bootfs,
1671 &current_bootfs_obj, mdn, stack)) {
1672 grub_memset(current_bootfs, 0, MAXNAMELEN);
1673 return (0);
1674 }
1675 }
1676 }
1677
1678 if (dnode_get_path(mdn, filename, DNODE, stack)) {
1679 errnum = ERR_FILE_NOT_FOUND;
1680 return (0);
1681 }
1682
1683 /* get the file size and set the file position to 0 */
1684
1685 /*
1686 * For DMU_OT_SA we will need to locate the SIZE attribute,
1687 * which could be either in the bonus buffer
1688 * or the "spill" block.
1689 */
1690 if (DNODE->dn_bonustype == DMU_OT_SA) {
1691 sa_hdr_phys_t *sahdrp;
1692 int hdrsize;
1693
1694 if (DNODE->dn_bonuslen != 0) {
1695 sahdrp = (sa_hdr_phys_t *)DN_BONUS(DNODE);
1696 } else {
1697 if (DNODE->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
1698 blkptr_t *bp = &DNODE->dn_spill;
1699 void *buf;
1700
1701 buf = (void *)stack;
1702 stack += BP_GET_LSIZE(bp);
1703
1704 /* reset errnum to rawread() failure */
1705 errnum = 0;
1706 if (zio_read(bp, buf, stack) != 0) {
1707 return (0);
1708 }
1709 sahdrp = buf;
1710 } else {
1711 errnum = ERR_FSYS_CORRUPT;
1712 return (0);
1713 }
1714 }
1715 hdrsize = SA_HDR_SIZE(sahdrp);
1716 filemax = *(uint64_t *)((char *)sahdrp + hdrsize +
1717 SA_SIZE_OFFSET);
1718 } else {
1719 filemax = ((znode_phys_t *)DN_BONUS(DNODE))->zp_size;
1720 }
1721 filepos = 0;
1722
1723 dnode_buf = NULL;
1724 return (1);
1725 }
1726
1727 /*
1728 * zfs_read reads in the data blocks pointed by the DNODE.
1729 *
1730 * Return:
1731 * len - the length successfully read in to the buffer
1732 * 0 - failure
1733 */
1734 int
1735 zfs_read(char *buf, int len)
1736 {
1737 char *stack;
1738 int blksz, length, movesize;
1739
1740 if (file_buf == NULL) {
1741 file_buf = stackbase;
1742 stackbase += SPA_MAXBLOCKSIZE;
1743 file_start = file_end = 0;
1744 }
1745 stack = stackbase;
1746
1747 /*
1748 * If offset is in memory, move it into the buffer provided and return.
1749 */
1750 if (filepos >= file_start && filepos+len <= file_end) {
1751 grub_memmove(buf, file_buf + filepos - file_start, len);
1752 filepos += len;
1753 return (len);
1754 }
1755
1756 blksz = DNODE->dn_datablkszsec << SPA_MINBLOCKSHIFT;
1757
1758 /*
1759 * The entire file is too big to fit into the space available.  We
1760 * will need to read it in chunks. This could be optimized to
1761 * read in as large a chunk as there is space available, but for
1762 * now, this only reads in one data block at a time.
1763 */
1764 length = len;
1765 while (length) {
1766 /*
1767 * Find requested blkid and the offset within that block.
1768 */
1769 uint64_t blkid = filepos / blksz;
1770
1771 if (errnum = dmu_read(DNODE, blkid, file_buf, stack))
1772 return (0);
1773
1774 file_start = blkid * blksz;
1775 file_end = file_start + blksz;
1776
1777 movesize = MIN(length, file_end - filepos);
1778
1779 grub_memmove(buf, file_buf + filepos - file_start,
1780 movesize);
1781 buf += movesize;
1782 length -= movesize;
1783 filepos += movesize;
1784 }
1785
1786 return (len);
1787 }
1788
1789 /*
1790 * No-Op
1791 */
1792 int
1793 zfs_embed(int *start_sector, int needed_sectors)
1794 {
1795 return (1);
1796 }
1797
1798 #endif /* FSYS_ZFS */