Print this page
%B
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/grub/grub-0.97/stage2/fsys_zfs.c
+++ new/usr/src/grub/grub-0.97/stage2/fsys_zfs.c
1 1 /*
2 2 * GRUB -- GRand Unified Bootloader
3 3 * Copyright (C) 1999,2000,2001,2002,2003,2004 Free Software Foundation, Inc.
4 4 *
5 5 * This program is free software; you can redistribute it and/or modify
6 6 * it under the terms of the GNU General Public License as published by
7 7 * the Free Software Foundation; either version 2 of the License, or
8 8 * (at your option) any later version.
9 9 *
10 10 * This program is distributed in the hope that it will be useful,
11 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 13 * GNU General Public License for more details.
14 14 *
15 15 * You should have received a copy of the GNU General Public License
16 16 * along with this program; if not, write to the Free Software
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
17 17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 18 */
19 19
20 20 /*
21 21 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
22 22 * Use is subject to license terms.
23 23 */
24 24
25 25 /*
26 26 * Copyright (c) 2012 by Delphix. All rights reserved.
27 + * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
27 28 */
28 29
29 30 /*
30 31 * The zfs plug-in routines for GRUB are:
31 32 *
32 33 * zfs_mount() - locates a valid uberblock of the root pool and reads
33 34 * in its MOS at the memory address MOS.
34 35 *
35 36 * zfs_open() - locates a plain file object by following the MOS
36 37 * and places its dnode at the memory address DNODE.
37 38 *
38 39 * zfs_read() - read in the data blocks pointed by the DNODE.
39 40 *
40 41 * ZFS_SCRATCH is used as a working area.
41 42 *
42 43 * (memory addr) MOS DNODE ZFS_SCRATCH
43 44 * | | |
44 45 * +-------V---------V----------V---------------+
45 46 * memory | | dnode | dnode | scratch |
46 47 * | | 512B | 512B | area |
47 48 * +--------------------------------------------+
48 49 */
49 50
50 51 #ifdef FSYS_ZFS
51 52
52 53 #include "shared.h"
53 54 #include "filesys.h"
54 55 #include "fsys_zfs.h"
55 56
56 57 /* cache for a file block of the currently zfs_open()-ed file */
57 58 static void *file_buf = NULL;
58 59 static uint64_t file_start = 0;
59 60 static uint64_t file_end = 0;
60 61
61 62 /* cache for a dnode block */
62 63 static dnode_phys_t *dnode_buf = NULL;
63 64 static dnode_phys_t *dnode_mdn = NULL;
64 65 static uint64_t dnode_start = 0;
65 66 static uint64_t dnode_end = 0;
66 67
↓ open down ↓ |
30 lines elided |
↑ open up ↑ |
67 68 static uint64_t pool_guid = 0;
68 69 static uberblock_t current_uberblock;
69 70 static char *stackbase;
70 71
71 72 decomp_entry_t decomp_table[ZIO_COMPRESS_FUNCTIONS] =
72 73 {
73 74 {"inherit", 0}, /* ZIO_COMPRESS_INHERIT */
74 75 {"on", lzjb_decompress}, /* ZIO_COMPRESS_ON */
75 76 {"off", 0}, /* ZIO_COMPRESS_OFF */
76 77 {"lzjb", lzjb_decompress}, /* ZIO_COMPRESS_LZJB */
77 - {"empty", 0} /* ZIO_COMPRESS_EMPTY */
78 + {"empty", 0}, /* ZIO_COMPRESS_EMPTY */
79 + {"gzip-1", 0}, /* ZIO_COMPRESS_GZIP_1 */
80 + {"gzip-2", 0}, /* ZIO_COMPRESS_GZIP_2 */
81 + {"gzip-3", 0}, /* ZIO_COMPRESS_GZIP_3 */
82 + {"gzip-4", 0}, /* ZIO_COMPRESS_GZIP_4 */
83 + {"gzip-5", 0}, /* ZIO_COMPRESS_GZIP_5 */
84 + {"gzip-6", 0}, /* ZIO_COMPRESS_GZIP_6 */
85 + {"gzip-7", 0}, /* ZIO_COMPRESS_GZIP_7 */
86 + {"gzip-8", 0}, /* ZIO_COMPRESS_GZIP_8 */
87 + {"gzip-9", 0}, /* ZIO_COMPRESS_GZIP_9 */
88 + {"zle", 0}, /* ZIO_COMPRESS_ZLE */
89 + {"lz4", lz4_decompress} /* ZIO_COMPRESS_LZ4 */
78 90 };
79 91
80 92 static int zio_read_data(blkptr_t *bp, void *buf, char *stack);
81 93
82 94 /*
83 95 * Our own version of bcmp().
84 96 */
85 97 static int
86 98 zfs_bcmp(const void *s1, const void *s2, size_t n)
87 99 {
88 100 const uchar_t *ps1 = s1;
89 101 const uchar_t *ps2 = s2;
90 102
91 103 if (s1 != s2 && n != 0) {
92 104 do {
93 105 if (*ps1++ != *ps2++)
94 106 return (1);
95 107 } while (--n != 0);
96 108 }
97 109
98 110 return (0);
99 111 }
100 112
101 113 /*
102 114 * Our own version of log2(). Same thing as highbit()-1.
103 115 */
104 116 static int
105 117 zfs_log2(uint64_t num)
106 118 {
107 119 int i = 0;
108 120
109 121 while (num > 1) {
110 122 i++;
111 123 num = num >> 1;
112 124 }
113 125
114 126 return (i);
115 127 }
116 128
117 129 /* Checksum Functions */
118 130 static void
119 131 zio_checksum_off(const void *buf, uint64_t size, zio_cksum_t *zcp)
120 132 {
121 133 ZIO_SET_CHECKSUM(zcp, 0, 0, 0, 0);
122 134 }
123 135
124 136 /* Checksum Table and Values */
125 137 zio_checksum_info_t zio_checksum_table[ZIO_CHECKSUM_FUNCTIONS] = {
126 138 {{NULL, NULL}, 0, 0, "inherit"},
127 139 {{NULL, NULL}, 0, 0, "on"},
128 140 {{zio_checksum_off, zio_checksum_off}, 0, 0, "off"},
129 141 {{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 1, "label"},
130 142 {{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 1, "gang_header"},
131 143 {{NULL, NULL}, 0, 0, "zilog"},
132 144 {{fletcher_2_native, fletcher_2_byteswap}, 0, 0, "fletcher2"},
133 145 {{fletcher_4_native, fletcher_4_byteswap}, 1, 0, "fletcher4"},
134 146 {{zio_checksum_SHA256, zio_checksum_SHA256}, 1, 0, "SHA256"},
135 147 {{NULL, NULL}, 0, 0, "zilog2"},
136 148 };
137 149
138 150 /*
139 151 * zio_checksum_verify: Provides support for checksum verification.
140 152 *
141 153 * Fletcher2, Fletcher4, and SHA256 are supported.
142 154 *
143 155 * Return:
144 156 * -1 = Failure
145 157 * 0 = Success
146 158 */
147 159 static int
148 160 zio_checksum_verify(blkptr_t *bp, char *data, int size)
149 161 {
150 162 zio_cksum_t zc = bp->blk_cksum;
151 163 uint32_t checksum = BP_GET_CHECKSUM(bp);
152 164 int byteswap = BP_SHOULD_BYTESWAP(bp);
153 165 zio_eck_t *zec = (zio_eck_t *)(data + size) - 1;
154 166 zio_checksum_info_t *ci = &zio_checksum_table[checksum];
155 167 zio_cksum_t actual_cksum, expected_cksum;
156 168
157 169 /* byteswap is not supported */
158 170 if (byteswap)
159 171 return (-1);
160 172
161 173 if (checksum >= ZIO_CHECKSUM_FUNCTIONS || ci->ci_func[0] == NULL)
162 174 return (-1);
163 175
164 176 if (ci->ci_eck) {
165 177 expected_cksum = zec->zec_cksum;
166 178 zec->zec_cksum = zc;
167 179 ci->ci_func[0](data, size, &actual_cksum);
168 180 zec->zec_cksum = expected_cksum;
169 181 zc = expected_cksum;
170 182
171 183 } else {
172 184 ci->ci_func[byteswap](data, size, &actual_cksum);
173 185 }
174 186
175 187 if ((actual_cksum.zc_word[0] - zc.zc_word[0]) |
176 188 (actual_cksum.zc_word[1] - zc.zc_word[1]) |
177 189 (actual_cksum.zc_word[2] - zc.zc_word[2]) |
178 190 (actual_cksum.zc_word[3] - zc.zc_word[3]))
179 191 return (-1);
180 192
181 193 return (0);
182 194 }
183 195
184 196 /*
185 197 * vdev_label_start returns the physical disk offset (in bytes) of
186 198 * label "l".
187 199 */
188 200 static uint64_t
189 201 vdev_label_start(uint64_t psize, int l)
190 202 {
191 203 return (l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
192 204 0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
193 205 }
194 206
195 207 /*
196 208 * vdev_uberblock_compare takes two uberblock structures and returns an integer
197 209 * indicating the more recent of the two.
198 210 * Return Value = 1 if ub2 is more recent
199 211 * Return Value = -1 if ub1 is more recent
200 212 * The most recent uberblock is determined using its transaction number and
201 213 * timestamp. The uberblock with the highest transaction number is
202 214 * considered "newer". If the transaction numbers of the two blocks match, the
203 215 * timestamps are compared to determine the "newer" of the two.
204 216 */
205 217 static int
206 218 vdev_uberblock_compare(uberblock_t *ub1, uberblock_t *ub2)
207 219 {
208 220 if (ub1->ub_txg < ub2->ub_txg)
209 221 return (-1);
210 222 if (ub1->ub_txg > ub2->ub_txg)
211 223 return (1);
212 224
213 225 if (ub1->ub_timestamp < ub2->ub_timestamp)
214 226 return (-1);
215 227 if (ub1->ub_timestamp > ub2->ub_timestamp)
216 228 return (1);
217 229
218 230 return (0);
219 231 }
220 232
221 233 /*
222 234 * Three pieces of information are needed to verify an uberblock: the magic
223 235 * number, the version number, and the checksum.
224 236 *
225 237 * Return:
226 238 * 0 - Success
227 239 * -1 - Failure
228 240 */
229 241 static int
230 242 uberblock_verify(uberblock_t *uber, uint64_t ub_size, uint64_t offset)
231 243 {
232 244 blkptr_t bp;
233 245
234 246 BP_ZERO(&bp);
235 247 BP_SET_CHECKSUM(&bp, ZIO_CHECKSUM_LABEL);
236 248 BP_SET_BYTEORDER(&bp, ZFS_HOST_BYTEORDER);
237 249 ZIO_SET_CHECKSUM(&bp.blk_cksum, offset, 0, 0, 0);
238 250
239 251 if (zio_checksum_verify(&bp, (char *)uber, ub_size) != 0)
240 252 return (-1);
241 253
242 254 if (uber->ub_magic == UBERBLOCK_MAGIC &&
243 255 SPA_VERSION_IS_SUPPORTED(uber->ub_version))
244 256 return (0);
245 257
246 258 return (-1);
247 259 }
248 260
249 261 /*
250 262 * Find the best uberblock.
251 263 * Return:
252 264 * Success - Pointer to the best uberblock.
253 265 * Failure - NULL
254 266 */
255 267 static uberblock_t *
256 268 find_bestub(char *ub_array, uint64_t ashift, uint64_t sector)
257 269 {
258 270 uberblock_t *ubbest = NULL;
259 271 uberblock_t *ubnext;
260 272 uint64_t offset, ub_size;
261 273 int i;
262 274
263 275 ub_size = VDEV_UBERBLOCK_SIZE(ashift);
264 276
265 277 for (i = 0; i < VDEV_UBERBLOCK_COUNT(ashift); i++) {
266 278 ubnext = (uberblock_t *)ub_array;
267 279 ub_array += ub_size;
268 280 offset = (sector << SPA_MINBLOCKSHIFT) +
269 281 VDEV_UBERBLOCK_OFFSET(ashift, i);
270 282
271 283 if (uberblock_verify(ubnext, ub_size, offset) != 0)
272 284 continue;
273 285
274 286 if (ubbest == NULL ||
275 287 vdev_uberblock_compare(ubnext, ubbest) > 0)
276 288 ubbest = ubnext;
277 289 }
278 290
279 291 return (ubbest);
280 292 }
281 293
282 294 /*
283 295 * Read a block of data based on the gang block address dva,
284 296 * and put its data in buf.
285 297 *
286 298 * Return:
287 299 * 0 - success
288 300 * 1 - failure
289 301 */
290 302 static int
291 303 zio_read_gang(blkptr_t *bp, dva_t *dva, void *buf, char *stack)
292 304 {
293 305 zio_gbh_phys_t *zio_gb;
294 306 uint64_t offset, sector;
295 307 blkptr_t tmpbp;
296 308 int i;
297 309
298 310 zio_gb = (zio_gbh_phys_t *)stack;
299 311 stack += SPA_GANGBLOCKSIZE;
300 312 offset = DVA_GET_OFFSET(dva);
301 313 sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
302 314
303 315 /* read in the gang block header */
304 316 if (devread(sector, 0, SPA_GANGBLOCKSIZE, (char *)zio_gb) == 0) {
305 317 grub_printf("failed to read in a gang block header\n");
306 318 return (1);
307 319 }
308 320
309 321 /* self checksuming the gang block header */
310 322 BP_ZERO(&tmpbp);
311 323 BP_SET_CHECKSUM(&tmpbp, ZIO_CHECKSUM_GANG_HEADER);
312 324 BP_SET_BYTEORDER(&tmpbp, ZFS_HOST_BYTEORDER);
313 325 ZIO_SET_CHECKSUM(&tmpbp.blk_cksum, DVA_GET_VDEV(dva),
314 326 DVA_GET_OFFSET(dva), bp->blk_birth, 0);
315 327 if (zio_checksum_verify(&tmpbp, (char *)zio_gb, SPA_GANGBLOCKSIZE)) {
316 328 grub_printf("failed to checksum a gang block header\n");
317 329 return (1);
318 330 }
319 331
320 332 for (i = 0; i < SPA_GBH_NBLKPTRS; i++) {
321 333 if (zio_gb->zg_blkptr[i].blk_birth == 0)
322 334 continue;
323 335
324 336 if (zio_read_data(&zio_gb->zg_blkptr[i], buf, stack))
325 337 return (1);
326 338 buf += BP_GET_PSIZE(&zio_gb->zg_blkptr[i]);
327 339 }
328 340
329 341 return (0);
330 342 }
331 343
332 344 /*
333 345 * Read in a block of raw data to buf.
334 346 *
335 347 * Return:
336 348 * 0 - success
337 349 * 1 - failure
338 350 */
339 351 static int
340 352 zio_read_data(blkptr_t *bp, void *buf, char *stack)
341 353 {
342 354 int i, psize;
343 355
344 356 psize = BP_GET_PSIZE(bp);
345 357
346 358 /* pick a good dva from the block pointer */
347 359 for (i = 0; i < SPA_DVAS_PER_BP; i++) {
348 360 uint64_t offset, sector;
349 361
350 362 if (bp->blk_dva[i].dva_word[0] == 0 &&
351 363 bp->blk_dva[i].dva_word[1] == 0)
352 364 continue;
353 365
354 366 if (DVA_GET_GANG(&bp->blk_dva[i])) {
355 367 if (zio_read_gang(bp, &bp->blk_dva[i], buf, stack) == 0)
356 368 return (0);
357 369 } else {
358 370 /* read in a data block */
359 371 offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
360 372 sector = DVA_OFFSET_TO_PHYS_SECTOR(offset);
361 373 if (devread(sector, 0, psize, buf) != 0)
362 374 return (0);
363 375 }
364 376 }
365 377
366 378 return (1);
367 379 }
368 380
369 381 /*
370 382 * Read in a block of data, verify its checksum, decompress if needed,
371 383 * and put the uncompressed data in buf.
372 384 *
373 385 * Return:
374 386 * 0 - success
375 387 * errnum - failure
376 388 */
377 389 static int
378 390 zio_read(blkptr_t *bp, void *buf, char *stack)
379 391 {
380 392 int lsize, psize, comp;
381 393 char *retbuf;
382 394
383 395 comp = BP_GET_COMPRESS(bp);
384 396 lsize = BP_GET_LSIZE(bp);
385 397 psize = BP_GET_PSIZE(bp);
386 398
387 399 if ((unsigned int)comp >= ZIO_COMPRESS_FUNCTIONS ||
388 400 (comp != ZIO_COMPRESS_OFF &&
389 401 decomp_table[comp].decomp_func == NULL)) {
390 402 grub_printf("compression algorithm not supported\n");
391 403 return (ERR_FSYS_CORRUPT);
392 404 }
393 405
394 406 if ((char *)buf < stack && ((char *)buf) + lsize > stack) {
395 407 grub_printf("not enough memory allocated\n");
396 408 return (ERR_WONT_FIT);
397 409 }
398 410
399 411 retbuf = buf;
400 412 if (comp != ZIO_COMPRESS_OFF) {
401 413 buf = stack;
402 414 stack += psize;
403 415 }
404 416
↓ open down ↓ |
317 lines elided |
↑ open up ↑ |
405 417 if (zio_read_data(bp, buf, stack) != 0) {
406 418 grub_printf("zio_read_data failed\n");
407 419 return (ERR_FSYS_CORRUPT);
408 420 }
409 421
410 422 if (zio_checksum_verify(bp, buf, psize) != 0) {
411 423 grub_printf("checksum verification failed\n");
412 424 return (ERR_FSYS_CORRUPT);
413 425 }
414 426
415 - if (comp != ZIO_COMPRESS_OFF)
416 - decomp_table[comp].decomp_func(buf, retbuf, psize, lsize);
427 + if (comp != ZIO_COMPRESS_OFF) {
428 + if (decomp_table[comp].decomp_func(buf, retbuf, psize,
429 + lsize) != 0) {
430 + grub_printf("zio_read decompression failed\n");
431 + return (ERR_FSYS_CORRUPT);
432 + }
433 + }
417 434
418 435 return (0);
419 436 }
420 437
421 438 /*
422 439 * Get the block from a block id.
423 440 * push the block onto the stack.
424 441 *
425 442 * Return:
426 443 * 0 - success
427 444 * errnum - failure
428 445 */
429 446 static int
430 447 dmu_read(dnode_phys_t *dn, uint64_t blkid, void *buf, char *stack)
431 448 {
432 449 int idx, level;
433 450 blkptr_t *bp_array = dn->dn_blkptr;
434 451 int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
435 452 blkptr_t *bp, *tmpbuf;
436 453
437 454 bp = (blkptr_t *)stack;
438 455 stack += sizeof (blkptr_t);
439 456
440 457 tmpbuf = (blkptr_t *)stack;
441 458 stack += 1<<dn->dn_indblkshift;
442 459
443 460 for (level = dn->dn_nlevels - 1; level >= 0; level--) {
444 461 idx = (blkid >> (epbs * level)) & ((1<<epbs)-1);
445 462 *bp = bp_array[idx];
446 463 if (level == 0)
447 464 tmpbuf = buf;
448 465 if (BP_IS_HOLE(bp)) {
449 466 grub_memset(buf, 0,
450 467 dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
451 468 break;
452 469 } else if (errnum = zio_read(bp, tmpbuf, stack)) {
453 470 return (errnum);
454 471 }
455 472
456 473 bp_array = tmpbuf;
457 474 }
458 475
459 476 return (0);
460 477 }
461 478
462 479 /*
463 480 * mzap_lookup: Looks up property described by "name" and returns the value
464 481 * in "value".
465 482 *
466 483 * Return:
467 484 * 0 - success
468 485 * errnum - failure
469 486 */
470 487 static int
471 488 mzap_lookup(mzap_phys_t *zapobj, int objsize, const char *name,
472 489 uint64_t *value)
473 490 {
474 491 int i, chunks;
475 492 mzap_ent_phys_t *mzap_ent = zapobj->mz_chunk;
476 493
477 494 chunks = objsize / MZAP_ENT_LEN - 1;
478 495 for (i = 0; i < chunks; i++) {
479 496 if (grub_strcmp(mzap_ent[i].mze_name, name) == 0) {
480 497 *value = mzap_ent[i].mze_value;
481 498 return (0);
482 499 }
483 500 }
484 501
485 502 return (ERR_FSYS_CORRUPT);
486 503 }
487 504
488 505 static uint64_t
489 506 zap_hash(uint64_t salt, const char *name)
490 507 {
491 508 static uint64_t table[256];
492 509 const uint8_t *cp;
493 510 uint8_t c;
494 511 uint64_t crc = salt;
495 512
496 513 if (table[128] == 0) {
497 514 uint64_t *ct;
498 515 int i, j;
499 516 for (i = 0; i < 256; i++) {
500 517 for (ct = table + i, *ct = i, j = 8; j > 0; j--)
501 518 *ct = (*ct >> 1) ^ (-(*ct & 1) &
502 519 ZFS_CRC64_POLY);
503 520 }
504 521 }
505 522
506 523 if (crc == 0 || table[128] != ZFS_CRC64_POLY) {
507 524 errnum = ERR_FSYS_CORRUPT;
508 525 return (0);
509 526 }
510 527
511 528 for (cp = (const uint8_t *)name; (c = *cp) != '\0'; cp++)
512 529 crc = (crc >> 8) ^ table[(crc ^ c) & 0xFF];
513 530
514 531 /*
515 532 * Only use 28 bits, since we need 4 bits in the cookie for the
516 533 * collision differentiator. We MUST use the high bits, since
517 534 * those are the ones that we first pay attention to when
518 535 * choosing the bucket.
519 536 */
520 537 crc &= ~((1ULL << (64 - 28)) - 1);
521 538
522 539 return (crc);
523 540 }
524 541
525 542 /*
526 543 * Only to be used on 8-bit arrays.
527 544 * array_len is actual len in bytes (not encoded le_value_length).
528 545 * buf is null-terminated.
529 546 */
530 547 static int
531 548 zap_leaf_array_equal(zap_leaf_phys_t *l, int blksft, int chunk,
532 549 int array_len, const char *buf)
533 550 {
534 551 int bseen = 0;
535 552
536 553 while (bseen < array_len) {
537 554 struct zap_leaf_array *la =
538 555 &ZAP_LEAF_CHUNK(l, blksft, chunk).l_array;
539 556 int toread = MIN(array_len - bseen, ZAP_LEAF_ARRAY_BYTES);
540 557
541 558 if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
542 559 return (0);
543 560
544 561 if (zfs_bcmp(la->la_array, buf + bseen, toread) != 0)
545 562 break;
546 563 chunk = la->la_next;
547 564 bseen += toread;
548 565 }
549 566 return (bseen == array_len);
550 567 }
551 568
552 569 /*
553 570 * Given a zap_leaf_phys_t, walk thru the zap leaf chunks to get the
554 571 * value for the property "name".
555 572 *
556 573 * Return:
557 574 * 0 - success
558 575 * errnum - failure
559 576 */
560 577 static int
561 578 zap_leaf_lookup(zap_leaf_phys_t *l, int blksft, uint64_t h,
562 579 const char *name, uint64_t *value)
563 580 {
564 581 uint16_t chunk;
565 582 struct zap_leaf_entry *le;
566 583
567 584 /* Verify if this is a valid leaf block */
568 585 if (l->l_hdr.lh_block_type != ZBT_LEAF)
569 586 return (ERR_FSYS_CORRUPT);
570 587 if (l->l_hdr.lh_magic != ZAP_LEAF_MAGIC)
571 588 return (ERR_FSYS_CORRUPT);
572 589
573 590 for (chunk = l->l_hash[LEAF_HASH(blksft, h)];
574 591 chunk != CHAIN_END; chunk = le->le_next) {
575 592
576 593 if (chunk >= ZAP_LEAF_NUMCHUNKS(blksft))
577 594 return (ERR_FSYS_CORRUPT);
578 595
579 596 le = ZAP_LEAF_ENTRY(l, blksft, chunk);
580 597
581 598 /* Verify the chunk entry */
582 599 if (le->le_type != ZAP_CHUNK_ENTRY)
583 600 return (ERR_FSYS_CORRUPT);
584 601
585 602 if (le->le_hash != h)
586 603 continue;
587 604
588 605 if (zap_leaf_array_equal(l, blksft, le->le_name_chunk,
589 606 le->le_name_length, name)) {
590 607
591 608 struct zap_leaf_array *la;
592 609 uint8_t *ip;
593 610
594 611 if (le->le_int_size != 8 || le->le_value_length != 1)
595 612 return (ERR_FSYS_CORRUPT);
596 613
597 614 /* get the uint64_t property value */
598 615 la = &ZAP_LEAF_CHUNK(l, blksft,
599 616 le->le_value_chunk).l_array;
600 617 ip = la->la_array;
601 618
602 619 *value = (uint64_t)ip[0] << 56 | (uint64_t)ip[1] << 48 |
603 620 (uint64_t)ip[2] << 40 | (uint64_t)ip[3] << 32 |
604 621 (uint64_t)ip[4] << 24 | (uint64_t)ip[5] << 16 |
605 622 (uint64_t)ip[6] << 8 | (uint64_t)ip[7];
606 623
607 624 return (0);
608 625 }
609 626 }
610 627
611 628 return (ERR_FSYS_CORRUPT);
612 629 }
613 630
614 631 /*
615 632 * Fat ZAP lookup
616 633 *
617 634 * Return:
618 635 * 0 - success
619 636 * errnum - failure
620 637 */
621 638 static int
622 639 fzap_lookup(dnode_phys_t *zap_dnode, zap_phys_t *zap,
623 640 const char *name, uint64_t *value, char *stack)
624 641 {
625 642 zap_leaf_phys_t *l;
626 643 uint64_t hash, idx, blkid;
627 644 int blksft = zfs_log2(zap_dnode->dn_datablkszsec << DNODE_SHIFT);
628 645
629 646 /* Verify if this is a fat zap header block */
630 647 if (zap->zap_magic != (uint64_t)ZAP_MAGIC ||
631 648 zap->zap_flags != 0)
632 649 return (ERR_FSYS_CORRUPT);
633 650
634 651 hash = zap_hash(zap->zap_salt, name);
635 652 if (errnum)
636 653 return (errnum);
637 654
638 655 /* get block id from index */
639 656 if (zap->zap_ptrtbl.zt_numblks != 0) {
640 657 /* external pointer tables not supported */
641 658 return (ERR_FSYS_CORRUPT);
642 659 }
643 660 idx = ZAP_HASH_IDX(hash, zap->zap_ptrtbl.zt_shift);
644 661 blkid = ((uint64_t *)zap)[idx + (1<<(blksft-3-1))];
645 662
646 663 /* Get the leaf block */
647 664 l = (zap_leaf_phys_t *)stack;
648 665 stack += 1<<blksft;
649 666 if ((1<<blksft) < sizeof (zap_leaf_phys_t))
650 667 return (ERR_FSYS_CORRUPT);
651 668 if (errnum = dmu_read(zap_dnode, blkid, l, stack))
652 669 return (errnum);
653 670
654 671 return (zap_leaf_lookup(l, blksft, hash, name, value));
655 672 }
656 673
657 674 /*
658 675 * Read in the data of a zap object and find the value for a matching
659 676 * property name.
660 677 *
661 678 * Return:
662 679 * 0 - success
663 680 * errnum - failure
664 681 */
665 682 static int
666 683 zap_lookup(dnode_phys_t *zap_dnode, const char *name, uint64_t *val,
667 684 char *stack)
668 685 {
669 686 uint64_t block_type;
670 687 int size;
671 688 void *zapbuf;
672 689
673 690 /* Read in the first block of the zap object data. */
674 691 zapbuf = stack;
675 692 size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
676 693 stack += size;
677 694
678 695 if ((errnum = dmu_read(zap_dnode, 0, zapbuf, stack)) != 0)
679 696 return (errnum);
680 697
681 698 block_type = *((uint64_t *)zapbuf);
682 699
683 700 if (block_type == ZBT_MICRO) {
684 701 return (mzap_lookup(zapbuf, size, name, val));
685 702 } else if (block_type == ZBT_HEADER) {
686 703 /* this is a fat zap */
687 704 return (fzap_lookup(zap_dnode, zapbuf, name,
688 705 val, stack));
689 706 }
690 707
691 708 return (ERR_FSYS_CORRUPT);
692 709 }
693 710
694 711 typedef struct zap_attribute {
695 712 int za_integer_length;
696 713 uint64_t za_num_integers;
697 714 uint64_t za_first_integer;
698 715 char *za_name;
699 716 } zap_attribute_t;
700 717
701 718 typedef int (zap_cb_t)(zap_attribute_t *za, void *arg, char *stack);
702 719
703 720 static int
704 721 zap_iterate(dnode_phys_t *zap_dnode, zap_cb_t *cb, void *arg, char *stack)
705 722 {
706 723 uint32_t size = zap_dnode->dn_datablkszsec << SPA_MINBLOCKSHIFT;
707 724 zap_attribute_t za;
708 725 int i;
709 726 mzap_phys_t *mzp = (mzap_phys_t *)stack;
710 727 stack += size;
711 728
712 729 if ((errnum = dmu_read(zap_dnode, 0, mzp, stack)) != 0)
713 730 return (errnum);
714 731
715 732 /*
716 733 * Iteration over fatzap objects has not yet been implemented.
717 734 * If we encounter a pool in which there are more features for
718 735 * read than can fit inside a microzap (i.e., more than 2048
719 736 * features for read), we can add support for fatzap iteration.
720 737 * For now, fail.
721 738 */
722 739 if (mzp->mz_block_type != ZBT_MICRO) {
723 740 grub_printf("feature information stored in fatzap, pool "
724 741 "version not supported\n");
725 742 return (1);
726 743 }
727 744
728 745 za.za_integer_length = 8;
729 746 za.za_num_integers = 1;
730 747 for (i = 0; i < size / MZAP_ENT_LEN - 1; i++) {
731 748 mzap_ent_phys_t *mzep = &mzp->mz_chunk[i];
732 749 int err;
733 750
734 751 za.za_first_integer = mzep->mze_value;
735 752 za.za_name = mzep->mze_name;
736 753 err = cb(&za, arg, stack);
737 754 if (err != 0)
738 755 return (err);
739 756 }
740 757
741 758 return (0);
742 759 }
743 760
744 761 /*
745 762 * Get the dnode of an object number from the metadnode of an object set.
746 763 *
747 764 * Input
748 765 * mdn - metadnode to get the object dnode
749 766 * objnum - object number for the object dnode
750 767 * buf - data buffer that holds the returning dnode
751 768 * stack - scratch area
752 769 *
753 770 * Return:
754 771 * 0 - success
755 772 * errnum - failure
756 773 */
757 774 static int
758 775 dnode_get(dnode_phys_t *mdn, uint64_t objnum, uint8_t type, dnode_phys_t *buf,
759 776 char *stack)
760 777 {
761 778 uint64_t blkid, blksz; /* the block id this object dnode is in */
762 779 int epbs; /* shift of number of dnodes in a block */
763 780 int idx; /* index within a block */
764 781 dnode_phys_t *dnbuf;
765 782
766 783 blksz = mdn->dn_datablkszsec << SPA_MINBLOCKSHIFT;
767 784 epbs = zfs_log2(blksz) - DNODE_SHIFT;
768 785 blkid = objnum >> epbs;
769 786 idx = objnum & ((1<<epbs)-1);
770 787
771 788 if (dnode_buf != NULL && dnode_mdn == mdn &&
772 789 objnum >= dnode_start && objnum < dnode_end) {
773 790 grub_memmove(buf, &dnode_buf[idx], DNODE_SIZE);
774 791 VERIFY_DN_TYPE(buf, type);
775 792 return (0);
776 793 }
777 794
778 795 if (dnode_buf && blksz == 1<<DNODE_BLOCK_SHIFT) {
779 796 dnbuf = dnode_buf;
780 797 dnode_mdn = mdn;
781 798 dnode_start = blkid << epbs;
782 799 dnode_end = (blkid + 1) << epbs;
783 800 } else {
784 801 dnbuf = (dnode_phys_t *)stack;
785 802 stack += blksz;
786 803 }
787 804
788 805 if (errnum = dmu_read(mdn, blkid, (char *)dnbuf, stack))
789 806 return (errnum);
790 807
791 808 grub_memmove(buf, &dnbuf[idx], DNODE_SIZE);
792 809 VERIFY_DN_TYPE(buf, type);
793 810
794 811 return (0);
795 812 }
796 813
797 814 /*
798 815 * Check if this is a special file that resides at the top
799 816 * dataset of the pool. Currently this is the GRUB menu,
800 817 * boot signature and boot signature backup.
801 818 * str starts with '/'.
802 819 */
803 820 static int
804 821 is_top_dataset_file(char *str)
805 822 {
806 823 char *tptr;
807 824
808 825 if ((tptr = grub_strstr(str, "menu.lst")) &&
809 826 (tptr[8] == '\0' || tptr[8] == ' ') &&
810 827 *(tptr-1) == '/')
811 828 return (1);
812 829
813 830 if (grub_strncmp(str, BOOTSIGN_DIR"/",
814 831 grub_strlen(BOOTSIGN_DIR) + 1) == 0)
815 832 return (1);
816 833
817 834 if (grub_strcmp(str, BOOTSIGN_BACKUP) == 0)
818 835 return (1);
819 836
820 837 return (0);
821 838 }
822 839
823 840 static int
824 841 check_feature(zap_attribute_t *za, void *arg, char *stack)
825 842 {
826 843 const char **names = arg;
827 844 int i;
828 845
829 846 if (za->za_first_integer == 0)
830 847 return (0);
831 848
832 849 for (i = 0; names[i] != NULL; i++) {
833 850 if (grub_strcmp(za->za_name, names[i]) == 0) {
834 851 return (0);
835 852 }
836 853 }
837 854 grub_printf("missing feature for read '%s'\n", za->za_name);
838 855 return (ERR_NEWER_VERSION);
839 856 }
840 857
841 858 /*
842 859 * Get the file dnode for a given file name where mdn is the meta dnode
843 860 * for this ZFS object set. When found, place the file dnode in dn.
844 861 * The 'path' argument will be mangled.
845 862 *
846 863 * Return:
847 864 * 0 - success
848 865 * errnum - failure
849 866 */
850 867 static int
851 868 dnode_get_path(dnode_phys_t *mdn, char *path, dnode_phys_t *dn,
852 869 char *stack)
853 870 {
854 871 uint64_t objnum, version;
855 872 char *cname, ch;
856 873
857 874 if (errnum = dnode_get(mdn, MASTER_NODE_OBJ, DMU_OT_MASTER_NODE,
858 875 dn, stack))
859 876 return (errnum);
860 877
861 878 if (errnum = zap_lookup(dn, ZPL_VERSION_STR, &version, stack))
862 879 return (errnum);
863 880 if (version > ZPL_VERSION)
864 881 return (-1);
865 882
866 883 if (errnum = zap_lookup(dn, ZFS_ROOT_OBJ, &objnum, stack))
867 884 return (errnum);
868 885
869 886 if (errnum = dnode_get(mdn, objnum, DMU_OT_DIRECTORY_CONTENTS,
870 887 dn, stack))
871 888 return (errnum);
872 889
873 890 /* skip leading slashes */
874 891 while (*path == '/')
875 892 path++;
876 893
877 894 while (*path && !grub_isspace(*path)) {
878 895
879 896 /* get the next component name */
880 897 cname = path;
881 898 while (*path && !grub_isspace(*path) && *path != '/')
882 899 path++;
883 900 ch = *path;
884 901 *path = 0; /* ensure null termination */
885 902
886 903 if (errnum = zap_lookup(dn, cname, &objnum, stack))
887 904 return (errnum);
888 905
889 906 objnum = ZFS_DIRENT_OBJ(objnum);
890 907 if (errnum = dnode_get(mdn, objnum, 0, dn, stack))
891 908 return (errnum);
892 909
893 910 *path = ch;
894 911 while (*path == '/')
895 912 path++;
896 913 }
897 914
898 915 /* We found the dnode for this file. Verify if it is a plain file. */
899 916 VERIFY_DN_TYPE(dn, DMU_OT_PLAIN_FILE_CONTENTS);
900 917
901 918 return (0);
902 919 }
903 920
904 921 /*
905 922 * Get the default 'bootfs' property value from the rootpool.
906 923 *
907 924 * Return:
908 925 * 0 - success
909 926 * errnum -failure
910 927 */
911 928 static int
912 929 get_default_bootfsobj(dnode_phys_t *mosmdn, uint64_t *obj, char *stack)
913 930 {
914 931 uint64_t objnum = 0;
915 932 dnode_phys_t *dn = (dnode_phys_t *)stack;
916 933 stack += DNODE_SIZE;
917 934
918 935 if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
919 936 DMU_OT_OBJECT_DIRECTORY, dn, stack))
920 937 return (errnum);
921 938
922 939 /*
923 940 * find the object number for 'pool_props', and get the dnode
924 941 * of the 'pool_props'.
925 942 */
926 943 if (zap_lookup(dn, DMU_POOL_PROPS, &objnum, stack))
927 944 return (ERR_FILESYSTEM_NOT_FOUND);
928 945
929 946 if (errnum = dnode_get(mosmdn, objnum, DMU_OT_POOL_PROPS, dn, stack))
930 947 return (errnum);
931 948
932 949 if (zap_lookup(dn, ZPOOL_PROP_BOOTFS, &objnum, stack))
933 950 return (ERR_FILESYSTEM_NOT_FOUND);
934 951
935 952 if (!objnum)
936 953 return (ERR_FILESYSTEM_NOT_FOUND);
937 954
↓ open down ↓ |
511 lines elided |
↑ open up ↑ |
938 955 *obj = objnum;
939 956 return (0);
940 957 }
941 958
942 959 /*
943 960 * List of pool features that the grub implementation of ZFS supports for
944 961 * read. Note that features that are only required for write do not need
945 962 * to be listed here since grub opens pools in read-only mode.
946 963 */
947 964 static const char *spa_feature_names[] = {
965 + "org.illumos:lz4_compress",
948 966 NULL
949 967 };
950 968
951 969 /*
952 970 * Checks whether the MOS features that are active are supported by this
953 971 * (GRUB's) implementation of ZFS.
954 972 *
955 973 * Return:
956 974 * 0: Success.
957 975 * errnum: Failure.
958 976 */
959 977 static int
960 978 check_mos_features(dnode_phys_t *mosmdn, char *stack)
961 979 {
962 980 uint64_t objnum;
963 981 dnode_phys_t *dn;
964 982 uint8_t error = 0;
965 983
966 984 dn = (dnode_phys_t *)stack;
967 985 stack += DNODE_SIZE;
968 986
969 987 if ((errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
970 988 DMU_OT_OBJECT_DIRECTORY, dn, stack)) != 0)
971 989 return (errnum);
972 990
973 991 /*
974 992 * Find the object number for 'features_for_read' and retrieve its
975 993 * corresponding dnode. Note that we don't check features_for_write
976 994 * because GRUB is not opening the pool for write.
977 995 */
978 996 if ((errnum = zap_lookup(dn, DMU_POOL_FEATURES_FOR_READ, &objnum,
979 997 stack)) != 0)
980 998 return (errnum);
981 999
982 1000 if ((errnum = dnode_get(mosmdn, objnum, DMU_OTN_ZAP_METADATA,
983 1001 dn, stack)) != 0)
984 1002 return (errnum);
985 1003
986 1004 return (zap_iterate(dn, check_feature, spa_feature_names, stack));
987 1005 }
988 1006
/*
 * Given a MOS metadnode, get the metadnode of a given filesystem name (fsname),
 * e.g. pool/rootfs, or a given object number (obj), e.g. the object number
 * of pool/rootfs.
 *
 * If no fsname and no obj are given, return the DSL_DIR metadnode.
 * If fsname is given, return its metadnode and its matching object number.
 * If only obj is given, return the metadnode for this object number.
 *
 * Return:
 *	0 - success
 *	errnum - failure
 */
static int
get_objset_mdn(dnode_phys_t *mosmdn, char *fsname, uint64_t *obj,
    dnode_phys_t *mdn, char *stack)
{
	uint64_t objnum, headobj;
	char *cname, ch;
	blkptr_t *bp;
	objset_phys_t *osp;
	int issnapshot = 0;
	char *snapname;

	/* Caller supplied the object number directly; skip the name walk. */
	if (fsname == NULL && obj) {
		headobj = *obj;
		goto skip;
	}

	/* Start from the MOS object directory ... */
	if (errnum = dnode_get(mosmdn, DMU_POOL_DIRECTORY_OBJECT,
	    DMU_OT_OBJECT_DIRECTORY, mdn, stack))
		return (errnum);

	/* ... and look up the root dataset's DSL directory object. */
	if (errnum = zap_lookup(mdn, DMU_POOL_ROOT_DATASET, &objnum,
	    stack))
		return (errnum);

	if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR, mdn, stack))
		return (errnum);

	/* No name to resolve: return the pool root's head dataset. */
	if (fsname == NULL) {
		headobj =
		    ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
		goto skip;
	}

	/* take out the pool name */
	while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
		fsname++;

	/*
	 * Walk each '/'-separated component of the dataset name,
	 * descending one DSL child directory per iteration.
	 */
	while (*fsname && !grub_isspace(*fsname)) {
		uint64_t childobj;

		while (*fsname == '/')
			fsname++;

		/* Isolate the current component (NUL-terminate in place). */
		cname = fsname;
		while (*fsname && !grub_isspace(*fsname) && *fsname != '/')
			fsname++;
		ch = *fsname;
		*fsname = 0;

		/*
		 * An '@' inside the component marks a snapshot name; cut it
		 * off here and resolve it after the walk (at 'skip' below).
		 */
		snapname = cname;
		while (*snapname && !grub_isspace(*snapname) && *snapname !=
		    '@')
			snapname++;
		if (*snapname == '@') {
			issnapshot = 1;
			*snapname = 0;
		}
		childobj =
		    ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_child_dir_zapobj;
		if (errnum = dnode_get(mosmdn, childobj,
		    DMU_OT_DSL_DIR_CHILD_MAP, mdn, stack))
			return (errnum);

		if (zap_lookup(mdn, cname, &objnum, stack))
			return (ERR_FILESYSTEM_NOT_FOUND);

		if (errnum = dnode_get(mosmdn, objnum, DMU_OT_DSL_DIR,
		    mdn, stack))
			return (errnum);

		/* Restore the characters we overwrote above. */
		*fsname = ch;
		if (issnapshot)
			*snapname = '@';
	}
	headobj = ((dsl_dir_phys_t *)DN_BONUS(mdn))->dd_head_dataset_obj;
	if (obj)
		*obj = headobj;

skip:
	if (errnum = dnode_get(mosmdn, headobj, DMU_OT_DSL_DATASET, mdn, stack))
		return (errnum);
	if (issnapshot) {
		uint64_t snapobj;

		/* Resolve the snapshot name in the dataset's snapshot map. */
		snapobj = ((dsl_dataset_phys_t *)DN_BONUS(mdn))->
		    ds_snapnames_zapobj;

		if (errnum = dnode_get(mosmdn, snapobj,
		    DMU_OT_DSL_DS_SNAP_MAP, mdn, stack))
			return (errnum);
		/* snapname points at the '@'; the name proper follows it. */
		if (zap_lookup(mdn, snapname + 1, &headobj, stack))
			return (ERR_FILESYSTEM_NOT_FOUND);
		if (errnum = dnode_get(mosmdn, headobj,
		    DMU_OT_DSL_DATASET, mdn, stack))
			return (errnum);
		if (obj)
			*obj = headobj;
	}

	/* Read the dataset's objset and hand back its meta-dnode. */
	bp = &((dsl_dataset_phys_t *)DN_BONUS(mdn))->ds_bp;
	osp = (objset_phys_t *)stack;
	stack += sizeof (objset_phys_t);
	if (errnum = zio_read(bp, osp, stack))
		return (errnum);

	grub_memmove((char *)mdn, (char *)&osp->os_meta_dnode, DNODE_SIZE);

	return (0);
}
1111 1129
1112 1130 /*
1113 1131 * For a given XDR packed nvlist, verify the first 4 bytes and move on.
1114 1132 *
1115 1133 * An XDR packed nvlist is encoded as (comments from nvs_xdr_create) :
1116 1134 *
1117 1135 * encoding method/host endian (4 bytes)
1118 1136 * nvl_version (4 bytes)
1119 1137 * nvl_nvflag (4 bytes)
1120 1138 * encoded nvpairs:
1121 1139 * encoded size of the nvpair (4 bytes)
1122 1140 * decoded size of the nvpair (4 bytes)
1123 1141 * name string size (4 bytes)
1124 1142 * name string data (sizeof(NV_ALIGN4(string))
1125 1143 * data type (4 bytes)
1126 1144 * # of elements in the nvpair (4 bytes)
1127 1145 * data
1128 1146 * 2 zero's for the last nvpair
1129 1147 * (end of the entire list) (8 bytes)
1130 1148 *
1131 1149 * Return:
1132 1150 * 0 - success
1133 1151 * 1 - failure
1134 1152 */
1135 1153 static int
1136 1154 nvlist_unpack(char *nvlist, char **out)
1137 1155 {
1138 1156 /* Verify if the 1st and 2nd byte in the nvlist are valid. */
1139 1157 if (nvlist[0] != NV_ENCODE_XDR || nvlist[1] != HOST_ENDIAN)
1140 1158 return (1);
1141 1159
1142 1160 *out = nvlist + 4;
1143 1161 return (0);
1144 1162 }
1145 1163
1146 1164 static char *
1147 1165 nvlist_array(char *nvlist, int index)
1148 1166 {
1149 1167 int i, encode_size;
1150 1168
1151 1169 for (i = 0; i < index; i++) {
1152 1170 /* skip the header, nvl_version, and nvl_nvflag */
1153 1171 nvlist = nvlist + 4 * 2;
1154 1172
1155 1173 while (encode_size = BSWAP_32(*(uint32_t *)nvlist))
1156 1174 nvlist += encode_size; /* goto the next nvpair */
1157 1175
1158 1176 nvlist = nvlist + 4 * 2; /* skip the ending 2 zeros - 8 bytes */
1159 1177 }
1160 1178
1161 1179 return (nvlist);
1162 1180 }
1163 1181
1164 1182 /*
1165 1183 * The nvlist_next_nvpair() function returns a handle to the next nvpair in the
1166 1184 * list following nvpair. If nvpair is NULL, the first pair is returned. If
1167 1185 * nvpair is the last pair in the nvlist, NULL is returned.
1168 1186 */
1169 1187 static char *
1170 1188 nvlist_next_nvpair(char *nvl, char *nvpair)
1171 1189 {
1172 1190 char *cur, *prev;
1173 1191 int encode_size;
1174 1192
1175 1193 if (nvl == NULL)
1176 1194 return (NULL);
1177 1195
1178 1196 if (nvpair == NULL) {
1179 1197 /* skip over nvl_version and nvl_nvflag */
1180 1198 nvpair = nvl + 4 * 2;
1181 1199 } else {
1182 1200 /* skip to the next nvpair */
1183 1201 encode_size = BSWAP_32(*(uint32_t *)nvpair);
1184 1202 nvpair += encode_size;
1185 1203 }
1186 1204
1187 1205 /* 8 bytes of 0 marks the end of the list */
1188 1206 if (*(uint64_t *)nvpair == 0)
1189 1207 return (NULL);
1190 1208
1191 1209 return (nvpair);
1192 1210 }
1193 1211
1194 1212 /*
1195 1213 * This function returns 0 on success and 1 on failure. On success, a string
1196 1214 * containing the name of nvpair is saved in buf.
1197 1215 */
1198 1216 static int
1199 1217 nvpair_name(char *nvp, char *buf, int buflen)
1200 1218 {
1201 1219 int len;
1202 1220
1203 1221 /* skip over encode/decode size */
1204 1222 nvp += 4 * 2;
1205 1223
1206 1224 len = BSWAP_32(*(uint32_t *)nvp);
1207 1225 if (buflen < len + 1)
1208 1226 return (1);
1209 1227
1210 1228 grub_memmove(buf, nvp + 4, len);
1211 1229 buf[len] = '\0';
1212 1230
1213 1231 return (0);
1214 1232 }
1215 1233
1216 1234 /*
1217 1235 * This function retrieves the value of the nvpair in the form of enumerated
1218 1236 * type data_type_t. This is used to determine the appropriate type to pass to
1219 1237 * nvpair_value().
1220 1238 */
1221 1239 static int
1222 1240 nvpair_type(char *nvp)
1223 1241 {
1224 1242 int name_len, type;
1225 1243
1226 1244 /* skip over encode/decode size */
1227 1245 nvp += 4 * 2;
1228 1246
1229 1247 /* skip over name_len */
1230 1248 name_len = BSWAP_32(*(uint32_t *)nvp);
1231 1249 nvp += 4;
1232 1250
1233 1251 /* skip over name */
1234 1252 nvp = nvp + ((name_len + 3) & ~3); /* align */
1235 1253
1236 1254 type = BSWAP_32(*(uint32_t *)nvp);
1237 1255
1238 1256 return (type);
1239 1257 }
1240 1258
1241 1259 static int
1242 1260 nvpair_value(char *nvp, void *val, int valtype, int *nelmp)
1243 1261 {
1244 1262 int name_len, type, slen;
1245 1263 char *strval = val;
1246 1264 uint64_t *intval = val;
1247 1265
1248 1266 /* skip over encode/decode size */
1249 1267 nvp += 4 * 2;
1250 1268
1251 1269 /* skip over name_len */
1252 1270 name_len = BSWAP_32(*(uint32_t *)nvp);
1253 1271 nvp += 4;
1254 1272
1255 1273 /* skip over name */
1256 1274 nvp = nvp + ((name_len + 3) & ~3); /* align */
1257 1275
1258 1276 /* skip over type */
1259 1277 type = BSWAP_32(*(uint32_t *)nvp);
1260 1278 nvp += 4;
1261 1279
1262 1280 if (type == valtype) {
1263 1281 int nelm;
1264 1282
1265 1283 nelm = BSWAP_32(*(uint32_t *)nvp);
1266 1284 if (valtype != DATA_TYPE_BOOLEAN && nelm < 1)
1267 1285 return (1);
1268 1286 nvp += 4;
1269 1287
1270 1288 switch (valtype) {
1271 1289 case DATA_TYPE_BOOLEAN:
1272 1290 return (0);
1273 1291
1274 1292 case DATA_TYPE_STRING:
1275 1293 slen = BSWAP_32(*(uint32_t *)nvp);
1276 1294 nvp += 4;
1277 1295 grub_memmove(strval, nvp, slen);
1278 1296 strval[slen] = '\0';
1279 1297 return (0);
1280 1298
1281 1299 case DATA_TYPE_UINT64:
1282 1300 *intval = BSWAP_64(*(uint64_t *)nvp);
1283 1301 return (0);
1284 1302
1285 1303 case DATA_TYPE_NVLIST:
1286 1304 *(void **)val = (void *)nvp;
1287 1305 return (0);
1288 1306
1289 1307 case DATA_TYPE_NVLIST_ARRAY:
1290 1308 *(void **)val = (void *)nvp;
1291 1309 if (nelmp)
1292 1310 *nelmp = nelm;
1293 1311 return (0);
1294 1312 }
1295 1313 }
1296 1314
1297 1315 return (1);
1298 1316 }
1299 1317
1300 1318 static int
1301 1319 nvlist_lookup_value(char *nvlist, char *name, void *val, int valtype,
1302 1320 int *nelmp)
1303 1321 {
1304 1322 char *nvpair;
1305 1323
1306 1324 for (nvpair = nvlist_next_nvpair(nvlist, NULL);
1307 1325 nvpair != NULL;
1308 1326 nvpair = nvlist_next_nvpair(nvlist, nvpair)) {
1309 1327 int name_len = BSWAP_32(*(uint32_t *)(nvpair + 4 * 2));
1310 1328 char *nvp_name = nvpair + 4 * 3;
1311 1329
1312 1330 if ((grub_strncmp(nvp_name, name, name_len) == 0) &&
1313 1331 nvpair_type(nvpair) == valtype) {
1314 1332 return (nvpair_value(nvpair, val, valtype, nelmp));
1315 1333 }
1316 1334 }
1317 1335 return (1);
1318 1336 }
1319 1337
1320 1338 /*
1321 1339 * Check if this vdev is online and is in a good state.
1322 1340 */
1323 1341 static int
1324 1342 vdev_validate(char *nv)
1325 1343 {
1326 1344 uint64_t ival;
1327 1345
1328 1346 if (nvlist_lookup_value(nv, ZPOOL_CONFIG_OFFLINE, &ival,
1329 1347 DATA_TYPE_UINT64, NULL) == 0 ||
1330 1348 nvlist_lookup_value(nv, ZPOOL_CONFIG_FAULTED, &ival,
1331 1349 DATA_TYPE_UINT64, NULL) == 0 ||
1332 1350 nvlist_lookup_value(nv, ZPOOL_CONFIG_REMOVED, &ival,
1333 1351 DATA_TYPE_UINT64, NULL) == 0)
1334 1352 return (ERR_DEV_VALUES);
1335 1353
1336 1354 return (0);
1337 1355 }
1338 1356
/*
 * Get a valid vdev pathname/devid from the boot device.
 * The caller should already allocate MAXPATHLEN memory for bootpath and devid.
 */
static int
vdev_get_bootpath(char *nv, uint64_t inguid, char *devid, char *bootpath,
    int is_spare)
{
	char type[16];

	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_TYPE, &type, DATA_TYPE_STRING,
	    NULL))
		return (ERR_FSYS_CORRUPT);

	if (grub_strcmp(type, VDEV_TYPE_DISK) == 0) {
		uint64_t guid;

		/* Leaf disk: must be healthy and match the requested guid. */
		if (vdev_validate(nv) != 0)
			return (ERR_NO_BOOTPATH);

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_GUID,
		    &guid, DATA_TYPE_UINT64, NULL) != 0)
			return (ERR_NO_BOOTPATH);

		if (guid != inguid)
			return (ERR_NO_BOOTPATH);

		/* for a spare vdev, pick the disk labeled with "is_spare" */
		if (is_spare) {
			uint64_t spare = 0;
			(void) nvlist_lookup_value(nv, ZPOOL_CONFIG_IS_SPARE,
			    &spare, DATA_TYPE_UINT64, NULL);
			if (!spare)
				return (ERR_NO_BOOTPATH);
		}

		/* A missing physpath/devid entry becomes an empty string. */
		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_PHYS_PATH,
		    bootpath, DATA_TYPE_STRING, NULL) != 0)
			bootpath[0] = '\0';

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_DEVID,
		    devid, DATA_TYPE_STRING, NULL) != 0)
			devid[0] = '\0';

		if (grub_strlen(bootpath) >= MAXPATHLEN ||
		    grub_strlen(devid) >= MAXPATHLEN)
			return (ERR_WONT_FIT);

		return (0);

	} else if (grub_strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
	    grub_strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
	    (is_spare = (grub_strcmp(type, VDEV_TYPE_SPARE) == 0))) {
		/*
		 * Interior vdev (mirror/replacing/spare): recurse into
		 * each child until one yields a usable bootpath.  Note
		 * the deliberate assignment to is_spare in the condition
		 * above - children of a spare vdev are searched with
		 * is_spare set.
		 */
		int nelm, i;
		char *child;

		if (nvlist_lookup_value(nv, ZPOOL_CONFIG_CHILDREN, &child,
		    DATA_TYPE_NVLIST_ARRAY, &nelm))
			return (ERR_FSYS_CORRUPT);

		for (i = 0; i < nelm; i++) {
			char *child_i;

			child_i = nvlist_array(child, i);
			if (vdev_get_bootpath(child_i, inguid, devid,
			    bootpath, is_spare) == 0)
				return (0);
		}
	}

	return (ERR_NO_BOOTPATH);
}
1411 1429
/*
 * Check the disk label information and retrieve needed vdev name-value pairs.
 *
 * Return:
 *	0 - success
 *	ERR_* - failure
 */
static int
check_pool_label(uint64_t sector, char *stack, char *outdevid,
    char *outpath, uint64_t *outguid, uint64_t *outashift, uint64_t *outversion)
{
	vdev_phys_t *vdev;
	uint64_t pool_state, txg = 0;
	char *nvlist, *nv, *features;
	uint64_t diskguid;

	/* Skip the blank/boot-block area at the front of the label. */
	sector += (VDEV_SKIP_SIZE >> SPA_MINBLOCKSHIFT);

	/* Read in the vdev name-value pair list (112K). */
	if (devread(sector, 0, VDEV_PHYS_SIZE, stack) == 0)
		return (ERR_READ);

	vdev = (vdev_phys_t *)stack;
	stack += sizeof (vdev_phys_t);

	if (nvlist_unpack(vdev->vp_nvlist, &nvlist))
		return (ERR_FSYS_CORRUPT);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_STATE, &pool_state,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	if (pool_state == POOL_STATE_DESTROYED)
		return (ERR_FILESYSTEM_NOT_FOUND);

	/* Side effect: records the pool name in global current_rootpool. */
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_NAME,
	    current_rootpool, DATA_TYPE_STRING, NULL))
		return (ERR_FSYS_CORRUPT);

	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_TXG, &txg,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	/* not an active device */
	if (txg == 0)
		return (ERR_NO_BOOTPATH);

	/* Reject pools whose on-disk version is newer than we support. */
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VERSION, outversion,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);
	if (!SPA_VERSION_IS_SUPPORTED(*outversion))
		return (ERR_NEWER_VERSION);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_VDEV_TREE, &nv,
	    DATA_TYPE_NVLIST, NULL))
		return (ERR_FSYS_CORRUPT);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_GUID, &diskguid,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);
	if (nvlist_lookup_value(nv, ZPOOL_CONFIG_ASHIFT, outashift,
	    DATA_TYPE_UINT64, NULL) != 0)
		return (ERR_FSYS_CORRUPT);
	/* Find this disk in the vdev tree and fill in devid/bootpath. */
	if (vdev_get_bootpath(nv, diskguid, outdevid, outpath, 0))
		return (ERR_NO_BOOTPATH);
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_POOL_GUID, outguid,
	    DATA_TYPE_UINT64, NULL))
		return (ERR_FSYS_CORRUPT);

	/*
	 * If the label lists read-side features, each one must be known
	 * to this implementation or the pool cannot be read.
	 */
	if (nvlist_lookup_value(nvlist, ZPOOL_CONFIG_FEATURES_FOR_READ,
	    &features, DATA_TYPE_NVLIST, NULL) == 0) {
		char *nvp;
		char *name = stack;
		stack += MAXNAMELEN;

		for (nvp = nvlist_next_nvpair(features, NULL);
		    nvp != NULL;
		    nvp = nvlist_next_nvpair(features, nvp)) {
			zap_attribute_t za;

			if (nvpair_name(nvp, name, MAXNAMELEN) != 0)
				return (ERR_FSYS_CORRUPT);

			/* Fake up a ZAP attribute so check_feature() runs. */
			za.za_integer_length = 8;
			za.za_num_integers = 1;
			za.za_first_integer = 1;
			za.za_name = name;
			if (check_feature(&za, spa_feature_names, stack) != 0)
				return (ERR_NEWER_VERSION);
		}
	}

	return (0);
}
1504 1522
/*
 * zfs_mount() locates a valid uberblock of the root pool and read in its MOS
 * to the memory address MOS.
 *
 * Return:
 *	1 - success
 *	0 - failure
 */
int
zfs_mount(void)
{
	char *stack, *ub_array;
	int label = 0;
	uberblock_t *ubbest;
	objset_phys_t *osp;
	char tmp_bootpath[MAXNAMELEN];
	char tmp_devid[MAXNAMELEN];
	uint64_t tmp_guid, ashift, version;
	uint64_t adjpl = (uint64_t)part_length << SPA_MINBLOCKSHIFT;
	int err = errnum; /* preserve previous errnum state */

	/* if it's our first time here, zero the best uberblock out */
	if (best_drive == 0 && best_part == 0 && find_best_root) {
		grub_memset(&current_uberblock, 0, sizeof (uberblock_t));
		pool_guid = 0;
	}

	/* Lay out the scratch area: uberblock ring, then an objset buffer. */
	stackbase = ZFS_SCRATCH;
	stack = stackbase;
	ub_array = stack;
	stack += VDEV_UBERBLOCK_RING;

	osp = (objset_phys_t *)stack;
	stack += sizeof (objset_phys_t);
	adjpl = P2ALIGN(adjpl, (uint64_t)sizeof (vdev_label_t));

	for (label = 0; label < VDEV_LABELS; label++) {

		/*
		 * some eltorito stacks don't give us a size and
		 * we end up setting the size to MAXUINT, further
		 * some of these devices stop working once a single
		 * read past the end has been issued. Checking
		 * for a maximum part_length and skipping the backup
		 * labels at the end of the slice/partition/device
		 * avoids breaking down on such devices.
		 */
		if (part_length == MAXUINT && label == 2)
			break;

		uint64_t sector = vdev_label_start(adjpl,
		    label) >> SPA_MINBLOCKSHIFT;

		/* Read in the uberblock ring (128K). */
		if (devread(sector +
		    ((VDEV_SKIP_SIZE + VDEV_PHYS_SIZE) >> SPA_MINBLOCKSHIFT),
		    0, VDEV_UBERBLOCK_RING, ub_array) == 0)
			continue;

		if (check_pool_label(sector, stack, tmp_devid,
		    tmp_bootpath, &tmp_guid, &ashift, &version))
			continue;

		if (pool_guid == 0)
			pool_guid = tmp_guid;

		/* Pick the newest valid uberblock and read its MOS. */
		if ((ubbest = find_bestub(ub_array, ashift, sector)) == NULL ||
		    zio_read(&ubbest->ub_rootbp, osp, stack) != 0)
			continue;

		VERIFY_OS_TYPE(osp, DMU_OST_META);

		/* Feature-flag pools must pass the read-feature check. */
		if (version >= SPA_VERSION_FEATURES &&
		    check_mos_features(&osp->os_meta_dnode, stack) != 0)
			continue;

		/* When hunting for the best root, keep only improvements. */
		if (find_best_root && ((pool_guid != tmp_guid) ||
		    vdev_uberblock_compare(ubbest, &(current_uberblock)) <= 0))
			continue;

		/* Got the MOS. Save it at the memory addr MOS. */
		grub_memmove(MOS, &osp->os_meta_dnode, DNODE_SIZE);
		grub_memmove(&current_uberblock, ubbest, sizeof (uberblock_t));
		grub_memmove(current_bootpath, tmp_bootpath, MAXNAMELEN);
		grub_memmove(current_devid, tmp_devid, grub_strlen(tmp_devid));
		is_zfs_mount = 1;
		return (1);
	}

	/*
	 * While some fs impls. (tftp) rely on setting and keeping
	 * global errnums set, others won't reset it and will break
	 * when issuing rawreads. The goal here is to simply not
	 * have zfs mount attempts impact the previous state.
	 */
	errnum = err;
	return (0);
}
1603 1621
/*
 * zfs_open() locates a file in the rootpool by following the
 * MOS and places the dnode of the file in the memory address DNODE.
 *
 * Return:
 *	1 - success
 *	0 - failure
 */
int
zfs_open(char *filename)
{
	char *stack;
	dnode_phys_t *mdn;

	file_buf = NULL;
	stackbase = ZFS_SCRATCH;
	stack = stackbase;

	mdn = (dnode_phys_t *)stack;
	stack += sizeof (dnode_phys_t);

	/* Reserve a dnode-block-sized cache for dnode_get_path(). */
	dnode_mdn = NULL;
	dnode_buf = (dnode_phys_t *)stack;
	stack += 1<<DNODE_BLOCK_SHIFT;

	/*
	 * menu.lst is placed at the root pool filesystem level,
	 * do not goto 'current_bootfs'.
	 */
	if (is_top_dataset_file(filename)) {
		if (errnum = get_objset_mdn(MOS, NULL, NULL, mdn, stack))
			return (0);

		current_bootfs_obj = 0;
	} else {
		if (current_bootfs[0] == '\0') {
			/* Get the default root filesystem object number */
			if (errnum = get_default_bootfsobj(MOS,
			    &current_bootfs_obj, stack))
				return (0);

			if (errnum = get_objset_mdn(MOS, NULL,
			    &current_bootfs_obj, mdn, stack))
				return (0);
		} else {
			if (errnum = get_objset_mdn(MOS, current_bootfs,
			    &current_bootfs_obj, mdn, stack)) {
				/* Forget a bad bootfs name for next time. */
				grub_memset(current_bootfs, 0, MAXNAMELEN);
				return (0);
			}
		}
	}

	if (dnode_get_path(mdn, filename, DNODE, stack)) {
		errnum = ERR_FILE_NOT_FOUND;
		return (0);
	}

	/* get the file size and set the file position to 0 */

	/*
	 * For DMU_OT_SA we will need to locate the SIZE attribute,
	 * which could be either in the bonus buffer
	 * or the "spill" block.
	 */
	if (DNODE->dn_bonustype == DMU_OT_SA) {
		sa_hdr_phys_t *sahdrp;
		int hdrsize;

		if (DNODE->dn_bonuslen != 0) {
			sahdrp = (sa_hdr_phys_t *)DN_BONUS(DNODE);
		} else {
			if (DNODE->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
				blkptr_t *bp = &DNODE->dn_spill;
				void *buf;

				buf = (void *)stack;
				stack += BP_GET_LSIZE(bp);

				/* reset errnum to rawread() failure */
				errnum = 0;
				if (zio_read(bp, buf, stack) != 0) {
					return (0);
				}
				sahdrp = buf;
			} else {
				errnum = ERR_FSYS_CORRUPT;
				return (0);
			}
		}
		/* The SIZE attribute lives right after the SA header. */
		hdrsize = SA_HDR_SIZE(sahdrp);
		filemax = *(uint64_t *)((char *)sahdrp + hdrsize +
		    SA_SIZE_OFFSET);
	} else {
		filemax = ((znode_phys_t *)DN_BONUS(DNODE))->zp_size;
	}
	filepos = 0;

	dnode_buf = NULL;
	return (1);
}
1705 1723
/*
 * zfs_read reads in the data blocks pointed by the DNODE.
 *
 * Return:
 *	len - the length successfully read in to the buffer
 *	0 - failure
 */
int
zfs_read(char *buf, int len)
{
	char *stack;
	int blksz, length, movesize;

	/* Lazily reserve a one-block file cache off the scratch area. */
	if (file_buf == NULL) {
		file_buf = stackbase;
		stackbase += SPA_MAXBLOCKSIZE;
		file_start = file_end = 0;
	}
	stack = stackbase;

	/*
	 * If offset is in memory, move it into the buffer provided and return.
	 */
	if (filepos >= file_start && filepos+len <= file_end) {
		grub_memmove(buf, file_buf + filepos - file_start, len);
		filepos += len;
		return (len);
	}

	blksz = DNODE->dn_datablkszsec << SPA_MINBLOCKSHIFT;

	/*
	 * Entire Dnode is too big to fit into the space available. We
	 * will need to read it in chunks. This could be optimized to
	 * read in as large a chunk as there is space available, but for
	 * now, this only reads in one data block at a time.
	 */
	length = len;
	while (length) {
		/*
		 * Find requested blkid and the offset within that block.
		 */
		uint64_t blkid = filepos / blksz;

		if (errnum = dmu_read(DNODE, blkid, file_buf, stack))
			return (0);

		/* Remember which file range the cache now holds. */
		file_start = blkid * blksz;
		file_end = file_start + blksz;

		/* Copy out only the portion the caller still needs. */
		movesize = MIN(length, file_end - filepos);

		grub_memmove(buf, file_buf + filepos - file_start,
		    movesize);
		buf += movesize;
		length -= movesize;
		filepos += movesize;
	}

	return (len);
}
1767 1785
/*
 * No-Op: ZFS reserves no embedding area, so any request trivially succeeds.
 */
int
zfs_embed(int *start_sector, int needed_sectors)
{
	(void) start_sector;
	(void) needed_sectors;

	return (1);
}
1776 1794
1777 1795 #endif /* FSYS_ZFS */
↓ open down ↓ |
820 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX