4334 Improve ZFS N-way mirror read performance
--- old/usr/src/uts/common/fs/zfs/vdev_mirror.c
+++ new/usr/src/uts/common/fs/zfs/vdev_mirror.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
23 23 * Use is subject to license terms.
24 24 */
25 25
26 26 /*
27 27 * Copyright (c) 2013 by Delphix. All rights reserved.
28 + * Copyright (c) 2013 Steven Hartland. All rights reserved.
28 29 */
29 30
30 31 #include <sys/zfs_context.h>
31 32 #include <sys/spa.h>
32 33 #include <sys/vdev_impl.h>
33 34 #include <sys/zio.h>
34 35 #include <sys/fs/zfs.h>
35 36
36 37 /*
37 38 * Virtual device vector for mirroring.
38 39 */
39 40
40 41 typedef struct mirror_child {
41 42 vdev_t *mc_vd;
42 43 uint64_t mc_offset;
43 44 int mc_error;
45 + int mc_load;
44 46 uint8_t mc_tried;
45 47 uint8_t mc_skipped;
46 48 uint8_t mc_speculative;
47 49 } mirror_child_t;
48 50
49 51 typedef struct mirror_map {
52 + int *mm_preferred;
53 + int mm_preferred_cnt;
50 54 int mm_children;
51 - int mm_replacing;
52 - int mm_preferred;
53 - int mm_root;
54 - mirror_child_t mm_child[1];
55 + boolean_t mm_replacing;
56 + boolean_t mm_root;
57 + mirror_child_t mm_child[];
55 58 } mirror_map_t;
56 59
57 -int vdev_mirror_shift = 21;
60 +int zfs_vdev_mirror_shift = 21;
61 +
62 +/*
63 + * The load configuration settings below are tuned by default for
64 + * the case where all devices are of the same rotational type.
65 + *
  66 + * If there is a mixture of rotating and non-rotating media, setting
  67 + * non_rotating_seek_inc to 0 may well provide better results, as it
  68 + * will direct more reads to the non-rotating vdevs, which are likely
  69 + * to offer higher performance.
70 + */
71 +
72 +/* Rotating media load calculation configuration. */
73 +/* Rotating media load increment for non-seeking I/O's. */
74 +int zfs_vdev_mirror_rotating_inc = 0;
75 +
76 +/* Rotating media load increment for seeking I/O's. */
77 +int zfs_vdev_mirror_rotating_seek_inc = 5;
78 +
79 +/*
80 + * Offset in bytes from the last I/O which triggers a reduced rotating media
81 + * seek increment.
82 + */
83 +int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;
84 +
85 +/* Non-rotating media load calculation configuration. */
86 +/* Non-rotating media load increment for non-seeking I/O's. */
87 +int zfs_vdev_mirror_non_rotating_inc = 0;
88 +
89 +/* Non-rotating media load increment for seeking I/O's. */
90 +int zfs_vdev_mirror_non_rotating_seek_inc = 1;
91 +
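These load increments are ordinary global ints in the zfs module, so on a stock illumos kernel they should be tunable without a rebuild. A hedged example for the mixed-media case described above (assumes the usual /etc/system and mdb mechanisms; variable names are those introduced by this change):

    * /etc/system: send more reads to the non-rotating vdevs
    * (takes effect on reboot).
    set zfs:zfs_vdev_mirror_non_rotating_seek_inc = 0

    # Or live on a running system, via mdb -kw:
    echo 'zfs_vdev_mirror_non_rotating_seek_inc/W 0' | mdb -kw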
92 +static inline size_t
93 +vdev_mirror_map_size(int children)
94 +{
95 + return (offsetof(mirror_map_t, mm_child[children]) +
96 + sizeof (int) * children);
97 +}
98 +
99 +static inline mirror_map_t *
100 +vdev_mirror_map_alloc(int children, boolean_t replacing, boolean_t root)
101 +{
102 + mirror_map_t *mm;
103 +
104 + mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
105 + mm->mm_children = children;
106 + mm->mm_replacing = replacing;
107 + mm->mm_root = root;
108 + mm->mm_preferred = (int *)((uintptr_t)mm +
109 + offsetof(mirror_map_t, mm_child[children]));
110 +
111 + return (mm);
112 +}
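Per vdev_mirror_map_size() above, each map is a single allocation laid out roughly as follows (sketch for illustration, N = children):

    [ fixed mirror_map_t fields | mm_child[0 .. N-1] | mm_preferred[0 .. N-1] ]
      <----- offsetof(mirror_map_t, mm_child[N]) ----> <-- N * sizeof (int) -->

mm_preferred is pointed just past the flexible mm_child[] array, so the whole map, including the preferred-index scratch space used by vdev_mirror_child_select(), is released with a single kmem_free() in vdev_mirror_map_free() below.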
58 113
59 114 static void
60 115 vdev_mirror_map_free(zio_t *zio)
61 116 {
62 117 mirror_map_t *mm = zio->io_vsd;
63 118
64 - kmem_free(mm, offsetof(mirror_map_t, mm_child[mm->mm_children]));
119 + kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
65 120 }
66 121
67 122 static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
68 123 vdev_mirror_map_free,
69 124 zio_vsd_default_cksum_report
70 125 };
71 126
127 +/*
128 + * Calculate and return the load of the specified vdev adjusted for a zio at
129 + * the given offset.
130 + *
 131 + * The calculation takes into account the vdev's:
132 + * 1. Rotation rate
133 + * 2. The distance of zio_offset from the last queued request
134 + */
135 +static int
136 +vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
137 +{
138 + uint64_t lastoffset;
139 + int load;
140 +
141 + /* All DVAs have equal weight at the root. */
142 + if (mm->mm_root)
143 + return (INT_MAX);
144 +
145 + /*
 146 +	 * We don't return INT_MAX if the device is resilvering (i.e.
 147 +	 * vdev_resilver_txg != 0): when tested, overall performance was
 148 +	 * slightly worse with that penalty than without it.
149 + */
150 +
151 + /* Standard load based on pending queue length. */
152 + load = vdev_queue_length(vd);
153 + lastoffset = vdev_queue_last_queued_offset(vd);
154 +
155 + if (vd->vdev_rotation_rate == VDEV_RATE_NON_ROTATING) {
156 + /* Non-rotating media. */
157 + if (lastoffset == zio_offset)
158 + return (load + zfs_vdev_mirror_non_rotating_inc);
159 +
160 + /*
161 + * Apply a seek penalty even for non-rotating devices as
 162 +	 * sequential I/O's can be aggregated into fewer operations
163 + * on the device, thus avoiding unnecessary per-command
164 + * overhead and boosting performance.
165 + */
166 + return (load + zfs_vdev_mirror_non_rotating_seek_inc);
167 + }
168 +
169 + /* Rotating media I/O's which directly follow the last I/O. */
170 + if (lastoffset == zio_offset)
171 + return (load + zfs_vdev_mirror_rotating_inc);
172 +
173 + /*
 174 +	 * Apply half the seek increment to I/O's within the seek offset
 175 +	 * of the last I/O queued to this vdev, as they should incur less
 176 +	 * of a seek penalty.
177 + */
178 + if (ABS(lastoffset - zio_offset) <
179 + zfs_vdev_mirror_rotating_seek_offset)
180 + return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));
181 +
182 + /* Apply the full seek increment to all other I/O's. */
183 + return (load + zfs_vdev_mirror_rotating_seek_inc);
184 +}
185 +
186 +
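For illustration, plugging the default increments above into this function with some hypothetical queue depths (C integer division):

    rotating child, 4 pending I/O's, next I/O 512 KB away:   4 + 5/2 = 6
    rotating child, 4 pending I/O's, exactly sequential:     4 + 0   = 4
    non-rotating child, 6 pending I/O's, non-sequential:     6 + 1   = 7

vdev_mirror_child_select() below would therefore prefer the sequential rotating child.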
72 187 static mirror_map_t *
73 -vdev_mirror_map_alloc(zio_t *zio)
188 +vdev_mirror_map_init(zio_t *zio)
74 189 {
75 190 mirror_map_t *mm = NULL;
76 191 mirror_child_t *mc;
77 192 vdev_t *vd = zio->io_vd;
78 - int c, d;
193 + int c;
79 194
80 195 if (vd == NULL) {
81 196 dva_t *dva = zio->io_bp->blk_dva;
82 197 spa_t *spa = zio->io_spa;
83 198
84 - c = BP_GET_NDVAS(zio->io_bp);
85 -
86 - mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_SLEEP);
87 - mm->mm_children = c;
88 - mm->mm_replacing = B_FALSE;
89 - mm->mm_preferred = spa_get_random(c);
90 - mm->mm_root = B_TRUE;
91 -
92 - /*
93 - * Check the other, lower-index DVAs to see if they're on
94 - * the same vdev as the child we picked. If they are, use
95 - * them since they are likely to have been allocated from
96 - * the primary metaslab in use at the time, and hence are
97 - * more likely to have locality with single-copy data.
98 - */
99 - for (c = mm->mm_preferred, d = c - 1; d >= 0; d--) {
100 - if (DVA_GET_VDEV(&dva[d]) == DVA_GET_VDEV(&dva[c]))
101 - mm->mm_preferred = d;
102 - }
103 -
199 + mm = vdev_mirror_map_alloc(BP_GET_NDVAS(zio->io_bp), B_FALSE,
200 + B_TRUE);
104 201 for (c = 0; c < mm->mm_children; c++) {
105 202 mc = &mm->mm_child[c];
106 -
107 203 mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
108 204 mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
109 205 }
110 206 } else {
111 - c = vd->vdev_children;
112 -
113 - mm = kmem_zalloc(offsetof(mirror_map_t, mm_child[c]), KM_SLEEP);
114 - mm->mm_children = c;
115 - mm->mm_replacing = (vd->vdev_ops == &vdev_replacing_ops ||
116 - vd->vdev_ops == &vdev_spare_ops);
117 - mm->mm_preferred = mm->mm_replacing ? 0 :
118 - (zio->io_offset >> vdev_mirror_shift) % c;
119 - mm->mm_root = B_FALSE;
120 -
207 + mm = vdev_mirror_map_alloc(vd->vdev_children,
208 + (vd->vdev_ops == &vdev_replacing_ops ||
209 + vd->vdev_ops == &vdev_spare_ops), B_FALSE);
121 210 for (c = 0; c < mm->mm_children; c++) {
122 211 mc = &mm->mm_child[c];
123 212 mc->mc_vd = vd->vdev_child[c];
124 213 mc->mc_offset = zio->io_offset;
125 214 }
126 215 }
127 216
128 217 zio->io_vsd = mm;
129 218 zio->io_vsd_ops = &vdev_mirror_vsd_ops;
130 219 return (mm);
131 220 }
132 221
133 222 static int
134 223 vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
135 224 uint64_t *ashift)
136 225 {
137 226 int numerrors = 0;
138 227 int lasterror = 0;
139 228
140 229 if (vd->vdev_children == 0) {
141 230 vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
142 231 return (SET_ERROR(EINVAL));
143 232 }
144 233
145 234 vdev_open_children(vd);
146 235
147 236 for (int c = 0; c < vd->vdev_children; c++) {
148 237 vdev_t *cvd = vd->vdev_child[c];
149 238
150 239 if (cvd->vdev_open_error) {
151 240 lasterror = cvd->vdev_open_error;
152 241 numerrors++;
153 242 continue;
154 243 }
155 244
156 245 *asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
157 246 *max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
158 247 *ashift = MAX(*ashift, cvd->vdev_ashift);
159 248 }
160 249
161 250 if (numerrors == vd->vdev_children) {
162 251 vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
163 252 return (lasterror);
164 253 }
165 254
166 255 return (0);
167 256 }
168 257
169 258 static void
170 259 vdev_mirror_close(vdev_t *vd)
171 260 {
172 261 for (int c = 0; c < vd->vdev_children; c++)
173 262 vdev_close(vd->vdev_child[c]);
174 263 }
175 264
176 265 static void
177 266 vdev_mirror_child_done(zio_t *zio)
178 267 {
179 268 mirror_child_t *mc = zio->io_private;
180 269
181 270 mc->mc_error = zio->io_error;
182 271 mc->mc_tried = 1;
183 272 mc->mc_skipped = 0;
184 273 }
185 274
186 275 static void
187 276 vdev_mirror_scrub_done(zio_t *zio)
188 277 {
189 278 mirror_child_t *mc = zio->io_private;
190 279
191 280 if (zio->io_error == 0) {
192 281 zio_t *pio;
193 282
194 283 mutex_enter(&zio->io_lock);
195 284 while ((pio = zio_walk_parents(zio)) != NULL) {
196 285 mutex_enter(&pio->io_lock);
197 286 ASSERT3U(zio->io_size, >=, pio->io_size);
198 287 bcopy(zio->io_data, pio->io_data, pio->io_size);
199 288 mutex_exit(&pio->io_lock);
200 289 }
201 290 mutex_exit(&zio->io_lock);
202 291 }
203 292
204 293 zio_buf_free(zio->io_data, zio->io_size);
205 294
206 295 mc->mc_error = zio->io_error;
207 296 mc->mc_tried = 1;
208 297 mc->mc_skipped = 0;
209 298 }
210 299
211 300 /*
212 - * Try to find a child whose DTL doesn't contain the block we want to read.
301 + * Check the other, lower-index DVAs to see if they're on the same
302 + * vdev as the child we picked. If they are, use them since they
303 + * are likely to have been allocated from the primary metaslab in
304 + * use at the time, and hence are more likely to have locality with
305 + * single-copy data.
306 + */
307 +static int
308 +vdev_mirror_dva_select(zio_t *zio, int preferred)
309 +{
310 + dva_t *dva = zio->io_bp->blk_dva;
311 + int c;
312 +
313 + for (c = preferred - 1; c >= 0; c--) {
314 + if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
315 + preferred = c;
316 + }
317 + return (preferred);
318 +}
319 +
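To trace the loop above with hypothetical placements: if dva[0] and dva[2] sit on vdev A and dva[1] on vdev B, and preferred arrives as 2, then c = 1 fails the test (vdev B) but c = 0 matches, so preferred ends up 0 and the read stays on vdev A's lowest-index DVA.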
320 +static int
321 +vdev_mirror_preferred_child_randomize(zio_t *zio)
322 +{
323 + mirror_map_t *mm = zio->io_vsd;
324 + int p;
325 +
326 + if (mm->mm_root) {
327 + p = spa_get_random(mm->mm_preferred_cnt);
328 + return (vdev_mirror_dva_select(zio, mm->mm_preferred[p]));
329 + }
330 +
331 + /*
332 + * To ensure we don't always favour the first matching vdev,
333 + * which could lead to wear leveling issues on SSD's, we
 334 +	 * use the I/O offset as a pseudo-random index into the vdevs
335 + * which have the lowest load.
336 + */
337 + p = (zio->io_offset >> zfs_vdev_mirror_shift) % mm->mm_preferred_cnt;
338 + return (mm->mm_preferred[p]);
339 +}
340 +
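A note on the arithmetic above: with the default zfs_vdev_mirror_shift of 21, io_offset >> 21 numbers 2 MB regions of the device, so with two equally loaded children even regions select mm_preferred[0] and odd regions mm_preferred[1]; a sequential reader alternates between the mirror sides every 2 MB rather than pinning a single device.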
341 +/*
 342 + * Try to find a vdev whose DTL doesn't contain the block we want to read,
 343 + * preferring vdevs based on their determined load.
344 + *
213 345 * If we can't, try the read on any vdev we haven't already tried.
214 346 */
215 347 static int
216 348 vdev_mirror_child_select(zio_t *zio)
217 349 {
218 350 mirror_map_t *mm = zio->io_vsd;
219 - mirror_child_t *mc;
220 351 uint64_t txg = zio->io_txg;
221 - int i, c;
352 + int c, lowest_load;
222 353
223 354 ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);
224 355
225 - /*
226 - * Try to find a child whose DTL doesn't contain the block to read.
227 - * If a child is known to be completely inaccessible (indicated by
228 - * vdev_readable() returning B_FALSE), don't even try.
229 - */
230 - for (i = 0, c = mm->mm_preferred; i < mm->mm_children; i++, c++) {
231 - if (c >= mm->mm_children)
232 - c = 0;
356 + lowest_load = INT_MAX;
357 + mm->mm_preferred_cnt = 0;
358 + for (c = 0; c < mm->mm_children; c++) {
359 + mirror_child_t *mc;
360 +
233 361 mc = &mm->mm_child[c];
234 362 if (mc->mc_tried || mc->mc_skipped)
235 363 continue;
364 +
236 365 if (!vdev_readable(mc->mc_vd)) {
237 366 mc->mc_error = SET_ERROR(ENXIO);
238 367 mc->mc_tried = 1; /* don't even try */
239 368 mc->mc_skipped = 1;
240 369 continue;
241 370 }
242 - if (!vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1))
243 - return (c);
244 - mc->mc_error = SET_ERROR(ESTALE);
245 - mc->mc_skipped = 1;
246 - mc->mc_speculative = 1;
371 +
372 + if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
373 + mc->mc_error = SET_ERROR(ESTALE);
374 + mc->mc_skipped = 1;
375 + mc->mc_speculative = 1;
376 + continue;
377 + }
378 +
379 + mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
380 + if (mc->mc_load > lowest_load)
381 + continue;
382 +
383 + if (mc->mc_load < lowest_load) {
384 + lowest_load = mc->mc_load;
385 + mm->mm_preferred_cnt = 0;
386 + }
387 + mm->mm_preferred[mm->mm_preferred_cnt] = c;
388 + mm->mm_preferred_cnt++;
389 + }
390 +
391 + if (mm->mm_preferred_cnt == 1) {
392 + vdev_queue_register_last_queued_offset(
393 + mm->mm_child[mm->mm_preferred[0]].mc_vd, zio);
394 + return (mm->mm_preferred[0]);
395 + }
396 +
397 + if (mm->mm_preferred_cnt > 1) {
398 + int c = vdev_mirror_preferred_child_randomize(zio);
399 +
400 + vdev_queue_register_last_queued_offset(mm->mm_child[c].mc_vd,
401 + zio);
402 + return (c);
247 403 }
248 404
249 405 /*
250 406 * Every device is either missing or has this txg in its DTL.
251 407 * Look for any child we haven't already tried before giving up.
252 408 */
253 - for (c = 0; c < mm->mm_children; c++)
254 - if (!mm->mm_child[c].mc_tried)
409 + for (c = 0; c < mm->mm_children; c++) {
410 + if (!mm->mm_child[c].mc_tried) {
411 + vdev_queue_register_last_queued_offset(
412 + mm->mm_child[c].mc_vd, zio);
255 413 return (c);
414 + }
415 + }
256 416
257 417 /*
258 418 * Every child failed. There's no place left to look.
259 419 */
260 420 return (-1);
261 421 }
262 422
263 423 static int
264 424 vdev_mirror_io_start(zio_t *zio)
265 425 {
266 426 mirror_map_t *mm;
267 427 mirror_child_t *mc;
268 428 int c, children;
269 429
270 - mm = vdev_mirror_map_alloc(zio);
430 + mm = vdev_mirror_map_init(zio);
271 431
272 432 if (zio->io_type == ZIO_TYPE_READ) {
273 433 if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing) {
274 434 /*
275 435 * For scrubbing reads we need to allocate a read
276 436 * buffer for each child and issue reads to all
277 437 * children. If any child succeeds, it will copy its
278 438 * data into zio->io_data in vdev_mirror_scrub_done.
279 439 */
280 440 for (c = 0; c < mm->mm_children; c++) {
281 441 mc = &mm->mm_child[c];
282 442 zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
283 443 mc->mc_vd, mc->mc_offset,
284 444 zio_buf_alloc(zio->io_size), zio->io_size,
285 445 zio->io_type, zio->io_priority, 0,
286 446 vdev_mirror_scrub_done, mc));
287 447 }
288 448 return (ZIO_PIPELINE_CONTINUE);
289 449 }
290 450 /*
291 451 * For normal reads just pick one child.
292 452 */
293 453 c = vdev_mirror_child_select(zio);
294 454 children = (c >= 0);
295 455 } else {
296 456 ASSERT(zio->io_type == ZIO_TYPE_WRITE);
297 457
298 458 /*
299 459 * Writes go to all children.
300 460 */
301 461 c = 0;
302 462 children = mm->mm_children;
303 463 }
304 464
305 465 while (children--) {
306 466 mc = &mm->mm_child[c];
307 467 zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
308 468 mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
309 469 zio->io_type, zio->io_priority, 0,
310 470 vdev_mirror_child_done, mc));
311 471 c++;
312 472 }
313 473
314 474 return (ZIO_PIPELINE_CONTINUE);
315 475 }
316 476
317 477 static int
318 478 vdev_mirror_worst_error(mirror_map_t *mm)
319 479 {
320 480 int error[2] = { 0, 0 };
321 481
322 482 for (int c = 0; c < mm->mm_children; c++) {
323 483 mirror_child_t *mc = &mm->mm_child[c];
324 484 int s = mc->mc_speculative;
325 485 error[s] = zio_worst_error(error[s], mc->mc_error);
326 486 }
327 487
328 488 return (error[0] ? error[0] : error[1]);
329 489 }
330 490
331 491 static void
332 492 vdev_mirror_io_done(zio_t *zio)
333 493 {
334 494 mirror_map_t *mm = zio->io_vsd;
335 495 mirror_child_t *mc;
336 496 int c;
337 497 int good_copies = 0;
338 498 int unexpected_errors = 0;
339 499
340 500 for (c = 0; c < mm->mm_children; c++) {
341 501 mc = &mm->mm_child[c];
342 502
343 503 if (mc->mc_error) {
344 504 if (!mc->mc_skipped)
345 505 unexpected_errors++;
346 506 } else if (mc->mc_tried) {
347 507 good_copies++;
348 508 }
349 509 }
350 510
351 511 if (zio->io_type == ZIO_TYPE_WRITE) {
352 512 /*
353 513 * XXX -- for now, treat partial writes as success.
354 514 *
355 515 * Now that we support write reallocation, it would be better
356 516 * to treat partial failure as real failure unless there are
357 517 * no non-degraded top-level vdevs left, and not update DTLs
358 518 * if we intend to reallocate.
359 519 */
360 520 /* XXPOLICY */
361 521 if (good_copies != mm->mm_children) {
362 522 /*
363 523 * Always require at least one good copy.
364 524 *
365 525 * For ditto blocks (io_vd == NULL), require
366 526 * all copies to be good.
367 527 *
368 528 * XXX -- for replacing vdevs, there's no great answer.
369 529 * If the old device is really dead, we may not even
370 530 * be able to access it -- so we only want to
371 531 * require good writes to the new device. But if
372 532 * the new device turns out to be flaky, we want
373 533 * to be able to detach it -- which requires all
374 534 * writes to the old device to have succeeded.
375 535 */
376 536 if (good_copies == 0 || zio->io_vd == NULL)
377 537 zio->io_error = vdev_mirror_worst_error(mm);
378 538 }
379 539 return;
380 540 }
381 541
382 542 ASSERT(zio->io_type == ZIO_TYPE_READ);
383 543
384 544 /*
385 545 * If we don't have a good copy yet, keep trying other children.
386 546 */
387 547 /* XXPOLICY */
388 548 if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
389 549 ASSERT(c >= 0 && c < mm->mm_children);
390 550 mc = &mm->mm_child[c];
391 551 zio_vdev_io_redone(zio);
392 552 zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
393 553 mc->mc_vd, mc->mc_offset, zio->io_data, zio->io_size,
394 554 ZIO_TYPE_READ, zio->io_priority, 0,
395 555 vdev_mirror_child_done, mc));
396 556 return;
397 557 }
398 558
399 559 /* XXPOLICY */
400 560 if (good_copies == 0) {
401 561 zio->io_error = vdev_mirror_worst_error(mm);
402 562 ASSERT(zio->io_error != 0);
403 563 }
404 564
405 565 if (good_copies && spa_writeable(zio->io_spa) &&
406 566 (unexpected_errors ||
407 567 (zio->io_flags & ZIO_FLAG_RESILVER) ||
408 568 ((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) {
409 569 /*
410 570 * Use the good data we have in hand to repair damaged children.
411 571 */
412 572 for (c = 0; c < mm->mm_children; c++) {
413 573 /*
414 574 * Don't rewrite known good children.
415 575 * Not only is it unnecessary, it could
416 576 * actually be harmful: if the system lost
417 577 * power while rewriting the only good copy,
418 578 * there would be no good copies left!
419 579 */
420 580 mc = &mm->mm_child[c];
421 581
422 582 if (mc->mc_error == 0) {
423 583 if (mc->mc_tried)
424 584 continue;
425 585 if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
426 586 !vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
427 587 zio->io_txg, 1))
428 588 continue;
429 589 mc->mc_error = SET_ERROR(ESTALE);
430 590 }
431 591
432 592 zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
433 593 mc->mc_vd, mc->mc_offset,
434 594 zio->io_data, zio->io_size,
435 595 ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
436 596 ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
437 597 ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
438 598 }
439 599 }
440 600 }
441 601
442 602 static void
443 603 vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
444 604 {
445 605 if (faulted == vd->vdev_children)
446 606 vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
447 607 VDEV_AUX_NO_REPLICAS);
448 608 else if (degraded + faulted != 0)
449 609 vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
450 610 else
451 611 vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
452 612 }
453 613
454 614 vdev_ops_t vdev_mirror_ops = {
455 615 vdev_mirror_open,
456 616 vdev_mirror_close,
457 617 vdev_default_asize,
458 618 vdev_mirror_io_start,
459 619 vdev_mirror_io_done,
460 620 vdev_mirror_state_change,
461 621 NULL,
462 622 NULL,
463 623 VDEV_TYPE_MIRROR, /* name of this vdev type */
464 624 B_FALSE /* not a leaf vdev */
465 625 };
466 626
467 627 vdev_ops_t vdev_replacing_ops = {
468 628 vdev_mirror_open,
469 629 vdev_mirror_close,
470 630 vdev_default_asize,
471 631 vdev_mirror_io_start,
472 632 vdev_mirror_io_done,
473 633 vdev_mirror_state_change,
474 634 NULL,
475 635 NULL,
476 636 VDEV_TYPE_REPLACING, /* name of this vdev type */
477 637 B_FALSE /* not a leaf vdev */
478 638 };
479 639
480 640 vdev_ops_t vdev_spare_ops = {
481 641 vdev_mirror_open,
482 642 vdev_mirror_close,
483 643 vdev_default_asize,
484 644 vdev_mirror_io_start,
485 645 vdev_mirror_io_done,
486 646 vdev_mirror_state_change,
487 647 NULL,
488 648 NULL,
489 649 VDEV_TYPE_SPARE, /* name of this vdev type */
490 650 B_FALSE /* not a leaf vdev */
491 651 };