199 (spa_config_held(zio->io_spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
200 (SCL_CONFIG | SCL_STATE) &&
201 dsl_pool_sync_context(spa_get_dsl(zio->io_spa))));
202 ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
203
204 zio_nowait(zio_write_phys(zio, vd,
205 vdev_label_offset(vd->vdev_psize, l, offset),
206 size, buf, ZIO_CHECKSUM_LABEL, done, private,
207 ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
208 }
209
210 /*
211 * Generate the nvlist representing this vdev's config.
212 */
/*
 * Build the config nvlist describing vdev 'vd' in pool 'spa'.
 *
 *   getstats - when B_TRUE, also attach the vdev stats array and, if
 *              spa_scan_get_stats() succeeds, the pool scan stats.
 *   flags    - VDEV_CONFIG_* flags selecting which fields are emitted
 *              (spare/l2cache configs omit several fields; see below).
 *
 * Returns a newly allocated nvlist; the caller is responsible for
 * freeing it with nvlist_free().  Interior vdevs are described
 * recursively via a ZPOOL_CONFIG_CHILDREN nvlist array.
 */
nvlist_t *
vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
    vdev_config_flag_t flags)
{
	nvlist_t *nv = NULL;

	VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);

	VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_TYPE,
	    vd->vdev_ops->vdev_op_type) == 0);
	/*
	 * NOTE(review): the per-parent vdev id is skipped for spare and
	 * l2cache configs -- presumably their position under the parent
	 * is not meaningful there; confirm against the consumers.
	 */
	if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
		VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id)
		    == 0);
	VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid) == 0);

	/* Optional device-identification strings: emitted only when set. */
	if (vd->vdev_path != NULL)
		VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_PATH,
		    vd->vdev_path) == 0);

	if (vd->vdev_devid != NULL)
		VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_DEVID,
		    vd->vdev_devid) == 0);

	if (vd->vdev_physpath != NULL)
		VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
		    vd->vdev_physpath) == 0);

	if (vd->vdev_fru != NULL)
		VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_FRU,
		    vd->vdev_fru) == 0);

	/* Only raidz vdevs carry a parity count. */
	if (vd->vdev_nparity != 0) {
		ASSERT(strcmp(vd->vdev_ops->vdev_op_type,
		    VDEV_TYPE_RAIDZ) == 0);

		/*
		 * Make sure someone hasn't managed to sneak a fancy new vdev
		 * into a crufty old storage pool.
		 */
		ASSERT(vd->vdev_nparity == 1 ||
		    (vd->vdev_nparity <= 2 &&
		    spa_version(spa) >= SPA_VERSION_RAIDZ2) ||
		    (vd->vdev_nparity <= 3 &&
		    spa_version(spa) >= SPA_VERSION_RAIDZ3));

		/*
		 * Note that we'll add the nparity tag even on storage pools
		 * that only support a single parity device -- older software
		 * will just ignore it.
		 */
		VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY,
		    vd->vdev_nparity) == 0);
	}

	/* -1ULL appears to be the "unknown" sentinel -- skip it if so. */
	if (vd->vdev_wholedisk != -1ULL)
		VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    vd->vdev_wholedisk) == 0);

	if (vd->vdev_not_present)
		VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1) == 0);

	if (vd->vdev_isspare)
		VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1) == 0);

	/*
	 * Top-level vdev metadata (metaslab layout, sizes, log/removing
	 * state); not emitted for spare/l2cache configs.
	 */
	if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
	    vd == vd->vdev_top) {
		VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    vd->vdev_ms_array) == 0);
		VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    vd->vdev_ms_shift) == 0);
		VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT,
		    vd->vdev_ashift) == 0);
		VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    vd->vdev_asize) == 0);
		VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG,
		    vd->vdev_islog) == 0);
		if (vd->vdev_removing)
			VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
			    vd->vdev_removing) == 0);
	}

	/* DTL space map object number, if one has been allocated. */
	if (vd->vdev_dtl_smo.smo_object != 0)
		VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
		    vd->vdev_dtl_smo.smo_object) == 0);

	if (vd->vdev_crtxg)
		VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
		    vd->vdev_crtxg) == 0);

	if (getstats) {
		vdev_stat_t vs;
		pool_scan_stat_t ps;

		/* Stats structs are exported as raw uint64 arrays. */
		vdev_get_stats(vd, &vs);
		VERIFY(nvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t *)&vs, sizeof (vs) / sizeof (uint64_t)) == 0);

		/* provide either current or previous scan information */
		if (spa_scan_get_stats(spa, &ps) == 0) {
			VERIFY(nvlist_add_uint64_array(nv,
			    ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
			    sizeof (pool_scan_stat_t) / sizeof (uint64_t))
			    == 0);
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf) {
		/*
		 * Interior vdev: recursively generate a config for each
		 * (non-skipped) child and attach them as an array.
		 */
		nvlist_t **child;
		int c, idx;

		ASSERT(!vd->vdev_ishole);

		child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
		    KM_SLEEP);

		for (c = 0, idx = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];

			/*
			 * If we're generating an nvlist of removing
			 * vdevs then skip over any device which is
			 * not being removed.
			 */
			if ((flags & VDEV_CONFIG_REMOVING) &&
			    !cvd->vdev_removing)
				continue;

			child[idx++] = vdev_config_generate(spa, cvd,
			    getstats, flags);
		}

		if (idx) {
			VERIFY(nvlist_add_nvlist_array(nv,
			    ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
		}

		/* The array add copies the children, so free our copies. */
		for (c = 0; c < idx; c++)
			nvlist_free(child[c]);

		kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));

	} else {
		/* Leaf vdev: record state flags and any auxiliary state. */
		const char *aux = NULL;

		/* Only persistent (non-temporary) offline state is recorded. */
		if (vd->vdev_offline && !vd->vdev_tmpoffline)
			VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE,
			    B_TRUE) == 0);
		if (vd->vdev_resilvering)
			VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVERING,
			    B_TRUE) == 0);
		if (vd->vdev_faulted)
			VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED,
			    B_TRUE) == 0);
		if (vd->vdev_degraded)
			VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED,
			    B_TRUE) == 0);
		if (vd->vdev_removed)
			VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED,
			    B_TRUE) == 0);
		if (vd->vdev_unspare)
			VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE,
			    B_TRUE) == 0);
		if (vd->vdev_ishole)
			VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE,
			    B_TRUE) == 0);

		/* Only these two aux states are exported, by name. */
		switch (vd->vdev_stat.vs_aux) {
		case VDEV_AUX_ERR_EXCEEDED:
			aux = "err_exceeded";
			break;

		case VDEV_AUX_EXTERNAL:
			aux = "external";
			break;
		}

		if (aux != NULL)
			VERIFY(nvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE,
			    aux) == 0);

		/*
		 * While splitting a pool, remember the guid this vdev had
		 * in the original pool.
		 */
		if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
			VERIFY(nvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
			    vd->vdev_orig_guid) == 0);
		}
	}

	return (nv);
}
401
402 /*
403 * Generate a view of the top-level vdevs. If we currently have holes
404 * in the namespace, then generate an array which contains a list of holey
405 * vdevs. Additionally, add the number of top-level children that currently
406 * exist.
407 */
408 void
409 vdev_top_config_generate(spa_t *spa, nvlist_t *config)
410 {
411 vdev_t *rvd = spa->spa_root_vdev;
412 uint64_t *array;
413 uint_t c, idx;
414
415 array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);
|
199 (spa_config_held(zio->io_spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
200 (SCL_CONFIG | SCL_STATE) &&
201 dsl_pool_sync_context(spa_get_dsl(zio->io_spa))));
202 ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
203
204 zio_nowait(zio_write_phys(zio, vd,
205 vdev_label_offset(vd->vdev_psize, l, offset),
206 size, buf, ZIO_CHECKSUM_LABEL, done, private,
207 ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
208 }
209
210 /*
211 * Generate the nvlist representing this vdev's config.
212 */
/*
 * Build the config nvlist describing vdev 'vd' in pool 'spa'.
 *
 *   getstats - when B_TRUE, also attach the vdev stats array and, if
 *              spa_scan_get_stats() succeeds, the pool scan stats.
 *   flags    - VDEV_CONFIG_* flags selecting which fields are emitted
 *              (spare/l2cache configs omit several fields; see below).
 *
 * Returns a newly allocated nvlist; the caller is responsible for
 * freeing it with nvlist_free().  Interior vdevs are described
 * recursively via a ZPOOL_CONFIG_CHILDREN nvlist array.  The fnvlist_*
 * interfaces abort internally on failure, so no return checks appear.
 */
nvlist_t *
vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
    vdev_config_flag_t flags)
{
	nvlist_t *nv = NULL;

	nv = fnvlist_alloc();

	fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
	/*
	 * NOTE(review): the per-parent vdev id is skipped for spare and
	 * l2cache configs -- presumably their position under the parent
	 * is not meaningful there; confirm against the consumers.
	 */
	if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id);
	fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid);

	/* Optional device-identification strings: emitted only when set. */
	if (vd->vdev_path != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path);

	if (vd->vdev_devid != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid);

	if (vd->vdev_physpath != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
		    vd->vdev_physpath);

	if (vd->vdev_fru != NULL)
		fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru);

	/* Only raidz vdevs carry a parity count. */
	if (vd->vdev_nparity != 0) {
		ASSERT(strcmp(vd->vdev_ops->vdev_op_type,
		    VDEV_TYPE_RAIDZ) == 0);

		/*
		 * Make sure someone hasn't managed to sneak a fancy new vdev
		 * into a crufty old storage pool.
		 */
		ASSERT(vd->vdev_nparity == 1 ||
		    (vd->vdev_nparity <= 2 &&
		    spa_version(spa) >= SPA_VERSION_RAIDZ2) ||
		    (vd->vdev_nparity <= 3 &&
		    spa_version(spa) >= SPA_VERSION_RAIDZ3));

		/*
		 * Note that we'll add the nparity tag even on storage pools
		 * that only support a single parity device -- older software
		 * will just ignore it.
		 */
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vd->vdev_nparity);
	}

	/* -1ULL appears to be the "unknown" sentinel -- skip it if so. */
	if (vd->vdev_wholedisk != -1ULL)
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
		    vd->vdev_wholedisk);

	if (vd->vdev_not_present)
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);

	if (vd->vdev_isspare)
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);

	/*
	 * Top-level vdev metadata (metaslab layout, sizes, log/removing
	 * state); not emitted for spare/l2cache configs.
	 */
	if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
	    vd == vd->vdev_top) {
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
		    vd->vdev_ms_array);
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
		    vd->vdev_ms_shift);
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
		    vd->vdev_asize);
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
		if (vd->vdev_removing)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
			    vd->vdev_removing);
	}

	/* DTL space map object number, if one has been allocated. */
	if (vd->vdev_dtl_smo.smo_object != 0)
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
		    vd->vdev_dtl_smo.smo_object);

	if (vd->vdev_crtxg)
		fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);

	if (getstats) {
		vdev_stat_t vs;
		pool_scan_stat_t ps;

		/* Stats structs are exported as raw uint64 arrays. */
		vdev_get_stats(vd, &vs);
		fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
		    (uint64_t *)&vs, sizeof (vs) / sizeof (uint64_t));

		/* provide either current or previous scan information */
		if (spa_scan_get_stats(spa, &ps) == 0) {
			fnvlist_add_uint64_array(nv,
			    ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
			    sizeof (pool_scan_stat_t) / sizeof (uint64_t));
		}
	}

	if (!vd->vdev_ops->vdev_op_leaf) {
		/*
		 * Interior vdev: recursively generate a config for each
		 * (non-skipped) child and attach them as an array.
		 */
		nvlist_t **child;
		int c, idx;

		ASSERT(!vd->vdev_ishole);

		child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
		    KM_SLEEP);

		for (c = 0, idx = 0; c < vd->vdev_children; c++) {
			vdev_t *cvd = vd->vdev_child[c];

			/*
			 * If we're generating an nvlist of removing
			 * vdevs then skip over any device which is
			 * not being removed.
			 */
			if ((flags & VDEV_CONFIG_REMOVING) &&
			    !cvd->vdev_removing)
				continue;

			child[idx++] = vdev_config_generate(spa, cvd,
			    getstats, flags);
		}

		if (idx) {
			fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
			    child, idx);
		}

		/* The array add copies the children, so free our copies. */
		for (c = 0; c < idx; c++)
			nvlist_free(child[c]);

		kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));

	} else {
		/* Leaf vdev: record state flags and any auxiliary state. */
		const char *aux = NULL;

		/* Only persistent (non-temporary) offline state is recorded. */
		if (vd->vdev_offline && !vd->vdev_tmpoffline)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE);
		/*
		 * NOTE(review): unlike the state flags below, the resilver
		 * txg is persisted as a value, presumably so an in-progress
		 * resilver can resume after import -- confirm.
		 */
		if (vd->vdev_resilver_txg != 0)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
			    vd->vdev_resilver_txg);
		if (vd->vdev_faulted)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE);
		if (vd->vdev_degraded)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE);
		if (vd->vdev_removed)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE);
		if (vd->vdev_unspare)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE);
		if (vd->vdev_ishole)
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE);

		/* Only these two aux states are exported, by name. */
		switch (vd->vdev_stat.vs_aux) {
		case VDEV_AUX_ERR_EXCEEDED:
			aux = "err_exceeded";
			break;

		case VDEV_AUX_EXTERNAL:
			aux = "external";
			break;
		}

		if (aux != NULL)
			fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux);

		/*
		 * While splitting a pool, remember the guid this vdev had
		 * in the original pool.
		 */
		if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
			fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
			    vd->vdev_orig_guid);
		}
	}

	return (nv);
}
384
385 /*
386 * Generate a view of the top-level vdevs. If we currently have holes
387 * in the namespace, then generate an array which contains a list of holey
388 * vdevs. Additionally, add the number of top-level children that currently
389 * exist.
390 */
391 void
392 vdev_top_config_generate(spa_t *spa, nvlist_t *config)
393 {
394 vdev_t *rvd = spa->spa_root_vdev;
395 uint64_t *array;
396 uint_t c, idx;
397
398 array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);
|