3818 zpool status -x should report pools with removed l2arc devices
Reviewed by: Saso Kiselkov <skiselkov.ml@gmail.com>
Reviewed by: George Wilson <gwilson@zfsmail.com>
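This change extends find_vdev_problem() in libzfs_status.c to walk each vdev's ZPOOL_CONFIG_L2CACHE children in addition to its regular ZPOOL_CONFIG_CHILDREN, so a removed L2ARC (cache) device is now caught by the existing vdev_removed() check and surfaces as ZPOOL_STATUS_REMOVED_DEV; a usage sketch follows the diff.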
--- old/usr/src/lib/libzfs/common/libzfs_status.c
+++ new/usr/src/lib/libzfs/common/libzfs_status.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 2012 by Delphix. All rights reserved.
25 + * Copyright (c) 2013 Steven Hartland. All rights reserved.
25 26 */
26 27
27 28 /*
28 29 * This file contains the functions which analyze the status of a pool. This
  29  30  * includes both the status of an active pool and the status of exported
  30  31  * pools. Returns one of the ZPOOL_STATUS_* defines describing the status of
  31  32  * the pool. This status is independent (to a certain degree) of the state of
32 33 * the pool. A pool's state describes only whether or not it is capable of
33 34 * providing the necessary fault tolerance for data. The status describes the
34 35 * overall status of devices. A pool that is online can still have a device
35 36 * that is experiencing errors.
36 37 *
37 38 * Only a subset of the possible faults can be detected using 'zpool status',
38 39 * and not all possible errors correspond to a FMA message ID. The explanation
39 40 * is left up to the caller, depending on whether it is a live pool or an
40 41 * import.
41 42 */
42 43
43 44 #include <libzfs.h>
44 45 #include <string.h>
45 46 #include <unistd.h>
46 47 #include "libzfs_impl.h"
47 48 #include "zfeature_common.h"
48 49
49 50 /*
50 51 * Message ID table. This must be kept in sync with the ZPOOL_STATUS_* defines
51 52 * in libzfs.h. Note that there are some status results which go past the end
52 53 * of this table, and hence have no associated message ID.
53 54 */
54 55 static char *zfs_msgid_table[] = {
55 56 "ZFS-8000-14",
56 57 "ZFS-8000-2Q",
57 58 "ZFS-8000-3C",
58 59 "ZFS-8000-4J",
59 60 "ZFS-8000-5E",
60 61 "ZFS-8000-6X",
61 62 "ZFS-8000-72",
62 63 "ZFS-8000-8A",
63 64 "ZFS-8000-9P",
64 65 "ZFS-8000-A5",
65 66 "ZFS-8000-EY",
66 67 "ZFS-8000-HC",
67 68 "ZFS-8000-JQ",
68 69 "ZFS-8000-K4",
69 70 };
70 71
71 72 #define NMSGID (sizeof (zfs_msgid_table) / sizeof (zfs_msgid_table[0]))
72 73
73 74 /* ARGSUSED */
74 75 static int
75 76 vdev_missing(uint64_t state, uint64_t aux, uint64_t errs)
76 77 {
77 78 return (state == VDEV_STATE_CANT_OPEN &&
78 79 aux == VDEV_AUX_OPEN_FAILED);
79 80 }
80 81
81 82 /* ARGSUSED */
82 83 static int
83 84 vdev_faulted(uint64_t state, uint64_t aux, uint64_t errs)
84 85 {
85 86 return (state == VDEV_STATE_FAULTED);
86 87 }
87 88
88 89 /* ARGSUSED */
89 90 static int
90 91 vdev_errors(uint64_t state, uint64_t aux, uint64_t errs)
91 92 {
92 93 return (state == VDEV_STATE_DEGRADED || errs != 0);
93 94 }
94 95
95 96 /* ARGSUSED */
96 97 static int
97 98 vdev_broken(uint64_t state, uint64_t aux, uint64_t errs)
98 99 {
99 100 return (state == VDEV_STATE_CANT_OPEN);
100 101 }
101 102
102 103 /* ARGSUSED */
103 104 static int
104 105 vdev_offlined(uint64_t state, uint64_t aux, uint64_t errs)
105 106 {
106 107 return (state == VDEV_STATE_OFFLINE);
107 108 }
108 109
109 110 /* ARGSUSED */
110 111 static int
111 112 vdev_removed(uint64_t state, uint64_t aux, uint64_t errs)
112 113 {
113 114 return (state == VDEV_STATE_REMOVED);
114 115 }
115 116
116 117 /*
 117 118  * Detect whether any leaf devices have seen errors or could not be opened.
118 119 */
119 120 static boolean_t
120 121 find_vdev_problem(nvlist_t *vdev, int (*func)(uint64_t, uint64_t, uint64_t))
121 122 {
122 123 nvlist_t **child;
123 124 vdev_stat_t *vs;
124 125 uint_t c, children;
125 126 char *type;
126 127
127 128 /*
128 129 * Ignore problems within a 'replacing' vdev, since we're presumably in
129 130 * the process of repairing any such errors, and don't want to call them
130 131 * out again. We'll pick up the fact that a resilver is happening
131 132 * later.
132 133 */
133 134 verify(nvlist_lookup_string(vdev, ZPOOL_CONFIG_TYPE, &type) == 0);
134 135 if (strcmp(type, VDEV_TYPE_REPLACING) == 0)
135 136 return (B_FALSE);
136 137
137 138 if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_CHILDREN, &child,
138 139 &children) == 0) {
139 140 for (c = 0; c < children; c++)
140 141 if (find_vdev_problem(child[c], func))
141 142 return (B_TRUE);
142 143 } else {
143 144 verify(nvlist_lookup_uint64_array(vdev, ZPOOL_CONFIG_VDEV_STATS,
144 145 (uint64_t **)&vs, &c) == 0);
145 146
146 147 if (func(vs->vs_state, vs->vs_aux,
147 148 vs->vs_read_errors +
148 149 vs->vs_write_errors +
149 150 vs->vs_checksum_errors))
150 151 return (B_TRUE);
151 152 }
152 153
154 + /*
155 + * Check any L2 cache devs
156 + */
157 + if (nvlist_lookup_nvlist_array(vdev, ZPOOL_CONFIG_L2CACHE, &child,
158 + &children) == 0) {
159 + for (c = 0; c < children; c++)
160 + if (find_vdev_problem(child[c], func))
161 + return (B_TRUE);
162 + }
163 +
153 164 return (B_FALSE);
154 165 }
155 166
156 167 /*
157 168 * Active pool health status.
158 169 *
159 170 * To determine the status for a pool, we make several passes over the config,
160 171 * picking the most egregious error we find. In order of importance, we do the
161 172 * following:
162 173 *
163 174 * - Check for a complete and valid configuration
164 175 * - Look for any faulted or missing devices in a non-replicated config
165 176 * - Check for any data errors
166 177 * - Check for any faulted or missing devices in a replicated config
167 178 * - Look for any devices showing errors
168 179 * - Check for any resilvering devices
169 180 *
170 181 * There can obviously be multiple errors within a single pool, so this routine
171 182 * only picks the most damaging of all the current errors to report.
172 183 */
173 184 static zpool_status_t
174 185 check_status(nvlist_t *config, boolean_t isimport)
175 186 {
176 187 nvlist_t *nvroot;
177 188 vdev_stat_t *vs;
178 189 pool_scan_stat_t *ps = NULL;
179 190 uint_t vsc, psc;
180 191 uint64_t nerr;
181 192 uint64_t version;
182 193 uint64_t stateval;
183 194 uint64_t suspended;
184 195 uint64_t hostid = 0;
185 196
186 197 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
187 198 &version) == 0);
188 199 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
189 200 &nvroot) == 0);
190 201 verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
191 202 (uint64_t **)&vs, &vsc) == 0);
192 203 verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
193 204 &stateval) == 0);
194 205
195 206 /*
196 207 * Currently resilvering a vdev
197 208 */
198 209 (void) nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
199 210 (uint64_t **)&ps, &psc);
200 211 if (ps && ps->pss_func == POOL_SCAN_RESILVER &&
201 212 ps->pss_state == DSS_SCANNING)
202 213 return (ZPOOL_STATUS_RESILVERING);
203 214
204 215 /*
205 216 * Pool last accessed by another system.
206 217 */
207 218 (void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
208 219 if (hostid != 0 && (unsigned long)hostid != gethostid() &&
209 220 stateval == POOL_STATE_ACTIVE)
210 221 return (ZPOOL_STATUS_HOSTID_MISMATCH);
211 222
212 223 /*
213 224 * Newer on-disk version.
214 225 */
215 226 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
216 227 vs->vs_aux == VDEV_AUX_VERSION_NEWER)
217 228 return (ZPOOL_STATUS_VERSION_NEWER);
218 229
219 230 /*
220 231 * Unsupported feature(s).
221 232 */
222 233 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
223 234 vs->vs_aux == VDEV_AUX_UNSUP_FEAT) {
224 235 nvlist_t *nvinfo;
225 236
226 237 verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
227 238 &nvinfo) == 0);
228 239 if (nvlist_exists(nvinfo, ZPOOL_CONFIG_CAN_RDONLY))
229 240 return (ZPOOL_STATUS_UNSUP_FEAT_WRITE);
230 241 return (ZPOOL_STATUS_UNSUP_FEAT_READ);
231 242 }
232 243
233 244 /*
234 245 * Check that the config is complete.
235 246 */
236 247 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
237 248 vs->vs_aux == VDEV_AUX_BAD_GUID_SUM)
238 249 return (ZPOOL_STATUS_BAD_GUID_SUM);
239 250
240 251 /*
 241 252  * Check whether the pool has been suspended due to failed I/O.
242 253 */
243 254 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_SUSPENDED,
244 255 &suspended) == 0) {
245 256 if (suspended == ZIO_FAILURE_MODE_CONTINUE)
246 257 return (ZPOOL_STATUS_IO_FAILURE_CONTINUE);
247 258 return (ZPOOL_STATUS_IO_FAILURE_WAIT);
248 259 }
249 260
250 261 /*
251 262 * Could not read a log.
252 263 */
253 264 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
254 265 vs->vs_aux == VDEV_AUX_BAD_LOG) {
255 266 return (ZPOOL_STATUS_BAD_LOG);
256 267 }
257 268
258 269 /*
259 270 * Bad devices in non-replicated config.
260 271 */
261 272 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
262 273 find_vdev_problem(nvroot, vdev_faulted))
263 274 return (ZPOOL_STATUS_FAULTED_DEV_NR);
264 275
265 276 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
266 277 find_vdev_problem(nvroot, vdev_missing))
267 278 return (ZPOOL_STATUS_MISSING_DEV_NR);
268 279
269 280 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
270 281 find_vdev_problem(nvroot, vdev_broken))
271 282 return (ZPOOL_STATUS_CORRUPT_LABEL_NR);
272 283
273 284 /*
274 285 * Corrupted pool metadata
275 286 */
276 287 if (vs->vs_state == VDEV_STATE_CANT_OPEN &&
277 288 vs->vs_aux == VDEV_AUX_CORRUPT_DATA)
278 289 return (ZPOOL_STATUS_CORRUPT_POOL);
279 290
280 291 /*
281 292 * Persistent data errors.
282 293 */
283 294 if (!isimport) {
284 295 if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
285 296 &nerr) == 0 && nerr != 0)
286 297 return (ZPOOL_STATUS_CORRUPT_DATA);
287 298 }
288 299
289 300 /*
290 301 * Missing devices in a replicated config.
291 302 */
292 303 if (find_vdev_problem(nvroot, vdev_faulted))
293 304 return (ZPOOL_STATUS_FAULTED_DEV_R);
294 305 if (find_vdev_problem(nvroot, vdev_missing))
295 306 return (ZPOOL_STATUS_MISSING_DEV_R);
296 307 if (find_vdev_problem(nvroot, vdev_broken))
297 308 return (ZPOOL_STATUS_CORRUPT_LABEL_R);
298 309
299 310 /*
300 311 * Devices with errors
301 312 */
302 313 if (!isimport && find_vdev_problem(nvroot, vdev_errors))
303 314 return (ZPOOL_STATUS_FAILING_DEV);
304 315
305 316 /*
306 317 * Offlined devices
307 318 */
308 319 if (find_vdev_problem(nvroot, vdev_offlined))
309 320 return (ZPOOL_STATUS_OFFLINE_DEV);
310 321
311 322 /*
312 323 * Removed device
313 324 */
314 325 if (find_vdev_problem(nvroot, vdev_removed))
315 326 return (ZPOOL_STATUS_REMOVED_DEV);
316 327
317 328 /*
318 329 * Outdated, but usable, version
319 330 */
320 331 if (SPA_VERSION_IS_SUPPORTED(version) && version != SPA_VERSION)
321 332 return (ZPOOL_STATUS_VERSION_OLDER);
322 333
323 334 /*
324 335 * Usable pool with disabled features
325 336 */
326 337 if (version >= SPA_VERSION_FEATURES) {
327 338 int i;
328 339 nvlist_t *feat;
329 340
330 341 if (isimport) {
331 342 feat = fnvlist_lookup_nvlist(config,
332 343 ZPOOL_CONFIG_LOAD_INFO);
333 344 feat = fnvlist_lookup_nvlist(feat,
334 345 ZPOOL_CONFIG_ENABLED_FEAT);
335 346 } else {
336 347 feat = fnvlist_lookup_nvlist(config,
337 348 ZPOOL_CONFIG_FEATURE_STATS);
338 349 }
339 350
340 351 for (i = 0; i < SPA_FEATURES; i++) {
341 352 zfeature_info_t *fi = &spa_feature_table[i];
342 353 if (!nvlist_exists(feat, fi->fi_guid))
343 354 return (ZPOOL_STATUS_FEAT_DISABLED);
344 355 }
345 356 }
346 357
347 358 return (ZPOOL_STATUS_OK);
348 359 }
349 360
350 361 zpool_status_t
351 362 zpool_get_status(zpool_handle_t *zhp, char **msgid)
352 363 {
353 364 zpool_status_t ret = check_status(zhp->zpool_config, B_FALSE);
354 365
355 366 if (ret >= NMSGID)
356 367 *msgid = NULL;
357 368 else
358 369 *msgid = zfs_msgid_table[ret];
359 370
360 371 return (ret);
361 372 }
362 373
363 374 zpool_status_t
364 375 zpool_import_status(nvlist_t *config, char **msgid)
365 376 {
366 377 zpool_status_t ret = check_status(config, B_TRUE);
367 378
368 379 if (ret >= NMSGID)
369 380 *msgid = NULL;
370 381 else
371 382 *msgid = zfs_msgid_table[ret];
372 383
373 384 return (ret);
374 385 }
375 386
376 387 static void
377 388 dump_ddt_stat(const ddt_stat_t *dds, int h)
378 389 {
379 390 char refcnt[6];
380 391 char blocks[6], lsize[6], psize[6], dsize[6];
381 392 char ref_blocks[6], ref_lsize[6], ref_psize[6], ref_dsize[6];
382 393
383 394 if (dds == NULL || dds->dds_blocks == 0)
384 395 return;
385 396
386 397 if (h == -1)
387 398 (void) strcpy(refcnt, "Total");
388 399 else
389 400 zfs_nicenum(1ULL << h, refcnt, sizeof (refcnt));
390 401
391 402 zfs_nicenum(dds->dds_blocks, blocks, sizeof (blocks));
392 403 zfs_nicenum(dds->dds_lsize, lsize, sizeof (lsize));
393 404 zfs_nicenum(dds->dds_psize, psize, sizeof (psize));
394 405 zfs_nicenum(dds->dds_dsize, dsize, sizeof (dsize));
395 406 zfs_nicenum(dds->dds_ref_blocks, ref_blocks, sizeof (ref_blocks));
396 407 zfs_nicenum(dds->dds_ref_lsize, ref_lsize, sizeof (ref_lsize));
397 408 zfs_nicenum(dds->dds_ref_psize, ref_psize, sizeof (ref_psize));
398 409 zfs_nicenum(dds->dds_ref_dsize, ref_dsize, sizeof (ref_dsize));
399 410
400 411 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
401 412 refcnt,
402 413 blocks, lsize, psize, dsize,
403 414 ref_blocks, ref_lsize, ref_psize, ref_dsize);
404 415 }
405 416
406 417 /*
407 418 * Print the DDT histogram and the column totals.
408 419 */
409 420 void
410 421 zpool_dump_ddt(const ddt_stat_t *dds_total, const ddt_histogram_t *ddh)
411 422 {
412 423 int h;
413 424
414 425 (void) printf("\n");
415 426
416 427 (void) printf("bucket "
417 428 " allocated "
418 429 " referenced \n");
419 430 (void) printf("______ "
420 431 "______________________________ "
421 432 "______________________________\n");
422 433
423 434 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
424 435 "refcnt",
425 436 "blocks", "LSIZE", "PSIZE", "DSIZE",
426 437 "blocks", "LSIZE", "PSIZE", "DSIZE");
427 438
428 439 (void) printf("%6s %6s %5s %5s %5s %6s %5s %5s %5s\n",
429 440 "------",
430 441 "------", "-----", "-----", "-----",
431 442 "------", "-----", "-----", "-----");
432 443
433 444 for (h = 0; h < 64; h++)
434 445 dump_ddt_stat(&ddh->ddh_stat[h], h);
435 446
436 447 dump_ddt_stat(dds_total, -1);
437 448
438 449 (void) printf("\n");
439 450 }
(277 lines elided)
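As a usage sketch of the new behavior (not part of the webrev): the pool name "tank" is an assumption, and the calls used (libzfs_init(), zpool_open(), zpool_get_status(), zpool_close(), libzfs_fini()) are the stock libzfs entry points, with zpool_get_status() declared exactly as in the diff above. After this fix, a pool whose cache device has been physically removed reports ZPOOL_STATUS_REMOVED_DEV instead of ZPOOL_STATUS_OK, which is what allows 'zpool status -x' to list it.

#include <stdio.h>
#include <libzfs.h>

int
main(void)
{
	libzfs_handle_t *hdl;
	zpool_handle_t *zhp;
	zpool_status_t status;
	char *msgid;

	if ((hdl = libzfs_init()) == NULL)
		return (1);

	/* "tank" is a hypothetical pool name used for illustration. */
	if ((zhp = zpool_open(hdl, "tank")) == NULL) {
		libzfs_fini(hdl);
		return (1);
	}

	/*
	 * find_vdev_problem() now also descends into the pool's L2
	 * cache children, so a pulled cache device trips the
	 * vdev_removed() check inside check_status().
	 */
	status = zpool_get_status(zhp, &msgid);
	if (status == ZPOOL_STATUS_REMOVED_DEV)
		(void) printf("tank: a device was removed\n");
	else if (status == ZPOOL_STATUS_OK)
		(void) printf("tank: healthy\n");
	if (msgid != NULL)
		(void) printf("msgid: %s\n", msgid);

	zpool_close(zhp);
	libzfs_fini(hdl);
	return (0);
}

Note that statuses past the end of zfs_msgid_table[] come back with msgid set to NULL (see zpool_get_status() in the diff), so the sketch prints an FMA message ID only when one exists.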