9709 Remove support for BZIP2 from dump
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
9707 Enable parallel crash dump
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
--- old/usr/src/uts/common/os/dumpsubr.c
+++ new/usr/src/uts/common/os/dumpsubr.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1998, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2018 Joyent, Inc.
25 25 * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
26 26 */
27 27
28 28 #include <sys/types.h>
29 29 #include <sys/param.h>
30 30 #include <sys/systm.h>
31 31 #include <sys/vm.h>
32 32 #include <sys/proc.h>
33 33 #include <sys/file.h>
34 34 #include <sys/conf.h>
35 35 #include <sys/kmem.h>
36 36 #include <sys/mem.h>
37 37 #include <sys/mman.h>
38 38 #include <sys/vnode.h>
39 39 #include <sys/errno.h>
40 40 #include <sys/memlist.h>
41 41 #include <sys/dumphdr.h>
42 42 #include <sys/dumpadm.h>
43 43 #include <sys/ksyms.h>
44 44 #include <sys/compress.h>
45 45 #include <sys/stream.h>
46 46 #include <sys/strsun.h>
47 47 #include <sys/cmn_err.h>
48 48 #include <sys/bitmap.h>
49 49 #include <sys/modctl.h>
50 50 #include <sys/utsname.h>
51 51 #include <sys/systeminfo.h>
52 52 #include <sys/vmem.h>
53 53 #include <sys/log.h>
54 54 #include <sys/var.h>
55 55 #include <sys/debug.h>
56 56 #include <sys/sunddi.h>
57 57 #include <fs/fs_subr.h>
58 58 #include <sys/fs/snode.h>
59 59 #include <sys/ontrap.h>
60 60 #include <sys/panic.h>
61 61 #include <sys/dkio.h>
62 62 #include <sys/vtoc.h>
63 63 #include <sys/errorq.h>
64 64 #include <sys/fm/util.h>
65 65 #include <sys/fs/zfs.h>
66 66
[ 66 lines elided ]
67 67 #include <vm/hat.h>
68 68 #include <vm/as.h>
69 69 #include <vm/page.h>
70 70 #include <vm/pvn.h>
71 71 #include <vm/seg.h>
72 72 #include <vm/seg_kmem.h>
73 73 #include <sys/clock_impl.h>
74 74 #include <sys/hold_page.h>
75 75 #include <sys/cpu.h>
76 76
77 -#include <bzip2/bzlib.h>
78 -
79 77 #define ONE_GIG (1024 * 1024 * 1024UL)
80 78
81 79 /*
82 - * Crash dump time is dominated by disk write time. To reduce this,
83 - * the stronger compression method bzip2 is applied to reduce the dump
84 - * size and hence reduce I/O time. However, bzip2 is much more
85 - * computationally expensive than the existing lzjb algorithm, so to
86 - * avoid increasing compression time, CPUs that are otherwise idle
87 - * during panic are employed to parallelize the compression task.
88 - * Many helper CPUs are needed to prevent bzip2 from being a
89 - * bottleneck, and on systems with too few CPUs, the lzjb algorithm is
90 - * parallelized instead. Lastly, I/O and compression are performed by
91 - * different CPUs, and are hence overlapped in time, unlike the older
92 - * serial code.
93 - *
94 - * Another important consideration is the speed of the dump
95 - * device. Faster disks need less CPUs in order to benefit from
96 - * parallel lzjb versus parallel bzip2. Therefore, the CPU count
97 - * threshold for switching from parallel lzjb to paralled bzip2 is
98 - * elevated for faster disks. The dump device speed is adduced from
99 - * the setting for dumpbuf.iosize, see dump_update_clevel.
80 + * Parallel Dump:
81 + * CPUs that are otherwise idle during panic are employed to parallelize
82 + * the compression task. I/O and compression are performed by different
83 + * CPUs, and are hence overlapped in time, unlike the older serial code.
100 84 */
101 85
102 86 /*
103 87 * exported vars
104 88 */
105 89 kmutex_t dump_lock; /* lock for dump configuration */
106 90 dumphdr_t *dumphdr; /* dump header */
107 91 int dump_conflags = DUMP_KERNEL; /* dump configuration flags */
108 92 vnode_t *dumpvp; /* dump device vnode pointer */
109 93 u_offset_t dumpvp_size; /* size of dump device, in bytes */
110 94 char *dumppath; /* pathname of dump device */
111 95 int dump_timeout = 120; /* timeout for dumping pages */
112 96 int dump_timeleft; /* portion of dump_timeout remaining */
113 97 int dump_ioerr; /* dump i/o error */
114 98 int dump_check_used; /* enable check for used pages */
115 -char *dump_stack_scratch; /* scratch area for saving stack summary */
99 +char *dump_stack_scratch; /* scratch area for saving stack summary */
116 100
117 101 /*
118 - * Tunables for dump compression and parallelism. These can be set via
119 - * /etc/system.
102 + * Tunables for dump compression and parallelism.
103 + * These can be set via /etc/system.
120 104 *
121 - * dump_ncpu_low number of helpers for parallel lzjb
122 - * This is also the minimum configuration.
105 + * dump_ncpu_low:
106 + * This is the minimum configuration for parallel lzjb.
107 + * A special value of 0 means that parallel dump will not be used.
123 108 *
124 - * dump_bzip2_level bzip2 compression level: 1-9
125 - * Higher numbers give greater compression, but take more memory
126 - * and time. Memory used per helper is ~(dump_bzip2_level * 1MB).
127 - *
128 - * dump_plat_mincpu the cross-over limit for using bzip2 (per platform):
129 - * if dump_plat_mincpu == 0, then always do single threaded dump
130 - * if ncpu >= dump_plat_mincpu then try to use bzip2
131 - *
132 - * dump_metrics_on if set, metrics are collected in the kernel, passed
133 - * to savecore via the dump file, and recorded by savecore in
134 - * METRICS.txt.
109 + * dump_metrics_on:
110 + * If set, metrics are collected in the kernel, passed to savecore
111 + * via the dump file, and recorded by savecore in METRICS.txt.
135 112 */
136 113 uint_t dump_ncpu_low = 4; /* minimum config for parallel lzjb */
137 -uint_t dump_bzip2_level = 1; /* bzip2 level (1-9) */
138 114
139 -/* Use dump_plat_mincpu_default unless this variable is set by /etc/system */
140 -#define MINCPU_NOT_SET ((uint_t)-1)
141 -uint_t dump_plat_mincpu = MINCPU_NOT_SET;
142 -
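
For reference, these tunables use the standard /etc/system syntax. An illustrative fragment (values are examples only; lines beginning with '*' are comments):

    * Force a single-threaded (serial) crash dump:
    set dump_ncpu_low = 0

    * Or require at least 8 CPUs before parallel lzjb is attempted:
    * set dump_ncpu_low = 8
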
143 115 /* tunables for pre-reserved heap */
144 116 uint_t dump_kmem_permap = 1024;
145 117 uint_t dump_kmem_pages = 0;
146 118
147 119 /* Define multiple buffers per helper to avoid stalling */
148 120 #define NCBUF_PER_HELPER 2
149 121 #define NCMAP_PER_HELPER 4
150 122
151 123 /* minimum number of helpers configured */
152 -#define MINHELPERS (dump_ncpu_low)
124 +#define MINHELPERS (MAX(dump_ncpu_low, 1))
153 125 #define MINCBUFS (MINHELPERS * NCBUF_PER_HELPER)
154 126
155 127 /*
156 128 * Define constant parameters.
157 129 *
158 130 * CBUF_SIZE size of an output buffer
159 131 *
160 132 * CBUF_MAPSIZE size of virtual range for mapping pages
161 133 *
162 134 * CBUF_MAPNP size of virtual range in pages
163 135 *
164 136 */
165 137 #define DUMP_1KB ((size_t)1 << 10)
166 138 #define DUMP_1MB ((size_t)1 << 20)
167 139 #define CBUF_SIZE ((size_t)1 << 17)
168 140 #define CBUF_MAPSHIFT (22)
169 141 #define CBUF_MAPSIZE ((size_t)1 << CBUF_MAPSHIFT)
170 142 #define CBUF_MAPNP ((size_t)1 << (CBUF_MAPSHIFT - PAGESHIFT))
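
In concrete terms: CBUF_SIZE is 128 KiB and CBUF_MAPSIZE is 4 MiB; assuming 4 KiB pages (PAGESHIFT = 12), CBUF_MAPNP works out to 1024 pages per mapping range.
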
171 143
172 144 /*
173 145 * Compression metrics are accumulated nano-second subtotals. The
174 146 * results are normalized by the number of pages dumped. A report is
175 147 * generated when dumpsys() completes and is saved in the dump image
176 148 * after the trailing dump header.
177 149 *
178 150 * Metrics are always collected. Set the variable dump_metrics_on to
179 151 * cause metrics to be saved in the crash file, where savecore will
180 152 * save it in the file METRICS.txt.
181 153 */
182 154 #define PERPAGES \
183 155 PERPAGE(bitmap) PERPAGE(map) PERPAGE(unmap) \
184 156 PERPAGE(copy) PERPAGE(compress) \
185 157 PERPAGE(write) \
186 158 PERPAGE(inwait) PERPAGE(outwait)
187 159
188 160 typedef struct perpage {
189 161 #define PERPAGE(x) hrtime_t x;
190 162 PERPAGES
191 163 #undef PERPAGE
192 164 } perpage_t;
193 165
194 166 /*
195 167 * This macro controls the code generation for collecting dump
196 168 * performance information. By default, the code is generated, but
197 169 * automatic saving of the information is disabled. If dump_metrics_on
198 170 * is set to 1, the timing information is passed to savecore via the
199 171 * crash file, where it is appended to the file dump-dir/METRICS.txt.
200 172 */
201 173 #define COLLECT_METRICS
202 174
203 175 #ifdef COLLECT_METRICS
204 176 uint_t dump_metrics_on = 0; /* set to 1 to enable recording metrics */
205 177
206 178 #define HRSTART(v, m) v##ts.m = gethrtime()
207 179 #define HRSTOP(v, m) v.m += gethrtime() - v##ts.m
208 180 #define HRBEGIN(v, m, s) v##ts.m = gethrtime(); v.size += s
209 181 #define HREND(v, m) v.m += gethrtime() - v##ts.m
210 182 #define HRNORM(v, m, n) v.m /= (n)
211 183
212 184 #else
213 185 #define HRSTART(v, m)
214 186 #define HRSTOP(v, m)
215 187 #define HRBEGIN(v, m, s)
216 188 #define HREND(v, m)
217 189 #define HRNORM(v, m, n)
218 190 #endif /* COLLECT_METRICS */
219 191
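
A minimal usage sketch of the HR* timing macros, assuming a caller that declares the accumulator and timestamp structs with the v/v##ts naming the macros expect (npages here is a hypothetical page count):

	perpage_t perpage = { 0 };	/* accumulated nanoseconds */
	perpage_t perpagets;		/* timestamps; name must be <v>ts */

	HRSTART(perpage, compress);	/* perpagets.compress = gethrtime() */
	/* ... compress one page ... */
	HRSTOP(perpage, compress);	/* perpage.compress += elapsed */
	HRNORM(perpage, compress, npages); /* normalize to per-page cost */
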
220 192 /*
221 193 * Buffers for copying and compressing memory pages.
222 194 *
223 195 * cbuf_t buffer controllers: used for both input and output.
224 196 *
225 197 * The buffer state indicates how it is being used:
226 198 *
227 199 * CBUF_FREEMAP: CBUF_MAPSIZE virtual address range is available for
228 200 * mapping input pages.
229 201 *
230 202 * CBUF_INREADY: input pages are mapped and ready for compression by a
231 203 * helper.
232 204 *
233 205 * CBUF_USEDMAP: mapping has been consumed by a helper. Needs unmap.
234 206 *
235 207 * CBUF_FREEBUF: CBUF_SIZE output buffer, which is available.
236 208 *
237 209 * CBUF_WRITE: CBUF_SIZE block of compressed pages from a helper,
238 210 * ready to write out.
239 211 *
240 212 * CBUF_ERRMSG: CBUF_SIZE block of error messages from a helper
241 213 * (reports UE errors.)
242 214 */
243 215
244 216 typedef enum cbufstate {
245 217 CBUF_FREEMAP,
246 218 CBUF_INREADY,
247 219 CBUF_USEDMAP,
248 220 CBUF_FREEBUF,
249 221 CBUF_WRITE,
250 222 CBUF_ERRMSG
251 223 } cbufstate_t;
252 224
253 225 typedef struct cbuf cbuf_t;
254 226
255 227 struct cbuf {
256 228 cbuf_t *next; /* next in list */
257 229 cbufstate_t state; /* processing state */
258 230 size_t used; /* amount used */
259 231 size_t size; /* mem size */
260 232 char *buf; /* kmem or vmem */
261 233 pgcnt_t pagenum; /* index to pfn map */
262 234 pgcnt_t bitnum; /* first set bitnum */
263 235 pfn_t pfn; /* first pfn in mapped range */
264 236 int off; /* byte offset to first pfn */
265 237 };
266 238
267 239 static char dump_osimage_uuid[36 + 1];
268 240
269 241 #define isdigit(ch) ((ch) >= '0' && (ch) <= '9')
270 242 #define isxdigit(ch) (isdigit(ch) || ((ch) >= 'a' && (ch) <= 'f') || \
271 243 ((ch) >= 'A' && (ch) <= 'F'))
272 244
273 245 /*
274 246 * cqueue_t queues: a uni-directional channel for communication
275 247 * from the master to helper tasks or vice-versa using put and
276 248 * get primitives. Both mappings and data buffers are passed via
277 249 * queues. Producers close a queue when done. The number of
278 250 * active producers is reference counted so the consumer can
279 251 * detect end of data. Concurrent access is mediated by atomic
280 252 * operations for panic dump, or mutex/cv for live dump.
281 253 *
282 254  * There are four queues, used as follows:
283 255 *
284 256 * Queue Dataflow NewState
285 257 * --------------------------------------------------
286 258 * mainq master -> master FREEMAP
287 259 * master has initialized or unmapped an input buffer
288 260 * --------------------------------------------------
289 261 * helperq master -> helper INREADY
290 262 * master has mapped input for use by helper
291 263 * --------------------------------------------------
292 264 * mainq master <- helper USEDMAP
293 265 * helper is done with input
294 266 * --------------------------------------------------
295 267 * freebufq master -> helper FREEBUF
296 268 * master has initialized or written an output buffer
297 269 * --------------------------------------------------
298 270 * mainq master <- helper WRITE
299 271 * block of compressed pages from a helper
300 272 * --------------------------------------------------
301 273 * mainq master <- helper ERRMSG
302 274 * error messages from a helper (memory error case)
303 275 * --------------------------------------------------
304 276 * writerq master <- master WRITE
305 277 * non-blocking queue of blocks to write
306 278 * --------------------------------------------------
307 279 */
308 280 typedef struct cqueue {
309 281 cbuf_t *volatile first; /* first in list */
310 282 cbuf_t *last; /* last in list */
311 283 hrtime_t ts; /* timestamp */
312 284 hrtime_t empty; /* total time empty */
313 285 kmutex_t mutex; /* live state lock */
314 286 kcondvar_t cv; /* live wait var */
315 287 lock_t spinlock; /* panic mode spin lock */
316 288 volatile uint_t open; /* producer ref count */
317 289 } cqueue_t;
318 290
319 291 /*
320 292 * Convenience macros for using the cqueue functions
321 293 * Note that the caller must have defined "dumpsync_t *ds"
322 294 */
323 295 #define CQ_IS_EMPTY(q) \
324 296 (ds->q.first == NULL)
325 297
326 298 #define CQ_OPEN(q) \
327 299 atomic_inc_uint(&ds->q.open)
328 300
329 301 #define CQ_CLOSE(q) \
330 302 dumpsys_close_cq(&ds->q, ds->live)
331 303
332 304 #define CQ_PUT(q, cp, st) \
333 305 dumpsys_put_cq(&ds->q, cp, st, ds->live)
334 306
335 307 #define CQ_GET(q) \
336 308 dumpsys_get_cq(&ds->q, ds->live)
337 309
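
To make the dataflow table above concrete, a hedged sketch of a helper-side loop built from these macros (illustrative only; the real helper routines later in this file also handle stream tagging, error buffers, and helper registration):

	dumpsync_t *ds = &dumpsync;	/* the CQ_* macros require "ds" */
	cbuf_t *in, *out;

	CQ_OPEN(mainq);				/* register as a producer */
	while ((in = CQ_GET(helperq)) != NULL) {
		out = CQ_GET(freebufq);		/* a free output buffer */
		/* ... compress in->buf into out->buf, set out->used ... */
		CQ_PUT(mainq, out, CBUF_WRITE);	/* block ready to write */
		CQ_PUT(mainq, in, CBUF_USEDMAP); /* mapping may be unmapped */
	}
	CQ_CLOSE(mainq);			/* this producer is done */
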
338 310 /*
339 311 * Dynamic state when dumpsys() is running.
340 312 */
341 313 typedef struct dumpsync {
342 314 pgcnt_t npages; /* subtotal of pages dumped */
343 315 pgcnt_t pages_mapped; /* subtotal of pages mapped */
344 316 pgcnt_t pages_used; /* subtotal of pages used per map */
345 317 size_t nwrite; /* subtotal of bytes written */
346 318 uint_t live; /* running live dump */
347 319 uint_t neednl; /* will need to print a newline */
348 320 uint_t percent; /* dump progress */
349 321 uint_t percent_done; /* dump progress reported */
350 322 int sec_done; /* dump progress last report time */
351 323 cqueue_t freebufq; /* free kmem bufs for writing */
352 324 cqueue_t mainq; /* input for main task */
353 325 cqueue_t helperq; /* input for helpers */
354 326 cqueue_t writerq; /* input for writer */
355 327 hrtime_t start; /* start time */
356 328 hrtime_t elapsed; /* elapsed time when completed */
357 329 hrtime_t iotime; /* time spent writing nwrite bytes */
358 330 hrtime_t iowait; /* time spent waiting for output */
359 331 hrtime_t iowaitts; /* iowait timestamp */
360 332 perpage_t perpage; /* metrics */
361 333 perpage_t perpagets;
362 334 int dumpcpu; /* master cpu */
363 335 } dumpsync_t;
364 336
365 337 static dumpsync_t dumpsync; /* synchronization vars */
366 338
367 339 /*
368 340 * helper_t helpers: contains the context for a stream. CPUs run in
369 341 * parallel at dump time; each CPU creates a single stream of
370 342 * compression data. Stream data is divided into CBUF_SIZE blocks.
371 343 * The blocks are written in order within a stream. But, blocks from
372 344 * multiple streams can be interleaved. Each stream is identified by a
373 345 * unique tag.
374 346 */
375 347 typedef struct helper {
376 348 int helper; /* bound helper id */
[ 214 lines elided ]
377 349 int tag; /* compression stream tag */
378 350 perpage_t perpage; /* per page metrics */
379 351 perpage_t perpagets; /* per page metrics (timestamps) */
380 352 taskqid_t taskqid; /* live dump task ptr */
381 353 int in, out; /* buffer offsets */
382 354 cbuf_t *cpin, *cpout, *cperr; /* cbuf objects in process */
383 355 dumpsync_t *ds; /* pointer to sync vars */
384 356 size_t used; /* counts input consumed */
385 357 char *page; /* buffer for page copy */
386 358 char *lzbuf; /* lzjb output */
387 - bz_stream bzstream; /* bzip2 state */
388 359 } helper_t;
389 360
390 361 #define MAINHELPER (-1) /* helper is also the main task */
391 362 #define FREEHELPER (-2) /* unbound helper */
392 363 #define DONEHELPER (-3) /* helper finished */
393 364
394 365 /*
395 366 * configuration vars for dumpsys
396 367 */
397 368 typedef struct dumpcfg {
398 - int threshold; /* ncpu threshold for bzip2 */
399 369 int nhelper; /* number of helpers */
400 370 int nhelper_used; /* actual number of helpers used */
401 371 int ncmap; /* number VA pages for compression */
402 372 int ncbuf; /* number of bufs for compression */
403 373 int ncbuf_used; /* number of bufs in use */
404 374 uint_t clevel; /* dump compression level */
405 375 helper_t *helper; /* array of helpers */
406 376 cbuf_t *cmap; /* array of input (map) buffers */
407 377 cbuf_t *cbuf; /* array of output buffers */
408 378 ulong_t *helpermap; /* set of dumpsys helper CPU ids */
409 379 ulong_t *bitmap; /* bitmap for marking pages to dump */
410 380 ulong_t *rbitmap; /* bitmap for used CBUF_MAPSIZE ranges */
411 381 pgcnt_t bitmapsize; /* size of bitmap */
412 382 pgcnt_t rbitmapsize; /* size of bitmap for ranges */
413 383 pgcnt_t found4m; /* number ranges allocated by dump */
414 384 pgcnt_t foundsm; /* number small pages allocated by dump */
415 385 pid_t *pids; /* list of process IDs at dump time */
416 386 size_t maxsize; /* memory size needed at dump time */
417 387 size_t maxvmsize; /* size of reserved VM */
418 388 char *maxvm; /* reserved VM for spare pages */
419 389 lock_t helper_lock; /* protect helper state */
420 390 char helpers_wanted; /* flag to enable parallelism */
421 391 } dumpcfg_t;
422 392
423 393 static dumpcfg_t dumpcfg; /* config vars */
424 394
425 395 /*
426 396 * The dump I/O buffer.
427 397 *
428 398  * There is one I/O buffer used by dumpvp_write and dumpvp_flush. It is
429 399 * sized according to the optimum device transfer speed.
430 400 */
431 401 typedef struct dumpbuf {
432 402 vnode_t *cdev_vp; /* VCHR open of the dump device */
433 403 len_t vp_limit; /* maximum write offset */
434 404 offset_t vp_off; /* current dump device offset */
435 405 char *cur; /* dump write pointer */
436 406 char *start; /* dump buffer address */
437 407 char *end; /* dump buffer end */
438 408 size_t size; /* size of dumpbuf in bytes */
439 409 size_t iosize; /* best transfer size for device */
440 410 } dumpbuf_t;
441 411
442 412 dumpbuf_t dumpbuf; /* I/O buffer */
443 413
444 414 /*
445 415 * For parallel dump, defines maximum time main task thread will wait
446 416 * for at least one helper to register in dumpcfg.helpermap, before
447 417 * assuming there are no helpers and falling back to serial mode.
448 418  * The value is chosen arbitrarily and provides a *really* long wait
449 419  * for any available helper to register.
450 420 */
451 421 #define DUMP_HELPER_MAX_WAIT 1000 /* millisec */
452 422
453 423 /*
454 424 * The dump I/O buffer must be at least one page, at most xfer_size
455 425 * bytes, and should scale with physmem in between. The transfer size
456 426 * passed in will either represent a global default (maxphys) or the
457 427 * best size for the device. The size of the dumpbuf I/O buffer is
458 428 * limited by dumpbuf_limit (8MB by default) because the dump
459 429 * performance saturates beyond a certain size. The default is to
460 430 * select 1/4096 of the memory.
461 431 */
462 432 static int dumpbuf_fraction = 12; /* memory size scale factor */
463 433 static size_t dumpbuf_limit = 8 * DUMP_1MB; /* max I/O buf size */
464 434
465 435 static size_t
466 436 dumpbuf_iosize(size_t xfer_size)
467 437 {
468 438 size_t iosize = ptob(physmem >> dumpbuf_fraction);
469 439
470 440 if (iosize < PAGESIZE)
471 441 iosize = PAGESIZE;
472 442 else if (iosize > xfer_size)
473 443 iosize = xfer_size;
474 444 if (iosize > dumpbuf_limit)
475 445 iosize = dumpbuf_limit;
476 446 return (iosize & PAGEMASK);
477 447 }
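
Worked example (assuming the default dumpbuf_fraction of 12): ptob(physmem >> 12) is roughly total memory divided by 4096, so a 16 GiB machine computes a 4 MiB I/O buffer, and from 32 GiB upward the result is clamped at the 8 MiB dumpbuf_limit.
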
478 448
479 449 /*
480 450 * resize the I/O buffer
481 451 */
482 452 static void
483 453 dumpbuf_resize(void)
484 454 {
485 455 char *old_buf = dumpbuf.start;
486 456 size_t old_size = dumpbuf.size;
487 457 char *new_buf;
488 458 size_t new_size;
489 459
490 460 ASSERT(MUTEX_HELD(&dump_lock));
491 461
492 462 new_size = dumpbuf_iosize(MAX(dumpbuf.iosize, maxphys));
493 463 if (new_size <= old_size)
494 464 return; /* no need to reallocate buffer */
[ 86 lines elided ]
495 465
496 466 new_buf = kmem_alloc(new_size, KM_SLEEP);
497 467 dumpbuf.size = new_size;
498 468 dumpbuf.start = new_buf;
499 469 dumpbuf.end = new_buf + new_size;
500 470 kmem_free(old_buf, old_size);
501 471 }
502 472
503 473 /*
504 474 * dump_update_clevel is called when dumpadm configures the dump device.
475 + * Determine the compression level / type:
476 + * - DUMP_CLEVEL_SERIAL is single-threaded lzjb
477 + * - DUMP_CLEVEL_LZJB is parallel lzjb
505 478 * Calculate number of helpers and buffers.
506 479 * Allocate the minimum configuration for now.
507 480 *
508 481 * When the dump file is configured we reserve a minimum amount of
509 482 * memory for use at crash time. But we reserve VA for all the memory
510 483 * we really want in order to do the fastest dump possible. The VA is
511 484 * backed by pages not being dumped, according to the bitmap. If
512 485 * there is insufficient spare memory, however, we fall back to the
513 486 * minimum.
514 487 *
515 488 * Live dump (savecore -L) always uses the minimum config.
516 489 *
517 - * clevel 0 is single threaded lzjb
518 - * clevel 1 is parallel lzjb
519 - * clevel 2 is parallel bzip2
520 - *
521 - * The ncpu threshold is selected with dump_plat_mincpu.
522 - * On OPL, set_platform_defaults() overrides the sun4u setting.
523 - * The actual values are defined via DUMP_PLAT_*_MINCPU macros.
524 - *
525 - * Architecture Threshold Algorithm
526 - * sun4u < 51 parallel lzjb
527 - * sun4u >= 51 parallel bzip2(*)
528 - * sun4u OPL < 8 parallel lzjb
529 - * sun4u OPL >= 8 parallel bzip2(*)
530 - * sun4v < 128 parallel lzjb
531 - * sun4v >= 128 parallel bzip2(*)
532 - * x86 < 11 parallel lzjb
533 - * x86 >= 11 parallel bzip2(*)
534 - * 32-bit N/A single-threaded lzjb
535 - *
536 - * (*) bzip2 is only chosen if there is sufficient available
537 - * memory for buffers at dump time. See dumpsys_get_maxmem().
538 - *
539 - * Faster dump devices have larger I/O buffers. The threshold value is
540 - * increased according to the size of the dump I/O buffer, because
541 - * parallel lzjb performs better with faster disks. For buffers >= 1MB
542 - * the threshold is 3X; for buffers >= 256K threshold is 2X.
543 - *
544 490 * For parallel dumps, the number of helpers is ncpu-1. The CPU
545 491 * running panic runs the main task. For single-threaded dumps, the
546 492 * panic CPU does lzjb compression (it is tagged as MAINHELPER.)
547 493 *
548 494 * Need multiple buffers per helper so that they do not block waiting
549 495 * for the main task.
550 496 * parallel single-threaded
551 497 * Number of output buffers: nhelper*2 1
552 498 * Number of mapping buffers: nhelper*4 1
553 499 *
554 500 */
555 501 static void
556 502 dump_update_clevel()
557 503 {
558 504 int tag;
559 - size_t bz2size;
560 505 helper_t *hp, *hpend;
561 506 cbuf_t *cp, *cpend;
562 507 dumpcfg_t *old = &dumpcfg;
563 508 dumpcfg_t newcfg = *old;
564 509 dumpcfg_t *new = &newcfg;
565 510
566 511 ASSERT(MUTEX_HELD(&dump_lock));
567 512
568 513 /*
569 514 * Free the previously allocated bufs and VM.
570 515 */
571 516 if (old->helper != NULL) {
572 517
573 518 /* helpers */
574 519 hpend = &old->helper[old->nhelper];
575 520 for (hp = old->helper; hp != hpend; hp++) {
576 521 if (hp->lzbuf != NULL)
577 522 kmem_free(hp->lzbuf, PAGESIZE);
578 523 if (hp->page != NULL)
579 524 kmem_free(hp->page, PAGESIZE);
580 525 }
581 526 kmem_free(old->helper, old->nhelper * sizeof (helper_t));
582 527
583 528 /* VM space for mapping pages */
584 529 cpend = &old->cmap[old->ncmap];
585 530 for (cp = old->cmap; cp != cpend; cp++)
586 531 vmem_xfree(heap_arena, cp->buf, CBUF_MAPSIZE);
587 532 kmem_free(old->cmap, old->ncmap * sizeof (cbuf_t));
588 533
589 534 /* output bufs */
590 535 cpend = &old->cbuf[old->ncbuf];
591 536 for (cp = old->cbuf; cp != cpend; cp++)
592 537 if (cp->buf != NULL)
593 538 kmem_free(cp->buf, cp->size);
594 539 kmem_free(old->cbuf, old->ncbuf * sizeof (cbuf_t));
595 540
596 541 /* reserved VM for dumpsys_get_maxmem */
597 542 if (old->maxvmsize > 0)
598 543 vmem_xfree(heap_arena, old->maxvm, old->maxvmsize);
599 544 }
600 545
601 546 /*
[ 32 lines elided ]
602 547 * Allocate memory and VM.
603 548 * One CPU runs dumpsys, the rest are helpers.
604 549 */
605 550 new->nhelper = ncpus - 1;
606 551 if (new->nhelper < 1)
607 552 new->nhelper = 1;
608 553
609 554 if (new->nhelper > DUMP_MAX_NHELPER)
610 555 new->nhelper = DUMP_MAX_NHELPER;
611 556
612 - /* use platform default, unless /etc/system overrides */
613 - if (dump_plat_mincpu == MINCPU_NOT_SET)
614 - dump_plat_mincpu = dump_plat_mincpu_default;
615 -
616 - /* increase threshold for faster disks */
617 - new->threshold = dump_plat_mincpu;
618 - if (dumpbuf.iosize >= DUMP_1MB)
619 - new->threshold *= 3;
620 - else if (dumpbuf.iosize >= (256 * DUMP_1KB))
621 - new->threshold *= 2;
622 -
623 - /* figure compression level based upon the computed threshold. */
624 - if (dump_plat_mincpu == 0 || new->nhelper < 2) {
625 - new->clevel = 0;
557 + /* If dump_ncpu_low is 0 or greater than ncpus, do serial dump */
558 + if (dump_ncpu_low == 0 || dump_ncpu_low > ncpus || new->nhelper < 2) {
559 + new->clevel = DUMP_CLEVEL_SERIAL;
626 560 new->nhelper = 1;
627 - } else if ((new->nhelper + 1) >= new->threshold) {
628 - new->clevel = DUMP_CLEVEL_BZIP2;
629 - } else {
630 - new->clevel = DUMP_CLEVEL_LZJB;
631 - }
632 -
633 - if (new->clevel == 0) {
634 561 new->ncbuf = 1;
635 562 new->ncmap = 1;
636 563 } else {
564 + new->clevel = DUMP_CLEVEL_LZJB;
637 565 new->ncbuf = NCBUF_PER_HELPER * new->nhelper;
638 566 new->ncmap = NCMAP_PER_HELPER * new->nhelper;
639 567 }
640 568
641 569 /*
642 570 * Allocate new data structures and buffers for MINHELPERS,
643 571 * and also figure the max desired size.
644 572 */
645 - bz2size = BZ2_bzCompressInitSize(dump_bzip2_level);
646 573 new->maxsize = 0;
647 574 new->maxvmsize = 0;
648 575 new->maxvm = NULL;
649 576 tag = 1;
650 577 new->helper = kmem_zalloc(new->nhelper * sizeof (helper_t), KM_SLEEP);
651 578 hpend = &new->helper[new->nhelper];
652 579 for (hp = new->helper; hp != hpend; hp++) {
653 580 hp->tag = tag++;
654 581 if (hp < &new->helper[MINHELPERS]) {
655 582 hp->lzbuf = kmem_alloc(PAGESIZE, KM_SLEEP);
656 583 hp->page = kmem_alloc(PAGESIZE, KM_SLEEP);
657 - } else if (new->clevel < DUMP_CLEVEL_BZIP2) {
584 + } else {
658 585 new->maxsize += 2 * PAGESIZE;
659 - } else {
660 - new->maxsize += PAGESIZE;
661 586 }
662 - if (new->clevel >= DUMP_CLEVEL_BZIP2)
663 - new->maxsize += bz2size;
664 587 }
665 588
666 589 new->cbuf = kmem_zalloc(new->ncbuf * sizeof (cbuf_t), KM_SLEEP);
667 590 cpend = &new->cbuf[new->ncbuf];
668 591 for (cp = new->cbuf; cp != cpend; cp++) {
669 592 cp->state = CBUF_FREEBUF;
670 593 cp->size = CBUF_SIZE;
671 594 if (cp < &new->cbuf[MINCBUFS])
672 595 cp->buf = kmem_alloc(cp->size, KM_SLEEP);
673 596 else
674 597 new->maxsize += cp->size;
675 598 }
676 599
677 600 new->cmap = kmem_zalloc(new->ncmap * sizeof (cbuf_t), KM_SLEEP);
678 601 cpend = &new->cmap[new->ncmap];
679 602 for (cp = new->cmap; cp != cpend; cp++) {
680 603 cp->state = CBUF_FREEMAP;
681 604 cp->size = CBUF_MAPSIZE;
682 605 cp->buf = vmem_xalloc(heap_arena, CBUF_MAPSIZE, CBUF_MAPSIZE,
683 606 0, 0, NULL, NULL, VM_SLEEP);
684 607 }
685 608
686 609 /* reserve VA to be backed with spare pages at crash time */
687 610 if (new->maxsize > 0) {
688 611 new->maxsize = P2ROUNDUP(new->maxsize, PAGESIZE);
689 612 new->maxvmsize = P2ROUNDUP(new->maxsize, CBUF_MAPSIZE);
690 613 new->maxvm = vmem_xalloc(heap_arena, new->maxvmsize,
691 614 CBUF_MAPSIZE, 0, 0, NULL, NULL, VM_SLEEP);
692 615 }
693 616
694 617 /*
695 618 * Reserve memory for kmem allocation calls made during crash dump. The
696 619 * hat layer allocates memory for each mapping created, and the I/O path
697 620 * allocates buffers and data structs.
698 621 *
699 622 * On larger systems, we easily exceed the lower amount, so we need some
700 623 * more space; the cut-over point is relatively arbitrary. If we run
701 624 * out, the only impact is that kmem state in the dump becomes
702 625 * inconsistent.
703 626 */
704 627
705 628 if (dump_kmem_pages == 0) {
706 629 if (physmem > (16 * ONE_GIG) / PAGESIZE)
707 630 dump_kmem_pages = 20;
708 631 else
709 632 dump_kmem_pages = 8;
710 633 }
711 634
712 635 kmem_dump_init((new->ncmap * dump_kmem_permap) +
713 636 (dump_kmem_pages * PAGESIZE));
714 637
715 638 /* set new config pointers */
716 639 *old = *new;
717 640 }
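
To illustrate the sizing math (hypothetical 16-CPU machine, default dump_ncpu_low of 4): nhelper = ncpus - 1 = 15 with clevel = DUMP_CLEVEL_LZJB, giving ncbuf = 2 * 15 = 30 output buffers and ncmap = 4 * 15 = 60 mapping ranges. Only MINHELPERS (4) helpers and MINCBUFS (8) output buffers are preallocated here; the rest is tallied into maxsize and backed with spare pages at crash time by dumpsys_get_maxmem().
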
718 641
719 642 /*
720 643 * Define a struct memlist walker to optimize bitnum to pfn
721 644 * lookup. The walker maintains the state of the list traversal.
722 645 */
723 646 typedef struct dumpmlw {
724 647 struct memlist *mp; /* current memlist */
725 648 pgcnt_t basenum; /* bitnum base offset */
726 649 pgcnt_t mppages; /* current memlist size */
727 650 pgcnt_t mpleft; /* size to end of current memlist */
728 651 pfn_t mpaddr; /* first pfn in memlist */
729 652 } dumpmlw_t;
730 653
731 654 /* initialize the walker */
732 655 static inline void
733 656 dump_init_memlist_walker(dumpmlw_t *pw)
734 657 {
735 658 pw->mp = phys_install;
736 659 pw->basenum = 0;
737 660 pw->mppages = pw->mp->ml_size >> PAGESHIFT;
738 661 pw->mpleft = pw->mppages;
739 662 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT;
740 663 }
741 664
742 665 /*
743 666 * Lookup pfn given bitnum. The memlist can be quite long on some
744 667 * systems (e.g.: one per board). To optimize sequential lookups, the
745 668 * caller initializes and presents a memlist walker.
746 669 */
747 670 static pfn_t
748 671 dump_bitnum_to_pfn(pgcnt_t bitnum, dumpmlw_t *pw)
749 672 {
750 673 bitnum -= pw->basenum;
751 674 while (pw->mp != NULL) {
752 675 if (bitnum < pw->mppages) {
753 676 pw->mpleft = pw->mppages - bitnum;
754 677 return (pw->mpaddr + bitnum);
755 678 }
756 679 bitnum -= pw->mppages;
757 680 pw->basenum += pw->mppages;
758 681 pw->mp = pw->mp->ml_next;
759 682 if (pw->mp != NULL) {
760 683 pw->mppages = pw->mp->ml_size >> PAGESHIFT;
761 684 pw->mpleft = pw->mppages;
762 685 pw->mpaddr = pw->mp->ml_address >> PAGESHIFT;
763 686 }
764 687 }
765 688 return (PFN_INVALID);
766 689 }
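
A minimal sketch of the sequential-lookup pattern the walker is built for (mirroring the scan loops in dumpsys_get_maxmem() below):

	dumpmlw_t mlw;
	pgcnt_t bitnum;
	pfn_t pfn;

	dump_init_memlist_walker(&mlw);
	for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
		pfn = dump_bitnum_to_pfn(bitnum, &mlw);	/* amortized O(1) */
		if (pfn == PFN_INVALID)
			break;
		/* ... inspect pfn ... */
	}
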
767 690
768 691 static pgcnt_t
769 692 dump_pfn_to_bitnum(pfn_t pfn)
770 693 {
771 694 struct memlist *mp;
772 695 pgcnt_t bitnum = 0;
773 696
774 697 for (mp = phys_install; mp != NULL; mp = mp->ml_next) {
775 698 if (pfn >= (mp->ml_address >> PAGESHIFT) &&
776 699 pfn < ((mp->ml_address + mp->ml_size) >> PAGESHIFT))
777 700 return (bitnum + pfn - (mp->ml_address >> PAGESHIFT));
778 701 bitnum += mp->ml_size >> PAGESHIFT;
779 702 }
780 703 return ((pgcnt_t)-1);
781 704 }
782 705
783 706 /*
784 707 * Set/test bitmap for a CBUF_MAPSIZE range which includes pfn. The
785 708 * mapping of pfn to range index is imperfect because pfn and bitnum
786 709 * do not have the same phase. To make sure a CBUF_MAPSIZE range is
787 710 * covered, call this for both ends:
788 711 * dump_set_used(base)
789 712 * dump_set_used(base+CBUF_MAPNP-1)
790 713 *
791 714 * This is used during a panic dump to mark pages allocated by
792 715 * dumpsys_get_maxmem(). The macro IS_DUMP_PAGE(pp) is used by
793 716 * page_get_mnode_freelist() to make sure pages used by dump are never
794 717 * allocated.
795 718 */
796 719 #define CBUF_MAPP2R(pfn) ((pfn) >> (CBUF_MAPSHIFT - PAGESHIFT))
797 720
798 721 static void
799 722 dump_set_used(pfn_t pfn)
800 723 {
801 724
802 725 pgcnt_t bitnum, rbitnum;
803 726
804 727 bitnum = dump_pfn_to_bitnum(pfn);
805 728 ASSERT(bitnum != (pgcnt_t)-1);
806 729
807 730 rbitnum = CBUF_MAPP2R(bitnum);
808 731 ASSERT(rbitnum < dumpcfg.rbitmapsize);
809 732
810 733 BT_SET(dumpcfg.rbitmap, rbitnum);
811 734 }
812 735
813 736 int
814 737 dump_test_used(pfn_t pfn)
815 738 {
816 739 pgcnt_t bitnum, rbitnum;
817 740
[ 144 lines elided ]
818 741 bitnum = dump_pfn_to_bitnum(pfn);
819 742 ASSERT(bitnum != (pgcnt_t)-1);
820 743
821 744 rbitnum = CBUF_MAPP2R(bitnum);
822 745 ASSERT(rbitnum < dumpcfg.rbitmapsize);
823 746
824 747 return (BT_TEST(dumpcfg.rbitmap, rbitnum));
825 748 }
826 749
827 750 /*
828 - * dumpbzalloc and dumpbzfree are callbacks from the bzip2 library.
829 - * dumpsys_get_maxmem() uses them for BZ2_bzCompressInit().
830 - */
831 -static void *
832 -dumpbzalloc(void *opaque, int items, int size)
833 -{
834 - size_t *sz;
835 - char *ret;
836 -
837 - ASSERT(opaque != NULL);
838 - sz = opaque;
839 - ret = dumpcfg.maxvm + *sz;
840 - *sz += items * size;
841 - *sz = P2ROUNDUP(*sz, BZ2_BZALLOC_ALIGN);
842 - ASSERT(*sz <= dumpcfg.maxvmsize);
843 - return (ret);
844 -}
845 -
846 -/*ARGSUSED*/
847 -static void
848 -dumpbzfree(void *opaque, void *addr)
849 -{
850 -}
851 -
852 -/*
853 751 * Perform additional checks on the page to see if we can really use
854 752 * it. The kernel (kas) pages are always set in the bitmap. However,
855 753 * boot memory pages (prom_ppages or P_BOOTPAGES) are not in the
856 754 * bitmap. So we check for them.
857 755 */
858 756 static inline int
859 757 dump_pfn_check(pfn_t pfn)
860 758 {
861 759 page_t *pp = page_numtopp_nolock(pfn);
862 760 if (pp == NULL || pp->p_pagenum != pfn ||
863 761 #if defined(__sparc)
864 762 pp->p_vnode == &promvp ||
865 763 #else
866 764 PP_ISBOOTPAGES(pp) ||
867 765 #endif
868 766 pp->p_toxic != 0)
869 767 return (0);
870 768 return (1);
871 769 }
872 770
873 771 /*
874 772 * Check a range to see if all contained pages are available and
875 773 * return non-zero if the range can be used.
876 774 */
877 775 static inline int
878 776 dump_range_check(pgcnt_t start, pgcnt_t end, pfn_t pfn)
879 777 {
880 778 for (; start < end; start++, pfn++) {
[ 18 lines elided ]
881 779 if (BT_TEST(dumpcfg.bitmap, start))
882 780 return (0);
883 781 if (!dump_pfn_check(pfn))
884 782 return (0);
885 783 }
886 784 return (1);
887 785 }
888 786
889 787 /*
890 788 * dumpsys_get_maxmem() is called during panic. Find unused ranges
891 - * and use them for buffers. If we find enough memory switch to
892 - * parallel bzip2, otherwise use parallel lzjb.
893 - *
789 + * and use them for buffers.
894 790 * It searches the dump bitmap in 2 passes. The first time it looks
895 791 * for CBUF_MAPSIZE ranges. On the second pass it uses small pages.
896 792 */
897 793 static void
898 794 dumpsys_get_maxmem()
899 795 {
900 796 dumpcfg_t *cfg = &dumpcfg;
901 797 cbuf_t *endcp = &cfg->cbuf[cfg->ncbuf];
902 798 helper_t *endhp = &cfg->helper[cfg->nhelper];
903 799 pgcnt_t bitnum, end;
904 - size_t sz, endsz, bz2size;
800 + size_t sz, endsz;
905 801 pfn_t pfn, off;
906 802 cbuf_t *cp;
907 - helper_t *hp, *ohp;
803 + helper_t *hp;
908 804 dumpmlw_t mlw;
909 805 int k;
910 806
911 807 /*
912 - * Setting dump_plat_mincpu to 0 at any time forces a serial
913 - * dump.
808 + * Setting dump_ncpu_low to 0 forces a single-threaded dump.
914 809 */
915 - if (dump_plat_mincpu == 0) {
916 - cfg->clevel = 0;
810 + if (dump_ncpu_low == 0) {
811 + cfg->clevel = DUMP_CLEVEL_SERIAL;
917 812 return;
918 813 }
919 814
920 815 /*
921 816 * There may be no point in looking for spare memory. If
922 817 * dumping all memory, then none is spare. If doing a serial
923 818 * dump, then already have buffers.
924 819 */
925 - if (cfg->maxsize == 0 || cfg->clevel < DUMP_CLEVEL_LZJB ||
820 + if (cfg->maxsize == 0 || cfg->clevel == DUMP_CLEVEL_SERIAL ||
926 821 (dump_conflags & DUMP_ALL) != 0) {
927 - if (cfg->clevel > DUMP_CLEVEL_LZJB)
928 - cfg->clevel = DUMP_CLEVEL_LZJB;
929 822 return;
930 823 }
931 824
932 825 sz = 0;
933 826 cfg->found4m = 0;
934 827 cfg->foundsm = 0;
935 828
936 829 /* bitmap of ranges used to estimate which pfns are being used */
937 830 bzero(dumpcfg.rbitmap, BT_SIZEOFMAP(dumpcfg.rbitmapsize));
938 831
939 832 /* find ranges that are not being dumped to use for buffers */
940 833 dump_init_memlist_walker(&mlw);
941 834 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum = end) {
942 835 dump_timeleft = dump_timeout;
943 836 end = bitnum + CBUF_MAPNP;
944 837 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
945 838 ASSERT(pfn != PFN_INVALID);
946 839
947 840 /* skip partial range at end of mem segment */
948 841 if (mlw.mpleft < CBUF_MAPNP) {
949 842 end = bitnum + mlw.mpleft;
950 843 continue;
951 844 }
952 845
953 846 /* skip non aligned pages */
954 847 off = P2PHASE(pfn, CBUF_MAPNP);
955 848 if (off != 0) {
956 849 end -= off;
957 850 continue;
958 851 }
959 852
960 853 if (!dump_range_check(bitnum, end, pfn))
961 854 continue;
962 855
963 856 ASSERT((sz + CBUF_MAPSIZE) <= cfg->maxvmsize);
964 857 hat_devload(kas.a_hat, cfg->maxvm + sz, CBUF_MAPSIZE, pfn,
965 858 PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
966 859 sz += CBUF_MAPSIZE;
967 860 cfg->found4m++;
968 861
969 862 /* set the bitmap for both ends to be sure to cover the range */
970 863 dump_set_used(pfn);
971 864 dump_set_used(pfn + CBUF_MAPNP - 1);
972 865
973 866 if (sz >= cfg->maxsize)
974 867 goto foundmax;
975 868 }
976 869
977 870 /* Add small pages if we can't find enough large pages. */
978 871 dump_init_memlist_walker(&mlw);
979 872 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum = end) {
980 873 dump_timeleft = dump_timeout;
981 874 end = bitnum + CBUF_MAPNP;
982 875 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
983 876 ASSERT(pfn != PFN_INVALID);
984 877
985 878 /* Find any non-aligned pages at start and end of segment. */
986 879 off = P2PHASE(pfn, CBUF_MAPNP);
987 880 if (mlw.mpleft < CBUF_MAPNP) {
988 881 end = bitnum + mlw.mpleft;
989 882 } else if (off != 0) {
990 883 end -= off;
991 884 } else if (cfg->found4m && dump_test_used(pfn)) {
992 885 continue;
993 886 }
994 887
995 888 for (; bitnum < end; bitnum++, pfn++) {
996 889 dump_timeleft = dump_timeout;
997 890 if (BT_TEST(dumpcfg.bitmap, bitnum))
998 891 continue;
999 892 if (!dump_pfn_check(pfn))
1000 893 continue;
1001 894 ASSERT((sz + PAGESIZE) <= cfg->maxvmsize);
[ 63 lines elided ]
1002 895 hat_devload(kas.a_hat, cfg->maxvm + sz, PAGESIZE, pfn,
1003 896 PROT_READ | PROT_WRITE, HAT_LOAD_NOCONSIST);
1004 897 sz += PAGESIZE;
1005 898 cfg->foundsm++;
1006 899 dump_set_used(pfn);
1007 900 if (sz >= cfg->maxsize)
1008 901 goto foundmax;
1009 902 }
1010 903 }
1011 904
1012 - /* Fall back to lzjb if we did not get enough memory for bzip2. */
1013 - endsz = (cfg->maxsize * cfg->threshold) / cfg->nhelper;
1014 - if (sz < endsz) {
1015 - cfg->clevel = DUMP_CLEVEL_LZJB;
1016 - }
1017 -
1018 905 /* Allocate memory for as many helpers as we can. */
1019 906 foundmax:
1020 907
1021 908 /* Byte offsets into memory found and mapped above */
1022 909 endsz = sz;
1023 910 sz = 0;
1024 911
1025 - /* Set the size for bzip2 state. Only bzip2 needs it. */
1026 - bz2size = BZ2_bzCompressInitSize(dump_bzip2_level);
1027 -
1028 912 /* Skip the preallocate output buffers. */
1029 913 cp = &cfg->cbuf[MINCBUFS];
1030 914
1031 - /* Use this to move memory up from the preallocated helpers. */
1032 - ohp = cfg->helper;
1033 -
1034 915 /* Loop over all helpers and allocate memory. */
1035 916 for (hp = cfg->helper; hp < endhp; hp++) {
1036 917
1037 918 /* Skip preallocated helpers by checking hp->page. */
1038 919 if (hp->page == NULL) {
1039 - if (cfg->clevel <= DUMP_CLEVEL_LZJB) {
1040 - /* lzjb needs 2 1-page buffers */
1041 - if ((sz + (2 * PAGESIZE)) > endsz)
1042 - break;
1043 - hp->page = cfg->maxvm + sz;
1044 - sz += PAGESIZE;
1045 - hp->lzbuf = cfg->maxvm + sz;
1046 - sz += PAGESIZE;
1047 -
1048 - } else if (ohp->lzbuf != NULL) {
1049 - /* re-use the preallocted lzjb page for bzip2 */
1050 - hp->page = ohp->lzbuf;
1051 - ohp->lzbuf = NULL;
1052 - ++ohp;
1053 -
1054 - } else {
1055 - /* bzip2 needs a 1-page buffer */
1056 - if ((sz + PAGESIZE) > endsz)
1057 - break;
1058 - hp->page = cfg->maxvm + sz;
1059 - sz += PAGESIZE;
1060 - }
920 + /* lzjb needs 2 1-page buffers */
921 + if ((sz + (2 * PAGESIZE)) > endsz)
922 + break;
923 + hp->page = cfg->maxvm + sz;
924 + sz += PAGESIZE;
925 + hp->lzbuf = cfg->maxvm + sz;
926 + sz += PAGESIZE;
1061 927 }
1062 928
1063 929 /*
1064 930 * Add output buffers per helper. The number of
1065 931 * buffers per helper is determined by the ratio of
1066 932 * ncbuf to nhelper.
1067 933 */
1068 934 for (k = 0; cp < endcp && (sz + CBUF_SIZE) <= endsz &&
1069 935 k < NCBUF_PER_HELPER; k++) {
1070 936 cp->state = CBUF_FREEBUF;
1071 937 cp->size = CBUF_SIZE;
1072 938 cp->buf = cfg->maxvm + sz;
1073 939 sz += CBUF_SIZE;
1074 940 ++cp;
1075 941 }
1076 -
1077 - /*
1078 - * bzip2 needs compression state. Use the dumpbzalloc
1079 - * and dumpbzfree callbacks to allocate the memory.
1080 - * bzip2 does allocation only at init time.
1081 - */
1082 - if (cfg->clevel >= DUMP_CLEVEL_BZIP2) {
1083 - if ((sz + bz2size) > endsz) {
1084 - hp->page = NULL;
1085 - break;
1086 - } else {
1087 - hp->bzstream.opaque = &sz;
1088 - hp->bzstream.bzalloc = dumpbzalloc;
1089 - hp->bzstream.bzfree = dumpbzfree;
1090 - (void) BZ2_bzCompressInit(&hp->bzstream,
1091 - dump_bzip2_level, 0, 0);
1092 - hp->bzstream.opaque = NULL;
1093 - }
1094 - }
1095 942 }
1096 943
1097 944 /* Finish allocating output buffers */
1098 945 for (; cp < endcp && (sz + CBUF_SIZE) <= endsz; cp++) {
1099 946 cp->state = CBUF_FREEBUF;
1100 947 cp->size = CBUF_SIZE;
1101 948 cp->buf = cfg->maxvm + sz;
1102 949 sz += CBUF_SIZE;
1103 950 }
1104 951
1105 952 /* Enable IS_DUMP_PAGE macro, which checks for pages we took. */
1106 953 if (cfg->found4m || cfg->foundsm)
1107 954 dump_check_used = 1;
1108 955
1109 956 ASSERT(sz <= endsz);
1110 957 }
1111 958
1112 959 static void
1113 960 dumphdr_init(void)
1114 961 {
1115 962 pgcnt_t npages = 0;
1116 963
1117 964 ASSERT(MUTEX_HELD(&dump_lock));
1118 965
1119 966 if (dumphdr == NULL) {
1120 967 dumphdr = kmem_zalloc(sizeof (dumphdr_t), KM_SLEEP);
1121 968 dumphdr->dump_magic = DUMP_MAGIC;
1122 969 dumphdr->dump_version = DUMP_VERSION;
1123 970 dumphdr->dump_wordsize = DUMP_WORDSIZE;
1124 971 dumphdr->dump_pageshift = PAGESHIFT;
1125 972 dumphdr->dump_pagesize = PAGESIZE;
1126 973 dumphdr->dump_utsname = utsname;
1127 974 (void) strcpy(dumphdr->dump_platform, platform);
1128 975 dumpbuf.size = dumpbuf_iosize(maxphys);
1129 976 dumpbuf.start = kmem_alloc(dumpbuf.size, KM_SLEEP);
1130 977 dumpbuf.end = dumpbuf.start + dumpbuf.size;
1131 978 dumpcfg.pids = kmem_alloc(v.v_proc * sizeof (pid_t), KM_SLEEP);
1132 979 dumpcfg.helpermap = kmem_zalloc(BT_SIZEOFMAP(NCPU), KM_SLEEP);
1133 980 LOCK_INIT_HELD(&dumpcfg.helper_lock);
1134 981 dump_stack_scratch = kmem_alloc(STACK_BUF_SIZE, KM_SLEEP);
1135 982 (void) strncpy(dumphdr->dump_uuid, dump_get_uuid(),
1136 983 sizeof (dumphdr->dump_uuid));
1137 984 }
1138 985
1139 986 npages = num_phys_pages();
1140 987
1141 988 if (dumpcfg.bitmapsize != npages) {
1142 989 size_t rlen = CBUF_MAPP2R(P2ROUNDUP(npages, CBUF_MAPNP));
1143 990 void *map = kmem_alloc(BT_SIZEOFMAP(npages), KM_SLEEP);
1144 991 void *rmap = kmem_alloc(BT_SIZEOFMAP(rlen), KM_SLEEP);
1145 992
1146 993 if (dumpcfg.bitmap != NULL)
1147 994 kmem_free(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.
1148 995 bitmapsize));
1149 996 if (dumpcfg.rbitmap != NULL)
1150 997 kmem_free(dumpcfg.rbitmap, BT_SIZEOFMAP(dumpcfg.
1151 998 rbitmapsize));
1152 999 dumpcfg.bitmap = map;
1153 1000 dumpcfg.bitmapsize = npages;
1154 1001 dumpcfg.rbitmap = rmap;
1155 1002 dumpcfg.rbitmapsize = rlen;
1156 1003 }
1157 1004 }
1158 1005
1159 1006 /*
1160 1007 * Establish a new dump device.
1161 1008 */
1162 1009 int
1163 1010 dumpinit(vnode_t *vp, char *name, int justchecking)
1164 1011 {
1165 1012 vnode_t *cvp;
1166 1013 vattr_t vattr;
1167 1014 vnode_t *cdev_vp;
1168 1015 int error = 0;
1169 1016
1170 1017 ASSERT(MUTEX_HELD(&dump_lock));
1171 1018
1172 1019 dumphdr_init();
1173 1020
1174 1021 cvp = common_specvp(vp);
1175 1022 if (cvp == dumpvp)
1176 1023 return (0);
1177 1024
1178 1025 /*
1179 1026 * Determine whether this is a plausible dump device. We want either:
1180 1027 * (1) a real device that's not mounted and has a cb_dump routine, or
1181 1028 * (2) a swapfile on some filesystem that has a vop_dump routine.
1182 1029 */
1183 1030 if ((error = VOP_OPEN(&cvp, FREAD | FWRITE, kcred, NULL)) != 0)
1184 1031 return (error);
1185 1032
1186 1033 vattr.va_mask = AT_SIZE | AT_TYPE | AT_RDEV;
1187 1034 if ((error = VOP_GETATTR(cvp, &vattr, 0, kcred, NULL)) == 0) {
1188 1035 if (vattr.va_type == VBLK || vattr.va_type == VCHR) {
1189 1036 if (devopsp[getmajor(vattr.va_rdev)]->
1190 1037 devo_cb_ops->cb_dump == nodev)
1191 1038 error = ENOTSUP;
1192 1039 else if (vfs_devismounted(vattr.va_rdev))
1193 1040 error = EBUSY;
1194 1041 if (strcmp(ddi_driver_name(VTOS(cvp)->s_dip),
1195 1042 ZFS_DRIVER) == 0 &&
1196 1043 IS_SWAPVP(common_specvp(cvp)))
1197 1044 error = EBUSY;
1198 1045 } else {
1199 1046 if (vn_matchopval(cvp, VOPNAME_DUMP, fs_nosys) ||
1200 1047 !IS_SWAPVP(cvp))
1201 1048 error = ENOTSUP;
1202 1049 }
1203 1050 }
1204 1051
1205 1052 if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE)
1206 1053 error = ENOSPC;
1207 1054
1208 1055 if (error || justchecking) {
1209 1056 (void) VOP_CLOSE(cvp, FREAD | FWRITE, 1, (offset_t)0,
1210 1057 kcred, NULL);
1211 1058 return (error);
1212 1059 }
1213 1060
1214 1061 VN_HOLD(cvp);
1215 1062
1216 1063 if (dumpvp != NULL)
1217 1064 dumpfini(); /* unconfigure the old dump device */
1218 1065
1219 1066 dumpvp = cvp;
1220 1067 dumpvp_size = vattr.va_size & -DUMP_OFFSET;
1221 1068 dumppath = kmem_alloc(strlen(name) + 1, KM_SLEEP);
1222 1069 (void) strcpy(dumppath, name);
1223 1070 dumpbuf.iosize = 0;
1224 1071
1225 1072 /*
1226 1073 * If the dump device is a block device, attempt to open up the
1227 1074 * corresponding character device and determine its maximum transfer
1228 1075 * size. We use this information to potentially resize dumpbuf to a
1229 1076 * larger and more optimal size for performing i/o to the dump device.
1230 1077 */
1231 1078 if (cvp->v_type == VBLK &&
1232 1079 (cdev_vp = makespecvp(VTOS(cvp)->s_dev, VCHR)) != NULL) {
1233 1080 if (VOP_OPEN(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
1234 1081 size_t blk_size;
1235 1082 struct dk_cinfo dki;
1236 1083 struct dk_minfo minf;
1237 1084
1238 1085 if (VOP_IOCTL(cdev_vp, DKIOCGMEDIAINFO,
1239 1086 (intptr_t)&minf, FKIOCTL, kcred, NULL, NULL)
1240 1087 == 0 && minf.dki_lbsize != 0)
1241 1088 blk_size = minf.dki_lbsize;
1242 1089 else
1243 1090 blk_size = DEV_BSIZE;
1244 1091
1245 1092 if (VOP_IOCTL(cdev_vp, DKIOCINFO, (intptr_t)&dki,
1246 1093 FKIOCTL, kcred, NULL, NULL) == 0) {
1247 1094 dumpbuf.iosize = dki.dki_maxtransfer * blk_size;
1248 1095 dumpbuf_resize();
1249 1096 }
1250 1097 /*
1251 1098 * If we are working with a zvol then dumpify it
1252 1099 * if it's not being used as swap.
1253 1100 */
1254 1101 if (strcmp(dki.dki_dname, ZVOL_DRIVER) == 0) {
1255 1102 if (IS_SWAPVP(common_specvp(cvp)))
1256 1103 error = EBUSY;
1257 1104 else if ((error = VOP_IOCTL(cdev_vp,
1258 1105 DKIOCDUMPINIT, NULL, FKIOCTL, kcred,
1259 1106 NULL, NULL)) != 0)
1260 1107 dumpfini();
1261 1108 }
1262 1109
1263 1110 (void) VOP_CLOSE(cdev_vp, FREAD | FWRITE, 1, 0,
1264 1111 kcred, NULL);
1265 1112 }
1266 1113
1267 1114 VN_RELE(cdev_vp);
1268 1115 }
1269 1116
1270 1117 cmn_err(CE_CONT, "?dump on %s size %llu MB\n", name, dumpvp_size >> 20);
1271 1118
1272 1119 dump_update_clevel();
1273 1120
1274 1121 return (error);
1275 1122 }
1276 1123
1277 1124 void
1278 1125 dumpfini(void)
1279 1126 {
1280 1127 vattr_t vattr;
1281 1128 boolean_t is_zfs = B_FALSE;
1282 1129 vnode_t *cdev_vp;
1283 1130 ASSERT(MUTEX_HELD(&dump_lock));
1284 1131
1285 1132 kmem_free(dumppath, strlen(dumppath) + 1);
1286 1133
1287 1134 /*
1288 1135 * Determine if we are using zvols for our dump device
1289 1136 */
1290 1137 vattr.va_mask = AT_RDEV;
1291 1138 if (VOP_GETATTR(dumpvp, &vattr, 0, kcred, NULL) == 0) {
1292 1139 is_zfs = (getmajor(vattr.va_rdev) ==
1293 1140 ddi_name_to_major(ZFS_DRIVER)) ? B_TRUE : B_FALSE;
1294 1141 }
1295 1142
1296 1143 /*
1297 1144 * If we have a zvol dump device then we call into zfs so
1298 1145 * that it may have a chance to cleanup.
1299 1146 */
1300 1147 if (is_zfs &&
1301 1148 (cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR)) != NULL) {
1302 1149 if (VOP_OPEN(&cdev_vp, FREAD | FWRITE, kcred, NULL) == 0) {
1303 1150 (void) VOP_IOCTL(cdev_vp, DKIOCDUMPFINI, NULL, FKIOCTL,
1304 1151 kcred, NULL, NULL);
1305 1152 (void) VOP_CLOSE(cdev_vp, FREAD | FWRITE, 1, 0,
1306 1153 kcred, NULL);
1307 1154 }
1308 1155 VN_RELE(cdev_vp);
1309 1156 }
1310 1157
1311 1158 (void) VOP_CLOSE(dumpvp, FREAD | FWRITE, 1, (offset_t)0, kcred, NULL);
1312 1159
1313 1160 VN_RELE(dumpvp);
1314 1161
1315 1162 dumpvp = NULL;
1316 1163 dumpvp_size = 0;
1317 1164 dumppath = NULL;
1318 1165 }
1319 1166
1320 1167 static offset_t
1321 1168 dumpvp_flush(void)
1322 1169 {
1323 1170 size_t size = P2ROUNDUP(dumpbuf.cur - dumpbuf.start, PAGESIZE);
1324 1171 hrtime_t iotime;
1325 1172 int err;
1326 1173
1327 1174 if (dumpbuf.vp_off + size > dumpbuf.vp_limit) {
1328 1175 dump_ioerr = ENOSPC;
1329 1176 dumpbuf.vp_off = dumpbuf.vp_limit;
1330 1177 } else if (size != 0) {
1331 1178 iotime = gethrtime();
1332 1179 dumpsync.iowait += iotime - dumpsync.iowaitts;
1333 1180 if (panicstr)
1334 1181 err = VOP_DUMP(dumpvp, dumpbuf.start,
1335 1182 lbtodb(dumpbuf.vp_off), btod(size), NULL);
1336 1183 else
1337 1184 err = vn_rdwr(UIO_WRITE, dumpbuf.cdev_vp != NULL ?
1338 1185 dumpbuf.cdev_vp : dumpvp, dumpbuf.start, size,
1339 1186 dumpbuf.vp_off, UIO_SYSSPACE, 0, dumpbuf.vp_limit,
1340 1187 kcred, 0);
1341 1188 if (err && dump_ioerr == 0)
1342 1189 dump_ioerr = err;
1343 1190 dumpsync.iowaitts = gethrtime();
1344 1191 dumpsync.iotime += dumpsync.iowaitts - iotime;
1345 1192 dumpsync.nwrite += size;
1346 1193 dumpbuf.vp_off += size;
1347 1194 }
1348 1195 dumpbuf.cur = dumpbuf.start;
1349 1196 dump_timeleft = dump_timeout;
1350 1197 return (dumpbuf.vp_off);
1351 1198 }
1352 1199
1353 1200 /* maximize write speed by keeping seek offset aligned with size */
1354 1201 void
1355 1202 dumpvp_write(const void *va, size_t size)
1356 1203 {
1357 1204 size_t len, off, sz;
1358 1205
1359 1206 while (size != 0) {
1360 1207 len = MIN(size, dumpbuf.end - dumpbuf.cur);
1361 1208 if (len == 0) {
1362 1209 off = P2PHASE(dumpbuf.vp_off, dumpbuf.size);
1363 1210 if (off == 0 || !ISP2(dumpbuf.size)) {
1364 1211 (void) dumpvp_flush();
1365 1212 } else {
1366 1213 sz = dumpbuf.size - off;
1367 1214 dumpbuf.cur = dumpbuf.start + sz;
1368 1215 (void) dumpvp_flush();
1369 1216 ovbcopy(dumpbuf.start + sz, dumpbuf.start, off);
1370 1217 dumpbuf.cur += off;
1371 1218 }
1372 1219 } else {
1373 1220 bcopy(va, dumpbuf.cur, len);
1374 1221 va = (char *)va + len;
1375 1222 dumpbuf.cur += len;
1376 1223 size -= len;
1377 1224 }
1378 1225 }
1379 1226 }
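
A worked example of the realignment above: suppose dumpbuf.size is 8 MiB and dumpbuf.vp_off sits 1 MiB past an 8 MiB boundary (off = 1 MiB). The flush is shortened to sz = 7 MiB, which brings vp_off back to an 8 MiB multiple; the trailing off bytes are then slid to the front of the buffer with ovbcopy() and go out with the next, fully aligned flush.
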
1380 1227
1381 1228 /*ARGSUSED*/
1382 1229 static void
1383 1230 dumpvp_ksyms_write(const void *src, void *dst, size_t size)
1384 1231 {
1385 1232 dumpvp_write(src, size);
1386 1233 }
1387 1234
1388 1235 /*
1389 1236 * Mark 'pfn' in the bitmap and dump its translation table entry.
1390 1237 */
1391 1238 void
1392 1239 dump_addpage(struct as *as, void *va, pfn_t pfn)
1393 1240 {
1394 1241 mem_vtop_t mem_vtop;
1395 1242 pgcnt_t bitnum;
1396 1243
1397 1244 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
1398 1245 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
1399 1246 dumphdr->dump_npages++;
1400 1247 BT_SET(dumpcfg.bitmap, bitnum);
1401 1248 }
1402 1249 dumphdr->dump_nvtop++;
1403 1250 mem_vtop.m_as = as;
1404 1251 mem_vtop.m_va = va;
1405 1252 mem_vtop.m_pfn = pfn;
1406 1253 dumpvp_write(&mem_vtop, sizeof (mem_vtop_t));
1407 1254 }
1408 1255 dump_timeleft = dump_timeout;
1409 1256 }
1410 1257
1411 1258 /*
1412 1259 * Mark 'pfn' in the bitmap
1413 1260 */
1414 1261 void
1415 1262 dump_page(pfn_t pfn)
1416 1263 {
1417 1264 pgcnt_t bitnum;
1418 1265
1419 1266 if ((bitnum = dump_pfn_to_bitnum(pfn)) != (pgcnt_t)-1) {
1420 1267 if (!BT_TEST(dumpcfg.bitmap, bitnum)) {
1421 1268 dumphdr->dump_npages++;
1422 1269 BT_SET(dumpcfg.bitmap, bitnum);
1423 1270 }
1424 1271 }
1425 1272 dump_timeleft = dump_timeout;
1426 1273 }
1427 1274
1428 1275 /*
1429 1276 * Dump the <as, va, pfn> information for a given address space.
1430 1277 * SEGOP_DUMP() will call dump_addpage() for each page in the segment.
1431 1278 */
1432 1279 static void
1433 1280 dump_as(struct as *as)
1434 1281 {
1435 1282 struct seg *seg;
1436 1283
1437 1284 AS_LOCK_ENTER(as, RW_READER);
1438 1285 for (seg = AS_SEGFIRST(as); seg; seg = AS_SEGNEXT(as, seg)) {
1439 1286 if (seg->s_as != as)
1440 1287 break;
1441 1288 if (seg->s_ops == NULL)
1442 1289 continue;
1443 1290 SEGOP_DUMP(seg);
1444 1291 }
1445 1292 AS_LOCK_EXIT(as);
1446 1293
1447 1294 if (seg != NULL)
1448 1295 cmn_err(CE_WARN, "invalid segment %p in address space %p",
1449 1296 (void *)seg, (void *)as);
1450 1297 }
1451 1298
1452 1299 static int
1453 1300 dump_process(pid_t pid)
1454 1301 {
1455 1302 proc_t *p = sprlock(pid);
1456 1303
1457 1304 if (p == NULL)
1458 1305 return (-1);
1459 1306 if (p->p_as != &kas) {
1460 1307 mutex_exit(&p->p_lock);
1461 1308 dump_as(p->p_as);
1462 1309 mutex_enter(&p->p_lock);
1463 1310 }
1464 1311
1465 1312 sprunlock(p);
1466 1313
1467 1314 return (0);
1468 1315 }
1469 1316
1470 1317 /*
1471 1318 * The following functions (dump_summary(), dump_ereports(), and
1472 1319  * dump_messages()) write data to an uncompressed area within the
1473 1320 * crashdump. The layout of these is
1474 1321 *
1475 1322 * +------------------------------------------------------------+
1476 1323 * | compressed pages | summary | ereports | messages |
1477 1324 * +------------------------------------------------------------+
1478 1325 *
1479 1326 * With the advent of saving a compressed crash dump by default, we
1480 1327 * need to save a little more data to describe the failure mode in
1481 1328 * an uncompressed buffer available before savecore uncompresses
1482 1329 * the dump. Initially this is a copy of the stack trace. Additional
1483 1330 * summary information should be added here.
1484 1331 */
1485 1332
1486 1333 void
1487 1334 dump_summary(void)
1488 1335 {
1489 1336 u_offset_t dumpvp_start;
1490 1337 summary_dump_t sd;
1491 1338
1492 1339 if (dumpvp == NULL || dumphdr == NULL)
1493 1340 return;
1494 1341
1495 1342 dumpbuf.cur = dumpbuf.start;
1496 1343
1497 1344 dumpbuf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE +
1498 1345 DUMP_ERPTSIZE);
1499 1346 dumpvp_start = dumpbuf.vp_limit - DUMP_SUMMARYSIZE;
1500 1347 dumpbuf.vp_off = dumpvp_start;
1501 1348
1502 1349 sd.sd_magic = SUMMARY_MAGIC;
1503 1350 sd.sd_ssum = checksum32(dump_stack_scratch, STACK_BUF_SIZE);
1504 1351 dumpvp_write(&sd, sizeof (sd));
1505 1352 dumpvp_write(dump_stack_scratch, STACK_BUF_SIZE);
1506 1353
1507 1354 sd.sd_magic = 0; /* indicate end of summary */
1508 1355 dumpvp_write(&sd, sizeof (sd));
1509 1356 (void) dumpvp_flush();
1510 1357 }
1511 1358
1512 1359 void
1513 1360 dump_ereports(void)
1514 1361 {
1515 1362 u_offset_t dumpvp_start;
1516 1363 erpt_dump_t ed;
1517 1364
1518 1365 if (dumpvp == NULL || dumphdr == NULL)
1519 1366 return;
1520 1367
1521 1368 dumpbuf.cur = dumpbuf.start;
1522 1369 dumpbuf.vp_limit = dumpvp_size - (DUMP_OFFSET + DUMP_LOGSIZE);
1523 1370 dumpvp_start = dumpbuf.vp_limit - DUMP_ERPTSIZE;
1524 1371 dumpbuf.vp_off = dumpvp_start;
1525 1372
1526 1373 fm_ereport_dump();
1527 1374 if (panicstr)
1528 1375 errorq_dump();
1529 1376
1530 1377 bzero(&ed, sizeof (ed)); /* indicate end of ereports */
1531 1378 dumpvp_write(&ed, sizeof (ed));
1532 1379 (void) dumpvp_flush();
1533 1380
1534 1381 if (!panicstr) {
1535 1382 (void) VOP_PUTPAGE(dumpvp, dumpvp_start,
1536 1383 (size_t)(dumpbuf.vp_off - dumpvp_start),
1537 1384 B_INVAL | B_FORCE, kcred, NULL);
1538 1385 }
1539 1386 }
1540 1387
1541 1388 void
1542 1389 dump_messages(void)
1543 1390 {
1544 1391 log_dump_t ld;
1545 1392 mblk_t *mctl, *mdata;
1546 1393 queue_t *q, *qlast;
1547 1394 u_offset_t dumpvp_start;
1548 1395
1549 1396 if (dumpvp == NULL || dumphdr == NULL || log_consq == NULL)
1550 1397 return;
1551 1398
1552 1399 dumpbuf.cur = dumpbuf.start;
1553 1400 dumpbuf.vp_limit = dumpvp_size - DUMP_OFFSET;
1554 1401 dumpvp_start = dumpbuf.vp_limit - DUMP_LOGSIZE;
1555 1402 dumpbuf.vp_off = dumpvp_start;
1556 1403
1557 1404 qlast = NULL;
1558 1405 do {
1559 1406 for (q = log_consq; q->q_next != qlast; q = q->q_next)
1560 1407 continue;
1561 1408 for (mctl = q->q_first; mctl != NULL; mctl = mctl->b_next) {
1562 1409 dump_timeleft = dump_timeout;
1563 1410 mdata = mctl->b_cont;
1564 1411 ld.ld_magic = LOG_MAGIC;
1565 1412 ld.ld_msgsize = MBLKL(mctl->b_cont);
1566 1413 ld.ld_csum = checksum32(mctl->b_rptr, MBLKL(mctl));
1567 1414 ld.ld_msum = checksum32(mdata->b_rptr, MBLKL(mdata));
1568 1415 dumpvp_write(&ld, sizeof (ld));
1569 1416 dumpvp_write(mctl->b_rptr, MBLKL(mctl));
1570 1417 dumpvp_write(mdata->b_rptr, MBLKL(mdata));
1571 1418 }
1572 1419 } while ((qlast = q) != log_consq);
1573 1420
1574 1421 ld.ld_magic = 0; /* indicate end of messages */
1575 1422 dumpvp_write(&ld, sizeof (ld));
1576 1423 (void) dumpvp_flush();
1577 1424 if (!panicstr) {
1578 1425 (void) VOP_PUTPAGE(dumpvp, dumpvp_start,
1579 1426 (size_t)(dumpbuf.vp_off - dumpvp_start),
1580 1427 B_INVAL | B_FORCE, kcred, NULL);
1581 1428 }
1582 1429 }
1583 1430
1584 1431 /*
1585 1432 * The following functions are called on multiple CPUs during dump.
1586 1433 * They must not use most kernel services, because all cross-calls are
1587 1434 * disabled during panic. Therefore, blocking locks and cache flushes
1588 1435 * will not work.
1589 1436 */
1590 1437
1591 1438 /*
1592 1439 * Copy pages, trapping ECC errors. Also, for robustness, trap data
1593 1440 * access in case something goes wrong in the hat layer and the
1594 1441 * mapping is broken.
1595 1442 */
1596 1443 static int
1597 1444 dump_pagecopy(void *src, void *dst)
1598 1445 {
1599 1446 long *wsrc = (long *)src;
1600 1447 long *wdst = (long *)dst;
1601 1448 const ulong_t ncopies = PAGESIZE / sizeof (long);
1602 1449 volatile int w = 0;
1603 1450 volatile int ueoff = -1;
1604 1451 on_trap_data_t otd;
1605 1452
1606 1453 if (on_trap(&otd, OT_DATA_EC | OT_DATA_ACCESS)) {
1607 1454 if (ueoff == -1)
1608 1455 ueoff = w * sizeof (long);
1609 1456 /* report "bad ECC" or "bad address" */
1610 1457 #ifdef _LP64
1611 1458 if (otd.ot_trap & OT_DATA_EC)
1612 1459 wdst[w++] = 0x00badecc00badecc;
1613 1460 else
1614 1461 wdst[w++] = 0x00badadd00badadd;
1615 1462 #else
1616 1463 if (otd.ot_trap & OT_DATA_EC)
1617 1464 wdst[w++] = 0x00badecc;
1618 1465 else
1619 1466 wdst[w++] = 0x00badadd;
1620 1467 #endif
1621 1468 }
1622 1469 while (w < ncopies) {
1623 1470 wdst[w] = wsrc[w];
1624 1471 w++;
1625 1472 }
1626 1473 no_trap();
1627 1474 return (ueoff);
1628 1475 }
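/*
 * Editor's note (illustrative, not part of this change): each word
 * whose copy traps is replaced with one of the fill patterns above,
 * so a damaged page is recognizable in the dump; on LP64, for
 * example:
 *
 *	0x00badecc00badecc	word lost to an ECC (OT_DATA_EC) trap
 *	0x00badadd00badadd	word lost to a bad-address trap
 *
 * The return value is the byte offset of the first trapped word, or
 * -1 if the page copied cleanly.
 */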
1629 1476
1630 1477 static void
1631 1478 dumpsys_close_cq(cqueue_t *cq, int live)
1632 1479 {
1633 1480 if (live) {
1634 1481 mutex_enter(&cq->mutex);
1635 1482 atomic_dec_uint(&cq->open);
1636 1483 cv_signal(&cq->cv);
1637 1484 mutex_exit(&cq->mutex);
1638 1485 } else {
1639 1486 atomic_dec_uint(&cq->open);
1640 1487 }
1641 1488 }
1642 1489
1643 1490 static inline void
1644 1491 dumpsys_spinlock(lock_t *lp)
1645 1492 {
1646 1493 uint_t backoff = 0;
1647 1494 int loop_count = 0;
1648 1495
1649 1496 while (LOCK_HELD(lp) || !lock_spin_try(lp)) {
1650 1497 if (++loop_count >= ncpus) {
1651 1498 backoff = mutex_lock_backoff(0);
1652 1499 loop_count = 0;
1653 1500 } else {
1654 1501 backoff = mutex_lock_backoff(backoff);
1655 1502 }
1656 1503 mutex_lock_delay(backoff);
1657 1504 }
1658 1505 }
1659 1506
1660 1507 static inline void
1661 1508 dumpsys_spinunlock(lock_t *lp)
1662 1509 {
1663 1510 lock_clear(lp);
1664 1511 }
1665 1512
1666 1513 static inline void
1667 1514 dumpsys_lock(cqueue_t *cq, int live)
1668 1515 {
1669 1516 if (live)
1670 1517 mutex_enter(&cq->mutex);
1671 1518 else
1672 1519 dumpsys_spinlock(&cq->spinlock);
1673 1520 }
1674 1521
1675 1522 static inline void
1676 1523 dumpsys_unlock(cqueue_t *cq, int live, int signal)
1677 1524 {
1678 1525 if (live) {
1679 1526 if (signal)
1680 1527 cv_signal(&cq->cv);
1681 1528 mutex_exit(&cq->mutex);
1682 1529 } else {
1683 1530 dumpsys_spinunlock(&cq->spinlock);
1684 1531 }
1685 1532 }
1686 1533
1687 1534 static void
1688 1535 dumpsys_wait_cq(cqueue_t *cq, int live)
1689 1536 {
1690 1537 if (live) {
1691 1538 cv_wait(&cq->cv, &cq->mutex);
1692 1539 } else {
1693 1540 dumpsys_spinunlock(&cq->spinlock);
1694 1541 while (cq->open)
1695 1542 if (cq->first)
1696 1543 break;
1697 1544 dumpsys_spinlock(&cq->spinlock);
1698 1545 }
1699 1546 }
1700 1547
1701 1548 static void
1702 1549 dumpsys_put_cq(cqueue_t *cq, cbuf_t *cp, int newstate, int live)
1703 1550 {
1704 1551 if (cp == NULL)
1705 1552 return;
1706 1553
1707 1554 dumpsys_lock(cq, live);
1708 1555
1709 1556 if (cq->ts != 0) {
1710 1557 cq->empty += gethrtime() - cq->ts;
1711 1558 cq->ts = 0;
1712 1559 }
1713 1560
1714 1561 cp->state = newstate;
1715 1562 cp->next = NULL;
1716 1563 if (cq->last == NULL)
1717 1564 cq->first = cp;
1718 1565 else
1719 1566 cq->last->next = cp;
1720 1567 cq->last = cp;
1721 1568
1722 1569 dumpsys_unlock(cq, live, 1);
1723 1570 }
1724 1571
1725 1572 static cbuf_t *
1726 1573 dumpsys_get_cq(cqueue_t *cq, int live)
1727 1574 {
1728 1575 cbuf_t *cp;
1729 1576 hrtime_t now = gethrtime();
1730 1577
1731 1578 dumpsys_lock(cq, live);
1732 1579
1733 1580 /* CONSTCOND */
1734 1581 while (1) {
1735 1582 cp = (cbuf_t *)cq->first;
1736 1583 if (cp == NULL) {
1737 1584 if (cq->open == 0)
1738 1585 break;
1739 1586 dumpsys_wait_cq(cq, live);
1740 1587 continue;
1741 1588 }
1742 1589 cq->first = cp->next;
1743 1590 if (cq->first == NULL) {
1744 1591 cq->last = NULL;
1745 1592 cq->ts = now;
1746 1593 }
1747 1594 break;
1748 1595 }
1749 1596
1750 1597 dumpsys_unlock(cq, live, cq->first != NULL || cq->open == 0);
1751 1598 return (cp);
1752 1599 }
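/*
 * Editor's sketch of the cqueue protocol implemented above
 * (illustrative only): cq->open is a producer reference count, so a
 * typical producer looks like
 *
 *	CQ_OPEN(q);				take a reference
 *	while (have_work)
 *		CQ_PUT(q, cp, CBUF_INREADY);	append, wake a consumer
 *	CQ_CLOSE(q);				drop it; CQ_GET() then
 *						returns NULL once q is
 *						empty and unreferenced
 *
 * where q and have_work stand in for real state.
 */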
1753 1600
1754 1601 /*
1755 1602 * Send an error message to the console. If the main task is running
1756 1603 * just write the message via uprintf. If a helper is running the
1757 1604 * message has to be put on a queue for the main task. Setting fmt to
1758 1605 * NULL means flush the error message buffer. If fmt is not NULL, just
1759 1606 * add the text to the existing buffer.
1760 1607 */
1761 1608 static void
1762 1609 dumpsys_errmsg(helper_t *hp, const char *fmt, ...)
1763 1610 {
1764 1611 dumpsync_t *ds = hp->ds;
1765 1612 cbuf_t *cp = hp->cperr;
1766 1613 va_list adx;
1767 1614
1768 1615 if (hp->helper == MAINHELPER) {
1769 1616 if (fmt != NULL) {
1770 1617 if (ds->neednl) {
1771 1618 uprintf("\n");
1772 1619 ds->neednl = 0;
1773 1620 }
1774 1621 va_start(adx, fmt);
1775 1622 vuprintf(fmt, adx);
1776 1623 va_end(adx);
1777 1624 }
1778 1625 } else if (fmt == NULL) {
1779 1626 if (cp != NULL) {
1780 1627 CQ_PUT(mainq, cp, CBUF_ERRMSG);
1781 1628 hp->cperr = NULL;
1782 1629 }
1783 1630 } else {
1784 1631 if (hp->cperr == NULL) {
1785 1632 cp = CQ_GET(freebufq);
1786 1633 hp->cperr = cp;
1787 1634 cp->used = 0;
1788 1635 }
1789 1636 va_start(adx, fmt);
1790 1637 cp->used += vsnprintf(cp->buf + cp->used, cp->size - cp->used,
1791 1638 fmt, adx);
1792 1639 va_end(adx);
1793 1640 if ((cp->used + LOG_MSGSIZE) > cp->size) {
1794 1641 CQ_PUT(mainq, cp, CBUF_ERRMSG);
1795 1642 hp->cperr = NULL;
1796 1643 }
1797 1644 }
1798 1645 }
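/*
 * Editor's note (illustrative): the fmt == NULL flush convention
 * means a helper typically accumulates text while processing an
 * input buffer and flushes when it hands the buffer back, as
 * dumpsys_sread() does below:
 *
 *	dumpsys_errmsg(hp, "cpu %d: ...", CPU->cpu_id);	add text
 *	dumpsys_errmsg(hp, NULL);			flush to main task
 */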
1799 1646
1800 1647 /*
1801 1648 * Write an output buffer to the dump file. If the main task is
1802 1649 * running just write the data. If a helper is running the output is
1803 1650 * placed on a queue for the main task.
1804 1651 */
1805 1652 static void
1806 1653 dumpsys_swrite(helper_t *hp, cbuf_t *cp, size_t used)
1807 1654 {
1808 1655 dumpsync_t *ds = hp->ds;
1809 1656
1810 1657 if (hp->helper == MAINHELPER) {
1811 1658 HRSTART(ds->perpage, write);
1812 1659 dumpvp_write(cp->buf, used);
1813 1660 HRSTOP(ds->perpage, write);
1814 1661 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
1815 1662 } else {
1816 1663 cp->used = used;
1817 1664 CQ_PUT(mainq, cp, CBUF_WRITE);
1818 1665 }
1819 1666 }
1820 1667
1821 1668 /*
1822 1669 * Copy one page within the mapped range. The offset starts at 0 and
1823 1670 * is relative to the first pfn. cp->buf + cp->off is the address of
1824 1671 * the first pfn. If dump_pagecopy returns a UE offset, create an
1825 1672 * error message. Returns the offset to the next pfn in the range
1826 1673 * selected by the bitmap.
1827 1674 */
1828 1675 static int
1829 1676 dumpsys_copy_page(helper_t *hp, int offset)
1830 1677 {
1831 1678 cbuf_t *cp = hp->cpin;
1832 1679 int ueoff;
1833 1680
1834 1681 ASSERT(cp->off + offset + PAGESIZE <= cp->size);
1835 1682 ASSERT(BT_TEST(dumpcfg.bitmap, cp->bitnum));
1836 1683
1837 1684 ueoff = dump_pagecopy(cp->buf + cp->off + offset, hp->page);
1838 1685
1839 1686 /* ueoff is the offset in the page to a UE error */
1840 1687 if (ueoff != -1) {
1841 1688 uint64_t pa = ptob(cp->pfn) + offset + ueoff;
1842 1689
1843 1690 dumpsys_errmsg(hp, "cpu %d: memory error at PA 0x%08x.%08x\n",
1844 1691 CPU->cpu_id, (uint32_t)(pa >> 32), (uint32_t)pa);
1845 1692 }
1846 1693
1847 1694 /*
1848 1695 * Advance bitnum and offset to the next input page for the
1849 1696 * next call to this function.
1850 1697 */
1851 1698 offset += PAGESIZE;
1852 1699 cp->bitnum++;
1853 1700 while (cp->off + offset < cp->size) {
1854 1701 if (BT_TEST(dumpcfg.bitmap, cp->bitnum))
1855 1702 break;
1856 1703 offset += PAGESIZE;
1857 1704 cp->bitnum++;
1858 1705 }
1859 1706
1860 1707 return (offset);
1861 1708 }
1862 1709
1863 1710 /*
1864 1711 * Read the helper queue, and copy one mapped page. Return 0 when
1865 1712 * done. Return 1 when a page has been copied into hp->page.
1866 1713 */
1867 1714 static int
1868 1715 dumpsys_sread(helper_t *hp)
1869 1716 {
1870 1717 dumpsync_t *ds = hp->ds;
1871 1718
1872 1719 /* CONSTCOND */
1873 1720 while (1) {
1874 1721
1875 1722 /* Find the next input buffer. */
1876 1723 if (hp->cpin == NULL) {
1877 1724 HRSTART(hp->perpage, inwait);
1878 1725
1879 1726 /* CONSTCOND */
1880 1727 while (1) {
1881 1728 hp->cpin = CQ_GET(helperq);
1882 1729 dump_timeleft = dump_timeout;
1883 1730
1884 1731 /*
1885 1732 * NULL return means the helper queue
1886 1733 * is closed and empty.
1887 1734 */
1888 1735 if (hp->cpin == NULL)
1889 1736 break;
1890 1737
1891 1738 /* Have input, check for dump I/O error. */
1892 1739 if (!dump_ioerr)
1893 1740 break;
1894 1741
1895 1742 /*
1896 1743 * If an I/O error occurs, stay in the
1897 1744 * loop in order to empty the helper
1898 1745 * queue. Return the buffers to the
1899 1746 * main task to unmap and free them.
1900 1747 */
1901 1748 hp->cpin->used = 0;
1902 1749 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
1903 1750 }
1904 1751 HRSTOP(hp->perpage, inwait);
1905 1752
1906 1753 /* Stop here when the helper queue is closed. */
1907 1754 if (hp->cpin == NULL)
1908 1755 break;
1909 1756
1910 1757 /* Set the offset=0 to get the first pfn. */
1911 1758 hp->in = 0;
1912 1759
1913 1760 /* Set the total processed to 0 */
1914 1761 hp->used = 0;
1915 1762 }
1916 1763
1917 1764 /* Process the next page. */
1918 1765 if (hp->used < hp->cpin->used) {
1919 1766
1920 1767 /*
1921 1768 * Get the next page from the input buffer and
1922 1769 * return a copy.
1923 1770 */
1924 1771 ASSERT(hp->in != -1);
1925 1772 HRSTART(hp->perpage, copy);
1926 1773 hp->in = dumpsys_copy_page(hp, hp->in);
1927 1774 hp->used += PAGESIZE;
1928 1775 HRSTOP(hp->perpage, copy);
1929 1776 break;
1930 1777
1931 1778 } else {
1932 1779
1933 1780 /*
1934 1781 * Done with the input. Flush the VM and
1935 1782 * return the buffer to the main task.
1936 1783 */
1937 1784 if (panicstr && hp->helper != MAINHELPER)
1938 1785 hat_flush_range(kas.a_hat,
1939 1786 hp->cpin->buf, hp->cpin->size);
1940 1787 dumpsys_errmsg(hp, NULL);
1941 1788 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
1942 1789 hp->cpin = NULL;
1943 1790 }
1944 1791 }
1945 1792
1946 1793 return (hp->cpin != NULL);
1947 1794 }
1948 1795
1949 1796 /*
1950 - * Compress size bytes starting at buf with bzip2
1951 - * mode:
1952 - * BZ_RUN add one more compressed page
1953 - * BZ_FINISH no more input, flush the state
1954 - */
1955 -static void
1956 -dumpsys_bzrun(helper_t *hp, void *buf, size_t size, int mode)
1957 -{
1958 - dumpsync_t *ds = hp->ds;
1959 - const int CSIZE = sizeof (dumpcsize_t);
1960 - bz_stream *ps = &hp->bzstream;
1961 - int rc = 0;
1962 - uint32_t csize;
1963 - dumpcsize_t cs;
1964 -
1965 - /* Set input pointers to new input page */
1966 - if (size > 0) {
1967 - ps->avail_in = size;
1968 - ps->next_in = buf;
1969 - }
1970 -
1971 - /* CONSTCOND */
1972 - while (1) {
1973 -
1974 - /* Quit when all input has been consumed */
1975 - if (ps->avail_in == 0 && mode == BZ_RUN)
1976 - break;
1977 -
1978 - /* Get a new output buffer */
1979 - if (hp->cpout == NULL) {
1980 - HRSTART(hp->perpage, outwait);
1981 - hp->cpout = CQ_GET(freebufq);
1982 - HRSTOP(hp->perpage, outwait);
1983 - ps->avail_out = hp->cpout->size - CSIZE;
1984 - ps->next_out = hp->cpout->buf + CSIZE;
1985 - }
1986 -
1987 - /* Compress input, or finalize */
1988 - HRSTART(hp->perpage, compress);
1989 - rc = BZ2_bzCompress(ps, mode);
1990 - HRSTOP(hp->perpage, compress);
1991 -
1992 - /* Check for error */
1993 - if (mode == BZ_RUN && rc != BZ_RUN_OK) {
1994 - dumpsys_errmsg(hp, "%d: BZ_RUN error %s at page %lx\n",
1995 - hp->helper, BZ2_bzErrorString(rc),
1996 - hp->cpin->pagenum);
1997 - break;
1998 - }
1999 -
2000 - /* Write the buffer if it is full, or we are flushing */
2001 - if (ps->avail_out == 0 || mode == BZ_FINISH) {
2002 - csize = hp->cpout->size - CSIZE - ps->avail_out;
2003 - cs = DUMP_SET_TAG(csize, hp->tag);
2004 - if (csize > 0) {
2005 - (void) memcpy(hp->cpout->buf, &cs, CSIZE);
2006 - dumpsys_swrite(hp, hp->cpout, csize + CSIZE);
2007 - hp->cpout = NULL;
2008 - }
2009 - }
2010 -
2011 - /* Check for final complete */
2012 - if (mode == BZ_FINISH) {
2013 - if (rc == BZ_STREAM_END)
2014 - break;
2015 - if (rc != BZ_FINISH_OK) {
2016 - dumpsys_errmsg(hp, "%d: BZ_FINISH error %s\n",
2017 - hp->helper, BZ2_bzErrorString(rc));
2018 - break;
2019 - }
2020 - }
2021 - }
2022 -
2023 - /* Cleanup state and buffers */
2024 - if (mode == BZ_FINISH) {
2025 -
2026 - /* Reset state so that it is re-usable. */
2027 - (void) BZ2_bzCompressReset(&hp->bzstream);
2028 -
2029 - /* Give any unused output buffer to the main task */
2030 - if (hp->cpout != NULL) {
2031 - hp->cpout->used = 0;
2032 - CQ_PUT(mainq, hp->cpout, CBUF_ERRMSG);
2033 - hp->cpout = NULL;
2034 - }
2035 - }
2036 -}
2037 -
2038 -static void
2039 -dumpsys_bz2compress(helper_t *hp)
2040 -{
2041 - dumpsync_t *ds = hp->ds;
2042 - dumpstreamhdr_t sh;
2043 -
2044 - (void) strcpy(sh.stream_magic, DUMP_STREAM_MAGIC);
2045 - sh.stream_pagenum = (pgcnt_t)-1;
2046 - sh.stream_npages = 0;
2047 - hp->cpin = NULL;
2048 - hp->cpout = NULL;
2049 - hp->cperr = NULL;
2050 - hp->in = 0;
2051 - hp->out = 0;
2052 - hp->bzstream.avail_in = 0;
2053 -
2054 - /* Bump reference to mainq while we are running */
2055 - CQ_OPEN(mainq);
2056 -
2057 - /* Get one page at a time */
2058 - while (dumpsys_sread(hp)) {
2059 - if (sh.stream_pagenum != hp->cpin->pagenum) {
2060 - sh.stream_pagenum = hp->cpin->pagenum;
2061 - sh.stream_npages = btop(hp->cpin->used);
2062 - dumpsys_bzrun(hp, &sh, sizeof (sh), BZ_RUN);
2063 - }
2064 - dumpsys_bzrun(hp, hp->page, PAGESIZE, 0);
2065 - }
2066 -
2067 - /* Done with input, flush any partial buffer */
2068 - if (sh.stream_pagenum != (pgcnt_t)-1) {
2069 - dumpsys_bzrun(hp, NULL, 0, BZ_FINISH);
2070 - dumpsys_errmsg(hp, NULL);
2071 - }
2072 -
2073 - ASSERT(hp->cpin == NULL && hp->cpout == NULL && hp->cperr == NULL);
2074 -
2075 - /* Decrement main queue count, we are done */
2076 - CQ_CLOSE(mainq);
2077 -}
2078 -
2079 -/*
2080 1797 * Compress with lzjb
2081 1798 * write stream block if full or size==0
2082 1799 * if csize==0 write stream header, else write <csize, data>
2083 1800 * size==0 is a call to flush a buffer
2084 1801 * hp->cpout is the buffer we are flushing or filling
2085 1802 * hp->out is the next index to fill data
2086 1803 * osize is either csize+data, or the size of a stream header
2087 1804 */
2088 1805 static void
2089 1806 dumpsys_lzjbrun(helper_t *hp, size_t csize, void *buf, size_t size)
2090 1807 {
2091 1808 dumpsync_t *ds = hp->ds;
2092 1809 const int CSIZE = sizeof (dumpcsize_t);
2093 1810 dumpcsize_t cs;
2094 1811 size_t osize = csize > 0 ? CSIZE + size : size;
2095 1812
2096 1813 /* If flush, and there is no buffer, just return */
2097 1814 if (size == 0 && hp->cpout == NULL)
2098 1815 return;
2099 1816
2100 1817 /* If flush, or cpout is full, write it out */
2101 1818 if (size == 0 ||
2102 1819 hp->cpout != NULL && hp->out + osize > hp->cpout->size) {
2103 1820
2104 1821 /* Set tag+size word at the front of the stream block. */
2105 1822 cs = DUMP_SET_TAG(hp->out - CSIZE, hp->tag);
2106 1823 (void) memcpy(hp->cpout->buf, &cs, CSIZE);
2107 1824
2108 1825 /* Write block to dump file. */
2109 1826 dumpsys_swrite(hp, hp->cpout, hp->out);
2110 1827
2111 1828 /* Clear pointer to indicate we need a new buffer */
2112 1829 hp->cpout = NULL;
2113 1830
2114 1831 /* flushing, we are done */
2115 1832 if (size == 0)
2116 1833 return;
2117 1834 }
2118 1835
2119 1836 /* Get an output buffer if we don't have one. */
2120 1837 if (hp->cpout == NULL) {
2121 1838 HRSTART(hp->perpage, outwait);
2122 1839 hp->cpout = CQ_GET(freebufq);
2123 1840 HRSTOP(hp->perpage, outwait);
2124 1841 hp->out = CSIZE;
2125 1842 }
2126 1843
2127 1844 /* Store csize word. This is the size of compressed data. */
2128 1845 if (csize > 0) {
2129 1846 cs = DUMP_SET_TAG(csize, 0);
2130 1847 (void) memcpy(hp->cpout->buf + hp->out, &cs, CSIZE);
2131 1848 hp->out += CSIZE;
2132 1849 }
2133 1850
2134 1851 /* Store the data. */
2135 1852 (void) memcpy(hp->cpout->buf + hp->out, buf, size);
2136 1853 hp->out += size;
2137 1854 }
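/*
 * Editor's sketch (illustrative) of one stream block as assembled by
 * dumpsys_lzjbrun() above; field widths follow dumpcsize_t:
 *
 *	+----------------+--------+-------+--------+-------+-----+
 *	| tag+block size | csize1 | data1 | csize2 | data2 | ... |
 *	+----------------+--------+-------+--------+-------+-----+
 *
 * The leading word is DUMP_SET_TAG(hp->out - CSIZE, hp->tag),
 * written at flush time; each csize word carries tag 0 plus the
 * length of the compressed page that follows. Stream headers
 * (dumpstreamhdr_t) are stored as plain data with no csize word
 * (the csize == 0 case).
 */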
2138 1855
2139 1856 static void
2140 1857 dumpsys_lzjbcompress(helper_t *hp)
2141 1858 {
2142 1859 dumpsync_t *ds = hp->ds;
2143 1860 size_t csize;
2144 1861 dumpstreamhdr_t sh;
2145 1862
2146 1863 (void) strcpy(sh.stream_magic, DUMP_STREAM_MAGIC);
2147 1864 sh.stream_pagenum = (pfn_t)-1;
2148 1865 sh.stream_npages = 0;
2149 1866 hp->cpin = NULL;
2150 1867 hp->cpout = NULL;
2151 1868 hp->cperr = NULL;
2152 1869 hp->in = 0;
2153 1870 hp->out = 0;
2154 1871
2155 1872 /* Bump reference to mainq while we are running */
2156 1873 CQ_OPEN(mainq);
2157 1874
2158 1875 /* Get one page at a time */
2159 1876 while (dumpsys_sread(hp)) {
2160 1877
2161 1878 /* Create a stream header for each new input map */
2162 1879 if (sh.stream_pagenum != hp->cpin->pagenum) {
2163 1880 sh.stream_pagenum = hp->cpin->pagenum;
2164 1881 sh.stream_npages = btop(hp->cpin->used);
2165 1882 dumpsys_lzjbrun(hp, 0, &sh, sizeof (sh));
2166 1883 }
2167 1884
2168 1885 /* Compress one page */
2169 1886 HRSTART(hp->perpage, compress);
2170 1887 csize = compress(hp->page, hp->lzbuf, PAGESIZE);
2171 1888 HRSTOP(hp->perpage, compress);
2172 1889
2173 1890 /* Add csize+data to output block */
2174 1891 ASSERT(csize > 0 && csize <= PAGESIZE);
2175 1892 dumpsys_lzjbrun(hp, csize, hp->lzbuf, csize);
2176 1893 }
2177 1894
2178 1895 /* Done with input, flush any partial buffer */
2179 1896 if (sh.stream_pagenum != (pfn_t)-1) {
2180 1897 dumpsys_lzjbrun(hp, 0, NULL, 0);
2181 1898 dumpsys_errmsg(hp, NULL);
2182 1899 }
2183 1900
2184 1901 ASSERT(hp->cpin == NULL && hp->cpout == NULL && hp->cperr == NULL);
2185 1902
2186 1903 /* Decrement main queue count, we are done */
2187 1904 CQ_CLOSE(mainq);
2188 1905 }
2189 1906
2190 1907 /*
2191 1908 * Dump helper called from panic_idle() to compress pages. CPUs in
2192 1909 * this path must not call most kernel services.
2193 1910 *
2194 1911 * During panic, all but one of the CPUs is idle. These CPUs are used
2195 1912 * as helpers working in parallel to copy and compress memory
2196 1913 * pages. During a panic, however, these processors cannot call any
2197 1914 * kernel services. This is because mutexes become no-ops during
2198 1915 * panic, and cross-call interrupts are inhibited. Therefore, during
2199 1916 * panic dump the helper CPUs communicate with the panic CPU using
2200 1917 * memory variables. All memory mapping and I/O is performed by the
2201 1918 * panic CPU.
2202 1919 *
2203 1920 * At dump configuration time, helper_lock is set and helpers_wanted
2204 1921 * is 0. dumpsys() decides whether to set helpers_wanted before
2205 1922 * clearing helper_lock.
2206 1923 *
2207 1924 * At panic time, idle CPUs spin-wait on helper_lock, then alternately
2208 1925 * take the lock and become a helper, or return.
2209 1926 */
2210 1927 void
2211 1928 dumpsys_helper()
2212 1929 {
2213 1930 dumpsys_spinlock(&dumpcfg.helper_lock);
2214 1931 if (dumpcfg.helpers_wanted) {
2215 1932 helper_t *hp, *hpend = &dumpcfg.helper[dumpcfg.nhelper];
2216 1933
2217 1934 for (hp = dumpcfg.helper; hp != hpend; hp++) {
2218 1935 if (hp->helper == FREEHELPER) {
2219 1936 hp->helper = CPU->cpu_id;
2220 1937 BT_SET(dumpcfg.helpermap, CPU->cpu_seqid);
2221 -
2222 1938 dumpsys_spinunlock(&dumpcfg.helper_lock);
2223 -
2224 - if (dumpcfg.clevel < DUMP_CLEVEL_BZIP2)
2225 - dumpsys_lzjbcompress(hp);
2226 - else
2227 - dumpsys_bz2compress(hp);
2228 -
1939 + dumpsys_lzjbcompress(hp);
2229 1940 hp->helper = DONEHELPER;
2230 1941 return;
2231 1942 }
2232 1943 }
2233 1944
2234 1945 /* No more helpers are needed. */
2235 1946 dumpcfg.helpers_wanted = 0;
2236 1947
2237 1948 }
2238 1949 dumpsys_spinunlock(&dumpcfg.helper_lock);
2239 1950 }
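/*
 * Editor's summary (illustrative): each helper_t slot moves through
 * a small state machine under helper_lock:
 *
 *	FREEHELPER -> CPU id (claimed above) -> DONEHELPER
 *
 * dumpsys() seeds usable slots with FREEHELPER; an idle CPU claims
 * one here, runs dumpsys_lzjbcompress() until the helper queue
 * closes, and marks the slot DONEHELPER on the way out.
 */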
2240 1951
2241 1952 /*
2242 1953 * No-wait helper callable in spin loops.
2243 1954 *
2244 1955 * Do not wait for helper_lock. Just check helpers_wanted. The caller
2245 1956 * may decide to continue. This is the "c)ontinue, s)ync, r)eset? s"
2246 1957 * case.
2247 1958 */
2248 1959 void
2249 1960 dumpsys_helper_nw()
2250 1961 {
2251 1962 if (dumpcfg.helpers_wanted)
2252 1963 dumpsys_helper();
2253 1964 }
2254 1965
2255 1966 /*
2256 1967 * Dump helper for live dumps.
2257 1968 * These run as a system task.
2258 1969 */
2259 1970 static void
2260 1971 dumpsys_live_helper(void *arg)
2261 1972 {
2262 1973 helper_t *hp = arg;
2263 1974
2264 1975 BT_ATOMIC_SET(dumpcfg.helpermap, CPU->cpu_seqid);
2265 - if (dumpcfg.clevel < DUMP_CLEVEL_BZIP2)
2266 - dumpsys_lzjbcompress(hp);
2267 - else
2268 - dumpsys_bz2compress(hp);
1976 + dumpsys_lzjbcompress(hp);
2269 1977 }
2270 1978
2271 1979 /*
2272 1980 * Compress one page with lzjb (single threaded case)
2273 1981 */
2274 1982 static void
2275 1983 dumpsys_lzjb_page(helper_t *hp, cbuf_t *cp)
2276 1984 {
2277 1985 dumpsync_t *ds = hp->ds;
2278 1986 uint32_t csize;
2279 1987
2280 1988 hp->helper = MAINHELPER;
2281 1989 hp->in = 0;
2282 1990 hp->used = 0;
2283 1991 hp->cpin = cp;
2284 1992 while (hp->used < cp->used) {
2285 1993 HRSTART(hp->perpage, copy);
2286 1994 hp->in = dumpsys_copy_page(hp, hp->in);
2287 1995 hp->used += PAGESIZE;
2288 1996 HRSTOP(hp->perpage, copy);
2289 1997
2290 1998 HRSTART(hp->perpage, compress);
2291 1999 csize = compress(hp->page, hp->lzbuf, PAGESIZE);
2292 2000 HRSTOP(hp->perpage, compress);
2293 2001
2294 2002 HRSTART(hp->perpage, write);
2295 2003 dumpvp_write(&csize, sizeof (csize));
2296 2004 dumpvp_write(hp->lzbuf, csize);
2297 2005 HRSTOP(hp->perpage, write);
2298 2006 }
2299 2007 CQ_PUT(mainq, hp->cpin, CBUF_USEDMAP);
2300 2008 hp->cpin = NULL;
2301 2009 }
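/*
 * Editor's note (illustrative): in this serial path the output is a
 * plain <csize, data> sequence with no tag words or stream headers,
 * unlike the streamed format produced by dumpsys_lzjbcompress(); a
 * dump reader can presumably distinguish the two formats via
 * dump_nstreams and dump_clevel in the dump data header.
 */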
2302 2010
2303 2011 /*
2304 2012 * Main task to dump pages. This is called on the dump CPU.
2305 2013 */
2306 2014 static void
2307 2015 dumpsys_main_task(void *arg)
2308 2016 {
2309 2017 dumpsync_t *ds = arg;
2310 2018 pgcnt_t pagenum = 0, bitnum = 0, hibitnum;
2311 2019 dumpmlw_t mlw;
2312 2020 cbuf_t *cp;
2313 2021 pgcnt_t baseoff, pfnoff;
2314 2022 pfn_t base, pfn;
2315 - boolean_t dumpserial;
2316 2023 int i;
2317 2024
2318 2025 /*
2319 2026 * Fall back to serial mode if there are no helpers.
2320 - * dump_plat_mincpu can be set to 0 at any time.
2027 + * dump_ncpu_low can be set to 0 at any time.
2321 2028 * dumpcfg.helpermap must contain at least one member.
2322 2029 *
2323 2030 * It is possible that the helpers haven't registered
2324 2031 * in helpermap yet; wait up to DUMP_HELPER_MAX_WAIT for
2325 2032 * at least one helper to register.
2326 2033 */
2327 - dumpserial = B_TRUE;
2328 - if (dump_plat_mincpu != 0 && dumpcfg.clevel != 0) {
2034 + if (dump_ncpu_low != 0 && dumpcfg.clevel != DUMP_CLEVEL_SERIAL) {
2035 + boolean_t dumpserial = B_TRUE;
2329 2036 hrtime_t hrtmax = MSEC2NSEC(DUMP_HELPER_MAX_WAIT);
2330 2037 hrtime_t hrtstart = gethrtime();
2331 2038
2332 2039 for (;;) {
2333 2040 for (i = 0; i < BT_BITOUL(NCPU); ++i) {
2334 2041 if (dumpcfg.helpermap[i] != 0) {
2335 2042 dumpserial = B_FALSE;
2336 2043 break;
2337 2044 }
2338 2045 }
2339 2046
2340 2047 if ((!dumpserial) ||
2341 2048 ((gethrtime() - hrtstart) >= hrtmax)) {
2342 2049 break;
2343 2050 }
2344 2051
2345 2052 SMT_PAUSE();
2346 2053 }
2347 2054
2348 2055 if (dumpserial) {
2349 - dumpcfg.clevel = 0;
2056 + dumpcfg.clevel = DUMP_CLEVEL_SERIAL;
2350 2057 if (dumpcfg.helper[0].lzbuf == NULL) {
2351 2058 dumpcfg.helper[0].lzbuf =
2352 2059 dumpcfg.helper[1].page;
2353 2060 }
2354 2061 }
2355 2062 }
2356 2063
2357 2064 dump_init_memlist_walker(&mlw);
2358 2065
2359 2066 for (;;) {
2360 2067 int sec = (gethrtime() - ds->start) / NANOSEC;
2361 2068
2362 2069 /*
2363 2070 * Render a simple progress display on the system console to
2364 2071 * make clear to the operator that the system has not hung.
2365 2072 * Emit an update when dump progress has advanced by one
2366 2073 * percent, or when no update has been drawn in the last
2367 2074 * second.
2368 2075 */
2369 2076 if (ds->percent > ds->percent_done || sec > ds->sec_done) {
2370 2077 ds->sec_done = sec;
2371 2078 ds->percent_done = ds->percent;
2372 2079 uprintf("^\rdumping: %2d:%02d %3d%% done",
2373 2080 sec / 60, sec % 60, ds->percent);
2374 2081 ds->neednl = 1;
2375 2082 }
2376 2083
2377 2084 while (CQ_IS_EMPTY(mainq) && !CQ_IS_EMPTY(writerq)) {
2378 2085
2379 2086 /* the writerq never blocks */
2380 2087 cp = CQ_GET(writerq);
2381 2088 if (cp == NULL)
2382 2089 break;
2383 2090
2384 2091 dump_timeleft = dump_timeout;
2385 2092
2386 2093 HRSTART(ds->perpage, write);
2387 2094 dumpvp_write(cp->buf, cp->used);
2388 2095 HRSTOP(ds->perpage, write);
2389 2096
2390 2097 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2391 2098 }
2392 2099
2393 2100 /*
2394 2101 * Wait here for some buffers to process. Returns NULL
2395 2102 * when all helpers have terminated and all buffers
2396 2103 * have been processed.
2397 2104 */
2398 2105 cp = CQ_GET(mainq);
2399 2106
2400 2107 if (cp == NULL) {
2401 2108
2402 2109 /* Drain the write queue. */
2403 2110 if (!CQ_IS_EMPTY(writerq))
2404 2111 continue;
2405 2112
2406 2113 /* Main task exits here. */
2407 2114 break;
2408 2115 }
2409 2116
2410 2117 dump_timeleft = dump_timeout;
2411 2118
2412 2119 switch (cp->state) {
2413 2120
2414 2121 case CBUF_FREEMAP:
2415 2122
2416 2123 /*
2417 2124 * Note that we drop CBUF_FREEMAP buffers on
2418 2125 * the floor (they will not be on any cqueue)
2419 2126 * when we no longer need them.
2420 2127 */
2421 2128 if (bitnum >= dumpcfg.bitmapsize)
2422 2129 break;
2423 2130
2424 2131 if (dump_ioerr) {
2425 2132 bitnum = dumpcfg.bitmapsize;
2426 2133 CQ_CLOSE(helperq);
2427 2134 break;
2428 2135 }
2429 2136
2430 2137 HRSTART(ds->perpage, bitmap);
2431 2138 for (; bitnum < dumpcfg.bitmapsize; bitnum++)
2432 2139 if (BT_TEST(dumpcfg.bitmap, bitnum))
2433 2140 break;
2434 2141 HRSTOP(ds->perpage, bitmap);
2435 2142 dump_timeleft = dump_timeout;
2436 2143
2437 2144 if (bitnum >= dumpcfg.bitmapsize) {
2438 2145 CQ_CLOSE(helperq);
2439 2146 break;
2440 2147 }
2441 2148
2442 2149 /*
2443 2150 * Try to map CBUF_MAPSIZE ranges. Can't
2444 2151 * assume that memory segment size is a
2445 2152 * multiple of CBUF_MAPSIZE. Can't assume that
2446 2153 * the segment starts on a CBUF_MAPSIZE
2447 2154 * boundary.
2448 2155 */
2449 2156 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2450 2157 ASSERT(pfn != PFN_INVALID);
2451 2158 ASSERT(bitnum + mlw.mpleft <= dumpcfg.bitmapsize);
2452 2159
2453 2160 base = P2ALIGN(pfn, CBUF_MAPNP);
2454 2161 if (base < mlw.mpaddr) {
2455 2162 base = mlw.mpaddr;
2456 2163 baseoff = P2PHASE(base, CBUF_MAPNP);
2457 2164 } else {
2458 2165 baseoff = 0;
2459 2166 }
2460 2167
2461 2168 pfnoff = pfn - base;
2462 2169 if (pfnoff + mlw.mpleft < CBUF_MAPNP) {
2463 2170 hibitnum = bitnum + mlw.mpleft;
2464 2171 cp->size = ptob(pfnoff + mlw.mpleft);
2465 2172 } else {
2466 2173 hibitnum = bitnum - pfnoff + CBUF_MAPNP -
2467 2174 baseoff;
2468 2175 cp->size = CBUF_MAPSIZE - ptob(baseoff);
2469 2176 }
2470 2177
2471 2178 cp->pfn = pfn;
2472 2179 cp->bitnum = bitnum++;
2473 2180 cp->pagenum = pagenum++;
2474 2181 cp->off = ptob(pfnoff);
2475 2182
2476 2183 for (; bitnum < hibitnum; bitnum++)
2477 2184 if (BT_TEST(dumpcfg.bitmap, bitnum))
2478 2185 pagenum++;
2479 2186
2480 2187 dump_timeleft = dump_timeout;
2481 2188 cp->used = ptob(pagenum - cp->pagenum);
2482 2189
2483 2190 HRSTART(ds->perpage, map);
2484 2191 hat_devload(kas.a_hat, cp->buf, cp->size, base,
2485 2192 PROT_READ, HAT_LOAD_NOCONSIST);
2486 2193 HRSTOP(ds->perpage, map);
2487 2194
2488 2195 ds->pages_mapped += btop(cp->size);
2489 2196 ds->pages_used += pagenum - cp->pagenum;
2490 2197
2491 2198 CQ_OPEN(mainq);
2492 2199
2493 2200 /*
2494 2201 * If there are no helpers the main task does
2495 2202 * non-streams lzjb compress.
2496 2203 */
2497 - if (dumpserial) {
2204 + if (dumpcfg.clevel == DUMP_CLEVEL_SERIAL) {
2498 2205 dumpsys_lzjb_page(dumpcfg.helper, cp);
2499 2206 } else {
2500 2207 /* pass mapped pages to a helper */
2501 2208 CQ_PUT(helperq, cp, CBUF_INREADY);
2502 2209 }
2503 2210
2504 2211 /* the last page was done */
2505 2212 if (bitnum >= dumpcfg.bitmapsize)
2506 2213 CQ_CLOSE(helperq);
2507 2214
2508 2215 break;
2509 2216
2510 2217 case CBUF_USEDMAP:
2511 2218
2512 2219 ds->npages += btop(cp->used);
2513 2220
2514 2221 HRSTART(ds->perpage, unmap);
2515 2222 hat_unload(kas.a_hat, cp->buf, cp->size, HAT_UNLOAD);
2516 2223 HRSTOP(ds->perpage, unmap);
2517 2224
2518 2225 if (bitnum < dumpcfg.bitmapsize)
2519 2226 CQ_PUT(mainq, cp, CBUF_FREEMAP);
2520 2227 CQ_CLOSE(mainq);
2521 2228
2522 2229 ASSERT(ds->npages <= dumphdr->dump_npages);
2523 2230 ds->percent = ds->npages * 100LL / dumphdr->dump_npages;
2524 2231 break;
2525 2232
2526 2233 case CBUF_WRITE:
2527 2234
2528 2235 CQ_PUT(writerq, cp, CBUF_WRITE);
2529 2236 break;
2530 2237
2531 2238 case CBUF_ERRMSG:
2532 2239
2533 2240 if (cp->used > 0) {
2534 2241 cp->buf[cp->size - 2] = '\n';
2535 2242 cp->buf[cp->size - 1] = '\0';
2536 2243 if (ds->neednl) {
2537 2244 uprintf("\n%s", cp->buf);
2538 2245 ds->neednl = 0;
2539 2246 } else {
2540 2247 uprintf("%s", cp->buf);
2541 2248 }
2542 2249 /* wait for console output */
2543 2250 drv_usecwait(200000);
2544 2251 dump_timeleft = dump_timeout;
2545 2252 }
2546 2253 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2547 2254 break;
2548 2255
2549 2256 default:
2550 2257 uprintf("dump: unexpected buffer state %d, "
2551 2258 "buffer will be lost\n", cp->state);
2552 2259 break;
2553 2260
2554 2261 } /* end switch */
2555 2262 }
2556 2263 }
2557 2264
2558 2265 #ifdef COLLECT_METRICS
2559 2266 size_t
2560 2267 dumpsys_metrics(dumpsync_t *ds, char *buf, size_t size)
2561 2268 {
2562 2269 dumpcfg_t *cfg = &dumpcfg;
2563 2270 int myid = CPU->cpu_seqid;
2564 2271 int i, compress_ratio;
2565 2272 int sec, iorate;
2566 2273 helper_t *hp, *hpend = &cfg->helper[cfg->nhelper];
2567 2274 char *e = buf + size;
2568 2275 char *p = buf;
2569 2276
2570 2277 sec = ds->elapsed / (1000 * 1000 * 1000ULL);
2571 2278 if (sec < 1)
2572 2279 sec = 1;
2573 2280
2574 2281 if (ds->iotime < 1)
2575 2282 ds->iotime = 1;
2576 2283 iorate = (ds->nwrite * 100000ULL) / ds->iotime;
2577 2284
2578 2285 compress_ratio = 100LL * ds->npages / btopr(ds->nwrite + 1);
2579 2286
2580 2287 #define P(...) (p += p < e ? snprintf(p, e - p, __VA_ARGS__) : 0)
2581 2288
2582 2289 P("Master cpu_seqid,%d\n", CPU->cpu_seqid);
2583 2290 P("Master cpu_id,%d\n", CPU->cpu_id);
2584 2291 P("dump_flags,0x%x\n", dumphdr->dump_flags);
2585 2292 P("dump_ioerr,%d\n", dump_ioerr);
2586 2293
2587 2294 P("Helpers:\n");
2588 2295 for (i = 0; i < ncpus; i++) {
2589 2296 if ((i & 15) == 0)
2590 2297 P(",,%03d,", i);
2591 2298 if (i == myid)
2592 2299 P(" M");
2593 2300 else if (BT_TEST(cfg->helpermap, i))
2594 2301 P("%4d", cpu_seq[i]->cpu_id);
2595 2302 else
2596 2303 P(" *");
2597 2304 if ((i & 15) == 15)
2598 2305 P("\n");
2599 2306 }
2600 2307
2601 2308 P("ncbuf_used,%d\n", cfg->ncbuf_used);
2602 2309 P("ncmap,%d\n", cfg->ncmap);
2603 2310
2604 2311 P("Found %ldM ranges,%ld\n", (CBUF_MAPSIZE / DUMP_1MB), cfg->found4m);
2605 2312 P("Found small pages,%ld\n", cfg->foundsm);
2606 2313
2607 2314 P("Compression level,%d\n", cfg->clevel);
2608 - P("Compression type,%s %s", cfg->clevel == 0 ? "serial" : "parallel",
2609 - cfg->clevel >= DUMP_CLEVEL_BZIP2 ? "bzip2" : "lzjb");
2610 - if (cfg->clevel >= DUMP_CLEVEL_BZIP2)
2611 - P(" (level %d)\n", dump_bzip2_level);
2612 - else
2613 - P("\n");
2315 + P("Compression type,%s lzjb\n",
2316 + cfg->clevel == DUMP_CLEVEL_SERIAL ? "serial" : "parallel");
2614 2317 P("Compression ratio,%d.%02d\n", compress_ratio / 100, compress_ratio %
2615 2318 100);
2616 2319 P("nhelper_used,%d\n", cfg->nhelper_used);
2617 2320
2618 2321 P("Dump I/O rate MBS,%d.%02d\n", iorate / 100, iorate % 100);
2619 2322 P("..total bytes,%lld\n", (u_longlong_t)ds->nwrite);
2620 2323 P("..total nsec,%lld\n", (u_longlong_t)ds->iotime);
2621 2324 P("dumpbuf.iosize,%ld\n", dumpbuf.iosize);
2622 2325 P("dumpbuf.size,%ld\n", dumpbuf.size);
2623 2326
2624 2327 P("Dump pages/sec,%llu\n", (u_longlong_t)ds->npages / sec);
2625 2328 P("Dump pages,%llu\n", (u_longlong_t)ds->npages);
2626 2329 P("Dump time,%d\n", sec);
2627 2330
2628 2331 if (ds->pages_mapped > 0)
2629 2332 P("per-cent map utilization,%d\n", (int)((100 * ds->pages_used)
2630 2333 / ds->pages_mapped));
2631 2334
2632 2335 P("\nPer-page metrics:\n");
2633 2336 if (ds->npages > 0) {
2634 2337 for (hp = cfg->helper; hp != hpend; hp++) {
2635 2338 #define PERPAGE(x) ds->perpage.x += hp->perpage.x;
2636 2339 PERPAGES;
2637 2340 #undef PERPAGE
2638 2341 }
2639 2342 #define PERPAGE(x) \
2640 2343 P("%s nsec/page,%d\n", #x, (int)(ds->perpage.x / ds->npages));
2641 2344 PERPAGES;
2642 2345 #undef PERPAGE
2643 2346 P("freebufq.empty,%d\n", (int)(ds->freebufq.empty /
2644 2347 ds->npages));
2645 2348 P("helperq.empty,%d\n", (int)(ds->helperq.empty /
2646 2349 ds->npages));
2647 2350 P("writerq.empty,%d\n", (int)(ds->writerq.empty /
2648 2351 ds->npages));
2649 2352 P("mainq.empty,%d\n", (int)(ds->mainq.empty / ds->npages));
2650 2353
2651 2354 P("I/O wait nsec/page,%llu\n", (u_longlong_t)(ds->iowait /
2652 2355 ds->npages));
2653 2356 }
2654 2357 #undef P
2655 2358 if (p < e)
2656 2359 bzero(p, e - p);
2657 2360 return (p - buf);
2658 2361 }
2659 2362 #endif /* COLLECT_METRICS */
2660 2363
2661 2364 /*
2662 2365 * Dump the system.
2663 2366 */
2664 2367 void
2665 2368 dumpsys(void)
2666 2369 {
2667 2370 dumpsync_t *ds = &dumpsync;
2668 2371 taskq_t *livetaskq = NULL;
2669 2372 pfn_t pfn;
2670 2373 pgcnt_t bitnum;
2671 2374 proc_t *p;
2672 2375 helper_t *hp, *hpend = &dumpcfg.helper[dumpcfg.nhelper];
2673 2376 cbuf_t *cp;
2674 2377 pid_t npids, pidx;
2675 2378 char *content;
2676 2379 char *buf;
2677 2380 size_t size;
2678 2381 int save_dump_clevel;
2679 2382 dumpmlw_t mlw;
2680 2383 dumpcsize_t datatag;
2681 2384 dumpdatahdr_t datahdr;
2682 2385
2683 2386 if (dumpvp == NULL || dumphdr == NULL) {
2684 2387 uprintf("skipping system dump - no dump device configured\n");
2685 2388 if (panicstr) {
2686 2389 dumpcfg.helpers_wanted = 0;
2687 2390 dumpsys_spinunlock(&dumpcfg.helper_lock);
2688 2391 }
2689 2392 return;
2690 2393 }
2691 2394 dumpbuf.cur = dumpbuf.start;
2692 2395
2693 2396 /* clear the sync variables */
2694 2397 ASSERT(dumpcfg.nhelper > 0);
2695 2398 bzero(ds, sizeof (*ds));
2696 2399 ds->dumpcpu = CPU->cpu_id;
2697 2400
2698 2401 /*
2699 2402 * Calculate the starting block for dump. If we're dumping on a
2700 2403 * swap device, start 1/5 of the way in; otherwise, start at the
2701 2404 * beginning. And never use the first page -- it may be a disk label.
2702 2405 */
2703 2406 if (dumpvp->v_flag & VISSWAP)
2704 2407 dumphdr->dump_start = P2ROUNDUP(dumpvp_size / 5, DUMP_OFFSET);
2705 2408 else
2706 2409 dumphdr->dump_start = DUMP_OFFSET;
2707 2410
2708 2411 dumphdr->dump_flags = DF_VALID | DF_COMPLETE | DF_LIVE | DF_COMPRESSED;
2709 2412 dumphdr->dump_crashtime = gethrestime_sec();
2710 2413 dumphdr->dump_npages = 0;
2711 2414 dumphdr->dump_nvtop = 0;
2712 2415 bzero(dumpcfg.bitmap, BT_SIZEOFMAP(dumpcfg.bitmapsize));
2713 2416 dump_timeleft = dump_timeout;
2714 2417
2715 2418 if (panicstr) {
2716 2419 dumphdr->dump_flags &= ~DF_LIVE;
2717 2420 (void) VOP_DUMPCTL(dumpvp, DUMP_FREE, NULL, NULL);
2718 2421 (void) VOP_DUMPCTL(dumpvp, DUMP_ALLOC, NULL, NULL);
2719 2422 (void) vsnprintf(dumphdr->dump_panicstring, DUMP_PANICSIZE,
2720 2423 panicstr, panicargs);
2721 2424
2722 2425 }
2723 2426
2724 2427 if (dump_conflags & DUMP_ALL)
2725 2428 content = "all";
2726 2429 else if (dump_conflags & DUMP_CURPROC)
2727 2430 content = "kernel + curproc";
2728 2431 else
2729 2432 content = "kernel";
2730 2433 uprintf("dumping to %s, offset %lld, content: %s\n", dumppath,
2731 2434 dumphdr->dump_start, content);
2732 2435
2733 2436 /* Make sure nodename is current */
2734 2437 bcopy(utsname.nodename, dumphdr->dump_utsname.nodename, SYS_NMLN);
2735 2438
2736 2439 /*
2737 2440 * If this is a live dump, try to open a VCHR vnode for better
2738 2441 * performance. We must take care to flush the buffer cache
2739 2442 * first.
2740 2443 */
2741 2444 if (!panicstr) {
2742 2445 vnode_t *cdev_vp, *cmn_cdev_vp;
2743 2446
2744 2447 ASSERT(dumpbuf.cdev_vp == NULL);
2745 2448 cdev_vp = makespecvp(VTOS(dumpvp)->s_dev, VCHR);
2746 2449 if (cdev_vp != NULL) {
2747 2450 cmn_cdev_vp = common_specvp(cdev_vp);
2748 2451 if (VOP_OPEN(&cmn_cdev_vp, FREAD | FWRITE, kcred, NULL)
2749 2452 == 0) {
2750 2453 if (vn_has_cached_data(dumpvp))
2751 2454 (void) pvn_vplist_dirty(dumpvp, 0, NULL,
2752 2455 B_INVAL | B_TRUNC, kcred);
2753 2456 dumpbuf.cdev_vp = cmn_cdev_vp;
2754 2457 } else {
2755 2458 VN_RELE(cdev_vp);
2756 2459 }
2757 2460 }
2758 2461 }
2759 2462
2760 2463 /*
2761 2464 * Store a hires timestamp so we can look it up during debugging.
2762 2465 */
2763 2466 lbolt_debug_entry();
2764 2467
2765 2468 /*
2766 2469 * Leave room for the message and ereport save areas and terminal dump
2767 2470 * header.
2768 2471 */
2769 2472 dumpbuf.vp_limit = dumpvp_size - DUMP_LOGSIZE - DUMP_OFFSET -
2770 2473 DUMP_ERPTSIZE;
2771 2474
2772 2475 /*
2773 2476 * Write out the symbol table. It's no longer compressed,
2774 2477 * so its 'size' and 'csize' are equal.
2775 2478 */
2776 2479 dumpbuf.vp_off = dumphdr->dump_ksyms = dumphdr->dump_start + PAGESIZE;
2777 2480 dumphdr->dump_ksyms_size = dumphdr->dump_ksyms_csize =
2778 2481 ksyms_snapshot(dumpvp_ksyms_write, NULL, LONG_MAX);
2779 2482
2780 2483 /*
2781 2484 * Write out the translation map.
2782 2485 */
2783 2486 dumphdr->dump_map = dumpvp_flush();
2784 2487 dump_as(&kas);
2785 2488 dumphdr->dump_nvtop += dump_plat_addr();
2786 2489
2787 2490 /*
2788 2491 * call into hat, which may have unmapped pages that also need to
2789 2492 * be in the dump
2790 2493 */
2791 2494 hat_dump();
2792 2495
2793 2496 if (dump_conflags & DUMP_ALL) {
2794 2497 mutex_enter(&pidlock);
2795 2498
2796 2499 for (npids = 0, p = practive; p != NULL; p = p->p_next)
2797 2500 dumpcfg.pids[npids++] = p->p_pid;
2798 2501
2799 2502 mutex_exit(&pidlock);
2800 2503
2801 2504 for (pidx = 0; pidx < npids; pidx++)
2802 2505 (void) dump_process(dumpcfg.pids[pidx]);
2803 2506
2804 2507 dump_init_memlist_walker(&mlw);
2805 2508 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
2806 2509 dump_timeleft = dump_timeout;
2807 2510 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2808 2511 /*
2809 2512 * Some hypervisors do not have all pages available to
2810 2513 * be accessed by the guest OS. Check for page
2811 2514 * accessibility.
2812 2515 */
2813 2516 if (plat_hold_page(pfn, PLAT_HOLD_NO_LOCK, NULL) !=
2814 2517 PLAT_HOLD_OK)
2815 2518 continue;
2816 2519 BT_SET(dumpcfg.bitmap, bitnum);
2817 2520 }
2818 2521 dumphdr->dump_npages = dumpcfg.bitmapsize;
2819 2522 dumphdr->dump_flags |= DF_ALL;
2820 2523
2821 2524 } else if (dump_conflags & DUMP_CURPROC) {
2822 2525 /*
2823 2526 * Determine which pid is to be dumped. If we're panicking, we
2824 2527 * dump the process associated with panic_thread (if any). If
2825 2528 * this is a live dump, we dump the process associated with
2826 2529 * curthread.
2827 2530 */
2828 2531 npids = 0;
2829 2532 if (panicstr) {
2830 2533 if (panic_thread != NULL &&
2831 2534 panic_thread->t_procp != NULL &&
2832 2535 panic_thread->t_procp != &p0) {
2833 2536 dumpcfg.pids[npids++] =
2834 2537 panic_thread->t_procp->p_pid;
2835 2538 }
2836 2539 } else {
2837 2540 dumpcfg.pids[npids++] = curthread->t_procp->p_pid;
2838 2541 }
2839 2542
2840 2543 if (npids && dump_process(dumpcfg.pids[0]) == 0)
2841 2544 dumphdr->dump_flags |= DF_CURPROC;
2842 2545 else
2843 2546 dumphdr->dump_flags |= DF_KERNEL;
2844 2547
2845 2548 } else {
2846 2549 dumphdr->dump_flags |= DF_KERNEL;
2847 2550 }
2848 2551
2849 2552 dumphdr->dump_hashmask = (1 << highbit(dumphdr->dump_nvtop - 1)) - 1;
2850 2553
2851 2554 /*
2852 2555 * Write out the pfn table.
2853 2556 */
2854 2557 dumphdr->dump_pfn = dumpvp_flush();
2855 2558 dump_init_memlist_walker(&mlw);
2856 2559 for (bitnum = 0; bitnum < dumpcfg.bitmapsize; bitnum++) {
2857 2560 dump_timeleft = dump_timeout;
2858 2561 if (!BT_TEST(dumpcfg.bitmap, bitnum))
2859 2562 continue;
2860 2563 pfn = dump_bitnum_to_pfn(bitnum, &mlw);
2861 2564 ASSERT(pfn != PFN_INVALID);
2862 2565 dumpvp_write(&pfn, sizeof (pfn_t));
2863 2566 }
2864 2567 dump_plat_pfn();
2865 2568
2866 2569 /*
2867 2570 * Write out all the pages.
2868 2571 * Map pages, copy them handling UEs, compress, and write them out.
2869 2572 * Cooperate with any helpers running on CPUs in panic_idle().
2870 2573 */
2871 2574 dumphdr->dump_data = dumpvp_flush();
2872 2575
2873 2576 bzero(dumpcfg.helpermap, BT_SIZEOFMAP(NCPU));
2874 - ds->live = dumpcfg.clevel > 0 &&
2577 + ds->live = dumpcfg.clevel > DUMP_CLEVEL_SERIAL &&
2875 2578 (dumphdr->dump_flags & DF_LIVE) != 0;
2876 2579
2877 2580 save_dump_clevel = dumpcfg.clevel;
2878 2581 if (panicstr)
2879 2582 dumpsys_get_maxmem();
2880 - else if (dumpcfg.clevel >= DUMP_CLEVEL_BZIP2)
2881 - dumpcfg.clevel = DUMP_CLEVEL_LZJB;
2882 2583
2883 2584 dumpcfg.nhelper_used = 0;
2884 2585 for (hp = dumpcfg.helper; hp != hpend; hp++) {
2885 2586 if (hp->page == NULL) {
2886 2587 hp->helper = DONEHELPER;
2887 2588 continue;
2888 2589 }
2889 2590 ++dumpcfg.nhelper_used;
2890 2591 hp->helper = FREEHELPER;
2891 2592 hp->taskqid = NULL;
2892 2593 hp->ds = ds;
2893 2594 bzero(&hp->perpage, sizeof (hp->perpage));
2894 - if (dumpcfg.clevel >= DUMP_CLEVEL_BZIP2)
2895 - (void) BZ2_bzCompressReset(&hp->bzstream);
2896 2595 }
2897 2596
2898 2597 CQ_OPEN(freebufq);
2899 2598 CQ_OPEN(helperq);
2900 2599
2901 2600 dumpcfg.ncbuf_used = 0;
2902 2601 for (cp = dumpcfg.cbuf; cp != &dumpcfg.cbuf[dumpcfg.ncbuf]; cp++) {
2903 2602 if (cp->buf != NULL) {
2904 2603 CQ_PUT(freebufq, cp, CBUF_FREEBUF);
2905 2604 ++dumpcfg.ncbuf_used;
2906 2605 }
2907 2606 }
2908 2607
2909 2608 for (cp = dumpcfg.cmap; cp != &dumpcfg.cmap[dumpcfg.ncmap]; cp++)
2910 2609 CQ_PUT(mainq, cp, CBUF_FREEMAP);
2911 2610
2912 2611 ds->start = gethrtime();
2913 2612 ds->iowaitts = ds->start;
2914 2613
2915 2614 /* start helpers */
2916 2615 if (ds->live) {
2917 2616 int n = dumpcfg.nhelper_used;
2918 2617 int pri = MINCLSYSPRI - 25;
2919 2618
2920 2619 livetaskq = taskq_create("LiveDump", n, pri, n, n,
2921 2620 TASKQ_PREPOPULATE);
2922 2621 for (hp = dumpcfg.helper; hp != hpend; hp++) {
2923 2622 if (hp->page == NULL)
2924 2623 continue;
2925 2624 hp->helper = hp - dumpcfg.helper;
2926 2625 hp->taskqid = taskq_dispatch(livetaskq,
2927 2626 dumpsys_live_helper, (void *)hp, TQ_NOSLEEP);
2928 2627 }
2929 2628
2930 2629 } else {
2931 2630 if (panicstr)
2932 2631 kmem_dump_begin();
2933 - dumpcfg.helpers_wanted = dumpcfg.clevel > 0;
2632 + dumpcfg.helpers_wanted = dumpcfg.clevel > DUMP_CLEVEL_SERIAL;
2934 2633 dumpsys_spinunlock(&dumpcfg.helper_lock);
2935 2634 }
2936 2635
2937 2636 /* run main task */
2938 2637 dumpsys_main_task(ds);
2939 2638
2940 2639 ds->elapsed = gethrtime() - ds->start;
2941 2640 if (ds->elapsed < 1)
2942 2641 ds->elapsed = 1;
2943 2642
2944 2643 if (livetaskq != NULL)
2945 2644 taskq_destroy(livetaskq);
2946 2645
2947 2646 if (ds->neednl) {
2948 2647 uprintf("\n");
2949 2648 ds->neednl = 0;
2950 2649 }
2951 2650
2952 2651 /* record actual pages dumped */
2953 2652 dumphdr->dump_npages = ds->npages;
2954 2653
2955 2654 /* platform-specific data */
2956 2655 dumphdr->dump_npages += dump_plat_data(dumpcfg.cbuf[0].buf);
2957 2656
2958 2657 /* note any errors by clearing DF_COMPLETE */
2959 2658 if (dump_ioerr || ds->npages < dumphdr->dump_npages)
2960 2659 dumphdr->dump_flags &= ~DF_COMPLETE;
2961 2660
2962 2661 /* end of stream blocks */
2963 2662 datatag = 0;
2964 2663 dumpvp_write(&datatag, sizeof (datatag));
2965 2664
2966 2665 bzero(&datahdr, sizeof (datahdr));
2967 2666
2968 2667 /* buffer for metrics */
2969 2668 buf = dumpcfg.cbuf[0].buf;
2970 2669 size = MIN(dumpcfg.cbuf[0].size, DUMP_OFFSET - sizeof (dumphdr_t) -
2971 2670 sizeof (dumpdatahdr_t));
2972 2671
2973 2672 /* finish the kmem intercepts, collect kmem verbose info */
2974 2673 if (panicstr) {
2975 2674 datahdr.dump_metrics = kmem_dump_finish(buf, size);
2976 2675 buf += datahdr.dump_metrics;
2977 2676 size -= datahdr.dump_metrics;
2978 2677 }
2979 2678
2980 2679 /* record in the header whether this is a fault-management panic */
2981 2680 if (panicstr)
2982 2681 dumphdr->dump_fm_panic = is_fm_panic();
2983 2682
2984 2683 /* compression info in data header */
2985 2684 datahdr.dump_datahdr_magic = DUMP_DATAHDR_MAGIC;
2986 2685 datahdr.dump_datahdr_version = DUMP_DATAHDR_VERSION;
2987 2686 datahdr.dump_maxcsize = CBUF_SIZE;
2988 2687 datahdr.dump_maxrange = CBUF_MAPSIZE / PAGESIZE;
2989 2688 datahdr.dump_nstreams = dumpcfg.nhelper_used;
2990 2689 datahdr.dump_clevel = dumpcfg.clevel;
2991 2690 #ifdef COLLECT_METRICS
2992 2691 if (dump_metrics_on)
2993 2692 datahdr.dump_metrics += dumpsys_metrics(ds, buf, size);
2994 2693 #endif
2995 2694 datahdr.dump_data_csize = dumpvp_flush() - dumphdr->dump_data;
2996 2695
2997 2696 /*
2998 2697 * Write out the initial and terminal dump headers.
2999 2698 */
3000 2699 dumpbuf.vp_off = dumphdr->dump_start;
3001 2700 dumpvp_write(dumphdr, sizeof (dumphdr_t));
3002 2701 (void) dumpvp_flush();
3003 2702
3004 2703 dumpbuf.vp_limit = dumpvp_size;
3005 2704 dumpbuf.vp_off = dumpbuf.vp_limit - DUMP_OFFSET;
3006 2705 dumpvp_write(dumphdr, sizeof (dumphdr_t));
3007 2706 dumpvp_write(&datahdr, sizeof (dumpdatahdr_t));
3008 2707 dumpvp_write(dumpcfg.cbuf[0].buf, datahdr.dump_metrics);
3009 2708
3010 2709 (void) dumpvp_flush();
3011 2710
3012 2711 uprintf("\r%3d%% done: %llu pages dumped, ",
3013 2712 ds->percent_done, (u_longlong_t)ds->npages);
3014 2713
3015 2714 if (dump_ioerr == 0) {
3016 2715 uprintf("dump succeeded\n");
3017 2716 } else {
3018 2717 uprintf("dump failed: error %d\n", dump_ioerr);
3019 2718 #ifdef DEBUG
3020 2719 if (panicstr)
3021 2720 debug_enter("dump failed");
3022 2721 #endif
3023 2722 }
3024 2723
3025 2724 /*
3026 2725 * Write out all undelivered messages. This has to be the *last*
3027 2726 * thing we do because the dump process itself emits messages.
3028 2727 */
3029 2728 if (panicstr) {
3030 2729 dump_summary();
3031 2730 dump_ereports();
3032 2731 dump_messages();
3033 2732 }
3034 2733
3035 2734 delay(2 * hz); /* let people see the 'done' message */
3036 2735 dump_timeleft = 0;
3037 2736 dump_ioerr = 0;
3038 2737
3039 2738 /* restore settings after live dump completes */
3040 2739 if (!panicstr) {
3041 2740 dumpcfg.clevel = save_dump_clevel;
3042 2741
3043 2742 /* release any VCHR open of the dump device */
3044 2743 if (dumpbuf.cdev_vp != NULL) {
3045 2744 (void) VOP_CLOSE(dumpbuf.cdev_vp, FREAD | FWRITE, 1, 0,
3046 2745 kcred, NULL);
3047 2746 VN_RELE(dumpbuf.cdev_vp);
3048 2747 dumpbuf.cdev_vp = NULL;
3049 2748 }
3050 2749 }
3051 2750 }
3052 2751
3053 2752 /*
3054 2753 * This function is called whenever the memory size, as represented
3055 2754 * by the phys_install list, changes.
3056 2755 */
3057 2756 void
3058 2757 dump_resize()
3059 2758 {
3060 2759 mutex_enter(&dump_lock);
3061 2760 dumphdr_init();
3062 2761 dumpbuf_resize();
3063 2762 dump_update_clevel();
3064 2763 mutex_exit(&dump_lock);
3065 2764 }
3066 2765
3067 2766 /*
3068 2767 * This function allows for dynamic resizing of a dump area. It assumes that
3069 2768 * the underlying device has updated its size(9P) appropriately.
3070 2769 */
3071 2770 int
3072 2771 dumpvp_resize()
3073 2772 {
3074 2773 int error;
3075 2774 vattr_t vattr;
3076 2775
3077 2776 mutex_enter(&dump_lock);
3078 2777 vattr.va_mask = AT_SIZE;
3079 2778 if ((error = VOP_GETATTR(dumpvp, &vattr, 0, kcred, NULL)) != 0) {
3080 2779 mutex_exit(&dump_lock);
3081 2780 return (error);
3082 2781 }
3083 2782
3084 2783 if (error == 0 && vattr.va_size < 2 * DUMP_LOGSIZE + DUMP_ERPTSIZE) {
3085 2784 mutex_exit(&dump_lock);
3086 2785 return (ENOSPC);
3087 2786 }
3088 2787
3089 2788 dumpvp_size = vattr.va_size & -DUMP_OFFSET;
3090 2789 mutex_exit(&dump_lock);
3091 2790 return (0);
3092 2791 }
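/*
 * Editor's note (illustrative): "va_size & -DUMP_OFFSET" above rounds
 * the device size down to a DUMP_OFFSET boundary. The idiom assumes
 * DUMP_OFFSET is a power of two; e.g. if DUMP_OFFSET were 0x10000:
 *
 *	0x12345678 & -0x10000 == 0x12340000
 */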
3093 2792
3094 2793 int
3095 2794 dump_set_uuid(const char *uuidstr)
3096 2795 {
3097 2796 const char *ptr;
3098 2797 int i;
3099 2798
3100 2799 if (uuidstr == NULL || strnlen(uuidstr, 36 + 1) != 36)
3101 2800 return (EINVAL);
3102 2801
3103 2802 /* uuid_parse is not common code so check manually */
3104 2803 for (i = 0, ptr = uuidstr; i < 36; i++, ptr++) {
3105 2804 switch (i) {
3106 2805 case 8:
3107 2806 case 13:
3108 2807 case 18:
3109 2808 case 23:
3110 2809 if (*ptr != '-')
3111 2810 return (EINVAL);
3112 2811 break;
3113 2812
3114 2813 default:
3115 2814 if (!isxdigit(*ptr))
3116 2815 return (EINVAL);
3117 2816 break;
3118 2817 }
3119 2818 }
3120 2819
3121 2820 if (dump_osimage_uuid[0] != '\0')
3122 2821 return (EALREADY);
3123 2822
3124 2823 (void) strncpy(dump_osimage_uuid, uuidstr, 36 + 1);
3125 2824
3126 2825 cmn_err(CE_CONT, "?This Solaris instance has UUID %s\n",
3127 2826 dump_osimage_uuid);
3128 2827
3129 2828 return (0);
3130 2829 }
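/*
 * Editor's example (illustrative): a valid argument is the canonical
 * 36-character UUID string, hex digits with dashes at offsets 8, 13,
 * 18 and 23, e.g.
 *
 *	(void) dump_set_uuid("01234567-89ab-cdef-0123-456789abcdef");
 */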
3131 2830
3132 2831 const char *
3133 2832 dump_get_uuid(void)
3134 2833 {
3135 2834 return (dump_osimage_uuid[0] != '\0' ? dump_osimage_uuid : "");
3136 2835 }