11859 need swapgs mitigation
Reviewed by: Robert Mustacchi <rm@fingolfin.org>
Reviewed by: Dan McDonald <danmcd@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
--- old/usr/src/uts/intel/ia32/ml/exception.s
+++ new/usr/src/uts/intel/ia32/ml/exception.s
1 1 /*
2 2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
3 3 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
4 4 * Copyright 2019 Joyent, Inc.
5 5 */
6 6
7 7 /*
8 8 * Copyright (c) 1989, 1990 William F. Jolitz.
9 9 * Copyright (c) 1990 The Regents of the University of California.
10 10 * All rights reserved.
11 11 *
12 12 * Redistribution and use in source and binary forms, with or without
13 13 * modification, are permitted provided that the following conditions
14 14 * are met:
15 15 * 1. Redistributions of source code must retain the above copyright
16 16 * notice, this list of conditions and the following disclaimer.
17 17 * 2. Redistributions in binary form must reproduce the above copyright
18 18 * notice, this list of conditions and the following disclaimer in the
19 19 * documentation and/or other materials provided with the distribution.
20 20 * 3. All advertising materials mentioning features or use of this software
21 21 * must display the following acknowledgement:
22 22 * This product includes software developed by the University of
23 23 * California, Berkeley and its contributors.
24 24 * 4. Neither the name of the University nor the names of its contributors
25 25 * may be used to endorse or promote products derived from this software
26 26 * without specific prior written permission.
27 27 *
28 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 38 * SUCH DAMAGE.
39 39 *
40 40 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
41 41 */
42 42
43 43 #include <sys/asm_linkage.h>
44 44 #include <sys/asm_misc.h>
45 45 #include <sys/trap.h>
46 46 #include <sys/psw.h>
47 47 #include <sys/regset.h>
48 48 #include <sys/privregs.h>
49 49 #include <sys/dtrace.h>
50 50 #include <sys/x86_archext.h>
51 51 #include <sys/traptrace.h>
52 52 #include <sys/machparam.h>
53 53
54 54 #if !defined(__lint)
55 55
56 56 #include "assym.h"
57 57
58 58 /*
59 59 * push $0 on stack for traps that do not
60 60 * generate an error code. This is so the rest
61 61 * of the kernel can expect a consistent stack
62 62 * from any exception.
63 63 *
64 64 * Note that for all exceptions for amd64
65 65 * %r11 and %rcx are on the stack. Just pop
66 66 * them back into their appropriate registers and let
67 67 * it get saved as is running native.
68 68 */
69 69
70 70 #if defined(__xpv) && defined(__amd64)
71 71
72 72 #define NPTRAP_NOERR(trapno) \
73 73 pushq $0; \
74 74 pushq $trapno
75 75
76 76 #define TRAP_NOERR(trapno) \
77 77 XPV_TRAP_POP; \
78 78 NPTRAP_NOERR(trapno)
79 79
80 80 /*
81 81 * error code already pushed by hw
82 82 * onto stack.
83 83 */
84 84 #define TRAP_ERR(trapno) \
85 85 XPV_TRAP_POP; \
86 86 pushq $trapno
87 87
88 88 #else /* __xpv && __amd64 */
89 89
90 90 #define TRAP_NOERR(trapno) \
91 91 push $0; \
92 92 push $trapno
93 93
94 94 #define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
95 95
96 96 /*
97 97 * error code already pushed by hw
98 98 * onto stack.
99 99 */
100 100 #define TRAP_ERR(trapno) \
101 101 push $trapno
102 102
103 103 #endif /* __xpv && __amd64 */
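
As the comment above notes, the TRAP_NOERR/TRAP_ERR macros exist so that every handler lands on the same frame shape whether or not the hardware supplied an error code. A minimal C sketch of that layout, using a hypothetical demo_trapframe type rather than the kernel's struct regs:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

typedef struct demo_trapframe {
        uint64_t r_trapno;      /* pushed by TRAP_NOERR/TRAP_ERR */
        uint64_t r_err;         /* hardware error code, or the $0 filler */
        uint64_t r_rip;         /* hardware-pushed frame starts here */
        uint64_t r_cs;
        uint64_t r_rfl;
        uint64_t r_rsp;
        uint64_t r_ss;          /* highest address */
} demo_trapframe_t;

int
main(void)
{
        /* these offsets shift up by 8 once dbgtrap pushes %r11 on top */
        printf("trapno=%zu err=%zu rip=%zu\n",
            offsetof(demo_trapframe_t, r_trapno),
            offsetof(demo_trapframe_t, r_err),
            offsetof(demo_trapframe_t, r_rip));
        return (0);
}
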
104 104
105 105 /*
106 106 * These are the stacks used on cpu0 for taking double faults,
107 107 * NMIs and MCEs (the latter two only on amd64 where we have IST).
108 108 *
109 109 * We define them here instead of in a C file so that we can page-align
110 110 * them (gcc won't do that in a .c file).
111 111 */
112 112 .data
113 113 DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
114 114 .fill DEFAULTSTKSZ, 1, 0
115 115 DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
116 116 .fill DEFAULTSTKSZ, 1, 0
117 117 DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
118 118 .fill DEFAULTSTKSZ, 1, 0
119 119
120 120 /*
121 121 * #DE
122 122 */
123 123 ENTRY_NP(div0trap)
124 124 TRAP_NOERR(T_ZERODIV) /* $0 */
125 125 jmp cmntrap
126 126 SET_SIZE(div0trap)
127 127
128 128 /*
129 129 * #DB
130 130 *
131 131 * Fetch %dr6 and clear it, handing off the value to the
132 132 * cmntrap code in %r15/%esi
133 133 */
134 134 ENTRY_NP(dbgtrap)
135 135 TRAP_NOERR(T_SGLSTP) /* $1 */
136 136
137 137 #if defined(__amd64)
138 138 #if !defined(__xpv) /* no sysenter support yet */
139 139 /*
140 140 * If we get here as a result of single-stepping a sysenter
141 141 * instruction, we suddenly find ourselves taking a #db
142 142 * in kernel mode -before- we've swapgs'ed. So before we can
143 143 * take the trap, we do the swapgs here, and fix the return
144 144 * %rip in trap() so that we return immediately after the
145 145 * swapgs in the sysenter handler to avoid doing the swapgs again.
146 146 *
147 147 * Nobody said that the design of sysenter was particularly
148 148 * elegant, did they?
149 149 */
150 150
151 151 pushq %r11
152 152
153 153 /*
154 154 * At this point the stack looks like this:
155 155 *
156 156 * (high address) r_ss
157 157 * r_rsp
158 158 * r_rfl
159 159 * r_cs
160 160 * r_rip <-- %rsp + 24
161 161 * r_err <-- %rsp + 16
162 162 * r_trapno <-- %rsp + 8
163 163 * (low address) %r11 <-- %rsp
164 164 */
165 165 leaq sys_sysenter(%rip), %r11
166 166 cmpq %r11, 24(%rsp) /* Compare to saved r_rip on the stack */
167 167 je 1f
168 168 leaq brand_sys_sysenter(%rip), %r11
169 169 cmpq %r11, 24(%rsp) /* Compare to saved r_rip on the stack */
170 170 je 1f
171 171 leaq tr_sys_sysenter(%rip), %r11
172 172 cmpq %r11, 24(%rsp)
173 173 je 1f
174 174 leaq tr_brand_sys_sysenter(%rip), %r11
175 175 cmpq %r11, 24(%rsp)
176 176 jne 2f
177 -1: SWAPGS
178 -2: popq %r11
177 +1: swapgs
178 +2: lfence /* swapgs mitigation */
179 + popq %r11
179 180 #endif /* !__xpv */
180 181
181 182 INTR_PUSH
182 183 #if defined(__xpv)
183 184 movl $6, %edi
184 185 call kdi_dreg_get
185 186 movq %rax, %r15 /* %db6 -> %r15 */
186 187 movl $6, %edi
187 188 movl $0, %esi
188 189 call kdi_dreg_set /* 0 -> %db6 */
189 190 #else
190 191 movq %db6, %r15
191 192 xorl %eax, %eax
192 193 movq %rax, %db6
193 194 #endif
194 195
195 196 #elif defined(__i386)
196 197
197 198 INTR_PUSH
198 199 #if defined(__xpv)
199 200 pushl $6
200 201 call kdi_dreg_get
201 202 addl $4, %esp
202 203 movl %eax, %esi /* %dr6 -> %esi */
203 204 pushl $0
204 205 pushl $6
205 206 call kdi_dreg_set /* 0 -> %dr6 */
206 207 addl $8, %esp
207 208 #else
208 209 movl %db6, %esi
209 210 xorl %eax, %eax
210 211 movl %eax, %db6
211 212 #endif
212 213 #endif /* __i386 */
213 214
214 215 jmp cmntrap_pushed
215 216 SET_SIZE(dbgtrap)
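
The hunk above carries the actual 11859 fix: the old conditional SWAPGS macro becomes a bare swapgs instruction, and both branch outcomes now join at an lfence so the CPU cannot speculatively issue %gs-relative loads with the wrong GS base while the branch is still unresolved. Below is a minimal userland sketch of the %rip comparison that guards the swapgs; the addresses and helper name are hypothetical stand-ins for the real sysenter entry points.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uintptr_t sysenter_entries[] = {
        0x1000,         /* stands in for sys_sysenter */
        0x1100,         /* stands in for brand_sys_sysenter */
        0x1200,         /* stands in for tr_sys_sysenter */
        0x1300          /* stands in for tr_brand_sys_sysenter */
};

static int
needs_swapgs(uintptr_t saved_rip)
{
        for (size_t i = 0; i < sizeof (sysenter_entries) /
            sizeof (sysenter_entries[0]); i++) {
                if (saved_rip == sysenter_entries[i])
                        return (1);     /* interrupted before swapgs ran */
        }
        return (0);     /* the real code still lfences on this path too */
}

int
main(void)
{
        printf("%d %d\n", needs_swapgs(0x1200), needs_swapgs(0x4000));
        return (0);
}
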
216 217
217 218 #if defined(__amd64)
218 219 #if !defined(__xpv)
219 220
220 221 /*
221 222 * Macro to set the gsbase or kgsbase to the address of the struct cpu
222 223 * for this processor. If we came from userland, set kgsbase else
223 224 * set gsbase. We find the proper cpu struct by looping through
224 225 * the cpu structs for all processors till we find a match for the gdt
225 226 * of the trapping processor. The stack is expected to be pointing at
226 227 * the standard regs pushed by hardware on a trap (plus error code and trapno).
227 228 *
228 229 * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
229 230 * and kgsbase set to the same value) because we're not going back the normal
230 231 * way out of here (via IRET). Where we're going, we don't need no user %gs.
231 232 */
232 233 #define SET_CPU_GSBASE \
233 234 subq $REGOFF_TRAPNO, %rsp; /* save regs */ \
234 235 movq %rax, REGOFF_RAX(%rsp); \
235 236 movq %rbx, REGOFF_RBX(%rsp); \
236 237 movq %rcx, REGOFF_RCX(%rsp); \
237 238 movq %rdx, REGOFF_RDX(%rsp); \
238 239 movq %rbp, REGOFF_RBP(%rsp); \
239 240 movq %rsp, %rbp; \
240 241 subq $16, %rsp; /* space for gdt */ \
241 242 sgdt 6(%rsp); \
242 243 movq 8(%rsp), %rcx; /* %rcx has gdt to match */ \
243 244 xorl %ebx, %ebx; /* loop index */ \
244 245 leaq cpu(%rip), %rdx; /* cpu pointer array */ \
245 246 1: \
246 247 movq (%rdx, %rbx, CLONGSIZE), %rax; /* get cpu[i] */ \
247 248 cmpq $0x0, %rax; /* cpu[i] == NULL ? */ \
248 249 je 2f; /* yes, continue */ \
249 250 cmpq %rcx, CPU_GDT(%rax); /* gdt == cpu[i]->cpu_gdt ? */ \
250 251 je 3f; /* yes, go set gsbase */ \
251 252 2: \
252 253 incl %ebx; /* i++ */ \
253 254 cmpl $NCPU, %ebx; /* i < NCPU ? */ \
254 255 jb 1b; /* yes, loop */ \
255 256 /* XXX BIG trouble if we fall thru here. We didn't find a gdt match */ \
256 257 3: \
257 258 movl $MSR_AMD_KGSBASE, %ecx; \
258 259 cmpw $KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */ \
259 260 jne 4f; /* no, go set KGSBASE */ \
260 261 movl $MSR_AMD_GSBASE, %ecx; /* yes, set GSBASE */ \
261 262 mfence; /* OPTERON_ERRATUM_88 */ \
262 263 4: \
263 264 movq %rax, %rdx; /* write base register */ \
264 265 shrq $32, %rdx; \
265 266 wrmsr; \
266 267 movq REGOFF_RDX(%rbp), %rdx; /* restore regs */ \
267 268 movq REGOFF_RCX(%rbp), %rcx; \
268 269 movq REGOFF_RBX(%rbp), %rbx; \
269 270 movq REGOFF_RAX(%rbp), %rax; \
270 271 movq %rbp, %rsp; \
271 272 movq REGOFF_RBP(%rsp), %rbp; \
272 273 addq $REGOFF_TRAPNO, %rsp /* pop stack */
273 274
274 275 #else /* __xpv */
275 276
276 277 #define SET_CPU_GSBASE /* noop on the hypervisor */
277 278
278 279 #endif /* __xpv */
279 280 #endif /* __amd64 */
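
A minimal C sketch of the search SET_CPU_GSBASE performs: walk the cpu[] pointer array until an entry's recorded GDT base matches the one the trapping CPU reports via sgdt, falling out empty-handed in the "BIG trouble" case. The types and names below are demo stand-ins, not kernel definitions.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NCPU_DEMO 4

typedef struct fake_cpu {
        uintptr_t cpu_gdt;      /* base address of this CPU's GDT */
} fake_cpu_t;

static fake_cpu_t *
find_cpu_by_gdt(fake_cpu_t *cpus[], uintptr_t gdt_base)
{
        for (int i = 0; i < NCPU_DEMO; i++) {
                if (cpus[i] == NULL)
                        continue;       /* sparse slot, keep scanning */
                if (cpus[i]->cpu_gdt == gdt_base)
                        return (cpus[i]);       /* the trapping CPU */
        }
        return (NULL);          /* the "BIG trouble" fall-through */
}

int
main(void)
{
        fake_cpu_t c0 = { 0x1000 }, c2 = { 0x3000 };
        fake_cpu_t *cpus[NCPU_DEMO] = { &c0, NULL, &c2, NULL };

        printf("match: %p\n", (void *)find_cpu_by_gdt(cpus, 0x3000));
        return (0);
}
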
280 281
281 282
282 283 #if defined(__amd64)
283 284
284 285 /*
285 286 * #NMI
286 287 *
287 288 * XXPV: See 6532669.
288 289 */
289 290 ENTRY_NP(nmiint)
290 291 TRAP_NOERR(T_NMIFLT) /* $2 */
291 292
292 293 SET_CPU_GSBASE
293 294
294 295 /*
295 296 * Save all registers and setup segment registers
296 297 * with kernel selectors.
297 298 */
298 299 INTR_PUSH
299 300 INTGATE_INIT_KERNEL_FLAGS
300 301
301 302 TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
302 303 TRACE_REGS(%r12, %rsp, %rax, %rbx)
303 304 TRACE_STAMP(%r12)
304 305
305 306 movq %rsp, %rbp
306 307
307 308 movq %rbp, %rdi
308 309 call av_dispatch_nmivect
309 310
310 311 INTR_POP
311 312 call x86_md_clear
312 313 jmp tr_iret_auto
313 314 /*NOTREACHED*/
314 315 SET_SIZE(nmiint)
315 316
316 317 #elif defined(__i386)
317 318
318 319 /*
319 320 * #NMI
320 321 */
321 322 ENTRY_NP(nmiint)
322 323 TRAP_NOERR(T_NMIFLT) /* $2 */
323 324
324 325 /*
325 326 * Save all registers and setup segment registers
326 327 * with kernel selectors.
327 328 */
328 329 INTR_PUSH
329 330 INTGATE_INIT_KERNEL_FLAGS
330 331
331 332 TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
332 333 TRACE_REGS(%edi, %esp, %ebx, %ecx)
333 334 TRACE_STAMP(%edi)
334 335
335 336 movl %esp, %ebp
336 337
337 338 pushl %ebp
338 339 call av_dispatch_nmivect
339 340 addl $4, %esp
340 341
341 342 INTR_POP_USER
342 343 IRET
343 344 SET_SIZE(nmiint)
344 345
345 346 #endif /* __i386 */
346 347
347 348 /*
348 349 * #BP
349 350 */
350 351 ENTRY_NP(brktrap)
351 352
352 353 #if defined(__amd64)
353 354 XPV_TRAP_POP
354 355 cmpw $KCS_SEL, 8(%rsp)
355 356 jne bp_user
356 357
357 358 /*
358 359 * This is a breakpoint in the kernel -- it is very likely that this
359 360 * is DTrace-induced. To unify DTrace handling, we spoof this as an
360 361 * invalid opcode (#UD) fault. Note that #BP is a trap, not a fault --
361 362 * we must decrement the trapping %rip to make it appear as a fault.
362 363 * We then push a non-zero error code to indicate that this is coming
363 364 * from #BP.
364 365 */
365 366 decq (%rsp)
366 367 push $1 /* error code -- non-zero for #BP */
367 368 jmp ud_kernel
368 369
369 370 bp_user:
370 371 #endif /* __amd64 */
371 372
372 373 NPTRAP_NOERR(T_BPTFLT) /* $3 */
373 374 jmp dtrace_trap
374 375
375 376 SET_SIZE(brktrap)
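
A small sketch of the #BP-to-#UD spoofing described above: because #BP is a trap, the saved %rip sits past the int3, so it is backed up by one and a non-zero error code is recorded so that ud_trap can later de-spoof the frame. Values below are illustrative only.

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
        uint64_t saved_rip = 0x401001;  /* points just past the int3 */
        uint64_t err;

        saved_rip -= 1;         /* decq (%rsp): now points at the int3 */
        err = 1;                /* non-zero marks "came from #BP" */

        printf("spoofed #UD at %#llx, err=%llu\n",
            (unsigned long long)saved_rip, (unsigned long long)err);
        return (0);
}
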
376 377
377 378 /*
378 379 * #OF
379 380 */
380 381 ENTRY_NP(ovflotrap)
381 382 TRAP_NOERR(T_OVFLW) /* $4 */
382 383 jmp cmntrap
383 384 SET_SIZE(ovflotrap)
384 385
385 386 /*
386 387 * #BR
387 388 */
388 389 ENTRY_NP(boundstrap)
389 390 TRAP_NOERR(T_BOUNDFLT) /* $5 */
390 391 jmp cmntrap
391 392 SET_SIZE(boundstrap)
392 393
393 394 #if defined(__amd64)
394 395
395 396 ENTRY_NP(invoptrap)
396 397
397 398 XPV_TRAP_POP
398 399
399 400 cmpw $KCS_SEL, 8(%rsp)
400 401 jne ud_user
401 402
402 403 #if defined(__xpv)
403 404 movb $0, 12(%rsp) /* clear saved upcall_mask from %cs */
404 405 #endif
405 406 push $0 /* error code -- zero for #UD */
406 407 ud_kernel:
407 408 push $0xdddd /* a dummy trap number */
408 409 INTR_PUSH
409 410 movq REGOFF_RIP(%rsp), %rdi
410 411 movq REGOFF_RSP(%rsp), %rsi
411 412 movq REGOFF_RAX(%rsp), %rdx
412 413 pushq (%rsi)
413 414 movq %rsp, %rsi
414 415 subq $8, %rsp
415 416 call dtrace_invop
416 417 ALTENTRY(dtrace_invop_callsite)
417 418 addq $16, %rsp
418 419 cmpl $DTRACE_INVOP_PUSHL_EBP, %eax
419 420 je ud_push
420 421 cmpl $DTRACE_INVOP_LEAVE, %eax
421 422 je ud_leave
422 423 cmpl $DTRACE_INVOP_NOP, %eax
423 424 je ud_nop
424 425 cmpl $DTRACE_INVOP_RET, %eax
425 426 je ud_ret
426 427 jmp ud_trap
427 428
428 429 ud_push:
429 430 /*
430 431 * We must emulate a "pushq %rbp". To do this, we pull the stack
431 432 * down 8 bytes, and then store the base pointer.
432 433 */
433 434 INTR_POP
434 435 subq $16, %rsp /* make room for %rbp */
435 436 pushq %rax /* push temp */
436 437 movq 24(%rsp), %rax /* load calling RIP */
437 438 addq $1, %rax /* increment over trapping instr */
438 439 movq %rax, 8(%rsp) /* store calling RIP */
439 440 movq 32(%rsp), %rax /* load calling CS */
440 441 movq %rax, 16(%rsp) /* store calling CS */
441 442 movq 40(%rsp), %rax /* load calling RFLAGS */
442 443 movq %rax, 24(%rsp) /* store calling RFLAGS */
443 444 movq 48(%rsp), %rax /* load calling RSP */
444 445 subq $8, %rax /* make room for %rbp */
445 446 movq %rax, 32(%rsp) /* store calling RSP */
446 447 movq 56(%rsp), %rax /* load calling SS */
447 448 movq %rax, 40(%rsp) /* store calling SS */
448 449 movq 32(%rsp), %rax /* reload calling RSP */
449 450 movq %rbp, (%rax) /* store %rbp there */
450 451 popq %rax /* pop off temp */
451 452 jmp tr_iret_kernel /* return from interrupt */
452 453 /*NOTREACHED*/
453 454
454 455 ud_leave:
455 456 /*
456 457 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
457 458 * followed by a "popq %rbp". This is quite a bit simpler on amd64
458 459 * than it is on i386 -- we can exploit the fact that the %rsp is
459 460 * explicitly saved to effect the pop without having to reshuffle
460 461 * the other data pushed for the trap.
461 462 */
462 463 INTR_POP
463 464 pushq %rax /* push temp */
464 465 movq 8(%rsp), %rax /* load calling RIP */
465 466 addq $1, %rax /* increment over trapping instr */
466 467 movq %rax, 8(%rsp) /* store calling RIP */
467 468 movq (%rbp), %rax /* get new %rbp */
468 469 addq $8, %rbp /* adjust new %rsp */
469 470 movq %rbp, 32(%rsp) /* store new %rsp */
470 471 movq %rax, %rbp /* set new %rbp */
471 472 popq %rax /* pop off temp */
472 473 jmp tr_iret_kernel /* return from interrupt */
473 474 /*NOTREACHED*/
474 475
475 476 ud_nop:
476 477 /*
477 478 * We must emulate a "nop". This is obviously not hard: we need only
478 479 * advance the %rip by one.
479 480 */
480 481 INTR_POP
481 482 incq (%rsp)
482 483 jmp tr_iret_kernel
483 484 /*NOTREACHED*/
484 485
485 486 ud_ret:
486 487 INTR_POP
487 488 pushq %rax /* push temp */
488 489 movq 32(%rsp), %rax /* load %rsp */
489 490 movq (%rax), %rax /* load calling RIP */
490 491 movq %rax, 8(%rsp) /* store calling RIP */
491 492 addq $8, 32(%rsp) /* adjust new %rsp */
492 493 popq %rax /* pop off temp */
493 494 jmp tr_iret_kernel /* return from interrupt */
494 495 /*NOTREACHED*/
495 496
496 497 ud_trap:
497 498 /*
498 499 * We're going to let the kernel handle this as a normal #UD. If,
499 500 * however, we came through #BP and are spoofing #UD (in this case,
500 501 * the stored error value will be non-zero), we need to de-spoof
501 502 * the trap by incrementing %rip and pushing T_BPTFLT.
502 503 */
503 504 cmpq $0, REGOFF_ERR(%rsp)
504 505 je ud_ud
505 506 incq REGOFF_RIP(%rsp)
506 507 addq $REGOFF_RIP, %rsp
507 508 NPTRAP_NOERR(T_BPTFLT) /* $3 */
508 509 jmp cmntrap
509 510
510 511 ud_ud:
511 512 addq $REGOFF_RIP, %rsp
512 513 ud_user:
513 514 NPTRAP_NOERR(T_ILLINST)
514 515 jmp cmntrap
515 516 SET_SIZE(invoptrap)
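
A minimal userland sketch of the ud_push emulation above: slide the interrupted RIP/CS/RFLAGS/RSP/SS frame down one slot, step the RIP over the one-byte trapping instruction, drop the saved RSP by eight, and store %rbp where the emulated push would have put it. A plain array stands in for the trap stack and all values are demo-only.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
        /* frame[1..5] = rip, cs, rflags, rsp, ss; frame[0] is the new slot */
        uint64_t frame[6] = { 0, 0x401000, 0x28, 0x202, 0x7fff0000, 0x2b };
        uint64_t rbp = 0x7fff1234;
        uint64_t emulated_stack[1];     /* stands in for the user stack word */

        memmove(&frame[0], &frame[1], 5 * sizeof (uint64_t)); /* slide down */
        frame[0] += 1;          /* increment over the trapping instruction */
        frame[3] -= 8;          /* make room on the interrupted stack */
        emulated_stack[0] = rbp;        /* ... and store %rbp there */

        printf("rip=%#llx rsp=%#llx pushed=%#llx\n",
            (unsigned long long)frame[0], (unsigned long long)frame[3],
            (unsigned long long)emulated_stack[0]);
        return (0);
}
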
516 517
517 518 #elif defined(__i386)
518 519
519 520 /*
520 521 * #UD
521 522 */
522 523 ENTRY_NP(invoptrap)
523 524 /*
524 525 * If we are taking an invalid opcode trap while in the kernel, this
525 526 * is likely an FBT probe point.
526 527 */
527 528 pushl %gs
528 529 cmpw $KGS_SEL, (%esp)
529 530 jne 8f
530 531
531 532 addl $4, %esp
532 533 #if defined(__xpv)
533 534 movb $0, 6(%esp) /* clear saved upcall_mask from %cs */
534 535 #endif /* __xpv */
535 536 pusha
536 537 pushl %eax /* push %eax -- may be return value */
537 538 pushl %esp /* push stack pointer */
538 539 addl $48, (%esp) /* adjust to incoming args */
539 540 pushl 40(%esp) /* push calling EIP */
540 541 call dtrace_invop
541 542 ALTENTRY(dtrace_invop_callsite)
542 543 addl $12, %esp
543 544 cmpl $DTRACE_INVOP_PUSHL_EBP, %eax
544 545 je 1f
545 546 cmpl $DTRACE_INVOP_POPL_EBP, %eax
546 547 je 2f
547 548 cmpl $DTRACE_INVOP_LEAVE, %eax
548 549 je 3f
549 550 cmpl $DTRACE_INVOP_NOP, %eax
550 551 je 4f
551 552 jmp 7f
552 553 1:
553 554 /*
554 555 * We must emulate a "pushl %ebp". To do this, we pull the stack
555 556 * down 4 bytes, and then store the base pointer.
556 557 */
557 558 popa
558 559 subl $4, %esp /* make room for %ebp */
559 560 pushl %eax /* push temp */
560 561 movl 8(%esp), %eax /* load calling EIP */
561 562 incl %eax /* increment over LOCK prefix */
562 563 movl %eax, 4(%esp) /* store calling EIP */
563 564 movl 12(%esp), %eax /* load calling CS */
564 565 movl %eax, 8(%esp) /* store calling CS */
565 566 movl 16(%esp), %eax /* load calling EFLAGS */
566 567 movl %eax, 12(%esp) /* store calling EFLAGS */
567 568 movl %ebp, 16(%esp) /* push %ebp */
568 569 popl %eax /* pop off temp */
569 570 jmp _emul_done
570 571 2:
571 572 /*
572 573 * We must emulate a "popl %ebp". To do this, we do the opposite of
573 574 * the above: we remove the %ebp from the stack, and squeeze up the
574 575 * saved state from the trap.
575 576 */
576 577 popa
577 578 pushl %eax /* push temp */
578 579 movl 16(%esp), %ebp /* pop %ebp */
579 580 movl 12(%esp), %eax /* load calling EFLAGS */
580 581 movl %eax, 16(%esp) /* store calling EFLAGS */
581 582 movl 8(%esp), %eax /* load calling CS */
582 583 movl %eax, 12(%esp) /* store calling CS */
583 584 movl 4(%esp), %eax /* load calling EIP */
584 585 incl %eax /* increment over LOCK prefix */
585 586 movl %eax, 8(%esp) /* store calling EIP */
586 587 popl %eax /* pop off temp */
587 588 addl $4, %esp /* adjust stack pointer */
588 589 jmp _emul_done
589 590 3:
590 591 /*
591 592 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
592 593 * followed by a "popl %ebp". This looks similar to the above, but
593 594 * requires two temporaries: one for the new base pointer, and one
594 595 * for the staging register.
595 596 */
596 597 popa
597 598 pushl %eax /* push temp */
598 599 pushl %ebx /* push temp */
599 600 movl %ebp, %ebx /* set temp to old %ebp */
600 601 movl (%ebx), %ebp /* pop %ebp */
601 602 movl 16(%esp), %eax /* load calling EFLAGS */
602 603 movl %eax, (%ebx) /* store calling EFLAGS */
603 604 movl 12(%esp), %eax /* load calling CS */
604 605 movl %eax, -4(%ebx) /* store calling CS */
605 606 movl 8(%esp), %eax /* load calling EIP */
606 607 incl %eax /* increment over LOCK prefix */
607 608 movl %eax, -8(%ebx) /* store calling EIP */
608 609 movl %ebx, -4(%esp) /* temporarily store new %esp */
609 610 popl %ebx /* pop off temp */
610 611 popl %eax /* pop off temp */
611 612 movl -12(%esp), %esp /* set stack pointer */
612 613 subl $8, %esp /* adjust for three pushes, one pop */
613 614 jmp _emul_done
614 615 4:
615 616 /*
616 617 * We must emulate a "nop". This is obviously not hard: we need only
617 618 * advance the %eip by one.
618 619 */
619 620 popa
620 621 incl (%esp)
621 622 _emul_done:
622 623 IRET /* return from interrupt */
623 624 7:
624 625 popa
625 626 pushl $0
626 627 pushl $T_ILLINST /* $6 */
627 628 jmp cmntrap
628 629 8:
629 630 addl $4, %esp
630 631 pushl $0
631 632 pushl $T_ILLINST /* $6 */
632 633 jmp cmntrap
633 634 SET_SIZE(invoptrap)
634 635
635 636 #endif /* __i386 */
636 637
637 638 /*
638 639 * #NM
639 640 */
640 641
641 642 ENTRY_NP(ndptrap)
642 643 TRAP_NOERR(T_NOEXTFLT) /* $0 */
643 644 SET_CPU_GSBASE
644 645 jmp cmntrap
645 646 SET_SIZE(ndptrap)
646 647
647 648 #if !defined(__xpv)
648 649 #if defined(__amd64)
649 650
650 651 /*
651 652 * #DF
652 653 */
653 654 ENTRY_NP(syserrtrap)
654 655 pushq $T_DBLFLT
655 656 SET_CPU_GSBASE
656 657
657 658 /*
658 659 * We share this handler with kmdb (if kmdb is loaded). As such, we
659 660 * may have reached this point after encountering a #df in kmdb. If
660 661 * that happens, we'll still be on kmdb's IDT. We need to switch back
661 662 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
662 663 * here from kmdb, kmdb is probably in a very sickly state, and
663 664 * shouldn't be entered from the panic flow. We'll suppress that
664 665 * entry by setting nopanicdebug.
665 666 */
666 667 pushq %rax
667 668 subq $DESCTBR_SIZE, %rsp
668 669 sidt (%rsp)
669 670 movq %gs:CPU_IDT, %rax
670 671 cmpq %rax, DTR_BASE(%rsp)
671 672 je 1f
672 673
673 674 movq %rax, DTR_BASE(%rsp)
674 675 movw $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
675 676 lidt (%rsp)
676 677
677 678 movl $1, nopanicdebug
678 679
679 680 1: addq $DESCTBR_SIZE, %rsp
680 681 popq %rax
681 682
682 683 DFTRAP_PUSH
683 684
684 685 /*
685 686 * freeze trap trace.
686 687 */
687 688 #ifdef TRAPTRACE
688 689 leaq trap_trace_freeze(%rip), %r11
689 690 incl (%r11)
690 691 #endif
691 692
692 693 ENABLE_INTR_FLAGS
693 694
694 695 movq %rsp, %rdi /* &regs */
695 696 xorl %esi, %esi /* clear address */
696 697 xorl %edx, %edx /* cpuid = 0 */
697 698 call trap
698 699
699 700 SET_SIZE(syserrtrap)
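
A minimal sketch of the IDT check described above: compare the IDT base currently in use (as sidt would report it) against the base recorded for this CPU, and if they differ, "reload" ours and suppress re-entry into the debugger. The types and constants below are demo stand-ins.

#include <stdint.h>
#include <stdio.h>

typedef struct demo_desctbr {
        uint16_t dtr_limit;
        uint64_t dtr_base;
} demo_desctbr_t;

static int nopanicdebug_demo;

static void
check_idt(demo_desctbr_t *active, uint64_t cpu_idt_base, uint16_t nidt_limit)
{
        if (active->dtr_base == cpu_idt_base)
                return;                 /* already on our own IDT */

        active->dtr_base = cpu_idt_base;        /* stands in for lidt */
        active->dtr_limit = nidt_limit;
        nopanicdebug_demo = 1;          /* don't re-enter a sick kmdb */
}

int
main(void)
{
        demo_desctbr_t kmdb_idt = { 0xfff, 0xdead000 };

        check_idt(&kmdb_idt, 0xfee1000, 0xfff);
        printf("base=%#llx nopanicdebug=%d\n",
            (unsigned long long)kmdb_idt.dtr_base, nopanicdebug_demo);
        return (0);
}
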
700 701
701 702 #elif defined(__i386)
702 703
703 704 /*
704 705 * #DF
705 706 */
706 707 ENTRY_NP(syserrtrap)
707 708 cli /* disable interrupts */
708 709
709 710 /*
710 711 * We share this handler with kmdb (if kmdb is loaded). As such, we
711 712 * may have reached this point after encountering a #df in kmdb. If
712 713 * that happens, we'll still be on kmdb's IDT. We need to switch back
713 714 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
714 715 * here from kmdb, kmdb is probably in a very sickly state, and
715 716 * shouldn't be entered from the panic flow. We'll suppress that
716 717 * entry by setting nopanicdebug.
717 718 */
718 719
719 720 subl $DESCTBR_SIZE, %esp
720 721 movl %gs:CPU_IDT, %eax
721 722 sidt (%esp)
722 723 cmpl DTR_BASE(%esp), %eax
723 724 je 1f
724 725
725 726 movl %eax, DTR_BASE(%esp)
726 727 movw $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
727 728 lidt (%esp)
728 729
729 730 movl $1, nopanicdebug
730 731
731 732 1: addl $DESCTBR_SIZE, %esp
732 733
733 734 /*
734 735 * Check the CPL in the TSS to see what mode
735 736 * (user or kernel) we took the fault in. At this
736 737 * point we are running in the context of the double
737 738 * fault task (dftss) but the CPU's task points to
738 739 * the previous task (ktss) where the process context
739 740 * has been saved as the result of the task switch.
740 741 */
741 742 movl %gs:CPU_TSS, %eax /* get the TSS */
742 743 movl TSS_SS(%eax), %ebx /* save the fault SS */
743 744 movl TSS_ESP(%eax), %edx /* save the fault ESP */
744 745 testw $CPL_MASK, TSS_CS(%eax) /* user mode ? */
745 746 jz make_frame
746 747 movw TSS_SS0(%eax), %ss /* get on the kernel stack */
747 748 movl TSS_ESP0(%eax), %esp
748 749
749 750 /*
750 751 * Clear the NT flag to avoid a task switch when the process
751 752 * finally pops the EFL off the stack via an iret. Clear
752 753 * the TF flag since that is what the processor does for
753 754 * a normal exception. Clear the IE flag so that interrupts
754 755 * remain disabled.
755 756 */
756 757 movl TSS_EFL(%eax), %ecx
757 758 andl $_BITNOT(PS_NT|PS_T|PS_IE), %ecx
758 759 pushl %ecx
759 760 popfl /* restore the EFL */
760 761 movw TSS_LDT(%eax), %cx /* restore the LDT */
761 762 lldt %cx
762 763
763 764 /*
764 765 * Restore process segment selectors.
765 766 */
766 767 movw TSS_DS(%eax), %ds
767 768 movw TSS_ES(%eax), %es
768 769 movw TSS_FS(%eax), %fs
769 770 movw TSS_GS(%eax), %gs
770 771
771 772 /*
772 773 * Restore task segment selectors.
773 774 */
774 775 movl $KDS_SEL, TSS_DS(%eax)
775 776 movl $KDS_SEL, TSS_ES(%eax)
776 777 movl $KDS_SEL, TSS_SS(%eax)
777 778 movl $KFS_SEL, TSS_FS(%eax)
778 779 movl $KGS_SEL, TSS_GS(%eax)
779 780
780 781 /*
781 782 * Clear the TS bit, the busy bits in both task
782 783 * descriptors, and switch tasks.
783 784 */
784 785 clts
785 786 leal gdt0, %ecx
786 787 movl DFTSS_SEL+4(%ecx), %esi
787 788 andl $_BITNOT(0x200), %esi
788 789 movl %esi, DFTSS_SEL+4(%ecx)
789 790 movl KTSS_SEL+4(%ecx), %esi
790 791 andl $_BITNOT(0x200), %esi
791 792 movl %esi, KTSS_SEL+4(%ecx)
792 793 movw $KTSS_SEL, %cx
793 794 ltr %cx
794 795
795 796 /*
796 797 * Restore part of the process registers.
797 798 */
798 799 movl TSS_EBP(%eax), %ebp
799 800 movl TSS_ECX(%eax), %ecx
800 801 movl TSS_ESI(%eax), %esi
801 802 movl TSS_EDI(%eax), %edi
802 803
803 804 make_frame:
804 805 /*
805 806 * Make a trap frame. Leave the error code (0) on
806 807 * the stack since the first word on a trap stack is
807 808 * unused anyway.
808 809 */
809 810 pushl %ebx / fault SS
810 811 pushl %edx / fault ESP
811 812 pushl TSS_EFL(%eax) / fault EFL
812 813 pushl TSS_CS(%eax) / fault CS
813 814 pushl TSS_EIP(%eax) / fault EIP
814 815 pushl $0 / error code
815 816 pushl $T_DBLFLT / trap number 8
816 817 movl TSS_EBX(%eax), %ebx / restore EBX
817 818 movl TSS_EDX(%eax), %edx / restore EDX
818 819 movl TSS_EAX(%eax), %eax / restore EAX
819 820 sti / enable interrupts
820 821 jmp cmntrap
821 822 SET_SIZE(syserrtrap)
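
A small sketch of the CPL test above, assuming the usual convention that the low two bits of the saved code-segment selector carry the privilege level of the interrupted context; the mask name below is a demo stand-in.

#include <stdio.h>

#define CPL_MASK_DEMO 0x3

int
main(void)
{
        unsigned short user_cs = 0x33;          /* RPL 3 style selector */
        unsigned short kernel_cs = 0x30;        /* RPL 0 style selector */

        /* non-zero means the fault arrived from user mode */
        printf("user? %d / %d\n",
            (user_cs & CPL_MASK_DEMO) != 0, (kernel_cs & CPL_MASK_DEMO) != 0);
        return (0);
}
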
822 823
823 824 #endif /* __i386 */
824 825 #endif /* !__xpv */
825 826
826 827 /*
827 828 * #TS
828 829 */
829 830 ENTRY_NP(invtsstrap)
830 831 TRAP_ERR(T_TSSFLT) /* $10 already have error code on stack */
831 832 jmp cmntrap
832 833 SET_SIZE(invtsstrap)
833 834
834 835 /*
835 836 * #NP
836 837 */
837 838 ENTRY_NP(segnptrap)
838 839 TRAP_ERR(T_SEGFLT) /* $11 already have error code on stack */
839 840 #if defined(__amd64)
840 841 SET_CPU_GSBASE
841 842 #endif
842 843 jmp cmntrap
843 844 SET_SIZE(segnptrap)
844 845
845 846 /*
846 847 * #SS
847 848 */
848 849 ENTRY_NP(stktrap)
849 850 TRAP_ERR(T_STKFLT) /* $12 already have error code on stack */
850 851 #if defined(__amd64)
851 852 SET_CPU_GSBASE
852 853 #endif
853 854 jmp cmntrap
854 855 SET_SIZE(stktrap)
855 856
856 857 /*
857 858 * #GP
858 859 */
859 860 ENTRY_NP(gptrap)
860 861 TRAP_ERR(T_GPFLT) /* $13 already have error code on stack */
861 862 #if defined(__amd64)
862 863 SET_CPU_GSBASE
863 864 #endif
864 865 jmp cmntrap
865 866 SET_SIZE(gptrap)
866 867
867 868 /*
868 869 * #PF
869 870 */
870 871 ENTRY_NP(pftrap)
871 872 TRAP_ERR(T_PGFLT) /* $14 already have error code on stack */
872 873 INTR_PUSH
873 874 #if defined(__xpv)
874 875
875 876 #if defined(__amd64)
876 877 movq %gs:CPU_VCPU_INFO, %r15
877 878 movq VCPU_INFO_ARCH_CR2(%r15), %r15 /* vcpu[].arch.cr2 */
878 879 #elif defined(__i386)
879 880 movl %gs:CPU_VCPU_INFO, %esi
880 881 movl VCPU_INFO_ARCH_CR2(%esi), %esi /* vcpu[].arch.cr2 */
881 882 #endif /* __i386 */
882 883
883 884 #else /* __xpv */
884 885
885 886 #if defined(__amd64)
886 887 movq %cr2, %r15
887 888 #elif defined(__i386)
888 889 movl %cr2, %esi
889 890 #endif /* __i386 */
890 891
891 892 #endif /* __xpv */
892 893 jmp cmntrap_pushed
893 894 SET_SIZE(pftrap)
894 895
895 896 #if !defined(__amd64)
896 897
897 898 .globl idt0_default_r
898 899
899 900 /*
900 901 * #PF pentium bug workaround
901 902 */
902 903 ENTRY_NP(pentium_pftrap)
903 904 pushl %eax
904 905 movl %cr2, %eax
905 906 andl $MMU_STD_PAGEMASK, %eax
906 907
907 908 cmpl %eax, %cs:idt0_default_r+2 /* fixme */
908 909
909 910 je check_for_user_address
910 911 user_mode:
911 912 popl %eax
912 913 pushl $T_PGFLT /* $14 */
913 914 jmp cmntrap
914 915 check_for_user_address:
915 916 /*
916 917 * Before we assume that we have an unmapped trap on our hands,
917 918 * check to see if this is a fault from user mode. If it is,
918 919 * we'll kick back into the page fault handler.
919 920 */
920 921 movl 4(%esp), %eax /* error code */
921 922 andl $PF_ERR_USER, %eax
922 923 jnz user_mode
923 924
924 925 /*
925 926 * We now know that this is the invalid opcode trap.
926 927 */
927 928 popl %eax
928 929 addl $4, %esp /* pop error code */
929 930 jmp invoptrap
930 931 SET_SIZE(pentium_pftrap)
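
A minimal sketch of the decision the workaround above makes: if the page-masked %cr2 matches the address recorded at idt0_default_r and the error code does not carry the user-mode bit, the #PF is treated as a disguised invalid-opcode trap and handed to invoptrap. The constants below are demo stand-ins, not the real PF_ERR_USER or MMU_STD_PAGEMASK values.

#include <stdint.h>
#include <stdio.h>

#define MMU_STD_PAGEMASK_DEMO   0xfffff000u
#define PF_ERR_USER_DEMO        0x4

static int
is_disguised_ud(uint32_t cr2, uint32_t recorded_base, uint32_t err)
{
        if ((cr2 & MMU_STD_PAGEMASK_DEMO) != recorded_base)
                return (0);             /* ordinary page fault */
        if (err & PF_ERR_USER_DEMO)
                return (0);             /* fault from user mode: real #PF */
        return (1);                     /* hand off to invoptrap instead */
}

int
main(void)
{
        printf("%d %d\n", is_disguised_ud(0x1234, 0x1000, 0),
            is_disguised_ud(0x1234, 0x1000, PF_ERR_USER_DEMO));
        return (0);
}
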
931 932
932 933 #endif /* !__amd64 */
933 934
934 935 ENTRY_NP(resvtrap)
935 936 TRAP_NOERR(T_RESVTRAP) /* (reserved) */
936 937 jmp cmntrap
937 938 SET_SIZE(resvtrap)
938 939
939 940 /*
940 941 * #MF
941 942 */
942 943 ENTRY_NP(ndperr)
943 944 TRAP_NOERR(T_EXTERRFLT) /* $16 */
944 945 jmp cmninttrap
945 946 SET_SIZE(ndperr)
946 947
947 948 /*
948 949 * #AC
949 950 */
950 951 ENTRY_NP(achktrap)
951 952 TRAP_ERR(T_ALIGNMENT) /* $17 */
952 953 jmp cmntrap
953 954 SET_SIZE(achktrap)
954 955
955 956 /*
956 957 * #MC
957 958 */
958 959 .globl cmi_mca_trap /* see uts/i86pc/os/cmi.c */
959 960
960 961 #if defined(__amd64)
961 962
962 963 ENTRY_NP(mcetrap)
963 964 TRAP_NOERR(T_MCE) /* $18 */
964 965
965 966 SET_CPU_GSBASE
966 967
967 968 INTR_PUSH
968 969 INTGATE_INIT_KERNEL_FLAGS
969 970
970 971 TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
971 972 TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
972 973 TRACE_STAMP(%rdi)
973 974
974 975 movq %rsp, %rbp
975 976
976 977 movq %rsp, %rdi /* arg0 = struct regs *rp */
977 978 call cmi_mca_trap /* cmi_mca_trap(rp); */
978 979
979 980 jmp _sys_rtt
980 981 SET_SIZE(mcetrap)
981 982
982 983 #else
983 984
984 985 ENTRY_NP(mcetrap)
985 986 TRAP_NOERR(T_MCE) /* $18 */
986 987
987 988 INTR_PUSH
988 989 INTGATE_INIT_KERNEL_FLAGS
989 990
990 991 TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
991 992 TRACE_REGS(%edi, %esp, %ebx, %ecx)
992 993 TRACE_STAMP(%edi)
993 994
994 995 movl %esp, %ebp
995 996
996 997 movl %esp, %ecx
997 998 pushl %ecx /* arg0 = struct regs *rp */
998 999 call cmi_mca_trap /* cmi_mca_trap(rp) */
999 1000 addl $4, %esp /* pop arg0 */
1000 1001
1001 1002 jmp _sys_rtt
1002 1003 SET_SIZE(mcetrap)
1003 1004
1004 1005 #endif
1005 1006
1006 1007 /*
1007 1008 * #XF
1008 1009 */
1009 1010 ENTRY_NP(xmtrap)
1010 1011 TRAP_NOERR(T_SIMDFPE) /* $19 */
1011 1012 jmp cmninttrap
1012 1013 SET_SIZE(xmtrap)
1013 1014
1014 1015 ENTRY_NP(invaltrap)
1015 1016 TRAP_NOERR(T_INVALTRAP) /* very invalid */
1016 1017 jmp cmntrap
1017 1018 SET_SIZE(invaltrap)
1018 1019
1019 1020 .globl fasttable
1020 1021
1021 1022 #if defined(__amd64)
1022 1023
1023 1024 ENTRY_NP(fasttrap)
1024 1025 cmpl $T_LASTFAST, %eax
1025 1026 ja 1f
1026 1027 orl %eax, %eax /* (zero extend top 32-bits) */
1027 1028 leaq fasttable(%rip), %r11
1028 1029 leaq (%r11, %rax, CLONGSIZE), %r11
1029 1030 movq (%r11), %r11
1030 1031 INDIRECT_JMP_REG(r11)
1031 1032 1:
1032 1033 /*
1033 1034 * Fast syscall number was illegal. Make it look
1034 1035 * as if the INT failed. Modify %rip to point before the
1035 1036 * INT, push the expected error code and fake a GP fault.
1036 1037 *
1037 1038 * XXX Why make the error code be offset into idt + 1?
1038 1039 * Instead we should push a real (soft?) error code
1039 1040 * on the stack and #gp handler could know about fasttraps?
1040 1041 */
1041 1042 XPV_TRAP_POP
1042 1043
1043 1044 subq $2, (%rsp) /* XXX int insn 2-bytes */
1044 1045 pushq $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1045 1046
1046 1047 #if defined(__xpv)
1047 1048 pushq %r11
1048 1049 pushq %rcx
1049 1050 #endif
1050 1051 jmp gptrap
1051 1052 SET_SIZE(fasttrap)
1052 1053
1053 1054 #elif defined(__i386)
1054 1055
1055 1056 ENTRY_NP(fasttrap)
1056 1057 cmpl $T_LASTFAST, %eax
1057 1058 ja 1f
1058 1059 jmp *%cs:fasttable(, %eax, CLONGSIZE)
1059 1060 1:
1060 1061 /*
1061 1062 * Fast syscall number was illegal. Make it look
1062 1063 * as if the INT failed. Modify %eip to point before the
1063 1064 * INT, push the expected error code and fake a GP fault.
1064 1065 *
1065 1066 * XXX Why make the error code be offset into idt + 1?
1066 1067 * Instead we should push a real (soft?) error code
1067 1068 * on the stack and #gp handler could know about fasttraps?
1068 1069 */
1069 1070 subl $2, (%esp) /* XXX int insn 2-bytes */
1070 1071 pushl $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1071 1072 jmp gptrap
1072 1073 SET_SIZE(fasttrap)
1073 1074
1074 1075 #endif /* __i386 */
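
A minimal C sketch of the fasttrap dispatch above: bounds-check the requested fast-trap number and, if legal, call through a function-pointer table; otherwise fall into the "make it look as if the INT failed" path. The handlers and table below are demo stubs, not the real fasttable.

#include <stdio.h>

#define T_LASTFAST_DEMO 2

static void fast_null_demo(void)        { puts("fast_null"); }
static void fast_gethrtime_demo(void)   { puts("gethrtime"); }
static void fast_other_demo(void)       { puts("other"); }

static void (*fasttable_demo[])(void) = {
        fast_null_demo, fast_gethrtime_demo, fast_other_demo
};

static void
fasttrap_demo(unsigned trapno)
{
        if (trapno > T_LASTFAST_DEMO) {
                puts("illegal fast trap -> spoof #GP");  /* the 'ja 1f' path */
                return;
        }
        fasttable_demo[trapno]();       /* mirrors the indirect jump */
}

int
main(void)
{
        fasttrap_demo(1);
        fasttrap_demo(9);
        return (0);
}
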
1075 1076
1076 1077 ENTRY_NP(dtrace_ret)
1077 1078 TRAP_NOERR(T_DTRACE_RET)
1078 1079 jmp dtrace_trap
1079 1080 SET_SIZE(dtrace_ret)
1080 1081
1081 1082 #if defined(__amd64)
1082 1083
1083 1084 /*
1084 1085 * RFLAGS 24 bytes up the stack from %rsp.
1085 1086 * XXX a constant would be nicer.
1086 1087 */
1087 1088 ENTRY_NP(fast_null)
1088 1089 XPV_TRAP_POP
1089 1090 orq $PS_C, 24(%rsp) /* set carry bit in user flags */
1090 1091 call x86_md_clear
1091 1092 jmp tr_iret_auto
1092 1093 /*NOTREACHED*/
1093 1094 SET_SIZE(fast_null)
1094 1095
1095 1096 #elif defined(__i386)
1096 1097
1097 1098 ENTRY_NP(fast_null)
1098 1099 orw $PS_C, 8(%esp) /* set carry bit in user flags */
1099 1100 IRET
1100 1101 SET_SIZE(fast_null)
1101 1102
1102 1103 #endif /* __i386 */
1103 1104
1104 1105 /*
1105 1106 * Interrupts start at 32
1106 1107 */
1107 1108 #define MKIVCT(n) \
1108 1109 ENTRY_NP(ivct/**/n) \
1109 1110 push $0; \
1110 1111 push $n - 0x20; \
1111 1112 jmp cmnint; \
1112 1113 SET_SIZE(ivct/**/n)
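
A small sketch of what each MKIVCT(n) stub arranges before entering cmnint: a zero error-code slot and the vector number biased down by 0x20, since hardware interrupts start at vector 32. Demo types only.

#include <stdio.h>

struct demo_intframe {
        int trapno;     /* n - 0x20 */
        int err;        /* always 0 for interrupts */
};

static struct demo_intframe
mkivct_demo(int vector)
{
        struct demo_intframe f = { vector - 0x20, 0 };
        return (f);     /* the real stub then jmps to cmnint */
}

int
main(void)
{
        struct demo_intframe f = mkivct_demo(32);
        printf("trapno=%d err=%d\n", f.trapno, f.err);
        return (0);
}
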
1113 1114
1114 1115 MKIVCT(32)
1115 1116 MKIVCT(33)
1116 1117 MKIVCT(34)
1117 1118 MKIVCT(35)
1118 1119 MKIVCT(36)
1119 1120 MKIVCT(37)
1120 1121 MKIVCT(38)
1121 1122 MKIVCT(39)
1122 1123 MKIVCT(40)
1123 1124 MKIVCT(41)
1124 1125 MKIVCT(42)
1125 1126 MKIVCT(43)
1126 1127 MKIVCT(44)
1127 1128 MKIVCT(45)
1128 1129 MKIVCT(46)
1129 1130 MKIVCT(47)
1130 1131 MKIVCT(48)
1131 1132 MKIVCT(49)
1132 1133 MKIVCT(50)
1133 1134 MKIVCT(51)
1134 1135 MKIVCT(52)
1135 1136 MKIVCT(53)
1136 1137 MKIVCT(54)
1137 1138 MKIVCT(55)
1138 1139 MKIVCT(56)
1139 1140 MKIVCT(57)
1140 1141 MKIVCT(58)
1141 1142 MKIVCT(59)
1142 1143 MKIVCT(60)
1143 1144 MKIVCT(61)
1144 1145 MKIVCT(62)
1145 1146 MKIVCT(63)
1146 1147 MKIVCT(64)
1147 1148 MKIVCT(65)
1148 1149 MKIVCT(66)
1149 1150 MKIVCT(67)
1150 1151 MKIVCT(68)
1151 1152 MKIVCT(69)
1152 1153 MKIVCT(70)
1153 1154 MKIVCT(71)
1154 1155 MKIVCT(72)
1155 1156 MKIVCT(73)
1156 1157 MKIVCT(74)
1157 1158 MKIVCT(75)
1158 1159 MKIVCT(76)
1159 1160 MKIVCT(77)
1160 1161 MKIVCT(78)
1161 1162 MKIVCT(79)
1162 1163 MKIVCT(80)
1163 1164 MKIVCT(81)
1164 1165 MKIVCT(82)
1165 1166 MKIVCT(83)
1166 1167 MKIVCT(84)
1167 1168 MKIVCT(85)
1168 1169 MKIVCT(86)
1169 1170 MKIVCT(87)
1170 1171 MKIVCT(88)
1171 1172 MKIVCT(89)
1172 1173 MKIVCT(90)
1173 1174 MKIVCT(91)
1174 1175 MKIVCT(92)
1175 1176 MKIVCT(93)
1176 1177 MKIVCT(94)
1177 1178 MKIVCT(95)
1178 1179 MKIVCT(96)
1179 1180 MKIVCT(97)
1180 1181 MKIVCT(98)
1181 1182 MKIVCT(99)
1182 1183 MKIVCT(100)
1183 1184 MKIVCT(101)
1184 1185 MKIVCT(102)
1185 1186 MKIVCT(103)
1186 1187 MKIVCT(104)
1187 1188 MKIVCT(105)
1188 1189 MKIVCT(106)
1189 1190 MKIVCT(107)
1190 1191 MKIVCT(108)
1191 1192 MKIVCT(109)
1192 1193 MKIVCT(110)
1193 1194 MKIVCT(111)
1194 1195 MKIVCT(112)
1195 1196 MKIVCT(113)
1196 1197 MKIVCT(114)
1197 1198 MKIVCT(115)
1198 1199 MKIVCT(116)
1199 1200 MKIVCT(117)
1200 1201 MKIVCT(118)
1201 1202 MKIVCT(119)
1202 1203 MKIVCT(120)
1203 1204 MKIVCT(121)
1204 1205 MKIVCT(122)
1205 1206 MKIVCT(123)
1206 1207 MKIVCT(124)
1207 1208 MKIVCT(125)
1208 1209 MKIVCT(126)
1209 1210 MKIVCT(127)
1210 1211 MKIVCT(128)
1211 1212 MKIVCT(129)
1212 1213 MKIVCT(130)
1213 1214 MKIVCT(131)
1214 1215 MKIVCT(132)
1215 1216 MKIVCT(133)
1216 1217 MKIVCT(134)
1217 1218 MKIVCT(135)
1218 1219 MKIVCT(136)
1219 1220 MKIVCT(137)
1220 1221 MKIVCT(138)
1221 1222 MKIVCT(139)
1222 1223 MKIVCT(140)
1223 1224 MKIVCT(141)
1224 1225 MKIVCT(142)
1225 1226 MKIVCT(143)
1226 1227 MKIVCT(144)
1227 1228 MKIVCT(145)
1228 1229 MKIVCT(146)
1229 1230 MKIVCT(147)
1230 1231 MKIVCT(148)
1231 1232 MKIVCT(149)
1232 1233 MKIVCT(150)
1233 1234 MKIVCT(151)
1234 1235 MKIVCT(152)
1235 1236 MKIVCT(153)
1236 1237 MKIVCT(154)
1237 1238 MKIVCT(155)
1238 1239 MKIVCT(156)
1239 1240 MKIVCT(157)
1240 1241 MKIVCT(158)
1241 1242 MKIVCT(159)
1242 1243 MKIVCT(160)
1243 1244 MKIVCT(161)
1244 1245 MKIVCT(162)
1245 1246 MKIVCT(163)
1246 1247 MKIVCT(164)
1247 1248 MKIVCT(165)
1248 1249 MKIVCT(166)
1249 1250 MKIVCT(167)
1250 1251 MKIVCT(168)
1251 1252 MKIVCT(169)
1252 1253 MKIVCT(170)
1253 1254 MKIVCT(171)
1254 1255 MKIVCT(172)
1255 1256 MKIVCT(173)
1256 1257 MKIVCT(174)
1257 1258 MKIVCT(175)
1258 1259 MKIVCT(176)
1259 1260 MKIVCT(177)
1260 1261 MKIVCT(178)
1261 1262 MKIVCT(179)
1262 1263 MKIVCT(180)
1263 1264 MKIVCT(181)
1264 1265 MKIVCT(182)
1265 1266 MKIVCT(183)
1266 1267 MKIVCT(184)
1267 1268 MKIVCT(185)
1268 1269 MKIVCT(186)
1269 1270 MKIVCT(187)
1270 1271 MKIVCT(188)
1271 1272 MKIVCT(189)
1272 1273 MKIVCT(190)
1273 1274 MKIVCT(191)
1274 1275 MKIVCT(192)
1275 1276 MKIVCT(193)
1276 1277 MKIVCT(194)
1277 1278 MKIVCT(195)
1278 1279 MKIVCT(196)
1279 1280 MKIVCT(197)
1280 1281 MKIVCT(198)
1281 1282 MKIVCT(199)
1282 1283 MKIVCT(200)
1283 1284 MKIVCT(201)
1284 1285 MKIVCT(202)
1285 1286 MKIVCT(203)
1286 1287 MKIVCT(204)
1287 1288 MKIVCT(205)
1288 1289 MKIVCT(206)
1289 1290 MKIVCT(207)
1290 1291 MKIVCT(208)
1291 1292 MKIVCT(209)
1292 1293 MKIVCT(210)
1293 1294 MKIVCT(211)
1294 1295 MKIVCT(212)
1295 1296 MKIVCT(213)
1296 1297 MKIVCT(214)
1297 1298 MKIVCT(215)
1298 1299 MKIVCT(216)
1299 1300 MKIVCT(217)
1300 1301 MKIVCT(218)
1301 1302 MKIVCT(219)
1302 1303 MKIVCT(220)
1303 1304 MKIVCT(221)
1304 1305 MKIVCT(222)
1305 1306 MKIVCT(223)
1306 1307 MKIVCT(224)
1307 1308 MKIVCT(225)
1308 1309 MKIVCT(226)
1309 1310 MKIVCT(227)
1310 1311 MKIVCT(228)
1311 1312 MKIVCT(229)
1312 1313 MKIVCT(230)
1313 1314 MKIVCT(231)
1314 1315 MKIVCT(232)
1315 1316 MKIVCT(233)
1316 1317 MKIVCT(234)
1317 1318 MKIVCT(235)
1318 1319 MKIVCT(236)
1319 1320 MKIVCT(237)
1320 1321 MKIVCT(238)
1321 1322 MKIVCT(239)
1322 1323 MKIVCT(240)
1323 1324 MKIVCT(241)
1324 1325 MKIVCT(242)
1325 1326 MKIVCT(243)
1326 1327 MKIVCT(244)
1327 1328 MKIVCT(245)
1328 1329 MKIVCT(246)
1329 1330 MKIVCT(247)
1330 1331 MKIVCT(248)
1331 1332 MKIVCT(249)
1332 1333 MKIVCT(250)
1333 1334 MKIVCT(251)
1334 1335 MKIVCT(252)
1335 1336 MKIVCT(253)
1336 1337 MKIVCT(254)
1337 1338 MKIVCT(255)
1338 1339
1339 1340 #endif /* __lint */