11787 Kernel needs to be built with retpolines
11788 Kernel needs to generally use RSB stuffing
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: John Levon <john.levon@joyent.com>
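
The substance of this change is the removal of indirect branches from the handwritten trap and exception assembly: memory-indirect jumps such as "jmp *(%r11)" become a load into a register followed by the INDIRECT_JMP_REG macro, and the indirect "call *x86_md_clear" becomes a direct call, so that every remaining indirect transfer can be routed through a retpoline. For orientation only, a conventional retpoline thunk for a register-indirect jump looks roughly like the sketch below; this is the generic published sequence, not necessarily the exact expansion of INDIRECT_JMP_REG in this change set.

	/* Illustrative retpoline thunk for "jmp *%r11" (sketch only) */
	call	2f		/* push return address (label 1) and enter thunk */
1:	pause			/* speculative execution is trapped here... */
	lfence			/* ...and serialized, doing no useful work */
	jmp	1b
2:	movq	%r11, (%rsp)	/* overwrite the return address with the real target */
	ret			/* architecturally transfers to *%r11 */
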
--- old/usr/src/uts/intel/ia32/ml/exception.s
+++ new/usr/src/uts/intel/ia32/ml/exception.s
1 1 /*
2 2 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
3 3 * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
4 4 * Copyright 2019 Joyent, Inc.
5 5 */
6 6
7 7 /*
8 8 * Copyright (c) 1989, 1990 William F. Jolitz.
9 9 * Copyright (c) 1990 The Regents of the University of California.
10 10 * All rights reserved.
11 11 *
12 12 * Redistribution and use in source and binary forms, with or without
13 13 * modification, are permitted provided that the following conditions
14 14 * are met:
15 15 * 1. Redistributions of source code must retain the above copyright
16 16 * notice, this list of conditions and the following disclaimer.
17 17 * 2. Redistributions in binary form must reproduce the above copyright
18 18 * notice, this list of conditions and the following disclaimer in the
19 19 * documentation and/or other materials provided with the distribution.
20 20 * 3. All advertising materials mentioning features or use of this software
21 21 * must display the following acknowledgement:
22 22 * This product includes software developed by the University of
23 23 * California, Berkeley and its contributors.
24 24 * 4. Neither the name of the University nor the names of its contributors
25 25 * may be used to endorse or promote products derived from this software
26 26 * without specific prior written permission.
27 27 *
28 28 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
29 29 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
30 30 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
31 31 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
32 32 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
33 33 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
34 34 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
35 35 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
36 36 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
37 37 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
38 38 * SUCH DAMAGE.
39 39 *
40 40 * $FreeBSD: src/sys/amd64/amd64/exception.S,v 1.113 2003/10/15 02:04:52 peter Exp $
41 41 */
42 42
43 43 #include <sys/asm_linkage.h>
44 44 #include <sys/asm_misc.h>
45 45 #include <sys/trap.h>
46 46 #include <sys/psw.h>
47 47 #include <sys/regset.h>
48 48 #include <sys/privregs.h>
49 49 #include <sys/dtrace.h>
50 50 #include <sys/x86_archext.h>
51 51 #include <sys/traptrace.h>
52 52 #include <sys/machparam.h>
53 53
54 54 #if !defined(__lint)
55 55
56 56 #include "assym.h"
57 57
58 58 /*
59 59 * push $0 on stack for traps that do not
60 60 * generate an error code. This is so the rest
61 61 * of the kernel can expect a consistent stack
62 62 * from any exception.
63 63 *
64 64 * Note that for all exceptions for amd64
65 65 * %r11 and %rcx are on the stack. Just pop
66 66 * them back into their appropriate registers and let
67 67 * it get saved as is running native.
68 68 */
69 69
70 70 #if defined(__xpv) && defined(__amd64)
71 71
72 72 #define NPTRAP_NOERR(trapno) \
73 73 pushq $0; \
74 74 pushq $trapno
75 75
76 76 #define TRAP_NOERR(trapno) \
77 77 XPV_TRAP_POP; \
78 78 NPTRAP_NOERR(trapno)
79 79
80 80 /*
81 81 * error code already pushed by hw
82 82 * onto stack.
83 83 */
84 84 #define TRAP_ERR(trapno) \
85 85 XPV_TRAP_POP; \
86 86 pushq $trapno
87 87
88 88 #else /* __xpv && __amd64 */
89 89
90 90 #define TRAP_NOERR(trapno) \
91 91 push $0; \
92 92 push $trapno
93 93
94 94 #define NPTRAP_NOERR(trapno) TRAP_NOERR(trapno)
95 95
96 96 /*
97 97 * error code already pushed by hw
98 98 * onto stack.
99 99 */
100 100 #define TRAP_ERR(trapno) \
101 101 push $trapno
102 102
103 103 #endif /* __xpv && __amd64 */
104 104
105 105 /*
106 106 * These are the stacks used on cpu0 for taking double faults,
107 107 * NMIs and MCEs (the latter two only on amd64 where we have IST).
108 108 *
109 109 * We define them here instead of in a C file so that we can page-align
110 110 * them (gcc won't do that in a .c file).
111 111 */
112 112 .data
113 113 DGDEF3(dblfault_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
114 114 .fill DEFAULTSTKSZ, 1, 0
115 115 DGDEF3(nmi_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
116 116 .fill DEFAULTSTKSZ, 1, 0
117 117 DGDEF3(mce_stack0, DEFAULTSTKSZ, MMU_PAGESIZE)
118 118 .fill DEFAULTSTKSZ, 1, 0
119 119
120 120 /*
121 121 * #DE
122 122 */
123 123 ENTRY_NP(div0trap)
124 124 TRAP_NOERR(T_ZERODIV) /* $0 */
125 125 jmp cmntrap
126 126 SET_SIZE(div0trap)
127 127
128 128 /*
129 129 * #DB
130 130 *
131 131 * Fetch %dr6 and clear it, handing off the value to the
132 132 * cmntrap code in %r15/%esi
133 133 */
134 134 ENTRY_NP(dbgtrap)
135 135 TRAP_NOERR(T_SGLSTP) /* $1 */
136 136
137 137 #if defined(__amd64)
138 138 #if !defined(__xpv) /* no sysenter support yet */
139 139 /*
140 140 * If we get here as a result of single-stepping a sysenter
141 141 * instruction, we suddenly find ourselves taking a #db
142 142 * in kernel mode -before- we've swapgs'ed. So before we can
143 143 * take the trap, we do the swapgs here, and fix the return
144 144 * %rip in trap() so that we return immediately after the
145 145 * swapgs in the sysenter handler to avoid doing the swapgs again.
146 146 *
147 147 * Nobody said that the design of sysenter was particularly
148 148 * elegant, did they?
149 149 */
150 150
151 151 pushq %r11
152 152
153 153 /*
154 154 * At this point the stack looks like this:
155 155 *
156 - * (high address) r_ss
156 + * (high address) r_ss
157 157 * r_rsp
158 158 * r_rfl
159 159 * r_cs
160 160 * r_rip <-- %rsp + 24
161 161 * r_err <-- %rsp + 16
162 162 * r_trapno <-- %rsp + 8
163 163 * (low address) %r11 <-- %rsp
164 164 */
165 165 leaq sys_sysenter(%rip), %r11
166 166 cmpq %r11, 24(%rsp) /* Compare to saved r_rip on the stack */
167 167 je 1f
168 168 leaq brand_sys_sysenter(%rip), %r11
169 169 cmpq %r11, 24(%rsp) /* Compare to saved r_rip on the stack */
170 170 je 1f
171 171 leaq tr_sys_sysenter(%rip), %r11
172 172 cmpq %r11, 24(%rsp)
173 173 je 1f
174 174 leaq tr_brand_sys_sysenter(%rip), %r11
175 175 cmpq %r11, 24(%rsp)
176 176 jne 2f
177 177 1: SWAPGS
178 178 2: popq %r11
179 179 #endif /* !__xpv */
180 180
181 181 INTR_PUSH
182 182 #if defined(__xpv)
183 183 movl $6, %edi
184 184 call kdi_dreg_get
185 185 movq %rax, %r15 /* %db6 -> %r15 */
186 186 movl $6, %edi
187 187 movl $0, %esi
188 188 call kdi_dreg_set /* 0 -> %db6 */
189 189 #else
190 190 movq %db6, %r15
191 191 xorl %eax, %eax
192 192 movq %rax, %db6
193 193 #endif
194 194
195 195 #elif defined(__i386)
196 196
197 197 INTR_PUSH
198 198 #if defined(__xpv)
199 199 pushl $6
200 200 call kdi_dreg_get
201 201 addl $4, %esp
202 202 movl %eax, %esi /* %dr6 -> %esi */
203 203 pushl $0
204 204 pushl $6
205 205 call kdi_dreg_set /* 0 -> %dr6 */
206 206 addl $8, %esp
207 207 #else
208 208 movl %db6, %esi
209 209 xorl %eax, %eax
210 210 movl %eax, %db6
211 211 #endif
212 212 #endif /* __i386 */
213 213
214 214 jmp cmntrap_pushed
215 215 SET_SIZE(dbgtrap)
216 216
217 217 #if defined(__amd64)
218 218 #if !defined(__xpv)
219 219
220 220 /*
221 221 * Macro to set the gsbase or kgsbase to the address of the struct cpu
222 222 * for this processor. If we came from userland, set kgsbase else
223 223 * set gsbase. We find the proper cpu struct by looping through
224 224 * the cpu structs for all processors till we find a match for the gdt
225 225 * of the trapping processor. The stack is expected to be pointing at
226 226 * the standard regs pushed by hardware on a trap (plus error code and trapno).
227 227 *
228 228 * It's ok for us to clobber gsbase here (and possibly end up with both gsbase
229 229 * and kgsbase set to the same value) because we're not going back the normal
230 230 * way out of here (via IRET). Where we're going, we don't need no user %gs.
231 231 */
232 232 #define SET_CPU_GSBASE \
233 233 subq $REGOFF_TRAPNO, %rsp; /* save regs */ \
234 234 movq %rax, REGOFF_RAX(%rsp); \
235 235 movq %rbx, REGOFF_RBX(%rsp); \
236 236 movq %rcx, REGOFF_RCX(%rsp); \
237 237 movq %rdx, REGOFF_RDX(%rsp); \
238 238 movq %rbp, REGOFF_RBP(%rsp); \
239 239 movq %rsp, %rbp; \
240 240 subq $16, %rsp; /* space for gdt */ \
241 241 sgdt 6(%rsp); \
242 242 movq 8(%rsp), %rcx; /* %rcx has gdt to match */ \
243 243 xorl %ebx, %ebx; /* loop index */ \
244 244 leaq cpu(%rip), %rdx; /* cpu pointer array */ \
245 245 1: \
246 246 movq (%rdx, %rbx, CLONGSIZE), %rax; /* get cpu[i] */ \
247 247 cmpq $0x0, %rax; /* cpu[i] == NULL ? */ \
248 248 je 2f; /* yes, continue */ \
249 249 cmpq %rcx, CPU_GDT(%rax); /* gdt == cpu[i]->cpu_gdt ? */ \
250 250 je 3f; /* yes, go set gsbase */ \
251 251 2: \
252 252 incl %ebx; /* i++ */ \
253 253 cmpl $NCPU, %ebx; /* i < NCPU ? */ \
254 254 jb 1b; /* yes, loop */ \
255 255 /* XXX BIG trouble if we fall thru here. We didn't find a gdt match */ \
256 256 3: \
257 257 movl $MSR_AMD_KGSBASE, %ecx; \
258 258 cmpw $KCS_SEL, REGOFF_CS(%rbp); /* trap from kernel? */ \
259 259 jne 4f; /* no, go set KGSBASE */ \
260 260 movl $MSR_AMD_GSBASE, %ecx; /* yes, set GSBASE */ \
261 261 mfence; /* OPTERON_ERRATUM_88 */ \
262 262 4: \
263 263 movq %rax, %rdx; /* write base register */ \
264 264 shrq $32, %rdx; \
265 265 wrmsr; \
266 266 movq REGOFF_RDX(%rbp), %rdx; /* restore regs */ \
267 267 movq REGOFF_RCX(%rbp), %rcx; \
268 268 movq REGOFF_RBX(%rbp), %rbx; \
269 269 movq REGOFF_RAX(%rbp), %rax; \
270 270 movq %rbp, %rsp; \
271 271 movq REGOFF_RBP(%rsp), %rbp; \
272 272 addq $REGOFF_TRAPNO, %rsp /* pop stack */
273 273
274 274 #else /* __xpv */
275 275
276 276 #define SET_CPU_GSBASE /* noop on the hypervisor */
277 277
278 278 #endif /* __xpv */
279 279 #endif /* __amd64 */
280 280
281 281
282 282 #if defined(__amd64)
283 283
284 284 /*
285 285 * #NMI
286 286 *
287 287 * XXPV: See 6532669.
288 288 */
289 289 ENTRY_NP(nmiint)
290 290 TRAP_NOERR(T_NMIFLT) /* $2 */
291 291
292 292 SET_CPU_GSBASE
293 293
294 294 /*
295 295 * Save all registers and setup segment registers
296 296 * with kernel selectors.
297 297 */
298 298 INTR_PUSH
299 299 INTGATE_INIT_KERNEL_FLAGS
300 300
301 301 TRACE_PTR(%r12, %rax, %eax, %rdx, $TT_TRAP)
302 302 TRACE_REGS(%r12, %rsp, %rax, %rbx)
303 303 TRACE_STAMP(%r12)
304 304
305 305 movq %rsp, %rbp
306 306
307 307 movq %rbp, %rdi
308 308 call av_dispatch_nmivect
309 309
310 310 INTR_POP
311 - call *x86_md_clear
311 + call x86_md_clear
312 312 jmp tr_iret_auto
313 313 /*NOTREACHED*/
314 314 SET_SIZE(nmiint)
315 315
316 316 #elif defined(__i386)
317 317
318 318 /*
319 319 * #NMI
320 320 */
321 321 ENTRY_NP(nmiint)
322 322 TRAP_NOERR(T_NMIFLT) /* $2 */
323 323
324 324 /*
325 325 * Save all registers and setup segment registers
326 326 * with kernel selectors.
327 327 */
328 328 INTR_PUSH
329 329 INTGATE_INIT_KERNEL_FLAGS
330 330
331 331 TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
332 332 TRACE_REGS(%edi, %esp, %ebx, %ecx)
333 333 TRACE_STAMP(%edi)
334 334
335 335 movl %esp, %ebp
336 336
337 337 pushl %ebp
338 338 call av_dispatch_nmivect
339 339 addl $4, %esp
340 340
341 341 INTR_POP_USER
342 342 IRET
343 343 SET_SIZE(nmiint)
344 344
345 345 #endif /* __i386 */
346 346
347 347 /*
348 348 * #BP
349 349 */
350 350 ENTRY_NP(brktrap)
351 351
352 352 #if defined(__amd64)
353 353 XPV_TRAP_POP
354 354 cmpw $KCS_SEL, 8(%rsp)
355 355 jne bp_user
356 356
357 357 /*
358 358 * This is a breakpoint in the kernel -- it is very likely that this
359 359 * is DTrace-induced. To unify DTrace handling, we spoof this as an
360 360 * invalid opcode (#UD) fault. Note that #BP is a trap, not a fault --
361 361 * we must decrement the trapping %rip to make it appear as a fault.
362 362 * We then push a non-zero error code to indicate that this is coming
363 363 * from #BP.
364 364 */
365 365 decq (%rsp)
366 366 push $1 /* error code -- non-zero for #BP */
367 367 jmp ud_kernel
368 368
369 369 bp_user:
370 370 #endif /* __amd64 */
371 371
372 372 NPTRAP_NOERR(T_BPTFLT) /* $3 */
373 373 jmp dtrace_trap
374 374
375 375 SET_SIZE(brktrap)
376 376
377 377 /*
378 378 * #OF
379 379 */
380 380 ENTRY_NP(ovflotrap)
381 381 TRAP_NOERR(T_OVFLW) /* $4 */
382 382 jmp cmntrap
383 383 SET_SIZE(ovflotrap)
384 384
385 385 /*
386 386 * #BR
387 387 */
388 388 ENTRY_NP(boundstrap)
389 389 TRAP_NOERR(T_BOUNDFLT) /* $5 */
390 390 jmp cmntrap
391 391 SET_SIZE(boundstrap)
392 392
393 393 #if defined(__amd64)
394 394
395 395 ENTRY_NP(invoptrap)
396 396
397 397 XPV_TRAP_POP
398 398
399 399 cmpw $KCS_SEL, 8(%rsp)
400 400 jne ud_user
401 401
402 402 #if defined(__xpv)
403 403 movb $0, 12(%rsp) /* clear saved upcall_mask from %cs */
404 404 #endif
405 405 push $0 /* error code -- zero for #UD */
406 406 ud_kernel:
407 407 push $0xdddd /* a dummy trap number */
408 408 INTR_PUSH
409 409 movq REGOFF_RIP(%rsp), %rdi
410 410 movq REGOFF_RSP(%rsp), %rsi
411 411 movq REGOFF_RAX(%rsp), %rdx
412 412 pushq (%rsi)
413 413 movq %rsp, %rsi
414 414 subq $8, %rsp
415 415 call dtrace_invop
416 416 ALTENTRY(dtrace_invop_callsite)
417 417 addq $16, %rsp
418 418 cmpl $DTRACE_INVOP_PUSHL_EBP, %eax
419 419 je ud_push
420 420 cmpl $DTRACE_INVOP_LEAVE, %eax
421 421 je ud_leave
422 422 cmpl $DTRACE_INVOP_NOP, %eax
423 423 je ud_nop
424 424 cmpl $DTRACE_INVOP_RET, %eax
425 425 je ud_ret
426 426 jmp ud_trap
427 427
428 428 ud_push:
429 429 /*
430 430 * We must emulate a "pushq %rbp". To do this, we pull the stack
431 431 * down 8 bytes, and then store the base pointer.
432 432 */
433 433 INTR_POP
434 434 subq $16, %rsp /* make room for %rbp */
435 435 pushq %rax /* push temp */
436 436 movq 24(%rsp), %rax /* load calling RIP */
437 437 addq $1, %rax /* increment over trapping instr */
438 438 movq %rax, 8(%rsp) /* store calling RIP */
439 439 movq 32(%rsp), %rax /* load calling CS */
440 440 movq %rax, 16(%rsp) /* store calling CS */
441 441 movq 40(%rsp), %rax /* load calling RFLAGS */
442 442 movq %rax, 24(%rsp) /* store calling RFLAGS */
443 443 movq 48(%rsp), %rax /* load calling RSP */
444 444 subq $8, %rax /* make room for %rbp */
445 445 movq %rax, 32(%rsp) /* store calling RSP */
446 446 movq 56(%rsp), %rax /* load calling SS */
447 447 movq %rax, 40(%rsp) /* store calling SS */
448 448 movq 32(%rsp), %rax /* reload calling RSP */
449 449 movq %rbp, (%rax) /* store %rbp there */
450 450 popq %rax /* pop off temp */
451 451 jmp tr_iret_kernel /* return from interrupt */
452 452 /*NOTREACHED*/
453 453
454 454 ud_leave:
455 455 /*
456 456 * We must emulate a "leave", which is the same as a "movq %rbp, %rsp"
457 457 * followed by a "popq %rbp". This is quite a bit simpler on amd64
458 458 * than it is on i386 -- we can exploit the fact that the %rsp is
459 459 * explicitly saved to effect the pop without having to reshuffle
460 460 * the other data pushed for the trap.
461 461 */
462 462 INTR_POP
463 463 pushq %rax /* push temp */
464 464 movq 8(%rsp), %rax /* load calling RIP */
465 465 addq $1, %rax /* increment over trapping instr */
466 466 movq %rax, 8(%rsp) /* store calling RIP */
467 467 movq (%rbp), %rax /* get new %rbp */
468 468 addq $8, %rbp /* adjust new %rsp */
469 469 movq %rbp, 32(%rsp) /* store new %rsp */
470 470 movq %rax, %rbp /* set new %rbp */
471 471 popq %rax /* pop off temp */
472 472 jmp tr_iret_kernel /* return from interrupt */
473 473 /*NOTREACHED*/
474 474
475 475 ud_nop:
476 476 /*
477 477 * We must emulate a "nop". This is obviously not hard: we need only
478 478 * advance the %rip by one.
479 479 */
480 480 INTR_POP
481 481 incq (%rsp)
482 482 jmp tr_iret_kernel
483 483 /*NOTREACHED*/
484 484
485 485 ud_ret:
486 486 INTR_POP
487 487 pushq %rax /* push temp */
488 488 movq 32(%rsp), %rax /* load %rsp */
489 489 movq (%rax), %rax /* load calling RIP */
490 490 movq %rax, 8(%rsp) /* store calling RIP */
491 491 addq $8, 32(%rsp) /* adjust new %rsp */
492 492 popq %rax /* pop off temp */
493 493 jmp tr_iret_kernel /* return from interrupt */
494 494 /*NOTREACHED*/
495 495
496 496 ud_trap:
497 497 /*
498 498 * We're going to let the kernel handle this as a normal #UD. If,
499 499 * however, we came through #BP and are spoofing #UD (in this case,
500 500 * the stored error value will be non-zero), we need to de-spoof
501 501 * the trap by incrementing %rip and pushing T_BPTFLT.
502 502 */
503 503 cmpq $0, REGOFF_ERR(%rsp)
504 504 je ud_ud
505 505 incq REGOFF_RIP(%rsp)
506 506 addq $REGOFF_RIP, %rsp
507 507 NPTRAP_NOERR(T_BPTFLT) /* $3 */
508 508 jmp cmntrap
509 509
510 510 ud_ud:
511 511 addq $REGOFF_RIP, %rsp
512 512 ud_user:
513 513 NPTRAP_NOERR(T_ILLINST)
514 514 jmp cmntrap
515 515 SET_SIZE(invoptrap)
516 516
517 517 #elif defined(__i386)
518 518
519 519 /*
520 520 * #UD
521 521 */
522 522 ENTRY_NP(invoptrap)
523 523 /*
524 524 * If we are taking an invalid opcode trap while in the kernel, this
525 525 * is likely an FBT probe point.
526 526 */
527 527 pushl %gs
528 528 cmpw $KGS_SEL, (%esp)
529 529 jne 8f
530 530
531 531 addl $4, %esp
532 532 #if defined(__xpv)
533 533 movb $0, 6(%esp) /* clear saved upcall_mask from %cs */
534 534 #endif /* __xpv */
535 535 pusha
536 536 pushl %eax /* push %eax -- may be return value */
537 537 pushl %esp /* push stack pointer */
538 538 addl $48, (%esp) /* adjust to incoming args */
539 539 pushl 40(%esp) /* push calling EIP */
540 540 call dtrace_invop
541 541 ALTENTRY(dtrace_invop_callsite)
542 542 addl $12, %esp
543 543 cmpl $DTRACE_INVOP_PUSHL_EBP, %eax
544 544 je 1f
545 545 cmpl $DTRACE_INVOP_POPL_EBP, %eax
546 546 je 2f
547 547 cmpl $DTRACE_INVOP_LEAVE, %eax
548 548 je 3f
549 549 cmpl $DTRACE_INVOP_NOP, %eax
550 550 je 4f
551 551 jmp 7f
552 552 1:
553 553 /*
554 554 * We must emulate a "pushl %ebp". To do this, we pull the stack
555 555 * down 4 bytes, and then store the base pointer.
556 556 */
557 557 popa
558 558 subl $4, %esp /* make room for %ebp */
559 559 pushl %eax /* push temp */
560 560 movl 8(%esp), %eax /* load calling EIP */
561 561 incl %eax /* increment over LOCK prefix */
562 562 movl %eax, 4(%esp) /* store calling EIP */
563 563 movl 12(%esp), %eax /* load calling CS */
564 564 movl %eax, 8(%esp) /* store calling CS */
565 565 movl 16(%esp), %eax /* load calling EFLAGS */
566 566 movl %eax, 12(%esp) /* store calling EFLAGS */
567 567 movl %ebp, 16(%esp) /* push %ebp */
568 568 popl %eax /* pop off temp */
569 569 jmp _emul_done
570 570 2:
571 571 /*
572 572 * We must emulate a "popl %ebp". To do this, we do the opposite of
573 573 * the above: we remove the %ebp from the stack, and squeeze up the
574 574 * saved state from the trap.
575 575 */
576 576 popa
577 577 pushl %eax /* push temp */
578 578 movl 16(%esp), %ebp /* pop %ebp */
579 579 movl 12(%esp), %eax /* load calling EFLAGS */
580 580 movl %eax, 16(%esp) /* store calling EFLAGS */
581 581 movl 8(%esp), %eax /* load calling CS */
582 582 movl %eax, 12(%esp) /* store calling CS */
583 583 movl 4(%esp), %eax /* load calling EIP */
584 584 incl %eax /* increment over LOCK prefix */
585 585 movl %eax, 8(%esp) /* store calling EIP */
586 586 popl %eax /* pop off temp */
587 587 addl $4, %esp /* adjust stack pointer */
588 588 jmp _emul_done
589 589 3:
590 590 /*
591 591 * We must emulate a "leave", which is the same as a "movl %ebp, %esp"
592 592 * followed by a "popl %ebp". This looks similar to the above, but
593 593 * requires two temporaries: one for the new base pointer, and one
594 594 * for the staging register.
595 595 */
596 596 popa
597 597 pushl %eax /* push temp */
598 598 pushl %ebx /* push temp */
599 599 movl %ebp, %ebx /* set temp to old %ebp */
600 600 movl (%ebx), %ebp /* pop %ebp */
601 601 movl 16(%esp), %eax /* load calling EFLAGS */
602 602 movl %eax, (%ebx) /* store calling EFLAGS */
603 603 movl 12(%esp), %eax /* load calling CS */
604 604 movl %eax, -4(%ebx) /* store calling CS */
605 605 movl 8(%esp), %eax /* load calling EIP */
606 606 incl %eax /* increment over LOCK prefix */
607 607 movl %eax, -8(%ebx) /* store calling EIP */
608 608 movl %ebx, -4(%esp) /* temporarily store new %esp */
609 609 popl %ebx /* pop off temp */
610 610 popl %eax /* pop off temp */
611 611 movl -12(%esp), %esp /* set stack pointer */
612 612 subl $8, %esp /* adjust for three pushes, one pop */
613 613 jmp _emul_done
614 614 4:
615 615 /*
616 616 * We must emulate a "nop". This is obviously not hard: we need only
617 617 * advance the %eip by one.
618 618 */
619 619 popa
620 620 incl (%esp)
621 621 _emul_done:
622 622 IRET /* return from interrupt */
623 623 7:
624 624 popa
625 625 pushl $0
626 626 pushl $T_ILLINST /* $6 */
627 627 jmp cmntrap
628 628 8:
629 629 addl $4, %esp
630 630 pushl $0
631 631 pushl $T_ILLINST /* $6 */
632 632 jmp cmntrap
633 633 SET_SIZE(invoptrap)
634 634
635 635 #endif /* __i386 */
636 636
637 637 /*
638 638 * #NM
639 639 */
640 640
641 641 ENTRY_NP(ndptrap)
642 642 TRAP_NOERR(T_NOEXTFLT) /* $0 */
643 643 SET_CPU_GSBASE
644 644 jmp cmntrap
645 645 SET_SIZE(ndptrap)
646 646
647 647 #if !defined(__xpv)
648 648 #if defined(__amd64)
649 649
650 650 /*
651 651 * #DF
652 652 */
653 653 ENTRY_NP(syserrtrap)
654 654 pushq $T_DBLFLT
655 655 SET_CPU_GSBASE
656 656
657 657 /*
658 658 * We share this handler with kmdb (if kmdb is loaded). As such, we
659 659 * may have reached this point after encountering a #df in kmdb. If
660 660 * that happens, we'll still be on kmdb's IDT. We need to switch back
661 661 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
662 662 * here from kmdb, kmdb is probably in a very sickly state, and
663 663 * shouldn't be entered from the panic flow. We'll suppress that
664 664 * entry by setting nopanicdebug.
665 665 */
666 666 pushq %rax
667 667 subq $DESCTBR_SIZE, %rsp
668 668 sidt (%rsp)
669 669 movq %gs:CPU_IDT, %rax
670 670 cmpq %rax, DTR_BASE(%rsp)
671 671 je 1f
672 672
673 673 movq %rax, DTR_BASE(%rsp)
674 674 movw $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%rsp)
675 675 lidt (%rsp)
676 676
677 677 movl $1, nopanicdebug
678 678
679 679 1: addq $DESCTBR_SIZE, %rsp
680 680 popq %rax
681 681
682 682 DFTRAP_PUSH
683 683
684 684 /*
685 685 * freeze trap trace.
686 686 */
687 687 #ifdef TRAPTRACE
688 688 leaq trap_trace_freeze(%rip), %r11
689 689 incl (%r11)
690 690 #endif
691 691
692 692 ENABLE_INTR_FLAGS
693 693
692 692 movq %rsp, %rdi /* &regs */
695 695 xorl %esi, %esi /* clear address */
696 696 xorl %edx, %edx /* cpuid = 0 */
697 697 call trap
698 698
699 699 SET_SIZE(syserrtrap)
700 700
701 701 #elif defined(__i386)
702 702
703 703 /*
704 704 * #DF
705 705 */
706 706 ENTRY_NP(syserrtrap)
707 707 cli /* disable interrupts */
708 708
709 709 /*
710 710 * We share this handler with kmdb (if kmdb is loaded). As such, we
711 711 * may have reached this point after encountering a #df in kmdb. If
712 712 * that happens, we'll still be on kmdb's IDT. We need to switch back
713 713 * to this CPU's IDT before proceeding. Furthermore, if we did arrive
714 714 * here from kmdb, kmdb is probably in a very sickly state, and
715 715 * shouldn't be entered from the panic flow. We'll suppress that
716 716 * entry by setting nopanicdebug.
717 717 */
718 718
719 719 subl $DESCTBR_SIZE, %esp
720 720 movl %gs:CPU_IDT, %eax
721 721 sidt (%esp)
722 722 cmpl DTR_BASE(%esp), %eax
723 723 je 1f
724 724
725 725 movl %eax, DTR_BASE(%esp)
726 726 movw $_MUL(NIDT, GATE_DESC_SIZE), DTR_LIMIT(%esp)
727 727 lidt (%esp)
728 728
729 729 movl $1, nopanicdebug
730 730
731 731 1: addl $DESCTBR_SIZE, %esp
732 732
733 733 /*
734 734 * Check the CPL in the TSS to see what mode
735 735 * (user or kernel) we took the fault in. At this
736 736 * point we are running in the context of the double
737 737 * fault task (dftss) but the CPU's task points to
738 738 * the previous task (ktss) where the process context
739 739 * has been saved as the result of the task switch.
740 740 */
741 741 movl %gs:CPU_TSS, %eax /* get the TSS */
742 742 movl TSS_SS(%eax), %ebx /* save the fault SS */
743 743 movl TSS_ESP(%eax), %edx /* save the fault ESP */
744 744 testw $CPL_MASK, TSS_CS(%eax) /* user mode ? */
745 745 jz make_frame
746 746 movw TSS_SS0(%eax), %ss /* get on the kernel stack */
747 747 movl TSS_ESP0(%eax), %esp
748 748
749 749 /*
750 750 * Clear the NT flag to avoid a task switch when the process
751 751 * finally pops the EFL off the stack via an iret. Clear
752 752 * the TF flag since that is what the processor does for
753 753 * a normal exception. Clear the IE flag so that interrupts
754 754 * remain disabled.
755 755 */
756 756 movl TSS_EFL(%eax), %ecx
757 757 andl $_BITNOT(PS_NT|PS_T|PS_IE), %ecx
758 758 pushl %ecx
759 759 popfl /* restore the EFL */
760 760 movw TSS_LDT(%eax), %cx /* restore the LDT */
761 761 lldt %cx
762 762
763 763 /*
764 764 * Restore process segment selectors.
765 765 */
766 766 movw TSS_DS(%eax), %ds
767 767 movw TSS_ES(%eax), %es
768 768 movw TSS_FS(%eax), %fs
769 769 movw TSS_GS(%eax), %gs
770 770
771 771 /*
772 772 * Restore task segment selectors.
773 773 */
774 774 movl $KDS_SEL, TSS_DS(%eax)
775 775 movl $KDS_SEL, TSS_ES(%eax)
776 776 movl $KDS_SEL, TSS_SS(%eax)
777 777 movl $KFS_SEL, TSS_FS(%eax)
778 778 movl $KGS_SEL, TSS_GS(%eax)
779 779
780 780 /*
781 781 * Clear the TS bit, the busy bits in both task
782 782 * descriptors, and switch tasks.
783 783 */
784 784 clts
785 785 leal gdt0, %ecx
786 786 movl DFTSS_SEL+4(%ecx), %esi
787 787 andl $_BITNOT(0x200), %esi
788 788 movl %esi, DFTSS_SEL+4(%ecx)
789 789 movl KTSS_SEL+4(%ecx), %esi
790 790 andl $_BITNOT(0x200), %esi
791 791 movl %esi, KTSS_SEL+4(%ecx)
792 792 movw $KTSS_SEL, %cx
793 793 ltr %cx
794 794
795 795 /*
796 796 * Restore part of the process registers.
797 797 */
798 798 movl TSS_EBP(%eax), %ebp
799 799 movl TSS_ECX(%eax), %ecx
800 800 movl TSS_ESI(%eax), %esi
801 801 movl TSS_EDI(%eax), %edi
802 802
803 803 make_frame:
804 804 /*
805 805 * Make a trap frame. Leave the error code (0) on
806 806 * the stack since the first word on a trap stack is
807 807 * unused anyway.
808 808 */
809 809 pushl %ebx / fault SS
810 810 pushl %edx / fault ESP
811 811 pushl TSS_EFL(%eax) / fault EFL
812 812 pushl TSS_CS(%eax) / fault CS
813 813 pushl TSS_EIP(%eax) / fault EIP
814 814 pushl $0 / error code
815 815 pushl $T_DBLFLT / trap number 8
816 816 movl TSS_EBX(%eax), %ebx / restore EBX
817 817 movl TSS_EDX(%eax), %edx / restore EDX
818 818 movl TSS_EAX(%eax), %eax / restore EAX
819 819 sti / enable interrupts
820 820 jmp cmntrap
821 821 SET_SIZE(syserrtrap)
822 822
823 823 #endif /* __i386 */
824 824 #endif /* !__xpv */
825 825
826 826 /*
827 827 * #TS
828 828 */
829 829 ENTRY_NP(invtsstrap)
830 830 TRAP_ERR(T_TSSFLT) /* $10 already have error code on stack */
831 831 jmp cmntrap
832 832 SET_SIZE(invtsstrap)
833 833
834 834 /*
835 835 * #NP
836 836 */
837 837 ENTRY_NP(segnptrap)
838 838 TRAP_ERR(T_SEGFLT) /* $11 already have error code on stack */
839 839 #if defined(__amd64)
840 840 SET_CPU_GSBASE
841 841 #endif
842 842 jmp cmntrap
843 843 SET_SIZE(segnptrap)
844 844
845 845 /*
846 846 * #SS
847 847 */
848 848 ENTRY_NP(stktrap)
849 849 TRAP_ERR(T_STKFLT) /* $12 already have error code on stack */
850 850 #if defined(__amd64)
851 851 SET_CPU_GSBASE
852 852 #endif
853 853 jmp cmntrap
854 854 SET_SIZE(stktrap)
855 855
856 856 /*
857 857 * #GP
858 858 */
859 859 ENTRY_NP(gptrap)
860 860 TRAP_ERR(T_GPFLT) /* $13 already have error code on stack */
861 861 #if defined(__amd64)
862 862 SET_CPU_GSBASE
863 863 #endif
864 864 jmp cmntrap
865 865 SET_SIZE(gptrap)
866 866
867 867 /*
868 868 * #PF
869 869 */
870 870 ENTRY_NP(pftrap)
871 871 TRAP_ERR(T_PGFLT) /* $14 already have error code on stack */
872 872 INTR_PUSH
873 873 #if defined(__xpv)
874 874
875 875 #if defined(__amd64)
876 876 movq %gs:CPU_VCPU_INFO, %r15
877 877 movq VCPU_INFO_ARCH_CR2(%r15), %r15 /* vcpu[].arch.cr2 */
878 878 #elif defined(__i386)
879 879 movl %gs:CPU_VCPU_INFO, %esi
880 880 movl VCPU_INFO_ARCH_CR2(%esi), %esi /* vcpu[].arch.cr2 */
881 881 #endif /* __i386 */
882 882
883 883 #else /* __xpv */
884 884
885 885 #if defined(__amd64)
886 886 movq %cr2, %r15
887 887 #elif defined(__i386)
888 888 movl %cr2, %esi
889 889 #endif /* __i386 */
890 890
891 891 #endif /* __xpv */
892 892 jmp cmntrap_pushed
893 893 SET_SIZE(pftrap)
894 894
895 895 #if !defined(__amd64)
896 896
897 897 .globl idt0_default_r
898 898
899 899 /*
900 900 * #PF pentium bug workaround
901 901 */
902 902 ENTRY_NP(pentium_pftrap)
903 903 pushl %eax
904 904 movl %cr2, %eax
905 905 andl $MMU_STD_PAGEMASK, %eax
906 906
907 907 cmpl %eax, %cs:idt0_default_r+2 /* fixme */
908 908
909 909 je check_for_user_address
910 910 user_mode:
911 911 popl %eax
912 912 pushl $T_PGFLT /* $14 */
913 913 jmp cmntrap
914 914 check_for_user_address:
915 915 /*
916 916 * Before we assume that we have an unmapped trap on our hands,
917 917 * check to see if this is a fault from user mode. If it is,
918 918 * we'll kick back into the page fault handler.
919 919 */
920 920 movl 4(%esp), %eax /* error code */
921 921 andl $PF_ERR_USER, %eax
922 922 jnz user_mode
923 923
924 924 /*
925 925 * We now know that this is the invalid opcode trap.
926 926 */
927 927 popl %eax
928 928 addl $4, %esp /* pop error code */
929 929 jmp invoptrap
930 930 SET_SIZE(pentium_pftrap)
931 931
932 932 #endif /* !__amd64 */
933 933
934 934 ENTRY_NP(resvtrap)
935 935 TRAP_NOERR(T_RESVTRAP) /* (reserved) */
936 936 jmp cmntrap
937 937 SET_SIZE(resvtrap)
938 938
939 939 /*
940 940 * #MF
941 941 */
942 942 ENTRY_NP(ndperr)
943 943 TRAP_NOERR(T_EXTERRFLT) /* $16 */
944 944 jmp cmninttrap
945 945 SET_SIZE(ndperr)
946 946
947 947 /*
948 948 * #AC
949 949 */
950 950 ENTRY_NP(achktrap)
951 951 TRAP_ERR(T_ALIGNMENT) /* $17 */
952 952 jmp cmntrap
953 953 SET_SIZE(achktrap)
954 954
955 955 /*
956 956 * #MC
957 957 */
958 958 .globl cmi_mca_trap /* see uts/i86pc/os/cmi.c */
959 959
960 960 #if defined(__amd64)
961 961
962 962 ENTRY_NP(mcetrap)
963 963 TRAP_NOERR(T_MCE) /* $18 */
964 964
965 965 SET_CPU_GSBASE
966 966
967 967 INTR_PUSH
968 968 INTGATE_INIT_KERNEL_FLAGS
969 969
970 970 TRACE_PTR(%rdi, %rbx, %ebx, %rcx, $TT_TRAP)
971 971 TRACE_REGS(%rdi, %rsp, %rbx, %rcx)
972 972 TRACE_STAMP(%rdi)
973 973
974 974 movq %rsp, %rbp
975 975
976 976 movq %rsp, %rdi /* arg0 = struct regs *rp */
977 977 call cmi_mca_trap /* cmi_mca_trap(rp); */
978 978
979 979 jmp _sys_rtt
980 980 SET_SIZE(mcetrap)
981 981
982 982 #else
983 983
984 984 ENTRY_NP(mcetrap)
985 985 TRAP_NOERR(T_MCE) /* $18 */
986 986
987 987 INTR_PUSH
988 988 INTGATE_INIT_KERNEL_FLAGS
989 989
990 990 TRACE_PTR(%edi, %ebx, %ebx, %ecx, $TT_TRAP)
991 991 TRACE_REGS(%edi, %esp, %ebx, %ecx)
992 992 TRACE_STAMP(%edi)
993 993
994 994 movl %esp, %ebp
995 995
996 996 movl %esp, %ecx
997 997 pushl %ecx /* arg0 = struct regs *rp */
998 998 call cmi_mca_trap /* cmi_mca_trap(rp) */
999 999 addl $4, %esp /* pop arg0 */
1000 1000
1001 1001 jmp _sys_rtt
1002 1002 SET_SIZE(mcetrap)
1003 1003
1004 1004 #endif
1005 1005
1006 1006 /*
1007 1007 * #XF
1008 1008 */
1009 1009 ENTRY_NP(xmtrap)
1010 1010 TRAP_NOERR(T_SIMDFPE) /* $19 */
1011 1011 jmp cmninttrap
1012 1012 SET_SIZE(xmtrap)
1013 1013
1014 1014 ENTRY_NP(invaltrap)
1015 1015 TRAP_NOERR(T_INVALTRAP) /* very invalid */
1016 1016 jmp cmntrap
1017 1017 SET_SIZE(invaltrap)
1018 1018
1019 1019 .globl fasttable
1020 1020
1021 1021 #if defined(__amd64)
1022 1022
1023 1023 ENTRY_NP(fasttrap)
1024 1024 cmpl $T_LASTFAST, %eax
1025 1025 ja 1f
1026 1026 orl %eax, %eax /* (zero extend top 32-bits) */
1027 1027 leaq fasttable(%rip), %r11
1028 1028 leaq (%r11, %rax, CLONGSIZE), %r11
1029 - jmp *(%r11)
1029 + movq (%r11), %r11
1030 + INDIRECT_JMP_REG(r11)
1030 1031 1:
1031 1032 /*
1032 1033 * Fast syscall number was illegal. Make it look
1033 1034 * as if the INT failed. Modify %rip to point before the
1034 1035 * INT, push the expected error code and fake a GP fault.
1035 1036 *
1036 1037 * XXX Why make the error code be offset into idt + 1?
1037 1038 * Instead we should push a real (soft?) error code
1038 1039 * on the stack and #gp handler could know about fasttraps?
1039 1040 */
1040 1041 XPV_TRAP_POP
1041 1042
1042 1043 subq $2, (%rsp) /* XXX int insn 2-bytes */
1043 1044 pushq $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1044 1045
1045 1046 #if defined(__xpv)
1046 1047 pushq %r11
1047 1048 pushq %rcx
1048 1049 #endif
1049 1050 jmp gptrap
1050 1051 SET_SIZE(fasttrap)
1051 1052
1052 1053 #elif defined(__i386)
1053 1054
1054 1055 ENTRY_NP(fasttrap)
1055 1056 cmpl $T_LASTFAST, %eax
1056 1057 ja 1f
1057 1058 jmp *%cs:fasttable(, %eax, CLONGSIZE)
1058 1059 1:
1059 1060 /*
1060 1061 * Fast syscall number was illegal. Make it look
1061 1062 * as if the INT failed. Modify %eip to point before the
1062 1063 * INT, push the expected error code and fake a GP fault.
1063 1064 *
1064 1065 * XXX Why make the error code be offset into idt + 1?
1065 1066 * Instead we should push a real (soft?) error code
1066 1067 * on the stack and #gp handler could know about fasttraps?
1067 1068 */
1068 1069 subl $2, (%esp) /* XXX int insn 2-bytes */
1069 1070 pushl $_CONST(_MUL(T_FASTTRAP, GATE_DESC_SIZE) + 2)
1070 1071 jmp gptrap
1071 1072 SET_SIZE(fasttrap)
1072 1073
1073 1074 #endif /* __i386 */
1074 1075
1075 1076 ENTRY_NP(dtrace_ret)
1076 1077 TRAP_NOERR(T_DTRACE_RET)
1077 1078 jmp dtrace_trap
1078 1079 SET_SIZE(dtrace_ret)
1079 1080
1080 1081 #if defined(__amd64)
1081 1082
1082 1083 /*
1083 1084 * RFLAGS 24 bytes up the stack from %rsp.
1084 1085 * XXX a constant would be nicer.
1085 1086 */
1086 1087 ENTRY_NP(fast_null)
1087 1088 XPV_TRAP_POP
1088 1089 orq $PS_C, 24(%rsp) /* set carry bit in user flags */
1089 - call *x86_md_clear
1090 + call x86_md_clear
1090 1091 jmp tr_iret_auto
1091 1092 /*NOTREACHED*/
1092 1093 SET_SIZE(fast_null)
1093 1094
1094 1095 #elif defined(__i386)
1095 1096
1096 1097 ENTRY_NP(fast_null)
1097 1098 orw $PS_C, 8(%esp) /* set carry bit in user flags */
1098 1099 IRET
1099 1100 SET_SIZE(fast_null)
1100 1101
1101 1102 #endif /* __i386 */
1102 1103
1103 1104 /*
1104 1105 * Interrupts start at 32
1105 1106 */
1106 1107 #define MKIVCT(n) \
1107 1108 ENTRY_NP(ivct/**/n) \
1108 1109 push $0; \
1109 1110 push $n - 0x20; \
1110 1111 jmp cmnint; \
1111 1112 SET_SIZE(ivct/**/n)
1112 1113
1113 1114 MKIVCT(32)
1114 1115 MKIVCT(33)
1115 1116 MKIVCT(34)
1116 1117 MKIVCT(35)
1117 1118 MKIVCT(36)
1118 1119 MKIVCT(37)
1119 1120 MKIVCT(38)
1120 1121 MKIVCT(39)
1121 1122 MKIVCT(40)
1122 1123 MKIVCT(41)
1123 1124 MKIVCT(42)
1124 1125 MKIVCT(43)
1125 1126 MKIVCT(44)
1126 1127 MKIVCT(45)
1127 1128 MKIVCT(46)
1128 1129 MKIVCT(47)
1129 1130 MKIVCT(48)
1130 1131 MKIVCT(49)
1131 1132 MKIVCT(50)
1132 1133 MKIVCT(51)
1133 1134 MKIVCT(52)
1134 1135 MKIVCT(53)
1135 1136 MKIVCT(54)
1136 1137 MKIVCT(55)
1137 1138 MKIVCT(56)
1138 1139 MKIVCT(57)
1139 1140 MKIVCT(58)
1140 1141 MKIVCT(59)
1141 1142 MKIVCT(60)
1142 1143 MKIVCT(61)
1143 1144 MKIVCT(62)
1144 1145 MKIVCT(63)
1145 1146 MKIVCT(64)
1146 1147 MKIVCT(65)
1147 1148 MKIVCT(66)
1148 1149 MKIVCT(67)
1149 1150 MKIVCT(68)
1150 1151 MKIVCT(69)
1151 1152 MKIVCT(70)
1152 1153 MKIVCT(71)
1153 1154 MKIVCT(72)
1154 1155 MKIVCT(73)
1155 1156 MKIVCT(74)
1156 1157 MKIVCT(75)
1157 1158 MKIVCT(76)
1158 1159 MKIVCT(77)
1159 1160 MKIVCT(78)
1160 1161 MKIVCT(79)
1161 1162 MKIVCT(80)
1162 1163 MKIVCT(81)
1163 1164 MKIVCT(82)
1164 1165 MKIVCT(83)
1165 1166 MKIVCT(84)
1166 1167 MKIVCT(85)
1167 1168 MKIVCT(86)
1168 1169 MKIVCT(87)
1169 1170 MKIVCT(88)
1170 1171 MKIVCT(89)
1171 1172 MKIVCT(90)
1172 1173 MKIVCT(91)
1173 1174 MKIVCT(92)
1174 1175 MKIVCT(93)
1175 1176 MKIVCT(94)
1176 1177 MKIVCT(95)
1177 1178 MKIVCT(96)
1178 1179 MKIVCT(97)
1179 1180 MKIVCT(98)
1180 1181 MKIVCT(99)
1181 1182 MKIVCT(100)
1182 1183 MKIVCT(101)
1183 1184 MKIVCT(102)
1184 1185 MKIVCT(103)
1185 1186 MKIVCT(104)
1186 1187 MKIVCT(105)
1187 1188 MKIVCT(106)
1188 1189 MKIVCT(107)
1189 1190 MKIVCT(108)
1190 1191 MKIVCT(109)
1191 1192 MKIVCT(110)
1192 1193 MKIVCT(111)
1193 1194 MKIVCT(112)
1194 1195 MKIVCT(113)
1195 1196 MKIVCT(114)
1196 1197 MKIVCT(115)
1197 1198 MKIVCT(116)
1198 1199 MKIVCT(117)
1199 1200 MKIVCT(118)
1200 1201 MKIVCT(119)
1201 1202 MKIVCT(120)
1202 1203 MKIVCT(121)
1203 1204 MKIVCT(122)
1204 1205 MKIVCT(123)
1205 1206 MKIVCT(124)
1206 1207 MKIVCT(125)
1207 1208 MKIVCT(126)
1208 1209 MKIVCT(127)
1209 1210 MKIVCT(128)
1210 1211 MKIVCT(129)
1211 1212 MKIVCT(130)
1212 1213 MKIVCT(131)
1213 1214 MKIVCT(132)
1214 1215 MKIVCT(133)
1215 1216 MKIVCT(134)
1216 1217 MKIVCT(135)
1217 1218 MKIVCT(136)
1218 1219 MKIVCT(137)
1219 1220 MKIVCT(138)
1220 1221 MKIVCT(139)
1221 1222 MKIVCT(140)
1222 1223 MKIVCT(141)
1223 1224 MKIVCT(142)
1224 1225 MKIVCT(143)
1225 1226 MKIVCT(144)
1226 1227 MKIVCT(145)
1227 1228 MKIVCT(146)
1228 1229 MKIVCT(147)
1229 1230 MKIVCT(148)
1230 1231 MKIVCT(149)
1231 1232 MKIVCT(150)
1232 1233 MKIVCT(151)
1233 1234 MKIVCT(152)
1234 1235 MKIVCT(153)
1235 1236 MKIVCT(154)
1236 1237 MKIVCT(155)
1237 1238 MKIVCT(156)
1238 1239 MKIVCT(157)
1239 1240 MKIVCT(158)
1240 1241 MKIVCT(159)
1241 1242 MKIVCT(160)
1242 1243 MKIVCT(161)
1243 1244 MKIVCT(162)
1244 1245 MKIVCT(163)
1245 1246 MKIVCT(164)
1246 1247 MKIVCT(165)
1247 1248 MKIVCT(166)
1248 1249 MKIVCT(167)
1249 1250 MKIVCT(168)
1250 1251 MKIVCT(169)
1251 1252 MKIVCT(170)
1252 1253 MKIVCT(171)
1253 1254 MKIVCT(172)
1254 1255 MKIVCT(173)
1255 1256 MKIVCT(174)
1256 1257 MKIVCT(175)
1257 1258 MKIVCT(176)
1258 1259 MKIVCT(177)
1259 1260 MKIVCT(178)
1260 1261 MKIVCT(179)
1261 1262 MKIVCT(180)
1262 1263 MKIVCT(181)
1263 1264 MKIVCT(182)
1264 1265 MKIVCT(183)
1265 1266 MKIVCT(184)
1266 1267 MKIVCT(185)
1267 1268 MKIVCT(186)
1268 1269 MKIVCT(187)
1269 1270 MKIVCT(188)
1270 1271 MKIVCT(189)
1271 1272 MKIVCT(190)
1272 1273 MKIVCT(191)
1273 1274 MKIVCT(192)
1274 1275 MKIVCT(193)
1275 1276 MKIVCT(194)
1276 1277 MKIVCT(195)
1277 1278 MKIVCT(196)
1278 1279 MKIVCT(197)
1279 1280 MKIVCT(198)
1280 1281 MKIVCT(199)
1281 1282 MKIVCT(200)
1282 1283 MKIVCT(201)
1283 1284 MKIVCT(202)
1284 1285 MKIVCT(203)
1285 1286 MKIVCT(204)
1286 1287 MKIVCT(205)
1287 1288 MKIVCT(206)
1288 1289 MKIVCT(207)
1289 1290 MKIVCT(208)
1290 1291 MKIVCT(209)
1291 1292 MKIVCT(210)
1292 1293 MKIVCT(211)
1293 1294 MKIVCT(212)
1294 1295 MKIVCT(213)
1295 1296 MKIVCT(214)
1296 1297 MKIVCT(215)
1297 1298 MKIVCT(216)
1298 1299 MKIVCT(217)
1299 1300 MKIVCT(218)
1300 1301 MKIVCT(219)
1301 1302 MKIVCT(220)
1302 1303 MKIVCT(221)
1303 1304 MKIVCT(222)
1304 1305 MKIVCT(223)
1305 1306 MKIVCT(224)
1306 1307 MKIVCT(225)
1307 1308 MKIVCT(226)
1308 1309 MKIVCT(227)
1309 1310 MKIVCT(228)
1310 1311 MKIVCT(229)
1311 1312 MKIVCT(230)
1312 1313 MKIVCT(231)
1313 1314 MKIVCT(232)
1314 1315 MKIVCT(233)
1315 1316 MKIVCT(234)
1316 1317 MKIVCT(235)
1317 1318 MKIVCT(236)
1318 1319 MKIVCT(237)
1319 1320 MKIVCT(238)
1320 1321 MKIVCT(239)
1321 1322 MKIVCT(240)
1322 1323 MKIVCT(241)
1323 1324 MKIVCT(242)
1324 1325 MKIVCT(243)
1325 1326 MKIVCT(244)
1326 1327 MKIVCT(245)
1327 1328 MKIVCT(246)
1328 1329 MKIVCT(247)
1329 1330 MKIVCT(248)
1330 1331 MKIVCT(249)
1331 1332 MKIVCT(250)
1332 1333 MKIVCT(251)
1333 1334 MKIVCT(252)
1334 1335 MKIVCT(253)
1335 1336 MKIVCT(254)
1336 1337 MKIVCT(255)
1337 1338
1338 1339 #endif /* __lint */
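
The companion issue, 11788, concerns return-stack-buffer (RSB) stuffing. The 64-bit return paths in this file already funnel through the tr_iret_auto and tr_iret_kernel trampolines rather than issuing iretq directly, and the stuffing itself presumably lives alongside that trampoline code rather than in this file. As a rough illustration of the technique (a sketch of the generic sequence, not the gate's actual macro), RSB stuffing executes a run of calls whose return addresses all point at a speculation trap, then discards those return addresses architecturally:

	/* Illustrative RSB stuffing (sketch only): fill 16 RSB entries */
	movl	$16, %ecx
1:	call	2f		/* each call pushes an RSB entry targeting 3f */
3:	pause			/* speculation trap: a mispredicted ret lands here */
	lfence
	jmp	3b
2:	decl	%ecx
	jnz	1b
	addq	$(16 * 8), %rsp	/* drop the 16 return addresses from the real stack */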