8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>
--- old/usr/src/uts/i86pc/ml/mpcore.s
+++ new/usr/src/uts/i86pc/ml/mpcore.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright (c) 2010, Intel Corporation.
26 26 * All rights reserved.
27 + *
28 + * Copyright 2018 Joyent, Inc.
27 29 */
28 30
29 31 #include <sys/asm_linkage.h>
30 32 #include <sys/asm_misc.h>
31 33 #include <sys/regset.h>
32 34 #include <sys/privregs.h>
33 35 #include <sys/x86_archext.h>
34 36
35 37 #if !defined(__lint)
36 38 #include <sys/segments.h>
37 39 #include "assym.h"
38 40 #endif
39 41
40 42 /*
41 43 * Our assumptions:
42 44 * - We are running in real mode.
43 45 * - Interrupts are disabled.
44 46 * - Selectors are equal (cs == ds == ss) for all real mode code
45 47	 *	- The GDT, IDT, ktss and page directory have been built for us
46 48 *
47 49 * Our actions:
48 50 * Start CPU:
49 51 * - We start using our GDT by loading correct values in the
50 52 * selector registers (cs=KCS_SEL, ds=es=ss=KDS_SEL, fs=KFS_SEL,
51 53 * gs=KGS_SEL).
52 54 * - We change over to using our IDT.
53 55 * - We load the default LDT into the hardware LDT register.
54 56 * - We load the default TSS into the hardware task register.
55 57 * - call mp_startup(void) indirectly through the T_PC
56 58 * Stop CPU:
57 59 * - Put CPU into halted state with interrupts disabled
58 60 *
59 61 */
60 62
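The bootstrap data this code consumes (initial %cr4 bits, %cr3, the temporary and kernel descriptor-table pseudo-descriptors, the CPU number, and the startup text itself) lives in the "real mode platter", a page staged below 1 MB so 16-bit code can reach it; the *OFF symbols used below (CR4OFF, CR3OFF, TEMPGDTOFF, GDTROFF, CPUNOFF, ...) are assembler offsets into that structure. As a loosely hedged illustration only, a simplified C view might look like the sketch below; the field names here are invented, and the real layout is rm_platter_t in <sys/rm_platter.h>.

	#include <stdint.h>

	/*
	 * Hypothetical, simplified platter layout, for illustration only;
	 * the real structure is rm_platter_t in <sys/rm_platter.h> and
	 * differs in both names and layout.
	 */
	typedef struct fake_rm_platter {
		uint8_t		rm_code[1024];		/* startup text copied here */
		uint32_t	rm_cr4;			/* initial %cr4 bits (CR4OFF) */
		uint32_t	rm_pdbr;		/* %cr3 page table PA (CR3OFF) */
		uint32_t	rm_cpu;			/* CPU number (CPUNOFF) */
		uint32_t	rm_longmode64_addr;	/* long_mode_64 PA (LM64OFF) */
		/* descriptor-table pseudo-descriptors: 16-bit limit + base */
		uint8_t		rm_temp_gdt_desc[6];	/* 32-bit base (TEMPGDTOFF) */
		uint8_t		rm_temp_idt_desc[6];	/* 32-bit base (TEMPIDTOFF) */
		uint8_t		rm_gdt_desc[10];	/* 64-bit base (GDTROFF) */
		uint8_t		rm_idt_desc[10];	/* 64-bit base (IDTROFF) */
	} fake_rm_platter_t;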
61 63 #if defined(__lint)
62 64
63 65 void
64 66 real_mode_start_cpu(void)
65 67 {}
66 68
67 69 void
68 70 real_mode_stop_cpu_stage1(void)
69 71 {}
70 72
71 73 void
72 74 real_mode_stop_cpu_stage2(void)
73 75 {}
74 76
75 77 #else /* __lint */
76 78
77 79 #if defined(__amd64)
78 80
79 81 ENTRY_NP(real_mode_start_cpu)
80 82
81 83 /*
82 84 * NOTE: The GNU assembler automatically does the right thing to
83 85 * generate data size operand prefixes based on the code size
84 86 * generation mode (e.g. .code16, .code32, .code64) and as such
85 87 * prefixes need not be used on instructions EXCEPT in the case
86 88 * of address prefixes for code for which the reference is not
87 89 * automatically of the default operand size.
88 90 */
89 91 .code16
90 92 cli
91 93 movw %cs, %ax
92 94 movw %ax, %ds /* load cs into ds */
93 95 movw %ax, %ss /* and into ss */
94 96
95 97 /*
96 98 * Helps in debugging by giving us the fault address.
97 99 *
98 100 * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
99 101 */
100 102 movl $0xffc, %esp
101 103 movl %cr0, %eax
102 104
103 105 /*
104 106 * Enable protected-mode, write protect, and alignment mask
105 107 */
106 108 orl $(CR0_PE|CR0_WP|CR0_AM), %eax
107 109 movl %eax, %cr0
108 110
109 111 /*
110 112 * Do a jmp immediately after writing to cr0 when enabling protected
111 113 * mode to clear the real mode prefetch queue (per Intel's docs)
112 114 */
113 115 jmp pestart
114 116
115 117 pestart:
116 118 /*
117 119 * 16-bit protected mode is now active, so prepare to turn on long
118 120 * mode.
119 121 *
120 122 * Note that we currently assume that if we're attempting to run a
121 123 * kernel compiled with (__amd64) #defined, the target CPU has long
122 124 * mode support.
123 125 */
124 126
125 127 #if 0
126 128 /*
127 129 * If there's a chance this might not be true, the following test should
128 130 * be done, with the no_long_mode branch then doing something
129 131 * appropriate:
130 132 */
131 133
132 134 movl $0x80000000, %eax /* get largest extended CPUID */
133 135 cpuid
134 136 cmpl $0x80000000, %eax /* check if > 0x80000000 */
135 137 jbe no_long_mode /* nope, no long mode */
136 138 movl $0x80000001, %eax
137 139 cpuid /* get extended feature flags */
138 140 btl $29, %edx /* check for long mode */
139 141 jnc no_long_mode /* long mode not supported */
140 142 #endif
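The disabled block above is the standard long-mode probe: confirm the extended CPUID range reaches leaf 0x80000001, then test bit 29 of that leaf's %edx (the LM flag). For reference, a runnable userland C sketch of the same check, assuming GCC's <cpuid.h>:

	#include <cpuid.h>
	#include <stdio.h>

	int
	have_long_mode(void)
	{
		unsigned int eax, ebx, ecx, edx;

		/* The largest extended leaf must reach 0x80000001. */
		if (__get_cpuid(0x80000000, &eax, &ebx, &ecx, &edx) == 0 ||
		    eax < 0x80000001)
			return (0);

		/* Leaf 0x80000001, %edx bit 29 is the long mode (LM) flag. */
		(void) __get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx);
		return ((edx >> 29) & 1);
	}

	int
	main(void)
	{
		(void) printf("long mode: %s\n", have_long_mode() ? "yes" : "no");
		return (0);
	}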
141 143
142 144 /*
143 145 * Add any initial cr4 bits
144 146 */
145 147 movl %cr4, %eax
146 148 addr32 orl CR4OFF, %eax
147 149
148 150 /*
149 151 * Enable PAE mode (CR4.PAE)
150 152 */
151 153 orl $CR4_PAE, %eax
152 154 movl %eax, %cr4
153 155
154 156 /*
155 157 * Point cr3 to the 64-bit long mode page tables.
156 158 *
157 159 * Note that these MUST exist in 32-bit space, as we don't have
158 160 * a way to load %cr3 with a 64-bit base address for the page tables
159 161 * until the CPU is actually executing in 64-bit long mode.
160 162 */
161 163 addr32 movl CR3OFF, %eax
162 164 movl %eax, %cr3
163 165
164 166 /*
165 167 * Set long mode enable in EFER (EFER.LME = 1)
166 168 */
167 169 movl $MSR_AMD_EFER, %ecx
168 170 rdmsr
169 171 orl $AMD_EFER_LME, %eax
170 172 wrmsr
171 173
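This is the usual MSR read-modify-write: rdmsr returns the MSR in %edx:%eax, LME (bit 8 of EFER, MSR 0xc0000080) is OR-ed into the low half, and wrmsr writes it back. A minimal C sketch of the same sequence, assuming CPL 0 (rdmsr/wrmsr fault elsewhere); the EFER.NXE enable later in this file has the identical shape:

	#include <stdint.h>

	#define	MSR_AMD_EFER	0xc0000080u	/* extended feature enable register */
	#define	AMD_EFER_LME	0x00000100u	/* long mode enable, bit 8 */

	/* Sketch only: rdmsr/wrmsr #GP outside of CPL 0. */
	static inline void
	efer_set_lme(void)
	{
		uint32_t lo, hi;

		__asm__ __volatile__("rdmsr"
		    : "=a" (lo), "=d" (hi)
		    : "c" (MSR_AMD_EFER));
		lo |= AMD_EFER_LME;
		__asm__ __volatile__("wrmsr"
		    : /* no outputs */
		    : "a" (lo), "d" (hi), "c" (MSR_AMD_EFER));
	}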
172 174 /*
173 175 * Finally, turn on paging (CR0.PG = 1) to activate long mode.
174 176 */
175 177 movl %cr0, %eax
176 178 orl $CR0_PG, %eax
177 179 movl %eax, %cr0
178 180
179 181 /*
180 182 * The instruction after enabling paging in CR0 MUST be a branch.
181 183 */
182 184 jmp long_mode_active
183 185
184 186 long_mode_active:
185 187 /*
186 188 * Long mode is now active but since we're still running with the
187 189	 * original 16-bit CS we're actually in 16-bit compatibility mode.
188 190	 *
189 191	 * We have to load an intermediate GDT and IDT here that we know are
190 192	 * in 32-bit space before we can use the kernel's GDT and IDT, which
191 193	 * may be in the 64-bit address space, and since we're in compatibility
192 194	 * mode, we only have access to 16- and 32-bit instructions at the
193 195 * moment.
194 196 */
195 197 addr32 lgdtl TEMPGDTOFF /* load temporary GDT */
196 198 addr32 lidtl TEMPIDTOFF /* load temporary IDT */
197 199
198 200 /*
199 201 * Do a far transfer to 64-bit mode. Set the CS selector to a 64-bit
200 202 * long mode selector (CS.L=1) in the temporary 32-bit GDT and jump
201 203	 * to the real mode platter address of long_mode_64, as until the 64-bit
202 204 * CS is in place we don't have access to 64-bit instructions and thus
203 205 * can't reference a 64-bit %rip.
204 206 */
205 207 pushl $TEMP_CS64_SEL
206 208 addr32 pushl LM64OFF
207 209 lretl
208 210
209 211 .globl long_mode_64
210 212 long_mode_64:
211 213 .code64
212 214 /*
213 215 * We are now running in long mode with a 64-bit CS (EFER.LMA=1,
214 216 * CS.L=1) so we now have access to 64-bit instructions.
215 217 *
216 218 * First, set the 64-bit GDT base.
217 219 */
218 220 .globl rm_platter_pa
219 221 movl rm_platter_pa, %eax
220 222 lgdtq GDTROFF(%rax) /* load 64-bit GDT */
221 223
222 224 /*
223 225 * Save the CPU number in %r11; get the value here since it's saved in
224 226 * the real mode platter.
225 227 */
226 228 movl CPUNOFF(%rax), %r11d
227 229
228 230 /*
229 231 * Add rm_platter_pa to %rsp to point it to the same location as seen
230 232 * from 64-bit mode.
231 233 */
232 234 addq %rax, %rsp
233 235
234 236 /*
235 237 * Now do an lretq to load CS with the appropriate selector for the
236 238 * kernel's 64-bit GDT and to start executing 64-bit setup code at the
237 239 * virtual address where boot originally loaded this code rather than
238 240 * the copy in the real mode platter's rm_code array as we've been
239 241 * doing so far.
240 242 */
241 243 pushq $KCS_SEL
242 244 pushq $kernel_cs_code
243 245 lretq
244 246 .globl real_mode_start_cpu_end
245 247 real_mode_start_cpu_end:
246 248 nop
247 249
248 250 kernel_cs_code:
249 251 /*
250 252 * Complete the balance of the setup we need to before executing
251 253 * 64-bit kernel code (namely init rsp, TSS, LGDT, FS and GS).
252 254 */
253 255 .globl rm_platter_va
254 256 movq rm_platter_va, %rax
255 257 lidtq IDTROFF(%rax)
256 258
257 259 movw $KDS_SEL, %ax
258 260 movw %ax, %ds
259 261 movw %ax, %es
260 262 movw %ax, %ss
261 263
262 264 movw $KTSS_SEL, %ax /* setup kernel TSS */
263 265 ltr %ax
264 266
265 267 xorw %ax, %ax /* clear LDTR */
266 268 lldt %ax
267 269
268 270 /*
269 271 * Set GS to the address of the per-cpu structure as contained in
270 272 * cpu[cpu_number].
271 273 *
272 274 * Unfortunately there's no way to set the 64-bit gsbase with a mov,
273 275 * so we have to stuff the low 32 bits in %eax and the high 32 bits in
274 276 * %edx, then call wrmsr.
275 277 */
276 278 leaq cpu(%rip), %rdi
277 279 movl (%rdi, %r11, 8), %eax
278 280 movl 4(%rdi, %r11, 8), %edx
279 281 movl $MSR_AMD_GSBASE, %ecx
280 282 wrmsr
281 283
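As the comment says, there is no mov into the 64-bit GS base; wrmsr instead takes the value split across %eax (low half) and %edx (high half), which is why the two movl instructions load the halves of the 64-bit cpu[cpu_number] pointer separately. A hedged C rendering of that split (wrmsr64 is an illustrative name, not an illumos API):

	#include <stdint.h>

	#define	MSR_AMD_GSBASE	0xc0000101u	/* %gs base address MSR */

	/* Illustrative helper (not an illumos API); CPL 0 only. */
	static inline void
	wrmsr64(uint32_t msr, uint64_t value)
	{
		uint32_t lo = (uint32_t)value;		/* loaded into %eax */
		uint32_t hi = (uint32_t)(value >> 32);	/* loaded into %edx */

		__asm__ __volatile__("wrmsr" : : "a" (lo), "d" (hi), "c" (msr));
	}

The two movl instructions above perform exactly this split, reading the halves straight out of the cpu[] array.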
282 284 /*
283 285 * Init FS and KernelGSBase.
284 286 *
285 287 * Based on code in mlsetup(), set them both to 8G (which shouldn't be
286 288 * valid until some 64-bit processes run); this will then cause an
287 289 * exception in any code that tries to index off them before they are
288 290	 * properly set up.
289 291 */
290 292 xorl %eax, %eax /* low 32 bits = 0 */
291 293 movl $2, %edx /* high 32 bits = 2 */
292 294 movl $MSR_AMD_FSBASE, %ecx
293 295 wrmsr
294 296
295 297 movl $MSR_AMD_KGSBASE, %ecx
296 298 wrmsr
297 299
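To make the arithmetic in the comment explicit: %edx supplies the high 32 bits of the MSR value, so %edx = 2 with %eax = 0 writes 2 * 2^32 = 0x200000000, the 8G value mentioned. A trivial self-check, as a sketch:

	#include <assert.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint32_t eax = 0, edx = 2;
		uint64_t base = ((uint64_t)edx << 32) | eax;

		assert(base == 0x200000000ULL);	/* 2 * 2^32 = 8G */
		return (0);
	}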
298 300 /*
299 301 * Init %rsp to the exception stack set in tss_ist1 and create a legal
300 302 * AMD64 ABI stack frame
301 303 */
302 304 movq %gs:CPU_TSS, %rax
303 305 movq TSS_IST1(%rax), %rsp
304 306 pushq $0 /* null return address */
305 307 pushq $0 /* null frame pointer terminates stack trace */
306 308 movq %rsp, %rbp /* stack aligned on 16-byte boundary */
307 309
308 310 movq %cr0, %rax
309 311 andq $~(CR0_TS|CR0_EM), %rax /* clear emulate math chip bit */
310 312 orq $(CR0_MP|CR0_NE), %rax
311 313 movq %rax, %cr0 /* set machine status word */
312 314
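The CR0 update clears TS and EM (no x87 emulation, no pending #NM trap on this path) and sets MP and NE (native #MF error reporting rather than the legacy FERR# mechanism). The same bit arithmetic as a C sketch, using the standard CR0 bit values:

	#include <stdint.h>

	#define	CR0_TS	0x00000008u	/* task switched */
	#define	CR0_EM	0x00000004u	/* emulate x87 */
	#define	CR0_MP	0x00000002u	/* monitor coprocessor */
	#define	CR0_NE	0x00000020u	/* numeric error: native #MF reporting */

	static inline uint64_t
	cr0_fpu_bits(uint64_t cr0)
	{
		cr0 &= ~(uint64_t)(CR0_TS | CR0_EM);	/* clear emulation/trap bits */
		cr0 |= (CR0_MP | CR0_NE);		/* native x87 error handling */
		return (cr0);
	}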
313 315 /*
314 316 * Before going any further, enable usage of page table NX bit if
315 317 * that's how our page tables are set up.
316 318 */
317 319 bt $X86FSET_NX, x86_featureset(%rip)
318 320 jnc 1f
319 321 movl $MSR_AMD_EFER, %ecx
320 322 rdmsr
321 323 orl $AMD_EFER_NXE, %eax
322 324 wrmsr
323 325 1:
324 326
325 327 /*
326 328 * Complete the rest of the setup and call mp_startup().
327 329 */
328 330 movq %gs:CPU_THREAD, %rax /* get thread ptr */
329 - call *T_PC(%rax) /* call mp_startup */
331 + call *T_PC(%rax) /* call mp_startup_boot */
330 332 /* not reached */
331 333 int $20 /* whoops, returned somehow! */
332 334
333 335 SET_SIZE(real_mode_start_cpu)
334 336
335 337 #elif defined(__i386)
336 338
337 339 ENTRY_NP(real_mode_start_cpu)
338 340
339 341 #if !defined(__GNUC_AS__)
340 342
341 343 cli
342 344 D16 movw %cs, %eax
343 345 movw %eax, %ds /* load cs into ds */
344 346 movw %eax, %ss /* and into ss */
345 347
346 348 /*
347 349 * Helps in debugging by giving us the fault address.
348 350 *
349 351 * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
350 352 */
351 353 D16 movl $0xffc, %esp
352 354
353 355 D16 A16 lgdt %cs:GDTROFF
354 356 D16 A16 lidt %cs:IDTROFF
355 357 D16 A16 movl %cs:CR4OFF, %eax /* set up CR4, if desired */
356 358 D16 andl %eax, %eax
357 359 D16 A16 je no_cr4
358 360
359 361 D16 movl %eax, %ecx
360 362 D16 movl %cr4, %eax
361 363 D16 orl %ecx, %eax
362 364 D16 movl %eax, %cr4
363 365 no_cr4:
364 366 D16 A16 movl %cs:CR3OFF, %eax
365 367 A16 movl %eax, %cr3
366 368 movl %cr0, %eax
367 369
368 370 /*
369 371 * Enable protected-mode, paging, write protect, and alignment mask
370 372 */
371 373 D16 orl $[CR0_PG|CR0_PE|CR0_WP|CR0_AM], %eax
372 374 movl %eax, %cr0
373 375 jmp pestart
374 376
375 377 pestart:
376 378 D16 pushl $KCS_SEL
377 379 D16 pushl $kernel_cs_code
378 380 D16 lret
379 381 .globl real_mode_start_cpu_end
380 382 real_mode_start_cpu_end:
381 383 nop
382 384
383 385 .globl kernel_cs_code
384 386 kernel_cs_code:
385 387 /*
386 388 * At this point we are with kernel's cs and proper eip.
387 389 *
388 390 * We will be executing not from the copy in real mode platter,
389 391 * but from the original code where boot loaded us.
390 392 *
391 393 * By this time GDT and IDT are loaded as is cr3.
392 394 */
393 395 movw $KFS_SEL,%eax
394 396 movw %eax,%fs
395 397 movw $KGS_SEL,%eax
396 398 movw %eax,%gs
397 399 movw $KDS_SEL,%eax
398 400 movw %eax,%ds
399 401 movw %eax,%es
400 402 movl %gs:CPU_TSS,%esi
401 403 movw %eax,%ss
402 404 movl TSS_ESP0(%esi),%esp
403 405 movw $KTSS_SEL,%ax
404 406 ltr %ax
405 407 xorw %ax, %ax /* clear LDTR */
406 408 lldt %ax
407 409 movl %cr0,%edx
408 410 andl $-1![CR0_TS|CR0_EM],%edx /* clear emulate math chip bit */
409 411 orl $[CR0_MP|CR0_NE],%edx
410 412 movl %edx,%cr0 /* set machine status word */
411 413
412 414 /*
413 415 * Before going any further, enable usage of page table NX bit if
414 416 * that's how our page tables are set up.
415 417 */
416 418 bt $X86FSET_NX, x86_featureset
417 419 jnc 1f
418 420 movl %cr4, %ecx
419 421 andl $CR4_PAE, %ecx
420 422 jz 1f
421 423 movl $MSR_AMD_EFER, %ecx
422 424 rdmsr
423 425 orl $AMD_EFER_NXE, %eax
424 426 wrmsr
425 427 1:
426 428 movl %gs:CPU_THREAD, %eax /* get thread ptr */
427 429 call *T_PC(%eax) /* call mp_startup */
428 430 /* not reached */
429 431 int $20 /* whoops, returned somehow! */
430 432
431 433 #else
432 434
433 435 cli
434 436 mov %cs, %ax
435 437 mov %eax, %ds /* load cs into ds */
436 438 mov %eax, %ss /* and into ss */
437 439
438 440 /*
439 441 * Helps in debugging by giving us the fault address.
440 442 *
441 443 * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
442 444 */
443 445 D16 mov $0xffc, %esp
444 446
445 447 D16 A16 lgdtl %cs:GDTROFF
446 448 D16 A16 lidtl %cs:IDTROFF
447 449 D16 A16 mov %cs:CR4OFF, %eax /* set up CR4, if desired */
448 450 D16 and %eax, %eax
449 451 D16 A16 je no_cr4
450 452
451 453 D16 mov %eax, %ecx
452 454 D16 mov %cr4, %eax
453 455 D16 or %ecx, %eax
454 456 D16 mov %eax, %cr4
455 457 no_cr4:
456 458 D16 A16 mov %cs:CR3OFF, %eax
457 459 A16 mov %eax, %cr3
458 460 mov %cr0, %eax
459 461
460 462 /*
461 463 * Enable protected-mode, paging, write protect, and alignment mask
462 464 */
463 465 D16 or $(CR0_PG|CR0_PE|CR0_WP|CR0_AM), %eax
464 466 mov %eax, %cr0
465 467 jmp pestart
466 468
467 469 pestart:
468 470 D16 pushl $KCS_SEL
469 471 D16 pushl $kernel_cs_code
470 472 D16 lret
471 473 .globl real_mode_start_cpu_end
472 474 real_mode_start_cpu_end:
473 475 nop
474 476 .globl kernel_cs_code
475 477 kernel_cs_code:
476 478 /*
477 479 * At this point we are with kernel's cs and proper eip.
478 480 *
479 481 * We will be executing not from the copy in real mode platter,
480 482 * but from the original code where boot loaded us.
481 483 *
482 484 * By this time GDT and IDT are loaded as is cr3.
483 485 */
484 486 mov $KFS_SEL, %ax
485 487 mov %eax, %fs
486 488 mov $KGS_SEL, %ax
487 489 mov %eax, %gs
488 490 mov $KDS_SEL, %ax
489 491 mov %eax, %ds
490 492 mov %eax, %es
491 493 mov %gs:CPU_TSS, %esi
492 494 mov %eax, %ss
493 495 mov TSS_ESP0(%esi), %esp
494 496 mov $(KTSS_SEL), %ax
495 497 ltr %ax
496 498 xorw %ax, %ax /* clear LDTR */
497 499 lldt %ax
498 500 mov %cr0, %edx
499 501 and $~(CR0_TS|CR0_EM), %edx /* clear emulate math chip bit */
500 502 or $(CR0_MP|CR0_NE), %edx
501 503 mov %edx, %cr0 /* set machine status word */
502 504
503 505 /*
504 506	 * Before going any further, enable usage of page table NX bit if
505 - * that's how our page tables are set up.
507 + * that's how our page tables are set up. (PCIDE is enabled later on).
506 508 */
507 509 bt $X86FSET_NX, x86_featureset
508 510 jnc 1f
509 511 movl %cr4, %ecx
510 512 andl $CR4_PAE, %ecx
511 513 jz 1f
512 514 movl $MSR_AMD_EFER, %ecx
513 515 rdmsr
514 516 orl $AMD_EFER_NXE, %eax
515 517 wrmsr
516 518 1:
517 519 mov %gs:CPU_THREAD, %eax /* get thread ptr */
518 520 call *T_PC(%eax) /* call mp_startup */
519 521 /* not reached */
520 522 int $20 /* whoops, returned somehow! */
521 523 #endif
522 524
523 525 SET_SIZE(real_mode_start_cpu)
524 526
525 527 #endif /* __amd64 */
526 528
527 529 #if defined(__amd64)
528 530
529 531 ENTRY_NP(real_mode_stop_cpu_stage1)
530 532
531 533 #if !defined(__GNUC_AS__)
532 534
533 535 /*
534 536 * For vulcan as we need to do a .code32 and mentally invert the
535 537 * meaning of the addr16 and data16 prefixes to get 32-bit access when
536 538 * generating code to be executed in 16-bit mode (sigh...)
537 539 */
538 540 .code32
539 541 cli
540 542 movw %cs, %ax
541 543 movw %ax, %ds /* load cs into ds */
542 544 movw %ax, %ss /* and into ss */
543 545
544 546 /*
545 547 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
546 548 */
547 549 movw $CPUHALTCODEOFF, %ax
548 550 .byte 0xff, 0xe0 /* jmp *%ax */
549 551
550 552 #else /* __GNUC_AS__ */
551 553
552 554 /*
553 555 * NOTE: The GNU assembler automatically does the right thing to
554 556 * generate data size operand prefixes based on the code size
555 557 * generation mode (e.g. .code16, .code32, .code64) and as such
556 558 * prefixes need not be used on instructions EXCEPT in the case
557 559 * of address prefixes for code for which the reference is not
558 560 * automatically of the default operand size.
559 561 */
560 562 .code16
561 563 cli
562 564 movw %cs, %ax
563 565 movw %ax, %ds /* load cs into ds */
564 566 movw %ax, %ss /* and into ss */
565 567
566 568 /*
567 569 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
568 570 */
569 571 movw $CPUHALTCODEOFF, %ax
570 572 jmp *%ax
571 573
572 574 #endif /* !__GNUC_AS__ */
573 575
574 576 .globl real_mode_stop_cpu_stage1_end
575 577 real_mode_stop_cpu_stage1_end:
576 578 nop
577 579
578 580 SET_SIZE(real_mode_stop_cpu_stage1)
579 581
580 582 #elif defined(__i386)
581 583
582 584 ENTRY_NP(real_mode_stop_cpu_stage1)
583 585
584 586 #if !defined(__GNUC_AS__)
585 587
586 588 cli
587 589 D16 movw %cs, %eax
588 590 movw %eax, %ds /* load cs into ds */
589 591 movw %eax, %ss /* and into ss */
590 592
591 593 /*
592 594 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
593 595 */
594 596 movw $CPUHALTCODEOFF, %ax
595 597 .byte 0xff, 0xe0 /* jmp *%ax */
596 598
597 599 #else /* __GNUC_AS__ */
598 600
599 601 cli
600 602 mov %cs, %ax
601 603 mov %eax, %ds /* load cs into ds */
602 604 mov %eax, %ss /* and into ss */
603 605
604 606 /*
605 607 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
606 608 */
607 609 movw $CPUHALTCODEOFF, %ax
608 610 jmp *%ax
609 611
610 612 #endif /* !__GNUC_AS__ */
611 613
612 614 .globl real_mode_stop_cpu_stage1_end
613 615 real_mode_stop_cpu_stage1_end:
614 616 nop
615 617
616 618 SET_SIZE(real_mode_stop_cpu_stage1)
617 619
618 620 #endif /* __amd64 */
619 621
620 622 ENTRY_NP(real_mode_stop_cpu_stage2)
621 623
622 624 movw $0xdead, %ax
623 625 movw %ax, CPUHALTEDOFF
624 626
625 627 real_mode_stop_cpu_loop:
626 628 /*
627 629 * Put CPU into halted state.
628 630	 * Only INIT, SMI, or NMI can break the loop.
629 631 */
630 632 hlt
631 633 jmp real_mode_stop_cpu_loop
632 634
633 635 .globl real_mode_stop_cpu_stage2_end
634 636 real_mode_stop_cpu_stage2_end:
635 637 nop
636 638
637 639 SET_SIZE(real_mode_stop_cpu_stage2)
638 640
639 641 #endif /* __lint */