11787 Kernel needs to be built with retpolines
11788 Kernel needs to generally use RSB stuffing
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: John Levon <john.levon@joyent.com>
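This webrev covers the CPU startup/stop code's share of the retpoline work (11787): the amd64 path below stops calling mp_startup_boot through a memory-indirect call and instead loads the target into %rax and dispatches through the INDIRECT_CALL_REG() macro, which a retpoline build can route through a return-trampoline thunk rather than a plain indirect call. For orientation only, a minimal sketch of a conventional retpoline thunk for %rax is shown here; the thunk the kernel actually uses is defined elsewhere in the tree, and the name and layout below are illustrative, not copied from this change.

	/*
	 * Sketch of a conventional retpoline thunk for %rax.  The name
	 * __x86_indirect_thunk_rax follows common usage and is assumed,
	 * not taken from this webrev.  The call pushes a return address
	 * the CPU may speculate through; that speculative path is pinned
	 * in the pause/lfence loop, while the architectural path replaces
	 * the saved return address with the real target and returns to it.
	 */
	ENTRY(__x86_indirect_thunk_rax)
	call	2f
1:
	pause
	lfence
	jmp	1b
2:
	movq	%rax, (%rsp)	/* overwrite return address with real target */
	ret
	SET_SIZE(__x86_indirect_thunk_rax)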
--- old/usr/src/uts/i86pc/ml/mpcore.s
+++ new/usr/src/uts/i86pc/ml/mpcore.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright (c) 2010, Intel Corporation.
26 26 * All rights reserved.
27 27 *
28 - * Copyright 2018 Joyent, Inc.
28 + * Copyright 2019 Joyent, Inc.
29 29 */
30 -
30 +
31 31 #include <sys/asm_linkage.h>
32 32 #include <sys/asm_misc.h>
33 33 #include <sys/regset.h>
34 34 #include <sys/privregs.h>
35 35 #include <sys/x86_archext.h>
36 36
37 37 #if !defined(__lint)
38 38 #include <sys/segments.h>
39 39 #include "assym.h"
40 40 #endif
41 41
42 42 /*
43 43 * Our assumptions:
44 44 * - We are running in real mode.
45 45 * - Interrupts are disabled.
46 46 * - Selectors are equal (cs == ds == ss) for all real mode code
47 47 * - The GDT, IDT, ktss and page directory has been built for us
48 48 *
49 49 * Our actions:
50 50 * Start CPU:
51 51 * - We start using our GDT by loading correct values in the
52 52 * selector registers (cs=KCS_SEL, ds=es=ss=KDS_SEL, fs=KFS_SEL,
53 53 * gs=KGS_SEL).
54 54 * - We change over to using our IDT.
55 55 * - We load the default LDT into the hardware LDT register.
56 56 * - We load the default TSS into the hardware task register.
57 57 * - call mp_startup(void) indirectly through the T_PC
58 58 * Stop CPU:
59 59 * - Put CPU into halted state with interrupts disabled
60 60 *
61 61 */
62 62
63 63 #if defined(__lint)
64 64
65 65 void
66 66 real_mode_start_cpu(void)
67 67 {}
68 68
69 69 void
70 70 real_mode_stop_cpu_stage1(void)
71 71 {}
72 72
73 73 void
74 74 real_mode_stop_cpu_stage2(void)
75 75 {}
76 76
77 77 #else /* __lint */
78 78
79 79 #if defined(__amd64)
80 80
81 81 ENTRY_NP(real_mode_start_cpu)
82 82
83 83 /*
84 84 * NOTE: The GNU assembler automatically does the right thing to
85 85 * generate data size operand prefixes based on the code size
86 86 * generation mode (e.g. .code16, .code32, .code64) and as such
87 87 * prefixes need not be used on instructions EXCEPT in the case
88 88 * of address prefixes for code for which the reference is not
89 89 * automatically of the default operand size.
90 - */
90 + */
91 91 .code16
92 92 cli
93 93 movw %cs, %ax
94 94 movw %ax, %ds /* load cs into ds */
95 95 movw %ax, %ss /* and into ss */
96 96
97 97 /*
98 98 * Helps in debugging by giving us the fault address.
99 99 *
100 100 * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
101 101 */
102 102 movl $0xffc, %esp
103 103 movl %cr0, %eax
104 104
105 105 /*
106 106 * Enable protected-mode, write protect, and alignment mask
107 107 */
108 108 orl $(CR0_PE|CR0_WP|CR0_AM), %eax
109 109 movl %eax, %cr0
110 110
111 111 /*
112 112 * Do a jmp immediately after writing to cr0 when enabling protected
113 113 * mode to clear the real mode prefetch queue (per Intel's docs)
114 114 */
115 115 jmp pestart
116 116
117 117 pestart:
118 118 /*
119 - * 16-bit protected mode is now active, so prepare to turn on long
119 + * 16-bit protected mode is now active, so prepare to turn on long
120 120 * mode.
121 121 *
122 122 * Note that we currently assume that if we're attempting to run a
123 123 * kernel compiled with (__amd64) #defined, the target CPU has long
124 124 * mode support.
125 125 */
126 126
127 127 #if 0
128 128 /*
129 129 * If there's a chance this might not be true, the following test should
130 130 * be done, with the no_long_mode branch then doing something
131 131 * appropriate:
132 132 */
133 133
134 134 movl $0x80000000, %eax /* get largest extended CPUID */
135 135 cpuid
136 136 cmpl $0x80000000, %eax /* check if > 0x80000000 */
137 137 jbe no_long_mode /* nope, no long mode */
138 - movl $0x80000001, %eax
138 + movl $0x80000001, %eax
139 139 cpuid /* get extended feature flags */
140 140 btl $29, %edx /* check for long mode */
141 141 jnc no_long_mode /* long mode not supported */
142 142 #endif
143 143
144 144 /*
145 - * Add any initial cr4 bits
145 + * Add any initial cr4 bits
146 146 */
147 147 movl %cr4, %eax
148 148 addr32 orl CR4OFF, %eax
149 149
150 150 /*
151 151 * Enable PAE mode (CR4.PAE)
152 152 */
153 153 orl $CR4_PAE, %eax
154 154 movl %eax, %cr4
155 155
156 156 /*
157 157 * Point cr3 to the 64-bit long mode page tables.
158 158 *
159 159 * Note that these MUST exist in 32-bit space, as we don't have
160 160 * a way to load %cr3 with a 64-bit base address for the page tables
161 161 * until the CPU is actually executing in 64-bit long mode.
162 162 */
163 163 addr32 movl CR3OFF, %eax
164 164 movl %eax, %cr3
165 165
166 166 /*
167 167 * Set long mode enable in EFER (EFER.LME = 1)
168 168 */
169 169 movl $MSR_AMD_EFER, %ecx
170 170 rdmsr
171 171 orl $AMD_EFER_LME, %eax
172 172 wrmsr
173 173
174 174 /*
175 175 * Finally, turn on paging (CR0.PG = 1) to activate long mode.
176 176 */
177 177 movl %cr0, %eax
178 178 orl $CR0_PG, %eax
179 179 movl %eax, %cr0
180 180
181 181 /*
182 182 * The instruction after enabling paging in CR0 MUST be a branch.
183 183 */
184 184 jmp long_mode_active
185 185
186 186 long_mode_active:
187 187 /*
188 188 * Long mode is now active but since we're still running with the
189 189 * original 16-bit CS we're actually in 16-bit compatability mode.
190 190 *
191 191 * We have to load an intermediate GDT and IDT here that we know are
192 192 * in 32-bit space before we can use the kernel's GDT and IDT, which
193 193 * may be in the 64-bit address space, and since we're in compatability
194 194 * mode, we only have access to 16 and 32-bit instructions at the
195 195 * moment.
196 196 */
197 197 addr32 lgdtl TEMPGDTOFF /* load temporary GDT */
198 198 addr32 lidtl TEMPIDTOFF /* load temporary IDT */
199 199
200 200 /*
201 - * Do a far transfer to 64-bit mode. Set the CS selector to a 64-bit
201 + * Do a far transfer to 64-bit mode. Set the CS selector to a 64-bit
202 202 * long mode selector (CS.L=1) in the temporary 32-bit GDT and jump
203 203 * to the real mode platter address of long_mode 64 as until the 64-bit
204 204 * CS is in place we don't have access to 64-bit instructions and thus
205 205 * can't reference a 64-bit %rip.
206 206 */
207 - pushl $TEMP_CS64_SEL
207 + pushl $TEMP_CS64_SEL
208 208 addr32 pushl LM64OFF
209 209 lretl
210 210
211 211 .globl long_mode_64
212 212 long_mode_64:
213 213 .code64
214 214 /*
215 215 * We are now running in long mode with a 64-bit CS (EFER.LMA=1,
216 216 * CS.L=1) so we now have access to 64-bit instructions.
217 217 *
218 218 * First, set the 64-bit GDT base.
219 219 */
220 220 .globl rm_platter_pa
221 221 movl rm_platter_pa, %eax
222 222 lgdtq GDTROFF(%rax) /* load 64-bit GDT */
223 223
224 224 /*
225 225 * Save the CPU number in %r11; get the value here since it's saved in
226 226 * the real mode platter.
227 227 */
228 228 movl CPUNOFF(%rax), %r11d
229 229
230 230 /*
231 231 * Add rm_platter_pa to %rsp to point it to the same location as seen
232 232 * from 64-bit mode.
233 233 */
234 234 addq %rax, %rsp
235 235
236 236 /*
237 237 * Now do an lretq to load CS with the appropriate selector for the
238 238 * kernel's 64-bit GDT and to start executing 64-bit setup code at the
239 239 * virtual address where boot originally loaded this code rather than
240 240 * the copy in the real mode platter's rm_code array as we've been
241 241 * doing so far.
242 242 */
243 243 pushq $KCS_SEL
244 244 pushq $kernel_cs_code
245 245 lretq
246 246 .globl real_mode_start_cpu_end
247 247 real_mode_start_cpu_end:
248 248 nop
249 249
250 250 kernel_cs_code:
251 251 /*
252 252 * Complete the balance of the setup we need to before executing
253 253 * 64-bit kernel code (namely init rsp, TSS, LGDT, FS and GS).
254 254 */
255 255 .globl rm_platter_va
256 256 movq rm_platter_va, %rax
257 257 lidtq IDTROFF(%rax)
258 258
259 259 movw $KDS_SEL, %ax
260 260 movw %ax, %ds
261 261 movw %ax, %es
262 262 movw %ax, %ss
263 263
264 264 movw $KTSS_SEL, %ax /* setup kernel TSS */
265 265 ltr %ax
266 266
267 267 xorw %ax, %ax /* clear LDTR */
268 268 lldt %ax
269 269
270 270 /*
271 271 * Set GS to the address of the per-cpu structure as contained in
272 272 * cpu[cpu_number].
273 273 *
274 274 * Unfortunately there's no way to set the 64-bit gsbase with a mov,
275 275 * so we have to stuff the low 32 bits in %eax and the high 32 bits in
276 276 * %edx, then call wrmsr.
277 277 */
278 278 leaq cpu(%rip), %rdi
279 279 movl (%rdi, %r11, 8), %eax
280 280 movl 4(%rdi, %r11, 8), %edx
281 281 movl $MSR_AMD_GSBASE, %ecx
282 282 wrmsr
283 283
284 284 /*
285 285 * Init FS and KernelGSBase.
286 286 *
287 287 * Based on code in mlsetup(), set them both to 8G (which shouldn't be
288 288 * valid until some 64-bit processes run); this will then cause an
289 289 * exception in any code that tries to index off them before they are
290 290 * properly setup.
291 291 */
292 292 xorl %eax, %eax /* low 32 bits = 0 */
293 293 movl $2, %edx /* high 32 bits = 2 */
294 294 movl $MSR_AMD_FSBASE, %ecx
295 295 wrmsr
296 296
297 297 movl $MSR_AMD_KGSBASE, %ecx
298 298 wrmsr
299 299
300 300 /*
301 301 * Init %rsp to the exception stack set in tss_ist1 and create a legal
302 302 * AMD64 ABI stack frame
303 303 */
304 304 movq %gs:CPU_TSS, %rax
305 305 movq TSS_IST1(%rax), %rsp
306 306 pushq $0 /* null return address */
307 307 pushq $0 /* null frame pointer terminates stack trace */
308 308 movq %rsp, %rbp /* stack aligned on 16-byte boundary */
309 309
310 310 movq %cr0, %rax
311 311 andq $~(CR0_TS|CR0_EM), %rax /* clear emulate math chip bit */
312 312 orq $(CR0_MP|CR0_NE), %rax
313 313 movq %rax, %cr0 /* set machine status word */
314 314
315 315 /*
316 - * Before going any further, enable usage of page table NX bit if
316 + * Before going any further, enable usage of page table NX bit if
317 317 * that's how our page tables are set up.
318 318 */
319 319 bt $X86FSET_NX, x86_featureset(%rip)
320 320 jnc 1f
321 321 movl $MSR_AMD_EFER, %ecx
322 322 rdmsr
323 323 orl $AMD_EFER_NXE, %eax
324 324 wrmsr
325 325 1:
326 326
327 327 /*
328 328 * Complete the rest of the setup and call mp_startup().
329 329 */
330 330 movq %gs:CPU_THREAD, %rax /* get thread ptr */
331 - call *T_PC(%rax) /* call mp_startup_boot */
331 + movq T_PC(%rax), %rax
332 + INDIRECT_CALL_REG(rax) /* call mp_startup_boot */
332 333 /* not reached */
333 334 int $20 /* whoops, returned somehow! */
334 335
335 336 SET_SIZE(real_mode_start_cpu)
336 337
337 338 #elif defined(__i386)
338 339
339 340 ENTRY_NP(real_mode_start_cpu)
340 341
341 342 #if !defined(__GNUC_AS__)
342 343
343 344 cli
344 345 D16 movw %cs, %eax
345 346 movw %eax, %ds /* load cs into ds */
346 347 movw %eax, %ss /* and into ss */
347 348
348 349 /*
349 350 * Helps in debugging by giving us the fault address.
350 351 *
351 352 * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
352 353 */
353 354 D16 movl $0xffc, %esp
354 355
355 - D16 A16 lgdt %cs:GDTROFF
356 - D16 A16 lidt %cs:IDTROFF
356 + D16 A16 lgdt %cs:GDTROFF
357 + D16 A16 lidt %cs:IDTROFF
357 358 D16 A16 movl %cs:CR4OFF, %eax /* set up CR4, if desired */
358 359 D16 andl %eax, %eax
359 360 D16 A16 je no_cr4
360 361
361 362 D16 movl %eax, %ecx
362 363 D16 movl %cr4, %eax
363 364 D16 orl %ecx, %eax
364 365 D16 movl %eax, %cr4
365 366 no_cr4:
366 367 D16 A16 movl %cs:CR3OFF, %eax
367 368 A16 movl %eax, %cr3
368 369 movl %cr0, %eax
369 370
370 371 /*
371 372 * Enable protected-mode, paging, write protect, and alignment mask
372 373 */
373 374 D16 orl $[CR0_PG|CR0_PE|CR0_WP|CR0_AM], %eax
374 375 movl %eax, %cr0
375 376 jmp pestart
376 377
377 378 pestart:
378 379 D16 pushl $KCS_SEL
379 380 D16 pushl $kernel_cs_code
380 381 D16 lret
381 382 .globl real_mode_start_cpu_end
382 383 real_mode_start_cpu_end:
383 384 nop
384 385
385 386 .globl kernel_cs_code
386 387 kernel_cs_code:
387 388 /*
388 389 * At this point we are with kernel's cs and proper eip.
389 390 *
390 391 * We will be executing not from the copy in real mode platter,
391 392 * but from the original code where boot loaded us.
392 393 *
393 394 * By this time GDT and IDT are loaded as is cr3.
394 395 */
395 396 movw $KFS_SEL,%eax
396 397 movw %eax,%fs
397 398 movw $KGS_SEL,%eax
398 399 movw %eax,%gs
399 400 movw $KDS_SEL,%eax
400 401 movw %eax,%ds
401 402 movw %eax,%es
402 403 movl %gs:CPU_TSS,%esi
403 404 movw %eax,%ss
404 405 movl TSS_ESP0(%esi),%esp
405 406 movw $KTSS_SEL,%ax
406 407 ltr %ax
407 408 xorw %ax, %ax /* clear LDTR */
408 409 lldt %ax
409 410 movl %cr0,%edx
410 411 andl $-1![CR0_TS|CR0_EM],%edx /* clear emulate math chip bit */
411 412 orl $[CR0_MP|CR0_NE],%edx
412 413 movl %edx,%cr0 /* set machine status word */
413 414
414 415 /*
415 - * Before going any further, enable usage of page table NX bit if
416 + * Before going any further, enable usage of page table NX bit if
416 417 * that's how our page tables are set up.
417 418 */
418 419 bt $X86FSET_NX, x86_featureset
419 420 jnc 1f
420 421 movl %cr4, %ecx
421 422 andl $CR4_PAE, %ecx
422 423 jz 1f
423 424 movl $MSR_AMD_EFER, %ecx
424 425 rdmsr
425 426 orl $AMD_EFER_NXE, %eax
426 427 wrmsr
427 428 1:
428 429 movl %gs:CPU_THREAD, %eax /* get thread ptr */
429 430 call *T_PC(%eax) /* call mp_startup */
430 431 /* not reached */
431 432 int $20 /* whoops, returned somehow! */
432 433
433 434 #else
434 435
435 436 cli
436 437 mov %cs, %ax
437 438 mov %eax, %ds /* load cs into ds */
438 439 mov %eax, %ss /* and into ss */
439 440
440 441 /*
441 442 * Helps in debugging by giving us the fault address.
442 443 *
443 444 * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
444 445 */
445 446 D16 mov $0xffc, %esp
446 447
447 448 D16 A16 lgdtl %cs:GDTROFF
448 449 D16 A16 lidtl %cs:IDTROFF
449 450 D16 A16 mov %cs:CR4OFF, %eax /* set up CR4, if desired */
450 451 D16 and %eax, %eax
451 452 D16 A16 je no_cr4
452 453
453 454 D16 mov %eax, %ecx
454 455 D16 mov %cr4, %eax
455 456 D16 or %ecx, %eax
456 457 D16 mov %eax, %cr4
457 458 no_cr4:
458 459 D16 A16 mov %cs:CR3OFF, %eax
459 460 A16 mov %eax, %cr3
460 461 mov %cr0, %eax
461 462
462 463 /*
463 464 * Enable protected-mode, paging, write protect, and alignment mask
464 465 */
465 466 D16 or $(CR0_PG|CR0_PE|CR0_WP|CR0_AM), %eax
466 467 mov %eax, %cr0
467 468 jmp pestart
468 469
469 470 pestart:
470 471 D16 pushl $KCS_SEL
471 472 D16 pushl $kernel_cs_code
472 473 D16 lret
473 474 .globl real_mode_start_cpu_end
474 475 real_mode_start_cpu_end:
475 476 nop
476 477 .globl kernel_cs_code
477 478 kernel_cs_code:
478 479 /*
479 480 * At this point we are with kernel's cs and proper eip.
480 481 *
481 482 * We will be executing not from the copy in real mode platter,
482 483 * but from the original code where boot loaded us.
483 484 *
484 485 * By this time GDT and IDT are loaded as is cr3.
485 486 */
486 487 mov $KFS_SEL, %ax
487 488 mov %eax, %fs
488 489 mov $KGS_SEL, %ax
489 490 mov %eax, %gs
490 491 mov $KDS_SEL, %ax
491 492 mov %eax, %ds
492 493 mov %eax, %es
493 494 mov %gs:CPU_TSS, %esi
494 495 mov %eax, %ss
495 496 mov TSS_ESP0(%esi), %esp
496 497 mov $(KTSS_SEL), %ax
497 498 ltr %ax
498 499 xorw %ax, %ax /* clear LDTR */
499 500 lldt %ax
500 501 mov %cr0, %edx
501 502 and $~(CR0_TS|CR0_EM), %edx /* clear emulate math chip bit */
502 503 or $(CR0_MP|CR0_NE), %edx
503 504 mov %edx, %cr0 /* set machine status word */
504 505
505 506 /*
506 - * Before going any farther, enable usage of page table NX bit if
507 + * Before going any farther, enable usage of page table NX bit if
507 508 * that's how our page tables are set up. (PCIDE is enabled later on).
508 509 */
509 510 bt $X86FSET_NX, x86_featureset
510 511 jnc 1f
511 512 movl %cr4, %ecx
512 513 andl $CR4_PAE, %ecx
513 514 jz 1f
514 515 movl $MSR_AMD_EFER, %ecx
515 516 rdmsr
516 517 orl $AMD_EFER_NXE, %eax
517 518 wrmsr
518 519 1:
519 520 mov %gs:CPU_THREAD, %eax /* get thread ptr */
520 521 call *T_PC(%eax) /* call mp_startup */
521 522 /* not reached */
522 523 int $20 /* whoops, returned somehow! */
523 524 #endif
524 525
525 526 SET_SIZE(real_mode_start_cpu)
526 527
527 528 #endif /* __amd64 */
528 529
529 530 #if defined(__amd64)
530 531
531 532 ENTRY_NP(real_mode_stop_cpu_stage1)
532 533
533 534 #if !defined(__GNUC_AS__)
534 535
535 536 /*
536 537 * For vulcan as we need to do a .code32 and mentally invert the
537 538 * meaning of the addr16 and data16 prefixes to get 32-bit access when
538 539 * generating code to be executed in 16-bit mode (sigh...)
539 540 */
540 541 .code32
541 542 cli
542 543 movw %cs, %ax
543 544 movw %ax, %ds /* load cs into ds */
544 545 movw %ax, %ss /* and into ss */
545 546
546 547 /*
547 548 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
548 549 */
549 550 movw $CPUHALTCODEOFF, %ax
550 551 .byte 0xff, 0xe0 /* jmp *%ax */
551 552
552 553 #else /* __GNUC_AS__ */
553 554
554 555 /*
555 556 * NOTE: The GNU assembler automatically does the right thing to
556 557 * generate data size operand prefixes based on the code size
557 558 * generation mode (e.g. .code16, .code32, .code64) and as such
558 559 * prefixes need not be used on instructions EXCEPT in the case
559 560 * of address prefixes for code for which the reference is not
560 561 * automatically of the default operand size.
561 - */
562 + */
562 563 .code16
563 564 cli
564 565 movw %cs, %ax
565 566 movw %ax, %ds /* load cs into ds */
566 567 movw %ax, %ss /* and into ss */
567 568
568 569 /*
569 570 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
570 571 */
571 572 movw $CPUHALTCODEOFF, %ax
572 573 jmp *%ax
573 574
574 575 #endif /* !__GNUC_AS__ */
575 576
576 577 .globl real_mode_stop_cpu_stage1_end
577 578 real_mode_stop_cpu_stage1_end:
578 579 nop
579 580
580 581 SET_SIZE(real_mode_stop_cpu_stage1)
581 582
582 583 #elif defined(__i386)
583 584
584 585 ENTRY_NP(real_mode_stop_cpu_stage1)
585 586
586 587 #if !defined(__GNUC_AS__)
587 588
588 589 cli
589 590 D16 movw %cs, %eax
590 591 movw %eax, %ds /* load cs into ds */
591 592 movw %eax, %ss /* and into ss */
592 593
593 594 /*
594 595 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
595 596 */
596 597 movw $CPUHALTCODEOFF, %ax
597 598 .byte 0xff, 0xe0 /* jmp *%ax */
598 599
599 600 #else /* __GNUC_AS__ */
600 601
601 602 cli
602 603 mov %cs, %ax
603 604 mov %eax, %ds /* load cs into ds */
604 605 mov %eax, %ss /* and into ss */
605 606
606 607 /*
607 608 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
608 609 */
609 610 movw $CPUHALTCODEOFF, %ax
611 + /*
612 + * The following indirect call is executed as part of starting up a CPU.
613 + * As such nothing else should be running on it or executing in the
614 + * system such that it is a viable Spectre v2 branch target injection
615 + * location. At least, in theory.
616 + */
610 617 jmp *%ax
611 618
612 619 #endif /* !__GNUC_AS__ */
613 620
614 621 .globl real_mode_stop_cpu_stage1_end
615 622 real_mode_stop_cpu_stage1_end:
616 623 nop
617 624
618 625 SET_SIZE(real_mode_stop_cpu_stage1)
619 626
620 627 #endif /* __amd64 */
621 628
622 629 ENTRY_NP(real_mode_stop_cpu_stage2)
623 630
624 631 movw $0xdead, %ax
625 632 movw %ax, CPUHALTEDOFF
626 633
627 634 real_mode_stop_cpu_loop:
628 635 /*
629 636 * Put CPU into halted state.
630 637 * Only INIT, SMI, NMI could break the loop.
631 638 */
632 639 hlt
633 640 jmp real_mode_stop_cpu_loop
634 641
635 642 .globl real_mode_stop_cpu_stage2_end
636 643 real_mode_stop_cpu_stage2_end:
637 644 nop
638 645
639 646 SET_SIZE(real_mode_stop_cpu_stage2)
640 647
641 648 #endif /* __lint */
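The companion synopsis (11788, RSB stuffing) is not implemented in this file; the stuffing macro lands elsewhere in the gate. As background only, the generally published sequence refills the return stack buffer by issuing a fixed number of calls that never return architecturally and then discarding the pushed return addresses, so a later RSB underflow cannot consume attacker-primed entries. A minimal sketch, assuming a 32-entry RSB and using illustrative labels, follows; it is not the kernel's own macro.

	/*
	 * Sketch of a conventional RSB-stuffing sequence, assuming a
	 * 32-entry return stack buffer.  Each call pushes one RSB entry;
	 * a speculative return lands in a pause/lfence trap, and the
	 * final addq discards the 32 return addresses left on the stack.
	 */
	movl	$16, %ecx		/* 16 iterations x 2 calls = 32 entries */
1:
	call	2f
11:
	pause
	lfence
	jmp	11b			/* speculation trap; never reached architecturally */
2:
	call	3f
22:
	pause
	lfence
	jmp	22b
3:
	decl	%ecx
	jnz	1b
	addq	$(32 * 8), %rsp		/* drop the stuffed return addresses */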