Print this page
11787 Kernel needs to be built with retpolines
11788 Kernel needs to generally use RSB stuffing
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: John Levon <john.levon@joyent.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86pc/ml/cpr_wakecode.s
+++ new/usr/src/uts/i86pc/ml/cpr_wakecode.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
23 + * Copyright 2019 Joyent, Inc.
23 24 */
24 -
25 +
25 26 #include <sys/asm_linkage.h>
26 27 #include <sys/asm_misc.h>
27 28 #include <sys/regset.h>
28 29 #include <sys/privregs.h>
29 30 #include <sys/x86_archext.h>
30 31 #include <sys/cpr_wakecode.h>
31 32
32 33 #if !defined(__lint)
33 34 #include <sys/segments.h>
34 35 #include "assym.h"
35 36 #endif
36 37
37 38 #ifdef DEBUG
38 39 #define LED 1
39 40 #define SERIAL 1
40 41 #endif /* DEBUG */
41 42
42 43 #ifdef DEBUG
43 44 #define COM1 0x3f8
44 45 #define COM2 0x2f8
45 46 #define WC_COM COM2 /* either COM1 or COM2 */
46 47 #define WC_LED 0x80 /* diagnostic led port ON motherboard */
47 48
48 49 /*
49 50 * defined as offsets from the data register
50 51 */
51 52 #define DLL 0 /* divisor latch (lsb) */
52 53 #define DLH 1 /* divisor latch (msb) */
53 54 #define LCR 3 /* line control register */
54 55 #define MCR 4 /* modem control register */
55 56
56 57
57 58 #define DLAB 0x80 /* divisor latch access bit */
58 59 #define B9600L 0X0c /* lsb bit pattern for 9600 baud */
59 60 #define B9600H 0X0 /* hsb bit pattern for 9600 baud */
60 61 #define DTR 0x01 /* Data Terminal Ready */
61 62 #define RTS 0x02 /* Request To Send */
62 63 #define STOP1 0x00 /* 1 stop bit */
63 64 #define BITS8 0x03 /* 8 bits per char */
64 65
65 66 #endif /* DEBUG */
66 67
67 68 /*
68 69 * This file contains the low level routines involved in getting
69 70 * into and out of ACPI S3, including those needed for restarting
70 71 * the non-boot cpus.
71 72 *
72 73 * Our assumptions:
73 74 *
74 75 * Our actions:
75 76 *
76 77 */
77 78
78 79 #if defined(lint) || defined(__lint)
79 80
/*
 * Lint-only stand-in for the assembly wc_save_context below; compiled
 * solely under #if defined(lint) and never executed.  Returns 0, the
 * value the real routine's wakeup path returns.
 */
/*ARGSUSED*/
int
wc_save_context(wc_cpu_t *pcpu)
{ return 0; }
84 85
85 86 #else /* lint */
86 87
87 88 #if defined(__amd64)
88 89
/*
 * int wc_save_context(wc_cpu_t *pcpu)
 *
 * Save this CPU's complete context into the wc_cpu_t at %rdi ahead of
 * ACPI S3 suspend: return address, descriptor-table registers, control
 * registers, general-purpose registers, segment selectors, the
 * FSBASE/GSBASE/KGSBASE MSRs, the cpu id and the flags.  Operates
 * setjmp-style: returns 1 on this (suspend) path, while the wakeup
 * code later restores this context and returns 0 to the same caller.
 *
 * The WC_* names are byte offsets into wc_cpu_t generated in assym.h.
 */
	ENTRY_NP(wc_save_context)

	movq	(%rsp), %rdx		/ return address
	movq	%rdx, WC_RETADDR(%rdi)
	pushq	%rbp
	movq	%rsp,%rbp

	/ remember both the pointer itself and the incoming %rdi value
	movq	%rdi, WC_VIRTADDR(%rdi)
	movq	%rdi, WC_RDI(%rdi)

	/ %rdx still holds the return address at this point
	movq	%rdx, WC_RDX(%rdi)

	/ stash everything else we need
	sgdt	WC_GDT(%rdi)
	sidt	WC_IDT(%rdi)
	sldt	WC_LDT(%rdi)
	str	WC_TR(%rdi)

	/ control registers (%rdx is used as a scratch register here)
	movq	%cr0, %rdx
	movq	%rdx, WC_CR0(%rdi)
	movq	%cr3, %rdx
	movq	%rdx, WC_CR3(%rdi)
	movq	%cr4, %rdx
	movq	%rdx, WC_CR4(%rdi)
	movq	%cr8, %rdx
	movq	%rdx, WC_CR8(%rdi)

	/ general-purpose registers
	movq	%r8, WC_R8(%rdi)
	movq	%r9, WC_R9(%rdi)
	movq	%r10, WC_R10(%rdi)
	movq	%r11, WC_R11(%rdi)
	movq	%r12, WC_R12(%rdi)
	movq	%r13, WC_R13(%rdi)
	movq	%r14, WC_R14(%rdi)
	movq	%r15, WC_R15(%rdi)
	movq	%rax, WC_RAX(%rdi)
	movq	%rbp, WC_RBP(%rdi)
	movq	%rbx, WC_RBX(%rdi)
	movq	%rcx, WC_RCX(%rdi)
	movq	%rsi, WC_RSI(%rdi)
	movq	%rsp, WC_RSP(%rdi)

	/ segment selectors
	movw	%ss, WC_SS(%rdi)
	movw	%cs, WC_CS(%rdi)
	movw	%ds, WC_DS(%rdi)
	movw	%es, WC_ES(%rdi)

	movq	$0, %rcx		/ save %fs register
	movw	%fs, %cx
	movq	%rcx, WC_FS(%rdi)

	/ rdmsr returns the low 32 bits in %eax, high 32 bits in %edx
	movl	$MSR_AMD_FSBASE, %ecx
	rdmsr
	movl	%eax, WC_FSBASE(%rdi)
	movl	%edx, WC_FSBASE+4(%rdi)

	movq	$0, %rcx		/ save %gs register
	movw	%gs, %cx
	movq	%rcx, WC_GS(%rdi)

	movl	$MSR_AMD_GSBASE, %ecx	/ save gsbase msr
	rdmsr
	movl	%eax, WC_GSBASE(%rdi)
	movl	%edx, WC_GSBASE+4(%rdi)

	movl	$MSR_AMD_KGSBASE, %ecx	/ save kgsbase msr
	rdmsr
	movl	%eax, WC_KGSBASE(%rdi)
	movl	%edx, WC_KGSBASE+4(%rdi)

	movq	%gs:CPU_ID, %rax	/ save current cpu id
	movq	%rax, WC_CPU_ID(%rdi)

	/ flags, via the stack
	pushfq
	popq	WC_EFLAGS(%rdi)

	wbinvd				/ flush the cache
	mfence

	movq	$1, %rax		/ at suspend return 1

	leave

	ret

	SET_SIZE(wc_save_context)
175 176
176 177 #elif defined(__i386)
177 178
/*
 * int wc_save_context(wc_cpu_t *pcpu)  -- 32-bit variant
 *
 * Save this CPU's context into the wc_cpu_t argument (fetched from the
 * stack, i386 calling convention) ahead of ACPI S3 suspend: return
 * address, descriptor-table registers, control registers,
 * general-purpose registers, segment selectors, cpu id and flags.
 * Returns 1 on this (suspend) path; the wakeup code restores the saved
 * context and returns 0 to the same caller (setjmp-style).
 */
	ENTRY_NP(wc_save_context)

	movl	4(%esp), %eax		/ wc_cpu_t *
	movl	%eax, WC_VIRTADDR(%eax)

	movl	(%esp), %edx		/ return address
	movl	%edx, WC_RETADDR(%eax)

	str	WC_TR(%eax)		/ stash everything else we need
	sgdt	WC_GDT(%eax)
	sldt	WC_LDT(%eax)
	sidt	WC_IDT(%eax)

	/ control registers (%edx is used as a scratch register here)
	movl	%cr0, %edx
	movl	%edx, WC_CR0(%eax)
	movl	%cr3, %edx
	movl	%edx, WC_CR3(%eax)
	movl	%cr4, %edx
	movl	%edx, WC_CR4(%eax)

	/ callee-saved general-purpose registers and stack pointer
	movl	%ebx, WC_EBX(%eax)
	movl	%edi, WC_EDI(%eax)
	movl	%esi, WC_ESI(%eax)
	movl	%ebp, WC_EBP(%eax)
	movl	%esp, WC_ESP(%eax)

	/ segment selectors
	movw	%ss, WC_SS(%eax)
	movw	%cs, WC_CS(%eax)
	movw	%ds, WC_DS(%eax)
	movw	%es, WC_ES(%eax)
	movw	%fs, WC_FS(%eax)
	movw	%gs, WC_GS(%eax)

	/ flags, via the stack
	pushfl
	popl	WC_EFLAGS(%eax)

	pushl	%gs:CPU_ID		/ save current cpu id
	popl	WC_CPU_ID(%eax)

	wbinvd				/ flush the cache
	mfence

	movl	$1, %eax		/ at suspend return 1
	ret

	SET_SIZE(wc_save_context)
224 225
225 226 #endif /* __amd64 */
226 227
227 228 #endif /* lint */
228 229
229 230
230 231 /*
231 232 * Our assumptions:
232 233 * - We are running in real mode.
233 234 * - Interrupts are disabled.
234 235 *
235 236 * Our actions:
236 237 * - We start using our GDT by loading correct values in the
237 238 * selector registers (cs=KCS_SEL, ds=es=ss=KDS_SEL, fs=KFS_SEL,
238 239 * gs=KGS_SEL).
239 240 * - We change over to using our IDT.
240 241 * - We load the default LDT into the hardware LDT register.
241 242 * - We load the default TSS into the hardware task register.
242 243 * - We restore registers
243 244 * - We return to original caller (a la setjmp)
244 245 */
245 246
246 247 #if defined(lint) || defined(__lint)
247 248
/*
 * Lint-only stand-ins for the real-mode wakeup code bracketed by
 * wc_rm_start/wc_rm_end below; compiled solely under #if defined(lint)
 * and never executed.
 */
void
wc_rm_start(void)
{}

void
wc_rm_end(void)
{}
255 256
256 257 #else /* lint */
257 258
258 259 #if defined(__amd64)
259 260
260 261 ENTRY_NP(wc_rm_start)
261 262
262 263 /*
263 264 * For the Sun Studio 10 assembler we needed to do a .code32 and
264 265 * mentally invert the meaning of the addr16 and data16 prefixes to
265 266 * get 32-bit access when generating code to be executed in 16-bit
266 267 * mode (sigh...)
267 268 *
268 269 * This code, despite always being built with GNU as, has inherited
269 270 * the conceptual damage.
270 271 */
271 272
272 273 .code32
273 274
274 275 cli
275 276 movw %cs, %ax
276 277 movw %ax, %ds / establish ds ...
277 278 movw %ax, %ss / ... and ss:esp
278 279 D16 movl $WC_STKSTART, %esp
279 280 / using the following value blows up machines! - DO NOT USE
280 281 / D16 movl 0xffc, %esp
281 282
282 283
283 284 #if LED
284 285 D16 movl $WC_LED, %edx
285 286 D16 movb $0xd1, %al
286 287 outb (%dx)
287 288 #endif
288 289
289 290 #if SERIAL
290 291 D16 movl $WC_COM, %edx
291 292 D16 movb $0x61, %al
292 293 outb (%dx)
293 294 #endif
294 295
295 296 D16 call cominit
296 297
297 298 /*
298 299 * Enable protected-mode, write protect, and alignment mask
299 300 * %cr0 has already been initialised to zero
300 301 */
301 302 movl %cr0, %eax
302 303 D16 orl $_CONST(CR0_PE|CR0_WP|CR0_AM), %eax
303 304 movl %eax, %cr0
304 305
305 306 /*
306 307 * Do a jmp immediately after writing to cr0 when enabling protected
307 308 * mode to clear the real mode prefetch queue (per Intel's docs)
308 309 */
309 310 jmp pestart
310 311 pestart:
311 312
312 313 #if LED
313 314 D16 movl $WC_LED, %edx
314 315 D16 movb $0xd2, %al
315 316 outb (%dx)
316 317 #endif
317 318
318 319 #if SERIAL
319 320 D16 movl $WC_COM, %edx
320 321 D16 movb $0x62, %al
321 322 outb (%dx)
322 323 #endif
323 324
324 325 /*
325 326 * 16-bit protected mode is now active, so prepare to turn on long
326 327 * mode
327 328 */
328 329
329 330 #if LED
330 331 D16 movl $WC_LED, %edx
331 332 D16 movb $0xd3, %al
↓ open down ↓ |
297 lines elided |
↑ open up ↑ |
332 333 outb (%dx)
333 334 #endif
334 335
335 336 #if SERIAL
336 337 D16 movl $WC_COM, %edx
337 338 D16 movb $0x63, %al
338 339 outb (%dx)
339 340 #endif
340 341
341 342 /*
342 - * Add any initial cr4 bits
343 + * Add any initial cr4 bits
343 344 */
344 345 movl %cr4, %eax
345 346 A16 D16 orl CR4OFF, %eax
346 347
347 348 /*
348 349 * Enable PAE mode (CR4.PAE)
349 350 */
350 351 D16 orl $CR4_PAE, %eax
351 352 movl %eax, %cr4
352 353
353 354 #if LED
354 355 D16 movl $WC_LED, %edx
355 356 D16 movb $0xd4, %al
356 357 outb (%dx)
357 358 #endif
358 359
359 360 #if SERIAL
360 361 D16 movl $WC_COM, %edx
361 362 D16 movb $0x64, %al
362 363 outb (%dx)
363 364 #endif
364 365
365 366 /*
366 367 * Point cr3 to the 64-bit long mode page tables.
367 368 *
368 369 * Note that these MUST exist in 32-bit space, as we don't have
369 370 * a way to load %cr3 with a 64-bit base address for the page tables
370 371 * until the CPU is actually executing in 64-bit long mode.
371 372 */
372 373 A16 D16 movl CR3OFF, %eax
373 374 movl %eax, %cr3
374 375
375 376 /*
376 377 * Set long mode enable in EFER (EFER.LME = 1)
377 378 */
378 379 D16 movl $MSR_AMD_EFER, %ecx
379 380 rdmsr
380 381
381 382 D16 orl $AMD_EFER_LME, %eax
382 383 wrmsr
383 384
384 385 #if LED
385 386 D16 movl $WC_LED, %edx
386 387 D16 movb $0xd5, %al
387 388 outb (%dx)
388 389 #endif
389 390
390 391 #if SERIAL
391 392 D16 movl $WC_COM, %edx
392 393 D16 movb $0x65, %al
393 394 outb (%dx)
394 395 #endif
395 396
396 397 /*
397 398 * Finally, turn on paging (CR0.PG = 1) to activate long mode.
398 399 */
399 400 movl %cr0, %eax
400 401 D16 orl $CR0_PG, %eax
401 402 movl %eax, %cr0
402 403
403 404 /*
404 405 * The instruction after enabling paging in CR0 MUST be a branch.
405 406 */
406 407 jmp long_mode_active
407 408
408 409 long_mode_active:
409 410
410 411 #if LED
411 412 D16 movl $WC_LED, %edx
412 413 D16 movb $0xd6, %al
413 414 outb (%dx)
414 415 #endif
415 416
416 417 #if SERIAL
417 418 D16 movl $WC_COM, %edx
418 419 D16 movb $0x66, %al
419 420 outb (%dx)
420 421 #endif
421 422
422 423 /*
423 424 * Long mode is now active but since we're still running with the
424 425 * original 16-bit CS we're actually in 16-bit compatibility mode.
425 426 *
426 427 * We have to load an intermediate GDT and IDT here that we know are
↓ open down ↓ |
74 lines elided |
↑ open up ↑ |
427 428 * in 32-bit space before we can use the kernel's GDT and IDT, which
428 429 * may be in the 64-bit address space, and since we're in compatibility
429 430 * mode, we only have access to 16 and 32-bit instructions at the
430 431 * moment.
431 432 */
432 433 A16 D16 lgdt TEMPGDTOFF /* load temporary GDT */
433 434 A16 D16 lidt TEMPIDTOFF /* load temporary IDT */
434 435
435 436
436 437 /*
437 - * Do a far transfer to 64-bit mode. Set the CS selector to a 64-bit
438 + * Do a far transfer to 64-bit mode. Set the CS selector to a 64-bit
438 439 * long mode selector (CS.L=1) in the temporary 32-bit GDT and jump
439 440 * to the real mode platter address of wc_long_mode_64 as until the
440 441 * 64-bit CS is in place we don't have access to 64-bit instructions
441 442 * and thus can't reference a 64-bit %rip.
442 443 */
443 444
444 445 #if LED
445 446 D16 movl $WC_LED, %edx
446 447 D16 movb $0xd7, %al
447 448 outb (%dx)
448 449 #endif
449 450
450 451 #if SERIAL
451 452 D16 movl $WC_COM, %edx
452 453 D16 movb $0x67, %al
453 454 outb (%dx)
454 455 #endif
455 456
456 - D16 pushl $TEMP_CS64_SEL
457 + D16 pushl $TEMP_CS64_SEL
457 458 A16 D16 pushl LM64OFF
458 459
459 460 D16 lret
460 461
461 462
462 463 /*
463 464 * Support routine to re-initialize VGA subsystem
464 465 */
465 466 vgainit:
466 467 D16 ret
467 468
468 469 /*
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
469 470 * Support routine to re-initialize keyboard (which is USB - help!)
470 471 */
471 472 kbdinit:
472 473 D16 ret
473 474
474 475 /*
475 476 * Support routine to re-initialize COM ports to something sane
476 477 */
477 478 cominit:
478 479 / init COM1 & COM2
479 -
480 +
480 481 #if DEBUG
481 482 /*
482 483 * on debug kernels we need to initialize COM1 & COM2 here, so that
483 484 * we can get debug output before the asy driver has resumed
484 485 */
485 486
486 487 / select COM1
487 488 D16 movl $_CONST(COM1+LCR), %edx
488 489 D16 movb $DLAB, %al / divisor latch
489 490 outb (%dx)
490 491
491 492 D16 movl $_CONST(COM1+DLL), %edx / divisor latch lsb
492 493 D16 movb $B9600L, %al / divisor latch
493 494 outb (%dx)
494 495
495 496 D16 movl $_CONST(COM1+DLH), %edx / divisor latch hsb
496 497 D16 movb $B9600H, %al / divisor latch
497 498 outb (%dx)
498 499
499 500 D16 movl $_CONST(COM1+LCR), %edx / select COM1
500 501 D16 movb $_CONST(STOP1|BITS8), %al / 1 stop bit, 8bit word len
501 502 outb (%dx)
502 503
503 504 D16 movl $_CONST(COM1+MCR), %edx / select COM1
504 505 D16 movb $_CONST(RTS|DTR), %al / data term ready & req to send
505 506 outb (%dx)
506 507
507 508 / select COM2
508 509 D16 movl $_CONST(COM2+LCR), %edx
509 510 D16 movb $DLAB, %al / divisor latch
510 511 outb (%dx)
511 512
512 513 D16 movl $_CONST(COM2+DLL), %edx / divisor latch lsb
513 514 D16 movb $B9600L, %al / divisor latch
514 515 outb (%dx)
515 516
516 517 D16 movl $_CONST(COM2+DLH), %edx / divisor latch hsb
517 518 D16 movb $B9600H, %al / divisor latch
518 519 outb (%dx)
519 520
520 521 D16 movl $_CONST(COM2+LCR), %edx / select COM2
521 522 D16 movb $_CONST(STOP1|BITS8), %al / 1 stop bit, 8bit word len
522 523 outb (%dx)
523 524
524 525 D16 movl $_CONST(COM2+MCR), %edx / select COM2
525 526 D16 movb $_CONST(RTS|DTR), %al / data term ready & req to send
526 527 outb (%dx)
527 528 #endif /* DEBUG */
528 529
529 530 D16 ret
530 531
531 532 .code64
532 533
533 534 .globl wc_long_mode_64
534 535 wc_long_mode_64:
535 536
536 537 #if LED
537 538 movw $WC_LED, %dx
538 539 movb $0xd8, %al
539 540 outb (%dx)
540 541 #endif
541 542
542 543 #if SERIAL
543 544 movw $WC_COM, %dx
544 545 movb $0x68, %al
545 546 outb (%dx)
546 547 #endif
547 548
548 549 /*
549 550 * We are now running in long mode with a 64-bit CS (EFER.LMA=1,
550 551 * CS.L=1) so we now have access to 64-bit instructions.
551 552 *
552 553 * First, set the 64-bit GDT base.
553 554 */
554 555 .globl rm_platter_pa
555 556 movl rm_platter_pa, %eax
556 557
557 558 lgdtq GDTROFF(%rax) /* load 64-bit GDT */
558 559
559 560 /*
560 561 * Save the CPU number in %r11; get the value here since it's saved in
561 562 * the real mode platter.
562 563 */
563 564 / JAN
564 565 / the following is wrong! need to figure out MP systems
565 566 / movl CPUNOFF(%rax), %r11d
566 567
567 568 /*
568 569 * Add rm_platter_pa to %rsp to point it to the same location as seen
569 570 * from 64-bit mode.
570 571 */
571 572 addq %rax, %rsp
572 573
573 574 /*
574 575 * Now do an lretq to load CS with the appropriate selector for the
575 576 * kernel's 64-bit GDT and to start executing 64-bit setup code at the
576 577 * virtual address where boot originally loaded this code rather than
577 578 * the copy in the real mode platter's rm_code array as we've been
578 579 * doing so far.
579 580 */
580 581
581 582 #if LED
582 583 movw $WC_LED, %dx
583 584 movb $0xd9, %al
584 585 outb (%dx)
585 586 #endif
586 587
587 588 / JAN this should produce 'i' but we get 'g' instead ???
588 589 #if SERIAL
589 590 movw $WC_COM, %dx
590 591 movb $0x69, %al
591 592 outb (%dx)
592 593 #endif
593 594
594 595 pushq $KCS_SEL
595 596 pushq $kernel_wc_code
596 597 lretq
597 598
598 599 .globl kernel_wc_code
599 600 kernel_wc_code:
600 601
601 602 #if LED
602 603 movw $WC_LED, %dx
603 604 movb $0xda, %al
604 605 outb (%dx)
605 606 #endif
606 607
607 608 / JAN this should produce 'j' but we get 'g' instead ???
608 609 #if SERIAL
609 610 movw $WC_COM, %dx
610 611 movb $0x6a, %al
611 612 outb (%dx)
612 613 #endif
613 614
614 615 /*
615 616 * Complete the balance of the setup we need to before executing
616 617 * 64-bit kernel code (namely init rsp, TSS, LGDT, FS and GS).
617 618 */
618 619 .globl rm_platter_va
619 620 movq rm_platter_va, %rbx
620 621 addq $WC_CPU, %rbx
621 622
622 623 #if LED
623 624 movw $WC_LED, %dx
624 625 movb $0xdb, %al
625 626 outb (%dx)
626 627 #endif
627 628
628 629 #if SERIAL
629 630 movw $WC_COM, %dx
630 631 movw $0x6b, %ax
631 632 outb (%dx)
632 633 #endif
633 634
634 635 /*
635 636 * restore the rest of the registers
636 637 */
637 638
638 639 lidtq WC_IDT(%rbx)
639 640
640 641 #if LED
641 642 movw $WC_LED, %dx
642 643 movb $0xdc, %al
643 644 outb (%dx)
644 645 #endif
645 646
646 647 #if SERIAL
647 648 movw $WC_COM, %dx
648 649 movw $0x6c, %ax
649 650 outb (%dx)
650 651 #endif
651 652
652 653 /*
653 654 * restore the rest of the registers
654 655 */
655 656
656 657 movw $KDS_SEL, %ax
657 658 movw %ax, %ds
658 659 movw %ax, %es
659 660 movw %ax, %ss
660 661
661 662 /*
662 663 * Before proceeding, enable usage of the page table NX bit if
663 664 * that's how the page tables are set up.
664 665 */
665 666 bt $X86FSET_NX, x86_featureset(%rip)
666 667 jnc 1f
667 668 movl $MSR_AMD_EFER, %ecx
668 669 rdmsr
669 670 orl $AMD_EFER_NXE, %eax
670 671 wrmsr
↓ open down ↓ |
181 lines elided |
↑ open up ↑ |
671 672 1:
672 673
673 674 movq WC_CR4(%rbx), %rax / restore full cr4 (with Global Enable)
674 675 movq %rax, %cr4
675 676
676 677 lldt WC_LDT(%rbx)
677 678 movzwq WC_TR(%rbx), %rax / clear TSS busy bit
678 679 addq WC_GDT+2(%rbx), %rax
679 680 andl $0xfffffdff, 4(%rax)
680 681 movq 4(%rax), %rcx
681 - ltr WC_TR(%rbx)
682 + ltr WC_TR(%rbx)
682 683
683 684 #if LED
684 685 movw $WC_LED, %dx
685 686 movb $0xdd, %al
686 687 outb (%dx)
687 688 #endif
688 689
689 690 #if SERIAL
690 691 movw $WC_COM, %dx
691 692 movw $0x6d, %ax
692 693 outb (%dx)
693 694 #endif
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
694 695
695 696 / restore %fsbase %gsbase %kgsbase registers using wrmsr instruction
696 697
697 698 movq WC_FS(%rbx), %rcx / restore fs register
698 699 movw %cx, %fs
699 700
700 701 movl $MSR_AMD_FSBASE, %ecx
701 702 movl WC_FSBASE(%rbx), %eax
702 703 movl WC_FSBASE+4(%rbx), %edx
703 704 wrmsr
704 -
705 +
705 706 movq WC_GS(%rbx), %rcx / restore gs register
706 707 movw %cx, %gs
707 708
708 709 movl $MSR_AMD_GSBASE, %ecx / restore gsbase msr
709 710 movl WC_GSBASE(%rbx), %eax
710 711 movl WC_GSBASE+4(%rbx), %edx
711 712 wrmsr
712 713
713 714 movl $MSR_AMD_KGSBASE, %ecx / restore kgsbase msr
714 715 movl WC_KGSBASE(%rbx), %eax
715 716 movl WC_KGSBASE+4(%rbx), %edx
716 717 wrmsr
717 718
718 719 movq WC_CR0(%rbx), %rdx
719 720 movq %rdx, %cr0
720 721 movq WC_CR3(%rbx), %rdx
721 722 movq %rdx, %cr3
722 723 movq WC_CR8(%rbx), %rdx
723 724 movq %rdx, %cr8
724 725
725 726 #if LED
726 727 movw $WC_LED, %dx
727 728 movb $0xde, %al
728 729 outb (%dx)
729 730 #endif
730 731
731 732 #if SERIAL
732 733 movw $WC_COM, %dx
733 734 movb $0x6e, %al
734 735 outb (%dx)
735 736 #endif
736 737
737 738 /*
738 739 * if we are not running on the boot CPU restore stack contents by
739 740 * calling i_cpr_restore_stack(curthread, save_stack);
740 741 */
741 742 movq %rsp, %rbp
742 743 call i_cpr_bootcpuid
743 744 cmpl %eax, WC_CPU_ID(%rbx)
744 745 je 2f
745 746
746 747 movq %gs:CPU_THREAD, %rdi
747 748 movq WC_SAVED_STACK(%rbx), %rsi
748 749 call i_cpr_restore_stack
749 750 2:
750 751
751 752 movq WC_RSP(%rbx), %rsp / restore stack pointer
752 753
↓ open down ↓ |
38 lines elided |
↑ open up ↑ |
753 754 /*
754 755 * APIC initialization
755 756 */
756 757 movq %rsp, %rbp
757 758
758 759 /*
759 760 * skip iff function pointer is NULL
760 761 */
761 762 cmpq $0, ap_mlsetup
762 763 je 3f
763 - call *ap_mlsetup
764 + leaq ap_mlsetup, %rax
765 + INDIRECT_CALL_REG(rax)
764 766 3:
765 767
766 - call *cpr_start_cpu_func
768 + leaq cpr_start_cpu_func, %rax
769 + INDIRECT_CALL_REG(rax)
767 770
768 771 / restore %rbx to the value it had before we called the functions above
769 772 movq rm_platter_va, %rbx
770 773 addq $WC_CPU, %rbx
771 774
772 775 movq WC_R8(%rbx), %r8
773 776 movq WC_R9(%rbx), %r9
774 777 movq WC_R10(%rbx), %r10
775 778 movq WC_R11(%rbx), %r11
776 779 movq WC_R12(%rbx), %r12
777 780 movq WC_R13(%rbx), %r13
778 781 movq WC_R14(%rbx), %r14
779 782 movq WC_R15(%rbx), %r15
780 783 / movq WC_RAX(%rbx), %rax
781 784 movq WC_RBP(%rbx), %rbp
782 785 movq WC_RCX(%rbx), %rcx
783 786 / movq WC_RDX(%rbx), %rdx
784 787 movq WC_RDI(%rbx), %rdi
785 788 movq WC_RSI(%rbx), %rsi
786 789
787 790
788 791 / assume that %cs does not need to be restored
789 792 / %ds, %es & %ss are ignored in 64bit mode
790 793 movw WC_SS(%rbx), %ss
791 794 movw WC_DS(%rbx), %ds
792 795 movw WC_ES(%rbx), %es
793 796
794 797 #if LED
795 798 movw $WC_LED, %dx
796 799 movb $0xdf, %al
797 800 outb (%dx)
798 801 #endif
799 802
800 803 #if SERIAL
801 804 movw $WC_COM, %dx
802 805 movb $0x6f, %al
803 806 outb (%dx)
804 807 #endif
805 808
806 809
807 810 movq WC_RBP(%rbx), %rbp
808 811 movq WC_RSP(%rbx), %rsp
809 812
810 813 #if LED
811 814 movw $WC_LED, %dx
812 815 movb $0xe0, %al
813 816 outb (%dx)
814 817 #endif
815 818
816 819 #if SERIAL
817 820 movw $WC_COM, %dx
818 821 movb $0x70, %al
819 822 outb (%dx)
820 823 #endif
821 824
822 825
823 826 movq WC_RCX(%rbx), %rcx
824 827
825 828 pushq WC_EFLAGS(%rbx) / restore flags
826 829 popfq
827 830
828 831 #if LED
829 832 movw $WC_LED, %dx
830 833 movb $0xe1, %al
831 834 outb (%dx)
832 835 #endif
833 836
834 837 #if SERIAL
835 838 movw $WC_COM, %dx
836 839 movb $0x71, %al
837 840 outb (%dx)
838 841 #endif
839 842
840 843 /*
841 844 * can not use outb after this point, because doing so would mean using
842 845 * %dx which would modify %rdx which is restored here
843 846 */
844 847
845 848 movq %rbx, %rax
846 849 movq WC_RDX(%rax), %rdx
847 850 movq WC_RBX(%rax), %rbx
848 851
849 852 leave
850 853
851 854 movq WC_RETADDR(%rax), %rax
852 855 movq %rax, (%rsp) / return to caller of wc_save_context
853 856
854 857 xorl %eax, %eax / at wakeup return 0
855 858 ret
856 859
857 860
858 861 SET_SIZE(wc_rm_start)
859 862
/*
 * void asmspin(int spins)
 *
 * Debug helper: busy-wait by counting the first argument (%edi) down
 * to zero with the loop instruction.  The delay is uncalibrated and
 * CPU-speed dependent.
 */
	ENTRY_NP(asmspin)

	movl	%edi, %ecx	/ loop counter = arg0
A1:
	loop	A1		/ decrement %ecx, spin until zero

	/
	/ Return to the caller.  The original routine had no ret and so
	/ fell through past SET_SIZE into wc_rm_end and beyond.
	/
	ret

	SET_SIZE(asmspin)
867 870
868 871 .globl wc_rm_end
869 872 wc_rm_end:
870 873 nop
871 874
872 875 #elif defined(__i386)
873 876
874 877 ENTRY_NP(wc_rm_start)
875 878
876 879 /entry: jmp entry / stop here for HDT
877 880
878 881 cli
879 882 movw %cs, %ax
880 883 movw %ax, %ds / establish ds ...
881 884 movw %ax, %ss / ... and ss:esp
882 885 D16 movl $WC_STKSTART, %esp
883 886
884 887 #if LED
885 888 D16 movl $WC_LED, %edx
886 889 D16 movb $0xd1, %al
887 890 outb (%dx)
888 891 #endif
889 892
890 893 #if SERIAL
891 894 D16 movl $WC_COM, %edx
892 895 D16 movb $0x61, %al
893 896 outb (%dx)
894 897 #endif
895 898
896 899
897 900 D16 call vgainit
898 901 D16 call kbdinit
899 902 D16 call cominit
900 903
901 904 #if LED
902 905 D16 movl $WC_LED, %edx
903 906 D16 movb $0xd2, %al
904 907 outb (%dx)
905 908 #endif
906 909
907 910 #if SERIAL
908 911 D16 movl $WC_COM, %edx
909 912 D16 movb $0x62, %al
910 913 outb (%dx)
911 914 #endif
912 915
913 916 D16 A16 movl $WC_CPU, %ebx / base add of wc_cpu_t
914 917
915 918 #if LED
916 919 D16 movb $0xd3, %al
917 920 outb $WC_LED
918 921 #endif
919 922
920 923 #if SERIAL
921 924 D16 movl $WC_COM, %edx
922 925 D16 movb $0x63, %al
923 926 outb (%dx)
924 927 #endif
925 928
926 929 D16 A16 movl %cs:WC_DS(%ebx), %edx / %ds post prot/paging transit
927 930
928 931 #if LED
929 932 D16 movb $0xd4, %al
930 933 outb $WC_LED
931 934 #endif
932 935
933 936 D16 A16 lgdt %cs:WC_GDT(%ebx) / restore gdt and idtr
934 937 D16 A16 lidt %cs:WC_IDT(%ebx)
935 938
936 939 #if LED
937 940 D16 movb $0xd5, %al
938 941 outb $WC_LED
939 942 #endif
940 943
941 944 D16 A16 movl %cs:WC_CR4(%ebx), %eax / restore cr4
942 945 D16 andl $_BITNOT(CR4_PGE), %eax / don't set Global Enable yet
943 946 movl %eax, %cr4
944 947
945 948 #if LED
946 949 D16 movb $0xd6, %al
947 950 outb $WC_LED
948 951 #endif
949 952
950 953 D16 A16 movl %cs:WC_CR3(%ebx), %eax / set PDPT
951 954 movl %eax, %cr3
952 955
953 956 #if LED
954 957 D16 movb $0xd7, %al
955 958 outb $WC_LED
956 959 #endif
957 960
958 961 D16 A16 movl %cs:WC_CR0(%ebx), %eax / enable prot/paging, etc.
959 962 movl %eax, %cr0
960 963
961 964 #if LED
962 965 D16 movb $0xd8, %al
963 966 outb $WC_LED
964 967 #endif
965 968
966 969 D16 A16 movl %cs:WC_VIRTADDR(%ebx), %ebx / virtaddr of wc_cpu_t
967 970
968 971 #if LED
969 972 D16 movb $0xd9, %al
970 973 outb $WC_LED
971 974 #endif
972 975
973 976 #if LED
974 977 D16 movb $0xda, %al
975 978 outb $WC_LED
976 979 #endif
977 980
978 981 jmp flush / flush prefetch queue
979 982 flush:
980 983 D16 pushl $KCS_SEL
981 984 D16 pushl $kernel_wc_code
982 985 D16 lret / re-appear at kernel_wc_code
983 986
984 987
985 988 /*
986 989 * Support routine to re-initialize VGA subsystem
987 990 */
988 991 vgainit:
989 992 D16 ret
990 993
991 994 /*
992 995 * Support routine to re-initialize keyboard (which is USB - help!)
993 996 */
994 997 kbdinit:
995 998 D16 ret
996 999
997 1000 /*
998 1001 * Support routine to re-initialize COM ports to something sane for debug output
999 1002 */
1000 1003 cominit:
1001 1004 #if DEBUG
1002 1005 /*
1003 1006 * on debug kernels we need to initialize COM1 & COM2 here, so that
1004 1007 * we can get debug output before the asy driver has resumed
1005 1008 */
1006 1009
1007 1010 / select COM1
1008 1011 D16 movl $_CONST(COM1+LCR), %edx
1009 1012 D16 movb $DLAB, %al / divisor latch
1010 1013 outb (%dx)
1011 1014
1012 1015 D16 movl $_CONST(COM1+DLL), %edx / divisor latch lsb
1013 1016 D16 movb $B9600L, %al / divisor latch
1014 1017 outb (%dx)
1015 1018
1016 1019 D16 movl $_CONST(COM1+DLH), %edx / divisor latch hsb
1017 1020 D16 movb $B9600H, %al / divisor latch
1018 1021 outb (%dx)
1019 1022
1020 1023 D16 movl $_CONST(COM1+LCR), %edx / select COM1
1021 1024 D16 movb $_CONST(STOP1|BITS8), %al / 1 stop bit, 8bit word len
1022 1025 outb (%dx)
1023 1026
1024 1027 D16 movl $_CONST(COM1+MCR), %edx / select COM1
1025 1028 D16 movb $_CONST(RTS|DTR), %al / data term ready & req to send
1026 1029 outb (%dx)
1027 1030
1028 1031 / select COM2
1029 1032 D16 movl $_CONST(COM2+LCR), %edx
1030 1033 D16 movb $DLAB, %al / divisor latch
1031 1034 outb (%dx)
1032 1035
1033 1036 D16 movl $_CONST(COM2+DLL), %edx / divisor latch lsb
1034 1037 D16 movb $B9600L, %al / divisor latch
1035 1038 outb (%dx)
1036 1039
1037 1040 D16 movl $_CONST(COM2+DLH), %edx / divisor latch hsb
1038 1041 D16 movb $B9600H, %al / divisor latch
1039 1042 outb (%dx)
1040 1043
1041 1044 D16 movl $_CONST(COM2+LCR), %edx / select COM2
1042 1045 D16 movb $_CONST(STOP1|BITS8), %al / 1 stop bit, 8bit word len
1043 1046 outb (%dx)
1044 1047
1045 1048 D16 movl $_CONST(COM2+MCR), %edx / select COM2
1046 1049 D16 movb $_CONST(RTS|DTR), %al / data term ready & req to send
1047 1050 outb (%dx)
1048 1051 #endif /* DEBUG */
1049 1052
1050 1053 D16 ret
↓ open down ↓ |
274 lines elided |
↑ open up ↑ |
1051 1054
1052 1055 .globl wc_rm_end
1053 1056 wc_rm_end:
1054 1057 nop
1055 1058
1056 1059 .globl kernel_wc_code
1057 1060 kernel_wc_code:
1058 1061 / At this point we are with kernel's cs and proper eip.
1059 1062 / We will be executing not from the copy in real mode platter,
1060 1063 / but from the original code where boot loaded us.
1061 - / By this time GDT and IDT are loaded as is cr0, cr3 and cr4.
1064 + / By this time GDT and IDT are loaded as is cr0, cr3 and cr4.
1062 1065 / %ebx is wc_cpu
1063 1066 / %dx is our ds
1064 1067
1065 1068 #if LED
1066 1069 D16 movb $0xdb, %al
1067 1070 outb $WC_LED
1068 1071 #endif
1069 1072
1070 1073 / got here OK
1071 1074
1072 1075 movw %dx, %ds / $KDS_SEL
1073 1076
1074 1077 #if LED
1075 1078 movb $0xdc, %al
1076 1079 outb $WC_LED
1077 1080 #endif
1078 1081
1079 1082 /*
1080 1083 * Before proceeding, enable usage of the page table NX bit if
1081 1084 * that's how the page tables are set up.
1082 1085 */
1083 1086 bt $X86FSET_NX, x86_featureset
1084 1087 jnc 1f
1085 1088 movl $MSR_AMD_EFER, %ecx
1086 1089 rdmsr
1087 1090 orl $AMD_EFER_NXE, %eax
1088 1091 wrmsr
1089 1092 1:
1090 1093
1091 1094 movl WC_CR4(%ebx), %eax / restore full cr4 (with Global Enable)
1092 1095 movl %eax, %cr4
1093 1096
1094 1097
1095 1098 lldt WC_LDT(%ebx) / $LDT_SEL
1096 1099
1097 1100 movzwl WC_TR(%ebx), %eax / clear TSS busy bit
1098 1101 addl WC_GDT+2(%ebx), %eax
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
1099 1102 andl $_BITNOT(0x200), 4(%eax)
1100 1103 ltr WC_TR(%ebx) / $UTSS_SEL
1101 1104
1102 1105 movw WC_SS(%ebx), %ss / restore segment registers
1103 1106 movw WC_ES(%ebx), %es
1104 1107 movw WC_FS(%ebx), %fs
1105 1108 movw WC_GS(%ebx), %gs
1106 1109
1107 1110 /*
1108 1111 * set the stack pointer to point into the identity mapped page
1109 - * temporarily, so we can make function calls
1112 + * temporarily, so we can make function calls
1110 1113 */
1111 1114 .globl rm_platter_va
1112 1115 movl rm_platter_va, %eax
1113 1116 movl $WC_STKSTART, %esp
1114 1117 addl %eax, %esp
1115 1118 movl %esp, %ebp
1116 1119
1117 1120 /*
1118 1121 * if we are not running on the boot CPU restore stack contents by
1119 1122 * calling i_cpr_restore_stack(curthread, save_stack);
1120 1123 */
1121 1124 call i_cpr_bootcpuid
1122 1125 cmpl %eax, WC_CPU_ID(%ebx)
1123 1126 je 2f
1124 1127
1125 1128 pushl WC_SAVED_STACK(%ebx)
1126 1129 pushl %gs:CPU_THREAD
1127 1130 call i_cpr_restore_stack
1128 1131 addl $0x10, %esp
1129 1132 2:
1130 1133
1131 1134 movl WC_ESP(%ebx), %esp
1132 1135 movl %esp, %ebp
1133 1136
1134 1137 movl WC_RETADDR(%ebx), %eax / return to caller of wc_save_context
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
1135 1138 movl %eax, (%esp)
1136 1139
1137 1140 /*
1138 1141 * APIC initialization, skip iff function pointer is NULL
1139 1142 */
1140 1143 cmpl $0, ap_mlsetup
1141 1144 je 3f
1142 1145 call *ap_mlsetup
1143 1146 3:
1144 1147
1145 - call *cpr_start_cpu_func
1148 + call *cpr_start_cpu_func
1146 1149
1147 1150 pushl WC_EFLAGS(%ebx) / restore flags
1148 1151 popfl
1149 1152
1150 1153 movl WC_EDI(%ebx), %edi / restore general registers
1151 1154 movl WC_ESI(%ebx), %esi
1152 1155 movl WC_EBP(%ebx), %ebp
1153 1156 movl WC_EBX(%ebx), %ebx
1154 1157
1155 1158 /exit: jmp exit / stop here for HDT
1156 1159
1157 1160 xorl %eax, %eax / at wakeup return 0
1158 1161 ret
1159 1162
1160 1163 SET_SIZE(wc_rm_start)
1161 1164
1162 1165
1163 1166 #endif /* defined(__amd64) */
1164 1167
1165 1168 #endif /* lint */
1166 1169
↓ open down ↓ |
11 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX