 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#if defined(__lint)

int silence_lint = 0;

#else

#include <sys/segments.h>
#include <sys/controlregs.h>

/*
 * Do a call into BIOS. This goes down to 16 bit real mode and back again.
 */

/*
 * instruction prefix to change the operand size of the following
 * instruction
 */
#define	DATASZ	.byte 0x66;
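
/*
 * 0x66 is the x86 operand size override prefix: inside a 16 bit code
 * segment it gives the following instruction 32 bit operands (and vice
 * versa), which is what the 16 bit stretches below rely on.
 */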

#if defined(__amd64)
#define	MOVCR(x, y)	movq	x, %rax; movq	%rax, y
#define	LOAD_XAX(sym)	leaq	sym, %rax
#elif defined(__i386)
#define	MOVCR(x, y)	movl	x, %eax; movl	%eax, y
#define	LOAD_XAX(sym)	leal	sym, %eax
#endif

	.globl	_start
_start:

#if defined(__i386)

	/*
	 * Save caller registers
	 */
	movl	%ebp, save_ebp
	movl	%esp, save_esp
	movl	%ebx, save_ebx
	movl	%esi, save_esi
	movl	%edi, save_edi

	/* get registers argument into esi */
	movl	8(%esp), %esi

	/* put interrupt number in %bl */
	movl	4(%esp), %ebx

	/* Switch to a low memory stack */
	movl	$_start, %esp

	/* allocate space for args on stack */
	subl	$18, %esp
	movl	%esp, %edi

#elif defined(__amd64)

	/*
	 * Save caller registers
	 */
	movq	%rbp, save_rbp
	movq	%rsp, save_rsp
	movq	%rbx, save_rbx
	movq	%rsi, save_rsi
	movq	%r12, save_r12
	movq	%r13, save_r13
	movq	%r14, save_r14
	movq	%r15, save_r15

	/* Switch to a low memory stack */
	movq	$_start, %rsp

	/* put interrupt number in %bl */
	movq	%rdi, %rbx

	/* allocate space for args on stack */
	subq	$18, %rsp
	movq	%rsp, %rdi

#endif

	/* copy args from high memory to stack in low memory */
	cld
	movl	$18, %ecx
	rep
	movsb
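
	/*
	 * Real mode code can only address the first 1MB, and the BIOS
	 * needs to be able to see the argument block.  _start sits in
	 * low memory, so growing a stack down from it keeps both the
	 * stack and the 18 byte register block within reach.
	 */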

	/*
	 * Save system registers
	 */
	sidt	save_idt
	sgdt	save_gdt
	str	save_tr
	movw	%cs, save_cs
	movw	%ds, save_ds
	movw	%ss, save_ss
	movw	%es, save_es
	movw	%fs, save_fs
	movw	%gs, save_gs
	MOVCR(%cr4, save_cr4)
	MOVCR(%cr3, save_cr3)
	MOVCR(%cr0, save_cr0)

#if defined(__amd64)
	/*
	 * save/clear the extension parts of the fs/gs base registers and cr8
	 */
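	/*
	 * MSR_AMD_FSBASE, MSR_AMD_GSBASE and MSR_AMD_KGSBASE hold the
	 * full 64 bit segment bases used in long mode.  Zeroing them
	 * keeps stale kernel addresses out of the BIOS environment;
	 * the saved values are written back on the way out.
	 */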
	movl	$MSR_AMD_FSBASE, %ecx
	rdmsr
	movl	%eax, save_fsbase
	movl	%edx, save_fsbase + 4
	xorl	%eax, %eax
	xorl	%edx, %edx
	wrmsr

	movl	$MSR_AMD_GSBASE, %ecx
	rdmsr
	movl	%eax, save_gsbase
	movl	%edx, save_gsbase + 4
	xorl	%eax, %eax
	xorl	%edx, %edx
	wrmsr

	movl	$MSR_AMD_KGSBASE, %ecx
	rdmsr
	movl	%eax, save_kgsbase
	movl	%edx, save_kgsbase + 4
	xorl	%eax, %eax
	xorl	%edx, %edx
	wrmsr

	movq	%cr8, %rax
	movq	%rax, save_cr8
#endif

	/*
	 * set offsets in 16 bit ljmp instructions below
	 */
	LOAD_XAX(enter_real)
	movw	%ax, enter_real_ljmp

	LOAD_XAX(enter_protected)
	movw	%ax, enter_protected_ljmp

	LOAD_XAX(gdt_info)
	movw	%ax, gdt_info_load

	/*
	 * insert BIOS interrupt number into later instruction
	 */
	movb	%bl, int_instr+1
	jmp	1f
1:
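
	/*
	 * Patching instruction bytes at run time is needed here because
	 * the 16 bit ljmp and int instructions encode their targets as
	 * immediates.  The jmp 1f above is the traditional way to keep
	 * the CPU from executing a stale, prefetched copy of the bytes
	 * just modified.
	 */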

	/*
	 * zero out all the registers to make sure they're 16 bit clean
	 */
#if defined(__amd64)
	xorq	%r8, %r8
	xorq	%r9, %r9
	xorq	%r10, %r10
	xorq	%r11, %r11
	xorq	%r12, %r12
	xorq	%r13, %r13
	xorq	%r14, %r14
	xorq	%r15, %r15
#endif
	xorl	%eax, %eax
	xorl	%ebx, %ebx
	xorl	%ecx, %ecx
	xorl	%edx, %edx
	xorl	%ebp, %ebp
	xorl	%esi, %esi
	xorl	%edi, %edi

	/*
	 * Load our own GDT/IDT
	 */
	lgdt	gdt_info
	lidt	idt_info

#if defined(__amd64)
	/*
	 * Shut down 64 bit mode. First get into compatibility mode.
	 */
	movq	%rsp, %rax
	pushq	$B32DATA_SEL
	pushq	%rax
	pushf
	pushq	$B32CODE_SEL
	pushq	$1f
	iretq
1:
	.code32

	/*
	 * disable long mode by:
	 * - shutting down paging (bit 31 of cr0)
	 * - flushing the TLB
	 * - disabling LME (long mode enable) in EFER (extended feature reg)
	 */
	movl	%cr0, %eax
	btcl	$31, %eax		/* disable paging */
	movl	%eax, %cr0
	ljmp	$B32CODE_SEL, $1f
1:

	xorl	%eax, %eax
	movl	%eax, %cr3		/* flushes TLB */

	movl	$MSR_AMD_EFER, %ecx	/* Extended Feature Enable */
	rdmsr
	btcl	$8, %eax		/* bit 8 Long Mode Enable bit */
	wrmsr
#endif

	/*
	 * OK, now enter 16 bit mode, so we can shut down protected mode.
	 *
	 * We'll have to act like we're still in a 32 bit section.
	 * So the code from this point has DATASZ in front of it to get 32 bit
	 * operands.  If DATASZ is missing the operands will be 16 bit.
	 *
	 * Now shut down paging and protected (i.e. segmentation) modes.
	 */
	ljmp	$B16CODE_SEL, $enter_16_bit
enter_16_bit:

	/*
	 * Make sure hidden parts of segment registers are 16 bit clean
	 */
	DATASZ	movl	$B16DATA_SEL, %eax
	movw	%ax, %ss
	movw	%ax, %ds
	movw	%ax, %es
	/* ... */

	.value	B32CODE_SEL	/* %cs value */
enter_protected:

	/*
	 * We are now back in a 32 bit code section, fix data/stack segments
	 */
	.code32
	movw	$B32DATA_SEL, %ax
	movw	%ax, %ds
	movw	%ax, %ss

	/*
	 * Re-enable paging.  Note we only use 32 bit mov's to restore these
	 * control registers.  That's OK as the upper 32 bits are always zero.
	 */
	movl	save_cr4, %eax
	movl	%eax, %cr4
	movl	save_cr3, %eax
	movl	%eax, %cr3

#if defined(__amd64)
	/*
	 * re-enable long mode
	 */
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	btsl	$8, %eax
	wrmsr
#endif

	movl	save_cr0, %eax
	movl	%eax, %cr0
	jmp	enter_paging
enter_paging:


#if defined(__amd64)
	/*
	 * transition back to 64 bit mode
	 */
	pushl	$B64CODE_SEL
	pushl	$longmode
	lret
longmode:
	.code64
#endif
	/*
	 * restore the caller's GDT/IDT, task register and segment registers
	 */
	lgdt	save_gdt
	lidt	save_idt

	/*
	 * Before loading the task register we need to reset the busy bit
	 * in its corresponding GDT descriptor, since ltr faults on a TSS
	 * that is already marked busy.  The busy bit is the 2nd bit in
	 * the 5th byte of the descriptor.
	 */
#if defined(__i386)
	movzwl	save_tr, %eax
	addl	save_gdt+2, %eax
	btcl	$1, 5(%eax)
#elif defined(__amd64)
	movzwq	save_tr, %rax
	addq	save_gdt+2, %rax
	btcl	$1, 5(%rax)
#endif
	ltr	save_tr
	movw	save_ds, %ds
	movw	save_ss, %ss
	movw	save_es, %es
	movw	save_fs, %fs
	movw	save_gs, %gs

#if defined(__i386)
	pushl	save_cs
	pushl	$.newcs
	lret
#elif defined(__amd64)
	pushq	save_cs
	pushq	$.newcs
	lretq
#endif
.newcs:

#if defined(__amd64)
	/*
	 * restore the hidden kernel segment base register values
	 */
	movl	save_fsbase, %eax
	movl	save_fsbase + 4, %edx
	movl	$MSR_AMD_FSBASE, %ecx
	wrmsr

	movl	save_gsbase, %eax
	movl	save_gsbase + 4, %edx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr

	movl	save_kgsbase, %eax
	movl	save_kgsbase + 4, %edx
	movl	$MSR_AMD_KGSBASE, %ecx
	wrmsr

	movq	save_cr8, %rax
	cmpq	$0, %rax
	je	1f
	movq	%rax, %cr8
1:
#endif

	/*
	 * copy results to caller's location, then restore remaining registers
	 */
#if defined(__i386)
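	/*
	 * %esi was clobbered above, so recover the caller's second
	 * argument (the pointer to the register block) from the saved
	 * stack: save_esp holds the caller's %esp and the argument
	 * lives 8 bytes above it.
	 */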
	movl	save_esp, %edi
	movl	8(%edi), %edi
	movl	%esp, %esi
	movl	$18, %ecx
	rep
	movsb
	movw	18(%esp), %ax
	andl	$0xffff, %eax
	movl	save_ebx, %ebx
	movl	save_esi, %esi
	movl	save_edi, %edi
	movl	save_ebp, %ebp
	movl	save_esp, %esp
	ret

#elif defined(__amd64)
	movq	save_rsi, %rdi
	movq	%rsp, %rsi
	movq	$18, %rcx
	rep
	movsb
	movw	18(%rsp), %ax
	andq	$0xffff, %rax
	movq	save_r12, %r12
	movq	save_r13, %r13
	movq	save_r14, %r14
	movq	save_r15, %r15
	movq	save_rbx, %rbx
	movq	save_rbp, %rbp
	movq	save_rsp, %rsp
	ret

#endif


/*
 * Caller's registers to restore
 */
	.align 4
save_esi:
	.long	0
save_edi:
	.long	0
save_ebx:
	.long	0
save_ebp:
	.long	0
save_esp:
	.long	0

	.align 8
#if defined(__amd64)
save_rsi:
	.quad	0
save_rbx:
	.quad	0
save_rbp:
	.quad	0
save_rsp:
	.quad	0
save_r12:
	.quad	0
save_r13:
	.quad	0
save_r14:
	.quad	0
save_r15:
	.quad	0
save_kgsbase:
	.quad	0
save_gsbase:
	.quad	0
save_fsbase:
	.quad	0
save_cr8:
	.quad	0
#endif	/* __amd64 */

save_idt:
	.quad	0
	.quad	0

save_gdt:
	.quad	0
	.quad	0

save_cr0:
	.quad	0
save_cr3:
	.quad	0
save_cr4:
	.quad	0
save_cs:
	.quad	0
save_ss:
	.value	0
save_ds:
	.value	0
save_es:
	.value	0
save_fs:
	.value	0
save_gs:
	.value	0
save_tr:
	.value	0

idt_info:
	.value	0x3ff
	.quad	0


/*
 * We need to trampoline thru a gdt we have in low memory.
 */
#include "../boot/boot_gdt.s"
#endif /* __lint */
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/segments.h>
#include <sys/controlregs.h>

/*
 * Do a call into BIOS. This goes down to 16 bit real mode and back again.
 */

/*
 * instruction prefix to change the operand size of the following
 * instruction
 */
#define	DATASZ	.byte 0x66;

	.globl	_start
_start:

	/*
	 * Save caller registers
	 */
	movq	%rbp, save_rbp
	movq	%rsp, save_rsp
	movq	%rbx, save_rbx
	movq	%rsi, save_rsi
	movq	%r12, save_r12
	movq	%r13, save_r13
	movq	%r14, save_r14
	movq	%r15, save_r15

	/* Switch to a low memory stack */
	movq	$_start, %rsp

	/* put interrupt number in %bl */
	movq	%rdi, %rbx

	/* allocate space for args on stack */
	subq	$18, %rsp
	movq	%rsp, %rdi

	/* copy args from high memory to stack in low memory */
	cld
	movl	$18, %ecx
	rep
	movsb

	/*
	 * Save system registers
	 */
	sidt	save_idt
	sgdt	save_gdt
	str	save_tr
	movw	%cs, save_cs
	movw	%ds, save_ds
	movw	%ss, save_ss
	movw	%es, save_es
	movw	%fs, save_fs
	movw	%gs, save_gs
	movq	%cr4, %rax
	movq	%rax, save_cr4
	movq	%cr3, %rax
	movq	%rax, save_cr3
	movq	%cr0, %rax
	movq	%rax, save_cr0

	/*
	 * save/clear the extension parts of the fs/gs base registers and cr8
	 */
	movl	$MSR_AMD_FSBASE, %ecx
	rdmsr
	movl	%eax, save_fsbase
	movl	%edx, save_fsbase + 4
	xorl	%eax, %eax
	xorl	%edx, %edx
	wrmsr

	movl	$MSR_AMD_GSBASE, %ecx
	rdmsr
	movl	%eax, save_gsbase
	movl	%edx, save_gsbase + 4
	xorl	%eax, %eax
	xorl	%edx, %edx
	wrmsr

	movl	$MSR_AMD_KGSBASE, %ecx
	rdmsr
	movl	%eax, save_kgsbase
	movl	%edx, save_kgsbase + 4
	xorl	%eax, %eax
	xorl	%edx, %edx
	wrmsr

	movq	%cr8, %rax
	movq	%rax, save_cr8

	/*
	 * set offsets in 16 bit ljmp instructions below
	 */
	leaq	enter_real, %rax
	movw	%ax, enter_real_ljmp

	leaq	enter_protected, %rax
	movw	%ax, enter_protected_ljmp

	leaq	gdt_info, %rax
	movw	%ax, gdt_info_load

	/*
	 * insert BIOS interrupt number into later instruction
	 */
	movb	%bl, int_instr+1
	jmp	1f
1:

	/*
	 * zero out all the registers to make sure they're 16 bit clean
	 */
	xorq	%r8, %r8
	xorq	%r9, %r9
	xorq	%r10, %r10
	xorq	%r11, %r11
	xorq	%r12, %r12
	xorq	%r13, %r13
	xorq	%r14, %r14
	xorq	%r15, %r15
	xorl	%eax, %eax
	xorl	%ebx, %ebx
	xorl	%ecx, %ecx
	xorl	%edx, %edx
	xorl	%ebp, %ebp
	xorl	%esi, %esi
	xorl	%edi, %edi

	/*
	 * Load our own GDT/IDT
	 */
	lgdt	gdt_info
	lidt	idt_info

	/*
	 * Shut down 64 bit mode. First get into compatibility mode.
	 */
	movq	%rsp, %rax
	pushq	$B32DATA_SEL
	pushq	%rax
	pushf
	pushq	$B32CODE_SEL
	pushq	$1f
	iretq
1:
	.code32
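
	/*
	 * The iretq above pops %rip, %cs, %rflags, %rsp and %ss in a
	 * single instruction, which lets us switch to the 32 bit code
	 * and data selectors together; from 1: onward we are executing
	 * in compatibility mode.
	 */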

	/*
	 * disable long mode by:
	 * - shutting down paging (bit 31 of cr0)
	 * - flushing the TLB
	 * - disabling LME (long mode enable) in EFER (extended feature reg)
	 */
	movl	%cr0, %eax
	btcl	$31, %eax		/* disable paging */
	movl	%eax, %cr0
	ljmp	$B32CODE_SEL, $1f
1:

	xorl	%eax, %eax
	movl	%eax, %cr3		/* flushes TLB */

	movl	$MSR_AMD_EFER, %ecx	/* Extended Feature Enable */
	rdmsr
	btcl	$8, %eax		/* bit 8 Long Mode Enable bit */
	wrmsr
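
	/*
	 * The order matters: clearing CR0.PG while EFER.LME is still
	 * set is what actually deactivates long mode (EFER.LMA drops);
	 * only then is LME itself cleared.
	 */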

	/*
	 * OK, now enter 16 bit mode, so we can shut down protected mode.
	 *
	 * We'll have to act like we're still in a 32 bit section.
	 * So the code from this point has DATASZ in front of it to get 32 bit
	 * operands.  If DATASZ is missing the operands will be 16 bit.
	 *
	 * Now shut down paging and protected (i.e. segmentation) modes.
	 */
	ljmp	$B16CODE_SEL, $enter_16_bit
enter_16_bit:

	/*
	 * Make sure hidden parts of segment registers are 16 bit clean
	 */
	DATASZ	movl	$B16DATA_SEL, %eax
	movw	%ax, %ss
	movw	%ax, %ds
	movw	%ax, %es
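
	/*
	 * Loading a selector also reloads the hidden descriptor cache
	 * (base, limit, attributes).  Doing it here, while B16DATA_SEL
	 * still describes a 16 bit segment, leaves those caches in a
	 * state that stays valid once we drop into real mode.
	 */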
	/* ... */

	.value	B32CODE_SEL	/* %cs value */
enter_protected:

	/*
	 * We are now back in a 32 bit code section, fix data/stack segments
	 */
	.code32
	movw	$B32DATA_SEL, %ax
	movw	%ax, %ds
	movw	%ax, %ss

	/*
	 * Re-enable paging. Note we only use 32 bit mov's to restore these
	 * control registers. That's OK as the upper 32 bits are always zero.
	 */
	movl	save_cr4, %eax
	movl	%eax, %cr4
	movl	save_cr3, %eax
	movl	%eax, %cr3

	/*
	 * re-enable long mode
	 */
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	btsl	$8, %eax
	wrmsr

	movl	save_cr0, %eax
	movl	%eax, %cr0
	jmp	enter_paging
enter_paging:
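
	/*
	 * The jmp above follows the long-standing x86 rule that a
	 * branch must directly follow enabling paging, so that nothing
	 * prefetched under the old translation setup gets executed.
	 */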


	/*
	 * transition back to 64 bit mode
	 */
	pushl	$B64CODE_SEL
	pushl	$longmode
	lret
longmode:
	.code64
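
	/*
	 * With EFER.LME set and paging back on, loading %cs with the 64
	 * bit code selector via lret is what re-activates long mode;
	 * execution resumes at longmode: as 64 bit code.
	 */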
	/*
	 * restore the caller's GDT/IDT, task register and segment registers
	 */
	lgdt	save_gdt
	lidt	save_idt

	/*
	 * Before loading the task register we need to reset the busy bit
	 * in its corresponding GDT descriptor, since ltr faults on a TSS
	 * that is already marked busy.  The busy bit is the 2nd bit in
	 * the 5th byte of the descriptor.
	 */
	movzwq	save_tr, %rax
	addq	save_gdt+2, %rax
	btcl	$1, 5(%rax)
	ltr	save_tr
	movw	save_ds, %ds
	movw	save_ss, %ss
	movw	save_es, %es
	movw	save_fs, %fs
	movw	save_gs, %gs

	pushq	save_cs
	pushq	$.newcs
	lretq
.newcs:

	/*
	 * restore the hidden kernel segment base register values
	 */
	movl	save_fsbase, %eax
	movl	save_fsbase + 4, %edx
	movl	$MSR_AMD_FSBASE, %ecx
	wrmsr

	movl	save_gsbase, %eax
	movl	save_gsbase + 4, %edx
	movl	$MSR_AMD_GSBASE, %ecx
	wrmsr

	movl	save_kgsbase, %eax
	movl	save_kgsbase + 4, %edx
	movl	$MSR_AMD_KGSBASE, %ecx
	wrmsr

	movq	save_cr8, %rax
	cmpq	$0, %rax
	je	1f
	movq	%rax, %cr8
1:

	/*
	 * copy results to caller's location, then restore remaining registers
	 */
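	/*
	 * %rsp still points at the 18 bytes of register state the BIOS
	 * call updated; copy them back over the caller's buffer (the
	 * saved incoming %rsi).  The word just above that block becomes
	 * the return value, presumably the flags from the BIOS call.
	 */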
	movq	save_rsi, %rdi
	movq	%rsp, %rsi
	movq	$18, %rcx
	rep
	movsb
	movw	18(%rsp), %ax
	andq	$0xffff, %rax
	movq	save_r12, %r12
	movq	save_r13, %r13
	movq	save_r14, %r14
	movq	save_r15, %r15
	movq	save_rbx, %rbx
	movq	save_rbp, %rbp
	movq	save_rsp, %rsp
	ret


/*
 * Caller's registers to restore
 */
	.align 4
save_esi:
	.long	0
save_edi:
	.long	0
save_ebx:
	.long	0
save_ebp:
	.long	0
save_esp:
	.long	0

	.align 8
save_rsi:
	.quad	0
save_rbx:
	.quad	0
save_rbp:
	.quad	0
save_rsp:
	.quad	0
save_r12:
	.quad	0
save_r13:
	.quad	0
save_r14:
	.quad	0
save_r15:
	.quad	0
save_kgsbase:
	.quad	0
save_gsbase:
	.quad	0
save_fsbase:
	.quad	0
save_cr8:
	.quad	0

save_idt:
	.quad	0
	.quad	0

save_gdt:
	.quad	0
	.quad	0

save_cr0:
	.quad	0
save_cr3:
	.quad	0
save_cr4:
	.quad	0
save_cs:
	.quad	0
save_ss:
	.value	0
save_ds:
	.value	0
save_es:
	.value	0
save_fs:
	.value	0
save_gs:
	.value	0
save_tr:
	.value	0

idt_info:
	.value	0x3ff
	.quad	0


/*
 * We need to trampoline thru a gdt we have in low memory.
 */
#include "../boot/boot_gdt.s"