de-linting of .s files
--- old/usr/src/uts/i86pc/ml/bios_call_src.s
+++ new/usr/src/uts/i86pc/ml/bios_call_src.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2007 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 -#pragma ident "%Z%%M% %I% %E% SMI"
27 +/*
28 + * Copyright 2019 Joyent, Inc.
29 + */
28 30
29 -#if defined(__lint)
30 -
31 -int silence_lint = 0;
32 -
33 -#else
34 -
35 31 #include <sys/segments.h>
36 32 #include <sys/controlregs.h>
37 33
38 34 /*
39 35 * Do a call into BIOS. This goes down to 16 bit real mode and back again.
40 36 */
41 37
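The 18 bytes copied in and out around the INT instruction correspond to nine
16-bit register slots (ax, bx, cx, dx, si, di, bp, es, ds), matching the
pop/push sequences below. A minimal sketch of the C-side view, assuming these
field and function names (hypothetical here; the kernel's own header is
authoritative):

    #include <stdint.h>

    /* nine 16-bit slots = the 18-byte block copied to/from low memory */
    struct bios_regs {
            uint16_t ax, bx, cx, dx;
            uint16_t si, di, bp, es, ds;
    };

    /* returns the flags word left by the BIOS (see movw 18(%rsp) below) */
    extern int bios_call(int intnum, struct bios_regs *rp);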
42 38 /*
43 39 * instruction prefix to change operand size in instruction
44 40 */
45 41 #define DATASZ .byte 0x66;
46 42
47 -#if defined(__amd64)
48 -#define MOVCR(x, y) movq x,%rax; movq %rax, y
49 -#define LOAD_XAX(sym) leaq sym, %rax
50 -#elif defined(__i386)
51 -#define MOVCR(x, y) movl x,%eax; movl %eax, y
52 -#define LOAD_XAX(sym) leal sym, %eax
53 -#endif
54 -
55 43 .globl _start
56 44 _start:
57 45
58 -#if defined(__i386)
59 -
60 46 /*
61 47 * Save caller registers
62 48 */
63 - movl %ebp, save_ebp
64 - movl %esp, save_esp
65 - movl %ebx, save_ebx
66 - movl %esi, save_esi
67 - movl %edi, save_edi
68 -
69 - /* get registers argument into esi */
70 - movl 8(%esp), %esi
71 -
72 - /* put interrupt number in %bl */
73 - movl 4(%esp), %ebx
74 -
75 - /* Switch to a low memory stack */
76 - movl $_start, %esp
77 -
78 - /* allocate space for args on stack */
79 - subl $18, %esp
80 - movl %esp, %edi
81 -
82 -#elif defined(__amd64)
83 -
84 - /*
85 - * Save caller registers
86 - */
87 49 movq %rbp, save_rbp
88 50 movq %rsp, save_rsp
89 51 movq %rbx, save_rbx
90 52 movq %rsi, save_rsi
91 53 movq %r12, save_r12
92 54 movq %r13, save_r13
93 55 movq %r14, save_r14
94 56 movq %r15, save_r15
95 57
96 58 /* Switch to a low memory stack */
97 59 movq $_start, %rsp
98 60
99 61 /* put interrupt number in %bl */
100 62 movq %rdi, %rbx
101 63
102 64 /* allocate space for args on stack */
103 65 subq $18, %rsp
104 66 movq %rsp, %rdi
105 67
106 -#endif
107 -
108 68 /* copy args from high memory to stack in low memory */
109 69 cld
110 70 movl $18, %ecx
111 71 rep
112 72 movsb
113 73
114 74 /*
115 75 * Save system registers
116 76 */
117 77 sidt save_idt
118 78 sgdt save_gdt
119 79 str save_tr
120 80 movw %cs, save_cs
121 81 movw %ds, save_ds
122 82 movw %ss, save_ss
123 83 movw %es, save_es
124 84 movw %fs, save_fs
125 85 movw %gs, save_gs
126 - MOVCR( %cr4, save_cr4)
127 - MOVCR( %cr3, save_cr3)
128 - MOVCR( %cr0, save_cr0)
86 + movq %cr4, %rax
87 + movq %rax, save_cr4
88 + movq %cr3, %rax
89 + movq %rax, save_cr3
90 + movq %cr0, %rax
91 + movq %rax, save_cr0
129 92
130 -#if defined(__amd64)
131 93 /*
132 94 * save/clear the extension parts of the fs/gs base registers and cr8
133 95 */
134 96 movl $MSR_AMD_FSBASE, %ecx
135 97 rdmsr
136 98 movl %eax, save_fsbase
137 99 movl %edx, save_fsbase + 4
138 100 xorl %eax, %eax
139 101 xorl %edx, %edx
140 102 wrmsr
141 103
142 104 movl $MSR_AMD_GSBASE, %ecx
143 105 rdmsr
144 106 movl %eax, save_gsbase
145 107 movl %edx, save_gsbase + 4
146 108 xorl %eax, %eax
147 109 xorl %edx, %edx
148 110 wrmsr
149 111
149 111
150 112 movl $MSR_AMD_KGSBASE, %ecx
151 113 rdmsr
152 114 movl %eax, save_kgsbase
153 115 movl %edx, save_kgsbase + 4
154 116 xorl %eax, %eax
155 117 xorl %edx, %edx
156 118 wrmsr
157 119
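The three base registers saved and cleared here live in the architectural
AMD64 MSRs 0xC0000100 (FSBASE), 0xC0000101 (GSBASE) and 0xC0000102 (KGSBASE).
A C sketch of the same save-and-clear pattern, assuming hypothetical
rdmsr()/wrmsr() helpers:

    #include <stdint.h>

    #define MSR_AMD_FSBASE  0xC0000100u
    #define MSR_AMD_GSBASE  0xC0000101u
    #define MSR_AMD_KGSBASE 0xC0000102u

    extern uint64_t rdmsr(uint32_t msr);            /* hypothetical */
    extern void wrmsr(uint32_t msr, uint64_t val);  /* hypothetical */

    static uint64_t save_fsbase, save_gsbase, save_kgsbase;

    static void
    save_and_clear_bases(void)
    {
            save_fsbase = rdmsr(MSR_AMD_FSBASE);
            wrmsr(MSR_AMD_FSBASE, 0);
            save_gsbase = rdmsr(MSR_AMD_GSBASE);
            wrmsr(MSR_AMD_GSBASE, 0);
            save_kgsbase = rdmsr(MSR_AMD_KGSBASE);
            wrmsr(MSR_AMD_KGSBASE, 0);
    }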
158 120 movq %cr8, %rax
159 121 movq %rax, save_cr8
160 -#endif
161 122
162 123 /*
163 124 * set offsets in 16 bit ljmp instructions below
164 125 */
165 - LOAD_XAX(enter_real)
126 + leaq enter_real, %rax
166 127 movw %ax, enter_real_ljmp
167 128
168 - LOAD_XAX(enter_protected)
129 + leaq enter_protected, %rax
169 130 movw %ax, enter_protected_ljmp
170 131
171 - LOAD_XAX(gdt_info)
132 + leaq gdt_info, %rax
172 133 movw %ax, gdt_info_load
173 134
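The movw stores above patch 16-bit target addresses directly into instructions
assembled by hand further down (the .byte 0xea far jumps and the
.byte 0x0f, 0x01, 0x16 lgdt). For reference, a real-mode far jump is encoded
as EA <offset16> <segment16>; a sketch of that layout:

    #include <stdint.h>

    /* real-mode ljmp encoding; offset is what the enter_*_ljmp labels patch */
    struct farjmp16 {
            uint8_t  opcode;        /* 0xEA */
            uint16_t offset;        /* 16-bit target address */
            uint16_t segment;       /* new %cs value */
    } __attribute__((packed));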
174 135 /*
175 136 * insert BIOS interrupt number into later instruction
176 137 */
177 138 movb %bl, int_instr+1
178 139 jmp 1f
179 140 1:
180 141
181 142 /*
182 143 * zero out all the registers to make sure they're 16 bit clean
183 144 */
184 -#if defined(__amd64)
185 145 xorq %r8, %r8
186 146 xorq %r9, %r9
187 147 xorq %r10, %r10
188 148 xorq %r11, %r11
189 149 xorq %r12, %r12
190 150 xorq %r13, %r13
191 151 xorq %r14, %r14
192 152 xorq %r15, %r15
193 -#endif
194 153 xorl %eax, %eax
195 154 xorl %ebx, %ebx
196 155 xorl %ecx, %ecx
197 156 xorl %edx, %edx
198 157 xorl %ebp, %ebp
199 158 xorl %esi, %esi
200 159 xorl %edi, %edi
201 160
202 161 /*
203 162 * Load our own GDT/IDT
204 163 */
205 164 lgdt gdt_info
206 165 lidt idt_info
207 166
208 -#if defined(__amd64)
209 167 /*
210 - * Shut down 64 bit mode. First get into compatiblity mode.
168 + * Shut down 64 bit mode. First get into compatibility mode.
211 169 */
212 170 movq %rsp, %rax
213 171 pushq $B32DATA_SEL
214 172 pushq %rax
215 173 pushf
216 174 pushq $B32CODE_SEL
217 175 pushq $1f
218 176 iretq
219 177 1:
220 178 .code32
221 179
222 180 /*
223 181 * disable long mode by:
224 182 * - shutting down paging (bit 31 of cr0)
225 183 * - flushing the TLB
226 184 	 * - disabling LME (long mode enable) in EFER (extended feature reg)
227 185 */
228 186 movl %cr0, %eax
229 187 btcl $31, %eax /* disable paging */
230 188 movl %eax, %cr0
231 189 ljmp $B32CODE_SEL, $1f
232 190 1:
233 191
234 192 xorl %eax, %eax
235 193 movl %eax, %cr3 /* flushes TLB */
236 194
237 195 movl $MSR_AMD_EFER, %ecx /* Extended Feature Enable */
238 196 rdmsr
239 197 btcl $8, %eax /* bit 8 Long Mode Enable bit */
240 198 wrmsr
241 -#endif
242 199
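EFER is the architectural MSR 0xC0000080 and LME is bit 8, so the
rdmsr/btcl/wrmsr sequence above amounts to the following C sketch
(rdmsr()/wrmsr() again hypothetical):

    #include <stdint.h>

    #define MSR_AMD_EFER    0xC0000080u
    #define AMD_EFER_LME    (1ULL << 8)     /* long mode enable */

    extern uint64_t rdmsr(uint32_t msr);            /* hypothetical */
    extern void wrmsr(uint32_t msr, uint64_t val);  /* hypothetical */

    static void
    efer_clear_lme(void)
    {
            wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) & ~AMD_EFER_LME);
    }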
243 200 /*
244 201 * ok.. now enter 16 bit mode, so we can shut down protected mode
245 202 *
246 203 * We'll have to act like we're still in a 32 bit section.
247 204 * So the code from this point has DATASZ in front of it to get 32 bit
248 205 * operands. If DATASZ is missing the operands will be 16 bit.
249 206 *
250 207 	 * Now shut down paging and protected (i.e. segmentation) modes.
251 208 */
252 209 ljmp $B16CODE_SEL, $enter_16_bit
253 210 enter_16_bit:
254 211
255 212 /*
256 213 * Make sure hidden parts of segment registers are 16 bit clean
257 214 */
258 215 DATASZ movl $B16DATA_SEL, %eax
259 216 movw %ax, %ss
260 217 movw %ax, %ds
261 218 movw %ax, %es
262 219 movw %ax, %fs
263 220 movw %ax, %gs
264 221
265 222
266 223 DATASZ movl $0x0, %eax /* put us in real mode */
267 224 DATASZ movl %eax, %cr0
268 225 .byte 0xea /* ljmp */
269 226 enter_real_ljmp:
270 227 .value 0 /* addr (16 bit) */
271 228 .value 0x0 /* value for %cs */
272 229 enter_real:
273 230
274 231 /*
275 232 * zero out the remaining segment registers
276 233 */
277 234 DATASZ xorl %eax, %eax
278 235 movw %ax, %ss
279 236 movw %ax, %ds
280 237 movw %ax, %es
281 238 movw %ax, %fs
282 239 movw %ax, %gs
283 240
284 241 /*
285 242 * load the arguments to the BIOS call from the stack
286 243 */
287 244 popl %eax /* really executes a 16 bit pop */
288 245 popl %ebx
289 246 popl %ecx
290 247 popl %edx
291 248 popl %esi
292 249 popl %edi
293 250 popl %ebp
294 251 pop %es
295 252 pop %ds
296 253
297 254 /*
298 255 * do the actual BIOS call
299 256 */
300 257 sti
301 258 int_instr:
302 259 int $0x10 /* this int number is overwritten */
303 260 cli /* ensure interrupts remain disabled */
304 261
305 262 /*
306 263 * save results of the BIOS call
307 264 */
308 265 pushf
309 266 push %ds
310 267 push %es
311 268 pushl %ebp /* still executes as 16 bit */
312 269 pushl %edi
313 270 pushl %esi
314 271 pushl %edx
315 272 pushl %ecx
316 273 pushl %ebx
317 274 pushl %eax
318 275
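After these pushes the low-memory stack mirrors the argument layout, with the
flags word on top: nine 16-bit result slots at offsets 0 through 16, then the
BIOS flags at offset 18. The first 18 bytes are copied back to the caller and
the flags word becomes the return value. As a sketch:

    #include <stdint.h>

    /* layout at the stack pointer after the push sequence above */
    struct bios_result {
            uint16_t ax, bx, cx, dx;        /* offsets 0, 2, 4, 6 */
            uint16_t si, di, bp, es, ds;    /* offsets 8..16 */
            uint16_t flags;                 /* offset 18; returned in %ax */
    };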
319 276 /*
320 277 * Restore protected mode and 32 bit execution
321 278 */
322 279 push $0 /* make sure %ds is zero before lgdt */
323 280 pop %ds
324 281 .byte 0x0f, 0x01, 0x16 /* lgdt */
325 282 gdt_info_load:
326 283 	.value	0	/* temp GDT in currently addressable mem */
327 284
328 285 DATASZ movl $0x1, %eax
329 286 DATASZ movl %eax, %cr0
330 287
331 288 .byte 0xea /* ljmp */
332 289 enter_protected_ljmp:
333 290 .value 0 /* addr (still in 16 bit) */
334 291 .value B32CODE_SEL /* %cs value */
335 292 enter_protected:
336 293
337 294 /*
338 295 * We are now back in a 32 bit code section, fix data/stack segments
339 296 */
340 297 .code32
341 298 movw $B32DATA_SEL, %ax
342 299 movw %ax, %ds
343 300 movw %ax, %ss
344 301
345 302 /*
346 303 * Re-enable paging. Note we only use 32 bit mov's to restore these
347 304 * control registers. That's OK as the upper 32 bits are always zero.
348 305 */
349 306 movl save_cr4, %eax
350 307 movl %eax, %cr4
351 308 movl save_cr3, %eax
352 309 movl %eax, %cr3
353 310
354 -#if defined(__amd64)
355 311 /*
356 312 * re-enable long mode
357 313 */
358 314 movl $MSR_AMD_EFER, %ecx
359 315 rdmsr
360 316 btsl $8, %eax
361 317 wrmsr
362 -#endif
363 318
364 319 movl save_cr0, %eax
365 320 movl %eax, %cr0
366 321 jmp enter_paging
367 322 enter_paging:
368 323
369 324
370 -#if defined(__amd64)
371 325 /*
372 326 * transition back to 64 bit mode
373 327 */
374 328 pushl $B64CODE_SEL
375 329 pushl $longmode
376 330 lret
377 331 longmode:
378 332 .code64
379 -#endif
380 333 /*
381 334 * restore caller frame pointer and segment registers
382 335 */
383 336 lgdt save_gdt
384 337 lidt save_idt
385 338
386 339 /*
387 340 * Before loading the task register we need to reset the busy bit
388 341 * in its corresponding GDT selector. The busy bit is the 2nd bit in
389 342 * the 5th byte of the selector.
390 343 */
391 -#if defined(__i386)
392 - movzwl save_tr, %eax
393 - addl save_gdt+2, %eax
394 - btcl $1, 5(%eax)
395 -#elif defined(__amd64)
396 344 movzwq save_tr, %rax
397 345 addq save_gdt+2, %rax
398 346 btcl $1, 5(%rax)
399 -#endif
400 347 ltr save_tr
401 348 movw save_ds, %ds
402 349 movw save_ss, %ss
403 350 movw save_es, %es
404 351 movw save_fs, %fs
405 352 movw save_gs, %gs
406 353
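The busy-bit reset a few lines up amounts to clearing bit 1 of the access byte
(byte 5) of the TSS descriptor that save_tr selects, turning the busy 64-bit
TSS type (0xB) back into the available type (0x9) so that ltr will accept it;
btcl complements the bit, which is safe here because a loaded TR is always
marked busy. A hypothetical helper:

    #include <stdint.h>

    /* hypothetical: make the TSS descriptor 'available' again */
    static void
    tss_clear_busy(uint8_t *gdt_base, uint16_t tr_sel)
    {
            uint8_t *desc = gdt_base + (tr_sel & ~0x7);  /* strip RPL/TI */
            desc[5] &= ~0x02;       /* busy bit: bit 1 of the type field */
    }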
407 -#if defined(__i386)
408 - pushl save_cs
409 - pushl $.newcs
410 - lret
411 -#elif defined(__amd64)
412 354 pushq save_cs
413 355 pushq $.newcs
414 356 lretq
415 -#endif
416 357 .newcs:
417 358
418 -#if defined(__amd64)
419 359 /*
420 360 * restore the hidden kernel segment base register values
421 361 */
422 362 movl save_fsbase, %eax
423 363 movl save_fsbase + 4, %edx
424 364 movl $MSR_AMD_FSBASE, %ecx
425 365 wrmsr
426 366
427 367 movl save_gsbase, %eax
428 368 movl save_gsbase + 4, %edx
429 369 movl $MSR_AMD_GSBASE, %ecx
430 370 wrmsr
431 371
432 372 movl save_kgsbase, %eax
433 373 movl save_kgsbase + 4, %edx
434 374 movl $MSR_AMD_KGSBASE, %ecx
435 375 wrmsr
436 376
437 377 movq save_cr8, %rax
438 378 cmpq $0, %rax
439 379 je 1f
440 380 movq %rax, %cr8
441 381 1:
442 -#endif
443 382
444 383 /*
445 384 * copy results to caller's location, then restore remaining registers
446 385 */
447 -#if defined(__i386)
448 - movl save_esp, %edi
449 - movl 8(%edi), %edi
450 - movl %esp, %esi
451 - movl $18, %ecx
452 - rep
453 - movsb
454 - movw 18(%esp), %ax
455 - andl $0xffff, %eax
456 - movl save_ebx, %ebx
457 - movl save_esi, %esi
458 - movl save_edi, %edi
459 - movl save_esp, %esp
460 - movl save_ebp, %ebp
461 - movl save_esp, %esp
462 - ret
463 -
464 -#elif defined(__amd64)
465 386 movq save_rsi, %rdi
466 387 movq %rsp, %rsi
467 388 movq $18, %rcx
468 389 rep
469 390 movsb
470 391 movw 18(%rsp), %ax
471 392 andq $0xffff, %rax
472 393 movq save_r12, %r12
473 394 movq save_r13, %r13
474 395 movq save_r14, %r14
475 396 movq save_r15, %r15
476 397 movq save_rbx, %rbx
477 398 movq save_rbp, %rbp
478 399 movq save_rsp, %rsp
479 400 ret
480 401
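Seen from C, a caller builds the 18-byte block, names the software interrupt,
and tests the carry bit (bit 0) of the returned flags for the usual BIOS error
convention. A hedged usage sketch, reusing the hypothetical struct
bios_regs/bios_call declarations from the first sketch above:

    /* hypothetical: INT 13h AH=08h, read drive parameters for disk 0x80 */
    static int
    get_drive_params(void)
    {
            struct bios_regs rp = { 0 };

            rp.ax = 0x0800;         /* AH = 08h */
            rp.dx = 0x0080;         /* DL = first hard disk */
            if (bios_call(0x13, &rp) & 0x1) /* carry set: BIOS error */
                    return (-1);
            return (rp.cx);         /* packed cylinder/sector geometry */
    }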
481 -#endif
482 402
483 -
484 403 /*
485 404 * Caller's registers to restore
486 405 */
487 406 .align 4
488 407 save_esi:
489 408 .long 0
490 409 save_edi:
491 410 .long 0
492 411 save_ebx:
493 412 .long 0
494 413 save_ebp:
495 414 .long 0
496 415 save_esp:
497 416 .long 0
498 417
499 418 .align 8
500 -#if defined(__amd64)
501 419 save_rsi:
502 420 .quad 0
503 421 save_rbx:
504 422 .quad 0
505 423 save_rbp:
506 424 .quad 0
507 425 save_rsp:
508 426 .quad 0
509 427 save_r12:
510 428 .quad 0
511 429 save_r13:
512 430 .quad 0
513 431 save_r14:
514 432 .quad 0
515 433 save_r15:
516 434 .quad 0
517 435 save_kgsbase:
518 436 .quad 0
519 437 save_gsbase:
520 438 .quad 0
521 439 save_fsbase:
522 440 .quad 0
523 441 save_cr8:
524 442 .quad 0
525 -#endif /* __amd64 */
526 443
527 444 save_idt:
528 445 .quad 0
529 446 .quad 0
530 447
531 448 save_gdt:
532 449 .quad 0
533 450 .quad 0
534 451
535 452 save_cr0:
536 453 .quad 0
537 454 save_cr3:
538 455 .quad 0
539 456 save_cr4:
540 457 .quad 0
541 458 save_cs:
542 459 .quad 0
543 460 save_ss:
544 461 .value 0
545 462 save_ds:
546 463 .value 0
547 464 save_es:
548 465 .value 0
549 466 save_fs:
550 467 .value 0
551 468 save_gs:
552 469 .value 0
553 470 save_tr:
554 471 .value 0
555 472
556 473 idt_info:
557 474 .value 0x3ff
558 475 .quad 0
559 476
560 477
561 478 /*
562 479 * We need to trampoline thru a gdt we have in low memory.
563 480 */
564 481 #include "../boot/boot_gdt.s"
565 -#endif /* __lint */