de-linting of .s files
--- old/usr/src/uts/intel/ia32/ml/i86_subr.s
+++ new/usr/src/uts/intel/ia32/ml/i86_subr.s
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
25 25 * Copyright (c) 2014 by Delphix. All rights reserved.
26 26 * Copyright 2019 Joyent, Inc.
27 27 */
28 28
29 29 /*
30 30 * Copyright (c) 1990, 1991 UNIX System Laboratories, Inc.
31 31 * Copyright (c) 1984, 1986, 1987, 1988, 1989, 1990 AT&T
32 32 * All Rights Reserved
33 33 */
34 34
35 35 /*
36 36 * Copyright (c) 2009, Intel Corporation.
37 37 * All rights reserved.
38 38 */
39 39
40 40 /*
41 41 * General assembly language routines.
42 42 * It is the intent of this file to contain routines that are
43 43 * independent of the specific kernel architecture, and those that are
44 44 * common across kernel architectures.
45 45 * As architectures diverge, and implementations of specific
46 46 * architecture-dependent routines change, the routines should be moved
47 47 * from this file into the respective ../`arch -k`/subr.s file.
48 48 */
49 49
50 50 #include <sys/asm_linkage.h>
51 51 #include <sys/asm_misc.h>
52 52 #include <sys/panic.h>
53 53 #include <sys/ontrap.h>
54 54 #include <sys/regset.h>
55 55 #include <sys/privregs.h>
56 56 #include <sys/reboot.h>
57 57 #include <sys/psw.h>
58 58 #include <sys/x86_archext.h>
59 59
60 -#if defined(__lint)
61 -#include <sys/types.h>
62 -#include <sys/systm.h>
63 -#include <sys/thread.h>
64 -#include <sys/archsystm.h>
65 -#include <sys/byteorder.h>
66 -#include <sys/dtrace.h>
67 -#include <sys/ftrace.h>
68 -#else /* __lint */
69 60 #include "assym.h"
70 -#endif /* __lint */
71 61 #include <sys/dditypes.h>
72 62
73 63 /*
74 64 * on_fault()
75 65 *
76 66 * Catch lofault faults. Like setjmp, except it returns 1 if the
77 67 * code that follows takes an uncorrectable fault. Turned off
78 68 * by calling no_fault(). Note that while under on_fault(),
79 69 * SMAP is disabled. For more information see
80 70 * uts/intel/ia32/ml/copy.s.
81 71 */
82 72
83 -#if defined(__lint)
84 -
85 -/* ARGSUSED */
86 -int
87 -on_fault(label_t *ljb)
88 -{ return (0); }
89 -
90 -void
91 -no_fault(void)
92 -{}
93 -
94 -#else /* __lint */
95 -
96 -#if defined(__amd64)
97 -
98 73 ENTRY(on_fault)
99 74 movq %gs:CPU_THREAD, %rsi
100 75 leaq catch_fault(%rip), %rdx
101 76 movq %rdi, T_ONFAULT(%rsi) /* jumpbuf in t_onfault */
102 77 movq %rdx, T_LOFAULT(%rsi) /* catch_fault in t_lofault */
103 78 call smap_disable /* allow user accesses */
104 79 jmp setjmp /* let setjmp do the rest */
105 80
106 81 catch_fault:
107 82 movq %gs:CPU_THREAD, %rsi
108 83 movq T_ONFAULT(%rsi), %rdi /* address of save area */
109 84 xorl %eax, %eax
110 85 movq %rax, T_ONFAULT(%rsi) /* turn off onfault */
111 86 movq %rax, T_LOFAULT(%rsi) /* turn off lofault */
112 87 call smap_enable /* disallow user accesses */
113 88 jmp longjmp /* let longjmp do the rest */
114 89 SET_SIZE(on_fault)
115 90
116 91 ENTRY(no_fault)
117 92 movq %gs:CPU_THREAD, %rsi
118 93 xorl %eax, %eax
119 94 movq %rax, T_ONFAULT(%rsi) /* turn off onfault */
120 95 movq %rax, T_LOFAULT(%rsi) /* turn off lofault */
121 96 call smap_enable /* disallow user accesses */
122 97 ret
123 98 SET_SIZE(no_fault)
124 99
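
A typical consumer brackets a risky access with this pair, in the setjmp
style described above. A minimal sketch (peek_byte() is a hypothetical
helper; the declarations are assumed to come in via <sys/systm.h>):

	#include <sys/types.h>
	#include <sys/systm.h>		/* on_fault(), no_fault(), label_t */

	/*
	 * Read one byte from an address that may fault; returns -1 if
	 * the access took an uncorrectable fault.
	 */
	static int
	peek_byte(volatile uchar_t *addr, uchar_t *val)
	{
		label_t ljb;

		if (on_fault(&ljb)) {
			/* longjmp from catch_fault lands here, returning 1 */
			no_fault();
			return (-1);
		}
		*val = *addr;		/* the guarded access */
		no_fault();		/* turn lofault handling back off */
		return (0);
	}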
125 -#elif defined(__i386)
126 -
127 - ENTRY(on_fault)
128 - movl %gs:CPU_THREAD, %edx
129 - movl 4(%esp), %eax /* jumpbuf address */
130 - leal catch_fault, %ecx
131 - movl %eax, T_ONFAULT(%edx) /* jumpbuf in t_onfault */
132 - movl %ecx, T_LOFAULT(%edx) /* catch_fault in t_lofault */
133 - jmp setjmp /* let setjmp do the rest */
134 -
135 -catch_fault:
136 - movl %gs:CPU_THREAD, %edx
137 - xorl %eax, %eax
138 - movl T_ONFAULT(%edx), %ecx /* address of save area */
139 - movl %eax, T_ONFAULT(%edx) /* turn off onfault */
140 - movl %eax, T_LOFAULT(%edx) /* turn off lofault */
141 - pushl %ecx
142 - call longjmp /* let longjmp do the rest */
143 - SET_SIZE(on_fault)
144 -
145 - ENTRY(no_fault)
146 - movl %gs:CPU_THREAD, %edx
147 - xorl %eax, %eax
148 - movl %eax, T_ONFAULT(%edx) /* turn off onfault */
149 - movl %eax, T_LOFAULT(%edx) /* turn off lofault */
150 - ret
151 - SET_SIZE(no_fault)
152 -
153 -#endif /* __i386 */
154 -#endif /* __lint */
155 -
156 100 /*
157 101 * Default trampoline code for on_trap() (see <sys/ontrap.h>). We just
158 102 * do a longjmp(&curthread->t_ontrap->ot_jmpbuf) if this is ever called.
159 103 */
160 104
161 -#if defined(lint)
162 -
163 -void
164 -on_trap_trampoline(void)
165 -{}
166 -
167 -#else /* __lint */
168 -
169 -#if defined(__amd64)
170 -
171 105 ENTRY(on_trap_trampoline)
172 106 movq %gs:CPU_THREAD, %rsi
173 107 movq T_ONTRAP(%rsi), %rdi
174 108 addq $OT_JMPBUF, %rdi
175 109 jmp longjmp
176 110 SET_SIZE(on_trap_trampoline)
177 111
178 -#elif defined(__i386)
179 -
180 - ENTRY(on_trap_trampoline)
181 - movl %gs:CPU_THREAD, %eax
182 - movl T_ONTRAP(%eax), %eax
183 - addl $OT_JMPBUF, %eax
184 - pushl %eax
185 - call longjmp
186 - SET_SIZE(on_trap_trampoline)
187 -
188 -#endif /* __i386 */
189 -#endif /* __lint */
190 -
191 112 /*
192 113 * Push a new element on to the t_ontrap stack. Refer to <sys/ontrap.h> for
193 114 * more information about the on_trap() mechanism. If the on_trap_data is the
194 115 * same as the topmost stack element, we just modify that element.
195 116 */
196 -#if defined(lint)
197 117
198 -/*ARGSUSED*/
199 -int
200 -on_trap(on_trap_data_t *otp, uint_t prot)
201 -{ return (0); }
202 -
203 -#else /* __lint */
204 -
205 -#if defined(__amd64)
206 -
207 118 ENTRY(on_trap)
208 119 movw %si, OT_PROT(%rdi) /* ot_prot = prot */
209 120 movw $0, OT_TRAP(%rdi) /* ot_trap = 0 */
210 121 leaq on_trap_trampoline(%rip), %rdx /* rdx = &on_trap_trampoline */
211 122 movq %rdx, OT_TRAMPOLINE(%rdi) /* ot_trampoline = rdx */
212 123 xorl %ecx, %ecx
213 124 movq %rcx, OT_HANDLE(%rdi) /* ot_handle = NULL */
214 125 movq %rcx, OT_PAD1(%rdi) /* ot_pad1 = NULL */
215 126 movq %gs:CPU_THREAD, %rdx /* rdx = curthread */
216 127 movq T_ONTRAP(%rdx), %rcx /* rcx = curthread->t_ontrap */
217 128 cmpq %rdi, %rcx /* if (otp == %rcx) */
218 129 je 0f /* don't modify t_ontrap */
219 130
220 131 movq %rcx, OT_PREV(%rdi) /* ot_prev = t_ontrap */
221 132 movq %rdi, T_ONTRAP(%rdx) /* curthread->t_ontrap = otp */
222 133
223 134 0: addq $OT_JMPBUF, %rdi /* &ot_jmpbuf */
224 135 jmp setjmp
225 136 SET_SIZE(on_trap)
226 137
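
The usual consumer pattern, sketched in C (probe_reg() is hypothetical;
on_trap_data_t, OT_DATA_ACCESS and no_trap() come from <sys/ontrap.h>):

	#include <sys/types.h>
	#include <sys/ontrap.h>

	/* Probe a device register that may take a data-access trap. */
	static int
	probe_reg(volatile uint32_t *reg, uint32_t *val)
	{
		on_trap_data_t otd;

		if (on_trap(&otd, OT_DATA_ACCESS) != 0) {
			no_trap();	/* pop the t_ontrap element */
			return (-1);	/* the access trapped */
		}
		*val = *reg;
		no_trap();
		return (0);
	}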
227 -#elif defined(__i386)
228 -
229 - ENTRY(on_trap)
230 - movl 4(%esp), %eax /* %eax = otp */
231 - movl 8(%esp), %edx /* %edx = prot */
232 -
233 - movw %dx, OT_PROT(%eax) /* ot_prot = prot */
234 - movw $0, OT_TRAP(%eax) /* ot_trap = 0 */
235 - leal on_trap_trampoline, %edx /* %edx = &on_trap_trampoline */
236 - movl %edx, OT_TRAMPOLINE(%eax) /* ot_trampoline = %edx */
237 - movl $0, OT_HANDLE(%eax) /* ot_handle = NULL */
238 - movl $0, OT_PAD1(%eax) /* ot_pad1 = NULL */
239 - movl %gs:CPU_THREAD, %edx /* %edx = curthread */
240 - movl T_ONTRAP(%edx), %ecx /* %ecx = curthread->t_ontrap */
241 - cmpl %eax, %ecx /* if (otp == %ecx) */
242 - je 0f /* don't modify t_ontrap */
243 -
244 - movl %ecx, OT_PREV(%eax) /* ot_prev = t_ontrap */
245 - movl %eax, T_ONTRAP(%edx) /* curthread->t_ontrap = otp */
246 -
247 -0: addl $OT_JMPBUF, %eax /* %eax = &ot_jmpbuf */
248 - movl %eax, 4(%esp) /* put %eax back on the stack */
249 - jmp setjmp /* let setjmp do the rest */
250 - SET_SIZE(on_trap)
251 -
252 -#endif /* __i386 */
253 -#endif /* __lint */
254 -
255 138 /*
256 139 * Setjmp and longjmp implement non-local gotos using state vectors
257 140 * type label_t.
258 141 */
259 142
260 -#if defined(__lint)
261 -
262 -/* ARGSUSED */
263 -int
264 -setjmp(label_t *lp)
265 -{ return (0); }
266 -
267 -/* ARGSUSED */
268 -void
269 -longjmp(label_t *lp)
270 -{}
271 -
272 -#else /* __lint */
273 -
274 143 #if LABEL_PC != 0
275 144 #error LABEL_PC MUST be defined as 0 for setjmp/longjmp to work as coded
276 145 #endif /* LABEL_PC != 0 */
277 146
278 -#if defined(__amd64)
279 -
280 147 ENTRY(setjmp)
281 148 movq %rsp, LABEL_SP(%rdi)
282 149 movq %rbp, LABEL_RBP(%rdi)
283 150 movq %rbx, LABEL_RBX(%rdi)
284 151 movq %r12, LABEL_R12(%rdi)
285 152 movq %r13, LABEL_R13(%rdi)
286 153 movq %r14, LABEL_R14(%rdi)
287 154 movq %r15, LABEL_R15(%rdi)
288 155 movq (%rsp), %rdx /* return address */
289 156 movq %rdx, (%rdi) /* LABEL_PC is 0 */
290 157 xorl %eax, %eax /* return 0 */
291 158 ret
292 159 SET_SIZE(setjmp)
293 160
294 161 ENTRY(longjmp)
295 162 movq LABEL_SP(%rdi), %rsp
296 163 movq LABEL_RBP(%rdi), %rbp
297 164 movq LABEL_RBX(%rdi), %rbx
298 165 movq LABEL_R12(%rdi), %r12
299 166 movq LABEL_R13(%rdi), %r13
300 167 movq LABEL_R14(%rdi), %r14
301 168 movq LABEL_R15(%rdi), %r15
302 169 movq (%rdi), %rdx /* return address; LABEL_PC is 0 */
303 170 movq %rdx, (%rsp)
304 171 xorl %eax, %eax
305 172 incl %eax /* return 1 */
306 173 ret
307 174 SET_SIZE(longjmp)
308 175
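
A minimal sketch of the kernel setjmp()/longjmp() contract (do_work() is a
hypothetical callee; label_t is assumed to come in via <sys/types.h>):

	#include <sys/types.h>		/* label_t */
	#include <sys/systm.h>		/* setjmp(), longjmp() */

	static label_t unwind;

	static void
	outer(void)
	{
		if (setjmp(&unwind) == 0) {
			/* direct return: state saved, value 0 */
			do_work();	/* may call longjmp(&unwind) */
		} else {
			/* longjmp(&unwind) returned here with value 1 */
		}
	}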
309 -#elif defined(__i386)
310 -
311 - ENTRY(setjmp)
312 - movl 4(%esp), %edx /* address of save area */
313 - movl %ebp, LABEL_EBP(%edx)
314 - movl %ebx, LABEL_EBX(%edx)
315 - movl %esi, LABEL_ESI(%edx)
316 - movl %edi, LABEL_EDI(%edx)
317 - movl %esp, 4(%edx)
318 - movl (%esp), %ecx /* %eip (return address) */
319 - movl %ecx, (%edx) /* LABEL_PC is 0 */
320 - subl %eax, %eax /* return 0 */
321 - ret
322 - SET_SIZE(setjmp)
323 -
324 - ENTRY(longjmp)
325 - movl 4(%esp), %edx /* address of save area */
326 - movl LABEL_EBP(%edx), %ebp
327 - movl LABEL_EBX(%edx), %ebx
328 - movl LABEL_ESI(%edx), %esi
329 - movl LABEL_EDI(%edx), %edi
330 - movl 4(%edx), %esp
331 - movl (%edx), %ecx /* %eip (return addr); LABEL_PC is 0 */
332 - movl $1, %eax
333 - addl $4, %esp /* pop ret adr */
334 - jmp *%ecx /* indirect */
335 - SET_SIZE(longjmp)
336 -
337 -#endif /* __i386 */
338 -#endif /* __lint */
339 -
340 176 /*
341 177 * if a() calls b() calls caller(),
342 178 * caller() returns return address in a().
343 179 * (Note: We assume a() and b() are C routines which do the normal entry/exit
344 180 * sequence.)
345 181 */
346 182
347 -#if defined(__lint)
348 -
349 -caddr_t
350 -caller(void)
351 -{ return (0); }
352 -
353 -#else /* __lint */
354 -
355 -#if defined(__amd64)
356 -
357 183 ENTRY(caller)
358 184 movq 8(%rbp), %rax /* b()'s return pc, in a() */
359 185 ret
360 186 SET_SIZE(caller)
361 187
362 -#elif defined(__i386)
363 -
364 - ENTRY(caller)
365 - movl 4(%ebp), %eax /* b()'s return pc, in a() */
366 - ret
367 - SET_SIZE(caller)
368 -
369 -#endif /* __i386 */
370 -#endif /* __lint */
371 -
372 188 /*
373 189 * if a() calls callee(), callee() returns the
374 190 * return address in a();
375 191 */
376 192
377 -#if defined(__lint)
378 -
379 -caddr_t
380 -callee(void)
381 -{ return (0); }
382 -
383 -#else /* __lint */
384 -
385 -#if defined(__amd64)
386 -
387 193 ENTRY(callee)
388 194 movq (%rsp), %rax /* callee()'s return pc, in a() */
389 195 ret
390 196 SET_SIZE(callee)
391 197
392 -#elif defined(__i386)
393 -
394 - ENTRY(callee)
395 - movl (%esp), %eax /* callee()'s return pc, in a() */
396 - ret
397 - SET_SIZE(callee)
398 -
399 -#endif /* __i386 */
400 -#endif /* __lint */
401 -
402 198 /*
403 199 * return the current frame pointer
404 200 */
405 201
406 -#if defined(__lint)
407 -
408 -greg_t
409 -getfp(void)
410 -{ return (0); }
411 -
412 -#else /* __lint */
413 -
414 -#if defined(__amd64)
415 -
416 202 ENTRY(getfp)
417 203 movq %rbp, %rax
418 204 ret
419 205 SET_SIZE(getfp)
420 206
421 -#elif defined(__i386)
422 -
423 - ENTRY(getfp)
424 - movl %ebp, %eax
425 - ret
426 - SET_SIZE(getfp)
427 -
428 -#endif /* __i386 */
429 -#endif /* __lint */
430 -
431 207 /*
432 208 * Invalidate a single page table entry in the TLB
433 209 */
434 210
435 -#if defined(__lint)
436 -
437 -/* ARGSUSED */
438 -void
439 -mmu_invlpg(caddr_t m)
440 -{}
441 -
442 -#else /* __lint */
443 -
444 211 ENTRY(mmu_invlpg)
445 212 invlpg (%rdi)
446 213 ret
447 214 SET_SIZE(mmu_invlpg)
448 215
449 -#endif /* __lint */
450 216
451 -
452 217 /*
453 218 * Get/Set the value of various control registers
454 219 */
455 220
456 -#if defined(__lint)
457 -
458 -ulong_t
459 -getcr0(void)
460 -{ return (0); }
461 -
462 -/* ARGSUSED */
463 -void
464 -setcr0(ulong_t value)
465 -{}
466 -
467 -ulong_t
468 -getcr2(void)
469 -{ return (0); }
470 -
471 -ulong_t
472 -getcr3(void)
473 -{ return (0); }
474 -
475 -#if !defined(__xpv)
476 -/* ARGSUSED */
477 -void
478 -setcr3(ulong_t val)
479 -{}
480 -
481 -void
482 -reload_cr3(void)
483 -{}
484 -#endif
485 -
486 -ulong_t
487 -getcr4(void)
488 -{ return (0); }
489 -
490 -/* ARGSUSED */
491 -void
492 -setcr4(ulong_t val)
493 -{}
494 -
495 -#if defined(__amd64)
496 -
497 -ulong_t
498 -getcr8(void)
499 -{ return (0); }
500 -
501 -/* ARGSUSED */
502 -void
503 -setcr8(ulong_t val)
504 -{}
505 -
506 -#endif /* __amd64 */
507 -
508 -#else /* __lint */
509 -
510 -#if defined(__amd64)
511 -
512 221 ENTRY(getcr0)
513 222 movq %cr0, %rax
514 223 ret
515 224 SET_SIZE(getcr0)
516 225
517 226 ENTRY(setcr0)
518 227 movq %rdi, %cr0
519 228 ret
520 229 SET_SIZE(setcr0)
521 230
522 231 ENTRY(getcr2)
523 232 #if defined(__xpv)
524 233 movq %gs:CPU_VCPU_INFO, %rax
525 234 movq VCPU_INFO_ARCH_CR2(%rax), %rax
526 235 #else
527 236 movq %cr2, %rax
528 237 #endif
529 238 ret
530 239 SET_SIZE(getcr2)
531 240
532 241 ENTRY(getcr3)
533 242 movq %cr3, %rax
534 243 ret
535 244 SET_SIZE(getcr3)
536 245
537 246 #if !defined(__xpv)
538 247
539 248 ENTRY(setcr3)
540 249 movq %rdi, %cr3
541 250 ret
542 251 SET_SIZE(setcr3)
543 252
544 253 ENTRY(reload_cr3)
545 254 movq %cr3, %rdi
546 255 movq %rdi, %cr3
547 256 ret
548 257 SET_SIZE(reload_cr3)
549 258
550 259 #endif /* __xpv */
551 260
552 261 ENTRY(getcr4)
553 262 movq %cr4, %rax
554 263 ret
555 264 SET_SIZE(getcr4)
556 265
557 266 ENTRY(setcr4)
558 267 movq %rdi, %cr4
559 268 ret
560 269 SET_SIZE(setcr4)
561 270
562 271 ENTRY(getcr8)
563 272 movq %cr8, %rax
564 273 ret
565 274 SET_SIZE(getcr8)
566 275
567 276 ENTRY(setcr8)
568 277 movq %rdi, %cr8
569 278 ret
570 279 SET_SIZE(setcr8)
571 280
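
These accessors are typically used in read-modify-write sequences; a minimal
sketch (assuming the prototypes are visible via <sys/archsystm.h> and the
CR4_* bit definitions via <sys/controlregs.h>):

	#include <sys/archsystm.h>	/* getcr4()/setcr4() (assumed) */
	#include <sys/controlregs.h>	/* CR4_* bit definitions */

	/* Enable global pages via a read-modify-write of %cr4. */
	static void
	enable_global_pages(void)
	{
		ulong_t cr4 = getcr4();

		if ((cr4 & CR4_PGE) == 0)
			setcr4(cr4 | CR4_PGE);
	}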
572 -#elif defined(__i386)
573 -
574 - ENTRY(getcr0)
575 - movl %cr0, %eax
576 - ret
577 - SET_SIZE(getcr0)
578 -
579 - ENTRY(setcr0)
580 - movl 4(%esp), %eax
581 - movl %eax, %cr0
582 - ret
583 - SET_SIZE(setcr0)
584 -
585 - /*
586 - * "lock mov %cr0" is used on processors which indicate it is
587 - * supported via CPUID. Normally the 32 bit TPR is accessed via
588 - * the local APIC.
589 - */
590 - ENTRY(getcr8)
591 - lock
592 - movl %cr0, %eax
593 - ret
594 - SET_SIZE(getcr8)
595 -
596 - ENTRY(setcr8)
597 - movl 4(%esp), %eax
598 - lock
599 - movl %eax, %cr0
600 - ret
601 - SET_SIZE(setcr8)
602 -
603 - ENTRY(getcr2)
604 -#if defined(__xpv)
605 - movl %gs:CPU_VCPU_INFO, %eax
606 - movl VCPU_INFO_ARCH_CR2(%eax), %eax
607 -#else
608 - movl %cr2, %eax
609 -#endif
610 - ret
611 - SET_SIZE(getcr2)
612 -
613 - ENTRY(getcr3)
614 - movl %cr3, %eax
615 - ret
616 - SET_SIZE(getcr3)
617 -
618 -#if !defined(__xpv)
619 -
620 - ENTRY(setcr3)
621 - movl 4(%esp), %eax
622 - movl %eax, %cr3
623 - ret
624 - SET_SIZE(setcr3)
625 -
626 - ENTRY(reload_cr3)
627 - movl %cr3, %eax
628 - movl %eax, %cr3
629 - ret
630 - SET_SIZE(reload_cr3)
631 -
632 -#endif /* __xpv */
633 -
634 - ENTRY(getcr4)
635 - movl %cr4, %eax
636 - ret
637 - SET_SIZE(getcr4)
638 -
639 - ENTRY(setcr4)
640 - movl 4(%esp), %eax
641 - movl %eax, %cr4
642 - ret
643 - SET_SIZE(setcr4)
644 -
645 -#endif /* __i386 */
646 -#endif /* __lint */
647 -
648 -#if defined(__lint)
649 -
650 -/*ARGSUSED*/
651 -uint32_t
652 -__cpuid_insn(struct cpuid_regs *regs)
653 -{ return (0); }
654 -
655 -#else /* __lint */
656 -
657 -#if defined(__amd64)
658 -
659 281 ENTRY(__cpuid_insn)
660 282 movq %rbx, %r8
661 283 movq %rcx, %r9
662 284 movq %rdx, %r11
663 285 movl (%rdi), %eax /* %eax = regs->cp_eax */
664 286 movl 0x4(%rdi), %ebx /* %ebx = regs->cp_ebx */
665 287 movl 0x8(%rdi), %ecx /* %ecx = regs->cp_ecx */
666 288 movl 0xc(%rdi), %edx /* %edx = regs->cp_edx */
667 289 cpuid
668 290 movl %eax, (%rdi) /* regs->cp_eax = %eax */
669 291 movl %ebx, 0x4(%rdi) /* regs->cp_ebx = %ebx */
670 292 movl %ecx, 0x8(%rdi) /* regs->cp_ecx = %ecx */
671 293 movl %edx, 0xc(%rdi) /* regs->cp_edx = %edx */
672 294 movq %r8, %rbx
673 295 movq %r9, %rcx
674 296 movq %r11, %rdx
675 297 ret
676 298 SET_SIZE(__cpuid_insn)
677 299
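
Callers fill in cp_eax (and cp_ecx for subleaves) and read the four words
back out of the same structure. A minimal sketch (assuming struct cpuid_regs
and the prototype come from <sys/x86_archext.h>):

	#include <sys/x86_archext.h>	/* struct cpuid_regs, __cpuid_insn() */

	/* Return the %edx feature word of CPUID leaf 1. */
	static uint32_t
	cpuid_leaf1_edx(void)
	{
		struct cpuid_regs regs;

		regs.cp_eax = 1;	/* leaf */
		regs.cp_ecx = 0;	/* subleaf (ignored by leaf 1) */
		(void) __cpuid_insn(&regs);
		return (regs.cp_edx);
	}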
678 -#elif defined(__i386)
679 -
680 - ENTRY(__cpuid_insn)
681 - pushl %ebp
682 - movl 0x8(%esp), %ebp /* %ebp = regs */
683 - pushl %ebx
684 - pushl %ecx
685 - pushl %edx
686 - movl (%ebp), %eax /* %eax = regs->cp_eax */
687 - movl 0x4(%ebp), %ebx /* %ebx = regs->cp_ebx */
688 - movl 0x8(%ebp), %ecx /* %ecx = regs->cp_ecx */
689 - movl 0xc(%ebp), %edx /* %edx = regs->cp_edx */
690 - cpuid
691 - movl %eax, (%ebp) /* regs->cp_eax = %eax */
692 - movl %ebx, 0x4(%ebp) /* regs->cp_ebx = %ebx */
693 - movl %ecx, 0x8(%ebp) /* regs->cp_ecx = %ecx */
694 - movl %edx, 0xc(%ebp) /* regs->cp_edx = %edx */
695 - popl %edx
696 - popl %ecx
697 - popl %ebx
698 - popl %ebp
699 - ret
700 - SET_SIZE(__cpuid_insn)
701 -
702 -#endif /* __i386 */
703 -#endif /* __lint */
704 -
705 -#if defined(__lint)
706 -
707 -/*ARGSUSED*/
708 -void
709 -i86_monitor(volatile uint32_t *addr, uint32_t extensions, uint32_t hints)
710 -{}
711 -
712 -#else /* __lint */
713 -
714 -#if defined(__amd64)
715 -
716 300 ENTRY_NP(i86_monitor)
717 301 pushq %rbp
718 302 movq %rsp, %rbp
719 303 movq %rdi, %rax /* addr */
720 304 movq %rsi, %rcx /* extensions */
721 305 /* rdx contains input arg3: hints */
722 306 clflush (%rax)
723 307 .byte 0x0f, 0x01, 0xc8 /* monitor */
724 308 leave
725 309 ret
726 310 SET_SIZE(i86_monitor)
727 311
728 -#elif defined(__i386)
729 -
730 -ENTRY_NP(i86_monitor)
731 - pushl %ebp
732 - movl %esp, %ebp
733 - movl 0x8(%ebp),%eax /* addr */
734 - movl 0xc(%ebp),%ecx /* extensions */
735 - movl 0x10(%ebp),%edx /* hints */
736 - clflush (%eax)
737 - .byte 0x0f, 0x01, 0xc8 /* monitor */
738 - leave
739 - ret
740 - SET_SIZE(i86_monitor)
741 -
742 -#endif /* __i386 */
743 -#endif /* __lint */
744 -
745 -#if defined(__lint)
746 -
747 -/*ARGSUSED*/
748 -void
749 -i86_mwait(uint32_t data, uint32_t extensions)
750 -{}
751 -
752 -#else /* __lint */
753 -
754 -#if defined(__amd64)
755 -
756 312 ENTRY_NP(i86_mwait)
757 313 pushq %rbp
758 314 call x86_md_clear
759 315 movq %rsp, %rbp
760 316 movq %rdi, %rax /* data */
761 317 movq %rsi, %rcx /* extensions */
762 318 .byte 0x0f, 0x01, 0xc9 /* mwait */
763 319 leave
764 320 ret
765 321 SET_SIZE(i86_mwait)
766 322
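
Together these implement the usual MONITOR/MWAIT idle handshake: arm the
monitor on a flag word, re-check it, then sleep until the line is written or
an interrupt arrives. A minimal sketch (the flag and its wakeup protocol are
hypothetical):

	static volatile uint32_t idle_flag;	/* written by the waker */

	static void
	idle_wait(void)
	{
		idle_flag = 0;			/* disarm */
		i86_monitor(&idle_flag, 0, 0);	/* arm monitor on the line */
		if (idle_flag == 0)		/* nothing woke us yet? */
			i86_mwait(0, 0);	/* sleep until write/intr */
	}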
767 -#elif defined(__i386)
768 -
769 - ENTRY_NP(i86_mwait)
770 - pushl %ebp
771 - movl %esp, %ebp
772 - movl 0x8(%ebp),%eax /* data */
773 - movl 0xc(%ebp),%ecx /* extensions */
774 - .byte 0x0f, 0x01, 0xc9 /* mwait */
775 - leave
776 - ret
777 - SET_SIZE(i86_mwait)
778 -
779 -#endif /* __i386 */
780 -#endif /* __lint */
781 -
782 323 #if defined(__xpv)
783 324 /*
784 325 * Defined in C
785 326 */
786 327 #else
787 328
788 -#if defined(__lint)
789 -
790 -hrtime_t
791 -tsc_read(void)
792 -{
793 - return (0);
794 -}
795 -
796 -#else /* __lint */
797 -
798 -#if defined(__amd64)
799 -
800 329 ENTRY_NP(tsc_read)
801 330 movq %rbx, %r11
802 331 movl $0, %eax
803 332 cpuid
804 333 rdtsc
805 334 movq %r11, %rbx
806 335 shlq $32, %rdx
807 336 orq %rdx, %rax
808 337 ret
809 338 .globl _tsc_mfence_start
810 339 _tsc_mfence_start:
811 340 mfence
812 341 rdtsc
813 342 shlq $32, %rdx
814 343 orq %rdx, %rax
815 344 ret
816 345 .globl _tsc_mfence_end
817 346 _tsc_mfence_end:
818 347 .globl _tscp_start
819 348 _tscp_start:
820 349 .byte 0x0f, 0x01, 0xf9 /* rdtscp instruction */
821 350 shlq $32, %rdx
822 351 orq %rdx, %rax
823 352 ret
824 353 .globl _tscp_end
825 354 _tscp_end:
826 355 .globl _no_rdtsc_start
827 356 _no_rdtsc_start:
828 357 xorl %edx, %edx
829 358 xorl %eax, %eax
830 359 ret
831 360 .globl _no_rdtsc_end
832 361 _no_rdtsc_end:
833 362 .globl _tsc_lfence_start
834 363 _tsc_lfence_start:
835 364 lfence
836 365 rdtsc
837 366 shlq $32, %rdx
838 367 orq %rdx, %rax
839 368 ret
840 369 .globl _tsc_lfence_end
841 370 _tsc_lfence_end:
842 371 SET_SIZE(tsc_read)
843 372
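
The _*_start/_*_end label pairs above bracket alternative bodies so that boot
code can copy the variant matching the CPU's capabilities over the default
tsc_read() text. A minimal sketch of that hot-patching, assuming kernel text
is still writable when it runs:

	#include <sys/types.h>
	#include <sys/systm.h>		/* bcopy() */
	#include <sys/time.h>		/* hrtime_t */

	extern char _tsc_lfence_start[], _tsc_lfence_end[];
	extern hrtime_t tsc_read(void);

	/* Overwrite tsc_read() with the lfence+rdtsc variant. */
	static void
	patch_tsc_lfence(void)
	{
		bcopy(_tsc_lfence_start, (void *)tsc_read,
		    (size_t)(_tsc_lfence_end - _tsc_lfence_start));
	}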
844 -#else /* __i386 */
845 373
846 - ENTRY_NP(tsc_read)
847 - pushl %ebx
848 - movl $0, %eax
849 - cpuid
850 - rdtsc
851 - popl %ebx
852 - ret
853 - .globl _tsc_mfence_start
854 -_tsc_mfence_start:
855 - mfence
856 - rdtsc
857 - ret
858 - .globl _tsc_mfence_end
859 -_tsc_mfence_end:
860 - .globl _tscp_start
861 -_tscp_start:
862 - .byte 0x0f, 0x01, 0xf9 /* rdtscp instruction */
863 - ret
864 - .globl _tscp_end
865 -_tscp_end:
866 - .globl _no_rdtsc_start
867 -_no_rdtsc_start:
868 - xorl %edx, %edx
869 - xorl %eax, %eax
870 - ret
871 - .globl _no_rdtsc_end
872 -_no_rdtsc_end:
873 - .globl _tsc_lfence_start
874 -_tsc_lfence_start:
875 - lfence
876 - rdtsc
877 - ret
878 - .globl _tsc_lfence_end
879 -_tsc_lfence_end:
880 - SET_SIZE(tsc_read)
881 -
882 -#endif /* __i386 */
883 -
884 -#endif /* __lint */
885 -
886 -
887 374 #endif /* __xpv */
888 375
889 -#ifdef __lint
890 -/*
891 - * Do not use this function for obtaining clock tick. This
892 - * is called by callers who do not need to have a guarenteed
893 - * correct tick value. The proper routine to use is tsc_read().
894 - */
895 -u_longlong_t
896 -randtick(void)
897 -{
898 - return (0);
899 -}
900 -#else
901 -#if defined(__amd64)
902 376 ENTRY_NP(randtick)
903 377 rdtsc
904 378 shlq $32, %rdx
905 379 orq %rdx, %rax
906 380 ret
907 381 SET_SIZE(randtick)
908 -#else
909 - ENTRY_NP(randtick)
910 - rdtsc
911 - ret
912 - SET_SIZE(randtick)
913 -#endif /* __i386 */
914 -#endif /* __lint */
915 382 /*
916 383 * Insert entryp after predp in a doubly linked list.
917 384 */
918 385
919 -#if defined(__lint)
920 -
921 -/*ARGSUSED*/
922 -void
923 -_insque(caddr_t entryp, caddr_t predp)
924 -{}
925 -
926 -#else /* __lint */
927 -
928 -#if defined(__amd64)
929 -
930 386 ENTRY(_insque)
931 387 movq (%rsi), %rax /* predp->forw */
932 388 movq %rsi, CPTRSIZE(%rdi) /* entryp->back = predp */
933 389 movq %rax, (%rdi) /* entryp->forw = predp->forw */
934 390 movq %rdi, (%rsi) /* predp->forw = entryp */
935 391 movq %rdi, CPTRSIZE(%rax) /* predp->forw->back = entryp */
936 392 ret
937 393 SET_SIZE(_insque)
938 394
939 -#elif defined(__i386)
940 -
941 - ENTRY(_insque)
942 - movl 8(%esp), %edx
943 - movl 4(%esp), %ecx
944 - movl (%edx), %eax /* predp->forw */
945 - movl %edx, CPTRSIZE(%ecx) /* entryp->back = predp */
946 - movl %eax, (%ecx) /* entryp->forw = predp->forw */
947 - movl %ecx, (%edx) /* predp->forw = entryp */
948 - movl %ecx, CPTRSIZE(%eax) /* predp->forw->back = entryp */
949 - ret
950 - SET_SIZE(_insque)
951 -
952 -#endif /* __i386 */
953 -#endif /* __lint */
954 -
955 395 /*
956 396 * Remove entryp from a doubly linked list
957 397 */
958 398
959 -#if defined(__lint)
960 -
961 -/*ARGSUSED*/
962 -void
963 -_remque(caddr_t entryp)
964 -{}
965 -
966 -#else /* __lint */
967 -
968 -#if defined(__amd64)
969 -
970 399 ENTRY(_remque)
971 400 movq (%rdi), %rax /* entry->forw */
972 401 movq CPTRSIZE(%rdi), %rdx /* entry->back */
973 402 movq %rax, (%rdx) /* entry->back->forw = entry->forw */
974 403 movq %rdx, CPTRSIZE(%rax) /* entry->forw->back = entry->back */
975 404 ret
976 405 SET_SIZE(_remque)
977 406
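
A C rendering of the two routines above, using the layout the assembly
assumes (forward pointer at offset 0, back pointer at offset CPTRSIZE); the
struct and function names here are illustrative:

	struct qelem {
		struct qelem	*q_forw;	/* offset 0 */
		struct qelem	*q_back;	/* offset CPTRSIZE */
	};

	static void
	insque_c(struct qelem *entryp, struct qelem *predp)
	{
		entryp->q_forw = predp->q_forw;
		entryp->q_back = predp;
		predp->q_forw = entryp;
		entryp->q_forw->q_back = entryp; /* old successor's back */
	}

	static void
	remque_c(struct qelem *entryp)
	{
		entryp->q_back->q_forw = entryp->q_forw;
		entryp->q_forw->q_back = entryp->q_back;
	}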
978 -#elif defined(__i386)
979 -
980 - ENTRY(_remque)
981 - movl 4(%esp), %ecx
982 - movl (%ecx), %eax /* entry->forw */
983 - movl CPTRSIZE(%ecx), %edx /* entry->back */
984 - movl %eax, (%edx) /* entry->back->forw = entry->forw */
985 - movl %edx, CPTRSIZE(%eax) /* entry->forw->back = entry->back */
986 - ret
987 - SET_SIZE(_remque)
988 -
989 -#endif /* __i386 */
990 -#endif /* __lint */
991 -
992 407 /*
993 408 * Returns the number of
994 409 * non-null bytes in the string argument.
995 410 */
996 411
997 -#if defined(__lint)
998 -
999 -/* ARGSUSED */
1000 -size_t
1001 -strlen(const char *str)
1002 -{ return (0); }
1003 -
1004 -#else /* __lint */
1005 -
1006 -#if defined(__amd64)
1007 -
1008 412 /*
1009 413 * This is close to a simple transliteration of a C version of this
1010 414 * routine. We should either just -make- this be a C version, or
1011 415 * justify having it in assembler by making it significantly faster.
1012 416 *
1013 417 * size_t
1014 418 * strlen(const char *s)
1015 419 * {
1016 420 * const char *s0;
1017 421 * #if defined(DEBUG)
1018 422 * if ((uintptr_t)s < KERNELBASE)
1019 423 * panic(.str_panic_msg);
1020 424 * #endif
1021 425 * for (s0 = s; *s; s++)
1022 426 * ;
1023 427 * return (s - s0);
1024 428 * }
1025 429 */
1026 430
1027 431 ENTRY(strlen)
1028 432 #ifdef DEBUG
1029 433 movq postbootkernelbase(%rip), %rax
1030 434 cmpq %rax, %rdi
1031 435 jae str_valid
1032 436 pushq %rbp
1033 437 movq %rsp, %rbp
1034 438 leaq .str_panic_msg(%rip), %rdi
1035 439 xorl %eax, %eax
1036 440 call panic
1037 441 #endif /* DEBUG */
1038 442 str_valid:
1039 443 cmpb $0, (%rdi)
1040 444 movq %rdi, %rax
1041 445 je .null_found
1042 446 .align 4
1043 447 .strlen_loop:
1044 448 incq %rdi
1045 449 cmpb $0, (%rdi)
1046 450 jne .strlen_loop
1047 451 .null_found:
1048 452 subq %rax, %rdi
1049 453 movq %rdi, %rax
1050 454 ret
1051 455 SET_SIZE(strlen)
1052 456
1053 -#elif defined(__i386)
1054 -
1055 - ENTRY(strlen)
1056 457 #ifdef DEBUG
1057 - movl postbootkernelbase, %eax
1058 - cmpl %eax, 4(%esp)
1059 - jae str_valid
1060 - pushl %ebp
1061 - movl %esp, %ebp
1062 - pushl $.str_panic_msg
1063 - call panic
1064 -#endif /* DEBUG */
1065 -
1066 -str_valid:
1067 - movl 4(%esp), %eax /* %eax = string address */
1068 - testl $3, %eax /* if %eax not word aligned */
1069 - jnz .not_word_aligned /* goto .not_word_aligned */
1070 - .align 4
1071 -.word_aligned:
1072 - movl (%eax), %edx /* move 1 word from (%eax) to %edx */
1073 - movl $0x7f7f7f7f, %ecx
1074 - andl %edx, %ecx /* %ecx = %edx & 0x7f7f7f7f */
1075 - addl $4, %eax /* next word */
1076 - addl $0x7f7f7f7f, %ecx /* %ecx += 0x7f7f7f7f */
1077 - orl %edx, %ecx /* %ecx |= %edx */
1078 - andl $0x80808080, %ecx /* %ecx &= 0x80808080 */
1079 - cmpl $0x80808080, %ecx /* if no null byte in this word */
1080 - je .word_aligned /* goto .word_aligned */
1081 - subl $4, %eax /* post-incremented */
1082 -.not_word_aligned:
1083 - cmpb $0, (%eax) /* if a byte in (%eax) is null */
1084 - je .null_found /* goto .null_found */
1085 - incl %eax /* next byte */
1086 - testl $3, %eax /* if %eax not word aligned */
1087 - jnz .not_word_aligned /* goto .not_word_aligned */
1088 - jmp .word_aligned /* goto .word_aligned */
1089 - .align 4
1090 -.null_found:
1091 - subl 4(%esp), %eax /* %eax -= string address */
1092 - ret
1093 - SET_SIZE(strlen)
1094 -
1095 -#endif /* __i386 */
1096 -
1097 -#ifdef DEBUG
1098 458 .text
1099 459 .str_panic_msg:
1100 460 .string "strlen: argument below kernelbase"
1101 461 #endif /* DEBUG */
1102 462
1103 -#endif /* __lint */
1104 -
1105 463 /*
1106 464 * Berkeley 4.3 introduced symbolically named interrupt levels
1107 465 * as a way to deal with priority in a machine-independent fashion.
1108 466 * Numbered priorities are machine specific, and their use
1109 467 * should be discouraged where possible.
1110 468 *
1111 469 * Note, for the machine specific priorities there are
1112 470 * examples listed for devices that use a particular priority.
1113 471 * It should not be construed that all devices of that
1114 472 * type should be at that priority. It is currently where
1115 473 * the current devices fit into the priority scheme based
1116 474 * upon time criticality.
1117 475 *
1118 476 * The underlying assumption of these assignments is that
1119 477 * IPL 10 is the highest level from which a device
1120 478 * routine can call wakeup. Devices that interrupt from higher
1121 479 * levels are restricted in what they can do. If they need
1122 480 * kernel services they should schedule a routine at a lower
1123 481 * level (via software interrupt) to do the required
1124 482 * processing.
1125 483 *
1126 484 * Examples of this higher usage:
1127 485 * Level Usage
1128 486 * 14 Profiling clock (and PROM uart polling clock)
1129 487 * 12 Serial ports
1130 488 *
1131 489 * The serial ports request lower level processing on level 6.
1132 490 *
1133 491 * Also, almost all splN routines (where N is a number or a
1134 492 * mnemonic) will do a RAISE(), on the assumption that they are
1135 493 * never used to lower our priority.
1136 494 * The exceptions are:
1137 495 * spl8() Because you can't be above 15 to begin with!
1138 496 * splzs() Because this is used at boot time to lower our
1139 497 * priority, to allow the PROM to poll the uart.
1140 498 * spl0() Used to lower priority to 0.
1141 499 */
1142 500
1143 -#if defined(__lint)
1144 -
1145 -int spl0(void) { return (0); }
1146 -int spl6(void) { return (0); }
1147 -int spl7(void) { return (0); }
1148 -int spl8(void) { return (0); }
1149 -int splhigh(void) { return (0); }
1150 -int splhi(void) { return (0); }
1151 -int splzs(void) { return (0); }
1152 -
1153 -/* ARGSUSED */
1154 -void
1155 -splx(int level)
1156 -{}
1157 -
1158 -#else /* __lint */
1159 -
1160 -#if defined(__amd64)
1161 -
1162 501 #define SETPRI(level) \
1163 502 movl $/**/level, %edi; /* new priority */ \
1164 503 jmp do_splx /* redirect to do_splx */
1165 504
1166 505 #define RAISE(level) \
1167 506 movl $/**/level, %edi; /* new priority */ \
1168 507 jmp splr /* redirect to splr */
1169 508
1170 -#elif defined(__i386)
1171 -
1172 -#define SETPRI(level) \
1173 - pushl $/**/level; /* new priority */ \
1174 - call do_splx; /* invoke common splx code */ \
1175 - addl $4, %esp; /* unstack arg */ \
1176 - ret
1177 -
1178 -#define RAISE(level) \
1179 - pushl $/**/level; /* new priority */ \
1180 - call splr; /* invoke common splr code */ \
1181 - addl $4, %esp; /* unstack args */ \
1182 - ret
1183 -
1184 -#endif /* __i386 */
1185 -
1186 509 /* locks out all interrupts, including memory errors */
1187 510 ENTRY(spl8)
1188 511 SETPRI(15)
1189 512 SET_SIZE(spl8)
1190 513
1191 514 /* just below the level that profiling runs */
1192 515 ENTRY(spl7)
1193 516 RAISE(13)
1194 517 SET_SIZE(spl7)
1195 518
1196 519 /* sun specific - highest priority onboard serial i/o asy ports */
1197 520 ENTRY(splzs)
1198 521 SETPRI(12) /* Can't be a RAISE, as it's used to lower us */
1199 522 SET_SIZE(splzs)
1200 523
1201 524 ENTRY(splhi)
1202 525 ALTENTRY(splhigh)
1203 526 ALTENTRY(spl6)
1204 527 ALTENTRY(i_ddi_splhigh)
1205 528
1206 529 RAISE(DISP_LEVEL)
1207 530
1208 531 SET_SIZE(i_ddi_splhigh)
1209 532 SET_SIZE(spl6)
1210 533 SET_SIZE(splhigh)
1211 534 SET_SIZE(splhi)
1212 535
1213 536 /* allow all interrupts */
1214 537 ENTRY(spl0)
1215 538 SETPRI(0)
1216 539 SET_SIZE(spl0)
1217 540
1218 541
1219 542 /* splx implementation */
1220 543 ENTRY(splx)
1221 544 jmp do_splx /* redirect to common splx code */
1222 545 SET_SIZE(splx)
1223 546
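
The spl routines return the previous priority so callers can bracket
critical sections; a minimal sketch:

	/* Block dispatcher-level interrupts around shared state. */
	static void
	protected_update(void)
	{
		int s = splhi();	/* raise; returns old priority */

		/* ... touch state shared with interrupt handlers ... */

		splx(s);		/* drop back to the saved level */
	}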
1224 -#endif /* __lint */
1225 -
1226 -#if defined(__i386)
1227 -
1228 -/*
1229 - * Read and write the %gs register
1230 - */
1231 -
1232 -#if defined(__lint)
1233 -
1234 -/*ARGSUSED*/
1235 -uint16_t
1236 -getgs(void)
1237 -{ return (0); }
1238 -
1239 -/*ARGSUSED*/
1240 -void
1241 -setgs(uint16_t sel)
1242 -{}
1243 -
1244 -#else /* __lint */
1245 -
1246 - ENTRY(getgs)
1247 - clr %eax
1248 - movw %gs, %ax
1249 - ret
1250 - SET_SIZE(getgs)
1251 -
1252 - ENTRY(setgs)
1253 - movw 4(%esp), %gs
1254 - ret
1255 - SET_SIZE(setgs)
1256 -
1257 -#endif /* __lint */
1258 -#endif /* __i386 */
1259 -
1260 -#if defined(__lint)
1261 -
1262 -void
1263 -pc_reset(void)
1264 -{}
1265 -
1266 -void
1267 -efi_reset(void)
1268 -{}
1269 -
1270 -#else /* __lint */
1271 -
1272 547 ENTRY(wait_500ms)
1273 -#if defined(__amd64)
1274 548 pushq %rbx
1275 -#elif defined(__i386)
1276 - push %ebx
1277 -#endif
1278 549 movl $50000, %ebx
1279 550 1:
1280 551 call tenmicrosec
1281 552 decl %ebx
1282 553 jnz 1b
1283 -#if defined(__amd64)
1284 554 popq %rbx
1285 -#elif defined(__i386)
1286 - pop %ebx
1287 -#endif
1288 555 ret
1289 556 SET_SIZE(wait_500ms)
1290 557
1291 558 #define RESET_METHOD_KBC 1
1292 559 #define RESET_METHOD_PORT92 2
1293 560 #define RESET_METHOD_PCI 4
1294 561
1295 562 DGDEF3(pc_reset_methods, 4, 8)
1296 563 .long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;
1297 564
1298 565 ENTRY(pc_reset)
1299 566
1300 -#if defined(__i386)
1301 - testl $RESET_METHOD_KBC, pc_reset_methods
1302 -#elif defined(__amd64)
1303 567 testl $RESET_METHOD_KBC, pc_reset_methods(%rip)
1304 -#endif
1305 568 jz 1f
1306 569
1307 570 /
1308 571 / Try the classic keyboard controller-triggered reset.
1309 572 /
1310 573 movw $0x64, %dx
1311 574 movb $0xfe, %al
1312 575 outb (%dx)
1313 576
1314 577 / Wait up to 500 milliseconds here for the keyboard controller
1315 578 / to pull the reset line. On some systems where the keyboard
1316 579 / controller is slow to pull the reset line, the next reset method
1317 580 / may be executed (which may be bad if those systems hang when the
1318 581 / next reset method is used, e.g. Ferrari 3400 (doesn't like port 92),
1319 582 / and Ferrari 4000 (doesn't like the cf9 reset method))
1320 583
1321 584 call wait_500ms
1322 585
1323 586 1:
1324 -#if defined(__i386)
1325 - testl $RESET_METHOD_PORT92, pc_reset_methods
1326 -#elif defined(__amd64)
1327 587 testl $RESET_METHOD_PORT92, pc_reset_methods(%rip)
1328 -#endif
1329 588 jz 3f
1330 589
1331 590 /
1332 591 / Try port 0x92 fast reset
1333 592 /
1334 593 movw $0x92, %dx
1335 594 inb (%dx)
1336 595 cmpb $0xff, %al / If port's not there, we should get back 0xFF
1337 596 je 1f
1338 597 testb $1, %al / If bit 0
1339 598 jz 2f / is clear, jump to perform the reset
1340 599 andb $0xfe, %al / otherwise,
1341 600 outb (%dx) / clear bit 0 first, then
1342 601 2:
1343 602 orb $1, %al / Set bit 0
1344 603 outb (%dx) / and reset the system
1345 604 1:
1346 605
1347 606 call wait_500ms
1348 607
1349 608 3:
1350 -#if defined(__i386)
1351 - testl $RESET_METHOD_PCI, pc_reset_methods
1352 -#elif defined(__amd64)
1353 609 testl $RESET_METHOD_PCI, pc_reset_methods(%rip)
1354 -#endif
1355 610 jz 4f
1356 611
1357 612 / Try the PCI (soft) reset vector (should work on all modern systems,
1358 613 / but has been shown to cause problems on 450NX systems, and some newer
1359 614 / systems (e.g. ATI IXP400-equipped systems))
1360 615 / When resetting via this method, 2 writes are required. The first
1361 616 / targets bit 1 (0=hard reset without power cycle, 1=hard reset with
1362 617 / power cycle).
1363 - / The reset occurs on the second write, during bit 2's transition from
618 + / The reset occurs on the second write, during bit 2's transition from
1364 619 / 0->1.
1365 620 movw $0xcf9, %dx
1366 621 movb $0x2, %al / Reset mode = hard, no power cycle
1367 622 outb (%dx)
1368 623 movb $0x6, %al
1369 624 outb (%dx)
1370 625
1371 626 call wait_500ms
1372 627
1373 628 4:
1374 629 /
1375 630 / port 0xcf9 failed also. Last-ditch effort is to
1376 631 / triple-fault the CPU.
1377 632 / Also, use triple fault for EFI firmware
1378 633 /
1379 634 ENTRY(efi_reset)
1380 -#if defined(__amd64)
1381 635 pushq $0x0
1382 636 pushq $0x0 / IDT base of 0, limit of 0 + 2 unused bytes
1383 637 lidt (%rsp)
1384 -#elif defined(__i386)
1385 - pushl $0x0
1386 - pushl $0x0 / IDT base of 0, limit of 0 + 2 unused bytes
1387 - lidt (%esp)
1388 -#endif
1389 638 int $0x0 / Trigger interrupt, generate triple-fault
1390 639
1391 640 cli
1392 641 hlt / Wait forever
1393 642 /*NOTREACHED*/
1394 643 SET_SIZE(efi_reset)
1395 644 SET_SIZE(pc_reset)
1396 645
1397 -#endif /* __lint */
1398 -
1399 646 /*
1400 647 * C callable in and out routines
1401 648 */
1402 649
1403 -#if defined(__lint)
1404 -
1405 -/* ARGSUSED */
1406 -void
1407 -outl(int port_address, uint32_t val)
1408 -{}
1409 -
1410 -#else /* __lint */
1411 -
1412 -#if defined(__amd64)
1413 -
1414 650 ENTRY(outl)
1415 651 movw %di, %dx
1416 652 movl %esi, %eax
1417 653 outl (%dx)
1418 654 ret
1419 655 SET_SIZE(outl)
1420 656
1421 -#elif defined(__i386)
1422 -
1423 - .set PORT, 4
1424 - .set VAL, 8
1425 -
1426 - ENTRY(outl)
1427 - movw PORT(%esp), %dx
1428 - movl VAL(%esp), %eax
1429 - outl (%dx)
1430 - ret
1431 - SET_SIZE(outl)
1432 -
1433 -#endif /* __i386 */
1434 -#endif /* __lint */
1435 -
1436 -#if defined(__lint)
1437 -
1438 -/* ARGSUSED */
1439 -void
1440 -outw(int port_address, uint16_t val)
1441 -{}
1442 -
1443 -#else /* __lint */
1444 -
1445 -#if defined(__amd64)
1446 -
1447 657 ENTRY(outw)
1448 658 movw %di, %dx
1449 659 movw %si, %ax
1450 660 D16 outl (%dx) /* XX64 why not outw? */
1451 661 ret
1452 662 SET_SIZE(outw)
1453 663
1454 -#elif defined(__i386)
1455 -
1456 - ENTRY(outw)
1457 - movw PORT(%esp), %dx
1458 - movw VAL(%esp), %ax
1459 - D16 outl (%dx)
1460 - ret
1461 - SET_SIZE(outw)
1462 -
1463 -#endif /* __i386 */
1464 -#endif /* __lint */
1465 -
1466 -#if defined(__lint)
1467 -
1468 -/* ARGSUSED */
1469 -void
1470 -outb(int port_address, uint8_t val)
1471 -{}
1472 -
1473 -#else /* __lint */
1474 -
1475 -#if defined(__amd64)
1476 -
1477 664 ENTRY(outb)
1478 665 movw %di, %dx
1479 666 movb %sil, %al
1480 667 outb (%dx)
1481 668 ret
1482 669 SET_SIZE(outb)
1483 670
1484 -#elif defined(__i386)
1485 -
1486 - ENTRY(outb)
1487 - movw PORT(%esp), %dx
1488 - movb VAL(%esp), %al
1489 - outb (%dx)
1490 - ret
1491 - SET_SIZE(outb)
1492 -
1493 -#endif /* __i386 */
1494 -#endif /* __lint */
1495 -
1496 -#if defined(__lint)
1497 -
1498 -/* ARGSUSED */
1499 -uint32_t
1500 -inl(int port_address)
1501 -{ return (0); }
1502 -
1503 -#else /* __lint */
1504 -
1505 -#if defined(__amd64)
1506 -
1507 671 ENTRY(inl)
1508 672 xorl %eax, %eax
1509 673 movw %di, %dx
1510 674 inl (%dx)
1511 675 ret
1512 676 SET_SIZE(inl)
1513 677
1514 -#elif defined(__i386)
1515 -
1516 - ENTRY(inl)
1517 - movw PORT(%esp), %dx
1518 - inl (%dx)
1519 - ret
1520 - SET_SIZE(inl)
1521 -
1522 -#endif /* __i386 */
1523 -#endif /* __lint */
1524 -
1525 -#if defined(__lint)
1526 -
1527 -/* ARGSUSED */
1528 -uint16_t
1529 -inw(int port_address)
1530 -{ return (0); }
1531 -
1532 -#else /* __lint */
1533 -
1534 -#if defined(__amd64)
1535 -
1536 678 ENTRY(inw)
1537 679 xorl %eax, %eax
1538 680 movw %di, %dx
1539 681 D16 inl (%dx)
1540 682 ret
1541 683 SET_SIZE(inw)
1542 684
1543 -#elif defined(__i386)
1544 685
1545 - ENTRY(inw)
1546 - subl %eax, %eax
1547 - movw PORT(%esp), %dx
1548 - D16 inl (%dx)
1549 - ret
1550 - SET_SIZE(inw)
1551 -
1552 -#endif /* __i386 */
1553 -#endif /* __lint */
1554 -
1555 -
1556 -#if defined(__lint)
1557 -
1558 -/* ARGSUSED */
1559 -uint8_t
1560 -inb(int port_address)
1561 -{ return (0); }
1562 -
1563 -#else /* __lint */
1564 -
1565 -#if defined(__amd64)
1566 -
1567 686 ENTRY(inb)
1568 687 xorl %eax, %eax
1569 688 movw %di, %dx
1570 689 inb (%dx)
1571 690 ret
1572 691 SET_SIZE(inb)
1573 692
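
These are the C-callable faces of the in/out instructions (the prototypes
match the lint stubs this change deletes). For example, the classic CMOS/RTC
index-then-data access pattern looks like this sketch:

	/* Read one byte from CMOS: select the index, then read the data. */
	static uint8_t
	cmos_read(uint8_t index)
	{
		outb(0x70, index);	/* index port */
		return (inb(0x71));	/* data port */
	}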
1574 -#elif defined(__i386)
1575 693
1576 - ENTRY(inb)
1577 - subl %eax, %eax
1578 - movw PORT(%esp), %dx
1579 - inb (%dx)
1580 - ret
1581 - SET_SIZE(inb)
1582 -
1583 -#endif /* __i386 */
1584 -#endif /* __lint */
1585 -
1586 -
1587 -#if defined(__lint)
1588 -
1589 -/* ARGSUSED */
1590 -void
1591 -repoutsw(int port, uint16_t *addr, int cnt)
1592 -{}
1593 -
1594 -#else /* __lint */
1595 -
1596 -#if defined(__amd64)
1597 -
1598 694 ENTRY(repoutsw)
1599 695 movl %edx, %ecx
1600 696 movw %di, %dx
1601 697 rep
1602 698 D16 outsl
1603 699 ret
1604 700 SET_SIZE(repoutsw)
1605 701
1606 -#elif defined(__i386)
1607 702
1608 - /*
1609 - * The arguments and saved registers are on the stack in the
1610 - * following order:
1611 - * | cnt | +16
1612 - * | *addr | +12
1613 - * | port | +8
1614 - * | eip | +4
1615 - * | esi | <-- %esp
1616 - * If additional values are pushed onto the stack, make sure
1617 - * to adjust the following constants accordingly.
1618 - */
1619 - .set PORT, 8
1620 - .set ADDR, 12
1621 - .set COUNT, 16
1622 -
1623 - ENTRY(repoutsw)
1624 - pushl %esi
1625 - movl PORT(%esp), %edx
1626 - movl ADDR(%esp), %esi
1627 - movl COUNT(%esp), %ecx
1628 - rep
1629 - D16 outsl
1630 - popl %esi
1631 - ret
1632 - SET_SIZE(repoutsw)
1633 -
1634 -#endif /* __i386 */
1635 -#endif /* __lint */
1636 -
1637 -
1638 -#if defined(__lint)
1639 -
1640 -/* ARGSUSED */
1641 -void
1642 -repinsw(int port_addr, uint16_t *addr, int cnt)
1643 -{}
1644 -
1645 -#else /* __lint */
1646 -
1647 -#if defined(__amd64)
1648 -
1649 703 ENTRY(repinsw)
1650 704 movl %edx, %ecx
1651 705 movw %di, %dx
1652 706 rep
1653 707 D16 insl
1654 708 ret
1655 709 SET_SIZE(repinsw)
1656 710
1657 -#elif defined(__i386)
1658 711
1659 - ENTRY(repinsw)
1660 - pushl %edi
1661 - movl PORT(%esp), %edx
1662 - movl ADDR(%esp), %edi
1663 - movl COUNT(%esp), %ecx
1664 - rep
1665 - D16 insl
1666 - popl %edi
1667 - ret
1668 - SET_SIZE(repinsw)
1669 -
1670 -#endif /* __i386 */
1671 -#endif /* __lint */
1672 -
1673 -
1674 -#if defined(__lint)
1675 -
1676 -/* ARGSUSED */
1677 -void
1678 -repinsb(int port, uint8_t *addr, int count)
1679 -{}
1680 -
1681 -#else /* __lint */
1682 -
1683 -#if defined(__amd64)
1684 -
1685 712 ENTRY(repinsb)
1686 713 movl %edx, %ecx
1687 714 movw %di, %dx
1688 715 movq %rsi, %rdi
1689 716 rep
1690 717 insb
1691 718 ret
1692 719 SET_SIZE(repinsb)
1693 720
1694 -#elif defined(__i386)
1695 721
1696 - /*
1697 - * The arguments and saved registers are on the stack in the
1698 - * following order:
1699 - * | cnt | +16
1700 - * | *addr | +12
1701 - * | port | +8
1702 - * | eip | +4
1703 - * | esi | <-- %esp
1704 - * If additional values are pushed onto the stack, make sure
1705 - * to adjust the following constants accordingly.
1706 - */
1707 - .set IO_PORT, 8
1708 - .set IO_ADDR, 12
1709 - .set IO_COUNT, 16
1710 -
1711 - ENTRY(repinsb)
1712 - pushl %edi
1713 - movl IO_ADDR(%esp), %edi
1714 - movl IO_COUNT(%esp), %ecx
1715 - movl IO_PORT(%esp), %edx
1716 - rep
1717 - insb
1718 - popl %edi
1719 - ret
1720 - SET_SIZE(repinsb)
1721 -
1722 -#endif /* __i386 */
1723 -#endif /* __lint */
1724 -
1725 -
1726 722 /*
1727 723 * Input a stream of 32-bit words.
1728 724 * NOTE: count is a DWORD count.
1729 725 */
1730 -#if defined(__lint)
1731 726
1732 -/* ARGSUSED */
1733 -void
1734 -repinsd(int port, uint32_t *addr, int count)
1735 -{}
1736 -
1737 -#else /* __lint */
1738 -
1739 -#if defined(__amd64)
1740 -
1741 727 ENTRY(repinsd)
1742 728 movl %edx, %ecx
1743 729 movw %di, %dx
1744 730 movq %rsi, %rdi
1745 731 rep
1746 732 insl
1747 733 ret
1748 734 SET_SIZE(repinsd)
1749 735
1750 -#elif defined(__i386)
1751 -
1752 - ENTRY(repinsd)
1753 - pushl %edi
1754 - movl IO_ADDR(%esp), %edi
1755 - movl IO_COUNT(%esp), %ecx
1756 - movl IO_PORT(%esp), %edx
1757 - rep
1758 - insl
1759 - popl %edi
1760 - ret
1761 - SET_SIZE(repinsd)
1762 -
1763 -#endif /* __i386 */
1764 -#endif /* __lint */
1765 -
1766 736 /*
1767 737 * Output a stream of bytes
1768 738 * NOTE: count is a byte count
1769 739 */
1770 -#if defined(__lint)
1771 740
1772 -/* ARGSUSED */
1773 -void
1774 -repoutsb(int port, uint8_t *addr, int count)
1775 -{}
1776 -
1777 -#else /* __lint */
1778 -
1779 -#if defined(__amd64)
1780 -
1781 741 ENTRY(repoutsb)
1782 742 movl %edx, %ecx
1783 743 movw %di, %dx
1784 744 rep
1785 745 outsb
1786 746 ret
1787 747 SET_SIZE(repoutsb)
1788 748
1789 -#elif defined(__i386)
1790 -
1791 - ENTRY(repoutsb)
1792 - pushl %esi
1793 - movl IO_ADDR(%esp), %esi
1794 - movl IO_COUNT(%esp), %ecx
1795 - movl IO_PORT(%esp), %edx
1796 - rep
1797 - outsb
1798 - popl %esi
1799 - ret
1800 - SET_SIZE(repoutsb)
1801 -
1802 -#endif /* __i386 */
1803 -#endif /* __lint */
1804 -
1805 749 /*
1806 750 * Output a stream of 32-bit words
1807 751 * NOTE: count is a DWORD count
1808 752 */
1809 -#if defined(__lint)
1810 753
1811 -/* ARGSUSED */
1812 -void
1813 -repoutsd(int port, uint32_t *addr, int count)
1814 -{}
1815 -
1816 -#else /* __lint */
1817 -
1818 -#if defined(__amd64)
1819 -
1820 754 ENTRY(repoutsd)
1821 755 movl %edx, %ecx
1822 756 movw %di, %dx
1823 757 rep
1824 758 outsl
1825 759 ret
1826 760 SET_SIZE(repoutsd)
1827 761
1828 -#elif defined(__i386)
1829 -
1830 - ENTRY(repoutsd)
1831 - pushl %esi
1832 - movl IO_ADDR(%esp), %esi
1833 - movl IO_COUNT(%esp), %ecx
1834 - movl IO_PORT(%esp), %edx
1835 - rep
1836 - outsl
1837 - popl %esi
1838 - ret
1839 - SET_SIZE(repoutsd)
1840 -
1841 -#endif /* __i386 */
1842 -#endif /* __lint */
1843 -
1844 762 /*
1845 763 * void int3(void)
1846 764 * void int18(void)
1847 765 * void int20(void)
1848 766 * void int_cmci(void)
1849 767 */
1850 768
1851 -#if defined(__lint)
1852 -
1853 -void
1854 -int3(void)
1855 -{}
1856 -
1857 -void
1858 -int18(void)
1859 -{}
1860 -
1861 -void
1862 -int20(void)
1863 -{}
1864 -
1865 -void
1866 -int_cmci(void)
1867 -{}
1868 -
1869 -#else /* __lint */
1870 -
1871 769 ENTRY(int3)
1872 770 int $T_BPTFLT
1873 771 ret
1874 772 SET_SIZE(int3)
1875 773
1876 774 ENTRY(int18)
1877 775 int $T_MCE
1878 776 ret
1879 777 SET_SIZE(int18)
1880 778
1881 779 ENTRY(int20)
1882 780 movl boothowto, %eax
1883 781 andl $RB_DEBUG, %eax
1884 782 jz 1f
1885 783
1886 784 int $T_DBGENTR
1887 785 1:
1888 786 rep; ret /* use 2 byte return instruction when branch target */
1889 787 /* AMD Software Optimization Guide - Section 6.2 */
1890 788 SET_SIZE(int20)
1891 789
1892 790 ENTRY(int_cmci)
1893 791 int $T_ENOEXTFLT
1894 792 ret
1895 793 SET_SIZE(int_cmci)
1896 794
1897 -#endif /* __lint */
1898 -
1899 -#if defined(__lint)
1900 -
1901 -/* ARGSUSED */
1902 -int
1903 -scanc(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
1904 -{ return (0); }
1905 -
1906 -#else /* __lint */
1907 -
1908 -#if defined(__amd64)
1909 -
1910 795 ENTRY(scanc)
1911 796 /* rdi == size */
1912 797 /* rsi == cp */
1913 798 /* rdx == table */
1914 799 /* rcx == mask */
1915 800 addq %rsi, %rdi /* end = &cp[size] */
1916 801 .scanloop:
1917 802 cmpq %rdi, %rsi /* while (cp < end */
1918 803 jnb .scandone
1919 804 movzbq (%rsi), %r8 /* %r8 = *cp */
1920 805 incq %rsi /* cp++ */
1921 806 testb %cl, (%r8, %rdx)
1922 807 jz .scanloop /* && (table[*cp] & mask) == 0) */
1923 808 decq %rsi /* (fix post-increment) */
1924 809 .scandone:
1925 810 movl %edi, %eax
1926 811 subl %esi, %eax /* return (end - cp) */
1927 812 ret
1928 813 SET_SIZE(scanc)
1929 814
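
Following the annotations above, scanc() is equivalent to this C (a
reference rendering, not a drop-in replacement):

	/* Count characters until one whose table entry matches mask. */
	static int
	scanc_c(size_t size, uchar_t *cp, uchar_t *table, uchar_t mask)
	{
		uchar_t *end = &cp[size];

		while (cp < end && (table[*cp] & mask) == 0)
			cp++;
		return ((int)(end - cp));
	}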
1930 -#elif defined(__i386)
1931 -
1932 - ENTRY(scanc)
1933 - pushl %edi
1934 - pushl %esi
1935 - movb 24(%esp), %cl /* mask = %cl */
1936 - movl 16(%esp), %esi /* cp = %esi */
1937 - movl 20(%esp), %edx /* table = %edx */
1938 - movl %esi, %edi
1939 - addl 12(%esp), %edi /* end = &cp[size]; */
1940 -.scanloop:
1941 - cmpl %edi, %esi /* while (cp < end */
1942 - jnb .scandone
1943 - movzbl (%esi), %eax /* %al = *cp */
1944 - incl %esi /* cp++ */
1945 - movb (%edx, %eax), %al /* %al = table[*cp] */
1946 - testb %al, %cl
1947 - jz .scanloop /* && (table[*cp] & mask) == 0) */
1948 - dec %esi /* post-incremented */
1949 -.scandone:
1950 - movl %edi, %eax
1951 - subl %esi, %eax /* return (end - cp) */
1952 - popl %esi
1953 - popl %edi
1954 - ret
1955 - SET_SIZE(scanc)
1956 -
1957 -#endif /* __i386 */
1958 -#endif /* __lint */
1959 -
1960 815 /*
1961 816 * Replacement functions for ones that are normally inlined.
1962 817 * In addition to the copy in i86.il, they are defined here just in case.
1963 818 */
1964 819
1965 -#if defined(__lint)
1966 -
1967 -ulong_t
1968 -intr_clear(void)
1969 -{ return (0); }
1970 -
1971 -ulong_t
1972 -clear_int_flag(void)
1973 -{ return (0); }
1974 -
1975 -#else /* __lint */
1976 -
1977 -#if defined(__amd64)
1978 -
1979 820 ENTRY(intr_clear)
1980 821 ENTRY(clear_int_flag)
1981 822 pushfq
1982 823 popq %rax
1983 824 #if defined(__xpv)
1984 825 leaq xpv_panicking, %rdi
1985 826 movl (%rdi), %edi
1986 827 cmpl $0, %edi
1987 828 jne 2f
1988 829 CLIRET(%rdi, %dl) /* returns event mask in %dl */
1989 830 /*
1990 831 * Synthesize the PS_IE bit from the event mask bit
1991 832 */
1992 833 andq $_BITNOT(PS_IE), %rax
1993 834 testb $1, %dl
1994 835 jnz 1f
1995 836 orq $PS_IE, %rax
1996 837 1:
1997 838 ret
1998 839 2:
1999 840 #endif
2000 841 CLI(%rdi)
2001 842 ret
2002 843 SET_SIZE(clear_int_flag)
2003 844 SET_SIZE(intr_clear)
2004 845
2005 -#elif defined(__i386)
2006 -
2007 - ENTRY(intr_clear)
2008 - ENTRY(clear_int_flag)
2009 - pushfl
2010 - popl %eax
2011 -#if defined(__xpv)
2012 - leal xpv_panicking, %edx
2013 - movl (%edx), %edx
2014 - cmpl $0, %edx
2015 - jne 2f
2016 - CLIRET(%edx, %cl) /* returns event mask in %cl */
2017 - /*
2018 - * Synthesize the PS_IE bit from the event mask bit
2019 - */
2020 - andl $_BITNOT(PS_IE), %eax
2021 - testb $1, %cl
2022 - jnz 1f
2023 - orl $PS_IE, %eax
2024 -1:
2025 - ret
2026 -2:
2027 -#endif
2028 - CLI(%edx)
2029 - ret
2030 - SET_SIZE(clear_int_flag)
2031 - SET_SIZE(intr_clear)
2032 -
2033 -#endif /* __i386 */
2034 -#endif /* __lint */
2035 -
2036 -#if defined(__lint)
2037 -
2038 -struct cpu *
2039 -curcpup(void)
2040 -{ return 0; }
2041 -
2042 -#else /* __lint */
2043 -
2044 -#if defined(__amd64)
2045 -
2046 846 ENTRY(curcpup)
2047 847 movq %gs:CPU_SELF, %rax
2048 848 ret
2049 849 SET_SIZE(curcpup)
2050 850
2051 -#elif defined(__i386)
2052 -
2053 - ENTRY(curcpup)
2054 - movl %gs:CPU_SELF, %eax
2055 - ret
2056 - SET_SIZE(curcpup)
2057 -
2058 -#endif /* __i386 */
2059 -#endif /* __lint */
2060 -
2061 851 /* htonll(), ntohll(), htonl(), ntohl(), htons(), ntohs()
2062 852 * These functions reverse the byte order of the input parameter and return
2063 853 * the result. This is to convert the byte order from host byte order
2064 854 * (little endian) to network byte order (big endian), or vice versa.
2065 855 */
2066 856
2067 -#if defined(__lint)
2068 -
2069 -uint64_t
2070 -htonll(uint64_t i)
2071 -{ return (i); }
2072 -
2073 -uint64_t
2074 -ntohll(uint64_t i)
2075 -{ return (i); }
2076 -
2077 -uint32_t
2078 -htonl(uint32_t i)
2079 -{ return (i); }
2080 -
2081 -uint32_t
2082 -ntohl(uint32_t i)
2083 -{ return (i); }
2084 -
2085 -uint16_t
2086 -htons(uint16_t i)
2087 -{ return (i); }
2088 -
2089 -uint16_t
2090 -ntohs(uint16_t i)
2091 -{ return (i); }
2092 -
2093 -#else /* __lint */
2094 -
2095 -#if defined(__amd64)
2096 -
2097 857 ENTRY(htonll)
2098 858 ALTENTRY(ntohll)
2099 859 movq %rdi, %rax
2100 860 bswapq %rax
2101 861 ret
2102 862 SET_SIZE(ntohll)
2103 863 SET_SIZE(htonll)
2104 864
2105 865 /* XX64 there must be shorter sequences for this */
2106 866 ENTRY(htonl)
2107 867 ALTENTRY(ntohl)
2108 868 movl %edi, %eax
2109 869 bswap %eax
2110 870 ret
2111 871 SET_SIZE(ntohl)
2112 872 SET_SIZE(htonl)
2113 873
2114 874 /* XX64 there must be better sequences for this */
2115 875 ENTRY(htons)
2116 876 ALTENTRY(ntohs)
2117 877 movl %edi, %eax
2118 878 bswap %eax
2119 879 shrl $16, %eax
2120 880 ret
2121 881 SET_SIZE(ntohs)
2122 882 SET_SIZE(htons)
2123 883
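
Usage is the standard byte-order idiom, e.g. (a trivial sketch; the
prototypes are assumed to come in via <sys/byteorder.h>):

	#include <sys/byteorder.h>

	/* Convert a host-order TCP port to wire (network) order. */
	static uint16_t
	wire_port(uint16_t hport)
	{
		return (htons(hport));
	}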
2124 -#elif defined(__i386)
2125 884
2126 - ENTRY(htonll)
2127 - ALTENTRY(ntohll)
2128 - movl 4(%esp), %edx
2129 - movl 8(%esp), %eax
2130 - bswap %edx
2131 - bswap %eax
2132 - ret
2133 - SET_SIZE(ntohll)
2134 - SET_SIZE(htonll)
2135 -
2136 - ENTRY(htonl)
2137 - ALTENTRY(ntohl)
2138 - movl 4(%esp), %eax
2139 - bswap %eax
2140 - ret
2141 - SET_SIZE(ntohl)
2142 - SET_SIZE(htonl)
2143 -
2144 - ENTRY(htons)
2145 - ALTENTRY(ntohs)
2146 - movl 4(%esp), %eax
2147 - bswap %eax
2148 - shrl $16, %eax
2149 - ret
2150 - SET_SIZE(ntohs)
2151 - SET_SIZE(htons)
2152 -
2153 -#endif /* __i386 */
2154 -#endif /* __lint */
2155 -
2156 -
2157 -#if defined(__lint)
2158 -
2159 -/* ARGSUSED */
2160 -void
2161 -intr_restore(ulong_t i)
2162 -{ return; }
2163 -
2164 -/* ARGSUSED */
2165 -void
2166 -restore_int_flag(ulong_t i)
2167 -{ return; }
2168 -
2169 -#else /* __lint */
2170 -
2171 -#if defined(__amd64)
2172 -
2173 885 ENTRY(intr_restore)
2174 886 ENTRY(restore_int_flag)
2175 887 testq $PS_IE, %rdi
2176 888 jz 1f
2177 889 #if defined(__xpv)
2178 890 leaq xpv_panicking, %rsi
2179 891 movl (%rsi), %esi
2180 892 cmpl $0, %esi
2181 893 jne 1f
2182 894 /*
2183 895 * Since we're -really- running unprivileged, our attempt
2184 896 * to change the state of the IF bit will be ignored.
2185 897 * The virtual IF bit is tweaked by CLI and STI.
2186 898 */
2187 899 IE_TO_EVENT_MASK(%rsi, %rdi)
2188 900 #else
2189 901 sti
2190 902 #endif
2191 903 1:
2192 904 ret
2193 905 SET_SIZE(restore_int_flag)
2194 906 SET_SIZE(intr_restore)
2195 907
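
The saved flags word lets intr_restore() re-enable interrupts only if they
were enabled on entry; the standard bracket looks like this sketch:

	/* Run a short section with interrupts disabled. */
	static void
	no_intr_section(void)
	{
		ulong_t flags = intr_clear();	/* old flags, incl. PS_IE */

		/* ... code that must not be interrupted ... */

		intr_restore(flags);		/* sti only if it was set */
	}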
2196 -#elif defined(__i386)
2197 -
2198 - ENTRY(intr_restore)
2199 - ENTRY(restore_int_flag)
2200 - testl $PS_IE, 4(%esp)
2201 - jz 1f
2202 -#if defined(__xpv)
2203 - leal xpv_panicking, %edx
2204 - movl (%edx), %edx
2205 - cmpl $0, %edx
2206 - jne 1f
2207 - /*
2208 - * Since we're -really- running unprivileged, our attempt
2209 - * to change the state of the IF bit will be ignored.
2210 - * The virtual IF bit is tweaked by CLI and STI.
2211 - */
2212 - IE_TO_EVENT_MASK(%edx, 4(%esp))
2213 -#else
2214 - sti
2215 -#endif
2216 -1:
2217 - ret
2218 - SET_SIZE(restore_int_flag)
2219 - SET_SIZE(intr_restore)
2220 -
2221 -#endif /* __i386 */
2222 -#endif /* __lint */
2223 -
2224 -#if defined(__lint)
2225 -
2226 -void
2227 -sti(void)
2228 -{}
2229 -
2230 -void
2231 -cli(void)
2232 -{}
2233 -
2234 -#else /* __lint */
2235 -
2236 908 ENTRY(sti)
2237 909 STI
2238 910 ret
2239 911 SET_SIZE(sti)
2240 912
2241 913 ENTRY(cli)
2242 -#if defined(__amd64)
2243 914 CLI(%rax)
2244 -#elif defined(__i386)
2245 - CLI(%eax)
2246 -#endif /* __i386 */
2247 915 ret
2248 916 SET_SIZE(cli)
2249 917
2250 -#endif /* __lint */
2251 -
2252 -#if defined(__lint)
2253 -
2254 -dtrace_icookie_t
2255 -dtrace_interrupt_disable(void)
2256 -{ return (0); }
2257 -
2258 -#else /* __lint */
2259 -
2260 -#if defined(__amd64)
2261 -
2262 918 ENTRY(dtrace_interrupt_disable)
2263 919 pushfq
2264 920 popq %rax
2265 921 #if defined(__xpv)
2266 922 leaq xpv_panicking, %rdi
2267 923 movl (%rdi), %edi
2268 924 cmpl $0, %edi
2269 925 jne .dtrace_interrupt_disable_done
2270 926 CLIRET(%rdi, %dl) /* returns event mask in %dl */
2271 927 /*
2272 928 * Synthesize the PS_IE bit from the event mask bit
2273 929 */
2274 930 andq $_BITNOT(PS_IE), %rax
2275 931 testb $1, %dl
2276 932 jnz .dtrace_interrupt_disable_done
2277 933 orq $PS_IE, %rax
2278 934 #else
2279 935 CLI(%rdx)
2280 936 #endif
2281 937 .dtrace_interrupt_disable_done:
2282 938 ret
2283 939 SET_SIZE(dtrace_interrupt_disable)
2284 940
2285 -#elif defined(__i386)
2286 -
2287 - ENTRY(dtrace_interrupt_disable)
2288 - pushfl
2289 - popl %eax
2290 -#if defined(__xpv)
2291 - leal xpv_panicking, %edx
2292 - movl (%edx), %edx
2293 - cmpl $0, %edx
2294 - jne .dtrace_interrupt_disable_done
2295 - CLIRET(%edx, %cl) /* returns event mask in %cl */
2296 - /*
2297 - * Synthesize the PS_IE bit from the event mask bit
2298 - */
2299 - andl $_BITNOT(PS_IE), %eax
2300 - testb $1, %cl
2301 - jnz .dtrace_interrupt_disable_done
2302 - orl $PS_IE, %eax
2303 -#else
2304 - CLI(%edx)
2305 -#endif
2306 -.dtrace_interrupt_disable_done:
2307 - ret
2308 - SET_SIZE(dtrace_interrupt_disable)
2309 -
2310 -#endif /* __i386 */
2311 -#endif /* __lint */
2312 -
2313 -#if defined(__lint)
2314 -
2315 -/*ARGSUSED*/
2316 -void
2317 -dtrace_interrupt_enable(dtrace_icookie_t cookie)
2318 -{}
2319 -
2320 -#else /* __lint */
2321 -
2322 -#if defined(__amd64)
2323 -
2324 941 ENTRY(dtrace_interrupt_enable)
2325 942 pushq %rdi
2326 943 popfq
2327 944 #if defined(__xpv)
2328 945 leaq xpv_panicking, %rdx
2329 946 movl (%rdx), %edx
2330 947 cmpl $0, %edx
2331 948 jne .dtrace_interrupt_enable_done
2332 949 /*
2333 950 * Since we're -really- running unprivileged, our attempt
2334 951 * to change the state of the IF bit will be ignored. The
2335 952 * virtual IF bit is tweaked by CLI and STI.
2336 953 */
2337 954 IE_TO_EVENT_MASK(%rdx, %rdi)
2338 955 #endif
2339 956 .dtrace_interrupt_enable_done:
2340 957 ret
2341 958 SET_SIZE(dtrace_interrupt_enable)
2342 959
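
The DTrace variants return an opaque cookie rather than raw flags; a minimal
sketch (dtrace_icookie_t is declared in <sys/dtrace.h>):

	#include <sys/dtrace.h>

	/* Probe-context bracket: disable, do the work, re-enable. */
	static void
	probe_critical(void)
	{
		dtrace_icookie_t cookie = dtrace_interrupt_disable();

		/* ... work done with interrupts disabled ... */

		dtrace_interrupt_enable(cookie);
	}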
2343 -#elif defined(__i386)
2344 960
2345 - ENTRY(dtrace_interrupt_enable)
2346 - movl 4(%esp), %eax
2347 - pushl %eax
2348 - popfl
2349 -#if defined(__xpv)
2350 - leal xpv_panicking, %edx
2351 - movl (%edx), %edx
2352 - cmpl $0, %edx
2353 - jne .dtrace_interrupt_enable_done
2354 - /*
2355 - * Since we're -really- running unprivileged, our attempt
2356 - * to change the state of the IF bit will be ignored. The
2357 - * virtual IF bit is tweaked by CLI and STI.
2358 - */
2359 - IE_TO_EVENT_MASK(%edx, %eax)
2360 -#endif
2361 -.dtrace_interrupt_enable_done:
2362 - ret
2363 - SET_SIZE(dtrace_interrupt_enable)
2364 -
2365 -#endif /* __i386 */
2366 -#endif /* __lint */
2367 -
2368 -
2369 -#if defined(lint)
2370 -
2371 -void
2372 -dtrace_membar_producer(void)
2373 -{}
2374 -
2375 -void
2376 -dtrace_membar_consumer(void)
2377 -{}
2378 -
2379 -#else /* __lint */
2380 -
2381 961 ENTRY(dtrace_membar_producer)
2382 962 rep; ret /* use 2 byte return instruction when branch target */
2383 963 /* AMD Software Optimization Guide - Section 6.2 */
2384 964 SET_SIZE(dtrace_membar_producer)
2385 965
2386 966 ENTRY(dtrace_membar_consumer)
2387 967 rep; ret /* use 2 byte return instruction when branch target */
2388 968 /* AMD Software Optimization Guide - Section 6.2 */
2389 969 SET_SIZE(dtrace_membar_consumer)
2390 970
2391 -#endif /* __lint */
2392 -
2393 -#if defined(__lint)
2394 -
2395 -kthread_id_t
2396 -threadp(void)
2397 -{ return ((kthread_id_t)0); }
2398 -
2399 -#else /* __lint */
2400 -
2401 -#if defined(__amd64)
2402 -
2403 971 ENTRY(threadp)
2404 972 movq %gs:CPU_THREAD, %rax
2405 973 ret
2406 974 SET_SIZE(threadp)
2407 975
2408 -#elif defined(__i386)
2409 -
2410 - ENTRY(threadp)
2411 - movl %gs:CPU_THREAD, %eax
2412 - ret
2413 - SET_SIZE(threadp)
2414 -
2415 -#endif /* __i386 */
2416 -#endif /* __lint */
2417 -
2418 976 /*
2419 977 * Checksum routine for Internet Protocol Headers
2420 978 */
2421 979
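For readers working through the unrolled loop below: ip_ocsum() computes a 16-bit ones'-complement sum over an array of halfwords, folding any carries out of bit 16 back into the low 16 bits. A minimal C sketch of that logic, mirroring the __lint stub this change removes (types from <sys/types.h>; the function name is illustrative):

	unsigned int
	ip_ocsum_sketch(const ushort_t *addr, int halfwords, unsigned int sum)
	{
		/* accumulate the 16-bit halfwords into a wider register */
		while (halfwords-- > 0)
			sum += *addr++;

		/* fold carries out of bit 16 back into the low 16 bits */
		while ((sum >> 16) != 0)
			sum = (sum & 0xffff) + (sum >> 16);

		return (sum);
	}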
2422 -#if defined(__lint)
2423 -
2424 -/* ARGSUSED */
2425 -unsigned int
2426 -ip_ocsum(
2427 - ushort_t *address, /* ptr to 1st message buffer */
2428 - int halfword_count, /* length of data */
2429 - unsigned int sum) /* partial checksum */
2430 -{
2431 - int i;
2432 - unsigned int psum = 0; /* partial sum */
2433 -
2434 - for (i = 0; i < halfword_count; i++, address++) {
2435 - psum += *address;
2436 - }
2437 -
2438 - while ((psum >> 16) != 0) {
2439 - psum = (psum & 0xffff) + (psum >> 16);
2440 - }
2441 -
2442 - psum += sum;
2443 -
2444 - while ((psum >> 16) != 0) {
2445 - psum = (psum & 0xffff) + (psum >> 16);
2446 - }
2447 -
2448 - return (psum);
2449 -}
2450 -
2451 -#else /* __lint */
2452 -
2453 -#if defined(__amd64)
2454 -
2455 980 ENTRY(ip_ocsum)
2456 981 pushq %rbp
2457 982 movq %rsp, %rbp
2458 983 #ifdef DEBUG
2459 984 movq postbootkernelbase(%rip), %rax
2460 985 cmpq %rax, %rdi
2461 986 jnb 1f
2462 987 xorl %eax, %eax
2463 988 movq %rdi, %rsi
2464 989 leaq .ip_ocsum_panic_msg(%rip), %rdi
2465 990 call panic
2466 991 /*NOTREACHED*/
2467 992 .ip_ocsum_panic_msg:
2468 993 .string "ip_ocsum: address 0x%p below kernelbase\n"
2469 994 1:
2470 995 #endif
2471 996 movl %esi, %ecx /* halfword_count */
2472 997 movq %rdi, %rsi /* address */
2473 998 /* partial sum in %edx */
2474 999 xorl %eax, %eax
2475 1000 testl %ecx, %ecx
2476 1001 jz .ip_ocsum_done
2477 1002 testq $3, %rsi
2478 1003 jnz .ip_csum_notaligned
2479 1004 .ip_csum_aligned: /* XX64 opportunities for 8-byte operations? */
2480 1005 .next_iter:
2481 1006 /* XX64 opportunities for prefetch? */
2482 1007 /* XX64 compute csum with 64 bit quantities? */
2483 1008 subl $32, %ecx
2484 1009 jl .less_than_32
2485 1010
2486 1011 addl 0(%rsi), %edx
2487 1012 .only60:
2488 1013 adcl 4(%rsi), %eax
2489 1014 .only56:
2490 1015 adcl 8(%rsi), %edx
2491 1016 .only52:
2492 1017 adcl 12(%rsi), %eax
2493 1018 .only48:
2494 1019 adcl 16(%rsi), %edx
2495 1020 .only44:
2496 1021 adcl 20(%rsi), %eax
2497 1022 .only40:
2498 1023 adcl 24(%rsi), %edx
2499 1024 .only36:
2500 1025 adcl 28(%rsi), %eax
2501 1026 .only32:
2502 1027 adcl 32(%rsi), %edx
2503 1028 .only28:
2504 1029 adcl 36(%rsi), %eax
2505 1030 .only24:
2506 1031 adcl 40(%rsi), %edx
2507 1032 .only20:
2508 1033 adcl 44(%rsi), %eax
2509 1034 .only16:
2510 1035 adcl 48(%rsi), %edx
2511 1036 .only12:
2512 1037 adcl 52(%rsi), %eax
2513 1038 .only8:
2514 1039 adcl 56(%rsi), %edx
2515 1040 .only4:
2516 1041 adcl 60(%rsi), %eax /* could be adding -1 and -1 with a carry */
2517 1042 .only0:
2518 1043 adcl $0, %eax /* could be adding -1 in eax with a carry */
2519 1044 adcl $0, %eax
2520 1045
2521 1046 addq $64, %rsi
2522 1047 testl %ecx, %ecx
2523 1048 jnz .next_iter
2524 1049
2525 1050 .ip_ocsum_done:
2526 1051 addl %eax, %edx
2527 1052 adcl $0, %edx
2528 1053 movl %edx, %eax /* form a 16 bit checksum by */
2529 1054 shrl $16, %eax /* adding two halves of 32 bit checksum */
2530 1055 addw %dx, %ax
2531 1056 adcw $0, %ax
2532 1057 andl $0xffff, %eax
2533 1058 leave
2534 1059 ret
2535 1060
2536 1061 .ip_csum_notaligned:
2537 1062 xorl %edi, %edi
2538 1063 movw (%rsi), %di
2539 1064 addl %edi, %edx
2540 1065 adcl $0, %edx
2541 1066 addq $2, %rsi
2542 1067 decl %ecx
2543 1068 jmp .ip_csum_aligned
2544 1069
2545 1070 .less_than_32:
2546 1071 addl $32, %ecx
2547 1072 testl $1, %ecx
2548 1073 jz .size_aligned
2549 1074 andl $0xfe, %ecx
2550 1075 movzwl (%rsi, %rcx, 2), %edi
2551 1076 addl %edi, %edx
2552 1077 adcl $0, %edx
2553 1078 .size_aligned:
2554 1079 movl %ecx, %edi
2555 1080 shrl $1, %ecx
2556 1081 shl $1, %edi
2557 1082 subq $64, %rdi
2558 1083 addq %rdi, %rsi
2559 1084 leaq .ip_ocsum_jmptbl(%rip), %rdi
2560 1085 leaq (%rdi, %rcx, 8), %rdi
2561 1086 xorl %ecx, %ecx
2562 1087 clc
2563 1088 movq (%rdi), %rdi
2564 1089 INDIRECT_JMP_REG(rdi)
2565 1090
2566 1091 .align 8
2567 1092 .ip_ocsum_jmptbl:
2568 1093 .quad .only0, .only4, .only8, .only12, .only16, .only20
2569 1094 .quad .only24, .only28, .only32, .only36, .only40, .only44
2570 1095 .quad .only48, .only52, .only56, .only60
2571 1096 SET_SIZE(ip_ocsum)
2572 1097
2573 -#elif defined(__i386)
2574 -
2575 - ENTRY(ip_ocsum)
2576 - pushl %ebp
2577 - movl %esp, %ebp
2578 - pushl %ebx
2579 - pushl %esi
2580 - pushl %edi
2581 - movl 12(%ebp), %ecx /* count of half words */
2582 - movl 16(%ebp), %edx /* partial checksum */
2583 - movl 8(%ebp), %esi
2584 - xorl %eax, %eax
2585 - testl %ecx, %ecx
2586 - jz .ip_ocsum_done
2587 -
2588 - testl $3, %esi
2589 - jnz .ip_csum_notaligned
2590 -.ip_csum_aligned:
2591 -.next_iter:
2592 - subl $32, %ecx
2593 - jl .less_than_32
2594 -
2595 - addl 0(%esi), %edx
2596 -.only60:
2597 - adcl 4(%esi), %eax
2598 -.only56:
2599 - adcl 8(%esi), %edx
2600 -.only52:
2601 - adcl 12(%esi), %eax
2602 -.only48:
2603 - adcl 16(%esi), %edx
2604 -.only44:
2605 - adcl 20(%esi), %eax
2606 -.only40:
2607 - adcl 24(%esi), %edx
2608 -.only36:
2609 - adcl 28(%esi), %eax
2610 -.only32:
2611 - adcl 32(%esi), %edx
2612 -.only28:
2613 - adcl 36(%esi), %eax
2614 -.only24:
2615 - adcl 40(%esi), %edx
2616 -.only20:
2617 - adcl 44(%esi), %eax
2618 -.only16:
2619 - adcl 48(%esi), %edx
2620 -.only12:
2621 - adcl 52(%esi), %eax
2622 -.only8:
2623 - adcl 56(%esi), %edx
2624 -.only4:
2625 - adcl 60(%esi), %eax /* We could be adding -1 and -1 with a carry */
2626 -.only0:
2627 - adcl $0, %eax /* we could be adding -1 in eax with a carry */
2628 - adcl $0, %eax
2629 -
2630 - addl $64, %esi
2631 - andl %ecx, %ecx
2632 - jnz .next_iter
2633 -
2634 -.ip_ocsum_done:
2635 - addl %eax, %edx
2636 - adcl $0, %edx
2637 - movl %edx, %eax /* form a 16 bit checksum by */
2638 - shrl $16, %eax /* adding two halves of 32 bit checksum */
2639 - addw %dx, %ax
2640 - adcw $0, %ax
2641 - andl $0xffff, %eax
2642 - popl %edi /* restore registers */
2643 - popl %esi
2644 - popl %ebx
2645 - leave
2646 - ret
2647 -
2648 -.ip_csum_notaligned:
2649 - xorl %edi, %edi
2650 - movw (%esi), %di
2651 - addl %edi, %edx
2652 - adcl $0, %edx
2653 - addl $2, %esi
2654 - decl %ecx
2655 - jmp .ip_csum_aligned
2656 -
2657 -.less_than_32:
2658 - addl $32, %ecx
2659 - testl $1, %ecx
2660 - jz .size_aligned
2661 - andl $0xfe, %ecx
2662 - movzwl (%esi, %ecx, 2), %edi
2663 - addl %edi, %edx
2664 - adcl $0, %edx
2665 -.size_aligned:
2666 - movl %ecx, %edi
2667 - shrl $1, %ecx
2668 - shl $1, %edi
2669 - subl $64, %edi
2670 - addl %edi, %esi
2671 - movl $.ip_ocsum_jmptbl, %edi
2672 - lea (%edi, %ecx, 4), %edi
2673 - xorl %ecx, %ecx
2674 - clc
2675 - jmp *(%edi)
2676 - SET_SIZE(ip_ocsum)
2677 -
2678 - .data
2679 - .align 4
2680 -
2681 -.ip_ocsum_jmptbl:
2682 - .long .only0, .only4, .only8, .only12, .only16, .only20
2683 - .long .only24, .only28, .only32, .only36, .only40, .only44
2684 - .long .only48, .only52, .only56, .only60
2685 -
2686 -
2687 -#endif /* __i386 */
2688 -#endif /* __lint */
2689 -
2690 1098 /*
2691 1099 * multiply two long numbers and yield a u_longlong_t result, callable from C.
2692 1100 * Provided to manipulate hrtime_t values.
2693 1101 */
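At the C level the contract is just a widening multiply; a one-line sketch of the routine below (the name is illustrative, not a kernel symbol):

	u_longlong_t
	mul32_sketch(uint_t a, uint_t b)
	{
		/* widen one operand so the product keeps all 64 bits */
		return ((u_longlong_t)a * b);
	}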
2694 -#if defined(__lint)
2695 1102
2696 -/* result = a * b; */
2697 -
2698 -/* ARGSUSED */
2699 -unsigned long long
2700 -mul32(uint_t a, uint_t b)
2701 -{ return (0); }
2702 -
2703 -#else /* __lint */
2704 -
2705 -#if defined(__amd64)
2706 -
2707 1103 ENTRY(mul32)
2708 1104 xorl %edx, %edx /* XX64 joe, paranoia? */
2709 1105 movl %edi, %eax
2710 1106 mull %esi
2711 1107 shlq $32, %rdx
2712 1108 orq %rdx, %rax
2713 1109 ret
2714 1110 SET_SIZE(mul32)
2715 1111
2716 -#elif defined(__i386)
2717 -
2718 - ENTRY(mul32)
2719 - movl 8(%esp), %eax
2720 - movl 4(%esp), %ecx
2721 - mull %ecx
2722 - ret
2723 - SET_SIZE(mul32)
2724 -
2725 -#endif /* __i386 */
2726 -#endif /* __lint */
2727 -
2728 -#if defined(notused)
2729 -#if defined(__lint)
2730 -/* ARGSUSED */
2731 -void
2732 -load_pte64(uint64_t *pte, uint64_t pte_value)
2733 -{}
2734 -#else /* __lint */
2735 - .globl load_pte64
2736 -load_pte64:
2737 - movl 4(%esp), %eax
2738 - movl 8(%esp), %ecx
2739 - movl 12(%esp), %edx
2740 - movl %edx, 4(%eax)
2741 - movl %ecx, (%eax)
2742 - ret
2743 -#endif /* __lint */
2744 -#endif /* notused */
2745 -
2746 -#if defined(__lint)
2747 -
2748 -/*ARGSUSED*/
2749 -void
2750 -scan_memory(caddr_t addr, size_t size)
2751 -{}
2752 -
2753 -#else /* __lint */
2754 -
2755 -#if defined(__amd64)
2756 -
2757 1112 ENTRY(scan_memory)
2758 1113 shrq $3, %rsi /* convert %rsi from byte to quadword count */
2759 1114 jz .scanm_done
2760 1115 movq %rsi, %rcx /* move count into rep control register */
2761 1116 movq %rdi, %rsi /* move addr into lodsq control reg. */
2762 1117 rep lodsq /* scan the memory range */
2763 1118 .scanm_done:
2764 1119 rep; ret /* use 2 byte return instruction when branch target */
2765 1120 /* AMD Software Optimization Guide - Section 6.2 */
2766 1121 SET_SIZE(scan_memory)
2767 1122
2768 -#elif defined(__i386)
2769 1123
2770 - ENTRY(scan_memory)
2771 - pushl %ecx
2772 - pushl %esi
2773 - movl 16(%esp), %ecx /* move 2nd arg into rep control register */
2774 - shrl $2, %ecx /* convert from byte count to word count */
2775 - jz .scanm_done
2776 - movl 12(%esp), %esi /* move 1st arg into lodsw control register */
2777 - .byte 0xf3 /* rep prefix. lame assembler. sigh. */
2778 - lodsl
2779 -.scanm_done:
2780 - popl %esi
2781 - popl %ecx
2782 - ret
2783 - SET_SIZE(scan_memory)
2784 -
2785 -#endif /* __i386 */
2786 -#endif /* __lint */
2787 -
2788 -
2789 -#if defined(__lint)
2790 -
2791 -/*ARGSUSED */
2792 -int
2793 -lowbit(ulong_t i)
2794 -{ return (0); }
2795 -
2796 -#else /* __lint */
2797 -
2798 -#if defined(__amd64)
2799 -
2800 1124 ENTRY(lowbit)
2801 1125 movl $-1, %eax
2802 1126 bsfq %rdi, %rdi
2803 1127 cmovnz %edi, %eax
2804 1128 incl %eax
2805 1129 ret
2806 1130 SET_SIZE(lowbit)
2807 1131
2808 -#elif defined(__i386)
2809 -
2810 - ENTRY(lowbit)
2811 - bsfl 4(%esp), %eax
2812 - jz 0f
2813 - incl %eax
2814 - ret
2815 -0:
2816 - xorl %eax, %eax
2817 - ret
2818 - SET_SIZE(lowbit)
2819 -
2820 -#endif /* __i386 */
2821 -#endif /* __lint */
2822 -
2823 -#if defined(__lint)
2824 -
2825 -/*ARGSUSED*/
2826 -int
2827 -highbit(ulong_t i)
2828 -{ return (0); }
2829 -
2830 -/*ARGSUSED*/
2831 -int
2832 -highbit64(uint64_t i)
2833 -{ return (0); }
2834 -
2835 -#else /* __lint */
2836 -
2837 -#if defined(__amd64)
2838 -
2839 1132 ENTRY(highbit)
2840 1133 ALTENTRY(highbit64)
2841 1134 movl $-1, %eax
2842 1135 bsrq %rdi, %rdi
2843 1136 cmovnz %edi, %eax
2844 1137 incl %eax
2845 1138 ret
2846 1139 SET_SIZE(highbit64)
2847 1140 SET_SIZE(highbit)
2848 1141
2849 -#elif defined(__i386)
2850 -
2851 - ENTRY(highbit)
2852 - bsrl 4(%esp), %eax
2853 - jz 0f
2854 - incl %eax
2855 - ret
2856 -0:
2857 - xorl %eax, %eax
2858 - ret
2859 - SET_SIZE(highbit)
2860 -
2861 - ENTRY(highbit64)
2862 - bsrl 8(%esp), %eax
2863 - jz highbit
2864 - addl $33, %eax
2865 - ret
2866 - SET_SIZE(highbit64)
2867 -
2868 -#endif /* __i386 */
2869 -#endif /* __lint */
2870 -
2871 -#if defined(__lint)
2872 -
2873 -/*ARGSUSED*/
2874 -uint64_t
2875 -rdmsr(uint_t r)
2876 -{ return (0); }
2877 -
2878 -/*ARGSUSED*/
2879 -void
2880 -wrmsr(uint_t r, const uint64_t val)
2881 -{}
2882 -
2883 -/*ARGSUSED*/
2884 -uint64_t
2885 -xrdmsr(uint_t r)
2886 -{ return (0); }
2887 -
2888 -/*ARGSUSED*/
2889 -void
2890 -xwrmsr(uint_t r, const uint64_t val)
2891 -{}
2892 -
2893 -void
2894 -invalidate_cache(void)
2895 -{}
2896 -
2897 -/*ARGSUSED*/
2898 -uint64_t
2899 -get_xcr(uint_t r)
2900 -{ return (0); }
2901 -
2902 -/*ARGSUSED*/
2903 -void
2904 -set_xcr(uint_t r, const uint64_t val)
2905 -{}
2906 -
2907 -#else /* __lint */
2908 -
2909 1142 #define XMSR_ACCESS_VAL $0x9c5a203a
2910 1143
2911 -#if defined(__amd64)
2912 -
2913 1144 ENTRY(rdmsr)
2914 1145 movl %edi, %ecx
2915 1146 rdmsr
2916 1147 shlq $32, %rdx
2917 1148 orq %rdx, %rax
2918 1149 ret
2919 1150 SET_SIZE(rdmsr)
2920 1151
2921 1152 ENTRY(wrmsr)
2922 1153 movq %rsi, %rdx
2923 1154 shrq $32, %rdx
2924 1155 movl %esi, %eax
2925 1156 movl %edi, %ecx
2926 1157 wrmsr
2927 1158 ret
2928 1159 SET_SIZE(wrmsr)
2929 1160
2930 1161 ENTRY(xrdmsr)
2931 1162 pushq %rbp
2932 1163 movq %rsp, %rbp
2933 1164 movl %edi, %ecx
2934 1165 movl XMSR_ACCESS_VAL, %edi /* this value is needed to access MSR */
2935 1166 rdmsr
2936 1167 shlq $32, %rdx
2937 1168 orq %rdx, %rax
2938 1169 leave
2939 1170 ret
2940 1171 SET_SIZE(xrdmsr)
2941 1172
2942 1173 ENTRY(xwrmsr)
2943 1174 pushq %rbp
2944 1175 movq %rsp, %rbp
2945 1176 movl %edi, %ecx
2946 1177 movl XMSR_ACCESS_VAL, %edi /* this value is needed to access MSR */
2947 1178 movq %rsi, %rdx
2948 1179 shrq $32, %rdx
2949 1180 movl %esi, %eax
2950 1181 wrmsr
2951 1182 leave
2952 1183 ret
2953 1184 SET_SIZE(xwrmsr)
2954 1185
2955 1186 ENTRY(get_xcr)
2956 1187 movl %edi, %ecx
2957 1188 #xgetbv
2958 1189 .byte 0x0f,0x01,0xd0
2959 1190 shlq $32, %rdx
2960 1191 orq %rdx, %rax
2961 1192 ret
2962 1193 SET_SIZE(get_xcr)
2963 1194
2964 1195 ENTRY(set_xcr)
2965 1196 movq %rsi, %rdx
2966 1197 shrq $32, %rdx
2967 1198 movl %esi, %eax
2968 1199 movl %edi, %ecx
2969 1200 #xsetbv
2970 1201 .byte 0x0f,0x01,0xd1
2971 1202 ret
2972 1203 SET_SIZE(set_xcr)
2973 1204
2974 -#elif defined(__i386)
2975 -
2976 - ENTRY(rdmsr)
2977 - movl 4(%esp), %ecx
2978 - rdmsr
2979 - ret
2980 - SET_SIZE(rdmsr)
2981 -
2982 - ENTRY(wrmsr)
2983 - movl 4(%esp), %ecx
2984 - movl 8(%esp), %eax
2985 - movl 12(%esp), %edx
2986 - wrmsr
2987 - ret
2988 - SET_SIZE(wrmsr)
2989 -
2990 - ENTRY(xrdmsr)
2991 - pushl %ebp
2992 - movl %esp, %ebp
2993 - movl 8(%esp), %ecx
2994 - pushl %edi
2995 - movl XMSR_ACCESS_VAL, %edi /* this value is needed to access MSR */
2996 - rdmsr
2997 - popl %edi
2998 - leave
2999 - ret
3000 - SET_SIZE(xrdmsr)
3001 -
3002 - ENTRY(xwrmsr)
3003 - pushl %ebp
3004 - movl %esp, %ebp
3005 - movl 8(%esp), %ecx
3006 - movl 12(%esp), %eax
3007 - movl 16(%esp), %edx
3008 - pushl %edi
3009 - movl XMSR_ACCESS_VAL, %edi /* this value is needed to access MSR */
3010 - wrmsr
3011 - popl %edi
3012 - leave
3013 - ret
3014 - SET_SIZE(xwrmsr)
3015 -
3016 - ENTRY(get_xcr)
3017 - movl 4(%esp), %ecx
3018 - #xgetbv
3019 - .byte 0x0f,0x01,0xd0
3020 - ret
3021 - SET_SIZE(get_xcr)
3022 -
3023 - ENTRY(set_xcr)
3024 - movl 4(%esp), %ecx
3025 - movl 8(%esp), %eax
3026 - movl 12(%esp), %edx
3027 - #xsetbv
3028 - .byte 0x0f,0x01,0xd1
3029 - ret
3030 - SET_SIZE(set_xcr)
3031 -
3032 -#endif /* __i386 */
3033 -
3034 1205 ENTRY(invalidate_cache)
3035 1206 wbinvd
3036 1207 ret
3037 1208 SET_SIZE(invalidate_cache)
3038 1209
3039 -#endif /* __lint */
3040 -
3041 -#if defined(__lint)
3042 -
3043 -/*ARGSUSED*/
3044 -void
3045 -getcregs(struct cregs *crp)
3046 -{}
3047 -
3048 -#else /* __lint */
3049 -
3050 -#if defined(__amd64)
3051 -
3052 1210 ENTRY_NP(getcregs)
3053 1211 #if defined(__xpv)
3054 1212 /*
3055 1213 * Only a few of the hardware control registers or descriptor tables
3056 1214 * are directly accessible to us, so just zero the structure.
3057 1215 *
3058 1216 * XXPV Perhaps it would be helpful for the hypervisor to return
3059 1217 * virtualized versions of these for post-mortem use.
3060 1218 * (Need to reevaluate - perhaps it already does!)
3061 1219 */
3062 1220 pushq %rdi /* save *crp */
3063 1221 movq $CREGSZ, %rsi
3064 1222 call bzero
3065 1223 popq %rdi
3066 1224
3067 1225 /*
3068 1226 * Dump what limited information we can
3069 1227 */
3070 1228 movq %cr0, %rax
3071 1229 movq %rax, CREG_CR0(%rdi) /* cr0 */
3072 1230 movq %cr2, %rax
3073 1231 movq %rax, CREG_CR2(%rdi) /* cr2 */
3074 1232 movq %cr3, %rax
3075 1233 movq %rax, CREG_CR3(%rdi) /* cr3 */
3076 1234 movq %cr4, %rax
3077 1235 movq %rax, CREG_CR4(%rdi) /* cr4 */
3078 1236
3079 1237 #else /* __xpv */
3080 1238
3081 1239 #define GETMSR(r, off, d) \
3082 1240 movl $r, %ecx; \
3083 1241 rdmsr; \
3084 1242 movl %eax, off(d); \
3085 1243 movl %edx, off+4(d)
3086 1244
3087 1245 xorl %eax, %eax
3088 1246 movq %rax, CREG_GDT+8(%rdi)
3089 1247 sgdt CREG_GDT(%rdi) /* 10 bytes */
3090 1248 movq %rax, CREG_IDT+8(%rdi)
3091 1249 sidt CREG_IDT(%rdi) /* 10 bytes */
3092 1250 movq %rax, CREG_LDT(%rdi)
3093 1251 sldt CREG_LDT(%rdi) /* 2 bytes */
3094 1252 movq %rax, CREG_TASKR(%rdi)
3095 1253 str CREG_TASKR(%rdi) /* 2 bytes */
3096 1254 movq %cr0, %rax
3097 1255 movq %rax, CREG_CR0(%rdi) /* cr0 */
3098 1256 movq %cr2, %rax
3099 1257 movq %rax, CREG_CR2(%rdi) /* cr2 */
3100 1258 movq %cr3, %rax
3101 1259 movq %rax, CREG_CR3(%rdi) /* cr3 */
3102 1260 movq %cr4, %rax
3103 1261 movq %rax, CREG_CR4(%rdi) /* cr4 */
3104 1262 movq %cr8, %rax
3105 1263 movq %rax, CREG_CR8(%rdi) /* cr8 */
3106 1264 GETMSR(MSR_AMD_KGSBASE, CREG_KGSBASE, %rdi)
3107 1265 GETMSR(MSR_AMD_EFER, CREG_EFER, %rdi)
3108 1266 #endif /* __xpv */
3109 1267 ret
3110 1268 SET_SIZE(getcregs)
3111 1269
3112 1270 #undef GETMSR
3113 1271
3114 -#elif defined(__i386)
3115 1272
3116 - ENTRY_NP(getcregs)
3117 -#if defined(__xpv)
3118 - /*
3119 - * Only a few of the hardware control registers or descriptor tables
3120 - * are directly accessible to us, so just zero the structure.
3121 - *
3122 - * XXPV Perhaps it would be helpful for the hypervisor to return
3123 - * virtualized versions of these for post-mortem use.
3124 - * (Need to reevaluate - perhaps it already does!)
3125 - */
3126 - movl 4(%esp), %edx
3127 - pushl $CREGSZ
3128 - pushl %edx
3129 - call bzero
3130 - addl $8, %esp
3131 - movl 4(%esp), %edx
3132 -
3133 - /*
3134 - * Dump what limited information we can
3135 - */
3136 - movl %cr0, %eax
3137 - movl %eax, CREG_CR0(%edx) /* cr0 */
3138 - movl %cr2, %eax
3139 - movl %eax, CREG_CR2(%edx) /* cr2 */
3140 - movl %cr3, %eax
3141 - movl %eax, CREG_CR3(%edx) /* cr3 */
3142 - movl %cr4, %eax
3143 - movl %eax, CREG_CR4(%edx) /* cr4 */
3144 -
3145 -#else /* __xpv */
3146 -
3147 - movl 4(%esp), %edx
3148 - movw $0, CREG_GDT+6(%edx)
3149 - movw $0, CREG_IDT+6(%edx)
3150 - sgdt CREG_GDT(%edx) /* gdt */
3151 - sidt CREG_IDT(%edx) /* idt */
3152 - sldt CREG_LDT(%edx) /* ldt */
3153 - str CREG_TASKR(%edx) /* task */
3154 - movl %cr0, %eax
3155 - movl %eax, CREG_CR0(%edx) /* cr0 */
3156 - movl %cr2, %eax
3157 - movl %eax, CREG_CR2(%edx) /* cr2 */
3158 - movl %cr3, %eax
3159 - movl %eax, CREG_CR3(%edx) /* cr3 */
3160 - bt $X86FSET_LARGEPAGE, x86_featureset
3161 - jnc .nocr4
3162 - movl %cr4, %eax
3163 - movl %eax, CREG_CR4(%edx) /* cr4 */
3164 - jmp .skip
3165 -.nocr4:
3166 - movl $0, CREG_CR4(%edx)
3167 -.skip:
3168 -#endif
3169 - ret
3170 - SET_SIZE(getcregs)
3171 -
3172 -#endif /* __i386 */
3173 -#endif /* __lint */
3174 -
3175 -
3176 1273 /*
3177 1274 * A panic trigger is a word which is updated atomically and can only be set
3178 1275 * once. We atomically store 0xDEFACEDD and load the old value. If the
3179 1276 * previous value was 0, we succeed and return 1; otherwise return 0.
3180 1277 * This allows a partially corrupt trigger to still trigger correctly. DTrace
3181 1278 * has its own version of this function to allow it to panic correctly from
3182 1279 * probe context.
3183 1280 */
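A hedged C sketch of the trigger protocol described above, written with the illumos atomic_swap_32() primitive from <sys/atomic.h> (the function name here is illustrative, not the kernel's):

	int
	panic_trigger_sketch(volatile uint32_t *tp)
	{
		/* atomically store the magic value and fetch the old word */
		uint32_t old = atomic_swap_32(tp, 0xdefacedd);

		/* only the first caller observes 0 and wins the right to panic */
		return (old == 0);
	}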
3184 -#if defined(__lint)
3185 1281
3186 -/*ARGSUSED*/
3187 -int
3188 -panic_trigger(int *tp)
3189 -{ return (0); }
3190 -
3191 -/*ARGSUSED*/
3192 -int
3193 -dtrace_panic_trigger(int *tp)
3194 -{ return (0); }
3195 -
3196 -#else /* __lint */
3197 -
3198 -#if defined(__amd64)
3199 -
3200 1282 ENTRY_NP(panic_trigger)
3201 1283 xorl %eax, %eax
3202 1284 movl $0xdefacedd, %edx
3203 1285 lock
3204 1286 xchgl %edx, (%rdi)
3205 1287 cmpl $0, %edx
3206 1288 je 0f
3207 1289 movl $0, %eax
3208 1290 ret
3209 1291 0: movl $1, %eax
3210 1292 ret
3211 1293 SET_SIZE(panic_trigger)
3212 1294
3213 1295 ENTRY_NP(dtrace_panic_trigger)
3214 1296 xorl %eax, %eax
3215 1297 movl $0xdefacedd, %edx
3216 1298 lock
3217 1299 xchgl %edx, (%rdi)
3218 1300 cmpl $0, %edx
3219 1301 je 0f
3220 1302 movl $0, %eax
3221 1303 ret
3222 1304 0: movl $1, %eax
3223 1305 ret
3224 1306 SET_SIZE(dtrace_panic_trigger)
3225 1307
3226 -#elif defined(__i386)
3227 -
3228 - ENTRY_NP(panic_trigger)
3229 - movl 4(%esp), %edx / %edx = address of trigger
3230 - movl $0xdefacedd, %eax / %eax = 0xdefacedd
3231 - lock / assert lock
3232 - xchgl %eax, (%edx) / exchange %eax and the trigger
3233 - cmpl $0, %eax / if (%eax == 0x0)
3234 - je 0f / return (1);
3235 - movl $0, %eax / else
3236 - ret / return (0);
3237 -0: movl $1, %eax
3238 - ret
3239 - SET_SIZE(panic_trigger)
3240 -
3241 - ENTRY_NP(dtrace_panic_trigger)
3242 - movl 4(%esp), %edx / %edx = address of trigger
3243 - movl $0xdefacedd, %eax / %eax = 0xdefacedd
3244 - lock / assert lock
3245 - xchgl %eax, (%edx) / exchange %eax and the trigger
3246 - cmpl $0, %eax / if (%eax == 0x0)
3247 - je 0f / return (1);
3248 - movl $0, %eax / else
3249 - ret / return (0);
3250 -0: movl $1, %eax
3251 - ret
3252 - SET_SIZE(dtrace_panic_trigger)
3253 -
3254 -#endif /* __i386 */
3255 -#endif /* __lint */
3256 -
3257 1308 /*
3258 1309 * The panic() and cmn_err() functions invoke vpanic() as a common entry point
3259 1310 * into the panic code implemented in panicsys(). vpanic() is responsible
3260 1311 * for passing through the format string and arguments, and constructing a
3261 1312 * regs structure on the stack into which it saves the current register
3262 1313 * values. If we are not dying due to a fatal trap, these registers will
3263 1314 * then be preserved in panicbuf as the current processor state. Before
3264 1315 * invoking panicsys(), vpanic() activates the first panic trigger (see
3265 1316 * common/os/panic.c) and switches to the panic_stack if successful. Note that
3266 1317 * DTrace takes a slightly different panic path if it must panic from probe
3267 1318 * context. Instead of calling panic, it calls into dtrace_vpanic(), which
3268 1319 * sets up the initial stack as vpanic does, calls dtrace_panic_trigger(), and
3269 1320 * branches back into vpanic().
3270 1321 */
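In C terms, the hand-off the assembly performs is roughly the following; a sketch assuming panicsys() keeps the (format, alist, rp, on_panic_stack) signature shown in the code below, with the register capture elided:

	void
	vpanic_sketch(const char *format, va_list alist)
	{
		struct regs rp;		/* filled from the saved registers */
		int on_panic_stack;

		/* the first caller to fire the trigger switches to panic_stack */
		on_panic_stack = panic_trigger(&panic_quiesce);
		panicsys(format, alist, &rp, on_panic_stack);
	}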
3271 -#if defined(__lint)
3272 1322
3273 -/*ARGSUSED*/
3274 -void
3275 -vpanic(const char *format, va_list alist)
3276 -{}
3277 -
3278 -/*ARGSUSED*/
3279 -void
3280 -dtrace_vpanic(const char *format, va_list alist)
3281 -{}
3282 -
3283 -#else /* __lint */
3284 -
3285 -#if defined(__amd64)
3286 -
3287 1323 ENTRY_NP(vpanic) /* Initial stack layout: */
3288 1324
3289 1325 pushq %rbp /* | %rip | 0x60 */
3290 1326 movq %rsp, %rbp /* | %rbp | 0x58 */
3291 1327 pushfq /* | rfl | 0x50 */
3292 1328 pushq %r11 /* | %r11 | 0x48 */
3293 1329 pushq %r10 /* | %r10 | 0x40 */
3294 1330 pushq %rbx /* | %rbx | 0x38 */
3295 1331 pushq %rax /* | %rax | 0x30 */
3296 1332 pushq %r9 /* | %r9 | 0x28 */
3297 1333 pushq %r8 /* | %r8 | 0x20 */
3298 1334 pushq %rcx /* | %rcx | 0x18 */
3299 1335 pushq %rdx /* | %rdx | 0x10 */
3300 1336 pushq %rsi /* | %rsi | 0x8 alist */
3301 1337 pushq %rdi /* | %rdi | 0x0 format */
3302 1338
3303 1339 movq %rsp, %rbx /* %rbx = current %rsp */
3304 1340
3305 1341 leaq panic_quiesce(%rip), %rdi /* %rdi = &panic_quiesce */
3306 1342 call panic_trigger /* %eax = panic_trigger() */
3307 1343
3308 1344 vpanic_common:
3309 1345 /*
3310 1346 * The panic_trigger result is in %eax from the call above, and
3311 1347 * dtrace_panic places it in %eax before branching here.
3312 1348 * The rdmsr instructions that follow below will clobber %eax so
3313 1349 * we stash the panic_trigger result in %r11d.
3314 1350 */
3315 1351 movl %eax, %r11d
3316 1352 cmpl $0, %r11d
3317 1353 je 0f
3318 1354
3319 1355 /*
3320 1356 * If panic_trigger() was successful, we are the first to initiate a
3321 1357 * panic: we now switch to the reserved panic_stack before continuing.
3322 1358 */
3323 1359 leaq panic_stack(%rip), %rsp
3324 1360 addq $PANICSTKSIZE, %rsp
3325 1361 0: subq $REGSIZE, %rsp
3326 1362 /*
3327 1363 * Now that we've got everything set up, store the register values as
3328 1364 * they were when we entered vpanic() to the designated location in
3329 1365 * the regs structure we allocated on the stack.
3330 1366 */
3331 1367 movq 0x0(%rbx), %rcx
3332 1368 movq %rcx, REGOFF_RDI(%rsp)
3333 1369 movq 0x8(%rbx), %rcx
3334 1370 movq %rcx, REGOFF_RSI(%rsp)
3335 1371 movq 0x10(%rbx), %rcx
3336 1372 movq %rcx, REGOFF_RDX(%rsp)
3337 1373 movq 0x18(%rbx), %rcx
3338 1374 movq %rcx, REGOFF_RCX(%rsp)
3339 1375 movq 0x20(%rbx), %rcx
3340 1376
3341 1377 movq %rcx, REGOFF_R8(%rsp)
3342 1378 movq 0x28(%rbx), %rcx
3343 1379 movq %rcx, REGOFF_R9(%rsp)
3344 1380 movq 0x30(%rbx), %rcx
3345 1381 movq %rcx, REGOFF_RAX(%rsp)
3346 1382 movq 0x38(%rbx), %rcx
3347 1383 movq %rcx, REGOFF_RBX(%rsp)
3348 1384 movq 0x58(%rbx), %rcx
3349 1385
3350 1386 movq %rcx, REGOFF_RBP(%rsp)
3351 1387 movq 0x40(%rbx), %rcx
3352 1388 movq %rcx, REGOFF_R10(%rsp)
3353 1389 movq 0x48(%rbx), %rcx
3354 1390 movq %rcx, REGOFF_R11(%rsp)
3355 1391 movq %r12, REGOFF_R12(%rsp)
3356 1392
3357 1393 movq %r13, REGOFF_R13(%rsp)
3358 1394 movq %r14, REGOFF_R14(%rsp)
3359 1395 movq %r15, REGOFF_R15(%rsp)
3360 1396
3361 1397 xorl %ecx, %ecx
3362 1398 movw %ds, %cx
3363 1399 movq %rcx, REGOFF_DS(%rsp)
3364 1400 movw %es, %cx
3365 1401 movq %rcx, REGOFF_ES(%rsp)
3366 1402 movw %fs, %cx
3367 1403 movq %rcx, REGOFF_FS(%rsp)
3368 1404 movw %gs, %cx
3369 1405 movq %rcx, REGOFF_GS(%rsp)
3370 1406
3371 1407 movq $0, REGOFF_TRAPNO(%rsp)
3372 1408
3373 1409 movq $0, REGOFF_ERR(%rsp)
3374 1410 leaq vpanic(%rip), %rcx
3375 1411 movq %rcx, REGOFF_RIP(%rsp)
3376 1412 movw %cs, %cx
3377 1413 movzwq %cx, %rcx
3378 1414 movq %rcx, REGOFF_CS(%rsp)
3379 1415 movq 0x50(%rbx), %rcx
3380 1416 movq %rcx, REGOFF_RFL(%rsp)
3381 1417 movq %rbx, %rcx
3382 1418 addq $0x60, %rcx
3383 1419 movq %rcx, REGOFF_RSP(%rsp)
3384 1420 movw %ss, %cx
3385 1421 movzwq %cx, %rcx
3386 1422 movq %rcx, REGOFF_SS(%rsp)
3387 1423
3388 1424 /*
3389 1425 * panicsys(format, alist, rp, on_panic_stack)
3390 1426 */
3391 1427 movq REGOFF_RDI(%rsp), %rdi /* format */
3392 1428 movq REGOFF_RSI(%rsp), %rsi /* alist */
3393 1429 movq %rsp, %rdx /* struct regs */
3394 1430 movl %r11d, %ecx /* on_panic_stack */
3395 1431 call panicsys
3396 1432 addq $REGSIZE, %rsp
3397 1433 popq %rdi
3398 1434 popq %rsi
3399 1435 popq %rdx
3400 1436 popq %rcx
3401 1437 popq %r8
3402 1438 popq %r9
3403 1439 popq %rax
3404 1440 popq %rbx
3405 1441 popq %r10
3406 1442 popq %r11
3407 1443 popfq
3408 1444 leave
3409 1445 ret
3410 1446 SET_SIZE(vpanic)
3411 1447
3412 1448 ENTRY_NP(dtrace_vpanic) /* Initial stack layout: */
3413 1449
3414 1450 pushq %rbp /* | %rip | 0x60 */
3415 1451 movq %rsp, %rbp /* | %rbp | 0x58 */
3416 1452 pushfq /* | rfl | 0x50 */
3417 1453 pushq %r11 /* | %r11 | 0x48 */
3418 1454 pushq %r10 /* | %r10 | 0x40 */
3419 1455 pushq %rbx /* | %rbx | 0x38 */
3420 1456 pushq %rax /* | %rax | 0x30 */
3421 1457 pushq %r9 /* | %r9 | 0x28 */
3422 1458 pushq %r8 /* | %r8 | 0x20 */
3423 1459 pushq %rcx /* | %rcx | 0x18 */
3424 1460 pushq %rdx /* | %rdx | 0x10 */
3425 1461 pushq %rsi /* | %rsi | 0x8 alist */
3426 1462 pushq %rdi /* | %rdi | 0x0 format */
3427 1463
3428 1464 movq %rsp, %rbx /* %rbx = current %rsp */
3429 1465
3430 1466 leaq panic_quiesce(%rip), %rdi /* %rdi = &panic_quiesce */
3431 1467 call dtrace_panic_trigger /* %eax = dtrace_panic_trigger() */
3432 1468 jmp vpanic_common
3433 1469
3434 1470 SET_SIZE(dtrace_vpanic)
3435 1471
3436 -#elif defined(__i386)
3437 -
3438 - ENTRY_NP(vpanic) / Initial stack layout:
3439 -
3440 - pushl %ebp / | %eip | 20
3441 - movl %esp, %ebp / | %ebp | 16
3442 - pushl %eax / | %eax | 12
3443 - pushl %ebx / | %ebx | 8
3444 - pushl %ecx / | %ecx | 4
3445 - pushl %edx / | %edx | 0
3446 -
3447 - movl %esp, %ebx / %ebx = current stack pointer
3448 -
3449 - lea panic_quiesce, %eax / %eax = &panic_quiesce
3450 - pushl %eax / push &panic_quiesce
3451 - call panic_trigger / %eax = panic_trigger()
3452 - addl $4, %esp / reset stack pointer
3453 -
3454 -vpanic_common:
3455 - cmpl $0, %eax / if (%eax == 0)
3456 - je 0f / goto 0f;
3457 -
3458 - /*
3459 - * If panic_trigger() was successful, we are the first to initiate a
3460 - * panic: we now switch to the reserved panic_stack before continuing.
3461 - */
3462 - lea panic_stack, %esp / %esp = panic_stack
3463 - addl $PANICSTKSIZE, %esp / %esp += PANICSTKSIZE
3464 -
3465 -0: subl $REGSIZE, %esp / allocate struct regs
3466 -
3467 - /*
3468 - * Now that we've got everything set up, store the register values as
3469 - * they were when we entered vpanic() to the designated location in
3470 - * the regs structure we allocated on the stack.
3471 - */
3472 -#if !defined(__GNUC_AS__)
3473 - movw %gs, %edx
3474 - movl %edx, REGOFF_GS(%esp)
3475 - movw %fs, %edx
3476 - movl %edx, REGOFF_FS(%esp)
3477 - movw %es, %edx
3478 - movl %edx, REGOFF_ES(%esp)
3479 - movw %ds, %edx
3480 - movl %edx, REGOFF_DS(%esp)
3481 -#else /* __GNUC_AS__ */
3482 - mov %gs, %edx
3483 - mov %edx, REGOFF_GS(%esp)
3484 - mov %fs, %edx
3485 - mov %edx, REGOFF_FS(%esp)
3486 - mov %es, %edx
3487 - mov %edx, REGOFF_ES(%esp)
3488 - mov %ds, %edx
3489 - mov %edx, REGOFF_DS(%esp)
3490 -#endif /* __GNUC_AS__ */
3491 - movl %edi, REGOFF_EDI(%esp)
3492 - movl %esi, REGOFF_ESI(%esp)
3493 - movl 16(%ebx), %ecx
3494 - movl %ecx, REGOFF_EBP(%esp)
3495 - movl %ebx, %ecx
3496 - addl $20, %ecx
3497 - movl %ecx, REGOFF_ESP(%esp)
3498 - movl 8(%ebx), %ecx
3499 - movl %ecx, REGOFF_EBX(%esp)
3500 - movl 0(%ebx), %ecx
3501 - movl %ecx, REGOFF_EDX(%esp)
3502 - movl 4(%ebx), %ecx
3503 - movl %ecx, REGOFF_ECX(%esp)
3504 - movl 12(%ebx), %ecx
3505 - movl %ecx, REGOFF_EAX(%esp)
3506 - movl $0, REGOFF_TRAPNO(%esp)
3507 - movl $0, REGOFF_ERR(%esp)
3508 - lea vpanic, %ecx
3509 - movl %ecx, REGOFF_EIP(%esp)
3510 -#if !defined(__GNUC_AS__)
3511 - movw %cs, %edx
3512 -#else /* __GNUC_AS__ */
3513 - mov %cs, %edx
3514 -#endif /* __GNUC_AS__ */
3515 - movl %edx, REGOFF_CS(%esp)
3516 - pushfl
3517 - popl %ecx
3518 -#if defined(__xpv)
3519 - /*
3520 - * Synthesize the PS_IE bit from the event mask bit
3521 - */
3522 - CURTHREAD(%edx)
3523 - KPREEMPT_DISABLE(%edx)
3524 - EVENT_MASK_TO_IE(%edx, %ecx)
3525 - CURTHREAD(%edx)
3526 - KPREEMPT_ENABLE_NOKP(%edx)
3527 -#endif
3528 - movl %ecx, REGOFF_EFL(%esp)
3529 - movl $0, REGOFF_UESP(%esp)
3530 -#if !defined(__GNUC_AS__)
3531 - movw %ss, %edx
3532 -#else /* __GNUC_AS__ */
3533 - mov %ss, %edx
3534 -#endif /* __GNUC_AS__ */
3535 - movl %edx, REGOFF_SS(%esp)
3536 -
3537 - movl %esp, %ecx / %ecx = &regs
3538 - pushl %eax / push on_panic_stack
3539 - pushl %ecx / push &regs
3540 - movl 12(%ebp), %ecx / %ecx = alist
3541 - pushl %ecx / push alist
3542 - movl 8(%ebp), %ecx / %ecx = format
3543 - pushl %ecx / push format
3544 - call panicsys / panicsys();
3545 - addl $16, %esp / pop arguments
3546 -
3547 - addl $REGSIZE, %esp
3548 - popl %edx
3549 - popl %ecx
3550 - popl %ebx
3551 - popl %eax
3552 - leave
3553 - ret
3554 - SET_SIZE(vpanic)
3555 -
3556 - ENTRY_NP(dtrace_vpanic) / Initial stack layout:
3557 -
3558 - pushl %ebp / | %eip | 20
3559 - movl %esp, %ebp / | %ebp | 16
3560 - pushl %eax / | %eax | 12
3561 - pushl %ebx / | %ebx | 8
3562 - pushl %ecx / | %ecx | 4
3563 - pushl %edx / | %edx | 0
3564 -
3565 - movl %esp, %ebx / %ebx = current stack pointer
3566 -
3567 - lea panic_quiesce, %eax / %eax = &panic_quiesce
3568 - pushl %eax / push &panic_quiesce
3569 - call dtrace_panic_trigger / %eax = dtrace_panic_trigger()
3570 - addl $4, %esp / reset stack pointer
3571 - jmp vpanic_common / jump back to common code
3572 -
3573 - SET_SIZE(dtrace_vpanic)
3574 -
3575 -#endif /* __i386 */
3576 -#endif /* __lint */
3577 -
3578 -#if defined(__lint)
3579 -
3580 -void
3581 -hres_tick(void)
3582 -{}
3583 -
3584 -int64_t timedelta;
3585 -hrtime_t hrtime_base;
3586 -
3587 -#else /* __lint */
3588 -
3589 1472 DGDEF3(timedelta, 8, 8)
3590 1473 .long 0, 0
3591 1474
3592 1475 /*
3593 1476  * initialized to a non-zero value to make pc_gethrtime()
3594 1477  * work correctly even before the clock is initialized
3595 1478 */
3596 1479 DGDEF3(hrtime_base, 8, 8)
3597 1480 .long _MUL(NSEC_PER_CLOCK_TICK, 6), 0
3598 1481
3599 1482 DGDEF3(adj_shift, 4, 4)
3600 1483 .long ADJ_SHIFT
3601 1484
3602 -#if defined(__amd64)
3603 -
3604 1485 ENTRY_NP(hres_tick)
3605 1486 pushq %rbp
3606 1487 movq %rsp, %rbp
3607 1488
3608 1489 /*
3609 1490 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3610 1491 * hres_last_tick can only be modified while holding CLOCK_LOCK).
3611 1492 * At worst, performing this now instead of under CLOCK_LOCK may
3612 1493 * introduce some jitter in pc_gethrestime().
3613 1494 */
3614 1495 movq gethrtimef(%rip), %rsi
3615 1496 INDIRECT_CALL_REG(rsi)
3616 1497 movq %rax, %r8
3617 1498
3618 1499 leaq hres_lock(%rip), %rax
3619 1500 movb $-1, %dl
3620 1501 .CL1:
3621 1502 xchgb %dl, (%rax)
3622 1503 testb %dl, %dl
3623 1504 jz .CL3 /* got it */
3624 1505 .CL2:
3625 1506 cmpb $0, (%rax) /* possible to get lock? */
3626 1507 pause
3627 1508 jne .CL2
3628 1509 jmp .CL1 /* yes, try again */
3629 1510 .CL3:
3630 1511 /*
3631 1512 * compute the interval since last time hres_tick was called
3632 1513 * and adjust hrtime_base and hrestime accordingly
3633 1514 * hrtime_base is an 8 byte value (in nsec), hrestime is
3634 1515 * a timestruc_t (sec, nsec)
3635 1516 */
3636 1517 leaq hres_last_tick(%rip), %rax
3637 1518 movq %r8, %r11
3638 1519 subq (%rax), %r8
3639 1520 addq %r8, hrtime_base(%rip) /* add interval to hrtime_base */
3640 1521 addq %r8, hrestime+8(%rip) /* add interval to hrestime.tv_nsec */
3641 1522 /*
3642 1523 * Now that we have CLOCK_LOCK, we can update hres_last_tick
3643 1524 */
3644 1525 movq %r11, (%rax)
3645 1526
3646 1527 call __adj_hrestime
3647 1528
3648 1529 /*
3649 1530 * release the hres_lock
3650 1531 */
3651 1532 incl hres_lock(%rip)
3652 1533 leave
3653 1534 ret
3654 1535 SET_SIZE(hres_tick)
3655 1536
3656 -#elif defined(__i386)
3657 -
3658 - ENTRY_NP(hres_tick)
3659 - pushl %ebp
3660 - movl %esp, %ebp
3661 - pushl %esi
3662 - pushl %ebx
3663 -
3664 - /*
3665 - * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3666 - * hres_last_tick can only be modified while holding CLOCK_LOCK).
3667 - * At worst, performing this now instead of under CLOCK_LOCK may
3668 - * introduce some jitter in pc_gethrestime().
3669 - */
3670 - call *gethrtimef
3671 - movl %eax, %ebx
3672 - movl %edx, %esi
3673 -
3674 - movl $hres_lock, %eax
3675 - movl $-1, %edx
3676 -.CL1:
3677 - xchgb %dl, (%eax)
3678 - testb %dl, %dl
3679 - jz .CL3 / got it
3680 -.CL2:
3681 - cmpb $0, (%eax) / possible to get lock?
3682 - pause
3683 - jne .CL2
3684 - jmp .CL1 / yes, try again
3685 -.CL3:
3686 - /*
3687 - * compute the interval since last time hres_tick was called
3688 - * and adjust hrtime_base and hrestime accordingly
3689 - * hrtime_base is an 8 byte value (in nsec), hrestime is
3690 - * timestruc_t (sec, nsec)
3691 - */
3692 -
3693 - lea hres_last_tick, %eax
3694 -
3695 - movl %ebx, %edx
3696 - movl %esi, %ecx
3697 -
3698 - subl (%eax), %edx
3699 - sbbl 4(%eax), %ecx
3700 -
3701 - addl %edx, hrtime_base / add interval to hrtime_base
3702 - adcl %ecx, hrtime_base+4
3703 -
3704 - addl %edx, hrestime+4 / add interval to hrestime.tv_nsec
3705 -
3706 - /
3707 - / Now that we have CLOCK_LOCK, we can update hres_last_tick.
3708 - /
3709 - movl %ebx, (%eax)
3710 - movl %esi, 4(%eax)
3711 -
3712 - / get hrestime at this moment. used as base for pc_gethrestime
3713 - /
3714 - / Apply adjustment, if any
3715 - /
3716 - / #define HRES_ADJ (NSEC_PER_CLOCK_TICK >> ADJ_SHIFT)
3717 - / (max_hres_adj)
3718 - /
3719 - / void
3720 - / adj_hrestime()
3721 - / {
3722 - / long long adj;
3723 - /
3724 - / if (hrestime_adj == 0)
3725 - / adj = 0;
3726 - / else if (hrestime_adj > 0) {
3727 - / if (hrestime_adj < HRES_ADJ)
3728 - / adj = hrestime_adj;
3729 - / else
3730 - / adj = HRES_ADJ;
3731 - / }
3732 - / else {
3733 - / if (hrestime_adj < -(HRES_ADJ))
3734 - / adj = -(HRES_ADJ);
3735 - / else
3736 - / adj = hrestime_adj;
3737 - / }
3738 - /
3739 - / timedelta -= adj;
3740 - / hrestime_adj = timedelta;
3741 - / hrestime.tv_nsec += adj;
3742 - /
3743 - / while (hrestime.tv_nsec >= NANOSEC) {
3744 - / one_sec++;
3745 - / hrestime.tv_sec++;
3746 - / hrestime.tv_nsec -= NANOSEC;
3747 - / }
3748 - / }
3749 -__adj_hrestime:
3750 - movl hrestime_adj, %esi / if (hrestime_adj == 0)
3751 - movl hrestime_adj+4, %edx
3752 - andl %esi, %esi
3753 - jne .CL4 / no
3754 - andl %edx, %edx
3755 - jne .CL4 / no
3756 - subl %ecx, %ecx / yes, adj = 0;
3757 - subl %edx, %edx
3758 - jmp .CL5
3759 -.CL4:
3760 - subl %ecx, %ecx
3761 - subl %eax, %eax
3762 - subl %esi, %ecx
3763 - sbbl %edx, %eax
3764 - andl %eax, %eax / if (hrestime_adj > 0)
3765 - jge .CL6
3766 -
3767 - / In the following comments, HRES_ADJ is used, while in the code
3768 - / max_hres_adj is used.
3769 - /
3770 - / The test for "hrestime_adj < HRES_ADJ" is complicated because
3771 - / hrestime_adj is 64-bits, while HRES_ADJ is 32-bits. We rely
3772 - / on the logical equivalence of:
3773 - /
3774 - / !(hrestime_adj < HRES_ADJ)
3775 - /
3776 - / and the two step sequence:
3777 - /
3778 - / (HRES_ADJ - lsw(hrestime_adj)) generates a Borrow/Carry
3779 - /
3780 - / which computes whether or not the least significant 32-bits
3781 - / of hrestime_adj is greater than HRES_ADJ, followed by:
3782 - /
3783 - / Previous Borrow/Carry + -1 + msw(hrestime_adj) generates a Carry
3784 - /
3785 - / which generates a carry whenever step 1 is true or the most
3786 - / significant long of the longlong hrestime_adj is non-zero.
3787 -
3788 - movl max_hres_adj, %ecx / hrestime_adj is positive
3789 - subl %esi, %ecx
3790 - movl %edx, %eax
3791 - adcl $-1, %eax
3792 - jnc .CL7
3793 - movl max_hres_adj, %ecx / adj = HRES_ADJ;
3794 - subl %edx, %edx
3795 - jmp .CL5
3796 -
3797 - / The following computation is similar to the one above.
3798 - /
3799 - / The test for "hrestime_adj < -(HRES_ADJ)" is complicated because
3800 - / hrestime_adj is 64-bits, while HRES_ADJ is 32-bits. We rely
3801 - / on the logical equivalence of:
3802 - /
3803 - / (hrestime_adj > -HRES_ADJ)
3804 - /
3805 - / and the two step sequence:
3806 - /
3807 - / (HRES_ADJ + lsw(hrestime_adj)) generates a Carry
3808 - /
3809 - / which means the least significant 32-bits of hrestime_adj is
3810 - / greater than -HRES_ADJ, followed by:
3811 - /
3812 - / Previous Carry + 0 + msw(hrestime_adj) generates a Carry
3813 - /
3814 - / which generates a carry only when step 1 is true and the most
3815 - / significant long of the longlong hrestime_adj is -1.
3816 -
3817 -.CL6: / hrestime_adj is negative
3818 - movl %esi, %ecx
3819 - addl max_hres_adj, %ecx
3820 - movl %edx, %eax
3821 - adcl $0, %eax
3822 - jc .CL7
3823 - xor %ecx, %ecx
3824 - subl max_hres_adj, %ecx / adj = -(HRES_ADJ);
3825 - movl $-1, %edx
3826 - jmp .CL5
3827 -.CL7:
3828 - movl %esi, %ecx / adj = hrestime_adj;
3829 -.CL5:
3830 - movl timedelta, %esi
3831 - subl %ecx, %esi
3832 - movl timedelta+4, %eax
3833 - sbbl %edx, %eax
3834 - movl %esi, timedelta
3835 - movl %eax, timedelta+4 / timedelta -= adj;
3836 - movl %esi, hrestime_adj
3837 - movl %eax, hrestime_adj+4 / hrestime_adj = timedelta;
3838 - addl hrestime+4, %ecx
3839 -
3840 - movl %ecx, %eax / eax = tv_nsec
3841 -1:
3842 - cmpl $NANOSEC, %eax / if ((unsigned long)tv_nsec >= NANOSEC)
3843 - jb .CL8 / no
3844 - incl one_sec / yes, one_sec++;
3845 - incl hrestime / hrestime.tv_sec++;
3846 - addl $-NANOSEC, %eax / tv_nsec -= NANOSEC
3847 - jmp 1b / check for more seconds
3848 -
3849 -.CL8:
3850 - movl %eax, hrestime+4 / store final into hrestime.tv_nsec
3851 - incl hres_lock / release the hres_lock
3852 -
3853 - popl %ebx
3854 - popl %esi
3855 - leave
3856 - ret
3857 - SET_SIZE(hres_tick)
3858 -
3859 -#endif /* __i386 */
3860 -#endif /* __lint */
3861 -
3862 1537 /*
3863 1538 * void prefetch_smap_w(void *)
3864 1539 *
3865 1540 * Prefetch ahead within a linear list of smap structures.
3866 1541 * Not implemented for ia32. Stub for compatibility.
3867 1542 */
3868 1543
3869 -#if defined(__lint)
3870 -
3871 -/*ARGSUSED*/
3872 -void prefetch_smap_w(void *smp)
3873 -{}
3874 -
3875 -#else /* __lint */
3876 -
3877 1544 ENTRY(prefetch_smap_w)
3878 1545 rep; ret /* use 2 byte return instruction when branch target */
3879 1546 /* AMD Software Optimization Guide - Section 6.2 */
3880 1547 SET_SIZE(prefetch_smap_w)
3881 1548
3882 -#endif /* __lint */
3883 -
3884 1549 /*
3885 1550 * prefetch_page_r(page_t *)
3886 1551 * issue prefetch instructions for a page_t
3887 1552 */
3888 -#if defined(__lint)
3889 1553
3890 -/*ARGSUSED*/
3891 -void
3892 -prefetch_page_r(void *pp)
3893 -{}
3894 -
3895 -#else /* __lint */
3896 -
3897 1554 ENTRY(prefetch_page_r)
3898 1555 rep; ret /* use 2 byte return instruction when branch target */
3899 1556 /* AMD Software Optimization Guide - Section 6.2 */
3900 1557 SET_SIZE(prefetch_page_r)
3901 1558
3902 -#endif /* __lint */
3903 -
3904 -#if defined(__lint)
3905 -
3906 -/*ARGSUSED*/
3907 -int
3908 -bcmp(const void *s1, const void *s2, size_t count)
3909 -{ return (0); }
3910 -
3911 -#else /* __lint */
3912 -
3913 -#if defined(__amd64)
3914 -
3915 1559 ENTRY(bcmp)
3916 1560 pushq %rbp
3917 1561 movq %rsp, %rbp
3918 1562 #ifdef DEBUG
3919 1563 testq %rdx,%rdx
3920 1564 je 1f
3921 1565 movq postbootkernelbase(%rip), %r11
3922 1566 cmpq %r11, %rdi
3923 1567 jb 0f
3924 1568 cmpq %r11, %rsi
3925 1569 jnb 1f
3926 1570 0: leaq .bcmp_panic_msg(%rip), %rdi
3927 1571 xorl %eax, %eax
3928 1572 call panic
3929 1573 1:
3930 1574 #endif /* DEBUG */
3931 1575 call memcmp
3932 1576 testl %eax, %eax
3933 1577 setne %dl
3934 1578 leave
3935 1579 movzbl %dl, %eax
3936 1580 ret
3937 1581 SET_SIZE(bcmp)
3938 1582
3939 -#elif defined(__i386)
3940 -
3941 -#define ARG_S1 8
3942 -#define ARG_S2 12
3943 -#define ARG_LENGTH 16
3944 -
3945 - ENTRY(bcmp)
3946 - pushl %ebp
3947 - movl %esp, %ebp / create new stack frame
3948 1583 #ifdef DEBUG
3949 - cmpl $0, ARG_LENGTH(%ebp)
3950 - je 1f
3951 - movl postbootkernelbase, %eax
3952 - cmpl %eax, ARG_S1(%ebp)
3953 - jb 0f
3954 - cmpl %eax, ARG_S2(%ebp)
3955 - jnb 1f
3956 -0: pushl $.bcmp_panic_msg
3957 - call panic
3958 -1:
3959 -#endif /* DEBUG */
3960 -
3961 - pushl %edi / save register variable
3962 - movl ARG_S1(%ebp), %eax / %eax = address of string 1
3963 - movl ARG_S2(%ebp), %ecx / %ecx = address of string 2
3964 - cmpl %eax, %ecx / if the same string
3965 - je .equal / goto .equal
3966 - movl ARG_LENGTH(%ebp), %edi / %edi = length in bytes
3967 - cmpl $4, %edi / if %edi < 4
3968 - jb .byte_check / goto .byte_check
3969 - .align 4
3970 -.word_loop:
3971 - movl (%ecx), %edx / move 1 word from (%ecx) to %edx
3972 - leal -4(%edi), %edi / %edi -= 4
3973 - cmpl (%eax), %edx / compare 1 word from (%eax) with %edx
3974 - jne .word_not_equal / if not equal, goto .word_not_equal
3975 - leal 4(%ecx), %ecx / %ecx += 4 (next word)
3976 - leal 4(%eax), %eax / %eax += 4 (next word)
3977 - cmpl $4, %edi / if %edi >= 4
3978 - jae .word_loop / goto .word_loop
3979 -.byte_check:
3980 - cmpl $0, %edi / if %edi == 0
3981 - je .equal / goto .equal
3982 - jmp .byte_loop / goto .byte_loop (checks in bytes)
3983 -.word_not_equal:
3984 - leal 4(%edi), %edi / %edi += 4 (post-decremented)
3985 - .align 4
3986 -.byte_loop:
3987 - movb (%ecx), %dl / move 1 byte from (%ecx) to %dl
3988 - cmpb %dl, (%eax) / compare %dl with 1 byte from (%eax)
3989 - jne .not_equal / if not equal, goto .not_equal
3990 - incl %ecx / %ecx++ (next byte)
3991 - incl %eax / %eax++ (next byte)
3992 - decl %edi / %edi--
3993 - jnz .byte_loop / if not zero, goto .byte_loop
3994 -.equal:
3995 - xorl %eax, %eax / %eax = 0
3996 - popl %edi / restore register variable
3997 - leave / restore old stack frame
3998 - ret / return (NULL)
3999 - .align 4
4000 -.not_equal:
4001 - movl $1, %eax / return 1
4002 - popl %edi / restore register variable
4003 - leave / restore old stack frame
4004 - ret / return (NULL)
4005 - SET_SIZE(bcmp)
4006 -
4007 -#endif /* __i386 */
4008 -
4009 -#ifdef DEBUG
4010 1584 .text
4011 1585 .bcmp_panic_msg:
4012 1586 .string "bcmp: arguments below kernelbase"
4013 1587 #endif /* DEBUG */
4014 1588
4015 -#endif /* __lint */
4016 -
4017 -#if defined(__lint)
4018 -
4019 -uint_t
4020 -bsrw_insn(uint16_t mask)
4021 -{
4022 - uint_t index = sizeof (mask) * NBBY - 1;
4023 -
4024 - while ((mask & (1 << index)) == 0)
4025 - index--;
4026 - return (index);
4027 -}
4028 -
4029 -#else /* __lint */
4030 -
4031 -#if defined(__amd64)
4032 -
4033 1589 ENTRY_NP(bsrw_insn)
4034 1590 xorl %eax, %eax
4035 1591 bsrw %di, %ax
4036 1592 ret
4037 1593 SET_SIZE(bsrw_insn)
4038 1594
4039 -#elif defined(__i386)
4040 -
4041 - ENTRY_NP(bsrw_insn)
4042 - movw 4(%esp), %cx
4043 - xorl %eax, %eax
4044 - bsrw %cx, %ax
4045 - ret
4046 - SET_SIZE(bsrw_insn)
4047 -
4048 -#endif /* __i386 */
4049 -#endif /* __lint */
4050 -
4051 -#if defined(__lint)
4052 -
4053 -uint_t
4054 -atomic_btr32(uint32_t *pending, uint_t pil)
4055 -{
4056 - return (*pending &= ~(1 << pil));
4057 -}
4058 -
4059 -#else /* __lint */
4060 -
4061 -#if defined(__i386)
4062 -
4063 - ENTRY_NP(atomic_btr32)
4064 - movl 4(%esp), %ecx
4065 - movl 8(%esp), %edx
4066 - xorl %eax, %eax
4067 - lock
4068 - btrl %edx, (%ecx)
4069 - setc %al
4070 - ret
4071 - SET_SIZE(atomic_btr32)
4072 -
4073 -#endif /* __i386 */
4074 -#endif /* __lint */
4075 -
4076 -#if defined(__lint)
4077 -
4078 -/*ARGSUSED*/
4079 -void
4080 -switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
4081 - uint_t arg2)
4082 -{}
4083 -
4084 -#else /* __lint */
4085 -
4086 -#if defined(__amd64)
4087 -
4088 1595 ENTRY_NP(switch_sp_and_call)
4089 1596 pushq %rbp
4090 1597 movq %rsp, %rbp /* set up stack frame */
4091 1598 movq %rdi, %rsp /* switch stack pointer */
4092 1599 movq %rdx, %rdi /* pass func arg 1 */
4093 1600 movq %rsi, %r11 /* save function to call */
4094 1601 movq %rcx, %rsi /* pass func arg 2 */
4095 1602 INDIRECT_CALL_REG(r11) /* call function */
4096 1603 leave /* restore stack */
4097 1604 ret
4098 1605 SET_SIZE(switch_sp_and_call)
4099 1606
4100 -#elif defined(__i386)
4101 -
4102 - ENTRY_NP(switch_sp_and_call)
4103 - pushl %ebp
4104 - mov %esp, %ebp /* set up stack frame */
4105 - movl 8(%ebp), %esp /* switch stack pointer */
4106 - pushl 20(%ebp) /* push func arg 2 */
4107 - pushl 16(%ebp) /* push func arg 1 */
4108 - call *12(%ebp) /* call function */
4109 - addl $8, %esp /* pop arguments */
4110 - leave /* restore stack */
4111 - ret
4112 - SET_SIZE(switch_sp_and_call)
4113 -
4114 -#endif /* __i386 */
4115 -#endif /* __lint */
4116 -
4117 -#if defined(__lint)
4118 -
4119 -void
4120 -kmdb_enter(void)
4121 -{}
4122 -
4123 -#else /* __lint */
4124 -
4125 -#if defined(__amd64)
4126 -
4127 1607 ENTRY_NP(kmdb_enter)
4128 1608 pushq %rbp
4129 1609 movq %rsp, %rbp
4130 1610
4131 1611 /*
4132 1612 * Save flags, do a 'cli' then return the saved flags
4133 1613 */
4134 1614 call intr_clear
4135 1615
4136 1616 int $T_DBGENTR
4137 1617
4138 1618 /*
4139 1619 * Restore the saved flags
4140 1620 */
4141 1621 movq %rax, %rdi
4142 1622 call intr_restore
4143 1623
4144 1624 leave
4145 1625 ret
4146 1626 SET_SIZE(kmdb_enter)
4147 1627
4148 -#elif defined(__i386)
4149 -
4150 - ENTRY_NP(kmdb_enter)
4151 - pushl %ebp
4152 - movl %esp, %ebp
4153 -
4154 - /*
4155 - * Save flags, do a 'cli' then return the saved flags
4156 - */
4157 - call intr_clear
4158 -
4159 - int $T_DBGENTR
4160 -
4161 - /*
4162 - * Restore the saved flags
4163 - */
4164 - pushl %eax
4165 - call intr_restore
4166 - addl $4, %esp
4167 -
4168 - leave
4169 - ret
4170 - SET_SIZE(kmdb_enter)
4171 -
4172 -#endif /* __i386 */
4173 -#endif /* __lint */
4174 -
4175 -#if defined(__lint)
4176 -
4177 -void
4178 -return_instr(void)
4179 -{}
4180 -
4181 -#else /* __lint */
4182 -
4183 1628 ENTRY_NP(return_instr)
4184 1629 rep; ret /* use 2 byte instruction when branch target */
4185 1630 /* AMD Software Optimization Guide - Section 6.2 */
4186 1631 SET_SIZE(return_instr)
4187 1632
4188 -#endif /* __lint */
4189 -
4190 -#if defined(__lint)
4191 -
4192 -ulong_t
4193 -getflags(void)
4194 -{
4195 - return (0);
4196 -}
4197 -
4198 -#else /* __lint */
4199 -
4200 -#if defined(__amd64)
4201 -
4202 1633 ENTRY(getflags)
4203 1634 pushfq
4204 1635 popq %rax
4205 1636 #if defined(__xpv)
4206 1637 CURTHREAD(%rdi)
4207 1638 KPREEMPT_DISABLE(%rdi)
4208 1639 /*
4209 1640 * Synthesize the PS_IE bit from the event mask bit
4210 1641 */
4211 1642 CURVCPU(%r11)
4212 1643 andq $_BITNOT(PS_IE), %rax
4213 1644 XEN_TEST_UPCALL_MASK(%r11)
4214 1645 jnz 1f
4215 1646 orq $PS_IE, %rax
4216 1647 1:
4217 1648 KPREEMPT_ENABLE_NOKP(%rdi)
4218 1649 #endif
4219 1650 ret
4220 1651 SET_SIZE(getflags)
4221 1652
4222 -#elif defined(__i386)
4223 -
4224 - ENTRY(getflags)
4225 - pushfl
4226 - popl %eax
4227 -#if defined(__xpv)
4228 - CURTHREAD(%ecx)
4229 - KPREEMPT_DISABLE(%ecx)
4230 - /*
4231 - * Synthesize the PS_IE bit from the event mask bit
4232 - */
4233 - CURVCPU(%edx)
4234 - andl $_BITNOT(PS_IE), %eax
4235 - XEN_TEST_UPCALL_MASK(%edx)
4236 - jnz 1f
4237 - orl $PS_IE, %eax
4238 -1:
4239 - KPREEMPT_ENABLE_NOKP(%ecx)
4240 -#endif
4241 - ret
4242 - SET_SIZE(getflags)
4243 -
4244 -#endif /* __i386 */
4245 -
4246 -#endif /* __lint */
4247 -
4248 -#if defined(__lint)
4249 -
4250 -ftrace_icookie_t
4251 -ftrace_interrupt_disable(void)
4252 -{ return (0); }
4253 -
4254 -#else /* __lint */
4255 -
4256 -#if defined(__amd64)
4257 -
4258 1653 ENTRY(ftrace_interrupt_disable)
4259 1654 pushfq
4260 1655 popq %rax
4261 1656 CLI(%rdx)
4262 1657 ret
4263 1658 SET_SIZE(ftrace_interrupt_disable)
4264 1659
4265 -#elif defined(__i386)
4266 -
4267 - ENTRY(ftrace_interrupt_disable)
4268 - pushfl
4269 - popl %eax
4270 - CLI(%edx)
4271 - ret
4272 - SET_SIZE(ftrace_interrupt_disable)
4273 -
4274 -#endif /* __i386 */
4275 -#endif /* __lint */
4276 -
4277 -#if defined(__lint)
4278 -
4279 -/*ARGSUSED*/
4280 -void
4281 -ftrace_interrupt_enable(ftrace_icookie_t cookie)
4282 -{}
4283 -
4284 -#else /* __lint */
4285 -
4286 -#if defined(__amd64)
4287 -
4288 1660 ENTRY(ftrace_interrupt_enable)
4289 1661 pushq %rdi
4290 1662 popfq
4291 1663 ret
4292 1664 SET_SIZE(ftrace_interrupt_enable)
4293 1665
4294 -#elif defined(__i386)
4295 -
4296 - ENTRY(ftrace_interrupt_enable)
4297 - movl 4(%esp), %eax
4298 - pushl %eax
4299 - popfl
4300 - ret
4301 - SET_SIZE(ftrace_interrupt_enable)
4302 -
4303 -#endif /* __i386 */
4304 -#endif /* __lint */
4305 -
4306 -#if defined (__lint)
4307 -
4308 -/*ARGSUSED*/
4309 -void
4310 -clflush_insn(caddr_t addr)
4311 -{}
4312 -
4313 -#else /* __lint */
4314 -
4315 -#if defined (__amd64)
4316 1666 ENTRY(clflush_insn)
4317 1667 clflush (%rdi)
4318 1668 ret
4319 1669 SET_SIZE(clflush_insn)
4320 -#elif defined (__i386)
4321 - ENTRY(clflush_insn)
4322 - movl 4(%esp), %eax
4323 - clflush (%eax)
4324 - ret
4325 - SET_SIZE(clflush_insn)
4326 1670
4327 -#endif /* __i386 */
4328 -#endif /* __lint */
4329 -
4330 -#if defined (__lint)
4331 -/*ARGSUSED*/
4332 -void
4333 -mfence_insn(void)
4334 -{}
4335 -
4336 -#else /* __lint */
4337 -
4338 -#if defined (__amd64)
4339 1671 ENTRY(mfence_insn)
4340 1672 mfence
4341 1673 ret
4342 1674 SET_SIZE(mfence_insn)
4343 -#elif defined (__i386)
4344 - ENTRY(mfence_insn)
4345 - mfence
4346 - ret
4347 - SET_SIZE(mfence_insn)
4348 1675
4349 -#endif /* __i386 */
4350 -#endif /* __lint */
4351 -
4352 1676 /*
4353 1677 * VMware implements an I/O port that programs can query to detect if software
4354 1678 * is running in a VMware hypervisor. This hypervisor port behaves differently
4355 1679 * depending on magic values in certain registers and modifies some registers
4356 1680 * as a side effect.
4357 1681 *
4358 1682 * References: http://kb.vmware.com/kb/1009458
4359 1683 */
4360 1684
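A hedged usage sketch: callers pass a command and a four-word array; the routine loads the magic into %eax and the command into %ecx, issues inl on the backdoor port, and stores the resulting %eax/%ebx/%ecx/%edx into the array. VMWARE_HV_VERSION_CMD below is a hypothetical command name; only VMWARE_HVMAGIC and VMWARE_HVPORT appear in the code:

	uint32_t regs[4];	/* %eax, %ebx, %ecx, %edx after the inl */

	vmware_port(VMWARE_HV_VERSION_CMD, regs);	/* hypothetical cmd */
	if (regs[1] == VMWARE_HVMAGIC) {
		/* the hypervisor echoed the magic: running under VMware */
	}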
4361 -#if defined(__lint)
4362 -
4363 -/* ARGSUSED */
4364 -void
4365 -vmware_port(int cmd, uint32_t *regs) { return; }
4366 -
4367 -#else
4368 -
4369 -#if defined(__amd64)
4370 -
4371 1685 ENTRY(vmware_port)
4372 1686 pushq %rbx
4373 1687 movl $VMWARE_HVMAGIC, %eax
4374 1688 movl $0xffffffff, %ebx
4375 1689 movl %edi, %ecx
4376 1690 movl $VMWARE_HVPORT, %edx
4377 1691 inl (%dx)
4378 1692 movl %eax, (%rsi)
4379 1693 movl %ebx, 4(%rsi)
4380 1694 movl %ecx, 8(%rsi)
4381 1695 movl %edx, 12(%rsi)
4382 1696 popq %rbx
4383 1697 ret
4384 1698 SET_SIZE(vmware_port)
4385 1699
4386 -#elif defined(__i386)
4387 -
4388 - ENTRY(vmware_port)
4389 - pushl %ebx
4390 - pushl %esi
4391 - movl $VMWARE_HVMAGIC, %eax
4392 - movl $0xffffffff, %ebx
4393 - movl 12(%esp), %ecx
4394 - movl $VMWARE_HVPORT, %edx
4395 - inl (%dx)
4396 - movl 16(%esp), %esi
4397 - movl %eax, (%esi)
4398 - movl %ebx, 4(%esi)
4399 - movl %ecx, 8(%esi)
4400 - movl %edx, 12(%esi)
4401 - popl %esi
4402 - popl %ebx
4403 - ret
4404 - SET_SIZE(vmware_port)
4405 -
4406 -#endif /* __i386 */
4407 -#endif /* __lint */