	leave
	ret
	SET_SIZE(i86_monitor)

#endif	/* __i386 */
#endif	/* __lint */

#if defined(__lint)

/*ARGSUSED*/
void
i86_mwait(uint32_t data, uint32_t extensions)
{}
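/*
 * Under __lint, this file presents empty C stubs like the one above so that
 * lint checks callers against an ordinary C prototype; the real
 * implementation is the assembly under the matching #else branch.
 */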

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(i86_mwait)
	pushq	%rbp
	call	x86_md_clear
	movq	%rsp, %rbp
	movq	%rdi, %rax		/* data */
	movq	%rsi, %rcx		/* extensions */
	.byte	0x0f, 0x01, 0xc9	/* mwait */
	leave
	ret
	SET_SIZE(i86_mwait)

#elif defined(__i386)

	ENTRY_NP(i86_mwait)
	pushl	%ebp
	movl	%esp, %ebp
	movl	0x8(%ebp), %eax		/* data */
	movl	0xc(%ebp), %ecx		/* extensions */
	.byte	0x0f, 0x01, 0xc9	/* mwait */
	leave
	ret
	SET_SIZE(i86_mwait)
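
/*
 * The mwait instruction is hand-assembled above (.byte 0x0f, 0x01, 0xc9)
 * rather than written as a mnemonic, presumably so that the file still
 * builds with assemblers that predate monitor/mwait support.  Per the
 * instruction definition, %eax carries the hints and %ecx the extensions.
 *
 * A minimal sketch of the usual idle-loop pairing with i86_monitor(),
 * assuming its (addr, extensions, hints) signature; the flag variable is
 * hypothetical, not from this file:
 *
 *	volatile uint32_t *flagp = ...;
 *	*flagp = 1;				mark this CPU idle
 *	i86_monitor(flagp, 0, 0);		arm the address monitor
 *	if (*flagp == 1)			re-check before sleeping
 *		i86_mwait(0, 0);		wait for a write to *flagp
 */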

	jmp	.ip_csum_aligned

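/*
 * Tail handling for the final bytes of the buffer: %ecx arrives holding
 * the remaining halfword count less 32 (restored by the addl $32 below).
 * If the restored count is odd, the trailing halfword is folded into the
 * running sum in %edx first.  The even remainder then indexes
 * .ip_ocsum_jmptbl; each .onlyN entry (bodies not shown in this excerpt)
 * appears to sum exactly N remaining bytes, and %rsi is biased by
 * (remaining bytes - 64) beforehand so those entries can use fixed
 * displacements.  INDIRECT_JMP_REG is the retpoline-safe indirect-jump
 * macro provided with the other assembler linkage macros.
 */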
.less_than_32:
	addl	$32, %ecx
	testl	$1, %ecx
	jz	.size_aligned
	andl	$0xfe, %ecx
	movzwl	(%rsi, %rcx, 2), %edi
	addl	%edi, %edx
	adcl	$0, %edx
.size_aligned:
	movl	%ecx, %edi
	shrl	$1, %ecx
	shll	$1, %edi
	subq	$64, %rdi
	addq	%rdi, %rsi
	leaq	.ip_ocsum_jmptbl(%rip), %rdi
	leaq	(%rdi, %rcx, 8), %rdi
	xorl	%ecx, %ecx
	clc
	movq	(%rdi), %rdi
	INDIRECT_JMP_REG(rdi)

	.align	8
.ip_ocsum_jmptbl:
	.quad	.only0, .only4, .only8, .only12, .only16, .only20
	.quad	.only24, .only28, .only32, .only36, .only40, .only44
	.quad	.only48, .only52, .only56, .only60
	SET_SIZE(ip_ocsum)

#elif defined(__i386)

	ENTRY(ip_ocsum)
	pushl	%ebp
	movl	%esp, %ebp
	pushl	%ebx
	pushl	%esi
	pushl	%edi
	movl	12(%ebp), %ecx	/* count of half words */
	movl	16(%ebp), %edx	/* partial checksum */
	movl	8(%ebp), %esi
	xorl	%eax, %eax
 * work correctly even before clock is initialized
 */
	DGDEF3(hrtime_base, 8, 8)
	.long	_MUL(NSEC_PER_CLOCK_TICK, 6), 0
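/*
 * The two .long values above form one little-endian 64-bit quantity, i.e.
 * hrtime_base starts out at six clock ticks' worth of nanoseconds (high
 * word zero) so that hrtime users see a sane value even before the first
 * clock tick, per the comment above.
 */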

	DGDEF3(adj_shift, 4, 4)
	.long	ADJ_SHIFT

#if defined(__amd64)

	ENTRY_NP(hres_tick)
	pushq	%rbp
	movq	%rsp, %rbp

	/*
	 * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
	 * hres_last_tick can only be modified while holding CLOCK_LOCK).
	 * At worst, performing this now instead of under CLOCK_LOCK may
	 * introduce some jitter in pc_gethrestime().
	 */
	movq	gethrtimef(%rip), %rsi
	INDIRECT_CALL_REG(rsi)
	movq	%rax, %r8

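	/*
	 * Acquire hres_lock (CLOCK_LOCK) with a test-and-test-and-set loop:
	 * xchgb atomically swaps -1 into the lock byte and returns the old
	 * value in %dl; zero means we got the lock.  On contention, .CL2
	 * spins on a plain read (with pause, to play nicely with SMT
	 * siblings) and retries the atomic xchgb only once the byte reads
	 * zero again, which keeps the lock's cache line from bouncing.
	 */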
	leaq	hres_lock(%rip), %rax
	movb	$-1, %dl
.CL1:
	xchgb	%dl, (%rax)
	testb	%dl, %dl
	jz	.CL3			/* got it */
.CL2:
	cmpb	$0, (%rax)		/* possible to get lock? */
	pause
	jne	.CL2
	jmp	.CL1			/* yes, try again */
.CL3:
	/*
	 * compute the interval since last time hres_tick was called
	 * and adjust hrtime_base and hrestime accordingly
	 * hrtime_base is an 8 byte value (in nsec), hrestime is
	 * a timestruc_t (sec, nsec)
	 */

#if defined(__lint)

/*ARGSUSED*/
void
switch_sp_and_call(void *newsp, void (*func)(uint_t, uint_t), uint_t arg1,
    uint_t arg2)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(switch_sp_and_call)
	pushq	%rbp
	movq	%rsp, %rbp		/* set up stack frame */
	movq	%rdi, %rsp		/* switch stack pointer */
	movq	%rdx, %rdi		/* pass func arg 1 */
	movq	%rsi, %r11		/* save function to call */
	movq	%rcx, %rsi		/* pass func arg 2 */
	INDIRECT_CALL_REG(r11)		/* call function */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)
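
/*
 * Note that a plain `leave` is enough to switch back: %rbp was set to the
 * original frame before %rsp was redirected to newsp, so `leave` (movq
 * %rbp, %rsp; popq %rbp) returns us to the original stack no matter what
 * func did on the new one.  INDIRECT_CALL_REG is the retpoline-safe
 * replacement for `call *%r11`.
 */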

#elif defined(__i386)

	ENTRY_NP(switch_sp_and_call)
	pushl	%ebp
	movl	%esp, %ebp		/* set up stack frame */
	movl	8(%ebp), %esp		/* switch stack pointer */
	pushl	20(%ebp)		/* push func arg 2 */
	pushl	16(%ebp)		/* push func arg 1 */
	call	*12(%ebp)		/* call function */
	addl	$8, %esp		/* pop arguments */
	leave				/* restore stack */
	ret
	SET_SIZE(switch_sp_and_call)
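
/*
 * The i386 variant takes its arguments from the stack per the cdecl
 * convention (newsp at 8(%ebp), func at 12(%ebp), the two uint_t args at
 * 16 and 20) and pushes the arguments onto the new stack before the call.
 * It retains the plain indirect `call *12(%ebp)`; the retpoline-safe
 * macros are applied only on the amd64 side here.
 */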

#endif	/* __i386 */
#endif	/* __lint */