11787 Kernel needs to be built with retpolines
11788 Kernel needs to generally use RSB stuffing
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: John Levon <john.levon@joyent.com>
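
Throughout this diff, indirect branches such as "call *x86_md_clear",
"call *gethrtimef(%rip)", "call *%r11", and "jmp *(%rdi)" are rewritten
either as a direct call (as with x86_md_clear) or as a load into a
register followed by the INDIRECT_CALL_REG/INDIRECT_JMP_REG macros, so
that the build can route the branch through a retpoline thunk. For
orientation, a minimal sketch of the classic retpoline thunk shape for a
branch through %r11 follows; the thunk name and body here are assumptions
about the general technique, not the kernel's actual implementation.

        /*
         * Hypothetical retpoline thunk (illustration only; the real
         * thunk emitted for this kernel may differ). With retpolines,
         * "call *%r11" becomes "call __x86_indirect_thunk_r11", and
         * likewise for jmp.
         */
        ENTRY(__x86_indirect_thunk_r11)
        call    2f              /* pushes 1: as the return address */
1:
        pause                   /* mispredicted speculation spins here */
        lfence
        jmp     1b
2:
        movq    %r11, (%rsp)    /* overwrite return address with target */
        ret                     /* architecturally branches to target */
        SET_SIZE(__x86_indirect_thunk_r11)

The ret is predicted (via the RSB) to return to the pause/lfence loop, so
any speculation is harmlessly trapped, while the architectural branch goes
to the real target copied from %r11.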

          --- old/usr/src/uts/intel/ia32/ml/i86_subr.s
          +++ new/usr/src/uts/intel/ia32/ml/i86_subr.s
... 747 lines elided ...
 748  748  void
 749  749  i86_mwait(uint32_t data, uint32_t extensions)
 750  750  {}
 751  751  
 752  752  #else   /* __lint */
 753  753  
 754  754  #if defined(__amd64)
 755  755  
 756  756          ENTRY_NP(i86_mwait)
 757  757          pushq   %rbp
 758      -        call    *x86_md_clear
      758 +        call    x86_md_clear
 759  759          movq    %rsp, %rbp
 760  760          movq    %rdi, %rax              /* data */
 761  761          movq    %rsi, %rcx              /* extensions */
 762  762          .byte   0x0f, 0x01, 0xc9        /* mwait */
 763  763          leave
 764  764          ret
 765  765          SET_SIZE(i86_mwait)
 766  766  
 767  767  #elif defined(__i386)
 768  768  
... 509 lines elided ...
1278 1278          movl    $50000, %ebx
1279 1279  1:
1280 1280          call    tenmicrosec
1281 1281          decl    %ebx
1282 1282          jnz     1b
1283 1283  #if defined(__amd64)
1284 1284          popq    %rbx
1285 1285  #elif defined(__i386)
1286 1286          pop     %ebx
1287 1287  #endif
1288      -        ret     
     1288 +        ret
1289 1289          SET_SIZE(wait_500ms)
1290 1290  
1291 1291  #define RESET_METHOD_KBC        1
1292 1292  #define RESET_METHOD_PORT92     2
1293 1293  #define RESET_METHOD_PCI        4
1294 1294  
1295 1295          DGDEF3(pc_reset_methods, 4, 8)
1296 1296          .long RESET_METHOD_KBC|RESET_METHOD_PORT92|RESET_METHOD_PCI;
1297 1297  
1298 1298          ENTRY(pc_reset)
... 377 lines elided ...
1676 1676  /* ARGSUSED */
1677 1677  void
1678 1678  repinsb(int port, uint8_t *addr, int count)
1679 1679  {}
1680 1680  
1681 1681  #else   /* __lint */
1682 1682  
1683 1683  #if defined(__amd64)
1684 1684  
1685 1685          ENTRY(repinsb)
1686      -        movl    %edx, %ecx      
     1686 +        movl    %edx, %ecx
1687 1687          movw    %di, %dx
1688 1688          movq    %rsi, %rdi
1689 1689          rep
1690 1690            insb
1691      -        ret             
     1691 +        ret
1692 1692          SET_SIZE(repinsb)
1693 1693  
1694 1694  #elif defined(__i386)
1695      -        
     1695 +
1696 1696          /*
1697 1697           * The arguments and saved registers are on the stack in the
1698 1698           *  following order:
1699 1699           *      |  cnt  |  +16
1700 1700           *      | *addr |  +12
1701 1701           *      | port  |  +8
1702 1702           *      |  eip  |  +4
1703 1703           *      |  esi  |  <-- %esp
1704 1704           * If additional values are pushed onto the stack, make sure
1705 1705           * to adjust the following constants accordingly.
... 24 lines elided ...
1730 1730  #if defined(__lint)
1731 1731  
1732 1732  /* ARGSUSED */
1733 1733  void
1734 1734  repinsd(int port, uint32_t *addr, int count)
1735 1735  {}
1736 1736  
1737 1737  #else   /* __lint */
1738 1738  
1739 1739  #if defined(__amd64)
1740      -        
     1740 +
1741 1741          ENTRY(repinsd)
1742 1742          movl    %edx, %ecx
1743 1743          movw    %di, %dx
1744 1744          movq    %rsi, %rdi
1745 1745          rep
1746 1746            insl
1747 1747          ret
1748 1748          SET_SIZE(repinsd)
1749 1749  
1750 1750  #elif defined(__i386)
... 25 lines elided ...
1776 1776  
1777 1777  #else   /* __lint */
1778 1778  
1779 1779  #if defined(__amd64)
1780 1780  
1781 1781          ENTRY(repoutsb)
1782 1782          movl    %edx, %ecx
1783 1783          movw    %di, %dx
1784 1784          rep
1785 1785            outsb
1786      -        ret     
     1786 +        ret
1787 1787          SET_SIZE(repoutsb)
1788 1788  
1789 1789  #elif defined(__i386)
1790 1790  
1791 1791          ENTRY(repoutsb)
1792 1792          pushl   %esi
1793 1793          movl    IO_ADDR(%esp), %esi
1794 1794          movl    IO_COUNT(%esp), %ecx
1795 1795          movl    IO_PORT(%esp), %edx
1796 1796          rep
1797 1797            outsb
1798 1798          popl    %esi
1799 1799          ret
1800 1800          SET_SIZE(repoutsb)
1801 1801  
1802      -#endif  /* __i386 */    
     1802 +#endif  /* __i386 */
1803 1803  #endif  /* __lint */
1804 1804  
1805 1805  /*
1806 1806   * Output a stream of 32-bit words
1807 1807   * NOTE: count is a DWORD count
1808 1808   */
1809 1809  #if defined(__lint)
1810 1810  
1811 1811  /* ARGSUSED */
1812 1812  void
... 2 lines elided ...
1815 1815  
1816 1816  #else   /* __lint */
1817 1817  
1818 1818  #if defined(__amd64)
1819 1819  
1820 1820          ENTRY(repoutsd)
1821 1821          movl    %edx, %ecx
1822 1822          movw    %di, %dx
1823 1823          rep
1824 1824            outsl
1825      -        ret     
     1825 +        ret
1826 1826          SET_SIZE(repoutsd)
1827 1827  
1828 1828  #elif defined(__i386)
1829 1829  
1830 1830          ENTRY(repoutsd)
1831 1831          pushl   %esi
1832 1832          movl    IO_ADDR(%esp), %esi
1833 1833          movl    IO_COUNT(%esp), %ecx
1834 1834          movl    IO_PORT(%esp), %edx
1835 1835          rep
... 440 lines elided ...
2276 2276          jnz     .dtrace_interrupt_disable_done
2277 2277          orq     $PS_IE, %rax
2278 2278  #else
2279 2279          CLI(%rdx)
2280 2280  #endif
2281 2281  .dtrace_interrupt_disable_done:
2282 2282          ret
2283 2283          SET_SIZE(dtrace_interrupt_disable)
2284 2284  
2285 2285  #elif defined(__i386)
2286      -                
     2286 +
2287 2287          ENTRY(dtrace_interrupt_disable)
2288 2288          pushfl
2289 2289          popl    %eax
2290 2290  #if defined(__xpv)
2291 2291          leal    xpv_panicking, %edx
2292 2292          movl    (%edx), %edx
2293 2293          cmpl    $0, %edx
2294 2294          jne     .dtrace_interrupt_disable_done
2295 2295          CLIRET(%edx, %cl)       /* returns event mask in %cl */
2296 2296          /*
... 3 lines elided ...
2300 2300          testb   $1, %cl
2301 2301          jnz     .dtrace_interrupt_disable_done
2302 2302          orl     $PS_IE, %eax
2303 2303  #else
2304 2304          CLI(%edx)
2305 2305  #endif
2306 2306  .dtrace_interrupt_disable_done:
2307 2307          ret
2308 2308          SET_SIZE(dtrace_interrupt_disable)
2309 2309  
2310      -#endif  /* __i386 */    
     2310 +#endif  /* __i386 */
2311 2311  #endif  /* __lint */
2312 2312  
2313 2313  #if defined(__lint)
2314 2314  
2315 2315  /*ARGSUSED*/
2316 2316  void
2317 2317  dtrace_interrupt_enable(dtrace_icookie_t cookie)
2318 2318  {}
2319 2319  
2320 2320  #else   /* __lint */
... 13 lines elided ...
2334 2334           * to change the state of the IF bit will be ignored. The
2335 2335           * virtual IF bit is tweaked by CLI and STI.
2336 2336           */
2337 2337          IE_TO_EVENT_MASK(%rdx, %rdi)
2338 2338  #endif
2339 2339  .dtrace_interrupt_enable_done:
2340 2340          ret
2341 2341          SET_SIZE(dtrace_interrupt_enable)
2342 2342  
2343 2343  #elif defined(__i386)
2344      -                
     2344 +
2345 2345          ENTRY(dtrace_interrupt_enable)
2346 2346          movl    4(%esp), %eax
2347 2347          pushl   %eax
2348 2348          popfl
2349 2349  #if defined(__xpv)
2350 2350          leal    xpv_panicking, %edx
2351 2351          movl    (%edx), %edx
2352 2352          cmpl    $0, %edx
2353 2353          jne     .dtrace_interrupt_enable_done
2354 2354          /*
2355 2355           * Since we're -really- running unprivileged, our attempt
2356 2356           * to change the state of the IF bit will be ignored. The
2357 2357           * virtual IF bit is tweaked by CLI and STI.
2358 2358           */
2359 2359          IE_TO_EVENT_MASK(%edx, %eax)
2360 2360  #endif
2361 2361  .dtrace_interrupt_enable_done:
2362 2362          ret
2363 2363          SET_SIZE(dtrace_interrupt_enable)
2364 2364  
2365      -#endif  /* __i386 */    
     2365 +#endif  /* __i386 */
2366 2366  #endif  /* __lint */
2367 2367  
2368 2368  
2369 2369  #if defined(lint)
2370 2370  
2371 2371  void
2372 2372  dtrace_membar_producer(void)
2373 2373  {}
2374 2374  
2375 2375  void
... 16 lines elided ...
2392 2392  
2393 2393  #if defined(__lint)
2394 2394  
2395 2395  kthread_id_t
2396 2396  threadp(void)
2397 2397  { return ((kthread_id_t)0); }
2398 2398  
2399 2399  #else   /* __lint */
2400 2400  
2401 2401  #if defined(__amd64)
2402      -        
     2402 +
2403 2403          ENTRY(threadp)
2404 2404          movq    %gs:CPU_THREAD, %rax
2405 2405          ret
2406 2406          SET_SIZE(threadp)
2407 2407  
2408 2408  #elif defined(__i386)
2409 2409  
2410 2410          ENTRY(threadp)
2411 2411          movl    %gs:CPU_THREAD, %eax
2412 2412          ret
... 7 lines elided ...
2420 2420   */
2421 2421  
2422 2422  #if defined(__lint)
2423 2423  
2424 2424  /* ARGSUSED */
2425 2425  unsigned int
2426 2426  ip_ocsum(
2427 2427          ushort_t *address,      /* ptr to 1st message buffer */
2428 2428          int halfword_count,     /* length of data */
2429 2429          unsigned int sum)       /* partial checksum */
2430      -{ 
     2430 +{
2431 2431          int             i;
2432 2432          unsigned int    psum = 0;       /* partial sum */
2433 2433  
2434 2434          for (i = 0; i < halfword_count; i++, address++) {
2435 2435                  psum += *address;
2436 2436          }
2437 2437  
2438 2438          while ((psum >> 16) != 0) {
2439 2439                  psum = (psum & 0xffff) + (psum >> 16);
2440 2440          }
... 112 lines elided ...
2553 2553  .size_aligned:
2554 2554          movl    %ecx, %edi
2555 2555          shrl    $1, %ecx
2556 2556          shl     $1, %edi
2557 2557          subq    $64, %rdi
2558 2558          addq    %rdi, %rsi
2559 2559          leaq    .ip_ocsum_jmptbl(%rip), %rdi
2560 2560          leaq    (%rdi, %rcx, 8), %rdi
2561 2561          xorl    %ecx, %ecx
2562 2562          clc
2563      -        jmp     *(%rdi)
     2563 +        movq    (%rdi), %rdi
     2564 +        INDIRECT_JMP_REG(rdi)
2564 2565  
2565 2566          .align  8
2566 2567  .ip_ocsum_jmptbl:
2567 2568          .quad   .only0, .only4, .only8, .only12, .only16, .only20
2568 2569          .quad   .only24, .only28, .only32, .only36, .only40, .only44
2569 2570          .quad   .only48, .only52, .only56, .only60
2570 2571          SET_SIZE(ip_ocsum)
2571 2572  
2572 2573  #elif defined(__i386)
2573 2574  
... 90 lines elided ...
2664 2665  .size_aligned:
2665 2666          movl    %ecx, %edi
2666 2667          shrl    $1, %ecx
2667 2668          shl     $1, %edi
2668 2669          subl    $64, %edi
2669 2670          addl    %edi, %esi
2670 2671          movl    $.ip_ocsum_jmptbl, %edi
2671 2672          lea     (%edi, %ecx, 4), %edi
2672 2673          xorl    %ecx, %ecx
2673 2674          clc
2674      -        jmp     *(%edi)
     2675 +        jmp     *(%edi)
2675 2676          SET_SIZE(ip_ocsum)
2676 2677  
2677 2678          .data
2678 2679          .align  4
2679 2680  
2680 2681  .ip_ocsum_jmptbl:
2681 2682          .long   .only0, .only4, .only8, .only12, .only16, .only20
2682 2683          .long   .only24, .only28, .only32, .only36, .only40, .only44
2683 2684          .long   .only48, .only52, .only56, .only60
2684 2685  
2685      -        
2686      -#endif  /* __i386 */            
     2686 +
     2687 +#endif  /* __i386 */
2687 2688  #endif  /* __lint */
2688 2689  
2689 2690  /*
2690 2691   * multiply two long numbers and yield a u_longlong_t result, callable from C.
2691 2692   * Provided to manipulate hrtime_t values.
2692 2693   */
2693 2694  #if defined(__lint)
2694 2695  
2695 2696  /* result = a * b; */
2696 2697  
... 3 lines elided ...
2700 2701  { return (0); }
2701 2702  
2702 2703  #else   /* __lint */
2703 2704  
2704 2705  #if defined(__amd64)
2705 2706  
2706 2707          ENTRY(mul32)
2707 2708          xorl    %edx, %edx      /* XX64 joe, paranoia? */
2708 2709          movl    %edi, %eax
2709 2710          mull    %esi
2710      -        shlq    $32, %rdx       
     2711 +        shlq    $32, %rdx
2711 2712          orq     %rdx, %rax
2712 2713          ret
2713 2714          SET_SIZE(mul32)
2714 2715  
2715 2716  #elif defined(__i386)
2716 2717  
2717 2718          ENTRY(mul32)
2718 2719          movl    8(%esp), %eax
2719 2720          movl    4(%esp), %ecx
2720 2721          mull    %ecx
... 126 lines elided ...
2847 2848  
2848 2849  #elif defined(__i386)
2849 2850  
2850 2851          ENTRY(highbit)
2851 2852          bsrl    4(%esp), %eax
2852 2853          jz      0f
2853 2854          incl    %eax
2854 2855          ret
2855 2856  0:
2856 2857          xorl    %eax, %eax
2857      -        ret    
     2858 +        ret
2858 2859          SET_SIZE(highbit)
2859 2860  
2860 2861          ENTRY(highbit64)
2861 2862          bsrl    8(%esp), %eax
2862 2863          jz      highbit
2863 2864          addl    $33, %eax
2864 2865          ret
2865 2866          SET_SIZE(highbit64)
2866 2867  
2867 2868  #endif  /* __i386 */
... 33 lines elided ...
2901 2902  /*ARGSUSED*/
2902 2903  void
2903 2904  set_xcr(uint_t r, const uint64_t val)
2904 2905  {}
2905 2906  
2906 2907  #else  /* __lint */
2907 2908  
2908 2909  #define XMSR_ACCESS_VAL         $0x9c5a203a
2909 2910  
2910 2911  #if defined(__amd64)
2911      -        
     2912 +
2912 2913          ENTRY(rdmsr)
2913 2914          movl    %edi, %ecx
2914 2915          rdmsr
2915 2916          shlq    $32, %rdx
2916 2917          orq     %rdx, %rax
2917 2918          ret
2918 2919          SET_SIZE(rdmsr)
2919 2920  
2920 2921          ENTRY(wrmsr)
2921 2922          movq    %rsi, %rdx
... 52 lines elided ...
2974 2975  
2975 2976          ENTRY(rdmsr)
2976 2977          movl    4(%esp), %ecx
2977 2978          rdmsr
2978 2979          ret
2979 2980          SET_SIZE(rdmsr)
2980 2981  
2981 2982          ENTRY(wrmsr)
2982 2983          movl    4(%esp), %ecx
2983 2984          movl    8(%esp), %eax
2984      -        movl    12(%esp), %edx 
     2985 +        movl    12(%esp), %edx
2985 2986          wrmsr
2986 2987          ret
2987 2988          SET_SIZE(wrmsr)
2988 2989  
2989 2990          ENTRY(xrdmsr)
2990 2991          pushl   %ebp
2991 2992          movl    %esp, %ebp
2992 2993          movl    8(%esp), %ecx
2993 2994          pushl   %edi
2994 2995          movl    XMSR_ACCESS_VAL, %edi   /* this value is needed to access MSR */
... 1 line elided ...
2996 2997          popl    %edi
2997 2998          leave
2998 2999          ret
2999 3000          SET_SIZE(xrdmsr)
3000 3001  
3001 3002          ENTRY(xwrmsr)
3002 3003          pushl   %ebp
3003 3004          movl    %esp, %ebp
3004 3005          movl    8(%esp), %ecx
3005 3006          movl    12(%esp), %eax
3006      -        movl    16(%esp), %edx 
     3007 +        movl    16(%esp), %edx
3007 3008          pushl   %edi
3008 3009          movl    XMSR_ACCESS_VAL, %edi   /* this value is needed to access MSR */
3009 3010          wrmsr
3010 3011          popl    %edi
3011 3012          leave
3012 3013          ret
3013 3014          SET_SIZE(xwrmsr)
3014 3015  
3015 3016          ENTRY(get_xcr)
3016 3017          movl    4(%esp), %ecx
... 178 lines elided ...
3195 3196  #else   /* __lint */
3196 3197  
3197 3198  #if defined(__amd64)
3198 3199  
3199 3200          ENTRY_NP(panic_trigger)
3200 3201          xorl    %eax, %eax
3201 3202          movl    $0xdefacedd, %edx
3202 3203          lock
3203 3204            xchgl %edx, (%rdi)
3204 3205          cmpl    $0, %edx
3205      -        je      0f 
     3206 +        je      0f
3206 3207          movl    $0, %eax
3207 3208          ret
3208 3209  0:      movl    $1, %eax
3209 3210          ret
3210 3211          SET_SIZE(panic_trigger)
3211      -        
     3212 +
3212 3213          ENTRY_NP(dtrace_panic_trigger)
3213 3214          xorl    %eax, %eax
3214 3215          movl    $0xdefacedd, %edx
3215 3216          lock
3216 3217            xchgl %edx, (%rdi)
3217 3218          cmpl    $0, %edx
3218 3219          je      0f
3219 3220          movl    $0, %eax
3220 3221          ret
3221 3222  0:      movl    $1, %eax
... 55 lines elided ...
3277 3278  /*ARGSUSED*/
3278 3279  void
3279 3280  dtrace_vpanic(const char *format, va_list alist)
3280 3281  {}
3281 3282  
3282 3283  #else   /* __lint */
3283 3284  
3284 3285  #if defined(__amd64)
3285 3286  
3286 3287          ENTRY_NP(vpanic)                        /* Initial stack layout: */
3287      -        
3288      -        pushq   %rbp                            /* | %rip |     0x60    */
     3288 +
     3289 +        pushq   %rbp                            /* | %rip |     0x60    */
3289 3290          movq    %rsp, %rbp                      /* | %rbp |     0x58    */
3290 3291          pushfq                                  /* | rfl  |     0x50    */
3291 3292          pushq   %r11                            /* | %r11 |     0x48    */
3292 3293          pushq   %r10                            /* | %r10 |     0x40    */
3293 3294          pushq   %rbx                            /* | %rbx |     0x38    */
3294 3295          pushq   %rax                            /* | %rax |     0x30    */
3295 3296          pushq   %r9                             /* | %r9  |     0x28    */
3296 3297          pushq   %r8                             /* | %r8  |     0x20    */
3297 3298          pushq   %rcx                            /* | %rcx |     0x18    */
3298 3299          pushq   %rdx                            /* | %rdx |     0x10    */
... 79 lines elided ...
3378 3379          movq    0x50(%rbx), %rcx
3379 3380          movq    %rcx, REGOFF_RFL(%rsp)
3380 3381          movq    %rbx, %rcx
3381 3382          addq    $0x60, %rcx
3382 3383          movq    %rcx, REGOFF_RSP(%rsp)
3383 3384          movw    %ss, %cx
3384 3385          movzwq  %cx, %rcx
3385 3386          movq    %rcx, REGOFF_SS(%rsp)
3386 3387  
3387 3388          /*
3388      -         * panicsys(format, alist, rp, on_panic_stack) 
3389      -         */     
     3389 +         * panicsys(format, alist, rp, on_panic_stack)
     3390 +         */
3390 3391          movq    REGOFF_RDI(%rsp), %rdi          /* format */
3391 3392          movq    REGOFF_RSI(%rsp), %rsi          /* alist */
3392 3393          movq    %rsp, %rdx                      /* struct regs */
3393 3394          movl    %r11d, %ecx                     /* on_panic_stack */
3394 3395          call    panicsys
3395 3396          addq    $REGSIZE, %rsp
3396 3397          popq    %rdi
3397 3398          popq    %rsi
3398 3399          popq    %rdx
3399 3400          popq    %rcx
... 3 lines elided ...
3403 3404          popq    %rbx
3404 3405          popq    %r10
3405 3406          popq    %r11
3406 3407          popfq
3407 3408          leave
3408 3409          ret
3409 3410          SET_SIZE(vpanic)
3410 3411  
3411 3412          ENTRY_NP(dtrace_vpanic)                 /* Initial stack layout: */
3412 3413  
3413      -        pushq   %rbp                            /* | %rip |     0x60    */
     3414 +        pushq   %rbp                            /* | %rip |     0x60    */
3414 3415          movq    %rsp, %rbp                      /* | %rbp |     0x58    */
3415 3416          pushfq                                  /* | rfl  |     0x50    */
3416 3417          pushq   %r11                            /* | %r11 |     0x48    */
3417 3418          pushq   %r10                            /* | %r10 |     0x40    */
3418 3419          pushq   %rbx                            /* | %rbx |     0x38    */
3419 3420          pushq   %rax                            /* | %rax |     0x30    */
3420 3421          pushq   %r9                             /* | %r9  |     0x28    */
3421 3422          pushq   %r8                             /* | %r8  |     0x20    */
3422 3423          pushq   %rcx                            /* | %rcx |     0x18    */
3423 3424          pushq   %rdx                            /* | %rdx |     0x10    */
... 35 lines elided ...
3459 3460           * panic: we now switch to the reserved panic_stack before continuing.
3460 3461           */
3461 3462          lea     panic_stack, %esp               / %esp  = panic_stack
3462 3463          addl    $PANICSTKSIZE, %esp             / %esp += PANICSTKSIZE
3463 3464  
3464 3465  0:      subl    $REGSIZE, %esp                  / allocate struct regs
3465 3466  
3466 3467          /*
3467 3468           * Now that we've got everything set up, store the register values as
3468 3469           * they were when we entered vpanic() to the designated location in
3469      -         * the regs structure we allocated on the stack. 
     3470 +         * the regs structure we allocated on the stack.
3470 3471           */
3471 3472  #if !defined(__GNUC_AS__)
3472 3473          movw    %gs, %edx
3473 3474          movl    %edx, REGOFF_GS(%esp)
3474 3475          movw    %fs, %edx
3475 3476          movl    %edx, REGOFF_FS(%esp)
3476 3477          movw    %es, %edx
3477 3478          movl    %edx, REGOFF_ES(%esp)
3478 3479          movw    %ds, %edx
3479 3480          movl    %edx, REGOFF_DS(%esp)
... 123 lines elided ...
3603 3604          ENTRY_NP(hres_tick)
3604 3605          pushq   %rbp
3605 3606          movq    %rsp, %rbp
3606 3607  
3607 3608          /*
3608 3609           * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
3609 3610           * hres_last_tick can only be modified while holding CLOCK_LOCK).
3610 3611           * At worst, performing this now instead of under CLOCK_LOCK may
3611 3612           * introduce some jitter in pc_gethrestime().
3612 3613           */
3613      -        call    *gethrtimef(%rip)
     3614 +        movq    gethrtimef(%rip), %rsi
     3615 +        INDIRECT_CALL_REG(rsi)
3614 3616          movq    %rax, %r8
3615 3617  
3616 3618          leaq    hres_lock(%rip), %rax
3617 3619          movb    $-1, %dl
3618 3620  .CL1:
3619 3621          xchgb   %dl, (%rax)
3620 3622          testb   %dl, %dl
3621 3623          jz      .CL3                    /* got it */
3622 3624  .CL2:
3623 3625          cmpb    $0, (%rax)              /* possible to get lock? */
... 7 lines elided ...
3631 3633           * hrtime_base is an 8 byte value (in nsec), hrestime is
3632 3634           * a timestruc_t (sec, nsec)
3633 3635           */
3634 3636          leaq    hres_last_tick(%rip), %rax
3635 3637          movq    %r8, %r11
3636 3638          subq    (%rax), %r8
3637 3639          addq    %r8, hrtime_base(%rip)  /* add interval to hrtime_base */
3638 3640          addq    %r8, hrestime+8(%rip)   /* add interval to hrestime.tv_nsec */
3639 3641          /*
3640 3642           * Now that we have CLOCK_LOCK, we can update hres_last_tick
3641      -         */     
3642      -        movq    %r11, (%rax)    
     3643 +         */
     3644 +        movq    %r11, (%rax)
3643 3645  
3644 3646          call    __adj_hrestime
3645 3647  
3646 3648          /*
3647 3649           * release the hres_lock
3648 3650           */
3649 3651          incl    hres_lock(%rip)
3650 3652          leave
3651 3653          ret
3652 3654          SET_SIZE(hres_tick)
3653      -        
     3655 +
3654 3656  #elif defined(__i386)
3655 3657  
3656 3658          ENTRY_NP(hres_tick)
3657 3659          pushl   %ebp
3658 3660          movl    %esp, %ebp
3659 3661          pushl   %esi
3660 3662          pushl   %ebx
3661 3663  
3662 3664          /*
3663 3665           * We need to call *gethrtimef before picking up CLOCK_LOCK (obviously,
... 22 lines elided ...
3686 3688           * and adjust hrtime_base and hrestime accordingly
3687 3689           * hrtime_base is an 8 byte value (in nsec), hrestime is
3688 3690           * timestruc_t (sec, nsec)
3689 3691           */
3690 3692  
3691 3693          lea     hres_last_tick, %eax
3692 3694  
3693 3695          movl    %ebx, %edx
3694 3696          movl    %esi, %ecx
3695 3697  
3696      -        subl    (%eax), %edx
3697      -        sbbl    4(%eax), %ecx
     3698 +        subl    (%eax), %edx
     3699 +        sbbl    4(%eax), %ecx
3698 3700  
3699 3701          addl    %edx, hrtime_base       / add interval to hrtime_base
3700 3702          adcl    %ecx, hrtime_base+4
3701 3703  
3702      -        addl    %edx, hrestime+4        / add interval to hrestime.tv_nsec
     3704 +        addl    %edx, hrestime+4        / add interval to hrestime.tv_nsec
3703 3705  
3704 3706          /
3705 3707          / Now that we have CLOCK_LOCK, we can update hres_last_tick.
3706 3708          /
3707 3709          movl    %ebx, (%eax)
3708 3710          movl    %esi,  4(%eax)
3709 3711  
3710 3712          / get hrestime at this moment. used as base for pc_gethrestime
3711 3713          /
3712 3714          / Apply adjustment, if any
... 213 lines elided ...
3926 3928          call    panic
3927 3929  1:
3928 3930  #endif  /* DEBUG */
3929 3931          call    memcmp
3930 3932          testl   %eax, %eax
3931 3933          setne   %dl
3932 3934          leave
3933 3935          movzbl  %dl, %eax
3934 3936          ret
3935 3937          SET_SIZE(bcmp)
3936      -        
     3938 +
3937 3939  #elif defined(__i386)
3938      -        
     3940 +
3939 3941  #define ARG_S1          8
3940 3942  #define ARG_S2          12
3941 3943  #define ARG_LENGTH      16
3942 3944  
3943 3945          ENTRY(bcmp)
3944 3946          pushl   %ebp
3945 3947          movl    %esp, %ebp      / create new stack frame
3946 3948  #ifdef DEBUG
3947 3949          cmpl    $0, ARG_LENGTH(%ebp)
3948 3950          je      1f
... 134 lines elided ...
4083 4085  
4084 4086  #if defined(__amd64)
4085 4087  
4086 4088          ENTRY_NP(switch_sp_and_call)
4087 4089          pushq   %rbp
4088 4090          movq    %rsp, %rbp              /* set up stack frame */
4089 4091          movq    %rdi, %rsp              /* switch stack pointer */
4090 4092          movq    %rdx, %rdi              /* pass func arg 1 */
4091 4093          movq    %rsi, %r11              /* save function to call */
4092 4094          movq    %rcx, %rsi              /* pass func arg 2 */
4093      -        call    *%r11                   /* call function */
     4095 +        INDIRECT_CALL_REG(r11)          /* call function */
4094 4096          leave                           /* restore stack */
4095 4097          ret
4096 4098          SET_SIZE(switch_sp_and_call)
4097 4099  
4098 4100  #elif defined(__i386)
4099 4101  
4100 4102          ENTRY_NP(switch_sp_and_call)
4101 4103          pushl   %ebp
4102 4104          mov     %esp, %ebp              /* set up stack frame */
4103 4105          movl    8(%ebp), %esp           /* switch stack pointer */
... 29 lines elided ...
4133 4135  
4134 4136          int     $T_DBGENTR
4135 4137  
4136 4138          /*
4137 4139           * Restore the saved flags
4138 4140           */
4139 4141          movq    %rax, %rdi
4140 4142          call    intr_restore
4141 4143  
4142 4144          leave
4143      -        ret     
     4145 +        ret
4144 4146          SET_SIZE(kmdb_enter)
4145 4147  
4146 4148  #elif defined(__i386)
4147 4149  
4148 4150          ENTRY_NP(kmdb_enter)
4149 4151          pushl   %ebp
4150 4152          movl    %esp, %ebp
4151 4153  
4152 4154          /*
4153 4155           * Save flags, do a 'cli' then return the saved flags
... 3 lines elided ...
4157 4159          int     $T_DBGENTR
4158 4160  
4159 4161          /*
4160 4162           * Restore the saved flags
4161 4163           */
4162 4164          pushl   %eax
4163 4165          call    intr_restore
4164 4166          addl    $4, %esp
4165 4167  
4166 4168          leave
4167      -        ret     
     4169 +        ret
4168 4170          SET_SIZE(kmdb_enter)
4169 4171  
4170 4172  #endif  /* __i386 */
4171 4173  #endif  /* __lint */
4172 4174  
4173 4175  #if defined(__lint)
4174 4176  
4175 4177  void
4176 4178  return_instr(void)
4177 4179  {}
... 76 lines elided ...
4254 4256  #if defined(__amd64)
4255 4257  
4256 4258          ENTRY(ftrace_interrupt_disable)
4257 4259          pushfq
4258 4260          popq    %rax
4259 4261          CLI(%rdx)
4260 4262          ret
4261 4263          SET_SIZE(ftrace_interrupt_disable)
4262 4264  
4263 4265  #elif defined(__i386)
4264      -                
     4266 +
4265 4267          ENTRY(ftrace_interrupt_disable)
4266 4268          pushfl
4267 4269          popl    %eax
4268 4270          CLI(%edx)
4269 4271          ret
4270 4272          SET_SIZE(ftrace_interrupt_disable)
4271 4273  
4272      -#endif  /* __i386 */    
     4274 +#endif  /* __i386 */
4273 4275  #endif  /* __lint */
4274 4276  
4275 4277  #if defined(__lint)
4276 4278  
4277 4279  /*ARGSUSED*/
4278 4280  void
4279 4281  ftrace_interrupt_enable(ftrace_icookie_t cookie)
4280 4282  {}
4281 4283  
4282 4284  #else   /* __lint */
4283 4285  
4284 4286  #if defined(__amd64)
4285 4287  
4286 4288          ENTRY(ftrace_interrupt_enable)
4287 4289          pushq   %rdi
4288 4290          popfq
4289 4291          ret
4290 4292          SET_SIZE(ftrace_interrupt_enable)
4291 4293  
4292 4294  #elif defined(__i386)
4293      -                
     4295 +
4294 4296          ENTRY(ftrace_interrupt_enable)
4295 4297          movl    4(%esp), %eax
4296 4298          pushl   %eax
4297 4299          popfl
4298 4300          ret
4299 4301          SET_SIZE(ftrace_interrupt_enable)
4300 4302  
4301 4303  #endif  /* __i386 */
4302 4304  #endif  /* __lint */
4303 4305  
... 42 lines elided ...
4346 4348  
4347 4349  #endif /* __i386 */
4348 4350  #endif /* __lint */
4349 4351  
4350 4352  /*
4351 4353   * VMware implements an I/O port that programs can query to detect if software
4352 4354   * is running in a VMware hypervisor. This hypervisor port behaves differently
4353 4355   * depending on magic values in certain registers and modifies some registers
4354 4356   * as a side effect.
4355 4357   *
4356      - * References: http://kb.vmware.com/kb/1009458 
     4358 + * References: http://kb.vmware.com/kb/1009458
4357 4359   */
4358 4360  
4359 4361  #if defined(__lint)
4360 4362  
4361 4363  /* ARGSUSED */
4362 4364  void
4363 4365  vmware_port(int cmd, uint32_t *regs) { return; }
4364 4366  
4365 4367  #else
4366 4368  
... 39 lines elided ...