de-linting of .s files

          --- old/usr/src/uts/i86pc/ml/mpcore.s
          +++ new/usr/src/uts/i86pc/ml/mpcore.s
[ 26 lines elided ]
  27   27   *
  28   28   * Copyright 2019 Joyent, Inc.
  29   29   */
  30   30  
  31   31  #include <sys/asm_linkage.h>
  32   32  #include <sys/asm_misc.h>
  33   33  #include <sys/regset.h>
  34   34  #include <sys/privregs.h>
  35   35  #include <sys/x86_archext.h>
  36   36  
  37      -#if !defined(__lint)
  38   37  #include <sys/segments.h>
  39   38  #include "assym.h"
  40      -#endif
  41   39  
  42   40  /*
  43   41   *      Our assumptions:
  44   42   *              - We are running in real mode.
  45   43   *              - Interrupts are disabled.
  46   44   *              - Selectors are equal (cs == ds == ss) for all real mode code
  47   45   *              - The GDT, IDT, ktss and page directory have been built for us
  48   46   *
  49   47   *      Our actions:
  50   48   *      Start CPU:
[ 2 lines elided ]
  53   51   *                gs=KGS_SEL).
  54   52   *              - We change over to using our IDT.
  55   53   *              - We load the default LDT into the hardware LDT register.
  56   54   *              - We load the default TSS into the hardware task register.
  57   55   *              - call mp_startup(void) indirectly through the T_PC
  58   56   *      Stop CPU:
  59   57   *              - Put CPU into halted state with interrupts disabled
  60   58   *
  61   59   */
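
As a rough illustration of the final Start CPU step (the indirect call to mp_startup() through T_PC), the userland C sketch below models fetching a function pointer out of a simplified thread structure and calling it. The struct layout and names are invented stand-ins, not the real kthread_t/cpu_t definitions.

    /*
     * Minimal userland sketch (not illumos code): call a "startup"
     * routine indirectly through a function-pointer slot, analogous
     * to the T_PC slot used by the assembly below.
     */
    #include <stdio.h>

    typedef struct fake_thread {
            void    (*t_pc)(void);          /* stand-in for the T_PC slot */
    } fake_thread_t;

    static void
    fake_mp_startup(void)
    {
            (void) printf("mp_startup() analog reached\n");
    }

    int
    main(void)
    {
            fake_thread_t t = { .t_pc = fake_mp_startup };

            /* analog of: movq T_PC(%rax), %rax; INDIRECT_CALL_REG(rax) */
            t.t_pc();
            return (0);
    }
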
  62   60  
  63      -#if defined(__lint)
  64      -
  65      -void
  66      -real_mode_start_cpu(void)
  67      -{}
  68      -
  69      -void
  70      -real_mode_stop_cpu_stage1(void)
  71      -{}
  72      -
  73      -void
  74      -real_mode_stop_cpu_stage2(void)
  75      -{}
  76      -
  77      -#else   /* __lint */
  78      -
  79      -#if defined(__amd64)
  80      -
  81   61          ENTRY_NP(real_mode_start_cpu)
  82   62  
  83   63          /*
  84   64           * NOTE:  The GNU assembler automatically does the right thing to
  85   65           *        generate data size operand prefixes based on the code size
  86   66           *        generation mode (e.g. .code16, .code32, .code64) and as such
  87   67           *        prefixes need not be used on instructions EXCEPT in the case
  88   68           *        of address prefixes for code for which the reference is not
  89   69           *        automatically of the default operand size.
  90   70           */
[ 20 lines elided ]
 111   91          /*
 112   92           * Do a jmp immediately after writing to cr0 when enabling protected
 113   93           * mode to clear the real mode prefetch queue (per Intel's docs)
 114   94           */
 115   95          jmp             pestart
 116   96  
 117   97  pestart:
 118   98          /*
 119   99           * 16-bit protected mode is now active, so prepare to turn on long
 120  100           * mode.
 121      -         *
 122      -         * Note that we currently assume that if we're attempting to run a
 123      -         * kernel compiled with (__amd64) #defined, the target CPU has long
 124      -         * mode support.
 125  101           */
 126  102  
 127      -#if 0
 128  103          /*
 129      -         * If there's a chance this might not be true, the following test should
 130      -         * be done, with the no_long_mode branch then doing something
 131      -         * appropriate:
 132      -         */
 133      -
 134      -        movl            $0x80000000, %eax       /* get largest extended CPUID */
 135      -        cpuid
 136      -        cmpl            $0x80000000, %eax       /* check if > 0x80000000 */
 137      -        jbe             no_long_mode            /* nope, no long mode */
 138      -        movl            $0x80000001, %eax
 139      -        cpuid                                   /* get extended feature flags */
 140      -        btl             $29, %edx               /* check for long mode */
 141      -        jnc             no_long_mode            /* long mode not supported */
 142      -#endif
 143      -
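
The block removed above (under #if 0) was a never-compiled CPUID test for long-mode support. A hedged C equivalent using the compiler's <cpuid.h> helper looks roughly like this; it tests the same condition (extended leaf 0x80000001 present, %edx bit 29 "LM" set), but from userland rather than from 16-bit protected mode.

    /*
     * Sketch of the removed long-mode check, using GCC/Clang's
     * __get_cpuid() helper instead of raw cpuid in real mode.
     */
    #include <cpuid.h>
    #include <stdio.h>

    int
    main(void)
    {
            unsigned int eax, ebx, ecx, edx;

            /* fails if leaf 0x80000001 is not supported at all */
            if (__get_cpuid(0x80000001, &eax, &ebx, &ecx, &edx) == 0) {
                    (void) printf("no extended CPUID leaves: no long mode\n");
                    return (1);
            }
            if ((edx & (1U << 29)) == 0) {
                    (void) printf("long mode not supported\n");
                    return (1);
            }
            (void) printf("long mode supported\n");
            return (0);
    }
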
 144      -        /*
 145  104           * Add any initial cr4 bits
 146  105           */
 147  106          movl            %cr4, %eax
 148  107          addr32 orl      CR4OFF, %eax
 149  108  
 150  109          /*
 151  110           * Enable PAE mode (CR4.PAE)
 152  111           */
 153  112          orl             $CR4_PAE, %eax
 154  113          movl            %eax, %cr4
[ 173 lines elided ]
 328  287           * Complete the rest of the setup and call mp_startup().
 329  288           */
 330  289          movq    %gs:CPU_THREAD, %rax    /* get thread ptr */
 331  290          movq    T_PC(%rax), %rax
 332  291          INDIRECT_CALL_REG(rax)          /* call mp_startup_boot */
 333  292          /* not reached */
 334  293          int     $20                     /* whoops, returned somehow! */
 335  294  
 336  295          SET_SIZE(real_mode_start_cpu)
 337  296  
 338      -#elif defined(__i386)
 339      -
 340      -        ENTRY_NP(real_mode_start_cpu)
 341      -
 342      -#if !defined(__GNUC_AS__)
 343      -
 344      -        cli
 345      -        D16 movw        %cs, %eax
 346      -        movw            %eax, %ds       /* load cs into ds */
 347      -        movw            %eax, %ss       /* and into ss */
 348      -
 349      -        /*
 350      -         * Helps in debugging by giving us the fault address.
 351      -         *
 352      -         * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
 353      -         */
 354      -        D16 movl        $0xffc, %esp
 355      -
 356      -        D16 A16 lgdt    %cs:GDTROFF
 357      -        D16 A16 lidt    %cs:IDTROFF
 358      -        D16 A16 movl    %cs:CR4OFF, %eax        /* set up CR4, if desired */
 359      -        D16 andl        %eax, %eax
 360      -        D16 A16 je      no_cr4
 361      -
 362      -        D16 movl        %eax, %ecx
 363      -        D16 movl        %cr4, %eax
 364      -        D16 orl         %ecx, %eax
 365      -        D16 movl        %eax, %cr4
 366      -no_cr4:
 367      -        D16 A16 movl    %cs:CR3OFF, %eax
 368      -        A16 movl        %eax, %cr3
 369      -        movl            %cr0, %eax
 370      -
 371      -        /*
 372      -         * Enable protected-mode, paging, write protect, and alignment mask
 373      -         */
 374      -        D16 orl         $[CR0_PG|CR0_PE|CR0_WP|CR0_AM], %eax
 375      -        movl            %eax, %cr0
 376      -        jmp             pestart
 377      -
 378      -pestart:
 379      -        D16 pushl       $KCS_SEL
 380      -        D16 pushl       $kernel_cs_code
 381      -        D16 lret
 382      -        .globl real_mode_start_cpu_end
 383      -real_mode_start_cpu_end:
 384      -        nop
 385      -
 386      -        .globl  kernel_cs_code
 387      -kernel_cs_code:
 388      -        /*
 389      -         * At this point we are with kernel's cs and proper eip.
 390      -         *
 391      -         * We will be executing not from the copy in real mode platter,
 392      -         * but from the original code where boot loaded us.
 393      -         *
 394      -         * By this time GDT and IDT are loaded as is cr3.
 395      -         */
 396      -        movw    $KFS_SEL,%eax
 397      -        movw    %eax,%fs
 398      -        movw    $KGS_SEL,%eax
 399      -        movw    %eax,%gs
 400      -        movw    $KDS_SEL,%eax
 401      -        movw    %eax,%ds
 402      -        movw    %eax,%es
 403      -        movl    %gs:CPU_TSS,%esi
 404      -        movw    %eax,%ss
 405      -        movl    TSS_ESP0(%esi),%esp
 406      -        movw    $KTSS_SEL,%ax
 407      -        ltr     %ax
 408      -        xorw    %ax, %ax                /* clear LDTR */
 409      -        lldt    %ax
 410      -        movl    %cr0,%edx
 411      -        andl    $-1![CR0_TS|CR0_EM],%edx  /* clear emulate math chip bit */
 412      -        orl     $[CR0_MP|CR0_NE],%edx
 413      -        movl    %edx,%cr0                 /* set machine status word */
 414      -
 415      -        /*
 416      -         * Before going any further, enable usage of page table NX bit if
 417      -         * that's how our page tables are set up.
 418      -         */
 419      -        bt      $X86FSET_NX, x86_featureset
 420      -        jnc     1f
 421      -        movl    %cr4, %ecx
 422      -        andl    $CR4_PAE, %ecx
 423      -        jz      1f
 424      -        movl    $MSR_AMD_EFER, %ecx
 425      -        rdmsr
 426      -        orl     $AMD_EFER_NXE, %eax
 427      -        wrmsr
 428      -1:
 429      -        movl    %gs:CPU_THREAD, %eax    /* get thread ptr */
 430      -        call    *T_PC(%eax)             /* call mp_startup */
 431      -        /* not reached */
 432      -        int     $20                     /* whoops, returned somehow! */
 433      -
 434      -#else
 435      -
 436      -        cli
 437      -        mov             %cs, %ax
 438      -        mov             %eax, %ds       /* load cs into ds */
 439      -        mov             %eax, %ss       /* and into ss */
 440      -
 441      -        /*
 442      -         * Helps in debugging by giving us the fault address.
 443      -         *
 444      -         * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
 445      -         */
 446      -        D16 mov         $0xffc, %esp
 447      -
 448      -        D16 A16 lgdtl   %cs:GDTROFF
 449      -        D16 A16 lidtl   %cs:IDTROFF
 450      -        D16 A16 mov     %cs:CR4OFF, %eax        /* set up CR4, if desired */
 451      -        D16 and         %eax, %eax
 452      -        D16 A16 je      no_cr4
 453      -
 454      -        D16 mov         %eax, %ecx
 455      -        D16 mov         %cr4, %eax
 456      -        D16 or          %ecx, %eax
 457      -        D16 mov         %eax, %cr4
 458      -no_cr4:
 459      -        D16 A16 mov     %cs:CR3OFF, %eax
 460      -        A16 mov         %eax, %cr3
 461      -        mov             %cr0, %eax
 462      -
 463      -        /*
 464      -         * Enable protected-mode, paging, write protect, and alignment mask
 465      -         */
 466      -        D16 or          $(CR0_PG|CR0_PE|CR0_WP|CR0_AM), %eax
 467      -        mov             %eax, %cr0
 468      -        jmp             pestart
 469      -
 470      -pestart:
 471      -        D16 pushl       $KCS_SEL
 472      -        D16 pushl       $kernel_cs_code
 473      -        D16 lret
 474      -        .globl real_mode_start_cpu_end
 475      -real_mode_start_cpu_end:
 476      -        nop
 477      -        .globl  kernel_cs_code
 478      -kernel_cs_code:
 479      -        /*
 480      -         * At this point we are with kernel's cs and proper eip.
 481      -         *
 482      -         * We will be executing not from the copy in real mode platter,
 483      -         * but from the original code where boot loaded us.
 484      -         *
 485      -         * By this time GDT and IDT are loaded as is cr3.
 486      -         */
 487      -        mov     $KFS_SEL, %ax
 488      -        mov     %eax, %fs
 489      -        mov     $KGS_SEL, %ax
 490      -        mov     %eax, %gs
 491      -        mov     $KDS_SEL, %ax
 492      -        mov     %eax, %ds
 493      -        mov     %eax, %es
 494      -        mov     %gs:CPU_TSS, %esi
 495      -        mov     %eax, %ss
 496      -        mov     TSS_ESP0(%esi), %esp
 497      -        mov     $(KTSS_SEL), %ax
 498      -        ltr     %ax
 499      -        xorw    %ax, %ax                /* clear LDTR */
 500      -        lldt    %ax
 501      -        mov     %cr0, %edx
 502      -        and     $~(CR0_TS|CR0_EM), %edx /* clear emulate math chip bit */
 503      -        or      $(CR0_MP|CR0_NE), %edx
 504      -        mov     %edx, %cr0              /* set machine status word */
 505      -
 506      -        /*
 507      -         * Before going any farther, enable usage of page table NX bit if
 508      -         * that's how our page tables are set up.  (PCIDE is enabled later on).
 509      -         */
 510      -        bt      $X86FSET_NX, x86_featureset
 511      -        jnc     1f
 512      -        movl    %cr4, %ecx
 513      -        andl    $CR4_PAE, %ecx
 514      -        jz      1f
 515      -        movl    $MSR_AMD_EFER, %ecx
 516      -        rdmsr
 517      -        orl     $AMD_EFER_NXE, %eax
 518      -        wrmsr
 519      -1:
 520      -        mov     %gs:CPU_THREAD, %eax    /* get thread ptr */
 521      -        call    *T_PC(%eax)             /* call mp_startup */
 522      -        /* not reached */
 523      -        int     $20                     /* whoops, returned somehow! */
 524      -#endif
 525      -
 526      -        SET_SIZE(real_mode_start_cpu)
 527      -
 528      -#endif  /* __amd64 */
 529      -
 530      -#if defined(__amd64)
 531      -
 532  297          ENTRY_NP(real_mode_stop_cpu_stage1)
 533  298  
 534  299  #if !defined(__GNUC_AS__)
 535  300  
 536  301          /*
 537  302           * For vulcan as we need to do a .code32 and mentally invert the
 538  303           * meaning of the addr16 and data16 prefixes to get 32-bit access when
 539  304           * generating code to be executed in 16-bit mode (sigh...)
 540  305           */
 541  306          .code32
[ 31 lines elided ]
 573  338          jmp             *%ax
 574  339  
 575  340  #endif  /* !__GNUC_AS__ */
 576  341  
 577  342          .globl real_mode_stop_cpu_stage1_end
 578  343  real_mode_stop_cpu_stage1_end:
 579  344          nop
 580  345  
 581  346          SET_SIZE(real_mode_stop_cpu_stage1)
 582  347  
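
Stage 1 ends by jumping into halt code that was copied into the real-mode platter earlier (the jmp *%ax above). As a loose userland analogy only, the sketch below copies a trivial stub into an executable mapping and enters it through a function pointer; the mmap flags and the stub bytes are illustrative assumptions, not kernel code.

    /*
     * Userland analogy for "jump to code copied elsewhere": place a
     * bare "ret" stub into an executable mapping and call into it.
     * Strict W^X systems may refuse the PROT_WRITE|PROT_EXEC mapping.
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int
    main(void)
    {
            static const unsigned char stub[] = { 0xc3 };   /* x86 "ret" */
            void (*entry)(void);
            void *buf;

            buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE | PROT_EXEC,
                MAP_PRIVATE | MAP_ANON, -1, 0);
            if (buf == MAP_FAILED) {
                    perror("mmap");
                    return (1);
            }
            (void) memcpy(buf, stub, sizeof (stub));

            /* analog of "movw $CPUHALTCODEOFF, %ax; jmp *%ax" */
            entry = (void (*)(void))buf;
            entry();
            (void) printf("returned from copied stub\n");
            return (0);
    }
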
 583      -#elif defined(__i386)
 584      -
 585      -        ENTRY_NP(real_mode_stop_cpu_stage1)
 586      -
 587      -#if !defined(__GNUC_AS__)
 588      -
 589      -        cli
 590      -        D16 movw        %cs, %eax
 591      -        movw            %eax, %ds       /* load cs into ds */
 592      -        movw            %eax, %ss       /* and into ss */
 593      -
 594      -        /*
 595      -         * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
 596      -         */
 597      -        movw            $CPUHALTCODEOFF, %ax
 598      -        .byte           0xff, 0xe0      /* jmp *%ax */
 599      -
 600      -#else   /* __GNUC_AS__ */
 601      -
 602      -        cli
 603      -        mov             %cs, %ax
 604      -        mov             %eax, %ds       /* load cs into ds */
 605      -        mov             %eax, %ss       /* and into ss */
 606      -
 607      -        /*
 608      -         * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
 609      -         */
 610      -        movw            $CPUHALTCODEOFF, %ax
 611      -        /*
 612      -         * The following indirect call is executed as part of starting up a CPU.
 613      -         * As such nothing else should be running on it or executing in the
 614      -         * system such that it is a viable Spectre v2 branch target injection
 615      -         * location. At least, in theory.
 616      -         */
 617      -        jmp             *%ax
 618      -
 619      -#endif  /* !__GNUC_AS__ */
 620      -
 621      -        .globl real_mode_stop_cpu_stage1_end
 622      -real_mode_stop_cpu_stage1_end:
 623      -        nop
 624      -
 625      -        SET_SIZE(real_mode_stop_cpu_stage1)
 626      -
 627      -#endif  /* __amd64 */
 628      -
 629  348          ENTRY_NP(real_mode_stop_cpu_stage2)
 630  349  
 631  350          movw            $0xdead, %ax
 632  351          movw            %ax, CPUHALTEDOFF
 633  352  
 634  353  real_mode_stop_cpu_loop:
 635  354          /*
 636  355           * Put CPU into halted state.
 637  356           * Only INIT, SMI, NMI could break the loop.
 638  357           */
 639  358          hlt
 640  359          jmp             real_mode_stop_cpu_loop
 641  360  
 642  361          .globl real_mode_stop_cpu_stage2_end
 643  362  real_mode_stop_cpu_stage2_end:
 644  363          nop
 645  364  
 646  365          SET_SIZE(real_mode_stop_cpu_stage2)
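
The stage 2 body above is just a marker store followed by a hlt loop that only INIT, SMI, or NMI can leave. Rendered as hedged C for illustration (hlt is privileged, so this is only meaningful at ring 0; the function name is invented, not an illumos symbol):

    /* C rendering of the stage 2 halt loop; valid only in kernel context. */
    static void
    cpu_halt_forever(void)
    {
            for (;;) {
                    /* only INIT, SMI, or NMI can take the CPU out of hlt */
                    __asm__ __volatile__("hlt");
            }
    }
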
 647  366  
 648      -#endif  /* __lint */
    