de-linting of .s files


Before de-linting:

 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 *
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/x86_archext.h>

#if !defined(__lint)
#include <sys/segments.h>
#include "assym.h"
#endif
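
/*
 * Both headers are only needed by the real assembly: "assym.h" is generated
 * at build time and supplies the structure and platter offsets used below
 * (e.g. CPU_THREAD, T_PC, TSS_ESP0, CR3OFF, CPUHALTCODEOFF), while the
 * __lint pass compiles the C stubs further down instead, hence the guard.
 */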

/*
 *	Our assumptions:
 *		- We are running in real mode.
 *		- Interrupts are disabled.
 *		- Selectors are equal (cs == ds == ss) for all real mode code
 *		- The GDT, IDT, ktss and page directory have been built for us
 *
 *	Our actions:
 *	Start CPU:
 *		- We start using our GDT by loading correct values in the
 *		  selector registers (cs=KCS_SEL, ds=es=ss=KDS_SEL, fs=KFS_SEL,
 *		  gs=KGS_SEL).
 *		- We change over to using our IDT.
 *		- We load the default LDT into the hardware LDT register.
 *		- We load the default TSS into the hardware task register.
 *		- We call mp_startup(void) indirectly through the thread's T_PC.
 *	Stop CPU:
 *		- Put the CPU into a halted state with interrupts disabled.
 *
 */
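
/*
 * Context: this text is copied into the low-memory "real mode platter",
 * and is what an application processor first executes after the boot CPU
 * sends it the INIT/startup-IPI sequence; the stop-CPU path likewise runs
 * from the platter when a CPU is being taken offline.
 */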

#if defined(__lint)

void
real_mode_start_cpu(void)
{}

void
real_mode_stop_cpu_stage1(void)
{}

void
real_mode_stop_cpu_stage2(void)
{}

#else	/* __lint */

#if defined(__amd64)

	ENTRY_NP(real_mode_start_cpu)

	/*
	 * NOTE:  The GNU assembler automatically does the right thing to
	 *	  generate data size operand prefixes based on the code size
	 *	  generation mode (e.g. .code16, .code32, .code64), so such
	 *	  prefixes need not be written on instructions EXCEPT in the
	 *	  case of address prefixes, for references that are not
	 *	  automatically of the default address size.
	 */
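	/*
	 * For example, in .code16 the assembler would encode
	 *	movl	CR4OFF, %eax
	 * with a 16-bit address; the explicit addr32 prefixes used below
	 * force the 32-bit address-size override, while the 32-bit operand
	 * size is emitted automatically.
	 */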
	.code16
	cli
	movw		%cs, %ax
	movw		%ax, %ds	/* load cs into ds */
	movw		%ax, %ss	/* and into ss */

	/*
	 * Helps in debugging by giving us the fault address.
	 *
	 * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
	 */
	movl		$0xffc, %esp
	movl		%cr0, %eax

	/*
	 * Enable protected-mode, write protect, and alignment mask
	 */
	orl		$(CR0_PE|CR0_WP|CR0_AM), %eax
	movl		%eax, %cr0

	/*
	 * Do a jmp immediately after writing to cr0 when enabling protected
	 * mode to clear the real mode prefetch queue (per Intel's docs)
	 */
	jmp		pestart

pestart:
	/*
	 * 16-bit protected mode is now active, so prepare to turn on long
	 * mode.
	 *
	 * Note that we currently assume that if we're attempting to run a
	 * kernel compiled with (__amd64) #defined, the target CPU has long
	 * mode support.
	 */

#if 0
	/*
	 * If there's a chance this might not be true, the following test should
	 * be done, with the no_long_mode branch then doing something
	 * appropriate:
	 */

	movl		$0x80000000, %eax	/* get largest extended CPUID */
	cpuid
	cmpl		$0x80000000, %eax	/* check if > 0x80000000 */
	jbe		no_long_mode		/* nope, no long mode */
	movl		$0x80000001, %eax
	cpuid					/* get extended feature flags */
	btl		$29, %edx		/* check for long mode */
	jnc		no_long_mode		/* long mode not supported */
#endif
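	/*
	 * (Bit 29 of %edx for CPUID leaf 0x80000001 is the architectural
	 * long mode flag, CPUID.80000001H:EDX.LM.)
	 */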

	/*
	 * Add any initial cr4 bits
	 */
	movl		%cr4, %eax
	addr32 orl	CR4OFF, %eax

	/*
	 * Enable PAE mode (CR4.PAE)
	 */
	orl		$CR4_PAE, %eax
	movl		%eax, %cr4
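	/*
	 * Note that PAE is a hard prerequisite here: long mode cannot be
	 * activated unless CR4.PAE is already set at the point where paging
	 * is enabled with EFER.LME in force.
	 */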

	/*
	 * Point cr3 to the 64-bit long mode page tables.
	 *
	 * Note that these MUST exist in 32-bit space, as we don't have
	 * a way to load %cr3 with a 64-bit base address for the page tables
	 * until the CPU is actually executing in 64-bit long mode.
	 */
	addr32 movl	CR3OFF, %eax
	movl		%eax, %cr3


	/*
	 * Before going any further, enable usage of the page table NX bit if
	 * that's how our page tables are set up.
	 */
	bt	$X86FSET_NX, x86_featureset(%rip)
	jnc	1f
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	orl	$AMD_EFER_NXE, %eax
	wrmsr
1:
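	/*
	 * With EFER.NXE clear, bit 63 of a PTE is treated as reserved and
	 * would fault, so this must happen before any no-execute mappings
	 * are used.
	 */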

	/*
	 * Complete the rest of the setup and call mp_startup().
	 */
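	/*
	 * INDIRECT_CALL_REG is used rather than a bare "call *%rax" so that
	 * the call can be made retpoline-safe as part of the kernel's
	 * Spectre v2 mitigations.
	 */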
	movq	%gs:CPU_THREAD, %rax	/* get thread ptr */
	movq	T_PC(%rax), %rax
	INDIRECT_CALL_REG(rax)		/* call mp_startup_boot */
	/* not reached */
	int	$20			/* whoops, returned somehow! */

	SET_SIZE(real_mode_start_cpu)

#elif defined(__i386)

	ENTRY_NP(real_mode_start_cpu)

#if !defined(__GNUC_AS__)

	cli
	D16 movw	%cs, %eax
	movw		%eax, %ds	/* load cs into ds */
	movw		%eax, %ss	/* and into ss */

	/*
	 * Helps in debugging by giving us the fault address.
	 *
	 * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
	 */
	D16 movl	$0xffc, %esp

	D16 A16 lgdt	%cs:GDTROFF
	D16 A16 lidt	%cs:IDTROFF
	D16 A16 movl	%cs:CR4OFF, %eax	/* set up CR4, if desired */
	D16 andl	%eax, %eax
	D16 A16 je	no_cr4

	D16 movl	%eax, %ecx
	D16 movl	%cr4, %eax
	D16 orl		%ecx, %eax
	D16 movl	%eax, %cr4
no_cr4:
	D16 A16 movl	%cs:CR3OFF, %eax
	A16 movl	%eax, %cr3
	movl		%cr0, %eax

	/*
	 * Enable protected-mode, paging, write protect, and alignment mask
	 */
	D16 orl		$[CR0_PG|CR0_PE|CR0_WP|CR0_AM], %eax
	movl		%eax, %cr0
	jmp		pestart

pestart:
	D16 pushl	$KCS_SEL
	D16 pushl	$kernel_cs_code
	D16 lret
	.globl real_mode_start_cpu_end
real_mode_start_cpu_end:
	nop
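	/*
	 * real_mode_start_cpu_end marks how much of this text must be
	 * copied out into the real mode platter for the target CPU to run.
	 */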

	.globl	kernel_cs_code
kernel_cs_code:
	/*
	 * At this point we are running with the kernel's %cs and proper %eip.
	 *
	 * We are no longer executing from the copy in the real mode platter,
	 * but from the original code where boot loaded us.
	 *
	 * By this time the GDT and IDT are loaded, as is cr3.
	 */
	movw	$KFS_SEL,%eax
	movw	%eax,%fs
	movw	$KGS_SEL,%eax
	movw	%eax,%gs
	movw	$KDS_SEL,%eax
	movw	%eax,%ds
	movw	%eax,%es
	movl	%gs:CPU_TSS,%esi
	movw	%eax,%ss
	movl	TSS_ESP0(%esi),%esp
	movw	$KTSS_SEL,%ax
	ltr	%ax
	xorw	%ax, %ax		/* clear LDTR */
	lldt	%ax
	movl	%cr0,%edx
	andl	$-1![CR0_TS|CR0_EM],%edx  /* clear TS and EM (math chip) bits */
	orl	$[CR0_MP|CR0_NE],%edx
	movl	%edx,%cr0		  /* set machine status word */
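	/*
	 * i.e. clear TS and EM (no x87 trap-on-use or emulation) and set MP
	 * and NE to get native FPU error reporting.
	 */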

	/*
	 * Before going any further, enable usage of the page table NX bit if
	 * that's how our page tables are set up.
	 */
	bt	$X86FSET_NX, x86_featureset
	jnc	1f
	movl	%cr4, %ecx
	andl	$CR4_PAE, %ecx
	jz	1f
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	orl	$AMD_EFER_NXE, %eax
	wrmsr
1:
	movl	%gs:CPU_THREAD, %eax	/* get thread ptr */
	call	*T_PC(%eax)		/* call mp_startup */
	/* not reached */
	int	$20			/* whoops, returned somehow! */

#else

	cli
	mov		%cs, %ax
	mov		%eax, %ds	/* load cs into ds */
	mov		%eax, %ss	/* and into ss */

	/*
	 * Helps in debugging by giving us the fault address.
	 *
	 * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
	 */
	D16 mov		$0xffc, %esp

	D16 A16 lgdtl	%cs:GDTROFF
	D16 A16 lidtl	%cs:IDTROFF
	D16 A16 mov	%cs:CR4OFF, %eax	/* set up CR4, if desired */
	D16 and		%eax, %eax
	D16 A16 je	no_cr4

	D16 mov		%eax, %ecx
	D16 mov		%cr4, %eax
	D16 or		%ecx, %eax
	D16 mov		%eax, %cr4
no_cr4:
	D16 A16 mov	%cs:CR3OFF, %eax
	A16 mov		%eax, %cr3
	mov		%cr0, %eax

	/*
	 * Enable protected-mode, paging, write protect, and alignment mask
	 */
	D16 or		$(CR0_PG|CR0_PE|CR0_WP|CR0_AM), %eax
	mov		%eax, %cr0
	jmp		pestart

pestart:
	D16 pushl	$KCS_SEL
	D16 pushl	$kernel_cs_code
	D16 lret
	.globl real_mode_start_cpu_end
real_mode_start_cpu_end:
	nop
	.globl	kernel_cs_code
kernel_cs_code:
	/*
	 * At this point we are running with the kernel's %cs and proper %eip.
	 *
	 * We are no longer executing from the copy in the real mode platter,
	 * but from the original code where boot loaded us.
	 *
	 * By this time the GDT and IDT are loaded, as is cr3.
	 */
	mov	$KFS_SEL, %ax
	mov	%eax, %fs
	mov	$KGS_SEL, %ax
	mov	%eax, %gs
	mov	$KDS_SEL, %ax
	mov	%eax, %ds
	mov	%eax, %es
	mov	%gs:CPU_TSS, %esi
	mov	%eax, %ss
	mov	TSS_ESP0(%esi), %esp
	mov	$(KTSS_SEL), %ax
	ltr	%ax
	xorw	%ax, %ax		/* clear LDTR */
	lldt	%ax
	mov	%cr0, %edx
	and	$~(CR0_TS|CR0_EM), %edx	/* clear TS and EM (math chip) bits */
	or	$(CR0_MP|CR0_NE), %edx
	mov	%edx, %cr0		/* set machine status word */

	/*
	 * Before going any further, enable usage of the page table NX bit if
	 * that's how our page tables are set up.  (PCIDE is enabled later on.)
	 */
	bt	$X86FSET_NX, x86_featureset
	jnc	1f
	movl	%cr4, %ecx
	andl	$CR4_PAE, %ecx
	jz	1f
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	orl	$AMD_EFER_NXE, %eax
	wrmsr
1:
	mov	%gs:CPU_THREAD, %eax	/* get thread ptr */
	call	*T_PC(%eax)		/* call mp_startup */
	/* not reached */
	int	$20			/* whoops, returned somehow! */
#endif

	SET_SIZE(real_mode_start_cpu)

#endif	/* __amd64 */

#if defined(__amd64)

	ENTRY_NP(real_mode_stop_cpu_stage1)

#if !defined(__GNUC_AS__)

	/*
	 * For vulcan as we need to do a .code32 and mentally invert the
	 * meaning of the addr16 and data16 prefixes to get 32-bit access when
	 * generating code to be executed in 16-bit mode (sigh...)
	 */
	.code32
	cli
	movw		%cs, %ax
	movw		%ax, %ds	/* load cs into ds */
	movw		%ax, %ss	/* and into ss */

	/*
	 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
	 */
	movw		$CPUHALTCODEOFF, %ax
	.byte		0xff, 0xe0	/* jmp *%ax */


#else	/* __GNUC_AS__ */

	.code16
	cli
	movw		%cs, %ax
	movw		%ax, %ds	/* load cs into ds */
	movw		%ax, %ss	/* and into ss */

	/*
	 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
	 */
	movw		$CPUHALTCODEOFF, %ax
	jmp		*%ax

#endif	/* !__GNUC_AS__ */

	.globl real_mode_stop_cpu_stage1_end
real_mode_stop_cpu_stage1_end:
	nop

	SET_SIZE(real_mode_stop_cpu_stage1)

#elif defined(__i386)

	ENTRY_NP(real_mode_stop_cpu_stage1)

#if !defined(__GNUC_AS__)

	cli
	D16 movw	%cs, %eax
	movw		%eax, %ds	/* load cs into ds */
	movw		%eax, %ss	/* and into ss */

	/*
	 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
	 */
	movw		$CPUHALTCODEOFF, %ax
	.byte		0xff, 0xe0	/* jmp *%ax */

#else	/* __GNUC_AS__ */

	cli
	mov		%cs, %ax
	mov		%eax, %ds	/* load cs into ds */
	mov		%eax, %ss	/* and into ss */

	/*
	 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
	 */
	movw		$CPUHALTCODEOFF, %ax
	/*
	 * The following indirect jump is executed while stopping a CPU.
	 * As such nothing else should be running on it or executing in the
	 * system such that it is a viable Spectre v2 branch target injection
	 * location. At least, in theory.
	 */
	jmp		*%ax

#endif	/* !__GNUC_AS__ */

	.globl real_mode_stop_cpu_stage1_end
real_mode_stop_cpu_stage1_end:
	nop

	SET_SIZE(real_mode_stop_cpu_stage1)

#endif	/* __amd64 */

	ENTRY_NP(real_mode_stop_cpu_stage2)

	movw		$0xdead, %ax
	movw		%ax, CPUHALTEDOFF
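	/*
	 * Storing 0xdead at CPUHALTEDOFF in the platter flags this CPU as
	 * halted for whoever is driving the offline operation.
	 */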

real_mode_stop_cpu_loop:
	/*
	 * Put the CPU into a halted state.
	 * Only INIT, SMI, or NMI can break the loop.
	 */
	hlt
	jmp		real_mode_stop_cpu_loop

	.globl real_mode_stop_cpu_stage2_end
real_mode_stop_cpu_stage2_end:
	nop

	SET_SIZE(real_mode_stop_cpu_stage2)

#endif	/* __lint */


After de-linting:

 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 *
 * Copyright 2019 Joyent, Inc.
 */

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/x86_archext.h>

#include <sys/segments.h>
#include "assym.h"
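
/*
 * With the __lint stubs gone, <sys/segments.h> and "assym.h" no longer
 * need to be guarded by #if !defined(__lint).
 */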

/*
 *	Our assumptions:
 *		- We are running in real mode.
 *		- Interrupts are disabled.
 *		- Selectors are equal (cs == ds == ss) for all real mode code
 *		- The GDT, IDT, ktss and page directory have been built for us
 *
 *	Our actions:
 *	Start CPU:
 *		- We start using our GDT by loading correct values in the
 *		  selector registers (cs=KCS_SEL, ds=es=ss=KDS_SEL, fs=KFS_SEL,
 *		  gs=KGS_SEL).
 *		- We change over to using our IDT.
 *		- We load the default LDT into the hardware LDT register.
 *		- We load the default TSS into the hardware task register.
 *		- We call mp_startup(void) indirectly through the thread's T_PC.
 *	Stop CPU:
 *		- Put the CPU into a halted state with interrupts disabled.
 *
 */
	ENTRY_NP(real_mode_start_cpu)

	/*
	 * NOTE:  The GNU assembler automatically does the right thing to
	 *	  generate data size operand prefixes based on the code size
	 *	  generation mode (e.g. .code16, .code32, .code64), so such
	 *	  prefixes need not be written on instructions EXCEPT in the
	 *	  case of address prefixes, for references that are not
	 *	  automatically of the default address size.
	 */
	.code16
	cli
	movw		%cs, %ax
	movw		%ax, %ds	/* load cs into ds */
	movw		%ax, %ss	/* and into ss */

	/*
	 * Helps in debugging by giving us the fault address.
	 *
	 * Remember to patch a hlt (0xf4) at cmntrap to get a good stack.
	 */
	movl		$0xffc, %esp
	movl		%cr0, %eax

	/*
	 * Enable protected-mode, write protect, and alignment mask
	 */
	orl		$(CR0_PE|CR0_WP|CR0_AM), %eax
	movl		%eax, %cr0

	/*
	 * Do a jmp immediately after writing to cr0 when enabling protected
	 * mode to clear the real mode prefetch queue (per Intel's docs)
	 */
	jmp		pestart

pestart:
	/*
	 * 16-bit protected mode is now active, so prepare to turn on long
	 * mode.
	 */


	/*
	 * Add any initial cr4 bits
	 */
	movl		%cr4, %eax
	addr32 orl	CR4OFF, %eax

	/*
	 * Enable PAE mode (CR4.PAE)
	 */
	orl		$CR4_PAE, %eax
	movl		%eax, %cr4

	/*
	 * Point cr3 to the 64-bit long mode page tables.
	 *
	 * Note that these MUST exist in 32-bit space, as we don't have
	 * a way to load %cr3 with a 64-bit base address for the page tables
	 * until the CPU is actually executing in 64-bit long mode.
	 */
	addr32 movl	CR3OFF, %eax
	movl		%eax, %cr3


	/*
	 * Before going any further, enable usage of the page table NX bit if
	 * that's how our page tables are set up.
	 */
	bt	$X86FSET_NX, x86_featureset(%rip)
	jnc	1f
	movl	$MSR_AMD_EFER, %ecx
	rdmsr
	orl	$AMD_EFER_NXE, %eax
	wrmsr
1:

	/*
	 * Complete the rest of the setup and call mp_startup().
	 */
	movq	%gs:CPU_THREAD, %rax	/* get thread ptr */
	movq	T_PC(%rax), %rax
	INDIRECT_CALL_REG(rax)		/* call mp_startup_boot */
	/* not reached */
	int	$20			/* whoops, returned somehow! */

	SET_SIZE(real_mode_start_cpu)

	ENTRY_NP(real_mode_stop_cpu_stage1)

#if !defined(__GNUC_AS__)

	/*
	 * For vulcan as we need to do a .code32 and mentally invert the
	 * meaning of the addr16 and data16 prefixes to get 32-bit access when
	 * generating code to be executed in 16-bit mode (sigh...)
	 */
	.code32
	cli
	movw		%cs, %ax
	movw		%ax, %ds	/* load cs into ds */
	movw		%ax, %ss	/* and into ss */

	/*
	 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
	 */
	movw		$CPUHALTCODEOFF, %ax
	.byte		0xff, 0xe0	/* jmp *%ax */


#else	/* __GNUC_AS__ */

	.code16
	cli
	movw		%cs, %ax
	movw		%ax, %ds	/* load cs into ds */
	movw		%ax, %ss	/* and into ss */

	/*
	 * Jump to the stage 2 code in the rm_platter_va->rm_cpu_halt_code
	 */
	movw		$CPUHALTCODEOFF, %ax
	jmp		*%ax

#endif	/* !__GNUC_AS__ */

	.globl real_mode_stop_cpu_stage1_end
real_mode_stop_cpu_stage1_end:
	nop

	SET_SIZE(real_mode_stop_cpu_stage1)

	ENTRY_NP(real_mode_stop_cpu_stage2)

	movw		$0xdead, %ax
	movw		%ax, CPUHALTEDOFF

real_mode_stop_cpu_loop:
	/*
	 * Put the CPU into a halted state.
	 * Only INIT, SMI, or NMI can break the loop.
	 */
	hlt
	jmp		real_mode_stop_cpu_loop

	.globl real_mode_stop_cpu_stage2_end
real_mode_stop_cpu_stage2_end:
	nop

	SET_SIZE(real_mode_stop_cpu_stage2)