8956 Implement KPTI
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Robert Mustacchi <rm@joyent.com>


  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
  24  */
  25 /*
  26  * Copyright (c) 2010, Intel Corporation.
  27  * All rights reserved.
  28  */
  29 /*
  30  * Copyright 2016 Joyent, Inc.
  31  * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
  32  */
  33 
  34 #include <sys/types.h>
  35 #include <sys/thread.h>
  36 #include <sys/cpuvar.h>
  37 #include <sys/cpu.h>
  38 #include <sys/t_lock.h>
  39 #include <sys/param.h>
  40 #include <sys/proc.h>
  41 #include <sys/disp.h>
  42 #include <sys/class.h>
  43 #include <sys/cmn_err.h>
  44 #include <sys/debug.h>
  45 #include <sys/note.h>
  46 #include <sys/asm_linkage.h>
  47 #include <sys/x_call.h>
  48 #include <sys/systm.h>
  49 #include <sys/var.h>
  50 #include <sys/vtrace.h>


  63 #include <sys/cpc_impl.h>
  64 #include <sys/pg.h>
  65 #include <sys/cmt.h>
  66 #include <sys/dtrace.h>
  67 #include <sys/archsystm.h>
  68 #include <sys/fp.h>
  69 #include <sys/reboot.h>
  70 #include <sys/kdi_machimpl.h>
  71 #include <vm/hat_i86.h>
  72 #include <vm/vm_dep.h>
  73 #include <sys/memnode.h>
  74 #include <sys/pci_cfgspace.h>
  75 #include <sys/mach_mmu.h>
  76 #include <sys/sysmacros.h>
  77 #if defined(__xpv)
  78 #include <sys/hypervisor.h>
  79 #endif
  80 #include <sys/cpu_module.h>
  81 #include <sys/ontrap.h>
  82 
  83 struct cpu      cpus[1];                        /* CPU data */
  84 struct cpu      *cpu[NCPU] = {&cpus[0]};    /* pointers to all CPUs */
  85 struct cpu      *cpu_free_list;                 /* list for released CPUs */
  86 cpu_core_t      cpu_core[NCPU];                 /* cpu_core structures */
  87 
  88 #define cpu_next_free   cpu_prev
  89 
  90 /*
  91  * Useful for disabling MP bring-up on an MP-capable system.
  92  */
  93 int use_mp = 1;
  94 
  95 /*
  96  * Set by a PSM to indicate which cpus
  97  * are present on the system.
  98  */
  99 cpuset_t mp_cpus;
 100 
 101 /*
 102  * This variable is used by the hat layer to decide whether or not
 103  * critical sections are needed to prevent race conditions.  For sun4m,
 104  * this variable is set once enough MP initialization has been done in
 105  * order to allow cross calls.
 106  */


 151         /*
 152          * If called for the BSP, cp is equal to current CPU.
 153          * For non-BSPs, cpuid info of cp is not ready yet, so use cpuid info
 154          * of current CPU as default values for cpu_idstr and cpu_brandstr.
 155          * They will be corrected in mp_startup_common() after cpuid_pass1()
 156          * has been invoked on target CPU.
 157          */
 158         (void) cpuid_getidstr(CPU, cp->cpu_idstr, CPU_IDSTRLEN);
 159         (void) cpuid_getbrandstr(CPU, cp->cpu_brandstr, CPU_IDSTRLEN);
 160 }
 161 
 162 /*
 163  * Configure syscall support on this CPU.
 164  */
 165 /*ARGSUSED*/
 166 void
 167 init_cpu_syscall(struct cpu *cp)
 168 {
 169         kpreempt_disable();
 170 
 171 #if defined(__amd64)
 172         if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
 173             is_x86_feature(x86_featureset, X86FSET_ASYSC)) {
 174                 uint64_t flags;
 175 
 176 #if !defined(__lint)
 177                 /*
 178                  * The syscall instruction imposes a certain ordering on
 179                  * segment selectors, so we double-check that ordering
 180                  * here.
 181                  */
 182                 ASSERT(KDS_SEL == KCS_SEL + 8);
 183                 ASSERT(UDS_SEL == U32CS_SEL + 8);
 184                 ASSERT(UCS_SEL == U32CS_SEL + 16);
 185 #endif
 186                 /*
 187                  * Turn syscall/sysret extensions on.
 188                  */
 189                 cpu_asysc_enable();
 190 
 191                 /*
 192                  * Program the magic registers ..
 193                  */
 194                 wrmsr(MSR_AMD_STAR,
 195                     ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) << 32);
 196                 wrmsr(MSR_AMD_LSTAR, (uint64_t)(uintptr_t)sys_syscall);
 197                 wrmsr(MSR_AMD_CSTAR, (uint64_t)(uintptr_t)sys_syscall32);
 198 
 199                 /*
 200                  * This list of flags is masked off the incoming
 201                  * %rfl when we enter the kernel.
 202                  */
 203                 flags = PS_IE | PS_T;
 204                 if (is_x86_feature(x86_featureset, X86FSET_SMAP) == B_TRUE)
 205                         flags |= PS_ACHK;
 206                 wrmsr(MSR_AMD_SFMASK, flags);
 207         }
 208 #endif
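The selector ordering asserted above is imposed by the SYSCALL/SYSRET hardware: SYSCALL loads %cs from STAR[47:32] and %ss from that value plus 8, while a 64-bit SYSRET loads %cs from STAR[63:48] plus 16 and %ss from STAR[63:48] plus 8, forcing RPL 3 on return. A minimal userland sketch of how the STAR value written above decomposes; the selector values are hypothetical stand-ins, not the real ones from sys/segments.h:

    #include <stdint.h>
    #include <stdio.h>

    #define KCS_SEL_X       0x28U   /* hypothetical kernel %cs */
    #define U32CS_SEL_X     0x38U   /* hypothetical 32-bit user %cs */

    int
    main(void)
    {
            uint64_t star = ((uint64_t)(U32CS_SEL_X << 16 | KCS_SEL_X)) << 32;

            /* SYSCALL: %cs = STAR[47:32], %ss = %cs + 8 */
            printf("syscall cs=%#x ss=%#x\n",
                (unsigned)((star >> 32) & 0xffff),
                (unsigned)(((star >> 32) & 0xffff) + 8));
            /* 64-bit SYSRET: %cs = STAR[63:48] + 16, %ss = STAR[63:48] + 8 */
            printf("sysret  cs=%#x ss=%#x\n",
                (unsigned)(((star >> 48) & 0xffff) + 16),
                (unsigned)(((star >> 48) & 0xffff) + 8));
            return (0);
    }

This is exactly why the ASSERTs demand KDS_SEL == KCS_SEL + 8, UDS_SEL == U32CS_SEL + 8, and UCS_SEL == U32CS_SEL + 16: the hardware computes those neighboring selectors itself.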
 209 
 210         /*
 211          * On 32-bit kernels, we use sysenter/sysexit because it's too
 212          * hard to use syscall/sysret, and it is more portable anyway.
 213          *
 214          * On 64-bit kernels on Nocona machines, the 32-bit syscall
 215          * variant isn't available to 32-bit applications, but sysenter is.
 216          */
 217         if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
 218             is_x86_feature(x86_featureset, X86FSET_SEP)) {
 219 
 220 #if !defined(__lint)
 221                 /*
 222                  * The sysenter instruction imposes a certain ordering on
 223                  * segment selectors, so we double-check that ordering
 224                  * here. See "sysenter" in Intel document 245471-012, "IA-32
 225                  * Intel Architecture Software Developer's Manual Volume 2:
 226                  * Instruction Set Reference"
 227                  */
 228                 ASSERT(KDS_SEL == KCS_SEL + 8);
 229 
 230                 ASSERT32(UCS_SEL == ((KCS_SEL + 16) | 3));
 231                 ASSERT32(UDS_SEL == UCS_SEL + 8);
 232 
 233                 ASSERT64(U32CS_SEL == ((KCS_SEL + 16) | 3));
 234                 ASSERT64(UDS_SEL == U32CS_SEL + 8);
 235 #endif
 236 
 237                 cpu_sep_enable();
 238 
 239                 /*
 240          * resume() sets this value to the base of the thread's stack
 241                  * via a context handler.
 242                  */
 243                 wrmsr(MSR_INTC_SEP_ESP, 0);
 244                 wrmsr(MSR_INTC_SEP_EIP, (uint64_t)(uintptr_t)sys_sysenter);
 245         }
 246 
 247         kpreempt_enable();
 248 }
 249 
 250 #if !defined(__xpv)
 251 /*
 252  * Configure per-cpu ID GDT
 253  */
 254 static void
 255 init_cpu_id_gdt(struct cpu *cp)
 256 {
 257         /* Write cpu_id into limit field of GDT for usermode retrieval */
 258 #if defined(__amd64)
 259         set_usegd(&cp->cpu_gdt[GDT_CPUID], SDP_SHORT, NULL, cp->cpu_id,
 260             SDT_MEMRODA, SEL_UPL, SDP_BYTES, SDP_OP32);
 261 #elif defined(__i386)
 262         set_usegd(&cp->cpu_gdt[GDT_CPUID], NULL, cp->cpu_id, SDT_MEMRODA,
 263             SEL_UPL, SDP_BYTES, SDP_OP32);
 264 #endif
 265 }
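The limit field written here is what usermode reads back: a process can issue lsl against this selector and receive the segment limit, i.e. the id of the CPU it is currently running on, with no system call. A sketch assuming a hypothetical selector value (the real GDT_CPUID slot and selector come from sys/segments.h):

    #include <stdint.h>

    #define CPUID_SEL_X     ((16 << 3) | 3) /* hypothetical: GDT slot 16, RPL 3 */

    static inline uint32_t
    my_cpu_id(void)
    {
            uint32_t id;

            /* lsl loads the descriptor's limit -- here, the cpu_id */
            __asm__ __volatile__("lsl %1, %0" : "=r" (id) : "r" (CPUID_SEL_X));
            return (id);
    }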


 401         setup_vaddr_for_ppcopy(cp);
 402 
 403         /*
 404          * Allocate page for new GDT and initialize from current GDT.
 405          */
 406 #if !defined(__lint)
 407         ASSERT((sizeof (*cp->cpu_gdt) * NGDT) <= PAGESIZE);
 408 #endif
 409         cp->cpu_gdt = kmem_zalloc(PAGESIZE, KM_SLEEP);
 410         bcopy(CPU->cpu_gdt, cp->cpu_gdt, (sizeof (*cp->cpu_gdt) * NGDT));
 411 
 412 #if defined(__i386)
 413         /*
 414          * setup kernel %gs.
 415          */
 416         set_usegd(&cp->cpu_gdt[GDT_GS], cp, sizeof (struct cpu) -1, SDT_MEMRWA,
 417             SEL_KPL, 0, 1);
 418 #endif
 419 
 420         /*
 421          * If we have more than one node, each cpu gets a copy of the IDT
 422          * local to its node. If this is a Pentium box, we use cpu 0's
 423          * IDT. cpu 0's IDT has been made read-only to work around the
 424          * cmpxchgl register bug.
 425          */
 426         if (system_hardware.hd_nodes && x86_type != X86_TYPE_P5) {
 427 #if !defined(__lint)
 428                 ASSERT((sizeof (*CPU->cpu_idt) * NIDT) <= PAGESIZE);
 429 #endif
 430                 cp->cpu_idt = kmem_zalloc(PAGESIZE, KM_SLEEP);
 431                 bcopy(CPU->cpu_idt, cp->cpu_idt, PAGESIZE);
 432         } else {
 433                 cp->cpu_idt = CPU->cpu_idt;
 434         }
 435 
 436         /*
 437          * alloc space for cpuid info
 438          */
 439         cpuid_alloc_space(cp);
 440 #if !defined(__xpv)
 441         if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
 442             idle_cpu_prefer_mwait) {
 443                 cp->cpu_m.mcpu_mwait = cpuid_mwait_alloc(cp);
 444                 cp->cpu_m.mcpu_idle_cpu = cpu_idle_mwait;
 445         } else
 446 #endif
 447                 cp->cpu_m.mcpu_idle_cpu = cpu_idle;
 448 
 449         init_cpu_info(cp);
 450 
 451 #if !defined(__xpv)
 452         init_cpu_id_gdt(cp);
 453 #endif
 454 


 552                 kmem_free(cp->cpu_idstr, CPU_IDSTRLEN);
 553                 cp->cpu_idstr = NULL;
 554         }
 555         if (cp->cpu_brandstr) {
 556                 kmem_free(cp->cpu_brandstr, CPU_IDSTRLEN);
 557                 cp->cpu_brandstr = NULL;
 558         }
 559 
 560 #if !defined(__xpv)
 561         if (cp->cpu_m.mcpu_mwait != NULL) {
 562                 cpuid_mwait_free(cp);
 563                 cp->cpu_m.mcpu_mwait = NULL;
 564         }
 565 #endif
 566         cpuid_free_space(cp);
 567 
 568         if (cp->cpu_idt != CPU->cpu_idt)
 569                 kmem_free(cp->cpu_idt, PAGESIZE);
 570         cp->cpu_idt = NULL;
 571 
 572         kmem_free(cp->cpu_gdt, PAGESIZE);
 573         cp->cpu_gdt = NULL;
 574 
 575         if (cp->cpu_supp_freqs != NULL) {
 576                 size_t len = strlen(cp->cpu_supp_freqs) + 1;
 577                 kmem_free(cp->cpu_supp_freqs, len);
 578                 cp->cpu_supp_freqs = NULL;
 579         }
 580 
 581         teardown_vaddr_for_ppcopy(cp);
 582 
 583         kcpc_hw_fini(cp);
 584 
 585         cp->cpu_dispthread = NULL;
 586         cp->cpu_thread = NULL;       /* discarded by cpu_destroy_bound_threads() */
 587 
 588         cpu_vm_data_destroy(cp);
 589 
 590         xc_fini_cpu(cp);
 591         disp_cpu_fini(cp);


1766          * We could be more sophisticated here, and just mark the CPU
1767          * as "faulted" but at this point we'll opt for the easier
1768          * answer of dying horribly.  Provided the boot cpu is ok,
1769          * the system can be recovered by booting with use_mp set to zero.
1770          */
1771         if (workaround_errata(cp) != 0)
1772                 panic("critical workaround(s) missing for cpu%d", cp->cpu_id);
1773 
1774         /*
1775          * We can touch cpu_flags here without acquiring the cpu_lock
1776          * because the cpu_lock is held by the control CPU which is running
1777          * mp_start_cpu_common().
1778          * We need to clear the CPU_QUIESCED flag before calling any
1779          * function that may cause a thread context switch, such as
1780          * kmem_alloc(). The idle thread checks for CPU_QUIESCED and loops
1781          * forever if it is set, so the startup thread would have no chance
1782          * to switch back if it were switched away with CPU_QUIESCED set.
1783          */
1784         cp->cpu_flags &= ~(CPU_POWEROFF | CPU_QUIESCED);
1785 
1786         /*
1787          * Setup this processor for XSAVE.
1788          */
1789         if (fp_save_mech == FP_XSAVE) {
1790                 xsave_setup_msr(cp);
1791         }
1792 
1793         cpuid_pass2(cp);
1794         cpuid_pass3(cp);
1795         cpuid_pass4(cp, NULL);
1796 
1797         /*
1798          * Correct cpu_idstr and cpu_brandstr on target CPU after
1799          * cpuid_pass1() is done.
1800          */
1801         (void) cpuid_getidstr(cp, cp->cpu_idstr, CPU_IDSTRLEN);
1802         (void) cpuid_getbrandstr(cp, cp->cpu_brandstr, CPU_IDSTRLEN);
1803 
1804         cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_EXISTS;
1805 




  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
  24  */
  25 /*
  26  * Copyright (c) 2010, Intel Corporation.
  27  * All rights reserved.
  28  */
  29 /*
  30  * Copyright 2018 Joyent, Inc.
  31  * Copyright 2013 Nexenta Systems, Inc.  All rights reserved.
  32  */
  33 
  34 #include <sys/types.h>
  35 #include <sys/thread.h>
  36 #include <sys/cpuvar.h>
  37 #include <sys/cpu.h>
  38 #include <sys/t_lock.h>
  39 #include <sys/param.h>
  40 #include <sys/proc.h>
  41 #include <sys/disp.h>
  42 #include <sys/class.h>
  43 #include <sys/cmn_err.h>
  44 #include <sys/debug.h>
  45 #include <sys/note.h>
  46 #include <sys/asm_linkage.h>
  47 #include <sys/x_call.h>
  48 #include <sys/systm.h>
  49 #include <sys/var.h>
  50 #include <sys/vtrace.h>


  63 #include <sys/cpc_impl.h>
  64 #include <sys/pg.h>
  65 #include <sys/cmt.h>
  66 #include <sys/dtrace.h>
  67 #include <sys/archsystm.h>
  68 #include <sys/fp.h>
  69 #include <sys/reboot.h>
  70 #include <sys/kdi_machimpl.h>
  71 #include <vm/hat_i86.h>
  72 #include <vm/vm_dep.h>
  73 #include <sys/memnode.h>
  74 #include <sys/pci_cfgspace.h>
  75 #include <sys/mach_mmu.h>
  76 #include <sys/sysmacros.h>
  77 #if defined(__xpv)
  78 #include <sys/hypervisor.h>
  79 #endif
  80 #include <sys/cpu_module.h>
  81 #include <sys/ontrap.h>
  82 
  83 struct cpu      cpus[1] __aligned(MMU_PAGESIZE);
  84 struct cpu      *cpu[NCPU] = {&cpus[0]};
  85 struct cpu      *cpu_free_list;
  86 cpu_core_t      cpu_core[NCPU];
  87 
  88 #define cpu_next_free   cpu_prev
  89 
  90 /*
  91  * Useful for disabling MP bring-up on an MP-capable system.
  92  */
  93 int use_mp = 1;
  94 
  95 /*
  96  * Set by a PSM to indicate which cpus
  97  * are present on the system.
  98  */
  99 cpuset_t mp_cpus;
 100 
 101 /*
 102  * This variable is used by the hat layer to decide whether or not
 103  * critical sections are needed to prevent race conditions.  For sun4m,
 104  * this variable is set once enough MP initialization has been done in
 105  * order to allow cross calls.
 106  */


 151         /*
 152          * If called for the BSP, cp is equal to current CPU.
 153          * For non-BSPs, cpuid info of cp is not ready yet, so use cpuid info
 154          * of current CPU as default values for cpu_idstr and cpu_brandstr.
 155          * They will be corrected in mp_startup_common() after cpuid_pass1()
 156          * has been invoked on target CPU.
 157          */
 158         (void) cpuid_getidstr(CPU, cp->cpu_idstr, CPU_IDSTRLEN);
 159         (void) cpuid_getbrandstr(CPU, cp->cpu_brandstr, CPU_IDSTRLEN);
 160 }
 161 
 162 /*
 163  * Configure syscall support on this CPU.
 164  */
 165 /*ARGSUSED*/
 166 void
 167 init_cpu_syscall(struct cpu *cp)
 168 {
 169         kpreempt_disable();
 170 
 171         if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
 172             is_x86_feature(x86_featureset, X86FSET_ASYSC)) {
 173                 uint64_t flags;
 174 
 175 #if !defined(__xpv)
 176                 /*
 177                  * The syscall instruction imposes a certain ordering on
 178                  * segment selectors, so we double-check that ordering
 179                  * here.
 180                  */
 181                 CTASSERT(KDS_SEL == KCS_SEL + 8);
 182                 CTASSERT(UDS_SEL == U32CS_SEL + 8);
 183                 CTASSERT(UCS_SEL == U32CS_SEL + 16);
 184 #endif
 185 
 186                 /*
 187                  * Turn syscall/sysret extensions on.
 188                  */
 189                 cpu_asysc_enable();
 190 
 191                 /*
 192                  * Program the magic registers ..
 193                  */
 194                 wrmsr(MSR_AMD_STAR,
 195                     ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) << 32);
 196                 if (kpti_enable == 1) {
 197                         wrmsr(MSR_AMD_LSTAR,
 198                             (uint64_t)(uintptr_t)tr_sys_syscall);
 199                         wrmsr(MSR_AMD_CSTAR,
 200                             (uint64_t)(uintptr_t)tr_sys_syscall32);
 201                 } else {
 202                         wrmsr(MSR_AMD_LSTAR,
 203                             (uint64_t)(uintptr_t)sys_syscall);
 204                         wrmsr(MSR_AMD_CSTAR,
 205                             (uint64_t)(uintptr_t)sys_syscall32);
 206                 }
 207 
 208                 /*
 209                  * This list of flags is masked off the incoming
 210                  * %rfl when we enter the kernel.
 211                  */
 212                 flags = PS_IE | PS_T;
 213                 if (is_x86_feature(x86_featureset, X86FSET_SMAP) == B_TRUE)
 214                         flags |= PS_ACHK;
 215                 wrmsr(MSR_AMD_SFMASK, flags);
 216         }
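When kpti_enable is set, LSTAR/CSTAR must point at the trampoline variants because at SYSCALL entry %cr3 still holds the user page table, which maps little beyond the trampoline pages themselves; tr_sys_syscall's first job is to switch to the kernel page table before reaching the normal handler. A heavily simplified sketch of that first step (the real trampoline is assembly and also handles the stack and %gs; kpti_kernel_cr3 is a hypothetical stand-in for the per-CPU value):

    #include <stdint.h>

    /* Hypothetical stand-in for the per-CPU kernel page-table root. */
    static uint64_t kpti_kernel_cr3;

    /*
     * Sketch only: the first action of a KPTI entry trampoline is to
     * switch to the kernel address space.
     */
    static inline void
    kpti_enter_kernel_as(void)
    {
            __asm__ __volatile__("movq %0, %%cr3" : : "r" (kpti_kernel_cr3)
                : "memory");
    }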
 217 
 218         /*
 219          * On 64-bit kernels on Nocona machines, the 32-bit syscall
 220          * variant isn't available to 32-bit applications, but sysenter is.
 221          */
 222         if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
 223             is_x86_feature(x86_featureset, X86FSET_SEP)) {
 224 
 225 #if !defined(__xpv)
 226                 /*
 227                  * The sysenter instruction imposes a certain ordering on
 228                  * segment selectors, so we double-check that ordering
 229                  * here. See "sysenter" in Intel document 245471-012, "IA-32
 230                  * Intel Architecture Software Developer's Manual Volume 2:
 231                  * Instruction Set Reference"
 232                  */
 233                 CTASSERT(KDS_SEL == KCS_SEL + 8);
 234 
 235                 CTASSERT(U32CS_SEL == ((KCS_SEL + 16) | 3));
 236                 CTASSERT(UDS_SEL == U32CS_SEL + 8);
 237 #endif
 238 
 239                 cpu_sep_enable();
 240 
 241                 /*
 242          * resume() sets this value to the base of the thread's stack
 243                  * via a context handler.
 244                  */
 245                 wrmsr(MSR_INTC_SEP_ESP, 0);
 246 
 247                 if (kpti_enable == 1) {
 248                         wrmsr(MSR_INTC_SEP_EIP,
 249                             (uint64_t)(uintptr_t)tr_sys_sysenter);
 250                 } else {
 251                         wrmsr(MSR_INTC_SEP_EIP,
 252                             (uint64_t)(uintptr_t)sys_sysenter);
 253                 }
 254         }
 255 
 256         kpreempt_enable();
 257 }
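The CTASSERTs above mirror what SYSENTER/SYSEXIT compute in hardware: SYSENTER loads %cs from IA32_SYSENTER_CS and %ss from %cs + 8, while a 32-bit SYSEXIT sets %cs to IA32_SYSENTER_CS + 16 with RPL 3 and %ss to that %cs + 8. The same arithmetic as a compile-time sketch, with hypothetical selector values:

    #include <assert.h>

    #define KCS_SEL_X       0x28    /* hypothetical kernel %cs */
    #define KDS_SEL_X       0x30    /* hypothetical kernel %ss */
    #define U32CS_SEL_X     0x3b    /* hypothetical 32-bit user %cs */
    #define UDS_SEL_X       0x43    /* hypothetical user %ss */

    /* SYSENTER: %cs = IA32_SYSENTER_CS, %ss = %cs + 8 */
    static_assert(KDS_SEL_X == KCS_SEL_X + 8, "sysenter ss layout");
    /* 32-bit SYSEXIT: %cs = (IA32_SYSENTER_CS + 16) | 3, %ss = %cs + 8 */
    static_assert(U32CS_SEL_X == ((KCS_SEL_X + 16) | 3), "sysexit cs layout");
    static_assert(UDS_SEL_X == U32CS_SEL_X + 8, "sysexit ss layout");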
 258 
 259 #if !defined(__xpv)
 260 /*
 261  * Configure per-cpu ID GDT
 262  */
 263 static void
 264 init_cpu_id_gdt(struct cpu *cp)
 265 {
 266         /* Write cpu_id into limit field of GDT for usermode retrieval */
 267 #if defined(__amd64)
 268         set_usegd(&cp->cpu_gdt[GDT_CPUID], SDP_SHORT, NULL, cp->cpu_id,
 269             SDT_MEMRODA, SEL_UPL, SDP_BYTES, SDP_OP32);
 270 #elif defined(__i386)
 271         set_usegd(&cp->cpu_gdt[GDT_CPUID], NULL, cp->cpu_id, SDT_MEMRODA,
 272             SEL_UPL, SDP_BYTES, SDP_OP32);
 273 #endif
 274 }


 410         setup_vaddr_for_ppcopy(cp);
 411 
 412         /*
 413          * Allocate page for new GDT and initialize from current GDT.
 414          */
 415 #if !defined(__lint)
 416         ASSERT((sizeof (*cp->cpu_gdt) * NGDT) <= PAGESIZE);
 417 #endif
 418         cp->cpu_gdt = kmem_zalloc(PAGESIZE, KM_SLEEP);
 419         bcopy(CPU->cpu_gdt, cp->cpu_gdt, (sizeof (*cp->cpu_gdt) * NGDT));
 420 
 421 #if defined(__i386)
 422         /*
 423          * setup kernel %gs.
 424          */
 425         set_usegd(&cp->cpu_gdt[GDT_GS], cp, sizeof (struct cpu) -1, SDT_MEMRWA,
 426             SEL_KPL, 0, 1);
 427 #endif
 428 
 429         /*
 430          * Allocate pages for the CPU LDT.
 431          */
 432         cp->cpu_m.mcpu_ldt = kmem_zalloc(LDT_CPU_SIZE, KM_SLEEP);
 433         cp->cpu_m.mcpu_ldt_len = 0;
 434 
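Under KPTI the LDT must be mappable into the user-visible page tables, which is why it is now allocated as whole pages of its own instead of living inside other kernel data. A sketch of the kind of size computation involved; the entry count and descriptor size are hypothetical, and the real LDT_CPU_SIZE definition lives in the MMU headers:

    /*
     * P2ROUNDUP as in sys/sysmacros.h: round x up to the next multiple
     * of align, where align is a power of two.
     */
    #define P2ROUNDUP_X(x, align)   (-(-(x) & -(align)))

    #define PAGESIZE_X      4096UL
    #define MAXNLDT_X       8192UL  /* hypothetical maximum LDT entries */
    #define DESC_SIZE_X     8UL     /* hypothetical descriptor size, bytes */

    /* Whole pages, so the LDT can be mapped for user-mode visibility. */
    #define LDT_CPU_SIZE_X  P2ROUNDUP_X(MAXNLDT_X * DESC_SIZE_X, PAGESIZE_X)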
 435         /*
 436          * Allocate a per-CPU IDT and initialize it from the currently
 437          * running CPU's IDT.
 438          */
 439 #if !defined(__lint)
 440         ASSERT((sizeof (*CPU->cpu_idt) * NIDT) <= PAGESIZE);
 441 #endif
 442         cp->cpu_idt = kmem_alloc(PAGESIZE, KM_SLEEP);
 443         bcopy(CPU->cpu_idt, cp->cpu_idt, PAGESIZE);
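Every CPU now gets a private IDT page unconditionally; the old code shared cpu0's IDT except on multi-node systems, which no longer fits KPTI's per-CPU page mappings. For illustration only, a hypothetical sketch of making a copied IDT live with lidt (the pseudo-descriptor type is defined locally here; the kernel uses its own equivalent):

    #include <stddef.h>
    #include <stdint.h>

    /* lidt pseudo-descriptor: 16-bit limit then 64-bit base, packed. */
    typedef struct {
            uint16_t dtr_limit;
            uint64_t dtr_base;
    } __attribute__((packed)) idt_ptr_x_t;

    /* Sketch: point the current CPU at its private IDT copy. */
    static inline void
    load_idt_x(void *idt_base, size_t idt_bytes)
    {
            idt_ptr_x_t p = {
                    .dtr_limit = (uint16_t)(idt_bytes - 1),
                    .dtr_base = (uint64_t)(uintptr_t)idt_base,
            };

            __asm__ __volatile__("lidt %0" : : "m" (p));
    }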
 444 
 445         /*
 446          * alloc space for cpuid info
 447          */
 448         cpuid_alloc_space(cp);
 449 #if !defined(__xpv)
 450         if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
 451             idle_cpu_prefer_mwait) {
 452                 cp->cpu_m.mcpu_mwait = cpuid_mwait_alloc(cp);
 453                 cp->cpu_m.mcpu_idle_cpu = cpu_idle_mwait;
 454         } else
 455 #endif
 456                 cp->cpu_m.mcpu_idle_cpu = cpu_idle;
 457 
 458         init_cpu_info(cp);
 459 
 460 #if !defined(__xpv)
 461         init_cpu_id_gdt(cp);
 462 #endif
 463 


 561                 kmem_free(cp->cpu_idstr, CPU_IDSTRLEN);
 562                 cp->cpu_idstr = NULL;
 563         }
 564         if (cp->cpu_brandstr) {
 565                 kmem_free(cp->cpu_brandstr, CPU_IDSTRLEN);
 566                 cp->cpu_brandstr = NULL;
 567         }
 568 
 569 #if !defined(__xpv)
 570         if (cp->cpu_m.mcpu_mwait != NULL) {
 571                 cpuid_mwait_free(cp);
 572                 cp->cpu_m.mcpu_mwait = NULL;
 573         }
 574 #endif
 575         cpuid_free_space(cp);
 576 
 577         if (cp->cpu_idt != CPU->cpu_idt)
 578                 kmem_free(cp->cpu_idt, PAGESIZE);
 579         cp->cpu_idt = NULL;
 580 
 581         kmem_free(cp->cpu_m.mcpu_ldt, LDT_CPU_SIZE);
 582         cp->cpu_m.mcpu_ldt = NULL;
 583         cp->cpu_m.mcpu_ldt_len = 0;
 584 
 585         kmem_free(cp->cpu_gdt, PAGESIZE);
 586         cp->cpu_gdt = NULL;
 587 
 588         if (cp->cpu_supp_freqs != NULL) {
 589                 size_t len = strlen(cp->cpu_supp_freqs) + 1;
 590                 kmem_free(cp->cpu_supp_freqs, len);
 591                 cp->cpu_supp_freqs = NULL;
 592         }
 593 
 594         teardown_vaddr_for_ppcopy(cp);
 595 
 596         kcpc_hw_fini(cp);
 597 
 598         cp->cpu_dispthread = NULL;
 599         cp->cpu_thread = NULL;       /* discarded by cpu_destroy_bound_threads() */
 600 
 601         cpu_vm_data_destroy(cp);
 602 
 603         xc_fini_cpu(cp);
 604         disp_cpu_fini(cp);


1779          * We could be more sophisticated here, and just mark the CPU
1780          * as "faulted" but at this point we'll opt for the easier
1781          * answer of dying horribly.  Provided the boot cpu is ok,
1782          * the system can be recovered by booting with use_mp set to zero.
1783          */
1784         if (workaround_errata(cp) != 0)
1785                 panic("critical workaround(s) missing for cpu%d", cp->cpu_id);
1786 
1787         /*
1788          * We can touch cpu_flags here without acquiring the cpu_lock
1789          * because the cpu_lock is held by the control CPU which is running
1790          * mp_start_cpu_common().
1791          * We need to clear the CPU_QUIESCED flag before calling any
1792          * function that may cause a thread context switch, such as
1793          * kmem_alloc(). The idle thread checks for CPU_QUIESCED and loops
1794          * forever if it is set, so the startup thread would have no chance
1795          * to switch back if it were switched away with CPU_QUIESCED set.
1796          */
1797         cp->cpu_flags &= ~(CPU_POWEROFF | CPU_QUIESCED);
1798 
1799         enable_pcid();
1800 
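enable_pcid() is what keeps KPTI's constant page-table switching affordable: with CR4.PCIDE set, %cr3 bits 11:0 carry a process-context identifier, and setting bit 63 on a %cr3 write asks the CPU to preserve that PCID's TLB entries instead of flushing them. A sketch of composing such %cr3 values; the bit positions are architectural, the PCID numbers hypothetical:

    #include <stdint.h>

    #define PCID_KERNEL_X   1ULL            /* hypothetical kernel PCID */
    #define PCID_USER_X     2ULL            /* hypothetical user PCID */
    #define CR3_NOFLUSH     (1ULL << 63)    /* architectural no-flush hint */

    /*
     * Compose a %cr3 value: page-table root, PCID in bits 11:0, and the
     * no-flush bit so TLB entries tagged with that PCID survive the switch.
     */
    static inline uint64_t
    make_cr3_x(uint64_t pt_root_pa, uint64_t pcid)
    {
            return (CR3_NOFLUSH | (pt_root_pa & ~0xfffULL) | (pcid & 0xfffULL));
    }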
1801         /*
1802          * Setup this processor for XSAVE.
1803          */
1804         if (fp_save_mech == FP_XSAVE) {
1805                 xsave_setup_msr(cp);
1806         }
1807 
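For background, the architectural half of XSAVE setup is turning on CR4.OSXSAVE and then programming XCR0 with the feature mask via xsetbv; xsave_setup_msr() covers the illumos- and CPU-specific pieces beyond that. A hedged sketch of the architectural part, with illustrative constants:

    #include <stdint.h>

    #define CR4_OSXSAVE_X   (1ULL << 18)    /* architectural CR4.OSXSAVE bit */

    /* Sketch: enable XSAVE, then set XCR0 to the features to be managed. */
    static inline void
    xsave_enable_x(uint64_t xcr0)
    {
            uint64_t cr4;

            __asm__ __volatile__("movq %%cr4, %0" : "=r" (cr4));
            cr4 |= CR4_OSXSAVE_X;
            __asm__ __volatile__("movq %0, %%cr4" : : "r" (cr4));

            /* xsetbv: %ecx selects the XCR, %edx:%eax supply the value. */
            __asm__ __volatile__("xsetbv" : : "c" (0U),
                "a" ((uint32_t)xcr0), "d" ((uint32_t)(xcr0 >> 32)));
    }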
1808         cpuid_pass2(cp);
1809         cpuid_pass3(cp);
1810         cpuid_pass4(cp, NULL);
1811 
1812         /*
1813          * Correct cpu_idstr and cpu_brandstr on target CPU after
1814          * cpuid_pass1() is done.
1815          */
1816         (void) cpuid_getidstr(cp, cp->cpu_idstr, CPU_IDSTRLEN);
1817         (void) cpuid_getbrandstr(cp, cp->cpu_brandstr, CPU_IDSTRLEN);
1818 
1819         cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_EXISTS;
1820