/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */
/*
 * Copyright 2018 Joyent, Inc.
 * Copyright 2018 Nexenta Systems, Inc.
 * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
 */

#include <sys/types.h>
#include <sys/thread.h>
#include <sys/cpuvar.h>
#include <sys/cpu.h>
#include <sys/t_lock.h>
#include <sys/param.h>
#include <sys/proc.h>
#include <sys/disp.h>
#include <sys/class.h>
#include <sys/cmn_err.h>
#include <sys/debug.h>
#include <sys/note.h>
#include <sys/asm_linkage.h>
#include <sys/x_call.h>
#include <sys/systm.h>
#include <sys/var.h>
#include <sys/vtrace.h>
#include <vm/hat.h>
#include <vm/as.h>
#include <vm/seg_kmem.h>
#include <vm/seg_kp.h>
#include <sys/segments.h>
#include <sys/kmem.h>
#include <sys/stack.h>
#include <sys/smp_impldefs.h>
#include <sys/x86_archext.h>
#include <sys/machsystm.h>
#include <sys/traptrace.h>
#include <sys/clock.h>
#include <sys/cpc_impl.h>
#include <sys/pg.h>
#include <sys/cmt.h>
#include <sys/dtrace.h>
#include <sys/archsystm.h>
#include <sys/fp.h>
#include <sys/reboot.h>
#include <sys/kdi_machimpl.h>
#include <vm/hat_i86.h>
#include <vm/vm_dep.h>
#include <sys/memnode.h>
#include <sys/pci_cfgspace.h>
#include <sys/mach_mmu.h>
#include <sys/sysmacros.h>
#if defined(__xpv)
#include <sys/hypervisor.h>
#endif
#include <sys/cpu_module.h>
#include <sys/ontrap.h>

struct cpu	cpus[1] __aligned(MMU_PAGESIZE);
struct cpu	*cpu[NCPU] = {&cpus[0]};
struct cpu	*cpu_free_list;
cpu_core_t	cpu_core[NCPU];

#define	cpu_next_free	cpu_prev
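
/*
 * A cpu_t on cpu_free_list is offline and off every dispatcher list, so
 * its cpu_prev link is otherwise unused; the define above recycles that
 * field as the free-list pointer instead of widening struct cpu.
 */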

/*
 * Useful for disabling MP bring-up on an MP capable system.
 */
int use_mp = 1;

/*
 * To be set by a PSM to indicate which CPUs
 * are present on the system.
 */
cpuset_t mp_cpus;

/*
 * This variable is used by the hat layer to decide whether or not
 * critical sections are needed to prevent race conditions.  For sun4m,
 * this variable is set once enough MP initialization has been done in
 * order to allow cross calls.
 */
int flushes_require_xcalls;

cpuset_t cpu_ready_set;		/* initialized in startup() */

static void mp_startup_boot(void);
static void mp_startup_hotplug(void);

static void cpu_sep_enable(void);
static void cpu_sep_disable(void);
static void cpu_asysc_enable(void);
static void cpu_asysc_disable(void);

/*
 * Init CPU info - get CPU type info for processor_info system call.
 */
void
init_cpu_info(struct cpu *cp)
{
	processor_info_t *pi = &cp->cpu_type_info;

	/*
	 * Get clock-frequency property for the CPU.
	 */
	pi->pi_clock = cpu_freq;

	/*
	 * Current frequency in Hz.
	 */
	cp->cpu_curr_clock = cpu_freq_hz;

	/*
	 * Supported frequencies.
	 */
	if (cp->cpu_supp_freqs == NULL) {
		cpu_set_supp_freqs(cp, NULL);
	}

	(void) strcpy(pi->pi_processor_type, "i386");
	if (fpu_exists)
		(void) strcpy(pi->pi_fputypes, "i387 compatible");

	cp->cpu_idstr = kmem_zalloc(CPU_IDSTRLEN, KM_SLEEP);
	cp->cpu_brandstr = kmem_zalloc(CPU_IDSTRLEN, KM_SLEEP);

	/*
	 * If called for the BSP, cp is equal to the current CPU.
	 * For non-BSPs, the cpuid info of cp is not ready yet, so use the
	 * cpuid info of the current CPU as default values for cpu_idstr and
	 * cpu_brandstr.  They will be corrected in mp_startup_common() after
	 * cpuid_pass1() has been invoked on the target CPU.
	 */
	(void) cpuid_getidstr(CPU, cp->cpu_idstr, CPU_IDSTRLEN);
	(void) cpuid_getbrandstr(CPU, cp->cpu_brandstr, CPU_IDSTRLEN);
}
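
/*
 * The processor_info_t filled in above is what userland sees via the
 * processor_info(2) system call.  A minimal usage sketch (illustrative
 * userland code, not part of this file; error handling elided):
 *
 *	#include <sys/processor.h>
 *	#include <stdio.h>
 *
 *	processor_info_t pi;
 *
 *	if (processor_info(0, &pi) == 0)
 *		(void) printf("cpu0: %s at %d MHz\n",
 *		    pi.pi_processor_type, pi.pi_clock);
 */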

/*
 * Configure syscall support on this CPU.
 */
/*ARGSUSED*/
void
init_cpu_syscall(struct cpu *cp)
{
	kpreempt_disable();

	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_ASYSC)) {
		uint64_t flags;

#if !defined(__xpv)
		/*
		 * The syscall instruction imposes a certain ordering on
		 * segment selectors, so we double-check that ordering
		 * here.
		 */
		CTASSERT(KDS_SEL == KCS_SEL + 8);
		CTASSERT(UDS_SEL == U32CS_SEL + 8);
		CTASSERT(UCS_SEL == U32CS_SEL + 16);
#endif

		/*
		 * Turn syscall/sysret extensions on.
		 */
		cpu_asysc_enable();

		/*
		 * Program the magic registers ..
		 */
		wrmsr(MSR_AMD_STAR,
		    ((uint64_t)(U32CS_SEL << 16 | KCS_SEL)) << 32);
		if (kpti_enable == 1) {
			wrmsr(MSR_AMD_LSTAR,
			    (uint64_t)(uintptr_t)tr_sys_syscall);
			wrmsr(MSR_AMD_CSTAR,
			    (uint64_t)(uintptr_t)tr_sys_syscall32);
		} else {
			wrmsr(MSR_AMD_LSTAR,
			    (uint64_t)(uintptr_t)sys_syscall);
			wrmsr(MSR_AMD_CSTAR,
			    (uint64_t)(uintptr_t)sys_syscall32);
		}

		/*
		 * This list of flags is masked off the incoming
		 * %rfl when we enter the kernel.
		 */
		flags = PS_IE | PS_T;
		if (is_x86_feature(x86_featureset, X86FSET_SMAP) == B_TRUE)
			flags |= PS_ACHK;
		wrmsr(MSR_AMD_SFMASK, flags);
	}

	/*
	 * On 64-bit kernels on Nocona machines, the 32-bit syscall
	 * variant isn't available to 32-bit applications, but sysenter is.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_SEP)) {

#if !defined(__xpv)
		/*
		 * The sysenter instruction imposes a certain ordering on
		 * segment selectors, so we double-check that ordering
		 * here.  See "sysenter" in Intel document 245471-012, "IA-32
		 * Intel Architecture Software Developer's Manual Volume 2:
		 * Instruction Set Reference".
		 */
		CTASSERT(KDS_SEL == KCS_SEL + 8);

		CTASSERT(U32CS_SEL == ((KCS_SEL + 16) | 3));
		CTASSERT(UDS_SEL == U32CS_SEL + 8);
#endif

		cpu_sep_enable();

		/*
		 * resume() sets this value to the base of the thread's stack
		 * via a context handler.
		 */
		wrmsr(MSR_INTC_SEP_ESP, 0);

		if (kpti_enable == 1) {
			wrmsr(MSR_INTC_SEP_EIP,
			    (uint64_t)(uintptr_t)tr_sys_sysenter);
		} else {
			wrmsr(MSR_INTC_SEP_EIP,
			    (uint64_t)(uintptr_t)sys_sysenter);
		}
	}

	kpreempt_enable();
}
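
/*
 * Why the CTASSERTs in init_cpu_syscall() hold: on syscall the CPU loads
 * %cs from STAR[47:32] and %ss from STAR[47:32] + 8; on sysret it loads
 * %cs from STAR[63:48] + 16 (64-bit) or STAR[63:48] (32-bit) and %ss
 * from STAR[63:48] + 8.  With STAR programmed as above:
 *
 *	STAR[47:32] = KCS_SEL	=> kernel %cs = KCS_SEL, %ss = KDS_SEL
 *	STAR[63:48] = U32CS_SEL	=> user %ss = UDS_SEL, and user %cs is
 *				   U32CS_SEL (32-bit) or UCS_SEL (64-bit)
 *
 * which is exactly the selector spacing those CTASSERTs pin down.
 */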

#if !defined(__xpv)
/*
 * Configure per-cpu ID GDT
 */
static void
init_cpu_id_gdt(struct cpu *cp)
{
	/* Write cpu_id into limit field of GDT for usermode retrieval */
#if defined(__amd64)
	set_usegd(&cp->cpu_gdt[GDT_CPUID], SDP_SHORT, NULL, cp->cpu_id,
	    SDT_MEMRODA, SEL_UPL, SDP_BYTES, SDP_OP32);
#elif defined(__i386)
	set_usegd(&cp->cpu_gdt[GDT_CPUID], NULL, cp->cpu_id, SDT_MEMRODA,
	    SEL_UPL, SDP_BYTES, SDP_OP32);
#endif
}
#endif /* !defined(__xpv) */
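
/*
 * With the descriptor above in place, usermode can recover the id of the
 * CPU it is currently running on by querying the descriptor's limit with
 * the unprivileged lsl instruction.  A minimal sketch (illustrative
 * userland code; the cpuid_sel argument stands for the GDT_CPUID
 * selector with RPL 3, which is not a constant exported by this file):
 *
 *	static inline uint_t
 *	my_cpu_id(uint16_t cpuid_sel)
 *	{
 *		uintptr_t id;
 *
 *		__asm__ __volatile__("lsl %1, %0"
 *		    : "=r" (id) : "r" ((uintptr_t)cpuid_sel));
 *		return ((uint_t)id);
 *	}
 */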

/*
 * Multiprocessor initialization.
 *
 * Allocate and initialize the cpu structure, TRAPTRACE buffer, and the
 * startup and idle threads for the specified CPU.
 * Parameter boot is true for boot time operations and is false for CPU
 * DR operations.
 */
static struct cpu *
mp_cpu_configure_common(int cpun, boolean_t boot)
{
	struct cpu *cp;
	kthread_id_t tp;
	caddr_t	sp;
	proc_t *procp;
#if !defined(__xpv)
	extern int idle_cpu_prefer_mwait;
	extern void cpu_idle_mwait();
#endif
	extern void idle();
	extern void cpu_idle();

#ifdef TRAPTRACE
	trap_trace_ctl_t *ttc = &trap_trace_ctl[cpun];
#endif

	ASSERT(MUTEX_HELD(&cpu_lock));
	ASSERT(cpun < NCPU && cpu[cpun] == NULL);

	if (cpu_free_list == NULL) {
		cp = kmem_zalloc(sizeof (*cp), KM_SLEEP);
	} else {
		cp = cpu_free_list;
		cpu_free_list = cp->cpu_next_free;
	}

	cp->cpu_m.mcpu_istamp = cpun << 16;

	/* Create per CPU specific threads in the process p0. */
	procp = &p0;

	/*
	 * Initialize the dispatcher first.
	 */
	disp_cpu_init(cp);

	cpu_vm_data_init(cp);

	/*
	 * Allocate and initialize the startup thread for this CPU.
	 * Interrupt and process switch stacks get allocated later
	 * when the CPU starts running.
	 */
	tp = thread_create(NULL, 0, NULL, NULL, 0, procp,
	    TS_STOPPED, maxclsyspri);

	/*
	 * Set state to TS_ONPROC since this thread will start running
	 * as soon as the CPU comes online.
	 *
	 * All the other fields of the thread structure are setup by
	 * thread_create().
	 */
	THREAD_ONPROC(tp, cp);
	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Setup thread to start in mp_startup_common.
	 */
	sp = tp->t_stk;
	tp->t_sp = (uintptr_t)(sp - MINFRAME);
#if defined(__amd64)
	tp->t_sp -= STACK_ENTRY_ALIGN;		/* fake a call */
#endif
	/*
	 * Setup thread start entry point for boot or hotplug.
	 */
	if (boot) {
		tp->t_pc = (uintptr_t)mp_startup_boot;
	} else {
		tp->t_pc = (uintptr_t)mp_startup_hotplug;
	}

	cp->cpu_id = cpun;
	cp->cpu_self = cp;
	cp->cpu_thread = tp;
	cp->cpu_lwp = NULL;
	cp->cpu_dispthread = tp;
	cp->cpu_dispatch_pri = DISP_PRIO(tp);

	/*
	 * cpu_base_spl must be set explicitly here to prevent any blocking
	 * operations in mp_startup_common from causing the spl of the cpu
	 * to drop to 0 (allowing device interrupts before we're ready) in
	 * resume().
	 * cpu_base_spl MUST remain at LOCK_LEVEL until the cpu is CPU_READY.
	 * As an extra bit of security on DEBUG kernels, this is enforced with
	 * an assertion in mp_startup_common() -- before cpu_base_spl is set
	 * to its proper value.
	 */
	cp->cpu_base_spl = ipltospl(LOCK_LEVEL);

	/*
	 * Now, initialize per-CPU idle thread for this CPU.
	 */
	tp = thread_create(NULL, PAGESIZE, idle, NULL, 0, procp, TS_ONPROC, -1);

	cp->cpu_idle_thread = tp;

	tp->t_preempt = 1;
	tp->t_bound_cpu = cp;
	tp->t_affinitycnt = 1;
	tp->t_cpu = cp;
	tp->t_disp_queue = cp->cpu_disp;

	/*
	 * Bootstrap the CPU's PG data
	 */
	pg_cpu_bootstrap(cp);

	/*
	 * Perform CPC initialization on the new CPU.
	 */
	kcpc_hw_init(cp);

	/*
	 * Allocate virtual addresses for cpu_caddr1 and cpu_caddr2
	 * for each CPU.
	 */
	setup_vaddr_for_ppcopy(cp);

	/*
	 * Allocate page for new GDT and initialize from current GDT.
	 */
#if !defined(__lint)
	ASSERT((sizeof (*cp->cpu_gdt) * NGDT) <= PAGESIZE);
#endif
	cp->cpu_gdt = kmem_zalloc(PAGESIZE, KM_SLEEP);
	bcopy(CPU->cpu_gdt, cp->cpu_gdt, (sizeof (*cp->cpu_gdt) * NGDT));

#if defined(__i386)
	/*
	 * setup kernel %gs.
	 */
	set_usegd(&cp->cpu_gdt[GDT_GS], cp, sizeof (struct cpu) - 1,
	    SDT_MEMRWA, SEL_KPL, 0, 1);
#endif

	/*
	 * Allocate pages for the CPU LDT.
	 */
	cp->cpu_m.mcpu_ldt = kmem_zalloc(LDT_CPU_SIZE, KM_SLEEP);
	cp->cpu_m.mcpu_ldt_len = 0;

	/*
	 * Allocate a per-CPU IDT and initialize the new IDT to the currently
	 * running CPU.
	 */
#if !defined(__lint)
	ASSERT((sizeof (*CPU->cpu_idt) * NIDT) <= PAGESIZE);
#endif
	cp->cpu_idt = kmem_alloc(PAGESIZE, KM_SLEEP);
	bcopy(CPU->cpu_idt, cp->cpu_idt, PAGESIZE);

	/*
	 * alloc space for cpuid info
	 */
	cpuid_alloc_space(cp);
#if !defined(__xpv)
	if (is_x86_feature(x86_featureset, X86FSET_MWAIT) &&
	    idle_cpu_prefer_mwait) {
		cp->cpu_m.mcpu_mwait = cpuid_mwait_alloc(cp);
		cp->cpu_m.mcpu_idle_cpu = cpu_idle_mwait;
	} else
#endif
		cp->cpu_m.mcpu_idle_cpu = cpu_idle;

	init_cpu_info(cp);

#if !defined(__xpv)
	init_cpu_id_gdt(cp);
#endif

	/*
	 * alloc space for ucode_info
	 */
	ucode_alloc_space(cp);
	xc_init_cpu(cp);
	hat_cpu_online(cp);

#ifdef TRAPTRACE
	/*
	 * If this is a TRAPTRACE kernel, allocate TRAPTRACE buffers
	 */
	ttc->ttc_first = (uintptr_t)kmem_zalloc(trap_trace_bufsize, KM_SLEEP);
	ttc->ttc_next = ttc->ttc_first;
	ttc->ttc_limit = ttc->ttc_first + trap_trace_bufsize;
#endif

	/*
	 * Record that we have another CPU, and initialize the interrupt
	 * threads for this CPU.
	 */
	cpu_intr_alloc(cp, NINTR_THREADS);

	cp->cpu_flags = CPU_OFFLINE | CPU_QUIESCED | CPU_POWEROFF;
	cpu_set_state(cp);

	/*
	 * Add CPU to list of available CPUs.  It'll be on the active list
	 * after mp_startup_common().
	 */
	cpu_add_unit(cp);

	return (cp);
}

/*
 * Undo what was done in mp_cpu_configure_common
 */
static void
mp_cpu_unconfigure_common(struct cpu *cp, int error)
{
	ASSERT(MUTEX_HELD(&cpu_lock));

	/*
	 * Remove the CPU from the list of available CPUs.
	 */
	cpu_del_unit(cp->cpu_id);

	if (error == ETIMEDOUT) {
		/*
		 * The cpu was started, but never *seemed* to run any
		 * code in the kernel; it's probably off spinning in its
		 * own private world, though with potential references to
		 * our kmem-allocated IDTs and GDTs (for example).
		 *
		 * Worse still, it may actually wake up some time later,
		 * so rather than guess what it might or might not do, we
		 * leave the fundamental data structures intact.
		 */
		cp->cpu_flags = 0;
		return;
	}

	/*
	 * At this point, the only threads bound to this CPU should be
	 * special per-CPU threads: its idle thread, its pause threads,
	 * and its interrupt threads.  Clean these up.
	 */
	cpu_destroy_bound_threads(cp);
	cp->cpu_idle_thread = NULL;

	/*
	 * Free the interrupt stack.
	 */
	segkp_release(segkp,
	    cp->cpu_intr_stack - (INTR_STACK_SIZE - SA(MINFRAME)));
	cp->cpu_intr_stack = NULL;

#ifdef TRAPTRACE
	/*
	 * Discard the trap trace buffer
	 */
	{
		trap_trace_ctl_t *ttc = &trap_trace_ctl[cp->cpu_id];

		kmem_free((void *)ttc->ttc_first, trap_trace_bufsize);
		ttc->ttc_first = NULL;
	}
#endif

	hat_cpu_offline(cp);

	ucode_free_space(cp);

	/* Free CPU ID string and brand string. */
	if (cp->cpu_idstr) {
		kmem_free(cp->cpu_idstr, CPU_IDSTRLEN);
		cp->cpu_idstr = NULL;
	}
	if (cp->cpu_brandstr) {
		kmem_free(cp->cpu_brandstr, CPU_IDSTRLEN);
		cp->cpu_brandstr = NULL;
	}

#if !defined(__xpv)
	if (cp->cpu_m.mcpu_mwait != NULL) {
		cpuid_mwait_free(cp);
		cp->cpu_m.mcpu_mwait = NULL;
	}
#endif
	cpuid_free_space(cp);

	if (cp->cpu_idt != CPU->cpu_idt)
		kmem_free(cp->cpu_idt, PAGESIZE);
	cp->cpu_idt = NULL;

	kmem_free(cp->cpu_m.mcpu_ldt, LDT_CPU_SIZE);
	cp->cpu_m.mcpu_ldt = NULL;
	cp->cpu_m.mcpu_ldt_len = 0;

	kmem_free(cp->cpu_gdt, PAGESIZE);
	cp->cpu_gdt = NULL;

	if (cp->cpu_supp_freqs != NULL) {
		size_t len = strlen(cp->cpu_supp_freqs) + 1;
		kmem_free(cp->cpu_supp_freqs, len);
		cp->cpu_supp_freqs = NULL;
	}

	teardown_vaddr_for_ppcopy(cp);

	kcpc_hw_fini(cp);

	cp->cpu_dispthread = NULL;
	cp->cpu_thread = NULL;	/* discarded by cpu_destroy_bound_threads() */

	cpu_vm_data_destroy(cp);

	xc_fini_cpu(cp);
	disp_cpu_fini(cp);

	ASSERT(cp != CPU0);
	bzero(cp, sizeof (*cp));
	cp->cpu_next_free = cpu_free_list;
	cpu_free_list = cp;
}

/*
 * Apply workarounds for known errata, and warn about those that are absent.
 *
 * System vendors occasionally create configurations which contain different
 * revisions of the CPUs that are almost but not exactly the same.  At the
 * time of writing, this meant that their clock rates were the same, their
 * feature sets were the same, but the required workarounds were -not-
 * necessarily the same.  So, this routine is invoked on -every- CPU soon
 * after starting to make sure that the resulting system contains the most
 * pessimal set of workarounds needed to cope with *any* of the CPUs in the
 * system.
 *
 * workaround_errata is invoked early in mlsetup() for CPU 0, and in
 * mp_startup_common() for all slave CPUs.  Slaves process workaround_errata
 * prior to acknowledging their readiness to the master, so this routine will
 * never be executed by multiple CPUs in parallel, thus making updates to
 * global data safe.
 *
 * These workarounds are based on Rev 3.57 of the Revision Guide for
 * AMD Athlon(tm) 64 and AMD Opteron(tm) Processors, August 2005.
 */

#if defined(OPTERON_ERRATUM_88)
int opteron_erratum_88;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_91)
int opteron_erratum_91;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_93)
int opteron_erratum_93;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_95)
int opteron_erratum_95;		/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_100)
int opteron_erratum_100;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_108)
int opteron_erratum_108;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_109)
int opteron_erratum_109;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_121)
int opteron_erratum_121;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_122)
int opteron_erratum_122;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_123)
int opteron_erratum_123;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_131)
int opteron_erratum_131;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_WORKAROUND_6336786)
int opteron_workaround_6336786;	/* non-zero -> WA relevant and applied */
int opteron_workaround_6336786_UP = 0;	/* Not needed for UP */
#endif

#if defined(OPTERON_WORKAROUND_6323525)
int opteron_workaround_6323525;	/* if non-zero -> at least one cpu has it */
#endif

#if defined(OPTERON_ERRATUM_298)
int opteron_erratum_298;
#endif

#if defined(OPTERON_ERRATUM_721)
int opteron_erratum_721;
#endif

static void
workaround_warning(cpu_t *cp, uint_t erratum)
{
	cmn_err(CE_WARN, "cpu%d: no workaround for erratum %u",
	    cp->cpu_id, erratum);
}

static void
workaround_applied(uint_t erratum)
{
	if (erratum > 1000000)
		cmn_err(CE_CONT, "?workaround applied for cpu issue #%d\n",
		    erratum);
	else
		cmn_err(CE_CONT, "?workaround applied for cpu erratum #%d\n",
		    erratum);
}

static void
msr_warning(cpu_t *cp, const char *rw, uint_t msr, int error)
{
	cmn_err(CE_WARN, "cpu%d: couldn't %smsr 0x%x, error %d",
	    cp->cpu_id, rw, msr, error);
}

/*
 * Determine the number of nodes in a system.
 *
 * This routine uses a PCI config space based mechanism
 * for retrieving the number of nodes in the system.
 *
 * Current processor families that support this mechanism are
 * 0xf, 0x10, 0x11, and 0x15.
 */
static uint_t
opteron_get_nnodes(void)
{
	static uint_t nnodes = 0;

	/*
	 * Obtain the number of nodes in the system from
	 * bits [6:4] of the Node ID register on node 0.
	 *
	 * The actual node count is NodeID[6:4] + 1
	 *
	 * The Node ID register is accessed via function 0,
	 * offset 0x60.  Node 0 is device 24.
	 */
	if (nnodes == 0)
		nnodes = ((pci_getl_func(0, 24, 0, 0x60) & 0x70) >> 4) + 1;

	return (nnodes);
}
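
/*
 * Worked example of the decode above: if the dword read from node 0's
 * Node ID register (bus 0, device 24, function 0, offset 0x60) were
 * 0x00000030, then (0x30 & 0x70) >> 4 == 3, and opteron_get_nnodes()
 * would report 3 + 1 = 4 nodes.
 */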

uint_t
do_erratum_298(struct cpu *cpu)
{
	static int osvwrc = -3;
	extern int osvw_opteron_erratum(cpu_t *, uint_t);

	/*
	 * L2 Eviction May Occur During Processor Operation To Set
	 * Accessed or Dirty Bit.
	 */
	if (osvwrc == -3) {
		osvwrc = osvw_opteron_erratum(cpu, 298);
	} else {
		/* osvw return codes should be consistent for all cpus */
		ASSERT(osvwrc == osvw_opteron_erratum(cpu, 298));
	}

	switch (osvwrc) {
	case 0:		/* erratum is not present: do nothing */
		break;
	case 1:		/* erratum is present: BIOS workaround applied */
		/*
		 * check if workaround is actually in place and issue warning
		 * if not.
		 */
		if (((rdmsr(MSR_AMD_HWCR) & AMD_HWCR_TLBCACHEDIS) == 0) ||
		    ((rdmsr(MSR_AMD_BU_CFG) & AMD_BU_CFG_E298) == 0)) {
#if defined(OPTERON_ERRATUM_298)
			opteron_erratum_298++;
#else
			workaround_warning(cpu, 298);
			return (1);
#endif
		}
		break;
	case -1:	/* cannot determine via osvw: check cpuid */
		if ((cpuid_opteron_erratum(cpu, 298) > 0) &&
		    (((rdmsr(MSR_AMD_HWCR) & AMD_HWCR_TLBCACHEDIS) == 0) ||
		    ((rdmsr(MSR_AMD_BU_CFG) & AMD_BU_CFG_E298) == 0))) {
#if defined(OPTERON_ERRATUM_298)
			opteron_erratum_298++;
#else
			workaround_warning(cpu, 298);
			return (1);
#endif
		}
		break;
	}
	return (0);
}

uint_t
workaround_errata(struct cpu *cpu)
{
	uint_t missing = 0;

	ASSERT(cpu == CPU);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 88) > 0) {
		/*
		 * SWAPGS May Fail To Read Correct GS Base
		 */
#if defined(OPTERON_ERRATUM_88)
		/*
		 * The workaround is an mfence in the relevant assembler code
		 */
		opteron_erratum_88++;
#else
		workaround_warning(cpu, 88);
		missing++;
#endif
	}

	if (cpuid_opteron_erratum(cpu, 91) > 0) {
		/*
		 * Software Prefetches May Report A Page Fault
		 */
#if defined(OPTERON_ERRATUM_91)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_91++;
#else
		workaround_warning(cpu, 91);
		missing++;
#endif
	}

	if (cpuid_opteron_erratum(cpu, 93) > 0) {
		/*
		 * RSM Auto-Halt Restart Returns to Incorrect RIP
		 */
#if defined(OPTERON_ERRATUM_93)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_93++;
#else
		workaround_warning(cpu, 93);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 95) > 0) {
		/*
		 * RET Instruction May Return to Incorrect EIP
		 */
#if defined(OPTERON_ERRATUM_95)
#if defined(_LP64)
		/*
		 * Workaround this by ensuring that 32-bit user code and
		 * 64-bit kernel code never occupy the same address
		 * range mod 4G.
		 */
		if (_userlimit32 > 0xc0000000ul)
			*(uintptr_t *)&_userlimit32 = 0xc0000000ul;

		/*LINTED*/
		ASSERT((uint32_t)COREHEAP_BASE == 0xc0000000u);
		opteron_erratum_95++;
#endif	/* _LP64 */
#else
		workaround_warning(cpu, 95);
		missing++;
#endif
	}

	if (cpuid_opteron_erratum(cpu, 100) > 0) {
		/*
		 * Compatibility Mode Branches Transfer to Illegal Address
		 */
#if defined(OPTERON_ERRATUM_100)
		/*
		 * fix is in trap.c
		 */
		opteron_erratum_100++;
#else
		workaround_warning(cpu, 100);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 108) > 0) {
		/*
		 * CPUID Instruction May Return Incorrect Model Number In
		 * Some Processors
		 */
#if defined(OPTERON_ERRATUM_108)
		/*
		 * (Our cpuid-handling code corrects the model number on
		 * those processors)
		 */
#else
		workaround_warning(cpu, 108);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 109) > 0) do {
		/*
		 * Certain Reverse REP MOVS May Produce Unpredictable Behavior
		 */
#if defined(OPTERON_ERRATUM_109)
		/*
		 * The "workaround" is to print a warning to upgrade the BIOS
		 */
		uint64_t value;
		const uint_t msr = MSR_AMD_PATCHLEVEL;
		int err;

		if ((err = checked_rdmsr(msr, &value)) != 0) {
			msr_warning(cpu, "rd", msr, err);
			workaround_warning(cpu, 109);
			missing++;
		}
		if (value == 0)
			opteron_erratum_109++;
#else
		workaround_warning(cpu, 109);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 121) > 0) {
		/*
		 * Sequential Execution Across Non-Canonical Boundary Caused
		 * Processor Hang
		 */
#if defined(OPTERON_ERRATUM_121)
#if defined(_LP64)
		/*
		 * Erratum 121 is only present in long (64 bit) mode.
		 * Workaround is to include the page immediately before the
		 * va hole to eliminate the possibility of system hangs due to
		 * sequential execution across the va hole boundary.
		 */
		if (opteron_erratum_121)
			opteron_erratum_121++;
		else {
			if (hole_start) {
				hole_start -= PAGESIZE;
			} else {
				/*
				 * hole_start not yet initialized by
				 * mmu_init.  Initialize hole_start
				 * with value to be subtracted.
				 */
				hole_start = PAGESIZE;
			}
			opteron_erratum_121++;
		}
#endif	/* _LP64 */
#else
		workaround_warning(cpu, 121);
		missing++;
#endif
	}

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 122) > 0) do {
		/*
		 * TLB Flush Filter May Cause Coherency Problem in
		 * Multiprocessor Systems
		 */
#if defined(OPTERON_ERRATUM_122)
		uint64_t value;
		const uint_t msr = MSR_AMD_HWCR;
		int error;

		/*
		 * Erratum 122 is only present in MP configurations (multi-core
		 * or multi-processor).
		 */
#if defined(__xpv)
		if (!DOMAIN_IS_INITDOMAIN(xen_info))
			break;
		if (!opteron_erratum_122 && xpv_nr_phys_cpus() == 1)
			break;
#else
		if (!opteron_erratum_122 && opteron_get_nnodes() == 1 &&
		    cpuid_get_ncpu_per_chip(cpu) == 1)
			break;
#endif
		/* disable TLB Flush Filter */

		if ((error = checked_rdmsr(msr, &value)) != 0) {
			msr_warning(cpu, "rd", msr, error);
			workaround_warning(cpu, 122);
			missing++;
		} else {
			value |= (uint64_t)AMD_HWCR_FFDIS;
			if ((error = checked_wrmsr(msr, value)) != 0) {
				msr_warning(cpu, "wr", msr, error);
				workaround_warning(cpu, 122);
				missing++;
			}
		}
		opteron_erratum_122++;
#else
		workaround_warning(cpu, 122);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 123) > 0) do {
		/*
		 * Bypassed Reads May Cause Data Corruption or System Hang in
		 * Dual Core Processors
		 */
#if defined(OPTERON_ERRATUM_123)
		uint64_t value;
		const uint_t msr = MSR_AMD_PATCHLEVEL;
		int err;

		/*
		 * Erratum 123 applies only to multi-core cpus.
		 */
		if (cpuid_get_ncpu_per_chip(cpu) < 2)
			break;
#if defined(__xpv)
		if (!DOMAIN_IS_INITDOMAIN(xen_info))
			break;
#endif
		/*
		 * The "workaround" is to print a warning to upgrade the BIOS
		 */
		if ((err = checked_rdmsr(msr, &value)) != 0) {
			msr_warning(cpu, "rd", msr, err);
			workaround_warning(cpu, 123);
			missing++;
		}
		if (value == 0)
			opteron_erratum_123++;
#else
		workaround_warning(cpu, 123);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*LINTED*/
	if (cpuid_opteron_erratum(cpu, 131) > 0) do {
		/*
		 * Multiprocessor Systems with Four or More Cores May Deadlock
		 * Waiting for a Probe Response
		 */
#if defined(OPTERON_ERRATUM_131)
		uint64_t nbcfg;
		const uint_t msr = MSR_AMD_NB_CFG;
		const uint64_t wabits =
		    AMD_NB_CFG_SRQ_HEARTBEAT | AMD_NB_CFG_SRQ_SPR;
		int error;

		/*
		 * Erratum 131 applies to any system with four or more cores.
		 */
		if (opteron_erratum_131)
			break;
#if defined(__xpv)
		if (!DOMAIN_IS_INITDOMAIN(xen_info))
			break;
		if (xpv_nr_phys_cpus() < 4)
			break;
#else
		if (opteron_get_nnodes() * cpuid_get_ncpu_per_chip(cpu) < 4)
			break;
#endif
		/*
		 * Print a warning if neither of the workarounds for
		 * erratum 131 is present.
		 */
		if ((error = checked_rdmsr(msr, &nbcfg)) != 0) {
			msr_warning(cpu, "rd", msr, error);
			workaround_warning(cpu, 131);
			missing++;
		} else if ((nbcfg & wabits) == 0) {
			opteron_erratum_131++;
		} else {
			/* cannot have both workarounds set */
			ASSERT((nbcfg & wabits) != wabits);
		}
#else
		workaround_warning(cpu, 131);
		missing++;
#endif
	/*CONSTANTCONDITION*/
	} while (0);

	/*
	 * This isn't really an erratum, but for convenience the
	 * detection/workaround code lives here and in cpuid_opteron_erratum.
	 */
	if (cpuid_opteron_erratum(cpu, 6336786) > 0) {
#if defined(OPTERON_WORKAROUND_6336786)
		/*
		 * Disable C1-Clock ramping on multi-core/multi-processor
		 * K8 platforms to guard against TSC drift.
		 */
		if (opteron_workaround_6336786) {
			opteron_workaround_6336786++;
#if defined(__xpv)
		} else if ((DOMAIN_IS_INITDOMAIN(xen_info) &&
		    xpv_nr_phys_cpus() > 1) ||
		    opteron_workaround_6336786_UP) {
			/*
			 * XXPV	Hmm.  We can't walk the Northbridges on
			 *	the hypervisor; so just complain and drive
			 *	on.  This probably needs to be fixed in
			 *	the hypervisor itself.
			 */
			opteron_workaround_6336786++;
			workaround_warning(cpu, 6336786);
#else	/* __xpv */
		} else if ((opteron_get_nnodes() *
		    cpuid_get_ncpu_per_chip(cpu) > 1) ||
		    opteron_workaround_6336786_UP) {

			uint_t	node, nnodes;
			uint8_t data;

			nnodes = opteron_get_nnodes();
			for (node = 0; node < nnodes; node++) {
				/*
				 * Clear PMM7[1:0] (function 3, offset 0x87)
				 * Northbridge device is the node id + 24.
				 */
				data = pci_getb_func(0, node + 24, 3, 0x87);
				data &= 0xFC;
				pci_putb_func(0, node + 24, 3, 0x87, data);
			}
			opteron_workaround_6336786++;
#endif	/* __xpv */
		}
#else
		workaround_warning(cpu, 6336786);
		missing++;
#endif
	}

	/*LINTED*/
	/*
	 * Mutex primitives don't work as expected.
	 */
	if (cpuid_opteron_erratum(cpu, 6323525) > 0) {
#if defined(OPTERON_WORKAROUND_6323525)
		/*
		 * This problem only occurs with 2 or more cores.  If the
		 * relevant bit in MSR_AMD_BU_CFG is already set, the erratum
		 * is not applicable.  The workaround is to patch the
		 * semaphore routines with the lfence instruction, providing
		 * the necessary load memory barrier before possible
		 * subsequent read-modify-write ops.
		 *
		 * It is too early in boot to call the patch routine so
		 * set erratum variable to be done in startup_end().
		 */
		if (opteron_workaround_6323525) {
			opteron_workaround_6323525++;
#if defined(__xpv)
		} else if (is_x86_feature(x86_featureset, X86FSET_SSE2)) {
			if (DOMAIN_IS_INITDOMAIN(xen_info)) {
				/*
				 * XXPV	Use dom0_msr here when extended
				 *	operations are supported?
				 */
				if (xpv_nr_phys_cpus() > 1)
					opteron_workaround_6323525++;
			} else {
				/*
				 * We have no way to tell how many physical
				 * cpus there are, or even if this processor
				 * has the problem, so enable the workaround
				 * unconditionally (at some performance cost).
				 */
				opteron_workaround_6323525++;
			}
#else	/* __xpv */
		} else if (is_x86_feature(x86_featureset, X86FSET_SSE2) &&
		    ((opteron_get_nnodes() *
		    cpuid_get_ncpu_per_chip(cpu)) > 1)) {
			if ((xrdmsr(MSR_AMD_BU_CFG) & (UINT64_C(1) << 33)) == 0)
				opteron_workaround_6323525++;
#endif	/* __xpv */
		}
#else
		workaround_warning(cpu, 6323525);
		missing++;
#endif
	}

	missing += do_erratum_298(cpu);

	if (cpuid_opteron_erratum(cpu, 721) > 0) {
#if defined(OPTERON_ERRATUM_721)
		on_trap_data_t otd;

		if (!on_trap(&otd, OT_DATA_ACCESS))
			wrmsr(MSR_AMD_DE_CFG,
			    rdmsr(MSR_AMD_DE_CFG) | AMD_DE_CFG_E721);
		no_trap();

		opteron_erratum_721++;
#else
		workaround_warning(cpu, 721);
		missing++;
#endif
	}

#ifdef __xpv
	return (0);
#else
	return (missing);
#endif
}

void
workaround_errata_end()
{
#if defined(OPTERON_ERRATUM_88)
	if (opteron_erratum_88)
		workaround_applied(88);
#endif
#if defined(OPTERON_ERRATUM_91)
	if (opteron_erratum_91)
		workaround_applied(91);
#endif
#if defined(OPTERON_ERRATUM_93)
	if (opteron_erratum_93)
		workaround_applied(93);
#endif
#if defined(OPTERON_ERRATUM_95)
	if (opteron_erratum_95)
		workaround_applied(95);
#endif
#if defined(OPTERON_ERRATUM_100)
	if (opteron_erratum_100)
		workaround_applied(100);
#endif
#if defined(OPTERON_ERRATUM_108)
	if (opteron_erratum_108)
		workaround_applied(108);
#endif
#if defined(OPTERON_ERRATUM_109)
	if (opteron_erratum_109) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
		    " processor\nerratum 109 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_ERRATUM_121)
	if (opteron_erratum_121)
		workaround_applied(121);
#endif
#if defined(OPTERON_ERRATUM_122)
	if (opteron_erratum_122)
		workaround_applied(122);
#endif
#if defined(OPTERON_ERRATUM_123)
	if (opteron_erratum_123) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
		    " processor\nerratum 123 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_ERRATUM_131)
	if (opteron_erratum_131) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD Athlon(tm) 64/Opteron(tm)"
		    " processor\nerratum 131 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_WORKAROUND_6336786)
	if (opteron_workaround_6336786)
		workaround_applied(6336786);
#endif
#if defined(OPTERON_WORKAROUND_6323525)
	if (opteron_workaround_6323525)
		workaround_applied(6323525);
#endif
#if defined(OPTERON_ERRATUM_298)
	if (opteron_erratum_298) {
		cmn_err(CE_WARN,
		    "BIOS microcode patch for AMD 64/Opteron(tm)"
		    " processor\nerratum 298 was not detected; updating your"
		    " system's BIOS to a version\ncontaining this"
		    " microcode patch is HIGHLY recommended or erroneous"
		    " system\noperation may occur.\n");
	}
#endif
#if defined(OPTERON_ERRATUM_721)
	if (opteron_erratum_721)
		workaround_applied(721);
#endif
}

/*
 * The procset_slave and procset_master are used to synchronize
 * between the control CPU and the target CPU when starting CPUs.
 */
static cpuset_t procset_slave, procset_master;

static void
mp_startup_wait(cpuset_t *sp, processorid_t cpuid)
{
	cpuset_t tempset;

	for (tempset = *sp; !CPU_IN_SET(tempset, cpuid);
	    tempset = *(volatile cpuset_t *)sp) {
		SMT_PAUSE();
	}
	CPUSET_ATOMIC_DEL(*(cpuset_t *)sp, cpuid);
}

static void
mp_startup_signal(cpuset_t *sp, processorid_t cpuid)
{
	cpuset_t tempset;

	CPUSET_ATOMIC_ADD(*(cpuset_t *)sp, cpuid);
	for (tempset = *sp; CPU_IN_SET(tempset, cpuid);
	    tempset = *(volatile cpuset_t *)sp) {
		SMT_PAUSE();
	}
}
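
/*
 * Together these two routines form a simple rendezvous: the signalling
 * side sets its bit in the set and spins until that bit is consumed,
 * while the waiting side spins until the bit appears and then atomically
 * clears it, so each signal pairs with exactly one wait:
 *
 *	mp_startup_signal()		mp_startup_wait()
 *	-------------------		-----------------
 *	CPUSET_ATOMIC_ADD(sp, id)	spin until id appears in *sp
 *	spin until id leaves *sp	CPUSET_ATOMIC_DEL(sp, id)
 *	(released)			(proceeds)
 */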

int
mp_start_cpu_common(cpu_t *cp, boolean_t boot)
{
	_NOTE(ARGUNUSED(boot));

	void *ctx;
	int delays;
	int error = 0;
	cpuset_t tempset;
	processorid_t cpuid;
#ifndef __xpv
	extern void cpupm_init(cpu_t *);
#endif

	ASSERT(cp != NULL);
	cpuid = cp->cpu_id;
	ctx = mach_cpucontext_alloc(cp);
	if (ctx == NULL) {
		cmn_err(CE_WARN,
		    "cpu%d: failed to allocate context", cp->cpu_id);
		return (EAGAIN);
	}
	error = mach_cpu_start(cp, ctx);
	if (error != 0) {
		cmn_err(CE_WARN,
		    "cpu%d: failed to start, error %d", cp->cpu_id, error);
		mach_cpucontext_free(cp, ctx, error);
		return (error);
	}

	for (delays = 0, tempset = procset_slave; !CPU_IN_SET(tempset, cpuid);
	    delays++) {
		if (delays == 500) {
			/*
			 * After five seconds, things are probably looking
			 * a bit bleak - explain the hang.
			 */
			cmn_err(CE_NOTE, "cpu%d: started, "
			    "but not running in the kernel yet", cpuid);
		} else if (delays > 2000) {
			/*
			 * We waited at least 20 seconds, bail ..
			 */
			error = ETIMEDOUT;
			cmn_err(CE_WARN, "cpu%d: timed out", cpuid);
			mach_cpucontext_free(cp, ctx, error);
			return (error);
		}

		/*
		 * wait at least 10ms, then check again..
		 */
		delay(USEC_TO_TICK_ROUNDUP(10000));
		tempset = *((volatile cpuset_t *)&procset_slave);
	}
	CPUSET_ATOMIC_DEL(procset_slave, cpuid);

	mach_cpucontext_free(cp, ctx, 0);

#ifndef __xpv
	if (tsc_gethrtime_enable)
		tsc_sync_master(cpuid);
#endif

	if (dtrace_cpu_init != NULL) {
		(*dtrace_cpu_init)(cpuid);
	}

	/*
	 * During CPU DR operations, the cpu_lock is held by current
	 * (the control) thread.  We can't release the cpu_lock here
	 * because that will break the CPU DR logic.
	 * On the other hand, CPUPM and processor group initialization
	 * routines need to access the cpu_lock.  So we invoke those
	 * routines here on behalf of mp_startup_common().
	 *
	 * CPUPM and processor group initialization routines depend
	 * on the cpuid probing results.  Wait for mp_startup_common()
	 * to signal that cpuid probing is done.
	 */
	mp_startup_wait(&procset_slave, cpuid);
#ifndef __xpv
	cpupm_init(cp);
#endif
	(void) pg_cpu_init(cp, B_FALSE);
	cpu_set_state(cp);
	mp_startup_signal(&procset_master, cpuid);

	return (0);
}
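
/*
 * The synchronization between mp_start_cpu_common() on the control CPU
 * and mp_startup_common() on the target is a fixed sequence of the
 * rendezvous above (time flows downward):
 *
 *	control CPU (here)		target CPU (mp_startup_common)
 *	------------------		------------------------------
 *	mach_cpu_start()		starts running, cpuid_pass1()
 *	spin on procset_slave	<--	mp_startup_signal(&procset_slave)
 *	tsc_sync_master()	<=>	tsc_sync_slave()
 *	mp_startup_wait(		...cpuid passes 2-4, etc...
 *	    &procset_slave)	<--	mp_startup_signal(&procset_slave)
 *	cpupm_init(), pg_cpu_init()	mp_startup_wait(&procset_master)
 *	mp_startup_signal(
 *	    &procset_master)	-->	(target proceeds)
 */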

/*
 * Start a single cpu, assuming that the kernel context is available
 * to successfully start another cpu.
 *
 * (For example, real mode code is mapped into the right place
 * in memory and is ready to be run.)
 */
int
start_cpu(processorid_t who)
{
	cpu_t *cp;
	int error = 0;
	cpuset_t tempset;

	ASSERT(who != 0);

	/*
	 * Check if there's at least a Mbyte of kmem available
	 * before attempting to start the cpu.
	 */
	if (kmem_avail() < 1024 * 1024) {
		/*
		 * Kick off a reap in case that helps us with
		 * later attempts ..
		 */
		kmem_reap();
		return (ENOMEM);
	}

	/*
	 * First configure cpu.
	 */
	cp = mp_cpu_configure_common(who, B_TRUE);
	ASSERT(cp != NULL);

	/*
	 * Then start cpu.
	 */
	error = mp_start_cpu_common(cp, B_TRUE);
	if (error != 0) {
		mp_cpu_unconfigure_common(cp, error);
		return (error);
	}

	mutex_exit(&cpu_lock);
	tempset = cpu_ready_set;
	while (!CPU_IN_SET(tempset, who)) {
		drv_usecwait(1);
		tempset = *((volatile cpuset_t *)&cpu_ready_set);
	}
	mutex_enter(&cpu_lock);

	return (0);
}

void
start_other_cpus(int cprboot)
{
	_NOTE(ARGUNUSED(cprboot));

	uint_t who;
	uint_t bootcpuid = 0;

	/*
	 * Initialize our own cpu_info.
	 */
	init_cpu_info(CPU);

#if !defined(__xpv)
	init_cpu_id_gdt(CPU);
#endif

	cmn_err(CE_CONT, "?cpu%d: %s\n", CPU->cpu_id, CPU->cpu_idstr);
	cmn_err(CE_CONT, "?cpu%d: %s\n", CPU->cpu_id, CPU->cpu_brandstr);

	/*
	 * KPTI initialisation happens very early in boot, before logging is
	 * set up.  Output a status message now as the boot CPU comes online.
	 */
	cmn_err(CE_CONT, "?KPTI %s (PCID %s, INVPCID %s)\n",
	    kpti_enable ? "enabled" : "disabled",
	    x86_use_pcid == 1 ? "in use" :
	    (is_x86_feature(x86_featureset, X86FSET_PCID) ? "disabled" :
	    "not supported"),
	    x86_use_pcid == 1 && x86_use_invpcid == 1 ? "in use" :
	    (is_x86_feature(x86_featureset, X86FSET_INVPCID) ? "disabled" :
	    "not supported"));

	/*
	 * Initialize our syscall handlers
	 */
	init_cpu_syscall(CPU);

	/*
	 * Take the boot cpu out of the mp_cpus set because we know
	 * it's already running.  Add it to the cpu_ready_set for
	 * precisely the same reason.
	 */
	CPUSET_DEL(mp_cpus, bootcpuid);
	CPUSET_ADD(cpu_ready_set, bootcpuid);

	/*
	 * skip the rest of this if
	 * . only 1 cpu detected and system isn't hotplug-capable
	 * . not using MP
	 */
	if ((CPUSET_ISNULL(mp_cpus) && plat_dr_support_cpu() == 0) ||
	    use_mp == 0) {
		if (use_mp == 0)
			cmn_err(CE_CONT, "?***** Not in MP mode\n");
		goto done;
	}

	/*
	 * perform such initialization as is needed
	 * to be able to take CPUs on- and off-line.
	 */
	cpu_pause_init();

	xc_init_cpu(CPU);		/* initialize processor crosscalls */

	if (mach_cpucontext_init() != 0)
		goto done;

	flushes_require_xcalls = 1;

	/*
	 * We lock our affinity to the master CPU to ensure that all slave CPUs
	 * do their TSC syncs with the same CPU.
	 */
	affinity_set(CPU_CURRENT);

	for (who = 0; who < NCPU; who++) {
		if (!CPU_IN_SET(mp_cpus, who))
			continue;
		ASSERT(who != bootcpuid);

		mutex_enter(&cpu_lock);
		if (start_cpu(who) != 0)
			CPUSET_DEL(mp_cpus, who);
		cpu_state_change_notify(who, CPU_SETUP);
		mutex_exit(&cpu_lock);
	}

	/* Free the space allocated to hold the microcode file */
	ucode_cleanup();

	affinity_clear();

	mach_cpucontext_fini();

done:
	if (get_hwenv() == HW_NATIVE)
		workaround_errata_end();
	cmi_post_mpstartup();

	if (use_mp && ncpus != boot_max_ncpus) {
		cmn_err(CE_NOTE,
		    "System detected %d cpus, but "
		    "only %d cpu(s) were enabled during boot.",
		    boot_max_ncpus, ncpus);
		cmn_err(CE_NOTE,
		    "Use \"boot-ncpus\" parameter to enable more CPU(s). "
		    "See eeprom(1M).");
	}
}

int
mp_cpu_configure(int cpuid)
{
	cpu_t *cp;

	if (use_mp == 0 || plat_dr_support_cpu() == 0) {
		return (ENOTSUP);
	}

	cp = cpu_get(cpuid);
	if (cp != NULL) {
		return (EALREADY);
	}

	/*
	 * Check if there's at least a Mbyte of kmem available
	 * before attempting to start the cpu.
	 */
	if (kmem_avail() < 1024 * 1024) {
		/*
		 * Kick off a reap in case that helps us with
		 * later attempts ..
		 */
		kmem_reap();
		return (ENOMEM);
	}

	cp = mp_cpu_configure_common(cpuid, B_FALSE);
	ASSERT(cp != NULL && cpu_get(cpuid) == cp);

	return (cp != NULL ? 0 : EAGAIN);
}

int
mp_cpu_unconfigure(int cpuid)
{
	cpu_t *cp;

	if (use_mp == 0 || plat_dr_support_cpu() == 0) {
		return (ENOTSUP);
	} else if (cpuid < 0 || cpuid >= max_ncpus) {
		return (EINVAL);
	}

	cp = cpu_get(cpuid);
	if (cp == NULL) {
		return (ENODEV);
	}
	mp_cpu_unconfigure_common(cp, 0);

	return (0);
}

/*
 * Startup function for 'other' CPUs (besides boot cpu).
 * Called from real_mode_start.
 *
 * WARNING: until CPU_READY is set, mp_startup_common and routines called by
 * mp_startup_common should not call routines (e.g. kmem_free) that could call
 * hat_unload which requires CPU_READY to be set.
 */
static void
mp_startup_common(boolean_t boot)
{
	cpu_t *cp = CPU;
	uchar_t new_x86_featureset[BT_SIZEOFMAP(NUM_X86_FEATURES)];
	extern void cpu_event_init_cpu(cpu_t *);

	/*
	 * We need to get TSC on this proc synced (i.e., any delta
	 * from cpu0 accounted for) as soon as we can, because many
	 * things use gethrtime/pc_gethrestime, including
	 * interrupts, cmn_err, etc.  Before we can do that, we want to
	 * clear TSC if we're on a buggy Sandy/Ivy Bridge CPU, so do that
	 * right away.
	 */
	bzero(new_x86_featureset, BT_SIZEOFMAP(NUM_X86_FEATURES));
	cpuid_pass1(cp, new_x86_featureset);

	if (boot && get_hwenv() == HW_NATIVE &&
	    cpuid_getvendor(CPU) == X86_VENDOR_Intel &&
	    cpuid_getfamily(CPU) == 6 &&
	    (cpuid_getmodel(CPU) == 0x2d || cpuid_getmodel(CPU) == 0x3e) &&
	    is_x86_feature(new_x86_featureset, X86FSET_TSC)) {
		(void) wrmsr(REG_TSC, 0UL);
	}

	/* Let the control CPU continue into tsc_sync_master() */
	mp_startup_signal(&procset_slave, cp->cpu_id);

#ifndef __xpv
	if (tsc_gethrtime_enable)
		tsc_sync_slave();
#endif

	/*
	 * Once this was done from assembly, but it's safer here; if
	 * it blocks, we need to be able to swtch() to and from, and
	 * since we get here by calling t_pc, we need to do that call
	 * before swtch() overwrites it.
	 */
	(void) (*ap_mlsetup)();

#ifndef __xpv
	/*
	 * Program this cpu's PAT
	 */
	pat_sync();
#endif

	/*
	 * Set up TSC_AUX to contain the cpuid for this processor
	 * for the rdtscp instruction.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_TSCP))
		(void) wrmsr(MSR_AMD_TSCAUX, cp->cpu_id);

	/*
	 * Initialize this CPU's syscall handlers
	 */
	init_cpu_syscall(cp);

	/*
	 * Enable interrupts with spl set to LOCK_LEVEL.  LOCK_LEVEL is the
	 * highest level at which a routine is permitted to block on
	 * an adaptive mutex (allows for cpu poke interrupt in case
	 * the cpu is blocked on a mutex and halts).  Setting LOCK_LEVEL blocks
	 * device interrupts that may end up in the hat layer issuing cross
	 * calls before CPU_READY is set.
	 */
	splx(ipltospl(LOCK_LEVEL));
	sti();

	/*
	 * Do a sanity check to make sure this new CPU is a sane thing
	 * to add to the collection of processors running this system.
	 *
	 * XXX	Clearly this needs to get more sophisticated, if x86
	 * systems start to get built out of heterogeneous CPUs; as is
	 * likely to happen once the number of processors in a configuration
	 * gets large enough.
	 */
	if (compare_x86_featureset(x86_featureset, new_x86_featureset) ==
	    B_FALSE) {
		cmn_err(CE_CONT, "cpu%d: featureset\n", cp->cpu_id);
		print_x86_featureset(new_x86_featureset);
		cmn_err(CE_WARN, "cpu%d feature mismatch", cp->cpu_id);
	}

	/*
	 * There exists a small subset of systems which expose differing
	 * MWAIT/MONITOR support between CPUs.  If MWAIT support is absent from
	 * the boot CPU, but is found on a later CPU, the system continues to
	 * operate as if no MWAIT support is available.
	 *
	 * The reverse case, where MWAIT is available on the boot CPU but not
	 * on a subsequently initialized CPU, is not presently allowed and will
	 * result in a panic.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_MWAIT) !=
	    is_x86_feature(new_x86_featureset, X86FSET_MWAIT)) {
		if (!is_x86_feature(x86_featureset, X86FSET_MWAIT)) {
			remove_x86_feature(new_x86_featureset, X86FSET_MWAIT);
		} else {
			panic("unsupported mixed cpu mwait support detected");
		}
	}

	/*
	 * We could be more sophisticated here, and just mark the CPU
	 * as "faulted" but at this point we'll opt for the easier
	 * answer of dying horribly.  Provided the boot cpu is ok,
	 * the system can be recovered by booting with use_mp set to zero.
	 */
	if (workaround_errata(cp) != 0)
		panic("critical workaround(s) missing for cpu%d", cp->cpu_id);

	/*
	 * We can touch cpu_flags here without acquiring the cpu_lock
	 * because the cpu_lock is held by the control CPU which is running
	 * mp_start_cpu_common().
	 * Need to clear CPU_QUIESCED flag before calling any function which
	 * may cause thread context switching, such as kmem_alloc() etc.
	 * The idle thread checks for CPU_QUIESCED flag and loops forever if
	 * it's set.  So the startup thread may have no chance to switch back
	 * again if it's switched away with CPU_QUIESCED set.
	 */
	cp->cpu_flags &= ~(CPU_POWEROFF | CPU_QUIESCED);

	enable_pcid();

	/*
	 * Setup this processor for XSAVE.
	 */
	if (fp_save_mech == FP_XSAVE) {
		xsave_setup_msr(cp);
	}

	cpuid_pass2(cp);
	cpuid_pass3(cp);
	cpuid_pass4(cp, NULL);

	/*
	 * Correct cpu_idstr and cpu_brandstr on target CPU after
	 * cpuid_pass1() is done.
	 */
	(void) cpuid_getidstr(cp, cp->cpu_idstr, CPU_IDSTRLEN);
	(void) cpuid_getbrandstr(cp, cp->cpu_brandstr, CPU_IDSTRLEN);

	cp->cpu_flags |= CPU_RUNNING | CPU_READY | CPU_EXISTS;

	post_startup_cpu_fixups();

	cpu_event_init_cpu(cp);

	/*
	 * Enable preemption here so that contention for any locks acquired
	 * later in mp_startup_common may be preempted if the thread owning
	 * those locks is continuously executing on other CPUs (for example,
	 * this CPU must be preemptible to allow other CPUs to pause it during
	 * their startup phases).  It's safe to enable preemption here because
	 * the CPU state is pretty-much fully constructed.
	 */
	curthread->t_preempt = 0;

	/* The base spl should still be at LOCK_LEVEL here */
	ASSERT(cp->cpu_base_spl == ipltospl(LOCK_LEVEL));
	set_base_spl();		/* Restore the spl to its proper value */

	pghw_physid_create(cp);
	/*
	 * Delegate initialization tasks, which need to access the cpu_lock,
	 * to mp_start_cpu_common() because we can't acquire the cpu_lock here
	 * during CPU DR operations.
	 */
	mp_startup_signal(&procset_slave, cp->cpu_id);
	mp_startup_wait(&procset_master, cp->cpu_id);
	pg_cmt_cpu_startup(cp);

	if (boot) {
		mutex_enter(&cpu_lock);
		cp->cpu_flags &= ~CPU_OFFLINE;
		cpu_enable_intr(cp);
		cpu_add_active(cp);
		mutex_exit(&cpu_lock);
	}

	/* Enable interrupts */
	(void) spl0();

	/*
	 * Fill out cpu_ucode_info.  Update microcode if necessary.
	 */
	ucode_check(cp);

#ifndef __xpv
	{
		/*
		 * Set up the CPU module for this CPU.  This can't be done
		 * before this CPU is made CPU_READY, because we may (in
		 * heterogeneous systems) need to go load another CPU module.
		 * The act of attempting to load a module may trigger a
		 * cross-call, which will ASSERT unless this cpu is CPU_READY.
		 */
		cmi_hdl_t hdl;

		if ((hdl = cmi_init(CMI_HDL_NATIVE, cmi_ntv_hwchipid(CPU),
		    cmi_ntv_hwcoreid(CPU), cmi_ntv_hwstrandid(CPU))) != NULL) {
			if (is_x86_feature(x86_featureset, X86FSET_MCA))
				cmi_mca_init(hdl);
			cp->cpu_m.mcpu_cmi_hdl = hdl;
		}
	}
#endif /* __xpv */

	if (boothowto & RB_DEBUG)
		kdi_cpu_init();

	/*
	 * Setting the bit in cpu_ready_set must be the last operation in
	 * processor initialization; the boot CPU will continue to boot once
	 * it sees this bit set for all active CPUs.
	 */
	CPUSET_ATOMIC_ADD(cpu_ready_set, cp->cpu_id);

	(void) mach_cpu_create_device_node(cp, NULL);

	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_idstr);
	cmn_err(CE_CONT, "?cpu%d: %s\n", cp->cpu_id, cp->cpu_brandstr);
	cmn_err(CE_CONT, "?cpu%d initialization complete - online\n",
	    cp->cpu_id);

	/*
	 * Now we are done with the startup thread, so free it up.
	 */
	thread_exit();
	panic("mp_startup: cannot return");
	/*NOTREACHED*/
}

/*
 * Startup function for 'other' CPUs at boot time (besides boot cpu).
 */
static void
mp_startup_boot(void)
{
	mp_startup_common(B_TRUE);
}

/*
 * Startup function for hotplug CPUs at runtime.
 */
static void
mp_startup_hotplug(void)
{
	mp_startup_common(B_FALSE);
}

/*
 * Start CPU on user request.
 */
/* ARGSUSED */
int
mp_cpu_start(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	return (0);
}

/*
 * Stop CPU on user request.
 */
int
mp_cpu_stop(struct cpu *cp)
{
	extern int cbe_psm_timer_mode;
	ASSERT(MUTEX_HELD(&cpu_lock));

#ifdef __xpv
	/*
	 * We can't offline vcpu0.
	 */
	if (cp->cpu_id == 0)
		return (EBUSY);
#endif

	/*
	 * If TIMER_PERIODIC mode is used, CPU0 is the one running it;
	 * can't stop it.  (This is true only for machines with no TSC.)
	 */
	if ((cbe_psm_timer_mode == TIMER_PERIODIC) && (cp->cpu_id == 0))
		return (EBUSY);

	return (0);
}

/*
 * Take the specified CPU out of participation in interrupts.
 */
int
cpu_disable_intr(struct cpu *cp)
{
	if (psm_disable_intr(cp->cpu_id) != DDI_SUCCESS)
		return (EBUSY);

	cp->cpu_flags &= ~CPU_ENABLE;
	return (0);
}

/*
 * Allow the specified CPU to participate in interrupts.
 */
void
cpu_enable_intr(struct cpu *cp)
{
	ASSERT(MUTEX_HELD(&cpu_lock));
	cp->cpu_flags |= CPU_ENABLE;
	psm_enable_intr(cp->cpu_id);
}

void
mp_cpu_faulted_enter(struct cpu *cp)
{
#ifdef __xpv
	_NOTE(ARGUNUSED(cp));
#else
	cmi_hdl_t hdl = cp->cpu_m.mcpu_cmi_hdl;

	if (hdl != NULL) {
		cmi_hdl_hold(hdl);
	} else {
		hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
		    cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));
	}
	if (hdl != NULL) {
		cmi_faulted_enter(hdl);
		cmi_hdl_rele(hdl);
	}
#endif
}

void
mp_cpu_faulted_exit(struct cpu *cp)
{
#ifdef __xpv
	_NOTE(ARGUNUSED(cp));
#else
	cmi_hdl_t hdl = cp->cpu_m.mcpu_cmi_hdl;

	if (hdl != NULL) {
		cmi_hdl_hold(hdl);
	} else {
		hdl = cmi_hdl_lookup(CMI_HDL_NATIVE, cmi_ntv_hwchipid(cp),
		    cmi_ntv_hwcoreid(cp), cmi_ntv_hwstrandid(cp));
	}
	if (hdl != NULL) {
		cmi_faulted_exit(hdl);
		cmi_hdl_rele(hdl);
	}
#endif
}

/*
 * The following two routines are used as context operators on threads
 * belonging to processes with a private LDT (see sysi86).  Due to the
 * rarity of such processes, these routines are currently written for
 * best code readability and organization rather than speed.  We could
 * avoid checking x86_featureset at every context switch by installing
 * different context ops, depending on x86_featureset, at LDT creation
 * time -- one for each combination of fast syscall features.
 */

void
cpu_fast_syscall_disable(void)
{
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_SEP))
		cpu_sep_disable();
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_ASYSC))
		cpu_asysc_disable();
}

void
cpu_fast_syscall_enable(void)
{
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_SEP))
		cpu_sep_enable();
	if (is_x86_feature(x86_featureset, X86FSET_MSR) &&
	    is_x86_feature(x86_featureset, X86FSET_ASYSC))
		cpu_asysc_enable();
}

static void
cpu_sep_enable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_SEP));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	wrmsr(MSR_INTC_SEP_CS, (uint64_t)(uintptr_t)KCS_SEL);
}

static void
cpu_sep_disable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_SEP));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	/*
	 * Setting the SYSENTER_CS_MSR register to 0 causes software executing
	 * the sysenter or sysexit instruction to trigger a #gp fault.
	 */
	wrmsr(MSR_INTC_SEP_CS, 0);
}

static void
cpu_asysc_enable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_ASYSC));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) |
	    (uint64_t)(uintptr_t)AMD_EFER_SCE);
}

static void
cpu_asysc_disable(void)
{
	ASSERT(is_x86_feature(x86_featureset, X86FSET_ASYSC));
	ASSERT(curthread->t_preempt || getpil() >= LOCK_LEVEL);

	/*
	 * Turn off the SCE (syscall enable) bit in the EFER register.  Software
	 * executing syscall or sysret with this bit off will incur a #ud trap.
	 */
	wrmsr(MSR_AMD_EFER, rdmsr(MSR_AMD_EFER) &
	    ~((uint64_t)(uintptr_t)AMD_EFER_SCE));
}
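
/*
 * Note that all four MSR helpers above assume the caller has pinned
 * itself to the CPU whose MSRs are being written (kpreempt_disable() or
 * spl at or above LOCK_LEVEL, as the ASSERTs verify), since wrmsr only
 * affects the CPU it executes on.  init_cpu_syscall() shows the expected
 * pattern:
 *
 *	kpreempt_disable();
 *	...per-CPU MSR writes, e.g. cpu_sep_enable()...
 *	kpreempt_enable();
 */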