/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2012 Gary Mills
 *
 * Copyright (c) 1993, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 * Copyright 2016 Joyent, Inc.
 */
/*
 * Copyright (c) 2010, Intel Corporation.
 * All rights reserved.
 */

#include <sys/types.h>
#include <sys/sysmacros.h>
#include <sys/disp.h>
#include <sys/promif.h>
#include <sys/clock.h>
#include <sys/cpuvar.h>
#include <sys/stack.h>
#include <vm/as.h>
#include <vm/hat.h>
#include <sys/reboot.h>
#include <sys/avintr.h>
#include <sys/vtrace.h>
#include <sys/proc.h>
#include <sys/thread.h>
#include <sys/cpupart.h>
#include <sys/pset.h>
#include <sys/copyops.h>
#include <sys/pg.h>
#include <sys/disp.h>
#include <sys/debug.h>
#include <sys/sunddi.h>
#include <sys/x86_archext.h>
#include <sys/privregs.h>
#include <sys/machsystm.h>
#include <sys/ontrap.h>
#include <sys/bootconf.h>
#include <sys/boot_console.h>
#include <sys/kdi_machimpl.h>
#include <sys/archsystm.h>
#include <sys/promif.h>
#include <sys/pci_cfgspace.h>
#include <sys/bootvfs.h>
#include <sys/tsc.h>
#ifdef __xpv
#include <sys/hypervisor.h>
#else
#include <sys/xpv_support.h>
#endif

/*
 * some globals for patching the result of cpuid
 * to solve problems w/ creative cpu vendors
 */

extern uint32_t cpuid_feature_ecx_include;
extern uint32_t cpuid_feature_ecx_exclude;
extern uint32_t cpuid_feature_edx_include;
extern uint32_t cpuid_feature_edx_exclude;

/*
 * Set console mode
 */
static void
set_console_mode(uint8_t val)
{
	struct bop_regs rp = {0};

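	/*
	 * BOP_DOINT() below issues BIOS int 10h: with AH = 0 the BIOS
	 * sets the video mode to the value passed in AL.
	 */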
	rp.eax.byte.ah = 0x0;
	rp.eax.byte.al = val;
	rp.ebx.word.bx = 0x0;

	BOP_DOINT(bootops, 0x10, &rp);
}


/*
 * Setup routine called right before main(). Interposing this function
 * before main() allows us to call it in a machine-independent fashion.
 */
void
mlsetup(struct regs *rp)
{
	u_longlong_t prop_value;
	extern struct classfuncs sys_classfuncs;
	extern disp_t cpu0_disp;
	extern char t0stack[];
	extern int post_fastreboot;
	extern uint64_t plat_dr_options;

	ASSERT_STACK_ALIGNED();

	/*
	 * initialize cpu_self
	 */
	cpu[0]->cpu_self = cpu[0];

#if defined(__xpv)
	/*
	 * Point at the hypervisor's virtual cpu structure
	 */
	cpu[0]->cpu_m.mcpu_vcpu_info = &HYPERVISOR_shared_info->vcpu_info[0];
#endif

	/*
	 * check if we've got special bits to clear or set
	 * when checking cpu features
	 */

	if (bootprop_getval("cpuid_feature_ecx_include", &prop_value) != 0)
		cpuid_feature_ecx_include = 0;
	else
		cpuid_feature_ecx_include = (uint32_t)prop_value;

	if (bootprop_getval("cpuid_feature_ecx_exclude", &prop_value) != 0)
		cpuid_feature_ecx_exclude = 0;
	else
		cpuid_feature_ecx_exclude = (uint32_t)prop_value;

	if (bootprop_getval("cpuid_feature_edx_include", &prop_value) != 0)
		cpuid_feature_edx_include = 0;
	else
		cpuid_feature_edx_include = (uint32_t)prop_value;

	if (bootprop_getval("cpuid_feature_edx_exclude", &prop_value) != 0)
		cpuid_feature_edx_exclude = 0;
	else
		cpuid_feature_edx_exclude = (uint32_t)prop_value;

	/*
	 * Initialize idt0, gdt0, ldt0_default, ktss0 and dftss.
	 */
	init_desctbls();

	/*
	 * lgrp_init() and possibly cpuid_pass1() need PCI config
	 * space access
	 */
#if defined(__xpv)
	if (DOMAIN_IS_INITDOMAIN(xen_info))
		pci_cfgspace_init();
#else
	pci_cfgspace_init();
	/*
	 * Initialize the platform type from CPU 0 to ensure that
	 * determine_platform() is only ever called once.
	 */
	determine_platform();
#endif

	/*
	 * The first lightweight pass (pass0) through the cpuid data
	 * was done in locore before mlsetup was called.  Do the next
	 * pass in C code.
	 *
	 * The x86_featureset is initialized here based on the capabilities
	 * of the boot CPU.  Note that if we choose to support CPUs that have
	 * different feature sets (at which point we would almost certainly
	 * want to set the feature bits to correspond to the feature
	 * minimum) this value may be altered.
	 */
	cpuid_pass1(cpu[0], x86_featureset);

#if !defined(__xpv)
	if ((get_hwenv() & HW_XEN_HVM) != 0)
		xen_hvm_init();

	/*
	 * Before we do anything with the TSCs, we need to work around
	 * Intel erratum BT81.  On some CPUs, warm reset does not
	 * clear the TSC.  If we are on such a CPU, we will clear TSC ourselves
	 * here.  Other CPUs will clear it when we boot them later, and the
	 * resulting skew will be handled by tsc_sync_master()/_slave();
	 * note that such skew already exists and has to be handled anyway.
	 *
	 * We do this only on metal.  This same problem can occur with a
	 * hypervisor that does not happen to virtualise a TSC that starts from
	 * zero, regardless of CPU type; however, we do not expect hypervisors
	 * that do not virtualise TSC that way to handle writes to TSC
	 * correctly, either.
	 */
	if (get_hwenv() == HW_NATIVE &&
	    cpuid_getvendor(CPU) == X86_VENDOR_Intel &&
	    cpuid_getfamily(CPU) == 6 &&
	    (cpuid_getmodel(CPU) == 0x2d || cpuid_getmodel(CPU) == 0x3e) &&
	    is_x86_feature(x86_featureset, X86FSET_TSC)) {
		(void) wrmsr(REG_TSC, 0UL);
	}

	/*
	 * Patch the tsc_read routine with the appropriate set of
	 * instructions, depending on the processor family and architecture,
	 * to read the time-stamp counter while ensuring no out-of-order
	 * execution.  Patch it while the kernel text is still writable.
	 *
	 * Note: tsc_read is not patched for Intel processors whose family
	 * is > 6, nor for AMD processors whose family is > 0xf (in the
	 * unlikely case that they don't support the rdtscp instruction).
	 * By default tsc_read will use cpuid for serialization in such
	 * cases.  The following code needs to be revisited if Intel
	 * processors of family >= 0xf retain the instruction-serializing
	 * nature of the mfence instruction.
	 * Note: tsc_read is also not patched for x86 processors that do
	 * not support "mfence".  By default tsc_read will use cpuid for
	 * serialization in such cases.
	 *
	 * The Xen hypervisor does not correctly report whether rdtscp is
	 * supported or not, so we must assume that it is not.
	 */
	if ((get_hwenv() & HW_XEN_HVM) == 0 &&
	    is_x86_feature(x86_featureset, X86FSET_TSCP))
		patch_tsc_read(TSC_TSCP);
	else if (cpuid_getvendor(CPU) == X86_VENDOR_AMD &&
	    cpuid_getfamily(CPU) <= 0xf &&
	    is_x86_feature(x86_featureset, X86FSET_SSE2))
		patch_tsc_read(TSC_RDTSC_MFENCE);
	else if (cpuid_getvendor(CPU) == X86_VENDOR_Intel &&
	    cpuid_getfamily(CPU) <= 6 &&
	    is_x86_feature(x86_featureset, X86FSET_SSE2))
		patch_tsc_read(TSC_RDTSC_LFENCE);

#endif	/* !__xpv */

#if defined(__i386) && !defined(__xpv)
	/*
	 * Some i386 processors do not implement the rdtsc instruction,
	 * or at least they do not implement it correctly. Patch them to
	 * return 0.
	 */
	if (!is_x86_feature(x86_featureset, X86FSET_TSC))
		patch_tsc_read(TSC_NONE);
#endif	/* __i386 && !__xpv */

#if defined(__amd64) && !defined(__xpv)
	patch_memops(cpuid_getvendor(CPU));
#endif	/* __amd64 && !__xpv */

#if !defined(__xpv)
	/* XXPV	what, if anything, should be dorked with here under xen? */

	/*
	 * While we're thinking about the TSC, let's set up %cr4 so that
	 * userland can issue rdtsc, and initialize the TSC_AUX value
	 * (the cpuid) for the rdtscp instruction on appropriately
	 * capable hardware.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_TSC))
		setcr4(getcr4() & ~CR4_TSD);

	if (is_x86_feature(x86_featureset, X86FSET_TSCP))
		(void) wrmsr(MSR_AMD_TSCAUX, 0);

	/*
	 * Let's get the other %cr4 stuff while we're here. Note, we defer
	 * enabling CR4_SMAP until startup_end(); however, that's importantly
	 * before we start other CPUs. That ensures that it will be synced out
	 * to other CPUs.
	 */
	if (is_x86_feature(x86_featureset, X86FSET_DE))
		setcr4(getcr4() | CR4_DE);

	if (is_x86_feature(x86_featureset, X86FSET_SMEP))
		setcr4(getcr4() | CR4_SMEP);
#endif	/* !__xpv */

	/*
	 * initialize t0
	 */
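	/* t0's stack top sits just below the boot-time register frame (rp) */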
	t0.t_stk = (caddr_t)rp - MINFRAME;
	t0.t_stkbase = t0stack;
	t0.t_pri = maxclsyspri - 3;
	t0.t_schedflag = TS_LOAD | TS_DONT_SWAP;
	t0.t_procp = &p0;
	t0.t_plockp = &p0lock.pl_lock;
	t0.t_lwp = &lwp0;
	t0.t_forw = &t0;
	t0.t_back = &t0;
	t0.t_next = &t0;
	t0.t_prev = &t0;
	t0.t_cpu = cpu[0];
	t0.t_disp_queue = &cpu0_disp;
	t0.t_bind_cpu = PBIND_NONE;
	t0.t_bind_pset = PS_NONE;
	t0.t_bindflag = (uchar_t)default_binding_mode;
	t0.t_cpupart = &cp_default;
	t0.t_clfuncs = &sys_classfuncs.thread;
	t0.t_copyops = NULL;
	THREAD_ONPROC(&t0, CPU);

	lwp0.lwp_thread = &t0;
	lwp0.lwp_regs = (void *)rp;
	lwp0.lwp_procp = &p0;
	t0.t_tid = p0.p_lwpcnt = p0.p_lwprcnt = p0.p_lwpid = 1;

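	/*
	 * initialize p0
	 */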
	p0.p_exec = NULL;
	p0.p_stat = SRUN;
	p0.p_flag = SSYS;
	p0.p_tlist = &t0;
	p0.p_stksize = 2*PAGESIZE;
	p0.p_stkpageszc = 0;
	p0.p_as = &kas;
	p0.p_lockp = &p0lock;
	p0.p_brkpageszc = 0;
	p0.p_t1_lgrpid = LGRP_NONE;
	p0.p_tr_lgrpid = LGRP_NONE;
	psecflags_default(&p0.p_secflags);

	sigorset(&p0.p_ignore, &ignoredefault);

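	/*
	 * initialize the boot CPU's dispatcher and per-cpu state
	 */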
	CPU->cpu_thread = &t0;
	bzero(&cpu0_disp, sizeof (disp_t));
	CPU->cpu_disp = &cpu0_disp;
	CPU->cpu_disp->disp_cpu = CPU;
	CPU->cpu_dispthread = &t0;
	CPU->cpu_idle_thread = &t0;
	CPU->cpu_flags = CPU_READY | CPU_RUNNING | CPU_EXISTS | CPU_ENABLE;
	CPU->cpu_dispatch_pri = t0.t_pri;

	CPU->cpu_id = 0;

	CPU->cpu_pri = 12;		/* initial PIL for the boot CPU */

	/*
	 * The kernel doesn't use LDTs unless a process explicitly requests one.
	 */
	p0.p_ldt_desc = null_sdesc;

	/*
	 * Initialize thread/cpu microstate accounting
	 */
	init_mstate(&t0, LMS_SYSTEM);
	init_cpu_mstate(CPU, CMS_SYSTEM);

	/*
	 * Initialize lists of available and active CPUs.
	 */
	cpu_list_init(CPU);

	pg_cpu_bootstrap(CPU);

	/*
	 * Now that we have taken over the GDT and IDT and have initialized
	 * the active CPU list, it's time to inform kmdb, if present.
	 */
	if (boothowto & RB_DEBUG)
		kdi_idt_sync();

	/*
	 * Explicitly set console to text mode (0x3) if this is a boot
	 * post Fast Reboot, and the console is set to CONS_SCREEN_TEXT.
	 */
	if (post_fastreboot && boot_console_type(NULL) == CONS_SCREEN_TEXT)
		set_console_mode(0x3);

	/*
	 * If requested (boot -d) drop into kmdb.
	 *
	 * This must be done after cpu_list_init() on the 64-bit kernel
	 * since taking a trap requires that we re-compute gsbase based
	 * on the cpu list.
	 */
	if (boothowto & RB_DEBUGENTER)
		kmdb_enter();

	cpu_vm_data_init(CPU);

	rp->r_fp = 0;	/* terminate kernel stack traces! */

	prom_init("kernel", (void *)NULL);

	/* User-set option overrides firmware value. */
	if (bootprop_getval(PLAT_DR_OPTIONS_NAME, &prop_value) == 0) {
		plat_dr_options = (uint64_t)prop_value;
	}
#if defined(__xpv)
	/* No support for DR operations on xpv */
	plat_dr_options = 0;
#else	/* __xpv */
	/* Flag PLAT_DR_FEATURE_ENABLED should only be set by the DR driver. */
	plat_dr_options &= ~PLAT_DR_FEATURE_ENABLED;
#ifndef	__amd64
	/* Only enable CPU/memory DR on the 64-bit kernel. */
	plat_dr_options &= ~PLAT_DR_FEATURE_MEMORY;
	plat_dr_options &= ~PLAT_DR_FEATURE_CPU;
#endif	/* __amd64 */
#endif	/* __xpv */

	/*
	 * Get value of "plat_dr_physmax" boot option.
	 * It overrides values calculated from MSCT or SRAT table.
	 */
	if (bootprop_getval(PLAT_DR_PHYSMAX_NAME, &prop_value) == 0) {
		plat_dr_physmax = ((uint64_t)prop_value) >> PAGESHIFT;
	}

	/* Get value of boot_ncpus. */
	if (bootprop_getval(BOOT_NCPUS_NAME, &prop_value) != 0) {
		boot_ncpus = NCPU;
	} else {
		boot_ncpus = (int)prop_value;
		if (boot_ncpus <= 0 || boot_ncpus > NCPU)
			boot_ncpus = NCPU;
	}

	/*
	 * Set max_ncpus and boot_max_ncpus to boot_ncpus if the platform
	 * doesn't support CPU DR operations.
	 */
	if (plat_dr_support_cpu() == 0) {
		max_ncpus = boot_max_ncpus = boot_ncpus;
	} else {
		if (bootprop_getval(PLAT_MAX_NCPUS_NAME, &prop_value) != 0) {
			max_ncpus = NCPU;
		} else {
			max_ncpus = (int)prop_value;
			if (max_ncpus <= 0 || max_ncpus > NCPU) {
				max_ncpus = NCPU;
			}
			if (boot_ncpus > max_ncpus) {
				boot_ncpus = max_ncpus;
			}
		}

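		/*
		 * Get boot_max_ncpus from the boot property; fall back to
		 * boot_ncpus and cap it at max_ncpus.
		 */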
		if (bootprop_getval(BOOT_MAX_NCPUS_NAME, &prop_value) != 0) {
			boot_max_ncpus = boot_ncpus;
		} else {
			boot_max_ncpus = (int)prop_value;
			if (boot_max_ncpus <= 0 || boot_max_ncpus > NCPU) {
				boot_max_ncpus = boot_ncpus;
			} else if (boot_max_ncpus > max_ncpus) {
				boot_max_ncpus = max_ncpus;
			}
		}
	}

	/*
	 * Initialize the lgrp framework
	 */
	lgrp_init(LGRP_INIT_STAGE1);

	if (boothowto & RB_HALT) {
		prom_printf("unix: kernel halted by -h flag\n");
		prom_enter_mon();
	}

	ASSERT_STACK_ALIGNED();

	/*
	 * Fill out cpu_ucode_info.  Update microcode if necessary.
	 */
	ucode_check(CPU);

	if (workaround_errata(CPU) != 0)
		panic("critical workaround(s) missing for boot cpu");
}


void
mach_modpath(char *path, const char *filename)
{
	/*
	 * Construct the directory path from the filename.
	 */

	int len;
	char *p;
	const char isastr[] = "/amd64";
	size_t isalen = strlen(isastr);

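	/*
	 * The module path begins with SYSTEM_BOOT_PATH/kernel followed by a
	 * space; advance past both so the directory containing the booted
	 * file can be appended as the next path component.
	 */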
	len = strlen(SYSTEM_BOOT_PATH "/kernel");
	(void) strcpy(path, SYSTEM_BOOT_PATH "/kernel ");
	path += len + 1;

	if ((p = strrchr(filename, '/')) == NULL)
		return;

	while (p > filename && *(p - 1) == '/')
		p--;	/* remove trailing '/' characters */
	if (p == filename)
		p++;	/* so "/" -is- the modpath in this case */

	/*
	 * Remove optional isa-dependent directory name - the module
	 * subsystem will put this back again (!)
	 */
	len = p - filename;
	if (len > isalen &&
	    strncmp(&filename[len - isalen], isastr, isalen) == 0)
		p -= isalen;

	/*
	 * "/platform/mumblefrotz" + " " + MOD_DEFPATH
	 */
	len += (p - filename) + 1 + strlen(MOD_DEFPATH) + 1;
	(void) strncpy(path, filename, p - filename);
}