/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2006, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/pci_impl.h>
#include <sys/cpuvar.h>
#include <sys/x86_archext.h>
#include <sys/cmn_err.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/pghw.h>
#include <sys/cyclic.h>
#include <sys/sysevent.h>
#include <sys/smbios.h>
#include <sys/mca_x86.h>
#include <sys/mca_amd.h>
#include <sys/mc.h>
#include <sys/mc_amd.h>
#include <sys/psw.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sdt.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/fm/cpu/AMD.h>
#include <sys/fm/smb/fmsmb.h>
#include <acpica/include/acpi.h>
#include <sys/acpica.h>
#include <sys/cpu_module.h>

#include "ao.h"
#include "ao_mca_disp.h"

#define	AO_F_REVS_FG (X86_CHIPREV_AMD_F_REV_F | X86_CHIPREV_AMD_F_REV_G)

int ao_mca_smi_disable = 1;	/* attempt to disable SMI polling */

extern int x86gentopo_legacy;	/* x86 generic topology support */

struct ao_ctl_init {
	uint32_t ctl_revmask;	/* rev(s) to which this applies */
	uint64_t ctl_bits;	/* mca ctl reg bitmask to set */
};

/*
 * Additional NB MCA ctl initialization for revs F and G
 */
static const struct ao_ctl_init ao_nb_ctl_init[] = {
	{ AO_F_REVS_FG, AMD_NB_CTL_INIT_REV_FG },
	{ X86_CHIPREV_UNKNOWN, 0 }
};

typedef struct ao_bank_cfg {
	uint64_t bank_ctl_init_cmn;	/* Common init value */
	const struct ao_ctl_init *bank_ctl_init_extra; /* Extra for each rev */
	void (*bank_misc_initfunc)(cmi_hdl_t, ao_ms_data_t *, uint32_t);
	uint_t bank_ctl_mask;
} ao_bank_cfg_t;

static void nb_mcamisc_init(cmi_hdl_t, ao_ms_data_t *, uint32_t);

static const ao_bank_cfg_t ao_bank_cfgs[] = {
	{ AMD_DC_CTL_INIT_CMN, NULL, NULL, AMD_MSR_DC_MASK },
	{ AMD_IC_CTL_INIT_CMN, NULL, NULL, AMD_MSR_IC_MASK },
	{ AMD_BU_CTL_INIT_CMN, NULL, NULL, AMD_MSR_BU_MASK },
	{ AMD_LS_CTL_INIT_CMN, NULL, NULL, AMD_MSR_LS_MASK },
	{ AMD_NB_CTL_INIT_CMN, &ao_nb_ctl_init[0], nb_mcamisc_init,
	    AMD_MSR_NB_MASK },
};

static int ao_nbanks = sizeof (ao_bank_cfgs) / sizeof (ao_bank_cfgs[0]);

/*
 * This is quite awful but necessary to work around x86 system vendor's view of
 * the world. Other operating systems (you know who you are) don't understand
 * Opteron-specific error handling, so BIOS and system vendors often hide these
 * conditions from them by using SMI polling to copy out any errors from the
 * machine-check registers. When Solaris runs on a system with this feature,
 * we want to disable the SMI polling so we can use FMA instead. Sadly, there
 * isn't even a standard self-describing way to express the whole situation,
 * so we have to resort to hard-coded values. This should all be changed to
 * be a self-describing vendor-specific SMBIOS structure in the future.
 */
static const struct ao_smi_disable {
	const char *asd_sys_vendor;	/* SMB_TYPE_SYSTEM vendor prefix */
	const char *asd_sys_product;	/* SMB_TYPE_SYSTEM product prefix */
	const char *asd_bios_vendor;	/* SMB_TYPE_BIOS vendor prefix */
	uint8_t asd_code;		/* output code for SMI disable */
} ao_smi_disable[] = {
	{ "Sun Microsystems", "Galaxy12",
	    "American Megatrends", 0x59 },
	{ "Sun Microsystems", "Sun Fire X4100 Server",
	    "American Megatrends", 0x59 },
	{ "Sun Microsystems", "Sun Fire X4200 Server",
	    "American Megatrends", 0x59 },
	{ NULL, NULL, NULL, 0 }
};

/*
 * Match the r4 (RRRR), pp, and ii field values extracted from an error
 * code against the corresponding set of values that an error disposition
 * permits.
 */
static int
ao_disp_match_r4(uint16_t ref, uint8_t r4)
{
	static const uint16_t ao_r4_map[] = {
		AO_MCA_R4_BIT_ERR,	/* MCAX86_ERRCODE_RRRR_ERR */
		AO_MCA_R4_BIT_RD,	/* MCAX86_ERRCODE_RRRR_RD */
		AO_MCA_R4_BIT_WR,	/* MCAX86_ERRCODE_RRRR_WR */
		AO_MCA_R4_BIT_DRD,	/* MCAX86_ERRCODE_RRRR_DRD */
		AO_MCA_R4_BIT_DWR,	/* MCAX86_ERRCODE_RRRR_DWR */
		AO_MCA_R4_BIT_IRD,	/* MCAX86_ERRCODE_RRRR_IRD */
		AO_MCA_R4_BIT_PREFETCH,	/* MCAX86_ERRCODE_RRRR_PREFETCH */
		AO_MCA_R4_BIT_EVICT,	/* MCAX86_ERRCODE_RRRR_EVICT */
		AO_MCA_R4_BIT_SNOOP	/* MCAX86_ERRCODE_RRRR_SNOOP */
	};

	ASSERT(r4 < sizeof (ao_r4_map) / sizeof (uint16_t));

	return ((ref & ao_r4_map[r4]) != 0);
}

static int
ao_disp_match_pp(uint8_t ref, uint8_t pp)
{
	static const uint8_t ao_pp_map[] = {
		AO_MCA_PP_BIT_SRC,	/* MCAX86_ERRCODE_PP_SRC */
		AO_MCA_PP_BIT_RES,	/* MCAX86_ERRCODE_PP_RES */
		AO_MCA_PP_BIT_OBS,	/* MCAX86_ERRCODE_PP_OBS */
		AO_MCA_PP_BIT_GEN	/* MCAX86_ERRCODE_PP_GEN */
	};

	ASSERT(pp < sizeof (ao_pp_map) / sizeof (uint8_t));

	return ((ref & ao_pp_map[pp]) != 0);
}

static int
ao_disp_match_ii(uint8_t ref, uint8_t ii)
{
	static const uint8_t ao_ii_map[] = {
		AO_MCA_II_BIT_MEM,	/* MCAX86_ERRCODE_II_MEM */
		0,
		AO_MCA_II_BIT_IO,	/* MCAX86_ERRCODE_II_IO */
		AO_MCA_II_BIT_GEN	/* MCAX86_ERRCODE_II_GEN */
	};

	ASSERT(ii < sizeof (ao_ii_map) / sizeof (uint8_t));

	return ((ref & ao_ii_map[ii]) != 0);
}

/*
 * Extract the masked bit-field from an error code and clear those bits in
 * the caller's copy of the code.
 */
static uint8_t
bit_strip(uint16_t *codep, uint16_t mask, uint16_t shift)
{
	uint8_t val = (*codep & mask) >> shift;
	*codep &= ~mask;
	return (val);
}

#define	BIT_STRIP(codep, name) \
	bit_strip(codep, MCAX86_ERRCODE_##name##_MASK, \
	MCAX86_ERRCODE_##name##_SHIFT)

/*ARGSUSED*/
static int
ao_disp_match_one(const ao_error_disp_t *aed, uint64_t status, uint32_t rev,
    int bankno)
{
	uint16_t code = MCAX86_ERRCODE(status);
	uint8_t extcode = AMD_EXT_ERRCODE(status);
	uint64_t stat_mask = aed->aed_stat_mask;
	uint64_t stat_mask_res = aed->aed_stat_mask_res;

	/*
	 * If the bank's status register indicates overflow, then we can no
	 * longer rely on the value of CECC: our experience with actual fault
	 * injection has shown that multiple CEs overwriting each other shows
	 * AMD_BANK_STAT_CECC and AMD_BANK_STAT_UECC both set to zero. This
	 * should be clarified in a future BKDG or by the Revision Guide.
	 * This behaviour is fixed in revision F.
	 */
	if (bankno == AMD_MCA_BANK_NB &&
	    !X86_CHIPREV_ATLEAST(rev, X86_CHIPREV_AMD_F_REV_F) &&
	    status & MSR_MC_STATUS_OVER) {
		stat_mask &= ~AMD_BANK_STAT_CECC;
		stat_mask_res &= ~AMD_BANK_STAT_CECC;
	}

	if ((status & stat_mask) != stat_mask_res)
		return (0);

	/*
	 * The r4 and pp bits are stored separately, so we mask off and
	 * compare them for the code types that use them. Once we've taken
	 * the r4 and pp bits out of the equation, we can directly compare
	 * the resulting code with the one stored in the ao_error_disp_t.
	 */
	if (AMD_ERRCODE_ISMEM(code)) {
		uint8_t r4 = BIT_STRIP(&code, RRRR);

		if (!ao_disp_match_r4(aed->aed_stat_r4_bits, r4))
			return (0);

	} else if (AMD_ERRCODE_ISBUS(code)) {
		uint8_t r4 = BIT_STRIP(&code, RRRR);
		uint8_t pp = BIT_STRIP(&code, PP);
		uint8_t ii = BIT_STRIP(&code, II);

		if (!ao_disp_match_r4(aed->aed_stat_r4_bits, r4) ||
		    !ao_disp_match_pp(aed->aed_stat_pp_bits, pp) ||
		    !ao_disp_match_ii(aed->aed_stat_ii_bits, ii))
			return (0);
	}

	return (code == aed->aed_stat_code && extcode == aed->aed_stat_extcode);
}

/*ARGSUSED*/
cms_cookie_t
ao_ms_disp_match(cmi_hdl_t hdl, int ismc, int banknum, uint64_t status,
    uint64_t addr, uint64_t misc, void *mslogout)
{
	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
	uint32_t rev = ao->ao_ms_shared->aos_chiprev;
	const ao_error_disp_t *aed;

	for (aed = ao_error_disp[banknum]; aed->aed_stat_mask != 0; aed++) {
		if (ao_disp_match_one(aed, status, rev, banknum))
			return ((cms_cookie_t)aed);
	}

	return (NULL);
}

/*ARGSUSED*/
void
ao_ms_ereport_class(cmi_hdl_t hdl, cms_cookie_t mscookie,
    const char **cpuclsp, const char **leafclsp)
{
	const ao_error_disp_t *aed = mscookie;

	if (aed != NULL) {
		*cpuclsp = FM_EREPORT_CPU_AMD;
		*leafclsp = aed->aed_class;
	}
}

/*
 * Return B_TRUE if the caller is the first to claim the given one-time
 * per-chip configuration task, B_FALSE otherwise.
 */
static int
ao_chip_once(ao_ms_data_t *ao, enum ao_cfgonce_bitnum what)
{
	return (atomic_set_long_excl(&ao->ao_ms_shared->aos_cfgonce,
	    what) == 0 ? B_TRUE : B_FALSE);
}

/*
 * This knob exists in case any platform has a problem with our default
 * policy of disabling any interrupt registered in the NB MC4_MISC
 * register. Setting this may cause Solaris and external entities
 * who also have an interest in this register to argue over available
 * telemetry (so setting it is generally not recommended).
 */
int ao_nb_cfg_mc4misc_noseize = 0;

/*
 * The BIOS may have set up to receive SMI on counter overflow. It may also
 * have locked various fields or made them read-only. We will clear any
 * SMI request and leave the register locked. We will also clear the
 * counter and enable counting - while we don't use the counter it is nice
 * to have it enabled for verification and debug work.
 */
static void
nb_mcamisc_init(cmi_hdl_t hdl, ao_ms_data_t *ao, uint32_t rev)
{
	uint64_t val, nval;

	if (!X86_CHIPREV_MATCH(rev, AO_F_REVS_FG))
		return;

	if (cmi_hdl_rdmsr(hdl, AMD_MSR_NB_MISC, &val) != CMI_SUCCESS)
		return;

	ao->ao_ms_shared->aos_bcfg_nb_misc = val;

	if (ao_nb_cfg_mc4misc_noseize)
		return;	/* stash BIOS value, but no changes */

	/*
	 * The Valid bit tells us whether the CtrP bit is defined; if it
	 * is, the CtrP bit tells us whether an ErrCount field is present.
	 * If not, then there is nothing for us to do.
	 */
	if (!(val & AMD_NB_MISC_VALID) || !(val & AMD_NB_MISC_CTRP))
		return;

	nval = val;
	nval |= AMD_NB_MISC_CNTEN;		/* enable ECC error counting */
	nval &= ~AMD_NB_MISC_ERRCOUNT_MASK;	/* clear ErrCount */
	nval &= ~AMD_NB_MISC_OVRFLW;		/* clear Ovrflw */
	nval &= ~AMD_NB_MISC_INTTYPE_MASK;	/* no interrupt on overflow */
	nval |= AMD_NB_MISC_LOCKED;

	if (nval != val) {
		uint64_t locked = val & AMD_NB_MISC_LOCKED;

		if (locked)
			ao_bankstatus_prewrite(hdl, ao);

		(void) cmi_hdl_wrmsr(hdl, AMD_MSR_NB_MISC, nval);

		if (locked)
			ao_bankstatus_postwrite(hdl, ao);
	}
}

/*
 * NorthBridge (NB) MCA Configuration.
 *
 * We add and remove bits from the BIOS-configured value, rather than
 * writing an absolute value. The variables ao_nb_cfg_{add,remove}_cmn and
 * ao_nb_cfg_{add,remove}_revFG are available for modification via kmdb
 * and /etc/system. The revision-specific adds and removes are applied
 * after the common changes, and one write is made to the config register.
 * The watchdog is not intended to be configured via these variables -
 * use the watchdog policy below.
 */

/*
 * Bits to be added to the NB configuration register - all revs.
 */
uint32_t ao_nb_cfg_add_cmn = AMD_NB_CFG_ADD_CMN;

/*
 * Bits to be cleared from the NB configuration register - all revs.
 */
uint32_t ao_nb_cfg_remove_cmn = AMD_NB_CFG_REMOVE_CMN;

/*
 * Bits to be added to the NB configuration register - revs F and G.
 */
uint32_t ao_nb_cfg_add_revFG = AMD_NB_CFG_ADD_REV_FG;

/*
 * Bits to be cleared from the NB configuration register - revs F and G.
 */
uint32_t ao_nb_cfg_remove_revFG = AMD_NB_CFG_REMOVE_REV_FG;

struct ao_nb_cfg {
	uint32_t cfg_revmask;
	uint32_t *cfg_add_p;
	uint32_t *cfg_remove_p;
};

static const struct ao_nb_cfg ao_cfg_extra[] = {
	{ AO_F_REVS_FG, &ao_nb_cfg_add_revFG, &ao_nb_cfg_remove_revFG },
	{ X86_CHIPREV_UNKNOWN, NULL, NULL }
};

/*
 * Bits to be used if we configure the NorthBridge (NB) Watchdog. The watchdog
 * triggers a machine check exception when no response to an NB system access
 * occurs within a specified time interval.
 */
uint32_t ao_nb_cfg_wdog =
    AMD_NB_CFG_WDOGTMRCNTSEL_4095 |
    AMD_NB_CFG_WDOGTMRBASESEL_1MS;

/*
 * The default watchdog policy is to enable it (at the above rate) if it
 * is disabled; if it is already enabled then we leave it enabled at the
 * rate chosen by the BIOS.
 */
enum {
	AO_NB_WDOG_LEAVEALONE,		/* Don't touch watchdog config */
	AO_NB_WDOG_DISABLE,		/* Always disable watchdog */
	AO_NB_WDOG_ENABLE_IF_DISABLED,	/* If disabled, enable at our rate */
	AO_NB_WDOG_ENABLE_FORCE_RATE	/* Enable and set our rate */
} ao_nb_watchdog_policy = AO_NB_WDOG_ENABLE_IF_DISABLED;

static void
ao_nb_cfg(ao_ms_data_t *ao, uint32_t rev)
{
	const struct ao_nb_cfg *nbcp = &ao_cfg_extra[0];
	uint_t procnodeid = pg_plat_hw_instance_id(CPU, PGHW_PROCNODE);
	uint32_t val;

	/*
	 * Read the NorthBridge (NB) configuration register in PCI space,
	 * modify the settings accordingly, and store the new value back.
	 * Note that the stashed BIOS config value aos_bcfg_nb_cfg is used
	 * in ereport payload population to determine ECC syndrome type for
	 * memory errors.
	 */
	ao->ao_ms_shared->aos_bcfg_nb_cfg = val =
	    ao_pcicfg_read(procnodeid, MC_FUNC_MISCCTL, MC_CTL_REG_NBCFG);

	switch (ao_nb_watchdog_policy) {
	case AO_NB_WDOG_LEAVEALONE:
		break;

	case AO_NB_WDOG_DISABLE:
		val &= ~AMD_NB_CFG_WDOGTMRBASESEL_MASK;
		val &= ~AMD_NB_CFG_WDOGTMRCNTSEL_MASK;
		val |= AMD_NB_CFG_WDOGTMRDIS;
		break;

	default:
		cmn_err(CE_NOTE, "ao_nb_watchdog_policy=%d unrecognised, "
		    "using default policy", ao_nb_watchdog_policy);
		/*FALLTHRU*/

	case AO_NB_WDOG_ENABLE_IF_DISABLED:
		if (!(val & AMD_NB_CFG_WDOGTMRDIS))
			break;	/* if enabled leave rate intact */
		/*FALLTHRU*/

	case AO_NB_WDOG_ENABLE_FORCE_RATE:
		val &= ~AMD_NB_CFG_WDOGTMRBASESEL_MASK;
		val &= ~AMD_NB_CFG_WDOGTMRCNTSEL_MASK;
		val &= ~AMD_NB_CFG_WDOGTMRDIS;
		val |= ao_nb_cfg_wdog;
		break;
	}

	/*
	 * Now apply bit adds and removes, first those common to all revs
	 * and then the revision-specific ones.
	 */
	val &= ~ao_nb_cfg_remove_cmn;
	val |= ao_nb_cfg_add_cmn;

	while (nbcp->cfg_revmask != X86_CHIPREV_UNKNOWN) {
		if (X86_CHIPREV_MATCH(rev, nbcp->cfg_revmask)) {
			val &= ~(*nbcp->cfg_remove_p);
			val |= *nbcp->cfg_add_p;
		}
		nbcp++;
	}

	ao_pcicfg_write(procnodeid, MC_FUNC_MISCCTL, MC_CTL_REG_NBCFG, val);
}

static void
ao_dram_cfg(ao_ms_data_t *ao, uint32_t rev)
{
	uint_t procnodeid = pg_plat_hw_instance_id(CPU, PGHW_PROCNODE);
	union mcreg_dramcfg_lo dcfglo;

	ao->ao_ms_shared->aos_bcfg_dcfg_lo = MCREG_VAL32(&dcfglo) =
	    ao_pcicfg_read(procnodeid, MC_FUNC_DRAMCTL, MC_DC_REG_DRAMCFGLO);
	ao->ao_ms_shared->aos_bcfg_dcfg_hi =
	    ao_pcicfg_read(procnodeid, MC_FUNC_DRAMCTL, MC_DC_REG_DRAMCFGHI);
#ifdef OPTERON_ERRATUM_172
	if (X86_CHIPREV_MATCH(rev, AO_F_REVS_FG) &&
	    MCREG_FIELD_F_revFG(&dcfglo, ParEn)) {
		MCREG_FIELD_F_revFG(&dcfglo, ParEn) = 0;
		ao_pcicfg_write(procnodeid, MC_FUNC_DRAMCTL,
		    MC_DC_REG_DRAMCFGLO, MCREG_VAL32(&dcfglo));
	}
#endif
}

/*
 * This knob exists in case any platform has a problem with our default
 * policy of disabling any interrupt registered in the online spare
 * control register. Setting this may cause Solaris and external entities
 * who also have an interest in this register to argue over available
 * telemetry (so setting it is generally not recommended).
 */
int ao_nb_cfg_sparectl_noseize = 0;

/*
 * Set up the online spare control register (revs F and G). We disable
 * any interrupt registered by the BIOS and zero all error counts.
 */
static void
ao_sparectl_cfg(ao_ms_data_t *ao)
{
	uint_t procnodeid = pg_plat_hw_instance_id(CPU, PGHW_PROCNODE);
	union mcreg_sparectl sparectl;
	int chan, cs;

	ao->ao_ms_shared->aos_bcfg_nb_sparectl = MCREG_VAL32(&sparectl) =
	    ao_pcicfg_read(procnodeid, MC_FUNC_MISCCTL, MC_CTL_REG_SPARECTL);

	if (ao_nb_cfg_sparectl_noseize)
		return;	/* stash BIOS value, but no changes */

	/*
	 * If the BIOS has requested SMI interrupt type for ECC count
	 * overflow for a chip-select or channel, force those off.
	 */
	MCREG_FIELD_F_revFG(&sparectl, EccErrInt) = 0;
	MCREG_FIELD_F_revFG(&sparectl, SwapDoneInt) = 0;

	/*
	 * Zero EccErrCnt and write this back to all chan/cs combinations.
	 */
	MCREG_FIELD_F_revFG(&sparectl, EccErrCntWrEn) = 1;
	MCREG_FIELD_F_revFG(&sparectl, EccErrCnt) = 0;
	for (chan = 0; chan < MC_CHIP_NDRAMCHAN; chan++) {
		MCREG_FIELD_F_revFG(&sparectl, EccErrCntDramChan) = chan;

		for (cs = 0; cs < MC_CHIP_NCS; cs++) {
			MCREG_FIELD_F_revFG(&sparectl, EccErrCntDramCs) = cs;
			ao_pcicfg_write(procnodeid, MC_FUNC_MISCCTL,
			    MC_CTL_REG_SPARECTL, MCREG_VAL32(&sparectl));
		}
	}
}

int ao_forgive_uc = 0;		/* For test/debug only */
int ao_forgive_pcc = 0;		/* For test/debug only */
int ao_fake_poison = 0;		/* For test/debug only */

uint32_t
ao_ms_error_action(cmi_hdl_t hdl, int ismc, int banknum,
    uint64_t status, uint64_t addr, uint64_t misc, void *mslogout)
{
	const ao_error_disp_t *aed;
	uint32_t retval = 0;
	uint8_t when;
	int en;

	if (ao_forgive_uc)
		retval |= CMS_ERRSCOPE_CLEARED_UC;

	if (ao_forgive_pcc)
		retval |= CMS_ERRSCOPE_CURCONTEXT_OK;

	if (ao_fake_poison && status & MSR_MC_STATUS_UC)
		retval |= CMS_ERRSCOPE_POISONED;

	if (retval)
		return (retval);

	aed = ao_ms_disp_match(hdl, ismc, banknum, status, addr, misc,
	    mslogout);

	/*
	 * If we do not recognise the error, let the cpu module apply
	 * the generic criteria to decide how to react.
	 */
	if (aed == NULL)
		return (0);

	en = (status & MSR_MC_STATUS_EN) != 0;

	if ((when = aed->aed_panic_when) == AO_AED_PANIC_NEVER)
		retval |= CMS_ERRSCOPE_IGNORE_ERR;

	if ((when & AO_AED_PANIC_ALWAYS) ||
	    ((when & AO_AED_PANIC_IFMCE) && (en || ismc)))
		retval |= CMS_ERRSCOPE_FORCE_FATAL;

	/*
	 * The original AMD implementation would panic on a machine check
	 * (not a poll) if the status overflow bit was set, with an
	 * exception for the case of rev F or later with an NB error
	 * indicating CECC. This came from the perception that the
	 * overflow bit was not correctly managed on rev E and earlier, for
	 * example that repeated correctable memory errors did not set
	 * OVER but somehow cleared CECC.
	 *
	 * We will leave the generic support to evaluate overflow errors
	 * and decide to panic on their individual merits, e.g., if PCC
	 * is set and so on. The AMD docs do say (as Intel does) that
	 * the status information is *all* from the higher-priority
	 * error in the case of an overflow, so it is at least as serious
	 * as the original and we can decide panic etc. based on it.
	 */

	return (retval);
}

/*
 * Will need to change for family 0x10.
 */
static uint_t
ao_ereport_synd(ao_ms_data_t *ao, uint64_t status, uint_t *typep,
    int is_nb)
{
	if (is_nb) {
		if (ao->ao_ms_shared->aos_bcfg_nb_cfg &
		    AMD_NB_CFG_CHIPKILLECCEN) {
			*typep = AMD_SYNDTYPE_CHIPKILL;
			return (AMD_NB_STAT_CKSYND(status));
		} else {
			*typep = AMD_SYNDTYPE_ECC;
			return (AMD_BANK_SYND(status));
		}
	} else {
		*typep = AMD_SYNDTYPE_ECC;
		return (AMD_BANK_SYND(status));
	}
}

static nvlist_t *
ao_ereport_create_resource_elem(cmi_hdl_t hdl, nv_alloc_t *nva,
    mc_unum_t *unump, int dimmnum)
{
	nvlist_t *nvl, *snvl;
	nvlist_t *board_list = NULL;

	if ((nvl = fm_nvlist_create(nva)) == NULL)	/* freed by caller */
		return (NULL);

	if ((snvl = fm_nvlist_create(nva)) == NULL) {
		fm_nvlist_destroy(nvl, nva ? FM_NVA_RETAIN : FM_NVA_FREE);
		return (NULL);
	}

	(void) nvlist_add_uint64(snvl, FM_FMRI_HC_SPECIFIC_OFFSET,
	    unump->unum_offset);

	if (!x86gentopo_legacy) {
		board_list = cmi_hdl_smb_bboard(hdl);

		if (board_list == NULL) {
			fm_nvlist_destroy(nvl,
			    nva ? FM_NVA_RETAIN : FM_NVA_FREE);
			fm_nvlist_destroy(snvl,
			    nva ? FM_NVA_RETAIN : FM_NVA_FREE);
			return (NULL);
		}

		fm_fmri_hc_create(nvl, FM_HC_SCHEME_VERSION, NULL, snvl,
		    board_list, 4,
		    "chip", cmi_hdl_smb_chipid(hdl),
		    "memory-controller", unump->unum_mc,
		    "dimm", unump->unum_dimms[dimmnum],
		    "rank", unump->unum_rank);
	} else {
		fm_fmri_hc_set(nvl, FM_HC_SCHEME_VERSION, NULL, snvl, 5,
		    "motherboard", unump->unum_board,
		    "chip", unump->unum_chip,
		    "memory-controller", unump->unum_mc,
		    "dimm", unump->unum_dimms[dimmnum],
		    "rank", unump->unum_rank);
	}

	fm_nvlist_destroy(snvl, nva ? FM_NVA_RETAIN : FM_NVA_FREE);

	return (nvl);
}

static void
ao_ereport_add_resource(cmi_hdl_t hdl, nvlist_t *payload, nv_alloc_t *nva,
    mc_unum_t *unump)
{
	nvlist_t *elems[MC_UNUM_NDIMM];
	int nelems = 0;
	int i;

	for (i = 0; i < MC_UNUM_NDIMM; i++) {
		if (unump->unum_dimms[i] == MC_INVALNUM)
			break;

		if ((elems[nelems] = ao_ereport_create_resource_elem(hdl, nva,
		    unump, i)) == NULL)
			break;

		nelems++;
	}

	if (nelems == 0)
		return;

	fm_payload_set(payload, FM_EREPORT_PAYLOAD_NAME_RESOURCE,
	    DATA_TYPE_NVLIST_ARRAY, nelems, elems, NULL);

	for (i = 0; i < nelems; i++)
		fm_nvlist_destroy(elems[i], nva ? FM_NVA_RETAIN : FM_NVA_FREE);
}

/*ARGSUSED*/
void
ao_ms_ereport_add_logout(cmi_hdl_t hdl, nvlist_t *ereport,
    nv_alloc_t *nva, int banknum, uint64_t status, uint64_t addr,
    uint64_t misc, void *mslogout, cms_cookie_t mscookie)
{
	ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl);
	const ao_error_disp_t *aed = mscookie;
	uint_t synd, syndtype;
	uint64_t members;

	if (aed == NULL)
		return;

	members = aed->aed_ereport_members;

	synd = ao_ereport_synd(ao, status, &syndtype,
	    banknum == AMD_MCA_BANK_NB);

	if (members & FM_EREPORT_PAYLOAD_FLAG_SYND) {
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_NAME_SYND,
		    DATA_TYPE_UINT16, synd, NULL);
	}

	if (members & FM_EREPORT_PAYLOAD_FLAG_SYND_TYPE) {
		fm_payload_set(ereport, FM_EREPORT_PAYLOAD_NAME_SYND_TYPE,
		    DATA_TYPE_STRING, (syndtype == AMD_SYNDTYPE_CHIPKILL ?
726 "C4" : "E"), NULL); 727 } 728 729 if (members & FM_EREPORT_PAYLOAD_FLAG_RESOURCE) { 730 mc_unum_t unum; 731 732 if (((aed->aed_flags & AO_AED_FLAGS_ADDRTYPE) == 733 AO_AED_F_PHYSICAL) && (status & MSR_MC_STATUS_ADDRV) && 734 cmi_mc_patounum(addr, aed->aed_addrvalid_hi, 735 aed->aed_addrvalid_lo, synd, syndtype, &unum) == 736 CMI_SUCCESS) 737 ao_ereport_add_resource(hdl, ereport, nva, &unum); 738 } 739 } 740 741 /*ARGSUSED*/ 742 boolean_t 743 ao_ms_ereport_includestack(cmi_hdl_t hdl, cms_cookie_t mscookie) 744 { 745 const ao_error_disp_t *aed = mscookie; 746 747 if (aed == NULL) 748 return (0); 749 750 return ((aed->aed_ereport_members & 751 FM_EREPORT_PAYLOAD_FLAG_STACK) != 0); 752 } 753 754 cms_errno_t 755 ao_ms_msrinject(cmi_hdl_t hdl, uint_t msr, uint64_t val) 756 { 757 ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl); 758 cms_errno_t rv = CMSERR_BADMSRWRITE; 759 760 ao_bankstatus_prewrite(hdl, ao); 761 if (cmi_hdl_wrmsr(hdl, msr, val) == CMI_SUCCESS) 762 rv = CMS_SUCCESS; 763 ao_bankstatus_postwrite(hdl, ao); 764 765 return (rv); 766 } 767 768 /*ARGSUSED*/ 769 uint64_t 770 ao_ms_mcgctl_val(cmi_hdl_t hdl, int nbanks, uint64_t def) 771 { 772 return ((1ULL << nbanks) - 1); 773 } 774 775 boolean_t 776 ao_ms_bankctl_skipinit(cmi_hdl_t hdl, int banknum) 777 { 778 ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl); 779 780 if (banknum != AMD_MCA_BANK_NB) 781 return (B_FALSE); 782 783 /* 784 * If we are the first to atomically set the "I'll do it" bit 785 * then return B_FALSE (do not skip), otherwise skip with B_TRUE. 786 */ 787 return (ao_chip_once(ao, AO_CFGONCE_NBMCA) == B_TRUE ? 788 B_FALSE : B_TRUE); 789 } 790 791 uint64_t 792 ao_ms_bankctl_val(cmi_hdl_t hdl, int banknum, uint64_t def) 793 { 794 ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl); 795 const struct ao_ctl_init *extrap; 796 const ao_bank_cfg_t *bankcfg; 797 uint64_t mcictl; 798 uint32_t rev = ao->ao_ms_shared->aos_chiprev; 799 800 if (banknum >= sizeof (ao_bank_cfgs) / sizeof (ao_bank_cfgs[0])) 801 return (def); 802 803 bankcfg = &ao_bank_cfgs[banknum]; 804 extrap = bankcfg->bank_ctl_init_extra; 805 806 mcictl = bankcfg->bank_ctl_init_cmn; 807 808 while (extrap != NULL && extrap->ctl_revmask != X86_CHIPREV_UNKNOWN) { 809 if (X86_CHIPREV_MATCH(rev, extrap->ctl_revmask)) 810 mcictl |= extrap->ctl_bits; 811 extrap++; 812 } 813 814 return (mcictl); 815 } 816 817 /*ARGSUSED*/ 818 void 819 ao_bankstatus_prewrite(cmi_hdl_t hdl, ao_ms_data_t *ao) 820 { 821 #ifndef __xpv 822 uint64_t hwcr; 823 824 if (cmi_hdl_rdmsr(hdl, MSR_AMD_HWCR, &hwcr) != CMI_SUCCESS) 825 return; 826 827 ao->ao_ms_hwcr_val = hwcr; 828 829 if (!(hwcr & AMD_HWCR_MCI_STATUS_WREN)) { 830 hwcr |= AMD_HWCR_MCI_STATUS_WREN; 831 (void) cmi_hdl_wrmsr(hdl, MSR_AMD_HWCR, hwcr); 832 } 833 #endif 834 } 835 836 /*ARGSUSED*/ 837 void 838 ao_bankstatus_postwrite(cmi_hdl_t hdl, ao_ms_data_t *ao) 839 { 840 #ifndef __xpv 841 uint64_t hwcr = ao->ao_ms_hwcr_val; 842 843 if (!(hwcr & AMD_HWCR_MCI_STATUS_WREN)) { 844 hwcr &= ~AMD_HWCR_MCI_STATUS_WREN; 845 (void) cmi_hdl_wrmsr(hdl, MSR_AMD_HWCR, hwcr); 846 } 847 #endif 848 } 849 850 void 851 ao_ms_mca_init(cmi_hdl_t hdl, int nbanks) 852 { 853 ao_ms_data_t *ao = cms_hdl_getcmsdata(hdl); 854 uint32_t rev = ao->ao_ms_shared->aos_chiprev; 855 ao_ms_mca_t *mca = &ao->ao_ms_mca; 856 uint64_t *maskp; 857 int i; 858 859 maskp = mca->ao_mca_bios_cfg.bcfg_bank_mask = kmem_zalloc(nbanks * 860 sizeof (uint64_t), KM_SLEEP); 861 862 /* 863 * Read the bank ctl mask MSRs, but only as many as we know 864 * certainly exist - don't calculate the register address. 
	 * Also initialize the MCi_MISC register where required.
	 */
	for (i = 0; i < MIN(nbanks, ao_nbanks); i++) {
		(void) cmi_hdl_rdmsr(hdl, ao_bank_cfgs[i].bank_ctl_mask,
		    maskp++);
		if (ao_bank_cfgs[i].bank_misc_initfunc != NULL)
			ao_bank_cfgs[i].bank_misc_initfunc(hdl, ao, rev);
	}

	if (ao_chip_once(ao, AO_CFGONCE_NBCFG) == B_TRUE) {
		ao_nb_cfg(ao, rev);

		if (X86_CHIPREV_MATCH(rev, AO_F_REVS_FG))
			ao_sparectl_cfg(ao);
	}

	if (ao_chip_once(ao, AO_CFGONCE_DRAMCFG) == B_TRUE)
		ao_dram_cfg(ao, rev);

	ao_procnode_scrubber_enable(hdl, ao);
}

/*
 * Note that although this cpu module is loaded before the PSMs are
 * loaded (and hence before acpica is loaded), this function is
 * called from post_startup(), after PSMs are initialized and acpica
 * is loaded.
 */
static int
ao_acpi_find_smicmd(int *asd_port)
{
	ACPI_TABLE_FADT *fadt = NULL;

	/*
	 * AcpiGetTable works even if ACPI is disabled, so a failure
	 * here means we weren't able to retrieve a pointer to the FADT.
	 */
	if (AcpiGetTable(ACPI_SIG_FADT, 1, (ACPI_TABLE_HEADER **)&fadt) !=
	    AE_OK)
		return (-1);

	ASSERT(fadt != NULL);

	*asd_port = fadt->SmiCommand;
	return (0);
}

/*ARGSUSED*/
void
ao_ms_post_startup(cmi_hdl_t hdl)
{
	const struct ao_smi_disable *asd;
	id_t id;
	int rv = -1, asd_port;

	smbios_system_t sy;
	smbios_bios_t sb;
	smbios_info_t si;

	/*
	 * Fetch the System and BIOS vendor strings from SMBIOS and see if
	 * they match a value in our table. If so, disable SMI error polling.
	 * This is grotesque and should be replaced by self-describing
	 * vendor-specific SMBIOS data or a specification enhancement instead.
	 */
	if (ao_mca_smi_disable && ksmbios != NULL &&
	    smbios_info_bios(ksmbios, &sb) != SMB_ERR &&
	    (id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
	    smbios_info_common(ksmbios, id, &si) != SMB_ERR) {

		for (asd = ao_smi_disable; asd->asd_sys_vendor != NULL; asd++) {
			if (strncmp(asd->asd_sys_vendor, si.smbi_manufacturer,
			    strlen(asd->asd_sys_vendor)) != 0 ||
			    strncmp(asd->asd_sys_product, si.smbi_product,
			    strlen(asd->asd_sys_product)) != 0 ||
			    strncmp(asd->asd_bios_vendor, sb.smbb_vendor,
			    strlen(asd->asd_bios_vendor)) != 0)
				continue;

			/*
			 * Look for the SMI_CMD port in the ACPI FADT;
			 * if the port is 0, this platform doesn't support
			 * SMM, so there is no SMI error polling to disable.
			 */
			if ((rv = ao_acpi_find_smicmd(&asd_port)) == 0 &&
			    asd_port != 0) {
				cmn_err(CE_CONT, "?SMI polling disabled in "
				    "favor of Solaris Fault Management for "
				    "AMD Processors\n");

				outb(asd_port, asd->asd_code);

			} else if (rv < 0) {
				cmn_err(CE_CONT, "?Solaris Fault Management "
				    "for AMD Processors could not disable SMI "
				    "polling because an error occurred while "
				    "trying to determine the SMI command port "
				    "from the ACPI FADT table\n");
			}
			break;
		}
	}
}