1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  24  */
  25 
  26 #include <sys/types.h>
  27 #include <sys/cmn_err.h>
  28 #include <sys/errno.h>
  29 #include <sys/log.h>
  30 #include <sys/systm.h>
  31 #include <sys/modctl.h>
  32 #include <sys/errorq.h>
  33 #include <sys/controlregs.h>
  34 #include <sys/fm/util.h>
  35 #include <sys/fm/protocol.h>
  36 #include <sys/sysevent.h>
  37 #include <sys/pghw.h>
  38 #include <sys/cyclic.h>
  39 #include <sys/pci_cfgspace.h>
  40 #include <sys/mc_intel.h>
  41 #include <sys/smbios.h>
  42 #include <sys/pci.h>
  43 #include <sys/pcie.h>
  44 #include "nb5000.h"
  45 #include "nb_log.h"
  46 #include "dimm_phys.h"
  47 #include "rank.h"
  48 
  49 int nb_hw_memory_scrub_enable = 1;
  50 static int nb_sw_scrub_disabled = 0;
  51 
  52 int nb_5000_memory_controller = 0;
  53 int nb_number_memory_controllers = NB_5000_MAX_MEM_CONTROLLERS;
  54 int nb_channels_per_branch = NB_MAX_CHANNELS_PER_BRANCH;
  55 int nb_dimms_per_channel = 0;
  56 
  57 nb_dimm_t **nb_dimms;
  58 int nb_ndimm;
  59 uint32_t nb_chipset;
  60 enum nb_memory_mode nb_mode;
  61 bank_select_t nb_banks[NB_MAX_MEM_BRANCH_SELECT];
  62 rank_select_t nb_ranks[NB_5000_MAX_MEM_CONTROLLERS][NB_MAX_MEM_RANK_SELECT];
  63 uint32_t top_of_low_memory;
  64 uint8_t spare_rank[NB_5000_MAX_MEM_CONTROLLERS];
  65 
  66 extern int nb_no_smbios;
  67 
  68 errorq_t *nb_queue;
  69 kmutex_t nb_mutex;
  70 
  71 static int nb_dimm_slots;
  72 
  73 static uint32_t nb_err0_int;
  74 static uint32_t nb_err1_int;
  75 static uint32_t nb_err2_int;
  76 static uint32_t nb_mcerr_int;
  77 static uint32_t nb_emask_int;
  78 
  79 static uint32_t nb_err0_fbd;
  80 static uint32_t nb_err1_fbd;
  81 static uint32_t nb_err2_fbd;
  82 static uint32_t nb_mcerr_fbd;
  83 static uint32_t nb_emask_fbd;
  84 
  85 static uint32_t nb_err0_mem;
  86 static uint32_t nb_err1_mem;
  87 static uint32_t nb_err2_mem;
  88 static uint32_t nb_mcerr_mem;
  89 static uint32_t nb_emask_mem;
  90 
  91 static uint16_t nb_err0_fsb;
  92 static uint16_t nb_err1_fsb;
  93 static uint16_t nb_err2_fsb;
  94 static uint16_t nb_mcerr_fsb;
  95 static uint16_t nb_emask_fsb;
  96 
  97 static uint16_t nb_err0_thr;
  98 static uint16_t nb_err1_thr;
  99 static uint16_t nb_err2_thr;
 100 static uint16_t nb_mcerr_thr;
 101 static uint16_t nb_emask_thr;
 102 
 103 static uint32_t emask_uncor_pex[NB_PCI_DEV];
 104 static uint32_t emask_cor_pex[NB_PCI_DEV];
 105 static uint32_t emask_rp_pex[NB_PCI_DEV];
 106 static uint32_t docmd_pex[NB_PCI_DEV];
 107 static uint32_t uncerrsev[NB_PCI_DEV];
 108 
 109 static uint32_t l_mcerr_int;
 110 static uint32_t l_mcerr_fbd;
 111 static uint32_t l_mcerr_mem;
 112 static uint16_t l_mcerr_fsb;
 113 static uint16_t l_mcerr_thr;
 114 
 115 uint_t nb5000_emask_fbd = EMASK_5000_FBD_RES;
 116 uint_t nb5400_emask_fbd = 0;
 117 int nb5000_reset_emask_fbd = 1;
 118 uint_t nb5000_mask_poll_fbd = EMASK_FBD_NF;
 119 uint_t nb5000_mask_bios_fbd = EMASK_FBD_FATAL;
 120 uint_t nb5400_mask_poll_fbd = EMASK_5400_FBD_NF;
 121 uint_t nb5400_mask_bios_fbd = EMASK_5400_FBD_FATAL;
 122 uint_t nb7300_mask_poll_fbd = EMASK_7300_FBD_NF;
 123 uint_t nb7300_mask_bios_fbd = EMASK_7300_FBD_FATAL;
 124 
 125 int nb5100_reset_emask_mem = 1;
 126 uint_t nb5100_mask_poll_mem = EMASK_MEM_NF;
 127 
 128 uint_t nb5000_emask_fsb = 0;
 129 int nb5000_reset_emask_fsb = 1;
 130 uint_t nb5000_mask_poll_fsb = EMASK_FSB_NF;
 131 uint_t nb5000_mask_bios_fsb = EMASK_FSB_FATAL;
 132 
 133 uint_t nb5100_emask_int = EMASK_INT_5100;
 134 uint_t nb5400_emask_int = EMASK_INT_5400;
 135 
 136 uint_t nb7300_emask_int = EMASK_INT_7300;
 137 uint_t nb7300_emask_int_step0 = EMASK_INT_7300_STEP_0;
 138 uint_t nb5000_emask_int = EMASK_INT_5000;
 139 int nb5000_reset_emask_int = 1;
 140 uint_t nb5000_mask_poll_int = EMASK_INT_NF;
 141 uint_t nb5000_mask_bios_int = EMASK_INT_FATAL;
 142 uint_t nb5100_mask_poll_int = EMASK_INT_5100_NF;
 143 uint_t nb5100_mask_bios_int = EMASK_INT_5100_FATAL;
 144 
 145 uint_t nb_mask_poll_thr = EMASK_THR_NF;
 146 uint_t nb_mask_bios_thr = EMASK_THR_FATAL;
 147 
 148 int nb5000_reset_uncor_pex = 0;
 149 uint_t nb5000_mask_uncor_pex = 0;
 150 int nb5000_reset_cor_pex = 0;
 151 uint_t nb5000_mask_cor_pex = 0xffffffff;
 152 uint32_t nb5000_rp_pex = 0x1;
 153 
 154 int nb_mask_mc_set;
 155 
 156 typedef struct find_dimm_label {
 157         void (*label_function)(int, char *, int);
 158 } find_dimm_label_t;
 159 
 160 static void x8450_dimm_label(int, char *, int);
 161 static void cp3250_dimm_label(int, char *, int);
 162 
 163 static struct platform_label {
 164         const char *sys_vendor;         /* SMB_TYPE_SYSTEM vendor prefix */
 165         const char *sys_product;        /* SMB_TYPE_SYSTEM product prefix */
 166         find_dimm_label_t dimm_label;
 167         int dimms_per_channel;
 168 } platform_label[] = {
 169         { "SUN MICROSYSTEMS", "SUN BLADE X8450 SERVER MODULE",
 170             { x8450_dimm_label }, 8 },
 171         { "MiTAC,Shunde", "CP3250", { cp3250_dimm_label }, 0 },
 172         { NULL, NULL, { NULL }, 0 }
 173 };
 174 
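/*
 * SPD (serial presence detect) access helpers.  The SPD status and
 * command registers are selected by branch and channel; the bus
 * argument encodes the branch in its upper bits and the channel in
 * bit 0.
 */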
 175 static unsigned short
 176 read_spd(int bus)
 177 {
 178         unsigned short rt = 0;
 179         int branch = bus >> 1;
 180         int channel = bus & 1;
 181 
 182         rt = SPD_RD(branch, channel);
 183 
 184         return (rt);
 185 }
 186 
 187 static void
 188 write_spdcmd(int bus, uint32_t val)
 189 {
 190         int branch = bus >> 1;
 191         int channel = bus & 1;
 192         SPDCMD_WR(branch, channel, val);
 193 }
 194 
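/*
 * Read one byte from the SPD EEPROM of the given slave device at offset
 * addr: wait for the SPD interface to go idle, issue the command and
 * poll for valid read data, retrying on bus errors.  Returns the byte
 * read or -1 on timeout or persistent bus error.
 */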
 195 static int
 196 read_spd_eeprom(int bus, int slave, int addr)
 197 {
 198         int retry = 4;
 199         int wait;
 200         int spd;
 201         uint32_t cmd;
 202 
 203         for (;;) {
 204                 wait = 1000;
 205                 for (;;) {
 206                         spd = read_spd(bus);
 207                         if ((spd & SPD_BUSY) == 0)
 208                                 break;
 209                         if (--wait == 0)
 210                                 return (-1);
 211                         drv_usecwait(10);
 212                 }
 213                 cmd = SPD_EEPROM_WRITE | SPD_ADDR(slave, addr);
 214                 write_spdcmd(bus, cmd);
 215                 wait = 1000;
 216                 for (;;) {
 217                         spd = read_spd(bus);
 218                         if ((spd & SPD_BUSY) == 0)
 219                                 break;
 220                         if (--wait == 0) {
 221                                 spd = SPD_BUS_ERROR;
 222                                 break;
 223                         }
 224                         drv_usecwait(10);
 225                 }
 226                 while ((spd & SPD_BUS_ERROR) == 0 &&
 227                     (spd & (SPD_READ_DATA_VALID|SPD_BUSY)) !=
 228                     SPD_READ_DATA_VALID) {
 229                         spd = read_spd(bus);
 230                         if (--wait == 0)
 231                                 return (-1);
 232                 }
 233                 if ((spd & SPD_BUS_ERROR) == 0)
 234                         break;
 235                 if (--retry == 0)
 236                         return (-1);
 237         }
 238         return (spd & 0xff);
 239 }
 240 
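/*
 * Free the per-DIMM structures and the DIMM slot array, then release
 * the dimm_phys state via dimm_fini().
 */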
 241 static void
 242 nb_fini()
 243 {
 244         int i, j;
 245         int nchannels = nb_number_memory_controllers * nb_channels_per_branch;
 246         nb_dimm_t **dimmpp;
 247         nb_dimm_t *dimmp;
 248 
 249         dimmpp = nb_dimms;
 250         for (i = 0; i < nchannels; i++) {
 251                 for (j = 0; j < nb_dimms_per_channel; j++) {
 252                         dimmp = *dimmpp;
 253                         if (dimmp) {
 254                                 kmem_free(dimmp, sizeof (nb_dimm_t));
 255                                 *dimmpp = NULL;
 256                         }
 257                         dimmpp++;
 258                 }
 259         }
 260         kmem_free(nb_dimms, sizeof (nb_dimm_t *) * nb_dimm_slots);
 261         nb_dimms = NULL;
 262         dimm_fini();
 263 }
 264 
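/*
 * If hardware memory scrubbing is enabled, turn on patrol scrub (and
 * demand scrub when not in mirror mode) and, the first time through,
 * disable the software memory scrubber.
 */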
 265 void
 266 nb_scrubber_enable()
 267 {
 268         uint32_t mc;
 269 
 270         if (!nb_hw_memory_scrub_enable)
 271                 return;
 272 
 273         mc = MC_RD();
 274         if ((mc & MC_MIRROR) != 0) /* mirror mode */
 275                 mc |= MC_PATROL_SCRUB;
 276         else
 277                 mc |= MC_PATROL_SCRUB|MC_DEMAND_SCRUB;
 278         MC_WR(mc);
 279 
        if (!nb_sw_scrub_disabled++)
                cmi_mc_sw_memscrub_disable();
 282 }
 283 
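/*
 * Read the manufacturing data (manufacturer id and location, serial
 * number and date, plus part number and revision when the SPD image is
 * large enough) from an FB-DIMM SPD EEPROM.
 */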
 284 static void
 285 fbd_eeprom(int channel, int dimm, nb_dimm_t *dp)
 286 {
 287         int i, t;
 288         int spd_sz;
 289 
 290         t = read_spd_eeprom(channel, dimm, 0) & 0xf;
 291         if (t == 1)
 292                 spd_sz = 128;
 293         else if (t == 2)
 294                 spd_sz = 176;
 295         else
 296                 spd_sz = 256;
 297         dp->manufacture_id = read_spd_eeprom(channel, dimm, 117) |
 298             (read_spd_eeprom(channel, dimm, 118) << 8);
 299         dp->manufacture_location = read_spd_eeprom(channel, dimm, 119);
 300         dp->serial_number =
 301             (read_spd_eeprom(channel, dimm, 122) << 24) |
 302             (read_spd_eeprom(channel, dimm, 123) << 16) |
 303             (read_spd_eeprom(channel, dimm, 124) << 8) |
 304             read_spd_eeprom(channel, dimm, 125);
 305         t = read_spd_eeprom(channel, dimm, 121);
 306         dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
 307         dp->manufacture_year = read_spd_eeprom(channel, dimm, 120);
 308         if (spd_sz > 128) {
 309                 for (i = 0; i < sizeof (dp->part_number); i++) {
 310                         dp->part_number[i] =
 311                             read_spd_eeprom(channel, dimm, 128 + i);
 312                 }
 313                 for (i = 0; i < sizeof (dp->revision); i++) {
 314                         dp->revision[i] =
 315                             read_spd_eeprom(channel, dimm, 146 + i);
 316                 }
 317         }
 318 }
 319 
/* read the manufacturing data of the DDR2 dimm */
 321 static void
 322 ddr2_eeprom(int channel, int dimm, nb_dimm_t *dp)
 323 {
 324         int i, t;
 325         int slave;
 326 
 327         slave = channel & 0x1 ? dimm + 4 : dimm;
 328 
 329         /* byte[3]: number of row addresses */
 330         dp->nrow = read_spd_eeprom(channel, slave, 3) & 0x1f;
 331 
 332         /* byte[4]: number of column addresses */
 333         dp->ncolumn = read_spd_eeprom(channel, slave, 4) & 0xf;
 334 
 335         /* byte[5]: numranks; 0 means one rank */
 336         dp->nranks = (read_spd_eeprom(channel, slave, 5) & 0x3) + 1;
 337 
 338         /* byte[6]: data width */
 339         dp->width = (read_spd_eeprom(channel, slave, 6) >> 5) << 2;
 340 
 341         /* byte[17]: number of banks */
 342         dp->nbanks = read_spd_eeprom(channel, slave, 17);
 343 
 344         dp->dimm_size = DIMMSIZE(dp->nrow, dp->ncolumn, dp->nranks, dp->nbanks,
 345             dp->width);
 346 
 347         /* manufacture-id - byte[64-65] */
        dp->manufacture_id = read_spd_eeprom(channel, slave, 64) |
            (read_spd_eeprom(channel, slave, 65) << 8);
 350 
 351         /* location - byte[72] */
 352         dp->manufacture_location = read_spd_eeprom(channel, slave, 72);
 353 
 354         /* serial number - byte[95-98] */
 355         dp->serial_number =
 356             (read_spd_eeprom(channel, slave, 98) << 24) |
 357             (read_spd_eeprom(channel, slave, 97) << 16) |
 358             (read_spd_eeprom(channel, slave, 96) << 8) |
 359             read_spd_eeprom(channel, slave, 95);
 360 
 361         /* week - byte[94] */
 362         t = read_spd_eeprom(channel, slave, 94);
 363         dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
        /* year - byte[93] */
 365         t = read_spd_eeprom(channel, slave, 93);
 366         dp->manufacture_year = (t >> 4) * 10 + (t & 0xf) + 2000;
 367 
 368         /* part number - byte[73-81] */
 369         for (i = 0; i < 8; i++) {
 370                 dp->part_number[i] = read_spd_eeprom(channel, slave, 73 + i);
 371         }
 372 
 373         /* revision - byte[91-92] */
 374         for (i = 0; i < 2; i++) {
 375                 dp->revision[i] = read_spd_eeprom(channel, slave, 91 + i);
 376         }
 377 }
 378 
 379 static boolean_t
 380 nb_dimm_present(int channel, int dimm)
 381 {
 382         boolean_t rc = B_FALSE;
 383 
 384         if (nb_chipset == INTEL_NB_5100) {
 385                 int t, slave;
 386                 slave = channel & 0x1 ? dimm + 4 : dimm;
 387                 /* read the type field from the dimm and check for DDR2 type */
 388                 if ((t = read_spd_eeprom(channel, slave, SPD_MEM_TYPE)) == -1)
 389                         return (B_FALSE);
 390                 rc = (t & 0xf) == SPD_DDR2;
 391         } else {
 392                 rc = MTR_PRESENT(MTR_RD(channel, dimm));
 393         }
 394 
 395         return (rc);
 396 }
 397 
 398 static nb_dimm_t *
 399 nb_ddr2_dimm_init(int channel, int dimm, int start_rank)
 400 {
 401         nb_dimm_t *dp;
 402 
 403         if (nb_dimm_present(channel, dimm) == B_FALSE)
 404                 return (NULL);
 405 
 406         dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
 407 
 408         ddr2_eeprom(channel, dimm, dp);
 409 
 410         /* The 1st rank of the dimm takes on this value */
 411         dp->start_rank = (uint8_t)start_rank;
 412 
 413         dp->mtr_present = 1;
 414 
 415         return (dp);
 416 }
 417 
 418 static nb_dimm_t *
 419 nb_fbd_dimm_init(int channel, int dimm, uint16_t mtr)
 420 {
 421         nb_dimm_t *dp;
 422         int t;
 423 
 424         if (MTR_PRESENT(mtr) == 0)
 425                 return (NULL);
 426         t = read_spd_eeprom(channel, dimm, SPD_MEM_TYPE) & 0xf;
 427 
 428         /* check for the dimm type */
 429         if (t != SPD_FBDIMM)
 430                 return (NULL);
 431 
 432         dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
 433 
 434         fbd_eeprom(channel, dimm, dp);
 435 
 436         dp->mtr_present = MTR_PRESENT(mtr);
 437         dp->start_rank = dimm << 1;
 438         dp->nranks = MTR_NUMRANK(mtr);
 439         dp->nbanks = MTR_NUMBANK(mtr);
 440         dp->ncolumn = MTR_NUMCOL(mtr);
 441         dp->nrow = MTR_NUMROW(mtr);
 442         dp->width = MTR_WIDTH(mtr);
 443         dp->dimm_size = MTR_DIMMSIZE(mtr);
 444 
 445         return (dp);
 446 }
 447 
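/*
 * Return the limit of the branch select range containing base for the
 * given memory controller, adjusted for the memory hole below 4G and
 * halved when the range interleaves across both branches outside
 * mirror mode.
 */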
 448 static uint64_t
 449 mc_range(int controller, uint64_t base)
 450 {
 451         int i;
 452         uint64_t limit = 0;
 453 
 454         for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
 455                 if (nb_banks[i].way[controller] && base >= nb_banks[i].base &&
 456                     base < nb_banks[i].limit) {
 457                         limit = nb_banks[i].limit;
 458                         if (base <= top_of_low_memory &&
 459                             limit > top_of_low_memory) {
 460                                 limit -= TLOW_MAX - top_of_low_memory;
 461                         }
 462                         if (nb_banks[i].way[0] && nb_banks[i].way[1] &&
 463                             nb_mode != NB_MEMORY_MIRROR) {
 464                                 limit = limit / 2;
 465                         }
 466                 }
 467         }
 468         return (limit);
 469 }
 470 
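/*
 * Build the address translation tables: the branch select (MIR)
 * registers populate nb_banks[] and the rank interleave (DMIR)
 * registers populate nb_ranks[][]; each rank's address range is also
 * registered with dimm_add_rank().
 */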
 471 void
 472 nb_mc_init()
 473 {
 474         uint16_t tolm;
 475         uint16_t mir;
 476         uint32_t hole_base;
 477         uint32_t hole_size;
 478         uint32_t dmir;
 479         uint64_t base;
 480         uint64_t limit;
 481         uint8_t way0, way1, rank0, rank1, rank2, rank3, branch_interleave;
 482         int i, j, k;
 483         uint8_t interleave;
 484 
 485         base = 0;
 486         tolm = TOLM_RD();
 487         top_of_low_memory = ((uint32_t)(tolm >> 12) & 0xf) << 28;
 488         for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
 489                 mir = MIR_RD(i);
 490                 limit = (uint64_t)(mir >> 4) << 28;
 491                 way0 = mir & 1;
 492                 way1 = (mir >> 1) & 1;
 493                 if (way0 == 0 && way1 == 0) {
 494                         way0 = 1;
 495                         way1 = 1;
 496                 }
 497                 if (limit > top_of_low_memory)
 498                         limit += TLOW_MAX - top_of_low_memory;
 499                 nb_banks[i].base = base;
 500                 nb_banks[i].limit = limit;
 501                 nb_banks[i].way[0] = way0;
 502                 nb_banks[i].way[1] = way1;
 503                 base = limit;
 504         }
 505         for (i = 0; i < nb_number_memory_controllers; i++) {
 506                 base = 0;
 507 
 508                 for (j = 0; j < NB_MEM_RANK_SELECT; j++) {
 509                         dmir = DMIR_RD(i, j);
 510                         limit = ((uint64_t)(dmir >> 16) & 0xff) << 28;
 511                         if (limit == 0) {
 512                                 limit = mc_range(i, base);
 513                         }
 514                         branch_interleave = 0;
 515                         hole_base = 0;
 516                         hole_size = 0;
 517                         DMIR_RANKS(dmir, rank0, rank1, rank2, rank3);
 518                         if (rank0 == rank1)
 519                                 interleave = 1;
 520                         else if (rank0 == rank2)
 521                                 interleave = 2;
 522                         else
 523                                 interleave = 4;
 524                         if (nb_mode != NB_MEMORY_MIRROR &&
 525                             nb_mode != NB_MEMORY_SINGLE_CHANNEL) {
 526                                 for (k = 0; k < NB_MEM_BRANCH_SELECT; k++) {
 527                                         if (base >= nb_banks[k].base &&
 528                                             base < nb_banks[k].limit) {
                                                if (nb_banks[k].way[0] &&
                                                    nb_banks[k].way[1]) {
 531                                                         interleave *= 2;
 532                                                         limit *= 2;
 533                                                         branch_interleave = 1;
 534                                                 }
 535                                                 break;
 536                                         }
 537                                 }
 538                         }
 539                         if (base < top_of_low_memory &&
 540                             limit > top_of_low_memory) {
 541                                 hole_base = top_of_low_memory;
 542                                 hole_size = TLOW_MAX - top_of_low_memory;
 543                                 limit += hole_size;
 544                         } else if (base > top_of_low_memory) {
 545                                 limit += TLOW_MAX - top_of_low_memory;
 546                         }
 547                         nb_ranks[i][j].base = base;
 548                         nb_ranks[i][j].limit = limit;
 549                         nb_ranks[i][j].rank[0] = rank0;
 550                         nb_ranks[i][j].rank[1] = rank1;
 551                         nb_ranks[i][j].rank[2] = rank2;
 552                         nb_ranks[i][j].rank[3] = rank3;
 553                         nb_ranks[i][j].interleave = interleave;
 554                         nb_ranks[i][j].branch_interleave = branch_interleave;
 555                         nb_ranks[i][j].hole_base = hole_base;
 556                         nb_ranks[i][j].hole_size = hole_size;
                        if (limit > base) {
                                /* rank 0 always covers part of this range */
                                dimm_add_rank(i, rank0, branch_interleave, 0,
                                    base, hole_base, hole_size, interleave,
                                    limit);
                                if (rank0 != rank1) {
 559                                         dimm_add_rank(i, rank1,
 560                                             branch_interleave, 1, base,
 561                                             hole_base, hole_size, interleave,
 562                                             limit);
 563                                         if (rank0 != rank2) {
 564                                                 dimm_add_rank(i, rank2,
 565                                                     branch_interleave, 2, base,
 566                                                     hole_base, hole_size,
 567                                                     interleave, limit);
 568                                                 dimm_add_rank(i, rank3,
 569                                                     branch_interleave, 3, base,
 570                                                     hole_base, hole_size,
 571                                                     interleave, limit);
 572                                         }
 573                                 }
 574                         }
 575                         base = limit;
 576                 }
 577         }
 578 }
 579 
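/*
 * A spare rank has been deployed on this branch: replace the reference
 * to bad_rank in the rank select table with the branch's spare rank.
 */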
 580 void
 581 nb_used_spare_rank(int branch, int bad_rank)
 582 {
 583         int i;
 584         int j;
 585 
 586         for (i = 0; i < NB_MEM_RANK_SELECT; i++) {
 587                 for (j = 0; j < NB_RANKS_IN_SELECT; j++) {
 588                         if (nb_ranks[branch][i].rank[j] == bad_rank) {
 589                                 nb_ranks[branch][i].rank[j] =
 590                                     spare_rank[branch];
 591                                 i = NB_MEM_RANK_SELECT;
 592                                 break;
 593                         }
 594                 }
 595         }
 596 }
 597 
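/*
 * Determine nb_dimms_per_channel, either from the platform_label table
 * matched against the SMBIOS system vendor and product, or by probing
 * each channel for its highest populated slot.  Returns the platform's
 * DIMM labelling routine, or NULL if there is none.
 */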
 598 find_dimm_label_t *
 599 find_dimms_per_channel()
 600 {
 601         struct platform_label *pl;
 602         smbios_info_t si;
 603         smbios_system_t sy;
 604         id_t id;
 605         int i, j;
 606         find_dimm_label_t *rt = NULL;
 607 
 608         if (ksmbios != NULL && nb_no_smbios == 0) {
 609                 if ((id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
 610                     smbios_info_common(ksmbios, id, &si) != SMB_ERR) {
 611                         for (pl = platform_label; pl->sys_vendor; pl++) {
 612                                 if (strncmp(pl->sys_vendor,
 613                                     si.smbi_manufacturer,
 614                                     strlen(pl->sys_vendor)) == 0 &&
 615                                     strncmp(pl->sys_product, si.smbi_product,
 616                                     strlen(pl->sys_product)) == 0) {
 617                                         nb_dimms_per_channel =
 618                                             pl->dimms_per_channel;
 619                                         rt = &pl->dimm_label;
 620                                         break;
 621                                 }
 622                         }
 623                 }
 624         }
 625         if (nb_dimms_per_channel == 0) {
                /*
                 * Scan all memory channels; if we find a channel which
                 * has more dimms than we have seen before, set
                 * nb_dimms_per_channel to the number of dimms on the
                 * channel.
                 */
 631                 for (i = 0; i < nb_number_memory_controllers; i++) {
 632                         for (j = nb_dimms_per_channel;
 633                             j < NB_MAX_DIMMS_PER_CHANNEL; j++) {
 634                                 if (nb_dimm_present(i, j))
 635                                         nb_dimms_per_channel = j + 1;
 636                         }
 637                 }
 638         }
 639         return (rt);
 640 }
 641 
 642 struct smb_dimm_rec {
 643         int dimms;
 644         int slots;
 645         int populated;
 646         nb_dimm_t **dimmpp;
 647 };
 648 
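/*
 * smbios_iter() callback: copy the device locator string of each
 * populated memory device record into the label of the corresponding
 * nb_dimm_t, skipping slots the chipset supports but the platform does
 * not populate.
 */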
 649 static int
 650 dimm_label(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
 651 {
 652         struct smb_dimm_rec *rp = (struct smb_dimm_rec *)arg;
 653         nb_dimm_t ***dimmpp;
 654         nb_dimm_t *dimmp;
 655         smbios_memdevice_t md;
 656 
 657         dimmpp = &rp->dimmpp;
 658         if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
 659                 if (*dimmpp >= &nb_dimms[nb_dimm_slots])
 660                         return (-1);
 661                 dimmp = **dimmpp;
 662                 if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0 &&
 663                     md.smbmd_dloc != NULL) {
 664                         if (md.smbmd_size) {
 665                                 if (dimmp == NULL &&
 666                                     (rp->slots == nb_dimm_slots ||
 667                                     rp->dimms < rp->populated)) {
 668                                         (*dimmpp)++;
 669                                         return (0);
 670                                 }
                                /*
                                 * If there is no physical dimm for this
                                 * smbios record, it is because this system
                                 * has fewer physical slots than the
                                 * controller supports, so skip empty slots
                                 * to find the slot this smbios record
                                 * belongs to.
                                 */
 678                                 while (dimmp == NULL) {
 679                                         (*dimmpp)++;
 680                                         if (*dimmpp >= &nb_dimms[nb_dimm_slots])
 681                                                 return (-1);
 682                                         dimmp = **dimmpp;
 683                                 }
 684                                 (void) snprintf(dimmp->label,
 685                                     sizeof (dimmp->label), "%s", md.smbmd_dloc);
 686                                 (*dimmpp)++;
 687                         }
 688                 }
 689         }
 690         return (0);
 691 }
 692 
 693 static int
 694 check_memdevice(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
 695 {
 696         struct smb_dimm_rec *rp = (struct smb_dimm_rec *)arg;
 697         smbios_memdevice_t md;
 698 
 699         if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
 700                 if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0) {
 701                         rp->slots++;
 702                         if (md.smbmd_size) {
 703                                 rp->populated++;
 704                         }
 705                 }
 706         }
 707         return (0);
 708 }
 709 
 710 void
 711 nb_smbios()
 712 {
 713         struct smb_dimm_rec r;
 714         int i;
 715 
 716         if (ksmbios != NULL && nb_no_smbios == 0) {
 717                 r.dimms = 0;
 718                 r.slots = 0;
 719                 r.populated = 0;
 720                 r.dimmpp = nb_dimms;
 721                 for (i = 0; i < nb_dimm_slots; i++) {
 722                         if (nb_dimms[i] != NULL)
 723                                 r.dimms++;
 724                 }
 725                 (void) smbios_iter(ksmbios, check_memdevice, &r);
 726                 (void) smbios_iter(ksmbios, dimm_label, &r);
 727         }
 728 }
 729 
 730 static void
 731 x8450_dimm_label(int dimm, char *label, int label_sz)
 732 {
 733         int channel = dimm >> 3;
 734 
 735         dimm = dimm & 0x7;
 736         (void) snprintf(label, label_sz, "D%d", (dimm * 4) + channel);
 737 }
 738 
 739 /*
 740  * CP3250 DIMM labels
 741  * Channel   Dimm   Label
 742  *       0      0      A0
 743  *       1      0      B0
 744  *       0      1      A1
 745  *       1      1      B1
 746  *       0      2      A2
 747  *       1      2      B2
 748  */
 749 static void
 750 cp3250_dimm_label(int dimm, char *label, int label_sz)
 751 {
 752         int channel = dimm / nb_dimms_per_channel;
 753 
 754         dimm = dimm % nb_dimms_per_channel;
 755         (void) snprintf(label, label_sz, "%c%d", channel == 0 ? 'A' : 'B',
 756             dimm);
 757 }
 758 
/*
 * Map a rank id to the dimm id within a channel.
 * For the 5100 chipset, walk the channel's dimm list and check whether the
 * given rank id falls within the rank range assigned to each dimm.
 * For other chipsets, the dimm id is rank/2.
 */
 765 int
 766 nb_rank2dimm(int channel, int rank)
 767 {
 768         int i;
 769         nb_dimm_t **dimmpp = nb_dimms;
 770 
 771         if (nb_chipset != INTEL_NB_5100)
 772                 return (rank >> 1);
 773 
 774         dimmpp += channel * nb_dimms_per_channel;
 775         for (i = 0; i < nb_dimms_per_channel; i++) {
 776                 if ((rank >= dimmpp[i]->start_rank) &&
 777                     (rank < dimmpp[i]->start_rank + dimmpp[i]->nranks)) {
 778                         return (i);
 779                 }
 780         }
 781         return (-1);
 782 }
 783 
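/*
 * Probe the DDR2 (5100) DIMM population: allocate the slot array, read
 * each present DIMM's SPD data, assign starting ranks and labels, and
 * determine the memory mode (normal, spare rank or single channel).
 */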
 784 static void
 785 nb_ddr2_dimms_init(find_dimm_label_t *label_function)
 786 {
 787         int i, j;
 788         int start_rank;
 789         uint32_t spcpc;
 790         uint8_t spcps;
 791         nb_dimm_t **dimmpp;
 792 
 793         nb_dimm_slots = nb_number_memory_controllers * nb_channels_per_branch *
 794             nb_dimms_per_channel;
 795         nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
 796             nb_dimm_slots, KM_SLEEP);
 797         dimmpp = nb_dimms;
 798         nb_mode = NB_MEMORY_NORMAL;
 799         for (i = 0; i < nb_number_memory_controllers; i++) {
 800                 if (nb_mode == NB_MEMORY_NORMAL) {
 801                         spcpc = SPCPC_RD(i);
 802                         spcps = SPCPS_RD(i);
 803                         if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
 804                             (spcps & SPCPS_SPARE_DEPLOYED) != 0)
 805                                 nb_mode = NB_MEMORY_SPARE_RANK;
 806                         spare_rank[i] = SPCPC_SPRANK(spcpc);
 807                 }
 808 
 809                 /* The 1st dimm of a channel starts at rank 0 */
 810                 start_rank = 0;
 811 
 812                 for (j = 0; j < nb_dimms_per_channel; j++) {
 813                         dimmpp[j] = nb_ddr2_dimm_init(i, j, start_rank);
 814                         if (dimmpp[j]) {
 815                                 nb_ndimm ++;
 816                                 if (label_function) {
 817                                         label_function->label_function(
 818                                             (i * nb_dimms_per_channel) + j,
 819                                             dimmpp[j]->label,
 820                                             sizeof (dimmpp[j]->label));
 821                                 }
 822                                 start_rank += dimmpp[j]->nranks;
                                /*
                                 * Add an extra rank because a single-ranked
                                 * dimm still takes up two ranks.
                                 */
 827                                 if (dimmpp[j]->nranks & 0x1)
 828                                         start_rank++;
                        }
 830                 }
 831                 dimmpp += nb_dimms_per_channel;
 832         }
 833 
        /*
         * If only a single channel's worth of dimms is present, run in
         * single-channel mode.
         */
 837         if (nb_ndimm > 0 && nb_ndimm <= nb_dimms_per_channel) {
 838                 nb_mode = NB_MEMORY_SINGLE_CHANNEL;
 839         }
 840 }
 841 
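/*
 * Probe the FB-DIMM population from the MTR registers: allocate the
 * slot array, read each present DIMM's SPD data, assign labels, and
 * determine the memory mode (single channel, mirror, spare rank or
 * normal).
 */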
 842 static void
 843 nb_fbd_dimms_init(find_dimm_label_t *label_function)
 844 {
 845         int i, j, k, l;
 846         uint16_t mtr;
 847         uint32_t mc, mca;
 848         uint32_t spcpc;
 849         uint8_t spcps;
 850         nb_dimm_t **dimmpp;
 851 
 852         mca = MCA_RD();
 853         mc = MC_RD();
 854         if (mca & MCA_SCHDIMM)  /* single-channel mode */
 855                 nb_mode = NB_MEMORY_SINGLE_CHANNEL;
 856         else if ((mc & MC_MIRROR) != 0) /* mirror mode */
 857                 nb_mode = NB_MEMORY_MIRROR;
 858         else
 859                 nb_mode = NB_MEMORY_NORMAL;
 860         nb_dimm_slots = nb_number_memory_controllers * 2 * nb_dimms_per_channel;
 861         nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
 862             nb_dimm_slots, KM_SLEEP);
 863         dimmpp = nb_dimms;
 864         for (i = 0; i < nb_number_memory_controllers; i++) {
 865                 if (nb_mode == NB_MEMORY_NORMAL) {
 866                         spcpc = SPCPC_RD(i);
 867                         spcps = SPCPS_RD(i);
 868                         if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
 869                             (spcps & SPCPS_SPARE_DEPLOYED) != 0)
 870                                 nb_mode = NB_MEMORY_SPARE_RANK;
 871                         spare_rank[i] = SPCPC_SPRANK(spcpc);
 872                 }
 873                 for (j = 0; j < nb_dimms_per_channel; j++) {
 874                         mtr = MTR_RD(i, j);
 875                         k = i * 2;
 876                         dimmpp[j] = nb_fbd_dimm_init(k, j, mtr);
 877                         if (dimmpp[j]) {
 878                                 nb_ndimm ++;
 879                                 if (label_function) {
 880                                         label_function->label_function(
 881                                             (k * nb_dimms_per_channel) + j,
 882                                             dimmpp[j]->label,
 883                                             sizeof (dimmpp[j]->label));
 884                                 }
 885                         }
 886                         dimmpp[j + nb_dimms_per_channel] =
 887                             nb_fbd_dimm_init(k + 1, j, mtr);
 888                         l = j + nb_dimms_per_channel;
 889                         if (dimmpp[l]) {
 890                                 if (label_function) {
 891                                         label_function->label_function(
 892                                             (k * nb_dimms_per_channel) + l,
 893                                             dimmpp[l]->label,
 894                                             sizeof (dimmpp[l]->label));
 895                                 }
 896                                 nb_ndimm ++;
 897                         }
 898                 }
 899                 dimmpp += nb_dimms_per_channel * 2;
 900         }
 901 }
 902 
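/*
 * Build the DIMM topology for the detected chipset and label the DIMMs,
 * falling back to SMBIOS labels when there is no platform-specific
 * labelling routine.
 */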
 903 static void
 904 nb_dimms_init(find_dimm_label_t *label_function)
 905 {
 906         if (nb_chipset == INTEL_NB_5100)
 907                 nb_ddr2_dimms_init(label_function);
 908         else
 909                 nb_fbd_dimms_init(label_function);
 910 
 911         if (label_function == NULL)
 912                 nb_smbios();
 913 }
 914 
 915 /* Setup the ESI port registers to enable SERR for southbridge */
 916 static void
 917 nb_pex_init()
 918 {
 919         int i = 0; /* ESI port */
 920         uint16_t regw;
 921 
 922         emask_uncor_pex[i] = EMASK_UNCOR_PEX_RD(i);
 923         emask_cor_pex[i] = EMASK_COR_PEX_RD(i);
 924         emask_rp_pex[i] = EMASK_RP_PEX_RD(i);
 925         docmd_pex[i] = PEX_ERR_DOCMD_RD(i);
 926         uncerrsev[i] = UNCERRSEV_RD(i);
 927 
 928         if (nb5000_reset_uncor_pex)
 929                 EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
 930         if (nb5000_reset_cor_pex)
 931                 EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
 932         if (nb_chipset == INTEL_NB_5400) {
 933                 /* disable masking of ERR pins used by DOCMD */
 934                 PEX_ERR_PIN_MASK_WR(i, 0x10);
 935         }
 936 
 937         /* RP error message (CE/NFE/FE) detect mask */
 938         EMASK_RP_PEX_WR(i, nb5000_rp_pex);
 939 
 940         /* Command Register - Enable SERR */
 941         regw = nb_pci_getw(0, i, 0, PCI_CONF_COMM, 0);
 942         nb_pci_putw(0, i, 0, PCI_CONF_COMM,
 943             regw | PCI_COMM_SERR_ENABLE);
 944 
 945         /* Root Control Register - SERR on NFE/FE */
 946         PEXROOTCTL_WR(i, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
 947             PCIE_ROOTCTL_SYS_ERR_ON_FE_EN);
 948 
 949         /* AER UE Mask - Mask UR */
 950         UNCERRMSK_WR(i, PCIE_AER_UCE_UR);
 951 }
 952 
 953 static void
 954 nb_pex_fini()
 955 {
 956         int i = 0; /* ESI port */
 957 
 958         EMASK_UNCOR_PEX_WR(i, emask_uncor_pex[i]);
 959         EMASK_COR_PEX_WR(i, emask_cor_pex[i]);
 960         EMASK_RP_PEX_WR(i, emask_rp_pex[i]);
 961         PEX_ERR_DOCMD_WR(i, docmd_pex[i]);
 962 
 963         if (nb5000_reset_uncor_pex)
 964                 EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
 965         if (nb5000_reset_cor_pex)
 966                 EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
 967 }
 968 
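/*
 * Save the BIOS settings of the chipset-internal error reporting
 * registers (ERR[0-2]_INT, MCERR_INT, EMASK_INT) and reprogram them,
 * splitting errors between those polled by the driver and those left
 * to signal MCERR, with the EMASK value chosen per chipset.
 */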
 969 void
 970 nb_int_init()
 971 {
 972         uint32_t err0_int;
 973         uint32_t err1_int;
 974         uint32_t err2_int;
 975         uint32_t mcerr_int;
 976         uint32_t emask_int;
 977         uint32_t nb_mask_bios_int;
 978         uint32_t nb_mask_poll_int;
 979         uint16_t stepping;
 980 
 981         if (nb_chipset == INTEL_NB_5100) {
 982                 nb_mask_bios_int = nb5100_mask_bios_int;
 983                 nb_mask_poll_int = nb5100_mask_poll_int;
 984         } else {
 985                 nb_mask_bios_int = nb5000_mask_bios_int;
 986                 nb_mask_poll_int = nb5000_mask_poll_int;
 987         }
 988         err0_int = ERR0_INT_RD();
 989         err1_int = ERR1_INT_RD();
 990         err2_int = ERR2_INT_RD();
 991         mcerr_int = MCERR_INT_RD();
 992         emask_int = EMASK_INT_RD();
 993 
 994         nb_err0_int = err0_int;
 995         nb_err1_int = err1_int;
 996         nb_err2_int = err2_int;
 997         nb_mcerr_int = mcerr_int;
 998         nb_emask_int = emask_int;
 999 
1000         ERR0_INT_WR(ERR_INT_ALL);
1001         ERR1_INT_WR(ERR_INT_ALL);
1002         ERR2_INT_WR(ERR_INT_ALL);
1003         MCERR_INT_WR(ERR_INT_ALL);
1004         EMASK_INT_WR(ERR_INT_ALL);
1005 
1006         mcerr_int &= ~nb_mask_bios_int;
1007         mcerr_int |= nb_mask_bios_int & (~err0_int | ~err1_int | ~err2_int);
1008         mcerr_int |= nb_mask_poll_int;
1009         err0_int |= nb_mask_poll_int;
1010         err1_int |= nb_mask_poll_int;
1011         err2_int |= nb_mask_poll_int;
1012 
1013         l_mcerr_int = mcerr_int;
1014         ERR0_INT_WR(err0_int);
1015         ERR1_INT_WR(err1_int);
1016         ERR2_INT_WR(err2_int);
1017         MCERR_INT_WR(mcerr_int);
1018         if (nb5000_reset_emask_int) {
1019                 if (nb_chipset == INTEL_NB_7300) {
1020                         stepping = NB5000_STEPPING();
1021                         if (stepping == 0)
1022                                 EMASK_5000_INT_WR(nb7300_emask_int_step0);
1023                         else
1024                                 EMASK_5000_INT_WR(nb7300_emask_int);
1025                 } else if (nb_chipset == INTEL_NB_5400) {
1026                         EMASK_5400_INT_WR(nb5400_emask_int |
1027                             (emask_int & EMASK_INT_RES));
1028                 } else if (nb_chipset == INTEL_NB_5100) {
1029                         EMASK_5000_INT_WR(nb5100_emask_int);
1030                 } else {
1031                         EMASK_5000_INT_WR(nb5000_emask_int);
1032                 }
1033         } else {
1034                 EMASK_INT_WR(nb_emask_int);
1035         }
1036 }
1037 
1038 void
1039 nb_int_fini()
1040 {
1041         ERR0_INT_WR(ERR_INT_ALL);
1042         ERR1_INT_WR(ERR_INT_ALL);
1043         ERR2_INT_WR(ERR_INT_ALL);
1044         MCERR_INT_WR(ERR_INT_ALL);
1045         EMASK_INT_WR(ERR_INT_ALL);
1046 
1047         ERR0_INT_WR(nb_err0_int);
1048         ERR1_INT_WR(nb_err1_int);
1049         ERR2_INT_WR(nb_err2_int);
1050         MCERR_INT_WR(nb_mcerr_int);
1051         EMASK_INT_WR(nb_emask_int);
1052 }
1053 
1054 void
1055 nb_int_mask_mc(uint32_t mc_mask_int)
1056 {
1057         uint32_t emask_int;
1058 
1059         emask_int = MCERR_INT_RD();
1060         if ((emask_int & mc_mask_int) != mc_mask_int) {
1061                 MCERR_INT_WR(emask_int|mc_mask_int);
1062                 nb_mask_mc_set = 1;
1063         }
1064 }
1065 
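/*
 * Save the BIOS settings of the FB-DIMM channel error reporting
 * registers (ERR[0-2]_FBD, MCERR_FBD, EMASK_FBD) and reprogram them
 * with the chipset-specific poll and BIOS masks, applying the MCH 7300
 * erratum 34 adjustment in mirror mode.
 */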
1066 static void
1067 nb_fbd_init()
1068 {
1069         uint32_t err0_fbd;
1070         uint32_t err1_fbd;
1071         uint32_t err2_fbd;
1072         uint32_t mcerr_fbd;
1073         uint32_t emask_fbd;
1074         uint32_t emask_bios_fbd;
1075         uint32_t emask_poll_fbd;
1076 
1077         err0_fbd = ERR0_FBD_RD();
1078         err1_fbd = ERR1_FBD_RD();
1079         err2_fbd = ERR2_FBD_RD();
1080         mcerr_fbd = MCERR_FBD_RD();
1081         emask_fbd = EMASK_FBD_RD();
1082 
1083         nb_err0_fbd = err0_fbd;
1084         nb_err1_fbd = err1_fbd;
1085         nb_err2_fbd = err2_fbd;
1086         nb_mcerr_fbd = mcerr_fbd;
1087         nb_emask_fbd = emask_fbd;
1088 
1089         ERR0_FBD_WR(0xffffffff);
1090         ERR1_FBD_WR(0xffffffff);
1091         ERR2_FBD_WR(0xffffffff);
1092         MCERR_FBD_WR(0xffffffff);
1093         EMASK_FBD_WR(0xffffffff);
1094 
1095         if (nb_chipset == INTEL_NB_7300) {
1096                 if (nb_mode == NB_MEMORY_MIRROR) {
1097                         /* MCH 7300 errata 34 */
1098                         emask_bios_fbd = nb7300_mask_bios_fbd & ~EMASK_FBD_M23;
1099                         emask_poll_fbd = nb7300_mask_poll_fbd;
1100                         mcerr_fbd |= EMASK_FBD_M23;
1101                 } else {
1102                         emask_bios_fbd = nb7300_mask_bios_fbd;
1103                         emask_poll_fbd = nb7300_mask_poll_fbd;
1104                 }
1105         } else if (nb_chipset == INTEL_NB_5400) {
1106                 emask_bios_fbd = nb5400_mask_bios_fbd;
1107                 emask_poll_fbd = nb5400_mask_poll_fbd;
1108         } else {
1109                 emask_bios_fbd = nb5000_mask_bios_fbd;
1110                 emask_poll_fbd = nb5000_mask_poll_fbd;
1111         }
1112         mcerr_fbd &= ~emask_bios_fbd;
1113         mcerr_fbd |= emask_bios_fbd & (~err0_fbd | ~err1_fbd | ~err2_fbd);
1114         mcerr_fbd |= emask_poll_fbd;
1115         err0_fbd |= emask_poll_fbd;
1116         err1_fbd |= emask_poll_fbd;
1117         err2_fbd |= emask_poll_fbd;
1118 
1119         l_mcerr_fbd = mcerr_fbd;
1120         ERR0_FBD_WR(err0_fbd);
1121         ERR1_FBD_WR(err1_fbd);
1122         ERR2_FBD_WR(err2_fbd);
1123         MCERR_FBD_WR(mcerr_fbd);
1124         if (nb5000_reset_emask_fbd) {
1125                 if (nb_chipset == INTEL_NB_5400)
1126                         EMASK_FBD_WR(nb5400_emask_fbd);
1127                 else
1128                         EMASK_FBD_WR(nb5000_emask_fbd);
1129         } else {
1130                 EMASK_FBD_WR(nb_emask_fbd);
1131         }
1132 }
1133 
1134 void
1135 nb_fbd_mask_mc(uint32_t mc_mask_fbd)
1136 {
1137         uint32_t emask_fbd;
1138 
1139         emask_fbd = MCERR_FBD_RD();
1140         if ((emask_fbd & mc_mask_fbd) != mc_mask_fbd) {
1141                 MCERR_FBD_WR(emask_fbd|mc_mask_fbd);
1142                 nb_mask_mc_set = 1;
1143         }
1144 }
1145 
1146 static void
1147 nb_fbd_fini()
1148 {
1149         ERR0_FBD_WR(0xffffffff);
1150         ERR1_FBD_WR(0xffffffff);
1151         ERR2_FBD_WR(0xffffffff);
1152         MCERR_FBD_WR(0xffffffff);
1153         EMASK_FBD_WR(0xffffffff);
1154 
1155         ERR0_FBD_WR(nb_err0_fbd);
1156         ERR1_FBD_WR(nb_err1_fbd);
1157         ERR2_FBD_WR(nb_err2_fbd);
1158         MCERR_FBD_WR(nb_mcerr_fbd);
1159         EMASK_FBD_WR(nb_emask_fbd);
1160 }
1161 
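/*
 * Save the BIOS settings of the 5100 DDR2 memory error reporting
 * registers (ERR[0-2]_MEM, MCERR_MEM, EMASK_MEM) and reprogram them
 * for polled error detection.
 */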
1162 static void
1163 nb_mem_init()
1164 {
1165         uint32_t err0_mem;
1166         uint32_t err1_mem;
1167         uint32_t err2_mem;
1168         uint32_t mcerr_mem;
1169         uint32_t emask_mem;
1170         uint32_t emask_poll_mem;
1171 
1172         err0_mem = ERR0_MEM_RD();
1173         err1_mem = ERR1_MEM_RD();
1174         err2_mem = ERR2_MEM_RD();
1175         mcerr_mem = MCERR_MEM_RD();
1176         emask_mem = EMASK_MEM_RD();
1177 
1178         nb_err0_mem = err0_mem;
1179         nb_err1_mem = err1_mem;
1180         nb_err2_mem = err2_mem;
1181         nb_mcerr_mem = mcerr_mem;
1182         nb_emask_mem = emask_mem;
1183 
1184         ERR0_MEM_WR(0xffffffff);
1185         ERR1_MEM_WR(0xffffffff);
1186         ERR2_MEM_WR(0xffffffff);
1187         MCERR_MEM_WR(0xffffffff);
1188         EMASK_MEM_WR(0xffffffff);
1189 
1190         emask_poll_mem = nb5100_mask_poll_mem;
1191         mcerr_mem |= emask_poll_mem;
1192         err0_mem |= emask_poll_mem;
1193         err1_mem |= emask_poll_mem;
1194         err2_mem |= emask_poll_mem;
1195 
1196         l_mcerr_mem = mcerr_mem;
1197         ERR0_MEM_WR(err0_mem);
1198         ERR1_MEM_WR(err1_mem);
1199         ERR2_MEM_WR(err2_mem);
1200         MCERR_MEM_WR(mcerr_mem);
1201         if (nb5100_reset_emask_mem) {
1202                 EMASK_MEM_WR(~nb5100_mask_poll_mem);
1203         } else {
1204                 EMASK_MEM_WR(nb_emask_mem);
1205         }
1206 }
1207 
1208 void
1209 nb_mem_mask_mc(uint32_t mc_mask_mem)
1210 {
1211         uint32_t emask_mem;
1212 
1213         emask_mem = MCERR_MEM_RD();
1214         if ((emask_mem & mc_mask_mem) != mc_mask_mem) {
1215                 MCERR_MEM_WR(emask_mem|mc_mask_mem);
1216                 nb_mask_mc_set = 1;
1217         }
1218 }
1219 
1220 static void
1221 nb_mem_fini()
1222 {
1223         ERR0_MEM_WR(0xffffffff);
1224         ERR1_MEM_WR(0xffffffff);
1225         ERR2_MEM_WR(0xffffffff);
1226         MCERR_MEM_WR(0xffffffff);
1227         EMASK_MEM_WR(0xffffffff);
1228 
1229         ERR0_MEM_WR(nb_err0_mem);
1230         ERR1_MEM_WR(nb_err1_mem);
1231         ERR2_MEM_WR(nb_err2_mem);
1232         MCERR_MEM_WR(nb_mcerr_mem);
1233         EMASK_MEM_WR(nb_emask_mem);
1234 }
1235 
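/*
 * Save the BIOS settings of the front-side bus error reporting
 * registers and reprogram them for FSBs 0 and 1, and also for FSBs 2
 * and 3 on the 7300.
 */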
1236 static void
1237 nb_fsb_init()
1238 {
1239         uint16_t err0_fsb;
1240         uint16_t err1_fsb;
1241         uint16_t err2_fsb;
1242         uint16_t mcerr_fsb;
1243         uint16_t emask_fsb;
1244 
1245         err0_fsb = ERR0_FSB_RD(0);
1246         err1_fsb = ERR1_FSB_RD(0);
1247         err2_fsb = ERR2_FSB_RD(0);
1248         mcerr_fsb = MCERR_FSB_RD(0);
1249         emask_fsb = EMASK_FSB_RD(0);
1250 
1251         ERR0_FSB_WR(0, 0xffff);
1252         ERR1_FSB_WR(0, 0xffff);
1253         ERR2_FSB_WR(0, 0xffff);
1254         MCERR_FSB_WR(0, 0xffff);
1255         EMASK_FSB_WR(0, 0xffff);
1256 
1257         ERR0_FSB_WR(1, 0xffff);
1258         ERR1_FSB_WR(1, 0xffff);
1259         ERR2_FSB_WR(1, 0xffff);
1260         MCERR_FSB_WR(1, 0xffff);
1261         EMASK_FSB_WR(1, 0xffff);
1262 
1263         nb_err0_fsb = err0_fsb;
1264         nb_err1_fsb = err1_fsb;
1265         nb_err2_fsb = err2_fsb;
1266         nb_mcerr_fsb = mcerr_fsb;
1267         nb_emask_fsb = emask_fsb;
1268 
1269         mcerr_fsb &= ~nb5000_mask_bios_fsb;
1270         mcerr_fsb |= nb5000_mask_bios_fsb & (~err2_fsb | ~err1_fsb | ~err0_fsb);
1271         mcerr_fsb |= nb5000_mask_poll_fsb;
1272         err0_fsb |= nb5000_mask_poll_fsb;
1273         err1_fsb |= nb5000_mask_poll_fsb;
1274         err2_fsb |= nb5000_mask_poll_fsb;
1275 
1276         l_mcerr_fsb = mcerr_fsb;
1277         ERR0_FSB_WR(0, err0_fsb);
1278         ERR1_FSB_WR(0, err1_fsb);
1279         ERR2_FSB_WR(0, err2_fsb);
1280         MCERR_FSB_WR(0, mcerr_fsb);
1281         if (nb5000_reset_emask_fsb) {
1282                 EMASK_FSB_WR(0, nb5000_emask_fsb);
1283         } else {
1284                 EMASK_FSB_WR(0, nb_emask_fsb);
1285         }
1286 
1287         ERR0_FSB_WR(1, err0_fsb);
1288         ERR1_FSB_WR(1, err1_fsb);
1289         ERR2_FSB_WR(1, err2_fsb);
1290         MCERR_FSB_WR(1, mcerr_fsb);
1291         if (nb5000_reset_emask_fsb) {
1292                 EMASK_FSB_WR(1, nb5000_emask_fsb);
1293         } else {
1294                 EMASK_FSB_WR(1, nb_emask_fsb);
1295         }
1296 
1297         if (nb_chipset == INTEL_NB_7300) {
1298                 ERR0_FSB_WR(2, 0xffff);
1299                 ERR1_FSB_WR(2, 0xffff);
1300                 ERR2_FSB_WR(2, 0xffff);
1301                 MCERR_FSB_WR(2, 0xffff);
1302                 EMASK_FSB_WR(2, 0xffff);
1303 
1304                 ERR0_FSB_WR(3, 0xffff);
1305                 ERR1_FSB_WR(3, 0xffff);
1306                 ERR2_FSB_WR(3, 0xffff);
1307                 MCERR_FSB_WR(3, 0xffff);
1308                 EMASK_FSB_WR(3, 0xffff);
1309 
1310                 ERR0_FSB_WR(2, err0_fsb);
1311                 ERR1_FSB_WR(2, err1_fsb);
1312                 ERR2_FSB_WR(2, err2_fsb);
1313                 MCERR_FSB_WR(2, mcerr_fsb);
1314                 if (nb5000_reset_emask_fsb) {
1315                         EMASK_FSB_WR(2, nb5000_emask_fsb);
1316                 } else {
1317                         EMASK_FSB_WR(2, nb_emask_fsb);
1318                 }
1319 
1320                 ERR0_FSB_WR(3, err0_fsb);
1321                 ERR1_FSB_WR(3, err1_fsb);
1322                 ERR2_FSB_WR(3, err2_fsb);
1323                 MCERR_FSB_WR(3, mcerr_fsb);
1324                 if (nb5000_reset_emask_fsb) {
1325                         EMASK_FSB_WR(3, nb5000_emask_fsb);
1326                 } else {
1327                         EMASK_FSB_WR(3, nb_emask_fsb);
1328                 }
1329         }
1330 }
1331 
1332 static void
nb_fsb_fini()
{
1334         ERR0_FSB_WR(0, 0xffff);
1335         ERR1_FSB_WR(0, 0xffff);
1336         ERR2_FSB_WR(0, 0xffff);
1337         MCERR_FSB_WR(0, 0xffff);
1338         EMASK_FSB_WR(0, 0xffff);
1339 
1340         ERR0_FSB_WR(0, nb_err0_fsb);
1341         ERR1_FSB_WR(0, nb_err1_fsb);
1342         ERR2_FSB_WR(0, nb_err2_fsb);
1343         MCERR_FSB_WR(0, nb_mcerr_fsb);
1344         EMASK_FSB_WR(0, nb_emask_fsb);
1345 
1346         ERR0_FSB_WR(1, 0xffff);
1347         ERR1_FSB_WR(1, 0xffff);
1348         ERR2_FSB_WR(1, 0xffff);
1349         MCERR_FSB_WR(1, 0xffff);
1350         EMASK_FSB_WR(1, 0xffff);
1351 
1352         ERR0_FSB_WR(1, nb_err0_fsb);
1353         ERR1_FSB_WR(1, nb_err1_fsb);
1354         ERR2_FSB_WR(1, nb_err2_fsb);
1355         MCERR_FSB_WR(1, nb_mcerr_fsb);
1356         EMASK_FSB_WR(1, nb_emask_fsb);
1357 
1358         if (nb_chipset == INTEL_NB_7300) {
1359                 ERR0_FSB_WR(2, 0xffff);
1360                 ERR1_FSB_WR(2, 0xffff);
1361                 ERR2_FSB_WR(2, 0xffff);
1362                 MCERR_FSB_WR(2, 0xffff);
1363                 EMASK_FSB_WR(2, 0xffff);
1364 
1365                 ERR0_FSB_WR(2, nb_err0_fsb);
1366                 ERR1_FSB_WR(2, nb_err1_fsb);
1367                 ERR2_FSB_WR(2, nb_err2_fsb);
1368                 MCERR_FSB_WR(2, nb_mcerr_fsb);
1369                 EMASK_FSB_WR(2, nb_emask_fsb);
1370 
1371                 ERR0_FSB_WR(3, 0xffff);
1372                 ERR1_FSB_WR(3, 0xffff);
1373                 ERR2_FSB_WR(3, 0xffff);
1374                 MCERR_FSB_WR(3, 0xffff);
1375                 EMASK_FSB_WR(3, 0xffff);
1376 
1377                 ERR0_FSB_WR(3, nb_err0_fsb);
1378                 ERR1_FSB_WR(3, nb_err1_fsb);
1379                 ERR2_FSB_WR(3, nb_err2_fsb);
1380                 MCERR_FSB_WR(3, nb_mcerr_fsb);
1381                 EMASK_FSB_WR(3, nb_emask_fsb);
1382         }
1383 }
1384 
1385 void
1386 nb_fsb_mask_mc(int fsb, uint16_t mc_mask_fsb)
1387 {
1388         uint16_t emask_fsb;
1389 
1390         emask_fsb = MCERR_FSB_RD(fsb);
1391         if ((emask_fsb & mc_mask_fsb) != mc_mask_fsb) {
1392                 MCERR_FSB_WR(fsb, emask_fsb|mc_mask_fsb|EMASK_FBD_RES);
1393                 nb_mask_mc_set = 1;
1394         }
1395 }
1396 
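/*
 * Save the BIOS settings of the thermal error reporting registers and
 * reprogram them; these registers are only handled on the 5400.
 */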
1397 static void
1398 nb_thr_init()
1399 {
1400         uint16_t err0_thr;
1401         uint16_t err1_thr;
1402         uint16_t err2_thr;
1403         uint16_t mcerr_thr;
1404         uint16_t emask_thr;
1405 
1406         if (nb_chipset == INTEL_NB_5400) {
1407                 err0_thr = ERR0_THR_RD(0);
1408                 err1_thr = ERR1_THR_RD(0);
1409                 err2_thr = ERR2_THR_RD(0);
1410                 mcerr_thr = MCERR_THR_RD(0);
1411                 emask_thr = EMASK_THR_RD(0);
1412 
1413                 ERR0_THR_WR(0xffff);
1414                 ERR1_THR_WR(0xffff);
1415                 ERR2_THR_WR(0xffff);
1416                 MCERR_THR_WR(0xffff);
1417                 EMASK_THR_WR(0xffff);
1418 
1419                 nb_err0_thr = err0_thr;
1420                 nb_err1_thr = err1_thr;
1421                 nb_err2_thr = err2_thr;
1422                 nb_mcerr_thr = mcerr_thr;
1423                 nb_emask_thr = emask_thr;
1424 
1425                 mcerr_thr &= ~nb_mask_bios_thr;
1426                 mcerr_thr |= nb_mask_bios_thr &
1427                     (~err2_thr | ~err1_thr | ~err0_thr);
1428                 mcerr_thr |= nb_mask_poll_thr;
1429                 err0_thr |= nb_mask_poll_thr;
1430                 err1_thr |= nb_mask_poll_thr;
1431                 err2_thr |= nb_mask_poll_thr;
1432 
1433                 l_mcerr_thr = mcerr_thr;
1434                 ERR0_THR_WR(err0_thr);
1435                 ERR1_THR_WR(err1_thr);
1436                 ERR2_THR_WR(err2_thr);
1437                 MCERR_THR_WR(mcerr_thr);
1438                 EMASK_THR_WR(nb_emask_thr);
1439         }
1440 }
1441 
1442 static void
1443 nb_thr_fini()
1444 {
1445         if (nb_chipset == INTEL_NB_5400) {
1446                 ERR0_THR_WR(0xffff);
1447                 ERR1_THR_WR(0xffff);
1448                 ERR2_THR_WR(0xffff);
1449                 MCERR_THR_WR(0xffff);
1450                 EMASK_THR_WR(0xffff);
1451 
1452                 ERR0_THR_WR(nb_err0_thr);
1453                 ERR1_THR_WR(nb_err1_thr);
1454                 ERR2_THR_WR(nb_err2_thr);
1455                 MCERR_THR_WR(nb_mcerr_thr);
1456                 EMASK_THR_WR(nb_emask_thr);
1457         }
1458 }
1459 
1460 void
1461 nb_thr_mask_mc(uint16_t mc_mask_thr)
1462 {
1463         uint16_t emask_thr;
1464 
1465         emask_thr = MCERR_THR_RD(0);
1466         if ((emask_thr & mc_mask_thr) != mc_mask_thr) {
1467                 MCERR_THR_WR(emask_thr|mc_mask_thr);
1468                 nb_mask_mc_set = 1;
1469         }
1470 }
1471 
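/*
 * Restore the MCERR mask registers to the values computed at
 * initialization, undoing any additional masking applied by the
 * nb_*_mask_mc() routines.
 */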
1472 void
1473 nb_mask_mc_reset()
1474 {
1475         if (nb_chipset == INTEL_NB_5100)
1476                 MCERR_MEM_WR(l_mcerr_mem);
1477         else
1478                 MCERR_FBD_WR(l_mcerr_fbd);
1479         MCERR_INT_WR(l_mcerr_int);
1480         MCERR_FSB_WR(0, l_mcerr_fsb);
1481         MCERR_FSB_WR(1, l_mcerr_fsb);
1482         if (nb_chipset == INTEL_NB_7300) {
1483                 MCERR_FSB_WR(2, l_mcerr_fsb);
1484                 MCERR_FSB_WR(3, l_mcerr_fsb);
1485         }
1486         if (nb_chipset == INTEL_NB_5400) {
1487                 MCERR_THR_WR(l_mcerr_thr);
1488         }
1489 }
1490 
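/*
 * Attach-time initialization: create the error queue, program the
 * error reporting registers, build the DIMM topology and enable the
 * hardware scrubber.
 */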
1491 int
1492 nb_dev_init()
1493 {
1494         find_dimm_label_t *label_function_p;
1495 
1496         label_function_p = find_dimms_per_channel();
1497         mutex_init(&nb_mutex, NULL, MUTEX_DRIVER, NULL);
1498         nb_queue = errorq_create("nb_queue", nb_drain, NULL, NB_MAX_ERRORS,
1499             sizeof (nb_logout_t), 1, ERRORQ_VITAL);
1500         if (nb_queue == NULL) {
1501                 mutex_destroy(&nb_mutex);
1502                 return (EAGAIN);
1503         }
1504         nb_int_init();
1505         nb_thr_init();
1506         dimm_init();
1507         nb_dimms_init(label_function_p);
1508         nb_mc_init();
1509         nb_pex_init();
1510         if (nb_chipset == INTEL_NB_5100)
1511                 nb_mem_init();
1512         else
1513                 nb_fbd_init();
1514         nb_fsb_init();
1515         nb_scrubber_enable();
1516         return (0);
1517 }
1518 
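/*
 * Identify the north bridge from PCI config space and set the
 * chipset-specific number of memory controllers and channels per
 * branch.  Returns ENOTSUP if the chipset is not supported.
 */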
1519 int
1520 nb_init()
1521 {
1522         /* return ENOTSUP if there is no PCI config space support. */
1523         if (pci_getl_func == NULL)
1524                 return (ENOTSUP);
1525 
1526         /* get vendor and device */
1527         nb_chipset = (*pci_getl_func)(0, 0, 0, PCI_CONF_VENID);
1528         switch (nb_chipset) {
1529         default:
1530                 if (nb_5000_memory_controller == 0)
1531                         return (ENOTSUP);
1532                 break;
1533         case INTEL_NB_7300:
1534         case INTEL_NB_5000P:
1535         case INTEL_NB_5000X:
1536                 break;
1537         case INTEL_NB_5000V:
1538         case INTEL_NB_5000Z:
1539                 nb_number_memory_controllers = 1;
1540                 break;
1541         case INTEL_NB_5100:
1542                 nb_channels_per_branch = 1;
1543                 break;
1544         case INTEL_NB_5400:
1545         case INTEL_NB_5400A:
1546         case INTEL_NB_5400B:
1547                 nb_chipset = INTEL_NB_5400;
1548                 break;
1549         }
1550         return (0);
1551 }
1552 
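/*
 * Re-probe the DIMM configuration, reprogram the hardware and then
 * free the DIMM state that was replaced.
 */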
1553 void
1554 nb_dev_reinit()
1555 {
1556         int i, j;
1557         int nchannels = nb_number_memory_controllers * 2;
1558         nb_dimm_t **dimmpp;
1559         nb_dimm_t *dimmp;
1560         nb_dimm_t **old_nb_dimms;
1561         int old_nb_dimms_per_channel;
1562         find_dimm_label_t *label_function_p;
1563         int dimm_slot = nb_dimm_slots;
1564 
1565         old_nb_dimms = nb_dimms;
1566         old_nb_dimms_per_channel = nb_dimms_per_channel;
1567 
1568         dimm_fini();
1569         nb_dimms_per_channel = 0;
1570         label_function_p = find_dimms_per_channel();
1571         dimm_init();
1572         nb_dimms_init(label_function_p);
1573         nb_mc_init();
1574         nb_pex_init();
1575         nb_int_init();
1576         nb_thr_init();
1577         if (nb_chipset == INTEL_NB_5100)
1578                 nb_mem_init();
1579         else
1580                 nb_fbd_init();
1581         nb_fsb_init();
1582         nb_scrubber_enable();
1583 
1584         dimmpp = old_nb_dimms;
1585         for (i = 0; i < nchannels; i++) {
1586                 for (j = 0; j < old_nb_dimms_per_channel; j++) {
1587                         dimmp = *dimmpp;
1588                         if (dimmp) {
1589                                 kmem_free(dimmp, sizeof (nb_dimm_t));
1590                                 *dimmpp = NULL;
1591                         }
1592                         dimmpp++;
1593                 }
1594         }
1595         kmem_free(old_nb_dimms, sizeof (nb_dimm_t *) * dimm_slot);
1596 }
1597 
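/*
 * Detach-time teardown: destroy the error queue and restore the error
 * reporting registers to their saved BIOS settings.
 */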
1598 void
1599 nb_dev_unload()
1600 {
1601         errorq_destroy(nb_queue);
1602         nb_queue = NULL;
1603         mutex_destroy(&nb_mutex);
1604         nb_int_fini();
1605         nb_thr_fini();
1606         if (nb_chipset == INTEL_NB_5100)
1607                 nb_mem_fini();
1608         else
1609                 nb_fbd_fini();
1610         nb_fsb_fini();
1611         nb_pex_fini();
1612         nb_fini();
1613 }
1614 
1615 void
1616 nb_unload()
1617 {
1618 }