Print this page
style updates to uts/intel/io
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/intel/io/intel_nb5000/nb5000_init.c
+++ new/usr/src/uts/intel/io/intel_nb5000/nb5000_init.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 */
25 25
26 26 #include <sys/types.h>
27 27 #include <sys/cmn_err.h>
28 28 #include <sys/errno.h>
29 29 #include <sys/log.h>
30 30 #include <sys/systm.h>
31 31 #include <sys/modctl.h>
32 32 #include <sys/errorq.h>
33 33 #include <sys/controlregs.h>
34 34 #include <sys/fm/util.h>
35 35 #include <sys/fm/protocol.h>
36 36 #include <sys/sysevent.h>
37 37 #include <sys/pghw.h>
38 38 #include <sys/cyclic.h>
39 39 #include <sys/pci_cfgspace.h>
40 40 #include <sys/mc_intel.h>
41 41 #include <sys/smbios.h>
42 42 #include <sys/pci.h>
43 43 #include <sys/pcie.h>
44 44 #include "nb5000.h"
45 45 #include "nb_log.h"
46 46 #include "dimm_phys.h"
47 47 #include "rank.h"
48 48
/* Non-zero enables the chipset's hardware patrol/demand scrubbers. */
int nb_hw_memory_scrub_enable = 1;
/* Counts calls that turned off the software memscrubber; see nb_scrubber_enable(). */
static int nb_sw_scrub_disabled = 0;

int nb_5000_memory_controller = 0;
int nb_number_memory_controllers = NB_5000_MAX_MEM_CONTROLLERS;
int nb_channels_per_branch = NB_MAX_CHANNELS_PER_BRANCH;
int nb_dimms_per_channel = 0;

nb_dimm_t **nb_dimms;		/* per-slot dimm table, nb_dimm_slots entries */
int nb_ndimm;			/* number of dimms actually present */
uint32_t nb_chipset;		/* INTEL_NB_* chipset identifier */
enum nb_memory_mode nb_mode;	/* normal/mirror/single-channel/spare-rank */
bank_select_t nb_banks[NB_MAX_MEM_BRANCH_SELECT];
rank_select_t nb_ranks[NB_5000_MAX_MEM_CONTROLLERS][NB_MAX_MEM_RANK_SELECT];
uint32_t top_of_low_memory;	/* TOLM: start of the memory hole below 4G */
uint8_t spare_rank[NB_5000_MAX_MEM_CONTROLLERS];

extern int nb_no_smbios;

errorq_t *nb_queue;
kmutex_t nb_mutex;

static int nb_dimm_slots;	/* total slots = controllers * channels * dimms */

/* Saved "internal" error register state, restored by nb_int_fini(). */
static uint32_t nb_err0_int;
static uint32_t nb_err1_int;
static uint32_t nb_err2_int;
static uint32_t nb_mcerr_int;
static uint32_t nb_emask_int;

/* Saved FB-DIMM error register state. */
static uint32_t nb_err0_fbd;
static uint32_t nb_err1_fbd;
static uint32_t nb_err2_fbd;
static uint32_t nb_mcerr_fbd;
static uint32_t nb_emask_fbd;

/* Saved memory (DDR2, 5100) error register state. */
static uint32_t nb_err0_mem;
static uint32_t nb_err1_mem;
static uint32_t nb_err2_mem;
static uint32_t nb_mcerr_mem;
static uint32_t nb_emask_mem;

/* Saved front-side-bus error register state. */
static uint16_t nb_err0_fsb;
static uint16_t nb_err1_fsb;
static uint16_t nb_err2_fsb;
static uint16_t nb_mcerr_fsb;
static uint16_t nb_emask_fsb;

/* Saved thermal error register state. */
static uint16_t nb_err0_thr;
static uint16_t nb_err1_thr;
static uint16_t nb_err2_thr;
static uint16_t nb_mcerr_thr;
static uint16_t nb_emask_thr;

/* Saved per-PCI-device PCI Express error masks, restored by nb_pex_fini(). */
static uint32_t emask_uncor_pex[NB_PCI_DEV];
static uint32_t emask_cor_pex[NB_PCI_DEV];
static uint32_t emask_rp_pex[NB_PCI_DEV];
static uint32_t docmd_pex[NB_PCI_DEV];
static uint32_t uncerrsev[NB_PCI_DEV];

/* Last MCERR masks programmed by the *_init() routines. */
static uint32_t l_mcerr_int;
static uint32_t l_mcerr_fbd;
static uint32_t l_mcerr_mem;
static uint16_t l_mcerr_fsb;
static uint16_t l_mcerr_thr;

/* Tunables: FB-DIMM error masking policy per chipset. */
uint_t nb5000_emask_fbd = EMASK_5000_FBD_RES;
uint_t nb5400_emask_fbd = 0;
int nb5000_reset_emask_fbd = 1;
uint_t nb5000_mask_poll_fbd = EMASK_FBD_NF;
uint_t nb5000_mask_bios_fbd = EMASK_FBD_FATAL;
uint_t nb5400_mask_poll_fbd = EMASK_5400_FBD_NF;
uint_t nb5400_mask_bios_fbd = EMASK_5400_FBD_FATAL;
uint_t nb7300_mask_poll_fbd = EMASK_7300_FBD_NF;
uint_t nb7300_mask_bios_fbd = EMASK_7300_FBD_FATAL;

/* Tunables: DDR2 memory error masking (5100 only). */
int nb5100_reset_emask_mem = 1;
uint_t nb5100_mask_poll_mem = EMASK_MEM_NF;

/* Tunables: front-side-bus error masking. */
uint_t nb5000_emask_fsb = 0;
int nb5000_reset_emask_fsb = 1;
uint_t nb5000_mask_poll_fsb = EMASK_FSB_NF;
uint_t nb5000_mask_bios_fsb = EMASK_FSB_FATAL;

/* Tunables: "internal" error masking per chipset. */
uint_t nb5100_emask_int = EMASK_INT_5100;
uint_t nb5400_emask_int = EMASK_INT_5400;

uint_t nb7300_emask_int = EMASK_INT_7300;
uint_t nb7300_emask_int_step0 = EMASK_INT_7300_STEP_0;
uint_t nb5000_emask_int = EMASK_INT_5000;
int nb5000_reset_emask_int = 1;
uint_t nb5000_mask_poll_int = EMASK_INT_NF;
uint_t nb5000_mask_bios_int = EMASK_INT_FATAL;
uint_t nb5100_mask_poll_int = EMASK_INT_5100_NF;
uint_t nb5100_mask_bios_int = EMASK_INT_5100_FATAL;

/* Tunables: thermal error masking. */
uint_t nb_mask_poll_thr = EMASK_THR_NF;
uint_t nb_mask_bios_thr = EMASK_THR_FATAL;

/* Tunables: PCI Express error masking. */
int nb5000_reset_uncor_pex = 0;
uint_t nb5000_mask_uncor_pex = 0;
int nb5000_reset_cor_pex = 0;
uint_t nb5000_mask_cor_pex = 0xffffffff;
uint32_t nb5000_rp_pex = 0x1;

int nb_mask_mc_set;	/* set once nb_int_mask_mc() has widened the mask */

/* Per-platform hook used to generate human-readable dimm labels. */
typedef struct find_dimm_label {
	void (*label_function)(int, char *, int);
} find_dimm_label_t;
159 159
↓ open down ↓ |
159 lines elided |
↑ open up ↑ |
160 160 static void x8450_dimm_label(int, char *, int);
161 161 static void cp3250_dimm_label(int, char *, int);
162 162
163 163 static struct platform_label {
164 164 const char *sys_vendor; /* SMB_TYPE_SYSTEM vendor prefix */
165 165 const char *sys_product; /* SMB_TYPE_SYSTEM product prefix */
166 166 find_dimm_label_t dimm_label;
167 167 int dimms_per_channel;
168 168 } platform_label[] = {
169 169 { "SUN MICROSYSTEMS", "SUN BLADE X8450 SERVER MODULE",
170 - x8450_dimm_label, 8 },
171 - { "MiTAC,Shunde", "CP3250", cp3250_dimm_label, 0 },
172 - { NULL, NULL, NULL, 0 }
170 + { x8450_dimm_label }, 8 },
171 + { "MiTAC,Shunde", "CP3250", { cp3250_dimm_label }, 0 },
172 + { NULL, NULL, { NULL }, 0 }
173 173 };
174 174
/*
 * Read the SPD status register for the branch/channel pair encoded in
 * the bus number (low bit = channel, remaining bits = branch).
 */
static unsigned short
read_spd(int bus)
{
	return (SPD_RD(bus >> 1, bus & 1));
}
186 186
187 187 static void
188 188 write_spdcmd(int bus, uint32_t val)
189 189 {
190 190 int branch = bus >> 1;
191 191 int channel = bus & 1;
192 192 SPDCMD_WR(branch, channel, val);
193 193 }
194 194
/*
 * Read one byte at "addr" from the SPD eeprom at SMBus address "slave"
 * on the given bus.  Returns the byte (0-255) or -1 on timeout/persistent
 * bus error.  Each attempt waits for the controller to go idle, issues
 * the read command, then polls for READ_DATA_VALID; a bus error is
 * retried up to 4 times.
 */
static int
read_spd_eeprom(int bus, int slave, int addr)
{
	int retry = 4;
	int wait;
	int spd;
	uint32_t cmd;

	for (;;) {
		/* wait for the SPD controller to go idle (~10ms budget) */
		wait = 1000;
		for (;;) {
			spd = read_spd(bus);
			if ((spd & SPD_BUSY) == 0)
				break;
			if (--wait == 0)
				return (-1);
			drv_usecwait(10);
		}
		cmd = SPD_EEPROM_WRITE | SPD_ADDR(slave, addr);
		write_spdcmd(bus, cmd);
		/* wait for the command to complete */
		wait = 1000;
		for (;;) {
			spd = read_spd(bus);
			if ((spd & SPD_BUSY) == 0)
				break;
			if (--wait == 0) {
				/* treat a stuck command as a bus error */
				spd = SPD_BUS_ERROR;
				break;
			}
			drv_usecwait(10);
		}
		/* wait for the read data to become valid */
		while ((spd & SPD_BUS_ERROR) == 0 &&
		    (spd & (SPD_READ_DATA_VALID|SPD_BUSY)) !=
		    SPD_READ_DATA_VALID) {
			spd = read_spd(bus);
			if (--wait == 0)
				return (-1);
		}
		if ((spd & SPD_BUS_ERROR) == 0)
			break;
		if (--retry == 0)
			return (-1);
	}
	/* low byte of the status register holds the data read */
	return (spd & 0xff);
}
240 240
241 241 static void
242 242 nb_fini()
243 243 {
244 244 int i, j;
245 245 int nchannels = nb_number_memory_controllers * nb_channels_per_branch;
246 246 nb_dimm_t **dimmpp;
247 247 nb_dimm_t *dimmp;
248 248
249 249 dimmpp = nb_dimms;
250 250 for (i = 0; i < nchannels; i++) {
251 251 for (j = 0; j < nb_dimms_per_channel; j++) {
252 252 dimmp = *dimmpp;
253 253 if (dimmp) {
254 254 kmem_free(dimmp, sizeof (nb_dimm_t));
255 255 *dimmpp = NULL;
256 256 }
257 257 dimmpp++;
258 258 }
259 259 }
260 260 kmem_free(nb_dimms, sizeof (nb_dimm_t *) * nb_dimm_slots);
261 261 nb_dimms = NULL;
262 262 dimm_fini();
263 263 }
264 264
/*
 * Turn on the chipset's hardware memory scrubber (patrol scrub always;
 * demand scrub too unless the branches are mirrored) and retire the
 * software memscrubber in its favor.
 */
void
nb_scrubber_enable()
{
	uint32_t mc;

	if (!nb_hw_memory_scrub_enable)
		return;

	mc = MC_RD();
	if ((mc & MC_MIRROR) != 0) /* mirror mode */
		mc |= MC_PATROL_SCRUB;
	else
		mc |= MC_PATROL_SCRUB|MC_DEMAND_SCRUB;
	MC_WR(mc);

	/*
	 * NOTE(review): the post-increment means the software scrubber is
	 * only disabled on the second and later calls, not the first —
	 * confirm this isn't meant to be !nb_sw_scrub_disabled++.
	 */
	if (nb_sw_scrub_disabled++)
		cmi_mc_sw_memscrub_disable();
}
283 283
/*
 * Fill in the identity fields (manufacturer, location, serial number,
 * date, part number, revision) of an FB-DIMM from its SPD eeprom.
 */
static void
fbd_eeprom(int channel, int dimm, nb_dimm_t *dp)
{
	int i, t;
	int spd_sz;

	/* byte[0] low nibble encodes the SPD eeprom size */
	t = read_spd_eeprom(channel, dimm, 0) & 0xf;
	if (t == 1)
		spd_sz = 128;
	else if (t == 2)
		spd_sz = 176;
	else
		spd_sz = 256;
	/* manufacturer JEP-106 id - byte[117-118] */
	dp->manufacture_id = read_spd_eeprom(channel, dimm, 117) |
	    (read_spd_eeprom(channel, dimm, 118) << 8);
	/* manufacturing location - byte[119] */
	dp->manufacture_location = read_spd_eeprom(channel, dimm, 119);
	/* serial number - byte[122-125], big-endian */
	dp->serial_number =
	    (read_spd_eeprom(channel, dimm, 122) << 24) |
	    (read_spd_eeprom(channel, dimm, 123) << 16) |
	    (read_spd_eeprom(channel, dimm, 124) << 8) |
	    read_spd_eeprom(channel, dimm, 125);
	/* manufacturing week (BCD) - byte[121] */
	t = read_spd_eeprom(channel, dimm, 121);
	dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
	/* manufacturing year - byte[120] */
	dp->manufacture_year = read_spd_eeprom(channel, dimm, 120);
	/* part number and revision live above byte 128 on larger eeproms */
	if (spd_sz > 128) {
		for (i = 0; i < sizeof (dp->part_number); i++) {
			dp->part_number[i] =
			    read_spd_eeprom(channel, dimm, 128 + i);
		}
		for (i = 0; i < sizeof (dp->revision); i++) {
			dp->revision[i] =
			    read_spd_eeprom(channel, dimm, 146 + i);
		}
	}
}
319 319
320 320 /* read the manR of the DDR2 dimm */
321 321 static void
322 322 ddr2_eeprom(int channel, int dimm, nb_dimm_t *dp)
323 323 {
324 324 int i, t;
325 325 int slave;
326 326
327 327 slave = channel & 0x1 ? dimm + 4 : dimm;
328 328
329 329 /* byte[3]: number of row addresses */
330 330 dp->nrow = read_spd_eeprom(channel, slave, 3) & 0x1f;
331 331
332 332 /* byte[4]: number of column addresses */
333 333 dp->ncolumn = read_spd_eeprom(channel, slave, 4) & 0xf;
334 334
335 335 /* byte[5]: numranks; 0 means one rank */
336 336 dp->nranks = (read_spd_eeprom(channel, slave, 5) & 0x3) + 1;
337 337
338 338 /* byte[6]: data width */
339 339 dp->width = (read_spd_eeprom(channel, slave, 6) >> 5) << 2;
340 340
341 341 /* byte[17]: number of banks */
342 342 dp->nbanks = read_spd_eeprom(channel, slave, 17);
343 343
344 344 dp->dimm_size = DIMMSIZE(dp->nrow, dp->ncolumn, dp->nranks, dp->nbanks,
345 345 dp->width);
346 346
347 347 /* manufacture-id - byte[64-65] */
348 348 dp->manufacture_id = read_spd_eeprom(channel, slave, 64) |
349 349 (read_spd_eeprom(channel, dimm, 65) << 8);
350 350
351 351 /* location - byte[72] */
352 352 dp->manufacture_location = read_spd_eeprom(channel, slave, 72);
353 353
354 354 /* serial number - byte[95-98] */
355 355 dp->serial_number =
356 356 (read_spd_eeprom(channel, slave, 98) << 24) |
357 357 (read_spd_eeprom(channel, slave, 97) << 16) |
358 358 (read_spd_eeprom(channel, slave, 96) << 8) |
359 359 read_spd_eeprom(channel, slave, 95);
360 360
361 361 /* week - byte[94] */
362 362 t = read_spd_eeprom(channel, slave, 94);
363 363 dp->manufacture_week = (t >> 4) * 10 + (t & 0xf);
364 364 /* week - byte[93] */
365 365 t = read_spd_eeprom(channel, slave, 93);
366 366 dp->manufacture_year = (t >> 4) * 10 + (t & 0xf) + 2000;
367 367
368 368 /* part number - byte[73-81] */
369 369 for (i = 0; i < 8; i++) {
370 370 dp->part_number[i] = read_spd_eeprom(channel, slave, 73 + i);
371 371 }
372 372
373 373 /* revision - byte[91-92] */
374 374 for (i = 0; i < 2; i++) {
375 375 dp->revision[i] = read_spd_eeprom(channel, slave, 91 + i);
376 376 }
377 377 }
378 378
379 379 static boolean_t
380 380 nb_dimm_present(int channel, int dimm)
381 381 {
382 382 boolean_t rc = B_FALSE;
383 383
384 384 if (nb_chipset == INTEL_NB_5100) {
385 385 int t, slave;
386 386 slave = channel & 0x1 ? dimm + 4 : dimm;
387 387 /* read the type field from the dimm and check for DDR2 type */
388 388 if ((t = read_spd_eeprom(channel, slave, SPD_MEM_TYPE)) == -1)
389 389 return (B_FALSE);
390 390 rc = (t & 0xf) == SPD_DDR2;
391 391 } else {
392 392 rc = MTR_PRESENT(MTR_RD(channel, dimm));
393 393 }
394 394
395 395 return (rc);
396 396 }
397 397
398 398 static nb_dimm_t *
399 399 nb_ddr2_dimm_init(int channel, int dimm, int start_rank)
400 400 {
401 401 nb_dimm_t *dp;
402 402
403 403 if (nb_dimm_present(channel, dimm) == B_FALSE)
404 404 return (NULL);
405 405
406 406 dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
407 407
408 408 ddr2_eeprom(channel, dimm, dp);
409 409
410 410 /* The 1st rank of the dimm takes on this value */
411 411 dp->start_rank = (uint8_t)start_rank;
412 412
413 413 dp->mtr_present = 1;
414 414
415 415 return (dp);
416 416 }
417 417
418 418 static nb_dimm_t *
419 419 nb_fbd_dimm_init(int channel, int dimm, uint16_t mtr)
420 420 {
421 421 nb_dimm_t *dp;
422 422 int t;
423 423
424 424 if (MTR_PRESENT(mtr) == 0)
425 425 return (NULL);
426 426 t = read_spd_eeprom(channel, dimm, SPD_MEM_TYPE) & 0xf;
427 427
428 428 /* check for the dimm type */
429 429 if (t != SPD_FBDIMM)
430 430 return (NULL);
431 431
432 432 dp = kmem_zalloc(sizeof (nb_dimm_t), KM_SLEEP);
433 433
434 434 fbd_eeprom(channel, dimm, dp);
435 435
436 436 dp->mtr_present = MTR_PRESENT(mtr);
437 437 dp->start_rank = dimm << 1;
438 438 dp->nranks = MTR_NUMRANK(mtr);
439 439 dp->nbanks = MTR_NUMBANK(mtr);
440 440 dp->ncolumn = MTR_NUMCOL(mtr);
441 441 dp->nrow = MTR_NUMROW(mtr);
442 442 dp->width = MTR_WIDTH(mtr);
443 443 dp->dimm_size = MTR_DIMMSIZE(mtr);
444 444
445 445 return (dp);
446 446 }
447 447
/*
 * Return the limit of the branch-select bank containing "base" for the
 * given controller, adjusted for the memory hole below 4G and halved
 * when both ways interleave without mirroring.  Returns 0 if base falls
 * in no bank owned by this controller.
 */
static uint64_t
mc_range(int controller, uint64_t base)
{
	int i;
	uint64_t limit = 0;

	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
		if (nb_banks[i].way[controller] && base >= nb_banks[i].base &&
		    base < nb_banks[i].limit) {
			limit = nb_banks[i].limit;
			/* back out the memory hole below 4G */
			if (base <= top_of_low_memory &&
			    limit > top_of_low_memory) {
				limit -= TLOW_MAX - top_of_low_memory;
			}
			/* two-way interleave: each branch covers half */
			if (nb_banks[i].way[0] && nb_banks[i].way[1] &&
			    nb_mode != NB_MEMORY_MIRROR) {
				limit = limit / 2;
			}
		}
	}
	return (limit);
}
470 470
/*
 * Decode the memory controller's address-map registers (TOLM, MIR, DMIR)
 * into the nb_banks[] and nb_ranks[][] tables and register each rank's
 * address range with the dimm topology layer.
 */
void
nb_mc_init()
{
	uint16_t tolm;
	uint16_t mir;
	uint32_t hole_base;
	uint32_t hole_size;
	uint32_t dmir;
	uint64_t base;
	uint64_t limit;
	uint8_t way0, way1, rank0, rank1, rank2, rank3, branch_interleave;
	int i, j, k;
	uint8_t interleave;

	base = 0;
	tolm = TOLM_RD();
	/* TOLM register: bits 15:12 give the top of low memory in 256MB units */
	top_of_low_memory = ((uint32_t)(tolm >> 12) & 0xf) << 28;
	/* First pass: decode the branch (MIR) ranges into nb_banks[] */
	for (i = 0; i < NB_MEM_BRANCH_SELECT; i++) {
		mir = MIR_RD(i);
		limit = (uint64_t)(mir >> 4) << 28;
		way0 = mir & 1;
		way1 = (mir >> 1) & 1;
		/* neither way set is treated as both ways */
		if (way0 == 0 && way1 == 0) {
			way0 = 1;
			way1 = 1;
		}
		/* account for the memory hole below 4G */
		if (limit > top_of_low_memory)
			limit += TLOW_MAX - top_of_low_memory;
		nb_banks[i].base = base;
		nb_banks[i].limit = limit;
		nb_banks[i].way[0] = way0;
		nb_banks[i].way[1] = way1;
		base = limit;
	}
	/* Second pass: decode each controller's rank (DMIR) ranges */
	for (i = 0; i < nb_number_memory_controllers; i++) {
		base = 0;

		for (j = 0; j < NB_MEM_RANK_SELECT; j++) {
			dmir = DMIR_RD(i, j);
			limit = ((uint64_t)(dmir >> 16) & 0xff) << 28;
			if (limit == 0) {
				limit = mc_range(i, base);
			}
			branch_interleave = 0;
			hole_base = 0;
			hole_size = 0;
			DMIR_RANKS(dmir, rank0, rank1, rank2, rank3);
			/* repeated rank ids reveal the interleave factor */
			if (rank0 == rank1)
				interleave = 1;
			else if (rank0 == rank2)
				interleave = 2;
			else
				interleave = 4;
			if (nb_mode != NB_MEMORY_MIRROR &&
			    nb_mode != NB_MEMORY_SINGLE_CHANNEL) {
				for (k = 0; k < NB_MEM_BRANCH_SELECT; k++) {
					if (base >= nb_banks[k].base &&
					    base < nb_banks[k].limit) {
						/*
						 * NOTE(review): ways are
						 * tested on nb_banks[i] while
						 * the range matched was
						 * nb_banks[k] — confirm the
						 * index is intended.
						 */
						if (nb_banks[i].way[0] &&
						    nb_banks[i].way[1]) {
							interleave *= 2;
							limit *= 2;
							branch_interleave = 1;
						}
						break;
					}
				}
			}
			/* carve out the memory hole below 4G if it intrudes */
			if (base < top_of_low_memory &&
			    limit > top_of_low_memory) {
				hole_base = top_of_low_memory;
				hole_size = TLOW_MAX - top_of_low_memory;
				limit += hole_size;
			} else if (base > top_of_low_memory) {
				limit += TLOW_MAX - top_of_low_memory;
			}
			nb_ranks[i][j].base = base;
			nb_ranks[i][j].limit = limit;
			nb_ranks[i][j].rank[0] = rank0;
			nb_ranks[i][j].rank[1] = rank1;
			nb_ranks[i][j].rank[2] = rank2;
			nb_ranks[i][j].rank[3] = rank3;
			nb_ranks[i][j].interleave = interleave;
			nb_ranks[i][j].branch_interleave = branch_interleave;
			nb_ranks[i][j].hole_base = hole_base;
			nb_ranks[i][j].hole_size = hole_size;
			/*
			 * NOTE(review): only ranks 1-3 are registered here;
			 * confirm rank0's dimm_add_rank() is handled
			 * elsewhere.
			 */
			if (limit > base) {
				if (rank0 != rank1) {
					dimm_add_rank(i, rank1,
					    branch_interleave, 1, base,
					    hole_base, hole_size, interleave,
					    limit);
					if (rank0 != rank2) {
						dimm_add_rank(i, rank2,
						    branch_interleave, 2, base,
						    hole_base, hole_size,
						    interleave, limit);
						dimm_add_rank(i, rank3,
						    branch_interleave, 3, base,
						    hole_base, hole_size,
						    interleave, limit);
					}
				}
			}
			base = limit;
		}
	}
}
579 579
580 580 void
581 581 nb_used_spare_rank(int branch, int bad_rank)
582 582 {
583 583 int i;
584 584 int j;
585 585
586 586 for (i = 0; i < NB_MEM_RANK_SELECT; i++) {
587 587 for (j = 0; j < NB_RANKS_IN_SELECT; j++) {
588 588 if (nb_ranks[branch][i].rank[j] == bad_rank) {
589 589 nb_ranks[branch][i].rank[j] =
590 590 spare_rank[branch];
591 591 i = NB_MEM_RANK_SELECT;
592 592 break;
593 593 }
594 594 }
595 595 }
596 596 }
597 597
598 598 find_dimm_label_t *
599 599 find_dimms_per_channel()
600 600 {
601 601 struct platform_label *pl;
602 602 smbios_info_t si;
603 603 smbios_system_t sy;
604 604 id_t id;
605 605 int i, j;
606 606 find_dimm_label_t *rt = NULL;
607 607
608 608 if (ksmbios != NULL && nb_no_smbios == 0) {
609 609 if ((id = smbios_info_system(ksmbios, &sy)) != SMB_ERR &&
610 610 smbios_info_common(ksmbios, id, &si) != SMB_ERR) {
611 611 for (pl = platform_label; pl->sys_vendor; pl++) {
612 612 if (strncmp(pl->sys_vendor,
613 613 si.smbi_manufacturer,
614 614 strlen(pl->sys_vendor)) == 0 &&
615 615 strncmp(pl->sys_product, si.smbi_product,
616 616 strlen(pl->sys_product)) == 0) {
617 617 nb_dimms_per_channel =
618 618 pl->dimms_per_channel;
619 619 rt = &pl->dimm_label;
620 620 break;
621 621 }
622 622 }
623 623 }
624 624 }
625 625 if (nb_dimms_per_channel == 0) {
626 626 /*
627 627 * Scan all memory channels if we find a channel which has more
628 628 * dimms then we have seen before set nb_dimms_per_channel to
629 629 * the number of dimms on the channel
630 630 */
631 631 for (i = 0; i < nb_number_memory_controllers; i++) {
632 632 for (j = nb_dimms_per_channel;
633 633 j < NB_MAX_DIMMS_PER_CHANNEL; j++) {
634 634 if (nb_dimm_present(i, j))
635 635 nb_dimms_per_channel = j + 1;
636 636 }
637 637 }
638 638 }
639 639 return (rt);
640 640 }
641 641
/*
 * State shared by the smbios iterators below while matching
 * SMB_TYPE_MEMDEVICE records against the physical dimm table.
 */
struct smb_dimm_rec {
	int dimms;		/* physical dimms found by hardware probing */
	int slots;		/* memory-device records seen in smbios */
	int populated;		/* smbios records reporting non-zero size */
	nb_dimm_t **dimmpp;	/* cursor into the nb_dimms[] slot table */
};
648 648
/*
 * smbios_iter() callback: copy the device-locator string of each
 * populated SMB_TYPE_MEMDEVICE record into the label of the
 * corresponding physical dimm, advancing the rp->dimmpp cursor in step.
 * Returns -1 to stop iteration when the slot table is exhausted.
 */
static int
dimm_label(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
{
	struct smb_dimm_rec *rp = (struct smb_dimm_rec *)arg;
	nb_dimm_t ***dimmpp;
	nb_dimm_t *dimmp;
	smbios_memdevice_t md;

	dimmpp = &rp->dimmpp;
	if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
		if (*dimmpp >= &nb_dimms[nb_dimm_slots])
			return (-1);
		dimmp = **dimmpp;
		if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0 &&
		    md.smbmd_dloc != NULL) {
			if (md.smbmd_size) {
				/*
				 * Slot counts agree (or smbios knows about
				 * more populated devices than we found):
				 * this record has no matching dimm, skip it.
				 */
				if (dimmp == NULL &&
				    (rp->slots == nb_dimm_slots ||
				    rp->dimms < rp->populated)) {
					(*dimmpp)++;
					return (0);
				}
				/*
				 * if there is no physical dimm for this smbios
				 * record it is because this system has less
				 * physical slots than the controller supports
				 * so skip empty slots to find the slot this
				 * smbios record belongs too
				 */
				while (dimmp == NULL) {
					(*dimmpp)++;
					if (*dimmpp >= &nb_dimms[nb_dimm_slots])
						return (-1);
					dimmp = **dimmpp;
				}
				(void) snprintf(dimmp->label,
				    sizeof (dimmp->label), "%s", md.smbmd_dloc);
				(*dimmpp)++;
			}
		}
	}
	return (0);
}
692 692
693 693 static int
694 694 check_memdevice(smbios_hdl_t *shp, const smbios_struct_t *sp, void *arg)
695 695 {
696 696 struct smb_dimm_rec *rp = (struct smb_dimm_rec *)arg;
697 697 smbios_memdevice_t md;
698 698
699 699 if (sp->smbstr_type == SMB_TYPE_MEMDEVICE) {
700 700 if (smbios_info_memdevice(shp, sp->smbstr_id, &md) == 0) {
701 701 rp->slots++;
702 702 if (md.smbmd_size) {
703 703 rp->populated++;
704 704 }
705 705 }
706 706 }
707 707 return (0);
708 708 }
709 709
710 710 void
711 711 nb_smbios()
712 712 {
713 713 struct smb_dimm_rec r;
714 714 int i;
715 715
716 716 if (ksmbios != NULL && nb_no_smbios == 0) {
717 717 r.dimms = 0;
718 718 r.slots = 0;
719 719 r.populated = 0;
720 720 r.dimmpp = nb_dimms;
721 721 for (i = 0; i < nb_dimm_slots; i++) {
722 722 if (nb_dimms[i] != NULL)
723 723 r.dimms++;
724 724 }
725 725 (void) smbios_iter(ksmbios, check_memdevice, &r);
726 726 (void) smbios_iter(ksmbios, dimm_label, &r);
727 727 }
728 728 }
729 729
/*
 * Sun Blade X8450 dimm labels: slot s of channel c is silk-screened
 * D(s * 4 + c), with eight dimms per channel.
 */
static void
x8450_dimm_label(int dimm, char *label, int label_sz)
{
	int channel = dimm >> 3;
	int slot = dimm & 0x7;

	(void) snprintf(label, label_sz, "D%d", slot * 4 + channel);
}
738 738
739 739 /*
740 740 * CP3250 DIMM labels
741 741 * Channel Dimm Label
742 742 * 0 0 A0
743 743 * 1 0 B0
744 744 * 0 1 A1
745 745 * 1 1 B1
746 746 * 0 2 A2
747 747 * 1 2 B2
748 748 */
749 749 static void
750 750 cp3250_dimm_label(int dimm, char *label, int label_sz)
751 751 {
752 752 int channel = dimm / nb_dimms_per_channel;
753 753
754 754 dimm = dimm % nb_dimms_per_channel;
755 755 (void) snprintf(label, label_sz, "%c%d", channel == 0 ? 'A' : 'B',
756 756 dimm);
757 757 }
758 758
759 759 /*
760 760 * Map the rank id to dimm id of a channel
761 761 * For the 5100 chipset, walk through the dimm list of channel the check if
762 762 * the given rank id is within the rank range assigned to the dimm.
763 763 * For other chipsets, the dimm is rank/2.
764 764 */
765 765 int
766 766 nb_rank2dimm(int channel, int rank)
767 767 {
768 768 int i;
769 769 nb_dimm_t **dimmpp = nb_dimms;
770 770
771 771 if (nb_chipset != INTEL_NB_5100)
772 772 return (rank >> 1);
773 773
774 774 dimmpp += channel * nb_dimms_per_channel;
775 775 for (i = 0; i < nb_dimms_per_channel; i++) {
776 776 if ((rank >= dimmpp[i]->start_rank) &&
777 777 (rank < dimmpp[i]->start_rank + dimmpp[i]->nranks)) {
778 778 return (i);
779 779 }
780 780 }
781 781 return (-1);
782 782 }
783 783
/*
 * Build the dimm table for the DDR2-based 5100 chipset: allocate
 * nb_dimms[], probe every slot, assign starting rank ids, apply the
 * platform label routine if one was found, and settle on the memory
 * mode (normal / spare-rank / single-channel).
 */
static void
nb_ddr2_dimms_init(find_dimm_label_t *label_function)
{
	int i, j;
	int start_rank;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	nb_dimm_slots = nb_number_memory_controllers * nb_channels_per_branch *
	    nb_dimms_per_channel;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_dimm_slots, KM_SLEEP);
	dimmpp = nb_dimms;
	nb_mode = NB_MEMORY_NORMAL;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		/* spare-rank mode is detected from the sparing registers */
		if (nb_mode == NB_MEMORY_NORMAL) {
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}

		/* The 1st dimm of a channel starts at rank 0 */
		start_rank = 0;

		for (j = 0; j < nb_dimms_per_channel; j++) {
			dimmpp[j] = nb_ddr2_dimm_init(i, j, start_rank);
			if (dimmpp[j]) {
				nb_ndimm ++;
				if (label_function) {
					label_function->label_function(
					    (i * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
				start_rank += dimmpp[j]->nranks;
				/*
				 * add an extra rank because
				 * single-ranked dimm still takes on two ranks.
				 */
				if (dimmpp[j]->nranks & 0x1)
					start_rank++;
			}
		}
		dimmpp += nb_dimms_per_channel;
	}

	/*
	 * single channel is supported.
	 */
	if (nb_ndimm > 0 && nb_ndimm <= nb_dimms_per_channel) {
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	}
}
841 841
/*
 * Build the dimm table for the FB-DIMM chipsets: determine the memory
 * mode from the MCA/MC registers, allocate nb_dimms[], and probe both
 * channels of every branch, labeling dimms as they are found.
 */
static void
nb_fbd_dimms_init(find_dimm_label_t *label_function)
{
	int i, j, k, l;
	uint16_t mtr;
	uint32_t mc, mca;
	uint32_t spcpc;
	uint8_t spcps;
	nb_dimm_t **dimmpp;

	mca = MCA_RD();
	mc = MC_RD();
	if (mca & MCA_SCHDIMM)  /* single-channel mode */
		nb_mode = NB_MEMORY_SINGLE_CHANNEL;
	else if ((mc & MC_MIRROR) != 0) /* mirror mode */
		nb_mode = NB_MEMORY_MIRROR;
	else
		nb_mode = NB_MEMORY_NORMAL;
	/* two channels per branch on the FB-DIMM chipsets */
	nb_dimm_slots = nb_number_memory_controllers * 2 * nb_dimms_per_channel;
	nb_dimms = (nb_dimm_t **)kmem_zalloc(sizeof (nb_dimm_t *) *
	    nb_dimm_slots, KM_SLEEP);
	dimmpp = nb_dimms;
	for (i = 0; i < nb_number_memory_controllers; i++) {
		/* spare-rank mode is detected from the sparing registers */
		if (nb_mode == NB_MEMORY_NORMAL) {
			spcpc = SPCPC_RD(i);
			spcps = SPCPS_RD(i);
			if ((spcpc & SPCPC_SPARE_ENABLE) != 0 &&
			    (spcps & SPCPS_SPARE_DEPLOYED) != 0)
				nb_mode = NB_MEMORY_SPARE_RANK;
			spare_rank[i] = SPCPC_SPRANK(spcpc);
		}
		for (j = 0; j < nb_dimms_per_channel; j++) {
			/* one MTR describes slot j on both channels of branch i */
			mtr = MTR_RD(i, j);
			k = i * 2;
			dimmpp[j] = nb_fbd_dimm_init(k, j, mtr);
			if (dimmpp[j]) {
				nb_ndimm ++;
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + j,
					    dimmpp[j]->label,
					    sizeof (dimmpp[j]->label));
				}
			}
			/* second channel of the branch */
			dimmpp[j + nb_dimms_per_channel] =
			    nb_fbd_dimm_init(k + 1, j, mtr);
			l = j + nb_dimms_per_channel;
			if (dimmpp[l]) {
				if (label_function) {
					label_function->label_function(
					    (k * nb_dimms_per_channel) + l,
					    dimmpp[l]->label,
					    sizeof (dimmpp[l]->label));
				}
				nb_ndimm ++;
			}
		}
		dimmpp += nb_dimms_per_channel * 2;
	}
}
902 902
903 903 static void
904 904 nb_dimms_init(find_dimm_label_t *label_function)
905 905 {
906 906 if (nb_chipset == INTEL_NB_5100)
907 907 nb_ddr2_dimms_init(label_function);
908 908 else
909 909 nb_fbd_dimms_init(label_function);
910 910
911 911 if (label_function == NULL)
912 912 nb_smbios();
913 913 }
914 914
/* Setup the ESI port registers to enable SERR for southbridge */
static void
nb_pex_init()
{
	int i = 0; /* ESI port */
	uint16_t regw;

	/* save current state for restoration by nb_pex_fini() */
	emask_uncor_pex[i] = EMASK_UNCOR_PEX_RD(i);
	emask_cor_pex[i] = EMASK_COR_PEX_RD(i);
	emask_rp_pex[i] = EMASK_RP_PEX_RD(i);
	docmd_pex[i] = PEX_ERR_DOCMD_RD(i);
	uncerrsev[i] = UNCERRSEV_RD(i);

	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
	if (nb_chipset == INTEL_NB_5400) {
		/* disable masking of ERR pins used by DOCMD */
		PEX_ERR_PIN_MASK_WR(i, 0x10);
	}

	/* RP error message (CE/NFE/FE) detect mask */
	EMASK_RP_PEX_WR(i, nb5000_rp_pex);

	/* Command Register - Enable SERR */
	regw = nb_pci_getw(0, i, 0, PCI_CONF_COMM, 0);
	nb_pci_putw(0, i, 0, PCI_CONF_COMM,
	    regw | PCI_COMM_SERR_ENABLE);

	/* Root Control Register - SERR on NFE/FE */
	PEXROOTCTL_WR(i, PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
	    PCIE_ROOTCTL_SYS_ERR_ON_FE_EN);

	/* AER UE Mask - Mask UR */
	UNCERRMSK_WR(i, PCIE_AER_UCE_UR);
}
952 952
/* Restore the ESI port error state saved by nb_pex_init(). */
static void
nb_pex_fini()
{
	int i = 0; /* ESI port */

	EMASK_UNCOR_PEX_WR(i, emask_uncor_pex[i]);
	EMASK_COR_PEX_WR(i, emask_cor_pex[i]);
	EMASK_RP_PEX_WR(i, emask_rp_pex[i]);
	PEX_ERR_DOCMD_WR(i, docmd_pex[i]);

	/*
	 * NOTE(review): the tunable-driven masks are re-applied after the
	 * restore, overriding the saved values — confirm this is intended.
	 */
	if (nb5000_reset_uncor_pex)
		EMASK_UNCOR_PEX_WR(i, nb5000_mask_uncor_pex);
	if (nb5000_reset_cor_pex)
		EMASK_COR_PEX_WR(i, nb5000_mask_cor_pex);
}
968 968
/*
 * Configure the chipset's "internal" error reporting: save the current
 * register state, mask everything, then program the FERR/NERR selection
 * so BIOS-owned fatal errors still signal MCERR while non-fatal errors
 * are left for the polling code, and finally set the error mask per
 * chipset (and stepping, for the 7300).
 */
void
nb_int_init()
{
	uint32_t err0_int;
	uint32_t err1_int;
	uint32_t err2_int;
	uint32_t mcerr_int;
	uint32_t emask_int;
	uint32_t nb_mask_bios_int;
	uint32_t nb_mask_poll_int;
	uint16_t stepping;

	/* the 5100 uses its own poll/bios mask tunables */
	if (nb_chipset == INTEL_NB_5100) {
		nb_mask_bios_int = nb5100_mask_bios_int;
		nb_mask_poll_int = nb5100_mask_poll_int;
	} else {
		nb_mask_bios_int = nb5000_mask_bios_int;
		nb_mask_poll_int = nb5000_mask_poll_int;
	}
	/* save current state for restoration by nb_int_fini() */
	err0_int = ERR0_INT_RD();
	err1_int = ERR1_INT_RD();
	err2_int = ERR2_INT_RD();
	mcerr_int = MCERR_INT_RD();
	emask_int = EMASK_INT_RD();

	nb_err0_int = err0_int;
	nb_err1_int = err1_int;
	nb_err2_int = err2_int;
	nb_mcerr_int = mcerr_int;
	nb_emask_int = emask_int;

	/* mask everything while we rewrite the selection registers */
	ERR0_INT_WR(ERR_INT_ALL);
	ERR1_INT_WR(ERR_INT_ALL);
	ERR2_INT_WR(ERR_INT_ALL);
	MCERR_INT_WR(ERR_INT_ALL);
	EMASK_INT_WR(ERR_INT_ALL);

	/* bios-owned errors keep MCERR unless bios already routed them */
	mcerr_int &= ~nb_mask_bios_int;
	mcerr_int |= nb_mask_bios_int & (~err0_int | ~err1_int | ~err2_int);
	/* polled errors are masked out of MCERR and the FERR/NERR regs */
	mcerr_int |= nb_mask_poll_int;
	err0_int |= nb_mask_poll_int;
	err1_int |= nb_mask_poll_int;
	err2_int |= nb_mask_poll_int;

	l_mcerr_int = mcerr_int;
	ERR0_INT_WR(err0_int);
	ERR1_INT_WR(err1_int);
	ERR2_INT_WR(err2_int);
	MCERR_INT_WR(mcerr_int);
	if (nb5000_reset_emask_int) {
		if (nb_chipset == INTEL_NB_7300) {
			/* stepping 0 of the 7300 needs a different mask */
			stepping = NB5000_STEPPING();
			if (stepping == 0)
				EMASK_5000_INT_WR(nb7300_emask_int_step0);
			else
				EMASK_5000_INT_WR(nb7300_emask_int);
		} else if (nb_chipset == INTEL_NB_5400) {
			EMASK_5400_INT_WR(nb5400_emask_int |
			    (emask_int & EMASK_INT_RES));
		} else if (nb_chipset == INTEL_NB_5100) {
			EMASK_5000_INT_WR(nb5100_emask_int);
		} else {
			EMASK_5000_INT_WR(nb5000_emask_int);
		}
	} else {
		EMASK_INT_WR(nb_emask_int);
	}
}
1037 1037
/*
 * Undo nb_int_init(): put the chipset-wide error detection registers
 * back to the values BIOS had programmed.  The all-ones write first is
 * the same mask-everything/reset sequence used in nb_int_init();
 * presumably it quiesces the registers before the saved values go back
 * in -- polarity per the chipset datasheet.
 */
void
nb_int_fini()
{
	ERR0_INT_WR(ERR_INT_ALL);
	ERR1_INT_WR(ERR_INT_ALL);
	ERR2_INT_WR(ERR_INT_ALL);
	MCERR_INT_WR(ERR_INT_ALL);
	EMASK_INT_WR(ERR_INT_ALL);

	/* restore the BIOS values saved by nb_int_init() */
	ERR0_INT_WR(nb_err0_int);
	ERR1_INT_WR(nb_err1_int);
	ERR2_INT_WR(nb_err2_int);
	MCERR_INT_WR(nb_mcerr_int);
	EMASK_INT_WR(nb_emask_int);
}
1053 1053
1054 1054 void
1055 1055 nb_int_mask_mc(uint32_t mc_mask_int)
1056 1056 {
1057 1057 uint32_t emask_int;
1058 1058
1059 1059 emask_int = MCERR_INT_RD();
1060 1060 if ((emask_int & mc_mask_int) != mc_mask_int) {
1061 1061 MCERR_INT_WR(emask_int|mc_mask_int);
1062 1062 nb_mask_mc_set = 1;
1063 1063 }
1064 1064 }
1065 1065
/*
 * Initialize the FB-DIMM channel error detection registers.  The
 * BIOS-programmed values are saved (for restoration by nb_fbd_fini()),
 * everything is then masked, and finally the registers are reprogrammed
 * with the per-chipset sets of errors handled via machine check (MCERR)
 * versus polling (ERR0..ERR2).
 */
static void
nb_fbd_init()
{
	uint32_t err0_fbd;
	uint32_t err1_fbd;
	uint32_t err2_fbd;
	uint32_t mcerr_fbd;
	uint32_t emask_fbd;
	uint32_t emask_bios_fbd;	/* per-chipset BIOS mask table */
	uint32_t emask_poll_fbd;	/* per-chipset polled-error table */

	/* snapshot the BIOS-programmed values */
	err0_fbd = ERR0_FBD_RD();
	err1_fbd = ERR1_FBD_RD();
	err2_fbd = ERR2_FBD_RD();
	mcerr_fbd = MCERR_FBD_RD();
	emask_fbd = EMASK_FBD_RD();

	/* save them for nb_fbd_fini() */
	nb_err0_fbd = err0_fbd;
	nb_err1_fbd = err1_fbd;
	nb_err2_fbd = err2_fbd;
	nb_mcerr_fbd = mcerr_fbd;
	nb_emask_fbd = emask_fbd;

	/* mask everything while we compute the new settings */
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	/* select the mask tables for the detected chipset */
	if (nb_chipset == INTEL_NB_7300) {
		if (nb_mode == NB_MEMORY_MIRROR) {
			/* MCH 7300 errata 34 */
			emask_bios_fbd = nb7300_mask_bios_fbd & ~EMASK_FBD_M23;
			emask_poll_fbd = nb7300_mask_poll_fbd;
			mcerr_fbd |= EMASK_FBD_M23;
		} else {
			emask_bios_fbd = nb7300_mask_bios_fbd;
			emask_poll_fbd = nb7300_mask_poll_fbd;
		}
	} else if (nb_chipset == INTEL_NB_5400) {
		emask_bios_fbd = nb5400_mask_bios_fbd;
		emask_poll_fbd = nb5400_mask_poll_fbd;
	} else {
		emask_bios_fbd = nb5000_mask_bios_fbd;
		emask_poll_fbd = nb5000_mask_poll_fbd;
	}
	/*
	 * For the BIOS-mask bits, follow what BIOS left in ERR0..ERR2:
	 * a bit stays set in MCERR only if at least one of the ERRn
	 * registers has it clear.  Polled errors are masked in MCERR and
	 * in ERR0..ERR2 (register bits set = masked; see the all-ones
	 * writes above).
	 */
	mcerr_fbd &= ~emask_bios_fbd;
	mcerr_fbd |= emask_bios_fbd & (~err0_fbd | ~err1_fbd | ~err2_fbd);
	mcerr_fbd |= emask_poll_fbd;
	err0_fbd |= emask_poll_fbd;
	err1_fbd |= emask_poll_fbd;
	err2_fbd |= emask_poll_fbd;

	/* remember the computed MCERR value for nb_mask_mc_reset() */
	l_mcerr_fbd = mcerr_fbd;
	ERR0_FBD_WR(err0_fbd);
	ERR1_FBD_WR(err1_fbd);
	ERR2_FBD_WR(err2_fbd);
	MCERR_FBD_WR(mcerr_fbd);
	if (nb5000_reset_emask_fbd) {
		if (nb_chipset == INTEL_NB_5400)
			EMASK_FBD_WR(nb5400_emask_fbd);
		else
			EMASK_FBD_WR(nb5000_emask_fbd);
	} else {
		EMASK_FBD_WR(nb_emask_fbd);
	}
}
1133 1133
1134 1134 void
1135 1135 nb_fbd_mask_mc(uint32_t mc_mask_fbd)
1136 1136 {
1137 1137 uint32_t emask_fbd;
1138 1138
1139 1139 emask_fbd = MCERR_FBD_RD();
1140 1140 if ((emask_fbd & mc_mask_fbd) != mc_mask_fbd) {
1141 1141 MCERR_FBD_WR(emask_fbd|mc_mask_fbd);
1142 1142 nb_mask_mc_set = 1;
1143 1143 }
1144 1144 }
1145 1145
/*
 * Undo nb_fbd_init(): mask everything, then restore the BIOS values of
 * the FB-DIMM error registers saved at init time.
 */
static void
nb_fbd_fini()
{
	ERR0_FBD_WR(0xffffffff);
	ERR1_FBD_WR(0xffffffff);
	ERR2_FBD_WR(0xffffffff);
	MCERR_FBD_WR(0xffffffff);
	EMASK_FBD_WR(0xffffffff);

	/* restore the values saved by nb_fbd_init() */
	ERR0_FBD_WR(nb_err0_fbd);
	ERR1_FBD_WR(nb_err1_fbd);
	ERR2_FBD_WR(nb_err2_fbd);
	MCERR_FBD_WR(nb_mcerr_fbd);
	EMASK_FBD_WR(nb_emask_fbd);
}
1161 1161
/*
 * Initialize the MEM (memory) error detection registers.  Callers only
 * invoke this for the 5100 chipset (see nb_dev_init()/nb_dev_reinit());
 * other chipsets use nb_fbd_init() instead.  Unlike the FBD path there
 * is no BIOS-mask handling here -- only the polled error set is applied.
 */
static void
nb_mem_init()
{
	uint32_t err0_mem;
	uint32_t err1_mem;
	uint32_t err2_mem;
	uint32_t mcerr_mem;
	uint32_t emask_mem;
	uint32_t emask_poll_mem;

	/* snapshot the BIOS-programmed values */
	err0_mem = ERR0_MEM_RD();
	err1_mem = ERR1_MEM_RD();
	err2_mem = ERR2_MEM_RD();
	mcerr_mem = MCERR_MEM_RD();
	emask_mem = EMASK_MEM_RD();

	/* save them for nb_mem_fini() */
	nb_err0_mem = err0_mem;
	nb_err1_mem = err1_mem;
	nb_err2_mem = err2_mem;
	nb_mcerr_mem = mcerr_mem;
	nb_emask_mem = emask_mem;

	/* mask everything while the new settings are computed */
	ERR0_MEM_WR(0xffffffff);
	ERR1_MEM_WR(0xffffffff);
	ERR2_MEM_WR(0xffffffff);
	MCERR_MEM_WR(0xffffffff);
	EMASK_MEM_WR(0xffffffff);

	/* mask the polled errors in MCERR and in ERR0..ERR2 */
	emask_poll_mem = nb5100_mask_poll_mem;
	mcerr_mem |= emask_poll_mem;
	err0_mem |= emask_poll_mem;
	err1_mem |= emask_poll_mem;
	err2_mem |= emask_poll_mem;

	/* remember the computed MCERR value for nb_mask_mc_reset() */
	l_mcerr_mem = mcerr_mem;
	ERR0_MEM_WR(err0_mem);
	ERR1_MEM_WR(err1_mem);
	ERR2_MEM_WR(err2_mem);
	MCERR_MEM_WR(mcerr_mem);
	if (nb5100_reset_emask_mem) {
		EMASK_MEM_WR(~nb5100_mask_poll_mem);
	} else {
		EMASK_MEM_WR(nb_emask_mem);
	}
}
1207 1207
1208 1208 void
1209 1209 nb_mem_mask_mc(uint32_t mc_mask_mem)
1210 1210 {
1211 1211 uint32_t emask_mem;
1212 1212
1213 1213 emask_mem = MCERR_MEM_RD();
1214 1214 if ((emask_mem & mc_mask_mem) != mc_mask_mem) {
1215 1215 MCERR_MEM_WR(emask_mem|mc_mask_mem);
1216 1216 nb_mask_mc_set = 1;
1217 1217 }
1218 1218 }
1219 1219
/*
 * Undo nb_mem_init(): mask everything, then restore the BIOS values of
 * the 5100 MEM error registers saved at init time.
 */
static void
nb_mem_fini()
{
	ERR0_MEM_WR(0xffffffff);
	ERR1_MEM_WR(0xffffffff);
	ERR2_MEM_WR(0xffffffff);
	MCERR_MEM_WR(0xffffffff);
	EMASK_MEM_WR(0xffffffff);

	/* restore the values saved by nb_mem_init() */
	ERR0_MEM_WR(nb_err0_mem);
	ERR1_MEM_WR(nb_err1_mem);
	ERR2_MEM_WR(nb_err2_mem);
	MCERR_MEM_WR(nb_mcerr_mem);
	EMASK_MEM_WR(nb_emask_mem);
}
1235 1235
1236 1236 static void
1237 1237 nb_fsb_init()
1238 1238 {
1239 1239 uint16_t err0_fsb;
1240 1240 uint16_t err1_fsb;
1241 1241 uint16_t err2_fsb;
1242 1242 uint16_t mcerr_fsb;
1243 1243 uint16_t emask_fsb;
1244 1244
1245 1245 err0_fsb = ERR0_FSB_RD(0);
1246 1246 err1_fsb = ERR1_FSB_RD(0);
1247 1247 err2_fsb = ERR2_FSB_RD(0);
1248 1248 mcerr_fsb = MCERR_FSB_RD(0);
1249 1249 emask_fsb = EMASK_FSB_RD(0);
1250 1250
1251 1251 ERR0_FSB_WR(0, 0xffff);
1252 1252 ERR1_FSB_WR(0, 0xffff);
1253 1253 ERR2_FSB_WR(0, 0xffff);
1254 1254 MCERR_FSB_WR(0, 0xffff);
1255 1255 EMASK_FSB_WR(0, 0xffff);
1256 1256
1257 1257 ERR0_FSB_WR(1, 0xffff);
1258 1258 ERR1_FSB_WR(1, 0xffff);
1259 1259 ERR2_FSB_WR(1, 0xffff);
1260 1260 MCERR_FSB_WR(1, 0xffff);
1261 1261 EMASK_FSB_WR(1, 0xffff);
1262 1262
1263 1263 nb_err0_fsb = err0_fsb;
1264 1264 nb_err1_fsb = err1_fsb;
1265 1265 nb_err2_fsb = err2_fsb;
1266 1266 nb_mcerr_fsb = mcerr_fsb;
1267 1267 nb_emask_fsb = emask_fsb;
1268 1268
1269 1269 mcerr_fsb &= ~nb5000_mask_bios_fsb;
1270 1270 mcerr_fsb |= nb5000_mask_bios_fsb & (~err2_fsb | ~err1_fsb | ~err0_fsb);
1271 1271 mcerr_fsb |= nb5000_mask_poll_fsb;
1272 1272 err0_fsb |= nb5000_mask_poll_fsb;
1273 1273 err1_fsb |= nb5000_mask_poll_fsb;
1274 1274 err2_fsb |= nb5000_mask_poll_fsb;
1275 1275
1276 1276 l_mcerr_fsb = mcerr_fsb;
1277 1277 ERR0_FSB_WR(0, err0_fsb);
1278 1278 ERR1_FSB_WR(0, err1_fsb);
1279 1279 ERR2_FSB_WR(0, err2_fsb);
1280 1280 MCERR_FSB_WR(0, mcerr_fsb);
1281 1281 if (nb5000_reset_emask_fsb) {
1282 1282 EMASK_FSB_WR(0, nb5000_emask_fsb);
1283 1283 } else {
1284 1284 EMASK_FSB_WR(0, nb_emask_fsb);
1285 1285 }
1286 1286
1287 1287 ERR0_FSB_WR(1, err0_fsb);
1288 1288 ERR1_FSB_WR(1, err1_fsb);
1289 1289 ERR2_FSB_WR(1, err2_fsb);
1290 1290 MCERR_FSB_WR(1, mcerr_fsb);
1291 1291 if (nb5000_reset_emask_fsb) {
1292 1292 EMASK_FSB_WR(1, nb5000_emask_fsb);
1293 1293 } else {
1294 1294 EMASK_FSB_WR(1, nb_emask_fsb);
1295 1295 }
1296 1296
1297 1297 if (nb_chipset == INTEL_NB_7300) {
1298 1298 ERR0_FSB_WR(2, 0xffff);
1299 1299 ERR1_FSB_WR(2, 0xffff);
1300 1300 ERR2_FSB_WR(2, 0xffff);
1301 1301 MCERR_FSB_WR(2, 0xffff);
1302 1302 EMASK_FSB_WR(2, 0xffff);
1303 1303
1304 1304 ERR0_FSB_WR(3, 0xffff);
1305 1305 ERR1_FSB_WR(3, 0xffff);
1306 1306 ERR2_FSB_WR(3, 0xffff);
1307 1307 MCERR_FSB_WR(3, 0xffff);
1308 1308 EMASK_FSB_WR(3, 0xffff);
1309 1309
1310 1310 ERR0_FSB_WR(2, err0_fsb);
1311 1311 ERR1_FSB_WR(2, err1_fsb);
1312 1312 ERR2_FSB_WR(2, err2_fsb);
1313 1313 MCERR_FSB_WR(2, mcerr_fsb);
1314 1314 if (nb5000_reset_emask_fsb) {
1315 1315 EMASK_FSB_WR(2, nb5000_emask_fsb);
1316 1316 } else {
1317 1317 EMASK_FSB_WR(2, nb_emask_fsb);
1318 1318 }
1319 1319
1320 1320 ERR0_FSB_WR(3, err0_fsb);
1321 1321 ERR1_FSB_WR(3, err1_fsb);
1322 1322 ERR2_FSB_WR(3, err2_fsb);
1323 1323 MCERR_FSB_WR(3, mcerr_fsb);
1324 1324 if (nb5000_reset_emask_fsb) {
1325 1325 EMASK_FSB_WR(3, nb5000_emask_fsb);
1326 1326 } else {
1327 1327 EMASK_FSB_WR(3, nb_emask_fsb);
1328 1328 }
1329 1329 }
1330 1330 }
1331 1331
1332 1332 static void
1333 1333 nb_fsb_fini() {
1334 1334 ERR0_FSB_WR(0, 0xffff);
1335 1335 ERR1_FSB_WR(0, 0xffff);
1336 1336 ERR2_FSB_WR(0, 0xffff);
1337 1337 MCERR_FSB_WR(0, 0xffff);
1338 1338 EMASK_FSB_WR(0, 0xffff);
1339 1339
1340 1340 ERR0_FSB_WR(0, nb_err0_fsb);
1341 1341 ERR1_FSB_WR(0, nb_err1_fsb);
1342 1342 ERR2_FSB_WR(0, nb_err2_fsb);
1343 1343 MCERR_FSB_WR(0, nb_mcerr_fsb);
1344 1344 EMASK_FSB_WR(0, nb_emask_fsb);
1345 1345
1346 1346 ERR0_FSB_WR(1, 0xffff);
1347 1347 ERR1_FSB_WR(1, 0xffff);
1348 1348 ERR2_FSB_WR(1, 0xffff);
1349 1349 MCERR_FSB_WR(1, 0xffff);
1350 1350 EMASK_FSB_WR(1, 0xffff);
1351 1351
1352 1352 ERR0_FSB_WR(1, nb_err0_fsb);
1353 1353 ERR1_FSB_WR(1, nb_err1_fsb);
1354 1354 ERR2_FSB_WR(1, nb_err2_fsb);
1355 1355 MCERR_FSB_WR(1, nb_mcerr_fsb);
1356 1356 EMASK_FSB_WR(1, nb_emask_fsb);
1357 1357
1358 1358 if (nb_chipset == INTEL_NB_7300) {
1359 1359 ERR0_FSB_WR(2, 0xffff);
1360 1360 ERR1_FSB_WR(2, 0xffff);
1361 1361 ERR2_FSB_WR(2, 0xffff);
1362 1362 MCERR_FSB_WR(2, 0xffff);
1363 1363 EMASK_FSB_WR(2, 0xffff);
1364 1364
1365 1365 ERR0_FSB_WR(2, nb_err0_fsb);
1366 1366 ERR1_FSB_WR(2, nb_err1_fsb);
1367 1367 ERR2_FSB_WR(2, nb_err2_fsb);
1368 1368 MCERR_FSB_WR(2, nb_mcerr_fsb);
1369 1369 EMASK_FSB_WR(2, nb_emask_fsb);
1370 1370
1371 1371 ERR0_FSB_WR(3, 0xffff);
1372 1372 ERR1_FSB_WR(3, 0xffff);
1373 1373 ERR2_FSB_WR(3, 0xffff);
1374 1374 MCERR_FSB_WR(3, 0xffff);
1375 1375 EMASK_FSB_WR(3, 0xffff);
1376 1376
1377 1377 ERR0_FSB_WR(3, nb_err0_fsb);
1378 1378 ERR1_FSB_WR(3, nb_err1_fsb);
1379 1379 ERR2_FSB_WR(3, nb_err2_fsb);
1380 1380 MCERR_FSB_WR(3, nb_mcerr_fsb);
1381 1381 EMASK_FSB_WR(3, nb_emask_fsb);
1382 1382 }
1383 1383 }
1384 1384
1385 1385 void
1386 1386 nb_fsb_mask_mc(int fsb, uint16_t mc_mask_fsb)
1387 1387 {
1388 1388 uint16_t emask_fsb;
1389 1389
1390 1390 emask_fsb = MCERR_FSB_RD(fsb);
1391 1391 if ((emask_fsb & mc_mask_fsb) != mc_mask_fsb) {
1392 1392 MCERR_FSB_WR(fsb, emask_fsb|mc_mask_fsb|EMASK_FBD_RES);
1393 1393 nb_mask_mc_set = 1;
1394 1394 }
1395 1395 }
1396 1396
/*
 * Initialize the thermal (THR) error detection registers.  Only the
 * 5400 chipset has these; on anything else this is a no-op.  Same
 * pattern as the other register banks: save the BIOS values, mask
 * everything, then reprogram with the BIOS-mask and polled-error sets.
 */
static void
nb_thr_init()
{
	uint16_t err0_thr;
	uint16_t err1_thr;
	uint16_t err2_thr;
	uint16_t mcerr_thr;
	uint16_t emask_thr;

	if (nb_chipset == INTEL_NB_5400) {
		/* snapshot the BIOS-programmed values */
		err0_thr = ERR0_THR_RD(0);
		err1_thr = ERR1_THR_RD(0);
		err2_thr = ERR2_THR_RD(0);
		mcerr_thr = MCERR_THR_RD(0);
		emask_thr = EMASK_THR_RD(0);

		/* mask everything while the new settings are computed */
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		/* save the BIOS values for nb_thr_fini() */
		nb_err0_thr = err0_thr;
		nb_err1_thr = err1_thr;
		nb_err2_thr = err2_thr;
		nb_mcerr_thr = mcerr_thr;
		nb_emask_thr = emask_thr;

		/*
		 * BIOS-mask bits stay set in MCERR only where at least
		 * one of ERR0..ERR2 leaves them clear; polled errors are
		 * masked in all four registers.
		 */
		mcerr_thr &= ~nb_mask_bios_thr;
		mcerr_thr |= nb_mask_bios_thr &
		    (~err2_thr | ~err1_thr | ~err0_thr);
		mcerr_thr |= nb_mask_poll_thr;
		err0_thr |= nb_mask_poll_thr;
		err1_thr |= nb_mask_poll_thr;
		err2_thr |= nb_mask_poll_thr;

		/* remember the computed MCERR value for nb_mask_mc_reset() */
		l_mcerr_thr = mcerr_thr;
		ERR0_THR_WR(err0_thr);
		ERR1_THR_WR(err1_thr);
		ERR2_THR_WR(err2_thr);
		MCERR_THR_WR(mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1441 1441
/*
 * Undo nb_thr_init(): mask everything, then restore the BIOS values of
 * the thermal error registers.  5400 only; a no-op elsewhere.
 */
static void
nb_thr_fini()
{
	if (nb_chipset == INTEL_NB_5400) {
		ERR0_THR_WR(0xffff);
		ERR1_THR_WR(0xffff);
		ERR2_THR_WR(0xffff);
		MCERR_THR_WR(0xffff);
		EMASK_THR_WR(0xffff);

		/* restore the values saved by nb_thr_init() */
		ERR0_THR_WR(nb_err0_thr);
		ERR1_THR_WR(nb_err1_thr);
		ERR2_THR_WR(nb_err2_thr);
		MCERR_THR_WR(nb_mcerr_thr);
		EMASK_THR_WR(nb_emask_thr);
	}
}
1459 1459
1460 1460 void
1461 1461 nb_thr_mask_mc(uint16_t mc_mask_thr)
1462 1462 {
1463 1463 uint16_t emask_thr;
1464 1464
1465 1465 emask_thr = MCERR_THR_RD(0);
1466 1466 if ((emask_thr & mc_mask_thr) != mc_mask_thr) {
1467 1467 MCERR_THR_WR(emask_thr|mc_mask_thr);
1468 1468 nb_mask_mc_set = 1;
1469 1469 }
1470 1470 }
1471 1471
/*
 * Rewrite every MCERR register with the locally computed values saved
 * by the nb_*_init() routines (l_mcerr_*), undoing any additional
 * masking applied through the nb_*_mask_mc() routines.
 */
void
nb_mask_mc_reset()
{
	/* the 5100 uses the MEM registers, all other chipsets the FBD set */
	if (nb_chipset == INTEL_NB_5100)
		MCERR_MEM_WR(l_mcerr_mem);
	else
		MCERR_FBD_WR(l_mcerr_fbd);
	MCERR_INT_WR(l_mcerr_int);
	MCERR_FSB_WR(0, l_mcerr_fsb);
	MCERR_FSB_WR(1, l_mcerr_fsb);
	if (nb_chipset == INTEL_NB_7300) {
		/* the 7300 has four front side buses */
		MCERR_FSB_WR(2, l_mcerr_fsb);
		MCERR_FSB_WR(3, l_mcerr_fsb);
	}
	if (nb_chipset == INTEL_NB_5400) {
		/* only the 5400 has thermal error registers */
		MCERR_THR_WR(l_mcerr_thr);
	}
}
1490 1490
/*
 * One-time device initialization: create the error queue, discover the
 * DIMM topology, and program all of the error detection register banks.
 *
 * Returns 0 on success or EAGAIN if the error queue cannot be created
 * (the mutex is destroyed again on that path).
 */
int
nb_dev_init()
{
	find_dimm_label_t *label_function_p;

	label_function_p = find_dimms_per_channel();
	mutex_init(&nb_mutex, NULL, MUTEX_DRIVER, NULL);
	nb_queue = errorq_create("nb_queue", nb_drain, NULL, NB_MAX_ERRORS,
	    sizeof (nb_logout_t), 1, ERRORQ_VITAL);
	if (nb_queue == NULL) {
		mutex_destroy(&nb_mutex);
		return (EAGAIN);
	}
	nb_int_init();
	nb_thr_init();
	dimm_init();
	nb_dimms_init(label_function_p);
	nb_mc_init();
	nb_pex_init();
	/* the 5100 uses the MEM register set, all other chipsets FBD */
	if (nb_chipset == INTEL_NB_5100)
		nb_mem_init();
	else
		nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();
	return (0);
}
1518 1518
1519 1519 int
1520 1520 nb_init()
1521 1521 {
1522 1522 /* return ENOTSUP if there is no PCI config space support. */
1523 1523 if (pci_getl_func == NULL)
1524 1524 return (ENOTSUP);
1525 1525
1526 1526 /* get vendor and device */
1527 1527 nb_chipset = (*pci_getl_func)(0, 0, 0, PCI_CONF_VENID);
1528 1528 switch (nb_chipset) {
1529 1529 default:
1530 1530 if (nb_5000_memory_controller == 0)
1531 1531 return (ENOTSUP);
1532 1532 break;
1533 1533 case INTEL_NB_7300:
1534 1534 case INTEL_NB_5000P:
1535 1535 case INTEL_NB_5000X:
1536 1536 break;
1537 1537 case INTEL_NB_5000V:
1538 1538 case INTEL_NB_5000Z:
1539 1539 nb_number_memory_controllers = 1;
1540 1540 break;
1541 1541 case INTEL_NB_5100:
1542 1542 nb_channels_per_branch = 1;
1543 1543 break;
1544 1544 case INTEL_NB_5400:
1545 1545 case INTEL_NB_5400A:
1546 1546 case INTEL_NB_5400B:
1547 1547 nb_chipset = INTEL_NB_5400;
1548 1548 break;
1549 1549 }
1550 1550 return (0);
1551 1551 }
1552 1552
/*
 * Re-run device initialization after a configuration change: rebuild
 * the DIMM topology, reprogram every error register bank, re-enable the
 * scrubber, and finally free the previous DIMM table.
 */
void
nb_dev_reinit()
{
	int i, j;
	int nchannels = nb_number_memory_controllers * 2;
	nb_dimm_t **dimmpp;
	nb_dimm_t *dimmp;
	nb_dimm_t **old_nb_dimms;
	int old_nb_dimms_per_channel;
	find_dimm_label_t *label_function_p;
	int dimm_slot = nb_dimm_slots;	/* size of the table being replaced */

	/* keep the old DIMM table so it can be freed after the rebuild */
	old_nb_dimms = nb_dimms;
	old_nb_dimms_per_channel = nb_dimms_per_channel;

	dimm_fini();
	nb_dimms_per_channel = 0;
	label_function_p = find_dimms_per_channel();
	dimm_init();
	nb_dimms_init(label_function_p);
	nb_mc_init();
	nb_pex_init();
	nb_int_init();
	nb_thr_init();
	/* the 5100 uses the MEM register set, all other chipsets FBD */
	if (nb_chipset == INTEL_NB_5100)
		nb_mem_init();
	else
		nb_fbd_init();
	nb_fsb_init();
	nb_scrubber_enable();

	/*
	 * Free the old per-slot nb_dimm_t structures and then the table
	 * itself.  NOTE(review): nchannels was computed from the current
	 * nb_number_memory_controllers before the rebuild; this assumes
	 * the controller count is stable across a re-init -- confirm.
	 */
	dimmpp = old_nb_dimms;
	for (i = 0; i < nchannels; i++) {
		for (j = 0; j < old_nb_dimms_per_channel; j++) {
			dimmp = *dimmpp;
			if (dimmp) {
				kmem_free(dimmp, sizeof (nb_dimm_t));
				*dimmpp = NULL;
			}
			dimmpp++;
		}
	}
	kmem_free(old_nb_dimms, sizeof (nb_dimm_t *) * dimm_slot);
}
1597 1597
/*
 * Tear down everything nb_dev_init() set up: destroy the error queue
 * and mutex, then restore the BIOS-programmed state of all error
 * register banks.
 */
void
nb_dev_unload()
{
	errorq_destroy(nb_queue);
	nb_queue = NULL;
	mutex_destroy(&nb_mutex);
	nb_int_fini();
	nb_thr_fini();
	/* mirror the 5100 MEM vs. FBD register split used at init time */
	if (nb_chipset == INTEL_NB_5100)
		nb_mem_fini();
	else
		nb_fbd_fini();
	nb_fsb_fini();
	nb_pex_fini();
	nb_fini();
}
1614 1614
/*
 * Module unload hook; intentionally empty -- all teardown is done in
 * nb_dev_unload().
 */
void
nb_unload()
{
}
↓ open down ↓ |
1436 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX