Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/i86pc/io/pciex/npe.c
+++ new/usr/src/uts/i86pc/io/pciex/npe.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 28 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
29 29 */
30 30
31 31 /*
32 32 * Host to PCI-Express local bus driver
33 33 */
34 34
35 35 #include <sys/conf.h>
36 36 #include <sys/modctl.h>
37 37 #include <sys/file.h>
38 38 #include <sys/pci_impl.h>
39 39 #include <sys/pcie_impl.h>
40 40 #include <sys/sysmacros.h>
41 41 #include <sys/ddi_intr.h>
42 42 #include <sys/sunndi.h>
43 43 #include <sys/sunddi.h>
44 44 #include <sys/ddifm.h>
45 45 #include <sys/ndifm.h>
46 46 #include <sys/fm/util.h>
47 47 #include <sys/hotplug/pci/pcie_hp.h>
48 48 #include <io/pci/pci_tools_ext.h>
49 49 #include <io/pci/pci_common.h>
50 50 #include <io/pciex/pcie_nvidia.h>
51 51
52 52 /*
53 53 * Helper Macros
54 54 */
55 55 #define NPE_IS_HANDLE_FOR_STDCFG_ACC(hp) \
56 56 ((hp) != NULL && \
57 57 ((ddi_acc_hdl_t *)(hp))->ah_platform_private != NULL && \
58 58 (((ddi_acc_impl_t *)((ddi_acc_hdl_t *)(hp))-> \
59 59 ah_platform_private)-> \
60 60 ahi_acc_attr &(DDI_ACCATTR_CPU_VADDR|DDI_ACCATTR_CONFIG_SPACE)) \
61 61 == DDI_ACCATTR_CONFIG_SPACE)
62 62
63 63 /*
64 64 * Bus Operation functions
65 65 */
66 66 static int npe_bus_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
67 67 off_t, off_t, caddr_t *);
68 68 static int npe_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t,
69 69 void *, void *);
70 70 static int npe_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
71 71 ddi_intr_handle_impl_t *, void *);
72 72 static int npe_fm_init(dev_info_t *, dev_info_t *, int,
73 73 ddi_iblock_cookie_t *);
74 74
75 75 static int npe_fm_callback(dev_info_t *, ddi_fm_error_t *, const void *);
76 76
77 77 /*
78 78 * Disable URs and Received MA for all PCIe devices. Until x86 SW is changed so
79 79 * that random drivers do not do PIO accesses on devices that it does not own,
80 80 * these error bits must be disabled. SERR must also be disabled if URs have
81 81 * been masked.
82 82 */
83 83 uint32_t npe_aer_uce_mask = PCIE_AER_UCE_UR;
84 84 uint32_t npe_aer_ce_mask = 0;
85 85 uint32_t npe_aer_suce_mask = PCIE_AER_SUCE_RCVD_MA;
86 86
87 87 struct bus_ops npe_bus_ops = {
88 88 BUSO_REV,
89 89 npe_bus_map,
90 90 NULL,
91 91 NULL,
92 92 NULL,
93 93 i_ddi_map_fault,
94 94 NULL,
95 95 ddi_dma_allochdl,
96 96 ddi_dma_freehdl,
97 97 ddi_dma_bindhdl,
98 98 ddi_dma_unbindhdl,
99 99 ddi_dma_flush,
100 100 ddi_dma_win,
101 101 ddi_dma_mctl,
102 102 npe_ctlops,
103 103 ddi_bus_prop_op,
104 104 0, /* (*bus_get_eventcookie)(); */
105 105 0, /* (*bus_add_eventcall)(); */
106 106 0, /* (*bus_remove_eventcall)(); */
107 107 0, /* (*bus_post_event)(); */
108 108 0, /* (*bus_intr_ctl)(); */
109 109 0, /* (*bus_config)(); */
110 110 0, /* (*bus_unconfig)(); */
111 111 npe_fm_init, /* (*bus_fm_init)(); */
112 112 NULL, /* (*bus_fm_fini)(); */
113 113 NULL, /* (*bus_fm_access_enter)(); */
114 114 NULL, /* (*bus_fm_access_exit)(); */
115 115 NULL, /* (*bus_power)(); */
116 116 npe_intr_ops, /* (*bus_intr_op)(); */
117 117 pcie_hp_common_ops /* (*bus_hp_op)(); */
118 118 };
119 119
120 120 static int npe_open(dev_t *, int, int, cred_t *);
121 121 static int npe_close(dev_t, int, int, cred_t *);
122 122 static int npe_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
123 123
124 124 struct cb_ops npe_cb_ops = {
125 125 npe_open, /* open */
126 126 npe_close, /* close */
127 127 nodev, /* strategy */
128 128 nodev, /* print */
129 129 nodev, /* dump */
130 130 nodev, /* read */
131 131 nodev, /* write */
132 132 npe_ioctl, /* ioctl */
133 133 nodev, /* devmap */
134 134 nodev, /* mmap */
135 135 nodev, /* segmap */
136 136 nochpoll, /* poll */
137 137 pcie_prop_op, /* cb_prop_op */
138 138 NULL, /* streamtab */
139 139 D_NEW | D_MP | D_HOTPLUG, /* Driver compatibility flag */
140 140 CB_REV, /* rev */
141 141 nodev, /* int (*cb_aread)() */
142 142 nodev /* int (*cb_awrite)() */
143 143 };
144 144
145 145
146 146 /*
147 147 * Device Node Operation functions
148 148 */
149 149 static int npe_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
150 150 static int npe_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
151 151 static int npe_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
152 152
153 153 struct dev_ops npe_ops = {
154 154 DEVO_REV, /* devo_rev */
155 155 0, /* refcnt */
156 156 npe_info, /* info */
157 157 nulldev, /* identify */
158 158 nulldev, /* probe */
159 159 npe_attach, /* attach */
160 160 npe_detach, /* detach */
161 161 nulldev, /* reset */
162 162 &npe_cb_ops, /* driver operations */
163 163 &npe_bus_ops, /* bus operations */
164 164 NULL, /* power */
165 165 ddi_quiesce_not_needed, /* quiesce */
166 166 };
167 167
168 168 /*
169 169 * Internal routines in support of particular npe_ctlops.
170 170 */
171 171 static int npe_removechild(dev_info_t *child);
172 172 static int npe_initchild(dev_info_t *child);
173 173
174 174 /*
175 175 * External support routine
176 176 */
177 177 extern void npe_query_acpi_mcfg(dev_info_t *dip);
178 178 extern void npe_ck804_fix_aer_ptr(ddi_acc_handle_t cfg_hdl);
179 179 extern int npe_disable_empty_bridges_workaround(dev_info_t *child);
180 180 extern void npe_nvidia_error_workaround(ddi_acc_handle_t cfg_hdl);
181 181 extern void npe_intel_error_workaround(ddi_acc_handle_t cfg_hdl);
182 182 extern boolean_t npe_is_mmcfg_supported(dev_info_t *dip);
183 183 extern void npe_enable_htmsi_children(dev_info_t *dip);
184 184 extern int npe_save_htconfig_children(dev_info_t *dip);
185 185 extern int npe_restore_htconfig_children(dev_info_t *dip);
186 186
187 187 /*
↓ open down ↓ |
187 lines elided |
↑ open up ↑ |
188 188 * Module linkage information for the kernel.
189 189 */
190 190 static struct modldrv modldrv = {
191 191 &mod_driverops, /* Type of module */
192 192 "Host to PCIe nexus driver", /* Name of module */
193 193 &npe_ops, /* driver ops */
194 194 };
195 195
196 196 static struct modlinkage modlinkage = {
197 197 MODREV_1,
198 - (void *)&modldrv,
199 - NULL
198 + { (void *)&modldrv, NULL }
200 199 };
201 200
202 201 /* Save minimal state. */
203 202 void *npe_statep;
204 203
205 204 int
206 205 _init(void)
207 206 {
208 207 int e;
209 208
210 209 /*
211 210 * Initialize per-pci bus soft state pointer.
212 211 */
213 212 e = ddi_soft_state_init(&npe_statep, sizeof (pci_state_t), 1);
214 213 if (e != 0)
215 214 return (e);
216 215
217 216 if ((e = mod_install(&modlinkage)) != 0)
218 217 ddi_soft_state_fini(&npe_statep);
219 218
220 219 return (e);
221 220 }
222 221
223 222
224 223 int
225 224 _fini(void)
226 225 {
227 226 int rc;
228 227
229 228 rc = mod_remove(&modlinkage);
230 229 if (rc != 0)
231 230 return (rc);
232 231
233 232 ddi_soft_state_fini(&npe_statep);
234 233 return (rc);
235 234 }
236 235
237 236
238 237 int
239 238 _info(struct modinfo *modinfop)
240 239 {
241 240 return (mod_info(&modlinkage, modinfop));
242 241 }
243 242
244 243 /*ARGSUSED*/
245 244 static int
246 245 npe_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
247 246 {
248 247 minor_t minor = getminor((dev_t)arg);
249 248 int instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
250 249 pci_state_t *pcip = ddi_get_soft_state(npe_statep, instance);
251 250 int ret = DDI_SUCCESS;
252 251
253 252 switch (cmd) {
254 253 case DDI_INFO_DEVT2INSTANCE:
255 254 *result = (void *)(intptr_t)instance;
256 255 break;
257 256 case DDI_INFO_DEVT2DEVINFO:
258 257 if (pcip == NULL) {
259 258 ret = DDI_FAILURE;
260 259 break;
261 260 }
262 261
263 262 *result = (void *)pcip->pci_dip;
264 263 break;
265 264 default:
266 265 ret = DDI_FAILURE;
267 266 break;
268 267 }
269 268
270 269 return (ret);
271 270 }
272 271
273 272 /*ARGSUSED*/
274 273 static int
275 274 npe_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
276 275 {
277 276 int instance = ddi_get_instance(devi);
278 277 pci_state_t *pcip = NULL;
279 278
280 279 if (cmd == DDI_RESUME) {
281 280 /*
282 281 * the system might still be able to resume even if this fails
283 282 */
284 283 (void) npe_restore_htconfig_children(devi);
285 284 return (DDI_SUCCESS);
286 285 }
287 286
288 287 /*
289 288 * We must do this here in order to ensure that all top level devices
290 289 * get their HyperTransport MSI mapping regs programmed first.
291 290 * "Memory controller" and "hostbridge" class devices are leaf devices
292 291 * that may affect MSI translation functionality for devices
293 292 * connected to the same link/bus.
294 293 *
295 294 * This will also program HT MSI mapping registers on root buses
296 295 * devices (basically sitting on an HT bus) that are not dependent
297 296 * on the aforementioned HT devices for MSI translation.
298 297 */
299 298 npe_enable_htmsi_children(devi);
300 299
301 300 if (ddi_prop_update_string(DDI_DEV_T_NONE, devi, "device_type",
302 301 "pciex") != DDI_PROP_SUCCESS) {
303 302 cmn_err(CE_WARN, "npe: 'device_type' prop create failed");
304 303 }
305 304
306 305 if (ddi_soft_state_zalloc(npe_statep, instance) == DDI_SUCCESS)
307 306 pcip = ddi_get_soft_state(npe_statep, instance);
308 307
309 308 if (pcip == NULL)
310 309 return (DDI_FAILURE);
311 310
312 311 pcip->pci_dip = devi;
313 312 pcip->pci_soft_state = PCI_SOFT_STATE_CLOSED;
314 313
315 314 if (pcie_init(devi, NULL) != DDI_SUCCESS)
316 315 goto fail1;
317 316
318 317 /* Second arg: initialize for pci_express root nexus */
319 318 if (pcitool_init(devi, B_TRUE) != DDI_SUCCESS)
320 319 goto fail2;
321 320
322 321 pcip->pci_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
323 322 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
324 323 ddi_fm_init(devi, &pcip->pci_fmcap, &pcip->pci_fm_ibc);
325 324
326 325 if (pcip->pci_fmcap & DDI_FM_ERRCB_CAPABLE) {
327 326 ddi_fm_handler_register(devi, npe_fm_callback, NULL);
328 327 }
329 328
330 329 PCIE_DIP2PFD(devi) = kmem_zalloc(sizeof (pf_data_t), KM_SLEEP);
331 330 pcie_rc_init_pfd(devi, PCIE_DIP2PFD(devi));
332 331
333 332 npe_query_acpi_mcfg(devi);
334 333 ddi_report_dev(devi);
335 334 pcie_fab_init_bus(devi, PCIE_BUS_FINAL);
336 335
337 336 return (DDI_SUCCESS);
338 337
339 338 fail2:
340 339 (void) pcie_uninit(devi);
341 340 fail1:
342 341 pcie_rc_fini_bus(devi);
343 342 ddi_soft_state_free(npe_statep, instance);
344 343
345 344 return (DDI_FAILURE);
346 345 }
347 346
348 347 /*ARGSUSED*/
349 348 static int
350 349 npe_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
351 350 {
352 351 int instance = ddi_get_instance(devi);
353 352 pci_state_t *pcip;
354 353
355 354 pcip = ddi_get_soft_state(npe_statep, ddi_get_instance(devi));
356 355
357 356 switch (cmd) {
358 357 case DDI_DETACH:
359 358 pcie_fab_fini_bus(devi, PCIE_BUS_INITIAL);
360 359
361 360 /* Uninitialize pcitool support. */
362 361 pcitool_uninit(devi);
363 362
364 363 if (pcie_uninit(devi) != DDI_SUCCESS)
365 364 return (DDI_FAILURE);
366 365
367 366 if (pcip->pci_fmcap & DDI_FM_ERRCB_CAPABLE)
368 367 ddi_fm_handler_unregister(devi);
369 368
370 369 pcie_rc_fini_pfd(PCIE_DIP2PFD(devi));
371 370 kmem_free(PCIE_DIP2PFD(devi), sizeof (pf_data_t));
372 371
373 372 ddi_fm_fini(devi);
374 373 ddi_soft_state_free(npe_statep, instance);
375 374 return (DDI_SUCCESS);
376 375
377 376 case DDI_SUSPEND:
378 377 /*
379 378 * the system might still be able to suspend/resume even if
380 379 * this fails
381 380 */
382 381 (void) npe_save_htconfig_children(devi);
383 382 return (DDI_SUCCESS);
384 383 default:
385 384 return (DDI_FAILURE);
386 385 }
387 386 }
388 387
389 388 /*
390 389 * Configure the access handle for standard configuration space
391 390 * access (see pci_fm_acc_setup for code that initializes the
392 391 * access-function pointers).
393 392 */
394 393 static int
395 394 npe_setup_std_pcicfg_acc(dev_info_t *rdip, ddi_map_req_t *mp,
396 395 ddi_acc_hdl_t *hp, off_t offset, off_t len)
397 396 {
398 397 int ret;
399 398
400 399 if ((ret = pci_fm_acc_setup(hp, offset, len)) ==
401 400 DDI_SUCCESS) {
402 401 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
403 402 mp->map_handlep->ah_acc.devacc_attr_access
404 403 != DDI_DEFAULT_ACC) {
405 404 ndi_fmc_insert(rdip, ACC_HANDLE,
406 405 (void *)mp->map_handlep, NULL);
407 406 }
408 407 }
409 408 return (ret);
410 409 }
411 410
412 411 static int
413 412 npe_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
414 413 off_t offset, off_t len, caddr_t *vaddrp)
415 414 {
416 415 int rnumber;
417 416 int length;
418 417 int space;
419 418 ddi_acc_impl_t *ap;
420 419 ddi_acc_hdl_t *hp;
421 420 ddi_map_req_t mr;
422 421 pci_regspec_t pci_reg;
423 422 pci_regspec_t *pci_rp;
424 423 struct regspec reg;
425 424 pci_acc_cfblk_t *cfp;
426 425 int retval;
427 426 int64_t *ecfginfo;
428 427 uint_t nelem;
429 428
430 429 mr = *mp; /* Get private copy of request */
431 430 mp = &mr;
432 431
433 432 /*
434 433 * check for register number
435 434 */
436 435 switch (mp->map_type) {
437 436 case DDI_MT_REGSPEC:
438 437 pci_reg = *(pci_regspec_t *)(mp->map_obj.rp);
439 438 pci_rp = &pci_reg;
440 439 if (pci_common_get_reg_prop(rdip, pci_rp) != DDI_SUCCESS)
441 440 return (DDI_FAILURE);
442 441 break;
443 442 case DDI_MT_RNUMBER:
444 443 rnumber = mp->map_obj.rnumber;
445 444 /*
446 445 * get ALL "reg" properties for dip, select the one of
447 446 * of interest. In x86, "assigned-addresses" property
448 447 * is identical to the "reg" property, so there is no
449 448 * need to cross check the two to determine the physical
450 449 * address of the registers.
451 450 * This routine still performs some validity checks to
452 451 * make sure that everything is okay.
453 452 */
454 453 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
455 454 DDI_PROP_DONTPASS, "reg", (int **)&pci_rp,
456 455 (uint_t *)&length) != DDI_PROP_SUCCESS)
457 456 return (DDI_FAILURE);
458 457
459 458 /*
460 459 * validate the register number.
461 460 */
462 461 length /= (sizeof (pci_regspec_t) / sizeof (int));
463 462 if (rnumber >= length) {
464 463 ddi_prop_free(pci_rp);
465 464 return (DDI_FAILURE);
466 465 }
467 466
468 467 /*
469 468 * copy the required entry.
470 469 */
471 470 pci_reg = pci_rp[rnumber];
472 471
473 472 /*
474 473 * free the memory allocated by ddi_prop_lookup_int_array
475 474 */
476 475 ddi_prop_free(pci_rp);
477 476
478 477 pci_rp = &pci_reg;
479 478 if (pci_common_get_reg_prop(rdip, pci_rp) != DDI_SUCCESS)
480 479 return (DDI_FAILURE);
481 480 mp->map_type = DDI_MT_REGSPEC;
482 481 break;
483 482 default:
484 483 return (DDI_ME_INVAL);
485 484 }
486 485
487 486 space = pci_rp->pci_phys_hi & PCI_REG_ADDR_M;
488 487
489 488 /*
490 489 * check for unmap and unlock of address space
491 490 */
492 491 if ((mp->map_op == DDI_MO_UNMAP) || (mp->map_op == DDI_MO_UNLOCK)) {
493 492 switch (space) {
494 493 case PCI_ADDR_IO:
495 494 reg.regspec_bustype = 1;
496 495 break;
497 496
498 497 case PCI_ADDR_CONFIG:
499 498 /*
500 499 * If this is an unmap/unlock of a standard config
501 500 * space mapping (memory-mapped config space mappings
502 501 * would have the DDI_ACCATTR_CPU_VADDR bit set in the
503 502 * acc_attr), undo that setup here.
504 503 */
505 504 if (NPE_IS_HANDLE_FOR_STDCFG_ACC(mp->map_handlep)) {
506 505
507 506 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
508 507 mp->map_handlep->ah_acc.devacc_attr_access
509 508 != DDI_DEFAULT_ACC) {
510 509 ndi_fmc_remove(rdip, ACC_HANDLE,
511 510 (void *)mp->map_handlep);
512 511 }
513 512 return (DDI_SUCCESS);
514 513 }
515 514
516 515 pci_rp->pci_size_low = PCIE_CONF_HDR_SIZE;
517 516
518 517 /* FALLTHROUGH */
519 518 case PCI_ADDR_MEM64:
520 519 /*
521 520 * MEM64 requires special treatment on map, to check
522 521 * that the device is below 4G. On unmap, however,
523 522 * we can assume that everything is OK... the map
524 523 * must have succeeded.
525 524 */
526 525 /* FALLTHROUGH */
527 526 case PCI_ADDR_MEM32:
528 527 reg.regspec_bustype = 0;
529 528 break;
530 529
531 530 default:
532 531 return (DDI_FAILURE);
533 532 }
534 533
535 534 /*
536 535 * Adjust offset and length
537 536 * A non-zero length means override the one in the regspec.
538 537 */
539 538 pci_rp->pci_phys_low += (uint_t)offset;
540 539 if (len != 0)
541 540 pci_rp->pci_size_low = len;
542 541
543 542 reg.regspec_addr = pci_rp->pci_phys_low;
544 543 reg.regspec_size = pci_rp->pci_size_low;
545 544
546 545 	mp->map_obj.rp = &reg;
547 546 retval = ddi_map(dip, mp, (off_t)0, (off_t)0, vaddrp);
548 547 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
549 548 mp->map_handlep->ah_acc.devacc_attr_access !=
550 549 DDI_DEFAULT_ACC) {
551 550 ndi_fmc_remove(rdip, ACC_HANDLE,
552 551 (void *)mp->map_handlep);
553 552 }
554 553 return (retval);
555 554
556 555 }
557 556
558 557 /* check for user mapping request - not legal for Config */
559 558 if (mp->map_op == DDI_MO_MAP_HANDLE && space == PCI_ADDR_CONFIG) {
560 559 cmn_err(CE_NOTE, "npe: Config mapping request from user\n");
561 560 return (DDI_FAILURE);
562 561 }
563 562
564 563
565 564 /*
566 565 * Note that pci_fm_acc_setup() is called to serve two purposes
567 566 * i) enable legacy PCI I/O style config space access
568 567 * ii) register with FMA
569 568 */
570 569 if (space == PCI_ADDR_CONFIG) {
571 570
572 571 /* Can't map config space without a handle */
573 572 hp = (ddi_acc_hdl_t *)mp->map_handlep;
574 573 if (hp == NULL)
575 574 return (DDI_FAILURE);
576 575
577 576 /* record the device address for future reference */
578 577 cfp = (pci_acc_cfblk_t *)&hp->ah_bus_private;
579 578 cfp->c_busnum = PCI_REG_BUS_G(pci_rp->pci_phys_hi);
580 579 cfp->c_devnum = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
581 580 cfp->c_funcnum = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
582 581
583 582 *vaddrp = (caddr_t)offset;
584 583
585 584 /* Check if MMCFG is supported */
586 585 if (!npe_is_mmcfg_supported(rdip)) {
587 586 return (npe_setup_std_pcicfg_acc(rdip, mp, hp,
588 587 offset, len));
589 588 }
590 589
591 590
592 591 if (ddi_prop_lookup_int64_array(DDI_DEV_T_ANY, rdip, 0,
593 592 "ecfg", &ecfginfo, &nelem) == DDI_PROP_SUCCESS) {
594 593
595 594 if (nelem != 4 ||
596 595 cfp->c_busnum < ecfginfo[2] ||
597 596 cfp->c_busnum > ecfginfo[3]) {
598 597 /*
599 598 * Invalid property or Doesn't contain the
600 599 * requested bus; fall back to standard
601 600 * (I/O-based) config access.
602 601 */
603 602 ddi_prop_free(ecfginfo);
604 603 return (npe_setup_std_pcicfg_acc(rdip, mp, hp,
605 604 offset, len));
606 605 } else {
607 606 pci_rp->pci_phys_low = ecfginfo[0];
608 607
609 608 ddi_prop_free(ecfginfo);
610 609
611 610 pci_rp->pci_phys_low += ((cfp->c_busnum << 20) |
612 611 (cfp->c_devnum) << 15 |
613 612 (cfp->c_funcnum << 12));
614 613
615 614 pci_rp->pci_size_low = PCIE_CONF_HDR_SIZE;
616 615 }
617 616 } else {
618 617 /*
619 618 * Couldn't find the MMCFG property -- fall back to
620 619 * standard config access
621 620 */
622 621 return (npe_setup_std_pcicfg_acc(rdip, mp, hp,
623 622 offset, len));
624 623 }
625 624 }
626 625
627 626 length = pci_rp->pci_size_low;
628 627
629 628 /*
630 629 * range check
631 630 */
632 631 if ((offset >= length) || (len > length) || (offset + len > length))
633 632 return (DDI_FAILURE);
634 633
635 634 /*
636 635 * Adjust offset and length
637 636 * A non-zero length means override the one in the regspec.
638 637 */
639 638 pci_rp->pci_phys_low += (uint_t)offset;
640 639 if (len != 0)
641 640 pci_rp->pci_size_low = len;
642 641
643 642 /*
644 643 * convert the pci regsec into the generic regspec used by the
645 644 * parent root nexus driver.
646 645 */
647 646 switch (space) {
648 647 case PCI_ADDR_IO:
649 648 reg.regspec_bustype = 1;
650 649 break;
651 650 case PCI_ADDR_CONFIG:
652 651 case PCI_ADDR_MEM64:
653 652 /*
654 653 * We can't handle 64-bit devices that are mapped above
655 654 * 4G or that are larger than 4G.
656 655 */
657 656 if (pci_rp->pci_phys_mid != 0 || pci_rp->pci_size_hi != 0)
658 657 return (DDI_FAILURE);
659 658 /*
660 659 * Other than that, we can treat them as 32-bit mappings
661 660 */
662 661 /* FALLTHROUGH */
663 662 case PCI_ADDR_MEM32:
664 663 reg.regspec_bustype = 0;
665 664 break;
666 665 default:
667 666 return (DDI_FAILURE);
668 667 }
669 668
670 669 reg.regspec_addr = pci_rp->pci_phys_low;
671 670 reg.regspec_size = pci_rp->pci_size_low;
672 671
673 672 	mp->map_obj.rp = &reg;
674 673 retval = ddi_map(dip, mp, (off_t)0, (off_t)0, vaddrp);
675 674 if (retval == DDI_SUCCESS) {
676 675 /*
677 676 * For config space gets force use of cautious access routines.
678 677 * These will handle default and protected mode accesses too.
679 678 */
680 679 if (space == PCI_ADDR_CONFIG) {
681 680 ap = (ddi_acc_impl_t *)mp->map_handlep;
682 681 ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
683 682 ap->ahi_acc_attr |= DDI_ACCATTR_CONFIG_SPACE;
684 683 ap->ahi_get8 = i_ddi_caut_get8;
685 684 ap->ahi_get16 = i_ddi_caut_get16;
686 685 ap->ahi_get32 = i_ddi_caut_get32;
687 686 ap->ahi_get64 = i_ddi_caut_get64;
688 687 ap->ahi_rep_get8 = i_ddi_caut_rep_get8;
689 688 ap->ahi_rep_get16 = i_ddi_caut_rep_get16;
690 689 ap->ahi_rep_get32 = i_ddi_caut_rep_get32;
691 690 ap->ahi_rep_get64 = i_ddi_caut_rep_get64;
692 691 }
693 692 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
694 693 mp->map_handlep->ah_acc.devacc_attr_access !=
695 694 DDI_DEFAULT_ACC) {
696 695 ndi_fmc_insert(rdip, ACC_HANDLE,
697 696 (void *)mp->map_handlep, NULL);
698 697 }
699 698 }
700 699 return (retval);
701 700 }
702 701
703 702
704 703
705 704 /*ARGSUSED*/
706 705 static int
707 706 npe_ctlops(dev_info_t *dip, dev_info_t *rdip,
708 707 ddi_ctl_enum_t ctlop, void *arg, void *result)
709 708 {
710 709 int rn;
711 710 int totreg;
712 711 uint_t reglen;
713 712 pci_regspec_t *drv_regp;
714 713 struct attachspec *asp;
715 714 struct detachspec *dsp;
716 715 pci_state_t *pci_p = ddi_get_soft_state(npe_statep,
717 716 ddi_get_instance(dip));
718 717
719 718 switch (ctlop) {
720 719 case DDI_CTLOPS_REPORTDEV:
721 720 if (rdip == (dev_info_t *)0)
722 721 return (DDI_FAILURE);
723 722 cmn_err(CE_CONT, "?PCI Express-device: %s@%s, %s%d\n",
724 723 ddi_node_name(rdip), ddi_get_name_addr(rdip),
725 724 ddi_driver_name(rdip), ddi_get_instance(rdip));
726 725 return (DDI_SUCCESS);
727 726
728 727 case DDI_CTLOPS_INITCHILD:
729 728 return (npe_initchild((dev_info_t *)arg));
730 729
731 730 case DDI_CTLOPS_UNINITCHILD:
732 731 return (npe_removechild((dev_info_t *)arg));
733 732
734 733 case DDI_CTLOPS_SIDDEV:
735 734 return (DDI_SUCCESS);
736 735
737 736 case DDI_CTLOPS_REGSIZE:
738 737 case DDI_CTLOPS_NREGS:
739 738 if (rdip == (dev_info_t *)0)
740 739 return (DDI_FAILURE);
741 740
742 741 *(int *)result = 0;
743 742 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
744 743 DDI_PROP_DONTPASS, "reg", (int **)&drv_regp,
745 744 	    &reglen) != DDI_PROP_SUCCESS) {
746 745 return (DDI_FAILURE);
747 746 }
748 747
749 748 totreg = (reglen * sizeof (int)) / sizeof (pci_regspec_t);
750 749 if (ctlop == DDI_CTLOPS_NREGS)
751 750 *(int *)result = totreg;
752 751 else if (ctlop == DDI_CTLOPS_REGSIZE) {
753 752 rn = *(int *)arg;
754 753 if (rn >= totreg) {
755 754 ddi_prop_free(drv_regp);
756 755 return (DDI_FAILURE);
757 756 }
758 757 *(off_t *)result = drv_regp[rn].pci_size_low;
759 758 }
760 759 ddi_prop_free(drv_regp);
761 760
762 761 return (DDI_SUCCESS);
763 762
764 763 case DDI_CTLOPS_POWER:
765 764 {
766 765 power_req_t *reqp = (power_req_t *)arg;
767 766 /*
768 767 * We currently understand reporting of PCI_PM_IDLESPEED
769 768 * capability. Everything else is passed up.
770 769 */
771 770 if ((reqp->request_type == PMR_REPORT_PMCAP) &&
772 771 (reqp->req.report_pmcap_req.cap == PCI_PM_IDLESPEED))
773 772 return (DDI_SUCCESS);
774 773
775 774 break;
776 775 }
777 776
778 777 case DDI_CTLOPS_PEEK:
779 778 case DDI_CTLOPS_POKE:
780 779 return (pci_common_peekpoke(dip, rdip, ctlop, arg, result));
781 780
782 781 /* X86 systems support PME wakeup from suspended state */
783 782 case DDI_CTLOPS_ATTACH:
784 783 if (!pcie_is_child(dip, rdip))
785 784 return (DDI_SUCCESS);
786 785
787 786 asp = (struct attachspec *)arg;
788 787 if ((asp->when == DDI_POST) && (asp->result == DDI_SUCCESS)) {
789 788 pf_init(rdip, (void *)pci_p->pci_fm_ibc, asp->cmd);
790 789 (void) pcie_postattach_child(rdip);
791 790 }
792 791
793 792 /* only do this for immediate children */
794 793 if (asp->cmd == DDI_RESUME && asp->when == DDI_PRE &&
795 794 ddi_get_parent(rdip) == dip)
796 795 if (pci_pre_resume(rdip) != DDI_SUCCESS) {
797 796 /* Not good, better stop now. */
798 797 cmn_err(CE_PANIC,
799 798 "Couldn't pre-resume device %p",
800 799 (void *) dip);
801 800 /* NOTREACHED */
802 801 }
803 802
804 803 return (DDI_SUCCESS);
805 804
806 805 case DDI_CTLOPS_DETACH:
807 806 if (!pcie_is_child(dip, rdip))
808 807 return (DDI_SUCCESS);
809 808
810 809 dsp = (struct detachspec *)arg;
811 810
812 811 if (dsp->when == DDI_PRE)
813 812 pf_fini(rdip, dsp->cmd);
814 813
815 814 /* only do this for immediate children */
816 815 if (dsp->cmd == DDI_SUSPEND && dsp->when == DDI_POST &&
817 816 ddi_get_parent(rdip) == dip)
818 817 if (pci_post_suspend(rdip) != DDI_SUCCESS)
819 818 return (DDI_FAILURE);
820 819
821 820 return (DDI_SUCCESS);
822 821
823 822 default:
824 823 break;
825 824 }
826 825
827 826 return (ddi_ctlops(dip, rdip, ctlop, arg, result));
828 827
829 828 }
830 829
831 830
832 831 /*
833 832 * npe_intr_ops
834 833 */
835 834 static int
836 835 npe_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
837 836 ddi_intr_handle_impl_t *hdlp, void *result)
838 837 {
839 838 return (pci_common_intr_ops(pdip, rdip, intr_op, hdlp, result));
840 839 }
841 840
842 841
843 842 static int
844 843 npe_initchild(dev_info_t *child)
845 844 {
846 845 char name[80];
847 846 pcie_bus_t *bus_p;
848 847 uint32_t regs;
849 848 ddi_acc_handle_t cfg_hdl;
850 849
851 850 /*
852 851 * Do not bind drivers to empty bridges.
853 852 * Fail above, if the bridge is found to be hotplug capable
854 853 */
855 854 if (npe_disable_empty_bridges_workaround(child) == 1)
856 855 return (DDI_FAILURE);
857 856
858 857 if (pci_common_name_child(child, name, 80) != DDI_SUCCESS)
859 858 return (DDI_FAILURE);
860 859
861 860 ddi_set_name_addr(child, name);
862 861
863 862 /*
864 863 * Pseudo nodes indicate a prototype node with per-instance
865 864 * properties to be merged into the real h/w device node.
866 865 * The interpretation of the unit-address is DD[,F]
867 866 * where DD is the device id and F is the function.
868 867 */
869 868 if (ndi_dev_is_persistent_node(child) == 0) {
870 869 extern int pci_allow_pseudo_children;
871 870
872 871 ddi_set_parent_data(child, NULL);
873 872
874 873 /*
875 874 * Try to merge the properties from this prototype
876 875 * node into real h/w nodes.
877 876 */
878 877 if (ndi_merge_node(child, pci_common_name_child) ==
879 878 DDI_SUCCESS) {
880 879 /*
881 880 * Merged ok - return failure to remove the node.
882 881 */
883 882 ddi_set_name_addr(child, NULL);
884 883 return (DDI_FAILURE);
885 884 }
886 885
887 886 /* workaround for DDIVS to run under PCI Express */
888 887 if (pci_allow_pseudo_children) {
889 888 /*
890 889 * If the "interrupts" property doesn't exist,
891 890 * this must be the ddivs no-intr case, and it returns
892 891 * DDI_SUCCESS instead of DDI_FAILURE.
893 892 */
894 893 if (ddi_prop_get_int(DDI_DEV_T_ANY, child,
895 894 DDI_PROP_DONTPASS, "interrupts", -1) == -1)
896 895 return (DDI_SUCCESS);
897 896 /*
898 897 * Create the ddi_parent_private_data for a pseudo
899 898 * child.
900 899 */
901 900 pci_common_set_parent_private_data(child);
902 901 return (DDI_SUCCESS);
903 902 }
904 903
905 904 /*
906 905 * The child was not merged into a h/w node,
907 906 * but there's not much we can do with it other
908 907 * than return failure to cause the node to be removed.
909 908 */
910 909 cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
911 910 ddi_get_name(child), ddi_get_name_addr(child),
912 911 ddi_get_name(child));
913 912 ddi_set_name_addr(child, NULL);
914 913 return (DDI_NOT_WELL_FORMED);
915 914 }
916 915
917 916 if (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
918 917 "interrupts", -1) != -1)
919 918 pci_common_set_parent_private_data(child);
920 919 else
921 920 ddi_set_parent_data(child, NULL);
922 921
923 922 /* Disable certain errors on PCIe drivers for x86 platforms */
924 923 regs = pcie_get_aer_uce_mask() | npe_aer_uce_mask;
925 924 pcie_set_aer_uce_mask(regs);
926 925 regs = pcie_get_aer_ce_mask() | npe_aer_ce_mask;
927 926 pcie_set_aer_ce_mask(regs);
928 927 regs = pcie_get_aer_suce_mask() | npe_aer_suce_mask;
929 928 pcie_set_aer_suce_mask(regs);
930 929
931 930 /*
932 931 * If URs are disabled, mask SERRs as well, otherwise the system will
933 932 * still be notified of URs
934 933 */
935 934 if (npe_aer_uce_mask & PCIE_AER_UCE_UR)
936 935 pcie_set_serr_mask(1);
937 936
938 937 if (pci_config_setup(child, &cfg_hdl) == DDI_SUCCESS) {
939 938 npe_ck804_fix_aer_ptr(cfg_hdl);
940 939 npe_nvidia_error_workaround(cfg_hdl);
941 940 npe_intel_error_workaround(cfg_hdl);
942 941 pci_config_teardown(&cfg_hdl);
943 942 }
944 943
945 944 bus_p = PCIE_DIP2BUS(child);
946 945 if (bus_p) {
947 946 uint16_t device_id = (uint16_t)(bus_p->bus_dev_ven_id >> 16);
948 947 uint16_t vendor_id = (uint16_t)(bus_p->bus_dev_ven_id & 0xFFFF);
949 948 uint16_t rev_id = bus_p->bus_rev_id;
950 949
951 950 /* Disable AER for certain NVIDIA Chipsets */
952 951 if ((vendor_id == NVIDIA_VENDOR_ID) &&
953 952 (device_id == NVIDIA_CK804_DEVICE_ID) &&
954 953 (rev_id < NVIDIA_CK804_AER_VALID_REVID))
955 954 bus_p->bus_aer_off = 0;
956 955
957 956 pcie_init_dom(child);
958 957 (void) pcie_initchild(child);
959 958 }
960 959
961 960 return (DDI_SUCCESS);
962 961 }
963 962
964 963
965 964 static int
966 965 npe_removechild(dev_info_t *dip)
967 966 {
968 967 pcie_uninitchild(dip);
969 968
970 969 ddi_set_name_addr(dip, NULL);
971 970
972 971 /*
973 972 * Strip the node to properly convert it back to prototype form
974 973 */
975 974 ddi_remove_minor_node(dip, NULL);
976 975
977 976 ddi_prop_remove_all(dip);
978 977
979 978 return (DDI_SUCCESS);
980 979 }
981 980
982 981 static int
983 982 npe_open(dev_t *devp, int flags, int otyp, cred_t *credp)
984 983 {
985 984 minor_t minor = getminor(*devp);
986 985 int instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
987 986 pci_state_t *pci_p = ddi_get_soft_state(npe_statep, instance);
988 987 int rv;
989 988
990 989 /*
991 990 * Make sure the open is for the right file type.
992 991 */
993 992 if (otyp != OTYP_CHR)
994 993 return (EINVAL);
995 994
996 995 if (pci_p == NULL)
997 996 return (ENXIO);
998 997
999 998 mutex_enter(&pci_p->pci_mutex);
1000 999 switch (PCI_MINOR_NUM_TO_PCI_DEVNUM(minor)) {
1001 1000 case PCI_TOOL_REG_MINOR_NUM:
1002 1001 case PCI_TOOL_INTR_MINOR_NUM:
1003 1002 break;
1004 1003 default:
1005 1004 /* Handle devctl ioctls */
1006 1005 rv = pcie_open(pci_p->pci_dip, devp, flags, otyp, credp);
1007 1006 mutex_exit(&pci_p->pci_mutex);
1008 1007 return (rv);
1009 1008 }
1010 1009
1011 1010 /* Handle pcitool ioctls */
1012 1011 if (flags & FEXCL) {
1013 1012 if (pci_p->pci_soft_state != PCI_SOFT_STATE_CLOSED) {
1014 1013 mutex_exit(&pci_p->pci_mutex);
1015 1014 cmn_err(CE_NOTE, "npe_open: busy");
1016 1015 return (EBUSY);
1017 1016 }
1018 1017 pci_p->pci_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
1019 1018 } else {
1020 1019 if (pci_p->pci_soft_state == PCI_SOFT_STATE_OPEN_EXCL) {
1021 1020 mutex_exit(&pci_p->pci_mutex);
1022 1021 cmn_err(CE_NOTE, "npe_open: busy");
1023 1022 return (EBUSY);
1024 1023 }
1025 1024 pci_p->pci_soft_state = PCI_SOFT_STATE_OPEN;
1026 1025 }
1027 1026 mutex_exit(&pci_p->pci_mutex);
1028 1027
1029 1028 return (0);
1030 1029 }
1031 1030
1032 1031 static int
1033 1032 npe_close(dev_t dev, int flags, int otyp, cred_t *credp)
1034 1033 {
1035 1034 minor_t minor = getminor(dev);
1036 1035 int instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
1037 1036 pci_state_t *pci_p = ddi_get_soft_state(npe_statep, instance);
1038 1037 int rv;
1039 1038
1040 1039 if (pci_p == NULL)
1041 1040 return (ENXIO);
1042 1041
1043 1042 mutex_enter(&pci_p->pci_mutex);
1044 1043
1045 1044 switch (PCI_MINOR_NUM_TO_PCI_DEVNUM(minor)) {
1046 1045 case PCI_TOOL_REG_MINOR_NUM:
1047 1046 case PCI_TOOL_INTR_MINOR_NUM:
1048 1047 break;
1049 1048 default:
1050 1049 /* Handle devctl ioctls */
1051 1050 rv = pcie_close(pci_p->pci_dip, dev, flags, otyp, credp);
1052 1051 mutex_exit(&pci_p->pci_mutex);
1053 1052 return (rv);
1054 1053 }
1055 1054
1056 1055 /* Handle pcitool ioctls */
1057 1056 pci_p->pci_soft_state = PCI_SOFT_STATE_CLOSED;
1058 1057 mutex_exit(&pci_p->pci_mutex);
1059 1058 return (0);
1060 1059 }
1061 1060
1062 1061 static int
1063 1062 npe_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
1064 1063 {
1065 1064 minor_t minor = getminor(dev);
1066 1065 int instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
1067 1066 pci_state_t *pci_p = ddi_get_soft_state(npe_statep, instance);
1068 1067 int ret = ENOTTY;
1069 1068
1070 1069 if (pci_p == NULL)
1071 1070 return (ENXIO);
1072 1071
1073 1072 switch (PCI_MINOR_NUM_TO_PCI_DEVNUM(minor)) {
1074 1073 case PCI_TOOL_REG_MINOR_NUM:
1075 1074 case PCI_TOOL_INTR_MINOR_NUM:
1076 1075 /* To handle pcitool related ioctls */
1077 1076 ret = pci_common_ioctl(pci_p->pci_dip, dev, cmd, arg, mode,
1078 1077 credp, rvalp);
1079 1078 break;
1080 1079 default:
1081 1080 /* To handle devctl and hotplug related ioctls */
1082 1081 ret = pcie_ioctl(pci_p->pci_dip, dev, cmd, arg, mode, credp,
1083 1082 rvalp);
1084 1083 break;
1085 1084 }
1086 1085
1087 1086 return (ret);
1088 1087 }
1089 1088
1090 1089 /*ARGSUSED*/
1091 1090 static int
1092 1091 npe_fm_init(dev_info_t *dip, dev_info_t *tdip, int cap,
1093 1092 ddi_iblock_cookie_t *ibc)
1094 1093 {
1095 1094 pci_state_t *pcip = ddi_get_soft_state(npe_statep,
1096 1095 ddi_get_instance(dip));
1097 1096
1098 1097 ASSERT(ibc != NULL);
1099 1098 *ibc = pcip->pci_fm_ibc;
1100 1099
1101 1100 return (pcip->pci_fmcap);
1102 1101 }
1103 1102
1104 1103 /*ARGSUSED*/
1105 1104 static int
1106 1105 npe_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr, const void *no_used)
1107 1106 {
1108 1107 /*
1109 1108 * On current x86 systems, npe's callback does not get called for failed
1110 1109 * loads. If in the future this feature is used, the fault PA should be
1111 1110 * logged in the derr->fme_bus_specific field. The appropriate PCIe
1112 1111 * error handling code should be called and needs to be coordinated with
1113 1112 * safe access handling.
1114 1113 */
1115 1114
1116 1115 return (DDI_FM_OK);
1117 1116 }
↓ open down ↓ |
908 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX