1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright 2010 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
25 */
26
27 /*
28 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
29 */
30
31 /*
32 * Host to PCI-Express local bus driver
33 */
34
35 #include <sys/conf.h>
36 #include <sys/modctl.h>
37 #include <sys/file.h>
38 #include <sys/pci_impl.h>
39 #include <sys/pcie_impl.h>
40 #include <sys/sysmacros.h>
41 #include <sys/ddi_intr.h>
42 #include <sys/sunndi.h>
43 #include <sys/sunddi.h>
44 #include <sys/ddifm.h>
45 #include <sys/ndifm.h>
46 #include <sys/fm/util.h>
47 #include <sys/hotplug/pci/pcie_hp.h>
48 #include <io/pci/pci_tools_ext.h>
49 #include <io/pci/pci_common.h>
50 #include <io/pciex/pcie_nvidia.h>
51
52 /*
53 * Helper Macros
54 */
55 #define NPE_IS_HANDLE_FOR_STDCFG_ACC(hp) \
56 ((hp) != NULL && \
57 ((ddi_acc_hdl_t *)(hp))->ah_platform_private != NULL && \
58 (((ddi_acc_impl_t *)((ddi_acc_hdl_t *)(hp))-> \
59 ah_platform_private)-> \
60 ahi_acc_attr &(DDI_ACCATTR_CPU_VADDR|DDI_ACCATTR_CONFIG_SPACE)) \
61 == DDI_ACCATTR_CONFIG_SPACE)
62
63 /*
64 * Bus Operation functions
65 */
66 static int npe_bus_map(dev_info_t *, dev_info_t *, ddi_map_req_t *,
67 off_t, off_t, caddr_t *);
68 static int npe_ctlops(dev_info_t *, dev_info_t *, ddi_ctl_enum_t,
69 void *, void *);
70 static int npe_intr_ops(dev_info_t *, dev_info_t *, ddi_intr_op_t,
71 ddi_intr_handle_impl_t *, void *);
72 static int npe_fm_init(dev_info_t *, dev_info_t *, int,
73 ddi_iblock_cookie_t *);
74
75 static int npe_fm_callback(dev_info_t *, ddi_fm_error_t *, const void *);
76
77 /*
78 * Disable URs and Received MA for all PCIe devices. Until x86 SW is changed so
79 * that random drivers do not do PIO accesses on devices that it does not own,
80 * these error bits must be disabled. SERR must also be disabled if URs have
81 * been masked.
82 */
83 uint32_t npe_aer_uce_mask = PCIE_AER_UCE_UR;
84 uint32_t npe_aer_ce_mask = 0;
85 uint32_t npe_aer_suce_mask = PCIE_AER_SUCE_RCVD_MA;
86
struct bus_ops npe_bus_ops = {
	BUSO_REV,
	npe_bus_map,
	NULL,			/* (*bus_get_intrspec)(); */
	NULL,			/* (*bus_add_intrspec)(); */
	NULL,			/* (*bus_remove_intrspec)(); */
	i_ddi_map_fault,
	NULL,			/* (*bus_dma_map)(); */
	ddi_dma_allochdl,
	ddi_dma_freehdl,
	ddi_dma_bindhdl,
	ddi_dma_unbindhdl,
	ddi_dma_flush,
	ddi_dma_win,
	ddi_dma_mctl,
	npe_ctlops,
	ddi_bus_prop_op,
	0,		/* (*bus_get_eventcookie)(); */
	0,		/* (*bus_add_eventcall)(); */
	0,		/* (*bus_remove_eventcall)(); */
	0,		/* (*bus_post_event)(); */
	0,		/* (*bus_intr_ctl)(); */
	0,		/* (*bus_config)(); */
	0,		/* (*bus_unconfig)(); */
	npe_fm_init,	/* (*bus_fm_init)(); */
	NULL,		/* (*bus_fm_fini)(); */
	NULL,		/* (*bus_fm_access_enter)(); */
	NULL,		/* (*bus_fm_access_exit)(); */
	NULL,		/* (*bus_power)(); */
	npe_intr_ops,	/* (*bus_intr_op)(); */
	pcie_hp_common_ops	/* (*bus_hp_op)(); */
};
119
/* Character-device entry points (pcitool and devctl minors) */
static int npe_open(dev_t *, int, int, cred_t *);
static int npe_close(dev_t, int, int, cred_t *);
static int npe_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);

struct cb_ops npe_cb_ops = {
	npe_open,			/* open */
	npe_close,			/* close */
	nodev,				/* strategy */
	nodev,				/* print */
	nodev,				/* dump */
	nodev,				/* read */
	nodev,				/* write */
	npe_ioctl,			/* ioctl */
	nodev,				/* devmap */
	nodev,				/* mmap */
	nodev,				/* segmap */
	nochpoll,			/* poll */
	pcie_prop_op,			/* cb_prop_op */
	NULL,				/* streamtab */
	D_NEW | D_MP | D_HOTPLUG,	/* Driver compatibility flag */
	CB_REV,				/* rev */
	nodev,				/* int (*cb_aread)() */
	nodev				/* int (*cb_awrite)() */
};
144
145
146 /*
147 * Device Node Operation functions
148 */
149 static int npe_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
150 static int npe_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
151 static int npe_info(dev_info_t *, ddi_info_cmd_t, void *, void **);
152
153 struct dev_ops npe_ops = {
154 DEVO_REV, /* devo_rev */
155 0, /* refcnt */
156 npe_info, /* info */
157 nulldev, /* identify */
158 nulldev, /* probe */
159 npe_attach, /* attach */
160 npe_detach, /* detach */
161 nulldev, /* reset */
162 &npe_cb_ops, /* driver operations */
163 &npe_bus_ops, /* bus operations */
164 NULL, /* power */
165 ddi_quiesce_not_needed, /* quiesce */
166 };
167
168 /*
169 * Internal routines in support of particular npe_ctlops.
170 */
171 static int npe_removechild(dev_info_t *child);
172 static int npe_initchild(dev_info_t *child);
173
174 /*
175 * External support routine
176 */
177 extern void npe_query_acpi_mcfg(dev_info_t *dip);
178 extern void npe_ck804_fix_aer_ptr(ddi_acc_handle_t cfg_hdl);
179 extern int npe_disable_empty_bridges_workaround(dev_info_t *child);
180 extern void npe_nvidia_error_workaround(ddi_acc_handle_t cfg_hdl);
181 extern void npe_intel_error_workaround(ddi_acc_handle_t cfg_hdl);
182 extern boolean_t npe_is_mmcfg_supported(dev_info_t *dip);
183 extern void npe_enable_htmsi_children(dev_info_t *dip);
184 extern int npe_save_htconfig_children(dev_info_t *dip);
185 extern int npe_restore_htconfig_children(dev_info_t *dip);
186
187 /*
188 * Module linkage information for the kernel.
189 */
190 static struct modldrv modldrv = {
191 &mod_driverops, /* Type of module */
192 "Host to PCIe nexus driver", /* Name of module */
193 &npe_ops, /* driver ops */
194 };
195
196 static struct modlinkage modlinkage = {
197 MODREV_1,
198 { (void *)&modldrv, NULL }
199 };
200
201 /* Save minimal state. */
202 void *npe_statep;
203
204 int
205 _init(void)
206 {
207 int e;
208
209 /*
210 * Initialize per-pci bus soft state pointer.
211 */
212 e = ddi_soft_state_init(&npe_statep, sizeof (pci_state_t), 1);
213 if (e != 0)
214 return (e);
215
216 if ((e = mod_install(&modlinkage)) != 0)
217 ddi_soft_state_fini(&npe_statep);
218
219 return (e);
220 }
221
222
223 int
224 _fini(void)
225 {
226 int rc;
227
228 rc = mod_remove(&modlinkage);
229 if (rc != 0)
230 return (rc);
231
232 ddi_soft_state_fini(&npe_statep);
233 return (rc);
234 }
235
236
int
_info(struct modinfo *modinfop)
{
	/* Standard module-information entry point. */
	return (mod_info(&modlinkage, modinfop));
}
242
243 /*ARGSUSED*/
244 static int
245 npe_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
246 {
247 minor_t minor = getminor((dev_t)arg);
248 int instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
249 pci_state_t *pcip = ddi_get_soft_state(npe_statep, instance);
250 int ret = DDI_SUCCESS;
251
252 switch (cmd) {
253 case DDI_INFO_DEVT2INSTANCE:
254 *result = (void *)(intptr_t)instance;
255 break;
256 case DDI_INFO_DEVT2DEVINFO:
257 if (pcip == NULL) {
258 ret = DDI_FAILURE;
259 break;
260 }
261
262 *result = (void *)pcip->pci_dip;
263 break;
264 default:
265 ret = DDI_FAILURE;
266 break;
267 }
268
269 return (ret);
270 }
271
/*ARGSUSED*/
static int
npe_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
{
	int instance = ddi_get_instance(devi);
	pci_state_t *pcip = NULL;

	if (cmd == DDI_RESUME) {
		/*
		 * the system might still be able to resume even if this fails
		 */
		(void) npe_restore_htconfig_children(devi);
		return (DDI_SUCCESS);
	}

	/*
	 * We must do this here in order to ensure that all top level devices
	 * get their HyperTransport MSI mapping regs programmed first.
	 * "Memory controller" and "hostbridge" class devices are leaf devices
	 * that may affect MSI translation functionality for devices
	 * connected to the same link/bus.
	 *
	 * This will also program HT MSI mapping registers on root buses
	 * devices (basically sitting on an HT bus) that are not dependent
	 * on the aforementioned HT devices for MSI translation.
	 */
	npe_enable_htmsi_children(devi);

	/* Non-fatal: warn but continue attaching if the prop fails. */
	if (ddi_prop_update_string(DDI_DEV_T_NONE, devi, "device_type",
	    "pciex") != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "npe: 'device_type' prop create failed");
	}

	if (ddi_soft_state_zalloc(npe_statep, instance) == DDI_SUCCESS)
		pcip = ddi_get_soft_state(npe_statep, instance);

	if (pcip == NULL)
		return (DDI_FAILURE);

	pcip->pci_dip = devi;
	pcip->pci_soft_state = PCI_SOFT_STATE_CLOSED;

	/* Bring up the PCIe framework state for this root nexus. */
	if (pcie_init(devi, NULL) != DDI_SUCCESS)
		goto fail1;

	/* Second arg: initialize for pci_express root nexus */
	if (pcitool_init(devi, B_TRUE) != DDI_SUCCESS)
		goto fail2;

	/* Register FMA capabilities and the error callback. */
	pcip->pci_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
	ddi_fm_init(devi, &pcip->pci_fmcap, &pcip->pci_fm_ibc);

	if (pcip->pci_fmcap & DDI_FM_ERRCB_CAPABLE) {
		ddi_fm_handler_register(devi, npe_fm_callback, NULL);
	}

	/* Root-complex fault data, freed in npe_detach(). */
	PCIE_DIP2PFD(devi) = kmem_zalloc(sizeof (pf_data_t), KM_SLEEP);
	pcie_rc_init_pfd(devi, PCIE_DIP2PFD(devi));

	npe_query_acpi_mcfg(devi);
	ddi_report_dev(devi);
	pcie_fab_init_bus(devi, PCIE_BUS_FINAL);

	return (DDI_SUCCESS);

fail2:
	(void) pcie_uninit(devi);
fail1:
	pcie_rc_fini_bus(devi);
	ddi_soft_state_free(npe_statep, instance);

	return (DDI_FAILURE);
}
346
347 /*ARGSUSED*/
348 static int
349 npe_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
350 {
351 int instance = ddi_get_instance(devi);
352 pci_state_t *pcip;
353
354 pcip = ddi_get_soft_state(npe_statep, ddi_get_instance(devi));
355
356 switch (cmd) {
357 case DDI_DETACH:
358 pcie_fab_fini_bus(devi, PCIE_BUS_INITIAL);
359
360 /* Uninitialize pcitool support. */
361 pcitool_uninit(devi);
362
363 if (pcie_uninit(devi) != DDI_SUCCESS)
364 return (DDI_FAILURE);
365
366 if (pcip->pci_fmcap & DDI_FM_ERRCB_CAPABLE)
367 ddi_fm_handler_unregister(devi);
368
369 pcie_rc_fini_pfd(PCIE_DIP2PFD(devi));
370 kmem_free(PCIE_DIP2PFD(devi), sizeof (pf_data_t));
371
372 ddi_fm_fini(devi);
373 ddi_soft_state_free(npe_statep, instance);
374 return (DDI_SUCCESS);
375
376 case DDI_SUSPEND:
377 /*
378 * the system might still be able to suspend/resume even if
379 * this fails
380 */
381 (void) npe_save_htconfig_children(devi);
382 return (DDI_SUCCESS);
383 default:
384 return (DDI_FAILURE);
385 }
386 }
387
388 /*
389 * Configure the access handle for standard configuration space
390 * access (see pci_fm_acc_setup for code that initializes the
391 * access-function pointers).
392 */
393 static int
394 npe_setup_std_pcicfg_acc(dev_info_t *rdip, ddi_map_req_t *mp,
395 ddi_acc_hdl_t *hp, off_t offset, off_t len)
396 {
397 int ret;
398
399 if ((ret = pci_fm_acc_setup(hp, offset, len)) ==
400 DDI_SUCCESS) {
401 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
402 mp->map_handlep->ah_acc.devacc_attr_access
403 != DDI_DEFAULT_ACC) {
404 ndi_fmc_insert(rdip, ACC_HANDLE,
405 (void *)mp->map_handlep, NULL);
406 }
407 }
408 return (ret);
409 }
410
411 static int
412 npe_bus_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
413 off_t offset, off_t len, caddr_t *vaddrp)
414 {
415 int rnumber;
416 int length;
417 int space;
418 ddi_acc_impl_t *ap;
419 ddi_acc_hdl_t *hp;
420 ddi_map_req_t mr;
421 pci_regspec_t pci_reg;
422 pci_regspec_t *pci_rp;
423 struct regspec reg;
424 pci_acc_cfblk_t *cfp;
425 int retval;
426 int64_t *ecfginfo;
427 uint_t nelem;
428
429 mr = *mp; /* Get private copy of request */
430 mp = &mr;
431
432 /*
433 * check for register number
434 */
435 switch (mp->map_type) {
436 case DDI_MT_REGSPEC:
437 pci_reg = *(pci_regspec_t *)(mp->map_obj.rp);
438 pci_rp = &pci_reg;
439 if (pci_common_get_reg_prop(rdip, pci_rp) != DDI_SUCCESS)
440 return (DDI_FAILURE);
441 break;
442 case DDI_MT_RNUMBER:
443 rnumber = mp->map_obj.rnumber;
444 /*
445 * get ALL "reg" properties for dip, select the one of
446 * of interest. In x86, "assigned-addresses" property
447 * is identical to the "reg" property, so there is no
448 * need to cross check the two to determine the physical
449 * address of the registers.
450 * This routine still performs some validity checks to
451 * make sure that everything is okay.
452 */
453 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
454 DDI_PROP_DONTPASS, "reg", (int **)&pci_rp,
455 (uint_t *)&length) != DDI_PROP_SUCCESS)
456 return (DDI_FAILURE);
457
458 /*
459 * validate the register number.
460 */
461 length /= (sizeof (pci_regspec_t) / sizeof (int));
462 if (rnumber >= length) {
463 ddi_prop_free(pci_rp);
464 return (DDI_FAILURE);
465 }
466
467 /*
468 * copy the required entry.
469 */
470 pci_reg = pci_rp[rnumber];
471
472 /*
473 * free the memory allocated by ddi_prop_lookup_int_array
474 */
475 ddi_prop_free(pci_rp);
476
477 pci_rp = &pci_reg;
478 if (pci_common_get_reg_prop(rdip, pci_rp) != DDI_SUCCESS)
479 return (DDI_FAILURE);
480 mp->map_type = DDI_MT_REGSPEC;
481 break;
482 default:
483 return (DDI_ME_INVAL);
484 }
485
486 space = pci_rp->pci_phys_hi & PCI_REG_ADDR_M;
487
488 /*
489 * check for unmap and unlock of address space
490 */
491 if ((mp->map_op == DDI_MO_UNMAP) || (mp->map_op == DDI_MO_UNLOCK)) {
492 switch (space) {
493 case PCI_ADDR_IO:
494 reg.regspec_bustype = 1;
495 break;
496
497 case PCI_ADDR_CONFIG:
498 /*
499 * If this is an unmap/unlock of a standard config
500 * space mapping (memory-mapped config space mappings
501 * would have the DDI_ACCATTR_CPU_VADDR bit set in the
502 * acc_attr), undo that setup here.
503 */
504 if (NPE_IS_HANDLE_FOR_STDCFG_ACC(mp->map_handlep)) {
505
506 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
507 mp->map_handlep->ah_acc.devacc_attr_access
508 != DDI_DEFAULT_ACC) {
509 ndi_fmc_remove(rdip, ACC_HANDLE,
510 (void *)mp->map_handlep);
511 }
512 return (DDI_SUCCESS);
513 }
514
515 pci_rp->pci_size_low = PCIE_CONF_HDR_SIZE;
516
517 /* FALLTHROUGH */
518 case PCI_ADDR_MEM64:
519 /*
520 * MEM64 requires special treatment on map, to check
521 * that the device is below 4G. On unmap, however,
522 * we can assume that everything is OK... the map
523 * must have succeeded.
524 */
525 /* FALLTHROUGH */
526 case PCI_ADDR_MEM32:
527 reg.regspec_bustype = 0;
528 break;
529
530 default:
531 return (DDI_FAILURE);
532 }
533
534 /*
535 * Adjust offset and length
536 * A non-zero length means override the one in the regspec.
537 */
538 pci_rp->pci_phys_low += (uint_t)offset;
539 if (len != 0)
540 pci_rp->pci_size_low = len;
541
542 reg.regspec_addr = pci_rp->pci_phys_low;
543 reg.regspec_size = pci_rp->pci_size_low;
544
545 mp->map_obj.rp = ®
546 retval = ddi_map(dip, mp, (off_t)0, (off_t)0, vaddrp);
547 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
548 mp->map_handlep->ah_acc.devacc_attr_access !=
549 DDI_DEFAULT_ACC) {
550 ndi_fmc_remove(rdip, ACC_HANDLE,
551 (void *)mp->map_handlep);
552 }
553 return (retval);
554
555 }
556
557 /* check for user mapping request - not legal for Config */
558 if (mp->map_op == DDI_MO_MAP_HANDLE && space == PCI_ADDR_CONFIG) {
559 cmn_err(CE_NOTE, "npe: Config mapping request from user\n");
560 return (DDI_FAILURE);
561 }
562
563
564 /*
565 * Note that pci_fm_acc_setup() is called to serve two purposes
566 * i) enable legacy PCI I/O style config space access
567 * ii) register with FMA
568 */
569 if (space == PCI_ADDR_CONFIG) {
570
571 /* Can't map config space without a handle */
572 hp = (ddi_acc_hdl_t *)mp->map_handlep;
573 if (hp == NULL)
574 return (DDI_FAILURE);
575
576 /* record the device address for future reference */
577 cfp = (pci_acc_cfblk_t *)&hp->ah_bus_private;
578 cfp->c_busnum = PCI_REG_BUS_G(pci_rp->pci_phys_hi);
579 cfp->c_devnum = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
580 cfp->c_funcnum = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);
581
582 *vaddrp = (caddr_t)offset;
583
584 /* Check if MMCFG is supported */
585 if (!npe_is_mmcfg_supported(rdip)) {
586 return (npe_setup_std_pcicfg_acc(rdip, mp, hp,
587 offset, len));
588 }
589
590
591 if (ddi_prop_lookup_int64_array(DDI_DEV_T_ANY, rdip, 0,
592 "ecfg", &ecfginfo, &nelem) == DDI_PROP_SUCCESS) {
593
594 if (nelem != 4 ||
595 cfp->c_busnum < ecfginfo[2] ||
596 cfp->c_busnum > ecfginfo[3]) {
597 /*
598 * Invalid property or Doesn't contain the
599 * requested bus; fall back to standard
600 * (I/O-based) config access.
601 */
602 ddi_prop_free(ecfginfo);
603 return (npe_setup_std_pcicfg_acc(rdip, mp, hp,
604 offset, len));
605 } else {
606 pci_rp->pci_phys_low = ecfginfo[0];
607
608 ddi_prop_free(ecfginfo);
609
610 pci_rp->pci_phys_low += ((cfp->c_busnum << 20) |
611 (cfp->c_devnum) << 15 |
612 (cfp->c_funcnum << 12));
613
614 pci_rp->pci_size_low = PCIE_CONF_HDR_SIZE;
615 }
616 } else {
617 /*
618 * Couldn't find the MMCFG property -- fall back to
619 * standard config access
620 */
621 return (npe_setup_std_pcicfg_acc(rdip, mp, hp,
622 offset, len));
623 }
624 }
625
626 length = pci_rp->pci_size_low;
627
628 /*
629 * range check
630 */
631 if ((offset >= length) || (len > length) || (offset + len > length))
632 return (DDI_FAILURE);
633
634 /*
635 * Adjust offset and length
636 * A non-zero length means override the one in the regspec.
637 */
638 pci_rp->pci_phys_low += (uint_t)offset;
639 if (len != 0)
640 pci_rp->pci_size_low = len;
641
642 /*
643 * convert the pci regsec into the generic regspec used by the
644 * parent root nexus driver.
645 */
646 switch (space) {
647 case PCI_ADDR_IO:
648 reg.regspec_bustype = 1;
649 break;
650 case PCI_ADDR_CONFIG:
651 case PCI_ADDR_MEM64:
652 /*
653 * We can't handle 64-bit devices that are mapped above
654 * 4G or that are larger than 4G.
655 */
656 if (pci_rp->pci_phys_mid != 0 || pci_rp->pci_size_hi != 0)
657 return (DDI_FAILURE);
658 /*
659 * Other than that, we can treat them as 32-bit mappings
660 */
661 /* FALLTHROUGH */
662 case PCI_ADDR_MEM32:
663 reg.regspec_bustype = 0;
664 break;
665 default:
666 return (DDI_FAILURE);
667 }
668
669 reg.regspec_addr = pci_rp->pci_phys_low;
670 reg.regspec_size = pci_rp->pci_size_low;
671
672 mp->map_obj.rp = ®
673 retval = ddi_map(dip, mp, (off_t)0, (off_t)0, vaddrp);
674 if (retval == DDI_SUCCESS) {
675 /*
676 * For config space gets force use of cautious access routines.
677 * These will handle default and protected mode accesses too.
678 */
679 if (space == PCI_ADDR_CONFIG) {
680 ap = (ddi_acc_impl_t *)mp->map_handlep;
681 ap->ahi_acc_attr &= ~DDI_ACCATTR_DIRECT;
682 ap->ahi_acc_attr |= DDI_ACCATTR_CONFIG_SPACE;
683 ap->ahi_get8 = i_ddi_caut_get8;
684 ap->ahi_get16 = i_ddi_caut_get16;
685 ap->ahi_get32 = i_ddi_caut_get32;
686 ap->ahi_get64 = i_ddi_caut_get64;
687 ap->ahi_rep_get8 = i_ddi_caut_rep_get8;
688 ap->ahi_rep_get16 = i_ddi_caut_rep_get16;
689 ap->ahi_rep_get32 = i_ddi_caut_rep_get32;
690 ap->ahi_rep_get64 = i_ddi_caut_rep_get64;
691 }
692 if (DDI_FM_ACC_ERR_CAP(ddi_fm_capable(rdip)) &&
693 mp->map_handlep->ah_acc.devacc_attr_access !=
694 DDI_DEFAULT_ACC) {
695 ndi_fmc_insert(rdip, ACC_HANDLE,
696 (void *)mp->map_handlep, NULL);
697 }
698 }
699 return (retval);
700 }
701
702
703
704 /*ARGSUSED*/
705 static int
706 npe_ctlops(dev_info_t *dip, dev_info_t *rdip,
707 ddi_ctl_enum_t ctlop, void *arg, void *result)
708 {
709 int rn;
710 int totreg;
711 uint_t reglen;
712 pci_regspec_t *drv_regp;
713 struct attachspec *asp;
714 struct detachspec *dsp;
715 pci_state_t *pci_p = ddi_get_soft_state(npe_statep,
716 ddi_get_instance(dip));
717
718 switch (ctlop) {
719 case DDI_CTLOPS_REPORTDEV:
720 if (rdip == (dev_info_t *)0)
721 return (DDI_FAILURE);
722 cmn_err(CE_CONT, "?PCI Express-device: %s@%s, %s%d\n",
723 ddi_node_name(rdip), ddi_get_name_addr(rdip),
724 ddi_driver_name(rdip), ddi_get_instance(rdip));
725 return (DDI_SUCCESS);
726
727 case DDI_CTLOPS_INITCHILD:
728 return (npe_initchild((dev_info_t *)arg));
729
730 case DDI_CTLOPS_UNINITCHILD:
731 return (npe_removechild((dev_info_t *)arg));
732
733 case DDI_CTLOPS_SIDDEV:
734 return (DDI_SUCCESS);
735
736 case DDI_CTLOPS_REGSIZE:
737 case DDI_CTLOPS_NREGS:
738 if (rdip == (dev_info_t *)0)
739 return (DDI_FAILURE);
740
741 *(int *)result = 0;
742 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, rdip,
743 DDI_PROP_DONTPASS, "reg", (int **)&drv_regp,
744 ®len) != DDI_PROP_SUCCESS) {
745 return (DDI_FAILURE);
746 }
747
748 totreg = (reglen * sizeof (int)) / sizeof (pci_regspec_t);
749 if (ctlop == DDI_CTLOPS_NREGS)
750 *(int *)result = totreg;
751 else if (ctlop == DDI_CTLOPS_REGSIZE) {
752 rn = *(int *)arg;
753 if (rn >= totreg) {
754 ddi_prop_free(drv_regp);
755 return (DDI_FAILURE);
756 }
757 *(off_t *)result = drv_regp[rn].pci_size_low;
758 }
759 ddi_prop_free(drv_regp);
760
761 return (DDI_SUCCESS);
762
763 case DDI_CTLOPS_POWER:
764 {
765 power_req_t *reqp = (power_req_t *)arg;
766 /*
767 * We currently understand reporting of PCI_PM_IDLESPEED
768 * capability. Everything else is passed up.
769 */
770 if ((reqp->request_type == PMR_REPORT_PMCAP) &&
771 (reqp->req.report_pmcap_req.cap == PCI_PM_IDLESPEED))
772 return (DDI_SUCCESS);
773
774 break;
775 }
776
777 case DDI_CTLOPS_PEEK:
778 case DDI_CTLOPS_POKE:
779 return (pci_common_peekpoke(dip, rdip, ctlop, arg, result));
780
781 /* X86 systems support PME wakeup from suspended state */
782 case DDI_CTLOPS_ATTACH:
783 if (!pcie_is_child(dip, rdip))
784 return (DDI_SUCCESS);
785
786 asp = (struct attachspec *)arg;
787 if ((asp->when == DDI_POST) && (asp->result == DDI_SUCCESS)) {
788 pf_init(rdip, (void *)pci_p->pci_fm_ibc, asp->cmd);
789 (void) pcie_postattach_child(rdip);
790 }
791
792 /* only do this for immediate children */
793 if (asp->cmd == DDI_RESUME && asp->when == DDI_PRE &&
794 ddi_get_parent(rdip) == dip)
795 if (pci_pre_resume(rdip) != DDI_SUCCESS) {
796 /* Not good, better stop now. */
797 cmn_err(CE_PANIC,
798 "Couldn't pre-resume device %p",
799 (void *) dip);
800 /* NOTREACHED */
801 }
802
803 return (DDI_SUCCESS);
804
805 case DDI_CTLOPS_DETACH:
806 if (!pcie_is_child(dip, rdip))
807 return (DDI_SUCCESS);
808
809 dsp = (struct detachspec *)arg;
810
811 if (dsp->when == DDI_PRE)
812 pf_fini(rdip, dsp->cmd);
813
814 /* only do this for immediate children */
815 if (dsp->cmd == DDI_SUSPEND && dsp->when == DDI_POST &&
816 ddi_get_parent(rdip) == dip)
817 if (pci_post_suspend(rdip) != DDI_SUCCESS)
818 return (DDI_FAILURE);
819
820 return (DDI_SUCCESS);
821
822 default:
823 break;
824 }
825
826 return (ddi_ctlops(dip, rdip, ctlop, arg, result));
827
828 }
829
830
831 /*
832 * npe_intr_ops
833 */
834 static int
835 npe_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
836 ddi_intr_handle_impl_t *hdlp, void *result)
837 {
838 return (pci_common_intr_ops(pdip, rdip, intr_op, hdlp, result));
839 }
840
841
842 static int
843 npe_initchild(dev_info_t *child)
844 {
845 char name[80];
846 pcie_bus_t *bus_p;
847 uint32_t regs;
848 ddi_acc_handle_t cfg_hdl;
849
850 /*
851 * Do not bind drivers to empty bridges.
852 * Fail above, if the bridge is found to be hotplug capable
853 */
854 if (npe_disable_empty_bridges_workaround(child) == 1)
855 return (DDI_FAILURE);
856
857 if (pci_common_name_child(child, name, 80) != DDI_SUCCESS)
858 return (DDI_FAILURE);
859
860 ddi_set_name_addr(child, name);
861
862 /*
863 * Pseudo nodes indicate a prototype node with per-instance
864 * properties to be merged into the real h/w device node.
865 * The interpretation of the unit-address is DD[,F]
866 * where DD is the device id and F is the function.
867 */
868 if (ndi_dev_is_persistent_node(child) == 0) {
869 extern int pci_allow_pseudo_children;
870
871 ddi_set_parent_data(child, NULL);
872
873 /*
874 * Try to merge the properties from this prototype
875 * node into real h/w nodes.
876 */
877 if (ndi_merge_node(child, pci_common_name_child) ==
878 DDI_SUCCESS) {
879 /*
880 * Merged ok - return failure to remove the node.
881 */
882 ddi_set_name_addr(child, NULL);
883 return (DDI_FAILURE);
884 }
885
886 /* workaround for DDIVS to run under PCI Express */
887 if (pci_allow_pseudo_children) {
888 /*
889 * If the "interrupts" property doesn't exist,
890 * this must be the ddivs no-intr case, and it returns
891 * DDI_SUCCESS instead of DDI_FAILURE.
892 */
893 if (ddi_prop_get_int(DDI_DEV_T_ANY, child,
894 DDI_PROP_DONTPASS, "interrupts", -1) == -1)
895 return (DDI_SUCCESS);
896 /*
897 * Create the ddi_parent_private_data for a pseudo
898 * child.
899 */
900 pci_common_set_parent_private_data(child);
901 return (DDI_SUCCESS);
902 }
903
904 /*
905 * The child was not merged into a h/w node,
906 * but there's not much we can do with it other
907 * than return failure to cause the node to be removed.
908 */
909 cmn_err(CE_WARN, "!%s@%s: %s.conf properties not merged",
910 ddi_get_name(child), ddi_get_name_addr(child),
911 ddi_get_name(child));
912 ddi_set_name_addr(child, NULL);
913 return (DDI_NOT_WELL_FORMED);
914 }
915
916 if (ddi_prop_get_int(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
917 "interrupts", -1) != -1)
918 pci_common_set_parent_private_data(child);
919 else
920 ddi_set_parent_data(child, NULL);
921
922 /* Disable certain errors on PCIe drivers for x86 platforms */
923 regs = pcie_get_aer_uce_mask() | npe_aer_uce_mask;
924 pcie_set_aer_uce_mask(regs);
925 regs = pcie_get_aer_ce_mask() | npe_aer_ce_mask;
926 pcie_set_aer_ce_mask(regs);
927 regs = pcie_get_aer_suce_mask() | npe_aer_suce_mask;
928 pcie_set_aer_suce_mask(regs);
929
930 /*
931 * If URs are disabled, mask SERRs as well, otherwise the system will
932 * still be notified of URs
933 */
934 if (npe_aer_uce_mask & PCIE_AER_UCE_UR)
935 pcie_set_serr_mask(1);
936
937 if (pci_config_setup(child, &cfg_hdl) == DDI_SUCCESS) {
938 npe_ck804_fix_aer_ptr(cfg_hdl);
939 npe_nvidia_error_workaround(cfg_hdl);
940 npe_intel_error_workaround(cfg_hdl);
941 pci_config_teardown(&cfg_hdl);
942 }
943
944 bus_p = PCIE_DIP2BUS(child);
945 if (bus_p) {
946 uint16_t device_id = (uint16_t)(bus_p->bus_dev_ven_id >> 16);
947 uint16_t vendor_id = (uint16_t)(bus_p->bus_dev_ven_id & 0xFFFF);
948 uint16_t rev_id = bus_p->bus_rev_id;
949
950 /* Disable AER for certain NVIDIA Chipsets */
951 if ((vendor_id == NVIDIA_VENDOR_ID) &&
952 (device_id == NVIDIA_CK804_DEVICE_ID) &&
953 (rev_id < NVIDIA_CK804_AER_VALID_REVID))
954 bus_p->bus_aer_off = 0;
955
956 pcie_init_dom(child);
957 (void) pcie_initchild(child);
958 }
959
960 return (DDI_SUCCESS);
961 }
962
963
964 static int
965 npe_removechild(dev_info_t *dip)
966 {
967 pcie_uninitchild(dip);
968
969 ddi_set_name_addr(dip, NULL);
970
971 /*
972 * Strip the node to properly convert it back to prototype form
973 */
974 ddi_remove_minor_node(dip, NULL);
975
976 ddi_prop_remove_all(dip);
977
978 return (DDI_SUCCESS);
979 }
980
981 static int
982 npe_open(dev_t *devp, int flags, int otyp, cred_t *credp)
983 {
984 minor_t minor = getminor(*devp);
985 int instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
986 pci_state_t *pci_p = ddi_get_soft_state(npe_statep, instance);
987 int rv;
988
989 /*
990 * Make sure the open is for the right file type.
991 */
992 if (otyp != OTYP_CHR)
993 return (EINVAL);
994
995 if (pci_p == NULL)
996 return (ENXIO);
997
998 mutex_enter(&pci_p->pci_mutex);
999 switch (PCI_MINOR_NUM_TO_PCI_DEVNUM(minor)) {
1000 case PCI_TOOL_REG_MINOR_NUM:
1001 case PCI_TOOL_INTR_MINOR_NUM:
1002 break;
1003 default:
1004 /* Handle devctl ioctls */
1005 rv = pcie_open(pci_p->pci_dip, devp, flags, otyp, credp);
1006 mutex_exit(&pci_p->pci_mutex);
1007 return (rv);
1008 }
1009
1010 /* Handle pcitool ioctls */
1011 if (flags & FEXCL) {
1012 if (pci_p->pci_soft_state != PCI_SOFT_STATE_CLOSED) {
1013 mutex_exit(&pci_p->pci_mutex);
1014 cmn_err(CE_NOTE, "npe_open: busy");
1015 return (EBUSY);
1016 }
1017 pci_p->pci_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
1018 } else {
1019 if (pci_p->pci_soft_state == PCI_SOFT_STATE_OPEN_EXCL) {
1020 mutex_exit(&pci_p->pci_mutex);
1021 cmn_err(CE_NOTE, "npe_open: busy");
1022 return (EBUSY);
1023 }
1024 pci_p->pci_soft_state = PCI_SOFT_STATE_OPEN;
1025 }
1026 mutex_exit(&pci_p->pci_mutex);
1027
1028 return (0);
1029 }
1030
1031 static int
1032 npe_close(dev_t dev, int flags, int otyp, cred_t *credp)
1033 {
1034 minor_t minor = getminor(dev);
1035 int instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
1036 pci_state_t *pci_p = ddi_get_soft_state(npe_statep, instance);
1037 int rv;
1038
1039 if (pci_p == NULL)
1040 return (ENXIO);
1041
1042 mutex_enter(&pci_p->pci_mutex);
1043
1044 switch (PCI_MINOR_NUM_TO_PCI_DEVNUM(minor)) {
1045 case PCI_TOOL_REG_MINOR_NUM:
1046 case PCI_TOOL_INTR_MINOR_NUM:
1047 break;
1048 default:
1049 /* Handle devctl ioctls */
1050 rv = pcie_close(pci_p->pci_dip, dev, flags, otyp, credp);
1051 mutex_exit(&pci_p->pci_mutex);
1052 return (rv);
1053 }
1054
1055 /* Handle pcitool ioctls */
1056 pci_p->pci_soft_state = PCI_SOFT_STATE_CLOSED;
1057 mutex_exit(&pci_p->pci_mutex);
1058 return (0);
1059 }
1060
1061 static int
1062 npe_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
1063 {
1064 minor_t minor = getminor(dev);
1065 int instance = PCI_MINOR_NUM_TO_INSTANCE(minor);
1066 pci_state_t *pci_p = ddi_get_soft_state(npe_statep, instance);
1067 int ret = ENOTTY;
1068
1069 if (pci_p == NULL)
1070 return (ENXIO);
1071
1072 switch (PCI_MINOR_NUM_TO_PCI_DEVNUM(minor)) {
1073 case PCI_TOOL_REG_MINOR_NUM:
1074 case PCI_TOOL_INTR_MINOR_NUM:
1075 /* To handle pcitool related ioctls */
1076 ret = pci_common_ioctl(pci_p->pci_dip, dev, cmd, arg, mode,
1077 credp, rvalp);
1078 break;
1079 default:
1080 /* To handle devctl and hotplug related ioctls */
1081 ret = pcie_ioctl(pci_p->pci_dip, dev, cmd, arg, mode, credp,
1082 rvalp);
1083 break;
1084 }
1085
1086 return (ret);
1087 }
1088
/*
 * bus_fm_init(9E) entry point: hand a child our FM interrupt block
 * cookie and report this nexus's FM capability level.
 */
/*ARGSUSED*/
static int
npe_fm_init(dev_info_t *dip, dev_info_t *tdip, int cap,
    ddi_iblock_cookie_t *ibc)
{
	pci_state_t  *pcip = ddi_get_soft_state(npe_statep,
	    ddi_get_instance(dip));

	ASSERT(ibc != NULL);
	*ibc = pcip->pci_fm_ibc;

	return (pcip->pci_fmcap);
}
1102
/*
 * FMA error callback registered in npe_attach(); currently a no-op that
 * reports DDI_FM_OK.
 */
/*ARGSUSED*/
static int
npe_fm_callback(dev_info_t *dip, ddi_fm_error_t *derr, const void *no_used)
{
	/*
	 * On current x86 systems, npe's callback does not get called for failed
	 * loads.  If in the future this feature is used, the fault PA should be
	 * logged in the derr->fme_bus_specific field.  The appropriate PCIe
	 * error handling code should be called and needs to be coordinated with
	 * safe access handling.
	 */

	return (DDI_FM_OK);
}