1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
24 */
25
26 #include <sys/sysmacros.h>
27 #include <sys/types.h>
28 #include <sys/kmem.h>
29 #include <sys/modctl.h>
30 #include <sys/ddi.h>
31 #include <sys/sunddi.h>
32 #include <sys/sunndi.h>
33 #include <sys/fm/protocol.h>
34 #include <sys/fm/util.h>
35 #include <sys/promif.h>
36 #include <sys/disp.h>
37 #include <sys/stat.h>
38 #include <sys/file.h>
39 #include <sys/pci_cap.h>
40 #include <sys/pci_impl.h>
41 #include <sys/pcie_impl.h>
42 #include <sys/hotplug/pci/pcie_hp.h>
43 #include <sys/hotplug/pci/pciehpc.h>
44 #include <sys/hotplug/pci/pcishpc.h>
45 #include <sys/hotplug/pci/pcicfg.h>
46 #include <sys/pci_cfgacc.h>
47
/* Local functions prototypes */
static void pcie_init_pfd(dev_info_t *);
static void pcie_fini_pfd(dev_info_t *);

#if defined(__i386) || defined(__amd64)
static void pcie_check_io_mem_range(ddi_acc_handle_t, boolean_t *, boolean_t *);
#endif /* defined(__i386) || defined(__amd64) */

#ifdef DEBUG
uint_t pcie_debug_flags = 0;	/* debug tracing control (DEBUG builds only) */
static void pcie_print_bus(pcie_bus_t *bus_p);
void pcie_dbg(char *fmt, ...);
#endif /* DEBUG */
61
/*
 * Variable to control default PCI-Express config settings.  These bits are
 * OR'ed into the command register by pcie_initchild() after masking with
 * pcie_command_default_fw below.
 */
ushort_t pcie_command_default =
    PCI_COMM_SERR_ENABLE |
    PCI_COMM_WAIT_CYC_ENAB |
    PCI_COMM_PARITY_DETECT |
    PCI_COMM_ME |
    PCI_COMM_MAE |
    PCI_COMM_IO;

/* xxx_fw are bits that are controlled by FW and should not be modified */
ushort_t pcie_command_default_fw =
    PCI_COMM_SPEC_CYC |
    PCI_COMM_MEMWR_INVAL |
    PCI_COMM_PALETTE_SNOOP |
    PCI_COMM_WAIT_CYC_ENAB |
    0xF800; /* Reserved Bits */

/* Firmware-owned bits in a bridge's Bridge Control register */
ushort_t pcie_bdg_command_default_fw =
    PCI_BCNF_BCNTRL_ISA_ENABLE |
    PCI_BCNF_BCNTRL_VGA_ENABLE |
    0xF000; /* Reserved Bits */

/* PCI-Express Base error defaults (Device Control error-reporting enables) */
ushort_t pcie_base_err_default =
    PCIE_DEVCTL_CE_REPORTING_EN |
    PCIE_DEVCTL_NFE_REPORTING_EN |
    PCIE_DEVCTL_FE_REPORTING_EN |
    PCIE_DEVCTL_UR_REPORTING_EN;

/* PCI-Express Device Control Register (MPS/MRRS handled separately) */
uint16_t pcie_devctl_default = PCIE_DEVCTL_RO_EN |
    PCIE_DEVCTL_MAX_READ_REQ_512;
94
95 /* PCI-Express AER Root Control Register */
96 #define PCIE_ROOT_SYS_ERR (PCIE_ROOTCTL_SYS_ERR_ON_CE_EN | \
97 PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN | \
98 PCIE_ROOTCTL_SYS_ERR_ON_FE_EN)
99
100 ushort_t pcie_root_ctrl_default =
101 PCIE_ROOTCTL_SYS_ERR_ON_CE_EN |
102 PCIE_ROOTCTL_SYS_ERR_ON_NFE_EN |
103 PCIE_ROOTCTL_SYS_ERR_ON_FE_EN;
104
/* PCI-Express Root Error Command Register */
ushort_t pcie_root_error_cmd_default =
    PCIE_AER_RE_CMD_CE_REP_EN |
    PCIE_AER_RE_CMD_NFE_REP_EN |
    PCIE_AER_RE_CMD_FE_REP_EN;

/* ECRC settings in the PCIe AER Control Register */
uint32_t pcie_ecrc_value =
    PCIE_AER_CTL_ECRC_GEN_ENA |
    PCIE_AER_CTL_ECRC_CHECK_ENA;

/*
 * If a particular platform wants to disable certain errors such as UR/MA,
 * instead of using #defines have the platform's PCIe Root Complex driver set
 * these masks using the pcie_get_XXX_mask and pcie_set_XXX_mask functions. For
 * x86 the closest thing to a PCIe root complex driver is NPE. For SPARC the
 * closest PCIe root complex driver is PX.
 *
 * pcie_serr_disable_flag : disable SERR only (in RCR and command reg) x86
 * systems may want to disable SERR in general. For root ports, enabling SERR
 * causes NMIs which are not handled and results in a watchdog timeout error.
 */
uint32_t pcie_aer_uce_mask = 0;		/* AER UE Mask */
uint32_t pcie_aer_ce_mask = 0;		/* AER CE Mask */
uint32_t pcie_aer_suce_mask = 0;	/* AER Secondary UE Mask */
uint32_t pcie_serr_disable_flag = 0;	/* Disable SERR */

/* Default severities needed for eversholt. Error handling doesn't care */
uint32_t pcie_aer_uce_severity = PCIE_AER_UCE_MTLP | PCIE_AER_UCE_RO | \
    PCIE_AER_UCE_FCP | PCIE_AER_UCE_SD | PCIE_AER_UCE_DLP | \
    PCIE_AER_UCE_TRAINING;
uint32_t pcie_aer_suce_severity = PCIE_AER_SUCE_SERR_ASSERT | \
    PCIE_AER_SUCE_UC_ADDR_ERR | PCIE_AER_SUCE_UC_ATTR_ERR | \
    PCIE_AER_SUCE_USC_MSG_DATA_ERR;

/*
 * Max Payload Size ceiling in Device Control field encoding (value >> 5);
 * presumably consumed by the MPS negotiation code — confirm against
 * pcie_initchild_mps()/pcie_scan_mps().
 */
int pcie_max_mps = PCIE_DEVCTL_MAX_PAYLOAD_4096 >> 5;
/* Nonzero disables ARI (Alternative Routing-ID Interpretation) support */
int pcie_disable_ari = 0;

/* Local prototypes: MPS scanning and register-mapping helpers */
static void pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip,
    int *max_supported);
static int pcie_get_max_supported(dev_info_t *dip, void *arg);
static int pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
    caddr_t *addrp, ddi_acc_handle_t *handlep);
static void pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph);

dev_info_t *pcie_get_rc_dip(dev_info_t *dip);
151
152 /*
153 * modload support
154 */
155
156 static struct modlmisc modlmisc = {
157 &mod_miscops, /* Type of module */
158 "PCI Express Framework Module"
159 };
160
161 static struct modlinkage modlinkage = {
162 MODREV_1,
163 { (void *)&modlmisc, NULL }
164 };
165
166 /*
167 * Global Variables needed for a non-atomic version of ddi_fm_ereport_post.
168 * Currently used to send the pci.fabric ereports whose payload depends on the
169 * type of PCI device it is being sent for.
170 */
171 char *pcie_nv_buf;
172 nv_alloc_t *pcie_nvap;
173 nvlist_t *pcie_nvl;
174
175 int
176 _init(void)
177 {
178 int rval;
179
180 pcie_nv_buf = kmem_alloc(ERPT_DATA_SZ, KM_SLEEP);
181 pcie_nvap = fm_nva_xcreate(pcie_nv_buf, ERPT_DATA_SZ);
182 pcie_nvl = fm_nvlist_create(pcie_nvap);
183
184 if ((rval = mod_install(&modlinkage)) != 0) {
185 fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
186 fm_nva_xdestroy(pcie_nvap);
187 kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
188 }
189 return (rval);
190 }
191
192 int
193 _fini()
194 {
195 int rval;
196
197 if ((rval = mod_remove(&modlinkage)) == 0) {
198 fm_nvlist_destroy(pcie_nvl, FM_NVA_RETAIN);
199 fm_nva_xdestroy(pcie_nvap);
200 kmem_free(pcie_nv_buf, ERPT_DATA_SZ);
201 }
202 return (rval);
203 }
204
/* Report module information to modinfo(1M). */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
210
/*
 * Nexus attach-time initialization: create the "devctl" minor node and set
 * up the hotplug framework for this bus.  Returns DDI_SUCCESS, or a failure
 * code from minor-node creation (and, on SPARC only, from hotplug init).
 */
/* ARGSUSED */
int
pcie_init(dev_info_t *dip, caddr_t arg)
{
	int	ret = DDI_SUCCESS;

	/*
	 * Create a "devctl" minor node to support DEVCTL_DEVICE_*
	 * and DEVCTL_BUS_* ioctls to this bus.
	 */
	if ((ret = ddi_create_minor_node(dip, "devctl", S_IFCHR,
	    PCI_MINOR_NUM(ddi_get_instance(dip), PCI_DEVCTL_MINOR),
	    DDI_NT_NEXUS, 0)) != DDI_SUCCESS) {
		PCIE_DBG("Failed to create devctl minor node for %s%d\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

		return (ret);
	}

	if ((ret = pcie_hp_init(dip, arg)) != DDI_SUCCESS) {
		/*
		 * On some x86 platforms, we observed unexpected hotplug
		 * initialization failures in recent years. The known cause
		 * is a hardware issue: while the problem PCI bridges have
		 * the Hotplug Capable registers set, the machine actually
		 * does not implement the expected ACPI object.
		 *
		 * We don't want to stop PCI driver attach and system boot
		 * just because of this hotplug initialization failure.
		 * Continue with a debug message printed.
		 */
		PCIE_DBG("%s%d: Failed setting hotplug framework\n",
		    ddi_driver_name(dip), ddi_get_instance(dip));

#if defined(__sparc)
		/* On SPARC the hotplug failure is treated as fatal. */
		ddi_remove_minor_node(dip, "devctl");

		return (ret);
#endif /* defined(__sparc) */
	}

	return (DDI_SUCCESS);
}
254
255 /* ARGSUSED */
256 int
257 pcie_uninit(dev_info_t *dip)
258 {
259 int ret = DDI_SUCCESS;
260
261 if (pcie_ari_is_enabled(dip) == PCIE_ARI_FORW_ENABLED)
262 (void) pcie_ari_disable(dip);
263
264 if ((ret = pcie_hp_uninit(dip)) != DDI_SUCCESS) {
265 PCIE_DBG("Failed to uninitialize hotplug for %s%d\n",
266 ddi_driver_name(dip), ddi_get_instance(dip));
267
268 return (ret);
269 }
270
271 ddi_remove_minor_node(dip, "devctl");
272
273 return (ret);
274 }
275
276 /*
277 * PCIe module interface for enabling hotplug interrupt.
278 *
279 * It should be called after pcie_init() is done and bus driver's
280 * interrupt handlers have being attached.
281 */
282 int
283 pcie_hpintr_enable(dev_info_t *dip)
284 {
285 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
286 pcie_hp_ctrl_t *ctrl_p = PCIE_GET_HP_CTRL(dip);
287
288 if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
289 (void) (ctrl_p->hc_ops.enable_hpc_intr)(ctrl_p);
290 } else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
291 (void) pcishpc_enable_irqs(ctrl_p);
292 }
293 return (DDI_SUCCESS);
294 }
295
296 /*
297 * PCIe module interface for disabling hotplug interrupt.
298 *
299 * It should be called before pcie_uninit() is called and bus driver's
300 * interrupt handlers is dettached.
301 */
302 int
303 pcie_hpintr_disable(dev_info_t *dip)
304 {
305 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
306 pcie_hp_ctrl_t *ctrl_p = PCIE_GET_HP_CTRL(dip);
307
308 if (PCIE_IS_PCIE_HOTPLUG_ENABLED(bus_p)) {
309 (void) (ctrl_p->hc_ops.disable_hpc_intr)(ctrl_p);
310 } else if (PCIE_IS_PCI_HOTPLUG_ENABLED(bus_p)) {
311 (void) pcishpc_disable_irqs(ctrl_p);
312 }
313 return (DDI_SUCCESS);
314 }
315
/*
 * Nexus interrupt hook: forward hotplug interrupts for this bus to the
 * PCIe hotplug framework.
 */
/* ARGSUSED */
int
pcie_intr(dev_info_t *dip)
{
	return (pcie_hp_intr(dip));
}
322
323 /* ARGSUSED */
324 int
325 pcie_open(dev_info_t *dip, dev_t *devp, int flags, int otyp, cred_t *credp)
326 {
327 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
328
329 /*
330 * Make sure the open is for the right file type.
331 */
332 if (otyp != OTYP_CHR)
333 return (EINVAL);
334
335 /*
336 * Handle the open by tracking the device state.
337 */
338 if ((bus_p->bus_soft_state == PCI_SOFT_STATE_OPEN_EXCL) ||
339 ((flags & FEXCL) &&
340 (bus_p->bus_soft_state != PCI_SOFT_STATE_CLOSED))) {
341 return (EBUSY);
342 }
343
344 if (flags & FEXCL)
345 bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN_EXCL;
346 else
347 bus_p->bus_soft_state = PCI_SOFT_STATE_OPEN;
348
349 return (0);
350 }
351
352 /* ARGSUSED */
353 int
354 pcie_close(dev_info_t *dip, dev_t dev, int flags, int otyp, cred_t *credp)
355 {
356 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
357
358 if (otyp != OTYP_CHR)
359 return (EINVAL);
360
361 bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
362
363 return (0);
364 }
365
/*
 * ioctl(9E) for the "devctl" minor node.  Generic DEVCTL_DEVICE_* /
 * DEVCTL_BUS_GETSTATE requests are delegated to ndi_devctl_ioctl();
 * quiesce/unquiesce are handled here; resets are not supported.
 */
/* ARGSUSED */
int
pcie_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg, int mode,
    cred_t *credp, int *rvalp)
{
	struct devctl_iocdata	*dcp;
	uint_t			bus_state;
	int			rv = DDI_SUCCESS;

	/*
	 * We can use the generic implementation for devctl ioctl
	 */
	switch (cmd) {
	case DEVCTL_DEVICE_GETSTATE:
	case DEVCTL_DEVICE_ONLINE:
	case DEVCTL_DEVICE_OFFLINE:
	case DEVCTL_BUS_GETSTATE:
		return (ndi_devctl_ioctl(dip, cmd, arg, mode, 0));
	default:
		break;
	}

	/*
	 * read devctl ioctl data
	 */
	if (ndi_dc_allochdl((void *)arg, &dcp) != NDI_SUCCESS)
		return (EFAULT);

	switch (cmd) {
	case DEVCTL_BUS_QUIESCE:
		/*
		 * If the bus is already quiesced the inner "break" leaves
		 * the switch without touching the state; a failed state
		 * lookup falls through and sets BUS_QUIESCED anyway.
		 */
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_QUIESCED)
				break;
		(void) ndi_set_bus_state(dip, BUS_QUIESCED);
		break;
	case DEVCTL_BUS_UNQUIESCE:
		/* Same structure as above, for the active state. */
		if (ndi_get_bus_state(dip, &bus_state) == NDI_SUCCESS)
			if (bus_state == BUS_ACTIVE)
				break;
		(void) ndi_set_bus_state(dip, BUS_ACTIVE);
		break;
	case DEVCTL_BUS_RESET:
	case DEVCTL_BUS_RESETALL:
	case DEVCTL_DEVICE_RESET:
		/* Resets are not implemented by this framework. */
		rv = ENOTSUP;
		break;
	default:
		rv = ENOTTY;
	}

	ndi_dc_freehdl(dcp);
	return (rv);
}
419
420 /* ARGSUSED */
421 int
422 pcie_prop_op(dev_t dev, dev_info_t *dip, ddi_prop_op_t prop_op,
423 int flags, char *name, caddr_t valuep, int *lengthp)
424 {
425 if (dev == DDI_DEV_T_ANY)
426 goto skip;
427
428 if (PCIE_IS_HOTPLUG_CAPABLE(dip) &&
429 strcmp(name, "pci-occupant") == 0) {
430 int pci_dev = PCI_MINOR_NUM_TO_PCI_DEVNUM(getminor(dev));
431
432 pcie_hp_create_occupant_props(dip, dev, pci_dev);
433 }
434
435 skip:
436 return (ddi_prop_op(dev, dip, prop_op, flags, name, valuep, lengthp));
437 }
438
/*
 * Set up a config-space access handle on cdip, dedicated to the error
 * handling code, and store it in bus_p->bus_cfg_hdl.  Returns DDI_SUCCESS,
 * or DDI_FAILURE when no bus_t exists or the config mapping fails.
 */
int
pcie_init_cfghdl(dev_info_t *cdip)
{
	pcie_bus_t		*bus_p;
	ddi_acc_handle_t	eh = NULL;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL)
		return (DDI_FAILURE);

	/* Create a config access handle dedicated to error handling */
	if (pci_config_setup(cdip, &eh) != DDI_SUCCESS) {
		cmn_err(CE_WARN, "Cannot setup config access"
		    " for BDF 0x%x\n", bus_p->bus_bdf);
		return (DDI_FAILURE);
	}

	bus_p->bus_cfg_hdl = eh;
	return (DDI_SUCCESS);
}
459
/*
 * Tear down the error-handling config access handle created by
 * pcie_init_cfghdl().
 */
void
pcie_fini_cfghdl(dev_info_t *cdip)
{
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(cdip);

	pci_config_teardown(&bus_p->bus_cfg_hdl);
}
467
468 /*
469 * PCI-Express child device initialization.
470 * This function enables generic pci-express interrupts and error
471 * handling.
472 *
 * @param cdip child's dip (the device's dip)
475 * @return DDI_SUCCESS or DDI_FAILURE
476 */
/* ARGSUSED */
int
pcie_initchild(dev_info_t *cdip)
{
	uint16_t		tmp16, reg16;
	pcie_bus_t		*bus_p;
	uint32_t		devid, venid;

	bus_p = PCIE_DIP2BUS(cdip);
	if (bus_p == NULL) {
		PCIE_DBG("%s: BUS not found.\n",
		    ddi_driver_name(cdip));

		return (DDI_FAILURE);
	}

	if (pcie_init_cfghdl(cdip) != DDI_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Update pcie_bus_t with real Vendor Id Device Id.
	 *
	 * For assigned devices in IOV environment, the OBP will return
	 * faked device id/vendor id on configuration read and for both
	 * properties in root domain. translate_devid() function will
	 * update the properties with real device-id/vendor-id on such
	 * platforms, so that we can utilize the properties here to get
	 * real device-id/vendor-id and overwrite the faked ids.
	 *
	 * For unassigned devices or devices in non-IOV environment, the
	 * operation below won't make a difference.
	 *
	 * The IOV implementation only supports assignment of PCIE
	 * endpoint devices. Devices under pci-pci bridges don't need
	 * operation like this.
	 */
	devid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "device-id", -1);
	venid = ddi_prop_get_int(DDI_DEV_T_ANY, cdip, DDI_PROP_DONTPASS,
	    "vendor-id", -1);
	bus_p->bus_dev_ven_id = (devid << 16) | (venid & 0xffff);

	/*
	 * Clear the device's status register.  PCI status bits are
	 * write-1-to-clear, so writing back the value just read clears
	 * any set bits.
	 */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_STAT);
	PCIE_PUT(16, bus_p, PCI_CONF_STAT, reg16);

	/*
	 * Setup the device's command register: keep the firmware-owned
	 * bits (pcie_command_default_fw) and apply our defaults on top.
	 */
	reg16 = PCIE_GET(16, bus_p, PCI_CONF_COMM);
	tmp16 = (reg16 & pcie_command_default_fw) | pcie_command_default;

#if defined(__i386) || defined(__amd64)
	boolean_t empty_io_range = B_FALSE;
	boolean_t empty_mem_range = B_FALSE;
	/*
	 * Check for empty IO and Mem ranges on bridges. If so disable IO/Mem
	 * access as it can cause a hang if enabled.
	 */
	pcie_check_io_mem_range(bus_p->bus_cfg_hdl, &empty_io_range,
	    &empty_mem_range);
	if ((empty_io_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_IO)) {
		tmp16 &= ~PCI_COMM_IO;
		PCIE_DBG("No I/O range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
	if ((empty_mem_range == B_TRUE) &&
	    (pcie_command_default & PCI_COMM_MAE)) {
		tmp16 &= ~PCI_COMM_MAE;
		PCIE_DBG("No Mem range found for %s, bdf 0x%x\n",
		    ddi_driver_name(cdip), bus_p->bus_bdf);
	}
#endif /* defined(__i386) || defined(__amd64) */

	if (pcie_serr_disable_flag && PCIE_IS_PCIE(bus_p))
		tmp16 &= ~PCI_COMM_SERR_ENABLE;

	PCIE_PUT(16, bus_p, PCI_CONF_COMM, tmp16);
	PCIE_DBG_CFG(cdip, bus_p, "COMMAND", 16, PCI_CONF_COMM, reg16);

	/*
	 * If the device has a bus control register then program it
	 * based on the settings in the command register.
	 */
	if (PCIE_IS_BDG(bus_p)) {
		/* Clear the device's secondary status register (RW1C) */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_SEC_STATUS);
		PCIE_PUT(16, bus_p, PCI_BCNF_SEC_STATUS, reg16);

		/* Setup the device's secondary command register */
		reg16 = PCIE_GET(16, bus_p, PCI_BCNF_BCNTRL);
		tmp16 = (reg16 & pcie_bdg_command_default_fw);

		tmp16 |= PCI_BCNF_BCNTRL_SERR_ENABLE;
		/*
		 * Workaround for this Nvidia bridge. Don't enable the SERR
		 * enable bit in the bridge control register as it could lead to
		 * bogus NMIs.
		 */
		if (bus_p->bus_dev_ven_id == 0x037010DE)
			tmp16 &= ~PCI_BCNF_BCNTRL_SERR_ENABLE;

		if (pcie_command_default & PCI_COMM_PARITY_DETECT)
			tmp16 |= PCI_BCNF_BCNTRL_PARITY_ENABLE;

		/*
		 * Enable Master Abort Mode only if URs have not been masked.
		 * For PCI and PCIe-PCI bridges, enabling this bit causes a
		 * Master Aborts/UR to be forwarded as a UR/TA or SERR. If this
		 * bit is masked, posted requests are dropped and non-posted
		 * requests are returned with -1.
		 */
		if (pcie_aer_uce_mask & PCIE_AER_UCE_UR)
			tmp16 &= ~PCI_BCNF_BCNTRL_MAST_AB_MODE;
		else
			tmp16 |= PCI_BCNF_BCNTRL_MAST_AB_MODE;
		PCIE_PUT(16, bus_p, PCI_BCNF_BCNTRL, tmp16);
		PCIE_DBG_CFG(cdip, bus_p, "SEC CMD", 16, PCI_BCNF_BCNTRL,
		    reg16);
	}

	if (PCIE_IS_PCIE(bus_p)) {
		/* Setup PCIe device control register */
		reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
		/* note: MPS/MRRS are initialized in pcie_initchild_mps() */
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK));
		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(cdip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);

		/* Enable PCIe errors */
		pcie_enable_errors(cdip);
	}

	/* Record whether this device participates in ARI forwarding. */
	bus_p->bus_ari = B_FALSE;
	if ((pcie_ari_is_enabled(ddi_get_parent(cdip))
	    == PCIE_ARI_FORW_ENABLED) && (pcie_ari_device(cdip)
	    == PCIE_ARI_DEVICE)) {
		bus_p->bus_ari = B_TRUE;
	}

	if (pcie_initchild_mps(cdip) == DDI_FAILURE) {
		pcie_fini_cfghdl(cdip);
		return (DDI_FAILURE);
	}

	return (DDI_SUCCESS);
}
626
/*
 * Allocate and attach the per-device fault data (pf_data_t) for dip.
 * Which sub-structures get allocated depends on the device type (root,
 * bridge, PCIe, PCI-X); pcie_fini_pfd() must mirror this exactly.
 */
static void
pcie_init_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_ZALLOC(pf_data_t);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	PCIE_DIP2PFD(dip) = pfd_p;

	pfd_p->pe_bus_p = bus_p;
	pfd_p->pe_severity_flags = 0;
	pfd_p->pe_orig_severity_flags = 0;
	pfd_p->pe_lock = B_FALSE;
	pfd_p->pe_valid = B_FALSE;

	/* Allocate the root fault struct for both RC and RP */
	if (PCIE_IS_ROOT(bus_p)) {
		PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
		PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
		PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
	}

	/* Generic PCI error registers and affected-device record: always */
	PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
	PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
	PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;

	if (PCIE_IS_BDG(bus_p))
		PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);

	if (PCIE_IS_PCIE(bus_p)) {
		PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);

		if (PCIE_IS_RP(bus_p))
			PCIE_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_rp_err_regs_t);

		/* AER registers; target BDFs start out invalid */
		PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
		PCIE_ADV_REG(pfd_p)->pcie_ue_tgt_bdf = PCIE_INVALID_BDF;

		if (PCIE_IS_RP(bus_p)) {
			PCIE_ADV_RP_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id =
			    PCIE_INVALID_BDF;
			PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id =
			    PCIE_INVALID_BDF;
		} else if (PCIE_IS_PCIE_BDG(bus_p)) {
			PCIE_ADV_BDG_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcie_adv_bdg_err_regs_t);
			PCIE_ADV_BDG_REG(pfd_p)->pcie_sue_tgt_bdf =
			    PCIE_INVALID_BDF;
		}

		/* PCIe-to-PCI-X bridge: PCI-X error (and maybe ECC) state */
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		}
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			PCIX_BDG_ERR_REG(pfd_p) =
			    PCIE_ZALLOC(pf_pcix_bdg_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				PCIX_BDG_ECC_REG(pfd_p, 0) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
				PCIX_BDG_ECC_REG(pfd_p, 1) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
			}
		} else {
			PCIX_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcix_err_regs_t);

			if (PCIX_ECC_VERSION_CHECK(bus_p))
				PCIX_ECC_REG(pfd_p) =
				    PCIE_ZALLOC(pf_pcix_ecc_regs_t);
		}
	}
}
710
/*
 * Free the per-device fault data allocated by pcie_init_pfd().  The
 * conditional structure must exactly mirror the allocation logic there.
 */
static void
pcie_fini_pfd(dev_info_t *dip)
{
	pf_data_t	*pfd_p = PCIE_DIP2PFD(dip);
	pcie_bus_t	*bus_p = PCIE_DIP2BUS(dip);

	if (PCIE_IS_PCIE(bus_p)) {
		if (PCIE_IS_PCIE_BDG(bus_p) && PCIE_IS_PCIX(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		}

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_ADV_RP_REG(pfd_p),
			    sizeof (pf_pcie_adv_rp_err_regs_t));
		else if (PCIE_IS_PCIE_BDG(bus_p))
			kmem_free(PCIE_ADV_BDG_REG(pfd_p),
			    sizeof (pf_pcie_adv_bdg_err_regs_t));

		kmem_free(PCIE_ADV_REG(pfd_p),
		    sizeof (pf_pcie_adv_err_regs_t));

		if (PCIE_IS_RP(bus_p))
			kmem_free(PCIE_RP_REG(pfd_p),
			    sizeof (pf_pcie_rp_err_regs_t));

		kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	} else if (PCIE_IS_PCIX(bus_p)) {
		if (PCIE_IS_BDG(bus_p)) {
			if (PCIX_ECC_VERSION_CHECK(bus_p)) {
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 0),
				    sizeof (pf_pcix_ecc_regs_t));
				kmem_free(PCIX_BDG_ECC_REG(pfd_p, 1),
				    sizeof (pf_pcix_ecc_regs_t));
			}

			kmem_free(PCIX_BDG_ERR_REG(pfd_p),
			    sizeof (pf_pcix_bdg_err_regs_t));
		} else {
			if (PCIX_ECC_VERSION_CHECK(bus_p))
				kmem_free(PCIX_ECC_REG(pfd_p),
				    sizeof (pf_pcix_ecc_regs_t));

			kmem_free(PCIX_ERR_REG(pfd_p),
			    sizeof (pf_pcix_err_regs_t));
		}
	}

	if (PCIE_IS_BDG(bus_p))
		kmem_free(PCI_BDG_ERR_REG(pfd_p),
		    sizeof (pf_pci_bdg_err_regs_t));

	/* These were allocated unconditionally in pcie_init_pfd() */
	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));

	if (PCIE_IS_ROOT(bus_p)) {
		kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
		kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
	}

	kmem_free(PCIE_DIP2PFD(dip), sizeof (pf_data_t));

	PCIE_DIP2PFD(dip) = NULL;
}
782
783
784 /*
785 * Special functions to allocate pf_data_t's for PCIe root complexes.
786 * Note: Root Complex not Root Port
787 */
788 void
789 pcie_rc_init_pfd(dev_info_t *dip, pf_data_t *pfd_p)
790 {
791 pfd_p->pe_bus_p = PCIE_DIP2DOWNBUS(dip);
792 pfd_p->pe_severity_flags = 0;
793 pfd_p->pe_orig_severity_flags = 0;
794 pfd_p->pe_lock = B_FALSE;
795 pfd_p->pe_valid = B_FALSE;
796
797 PCIE_ROOT_FAULT(pfd_p) = PCIE_ZALLOC(pf_root_fault_t);
798 PCIE_ROOT_FAULT(pfd_p)->scan_bdf = PCIE_INVALID_BDF;
799 PCIE_ROOT_EH_SRC(pfd_p) = PCIE_ZALLOC(pf_root_eh_src_t);
800 PCI_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_err_regs_t);
801 PFD_AFFECTED_DEV(pfd_p) = PCIE_ZALLOC(pf_affected_dev_t);
802 PFD_AFFECTED_DEV(pfd_p)->pe_affected_bdf = PCIE_INVALID_BDF;
803 PCI_BDG_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pci_bdg_err_regs_t);
804 PCIE_ERR_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_err_regs_t);
805 PCIE_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_rp_err_regs_t);
806 PCIE_ADV_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_err_regs_t);
807 PCIE_ADV_RP_REG(pfd_p) = PCIE_ZALLOC(pf_pcie_adv_rp_err_regs_t);
808 PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ce_src_id = PCIE_INVALID_BDF;
809 PCIE_ADV_RP_REG(pfd_p)->pcie_rp_ue_src_id = PCIE_INVALID_BDF;
810
811 PCIE_ADV_REG(pfd_p)->pcie_ue_sev = pcie_aer_uce_severity;
812 }
813
/*
 * Free everything allocated by pcie_rc_init_pfd() (the pf_data_t itself
 * is owned by the caller and is not freed here).
 */
void
pcie_rc_fini_pfd(pf_data_t *pfd_p)
{
	kmem_free(PCIE_ADV_RP_REG(pfd_p), sizeof (pf_pcie_adv_rp_err_regs_t));
	kmem_free(PCIE_ADV_REG(pfd_p), sizeof (pf_pcie_adv_err_regs_t));
	kmem_free(PCIE_RP_REG(pfd_p), sizeof (pf_pcie_rp_err_regs_t));
	kmem_free(PCIE_ERR_REG(pfd_p), sizeof (pf_pcie_err_regs_t));
	kmem_free(PCI_BDG_ERR_REG(pfd_p), sizeof (pf_pci_bdg_err_regs_t));
	kmem_free(PFD_AFFECTED_DEV(pfd_p), sizeof (pf_affected_dev_t));
	kmem_free(PCI_ERR_REG(pfd_p), sizeof (pf_pci_err_regs_t));
	kmem_free(PCIE_ROOT_FAULT(pfd_p), sizeof (pf_root_fault_t));
	kmem_free(PCIE_ROOT_EH_SRC(pfd_p), sizeof (pf_root_eh_src_t));
}
827
828 /*
829 * init pcie_bus_t for root complex
830 *
 * Only a few of the fields in bus_t are valid for a root complex.
832 * The fields that are bracketed are initialized in this routine:
833 *
834 * dev_info_t * <bus_dip>
835 * dev_info_t * bus_rp_dip
836 * ddi_acc_handle_t bus_cfg_hdl
837 * uint_t <bus_fm_flags>
838 * pcie_req_id_t bus_bdf
839 * pcie_req_id_t bus_rp_bdf
840 * uint32_t bus_dev_ven_id
841 * uint8_t bus_rev_id
842 * uint8_t <bus_hdr_type>
843 * uint16_t <bus_dev_type>
844 * uint8_t bus_bdg_secbus
845 * uint16_t bus_pcie_off
846 * uint16_t <bus_aer_off>
847 * uint16_t bus_pcix_off
848 * uint16_t bus_ecc_ver
849 * pci_bus_range_t bus_bus_range
850 * ppb_ranges_t * bus_addr_ranges
851 * int bus_addr_entries
852 * pci_regspec_t * bus_assigned_addr
853 * int bus_assigned_entries
854 * pf_data_t * bus_pfd
855 * pcie_domain_t * <bus_dom>
856 * int bus_mps
857 * uint64_t bus_cfgacc_base
858 * void * bus_plat_private
859 */
void
pcie_rc_init_bus(dev_info_t *dip)
{
	pcie_bus_t *bus_p;

	bus_p = (pcie_bus_t *)kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
	bus_p->bus_dip = dip;
	bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_RC_PSEUDO;
	bus_p->bus_hdr_type = PCI_HEADER_ONE;

	/* Fake that there are AER logs */
	bus_p->bus_aer_off = (uint16_t)-1;

	/* Needed only for handle lookup */
	bus_p->bus_fm_flags |= PF_FM_READY;

	/* Attach the bus_t to the "down" side of the RC devinfo node */
	ndi_set_bus_private(dip, B_FALSE, DEVI_PORT_TYPE_PCI, bus_p);

	PCIE_BUS2DOM(bus_p) = PCIE_ZALLOC(pcie_domain_t);
}
880
881 void
882 pcie_rc_fini_bus(dev_info_t *dip)
883 {
884 pcie_bus_t *bus_p = PCIE_DIP2DOWNBUS(dip);
885 ndi_set_bus_private(dip, B_FALSE, NULL, NULL);
886 kmem_free(PCIE_BUS2DOM(bus_p), sizeof (pcie_domain_t));
887 kmem_free(bus_p, sizeof (pcie_bus_t));
888 }
889
890 /*
891 * partially init pcie_bus_t for device (dip,bdf) for accessing pci
892 * config space
893 *
894 * This routine is invoked during boot, either after creating a devinfo node
895 * (x86 case) or during px driver attach (sparc case); it is also invoked
896 * in hotplug context after a devinfo node is created.
897 *
898 * The fields that are bracketed are initialized if flag PCIE_BUS_INITIAL
899 * is set:
900 *
901 * dev_info_t * <bus_dip>
902 * dev_info_t * <bus_rp_dip>
903 * ddi_acc_handle_t bus_cfg_hdl
904 * uint_t bus_fm_flags
905 * pcie_req_id_t <bus_bdf>
906 * pcie_req_id_t <bus_rp_bdf>
907 * uint32_t <bus_dev_ven_id>
908 * uint8_t <bus_rev_id>
909 * uint8_t <bus_hdr_type>
910 * uint16_t <bus_dev_type>
 * uint8_t <bus_bdg_secbus>
912 * uint16_t <bus_pcie_off>
913 * uint16_t <bus_aer_off>
914 * uint16_t <bus_pcix_off>
915 * uint16_t <bus_ecc_ver>
916 * pci_bus_range_t bus_bus_range
917 * ppb_ranges_t * bus_addr_ranges
918 * int bus_addr_entries
919 * pci_regspec_t * bus_assigned_addr
920 * int bus_assigned_entries
921 * pf_data_t * bus_pfd
922 * pcie_domain_t * bus_dom
923 * int bus_mps
924 * uint64_t bus_cfgacc_base
925 * void * bus_plat_private
926 *
927 * The fields that are bracketed are initialized if flag PCIE_BUS_FINAL
928 * is set:
929 *
930 * dev_info_t * bus_dip
931 * dev_info_t * bus_rp_dip
932 * ddi_acc_handle_t bus_cfg_hdl
933 * uint_t bus_fm_flags
934 * pcie_req_id_t bus_bdf
935 * pcie_req_id_t bus_rp_bdf
936 * uint32_t bus_dev_ven_id
937 * uint8_t bus_rev_id
938 * uint8_t bus_hdr_type
939 * uint16_t bus_dev_type
940 * uint8_t <bus_bdg_secbus>
941 * uint16_t bus_pcie_off
942 * uint16_t bus_aer_off
943 * uint16_t bus_pcix_off
944 * uint16_t bus_ecc_ver
945 * pci_bus_range_t <bus_bus_range>
946 * ppb_ranges_t * <bus_addr_ranges>
947 * int <bus_addr_entries>
948 * pci_regspec_t * <bus_assigned_addr>
949 * int <bus_assigned_entries>
950 * pf_data_t * <bus_pfd>
951 * pcie_domain_t * bus_dom
952 * int bus_mps
953 * uint64_t bus_cfgacc_base
954 * void * <bus_plat_private>
955 */
956
957 pcie_bus_t *
958 pcie_init_bus(dev_info_t *dip, pcie_req_id_t bdf, uint8_t flags)
959 {
960 uint16_t status, base, baseptr, num_cap;
961 uint32_t capid;
962 int range_size;
963 pcie_bus_t *bus_p;
964 dev_info_t *rcdip;
965 dev_info_t *pdip;
966 const char *errstr = NULL;
967
968 if (!(flags & PCIE_BUS_INITIAL))
969 goto initial_done;
970
971 bus_p = kmem_zalloc(sizeof (pcie_bus_t), KM_SLEEP);
972
973 bus_p->bus_dip = dip;
974 bus_p->bus_bdf = bdf;
975
976 rcdip = pcie_get_rc_dip(dip);
977 ASSERT(rcdip != NULL);
978
979 /* Save the Vendor ID, Device ID and revision ID */
980 bus_p->bus_dev_ven_id = pci_cfgacc_get32(rcdip, bdf, PCI_CONF_VENID);
981 bus_p->bus_rev_id = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_REVID);
982 /* Save the Header Type */
983 bus_p->bus_hdr_type = pci_cfgacc_get8(rcdip, bdf, PCI_CONF_HEADER);
984 bus_p->bus_hdr_type &= PCI_HEADER_TYPE_M;
985
986 /*
987 * Figure out the device type and all the relavant capability offsets
988 */
989 /* set default value */
990 bus_p->bus_dev_type = PCIE_PCIECAP_DEV_TYPE_PCI_PSEUDO;
991
992 status = pci_cfgacc_get16(rcdip, bdf, PCI_CONF_STAT);
993 if (status == PCI_CAP_EINVAL16 || !(status & PCI_STAT_CAP))
994 goto caps_done; /* capability not supported */
995
996 /* Relevant conventional capabilities first */
997
998 /* Conventional caps: PCI_CAP_ID_PCI_E, PCI_CAP_ID_PCIX */
999 num_cap = 2;
1000
1001 switch (bus_p->bus_hdr_type) {
1002 case PCI_HEADER_ZERO:
1003 baseptr = PCI_CONF_CAP_PTR;
1004 break;
1005 case PCI_HEADER_PPB:
1006 baseptr = PCI_BCNF_CAP_PTR;
1007 break;
1008 case PCI_HEADER_CARDBUS:
1009 baseptr = PCI_CBUS_CAP_PTR;
1010 break;
1011 default:
1012 cmn_err(CE_WARN, "%s: unexpected pci header type:%x",
1013 __func__, bus_p->bus_hdr_type);
1014 goto caps_done;
1015 }
1016
1017 base = baseptr;
1018 for (base = pci_cfgacc_get8(rcdip, bdf, base); base && num_cap;
1019 base = pci_cfgacc_get8(rcdip, bdf, base + PCI_CAP_NEXT_PTR)) {
1020 capid = pci_cfgacc_get8(rcdip, bdf, base);
1021 switch (capid) {
1022 case PCI_CAP_ID_PCI_E:
1023 bus_p->bus_pcie_off = base;
1024 bus_p->bus_dev_type = pci_cfgacc_get16(rcdip, bdf,
1025 base + PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;
1026
1027 /* Check and save PCIe hotplug capability information */
1028 if ((PCIE_IS_RP(bus_p) || PCIE_IS_SWD(bus_p)) &&
1029 (pci_cfgacc_get16(rcdip, bdf, base + PCIE_PCIECAP)
1030 & PCIE_PCIECAP_SLOT_IMPL) &&
1031 (pci_cfgacc_get32(rcdip, bdf, base + PCIE_SLOTCAP)
1032 & PCIE_SLOTCAP_HP_CAPABLE))
1033 bus_p->bus_hp_sup_modes |= PCIE_NATIVE_HP_MODE;
1034
1035 num_cap--;
1036 break;
1037 case PCI_CAP_ID_PCIX:
1038 bus_p->bus_pcix_off = base;
1039 if (PCIE_IS_BDG(bus_p))
1040 bus_p->bus_ecc_ver =
1041 pci_cfgacc_get16(rcdip, bdf, base +
1042 PCI_PCIX_SEC_STATUS) & PCI_PCIX_VER_MASK;
1043 else
1044 bus_p->bus_ecc_ver =
1045 pci_cfgacc_get16(rcdip, bdf, base +
1046 PCI_PCIX_COMMAND) & PCI_PCIX_VER_MASK;
1047 num_cap--;
1048 break;
1049 default:
1050 break;
1051 }
1052 }
1053
1054 /* Check and save PCI hotplug (SHPC) capability information */
1055 if (PCIE_IS_BDG(bus_p)) {
1056 base = baseptr;
1057 for (base = pci_cfgacc_get8(rcdip, bdf, base);
1058 base; base = pci_cfgacc_get8(rcdip, bdf,
1059 base + PCI_CAP_NEXT_PTR)) {
1060 capid = pci_cfgacc_get8(rcdip, bdf, base);
1061 if (capid == PCI_CAP_ID_PCI_HOTPLUG) {
1062 bus_p->bus_pci_hp_off = base;
1063 bus_p->bus_hp_sup_modes |= PCIE_PCI_HP_MODE;
1064 break;
1065 }
1066 }
1067 }
1068
1069 /* Then, relevant extended capabilities */
1070
1071 if (!PCIE_IS_PCIE(bus_p))
1072 goto caps_done;
1073
1074 /* Extended caps: PCIE_EXT_CAP_ID_AER */
1075 for (base = PCIE_EXT_CAP; base; base = (capid >>
1076 PCIE_EXT_CAP_NEXT_PTR_SHIFT) & PCIE_EXT_CAP_NEXT_PTR_MASK) {
1077 capid = pci_cfgacc_get32(rcdip, bdf, base);
1078 if (capid == PCI_CAP_EINVAL32)
1079 break;
1080 if (((capid >> PCIE_EXT_CAP_ID_SHIFT) & PCIE_EXT_CAP_ID_MASK)
1081 == PCIE_EXT_CAP_ID_AER) {
1082 bus_p->bus_aer_off = base;
1083 break;
1084 }
1085 }
1086
1087 caps_done:
1088 /* save RP dip and RP bdf */
1089 if (PCIE_IS_RP(bus_p)) {
1090 bus_p->bus_rp_dip = dip;
1091 bus_p->bus_rp_bdf = bus_p->bus_bdf;
1092 } else {
1093 for (pdip = ddi_get_parent(dip); pdip;
1094 pdip = ddi_get_parent(pdip)) {
1095 pcie_bus_t *parent_bus_p = PCIE_DIP2BUS(pdip);
1096
1097 /*
1098 * If RP dip and RP bdf in parent's bus_t have
1099 * been initialized, simply use these instead of
1100 * continuing up to the RC.
1101 */
1102 if (parent_bus_p->bus_rp_dip != NULL) {
1103 bus_p->bus_rp_dip = parent_bus_p->bus_rp_dip;
1104 bus_p->bus_rp_bdf = parent_bus_p->bus_rp_bdf;
1105 break;
1106 }
1107
1108 /*
1109 * When debugging be aware that some NVIDIA x86
1110 * architectures have 2 nodes for each RP, One at Bus
1111 * 0x0 and one at Bus 0x80. The requester is from Bus
1112 * 0x80
1113 */
1114 if (PCIE_IS_ROOT(parent_bus_p)) {
1115 bus_p->bus_rp_dip = pdip;
1116 bus_p->bus_rp_bdf = parent_bus_p->bus_bdf;
1117 break;
1118 }
1119 }
1120 }
1121
1122 bus_p->bus_soft_state = PCI_SOFT_STATE_CLOSED;
1123 bus_p->bus_fm_flags = 0;
1124 bus_p->bus_mps = 0;
1125
1126 ndi_set_bus_private(dip, B_TRUE, DEVI_PORT_TYPE_PCI, (void *)bus_p);
1127
1128 if (PCIE_IS_HOTPLUG_CAPABLE(dip))
1129 (void) ndi_prop_create_boolean(DDI_DEV_T_NONE, dip,
1130 "hotplug-capable");
1131
1132 initial_done:
1133 if (!(flags & PCIE_BUS_FINAL))
1134 goto final_done;
1135
1136 /* already initialized? */
1137 bus_p = PCIE_DIP2BUS(dip);
1138
1139 /* Save the Range information if device is a switch/bridge */
1140 if (PCIE_IS_BDG(bus_p)) {
1141 /* get "bus_range" property */
1142 range_size = sizeof (pci_bus_range_t);
1143 if (ddi_getlongprop_buf(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1144 "bus-range", (caddr_t)&bus_p->bus_bus_range, &range_size)
1145 != DDI_PROP_SUCCESS) {
1146 errstr = "Cannot find \"bus-range\" property";
1147 cmn_err(CE_WARN,
1148 "PCIE init err info failed BDF 0x%x:%s\n",
1149 bus_p->bus_bdf, errstr);
1150 }
1151
1152 /* get secondary bus number */
1153 rcdip = pcie_get_rc_dip(dip);
1154 ASSERT(rcdip != NULL);
1155
1156 bus_p->bus_bdg_secbus = pci_cfgacc_get8(rcdip,
1157 bus_p->bus_bdf, PCI_BCNF_SECBUS);
1158
1159 /* Get "ranges" property */
1160 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1161 "ranges", (caddr_t)&bus_p->bus_addr_ranges,
1162 &bus_p->bus_addr_entries) != DDI_PROP_SUCCESS)
1163 bus_p->bus_addr_entries = 0;
1164 bus_p->bus_addr_entries /= sizeof (ppb_ranges_t);
1165 }
1166
1167 /* save "assigned-addresses" property array, ignore failues */
1168 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1169 "assigned-addresses", (caddr_t)&bus_p->bus_assigned_addr,
1170 &bus_p->bus_assigned_entries) == DDI_PROP_SUCCESS)
1171 bus_p->bus_assigned_entries /= sizeof (pci_regspec_t);
1172 else
1173 bus_p->bus_assigned_entries = 0;
1174
1175 pcie_init_pfd(dip);
1176
1177 pcie_init_plat(dip);
1178
1179 final_done:
1180
1181 PCIE_DBG("Add %s(dip 0x%p, bdf 0x%x, secbus 0x%x)\n",
1182 ddi_driver_name(dip), (void *)dip, bus_p->bus_bdf,
1183 bus_p->bus_bdg_secbus);
1184 #ifdef DEBUG
1185 pcie_print_bus(bus_p);
1186 #endif
1187
1188 return (bus_p);
1189 }
1190
1191 /*
1192 * Invoked before destroying devinfo node, mostly during hotplug
1193 * operation to free pcie_bus_t data structure
1194 */
1195 /* ARGSUSED */
1196 void
1197 pcie_fini_bus(dev_info_t *dip, uint8_t flags)
1198 {
1199 pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
1200 ASSERT(bus_p);
1201
1202 if (flags & PCIE_BUS_INITIAL) {
1203 pcie_fini_plat(dip);
1204 pcie_fini_pfd(dip);
1205
1206 kmem_free(bus_p->bus_assigned_addr,
1207 (sizeof (pci_regspec_t) * bus_p->bus_assigned_entries));
1208 kmem_free(bus_p->bus_addr_ranges,
1209 (sizeof (ppb_ranges_t) * bus_p->bus_addr_entries));
1210 /* zero out the fields that have been destroyed */
1211 bus_p->bus_assigned_addr = NULL;
1212 bus_p->bus_addr_ranges = NULL;
1213 bus_p->bus_assigned_entries = 0;
1214 bus_p->bus_addr_entries = 0;
1215 }
1216
1217 if (flags & PCIE_BUS_FINAL) {
1218 if (PCIE_IS_HOTPLUG_CAPABLE(dip)) {
1219 (void) ndi_prop_remove(DDI_DEV_T_NONE, dip,
1220 "hotplug-capable");
1221 }
1222
1223 ndi_set_bus_private(dip, B_TRUE, NULL, NULL);
1224 kmem_free(bus_p, sizeof (pcie_bus_t));
1225 }
1226 }
1227
1228 int
1229 pcie_postattach_child(dev_info_t *cdip)
1230 {
1231 pcie_bus_t *bus_p = PCIE_DIP2BUS(cdip);
1232
1233 if (!bus_p)
1234 return (DDI_FAILURE);
1235
1236 return (pcie_enable_ce(cdip));
1237 }
1238
1239 /*
1240 * PCI-Express child device de-initialization.
1241 * This function disables generic pci-express interrupts and error
1242 * handling.
1243 */
1244 void
1245 pcie_uninitchild(dev_info_t *cdip)
1246 {
1247 pcie_disable_errors(cdip);
1248 pcie_fini_cfghdl(cdip);
1249 pcie_fini_dom(cdip);
1250 }
1251
1252 /*
1253 * find the root complex dip
1254 */
1255 dev_info_t *
1256 pcie_get_rc_dip(dev_info_t *dip)
1257 {
1258 dev_info_t *rcdip;
1259 pcie_bus_t *rc_bus_p;
1260
1261 for (rcdip = ddi_get_parent(dip); rcdip;
1262 rcdip = ddi_get_parent(rcdip)) {
1263 rc_bus_p = PCIE_DIP2BUS(rcdip);
1264 if (rc_bus_p && PCIE_IS_RC(rc_bus_p))
1265 break;
1266 }
1267
1268 return (rcdip);
1269 }
1270
1271 static boolean_t
1272 pcie_is_pci_device(dev_info_t *dip)
1273 {
1274 dev_info_t *pdip;
1275 char *device_type;
1276
1277 pdip = ddi_get_parent(dip);
1278 ASSERT(pdip);
1279
1280 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip, DDI_PROP_DONTPASS,
1281 "device_type", &device_type) != DDI_PROP_SUCCESS)
1282 return (B_FALSE);
1283
1284 if (strcmp(device_type, "pciex") != 0 &&
1285 strcmp(device_type, "pci") != 0) {
1286 ddi_prop_free(device_type);
1287 return (B_FALSE);
1288 }
1289
1290 ddi_prop_free(device_type);
1291 return (B_TRUE);
1292 }
1293
/* Walker argument for pcie_fab_do_init_fini() */
typedef struct {
	boolean_t init;		/* B_TRUE: init bus_t; B_FALSE: fini */
	uint8_t	flags;		/* PCIE_BUS_* flags passed through */
} pcie_bus_arg_t;
1298
1299 /*ARGSUSED*/
1300 static int
1301 pcie_fab_do_init_fini(dev_info_t *dip, void *arg)
1302 {
1303 pcie_req_id_t bdf;
1304 pcie_bus_arg_t *bus_arg = (pcie_bus_arg_t *)arg;
1305
1306 if (!pcie_is_pci_device(dip))
1307 goto out;
1308
1309 if (bus_arg->init) {
1310 if (pcie_get_bdf_from_dip(dip, &bdf) != DDI_SUCCESS)
1311 goto out;
1312
1313 (void) pcie_init_bus(dip, bdf, bus_arg->flags);
1314 } else {
1315 (void) pcie_fini_bus(dip, bus_arg->flags);
1316 }
1317
1318 return (DDI_WALK_CONTINUE);
1319
1320 out:
1321 return (DDI_WALK_PRUNECHILD);
1322 }
1323
1324 void
1325 pcie_fab_init_bus(dev_info_t *rcdip, uint8_t flags)
1326 {
1327 int circular_count;
1328 dev_info_t *dip = ddi_get_child(rcdip);
1329 pcie_bus_arg_t arg;
1330
1331 arg.init = B_TRUE;
1332 arg.flags = flags;
1333
1334 ndi_devi_enter(rcdip, &circular_count);
1335 ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
1336 ndi_devi_exit(rcdip, circular_count);
1337 }
1338
1339 void
1340 pcie_fab_fini_bus(dev_info_t *rcdip, uint8_t flags)
1341 {
1342 int circular_count;
1343 dev_info_t *dip = ddi_get_child(rcdip);
1344 pcie_bus_arg_t arg;
1345
1346 arg.init = B_FALSE;
1347 arg.flags = flags;
1348
1349 ndi_devi_enter(rcdip, &circular_count);
1350 ddi_walk_devs(dip, pcie_fab_do_init_fini, &arg);
1351 ndi_devi_exit(rcdip, circular_count);
1352 }
1353
/*
 * Enable generic PCI Express error reporting for a device.
 *
 * Clears any latched errors, then programs: the baseline Device Control
 * error enables (CE reporting deliberately left off; see pcie_enable_ce()),
 * Root Control system-error forwarding on root devices, and — when an AER
 * capability is present — the AER severity/mask registers, ECRC control,
 * the secondary (bridge) AER registers, and the Root Error Command
 * register.  Default values come from the pcie_* tunables.
 */
void
pcie_enable_errors(dev_info_t *dip)
{
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint16_t reg16, tmp16;
	uint32_t reg32, tmp32;

	ASSERT(bus_p);

	/*
	 * Clear any pending errors
	 */
	pcie_clear_errors(dip);

	/* The rest of this function only applies to PCIe devices */
	if (!PCIE_IS_PCIE(bus_p))
		return;

	/*
	 * Enable Baseline Error Handling but leave CE reporting off (poweron
	 * default).
	 */
	if ((reg16 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL)) !=
	    PCI_CAP_EINVAL16) {
		/*
		 * Keep the device's current MPS/MRRS fields, take everything
		 * else from pcie_devctl_default, and add the baseline error
		 * enables minus the CE reporting bit.
		 */
		tmp16 = (reg16 & (PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_devctl_default & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
		    PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
		    (pcie_base_err_default & (~PCIE_DEVCTL_CE_REPORTING_EN));

		PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, reg16);
	}

	/* Enable Root Port Baseline Error Receiving */
	if (PCIE_IS_ROOT(bus_p) &&
	    (reg16 = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL)) !=
	    PCI_CAP_EINVAL16) {

		/* pcie_serr_disable_flag suppresses SERR forwarding */
		tmp16 = pcie_serr_disable_flag ?
		    (pcie_root_ctrl_default & ~PCIE_ROOT_SYS_ERR) :
		    pcie_root_ctrl_default;
		PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, tmp16);
		PCIE_DBG_CAP(dip, bus_p, "ROOT DEVCTL", 16, PCIE_ROOTCTL,
		    reg16);
	}

	/*
	 * Enable PCI-Express Advanced Error Handling if Exists
	 */
	if (!PCIE_HAS_AER(bus_p))
		return;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE SEV", 32, PCIE_AER_UCE_SERV,
		    reg32);
	}

	/* Enable Uncorrectable errors */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_UCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_uce_mask;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER UCE MASK", 32, PCIE_AER_UCE_MASK,
		    reg32);
	}

	/* Enable ECRC generation and checking */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
	    PCI_CAP_EINVAL32) {
		/* OR in the tunable so other AER control bits are kept */
		tmp32 = reg32 | pcie_ecrc_value;
		PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER CTL", 32, PCIE_AER_CTL, reg32);
	}

	/* Enable Secondary Uncorrectable errors if this is a bridge */
	if (!PCIE_IS_PCIE_BDG(bus_p))
		goto root;

	/* Set Uncorrectable Severity */
	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_SERV)) !=
	    PCI_CAP_EINVAL32) {
		tmp32 = pcie_aer_suce_severity;

		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_SERV, tmp32);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE SEV", 32, PCIE_AER_SUCE_SERV,
		    reg32);
	}

	if ((reg32 = PCIE_AER_GET(32, bus_p, PCIE_AER_SUCE_MASK)) !=
	    PCI_CAP_EINVAL32) {
		PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, pcie_aer_suce_mask);
		PCIE_DBG_AER(dip, bus_p, "AER SUCE MASK", 32,
		    PCIE_AER_SUCE_MASK, reg32);
	}

root:
	/*
	 * Enable Root Control this is a Root device
	 */
	if (!PCIE_IS_ROOT(bus_p))
		return;

	if ((reg16 = PCIE_AER_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
	    PCI_CAP_EINVAL16) {
		PCIE_AER_PUT(16, bus_p, PCIE_AER_RE_CMD,
		    pcie_root_error_cmd_default);
		PCIE_DBG_AER(dip, bus_p, "AER Root Err Cmd", 16,
		    PCIE_AER_RE_CMD, reg16);
	}
}
1470
1471 /*
1472 * This function is used for enabling CE reporting and setting the AER CE mask.
1473 * When called from outside the pcie module it should always be preceded by
1474 * a call to pcie_enable_errors.
1475 */
1476 int
1477 pcie_enable_ce(dev_info_t *dip)
1478 {
1479 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
1480 uint16_t device_sts, device_ctl;
1481 uint32_t tmp_pcie_aer_ce_mask;
1482
1483 if (!PCIE_IS_PCIE(bus_p))
1484 return (DDI_SUCCESS);
1485
1486 /*
1487 * The "pcie_ce_mask" property is used to control both the CE reporting
1488 * enable field in the device control register and the AER CE mask. We
1489 * leave CE reporting disabled if pcie_ce_mask is set to -1.
1490 */
1491
1492 tmp_pcie_aer_ce_mask = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY, dip,
1493 DDI_PROP_DONTPASS, "pcie_ce_mask", pcie_aer_ce_mask);
1494
1495 if (tmp_pcie_aer_ce_mask == (uint32_t)-1) {
1496 /*
1497 * Nothing to do since CE reporting has already been disabled.
1498 */
1499 return (DDI_SUCCESS);
1500 }
1501
1502 if (PCIE_HAS_AER(bus_p)) {
1503 /* Enable AER CE */
1504 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, tmp_pcie_aer_ce_mask);
1505 PCIE_DBG_AER(dip, bus_p, "AER CE MASK", 32, PCIE_AER_CE_MASK,
1506 0);
1507
1508 /* Clear any pending AER CE errors */
1509 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_STS, -1);
1510 }
1511
1512 /* clear any pending CE errors */
1513 if ((device_sts = PCIE_CAP_GET(16, bus_p, PCIE_DEVSTS)) !=
1514 PCI_CAP_EINVAL16)
1515 PCIE_CAP_PUT(16, bus_p, PCIE_DEVSTS,
1516 device_sts & (~PCIE_DEVSTS_CE_DETECTED));
1517
1518 /* Enable CE reporting */
1519 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
1520 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL,
1521 (device_ctl & (~PCIE_DEVCTL_ERR_MASK)) | pcie_base_err_default);
1522 PCIE_DBG_CAP(dip, bus_p, "DEVCTL", 16, PCIE_DEVCTL, device_ctl);
1523
1524 return (DDI_SUCCESS);
1525 }
1526
1527 /* ARGSUSED */
1528 void
1529 pcie_disable_errors(dev_info_t *dip)
1530 {
1531 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
1532 uint16_t device_ctl;
1533 uint32_t aer_reg;
1534
1535 if (!PCIE_IS_PCIE(bus_p))
1536 return;
1537
1538 /*
1539 * Disable PCI-Express Baseline Error Handling
1540 */
1541 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
1542 device_ctl &= ~PCIE_DEVCTL_ERR_MASK;
1543 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, device_ctl);
1544
1545 /*
1546 * Disable PCI-Express Advanced Error Handling if Exists
1547 */
1548 if (!PCIE_HAS_AER(bus_p))
1549 goto root;
1550
1551 /* Disable Uncorrectable errors */
1552 PCIE_AER_PUT(32, bus_p, PCIE_AER_UCE_MASK, PCIE_AER_UCE_BITS);
1553
1554 /* Disable Correctable errors */
1555 PCIE_AER_PUT(32, bus_p, PCIE_AER_CE_MASK, PCIE_AER_CE_BITS);
1556
1557 /* Disable ECRC generation and checking */
1558 if ((aer_reg = PCIE_AER_GET(32, bus_p, PCIE_AER_CTL)) !=
1559 PCI_CAP_EINVAL32) {
1560 aer_reg &= ~(PCIE_AER_CTL_ECRC_GEN_ENA |
1561 PCIE_AER_CTL_ECRC_CHECK_ENA);
1562
1563 PCIE_AER_PUT(32, bus_p, PCIE_AER_CTL, aer_reg);
1564 }
1565 /*
1566 * Disable Secondary Uncorrectable errors if this is a bridge
1567 */
1568 if (!PCIE_IS_PCIE_BDG(bus_p))
1569 goto root;
1570
1571 PCIE_AER_PUT(32, bus_p, PCIE_AER_SUCE_MASK, PCIE_AER_SUCE_BITS);
1572
1573 root:
1574 /*
1575 * disable Root Control this is a Root device
1576 */
1577 if (!PCIE_IS_ROOT(bus_p))
1578 return;
1579
1580 if (!pcie_serr_disable_flag) {
1581 device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_ROOTCTL);
1582 device_ctl &= ~PCIE_ROOT_SYS_ERR;
1583 PCIE_CAP_PUT(16, bus_p, PCIE_ROOTCTL, device_ctl);
1584 }
1585
1586 if (!PCIE_HAS_AER(bus_p))
1587 return;
1588
1589 if ((device_ctl = PCIE_CAP_GET(16, bus_p, PCIE_AER_RE_CMD)) !=
1590 PCI_CAP_EINVAL16) {
1591 device_ctl &= ~pcie_root_error_cmd_default;
1592 PCIE_CAP_PUT(16, bus_p, PCIE_AER_RE_CMD, device_ctl);
1593 }
1594 }
1595
1596 /*
1597 * Extract bdf from "reg" property.
1598 */
1599 int
1600 pcie_get_bdf_from_dip(dev_info_t *dip, pcie_req_id_t *bdf)
1601 {
1602 pci_regspec_t *regspec;
1603 int reglen;
1604
1605 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1606 "reg", (int **)®spec, (uint_t *)®len) != DDI_SUCCESS)
1607 return (DDI_FAILURE);
1608
1609 if (reglen < (sizeof (pci_regspec_t) / sizeof (int))) {
1610 ddi_prop_free(regspec);
1611 return (DDI_FAILURE);
1612 }
1613
1614 /* Get phys_hi from first element. All have same bdf. */
1615 *bdf = (regspec->pci_phys_hi & (PCI_REG_BDFR_M ^ PCI_REG_REG_M)) >> 8;
1616
1617 ddi_prop_free(regspec);
1618 return (DDI_SUCCESS);
1619 }
1620
1621 dev_info_t *
1622 pcie_get_my_childs_dip(dev_info_t *dip, dev_info_t *rdip)
1623 {
1624 dev_info_t *cdip = rdip;
1625
1626 for (; ddi_get_parent(cdip) != dip; cdip = ddi_get_parent(cdip))
1627 ;
1628
1629 return (cdip);
1630 }
1631
1632 uint32_t
1633 pcie_get_bdf_for_dma_xfer(dev_info_t *dip, dev_info_t *rdip)
1634 {
1635 dev_info_t *cdip;
1636
1637 /*
1638 * As part of the probing, the PCI fcode interpreter may setup a DMA
1639 * request if a given card has a fcode on it using dip and rdip of the
1640 * hotplug connector i.e, dip and rdip of px/pcieb driver. In this
1641 * case, return a invalid value for the bdf since we cannot get to the
1642 * bdf value of the actual device which will be initiating this DMA.
1643 */
1644 if (rdip == dip)
1645 return (PCIE_INVALID_BDF);
1646
1647 cdip = pcie_get_my_childs_dip(dip, rdip);
1648
1649 /*
1650 * For a given rdip, return the bdf value of dip's (px or pcieb)
1651 * immediate child or secondary bus-id if dip is a PCIe2PCI bridge.
1652 *
1653 * XXX - For now, return a invalid bdf value for all PCI and PCI-X
1654 * devices since this needs more work.
1655 */
1656 return (PCI_GET_PCIE2PCI_SECBUS(cdip) ?
1657 PCIE_INVALID_BDF : PCI_GET_BDF(cdip));
1658 }
1659
1660 uint32_t
1661 pcie_get_aer_uce_mask() {
1662 return (pcie_aer_uce_mask);
1663 }
1664 uint32_t
1665 pcie_get_aer_ce_mask() {
1666 return (pcie_aer_ce_mask);
1667 }
1668 uint32_t
1669 pcie_get_aer_suce_mask() {
1670 return (pcie_aer_suce_mask);
1671 }
1672 uint32_t
1673 pcie_get_serr_mask() {
1674 return (pcie_serr_disable_flag);
1675 }
1676
1677 void
1678 pcie_set_aer_uce_mask(uint32_t mask) {
1679 pcie_aer_uce_mask = mask;
1680 if (mask & PCIE_AER_UCE_UR)
1681 pcie_base_err_default &= ~PCIE_DEVCTL_UR_REPORTING_EN;
1682 else
1683 pcie_base_err_default |= PCIE_DEVCTL_UR_REPORTING_EN;
1684
1685 if (mask & PCIE_AER_UCE_ECRC)
1686 pcie_ecrc_value = 0;
1687 }
1688
1689 void
1690 pcie_set_aer_ce_mask(uint32_t mask) {
1691 pcie_aer_ce_mask = mask;
1692 }
1693 void
1694 pcie_set_aer_suce_mask(uint32_t mask) {
1695 pcie_aer_suce_mask = mask;
1696 }
1697 void
1698 pcie_set_serr_mask(uint32_t mask) {
1699 pcie_serr_disable_flag = mask;
1700 }
1701
1702 /*
1703 * Is the rdip a child of dip. Used for checking certain CTLOPS from bubbling
1704 * up erronously. Ex. ISA ctlops to a PCI-PCI Bridge.
1705 */
1706 boolean_t
1707 pcie_is_child(dev_info_t *dip, dev_info_t *rdip)
1708 {
1709 dev_info_t *cdip = ddi_get_child(dip);
1710 for (; cdip; cdip = ddi_get_next_sibling(cdip))
1711 if (cdip == rdip)
1712 break;
1713 return (cdip != NULL);
1714 }
1715
1716 boolean_t
1717 pcie_is_link_disabled(dev_info_t *dip)
1718 {
1719 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
1720
1721 if (PCIE_IS_PCIE(bus_p)) {
1722 if (PCIE_CAP_GET(16, bus_p, PCIE_LINKCTL) &
1723 PCIE_LINKCTL_LINK_DISABLE)
1724 return (B_TRUE);
1725 }
1726 return (B_FALSE);
1727 }
1728
1729 /*
1730 * Initialize the MPS for a root port.
1731 *
1732 * dip - dip of root port device.
1733 */
1734 void
1735 pcie_init_root_port_mps(dev_info_t *dip)
1736 {
1737 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
1738 int rp_cap, max_supported = pcie_max_mps;
1739
1740 (void) pcie_get_fabric_mps(ddi_get_parent(dip),
1741 ddi_get_child(dip), &max_supported);
1742
1743 rp_cap = PCI_CAP_GET16(bus_p->bus_cfg_hdl, NULL,
1744 bus_p->bus_pcie_off, PCIE_DEVCAP) &
1745 PCIE_DEVCAP_MAX_PAYLOAD_MASK;
1746
1747 if (rp_cap < max_supported)
1748 max_supported = rp_cap;
1749
1750 bus_p->bus_mps = max_supported;
1751 (void) pcie_initchild_mps(dip);
1752 }
1753
1754 /*
1755 * Initialize the Maximum Payload Size of a device.
1756 *
1757 * cdip - dip of device.
1758 *
1759 * returns - DDI_SUCCESS or DDI_FAILURE
1760 */
1761 int
1762 pcie_initchild_mps(dev_info_t *cdip)
1763 {
1764 pcie_bus_t *bus_p;
1765 dev_info_t *pdip = ddi_get_parent(cdip);
1766 uint8_t dev_type;
1767
1768 bus_p = PCIE_DIP2BUS(cdip);
1769 if (bus_p == NULL) {
1770 PCIE_DBG("%s: BUS not found.\n",
1771 ddi_driver_name(cdip));
1772 return (DDI_FAILURE);
1773 }
1774
1775 dev_type = bus_p->bus_dev_type;
1776
1777 /*
1778 * For ARI Devices, only function zero's MPS needs to be set.
1779 */
1780 if ((dev_type == PCIE_PCIECAP_DEV_TYPE_PCIE_DEV) &&
1781 (pcie_ari_is_enabled(pdip) == PCIE_ARI_FORW_ENABLED)) {
1782 pcie_req_id_t child_bdf;
1783
1784 if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
1785 return (DDI_FAILURE);
1786 if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) != 0)
1787 return (DDI_SUCCESS);
1788 }
1789
1790 if (PCIE_IS_PCIE(bus_p)) {
1791 int suggested_mrrs, fabric_mps;
1792 uint16_t device_mps, device_mps_cap, device_mrrs, dev_ctrl;
1793
1794 dev_ctrl = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL);
1795 if ((fabric_mps = (PCIE_IS_RP(bus_p) ? bus_p :
1796 PCIE_DIP2BUS(pdip))->bus_mps) < 0) {
1797 dev_ctrl = (dev_ctrl & ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
1798 PCIE_DEVCTL_MAX_PAYLOAD_MASK)) |
1799 (pcie_devctl_default &
1800 (PCIE_DEVCTL_MAX_READ_REQ_MASK |
1801 PCIE_DEVCTL_MAX_PAYLOAD_MASK));
1802
1803 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
1804 return (DDI_SUCCESS);
1805 }
1806
1807 device_mps_cap = PCIE_CAP_GET(16, bus_p, PCIE_DEVCAP) &
1808 PCIE_DEVCAP_MAX_PAYLOAD_MASK;
1809
1810 device_mrrs = (dev_ctrl & PCIE_DEVCTL_MAX_READ_REQ_MASK) >>
1811 PCIE_DEVCTL_MAX_READ_REQ_SHIFT;
1812
1813 if (device_mps_cap < fabric_mps)
1814 device_mrrs = device_mps = device_mps_cap;
1815 else
1816 device_mps = (uint16_t)fabric_mps;
1817
1818 suggested_mrrs = (uint32_t)ddi_prop_get_int(DDI_DEV_T_ANY,
1819 cdip, DDI_PROP_DONTPASS, "suggested-mrrs", device_mrrs);
1820
1821 if ((device_mps == fabric_mps) ||
1822 (suggested_mrrs < device_mrrs))
1823 device_mrrs = (uint16_t)suggested_mrrs;
1824
1825 /*
1826 * Replace MPS and MRRS settings.
1827 */
1828 dev_ctrl &= ~(PCIE_DEVCTL_MAX_READ_REQ_MASK |
1829 PCIE_DEVCTL_MAX_PAYLOAD_MASK);
1830
1831 dev_ctrl |= ((device_mrrs << PCIE_DEVCTL_MAX_READ_REQ_SHIFT) |
1832 device_mps << PCIE_DEVCTL_MAX_PAYLOAD_SHIFT);
1833
1834 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL, dev_ctrl);
1835
1836 bus_p->bus_mps = device_mps;
1837 }
1838
1839 return (DDI_SUCCESS);
1840 }
1841
1842 /*
1843 * Scans a device tree/branch for a maximum payload size capabilities.
1844 *
1845 * rc_dip - dip of Root Complex.
1846 * dip - dip of device where scan will begin.
1847 * max_supported (IN) - maximum allowable MPS.
1848 * max_supported (OUT) - maximum payload size capability of fabric.
1849 */
1850 void
1851 pcie_get_fabric_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
1852 {
1853 if (dip == NULL)
1854 return;
1855
1856 /*
1857 * Perform a fabric scan to obtain Maximum Payload Capabilities
1858 */
1859 (void) pcie_scan_mps(rc_dip, dip, max_supported);
1860
1861 PCIE_DBG("MPS: Highest Common MPS= %x\n", max_supported);
1862 }
1863
1864 /*
1865 * Scans fabric and determines Maximum Payload Size based on
1866 * highest common denominator alogorithm
1867 */
1868 static void
1869 pcie_scan_mps(dev_info_t *rc_dip, dev_info_t *dip, int *max_supported)
1870 {
1871 int circular_count;
1872 pcie_max_supported_t max_pay_load_supported;
1873
1874 max_pay_load_supported.dip = rc_dip;
1875 max_pay_load_supported.highest_common_mps = *max_supported;
1876
1877 ndi_devi_enter(ddi_get_parent(dip), &circular_count);
1878 ddi_walk_devs(dip, pcie_get_max_supported,
1879 (void *)&max_pay_load_supported);
1880 ndi_devi_exit(ddi_get_parent(dip), circular_count);
1881
1882 *max_supported = max_pay_load_supported.highest_common_mps;
1883 }
1884
1885 /*
1886 * Called as part of the Maximum Payload Size scan.
1887 */
1888 static int
1889 pcie_get_max_supported(dev_info_t *dip, void *arg)
1890 {
1891 uint32_t max_supported;
1892 uint16_t cap_ptr;
1893 pcie_max_supported_t *current = (pcie_max_supported_t *)arg;
1894 pci_regspec_t *reg;
1895 int rlen;
1896 caddr_t virt;
1897 ddi_acc_handle_t config_handle;
1898
1899 if (ddi_get_child(current->dip) == NULL) {
1900 goto fail1;
1901 }
1902
1903 if (pcie_dev(dip) == DDI_FAILURE) {
1904 PCIE_DBG("MPS: pcie_get_max_supported: %s: "
1905 "Not a PCIe dev\n", ddi_driver_name(dip));
1906 goto fail1;
1907 }
1908
1909 /*
1910 * If the suggested-mrrs property exists, then don't include this
1911 * device in the MPS capabilities scan.
1912 */
1913 if (ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1914 "suggested-mrrs") != 0)
1915 goto fail1;
1916
1917 if (ddi_getlongprop(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "reg",
1918 (caddr_t)®, &rlen) != DDI_PROP_SUCCESS) {
1919 PCIE_DBG("MPS: pcie_get_max_supported: %s: "
1920 "Can not read reg\n", ddi_driver_name(dip));
1921 goto fail1;
1922 }
1923
1924 if (pcie_map_phys(ddi_get_child(current->dip), reg, &virt,
1925 &config_handle) != DDI_SUCCESS) {
1926 PCIE_DBG("MPS: pcie_get_max_supported: %s: pcie_map_phys "
1927 "failed\n", ddi_driver_name(dip));
1928 goto fail2;
1929 }
1930
1931 if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E, &cap_ptr)) ==
1932 DDI_FAILURE) {
1933 goto fail3;
1934 }
1935
1936 max_supported = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
1937 PCIE_DEVCAP) & PCIE_DEVCAP_MAX_PAYLOAD_MASK;
1938
1939 PCIE_DBG("PCIE MPS: %s: MPS Capabilities %x\n", ddi_driver_name(dip),
1940 max_supported);
1941
1942 if (max_supported < current->highest_common_mps)
1943 current->highest_common_mps = max_supported;
1944
1945 fail3:
1946 pcie_unmap_phys(&config_handle, reg);
1947 fail2:
1948 kmem_free(reg, rlen);
1949 fail1:
1950 return (DDI_WALK_CONTINUE);
1951 }
1952
1953 /*
1954 * Determines if there are any root ports attached to a root complex.
1955 *
1956 * dip - dip of root complex
1957 *
1958 * Returns - DDI_SUCCESS if there is at least one root port otherwise
1959 * DDI_FAILURE.
1960 */
1961 int
1962 pcie_root_port(dev_info_t *dip)
1963 {
1964 int port_type;
1965 uint16_t cap_ptr;
1966 ddi_acc_handle_t config_handle;
1967 dev_info_t *cdip = ddi_get_child(dip);
1968
1969 /*
1970 * Determine if any of the children of the passed in dip
1971 * are root ports.
1972 */
1973 for (; cdip; cdip = ddi_get_next_sibling(cdip)) {
1974
1975 if (pci_config_setup(cdip, &config_handle) != DDI_SUCCESS)
1976 continue;
1977
1978 if ((PCI_CAP_LOCATE(config_handle, PCI_CAP_ID_PCI_E,
1979 &cap_ptr)) == DDI_FAILURE) {
1980 pci_config_teardown(&config_handle);
1981 continue;
1982 }
1983
1984 port_type = PCI_CAP_GET16(config_handle, NULL, cap_ptr,
1985 PCIE_PCIECAP) & PCIE_PCIECAP_DEV_TYPE_MASK;
1986
1987 pci_config_teardown(&config_handle);
1988
1989 if (port_type == PCIE_PCIECAP_DEV_TYPE_ROOT)
1990 return (DDI_SUCCESS);
1991 }
1992
1993 /* No root ports were found */
1994
1995 return (DDI_FAILURE);
1996 }
1997
1998 /*
1999 * Function that determines if a device a PCIe device.
2000 *
2001 * dip - dip of device.
2002 *
2003 * returns - DDI_SUCCESS if device is a PCIe device, otherwise DDI_FAILURE.
2004 */
2005 int
2006 pcie_dev(dev_info_t *dip)
2007 {
2008 /* get parent device's device_type property */
2009 char *device_type;
2010 int rc = DDI_FAILURE;
2011 dev_info_t *pdip = ddi_get_parent(dip);
2012
2013 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
2014 DDI_PROP_DONTPASS, "device_type", &device_type)
2015 != DDI_PROP_SUCCESS) {
2016 return (DDI_FAILURE);
2017 }
2018
2019 if (strcmp(device_type, "pciex") == 0)
2020 rc = DDI_SUCCESS;
2021 else
2022 rc = DDI_FAILURE;
2023
2024 ddi_prop_free(device_type);
2025 return (rc);
2026 }
2027
2028 /*
2029 * Function to map in a device's memory space.
2030 */
2031 static int
2032 pcie_map_phys(dev_info_t *dip, pci_regspec_t *phys_spec,
2033 caddr_t *addrp, ddi_acc_handle_t *handlep)
2034 {
2035 ddi_map_req_t mr;
2036 ddi_acc_hdl_t *hp;
2037 int result;
2038 ddi_device_acc_attr_t attr;
2039
2040 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2041 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
2042 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2043 attr.devacc_attr_access = DDI_CAUTIOUS_ACC;
2044
2045 *handlep = impl_acc_hdl_alloc(KM_SLEEP, NULL);
2046 hp = impl_acc_hdl_get(*handlep);
2047 hp->ah_vers = VERS_ACCHDL;
2048 hp->ah_dip = dip;
2049 hp->ah_rnumber = 0;
2050 hp->ah_offset = 0;
2051 hp->ah_len = 0;
2052 hp->ah_acc = attr;
2053
2054 mr.map_op = DDI_MO_MAP_LOCKED;
2055 mr.map_type = DDI_MT_REGSPEC;
2056 mr.map_obj.rp = (struct regspec *)phys_spec;
2057 mr.map_prot = PROT_READ | PROT_WRITE;
2058 mr.map_flags = DDI_MF_KERNEL_MAPPING;
2059 mr.map_handlep = hp;
2060 mr.map_vers = DDI_MAP_VERSION;
2061
2062 result = ddi_map(dip, &mr, 0, 0, addrp);
2063
2064 if (result != DDI_SUCCESS) {
2065 impl_acc_hdl_free(*handlep);
2066 *handlep = (ddi_acc_handle_t)NULL;
2067 } else {
2068 hp->ah_addr = *addrp;
2069 }
2070
2071 return (result);
2072 }
2073
2074 /*
2075 * Map out memory that was mapped in with pcie_map_phys();
2076 */
2077 static void
2078 pcie_unmap_phys(ddi_acc_handle_t *handlep, pci_regspec_t *ph)
2079 {
2080 ddi_map_req_t mr;
2081 ddi_acc_hdl_t *hp;
2082
2083 hp = impl_acc_hdl_get(*handlep);
2084 ASSERT(hp);
2085
2086 mr.map_op = DDI_MO_UNMAP;
2087 mr.map_type = DDI_MT_REGSPEC;
2088 mr.map_obj.rp = (struct regspec *)ph;
2089 mr.map_prot = PROT_READ | PROT_WRITE;
2090 mr.map_flags = DDI_MF_KERNEL_MAPPING;
2091 mr.map_handlep = hp;
2092 mr.map_vers = DDI_MAP_VERSION;
2093
2094 (void) ddi_map(hp->ah_dip, &mr, hp->ah_offset,
2095 hp->ah_len, &hp->ah_addr);
2096
2097 impl_acc_hdl_free(*handlep);
2098 *handlep = (ddi_acc_handle_t)NULL;
2099 }
2100
2101 void
2102 pcie_set_rber_fatal(dev_info_t *dip, boolean_t val)
2103 {
2104 pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
2105 bus_p->bus_pfd->pe_rber_fatal = val;
2106 }
2107
2108 /*
2109 * Return parent Root Port's pe_rber_fatal value.
2110 */
2111 boolean_t
2112 pcie_get_rber_fatal(dev_info_t *dip)
2113 {
2114 pcie_bus_t *bus_p = PCIE_DIP2UPBUS(dip);
2115 pcie_bus_t *rp_bus_p = PCIE_DIP2UPBUS(bus_p->bus_rp_dip);
2116 return (rp_bus_p->bus_pfd->pe_rber_fatal);
2117 }
2118
/*
 * Determine whether dip can perform ARI forwarding.
 *
 * Only downstream switch ports and root ports qualify; the port must
 * implement PCIe capability version 2.0 or later and advertise ARI
 * forwarding in Device Capabilities 2.  The pcie_disable_ari tunable
 * overrides everything.  Returns PCIE_ARI_FORW_SUPPORTED or
 * PCIE_ARI_FORW_NOT_SUPPORTED.
 */
int
pcie_ari_supported(dev_info_t *dip)
{
	uint32_t devcap2;
	uint16_t pciecap;
	pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
	uint8_t dev_type;

	PCIE_DBG("pcie_ari_supported: dip=%p\n", dip);

	if (bus_p == NULL)
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	dev_type = bus_p->bus_dev_type;

	/* Only ports with downstream links can forward ARI requests */
	if ((dev_type != PCIE_PCIECAP_DEV_TYPE_DOWN) &&
	    (dev_type != PCIE_PCIECAP_DEV_TYPE_ROOT))
		return (PCIE_ARI_FORW_NOT_SUPPORTED);

	if (pcie_disable_ari) {
		PCIE_DBG("pcie_ari_supported: dip=%p: ARI Disabled\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	/* DEVCAP2 only exists for capability version >= 2.0 */
	pciecap = PCIE_CAP_GET(16, bus_p, PCIE_PCIECAP);

	if ((pciecap & PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0) {
		PCIE_DBG("pcie_ari_supported: dip=%p: Not 2.0\n", dip);
		return (PCIE_ARI_FORW_NOT_SUPPORTED);
	}

	devcap2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCAP2);

	PCIE_DBG("pcie_ari_supported: dip=%p: DevCap2=0x%x\n",
	    dip, devcap2);

	if (devcap2 & PCIE_DEVCAP2_ARI_FORWARD) {
		PCIE_DBG("pcie_ari_supported: "
		    "dip=%p: ARI Forwarding is supported\n", dip);
		return (PCIE_ARI_FORW_SUPPORTED);
	}
	return (PCIE_ARI_FORW_NOT_SUPPORTED);
}
2162
2163 int
2164 pcie_ari_enable(dev_info_t *dip)
2165 {
2166 uint16_t devctl2;
2167 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2168
2169 PCIE_DBG("pcie_ari_enable: dip=%p\n", dip);
2170
2171 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2172 return (DDI_FAILURE);
2173
2174 devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2175 devctl2 |= PCIE_DEVCTL2_ARI_FORWARD_EN;
2176 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2177
2178 PCIE_DBG("pcie_ari_enable: dip=%p: writing 0x%x to DevCtl2\n",
2179 dip, devctl2);
2180
2181 return (DDI_SUCCESS);
2182 }
2183
2184 int
2185 pcie_ari_disable(dev_info_t *dip)
2186 {
2187 uint16_t devctl2;
2188 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2189
2190 PCIE_DBG("pcie_ari_disable: dip=%p\n", dip);
2191
2192 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2193 return (DDI_FAILURE);
2194
2195 devctl2 = PCIE_CAP_GET(16, bus_p, PCIE_DEVCTL2);
2196 devctl2 &= ~PCIE_DEVCTL2_ARI_FORWARD_EN;
2197 PCIE_CAP_PUT(16, bus_p, PCIE_DEVCTL2, devctl2);
2198
2199 PCIE_DBG("pcie_ari_disable: dip=%p: writing 0x%x to DevCtl2\n",
2200 dip, devctl2);
2201
2202 return (DDI_SUCCESS);
2203 }
2204
2205 int
2206 pcie_ari_is_enabled(dev_info_t *dip)
2207 {
2208 uint16_t devctl2;
2209 pcie_bus_t *bus_p = PCIE_DIP2BUS(dip);
2210
2211 PCIE_DBG("pcie_ari_is_enabled: dip=%p\n", dip);
2212
2213 if (pcie_ari_supported(dip) == PCIE_ARI_FORW_NOT_SUPPORTED)
2214 return (PCIE_ARI_FORW_DISABLED);
2215
2216 devctl2 = PCIE_CAP_GET(32, bus_p, PCIE_DEVCTL2);
2217
2218 PCIE_DBG("pcie_ari_is_enabled: dip=%p: DevCtl2=0x%x\n",
2219 dip, devctl2);
2220
2221 if (devctl2 & PCIE_DEVCTL2_ARI_FORWARD_EN) {
2222 PCIE_DBG("pcie_ari_is_enabled: "
2223 "dip=%p: ARI Forwarding is enabled\n", dip);
2224 return (PCIE_ARI_FORW_ENABLED);
2225 }
2226
2227 return (PCIE_ARI_FORW_DISABLED);
2228 }
2229
2230 int
2231 pcie_ari_device(dev_info_t *dip)
2232 {
2233 ddi_acc_handle_t handle;
2234 uint16_t cap_ptr;
2235
2236 PCIE_DBG("pcie_ari_device: dip=%p\n", dip);
2237
2238 /*
2239 * XXX - This function may be called before the bus_p structure
2240 * has been populated. This code can be changed to remove
2241 * pci_config_setup()/pci_config_teardown() when the RFE
2242 * to populate the bus_p structures early in boot is putback.
2243 */
2244
2245 /* First make sure it is a PCIe device */
2246
2247 if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
2248 return (PCIE_NOT_ARI_DEVICE);
2249
2250 if ((PCI_CAP_LOCATE(handle, PCI_CAP_ID_PCI_E, &cap_ptr))
2251 != DDI_SUCCESS) {
2252 pci_config_teardown(&handle);
2253 return (PCIE_NOT_ARI_DEVICE);
2254 }
2255
2256 /* Locate the ARI Capability */
2257
2258 if ((PCI_CAP_LOCATE(handle, PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI),
2259 &cap_ptr)) == DDI_FAILURE) {
2260 pci_config_teardown(&handle);
2261 return (PCIE_NOT_ARI_DEVICE);
2262 }
2263
2264 /* ARI Capability was found so it must be a ARI device */
2265 PCIE_DBG("pcie_ari_device: ARI Device dip=%p\n", dip);
2266
2267 pci_config_teardown(&handle);
2268 return (PCIE_ARI_DEVICE);
2269 }
2270
2271 int
2272 pcie_ari_get_next_function(dev_info_t *dip, int *func)
2273 {
2274 uint32_t val;
2275 uint16_t cap_ptr, next_function;
2276 ddi_acc_handle_t handle;
2277
2278 /*
2279 * XXX - This function may be called before the bus_p structure
2280 * has been populated. This code can be changed to remove
2281 * pci_config_setup()/pci_config_teardown() when the RFE
2282 * to populate the bus_p structures early in boot is putback.
2283 */
2284
2285 if (pci_config_setup(dip, &handle) != DDI_SUCCESS)
2286 return (DDI_FAILURE);
2287
2288 if ((PCI_CAP_LOCATE(handle,
2289 PCI_CAP_XCFG_SPC(PCIE_EXT_CAP_ID_ARI), &cap_ptr)) == DDI_FAILURE) {
2290 pci_config_teardown(&handle);
2291 return (DDI_FAILURE);
2292 }
2293
2294 val = PCI_CAP_GET32(handle, NULL, cap_ptr, PCIE_ARI_CAP);
2295
2296 next_function = (val >> PCIE_ARI_CAP_NEXT_FUNC_SHIFT) &
2297 PCIE_ARI_CAP_NEXT_FUNC_MASK;
2298
2299 pci_config_teardown(&handle);
2300
2301 *func = next_function;
2302
2303 return (DDI_SUCCESS);
2304 }
2305
2306 dev_info_t *
2307 pcie_func_to_dip(dev_info_t *dip, pcie_req_id_t function)
2308 {
2309 pcie_req_id_t child_bdf;
2310 dev_info_t *cdip;
2311
2312 for (cdip = ddi_get_child(dip); cdip;
2313 cdip = ddi_get_next_sibling(cdip)) {
2314
2315 if (pcie_get_bdf_from_dip(cdip, &child_bdf) == DDI_FAILURE)
2316 return (NULL);
2317
2318 if ((child_bdf & PCIE_REQ_ID_ARI_FUNC_MASK) == function)
2319 return (cdip);
2320 }
2321 return (NULL);
2322 }
2323
2324 #ifdef DEBUG
2325
/*
 * DEBUG only: dump the commonly interesting fields of a pcie_bus_t via
 * pcie_dbg().  Output format is fixed; callers (and anyone grepping
 * console logs) may depend on these exact strings.
 */
static void
pcie_print_bus(pcie_bus_t *bus_p)
{
	pcie_dbg("\tbus_dip = 0x%p\n", bus_p->bus_dip);
	pcie_dbg("\tbus_fm_flags = 0x%x\n", bus_p->bus_fm_flags);

	pcie_dbg("\tbus_bdf = 0x%x\n", bus_p->bus_bdf);
	pcie_dbg("\tbus_dev_ven_id = 0x%x\n", bus_p->bus_dev_ven_id);
	pcie_dbg("\tbus_rev_id = 0x%x\n", bus_p->bus_rev_id);
	pcie_dbg("\tbus_hdr_type = 0x%x\n", bus_p->bus_hdr_type);
	pcie_dbg("\tbus_dev_type = 0x%x\n", bus_p->bus_dev_type);
	pcie_dbg("\tbus_bdg_secbus = 0x%x\n", bus_p->bus_bdg_secbus);
	/* Capability/extended-capability offsets found at bus setup time. */
	pcie_dbg("\tbus_pcie_off = 0x%x\n", bus_p->bus_pcie_off);
	pcie_dbg("\tbus_aer_off = 0x%x\n", bus_p->bus_aer_off);
	pcie_dbg("\tbus_pcix_off = 0x%x\n", bus_p->bus_pcix_off);
	pcie_dbg("\tbus_ecc_ver = 0x%x\n", bus_p->bus_ecc_ver);
}
2343
2344 /*
2345 * For debugging purposes set pcie_dbg_print != 0 to see printf messages
2346 * during interrupt.
2347 *
2348 * When a proper solution is in place this code will disappear.
2349 * Potential solutions are:
2350 * o circular buffers
2351 * o taskq to print at lower pil
2352 */
2353 int pcie_dbg_print = 0;
2354 void
2355 pcie_dbg(char *fmt, ...)
2356 {
2357 va_list ap;
2358
2359 if (!pcie_debug_flags) {
2360 return;
2361 }
2362 va_start(ap, fmt);
2363 if (servicing_interrupt()) {
2364 if (pcie_dbg_print) {
2365 prom_vprintf(fmt, ap);
2366 }
2367 } else {
2368 prom_vprintf(fmt, ap);
2369 }
2370 va_end(ap);
2371 }
2372 #endif /* DEBUG */
2373
2374 #if defined(__i386) || defined(__amd64)
2375 static void
2376 pcie_check_io_mem_range(ddi_acc_handle_t cfg_hdl, boolean_t *empty_io_range,
2377 boolean_t *empty_mem_range)
2378 {
2379 uint8_t class, subclass;
2380 uint_t val;
2381
2382 class = pci_config_get8(cfg_hdl, PCI_CONF_BASCLASS);
2383 subclass = pci_config_get8(cfg_hdl, PCI_CONF_SUBCLASS);
2384
2385 if ((class == PCI_CLASS_BRIDGE) && (subclass == PCI_BRIDGE_PCI)) {
2386 val = (((uint_t)pci_config_get8(cfg_hdl, PCI_BCNF_IO_BASE_LOW) &
2387 PCI_BCNF_IO_MASK) << 8);
2388 /*
2389 * Assuming that a zero based io_range[0] implies an
2390 * invalid I/O range. Likewise for mem_range[0].
2391 */
2392 if (val == 0)
2393 *empty_io_range = B_TRUE;
2394 val = (((uint_t)pci_config_get16(cfg_hdl, PCI_BCNF_MEM_BASE) &
2395 PCI_BCNF_MEM_MASK) << 16);
2396 if (val == 0)
2397 *empty_mem_range = B_TRUE;
2398 }
2399 }
2400
2401 #endif /* defined(__i386) || defined(__amd64) */