/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright (c) 2012, Nexenta Systems, Inc. All rights reserved.
 */

/*
 * Code that is common between pci(7d) and npe(7d).  It covers:
 *	- interrupt code
 *	- pci_tools ioctl code
 *	- name_child code
 *	- set_parent_private_data code
 */

#include <sys/conf.h>
#include <sys/pci.h>
#include <sys/sunndi.h>
#include <sys/mach_intr.h>
#include <sys/pci_intr_lib.h>
#include <sys/psm.h>
#include <sys/policy.h>
#include <sys/sysmacros.h>
#include <sys/clock.h>
#include <sys/apic.h>
#include <sys/pci_tools.h>
#include <io/pci/pci_var.h>
#include <io/pci/pci_tools_ext.h>
#include <io/pci/pci_common.h>
#include <sys/pci_cfgspace.h>
#include <sys/pci_impl.h>
#include <sys/pci_cap.h>

/*
 * Function prototypes
 */
static int	pci_get_priority(dev_info_t *, ddi_intr_handle_impl_t *, int *);
static int	pci_enable_intr(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, uint32_t);
static void	pci_disable_intr(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, uint32_t);
static int	pci_alloc_intr_fixed(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *, void *);
static int	pci_free_intr_fixed(dev_info_t *, dev_info_t *,
		    ddi_intr_handle_impl_t *);

/* Extern declarations for PSM module */
extern int	(*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
		    psm_intr_op_t, int *);
extern ddi_irm_pool_t *apix_irm_pool_p;

/*
 * pci_common_name_child:
 *
 *	Assign the address portion of the node name
 */
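/*
 * The address is the device number in hex for function 0 (for example
 * "1d"), or "device,function" otherwise (for example "1d,2").  For .conf
 * children the "unit-address" property supplies the address verbatim.
 */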
int
pci_common_name_child(dev_info_t *child, char *name, int namelen)
{
	int		dev, func, length;
	char		**unit_addr;
	uint_t		n;
	pci_regspec_t	*pci_rp;

	if (ndi_dev_is_persistent_node(child) == 0) {
		/*
		 * For .conf node, use "unit-address" property
		 */
		if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, child,
		    DDI_PROP_DONTPASS, "unit-address", &unit_addr, &n) !=
		    DDI_PROP_SUCCESS) {
			cmn_err(CE_WARN, "cannot find unit-address in %s.conf",
			    ddi_get_name(child));
			return (DDI_FAILURE);
		}
		if (n != 1 || *unit_addr == NULL || **unit_addr == 0) {
			cmn_err(CE_WARN, "unit-address property in %s.conf"
			    " not well-formed", ddi_get_name(child));
			ddi_prop_free(unit_addr);
			return (DDI_FAILURE);
		}
		(void) snprintf(name, namelen, "%s", *unit_addr);
		ddi_prop_free(unit_addr);
		return (DDI_SUCCESS);
	}

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, child, DDI_PROP_DONTPASS,
	    "reg", (int **)&pci_rp, (uint_t *)&length) != DDI_PROP_SUCCESS) {
		cmn_err(CE_WARN, "cannot find reg property in %s",
		    ddi_get_name(child));
		return (DDI_FAILURE);
	}

	/* copy the device identifications */
	dev = PCI_REG_DEV_G(pci_rp->pci_phys_hi);
	func = PCI_REG_FUNC_G(pci_rp->pci_phys_hi);

	/*
	 * free the memory allocated by ddi_prop_lookup_int_array
	 */
	ddi_prop_free(pci_rp);

	if (func != 0) {
		(void) snprintf(name, namelen, "%x,%x", dev, func);
	} else {
		(void) snprintf(name, namelen, "%x", dev);
	}

	return (DDI_SUCCESS);
}

/*
 * Interrupt related code:
 *
 * The following busop is common to npe and pci drivers
 *	bus_introp
 */

/*
 * Create the ddi_parent_private_data for a pseudo child.
 */
void
pci_common_set_parent_private_data(dev_info_t *dip)
{
	struct ddi_parent_private_data *pdptr;

	pdptr = (struct ddi_parent_private_data *)kmem_zalloc(
	    (sizeof (struct ddi_parent_private_data) +
	    sizeof (struct intrspec)), KM_SLEEP);
	pdptr->par_intr = (struct intrspec *)(pdptr + 1);
	pdptr->par_nintr = 1;
	ddi_set_parent_data(dip, pdptr);
}

/*
 * pci_get_priority:
 *	Figure out the priority of the device
 */
static int
pci_get_priority(dev_info_t *dip, ddi_intr_handle_impl_t *hdlp, int *pri)
{
	struct intrspec *ispec;

	DDI_INTR_NEXDBG((CE_CONT, "pci_get_priority: dip = 0x%p, hdlp = %p\n",
	    (void *)dip, (void *)hdlp));

	if ((ispec = (struct intrspec *)pci_intx_get_ispec(dip, dip,
	    hdlp->ih_inum)) == NULL) {
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
			*pri = pci_class_to_pil(dip);
			pci_common_set_parent_private_data(hdlp->ih_dip);
			ispec = (struct intrspec *)pci_intx_get_ispec(dip, dip,
			    hdlp->ih_inum);
			return (DDI_SUCCESS);
		}
		return (DDI_FAILURE);
	}

	*pri = ispec->intrspec_pri;
	return (DDI_SUCCESS);
}

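/*
 * pcieb_intr_pri_counter alternates the priority (4 or 7) handed out to
 * successive pcieb MSI allocations; see the DDI_INTROP_ALLOC case below.
 */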
static int	pcieb_intr_pri_counter = 0;

/*
 * pci_common_intr_ops: bus_intr_op() function for interrupt support
 */
int
pci_common_intr_ops(dev_info_t *pdip, dev_info_t *rdip, ddi_intr_op_t intr_op,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	int			priority = 0;
	int			psm_status = 0;
	int			pci_status = 0;
	int			pci_rval, psm_rval = PSM_FAILURE;
	int			types = 0;
	int			pciepci = 0;
	int			i, j, count;
	int			rv;
	int			behavior;
	int			cap_ptr;
	boolean_t		did_pci_config_setup = B_FALSE;
	boolean_t		did_intr_vec_alloc = B_FALSE;
	boolean_t		did_msi_cap_set = B_FALSE;
	uint16_t		msi_cap_base, msix_cap_base, cap_ctrl;
	char			*prop;
	ddi_intrspec_t		isp;
	struct intrspec		*ispec;
	ddi_intr_handle_impl_t	tmp_hdl;
	ddi_intr_msix_t		*msix_p;
	ihdl_plat_t		*ihdl_plat_datap;
	ddi_intr_handle_t	*h_array;
	ddi_acc_handle_t	handle;
	apic_get_intr_t		intrinfo;

	DDI_INTR_NEXDBG((CE_CONT,
	    "pci_common_intr_ops: pdip 0x%p, rdip 0x%p, op %x handle 0x%p\n",
	    (void *)pdip, (void *)rdip, intr_op, (void *)hdlp));

	/* Process the request */
	switch (intr_op) {
	case DDI_INTROP_SUPPORTED_TYPES:
		/*
		 * First we determine the interrupt types supported by the
		 * device itself, then we filter them through what the OS
		 * and system support.  We determine system-level interrupt
		 * type support for anything other than fixed intrs through
		 * the psm_intr_ops vector.
		 */
		rv = DDI_FAILURE;

		/* Fixed supported by default */
		types = DDI_INTR_TYPE_FIXED;

		if (psm_intr_ops == NULL) {
			*(int *)result = types;
			return (DDI_SUCCESS);
		}
		if (pci_config_setup(rdip, &handle) != DDI_SUCCESS)
			return (DDI_FAILURE);

		/* Sanity test cap control values if found */

		if (PCI_CAP_LOCATE(handle, PCI_CAP_ID_MSI, &msi_cap_base) ==
		    DDI_SUCCESS) {
			cap_ctrl = PCI_CAP_GET16(handle, 0, msi_cap_base,
			    PCI_MSI_CTRL);
			if (cap_ctrl == PCI_CAP_EINVAL16)
				goto SUPPORTED_TYPES_OUT;

			types |= DDI_INTR_TYPE_MSI;
		}

		if (PCI_CAP_LOCATE(handle, PCI_CAP_ID_MSI_X, &msix_cap_base) ==
		    DDI_SUCCESS) {
			cap_ctrl = PCI_CAP_GET16(handle, 0, msix_cap_base,
			    PCI_MSIX_CTRL);
			if (cap_ctrl == PCI_CAP_EINVAL16)
				goto SUPPORTED_TYPES_OUT;

			types |= DDI_INTR_TYPE_MSIX;
		}

		/*
		 * Filter device-level types through system-level support
		 */
		tmp_hdl.ih_type = types;
		if ((*psm_intr_ops)(rdip, &tmp_hdl, PSM_INTR_OP_CHECK_MSI,
		    &types) != PSM_SUCCESS)
			goto SUPPORTED_TYPES_OUT;

		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "rdip: 0x%p supported types: 0x%x\n", (void *)rdip,
		    types));

		/*
		 * Export any MSI/MSI-X cap locations via properties
		 */
		if (types & DDI_INTR_TYPE_MSI) {
			if (ndi_prop_update_int(DDI_DEV_T_NONE, rdip,
			    "pci-msi-capid-pointer", (int)msi_cap_base) !=
			    DDI_PROP_SUCCESS)
				goto SUPPORTED_TYPES_OUT;
		}
		if (types & DDI_INTR_TYPE_MSIX) {
			if (ndi_prop_update_int(DDI_DEV_T_NONE, rdip,
			    "pci-msix-capid-pointer", (int)msix_cap_base) !=
			    DDI_PROP_SUCCESS)
				goto SUPPORTED_TYPES_OUT;
		}

		rv = DDI_SUCCESS;

SUPPORTED_TYPES_OUT:
		*(int *)result = types;
		pci_config_teardown(&handle);
		return (rv);

	case DDI_INTROP_NAVAIL:
	case DDI_INTROP_NINTRS:
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
			if (pci_msi_get_nintrs(hdlp->ih_dip, hdlp->ih_type,
			    result) != DDI_SUCCESS)
				return (DDI_FAILURE);
		} else {
			*(int *)result = i_ddi_get_intx_nintrs(hdlp->ih_dip);
			if (*(int *)result == 0)
				return (DDI_FAILURE);
		}
		break;
	case DDI_INTROP_ALLOC:

		/*
		 * FIXED type
		 */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			return (pci_alloc_intr_fixed(pdip, rdip, hdlp, result));
		/*
		 * MSI or MSIX (figure out number of vectors available)
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) &&
		    (psm_intr_ops != NULL) &&
		    (pci_get_priority(rdip, hdlp, &priority) == DDI_SUCCESS)) {
			/*
			 * Following check is a special case for 'pcieb'.
			 * This makes sure vectors with the right priority
			 * are allocated for pcieb during ALLOC time.
			 */
			if (strcmp(ddi_driver_name(rdip), "pcieb") == 0) {
				hdlp->ih_pri =
				    (pcieb_intr_pri_counter % 2) ? 4 : 7;
				pciepci = 1;
			} else
				hdlp->ih_pri = priority;
			behavior = (int)(uintptr_t)hdlp->ih_scratch2;

			/*
			 * Cache in the config handle and cap_ptr
			 */
			if (i_ddi_get_pci_config_handle(rdip) == NULL) {
				if (pci_config_setup(rdip, &handle) !=
				    DDI_SUCCESS)
					return (DDI_FAILURE);
				i_ddi_set_pci_config_handle(rdip, handle);
				did_pci_config_setup = B_TRUE;
			}

			prop = NULL;
			cap_ptr = 0;
			if (hdlp->ih_type == DDI_INTR_TYPE_MSI)
				prop = "pci-msi-capid-pointer";
			else if (hdlp->ih_type == DDI_INTR_TYPE_MSIX)
				prop = "pci-msix-capid-pointer";

			/*
			 * Enforce the calling of DDI_INTROP_SUPPORTED_TYPES
			 * for MSI(X) before allocation
			 */
			if (prop != NULL) {
				cap_ptr = ddi_prop_get_int(DDI_DEV_T_ANY, rdip,
				    DDI_PROP_DONTPASS, prop, 0);
				if (cap_ptr == 0) {
					DDI_INTR_NEXDBG((CE_CONT,
					    "pci_common_intr_ops: rdip: 0x%p "
					    "attempted MSI(X) alloc without "
					    "cap property\n", (void *)rdip));
					return (DDI_FAILURE);
				}
			}
			i_ddi_set_msi_msix_cap_ptr(rdip, cap_ptr);
			did_msi_cap_set = B_TRUE;

			/*
			 * Allocate interrupt vectors
			 */
			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_ALLOC_VECTORS, result);

			if (*(int *)result == 0) {
				rv = DDI_INTR_NOTFOUND;
				goto HANDLE_ALLOC_FAILURE;
			}
			did_intr_vec_alloc = B_TRUE;

			/* verify behavior flag and take appropriate action */
			if ((behavior == DDI_INTR_ALLOC_STRICT) &&
			    (*(int *)result < hdlp->ih_scratch1)) {
				DDI_INTR_NEXDBG((CE_CONT,
				    "pci_common_intr_ops: behavior %x, "
				    "couldn't get enough intrs\n", behavior));
				hdlp->ih_scratch1 = *(int *)result;
				rv = DDI_EAGAIN;
				goto HANDLE_ALLOC_FAILURE;
			}

			if (hdlp->ih_type == DDI_INTR_TYPE_MSIX) {
				if (!(msix_p = i_ddi_get_msix(hdlp->ih_dip))) {
					msix_p = pci_msix_init(hdlp->ih_dip);
					if (msix_p) {
						i_ddi_set_msix(hdlp->ih_dip,
						    msix_p);
					} else {
						DDI_INTR_NEXDBG((CE_CONT,
						    "pci_common_intr_ops: "
						    "MSI-X table "
						    "initialization failed"
						    ", rdip 0x%p inum 0x%x\n",
						    (void *)rdip,
						    hdlp->ih_inum));

						rv = DDI_FAILURE;
						goto HANDLE_ALLOC_FAILURE;
					}
				}
			}

			if (pciepci) {
				/* update priority in ispec */
				isp = pci_intx_get_ispec(pdip, rdip,
				    (int)hdlp->ih_inum);
				ispec = (struct intrspec *)isp;
				if (ispec)
					ispec->intrspec_pri = hdlp->ih_pri;
				++pcieb_intr_pri_counter;
			}

		} else
			return (DDI_FAILURE);
		break;

HANDLE_ALLOC_FAILURE:
		if (did_intr_vec_alloc == B_TRUE)
			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_FREE_VECTORS, NULL);
		if (did_msi_cap_set == B_TRUE)
			i_ddi_set_msi_msix_cap_ptr(rdip, 0);
		if (did_pci_config_setup == B_TRUE) {
			(void) pci_config_teardown(&handle);
			i_ddi_set_pci_config_handle(rdip, NULL);
		}
		return (rv);

	case DDI_INTROP_FREE:
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type) &&
		    (psm_intr_ops != NULL)) {
			if (i_ddi_intr_get_current_nintrs(hdlp->ih_dip) - 1 ==
			    0) {
				if ((handle = i_ddi_get_pci_config_handle(
				    rdip)) != NULL) {
					(void) pci_config_teardown(&handle);
					i_ddi_set_pci_config_handle(rdip, NULL);
				}
				if ((cap_ptr = i_ddi_get_msi_msix_cap_ptr(
				    rdip)) != 0)
					i_ddi_set_msi_msix_cap_ptr(rdip, 0);
			}

			(void) (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_FREE_VECTORS, NULL);

			if (hdlp->ih_type == DDI_INTR_TYPE_MSIX) {
				msix_p = i_ddi_get_msix(hdlp->ih_dip);
				if (msix_p &&
				    (i_ddi_intr_get_current_nintrs(
				    hdlp->ih_dip) - 1) == 0) {
					pci_msix_fini(msix_p);
					i_ddi_set_msix(hdlp->ih_dip, NULL);
				}
			}
		} else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
			return (pci_free_intr_fixed(pdip, rdip, hdlp));
		} else
			return (DDI_FAILURE);
		break;
	case DDI_INTROP_GETPRI:
		/* Get the priority */
		if (pci_get_priority(rdip, hdlp, &priority) != DDI_SUCCESS)
			return (DDI_FAILURE);
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "priority = 0x%x\n", priority));
		*(int *)result = priority;
		break;
	case DDI_INTROP_SETPRI:
		/* Validate the interrupt priority passed */
		if (*(int *)result > LOCK_LEVEL)
			return (DDI_FAILURE);

		/* Ensure that PSM is all initialized */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec == NULL)
			return (DDI_FAILURE);

		/* For fixed interrupts */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED) {
			/* if interrupt is shared, return failure */
			((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_SHARED, &psm_status);
			/*
			 * For fixed interrupts, the irq may not have been
			 * allocated when SET_PRI is called, and the above
			 * GET_SHARED op may return PSM_FAILURE. This is not
			 * a real error and is ignored below.
			 */
			if ((psm_rval != PSM_FAILURE) && (psm_status == 1)) {
				DDI_INTR_NEXDBG((CE_CONT,
				    "pci_common_intr_ops: "
				    "dip 0x%p cannot setpri, psm_rval=%d,"
				    "psm_status=%d\n", (void *)rdip, psm_rval,
				    psm_status));
				return (DDI_FAILURE);
			}
		}

		/* Change the priority */
		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_PRI, result) ==
		    PSM_FAILURE)
			return (DDI_FAILURE);

		/* update ispec */
		ispec->intrspec_pri = *(int *)result;
		break;
	case DDI_INTROP_ADDISR:
		/* update ispec */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec) {
			ispec->intrspec_func = hdlp->ih_cb_func;
			ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;
			pci_kstat_create(&ihdl_plat_datap->ip_ksp, pdip, hdlp);
		}
		break;
	case DDI_INTROP_REMISR:
		/* Get the interrupt structure pointer */
		isp = pci_intx_get_ispec(pdip, rdip, (int)hdlp->ih_inum);
		ispec = (struct intrspec *)isp;
		if (ispec) {
			ispec->intrspec_func = (uint_t (*)()) 0;
			ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;
			if (ihdl_plat_datap->ip_ksp != NULL)
				pci_kstat_delete(ihdl_plat_datap->ip_ksp);
		}
		break;
	case DDI_INTROP_GETCAP:
		/*
		 * First check the config space and/or
		 * MSI capability register(s)
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
			pci_rval = pci_msi_get_cap(rdip, hdlp->ih_type,
			    &pci_status);
		else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			pci_rval = pci_intx_get_cap(rdip, &pci_status);

		/* next check with PSM module */
		if (psm_intr_ops != NULL)
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_CAP, &psm_status);

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETCAP returned psm_rval = %x, "
		    "psm_status = %x, pci_rval = %x, pci_status = %x\n",
		    psm_rval, psm_status, pci_rval, pci_status));

		if (psm_rval == PSM_FAILURE && pci_rval == DDI_FAILURE) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}

		if (psm_rval == PSM_SUCCESS)
			*(int *)result = psm_status;

		if (pci_rval == DDI_SUCCESS)
			*(int *)result |= pci_status;

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETCAP returned = %x\n",
		    *(int *)result));
		break;
	case DDI_INTROP_SETCAP:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "SETCAP cap=0x%x\n", *(int *)result));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_SET_CAP, result)) {
			DDI_INTR_NEXDBG((CE_CONT, "SETCAP: psm_intr_ops"
			    " returned failure\n"));
			return (DDI_FAILURE);
		}
		break;
	case DDI_INTROP_ENABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: ENABLE\n"));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		if (pci_enable_intr(pdip, rdip, hdlp, hdlp->ih_inum) !=
		    DDI_SUCCESS)
			return (DDI_FAILURE);

		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: ENABLE "
		    "vector=0x%x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_DISABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: DISABLE\n"));
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		pci_disable_intr(pdip, rdip, hdlp, hdlp->ih_inum);
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: DISABLE "
		    "vector = %x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_BLOCKENABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "BLOCKENABLE\n"));
		if (hdlp->ih_type != DDI_INTR_TYPE_MSI) {
			DDI_INTR_NEXDBG((CE_CONT, "BLOCKENABLE: not MSI\n"));
			return (DDI_FAILURE);
		}

		/* Check if psm_intr_ops is present */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		count = hdlp->ih_scratch1;
		h_array = (ddi_intr_handle_t *)hdlp->ih_scratch2;
		for (i = 0; i < count; i++) {
			hdlp = (ddi_intr_handle_impl_t *)h_array[i];
			if (pci_enable_intr(pdip, rdip, hdlp,
			    hdlp->ih_inum) != DDI_SUCCESS) {
				DDI_INTR_NEXDBG((CE_CONT, "BLOCKENABLE: "
				    "pci_enable_intr failed for %d\n", i));
				for (j = 0; j < i; j++) {
					hdlp = (ddi_intr_handle_impl_t *)
					    h_array[j];
					pci_disable_intr(pdip, rdip, hdlp,
					    hdlp->ih_inum);
				}
				return (DDI_FAILURE);
			}
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "BLOCKENABLE inum %x done\n", hdlp->ih_inum));
		}
		break;
	case DDI_INTROP_BLOCKDISABLE:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
		    "BLOCKDISABLE\n"));
		if (hdlp->ih_type != DDI_INTR_TYPE_MSI) {
			DDI_INTR_NEXDBG((CE_CONT, "BLOCKDISABLE: not MSI\n"));
			return (DDI_FAILURE);
		}

		/* Check if psm_intr_ops is present */
		if (psm_intr_ops == NULL)
			return (DDI_FAILURE);

		count = hdlp->ih_scratch1;
		h_array = (ddi_intr_handle_t *)hdlp->ih_scratch2;
		for (i = 0; i < count; i++) {
			hdlp = (ddi_intr_handle_impl_t *)h_array[i];
			pci_disable_intr(pdip, rdip, hdlp, hdlp->ih_inum);
			DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: "
			    "BLOCKDISABLE inum %x done\n", hdlp->ih_inum));
		}
		break;
	case DDI_INTROP_SETMASK:
	case DDI_INTROP_CLRMASK:
		/*
		 * First handle in the config space
		 */
		if (intr_op == DDI_INTROP_SETMASK) {
			if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
				pci_status = pci_msi_set_mask(rdip,
				    hdlp->ih_type, hdlp->ih_inum);
			else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
				pci_status = pci_intx_set_mask(rdip);
		} else {
			if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
				pci_status = pci_msi_clr_mask(rdip,
				    hdlp->ih_type, hdlp->ih_inum);
			else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
				pci_status = pci_intx_clr_mask(rdip);
		}

		/* For MSI/X; no need to check with PSM module */
		if (hdlp->ih_type != DDI_INTR_TYPE_FIXED)
			return (pci_status);

		/* For fixed interrupts only: handle config space first */
		if (hdlp->ih_type == DDI_INTR_TYPE_FIXED &&
		    pci_status == DDI_SUCCESS)
			break;

		/* For fixed interrupts only: confer with PSM module next */
		if (psm_intr_ops != NULL) {
			/* If interrupt is shared; do nothing */
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_SHARED, &psm_status);

			if (psm_rval == PSM_FAILURE || psm_status == 1)
				return (pci_status);

			/* Now, PSM module should try to set/clear the mask */
			if (intr_op == DDI_INTROP_SETMASK)
				psm_rval = (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_SET_MASK, NULL);
			else
				psm_rval = (*psm_intr_ops)(rdip, hdlp,
				    PSM_INTR_OP_CLEAR_MASK, NULL);
		}
		return ((psm_rval == PSM_FAILURE) ? DDI_FAILURE : DDI_SUCCESS);
	case DDI_INTROP_GETPENDING:
		/*
		 * First check the config space and/or
		 * MSI capability register(s)
		 */
		if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type))
			pci_rval = pci_msi_get_pending(rdip, hdlp->ih_type,
			    hdlp->ih_inum, &pci_status);
		else if (hdlp->ih_type == DDI_INTR_TYPE_FIXED)
			pci_rval = pci_intx_get_pending(rdip, &pci_status);

		/* On failure; next try with PSM module */
		if (pci_rval != DDI_SUCCESS && psm_intr_ops != NULL)
			psm_rval = (*psm_intr_ops)(rdip, hdlp,
			    PSM_INTR_OP_GET_PENDING, &psm_status);

		DDI_INTR_NEXDBG((CE_CONT, "pci: GETPENDING returned "
		    "psm_rval = %x, psm_status = %x, pci_rval = %x, "
		    "pci_status = %x\n", psm_rval, psm_status, pci_rval,
		    pci_status));
		if (psm_rval == PSM_FAILURE && pci_rval == DDI_FAILURE) {
			*(int *)result = 0;
			return (DDI_FAILURE);
		}

		if (psm_rval != PSM_FAILURE)
			*(int *)result = psm_status;
		else if (pci_rval != DDI_FAILURE)
			*(int *)result = pci_status;
		DDI_INTR_NEXDBG((CE_CONT, "pci: GETPENDING returned = %x\n",
		    *(int *)result));
		break;
	case DDI_INTROP_GETTARGET:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: GETTARGET\n"));

		bcopy(hdlp, &tmp_hdl, sizeof (ddi_intr_handle_impl_t));
		tmp_hdl.ih_private = (void *)&intrinfo;
		intrinfo.avgi_req_flags = PSMGI_INTRBY_DEFAULT;
		intrinfo.avgi_req_flags |= PSMGI_REQ_CPUID;

		if ((*psm_intr_ops)(rdip, &tmp_hdl, PSM_INTR_OP_GET_INTR,
		    NULL) == PSM_FAILURE)
			return (DDI_FAILURE);

		*(int *)result = intrinfo.avgi_cpu_id;
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: GETTARGET "
		    "vector = 0x%x, cpu = 0x%x\n", hdlp->ih_vector,
		    *(int *)result));
		break;
	case DDI_INTROP_SETTARGET:
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: SETTARGET\n"));

		bcopy(hdlp, &tmp_hdl, sizeof (ddi_intr_handle_impl_t));
		tmp_hdl.ih_private = (void *)(uintptr_t)*(int *)result;
		tmp_hdl.ih_flags = PSMGI_INTRBY_DEFAULT;

		if ((*psm_intr_ops)(rdip, &tmp_hdl, PSM_INTR_OP_SET_CPU,
		    &psm_status) == PSM_FAILURE)
			return (DDI_FAILURE);

		hdlp->ih_vector = tmp_hdl.ih_vector;
		DDI_INTR_NEXDBG((CE_CONT, "pci_common_intr_ops: SETTARGET "
		    "vector = 0x%x\n", hdlp->ih_vector));
		break;
	case DDI_INTROP_GETPOOL:
		/*
		 * For MSI/X interrupts use global IRM pool if available.
		 */
		if (apix_irm_pool_p && DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
			*(ddi_irm_pool_t **)result = apix_irm_pool_p;
			return (DDI_SUCCESS);
		}
		return (DDI_ENOTSUP);
	default:
		return (i_ddi_intr_ops(pdip, rdip, intr_op, hdlp, result));
	}

	return (DDI_SUCCESS);
}

/*
 * Allocate a vector for FIXED type interrupt.
 */
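/*
 * With the apix PSM the vector is allocated here; with other PSMs the
 * allocation is deferred until the interrupt is enabled (see below).
 */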
int
pci_alloc_intr_fixed(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, void *result)
{
	struct intrspec		*ispec;
	ddi_intr_handle_impl_t	info_hdl;
	int			ret;
	int			free_phdl = 0;
	int			pci_rval;
	int			pci_status = 0;
	apic_get_type_t		type_info;

	if (psm_intr_ops == NULL)
		return (DDI_FAILURE);

	/* Figure out if this device supports MASKING */
	pci_rval = pci_intx_get_cap(rdip, &pci_status);
	if (pci_rval == DDI_SUCCESS && pci_status)
		hdlp->ih_cap |= pci_status;

	/*
	 * If the PSM module is "APIX" then pass the request for
	 * allocating the vector now.
	 */
	bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t));
	info_hdl.ih_private = &type_info;
	if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) ==
	    PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) {
		ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip,
		    (int)hdlp->ih_inum);
		if (ispec == NULL)
			return (DDI_FAILURE);
		if (hdlp->ih_private == NULL) {	/* allocate phdl structure */
			free_phdl = 1;
			i_ddi_alloc_intr_phdl(hdlp);
		}
		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
		ret = (*psm_intr_ops)(rdip, hdlp,
		    PSM_INTR_OP_ALLOC_VECTORS, result);
		if (free_phdl) {	/* free up the phdl structure */
			free_phdl = 0;
			i_ddi_free_intr_phdl(hdlp);
			hdlp->ih_private = NULL;
		}
	} else {
		/*
		 * No APIX module; fall back to the old scheme where the
		 * interrupt vector is allocated during ddi_enable_intr() call.
		 */
		*(int *)result = 1;
		ret = DDI_SUCCESS;
	}

	return (ret);
}

/*
 * Free up the vector for FIXED (legacy) type interrupt.
 */
static int
pci_free_intr_fixed(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp)
{
	struct intrspec		*ispec;
	ddi_intr_handle_impl_t	info_hdl;
	int			ret;
	apic_get_type_t		type_info;

	if (psm_intr_ops == NULL)
		return (DDI_FAILURE);

	/*
	 * If the PSM module is "APIX" then pass the request to it
	 * to free up the vector now.
	 */
	bzero(&info_hdl, sizeof (ddi_intr_handle_impl_t));
	info_hdl.ih_private = &type_info;
	if ((*psm_intr_ops)(NULL, &info_hdl, PSM_INTR_OP_APIC_TYPE, NULL) ==
	    PSM_SUCCESS && strcmp(type_info.avgi_type, APIC_APIX_NAME) == 0) {
		ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip,
		    (int)hdlp->ih_inum);
		if (ispec == NULL)
			return (DDI_FAILURE);
		((ihdl_plat_t *)hdlp->ih_private)->ip_ispecp = ispec;
		ret = (*psm_intr_ops)(rdip, hdlp,
		    PSM_INTR_OP_FREE_VECTORS, NULL);
	} else {
		/*
		 * No APIX module; fall back to the old scheme where
		 * the interrupt vector was already freed during
		 * ddi_disable_intr() call.
		 */
		ret = DDI_SUCCESS;
	}

	return (ret);
}

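/*
 * pci_get_intr_from_vecirq:
 *	Ask the PSM for information about the interrupt currently mapped to
 *	the given vector (or IRQ, if is_irq is set); the answer is returned
 *	in the caller-supplied apic_get_intr_t.
 */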
int
pci_get_intr_from_vecirq(apic_get_intr_t *intrinfo_p,
    int vecirq, boolean_t is_irq)
{
	ddi_intr_handle_impl_t	get_info_ii_hdl;

	if (is_irq)
		intrinfo_p->avgi_req_flags |= PSMGI_INTRBY_IRQ;

	/*
	 * For this locally-declared and used handle, ih_private will contain a
	 * pointer to apic_get_intr_t, not an ihdl_plat_t as used for
	 * global interrupt handling.
	 */
	get_info_ii_hdl.ih_private = intrinfo_p;
	get_info_ii_hdl.ih_vector = vecirq;

	if ((*psm_intr_ops)(NULL, &get_info_ii_hdl,
	    PSM_INTR_OP_GET_INTR, NULL) == PSM_FAILURE)
		return (DDI_FAILURE);

	return (DDI_SUCCESS);
}


int
pci_get_cpu_from_vecirq(int vecirq, boolean_t is_irq)
{
	int rval;
	apic_get_intr_t intrinfo;

	intrinfo.avgi_req_flags = PSMGI_REQ_CPUID;
	rval = pci_get_intr_from_vecirq(&intrinfo, vecirq, is_irq);

	if (rval == DDI_SUCCESS)
		return (intrinfo.avgi_cpu_id);
	else
		return (-1);
}

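/*
 * pci_enable_intr:
 *	Translate the interrupt into a vector via the PSM and register the
 *	handler with add_avintr().
 */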
static int
pci_enable_intr(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint32_t inum)
{
	struct intrspec	*ispec;
	int		irq;
	ihdl_plat_t	*ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;

	DDI_INTR_NEXDBG((CE_CONT, "pci_enable_intr: hdlp %p inum %x\n",
	    (void *)hdlp, inum));

	/* Look up the interrupt spec and fill it in */
	ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip, (int)inum);
	if (ispec == NULL)
		return (DDI_FAILURE);
	if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
		ispec->intrspec_vec = inum;
		ispec->intrspec_pri = hdlp->ih_pri;
	}
	ihdl_plat_datap->ip_ispecp = ispec;

	/* translate the interrupt if needed */
	if ((*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, &irq) ==
	    PSM_FAILURE)
		return (DDI_FAILURE);
	DDI_INTR_NEXDBG((CE_CONT, "pci_enable_intr: priority=%x irq=%x\n",
	    hdlp->ih_pri, irq));

	/* Add the interrupt handler */
	if (!add_avintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func,
	    DEVI(rdip)->devi_name, irq, hdlp->ih_cb_arg1,
	    hdlp->ih_cb_arg2, &ihdl_plat_datap->ip_ticks, rdip))
		return (DDI_FAILURE);

	hdlp->ih_vector = irq;

	return (DDI_SUCCESS);
}


/*
 * pci_disable_intr:
 *	Unregister the handler with rem_avintr() and clear the saved
 *	intrspec pointer.
 */
static void
pci_disable_intr(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_handle_impl_t *hdlp, uint32_t inum)
{
	int		irq;
	struct intrspec	*ispec;
	ihdl_plat_t	*ihdl_plat_datap = (ihdl_plat_t *)hdlp->ih_private;

	DDI_INTR_NEXDBG((CE_CONT, "pci_disable_intr: \n"));
	ispec = (struct intrspec *)pci_intx_get_ispec(pdip, rdip, (int)inum);
	if (ispec == NULL)
		return;
	if (DDI_INTR_IS_MSI_OR_MSIX(hdlp->ih_type)) {
		ispec->intrspec_vec = inum;
		ispec->intrspec_pri = hdlp->ih_pri;
	}
	ihdl_plat_datap->ip_ispecp = ispec;

	/* translate the interrupt if needed */
	(void) (*psm_intr_ops)(rdip, hdlp, PSM_INTR_OP_XLATE_VECTOR, &irq);

	/* Disable the interrupt handler */
	rem_avintr((void *)hdlp, hdlp->ih_pri, hdlp->ih_cb_func, irq);
	ihdl_plat_datap->ip_ispecp = NULL;
}

/*
 * Miscellaneous library function:
 *
 * pci_common_get_reg_prop:
 *	For a relocatable "reg" entry, return the address actually assigned
 *	by looking up the matching "assigned-addresses" entry.
 */
int
pci_common_get_reg_prop(dev_info_t *dip, pci_regspec_t *pci_rp)
{
	int		i;
	int		number;
	int		assigned_addr_len;
	uint_t		phys_hi = pci_rp->pci_phys_hi;
	pci_regspec_t	*assigned_addr;

	if (((phys_hi & PCI_REG_ADDR_M) == PCI_ADDR_CONFIG) ||
	    (phys_hi & PCI_RELOCAT_B))
		return (DDI_SUCCESS);

	/*
	 * the "reg" property specifies relocatable, get and interpret the
	 * "assigned-addresses" property.
	 */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (int **)&assigned_addr,
	    (uint_t *)&assigned_addr_len) != DDI_PROP_SUCCESS)
		return (DDI_FAILURE);

	/*
	 * Scan the "assigned-addresses" for one that matches the specified
	 * "reg" property entry.
	 */
	phys_hi &= PCI_CONF_ADDR_MASK;
	number = assigned_addr_len / (sizeof (pci_regspec_t) / sizeof (int));
	for (i = 0; i < number; i++) {
		if ((assigned_addr[i].pci_phys_hi & PCI_CONF_ADDR_MASK) ==
		    phys_hi) {
			pci_rp->pci_phys_mid = assigned_addr[i].pci_phys_mid;
			pci_rp->pci_phys_low = assigned_addr[i].pci_phys_low;
			ddi_prop_free(assigned_addr);
			return (DDI_SUCCESS);
		}
	}

	ddi_prop_free(assigned_addr);
	return (DDI_FAILURE);
}


/*
 * To handle PCI tool ioctls
 */

/*ARGSUSED*/
int
pci_common_ioctl(dev_info_t *dip, dev_t dev, int cmd, intptr_t arg,
    int mode, cred_t *credp, int *rvalp)
{
	minor_t	minor = getminor(dev);
	int	rv = ENOTTY;

	switch (PCI_MINOR_NUM_TO_PCI_DEVNUM(minor)) {
	case PCI_TOOL_REG_MINOR_NUM:

		switch (cmd) {
		case PCITOOL_DEVICE_SET_REG:
		case PCITOOL_DEVICE_GET_REG:

			/* Require full privileges. */
			if (secpolicy_kmdb(credp))
				rv = EPERM;
			else
				rv = pcitool_dev_reg_ops(dip, (void *)arg,
				    cmd, mode);
			break;

		case PCITOOL_NEXUS_SET_REG:
		case PCITOOL_NEXUS_GET_REG:

			/* Require full privileges. */
			if (secpolicy_kmdb(credp))
				rv = EPERM;
			else
				rv = pcitool_bus_reg_ops(dip, (void *)arg,
				    cmd, mode);
			break;
		}
		break;

	case PCI_TOOL_INTR_MINOR_NUM:

		switch (cmd) {
		case PCITOOL_DEVICE_SET_INTR:

			/* Require PRIV_SYS_RES_CONFIG, same as psradm */
			if (secpolicy_ponline(credp)) {
				rv = EPERM;
				break;
			}

		/*FALLTHRU*/
		/* These require no special privileges. */
		case PCITOOL_DEVICE_GET_INTR:
		case PCITOOL_SYSTEM_INTR_INFO:
			rv = pcitool_intr_admn(dip, (void *)arg, cmd, mode);
			break;
		}
		break;

	default:
		break;
	}

	return (rv);
}

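/*
 * pci_common_ctlops_poke:
 *	Cautious-put support for DDI_CTLOPS_POKE; dispatches on the access
 *	handle attributes to config space, I/O space or memory space.
 */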
int
pci_common_ctlops_poke(peekpoke_ctlops_t *in_args)
{
	size_t size = in_args->size;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;
	ddi_acc_hdl_t *hdlp = (ddi_acc_hdl_t *)in_args->handle;
	size_t repcount = in_args->repcount;
	uint_t flags = in_args->flags;
	int err = DDI_SUCCESS;

	/*
	 * if no handle then this is a poke. We have to return failure here
	 * as we have no way of knowing whether this is a MEM or IO space
	 * access
	 */
	if (in_args->handle == NULL)
		return (DDI_FAILURE);

	/*
	 * rest of this function is actually for cautious puts
	 */
	for (; repcount; repcount--) {
		if (hp->ahi_acc_attr == DDI_ACCATTR_CONFIG_SPACE) {
			switch (size) {
			case sizeof (uint8_t):
				pci_config_wr8(hp, (uint8_t *)dev_addr,
				    *(uint8_t *)host_addr);
				break;
			case sizeof (uint16_t):
				pci_config_wr16(hp, (uint16_t *)dev_addr,
				    *(uint16_t *)host_addr);
				break;
			case sizeof (uint32_t):
				pci_config_wr32(hp, (uint32_t *)dev_addr,
				    *(uint32_t *)host_addr);
				break;
			case sizeof (uint64_t):
				pci_config_wr64(hp, (uint64_t *)dev_addr,
				    *(uint64_t *)host_addr);
				break;
			default:
				err = DDI_FAILURE;
				break;
			}
		} else if (hp->ahi_acc_attr & DDI_ACCATTR_IO_SPACE) {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					i_ddi_io_put8(hp,
					    (uint8_t *)dev_addr,
					    *(uint8_t *)host_addr);
					break;
				case sizeof (uint16_t):
					i_ddi_io_swap_put16(hp,
					    (uint16_t *)dev_addr,
					    *(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					i_ddi_io_swap_put32(hp,
					    (uint32_t *)dev_addr,
					    *(uint32_t *)host_addr);
					break;
				/*
				 * note the 64-bit case is a dummy
				 * function - so no need to swap
				 */
				case sizeof (uint64_t):
					i_ddi_io_put64(hp,
					    (uint64_t *)dev_addr,
					    *(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					i_ddi_io_put8(hp,
					    (uint8_t *)dev_addr,
					    *(uint8_t *)host_addr);
					break;
				case sizeof (uint16_t):
					i_ddi_io_put16(hp,
					    (uint16_t *)dev_addr,
					    *(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					i_ddi_io_put32(hp,
					    (uint32_t *)dev_addr,
					    *(uint32_t *)host_addr);
					break;
				case sizeof (uint64_t):
					i_ddi_io_put64(hp,
					    (uint64_t *)dev_addr,
					    *(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		} else {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)dev_addr =
					    *(uint8_t *)host_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)dev_addr =
					    ddi_swap16(*(uint16_t *)host_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)dev_addr =
					    ddi_swap32(*(uint32_t *)host_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)dev_addr =
					    ddi_swap64(*(uint64_t *)host_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)dev_addr =
					    *(uint8_t *)host_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)dev_addr =
					    *(uint16_t *)host_addr;
					break;
				case sizeof (uint32_t):
					*(uint32_t *)dev_addr =
					    *(uint32_t *)host_addr;
					break;
				case sizeof (uint64_t):
					*(uint64_t *)dev_addr =
					    *(uint64_t *)host_addr;
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		}
		host_addr += size;
		if (flags == DDI_DEV_AUTOINCR)
			dev_addr += size;
	}
	return (err);
}

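/*
 * pci_fm_acc_setup:
 *	Set up an access handle for PCI config space.  Gets always use the
 *	cautious (FM-protected) routines; puts do so only when the handle
 *	asks for DDI_CAUTIOUS_ACC.
 */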
int
pci_fm_acc_setup(ddi_acc_hdl_t *hp, off_t offset, off_t len)
{
	ddi_acc_impl_t	*ap = (ddi_acc_impl_t *)hp->ah_platform_private;

	/* endian-ness check */
	if (hp->ah_acc.devacc_attr_endian_flags == DDI_STRUCTURE_BE_ACC)
		return (DDI_FAILURE);

	/*
	 * range check
	 */
	if ((offset >= PCI_CONF_HDR_SIZE) ||
	    (len > PCI_CONF_HDR_SIZE) ||
	    (offset + len > PCI_CONF_HDR_SIZE))
		return (DDI_FAILURE);

	ap->ahi_acc_attr |= DDI_ACCATTR_CONFIG_SPACE;
	/*
	 * always use cautious mechanism for config space gets
	 */
	ap->ahi_get8 = i_ddi_caut_get8;
	ap->ahi_get16 = i_ddi_caut_get16;
	ap->ahi_get32 = i_ddi_caut_get32;
	ap->ahi_get64 = i_ddi_caut_get64;
	ap->ahi_rep_get8 = i_ddi_caut_rep_get8;
	ap->ahi_rep_get16 = i_ddi_caut_rep_get16;
	ap->ahi_rep_get32 = i_ddi_caut_rep_get32;
	ap->ahi_rep_get64 = i_ddi_caut_rep_get64;
	if (hp->ah_acc.devacc_attr_access == DDI_CAUTIOUS_ACC) {
		ap->ahi_put8 = i_ddi_caut_put8;
		ap->ahi_put16 = i_ddi_caut_put16;
		ap->ahi_put32 = i_ddi_caut_put32;
		ap->ahi_put64 = i_ddi_caut_put64;
		ap->ahi_rep_put8 = i_ddi_caut_rep_put8;
		ap->ahi_rep_put16 = i_ddi_caut_rep_put16;
		ap->ahi_rep_put32 = i_ddi_caut_rep_put32;
		ap->ahi_rep_put64 = i_ddi_caut_rep_put64;
	} else {
		ap->ahi_put8 = pci_config_wr8;
		ap->ahi_put16 = pci_config_wr16;
		ap->ahi_put32 = pci_config_wr32;
		ap->ahi_put64 = pci_config_wr64;
		ap->ahi_rep_put8 = pci_config_rep_wr8;
		ap->ahi_rep_put16 = pci_config_rep_wr16;
		ap->ahi_rep_put32 = pci_config_rep_wr32;
		ap->ahi_rep_put64 = pci_config_rep_wr64;
	}

	/* Initialize to default check/notify functions */
	ap->ahi_fault_check = i_ddi_acc_fault_check;
	ap->ahi_fault_notify = i_ddi_acc_fault_notify;
	ap->ahi_fault = 0;
	impl_acc_err_init(hp);
	return (DDI_SUCCESS);
}

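/*
 * pci_common_ctlops_peek:
 *	Cautious-get support for DDI_CTLOPS_PEEK; the read-side counterpart
 *	of pci_common_ctlops_poke() above.
 */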
int
pci_common_ctlops_peek(peekpoke_ctlops_t *in_args)
{
	size_t size = in_args->size;
	uintptr_t dev_addr = in_args->dev_addr;
	uintptr_t host_addr = in_args->host_addr;
	ddi_acc_impl_t *hp = (ddi_acc_impl_t *)in_args->handle;
	ddi_acc_hdl_t *hdlp = (ddi_acc_hdl_t *)in_args->handle;
	size_t repcount = in_args->repcount;
	uint_t flags = in_args->flags;
	int err = DDI_SUCCESS;

	/*
	 * if no handle then this is a peek. We have to return failure here
	 * as we have no way of knowing whether this is a MEM or IO space
	 * access
	 */
	if (in_args->handle == NULL)
		return (DDI_FAILURE);

	for (; repcount; repcount--) {
		if (hp->ahi_acc_attr == DDI_ACCATTR_CONFIG_SPACE) {
			switch (size) {
			case sizeof (uint8_t):
				*(uint8_t *)host_addr = pci_config_rd8(hp,
				    (uint8_t *)dev_addr);
				break;
			case sizeof (uint16_t):
				*(uint16_t *)host_addr = pci_config_rd16(hp,
				    (uint16_t *)dev_addr);
				break;
			case sizeof (uint32_t):
				*(uint32_t *)host_addr = pci_config_rd32(hp,
				    (uint32_t *)dev_addr);
				break;
			case sizeof (uint64_t):
				*(uint64_t *)host_addr = pci_config_rd64(hp,
				    (uint64_t *)dev_addr);
				break;
			default:
				err = DDI_FAILURE;
				break;
			}
		} else if (hp->ahi_acc_attr & DDI_ACCATTR_IO_SPACE) {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    i_ddi_io_get8(hp,
					    (uint8_t *)dev_addr);
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    i_ddi_io_swap_get16(hp,
					    (uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    i_ddi_io_swap_get32(hp,
					    (uint32_t *)dev_addr);
					break;
				/*
				 * note the 64-bit case is a dummy
				 * function - so no need to swap
				 */
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    i_ddi_io_get64(hp,
					    (uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    i_ddi_io_get8(hp,
					    (uint8_t *)dev_addr);
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    i_ddi_io_get16(hp,
					    (uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    i_ddi_io_get32(hp,
					    (uint32_t *)dev_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    i_ddi_io_get64(hp,
					    (uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		} else {
			if (hdlp->ah_acc.devacc_attr_endian_flags ==
			    DDI_STRUCTURE_BE_ACC) {
				switch (in_args->size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    *(uint8_t *)dev_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    ddi_swap16(*(uint16_t *)dev_addr);
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    ddi_swap32(*(uint32_t *)dev_addr);
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    ddi_swap64(*(uint64_t *)dev_addr);
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			} else {
				switch (in_args->size) {
				case sizeof (uint8_t):
					*(uint8_t *)host_addr =
					    *(uint8_t *)dev_addr;
					break;
				case sizeof (uint16_t):
					*(uint16_t *)host_addr =
					    *(uint16_t *)dev_addr;
					break;
				case sizeof (uint32_t):
					*(uint32_t *)host_addr =
					    *(uint32_t *)dev_addr;
					break;
				case sizeof (uint64_t):
					*(uint64_t *)host_addr =
					    *(uint64_t *)dev_addr;
					break;
				default:
					err = DDI_FAILURE;
					break;
				}
			}
		}
		host_addr += size;
		if (flags == DDI_DEV_AUTOINCR)
			dev_addr += size;
	}
	return (err);
}

/*ARGSUSED*/
int
pci_common_peekpoke(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result)
{
	if (ctlop == DDI_CTLOPS_PEEK)
		return (pci_common_ctlops_peek((peekpoke_ctlops_t *)arg));
	else
		return (pci_common_ctlops_poke((peekpoke_ctlops_t *)arg));
}

/*
 * These are the get and put functions to be shared with drivers. The
 * mutex locking is done inside the functions referenced, rather than
 * here, and is thus shared across PCI child drivers and any other
 * consumers of PCI config space (such as the ACPI subsystem).
 *
 * The configuration space addresses come in as pointers.  This is fine on
 * a 32-bit system, where the VM space and configuration space are the same
 * size.  It's not such a good idea on a 64-bit system, where memory
 * addresses are twice as large as configuration space addresses.  At some
 * point in the call tree we need to take a stand and say "you are 32-bit
 * from this time forth", and this seems like a nice self-contained place.
 */

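/*
 * Illustrative sketch (assumed caller, not part of this file): for a
 * handle whose ah_bus_private holds the pci_acc_cfblk_t of bus 0,
 * device 0x1f, function 0, a call such as
 *
 *	uint16_t vid = pci_config_rd16(hdlp, (uint16_t *)PCI_CONF_VENID);
 *
 * treats the "address" purely as the config space offset (0 here) and
 * routes the access through pci_getw_func() for that bus/device/function.
 */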
uint8_t
pci_config_rd8(ddi_acc_impl_t *hdlp, uint8_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint8_t	rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getb_func)(cfp->c_busnum, cfp->c_devnum, cfp->c_funcnum,
	    reg);

	return (rval);
}

void
pci_config_rep_rd8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	uint8_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd8(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd8(hdlp, d);
}

uint16_t
pci_config_rd16(ddi_acc_impl_t *hdlp, uint16_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint16_t rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getw_func)(cfp->c_busnum, cfp->c_devnum, cfp->c_funcnum,
	    reg);

	return (rval);
}

void
pci_config_rep_rd16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	uint16_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd16(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd16(hdlp, d);
}

uint32_t
pci_config_rd32(ddi_acc_impl_t *hdlp, uint32_t *addr)
{
	pci_acc_cfblk_t *cfp;
	uint32_t rval;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	rval = (*pci_getl_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg);

	return (rval);
}

void
pci_config_rep_rd32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	uint32_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			*h++ = pci_config_rd32(hdlp, d++);
	else
		for (; repcount; repcount--)
			*h++ = pci_config_rd32(hdlp, d);
}

void
pci_config_wr8(ddi_acc_impl_t *hdlp, uint8_t *addr, uint8_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putb_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr8(ddi_acc_impl_t *hdlp, uint8_t *host_addr,
    uint8_t *dev_addr, size_t repcount, uint_t flags)
{
	uint8_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr8(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr8(hdlp, d, *h++);
}

void
pci_config_wr16(ddi_acc_impl_t *hdlp, uint16_t *addr, uint16_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putw_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr16(ddi_acc_impl_t *hdlp, uint16_t *host_addr,
    uint16_t *dev_addr, size_t repcount, uint_t flags)
{
	uint16_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr16(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr16(hdlp, d, *h++);
}

void
pci_config_wr32(ddi_acc_impl_t *hdlp, uint32_t *addr, uint32_t value)
{
	pci_acc_cfblk_t *cfp;
	int reg;

	ASSERT64(((uintptr_t)addr >> 32) == 0);

	reg = (int)(uintptr_t)addr;

	cfp = (pci_acc_cfblk_t *)&hdlp->ahi_common.ah_bus_private;

	(*pci_putl_func)(cfp->c_busnum, cfp->c_devnum,
	    cfp->c_funcnum, reg, value);
}

void
pci_config_rep_wr32(ddi_acc_impl_t *hdlp, uint32_t *host_addr,
    uint32_t *dev_addr, size_t repcount, uint_t flags)
{
	uint32_t *h, *d;

	h = host_addr;
	d = dev_addr;

	if (flags == DDI_DEV_AUTOINCR)
		for (; repcount; repcount--)
			pci_config_wr32(hdlp, d++, *h++);
	else
		for (; repcount; repcount--)
			pci_config_wr32(hdlp, d, *h++);
}

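/*
 * 64-bit config space accesses are performed as two 32-bit accesses,
 * low word first; the routines below compose and decompose the value
 * accordingly.
 */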
uint64_t
pci_config_rd64(ddi_acc_impl_t *hdlp, uint64_t *addr)
{
	uint32_t lw_val;
	uint32_t hi_val;
	uint32_t *dp;
	uint64_t val;

	dp = (uint32_t *)addr;
	lw_val = pci_config_rd32(hdlp, dp);
	dp++;
	hi_val = pci_config_rd32(hdlp, dp);
	val = ((uint64_t)hi_val << 32) | lw_val;
	return (val);
}

void
pci_config_wr64(ddi_acc_impl_t *hdlp, uint64_t *addr, uint64_t value)
{
	uint32_t lw_val;
	uint32_t hi_val;
	uint32_t *dp;

	dp = (uint32_t *)addr;
	lw_val = (uint32_t)(value & 0xffffffff);
	hi_val = (uint32_t)(value >> 32);
	pci_config_wr32(hdlp, dp, lw_val);
	dp++;
	pci_config_wr32(hdlp, dp, hi_val);
}

void
pci_config_rep_rd64(ddi_acc_impl_t *hdlp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR) {
		for (; repcount; repcount--)
			*host_addr++ = pci_config_rd64(hdlp, dev_addr++);
	} else {
		for (; repcount; repcount--)
			*host_addr++ = pci_config_rd64(hdlp, dev_addr);
	}
}

void
pci_config_rep_wr64(ddi_acc_impl_t *hdlp, uint64_t *host_addr,
    uint64_t *dev_addr, size_t repcount, uint_t flags)
{
	if (flags == DDI_DEV_AUTOINCR) {
		for (; repcount; repcount--)
			pci_config_wr64(hdlp, dev_addr++, *host_addr++);
	} else {
		for (; repcount; repcount--)
			pci_config_wr64(hdlp, dev_addr, *host_addr++);
	}
}