1 /* 2 * This file and its contents are supplied under the terms of the 3 * Common Development and Distribution License ("CDDL"), version 1.0. 4 * You may only use this file in accordance with the terms of version 5 * 1.0 of the CDDL. 6 * 7 * A full copy of the text of the CDDL should have accompanied this 8 * source. A copy of the CDDL is also available via the Internet at 9 * http://www.illumos.org/license/CDDL. 10 */ 11 12 /* 13 * This file is part of the Chelsio T4 support code. 14 * 15 * Copyright (C) 2010-2013 Chelsio Communications. All rights reserved. 16 * 17 * This program is distributed in the hope that it will be useful, but WITHOUT 18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 19 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this 20 * release for licensing terms and conditions. 21 */ 22 23 #include <sys/ddi.h> 24 #include <sys/sunddi.h> 25 #include <sys/sunndi.h> 26 #include <sys/modctl.h> 27 #include <sys/conf.h> 28 #include <sys/devops.h> 29 #include <sys/pci.h> 30 #include <sys/atomic.h> 31 #include <sys/types.h> 32 #include <sys/file.h> 33 #include <sys/errno.h> 34 #include <sys/open.h> 35 #include <sys/cred.h> 36 #include <sys/stat.h> 37 #include <sys/mkdev.h> 38 #include <sys/queue.h> 39 #include <sys/containerof.h> 40 41 #include "version.h" 42 #include "common/common.h" 43 #include "common/t4_msg.h" 44 #include "common/t4_regs.h" 45 #include "firmware/t4_fw.h" 46 #include "firmware/t4_cfg.h" 47 #include "firmware/t5_fw.h" 48 #include "firmware/t5_cfg.h" 49 #include "firmware/t6_fw.h" 50 #include "firmware/t6_cfg.h" 51 #include "t4_l2t.h" 52 53 static int t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp); 54 static int t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp); 55 static int t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, 56 int *rp); 57 struct cb_ops t4_cb_ops = { 58 .cb_open = t4_cb_open, 59 .cb_close = t4_cb_close, 60 .cb_strategy = nodev, 61 .cb_print = nodev, 62 .cb_dump = nodev, 63 .cb_read = nodev, 64 .cb_write = nodev, 65 .cb_ioctl = t4_cb_ioctl, 66 .cb_devmap = nodev, 67 .cb_mmap = nodev, 68 .cb_segmap = nodev, 69 .cb_chpoll = nochpoll, 70 .cb_prop_op = ddi_prop_op, 71 .cb_flag = D_MP, 72 .cb_rev = CB_REV, 73 .cb_aread = nodev, 74 .cb_awrite = nodev 75 }; 76 77 static int t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, 78 void *arg, void *result); 79 static int t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, 80 void *arg, dev_info_t **cdipp); 81 static int t4_bus_unconfig(dev_info_t *dip, uint_t flags, 82 ddi_bus_config_op_t op, void *arg); 83 struct bus_ops t4_bus_ops = { 84 .busops_rev = BUSO_REV, 85 .bus_ctl = t4_bus_ctl, 86 .bus_prop_op = ddi_bus_prop_op, 87 .bus_config = t4_bus_config, 88 .bus_unconfig = t4_bus_unconfig, 89 }; 90 91 static int t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, 92 void **rp); 93 static int t4_devo_probe(dev_info_t *dip); 94 static int t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd); 95 static int t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd); 96 static int t4_devo_quiesce(dev_info_t *dip); 97 struct dev_ops t4_dev_ops = { 98 .devo_rev = DEVO_REV, 99 .devo_getinfo = t4_devo_getinfo, 100 .devo_identify = nulldev, 101 .devo_probe = t4_devo_probe, 102 .devo_attach = t4_devo_attach, 103 .devo_detach = t4_devo_detach, 104 .devo_reset = nodev, 105 .devo_cb_ops = &t4_cb_ops, 106 .devo_bus_ops = &t4_bus_ops, 107 .devo_quiesce = &t4_devo_quiesce, 108 }; 109 110 static struct modldrv modldrv = { 111 .drv_modops = &mod_driverops, 112 .drv_linkinfo = "Chelsio T4 nexus " DRV_VERSION, 113 .drv_dev_ops = &t4_dev_ops 114 }; 115 116 static struct modlinkage modlinkage = { 117 .ml_rev = MODREV_1, 118 .ml_linkage = {&modldrv, NULL}, 119 }; 120 121 void *t4_list; 122 123 struct intrs_and_queues { 124 int intr_type; /* DDI_INTR_TYPE_* */ 125 int nirq; /* Number of vectors */ 126 int intr_fwd; /* Interrupts forwarded */ 127 int ntxq10g; /* # of NIC txq's for each 10G port */ 128 int nrxq10g; /* # of NIC rxq's for each 10G port */ 129 int ntxq1g; /* # of NIC txq's for each 1G port */ 130 int nrxq1g; /* # of NIC rxq's for each 1G port */ 131 #ifdef TCP_OFFLOAD_ENABLE 132 int nofldtxq10g; /* # of TOE txq's for each 10G port */ 133 int nofldrxq10g; /* # of TOE rxq's for each 10G port */ 134 int nofldtxq1g; /* # of TOE txq's for each 1G port */ 135 int nofldrxq1g; /* # of TOE rxq's for each 1G port */ 136 #endif 137 }; 138 139 struct fw_info fi[3]; 140 141 static int cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, 142 mblk_t *m); 143 static int fw_msg_not_handled(struct adapter *, const __be64 *); 144 int t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h); 145 static unsigned int getpf(struct adapter *sc); 146 static int prep_firmware(struct adapter *sc); 147 static int upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma); 148 static int partition_resources(struct adapter *sc); 149 static int adap__pre_init_tweaks(struct adapter *sc); 150 static int get_params__pre_init(struct adapter *sc); 151 static int get_params__post_init(struct adapter *sc); 152 static int set_params__post_init(struct adapter *); 153 static void setup_memwin(struct adapter *sc); 154 static int validate_mt_off_len(struct adapter *, int, uint32_t, int, 155 uint32_t *); 156 void memwin_info(struct adapter *, int, uint32_t *, uint32_t *); 157 uint32_t position_memwin(struct adapter *, int, uint32_t); 158 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data, 159 uint_t count); 160 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data, 161 uint_t count); 162 static int init_driver_props(struct adapter *sc, struct driver_properties *p); 163 static int remove_extra_props(struct adapter *sc, int n10g, int n1g); 164 static int cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, 165 struct intrs_and_queues *iaq); 166 static int add_child_node(struct adapter *sc, int idx); 167 static int remove_child_node(struct adapter *sc, int idx); 168 static kstat_t *setup_kstats(struct adapter *sc); 169 static kstat_t *setup_wc_kstats(struct adapter *); 170 static int update_wc_kstats(kstat_t *, int); 171 #ifdef TCP_OFFLOAD_ENABLE 172 static int toe_capability(struct port_info *pi, int enable); 173 static int activate_uld(struct adapter *sc, int id, struct uld_softc *usc); 174 static int deactivate_uld(struct uld_softc *usc); 175 #endif 176 static kmutex_t t4_adapter_list_lock; 177 static SLIST_HEAD(, adapter) t4_adapter_list; 178 #ifdef TCP_OFFLOAD_ENABLE 179 static kmutex_t t4_uld_list_lock; 180 static SLIST_HEAD(, uld_info) t4_uld_list; 181 #endif 182 183 int 184 _init(void) 185 { 186 int rc; 187 188 rc = ddi_soft_state_init(&t4_list, sizeof (struct adapter), 0); 189 if (rc != 0) 190 return (rc); 191 192 rc = mod_install(&modlinkage); 193 if (rc != 0) 194 ddi_soft_state_fini(&t4_list); 195 196 mutex_init(&t4_adapter_list_lock, NULL, MUTEX_DRIVER, NULL); 197 SLIST_INIT(&t4_adapter_list); 198 199 #ifdef TCP_OFFLOAD_ENABLE 200 mutex_init(&t4_uld_list_lock, NULL, MUTEX_DRIVER, NULL); 201 SLIST_INIT(&t4_uld_list); 202 #endif 203 204 return (rc); 205 } 206 207 int 208 _fini(void) 209 { 210 int rc; 211 212 rc = mod_remove(&modlinkage); 213 if (rc != 0) 214 return (rc); 215 216 ddi_soft_state_fini(&t4_list); 217 return (0); 218 } 219 220 int 221 _info(struct modinfo *mi) 222 { 223 return (mod_info(&modlinkage, mi)); 224 } 225 226 /* ARGSUSED */ 227 static int 228 t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp) 229 { 230 struct adapter *sc; 231 minor_t minor; 232 233 minor = getminor((dev_t)arg); /* same as instance# in our case */ 234 235 if (cmd == DDI_INFO_DEVT2DEVINFO) { 236 sc = ddi_get_soft_state(t4_list, minor); 237 if (sc == NULL) 238 return (DDI_FAILURE); 239 240 ASSERT(sc->dev == (dev_t)arg); 241 *rp = (void *)sc->dip; 242 } else if (cmd == DDI_INFO_DEVT2INSTANCE) 243 *rp = (void *) (unsigned long) minor; 244 else 245 ASSERT(0); 246 247 return (DDI_SUCCESS); 248 } 249 250 static int 251 t4_devo_probe(dev_info_t *dip) 252 { 253 int rc, id, *reg; 254 uint_t n, pf; 255 256 id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 257 "device-id", 0xffff); 258 if (id == 0xffff) 259 return (DDI_PROBE_DONTCARE); 260 261 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 262 "reg", ®, &n); 263 if (rc != DDI_SUCCESS) 264 return (DDI_PROBE_DONTCARE); 265 266 pf = PCI_REG_FUNC_G(reg[0]); 267 ddi_prop_free(reg); 268 269 /* Prevent driver attachment on any PF except 0 on the FPGA */ 270 if (id == 0xa000 && pf != 0) 271 return (DDI_PROBE_FAILURE); 272 273 return (DDI_PROBE_DONTCARE); 274 } 275 276 static int 277 t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd) 278 { 279 struct adapter *sc = NULL; 280 struct sge *s; 281 int i, instance, rc = DDI_SUCCESS, rqidx, tqidx, q; 282 int irq = 0, nxg, n100g, n40g, n25g, n10g, n1g; 283 #ifdef TCP_OFFLOAD_ENABLE 284 int ofld_rqidx, ofld_tqidx; 285 #endif 286 char name[16]; 287 struct driver_properties *prp; 288 struct intrs_and_queues iaq; 289 ddi_device_acc_attr_t da = { 290 .devacc_attr_version = DDI_DEVICE_ATTR_V0, 291 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC, 292 .devacc_attr_dataorder = DDI_UNORDERED_OK_ACC 293 }; 294 ddi_device_acc_attr_t da1 = { 295 .devacc_attr_version = DDI_DEVICE_ATTR_V0, 296 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC, 297 .devacc_attr_dataorder = DDI_MERGING_OK_ACC 298 }; 299 300 if (cmd != DDI_ATTACH) 301 return (DDI_FAILURE); 302 303 /* 304 * Allocate space for soft state. 305 */ 306 instance = ddi_get_instance(dip); 307 rc = ddi_soft_state_zalloc(t4_list, instance); 308 if (rc != DDI_SUCCESS) { 309 cxgb_printf(dip, CE_WARN, 310 "failed to allocate soft state: %d", rc); 311 return (DDI_FAILURE); 312 } 313 314 sc = ddi_get_soft_state(t4_list, instance); 315 sc->dip = dip; 316 sc->dev = makedevice(ddi_driver_major(dip), instance); 317 mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL); 318 cv_init(&sc->cv, NULL, CV_DRIVER, NULL); 319 mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL); 320 321 mutex_enter(&t4_adapter_list_lock); 322 SLIST_INSERT_HEAD(&t4_adapter_list, sc, link); 323 mutex_exit(&t4_adapter_list_lock); 324 325 sc->pf = getpf(sc); 326 if (sc->pf > 8) { 327 rc = EINVAL; 328 cxgb_printf(dip, CE_WARN, 329 "failed to determine PCI PF# of device"); 330 goto done; 331 } 332 sc->mbox = sc->pf; 333 334 /* Initialize the driver properties */ 335 prp = &sc->props; 336 (void)init_driver_props(sc, prp); 337 338 /* 339 * Enable access to the PCI config space. 340 */ 341 rc = pci_config_setup(dip, &sc->pci_regh); 342 if (rc != DDI_SUCCESS) { 343 cxgb_printf(dip, CE_WARN, 344 "failed to enable PCI config space access: %d", rc); 345 goto done; 346 } 347 348 /* TODO: Set max read request to 4K */ 349 350 /* 351 * Enable MMIO access. 352 */ 353 rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh); 354 if (rc != DDI_SUCCESS) { 355 cxgb_printf(dip, CE_WARN, 356 "failed to map device registers: %d", rc); 357 goto done; 358 } 359 360 (void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map)); 361 362 /* 363 * Initialize cpl handler. 364 */ 365 for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++) { 366 sc->cpl_handler[i] = cpl_not_handled; 367 } 368 369 for (i = 0; i < ARRAY_SIZE(sc->fw_msg_handler); i++) { 370 sc->fw_msg_handler[i] = fw_msg_not_handled; 371 } 372 373 for (i = 0; i < NCHAN; i++) { 374 (void) snprintf(name, sizeof (name), "%s-%d", 375 "reclaim", i); 376 sc->tq[i] = ddi_taskq_create(sc->dip, 377 name, 1, TASKQ_DEFAULTPRI, 0); 378 379 if (sc->tq[i] == NULL) { 380 cxgb_printf(dip, CE_WARN, 381 "failed to create task queues"); 382 rc = DDI_FAILURE; 383 goto done; 384 } 385 } 386 387 /* 388 * Prepare the adapter for operation. 389 */ 390 rc = -t4_prep_adapter(sc, false); 391 if (rc != 0) { 392 cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc); 393 goto done; 394 } 395 396 /* 397 * Enable BAR1 access. 398 */ 399 sc->doorbells |= DOORBELL_KDB; 400 rc = ddi_regs_map_setup(dip, 2, &sc->reg1p, 0, 0, &da1, &sc->reg1h); 401 if (rc != DDI_SUCCESS) { 402 cxgb_printf(dip, CE_WARN, 403 "failed to map BAR1 device registers: %d", rc); 404 goto done; 405 } else { 406 if (is_t5(sc->params.chip)) { 407 sc->doorbells |= DOORBELL_UDB; 408 if (prp->wc) { 409 /* 410 * Enable write combining on BAR2. This is the 411 * userspace doorbell BAR and is split into 128B 412 * (UDBS_SEG_SIZE) doorbell regions, each associated 413 * with an egress queue. The first 64B has the doorbell 414 * and the second 64B can be used to submit a tx work 415 * request with an implicit doorbell. 416 */ 417 sc->doorbells &= ~DOORBELL_UDB; 418 sc->doorbells |= (DOORBELL_WCWR | 419 DOORBELL_UDBWC); 420 t4_write_reg(sc, A_SGE_STAT_CFG, 421 V_STATSOURCE_T5(7) | V_STATMODE(0)); 422 } 423 } 424 } 425 426 /* 427 * Do this really early. Note that minor number = instance. 428 */ 429 (void) snprintf(name, sizeof (name), "%s,%d", T4_NEXUS_NAME, instance); 430 rc = ddi_create_minor_node(dip, name, S_IFCHR, instance, 431 DDI_NT_NEXUS, 0); 432 if (rc != DDI_SUCCESS) { 433 cxgb_printf(dip, CE_WARN, 434 "failed to create device node: %d", rc); 435 rc = DDI_SUCCESS; /* carry on */ 436 } 437 438 /* Do this early. Memory window is required for loading config file. */ 439 setup_memwin(sc); 440 441 /* Prepare the firmware for operation */ 442 rc = prep_firmware(sc); 443 if (rc != 0) 444 goto done; /* error message displayed already */ 445 446 rc = adap__pre_init_tweaks(sc); 447 if (rc != 0) 448 goto done; 449 450 rc = get_params__pre_init(sc); 451 if (rc != 0) 452 goto done; /* error message displayed already */ 453 454 t4_sge_init(sc); 455 456 if (sc->flags & MASTER_PF) { 457 /* get basic stuff going */ 458 rc = -t4_fw_initialize(sc, sc->mbox); 459 if (rc != 0) { 460 cxgb_printf(sc->dip, CE_WARN, 461 "early init failed: %d.\n", rc); 462 goto done; 463 } 464 } 465 466 rc = get_params__post_init(sc); 467 if (rc != 0) 468 goto done; /* error message displayed already */ 469 470 rc = set_params__post_init(sc); 471 if (rc != 0) 472 goto done; /* error message displayed already */ 473 474 /* 475 * TODO: This is the place to call t4_set_filter_mode() 476 */ 477 478 /* tweak some settings */ 479 t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) | 480 V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) | 481 V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9)); 482 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12)); 483 484 /* 485 * Work-around for bug 2619 486 * Set DisableVlan field in TP_RSS_CONFIG_VRT register so that the 487 * VLAN tag extraction is disabled. 488 */ 489 t4_set_reg_field(sc, A_TP_RSS_CONFIG_VRT, F_DISABLEVLAN, F_DISABLEVLAN); 490 491 /* Store filter mode */ 492 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1, 493 A_TP_VLAN_PRI_MAP); 494 495 /* 496 * First pass over all the ports - allocate VIs and initialize some 497 * basic parameters like mac address, port type, etc. We also figure 498 * out whether a port is 10G or 1G and use that information when 499 * calculating how many interrupts to attempt to allocate. 500 */ 501 n100g = n40g = n25g = n10g = n1g = 0; 502 for_each_port(sc, i) { 503 struct port_info *pi; 504 505 pi = kmem_zalloc(sizeof (*pi), KM_SLEEP); 506 sc->port[i] = pi; 507 508 /* These must be set before t4_port_init */ 509 pi->adapter = sc; 510 /* LINTED: E_ASSIGN_NARROW_CONV */ 511 pi->port_id = i; 512 } 513 514 /* Allocate the vi and initialize parameters like mac addr */ 515 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0); 516 if (rc) { 517 cxgb_printf(dip, CE_WARN, 518 "unable to initialize port: %d", rc); 519 goto done; 520 } 521 522 for_each_port(sc, i) { 523 struct port_info *pi = sc->port[i]; 524 525 mutex_init(&pi->lock, NULL, MUTEX_DRIVER, NULL); 526 pi->mtu = ETHERMTU; 527 528 if (is_100G_port(pi)) { 529 n100g++; 530 pi->tmr_idx = prp->tmr_idx_10g; 531 pi->pktc_idx = prp->pktc_idx_10g; 532 } else if (is_40G_port(pi)) { 533 n40g++; 534 pi->tmr_idx = prp->tmr_idx_10g; 535 pi->pktc_idx = prp->pktc_idx_10g; 536 } else if (is_25G_port(pi)) { 537 n25g++; 538 pi->tmr_idx = prp->tmr_idx_10g; 539 pi->pktc_idx = prp->pktc_idx_10g; 540 } else if (is_10G_port(pi)) { 541 n10g++; 542 pi->tmr_idx = prp->tmr_idx_10g; 543 pi->pktc_idx = prp->pktc_idx_10g; 544 } else { 545 n1g++; 546 pi->tmr_idx = prp->tmr_idx_1g; 547 pi->pktc_idx = prp->pktc_idx_1g; 548 } 549 550 pi->xact_addr_filt = -1; 551 t4_mc_init(pi); 552 553 setbit(&sc->registered_device_map, i); 554 } 555 556 nxg = n10g + n25g + n40g + n100g; 557 (void) remove_extra_props(sc, nxg, n1g); 558 559 if (sc->registered_device_map == 0) { 560 cxgb_printf(dip, CE_WARN, "no usable ports"); 561 rc = DDI_FAILURE; 562 goto done; 563 } 564 565 rc = cfg_itype_and_nqueues(sc, nxg, n1g, &iaq); 566 if (rc != 0) 567 goto done; /* error message displayed already */ 568 569 sc->intr_type = iaq.intr_type; 570 sc->intr_count = iaq.nirq; 571 572 if (sc->props.multi_rings && (sc->intr_type != DDI_INTR_TYPE_MSIX)) { 573 sc->props.multi_rings = 0; 574 cxgb_printf(dip, CE_WARN, 575 "Multiple rings disabled as interrupt type is not MSI-X"); 576 } 577 578 if (sc->props.multi_rings && iaq.intr_fwd) { 579 sc->props.multi_rings = 0; 580 cxgb_printf(dip, CE_WARN, 581 "Multiple rings disabled as interrupts are forwarded"); 582 } 583 584 if (!sc->props.multi_rings) { 585 iaq.ntxq10g = 1; 586 iaq.ntxq1g = 1; 587 } 588 s = &sc->sge; 589 s->nrxq = nxg * iaq.nrxq10g + n1g * iaq.nrxq1g; 590 s->ntxq = nxg * iaq.ntxq10g + n1g * iaq.ntxq1g; 591 s->neq = s->ntxq + s->nrxq; /* the fl in an rxq is an eq */ 592 #ifdef TCP_OFFLOAD_ENABLE 593 /* control queues, 1 per port + 1 mgmtq */ 594 s->neq += sc->params.nports + 1; 595 #endif 596 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */ 597 if (iaq.intr_fwd != 0) 598 sc->flags |= INTR_FWD; 599 #ifdef TCP_OFFLOAD_ENABLE 600 if (is_offload(sc) != 0) { 601 602 s->nofldrxq = nxg * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g; 603 s->nofldtxq = nxg * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g; 604 s->neq += s->nofldtxq + s->nofldrxq; 605 s->niq += s->nofldrxq; 606 607 s->ofld_rxq = kmem_zalloc(s->nofldrxq * 608 sizeof (struct sge_ofld_rxq), KM_SLEEP); 609 s->ofld_txq = kmem_zalloc(s->nofldtxq * 610 sizeof (struct sge_wrq), KM_SLEEP); 611 s->ctrlq = kmem_zalloc(sc->params.nports * 612 sizeof (struct sge_wrq), KM_SLEEP); 613 614 } 615 #endif 616 s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP); 617 s->txq = kmem_zalloc(s->ntxq * sizeof (struct sge_txq), KM_SLEEP); 618 s->iqmap = kmem_zalloc(s->niq * sizeof (struct sge_iq *), KM_SLEEP); 619 s->eqmap = kmem_zalloc(s->neq * sizeof (struct sge_eq *), KM_SLEEP); 620 621 sc->intr_handle = kmem_zalloc(sc->intr_count * 622 sizeof (ddi_intr_handle_t), KM_SLEEP); 623 624 /* 625 * Second pass over the ports. This time we know the number of rx and 626 * tx queues that each port should get. 627 */ 628 rqidx = tqidx = 0; 629 #ifdef TCP_OFFLOAD_ENABLE 630 ofld_rqidx = ofld_tqidx = 0; 631 #endif 632 for_each_port(sc, i) { 633 struct port_info *pi = sc->port[i]; 634 635 if (pi == NULL) 636 continue; 637 638 t4_mc_cb_init(pi); 639 /* LINTED: E_ASSIGN_NARROW_CONV */ 640 pi->first_rxq = rqidx; 641 /* LINTED: E_ASSIGN_NARROW_CONV */ 642 pi->nrxq = (is_10XG_port(pi)) ? iaq.nrxq10g 643 : iaq.nrxq1g; 644 /* LINTED: E_ASSIGN_NARROW_CONV */ 645 pi->first_txq = tqidx; 646 /* LINTED: E_ASSIGN_NARROW_CONV */ 647 pi->ntxq = (is_10XG_port(pi)) ? iaq.ntxq10g 648 : iaq.ntxq1g; 649 650 rqidx += pi->nrxq; 651 tqidx += pi->ntxq; 652 653 #ifdef TCP_OFFLOAD_ENABLE 654 if (is_offload(sc) != 0) { 655 /* LINTED: E_ASSIGN_NARROW_CONV */ 656 pi->first_ofld_rxq = ofld_rqidx; 657 pi->nofldrxq = max(1, pi->nrxq / 4); 658 659 /* LINTED: E_ASSIGN_NARROW_CONV */ 660 pi->first_ofld_txq = ofld_tqidx; 661 pi->nofldtxq = max(1, pi->ntxq / 2); 662 663 ofld_rqidx += pi->nofldrxq; 664 ofld_tqidx += pi->nofldtxq; 665 } 666 #endif 667 668 /* 669 * Enable hw checksumming and LSO for all ports by default. 670 * They can be disabled using ndd (hw_csum and hw_lso). 671 */ 672 pi->features |= (CXGBE_HW_CSUM | CXGBE_HW_LSO); 673 } 674 675 #ifdef TCP_OFFLOAD_ENABLE 676 sc->l2t = t4_init_l2t(sc); 677 #endif 678 679 /* 680 * Setup Interrupts. 681 */ 682 683 i = 0; 684 rc = ddi_intr_alloc(dip, sc->intr_handle, sc->intr_type, 0, 685 sc->intr_count, &i, DDI_INTR_ALLOC_STRICT); 686 if (rc != DDI_SUCCESS) { 687 cxgb_printf(dip, CE_WARN, 688 "failed to allocate %d interrupt(s) of type %d: %d, %d", 689 sc->intr_count, sc->intr_type, rc, i); 690 goto done; 691 } 692 ASSERT(sc->intr_count == i); /* allocation was STRICT */ 693 (void) ddi_intr_get_cap(sc->intr_handle[0], &sc->intr_cap); 694 (void) ddi_intr_get_pri(sc->intr_handle[0], &sc->intr_pri); 695 if (sc->intr_count == 1) { 696 ASSERT(sc->flags & INTR_FWD); 697 (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_all, sc, 698 &s->fwq); 699 } else { 700 /* Multiple interrupts. The first one is always error intr */ 701 (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_err, sc, 702 NULL); 703 irq++; 704 705 /* The second one is always the firmware event queue */ 706 (void) ddi_intr_add_handler(sc->intr_handle[1], t4_intr, sc, 707 &s->fwq); 708 irq++; 709 /* 710 * Note that if INTR_FWD is set then either the NIC rx 711 * queues or (exclusive or) the TOE rx queueus will be taking 712 * direct interrupts. 713 * 714 * There is no need to check for is_offload(sc) as nofldrxq 715 * will be 0 if offload is disabled. 716 */ 717 for_each_port(sc, i) { 718 struct port_info *pi = sc->port[i]; 719 struct sge_rxq *rxq; 720 #ifdef TCP_OFFLOAD_ENABLE 721 struct sge_ofld_rxq *ofld_rxq; 722 723 /* 724 * Skip over the NIC queues if they aren't taking direct 725 * interrupts. 726 */ 727 if ((sc->flags & INTR_FWD) && 728 pi->nofldrxq > pi->nrxq) 729 goto ofld_queues; 730 #endif 731 rxq = &s->rxq[pi->first_rxq]; 732 for (q = 0; q < pi->nrxq; q++, rxq++) { 733 (void) ddi_intr_add_handler( 734 sc->intr_handle[irq], t4_intr, sc, 735 &rxq->iq); 736 irq++; 737 } 738 739 #ifdef TCP_OFFLOAD_ENABLE 740 /* 741 * Skip over the offload queues if they aren't taking 742 * direct interrupts. 743 */ 744 if ((sc->flags & INTR_FWD)) 745 continue; 746 ofld_queues: 747 ofld_rxq = &s->ofld_rxq[pi->first_ofld_rxq]; 748 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) { 749 (void) ddi_intr_add_handler( 750 sc->intr_handle[irq], t4_intr, sc, 751 &ofld_rxq->iq); 752 irq++; 753 } 754 #endif 755 } 756 757 } 758 sc->flags |= INTR_ALLOCATED; 759 760 ASSERT(rc == DDI_SUCCESS); 761 ddi_report_dev(dip); 762 763 /* 764 * Hardware/Firmware/etc. Version/Revision IDs. 765 */ 766 t4_dump_version_info(sc); 767 768 if (n100g) { 769 cxgb_printf(dip, CE_NOTE, 770 "%dx100G (%d rxq, %d txq total) %d %s.", 771 n100g, rqidx, tqidx, sc->intr_count, 772 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" : 773 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" : 774 "fixed interrupt"); 775 } else if (n40g) { 776 cxgb_printf(dip, CE_NOTE, 777 "%dx40G (%d rxq, %d txq total) %d %s.", 778 n40g, rqidx, tqidx, sc->intr_count, 779 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" : 780 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" : 781 "fixed interrupt"); 782 } else if (n25g) { 783 cxgb_printf(dip, CE_NOTE, 784 "%dx25G (%d rxq, %d txq total) %d %s.", 785 n25g, rqidx, tqidx, sc->intr_count, 786 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" : 787 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" : 788 "fixed interrupt"); 789 } else if (n10g && n1g) { 790 cxgb_printf(dip, CE_NOTE, 791 "%dx10G %dx1G (%d rxq, %d txq total) %d %s.", 792 n10g, n1g, rqidx, tqidx, sc->intr_count, 793 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" : 794 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" : 795 "fixed interrupt"); 796 } else { 797 cxgb_printf(dip, CE_NOTE, 798 "%dx%sG (%d rxq, %d txq per port) %d %s.", 799 n10g ? n10g : n1g, 800 n10g ? "10" : "1", 801 n10g ? iaq.nrxq10g : iaq.nrxq1g, 802 n10g ? iaq.ntxq10g : iaq.ntxq1g, 803 sc->intr_count, 804 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" : 805 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" : 806 "fixed interrupt"); 807 } 808 809 sc->ksp = setup_kstats(sc); 810 sc->ksp_stat = setup_wc_kstats(sc); 811 sc->params.drv_memwin = MEMWIN_NIC; 812 813 done: 814 if (rc != DDI_SUCCESS) { 815 (void) t4_devo_detach(dip, DDI_DETACH); 816 817 /* rc may have errno style errors or DDI errors */ 818 rc = DDI_FAILURE; 819 } 820 821 return (rc); 822 } 823 824 static int 825 t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd) 826 { 827 int instance, i; 828 struct adapter *sc; 829 struct port_info *pi; 830 struct sge *s; 831 832 if (cmd != DDI_DETACH) 833 return (DDI_FAILURE); 834 835 instance = ddi_get_instance(dip); 836 sc = ddi_get_soft_state(t4_list, instance); 837 if (sc == NULL) 838 return (DDI_SUCCESS); 839 840 if (sc->flags & FULL_INIT_DONE) { 841 t4_intr_disable(sc); 842 for_each_port(sc, i) { 843 pi = sc->port[i]; 844 if (pi && pi->flags & PORT_INIT_DONE) 845 (void) port_full_uninit(pi); 846 } 847 (void) adapter_full_uninit(sc); 848 } 849 850 /* Safe to call no matter what */ 851 ddi_prop_remove_all(dip); 852 ddi_remove_minor_node(dip, NULL); 853 854 for (i = 0; i < NCHAN; i++) { 855 if (sc->tq[i]) { 856 ddi_taskq_wait(sc->tq[i]); 857 ddi_taskq_destroy(sc->tq[i]); 858 } 859 } 860 861 if (sc->ksp != NULL) 862 kstat_delete(sc->ksp); 863 if (sc->ksp_stat != NULL) 864 kstat_delete(sc->ksp_stat); 865 866 s = &sc->sge; 867 if (s->rxq != NULL) 868 kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq)); 869 #ifdef TCP_OFFLOAD_ENABLE 870 if (s->ofld_txq != NULL) 871 kmem_free(s->ofld_txq, s->nofldtxq * sizeof (struct sge_wrq)); 872 if (s->ofld_rxq != NULL) 873 kmem_free(s->ofld_rxq, 874 s->nofldrxq * sizeof (struct sge_ofld_rxq)); 875 if (s->ctrlq != NULL) 876 kmem_free(s->ctrlq, 877 sc->params.nports * sizeof (struct sge_wrq)); 878 #endif 879 if (s->txq != NULL) 880 kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq)); 881 if (s->iqmap != NULL) 882 kmem_free(s->iqmap, s->niq * sizeof (struct sge_iq *)); 883 if (s->eqmap != NULL) 884 kmem_free(s->eqmap, s->neq * sizeof (struct sge_eq *)); 885 886 if (s->rxbuf_cache != NULL) 887 rxbuf_cache_destroy(s->rxbuf_cache); 888 889 if (sc->flags & INTR_ALLOCATED) { 890 for (i = 0; i < sc->intr_count; i++) { 891 (void) ddi_intr_remove_handler(sc->intr_handle[i]); 892 (void) ddi_intr_free(sc->intr_handle[i]); 893 } 894 sc->flags &= ~INTR_ALLOCATED; 895 } 896 897 if (sc->intr_handle != NULL) { 898 kmem_free(sc->intr_handle, 899 sc->intr_count * sizeof (*sc->intr_handle)); 900 } 901 902 for_each_port(sc, i) { 903 pi = sc->port[i]; 904 if (pi != NULL) { 905 mutex_destroy(&pi->lock); 906 kmem_free(pi, sizeof (*pi)); 907 clrbit(&sc->registered_device_map, i); 908 } 909 } 910 911 if (sc->flags & FW_OK) 912 (void) t4_fw_bye(sc, sc->mbox); 913 914 if (sc->reg1h != NULL) 915 ddi_regs_map_free(&sc->reg1h); 916 917 if (sc->regh != NULL) 918 ddi_regs_map_free(&sc->regh); 919 920 if (sc->pci_regh != NULL) 921 pci_config_teardown(&sc->pci_regh); 922 923 mutex_enter(&t4_adapter_list_lock); 924 SLIST_REMOVE_HEAD(&t4_adapter_list, link); 925 mutex_exit(&t4_adapter_list_lock); 926 927 mutex_destroy(&sc->lock); 928 cv_destroy(&sc->cv); 929 mutex_destroy(&sc->sfl_lock); 930 931 #ifdef DEBUG 932 bzero(sc, sizeof (*sc)); 933 #endif 934 ddi_soft_state_free(t4_list, instance); 935 936 return (DDI_SUCCESS); 937 } 938 939 static int 940 t4_devo_quiesce(dev_info_t *dip) 941 { 942 int instance; 943 struct adapter *sc; 944 945 instance = ddi_get_instance(dip); 946 sc = ddi_get_soft_state(t4_list, instance); 947 if (sc == NULL) 948 return (DDI_SUCCESS); 949 950 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 951 t4_intr_disable(sc); 952 t4_write_reg(sc, A_PL_RST, F_PIORSTMODE | F_PIORST); 953 954 return (DDI_SUCCESS); 955 } 956 957 static int 958 t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg, 959 void *result) 960 { 961 char s[4]; 962 struct port_info *pi; 963 dev_info_t *child = (dev_info_t *)arg; 964 965 switch (op) { 966 case DDI_CTLOPS_REPORTDEV: 967 pi = ddi_get_parent_data(rdip); 968 pi->instance = ddi_get_instance(dip); 969 pi->child_inst = ddi_get_instance(rdip); 970 cmn_err(CE_CONT, "?%s%d is port %s on %s%d\n", 971 ddi_node_name(rdip), ddi_get_instance(rdip), 972 ddi_get_name_addr(rdip), ddi_driver_name(dip), 973 ddi_get_instance(dip)); 974 return (DDI_SUCCESS); 975 976 case DDI_CTLOPS_INITCHILD: 977 pi = ddi_get_parent_data(child); 978 if (pi == NULL) 979 return (DDI_NOT_WELL_FORMED); 980 (void) snprintf(s, sizeof (s), "%d", pi->port_id); 981 ddi_set_name_addr(child, s); 982 return (DDI_SUCCESS); 983 984 case DDI_CTLOPS_UNINITCHILD: 985 ddi_set_name_addr(child, NULL); 986 return (DDI_SUCCESS); 987 988 case DDI_CTLOPS_ATTACH: 989 case DDI_CTLOPS_DETACH: 990 return (DDI_SUCCESS); 991 992 default: 993 return (ddi_ctlops(dip, rdip, op, arg, result)); 994 } 995 } 996 997 static int 998 t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, void *arg, 999 dev_info_t **cdipp) 1000 { 1001 int instance, i; 1002 struct adapter *sc; 1003 1004 instance = ddi_get_instance(dip); 1005 sc = ddi_get_soft_state(t4_list, instance); 1006 1007 if (op == BUS_CONFIG_ONE) { 1008 char *c; 1009 1010 /* 1011 * arg is something like "cxgb@0" where 0 is the port_id hanging 1012 * off this nexus. 1013 */ 1014 1015 c = arg; 1016 while (*(c + 1)) 1017 c++; 1018 1019 /* There should be exactly 1 digit after '@' */ 1020 if (*(c - 1) != '@') 1021 return (NDI_FAILURE); 1022 1023 i = *c - '0'; 1024 1025 if (add_child_node(sc, i) != 0) 1026 return (NDI_FAILURE); 1027 1028 flags |= NDI_ONLINE_ATTACH; 1029 1030 } else if (op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER) { 1031 /* Allocate and bind all child device nodes */ 1032 for_each_port(sc, i) 1033 (void) add_child_node(sc, i); 1034 flags |= NDI_ONLINE_ATTACH; 1035 } 1036 1037 return (ndi_busop_bus_config(dip, flags, op, arg, cdipp, 0)); 1038 } 1039 1040 static int 1041 t4_bus_unconfig(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, 1042 void *arg) 1043 { 1044 int instance, i, rc; 1045 struct adapter *sc; 1046 1047 instance = ddi_get_instance(dip); 1048 sc = ddi_get_soft_state(t4_list, instance); 1049 1050 if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ALL || 1051 op == BUS_UNCONFIG_DRIVER) 1052 flags |= NDI_UNCONFIG; 1053 1054 rc = ndi_busop_bus_unconfig(dip, flags, op, arg); 1055 if (rc != 0) 1056 return (rc); 1057 1058 if (op == BUS_UNCONFIG_ONE) { 1059 char *c; 1060 1061 c = arg; 1062 while (*(c + 1)) 1063 c++; 1064 1065 if (*(c - 1) != '@') 1066 return (NDI_SUCCESS); 1067 1068 i = *c - '0'; 1069 1070 rc = remove_child_node(sc, i); 1071 1072 } else if (op == BUS_UNCONFIG_ALL || op == BUS_UNCONFIG_DRIVER) { 1073 1074 for_each_port(sc, i) 1075 (void) remove_child_node(sc, i); 1076 } 1077 1078 return (rc); 1079 } 1080 1081 /* ARGSUSED */ 1082 static int 1083 t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp) 1084 { 1085 struct adapter *sc; 1086 1087 if (otyp != OTYP_CHR) 1088 return (EINVAL); 1089 1090 sc = ddi_get_soft_state(t4_list, getminor(*devp)); 1091 if (sc == NULL) 1092 return (ENXIO); 1093 1094 return (atomic_cas_uint(&sc->open, 0, EBUSY)); 1095 } 1096 1097 /* ARGSUSED */ 1098 static int 1099 t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp) 1100 { 1101 struct adapter *sc; 1102 1103 sc = ddi_get_soft_state(t4_list, getminor(dev)); 1104 if (sc == NULL) 1105 return (EINVAL); 1106 1107 (void) atomic_swap_uint(&sc->open, 0); 1108 return (0); 1109 } 1110 1111 /* ARGSUSED */ 1112 static int 1113 t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, int *rp) 1114 { 1115 int instance; 1116 struct adapter *sc; 1117 void *data = (void *)d; 1118 1119 if (crgetuid(credp) != 0) 1120 return (EPERM); 1121 1122 instance = getminor(dev); 1123 sc = ddi_get_soft_state(t4_list, instance); 1124 if (sc == NULL) 1125 return (EINVAL); 1126 1127 return (t4_ioctl(sc, cmd, data, mode)); 1128 } 1129 1130 static unsigned int 1131 getpf(struct adapter *sc) 1132 { 1133 int rc, *data; 1134 uint_t n, pf; 1135 1136 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip, 1137 DDI_PROP_DONTPASS, "reg", &data, &n); 1138 if (rc != DDI_SUCCESS) { 1139 cxgb_printf(sc->dip, CE_WARN, 1140 "failed to lookup \"reg\" property: %d", rc); 1141 return (0xff); 1142 } 1143 1144 pf = PCI_REG_FUNC_G(data[0]); 1145 ddi_prop_free(data); 1146 1147 return (pf); 1148 } 1149 1150 1151 static struct fw_info * 1152 find_fw_info(int chip) 1153 { 1154 u32 i; 1155 1156 fi[0].chip = CHELSIO_T4; 1157 fi[0].fw_hdr.chip = FW_HDR_CHIP_T4; 1158 fi[0].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T4)); 1159 fi[0].fw_hdr.intfver_nic = FW_INTFVER(T4, NIC); 1160 fi[0].fw_hdr.intfver_vnic = FW_INTFVER(T4, VNIC); 1161 fi[0].fw_hdr.intfver_ofld = FW_INTFVER(T4, OFLD); 1162 fi[0].fw_hdr.intfver_ri = FW_INTFVER(T4, RI); 1163 fi[0].fw_hdr.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU); 1164 fi[0].fw_hdr.intfver_iscsi = FW_INTFVER(T4, ISCSI); 1165 fi[0].fw_hdr.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU); 1166 fi[0].fw_hdr.intfver_fcoe = FW_INTFVER(T4, FCOE); 1167 1168 fi[1].chip = CHELSIO_T5; 1169 fi[1].fw_hdr.chip = FW_HDR_CHIP_T5; 1170 fi[1].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T5)); 1171 fi[1].fw_hdr.intfver_nic = FW_INTFVER(T5, NIC); 1172 fi[1].fw_hdr.intfver_vnic = FW_INTFVER(T5, VNIC); 1173 fi[1].fw_hdr.intfver_ofld = FW_INTFVER(T5, OFLD); 1174 fi[1].fw_hdr.intfver_ri = FW_INTFVER(T5, RI); 1175 fi[1].fw_hdr.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU); 1176 fi[1].fw_hdr.intfver_iscsi = FW_INTFVER(T5, ISCSI); 1177 fi[1].fw_hdr.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU); 1178 fi[1].fw_hdr.intfver_fcoe = FW_INTFVER(T5, FCOE); 1179 1180 fi[2].chip = CHELSIO_T6; 1181 fi[2].fw_hdr.chip = FW_HDR_CHIP_T6; 1182 fi[2].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T6)); 1183 fi[2].fw_hdr.intfver_nic = FW_INTFVER(T6, NIC); 1184 fi[2].fw_hdr.intfver_vnic = FW_INTFVER(T6, VNIC); 1185 fi[2].fw_hdr.intfver_ofld = FW_INTFVER(T6, OFLD); 1186 fi[2].fw_hdr.intfver_ri = FW_INTFVER(T6, RI); 1187 fi[2].fw_hdr.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU); 1188 fi[2].fw_hdr.intfver_iscsi = FW_INTFVER(T6, ISCSI); 1189 fi[2].fw_hdr.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU); 1190 fi[2].fw_hdr.intfver_fcoe = FW_INTFVER(T6, FCOE); 1191 1192 for (i = 0; i < ARRAY_SIZE(fi); i++) { 1193 if (fi[i].chip == chip) 1194 return &fi[i]; 1195 } 1196 1197 return NULL; 1198 } 1199 1200 /* 1201 * Install a compatible firmware (if required), establish contact with it, 1202 * become the master, and reset the device. 1203 */ 1204 static int 1205 prep_firmware(struct adapter *sc) 1206 { 1207 int rc; 1208 int fw_size; 1209 int reset = 1; 1210 enum dev_state state; 1211 unsigned char *fw_data; 1212 struct fw_info *fw_info; 1213 struct fw_hdr *card_fw; 1214 1215 struct driver_properties *p = &sc->props; 1216 1217 /* Contact firmware, request master */ 1218 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state); 1219 if (rc < 0) { 1220 rc = -rc; 1221 cxgb_printf(sc->dip, CE_WARN, 1222 "failed to connect to the firmware: %d.", rc); 1223 return (rc); 1224 } 1225 1226 if (rc == sc->mbox) 1227 sc->flags |= MASTER_PF; 1228 1229 /* We may need FW version info for later reporting */ 1230 t4_get_version_info(sc); 1231 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(sc->params.chip)); 1232 /* allocate memory to read the header of the firmware on the 1233 * card 1234 */ 1235 if (!fw_info) { 1236 cxgb_printf(sc->dip, CE_WARN, 1237 "unable to look up firmware information for chip %d.\n", 1238 CHELSIO_CHIP_VERSION(sc->params.chip)); 1239 return EINVAL; 1240 } 1241 card_fw = kmem_zalloc(sizeof(*card_fw), KM_SLEEP); 1242 if(!card_fw) { 1243 cxgb_printf(sc->dip, CE_WARN, 1244 "Memory allocation for card FW header failed\n"); 1245 return ENOMEM; 1246 } 1247 switch(CHELSIO_CHIP_VERSION(sc->params.chip)) { 1248 case CHELSIO_T4: 1249 fw_data = t4fw_data; 1250 fw_size = t4fw_size; 1251 break; 1252 case CHELSIO_T5: 1253 fw_data = t5fw_data; 1254 fw_size = t5fw_size; 1255 break; 1256 case CHELSIO_T6: 1257 fw_data = t6fw_data; 1258 fw_size = t6fw_size; 1259 break; 1260 default: 1261 cxgb_printf(sc->dip, CE_WARN, "Adapter type not supported\n"); 1262 kmem_free(card_fw, sizeof(*card_fw)); 1263 return EINVAL; 1264 } 1265 1266 rc = -t4_prep_fw(sc, fw_info, fw_data, fw_size, card_fw, 1267 p->t4_fw_install, state, &reset); 1268 1269 kmem_free(card_fw, sizeof(*card_fw)); 1270 1271 if (rc != 0) { 1272 cxgb_printf(sc->dip, CE_WARN, 1273 "failed to install firmware: %d", rc); 1274 return (rc); 1275 } else { 1276 /* refresh */ 1277 (void) t4_check_fw_version(sc); 1278 } 1279 1280 /* Reset device */ 1281 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST); 1282 if (rc != 0) { 1283 cxgb_printf(sc->dip, CE_WARN, 1284 "firmware reset failed: %d.", rc); 1285 if (rc != ETIMEDOUT && rc != EIO) 1286 (void) t4_fw_bye(sc, sc->mbox); 1287 return (rc); 1288 } 1289 1290 /* Partition adapter resources as specified in the config file. */ 1291 if (sc->flags & MASTER_PF) { 1292 /* Handle default vs special T4 config file */ 1293 1294 rc = partition_resources(sc); 1295 if (rc != 0) 1296 goto err; /* error message displayed already */ 1297 } 1298 1299 sc->flags |= FW_OK; 1300 return (0); 1301 err: 1302 return (rc); 1303 1304 } 1305 1306 static const struct memwin t4_memwin[] = { 1307 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 1308 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 1309 { MEMWIN2_BASE, MEMWIN2_APERTURE } 1310 }; 1311 1312 static const struct memwin t5_memwin[] = { 1313 { MEMWIN0_BASE, MEMWIN0_APERTURE }, 1314 { MEMWIN1_BASE, MEMWIN1_APERTURE }, 1315 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 }, 1316 }; 1317 1318 #define FW_PARAM_DEV(param) \ 1319 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \ 1320 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param)) 1321 #define FW_PARAM_PFVF(param) \ 1322 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \ 1323 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param)) 1324 1325 /* 1326 * Verify that the memory range specified by the memtype/offset/len pair is 1327 * valid and lies entirely within the memtype specified. The global address of 1328 * the start of the range is returned in addr. 1329 */ 1330 int 1331 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len, 1332 uint32_t *addr) 1333 { 1334 uint32_t em, addr_len, maddr, mlen; 1335 1336 /* Memory can only be accessed in naturally aligned 4 byte units */ 1337 if (off & 3 || len & 3 || len == 0) 1338 return (EINVAL); 1339 1340 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE); 1341 switch (mtype) { 1342 case MEM_EDC0: 1343 if (!(em & F_EDRAM0_ENABLE)) 1344 return (EINVAL); 1345 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR); 1346 maddr = G_EDRAM0_BASE(addr_len) << 20; 1347 mlen = G_EDRAM0_SIZE(addr_len) << 20; 1348 break; 1349 case MEM_EDC1: 1350 if (!(em & F_EDRAM1_ENABLE)) 1351 return (EINVAL); 1352 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR); 1353 maddr = G_EDRAM1_BASE(addr_len) << 20; 1354 mlen = G_EDRAM1_SIZE(addr_len) << 20; 1355 break; 1356 case MEM_MC: 1357 if (!(em & F_EXT_MEM_ENABLE)) 1358 return (EINVAL); 1359 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR); 1360 maddr = G_EXT_MEM_BASE(addr_len) << 20; 1361 mlen = G_EXT_MEM_SIZE(addr_len) << 20; 1362 break; 1363 case MEM_MC1: 1364 if (is_t4(sc->params.chip) || !(em & F_EXT_MEM1_ENABLE)) 1365 return (EINVAL); 1366 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR); 1367 maddr = G_EXT_MEM1_BASE(addr_len) << 20; 1368 mlen = G_EXT_MEM1_SIZE(addr_len) << 20; 1369 break; 1370 default: 1371 return (EINVAL); 1372 } 1373 1374 if (mlen > 0 && off < mlen && off + len <= mlen) { 1375 *addr = maddr + off; /* global address */ 1376 return (0); 1377 } 1378 1379 return (EFAULT); 1380 } 1381 1382 void 1383 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture) 1384 { 1385 const struct memwin *mw; 1386 1387 if (is_t4(sc->params.chip)) { 1388 mw = &t4_memwin[win]; 1389 } else { 1390 mw = &t5_memwin[win]; 1391 } 1392 1393 if (base != NULL) 1394 *base = mw->base; 1395 if (aperture != NULL) 1396 *aperture = mw->aperture; 1397 } 1398 1399 /* 1400 * Upload configuration file to card's memory. 1401 */ 1402 static int 1403 upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma) 1404 { 1405 int rc = 0, cflen; 1406 u_int i, n; 1407 uint32_t param, val, addr, mtype, maddr; 1408 uint32_t off, mw_base, mw_aperture; 1409 const uint32_t *cfdata; 1410 1411 /* Figure out where the firmware wants us to upload it. */ 1412 param = FW_PARAM_DEV(CF); 1413 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 1414 if (rc != 0) { 1415 /* Firmwares without config file support will fail this way */ 1416 cxgb_printf(sc->dip, CE_WARN, 1417 "failed to query config file location: %d.\n", rc); 1418 return (rc); 1419 } 1420 *mt = mtype = G_FW_PARAMS_PARAM_Y(val); 1421 *ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16; 1422 1423 switch (CHELSIO_CHIP_VERSION(sc->params.chip)) { 1424 case CHELSIO_T4: 1425 cflen = t4cfg_size & ~3; 1426 /* LINTED: E_BAD_PTR_CAST_ALIGN */ 1427 cfdata = (const uint32_t *)t4cfg_data; 1428 break; 1429 case CHELSIO_T5: 1430 cflen = t5cfg_size & ~3; 1431 /* LINTED: E_BAD_PTR_CAST_ALIGN */ 1432 cfdata = (const uint32_t *)t5cfg_data; 1433 break; 1434 case CHELSIO_T6: 1435 cflen = t6cfg_size & ~3; 1436 /* LINTED: E_BAD_PTR_CAST_ALIGN */ 1437 cfdata = (const uint32_t *)t6cfg_data; 1438 break; 1439 default: 1440 cxgb_printf(sc->dip, CE_WARN, 1441 "Invalid Adapter detected\n"); 1442 return EINVAL; 1443 } 1444 1445 if (cflen > FLASH_CFG_MAX_SIZE) { 1446 cxgb_printf(sc->dip, CE_WARN, 1447 "config file too long (%d, max allowed is %d). ", 1448 cflen, FLASH_CFG_MAX_SIZE); 1449 return (EFBIG); 1450 } 1451 1452 rc = validate_mt_off_len(sc, mtype, maddr, cflen, &addr); 1453 if (rc != 0) { 1454 1455 cxgb_printf(sc->dip, CE_WARN, 1456 "%s: addr (%d/0x%x) or len %d is not valid: %d. " 1457 "Will try to use the config on the card, if any.\n", 1458 __func__, mtype, maddr, cflen, rc); 1459 return (EFAULT); 1460 } 1461 1462 memwin_info(sc, 2, &mw_base, &mw_aperture); 1463 while (cflen) { 1464 off = position_memwin(sc, 2, addr); 1465 n = min(cflen, mw_aperture - off); 1466 for (i = 0; i < n; i += 4) 1467 t4_write_reg(sc, mw_base + off + i, *cfdata++); 1468 cflen -= n; 1469 addr += n; 1470 } 1471 1472 return (rc); 1473 } 1474 1475 /* 1476 * Partition chip resources for use between various PFs, VFs, etc. This is done 1477 * by uploading the firmware configuration file to the adapter and instructing 1478 * the firmware to process it. 1479 */ 1480 static int 1481 partition_resources(struct adapter *sc) 1482 { 1483 int rc; 1484 struct fw_caps_config_cmd caps; 1485 uint32_t mtype, maddr, finicsum, cfcsum; 1486 1487 rc = upload_config_file(sc, &mtype, &maddr); 1488 if (rc != 0) { 1489 mtype = FW_MEMTYPE_CF_FLASH; 1490 maddr = t4_flash_cfg_addr(sc); 1491 } 1492 1493 bzero(&caps, sizeof (caps)); 1494 caps.op_to_write = BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1495 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1496 caps.cfvalid_to_len16 = BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID | 1497 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) | 1498 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps)); 1499 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps); 1500 if (rc != 0) { 1501 cxgb_printf(sc->dip, CE_WARN, 1502 "failed to pre-process config file: %d.\n", rc); 1503 return (rc); 1504 } 1505 1506 finicsum = ntohl(caps.finicsum); 1507 cfcsum = ntohl(caps.cfcsum); 1508 if (finicsum != cfcsum) { 1509 cxgb_printf(sc->dip, CE_WARN, 1510 "WARNING: config file checksum mismatch: %08x %08x\n", 1511 finicsum, cfcsum); 1512 } 1513 sc->cfcsum = cfcsum; 1514 1515 /* TODO: Need to configure this correctly */ 1516 caps.toecaps = htons(FW_CAPS_CONFIG_TOE); 1517 caps.iscsicaps = 0; 1518 caps.rdmacaps = 0; 1519 caps.fcoecaps = 0; 1520 /* TODO: Disable VNIC cap for now */ 1521 caps.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM); 1522 1523 caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1524 F_FW_CMD_REQUEST | F_FW_CMD_WRITE); 1525 caps.cfvalid_to_len16 = htonl(FW_LEN16(caps)); 1526 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), NULL); 1527 if (rc != 0) { 1528 cxgb_printf(sc->dip, CE_WARN, 1529 "failed to process config file: %d.\n", rc); 1530 return (rc); 1531 } 1532 1533 return (0); 1534 } 1535 1536 /* 1537 * Tweak configuration based on module parameters, etc. Most of these have 1538 * defaults assigned to them by Firmware Configuration Files (if we're using 1539 * them) but need to be explicitly set if we're using hard-coded 1540 * initialization. But even in the case of using Firmware Configuration 1541 * Files, we'd like to expose the ability to change these via module 1542 * parameters so these are essentially common tweaks/settings for 1543 * Configuration Files and hard-coded initialization ... 1544 */ 1545 static int 1546 adap__pre_init_tweaks(struct adapter *sc) 1547 { 1548 int rx_dma_offset = 2; /* Offset of RX packets into DMA buffers */ 1549 1550 /* 1551 * Fix up various Host-Dependent Parameters like Page Size, Cache 1552 * Line Size, etc. The firmware default is for a 4KB Page Size and 1553 * 64B Cache Line Size ... 1554 */ 1555 (void) t4_fixup_host_params_compat(sc, PAGE_SIZE, CACHE_LINE, T5_LAST_REV); 1556 1557 t4_set_reg_field(sc, A_SGE_CONTROL, 1558 V_PKTSHIFT(M_PKTSHIFT), V_PKTSHIFT(rx_dma_offset)); 1559 1560 return 0; 1561 } 1562 /* 1563 * Retrieve parameters that are needed (or nice to have) prior to calling 1564 * t4_sge_init and t4_fw_initialize. 1565 */ 1566 static int 1567 get_params__pre_init(struct adapter *sc) 1568 { 1569 int rc; 1570 uint32_t param[2], val[2]; 1571 struct fw_devlog_cmd cmd; 1572 struct devlog_params *dlog = &sc->params.devlog; 1573 1574 /* 1575 * Grab the raw VPD parameters. 1576 */ 1577 rc = -t4_get_raw_vpd_params(sc, &sc->params.vpd); 1578 if (rc != 0) { 1579 cxgb_printf(sc->dip, CE_WARN, 1580 "failed to query VPD parameters (pre_init): %d.\n", rc); 1581 return (rc); 1582 } 1583 1584 param[0] = FW_PARAM_DEV(PORTVEC); 1585 param[1] = FW_PARAM_DEV(CCLK); 1586 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val); 1587 if (rc != 0) { 1588 cxgb_printf(sc->dip, CE_WARN, 1589 "failed to query parameters (pre_init): %d.\n", rc); 1590 return (rc); 1591 } 1592 1593 sc->params.portvec = val[0]; 1594 sc->params.nports = 0; 1595 while (val[0]) { 1596 sc->params.nports++; 1597 val[0] &= val[0] - 1; 1598 } 1599 1600 sc->params.vpd.cclk = val[1]; 1601 1602 /* Read device log parameters. */ 1603 bzero(&cmd, sizeof (cmd)); 1604 cmd.op_to_write = htonl(V_FW_CMD_OP(FW_DEVLOG_CMD) | 1605 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1606 cmd.retval_len16 = htonl(FW_LEN16(cmd)); 1607 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof (cmd), &cmd); 1608 if (rc != 0) { 1609 cxgb_printf(sc->dip, CE_WARN, 1610 "failed to get devlog parameters: %d.\n", rc); 1611 bzero(dlog, sizeof (*dlog)); 1612 rc = 0; /* devlog isn't critical for device operation */ 1613 } else { 1614 val[0] = ntohl(cmd.memtype_devlog_memaddr16_devlog); 1615 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]); 1616 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4; 1617 dlog->size = ntohl(cmd.memsize_devlog); 1618 } 1619 1620 return (rc); 1621 } 1622 1623 /* 1624 * Retrieve various parameters that are of interest to the driver. The device 1625 * has been initialized by the firmware at this point. 1626 */ 1627 static int 1628 get_params__post_init(struct adapter *sc) 1629 { 1630 int rc; 1631 uint32_t param[7], val[7]; 1632 struct fw_caps_config_cmd caps; 1633 1634 param[0] = FW_PARAM_PFVF(IQFLINT_START); 1635 param[1] = FW_PARAM_PFVF(EQ_START); 1636 param[2] = FW_PARAM_PFVF(FILTER_START); 1637 param[3] = FW_PARAM_PFVF(FILTER_END); 1638 param[4] = FW_PARAM_PFVF(L2T_START); 1639 param[5] = FW_PARAM_PFVF(L2T_END); 1640 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 1641 if (rc != 0) { 1642 cxgb_printf(sc->dip, CE_WARN, 1643 "failed to query parameters (post_init): %d.\n", rc); 1644 return (rc); 1645 } 1646 1647 /* LINTED: E_ASSIGN_NARROW_CONV */ 1648 sc->sge.iq_start = val[0]; 1649 sc->sge.eq_start = val[1]; 1650 sc->tids.ftid_base = val[2]; 1651 sc->tids.nftids = val[3] - val[2] + 1; 1652 sc->vres.l2t.start = val[4]; 1653 sc->vres.l2t.size = val[5] - val[4] + 1; 1654 1655 /* get capabilites */ 1656 bzero(&caps, sizeof (caps)); 1657 caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) | 1658 F_FW_CMD_REQUEST | F_FW_CMD_READ); 1659 caps.cfvalid_to_len16 = htonl(FW_LEN16(caps)); 1660 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps); 1661 if (rc != 0) { 1662 cxgb_printf(sc->dip, CE_WARN, 1663 "failed to get card capabilities: %d.\n", rc); 1664 return (rc); 1665 } 1666 1667 if (caps.toecaps != 0) { 1668 /* query offload-related parameters */ 1669 param[0] = FW_PARAM_DEV(NTID); 1670 param[1] = FW_PARAM_PFVF(SERVER_START); 1671 param[2] = FW_PARAM_PFVF(SERVER_END); 1672 param[3] = FW_PARAM_PFVF(TDDP_START); 1673 param[4] = FW_PARAM_PFVF(TDDP_END); 1674 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ); 1675 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val); 1676 if (rc != 0) { 1677 cxgb_printf(sc->dip, CE_WARN, 1678 "failed to query TOE parameters: %d.\n", rc); 1679 return (rc); 1680 } 1681 sc->tids.ntids = val[0]; 1682 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS); 1683 sc->tids.stid_base = val[1]; 1684 sc->tids.nstids = val[2] - val[1] + 1; 1685 sc->vres.ddp.start = val[3]; 1686 sc->vres.ddp.size = val[4] - val[3] + 1; 1687 sc->params.ofldq_wr_cred = val[5]; 1688 sc->params.offload = 1; 1689 } 1690 1691 /* These are finalized by FW initialization, load their values now */ 1692 val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION); 1693 sc->params.tp.tre = G_TIMERRESOLUTION(val[0]); 1694 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]); 1695 t4_read_mtu_tbl(sc, sc->params.mtus, NULL); 1696 1697 return (rc); 1698 } 1699 1700 static int 1701 set_params__post_init(struct adapter *sc) 1702 { 1703 uint32_t param, val; 1704 1705 /* ask for encapsulated CPLs */ 1706 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP); 1707 val = 1; 1708 (void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, ¶m, &val); 1709 1710 return (0); 1711 } 1712 1713 /* TODO: verify */ 1714 static void 1715 setup_memwin(struct adapter *sc) 1716 { 1717 pci_regspec_t *data; 1718 int rc; 1719 uint_t n; 1720 uintptr_t bar0; 1721 uintptr_t mem_win0_base, mem_win1_base, mem_win2_base; 1722 uintptr_t mem_win2_aperture; 1723 1724 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip, 1725 DDI_PROP_DONTPASS, "assigned-addresses", (int **)&data, &n); 1726 if (rc != DDI_SUCCESS) { 1727 cxgb_printf(sc->dip, CE_WARN, 1728 "failed to lookup \"assigned-addresses\" property: %d", rc); 1729 return; 1730 } 1731 n /= sizeof (*data); 1732 1733 bar0 = ((uint64_t)data[0].pci_phys_mid << 32) | data[0].pci_phys_low; 1734 ddi_prop_free(data); 1735 1736 if (is_t4(sc->params.chip)) { 1737 mem_win0_base = bar0 + MEMWIN0_BASE; 1738 mem_win1_base = bar0 + MEMWIN1_BASE; 1739 mem_win2_base = bar0 + MEMWIN2_BASE; 1740 mem_win2_aperture = MEMWIN2_APERTURE; 1741 } else { 1742 /* For T5, only relative offset inside the PCIe BAR is passed */ 1743 mem_win0_base = MEMWIN0_BASE; 1744 mem_win1_base = MEMWIN1_BASE; 1745 mem_win2_base = MEMWIN2_BASE_T5; 1746 mem_win2_aperture = MEMWIN2_APERTURE_T5; 1747 } 1748 1749 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0), 1750 mem_win0_base | V_BIR(0) | 1751 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10)); 1752 1753 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1), 1754 mem_win1_base | V_BIR(0) | 1755 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10)); 1756 1757 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2), 1758 mem_win2_base | V_BIR(0) | 1759 V_WINDOW(ilog2(mem_win2_aperture) - 10)); 1760 1761 /* flush */ 1762 (void)t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2)); 1763 } 1764 1765 /* 1766 * Positions the memory window such that it can be used to access the specified 1767 * address in the chip's address space. The return value is the offset of addr 1768 * from the start of the window. 1769 */ 1770 uint32_t 1771 position_memwin(struct adapter *sc, int n, uint32_t addr) 1772 { 1773 uint32_t start, pf; 1774 uint32_t reg; 1775 1776 if (addr & 3) { 1777 cxgb_printf(sc->dip, CE_WARN, 1778 "addr (0x%x) is not at a 4B boundary.\n", addr); 1779 return (EFAULT); 1780 } 1781 1782 if (is_t4(sc->params.chip)) { 1783 pf = 0; 1784 start = addr & ~0xf; /* start must be 16B aligned */ 1785 } else { 1786 pf = V_PFNUM(sc->pf); 1787 start = addr & ~0x7f; /* start must be 128B aligned */ 1788 } 1789 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n); 1790 1791 t4_write_reg(sc, reg, start | pf); 1792 (void) t4_read_reg(sc, reg); 1793 1794 return (addr - start); 1795 } 1796 1797 1798 /* 1799 * Reads the named property and fills up the "data" array (which has at least 1800 * "count" elements). We first try and lookup the property for our dev_t and 1801 * then retry with DDI_DEV_T_ANY if it's not found. 1802 * 1803 * Returns non-zero if the property was found and "data" has been updated. 1804 */ 1805 static int 1806 prop_lookup_int_array(struct adapter *sc, char *name, int *data, uint_t count) 1807 { 1808 dev_info_t *dip = sc->dip; 1809 dev_t dev = sc->dev; 1810 int rc, *d; 1811 uint_t i, n; 1812 1813 rc = ddi_prop_lookup_int_array(dev, dip, DDI_PROP_DONTPASS, 1814 name, &d, &n); 1815 if (rc == DDI_PROP_SUCCESS) 1816 goto found; 1817 1818 if (rc != DDI_PROP_NOT_FOUND) { 1819 cxgb_printf(dip, CE_WARN, 1820 "failed to lookup property %s for minor %d: %d.", 1821 name, getminor(dev), rc); 1822 return (0); 1823 } 1824 1825 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 1826 name, &d, &n); 1827 if (rc == DDI_PROP_SUCCESS) 1828 goto found; 1829 1830 if (rc != DDI_PROP_NOT_FOUND) { 1831 cxgb_printf(dip, CE_WARN, 1832 "failed to lookup property %s: %d.", name, rc); 1833 return (0); 1834 } 1835 1836 return (0); 1837 1838 found: 1839 if (n > count) { 1840 cxgb_printf(dip, CE_NOTE, 1841 "property %s has too many elements (%d), ignoring extras", 1842 name, n); 1843 } 1844 1845 for (i = 0; i < n && i < count; i++) 1846 data[i] = d[i]; 1847 ddi_prop_free(d); 1848 1849 return (1); 1850 } 1851 1852 static int 1853 prop_lookup_int(struct adapter *sc, char *name, int defval) 1854 { 1855 int rc; 1856 1857 rc = ddi_prop_get_int(sc->dev, sc->dip, DDI_PROP_DONTPASS, name, -1); 1858 if (rc != -1) 1859 return (rc); 1860 1861 return (ddi_prop_get_int(DDI_DEV_T_ANY, sc->dip, DDI_PROP_DONTPASS, 1862 name, defval)); 1863 } 1864 1865 static int 1866 init_driver_props(struct adapter *sc, struct driver_properties *p) 1867 { 1868 dev_t dev = sc->dev; 1869 dev_info_t *dip = sc->dip; 1870 int i, *data; 1871 uint_t tmr[SGE_NTIMERS] = {5, 10, 20, 50, 100, 200}; 1872 uint_t cnt[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */ 1873 1874 /* 1875 * Holdoff timer 1876 */ 1877 data = &p->timer_val[0]; 1878 for (i = 0; i < SGE_NTIMERS; i++) 1879 data[i] = tmr[i]; 1880 (void) prop_lookup_int_array(sc, "holdoff-timer-values", data, 1881 SGE_NTIMERS); 1882 for (i = 0; i < SGE_NTIMERS; i++) { 1883 int limit = 200U; 1884 if (data[i] > limit) { 1885 cxgb_printf(dip, CE_WARN, 1886 "holdoff timer %d is too high (%d), lowered to %d.", 1887 i, data[i], limit); 1888 data[i] = limit; 1889 } 1890 } 1891 (void) ddi_prop_update_int_array(dev, dip, "holdoff-timer-values", 1892 data, SGE_NTIMERS); 1893 1894 /* 1895 * Holdoff packet counter 1896 */ 1897 data = &p->counter_val[0]; 1898 for (i = 0; i < SGE_NCOUNTERS; i++) 1899 data[i] = cnt[i]; 1900 (void) prop_lookup_int_array(sc, "holdoff-pkt-counter-values", data, 1901 SGE_NCOUNTERS); 1902 for (i = 0; i < SGE_NCOUNTERS; i++) { 1903 int limit = M_THRESHOLD_0; 1904 if (data[i] > limit) { 1905 cxgb_printf(dip, CE_WARN, 1906 "holdoff pkt-counter %d is too high (%d), " 1907 "lowered to %d.", i, data[i], limit); 1908 data[i] = limit; 1909 } 1910 } 1911 (void) ddi_prop_update_int_array(dev, dip, "holdoff-pkt-counter-values", 1912 data, SGE_NCOUNTERS); 1913 1914 /* 1915 * Maximum # of tx and rx queues to use for each 1916 * 100G, 40G, 25G, 10G and 1G port. 1917 */ 1918 p->max_ntxq_10g = prop_lookup_int(sc, "max-ntxq-10G-port", 8); 1919 (void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port", 1920 p->max_ntxq_10g); 1921 1922 p->max_nrxq_10g = prop_lookup_int(sc, "max-nrxq-10G-port", 8); 1923 (void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port", 1924 p->max_nrxq_10g); 1925 1926 p->max_ntxq_1g = prop_lookup_int(sc, "max-ntxq-1G-port", 2); 1927 (void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port", 1928 p->max_ntxq_1g); 1929 1930 p->max_nrxq_1g = prop_lookup_int(sc, "max-nrxq-1G-port", 2); 1931 (void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port", 1932 p->max_nrxq_1g); 1933 1934 #ifdef TCP_OFFLOAD_ENABLE 1935 p->max_nofldtxq_10g = prop_lookup_int(sc, "max-nofldtxq-10G-port", 8); 1936 (void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port", 1937 p->max_nofldtxq_10g); 1938 1939 p->max_nofldrxq_10g = prop_lookup_int(sc, "max-nofldrxq-10G-port", 2); 1940 (void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port", 1941 p->max_nofldrxq_10g); 1942 1943 p->max_nofldtxq_1g = prop_lookup_int(sc, "max-nofldtxq-1G-port", 2); 1944 (void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port", 1945 p->max_nofldtxq_1g); 1946 1947 p->max_nofldrxq_1g = prop_lookup_int(sc, "max-nofldrxq-1G-port", 1); 1948 (void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port", 1949 p->max_nofldrxq_1g); 1950 #endif 1951 1952 /* 1953 * Holdoff parameters for 10G and 1G ports. 1954 */ 1955 p->tmr_idx_10g = prop_lookup_int(sc, "holdoff-timer-idx-10G", 0); 1956 (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-10G", 1957 p->tmr_idx_10g); 1958 1959 p->pktc_idx_10g = prop_lookup_int(sc, "holdoff-pktc-idx-10G", 2); 1960 (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-10G", 1961 p->pktc_idx_10g); 1962 1963 p->tmr_idx_1g = prop_lookup_int(sc, "holdoff-timer-idx-1G", 0); 1964 (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-1G", 1965 p->tmr_idx_1g); 1966 1967 p->pktc_idx_1g = prop_lookup_int(sc, "holdoff-pktc-idx-1G", 2); 1968 (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-1G", 1969 p->pktc_idx_1g); 1970 1971 /* 1972 * Size (number of entries) of each tx and rx queue. 1973 */ 1974 i = prop_lookup_int(sc, "qsize-txq", TX_EQ_QSIZE); 1975 p->qsize_txq = max(i, 128); 1976 if (p->qsize_txq != i) { 1977 cxgb_printf(dip, CE_WARN, 1978 "using %d instead of %d as the tx queue size", 1979 p->qsize_txq, i); 1980 } 1981 (void) ddi_prop_update_int(dev, dip, "qsize-txq", p->qsize_txq); 1982 1983 i = prop_lookup_int(sc, "qsize-rxq", RX_IQ_QSIZE); 1984 p->qsize_rxq = max(i, 128); 1985 while (p->qsize_rxq & 7) 1986 p->qsize_rxq--; 1987 if (p->qsize_rxq != i) { 1988 cxgb_printf(dip, CE_WARN, 1989 "using %d instead of %d as the rx queue size", 1990 p->qsize_rxq, i); 1991 } 1992 (void) ddi_prop_update_int(dev, dip, "qsize-rxq", p->qsize_rxq); 1993 1994 /* 1995 * Interrupt types allowed. 1996 * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively. See sys/ddi_intr.h 1997 */ 1998 p->intr_types = prop_lookup_int(sc, "interrupt-types", 1999 DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED); 2000 (void) ddi_prop_update_int(dev, dip, "interrupt-types", p->intr_types); 2001 2002 /* 2003 * Forwarded interrupt queues. Create this property to force the driver 2004 * to use forwarded interrupt queues. 2005 */ 2006 if (ddi_prop_exists(dev, dip, DDI_PROP_DONTPASS, 2007 "interrupt-forwarding") != 0 || 2008 ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, 2009 "interrupt-forwarding") != 0) { 2010 UNIMPLEMENTED(); 2011 (void) ddi_prop_create(dev, dip, DDI_PROP_CANSLEEP, 2012 "interrupt-forwarding", NULL, 0); 2013 } 2014 2015 /* 2016 * Write combining 2017 * 0 to disable, 1 to enable 2018 */ 2019 p->wc = prop_lookup_int(sc, "write-combine", 1); 2020 cxgb_printf(dip, CE_WARN, "write-combine: using of %d", p->wc); 2021 if (p->wc != 0 && p->wc != 1) { 2022 cxgb_printf(dip, CE_WARN, 2023 "write-combine: using 1 instead of %d", p->wc); 2024 p->wc = 1; 2025 } 2026 (void) ddi_prop_update_int(dev, dip, "write-combine", p->wc); 2027 2028 p->t4_fw_install = prop_lookup_int(sc, "t4_fw_install", 1); 2029 if (p->t4_fw_install != 0 && p->t4_fw_install != 2) 2030 p->t4_fw_install = 1; 2031 (void) ddi_prop_update_int(dev, dip, "t4_fw_install", p->t4_fw_install); 2032 2033 /* Multiple Rings */ 2034 p->multi_rings = prop_lookup_int(sc, "multi-rings", 1); 2035 if (p->multi_rings != 0 && p->multi_rings != 1) { 2036 cxgb_printf(dip, CE_NOTE, 2037 "multi-rings: using value 1 instead of %d", p->multi_rings); 2038 p->multi_rings = 1; 2039 } 2040 2041 (void) ddi_prop_update_int(dev, dip, "multi-rings", p->multi_rings); 2042 2043 return (0); 2044 } 2045 2046 static int 2047 remove_extra_props(struct adapter *sc, int n10g, int n1g) 2048 { 2049 if (n10g == 0) { 2050 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-10G-port"); 2051 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-10G-port"); 2052 (void) ddi_prop_remove(sc->dev, sc->dip, 2053 "holdoff-timer-idx-10G"); 2054 (void) ddi_prop_remove(sc->dev, sc->dip, 2055 "holdoff-pktc-idx-10G"); 2056 } 2057 2058 if (n1g == 0) { 2059 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-1G-port"); 2060 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-1G-port"); 2061 (void) ddi_prop_remove(sc->dev, sc->dip, 2062 "holdoff-timer-idx-1G"); 2063 (void) ddi_prop_remove(sc->dev, sc->dip, "holdoff-pktc-idx-1G"); 2064 } 2065 2066 return (0); 2067 } 2068 2069 static int 2070 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g, 2071 struct intrs_and_queues *iaq) 2072 { 2073 struct driver_properties *p = &sc->props; 2074 int rc, itype, itypes, navail, nc, nrxq10g, nrxq1g, n; 2075 int nofldrxq10g = 0, nofldrxq1g = 0; 2076 2077 bzero(iaq, sizeof (*iaq)); 2078 nc = ncpus; /* our snapshot of the number of CPUs */ 2079 iaq->ntxq10g = min(nc, p->max_ntxq_10g); 2080 iaq->ntxq1g = min(nc, p->max_ntxq_1g); 2081 iaq->nrxq10g = nrxq10g = min(nc, p->max_nrxq_10g); 2082 iaq->nrxq1g = nrxq1g = min(nc, p->max_nrxq_1g); 2083 #ifdef TCP_OFFLOAD_ENABLE 2084 iaq->nofldtxq10g = min(nc, p->max_nofldtxq_10g); 2085 iaq->nofldtxq1g = min(nc, p->max_nofldtxq_1g); 2086 iaq->nofldrxq10g = nofldrxq10g = min(nc, p->max_nofldrxq_10g); 2087 iaq->nofldrxq1g = nofldrxq1g = min(nc, p->max_nofldrxq_1g); 2088 #endif 2089 2090 rc = ddi_intr_get_supported_types(sc->dip, &itypes); 2091 if (rc != DDI_SUCCESS) { 2092 cxgb_printf(sc->dip, CE_WARN, 2093 "failed to determine supported interrupt types: %d", rc); 2094 return (rc); 2095 } 2096 2097 for (itype = DDI_INTR_TYPE_MSIX; itype; itype >>= 1) { 2098 ASSERT(itype == DDI_INTR_TYPE_MSIX || 2099 itype == DDI_INTR_TYPE_MSI || 2100 itype == DDI_INTR_TYPE_FIXED); 2101 2102 if ((itype & itypes & p->intr_types) == 0) 2103 continue; /* not supported or not allowed */ 2104 2105 navail = 0; 2106 rc = ddi_intr_get_navail(sc->dip, itype, &navail); 2107 if (rc != DDI_SUCCESS || navail == 0) { 2108 cxgb_printf(sc->dip, CE_WARN, 2109 "failed to get # of interrupts for type %d: %d", 2110 itype, rc); 2111 continue; /* carry on */ 2112 } 2113 2114 iaq->intr_type = itype; 2115 if (navail == 0) 2116 continue; 2117 2118 /* 2119 * Best option: an interrupt vector for errors, one for the 2120 * firmware event queue, and one each for each rxq (NIC as well 2121 * as offload). 2122 */ 2123 iaq->nirq = T4_EXTRA_INTR; 2124 iaq->nirq += n10g * (nrxq10g + nofldrxq10g); 2125 iaq->nirq += n1g * (nrxq1g + nofldrxq1g); 2126 2127 if (iaq->nirq <= navail && 2128 (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) { 2129 iaq->intr_fwd = 0; 2130 goto allocate; 2131 } 2132 2133 /* 2134 * Second best option: an interrupt vector for errors, one for 2135 * the firmware event queue, and one each for either NIC or 2136 * offload rxq's. 2137 */ 2138 iaq->nirq = T4_EXTRA_INTR; 2139 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g); 2140 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g); 2141 if (iaq->nirq <= navail && 2142 (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) { 2143 iaq->intr_fwd = 1; 2144 goto allocate; 2145 } 2146 2147 /* 2148 * Next best option: an interrupt vector for errors, one for the 2149 * firmware event queue, and at least one per port. At this 2150 * point we know we'll have to downsize nrxq or nofldrxq to fit 2151 * what's available to us. 2152 */ 2153 iaq->nirq = T4_EXTRA_INTR; 2154 iaq->nirq += n10g + n1g; 2155 if (iaq->nirq <= navail) { 2156 int leftover = navail - iaq->nirq; 2157 2158 if (n10g > 0) { 2159 int target = max(nrxq10g, nofldrxq10g); 2160 2161 n = 1; 2162 while (n < target && leftover >= n10g) { 2163 leftover -= n10g; 2164 iaq->nirq += n10g; 2165 n++; 2166 } 2167 iaq->nrxq10g = min(n, nrxq10g); 2168 #ifdef TCP_OFFLOAD_ENABLE 2169 iaq->nofldrxq10g = min(n, nofldrxq10g); 2170 #endif 2171 } 2172 2173 if (n1g > 0) { 2174 int target = max(nrxq1g, nofldrxq1g); 2175 2176 n = 1; 2177 while (n < target && leftover >= n1g) { 2178 leftover -= n1g; 2179 iaq->nirq += n1g; 2180 n++; 2181 } 2182 iaq->nrxq1g = min(n, nrxq1g); 2183 #ifdef TCP_OFFLOAD_ENABLE 2184 iaq->nofldrxq1g = min(n, nofldrxq1g); 2185 #endif 2186 } 2187 2188 /* We have arrived at a minimum value required to enable 2189 * per queue irq(either NIC or offload). Thus for non- 2190 * offload case, we will get a vector per queue, while 2191 * offload case, we will get a vector per offload/NIC q. 2192 * Hence enable Interrupt forwarding only for offload 2193 * case. 2194 */ 2195 #ifdef TCP_OFFLOAD_ENABLE 2196 if (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq)) { 2197 iaq->intr_fwd = 1; 2198 #else 2199 if (itype != DDI_INTR_TYPE_MSI) { 2200 #endif 2201 goto allocate; 2202 } 2203 } 2204 2205 /* 2206 * Least desirable option: one interrupt vector for everything. 2207 */ 2208 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1; 2209 #ifdef TCP_OFFLOAD_ENABLE 2210 iaq->nofldrxq10g = iaq->nofldrxq1g = 1; 2211 #endif 2212 iaq->intr_fwd = 1; 2213 2214 allocate: 2215 return (0); 2216 } 2217 2218 cxgb_printf(sc->dip, CE_WARN, 2219 "failed to find a usable interrupt type. supported=%d, allowed=%d", 2220 itypes, p->intr_types); 2221 return (DDI_FAILURE); 2222 } 2223 2224 static int 2225 add_child_node(struct adapter *sc, int idx) 2226 { 2227 int rc; 2228 struct port_info *pi; 2229 2230 if (idx < 0 || idx >= sc->params.nports) 2231 return (EINVAL); 2232 2233 pi = sc->port[idx]; 2234 if (pi == NULL) 2235 return (ENODEV); /* t4_port_init failed earlier */ 2236 2237 PORT_LOCK(pi); 2238 if (pi->dip != NULL) { 2239 rc = 0; /* EEXIST really, but then bus_config fails */ 2240 goto done; 2241 } 2242 2243 rc = ndi_devi_alloc(sc->dip, T4_PORT_NAME, DEVI_SID_NODEID, &pi->dip); 2244 if (rc != DDI_SUCCESS || pi->dip == NULL) { 2245 rc = ENOMEM; 2246 goto done; 2247 } 2248 2249 (void) ddi_set_parent_data(pi->dip, pi); 2250 (void) ndi_devi_bind_driver(pi->dip, 0); 2251 rc = 0; 2252 done: 2253 PORT_UNLOCK(pi); 2254 return (rc); 2255 } 2256 2257 static int 2258 remove_child_node(struct adapter *sc, int idx) 2259 { 2260 int rc; 2261 struct port_info *pi; 2262 2263 if (idx < 0 || idx >= sc->params.nports) 2264 return (EINVAL); 2265 2266 pi = sc->port[idx]; 2267 if (pi == NULL) 2268 return (ENODEV); 2269 2270 PORT_LOCK(pi); 2271 if (pi->dip == NULL) { 2272 rc = ENODEV; 2273 goto done; 2274 } 2275 2276 rc = ndi_devi_free(pi->dip); 2277 if (rc == 0) 2278 pi->dip = NULL; 2279 done: 2280 PORT_UNLOCK(pi); 2281 return (rc); 2282 } 2283 2284 #define KS_UINIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG) 2285 #define KS_CINIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR) 2286 #define KS_U_SET(x, y) kstatp->x.value.ul = (y) 2287 #define KS_C_SET(x, ...) \ 2288 (void) snprintf(kstatp->x.value.c, 16, __VA_ARGS__) 2289 2290 /* 2291 * t4nex:X:config 2292 */ 2293 struct t4_kstats { 2294 kstat_named_t chip_ver; 2295 kstat_named_t fw_vers; 2296 kstat_named_t tp_vers; 2297 kstat_named_t driver_version; 2298 kstat_named_t serial_number; 2299 kstat_named_t ec_level; 2300 kstat_named_t id; 2301 kstat_named_t bus_type; 2302 kstat_named_t bus_width; 2303 kstat_named_t bus_speed; 2304 kstat_named_t core_clock; 2305 kstat_named_t port_cnt; 2306 kstat_named_t port_type; 2307 kstat_named_t pci_vendor_id; 2308 kstat_named_t pci_device_id; 2309 }; 2310 static kstat_t * 2311 setup_kstats(struct adapter *sc) 2312 { 2313 kstat_t *ksp; 2314 struct t4_kstats *kstatp; 2315 int ndata; 2316 struct pci_params *p = &sc->params.pci; 2317 struct vpd_params *v = &sc->params.vpd; 2318 uint16_t pci_vendor, pci_device; 2319 2320 ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t); 2321 2322 ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config", 2323 "nexus", KSTAT_TYPE_NAMED, ndata, 0); 2324 if (ksp == NULL) { 2325 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats."); 2326 return (NULL); 2327 } 2328 2329 kstatp = (struct t4_kstats *)ksp->ks_data; 2330 2331 KS_UINIT(chip_ver); 2332 KS_CINIT(fw_vers); 2333 KS_CINIT(tp_vers); 2334 KS_CINIT(driver_version); 2335 KS_CINIT(serial_number); 2336 KS_CINIT(ec_level); 2337 KS_CINIT(id); 2338 KS_CINIT(bus_type); 2339 KS_CINIT(bus_width); 2340 KS_CINIT(bus_speed); 2341 KS_UINIT(core_clock); 2342 KS_UINIT(port_cnt); 2343 KS_CINIT(port_type); 2344 KS_CINIT(pci_vendor_id); 2345 KS_CINIT(pci_device_id); 2346 2347 KS_U_SET(chip_ver, sc->params.chip); 2348 KS_C_SET(fw_vers, "%d.%d.%d.%d", 2349 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers), 2350 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers), 2351 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers), 2352 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers)); 2353 KS_C_SET(tp_vers, "%d.%d.%d.%d", 2354 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers), 2355 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers), 2356 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers), 2357 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers)); 2358 KS_C_SET(driver_version, DRV_VERSION); 2359 KS_C_SET(serial_number, "%s", v->sn); 2360 KS_C_SET(ec_level, "%s", v->ec); 2361 KS_C_SET(id, "%s", v->id); 2362 KS_C_SET(bus_type, "pci-express"); 2363 KS_C_SET(bus_width, "x%d lanes", p->width); 2364 KS_C_SET(bus_speed, "%d", p->speed); 2365 KS_U_SET(core_clock, v->cclk); 2366 KS_U_SET(port_cnt, sc->params.nports); 2367 2368 t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor); 2369 KS_C_SET(pci_vendor_id, "0x%x", pci_vendor); 2370 2371 t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device); 2372 KS_C_SET(pci_device_id, "0x%x", pci_device); 2373 2374 KS_C_SET(port_type, "%s/%s/%s/%s", 2375 print_port_speed(sc->port[0]), 2376 print_port_speed(sc->port[1]), 2377 print_port_speed(sc->port[2]), 2378 print_port_speed(sc->port[3])); 2379 2380 /* Do NOT set ksp->ks_update. These kstats do not change. */ 2381 2382 /* Install the kstat */ 2383 ksp->ks_private = (void *)sc; 2384 kstat_install(ksp); 2385 2386 return (ksp); 2387 } 2388 2389 /* 2390 * t4nex:X:stat 2391 */ 2392 struct t4_wc_kstats { 2393 kstat_named_t write_coal_success; 2394 kstat_named_t write_coal_failure; 2395 }; 2396 static kstat_t * 2397 setup_wc_kstats(struct adapter *sc) 2398 { 2399 kstat_t *ksp; 2400 struct t4_wc_kstats *kstatp; 2401 int ndata; 2402 2403 ndata = sizeof(struct t4_wc_kstats) / sizeof(kstat_named_t); 2404 ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "stats", 2405 "nexus", KSTAT_TYPE_NAMED, ndata, 0); 2406 if (ksp == NULL) { 2407 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats."); 2408 return (NULL); 2409 } 2410 2411 kstatp = (struct t4_wc_kstats *)ksp->ks_data; 2412 2413 KS_UINIT(write_coal_success); 2414 KS_UINIT(write_coal_failure); 2415 2416 ksp->ks_update = update_wc_kstats; 2417 /* Install the kstat */ 2418 ksp->ks_private = (void *)sc; 2419 kstat_install(ksp); 2420 2421 return (ksp); 2422 } 2423 2424 static int 2425 update_wc_kstats(kstat_t *ksp, int rw) 2426 { 2427 struct t4_wc_kstats *kstatp = (struct t4_wc_kstats *)ksp->ks_data; 2428 struct adapter *sc = ksp->ks_private; 2429 uint32_t wc_total, wc_success, wc_failure; 2430 2431 if (rw == KSTAT_WRITE) 2432 return (0); 2433 2434 if (is_t5(sc->params.chip)) { 2435 wc_total = t4_read_reg(sc, A_SGE_STAT_TOTAL); 2436 wc_failure = t4_read_reg(sc, A_SGE_STAT_MATCH); 2437 wc_success = wc_total - wc_failure; 2438 } else { 2439 wc_success = 0; 2440 wc_failure = 0; 2441 } 2442 2443 KS_U_SET(write_coal_success, wc_success); 2444 KS_U_SET(write_coal_failure, wc_failure); 2445 2446 return (0); 2447 } 2448 2449 int 2450 adapter_full_init(struct adapter *sc) 2451 { 2452 int i, rc = 0; 2453 2454 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2455 2456 rc = t4_setup_adapter_queues(sc); 2457 if (rc != 0) 2458 goto done; 2459 2460 if (sc->intr_cap & DDI_INTR_FLAG_BLOCK) 2461 (void) ddi_intr_block_enable(sc->intr_handle, sc->intr_count); 2462 else { 2463 for (i = 0; i < sc->intr_count; i++) 2464 (void) ddi_intr_enable(sc->intr_handle[i]); 2465 } 2466 t4_intr_enable(sc); 2467 sc->flags |= FULL_INIT_DONE; 2468 2469 #ifdef TCP_OFFLOAD_ENABLE 2470 /* TODO: wrong place to enable TOE capability */ 2471 if (is_offload(sc) != 0) { 2472 for_each_port(sc, i) { 2473 struct port_info *pi = sc->port[i]; 2474 rc = toe_capability(pi, 1); 2475 if (rc != 0) { 2476 cxgb_printf(pi->dip, CE_WARN, 2477 "Failed to activate toe capability: %d", 2478 rc); 2479 rc = 0; /* not a fatal error */ 2480 } 2481 } 2482 } 2483 #endif 2484 2485 done: 2486 if (rc != 0) 2487 (void) adapter_full_uninit(sc); 2488 2489 return (rc); 2490 } 2491 2492 int 2493 adapter_full_uninit(struct adapter *sc) 2494 { 2495 int i, rc = 0; 2496 2497 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2498 2499 if (sc->intr_cap & DDI_INTR_FLAG_BLOCK) 2500 (void) ddi_intr_block_disable(sc->intr_handle, sc->intr_count); 2501 else { 2502 for (i = 0; i < sc->intr_count; i++) 2503 (void) ddi_intr_disable(sc->intr_handle[i]); 2504 } 2505 2506 rc = t4_teardown_adapter_queues(sc); 2507 if (rc != 0) 2508 return (rc); 2509 2510 sc->flags &= ~FULL_INIT_DONE; 2511 2512 return (0); 2513 } 2514 2515 int 2516 port_full_init(struct port_info *pi) 2517 { 2518 struct adapter *sc = pi->adapter; 2519 uint16_t *rss; 2520 struct sge_rxq *rxq; 2521 int rc, i; 2522 2523 ADAPTER_LOCK_ASSERT_NOTOWNED(sc); 2524 ASSERT((pi->flags & PORT_INIT_DONE) == 0); 2525 2526 /* 2527 * Allocate tx/rx/fl queues for this port. 2528 */ 2529 rc = t4_setup_port_queues(pi); 2530 if (rc != 0) 2531 goto done; /* error message displayed already */ 2532 2533 /* 2534 * Setup RSS for this port. 2535 */ 2536 rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP); 2537 for_each_rxq(pi, i, rxq) { 2538 rss[i] = rxq->iq.abs_id; 2539 } 2540 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0, 2541 pi->rss_size, rss, pi->nrxq); 2542 kmem_free(rss, pi->nrxq * sizeof (*rss)); 2543 if (rc != 0) { 2544 cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc); 2545 goto done; 2546 } 2547 2548 pi->flags |= PORT_INIT_DONE; 2549 done: 2550 if (rc != 0) 2551 (void) port_full_uninit(pi); 2552 2553 return (rc); 2554 } 2555 2556 /* 2557 * Idempotent. 2558 */ 2559 int 2560 port_full_uninit(struct port_info *pi) 2561 { 2562 2563 ASSERT(pi->flags & PORT_INIT_DONE); 2564 2565 (void) t4_teardown_port_queues(pi); 2566 pi->flags &= ~PORT_INIT_DONE; 2567 2568 return (0); 2569 } 2570 2571 void 2572 enable_port_queues(struct port_info *pi) 2573 { 2574 struct adapter *sc = pi->adapter; 2575 int i; 2576 struct sge_iq *iq; 2577 struct sge_rxq *rxq; 2578 #ifdef TCP_OFFLOAD_ENABLE 2579 struct sge_ofld_rxq *ofld_rxq; 2580 #endif 2581 2582 ASSERT(pi->flags & PORT_INIT_DONE); 2583 2584 /* 2585 * TODO: whatever was queued up after we set iq->state to IQS_DISABLED 2586 * back in disable_port_queues will be processed now, after an unbounded 2587 * delay. This can't be good. 2588 */ 2589 2590 #ifdef TCP_OFFLOAD_ENABLE 2591 for_each_ofld_rxq(pi, i, ofld_rxq) { 2592 iq = &ofld_rxq->iq; 2593 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) != 2594 IQS_DISABLED) 2595 panic("%s: iq %p wasn't disabled", __func__, 2596 (void *)iq); 2597 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 2598 V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id)); 2599 } 2600 #endif 2601 2602 for_each_rxq(pi, i, rxq) { 2603 iq = &rxq->iq; 2604 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) != 2605 IQS_DISABLED) 2606 panic("%s: iq %p wasn't disabled", __func__, 2607 (void *) iq); 2608 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), 2609 V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id)); 2610 } 2611 } 2612 2613 void 2614 disable_port_queues(struct port_info *pi) 2615 { 2616 int i; 2617 struct adapter *sc = pi->adapter; 2618 struct sge_rxq *rxq; 2619 #ifdef TCP_OFFLOAD_ENABLE 2620 struct sge_ofld_rxq *ofld_rxq; 2621 #endif 2622 2623 ASSERT(pi->flags & PORT_INIT_DONE); 2624 2625 /* 2626 * TODO: need proper implementation for all tx queues (ctrl, eth, ofld). 2627 */ 2628 2629 #ifdef TCP_OFFLOAD_ENABLE 2630 for_each_ofld_rxq(pi, i, ofld_rxq) { 2631 while (atomic_cas_uint(&ofld_rxq->iq.state, IQS_IDLE, 2632 IQS_DISABLED) != IQS_IDLE) 2633 msleep(1); 2634 } 2635 #endif 2636 2637 for_each_rxq(pi, i, rxq) { 2638 while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE, 2639 IQS_DISABLED) != IQS_IDLE) 2640 msleep(1); 2641 } 2642 2643 mutex_enter(&sc->sfl_lock); 2644 #ifdef TCP_OFFLOAD_ENABLE 2645 for_each_ofld_rxq(pi, i, ofld_rxq) 2646 ofld_rxq->fl.flags |= FL_DOOMED; 2647 #endif 2648 for_each_rxq(pi, i, rxq) 2649 rxq->fl.flags |= FL_DOOMED; 2650 mutex_exit(&sc->sfl_lock); 2651 /* TODO: need to wait for all fl's to be removed from sc->sfl */ 2652 } 2653 2654 void 2655 t4_fatal_err(struct adapter *sc) 2656 { 2657 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0); 2658 t4_intr_disable(sc); 2659 cxgb_printf(sc->dip, CE_WARN, 2660 "encountered fatal error, adapter stopped."); 2661 } 2662 2663 int 2664 t4_os_find_pci_capability(struct adapter *sc, int cap) 2665 { 2666 uint16_t stat; 2667 uint8_t cap_ptr, cap_id; 2668 2669 t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat); 2670 if ((stat & PCI_STAT_CAP) == 0) 2671 return (0); /* does not implement capabilities */ 2672 2673 t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr); 2674 while (cap_ptr) { 2675 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id); 2676 if (cap_id == cap) 2677 return (cap_ptr); /* found */ 2678 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr); 2679 } 2680 2681 return (0); /* not found */ 2682 } 2683 2684 void 2685 t4_os_portmod_changed(const struct adapter *sc, int idx) 2686 { 2687 static const char *mod_str[] = { 2688 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM" 2689 }; 2690 const struct port_info *pi = sc->port[idx]; 2691 2692 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE) 2693 cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged."); 2694 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN) 2695 cxgb_printf(pi->dip, CE_NOTE, 2696 "unknown transceiver inserted.\n"); 2697 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED) 2698 cxgb_printf(pi->dip, CE_NOTE, 2699 "unsupported transceiver inserted.\n"); 2700 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str)) 2701 cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.\n", 2702 mod_str[pi->mod_type]); 2703 else 2704 cxgb_printf(pi->dip, CE_NOTE, "transceiver (type %d) inserted.", 2705 pi->mod_type); 2706 } 2707 2708 /* ARGSUSED */ 2709 static int 2710 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m) 2711 { 2712 if (m != NULL) 2713 freemsg(m); 2714 return (0); 2715 } 2716 2717 int 2718 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h) 2719 { 2720 uint_t *loc, new; 2721 2722 if (opcode >= ARRAY_SIZE(sc->cpl_handler)) 2723 return (EINVAL); 2724 2725 new = (uint_t)(unsigned long) (h ? h : cpl_not_handled); 2726 loc = (uint_t *)&sc->cpl_handler[opcode]; 2727 (void) atomic_swap_uint(loc, new); 2728 2729 return (0); 2730 } 2731 2732 static int 2733 fw_msg_not_handled(struct adapter *sc, const __be64 *data) 2734 { 2735 struct cpl_fw6_msg *cpl; 2736 2737 cpl = __containerof((void *)data, struct cpl_fw6_msg, data); 2738 2739 cxgb_printf(sc->dip, CE_WARN, "%s fw_msg type %d", __func__, cpl->type); 2740 return (0); 2741 } 2742 2743 int 2744 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h) 2745 { 2746 fw_msg_handler_t *loc, new; 2747 2748 if (type >= ARRAY_SIZE(sc->fw_msg_handler)) 2749 return (EINVAL); 2750 2751 /* 2752 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL 2753 * handler dispatch table. Reject any attempt to install a handler for 2754 * this subtype. 2755 */ 2756 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL) 2757 return (EINVAL); 2758 2759 new = h ? h : fw_msg_not_handled; 2760 loc = &sc->fw_msg_handler[type]; 2761 (void)atomic_swap_ptr(loc, (void *)new); 2762 2763 return (0); 2764 } 2765 2766 #ifdef TCP_OFFLOAD_ENABLE 2767 static int 2768 toe_capability(struct port_info *pi, int enable) 2769 { 2770 int rc; 2771 struct adapter *sc = pi->adapter; 2772 2773 if (!is_offload(sc)) 2774 return (ENODEV); 2775 2776 if (enable != 0) { 2777 if (isset(&sc->offload_map, pi->port_id) != 0) 2778 return (0); 2779 2780 if (sc->offload_map == 0) { 2781 rc = activate_uld(sc, ULD_TOM, &sc->tom); 2782 if (rc != 0) 2783 return (rc); 2784 } 2785 2786 setbit(&sc->offload_map, pi->port_id); 2787 } else { 2788 if (!isset(&sc->offload_map, pi->port_id)) 2789 return (0); 2790 2791 clrbit(&sc->offload_map, pi->port_id); 2792 2793 if (sc->offload_map == 0) { 2794 rc = deactivate_uld(&sc->tom); 2795 if (rc != 0) { 2796 setbit(&sc->offload_map, pi->port_id); 2797 return (rc); 2798 } 2799 } 2800 } 2801 2802 return (0); 2803 } 2804 2805 /* 2806 * Add an upper layer driver to the global list. 2807 */ 2808 int 2809 t4_register_uld(struct uld_info *ui) 2810 { 2811 int rc = 0; 2812 struct uld_info *u; 2813 2814 mutex_enter(&t4_uld_list_lock); 2815 SLIST_FOREACH(u, &t4_uld_list, link) { 2816 if (u->uld_id == ui->uld_id) { 2817 rc = EEXIST; 2818 goto done; 2819 } 2820 } 2821 2822 SLIST_INSERT_HEAD(&t4_uld_list, ui, link); 2823 ui->refcount = 0; 2824 done: 2825 mutex_exit(&t4_uld_list_lock); 2826 return (rc); 2827 } 2828 2829 int 2830 t4_unregister_uld(struct uld_info *ui) 2831 { 2832 int rc = EINVAL; 2833 struct uld_info *u; 2834 2835 mutex_enter(&t4_uld_list_lock); 2836 2837 SLIST_FOREACH(u, &t4_uld_list, link) { 2838 if (u == ui) { 2839 if (ui->refcount > 0) { 2840 rc = EBUSY; 2841 goto done; 2842 } 2843 2844 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link); 2845 rc = 0; 2846 goto done; 2847 } 2848 } 2849 done: 2850 mutex_exit(&t4_uld_list_lock); 2851 return (rc); 2852 } 2853 2854 static int 2855 activate_uld(struct adapter *sc, int id, struct uld_softc *usc) 2856 { 2857 int rc = EAGAIN; 2858 struct uld_info *ui; 2859 2860 mutex_enter(&t4_uld_list_lock); 2861 2862 SLIST_FOREACH(ui, &t4_uld_list, link) { 2863 if (ui->uld_id == id) { 2864 rc = ui->attach(sc, &usc->softc); 2865 if (rc == 0) { 2866 ASSERT(usc->softc != NULL); 2867 ui->refcount++; 2868 usc->uld = ui; 2869 } 2870 goto done; 2871 } 2872 } 2873 done: 2874 mutex_exit(&t4_uld_list_lock); 2875 2876 return (rc); 2877 } 2878 2879 static int 2880 deactivate_uld(struct uld_softc *usc) 2881 { 2882 int rc; 2883 2884 mutex_enter(&t4_uld_list_lock); 2885 2886 if (usc->uld == NULL || usc->softc == NULL) { 2887 rc = EINVAL; 2888 goto done; 2889 } 2890 2891 rc = usc->uld->detach(usc->softc); 2892 if (rc == 0) { 2893 ASSERT(usc->uld->refcount > 0); 2894 usc->uld->refcount--; 2895 usc->uld = NULL; 2896 usc->softc = NULL; 2897 } 2898 done: 2899 mutex_exit(&t4_uld_list_lock); 2900 2901 return (rc); 2902 } 2903 2904 void 2905 t4_iterate(void (*func)(int, void *), void *arg) 2906 { 2907 struct adapter *sc; 2908 2909 mutex_enter(&t4_adapter_list_lock); 2910 SLIST_FOREACH(sc, &t4_adapter_list, link) { 2911 /* 2912 * func should not make any assumptions about what state sc is 2913 * in - the only guarantee is that sc->sc_lock is a valid lock. 2914 */ 2915 func(ddi_get_instance(sc->dip), arg); 2916 } 2917 mutex_exit(&t4_adapter_list_lock); 2918 } 2919 2920 #endif