/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * This file is part of the Chelsio T4 support code.
 *
 * Copyright (C) 2010-2013 Chelsio Communications.  All rights reserved.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
 * release for licensing terms and conditions.
 */

#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/pci.h>
#include <sys/atomic.h>
#include <sys/types.h>
#include <sys/file.h>
#include <sys/errno.h>
#include <sys/open.h>
#include <sys/cred.h>
#include <sys/stat.h>
#include <sys/mkdev.h>
#include <sys/queue.h>
#include <sys/containerof.h>

#include "version.h"
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
#include "firmware/t4_fw.h"
#include "firmware/t4_cfg.h"
#include "firmware/t5_fw.h"
#include "firmware/t5_cfg.h"
#include "firmware/t6_fw.h"
#include "firmware/t6_cfg.h"
#include "t4_l2t.h"

static int t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp);
static int t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp);
static int t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp,
    int *rp);
struct cb_ops t4_cb_ops = {
        .cb_open =              t4_cb_open,
        .cb_close =             t4_cb_close,
        .cb_strategy =          nodev,
        .cb_print =             nodev,
        .cb_dump =              nodev,
        .cb_read =              nodev,
        .cb_write =             nodev,
        .cb_ioctl =             t4_cb_ioctl,
        .cb_devmap =            nodev,
        .cb_mmap =              nodev,
        .cb_segmap =            nodev,
        .cb_chpoll =            nochpoll,
        .cb_prop_op =           ddi_prop_op,
        .cb_flag =              D_MP,
        .cb_rev =               CB_REV,
        .cb_aread =             nodev,
        .cb_awrite =            nodev
};

static int t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
    void *arg, void *result);
static int t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
    void *arg, dev_info_t **cdipp);
static int t4_bus_unconfig(dev_info_t *dip, uint_t flags,
    ddi_bus_config_op_t op, void *arg);
struct bus_ops t4_bus_ops = {
        .busops_rev =           BUSO_REV,
        .bus_ctl =              t4_bus_ctl,
        .bus_prop_op =          ddi_bus_prop_op,
        .bus_config =           t4_bus_config,
        .bus_unconfig =         t4_bus_unconfig,
};

static int t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
    void **rp);
static int t4_devo_probe(dev_info_t *dip);
static int t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int t4_devo_quiesce(dev_info_t *dip);
struct dev_ops t4_dev_ops = {
        .devo_rev =             DEVO_REV,
        .devo_getinfo =         t4_devo_getinfo,
        .devo_identify =        nulldev,
        .devo_probe =           t4_devo_probe,
        .devo_attach =          t4_devo_attach,
        .devo_detach =          t4_devo_detach,
        .devo_reset =           nodev,
        .devo_cb_ops =          &t4_cb_ops,
        .devo_bus_ops =         &t4_bus_ops,
        .devo_quiesce =         &t4_devo_quiesce,
};

static struct modldrv modldrv = {
        .drv_modops =           &mod_driverops,
        .drv_linkinfo =         "Chelsio T4 nexus " DRV_VERSION,
        .drv_dev_ops =          &t4_dev_ops
};

static struct modlinkage modlinkage = {
        .ml_rev =               MODREV_1,
        .ml_linkage =           {&modldrv, NULL},
};

void *t4_list;

struct intrs_and_queues {
        int intr_type;          /* DDI_INTR_TYPE_* */
        int nirq;               /* Number of vectors */
        int intr_fwd;           /* Interrupts forwarded */
        int ntxq10g;            /* # of NIC txq's for each 10G port */
        int nrxq10g;            /* # of NIC rxq's for each 10G port */
        int ntxq1g;             /* # of NIC txq's for each 1G port */
        int nrxq1g;             /* # of NIC rxq's for each 1G port */
#ifdef TCP_OFFLOAD_ENABLE
        int nofldtxq10g;        /* # of TOE txq's for each 10G port */
        int nofldrxq10g;        /* # of TOE rxq's for each 10G port */
        int nofldtxq1g;         /* # of TOE txq's for each 1G port */
        int nofldrxq1g;         /* # of TOE rxq's for each 1G port */
#endif
};

struct fw_info fi[3];

static int cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss,
    mblk_t *m);
static int fw_msg_not_handled(struct adapter *, const __be64 *);
int t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h);
static unsigned int getpf(struct adapter *sc);
static int prep_firmware(struct adapter *sc);
static int upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma);
static int partition_resources(struct adapter *sc);
static int adap__pre_init_tweaks(struct adapter *sc);
static int get_params__pre_init(struct adapter *sc);
static int get_params__post_init(struct adapter *sc);
static int set_params__post_init(struct adapter *);
static void setup_memwin(struct adapter *sc);
static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
    uint32_t *);
void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
uint32_t position_memwin(struct adapter *, int, uint32_t);
static int prop_lookup_int_array(struct adapter *sc, char *name, int *data,
    uint_t count);
static int init_driver_props(struct adapter *sc, struct driver_properties *p);
static int remove_extra_props(struct adapter *sc, int n10g, int n1g);
static int cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
    struct intrs_and_queues *iaq);
static int add_child_node(struct adapter *sc, int idx);
static int remove_child_node(struct adapter *sc, int idx);
static kstat_t *setup_kstats(struct adapter *sc);
static kstat_t *setup_wc_kstats(struct adapter *);
static int update_wc_kstats(kstat_t *, int);
#ifdef TCP_OFFLOAD_ENABLE
static int toe_capability(struct port_info *pi, int enable);
static int activate_uld(struct adapter *sc, int id, struct uld_softc *usc);
static int deactivate_uld(struct uld_softc *usc);
#endif
static kmutex_t t4_adapter_list_lock;
static SLIST_HEAD(, adapter) t4_adapter_list;
#ifdef TCP_OFFLOAD_ENABLE
static kmutex_t t4_uld_list_lock;
static SLIST_HEAD(, uld_info) t4_uld_list;
#endif

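/*
 * Loadable-module entry points.  _init() sets up the per-instance soft
 * state list and registers the driver with the DDI framework; _fini()
 * tears that down once mod_remove() succeeds.
 */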
int
_init(void)
{
        int rc;

        rc = ddi_soft_state_init(&t4_list, sizeof (struct adapter), 0);
        if (rc != 0)
                return (rc);

        rc = mod_install(&modlinkage);
        if (rc != 0)
                ddi_soft_state_fini(&t4_list);

        mutex_init(&t4_adapter_list_lock, NULL, MUTEX_DRIVER, NULL);
        SLIST_INIT(&t4_adapter_list);

#ifdef TCP_OFFLOAD_ENABLE
        mutex_init(&t4_uld_list_lock, NULL, MUTEX_DRIVER, NULL);
        SLIST_INIT(&t4_uld_list);
#endif

        return (rc);
}

int
_fini(void)
{
        int rc;

        rc = mod_remove(&modlinkage);
        if (rc != 0)
                return (rc);

        ddi_soft_state_fini(&t4_list);
        return (0);
}

int
_info(struct modinfo *mi)
{
        return (mod_info(&modlinkage, mi));
}

/* ARGSUSED */
static int
t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp)
{
        struct adapter *sc;
        minor_t minor;

        minor = getminor((dev_t)arg);   /* same as instance# in our case */

        if (cmd == DDI_INFO_DEVT2DEVINFO) {
                sc = ddi_get_soft_state(t4_list, minor);
                if (sc == NULL)
                        return (DDI_FAILURE);

                ASSERT(sc->dev == (dev_t)arg);
                *rp = (void *)sc->dip;
        } else if (cmd == DDI_INFO_DEVT2INSTANCE)
                *rp = (void *) (unsigned long) minor;
        else
                ASSERT(0);

        return (DDI_SUCCESS);
}

static int
t4_devo_probe(dev_info_t *dip)
{
        int rc, id, *reg;
        uint_t n, pf;

        id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "device-id", 0xffff);
        if (id == 0xffff)
                return (DDI_PROBE_DONTCARE);

        rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
            "reg", &reg, &n);
        if (rc != DDI_SUCCESS)
                return (DDI_PROBE_DONTCARE);

        pf = PCI_REG_FUNC_G(reg[0]);
        ddi_prop_free(reg);

        /* Prevent driver attachment on any PF except 0 on the FPGA */
        if (id == 0xa000 && pf != 0)
                return (DDI_PROBE_FAILURE);

        return (DDI_PROBE_DONTCARE);
}

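/*
 * t4_devo_attach() brings up one adapter instance: allocate soft state, map
 * PCI config space and the device BARs, prepare the firmware, work out the
 * interrupt type and queue counts, and initialize the per-port structures
 * that the child cxgbe nodes will later attach to.
 */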
static int
t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
        struct adapter *sc = NULL;
        struct sge *s;
        int i, instance, rc = DDI_SUCCESS, rqidx, tqidx, q;
        int irq = 0, nxg, n100g, n40g, n25g, n10g, n1g;
#ifdef TCP_OFFLOAD_ENABLE
        int ofld_rqidx, ofld_tqidx;
#endif
        char name[16];
        struct driver_properties *prp;
        struct intrs_and_queues iaq;
        ddi_device_acc_attr_t da = {
                .devacc_attr_version = DDI_DEVICE_ATTR_V0,
                .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
                .devacc_attr_dataorder = DDI_UNORDERED_OK_ACC
        };
        ddi_device_acc_attr_t da1 = {
                .devacc_attr_version = DDI_DEVICE_ATTR_V0,
                .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
                .devacc_attr_dataorder = DDI_MERGING_OK_ACC
        };

        if (cmd != DDI_ATTACH)
                return (DDI_FAILURE);

        /*
         * Allocate space for soft state.
         */
        instance = ddi_get_instance(dip);
        rc = ddi_soft_state_zalloc(t4_list, instance);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(dip, CE_WARN,
                    "failed to allocate soft state: %d", rc);
                return (DDI_FAILURE);
        }

        sc = ddi_get_soft_state(t4_list, instance);
        sc->dip = dip;
        sc->dev = makedevice(ddi_driver_major(dip), instance);
        mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL);
        cv_init(&sc->cv, NULL, CV_DRIVER, NULL);
        mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL);
        TAILQ_INIT(&sc->sfl);

        mutex_enter(&t4_adapter_list_lock);
        SLIST_INSERT_HEAD(&t4_adapter_list, sc, link);
        mutex_exit(&t4_adapter_list_lock);

        sc->pf = getpf(sc);
        if (sc->pf > 8) {
                rc = EINVAL;
                cxgb_printf(dip, CE_WARN,
                    "failed to determine PCI PF# of device");
                goto done;
        }
        sc->mbox = sc->pf;

        /* Initialize the driver properties */
        prp = &sc->props;
        (void) init_driver_props(sc, prp);

        /*
         * Enable access to the PCI config space.
         */
        rc = pci_config_setup(dip, &sc->pci_regh);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(dip, CE_WARN,
                    "failed to enable PCI config space access: %d", rc);
                goto done;
        }

        /* TODO: Set max read request to 4K */

        /*
         * Enable MMIO access.
         */
        rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(dip, CE_WARN,
                    "failed to map device registers: %d", rc);
                goto done;
        }

        (void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map));

        /*
         * Initialize cpl handler.
         */
        for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++) {
                sc->cpl_handler[i] = cpl_not_handled;
        }

        for (i = 0; i < ARRAY_SIZE(sc->fw_msg_handler); i++) {
                sc->fw_msg_handler[i] = fw_msg_not_handled;
        }

        for (i = 0; i < NCHAN; i++) {
                (void) snprintf(name, sizeof (name), "%s-%d", "reclaim", i);
                sc->tq[i] = ddi_taskq_create(sc->dip, name, 1,
                    TASKQ_DEFAULTPRI, 0);

                if (sc->tq[i] == NULL) {
                        cxgb_printf(dip, CE_WARN,
                            "failed to create task queues");
                        rc = DDI_FAILURE;
                        goto done;
                }
        }

        /*
         * Prepare the adapter for operation.
         */
        rc = -t4_prep_adapter(sc, false);
        if (rc != 0) {
                cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc);
                goto done;
        }

        /*
         * Enable BAR1 access.
         */
        sc->doorbells |= DOORBELL_KDB;
        rc = ddi_regs_map_setup(dip, 2, &sc->reg1p, 0, 0, &da1, &sc->reg1h);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(dip, CE_WARN,
                    "failed to map BAR1 device registers: %d", rc);
                goto done;
        } else {
                if (is_t5(sc->params.chip)) {
                        sc->doorbells |= DOORBELL_UDB;
                        if (prp->wc) {
                                /*
                                 * Enable write combining on BAR2.  This is the
                                 * userspace doorbell BAR and is split into
                                 * 128B (UDBS_SEG_SIZE) doorbell regions, each
                                 * associated with an egress queue.  The first
                                 * 64B has the doorbell and the second 64B can
                                 * be used to submit a tx work request with an
                                 * implicit doorbell.
                                 */
                                sc->doorbells &= ~DOORBELL_UDB;
                                sc->doorbells |= (DOORBELL_WCWR |
                                    DOORBELL_UDBWC);
                                t4_write_reg(sc, A_SGE_STAT_CFG,
                                    V_STATSOURCE_T5(7) | V_STATMODE(0));
                        }
                }
        }

        /*
         * Do this really early.  Note that minor number = instance.
         */
        (void) snprintf(name, sizeof (name), "%s,%d", T4_NEXUS_NAME, instance);
        rc = ddi_create_minor_node(dip, name, S_IFCHR, instance,
            DDI_NT_NEXUS, 0);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(dip, CE_WARN,
                    "failed to create device node: %d", rc);
                rc = DDI_SUCCESS; /* carry on */
        }

        /* Do this early. Memory window is required for loading config file. */
        setup_memwin(sc);

        /* Prepare the firmware for operation */
        rc = prep_firmware(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = adap__pre_init_tweaks(sc);
        if (rc != 0)
                goto done;

        rc = get_params__pre_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        t4_sge_init(sc);

        if (sc->flags & MASTER_PF) {
                /* get basic stuff going */
                rc = -t4_fw_initialize(sc, sc->mbox);
                if (rc != 0) {
                        cxgb_printf(sc->dip, CE_WARN,
                            "early init failed: %d.\n", rc);
                        goto done;
                }
        }

        rc = get_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        rc = set_params__post_init(sc);
        if (rc != 0)
                goto done; /* error message displayed already */

        /*
         * TODO: This is the place to call t4_set_filter_mode()
         */

        /* tweak some settings */
        t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
            V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
            V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
        t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));

        /*
         * Work-around for bug 2619
         * Set DisableVlan field in TP_RSS_CONFIG_VRT register so that the
         * VLAN tag extraction is disabled.
         */
        t4_set_reg_field(sc, A_TP_RSS_CONFIG_VRT, F_DISABLEVLAN, F_DISABLEVLAN);

        /* Store filter mode */
        t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
            A_TP_VLAN_PRI_MAP);

        /*
         * First pass over all the ports - allocate VIs and initialize some
         * basic parameters like mac address, port type, etc.  We also figure
         * out the speed class of each port (100G/40G/25G/10G/1G) and use
         * that information when calculating how many interrupts to attempt
         * to allocate.
         */
        n100g = n40g = n25g = n10g = n1g = 0;
        for_each_port(sc, i) {
                struct port_info *pi;

                pi = kmem_zalloc(sizeof (*pi), KM_SLEEP);
                sc->port[i] = pi;

                /* These must be set before t4_port_init */
                pi->adapter = sc;
                /* LINTED: E_ASSIGN_NARROW_CONV */
                pi->port_id = i;
        }

        /* Allocate the vi and initialize parameters like mac addr */
        rc = -t4_port_init(sc, sc->mbox, sc->pf, 0);
        if (rc != 0) {
                cxgb_printf(dip, CE_WARN, "unable to initialize port: %d", rc);
                goto done;
        }

        for_each_port(sc, i) {
                struct port_info *pi = sc->port[i];

                mutex_init(&pi->lock, NULL, MUTEX_DRIVER, NULL);
                pi->mtu = ETHERMTU;

                if (is_100G_port(pi)) {
                        n100g++;
                        pi->tmr_idx = prp->tmr_idx_10g;
                        pi->pktc_idx = prp->pktc_idx_10g;
                } else if (is_40G_port(pi)) {
                        n40g++;
                        pi->tmr_idx = prp->tmr_idx_10g;
                        pi->pktc_idx = prp->pktc_idx_10g;
                } else if (is_25G_port(pi)) {
                        n25g++;
                        pi->tmr_idx = prp->tmr_idx_10g;
                        pi->pktc_idx = prp->pktc_idx_10g;
                } else if (is_10G_port(pi)) {
                        n10g++;
                        pi->tmr_idx = prp->tmr_idx_10g;
                        pi->pktc_idx = prp->pktc_idx_10g;
                } else {
                        n1g++;
                        pi->tmr_idx = prp->tmr_idx_1g;
                        pi->pktc_idx = prp->pktc_idx_1g;
                }

                pi->xact_addr_filt = -1;
                t4_mc_init(pi);

                setbit(&sc->registered_device_map, i);
        }

        nxg = n10g + n25g + n40g + n100g;
        (void) remove_extra_props(sc, nxg, n1g);

        if (sc->registered_device_map == 0) {
                cxgb_printf(dip, CE_WARN, "no usable ports");
                rc = DDI_FAILURE;
                goto done;
        }

        rc = cfg_itype_and_nqueues(sc, nxg, n1g, &iaq);
        if (rc != 0)
                goto done; /* error message displayed already */

        sc->intr_type = iaq.intr_type;
        sc->intr_count = iaq.nirq;

        if (sc->props.multi_rings && (sc->intr_type != DDI_INTR_TYPE_MSIX)) {
                sc->props.multi_rings = 0;
                cxgb_printf(dip, CE_WARN,
                    "Multiple rings disabled as interrupt type is not MSI-X");
        }

        if (sc->props.multi_rings && iaq.intr_fwd) {
                sc->props.multi_rings = 0;
                cxgb_printf(dip, CE_WARN,
                    "Multiple rings disabled as interrupts are forwarded");
        }

        if (!sc->props.multi_rings) {
                iaq.ntxq10g = 1;
                iaq.ntxq1g = 1;
        }
        s = &sc->sge;
        s->nrxq = nxg * iaq.nrxq10g + n1g * iaq.nrxq1g;
        s->ntxq = nxg * iaq.ntxq10g + n1g * iaq.ntxq1g;
        s->neq = s->ntxq + s->nrxq;     /* the fl in an rxq is an eq */
#ifdef TCP_OFFLOAD_ENABLE
        /* control queues, 1 per port + 1 mgmtq */
        s->neq += sc->params.nports + 1;
#endif
        s->niq = s->nrxq + 1;           /* 1 extra for firmware event queue */
        if (iaq.intr_fwd != 0)
                sc->flags |= INTR_FWD;
#ifdef TCP_OFFLOAD_ENABLE
        if (is_offload(sc) != 0) {
                s->nofldrxq = nxg * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
                s->nofldtxq = nxg * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
                s->neq += s->nofldtxq + s->nofldrxq;
                s->niq += s->nofldrxq;

                s->ofld_rxq = kmem_zalloc(s->nofldrxq *
                    sizeof (struct sge_ofld_rxq), KM_SLEEP);
                s->ofld_txq = kmem_zalloc(s->nofldtxq *
                    sizeof (struct sge_wrq), KM_SLEEP);
                s->ctrlq = kmem_zalloc(sc->params.nports *
                    sizeof (struct sge_wrq), KM_SLEEP);
        }
#endif
        s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP);
        s->txq = kmem_zalloc(s->ntxq * sizeof (struct sge_txq), KM_SLEEP);
        s->iqmap = kmem_zalloc(s->niq * sizeof (struct sge_iq *), KM_SLEEP);
        s->eqmap = kmem_zalloc(s->neq * sizeof (struct sge_eq *), KM_SLEEP);

        sc->intr_handle = kmem_zalloc(sc->intr_count *
            sizeof (ddi_intr_handle_t), KM_SLEEP);

        /*
         * Second pass over the ports.  This time we know the number of rx and
         * tx queues that each port should get.
         */
        rqidx = tqidx = 0;
#ifdef TCP_OFFLOAD_ENABLE
        ofld_rqidx = ofld_tqidx = 0;
#endif
        for_each_port(sc, i) {
                struct port_info *pi = sc->port[i];

                if (pi == NULL)
                        continue;

                t4_mc_cb_init(pi);
                /* LINTED: E_ASSIGN_NARROW_CONV */
                pi->first_rxq = rqidx;
                /* LINTED: E_ASSIGN_NARROW_CONV */
                pi->nrxq = (is_10XG_port(pi)) ? iaq.nrxq10g : iaq.nrxq1g;
                /* LINTED: E_ASSIGN_NARROW_CONV */
                pi->first_txq = tqidx;
                /* LINTED: E_ASSIGN_NARROW_CONV */
                pi->ntxq = (is_10XG_port(pi)) ? iaq.ntxq10g : iaq.ntxq1g;

                rqidx += pi->nrxq;
                tqidx += pi->ntxq;

#ifdef TCP_OFFLOAD_ENABLE
                if (is_offload(sc) != 0) {
                        /* LINTED: E_ASSIGN_NARROW_CONV */
                        pi->first_ofld_rxq = ofld_rqidx;
                        pi->nofldrxq = max(1, pi->nrxq / 4);

                        /* LINTED: E_ASSIGN_NARROW_CONV */
                        pi->first_ofld_txq = ofld_tqidx;
                        pi->nofldtxq = max(1, pi->ntxq / 2);

                        ofld_rqidx += pi->nofldrxq;
                        ofld_tqidx += pi->nofldtxq;
                }
#endif

                /*
                 * Enable hw checksumming and LSO for all ports by default.
                 * They can be disabled using ndd (hw_csum and hw_lso).
                 */
                pi->features |= (CXGBE_HW_CSUM | CXGBE_HW_LSO);
        }

#ifdef TCP_OFFLOAD_ENABLE
        sc->l2t = t4_init_l2t(sc);
#endif

        /*
         * Setup Interrupts.
         */

        i = 0;
        rc = ddi_intr_alloc(dip, sc->intr_handle, sc->intr_type, 0,
            sc->intr_count, &i, DDI_INTR_ALLOC_STRICT);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(dip, CE_WARN,
                    "failed to allocate %d interrupt(s) of type %d: %d, %d",
                    sc->intr_count, sc->intr_type, rc, i);
                goto done;
        }
        ASSERT(sc->intr_count == i); /* allocation was STRICT */
        (void) ddi_intr_get_cap(sc->intr_handle[0], &sc->intr_cap);
        (void) ddi_intr_get_pri(sc->intr_handle[0], &sc->intr_pri);
        if (sc->intr_count == 1) {
                ASSERT(sc->flags & INTR_FWD);
                (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_all, sc,
                    &s->fwq);
        } else {
                /* Multiple interrupts.  The first one is always error intr */
                (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_err, sc,
                    NULL);
                irq++;

                /* The second one is always the firmware event queue */
                (void) ddi_intr_add_handler(sc->intr_handle[1], t4_intr, sc,
                    &s->fwq);
                irq++;
                /*
                 * Note that if INTR_FWD is set then either the NIC rx
                 * queues or (exclusive or) the TOE rx queues will be taking
                 * direct interrupts.
                 *
                 * There is no need to check for is_offload(sc) as nofldrxq
                 * will be 0 if offload is disabled.
                 */
                for_each_port(sc, i) {
                        struct port_info *pi = sc->port[i];
                        struct sge_rxq *rxq;
#ifdef TCP_OFFLOAD_ENABLE
                        struct sge_ofld_rxq *ofld_rxq;

                        /*
                         * Skip over the NIC queues if they aren't taking direct
                         * interrupts.
                         */
                        if ((sc->flags & INTR_FWD) &&
                            pi->nofldrxq > pi->nrxq)
                                goto ofld_queues;
#endif
                        rxq = &s->rxq[pi->first_rxq];
                        for (q = 0; q < pi->nrxq; q++, rxq++) {
                                (void) ddi_intr_add_handler(
                                    sc->intr_handle[irq], t4_intr, sc,
                                    &rxq->iq);
                                irq++;
                        }

#ifdef TCP_OFFLOAD_ENABLE
                        /*
                         * Skip over the offload queues if they aren't taking
                         * direct interrupts.
                         */
                        if (sc->flags & INTR_FWD)
                                continue;
ofld_queues:
                        ofld_rxq = &s->ofld_rxq[pi->first_ofld_rxq];
                        for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
                                (void) ddi_intr_add_handler(
                                    sc->intr_handle[irq], t4_intr, sc,
                                    &ofld_rxq->iq);
                                irq++;
                        }
#endif
                }
        }
        sc->flags |= INTR_ALLOCATED;

        ASSERT(rc == DDI_SUCCESS);
        ddi_report_dev(dip);

        /*
         * Hardware/Firmware/etc. Version/Revision IDs.
         */
        t4_dump_version_info(sc);

        if (n100g) {
                cxgb_printf(dip, CE_NOTE,
                    "%dx100G (%d rxq, %d txq total) %d %s.",
                    n100g, rqidx, tqidx, sc->intr_count,
                    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
                    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
                    "fixed interrupt");
        } else if (n40g) {
                cxgb_printf(dip, CE_NOTE,
                    "%dx40G (%d rxq, %d txq total) %d %s.",
                    n40g, rqidx, tqidx, sc->intr_count,
                    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
                    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
                    "fixed interrupt");
        } else if (n25g) {
                cxgb_printf(dip, CE_NOTE,
                    "%dx25G (%d rxq, %d txq total) %d %s.",
                    n25g, rqidx, tqidx, sc->intr_count,
                    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
                    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
                    "fixed interrupt");
        } else if (n10g && n1g) {
                cxgb_printf(dip, CE_NOTE,
                    "%dx10G %dx1G (%d rxq, %d txq total) %d %s.",
                    n10g, n1g, rqidx, tqidx, sc->intr_count,
                    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
                    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
                    "fixed interrupt");
        } else {
                cxgb_printf(dip, CE_NOTE,
                    "%dx%sG (%d rxq, %d txq per port) %d %s.",
                    n10g ? n10g : n1g,
                    n10g ? "10" : "1",
                    n10g ? iaq.nrxq10g : iaq.nrxq1g,
                    n10g ? iaq.ntxq10g : iaq.ntxq1g,
                    sc->intr_count,
                    sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
                    sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
                    "fixed interrupt");
        }

        sc->ksp = setup_kstats(sc);
        sc->ksp_stat = setup_wc_kstats(sc);
        sc->params.drv_memwin = MEMWIN_NIC;

done:
        if (rc != DDI_SUCCESS) {
                (void) t4_devo_detach(dip, DDI_DETACH);

                /* rc may have errno style errors or DDI errors */
                rc = DDI_FAILURE;
        }

        return (rc);
}

static int
t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
        int instance, i;
        struct adapter *sc;
        struct port_info *pi;
        struct sge *s;

        if (cmd != DDI_DETACH)
                return (DDI_FAILURE);

        instance = ddi_get_instance(dip);
        sc = ddi_get_soft_state(t4_list, instance);
        if (sc == NULL)
                return (DDI_SUCCESS);

        if (sc->flags & FULL_INIT_DONE) {
                t4_intr_disable(sc);
                for_each_port(sc, i) {
                        pi = sc->port[i];
                        if (pi && pi->flags & PORT_INIT_DONE)
                                (void) port_full_uninit(pi);
                }
                (void) adapter_full_uninit(sc);
        }

        /* Safe to call no matter what */
        ddi_prop_remove_all(dip);
        ddi_remove_minor_node(dip, NULL);

        for (i = 0; i < NCHAN; i++) {
                if (sc->tq[i]) {
                        ddi_taskq_wait(sc->tq[i]);
                        ddi_taskq_destroy(sc->tq[i]);
                }
        }

        if (sc->ksp != NULL)
                kstat_delete(sc->ksp);
        if (sc->ksp_stat != NULL)
                kstat_delete(sc->ksp_stat);

        s = &sc->sge;
        if (s->rxq != NULL)
                kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq));
#ifdef TCP_OFFLOAD_ENABLE
        if (s->ofld_txq != NULL)
                kmem_free(s->ofld_txq, s->nofldtxq * sizeof (struct sge_wrq));
        if (s->ofld_rxq != NULL)
                kmem_free(s->ofld_rxq,
                    s->nofldrxq * sizeof (struct sge_ofld_rxq));
        if (s->ctrlq != NULL)
                kmem_free(s->ctrlq,
                    sc->params.nports * sizeof (struct sge_wrq));
#endif
        if (s->txq != NULL)
                kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq));
        if (s->iqmap != NULL)
                kmem_free(s->iqmap, s->niq * sizeof (struct sge_iq *));
        if (s->eqmap != NULL)
                kmem_free(s->eqmap, s->neq * sizeof (struct sge_eq *));

        if (s->rxbuf_cache != NULL)
                rxbuf_cache_destroy(s->rxbuf_cache);

        if (sc->flags & INTR_ALLOCATED) {
                for (i = 0; i < sc->intr_count; i++) {
                        (void) ddi_intr_remove_handler(sc->intr_handle[i]);
                        (void) ddi_intr_free(sc->intr_handle[i]);
                }
                sc->flags &= ~INTR_ALLOCATED;
        }

        if (sc->intr_handle != NULL) {
                kmem_free(sc->intr_handle,
                    sc->intr_count * sizeof (*sc->intr_handle));
        }

        for_each_port(sc, i) {
                pi = sc->port[i];
                if (pi != NULL) {
                        mutex_destroy(&pi->lock);
                        kmem_free(pi, sizeof (*pi));
                        clrbit(&sc->registered_device_map, i);
                }
        }

        if (sc->flags & FW_OK)
                (void) t4_fw_bye(sc, sc->mbox);

        if (sc->reg1h != NULL)
                ddi_regs_map_free(&sc->reg1h);

        if (sc->regh != NULL)
                ddi_regs_map_free(&sc->regh);

        if (sc->pci_regh != NULL)
                pci_config_teardown(&sc->pci_regh);

        mutex_enter(&t4_adapter_list_lock);
        SLIST_REMOVE(&t4_adapter_list, sc, adapter, link);
        mutex_exit(&t4_adapter_list_lock);

        mutex_destroy(&sc->lock);
        cv_destroy(&sc->cv);
        mutex_destroy(&sc->sfl_lock);

#ifdef DEBUG
        bzero(sc, sizeof (*sc));
#endif
        ddi_soft_state_free(t4_list, instance);

        return (DDI_SUCCESS);
}

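/*
 * quiesce(9E) runs during fast reboot with interrupts and timeouts disabled;
 * it must silence the device without taking locks or blocking.
 */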
static int
t4_devo_quiesce(dev_info_t *dip)
{
        int instance;
        struct adapter *sc;

        instance = ddi_get_instance(dip);
        sc = ddi_get_soft_state(t4_list, instance);
        if (sc == NULL)
                return (DDI_SUCCESS);

        t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
        t4_intr_disable(sc);
        t4_write_reg(sc, A_PL_RST, F_PIORSTMODE | F_PIORST);

        return (DDI_SUCCESS);
}

static int
t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg,
    void *result)
{
        char s[4];
        struct port_info *pi;
        dev_info_t *child = (dev_info_t *)arg;

        switch (op) {
        case DDI_CTLOPS_REPORTDEV:
                pi = ddi_get_parent_data(rdip);
                pi->instance = ddi_get_instance(dip);
                pi->child_inst = ddi_get_instance(rdip);
                cmn_err(CE_CONT, "?%s%d is port %s on %s%d\n",
                    ddi_node_name(rdip), ddi_get_instance(rdip),
                    ddi_get_name_addr(rdip), ddi_driver_name(dip),
                    ddi_get_instance(dip));
                return (DDI_SUCCESS);

        case DDI_CTLOPS_INITCHILD:
                pi = ddi_get_parent_data(child);
                if (pi == NULL)
                        return (DDI_NOT_WELL_FORMED);
                (void) snprintf(s, sizeof (s), "%d", pi->port_id);
                ddi_set_name_addr(child, s);
                return (DDI_SUCCESS);

        case DDI_CTLOPS_UNINITCHILD:
                ddi_set_name_addr(child, NULL);
                return (DDI_SUCCESS);

        case DDI_CTLOPS_ATTACH:
        case DDI_CTLOPS_DETACH:
                return (DDI_SUCCESS);

        default:
                return (ddi_ctlops(dip, rdip, op, arg, result));
        }
}

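/*
 * Child nodes are named "cxgb@<port_id>".  BUS_CONFIG_ONE parses the unit
 * address back out of that name; BUS_CONFIG_ALL/BUS_CONFIG_DRIVER simply
 * enumerates every port on the adapter.
 */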
static int
t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, void *arg,
    dev_info_t **cdipp)
{
        int instance, i;
        struct adapter *sc;

        instance = ddi_get_instance(dip);
        sc = ddi_get_soft_state(t4_list, instance);

        if (op == BUS_CONFIG_ONE) {
                char *c;

                /*
                 * arg is something like "cxgb@0" where 0 is the port_id
                 * hanging off this nexus.
                 */
                c = arg;
                while (*(c + 1))
                        c++;

                /* There should be exactly 1 digit after '@' */
                if (*(c - 1) != '@')
                        return (NDI_FAILURE);

                i = *c - '0';

                if (add_child_node(sc, i) != 0)
                        return (NDI_FAILURE);

                flags |= NDI_ONLINE_ATTACH;
        } else if (op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER) {
                /* Allocate and bind all child device nodes */
                for_each_port(sc, i)
                    (void) add_child_node(sc, i);
                flags |= NDI_ONLINE_ATTACH;
        }

        return (ndi_busop_bus_config(dip, flags, op, arg, cdipp, 0));
}

static int
t4_bus_unconfig(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
    void *arg)
{
        int instance, i, rc;
        struct adapter *sc;

        instance = ddi_get_instance(dip);
        sc = ddi_get_soft_state(t4_list, instance);

        if (op == BUS_UNCONFIG_ONE || op == BUS_UNCONFIG_ALL ||
            op == BUS_UNCONFIG_DRIVER)
                flags |= NDI_UNCONFIG;

        rc = ndi_busop_bus_unconfig(dip, flags, op, arg);
        if (rc != 0)
                return (rc);

        if (op == BUS_UNCONFIG_ONE) {
                char *c;

                c = arg;
                while (*(c + 1))
                        c++;

                if (*(c - 1) != '@')
                        return (NDI_SUCCESS);

                i = *c - '0';

                rc = remove_child_node(sc, i);
        } else if (op == BUS_UNCONFIG_ALL || op == BUS_UNCONFIG_DRIVER) {
                for_each_port(sc, i)
                    (void) remove_child_node(sc, i);
        }

        return (rc);
}

/* ARGSUSED */
static int
t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
        struct adapter *sc;

        if (otyp != OTYP_CHR)
                return (EINVAL);

        sc = ddi_get_soft_state(t4_list, getminor(*devp));
        if (sc == NULL)
                return (ENXIO);

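        /*
         * atomic_cas_uint() returns the previous value of sc->open: 0 on
         * success (the device is now marked open), EBUSY if it was already
         * open.  Either way that value is exactly the errno to return.
         */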
        return (atomic_cas_uint(&sc->open, 0, EBUSY));
}

/* ARGSUSED */
static int
t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
        struct adapter *sc;

        sc = ddi_get_soft_state(t4_list, getminor(dev));
        if (sc == NULL)
                return (EINVAL);

        (void) atomic_swap_uint(&sc->open, 0);
        return (0);
}

/* ARGSUSED */
static int
t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, int *rp)
{
        int instance;
        struct adapter *sc;
        void *data = (void *)d;

        if (crgetuid(credp) != 0)
                return (EPERM);

        instance = getminor(dev);
        sc = ddi_get_soft_state(t4_list, instance);
        if (sc == NULL)
                return (EINVAL);

        return (t4_ioctl(sc, cmd, data, mode));
}

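/*
 * The first entry of the "reg" property describes the device's config space
 * and encodes its PCI bus/device/function; PCI_REG_FUNC_G() extracts the
 * function number, which this driver also uses as its mailbox number.
 */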
static unsigned int
getpf(struct adapter *sc)
{
        int rc, *data;
        uint_t n, pf;

        rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
            DDI_PROP_DONTPASS, "reg", &data, &n);
        if (rc != DDI_SUCCESS) {
                cxgb_printf(sc->dip, CE_WARN,
                    "failed to lookup \"reg\" property: %d", rc);
                return (0xff);
        }

        pf = PCI_REG_FUNC_G(data[0]);
        ddi_prop_free(data);

        return (pf);
}

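/*
 * Fill in the fw_info table with the version expectations for the firmware
 * images compiled into the driver (firmware/t[456]_fw.h) and return the
 * entry that matches the given chip, or NULL if there is none.
 */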
static struct fw_info *
find_fw_info(int chip)
{
        u32 i;

        fi[0].chip = CHELSIO_T4;
        fi[0].fw_hdr.chip = FW_HDR_CHIP_T4;
        fi[0].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T4));
        fi[0].fw_hdr.intfver_nic = FW_INTFVER(T4, NIC);
        fi[0].fw_hdr.intfver_vnic = FW_INTFVER(T4, VNIC);
        fi[0].fw_hdr.intfver_ofld = FW_INTFVER(T4, OFLD);
        fi[0].fw_hdr.intfver_ri = FW_INTFVER(T4, RI);
        fi[0].fw_hdr.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU);
        fi[0].fw_hdr.intfver_iscsi = FW_INTFVER(T4, ISCSI);
        fi[0].fw_hdr.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU);
        fi[0].fw_hdr.intfver_fcoe = FW_INTFVER(T4, FCOE);

        fi[1].chip = CHELSIO_T5;
        fi[1].fw_hdr.chip = FW_HDR_CHIP_T5;
        fi[1].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T5));
        fi[1].fw_hdr.intfver_nic = FW_INTFVER(T5, NIC);
        fi[1].fw_hdr.intfver_vnic = FW_INTFVER(T5, VNIC);
        fi[1].fw_hdr.intfver_ofld = FW_INTFVER(T5, OFLD);
        fi[1].fw_hdr.intfver_ri = FW_INTFVER(T5, RI);
        fi[1].fw_hdr.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU);
        fi[1].fw_hdr.intfver_iscsi = FW_INTFVER(T5, ISCSI);
        fi[1].fw_hdr.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU);
        fi[1].fw_hdr.intfver_fcoe = FW_INTFVER(T5, FCOE);

        fi[2].chip = CHELSIO_T6;
        fi[2].fw_hdr.chip = FW_HDR_CHIP_T6;
        fi[2].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T6));
        fi[2].fw_hdr.intfver_nic = FW_INTFVER(T6, NIC);
        fi[2].fw_hdr.intfver_vnic = FW_INTFVER(T6, VNIC);
        fi[2].fw_hdr.intfver_ofld = FW_INTFVER(T6, OFLD);
        fi[2].fw_hdr.intfver_ri = FW_INTFVER(T6, RI);
        fi[2].fw_hdr.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU);
        fi[2].fw_hdr.intfver_iscsi = FW_INTFVER(T6, ISCSI);
        fi[2].fw_hdr.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU);
        fi[2].fw_hdr.intfver_fcoe = FW_INTFVER(T6, FCOE);

        for (i = 0; i < ARRAY_SIZE(fi); i++) {
                if (fi[i].chip == chip)
                        return (&fi[i]);
        }

        return (NULL);
}

/*
 * Install a compatible firmware (if required), establish contact with it,
 * become the master, and reset the device.
 */
static int
prep_firmware(struct adapter *sc)
{
        int rc;
        int fw_size;
        int reset = 1;
        enum dev_state state;
        unsigned char *fw_data;
        struct fw_info *fw_info;
        struct fw_hdr *card_fw;

        struct driver_properties *p = &sc->props;

        /* Contact firmware, request master */
        rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
        if (rc < 0) {
                rc = -rc;
                cxgb_printf(sc->dip, CE_WARN,
                    "failed to connect to the firmware: %d.", rc);
                return (rc);
        }

        if (rc == sc->mbox)
                sc->flags |= MASTER_PF;

        /* We may need FW version info for later reporting */
        t4_get_version_info(sc);

        fw_info = find_fw_info(CHELSIO_CHIP_VERSION(sc->params.chip));
        if (fw_info == NULL) {
                cxgb_printf(sc->dip, CE_WARN,
                    "unable to look up firmware information for chip %d.\n",
                    CHELSIO_CHIP_VERSION(sc->params.chip));
                return (EINVAL);
        }

        /* Allocate memory to read the header of the firmware on the card */
        card_fw = kmem_zalloc(sizeof (*card_fw), KM_SLEEP);

        switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
        case CHELSIO_T4:
                fw_data = t4fw_data;
                fw_size = t4fw_size;
                break;
        case CHELSIO_T5:
                fw_data = t5fw_data;
                fw_size = t5fw_size;
                break;
        case CHELSIO_T6:
                fw_data = t6fw_data;
                fw_size = t6fw_size;
                break;
        default:
                cxgb_printf(sc->dip, CE_WARN, "Adapter type not supported\n");
                kmem_free(card_fw, sizeof (*card_fw));
                return (EINVAL);
        }

        rc = -t4_prep_fw(sc, fw_info, fw_data, fw_size, card_fw,
            p->t4_fw_install, state, &reset);

        kmem_free(card_fw, sizeof (*card_fw));

        if (rc != 0) {
                cxgb_printf(sc->dip, CE_WARN,
                    "failed to install firmware: %d", rc);
                return (rc);
        } else {
                /* refresh */
                (void) t4_check_fw_version(sc);
        }

        /* Reset device */
        rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
        if (rc != 0) {
                cxgb_printf(sc->dip, CE_WARN,
                    "firmware reset failed: %d.", rc);
                if (rc != ETIMEDOUT && rc != EIO)
                        (void) t4_fw_bye(sc, sc->mbox);
                return (rc);
        }

        /* Partition adapter resources as specified in the config file. */
        if (sc->flags & MASTER_PF) {
                /* Handle default vs special T4 config file */
                rc = partition_resources(sc);
                if (rc != 0)
                        return (rc);    /* error message displayed already */
        }

        sc->flags |= FW_OK;
        return (0);
}

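/*
 * BAR0 provides a few fixed-size windows through which adapter memory
 * (EDC0/EDC1/MC) can be read and written; position_memwin() aims a window
 * at a target address.  The tables below give each window's base and
 * aperture; T5 and T6 share the second layout (see memwin_info()).
 */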
static const struct memwin t4_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE, MEMWIN2_APERTURE }
};

static const struct memwin t5_memwin[] = {
        { MEMWIN0_BASE, MEMWIN0_APERTURE },
        { MEMWIN1_BASE, MEMWIN1_APERTURE },
        { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
};

#define FW_PARAM_DEV(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
#define FW_PARAM_PFVF(param) \
        (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
            V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))

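/*
 * Example (see upload_config_file() below): to ask the firmware where the
 * config file should be placed,
 *
 *      param = FW_PARAM_DEV(CF);
 *      rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
 *
 * after which the memory type and address are decoded from the Y and Z
 * fields of the returned value.
 */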
/*
 * Verify that the memory range specified by the memtype/offset/len pair is
 * valid and lies entirely within the memtype specified.  The global address
 * of the start of the range is returned in addr.
 */
int
validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
        uint32_t *addr)
{
        uint32_t em, addr_len, maddr, mlen;

        /* Memory can only be accessed in naturally aligned 4-byte units */
        if (off & 3 || len & 3 || len == 0)
                return (EINVAL);

        em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
        switch (mtype) {
        case MEM_EDC0:
                if (!(em & F_EDRAM0_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
                maddr = G_EDRAM0_BASE(addr_len) << 20;
                mlen = G_EDRAM0_SIZE(addr_len) << 20;
                break;
        case MEM_EDC1:
                if (!(em & F_EDRAM1_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
                maddr = G_EDRAM1_BASE(addr_len) << 20;
                mlen = G_EDRAM1_SIZE(addr_len) << 20;
                break;
        case MEM_MC:
                if (!(em & F_EXT_MEM_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
                maddr = G_EXT_MEM_BASE(addr_len) << 20;
                mlen = G_EXT_MEM_SIZE(addr_len) << 20;
                break;
        case MEM_MC1:
                if (is_t4(sc->params.chip) || !(em & F_EXT_MEM1_ENABLE))
                        return (EINVAL);
                addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
                maddr = G_EXT_MEM1_BASE(addr_len) << 20;
                mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
                break;
        default:
                return (EINVAL);
        }

        if (mlen > 0 && off < mlen && off + len <= mlen) {
                *addr = maddr + off;    /* global address */
                return (0);
        }

        return (EFAULT);
}

void
memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
{
        const struct memwin *mw;

        if (is_t4(sc->params.chip)) {
                mw = &t4_memwin[win];
        } else {
                mw = &t5_memwin[win];
        }

        if (base != NULL)
                *base = mw->base;
        if (aperture != NULL)
                *aperture = mw->aperture;
}
1399 
1400 /*
1401  * Upload configuration file to card's memory.
1402  */
1403 static int
1404 upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma)
1405 {
1406         int rc = 0, cflen;
1407         u_int i, n;
1408         uint32_t param, val, addr, mtype, maddr;
1409         uint32_t off, mw_base, mw_aperture;
1410         const uint32_t *cfdata;
1411 
1412         /* Figure out where the firmware wants us to upload it. */
1413         param = FW_PARAM_DEV(CF);
1414         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1415         if (rc != 0) {
1416                 /* Firmwares without config file support will fail this way */
1417                 cxgb_printf(sc->dip, CE_WARN,
1418                     "failed to query config file location: %d.\n", rc);
1419                 return (rc);
1420         }
1421         *mt = mtype = G_FW_PARAMS_PARAM_Y(val);
1422         *ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;
1423 
1424         switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
1425         case CHELSIO_T4:
1426                 cflen = t4cfg_size & ~3;
1427                 /* LINTED: E_BAD_PTR_CAST_ALIGN */
1428                 cfdata = (const uint32_t *)t4cfg_data;
1429                 break;
1430         case CHELSIO_T5:
1431                 cflen = t5cfg_size & ~3;
1432                 /* LINTED: E_BAD_PTR_CAST_ALIGN */
1433                 cfdata = (const uint32_t *)t5cfg_data;
1434                 break;
1435         case CHELSIO_T6:
1436                 cflen = t6cfg_size & ~3;
1437                 /* LINTED: E_BAD_PTR_CAST_ALIGN */
1438                 cfdata = (const uint32_t *)t6cfg_data;
1439                 break;
1440         default:
1441                 cxgb_printf(sc->dip, CE_WARN,
1442                             "Invalid Adapter detected\n");
1443                 return EINVAL;
1444         }
1445 
1446         if (cflen > FLASH_CFG_MAX_SIZE) {
1447                 cxgb_printf(sc->dip, CE_WARN,
1448                     "config file too long (%d, max allowed is %d).  ",
1449                     cflen, FLASH_CFG_MAX_SIZE);
1450                 return (EFBIG);
1451         }
1452 
1453         rc = validate_mt_off_len(sc, mtype, maddr, cflen, &addr);
        if (rc != 0) {
1456                 cxgb_printf(sc->dip, CE_WARN,
1457                     "%s: addr (%d/0x%x) or len %d is not valid: %d.  "
1458                     "Will try to use the config on the card, if any.\n",
1459                     __func__, mtype, maddr, cflen, rc);
1460                 return (EFAULT);
1461         }
1462 
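        /*
         * Copy the file into card memory through memory window 2, 32 bits
         * at a time.  position_memwin() returns the offset of addr within
         * the window, so each pass writes at most (aperture - off) bytes
         * before the window has to be moved along.
         */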
1463         memwin_info(sc, 2, &mw_base, &mw_aperture);
1464         while (cflen) {
1465                 off = position_memwin(sc, 2, addr);
1466                 n = min(cflen, mw_aperture - off);
1467                 for (i = 0; i < n; i += 4)
1468                         t4_write_reg(sc, mw_base + off + i, *cfdata++);
1469                 cflen -= n;
1470                 addr += n;
1471         }
1472 
1473         return (rc);
1474 }
1475 
1476 /*
1477  * Partition chip resources for use between various PFs, VFs, etc.  This is done
1478  * by uploading the firmware configuration file to the adapter and instructing
1479  * the firmware to process it.
1480  */
1481 static int
1482 partition_resources(struct adapter *sc)
1483 {
1484         int rc;
1485         struct fw_caps_config_cmd caps;
1486         uint32_t mtype, maddr, finicsum, cfcsum;
1487 
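        /*
         * Try to upload our config file to card memory.  If that fails,
         * fall back to whatever config file (if any) already resides in
         * the card's flash.
         */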
1488         rc = upload_config_file(sc, &mtype, &maddr);
1489         if (rc != 0) {
1490                 mtype = FW_MEMTYPE_CF_FLASH;
1491                 maddr = t4_flash_cfg_addr(sc);
1492         }
1493 
1494         bzero(&caps, sizeof (caps));
1495         caps.op_to_write = BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1496             F_FW_CMD_REQUEST | F_FW_CMD_READ);
1497         caps.cfvalid_to_len16 = BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID |
1498             V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
1499             V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
1500         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
1501         if (rc != 0) {
1502                 cxgb_printf(sc->dip, CE_WARN,
1503                     "failed to pre-process config file: %d.\n", rc);
1504                 return (rc);
1505         }
1506 
1507         finicsum = ntohl(caps.finicsum);
1508         cfcsum = ntohl(caps.cfcsum);
1509         if (finicsum != cfcsum) {
1510                 cxgb_printf(sc->dip, CE_WARN,
1511                     "WARNING: config file checksum mismatch: %08x %08x\n",
1512                     finicsum, cfcsum);
1513         }
1514         sc->cfcsum = cfcsum;
1515 
1516         /* TODO: Need to configure this correctly */
1517         caps.toecaps = htons(FW_CAPS_CONFIG_TOE);
1518         caps.iscsicaps = 0;
1519         caps.rdmacaps = 0;
1520         caps.fcoecaps = 0;
1521         /* TODO: Disable VNIC cap for now */
1522         caps.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
1523 
1524         caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1525             F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1526         caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
1527         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), NULL);
1528         if (rc != 0) {
1529                 cxgb_printf(sc->dip, CE_WARN,
1530                     "failed to process config file: %d.\n", rc);
1531                 return (rc);
1532         }
1533 
1534         return (0);
1535 }
1536 
1537 /*
1538  * Tweak configuration based on module parameters, etc.  Most of these have
1539  * defaults assigned to them by Firmware Configuration Files (if we're using
1540  * them) but need to be explicitly set if we're using hard-coded
1541  * initialization.  But even in the case of using Firmware Configuration
1542  * Files, we'd like to expose the ability to change these via module
1543  * parameters so these are essentially common tweaks/settings for
1544  * Configuration Files and hard-coded initialization ...
1545  */
1546 static int
1547 adap__pre_init_tweaks(struct adapter *sc)
1548 {
1549         int rx_dma_offset = 2; /* Offset of RX packets into DMA buffers */
1550 
1551         /*
1552          * Fix up various Host-Dependent Parameters like Page Size, Cache
1553          * Line Size, etc.  The firmware default is for a 4KB Page Size and
1554          * 64B Cache Line Size ...
1555          */
        (void) t4_fixup_host_params_compat(sc, PAGE_SIZE, CACHE_LINE,
            T5_LAST_REV);

        t4_set_reg_field(sc, A_SGE_CONTROL, V_PKTSHIFT(M_PKTSHIFT),
            V_PKTSHIFT(rx_dma_offset));

        return (0);
}

/*
1564  * Retrieve parameters that are needed (or nice to have) prior to calling
1565  * t4_sge_init and t4_fw_initialize.
1566  */
1567 static int
1568 get_params__pre_init(struct adapter *sc)
1569 {
1570         int rc;
1571         uint32_t param[2], val[2];
1572         struct fw_devlog_cmd cmd;
1573         struct devlog_params *dlog = &sc->params.devlog;
1574 
1575         /*
1576          * Grab the raw VPD parameters.
1577          */
1578         rc = -t4_get_raw_vpd_params(sc, &sc->params.vpd);
1579         if (rc != 0) {
1580                 cxgb_printf(sc->dip, CE_WARN,
1581                     "failed to query VPD parameters (pre_init): %d.\n", rc);
1582                 return (rc);
1583         }
1584 
1585         param[0] = FW_PARAM_DEV(PORTVEC);
1586         param[1] = FW_PARAM_DEV(CCLK);
1587         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1588         if (rc != 0) {
1589                 cxgb_printf(sc->dip, CE_WARN,
1590                     "failed to query parameters (pre_init): %d.\n", rc);
1591                 return (rc);
1592         }
1593 
1594         sc->params.portvec = val[0];
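        /*
         * The port count is the number of bits set in the port vector;
         * "v &= v - 1" clears the lowest set bit on each iteration.
         */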
1595         sc->params.nports = 0;
1596         while (val[0]) {
1597                 sc->params.nports++;
1598                 val[0] &= val[0] - 1;
1599         }
1600 
1601         sc->params.vpd.cclk = val[1];
1602 
1603         /* Read device log parameters. */
1604         bzero(&cmd, sizeof (cmd));
1605         cmd.op_to_write = htonl(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1606             F_FW_CMD_REQUEST | F_FW_CMD_READ);
1607         cmd.retval_len16 = htonl(FW_LEN16(cmd));
1608         rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof (cmd), &cmd);
1609         if (rc != 0) {
1610                 cxgb_printf(sc->dip, CE_WARN,
1611                     "failed to get devlog parameters: %d.\n", rc);
1612                 bzero(dlog, sizeof (*dlog));
1613                 rc = 0; /* devlog isn't critical for device operation */
1614         } else {
1615                 val[0] = ntohl(cmd.memtype_devlog_memaddr16_devlog);
1616                 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
1617                 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
1618                 dlog->size = ntohl(cmd.memsize_devlog);
1619         }
1620 
1621         return (rc);
1622 }
1623 
1624 /*
1625  * Retrieve various parameters that are of interest to the driver.  The device
1626  * has been initialized by the firmware at this point.
1627  */
1628 static int
1629 get_params__post_init(struct adapter *sc)
1630 {
1631         int rc;
1632         uint32_t param[7], val[7];
1633         struct fw_caps_config_cmd caps;
1634 
1635         param[0] = FW_PARAM_PFVF(IQFLINT_START);
1636         param[1] = FW_PARAM_PFVF(EQ_START);
1637         param[2] = FW_PARAM_PFVF(FILTER_START);
1638         param[3] = FW_PARAM_PFVF(FILTER_END);
1639         param[4] = FW_PARAM_PFVF(L2T_START);
1640         param[5] = FW_PARAM_PFVF(L2T_END);
1641         rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1642         if (rc != 0) {
1643                 cxgb_printf(sc->dip, CE_WARN,
1644                     "failed to query parameters (post_init): %d.\n", rc);
1645                 return (rc);
1646         }
1647 
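        /*
         * FILTER_START/END and L2T_START/END describe inclusive ranges,
         * hence the "+ 1" when computing the counts below.
         */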
1648         /* LINTED: E_ASSIGN_NARROW_CONV */
1649         sc->sge.iq_start = val[0];
1650         sc->sge.eq_start = val[1];
1651         sc->tids.ftid_base = val[2];
1652         sc->tids.nftids = val[3] - val[2] + 1;
1653         sc->vres.l2t.start = val[4];
1654         sc->vres.l2t.size = val[5] - val[4] + 1;
1655 
        /* get capabilities */
1657         bzero(&caps, sizeof (caps));
1658         caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1659             F_FW_CMD_REQUEST | F_FW_CMD_READ);
1660         caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
1661         rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
1662         if (rc != 0) {
1663                 cxgb_printf(sc->dip, CE_WARN,
1664                     "failed to get card capabilities: %d.\n", rc);
1665                 return (rc);
1666         }
1667 
1668         if (caps.toecaps != 0) {
1669                 /* query offload-related parameters */
1670                 param[0] = FW_PARAM_DEV(NTID);
1671                 param[1] = FW_PARAM_PFVF(SERVER_START);
1672                 param[2] = FW_PARAM_PFVF(SERVER_END);
1673                 param[3] = FW_PARAM_PFVF(TDDP_START);
1674                 param[4] = FW_PARAM_PFVF(TDDP_END);
1675                 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1676                 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1677                 if (rc != 0) {
1678                         cxgb_printf(sc->dip, CE_WARN,
1679                             "failed to query TOE parameters: %d.\n", rc);
1680                         return (rc);
1681                 }
1682                 sc->tids.ntids = val[0];
1683                 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1684                 sc->tids.stid_base = val[1];
1685                 sc->tids.nstids = val[2] - val[1] + 1;
1686                 sc->vres.ddp.start = val[3];
1687                 sc->vres.ddp.size = val[4] - val[3] + 1;
1688                 sc->params.ofldq_wr_cred = val[5];
1689                 sc->params.offload = 1;
1690         }
1691 
1692         /* These are finalized by FW initialization, load their values now */
1693         val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
1694         sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
1695         sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
1696         t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
1697 
1698         return (rc);
1699 }
1700 
1701 static int
1702 set_params__post_init(struct adapter *sc)
1703 {
1704         uint32_t param, val;
1705 
1706         /* ask for encapsulated CPLs */
1707         param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
1708         val = 1;
        (void) t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1710 
1711         return (0);
1712 }
1713 
1714 /* TODO: verify */
1715 static void
1716 setup_memwin(struct adapter *sc)
1717 {
1718         pci_regspec_t *data;
1719         int rc;
1720         uint_t n;
1721         uintptr_t bar0;
1722         uintptr_t mem_win0_base, mem_win1_base, mem_win2_base;
1723         uintptr_t mem_win2_aperture;
1724 
1725         rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
1726             DDI_PROP_DONTPASS, "assigned-addresses", (int **)&data, &n);
1727         if (rc != DDI_SUCCESS) {
1728                 cxgb_printf(sc->dip, CE_WARN,
1729                     "failed to lookup \"assigned-addresses\" property: %d", rc);
1730                 return;
1731         }
1732         n /= sizeof (*data);
1733 
1734         bar0 = ((uint64_t)data[0].pci_phys_mid << 32) | data[0].pci_phys_low;
1735         ddi_prop_free(data);
1736 
1737         if (is_t4(sc->params.chip)) {
1738                 mem_win0_base = bar0 + MEMWIN0_BASE;
1739                 mem_win1_base = bar0 + MEMWIN1_BASE;
1740                 mem_win2_base = bar0 + MEMWIN2_BASE;
1741                 mem_win2_aperture = MEMWIN2_APERTURE;
1742         } else {
1743                 /* For T5, only relative offset inside the PCIe BAR is passed */
1744                 mem_win0_base = MEMWIN0_BASE;
1745                 mem_win1_base = MEMWIN1_BASE;
1746                 mem_win2_base = MEMWIN2_BASE_T5;
1747                 mem_win2_aperture = MEMWIN2_APERTURE_T5;
1748         }
1749 
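        /*
         * The WINDOW field encodes the aperture as log2(size) - 10, i.e. a
         * value of 0 selects a 1KB window; BIR 0 places the window in BAR0.
         */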
1750         t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
1751             mem_win0_base | V_BIR(0) |
1752             V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
1753 
1754         t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
1755             mem_win1_base | V_BIR(0) |
1756             V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
1757 
1758         t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
1759             mem_win2_base | V_BIR(0) |
1760             V_WINDOW(ilog2(mem_win2_aperture) - 10));
1761 
        /* flush */
        (void) t4_read_reg(sc,
            PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1764 }
1765 
1766 /*
1767  * Positions the memory window such that it can be used to access the specified
1768  * address in the chip's address space.  The return value is the offset of addr
1769  * from the start of the window.
1770  */
1771 uint32_t
1772 position_memwin(struct adapter *sc, int n, uint32_t addr)
1773 {
1774         uint32_t start, pf;
1775         uint32_t reg;
1776 
1777         if (addr & 3) {
1778                 cxgb_printf(sc->dip, CE_WARN,
1779                     "addr (0x%x) is not at a 4B boundary.\n", addr);
1780                 return (EFAULT);
1781         }
1782 
1783         if (is_t4(sc->params.chip)) {
1784                 pf = 0;
1785                 start = addr & ~0xf;    /* start must be 16B aligned */
1786         } else {
1787                 pf = V_PFNUM(sc->pf);
1788                 start = addr & ~0x7f;   /* start must be 128B aligned */
1789         }
1790         reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1791 
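        /* Set the new window offset and read it back to flush the write. */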
1792         t4_write_reg(sc, reg, start | pf);
1793         (void) t4_read_reg(sc, reg);
1794 
1795         return (addr - start);
1796 }
1797 
1798 
1799 /*
1800  * Reads the named property and fills up the "data" array (which has at least
1801  * "count" elements).  We first try and lookup the property for our dev_t and
1802  * then retry with DDI_DEV_T_ANY if it's not found.
1803  *
1804  * Returns non-zero if the property was found and "data" has been updated.
1805  */
1806 static int
1807 prop_lookup_int_array(struct adapter *sc, char *name, int *data, uint_t count)
1808 {
1809         dev_info_t *dip = sc->dip;
1810         dev_t dev = sc->dev;
1811         int rc, *d;
1812         uint_t i, n;
1813 
1814         rc = ddi_prop_lookup_int_array(dev, dip, DDI_PROP_DONTPASS,
1815             name, &d, &n);
1816         if (rc == DDI_PROP_SUCCESS)
1817                 goto found;
1818 
1819         if (rc != DDI_PROP_NOT_FOUND) {
1820                 cxgb_printf(dip, CE_WARN,
1821                     "failed to lookup property %s for minor %d: %d.",
1822                     name, getminor(dev), rc);
1823                 return (0);
1824         }
1825 
1826         rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1827             name, &d, &n);
1828         if (rc == DDI_PROP_SUCCESS)
1829                 goto found;
1830 
1831         if (rc != DDI_PROP_NOT_FOUND) {
1832                 cxgb_printf(dip, CE_WARN,
1833                     "failed to lookup property %s: %d.", name, rc);
1834                 return (0);
1835         }
1836 
1837         return (0);
1838 
1839 found:
1840         if (n > count) {
1841                 cxgb_printf(dip, CE_NOTE,
1842                     "property %s has too many elements (%d), ignoring extras",
1843                     name, n);
1844         }
1845 
1846         for (i = 0; i < n && i < count; i++)
1847                 data[i] = d[i];
1848         ddi_prop_free(d);
1849 
1850         return (1);
1851 }
1852 
1853 static int
1854 prop_lookup_int(struct adapter *sc, char *name, int defval)
1855 {
1856         int rc;
1857 
1858         rc = ddi_prop_get_int(sc->dev, sc->dip, DDI_PROP_DONTPASS, name, -1);
1859         if (rc != -1)
1860                 return (rc);
1861 
1862         return (ddi_prop_get_int(DDI_DEV_T_ANY, sc->dip, DDI_PROP_DONTPASS,
1863             name, defval));
1864 }
1865 
1866 static int
1867 init_driver_props(struct adapter *sc, struct driver_properties *p)
1868 {
1869         dev_t dev = sc->dev;
1870         dev_info_t *dip = sc->dip;
1871         int i, *data;
1872         uint_t tmr[SGE_NTIMERS] = {5, 10, 20, 50, 100, 200};
1873         uint_t cnt[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
1874 
1875         /*
1876          * Holdoff timer
1877          */
1878         data = &p->timer_val[0];
1879         for (i = 0; i < SGE_NTIMERS; i++)
1880                 data[i] = tmr[i];
1881         (void) prop_lookup_int_array(sc, "holdoff-timer-values", data,
1882             SGE_NTIMERS);
1883         for (i = 0; i < SGE_NTIMERS; i++) {
                int limit = 200;
1885                 if (data[i] > limit) {
1886                         cxgb_printf(dip, CE_WARN,
1887                             "holdoff timer %d is too high (%d), lowered to %d.",
1888                             i, data[i], limit);
1889                         data[i] = limit;
1890                 }
1891         }
1892         (void) ddi_prop_update_int_array(dev, dip, "holdoff-timer-values",
1893             data, SGE_NTIMERS);
1894 
1895         /*
1896          * Holdoff packet counter
1897          */
1898         data = &p->counter_val[0];
1899         for (i = 0; i < SGE_NCOUNTERS; i++)
1900                 data[i] = cnt[i];
1901         (void) prop_lookup_int_array(sc, "holdoff-pkt-counter-values", data,
1902             SGE_NCOUNTERS);
1903         for (i = 0; i < SGE_NCOUNTERS; i++) {
1904                 int limit = M_THRESHOLD_0;
1905                 if (data[i] > limit) {
1906                         cxgb_printf(dip, CE_WARN,
1907                             "holdoff pkt-counter %d is too high (%d), "
1908                             "lowered to %d.", i, data[i], limit);
1909                         data[i] = limit;
1910                 }
1911         }
1912         (void) ddi_prop_update_int_array(dev, dip, "holdoff-pkt-counter-values",
1913             data, SGE_NCOUNTERS);
1914 
1915         /*
         * Maximum # of tx and rx queues to use for each port.  The "10G"
         * properties below apply to all 10G-and-faster (100G, 40G, 25G,
         * 10G) ports; the "1G" properties apply to 1G ports.
1918          */
1919         p->max_ntxq_10g = prop_lookup_int(sc, "max-ntxq-10G-port", 8);
1920         (void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port",
1921             p->max_ntxq_10g);
1922 
1923         p->max_nrxq_10g = prop_lookup_int(sc, "max-nrxq-10G-port", 8);
1924         (void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port",
1925             p->max_nrxq_10g);
1926 
1927         p->max_ntxq_1g = prop_lookup_int(sc, "max-ntxq-1G-port", 2);
1928         (void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port",
1929             p->max_ntxq_1g);
1930 
1931         p->max_nrxq_1g = prop_lookup_int(sc, "max-nrxq-1G-port", 2);
1932         (void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port",
1933             p->max_nrxq_1g);
1934 
1935 #ifdef TCP_OFFLOAD_ENABLE
        p->max_nofldtxq_10g = prop_lookup_int(sc, "max-nofldtxq-10G-port", 8);
        (void) ddi_prop_update_int(dev, dip, "max-nofldtxq-10G-port",
            p->max_nofldtxq_10g);

        p->max_nofldrxq_10g = prop_lookup_int(sc, "max-nofldrxq-10G-port", 2);
        (void) ddi_prop_update_int(dev, dip, "max-nofldrxq-10G-port",
            p->max_nofldrxq_10g);

        p->max_nofldtxq_1g = prop_lookup_int(sc, "max-nofldtxq-1G-port", 2);
        (void) ddi_prop_update_int(dev, dip, "max-nofldtxq-1G-port",
            p->max_nofldtxq_1g);

        p->max_nofldrxq_1g = prop_lookup_int(sc, "max-nofldrxq-1G-port", 1);
        (void) ddi_prop_update_int(dev, dip, "max-nofldrxq-1G-port",
            p->max_nofldrxq_1g);
1951 #endif
1952 
1953         /*
1954          * Holdoff parameters for 10G and 1G ports.
1955          */
1956         p->tmr_idx_10g = prop_lookup_int(sc, "holdoff-timer-idx-10G", 0);
1957         (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-10G",
1958             p->tmr_idx_10g);
1959 
1960         p->pktc_idx_10g = prop_lookup_int(sc, "holdoff-pktc-idx-10G", 2);
1961         (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-10G",
1962             p->pktc_idx_10g);
1963 
1964         p->tmr_idx_1g = prop_lookup_int(sc, "holdoff-timer-idx-1G", 0);
1965         (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-1G",
1966             p->tmr_idx_1g);
1967 
1968         p->pktc_idx_1g = prop_lookup_int(sc, "holdoff-pktc-idx-1G", 2);
1969         (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-1G",
1970             p->pktc_idx_1g);
1971 
1972         /*
1973          * Size (number of entries) of each tx and rx queue.
1974          */
1975         i = prop_lookup_int(sc, "qsize-txq", TX_EQ_QSIZE);
1976         p->qsize_txq = max(i, 128);
1977         if (p->qsize_txq != i) {
1978                 cxgb_printf(dip, CE_WARN,
1979                     "using %d instead of %d as the tx queue size",
1980                     p->qsize_txq, i);
1981         }
1982         (void) ddi_prop_update_int(dev, dip, "qsize-txq", p->qsize_txq);
1983 
1984         i = prop_lookup_int(sc, "qsize-rxq", RX_IQ_QSIZE);
1985         p->qsize_rxq = max(i, 128);
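        /* Round the rx queue size down to a multiple of 8. */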
1986         while (p->qsize_rxq & 7)
1987                 p->qsize_rxq--;
1988         if (p->qsize_rxq != i) {
1989                 cxgb_printf(dip, CE_WARN,
1990                     "using %d instead of %d as the rx queue size",
1991                     p->qsize_rxq, i);
1992         }
1993         (void) ddi_prop_update_int(dev, dip, "qsize-rxq", p->qsize_rxq);
1994 
1995         /*
1996          * Interrupt types allowed.
1997          * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively.  See sys/ddi_intr.h
1998          */
1999         p->intr_types = prop_lookup_int(sc, "interrupt-types",
2000             DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
2001         (void) ddi_prop_update_int(dev, dip, "interrupt-types", p->intr_types);
2002 
2003         /*
2004          * Forwarded interrupt queues.  Create this property to force the driver
2005          * to use forwarded interrupt queues.
2006          */
2007         if (ddi_prop_exists(dev, dip, DDI_PROP_DONTPASS,
2008             "interrupt-forwarding") != 0 ||
2009             ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2010             "interrupt-forwarding") != 0) {
2011                 UNIMPLEMENTED();
2012                 (void) ddi_prop_create(dev, dip, DDI_PROP_CANSLEEP,
2013                     "interrupt-forwarding", NULL, 0);
2014         }
2015 
2016         /*
2017          * Write combining
2018          * 0 to disable, 1 to enable
2019          */
2020         p->wc = prop_lookup_int(sc, "write-combine", 1);
        cxgb_printf(dip, CE_NOTE, "write-combine: using %d", p->wc);
2022         if (p->wc != 0 && p->wc != 1) {
2023                 cxgb_printf(dip, CE_WARN,
2024                     "write-combine: using 1 instead of %d", p->wc);
2025                 p->wc = 1;
2026         }
2027         (void) ddi_prop_update_int(dev, dip, "write-combine", p->wc);
2028 
2029         p->t4_fw_install = prop_lookup_int(sc, "t4_fw_install", 1);
2030         if (p->t4_fw_install != 0 && p->t4_fw_install != 2)
2031                 p->t4_fw_install = 1;
2032         (void) ddi_prop_update_int(dev, dip, "t4_fw_install", p->t4_fw_install);
2033 
2034         /* Multiple Rings */
2035         p->multi_rings = prop_lookup_int(sc, "multi-rings", 1);
2036         if (p->multi_rings != 0 && p->multi_rings != 1) {
                cxgb_printf(dip, CE_NOTE,
                    "multi-rings: using 1 instead of %d", p->multi_rings);
2039                 p->multi_rings = 1;
2040         }
2041 
2042         (void) ddi_prop_update_int(dev, dip, "multi-rings", p->multi_rings);
2043 
2044         return (0);
2045 }
2046 
2047 static int
2048 remove_extra_props(struct adapter *sc, int n10g, int n1g)
2049 {
2050         if (n10g == 0) {
2051                 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-10G-port");
2052                 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-10G-port");
2053                 (void) ddi_prop_remove(sc->dev, sc->dip,
2054                     "holdoff-timer-idx-10G");
2055                 (void) ddi_prop_remove(sc->dev, sc->dip,
2056                     "holdoff-pktc-idx-10G");
2057         }
2058 
2059         if (n1g == 0) {
2060                 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-1G-port");
2061                 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-1G-port");
2062                 (void) ddi_prop_remove(sc->dev, sc->dip,
2063                     "holdoff-timer-idx-1G");
2064                 (void) ddi_prop_remove(sc->dev, sc->dip, "holdoff-pktc-idx-1G");
2065         }
2066 
2067         return (0);
2068 }
2069 
2070 static int
2071 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
2072     struct intrs_and_queues *iaq)
2073 {
2074         struct driver_properties *p = &sc->props;
2075         int rc, itype, itypes, navail, nc, nrxq10g, nrxq1g, n;
2076         int nofldrxq10g = 0, nofldrxq1g = 0;
2077 
2078         bzero(iaq, sizeof (*iaq));
2079         nc = ncpus;     /* our snapshot of the number of CPUs */
2080         iaq->ntxq10g = min(nc, p->max_ntxq_10g);
2081         iaq->ntxq1g = min(nc, p->max_ntxq_1g);
2082         iaq->nrxq10g = nrxq10g = min(nc, p->max_nrxq_10g);
2083         iaq->nrxq1g = nrxq1g = min(nc, p->max_nrxq_1g);
2084 #ifdef TCP_OFFLOAD_ENABLE
2085         iaq->nofldtxq10g = min(nc, p->max_nofldtxq_10g);
2086         iaq->nofldtxq1g = min(nc, p->max_nofldtxq_1g);
2087         iaq->nofldrxq10g = nofldrxq10g = min(nc, p->max_nofldrxq_10g);
2088         iaq->nofldrxq1g = nofldrxq1g = min(nc, p->max_nofldrxq_1g);
2089 #endif
2090 
2091         rc = ddi_intr_get_supported_types(sc->dip, &itypes);
2092         if (rc != DDI_SUCCESS) {
2093                 cxgb_printf(sc->dip, CE_WARN,
2094                     "failed to determine supported interrupt types: %d", rc);
2095                 return (rc);
2096         }
2097 
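        /*
         * DDI_INTR_TYPE_MSIX (4), MSI (2), and FIXED (1) are each a single
         * bit, so shifting right walks the types from most preferred to
         * least.
         */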
2098         for (itype = DDI_INTR_TYPE_MSIX; itype; itype >>= 1) {
2099                 ASSERT(itype == DDI_INTR_TYPE_MSIX ||
2100                     itype == DDI_INTR_TYPE_MSI ||
2101                     itype == DDI_INTR_TYPE_FIXED);
2102 
2103                 if ((itype & itypes & p->intr_types) == 0)
2104                         continue;       /* not supported or not allowed */
2105 
                navail = 0;
                rc = ddi_intr_get_navail(sc->dip, itype, &navail);
                if (rc != DDI_SUCCESS || navail == 0) {
                        cxgb_printf(sc->dip, CE_WARN,
                            "failed to get # of interrupts for type %d: %d",
                            itype, rc);
                        continue;       /* carry on */
                }

                iaq->intr_type = itype;
2118 
2119                 /*
2120                  * Best option: an interrupt vector for errors, one for the
2121                  * firmware event queue, and one each for each rxq (NIC as well
2122                  * as offload).
2123                  */
2124                 iaq->nirq = T4_EXTRA_INTR;
2125                 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
2126                 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
2127 
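                /*
                 * MSI vectors can only be granted in powers of 2, so insist
                 * on a power of 2 vector count (ISP2) when using MSI.
                 */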
2128                 if (iaq->nirq <= navail &&
2129                     (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2130                         iaq->intr_fwd = 0;
2131                         goto allocate;
2132                 }
2133 
2134                 /*
2135                  * Second best option: an interrupt vector for errors, one for
2136                  * the firmware event queue, and one each for either NIC or
2137                  * offload rxq's.
2138                  */
2139                 iaq->nirq = T4_EXTRA_INTR;
2140                 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
2141                 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
2142                 if (iaq->nirq <= navail &&
2143                     (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2144                         iaq->intr_fwd = 1;
2145                         goto allocate;
2146                 }
2147 
2148                 /*
2149                  * Next best option: an interrupt vector for errors, one for the
2150                  * firmware event queue, and at least one per port.  At this
2151                  * point we know we'll have to downsize nrxq or nofldrxq to fit
2152                  * what's available to us.
2153                  */
2154                 iaq->nirq = T4_EXTRA_INTR;
2155                 iaq->nirq += n10g + n1g;
2156                 if (iaq->nirq <= navail) {
2157                         int leftover = navail - iaq->nirq;
2158 
2159                         if (n10g > 0) {
2160                                 int target = max(nrxq10g, nofldrxq10g);
2161 
2162                                 n = 1;
2163                                 while (n < target && leftover >= n10g) {
2164                                         leftover -= n10g;
2165                                         iaq->nirq += n10g;
2166                                         n++;
2167                                 }
2168                                 iaq->nrxq10g = min(n, nrxq10g);
2169 #ifdef TCP_OFFLOAD_ENABLE
2170                                 iaq->nofldrxq10g = min(n, nofldrxq10g);
2171 #endif
2172                         }
2173 
2174                         if (n1g > 0) {
2175                                 int target = max(nrxq1g, nofldrxq1g);
2176 
2177                                 n = 1;
2178                                 while (n < target && leftover >= n1g) {
2179                                         leftover -= n1g;
2180                                         iaq->nirq += n1g;
2181                                         n++;
2182                                 }
2183                                 iaq->nrxq1g = min(n, nrxq1g);
2184 #ifdef TCP_OFFLOAD_ENABLE
2185                                 iaq->nofldrxq1g = min(n, nofldrxq1g);
2186 #endif
2187                         }
2188 
                        /*
                         * We have arrived at the minimum vector count needed
                         * for per-queue interrupts (either NIC or offload).
                         * In the non-offload case each queue gets its own
                         * vector; in the offload case a vector is shared by
                         * an offload/NIC queue pair.  Hence interrupt
                         * forwarding is enabled only for the offload case.
                         */
2196 #ifdef TCP_OFFLOAD_ENABLE
2197                         if (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq)) {
2198                                 iaq->intr_fwd = 1;
2199 #else
2200                         if (itype != DDI_INTR_TYPE_MSI) {
2201 #endif
2202                                 goto allocate;
2203                         }
2204                 }
2205 
2206                 /*
2207                  * Least desirable option: one interrupt vector for everything.
2208                  */
2209                 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2210 #ifdef TCP_OFFLOAD_ENABLE
2211                 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2212 #endif
2213                 iaq->intr_fwd = 1;
2214 
2215 allocate:
2216                 return (0);
2217         }
2218 
2219         cxgb_printf(sc->dip, CE_WARN,
2220             "failed to find a usable interrupt type.  supported=%d, allowed=%d",
2221             itypes, p->intr_types);
2222         return (DDI_FAILURE);
2223 }
2224 
2225 static int
2226 add_child_node(struct adapter *sc, int idx)
2227 {
2228         int rc;
2229         struct port_info *pi;
2230 
2231         if (idx < 0 || idx >= sc->params.nports)
2232                 return (EINVAL);
2233 
2234         pi = sc->port[idx];
2235         if (pi == NULL)
2236                 return (ENODEV);        /* t4_port_init failed earlier */
2237 
2238         PORT_LOCK(pi);
2239         if (pi->dip != NULL) {
2240                 rc = 0;         /* EEXIST really, but then bus_config fails */
2241                 goto done;
2242         }
2243 
2244         rc = ndi_devi_alloc(sc->dip, T4_PORT_NAME, DEVI_SID_NODEID, &pi->dip);
2245         if (rc != DDI_SUCCESS || pi->dip == NULL) {
2246                 rc = ENOMEM;
2247                 goto done;
2248         }
2249 
2250         (void) ddi_set_parent_data(pi->dip, pi);
2251         (void) ndi_devi_bind_driver(pi->dip, 0);
2252         rc = 0;
2253 done:
2254         PORT_UNLOCK(pi);
2255         return (rc);
2256 }
2257 
2258 static int
2259 remove_child_node(struct adapter *sc, int idx)
2260 {
2261         int rc;
2262         struct port_info *pi;
2263 
2264         if (idx < 0 || idx >= sc->params.nports)
2265                 return (EINVAL);
2266 
2267         pi = sc->port[idx];
2268         if (pi == NULL)
2269                 return (ENODEV);
2270 
2271         PORT_LOCK(pi);
2272         if (pi->dip == NULL) {
2273                 rc = ENODEV;
2274                 goto done;
2275         }
2276 
2277         rc = ndi_devi_free(pi->dip);
2278         if (rc == 0)
2279                 pi->dip = NULL;
2280 done:
2281         PORT_UNLOCK(pi);
2282         return (rc);
2283 }
2284 
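/*
 * Convenience wrappers for initializing and setting named kstats; the
 * character (C) variants format into the kstat's 16-byte string value.
 */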
2285 #define KS_UINIT(x)     kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG)
2286 #define KS_CINIT(x)     kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR)
2287 #define KS_U_SET(x, y)  kstatp->x.value.ul = (y)
2288 #define KS_C_SET(x, ...)        \
2289                         (void) snprintf(kstatp->x.value.c, 16,  __VA_ARGS__)
2290 
2291 /*
2292  * t4nex:X:config
2293  */
2294 struct t4_kstats {
2295         kstat_named_t chip_ver;
2296         kstat_named_t fw_vers;
2297         kstat_named_t tp_vers;
2298         kstat_named_t driver_version;
2299         kstat_named_t serial_number;
2300         kstat_named_t ec_level;
2301         kstat_named_t id;
2302         kstat_named_t bus_type;
2303         kstat_named_t bus_width;
2304         kstat_named_t bus_speed;
2305         kstat_named_t core_clock;
2306         kstat_named_t port_cnt;
2307         kstat_named_t port_type;
2308         kstat_named_t pci_vendor_id;
2309         kstat_named_t pci_device_id;
2310 };
2311 static kstat_t *
2312 setup_kstats(struct adapter *sc)
2313 {
2314         kstat_t *ksp;
2315         struct t4_kstats *kstatp;
2316         int ndata;
2317         struct pci_params *p = &sc->params.pci;
2318         struct vpd_params *v = &sc->params.vpd;
2319         uint16_t pci_vendor, pci_device;
2320 
2321         ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t);
2322 
2323         ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config",
2324             "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2325         if (ksp == NULL) {
2326                 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2327                 return (NULL);
2328         }
2329 
2330         kstatp = (struct t4_kstats *)ksp->ks_data;
2331 
2332         KS_UINIT(chip_ver);
2333         KS_CINIT(fw_vers);
2334         KS_CINIT(tp_vers);
2335         KS_CINIT(driver_version);
2336         KS_CINIT(serial_number);
2337         KS_CINIT(ec_level);
2338         KS_CINIT(id);
2339         KS_CINIT(bus_type);
2340         KS_CINIT(bus_width);
2341         KS_CINIT(bus_speed);
2342         KS_UINIT(core_clock);
2343         KS_UINIT(port_cnt);
2344         KS_CINIT(port_type);
2345         KS_CINIT(pci_vendor_id);
2346         KS_CINIT(pci_device_id);
2347 
2348         KS_U_SET(chip_ver, sc->params.chip);
2349         KS_C_SET(fw_vers, "%d.%d.%d.%d",
2350             G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2351             G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2352             G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2353             G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2354         KS_C_SET(tp_vers, "%d.%d.%d.%d",
2355             G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
2356             G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
2357             G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
2358             G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
2359         KS_C_SET(driver_version, DRV_VERSION);
2360         KS_C_SET(serial_number, "%s", v->sn);
2361         KS_C_SET(ec_level, "%s", v->ec);
2362         KS_C_SET(id, "%s", v->id);
2363         KS_C_SET(bus_type, "pci-express");
2364         KS_C_SET(bus_width, "x%d lanes", p->width);
2365         KS_C_SET(bus_speed, "%d", p->speed);
2366         KS_U_SET(core_clock, v->cclk);
2367         KS_U_SET(port_cnt, sc->params.nports);
2368 
2369         t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor);
2370         KS_C_SET(pci_vendor_id, "0x%x", pci_vendor);
2371 
2372         t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device);
2373         KS_C_SET(pci_device_id, "0x%x", pci_device);
2374 
        KS_C_SET(port_type, "%s/%s/%s/%s",
            print_port_speed(sc->port[0]),
            print_port_speed(sc->port[1]),
            print_port_speed(sc->port[2]),
            print_port_speed(sc->port[3]));
2380 
2381         /* Do NOT set ksp->ks_update.  These kstats do not change. */
2382 
2383         /* Install the kstat */
2384         ksp->ks_private = (void *)sc;
2385         kstat_install(ksp);
2386 
2387         return (ksp);
2388 }
2389 
2390 /*
2391  * t4nex:X:stat
2392  */
2393 struct t4_wc_kstats {
2394         kstat_named_t write_coal_success;
2395         kstat_named_t write_coal_failure;
2396 };
2397 static kstat_t *
2398 setup_wc_kstats(struct adapter *sc)
2399 {
2400         kstat_t *ksp;
2401         struct t4_wc_kstats *kstatp;
2402         int ndata;
2403 
        ndata = sizeof (struct t4_wc_kstats) / sizeof (kstat_named_t);
2405         ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "stats",
2406             "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2407         if (ksp == NULL) {
2408                 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2409                 return (NULL);
2410         }
2411 
2412         kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2413 
2414         KS_UINIT(write_coal_success);
2415         KS_UINIT(write_coal_failure);
2416 
2417         ksp->ks_update = update_wc_kstats;
2418         /* Install the kstat */
2419         ksp->ks_private = (void *)sc;
2420         kstat_install(ksp);
2421 
2422         return (ksp);
2423 }
2424 
2425 static int
2426 update_wc_kstats(kstat_t *ksp, int rw)
2427 {
2428         struct t4_wc_kstats *kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2429         struct adapter *sc = ksp->ks_private;
2430         uint32_t wc_total, wc_success, wc_failure;
2431 
2432         if (rw == KSTAT_WRITE)
2433                 return (0);
2434 
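        /*
         * Write-combining statistics are only maintained for T5 here:
         * A_SGE_STAT_TOTAL counts all doorbell writes and A_SGE_STAT_MATCH
         * is treated as the number that failed to coalesce.
         */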
2435         if (is_t5(sc->params.chip)) {
2436                 wc_total = t4_read_reg(sc, A_SGE_STAT_TOTAL);
2437                 wc_failure = t4_read_reg(sc, A_SGE_STAT_MATCH);
2438                 wc_success = wc_total - wc_failure;
2439         } else {
2440                 wc_success = 0;
2441                 wc_failure = 0;
2442         }
2443 
2444         KS_U_SET(write_coal_success, wc_success);
2445         KS_U_SET(write_coal_failure, wc_failure);
2446 
2447         return (0);
2448 }
2449 
2450 int
2451 adapter_full_init(struct adapter *sc)
2452 {
2453         int i, rc = 0;
2454 
2455         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2456 
2457         rc = t4_setup_adapter_queues(sc);
2458         if (rc != 0)
2459                 goto done;
2460 
        if (sc->intr_cap & DDI_INTR_FLAG_BLOCK) {
                (void) ddi_intr_block_enable(sc->intr_handle,
                    sc->intr_count);
        } else {
                for (i = 0; i < sc->intr_count; i++)
                        (void) ddi_intr_enable(sc->intr_handle[i]);
        }
2467         t4_intr_enable(sc);
2468         sc->flags |= FULL_INIT_DONE;
2469 
2470 #ifdef TCP_OFFLOAD_ENABLE
2471         /* TODO: wrong place to enable TOE capability */
2472         if (is_offload(sc) != 0) {
2473                 for_each_port(sc, i) {
2474                         struct port_info *pi = sc->port[i];
2475                         rc = toe_capability(pi, 1);
2476                         if (rc != 0) {
2477                                 cxgb_printf(pi->dip, CE_WARN,
2478                                     "Failed to activate toe capability: %d",
2479                                     rc);
2480                                 rc = 0;         /* not a fatal error */
2481                         }
2482                 }
2483         }
2484 #endif
2485 
2486 done:
2487         if (rc != 0)
2488                 (void) adapter_full_uninit(sc);
2489 
2490         return (rc);
2491 }
2492 
2493 int
2494 adapter_full_uninit(struct adapter *sc)
2495 {
2496         int i, rc = 0;
2497 
2498         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2499 
        if (sc->intr_cap & DDI_INTR_FLAG_BLOCK) {
                (void) ddi_intr_block_disable(sc->intr_handle,
                    sc->intr_count);
        } else {
                for (i = 0; i < sc->intr_count; i++)
                        (void) ddi_intr_disable(sc->intr_handle[i]);
        }
2506 
2507         rc = t4_teardown_adapter_queues(sc);
2508         if (rc != 0)
2509                 return (rc);
2510 
2511         sc->flags &= ~FULL_INIT_DONE;
2512 
2513         return (0);
2514 }
2515 
2516 int
2517 port_full_init(struct port_info *pi)
2518 {
2519         struct adapter *sc = pi->adapter;
2520         uint16_t *rss;
2521         struct sge_rxq *rxq;
2522         int rc, i;
2523 
2524         ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2525         ASSERT((pi->flags & PORT_INIT_DONE) == 0);
2526 
2527         /*
2528          * Allocate tx/rx/fl queues for this port.
2529          */
2530         rc = t4_setup_port_queues(pi);
2531         if (rc != 0)
2532                 goto done;      /* error message displayed already */
2533 
2534         /*
2535          * Setup RSS for this port.
2536          */
2537         rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP);
2538         for_each_rxq(pi, i, rxq) {
2539                 rss[i] = rxq->iq.abs_id;
2540         }
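        /*
         * t4_config_rss_range() spreads these nrxq absolute queue ids
         * across all pi->rss_size slots of the VI's RSS indirection table.
         */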
2541         rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2542             pi->rss_size, rss, pi->nrxq);
2543         kmem_free(rss, pi->nrxq * sizeof (*rss));
2544         if (rc != 0) {
2545                 cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc);
2546                 goto done;
2547         }
2548 
2549         pi->flags |= PORT_INIT_DONE;
2550 done:
2551         if (rc != 0)
2552                 (void) port_full_uninit(pi);
2553 
2554         return (rc);
2555 }
2556 
2557 /*
2558  * Idempotent.
2559  */
2560 int
2561 port_full_uninit(struct port_info *pi)
2562 {
2563 
2564         ASSERT(pi->flags & PORT_INIT_DONE);
2565 
2566         (void) t4_teardown_port_queues(pi);
2567         pi->flags &= ~PORT_INIT_DONE;
2568 
2569         return (0);
2570 }
2571 
2572 void
2573 enable_port_queues(struct port_info *pi)
2574 {
2575         struct adapter *sc = pi->adapter;
2576         int i;
2577         struct sge_iq *iq;
2578         struct sge_rxq *rxq;
2579 #ifdef TCP_OFFLOAD_ENABLE
2580         struct sge_ofld_rxq *ofld_rxq;
2581 #endif
2582 
2583         ASSERT(pi->flags & PORT_INIT_DONE);
2584 
2585         /*
2586          * TODO: whatever was queued up after we set iq->state to IQS_DISABLED
2587          * back in disable_port_queues will be processed now, after an unbounded
2588          * delay.  This can't be good.
2589          */
2590 
2591 #ifdef TCP_OFFLOAD_ENABLE
2592         for_each_ofld_rxq(pi, i, ofld_rxq) {
2593                 iq = &ofld_rxq->iq;
2594                 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2595                     IQS_DISABLED)
2596                         panic("%s: iq %p wasn't disabled", __func__,
2597                             (void *)iq);
2598                 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2599                     V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2600         }
2601 #endif
2602 
2603         for_each_rxq(pi, i, rxq) {
2604                 iq = &rxq->iq;
2605                 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2606                     IQS_DISABLED)
                        panic("%s: iq %p wasn't disabled", __func__,
                            (void *)iq);
2609                 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2610                     V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2611         }
2612 }
2613 
2614 void
2615 disable_port_queues(struct port_info *pi)
2616 {
2617         int i;
2618         struct adapter *sc = pi->adapter;
2619         struct sge_rxq *rxq;
2620 #ifdef TCP_OFFLOAD_ENABLE
2621         struct sge_ofld_rxq *ofld_rxq;
2622 #endif
2623 
2624         ASSERT(pi->flags & PORT_INIT_DONE);
2625 
2626         /*
2627          * TODO: need proper implementation for all tx queues (ctrl, eth, ofld).
2628          */
2629 
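        /*
         * Spin until each rx queue's state moves from IQS_IDLE to
         * IQS_DISABLED; the CAS fails while the interrupt handler is busy
         * with the queue.
         */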
2630 #ifdef TCP_OFFLOAD_ENABLE
2631         for_each_ofld_rxq(pi, i, ofld_rxq) {
2632                 while (atomic_cas_uint(&ofld_rxq->iq.state, IQS_IDLE,
2633                     IQS_DISABLED) != IQS_IDLE)
2634                         msleep(1);
2635         }
2636 #endif
2637 
2638         for_each_rxq(pi, i, rxq) {
2639                 while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE,
2640                     IQS_DISABLED) != IQS_IDLE)
2641                         msleep(1);
2642         }
2643 
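        /*
         * Mark every free list as doomed so the starvation-relief
         * mechanism stops refilling it; sfl_lock protects the list of
         * starving free lists.
         */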
2644         mutex_enter(&sc->sfl_lock);
2645 #ifdef TCP_OFFLOAD_ENABLE
2646         for_each_ofld_rxq(pi, i, ofld_rxq)
2647             ofld_rxq->fl.flags |= FL_DOOMED;
2648 #endif
2649         for_each_rxq(pi, i, rxq)
2650             rxq->fl.flags |= FL_DOOMED;
2651         mutex_exit(&sc->sfl_lock);
2652         /* TODO: need to wait for all fl's to be removed from sc->sfl */
2653 }
2654 
2655 void
2656 t4_fatal_err(struct adapter *sc)
2657 {
2658         t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2659         t4_intr_disable(sc);
2660         cxgb_printf(sc->dip, CE_WARN,
2661             "encountered fatal error, adapter stopped.");
2662 }
2663 
2664 int
2665 t4_os_find_pci_capability(struct adapter *sc, int cap)
2666 {
2667         uint16_t stat;
2668         uint8_t cap_ptr, cap_id;
2669 
2670         t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat);
2671         if ((stat & PCI_STAT_CAP) == 0)
2672                 return (0); /* does not implement capabilities */
2673 
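        /* Walk the capability list, looking for a matching capability ID. */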
2674         t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr);
2675         while (cap_ptr) {
2676                 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id);
2677                 if (cap_id == cap)
2678                         return (cap_ptr); /* found */
2679                 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr);
2680         }
2681 
2682         return (0); /* not found */
2683 }
2684 
2685 void
2686 t4_os_portmod_changed(const struct adapter *sc, int idx)
2687 {
2688         static const char *mod_str[] = {
2689                 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
2690         };
2691         const struct port_info *pi = sc->port[idx];
2692 
2693         if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
2694                 cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged.");
2695         else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
                cxgb_printf(pi->dip, CE_NOTE,
                    "unknown transceiver inserted.");
        else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
                cxgb_printf(pi->dip, CE_NOTE,
                    "unsupported transceiver inserted.");
        else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str))
                cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.",
                    mod_str[pi->mod_type]);
2704         else
2705                 cxgb_printf(pi->dip, CE_NOTE, "transceiver (type %d) inserted.",
2706                     pi->mod_type);
2707 }
2708 
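/*
 * Default CPL handler; it simply frees the message.  Specific handlers are
 * installed with t4_register_cpl_handler().
 */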
2709 /* ARGSUSED */
2710 static int
2711 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
2712 {
2713         if (m != NULL)
2714                 freemsg(m);
2715         return (0);
2716 }
2717 
2718 int
2719 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
2720 {
        cpl_handler_t *loc, new;

        if (opcode >= ARRAY_SIZE(sc->cpl_handler))
                return (EINVAL);

        /*
         * Swap in the whole function pointer; squeezing it through a
         * uint_t would truncate the handler address on 64-bit kernels.
         */
        new = h != NULL ? h : cpl_not_handled;
        loc = &sc->cpl_handler[opcode];
        (void) atomic_swap_ptr(loc, (void *)new);
2729 
2730         return (0);
2731 }
2732 
2733 static int
2734 fw_msg_not_handled(struct adapter *sc, const __be64 *data)
2735 {
2736         struct cpl_fw6_msg *cpl;
2737 
2738         cpl = __containerof((void *)data, struct cpl_fw6_msg, data);
2739 
2740         cxgb_printf(sc->dip, CE_WARN, "%s fw_msg type %d", __func__, cpl->type);
2741         return (0);
2742 }
2743 
2744 int
2745 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
2746 {
2747         fw_msg_handler_t *loc, new;
2748 
2749         if (type >= ARRAY_SIZE(sc->fw_msg_handler))
2750                 return (EINVAL);
2751 
2752         /*
2753          * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
2754          * handler dispatch table.  Reject any attempt to install a handler for
2755          * this subtype.
2756          */
2757         if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
2758                 return (EINVAL);
2759 
2760         new = h ? h : fw_msg_not_handled;
2761         loc = &sc->fw_msg_handler[type];
        (void) atomic_swap_ptr(loc, (void *)new);
2763 
2764         return (0);
2765 }
2766 
2767 #ifdef TCP_OFFLOAD_ENABLE
2768 static int
2769 toe_capability(struct port_info *pi, int enable)
2770 {
2771         int rc;
2772         struct adapter *sc = pi->adapter;
2773 
2774         if (!is_offload(sc))
2775                 return (ENODEV);
2776 
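        /*
         * The TOM ULD is activated when the first port enables TOE and
         * deactivated again when the last port disables it; offload_map
         * tracks which ports currently have TOE enabled.
         */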
2777         if (enable != 0) {
2778                 if (isset(&sc->offload_map, pi->port_id) != 0)
2779                         return (0);
2780 
2781                 if (sc->offload_map == 0) {
2782                         rc = activate_uld(sc, ULD_TOM, &sc->tom);
2783                         if (rc != 0)
2784                                 return (rc);
2785                 }
2786 
2787                 setbit(&sc->offload_map, pi->port_id);
2788         } else {
2789                 if (!isset(&sc->offload_map, pi->port_id))
2790                         return (0);
2791 
2792                 clrbit(&sc->offload_map, pi->port_id);
2793 
2794                 if (sc->offload_map == 0) {
2795                         rc = deactivate_uld(&sc->tom);
2796                         if (rc != 0) {
2797                                 setbit(&sc->offload_map, pi->port_id);
2798                                 return (rc);
2799                         }
2800                 }
2801         }
2802 
2803         return (0);
2804 }
2805 
2806 /*
2807  * Add an upper layer driver to the global list.
2808  */
2809 int
2810 t4_register_uld(struct uld_info *ui)
2811 {
2812         int rc = 0;
2813         struct uld_info *u;
2814 
2815         mutex_enter(&t4_uld_list_lock);
2816         SLIST_FOREACH(u, &t4_uld_list, link) {
2817                 if (u->uld_id == ui->uld_id) {
2818                         rc = EEXIST;
2819                         goto done;
2820                 }
2821         }
2822 
2823         SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
2824         ui->refcount = 0;
2825 done:
2826         mutex_exit(&t4_uld_list_lock);
2827         return (rc);
2828 }
2829 
2830 int
2831 t4_unregister_uld(struct uld_info *ui)
2832 {
2833         int rc = EINVAL;
2834         struct uld_info *u;
2835 
2836         mutex_enter(&t4_uld_list_lock);
2837 
2838         SLIST_FOREACH(u, &t4_uld_list, link) {
2839                 if (u == ui) {
2840                         if (ui->refcount > 0) {
2841                                 rc = EBUSY;
2842                                 goto done;
2843                         }
2844 
2845                         SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
2846                         rc = 0;
2847                         goto done;
2848                 }
2849         }
2850 done:
2851         mutex_exit(&t4_uld_list_lock);
2852         return (rc);
2853 }
2854 
2855 static int
2856 activate_uld(struct adapter *sc, int id, struct uld_softc *usc)
2857 {
2858         int rc = EAGAIN;
2859         struct uld_info *ui;
2860 
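        /* EAGAIN is returned if no ULD with a matching id is registered. */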
2861         mutex_enter(&t4_uld_list_lock);
2862 
2863         SLIST_FOREACH(ui, &t4_uld_list, link) {
2864                 if (ui->uld_id == id) {
2865                         rc = ui->attach(sc, &usc->softc);
2866                         if (rc == 0) {
2867                                 ASSERT(usc->softc != NULL);
2868                                 ui->refcount++;
2869                                 usc->uld = ui;
2870                         }
2871                         goto done;
2872                 }
2873         }
2874 done:
2875         mutex_exit(&t4_uld_list_lock);
2876 
2877         return (rc);
2878 }
2879 
2880 static int
2881 deactivate_uld(struct uld_softc *usc)
2882 {
2883         int rc;
2884 
2885         mutex_enter(&t4_uld_list_lock);
2886 
2887         if (usc->uld == NULL || usc->softc == NULL) {
2888                 rc = EINVAL;
2889                 goto done;
2890         }
2891 
2892         rc = usc->uld->detach(usc->softc);
2893         if (rc == 0) {
2894                 ASSERT(usc->uld->refcount > 0);
2895                 usc->uld->refcount--;
2896                 usc->uld = NULL;
2897                 usc->softc = NULL;
2898         }
2899 done:
2900         mutex_exit(&t4_uld_list_lock);
2901 
2902         return (rc);
2903 }
2904 
2905 void
2906 t4_iterate(void (*func)(int, void *), void *arg)
2907 {
2908         struct adapter *sc;
2909 
2910         mutex_enter(&t4_adapter_list_lock);
2911         SLIST_FOREACH(sc, &t4_adapter_list, link) {
2912                 /*
2913                  * func should not make any assumptions about what state sc is
2914                  * in - the only guarantee is that sc->sc_lock is a valid lock.
2915                  */
2916                 func(ddi_get_instance(sc->dip), arg);
2917         }
2918         mutex_exit(&t4_adapter_list_lock);
2919 }
2920 
2921 #endif