9994 cxgbe t4nex: Handle get_fl_payload() alloc failures
9995 cxgbe t4_devo_attach() should initialize ->sfl
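The two fixes travel together: 9994 makes the rx path survive buffer-allocation failures in get_fl_payload() (per its synopsis, a free list that cannot be refilled is parked on the adapter's starving-free-list queue and retried later), and 9995 makes t4_devo_attach() initialize that queue head, sc->sfl, next to its lock; that is the one-line "+" hunk visible in the diff below. A minimal sketch of why the initialization order matters (sfl, sfl_lock, mutex_init, and TAILQ_INIT appear in this webrev; the allocation-failure path and the fl->link field name are assumptions inferred from the 9994 synopsis):

	/* attach: make the starving-fl queue usable before interrupts fire */
	mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL);
	TAILQ_INIT(&sc->sfl);

	/* rx path, on an allocation failure (hypothetical sketch) */
	mutex_enter(&sc->sfl_lock);
	TAILQ_INSERT_TAIL(&sc->sfl, fl, link);	/* writes through tqh_last,
						   which is NULL on a zeroed,
						   uninitialized queue head */
	mutex_exit(&sc->sfl_lock);

ddi_soft_state_zalloc() leaves sc->sfl all zeros, and a zeroed TAILQ head has tqh_last == NULL, so the first TAILQ_INSERT_TAIL() would dereference NULL. TAILQ_INIT() points tqh_last back at tqh_first, making the empty list safe to use from the moment interrupts can reach it.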
--- old/usr/src/uts/common/io/cxgbe/t4nex/t4_nexus.c
+++ new/usr/src/uts/common/io/cxgbe/t4nex/t4_nexus.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * This file is part of the Chelsio T4 support code.
14 14 *
15 15 * Copyright (C) 2010-2013 Chelsio Communications. All rights reserved.
16 16 *
17 17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 19 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
20 20 * release for licensing terms and conditions.
21 21 */
22 22
23 23 #include <sys/ddi.h>
24 24 #include <sys/sunddi.h>
25 25 #include <sys/sunndi.h>
26 26 #include <sys/modctl.h>
27 27 #include <sys/conf.h>
28 28 #include <sys/devops.h>
29 29 #include <sys/pci.h>
30 30 #include <sys/atomic.h>
31 31 #include <sys/types.h>
32 32 #include <sys/file.h>
33 33 #include <sys/errno.h>
34 34 #include <sys/open.h>
35 35 #include <sys/cred.h>
36 36 #include <sys/stat.h>
37 37 #include <sys/mkdev.h>
38 38 #include <sys/queue.h>
39 39 #include <sys/containerof.h>
40 40
41 41 #include "version.h"
42 42 #include "common/common.h"
43 43 #include "common/t4_msg.h"
44 44 #include "common/t4_regs.h"
45 45 #include "firmware/t4_fw.h"
46 46 #include "firmware/t4_cfg.h"
47 47 #include "firmware/t5_fw.h"
48 48 #include "firmware/t5_cfg.h"
49 49 #include "firmware/t6_fw.h"
50 50 #include "firmware/t6_cfg.h"
51 51 #include "t4_l2t.h"
52 52
53 53 static int t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp);
54 54 static int t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp);
55 55 static int t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp,
56 56 int *rp);
57 57 struct cb_ops t4_cb_ops = {
58 58 .cb_open = t4_cb_open,
59 59 .cb_close = t4_cb_close,
60 60 .cb_strategy = nodev,
61 61 .cb_print = nodev,
62 62 .cb_dump = nodev,
63 63 .cb_read = nodev,
64 64 .cb_write = nodev,
65 65 .cb_ioctl = t4_cb_ioctl,
66 66 .cb_devmap = nodev,
67 67 .cb_mmap = nodev,
68 68 .cb_segmap = nodev,
69 69 .cb_chpoll = nochpoll,
70 70 .cb_prop_op = ddi_prop_op,
71 71 .cb_flag = D_MP,
72 72 .cb_rev = CB_REV,
73 73 .cb_aread = nodev,
74 74 .cb_awrite = nodev
75 75 };
76 76
77 77 static int t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
78 78 void *arg, void *result);
79 79 static int t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
80 80 void *arg, dev_info_t **cdipp);
81 81 static int t4_bus_unconfig(dev_info_t *dip, uint_t flags,
82 82 ddi_bus_config_op_t op, void *arg);
83 83 struct bus_ops t4_bus_ops = {
84 84 .busops_rev = BUSO_REV,
85 85 .bus_ctl = t4_bus_ctl,
86 86 .bus_prop_op = ddi_bus_prop_op,
87 87 .bus_config = t4_bus_config,
88 88 .bus_unconfig = t4_bus_unconfig,
89 89 };
90 90
91 91 static int t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
92 92 void **rp);
93 93 static int t4_devo_probe(dev_info_t *dip);
94 94 static int t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
95 95 static int t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
96 96 static int t4_devo_quiesce(dev_info_t *dip);
97 97 struct dev_ops t4_dev_ops = {
98 98 .devo_rev = DEVO_REV,
99 99 .devo_getinfo = t4_devo_getinfo,
100 100 .devo_identify = nulldev,
101 101 .devo_probe = t4_devo_probe,
102 102 .devo_attach = t4_devo_attach,
103 103 .devo_detach = t4_devo_detach,
104 104 .devo_reset = nodev,
105 105 .devo_cb_ops = &t4_cb_ops,
106 106 .devo_bus_ops = &t4_bus_ops,
107 107 .devo_quiesce = &t4_devo_quiesce,
108 108 };
109 109
110 110 static struct modldrv modldrv = {
111 111 .drv_modops = &mod_driverops,
112 112 .drv_linkinfo = "Chelsio T4 nexus " DRV_VERSION,
113 113 .drv_dev_ops = &t4_dev_ops
114 114 };
115 115
116 116 static struct modlinkage modlinkage = {
117 117 .ml_rev = MODREV_1,
118 118 .ml_linkage = {&modldrv, NULL},
119 119 };
120 120
121 121 void *t4_list;
122 122
123 123 struct intrs_and_queues {
124 124 int intr_type; /* DDI_INTR_TYPE_* */
125 125 int nirq; /* Number of vectors */
126 126 int intr_fwd; /* Interrupts forwarded */
127 127 int ntxq10g; /* # of NIC txq's for each 10G port */
128 128 int nrxq10g; /* # of NIC rxq's for each 10G port */
129 129 int ntxq1g; /* # of NIC txq's for each 1G port */
130 130 int nrxq1g; /* # of NIC rxq's for each 1G port */
131 131 #ifdef TCP_OFFLOAD_ENABLE
132 132 int nofldtxq10g; /* # of TOE txq's for each 10G port */
133 133 int nofldrxq10g; /* # of TOE rxq's for each 10G port */
134 134 int nofldtxq1g; /* # of TOE txq's for each 1G port */
135 135 int nofldrxq1g; /* # of TOE rxq's for each 1G port */
136 136 #endif
137 137 };
138 138
139 139 struct fw_info fi[3];
140 140
141 141 static int cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss,
142 142 mblk_t *m);
143 143 static int fw_msg_not_handled(struct adapter *, const __be64 *);
144 144 int t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h);
145 145 static unsigned int getpf(struct adapter *sc);
146 146 static int prep_firmware(struct adapter *sc);
147 147 static int upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma);
148 148 static int partition_resources(struct adapter *sc);
149 149 static int adap__pre_init_tweaks(struct adapter *sc);
150 150 static int get_params__pre_init(struct adapter *sc);
151 151 static int get_params__post_init(struct adapter *sc);
152 152 static int set_params__post_init(struct adapter *);
153 153 static void setup_memwin(struct adapter *sc);
154 154 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
155 155 uint32_t *);
156 156 void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
157 157 uint32_t position_memwin(struct adapter *, int, uint32_t);
158 158 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data,
159 159 uint_t count);
160 160 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data,
161 161 uint_t count);
162 162 static int init_driver_props(struct adapter *sc, struct driver_properties *p);
163 163 static int remove_extra_props(struct adapter *sc, int n10g, int n1g);
164 164 static int cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
165 165 struct intrs_and_queues *iaq);
166 166 static int add_child_node(struct adapter *sc, int idx);
167 167 static int remove_child_node(struct adapter *sc, int idx);
168 168 static kstat_t *setup_kstats(struct adapter *sc);
169 169 static kstat_t *setup_wc_kstats(struct adapter *);
170 170 static int update_wc_kstats(kstat_t *, int);
171 171 #ifdef TCP_OFFLOAD_ENABLE
172 172 static int toe_capability(struct port_info *pi, int enable);
173 173 static int activate_uld(struct adapter *sc, int id, struct uld_softc *usc);
174 174 static int deactivate_uld(struct uld_softc *usc);
175 175 #endif
176 176 static kmutex_t t4_adapter_list_lock;
177 177 static SLIST_HEAD(, adapter) t4_adapter_list;
178 178 #ifdef TCP_OFFLOAD_ENABLE
179 179 static kmutex_t t4_uld_list_lock;
180 180 static SLIST_HEAD(, uld_info) t4_uld_list;
181 181 #endif
182 182
183 183 int
184 184 _init(void)
185 185 {
186 186 int rc;
187 187
188 188 rc = ddi_soft_state_init(&t4_list, sizeof (struct adapter), 0);
189 189 if (rc != 0)
190 190 return (rc);
191 191
192 192 rc = mod_install(&modlinkage);
193 193 if (rc != 0)
194 194 ddi_soft_state_fini(&t4_list);
195 195
196 196 mutex_init(&t4_adapter_list_lock, NULL, MUTEX_DRIVER, NULL);
197 197 SLIST_INIT(&t4_adapter_list);
198 198
199 199 #ifdef TCP_OFFLOAD_ENABLE
200 200 mutex_init(&t4_uld_list_lock, NULL, MUTEX_DRIVER, NULL);
201 201 SLIST_INIT(&t4_uld_list);
202 202 #endif
203 203
204 204 return (rc);
205 205 }
206 206
207 207 int
208 208 _fini(void)
209 209 {
210 210 int rc;
211 211
212 212 rc = mod_remove(&modlinkage);
213 213 if (rc != 0)
214 214 return (rc);
215 215
216 216 ddi_soft_state_fini(&t4_list);
217 217 return (0);
218 218 }
219 219
220 220 int
221 221 _info(struct modinfo *mi)
222 222 {
223 223 return (mod_info(&modlinkage, mi));
224 224 }
225 225
226 226 /* ARGSUSED */
227 227 static int
228 228 t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp)
229 229 {
230 230 struct adapter *sc;
231 231 minor_t minor;
232 232
233 233 minor = getminor((dev_t)arg); /* same as instance# in our case */
234 234
235 235 if (cmd == DDI_INFO_DEVT2DEVINFO) {
236 236 sc = ddi_get_soft_state(t4_list, minor);
237 237 if (sc == NULL)
238 238 return (DDI_FAILURE);
239 239
240 240 ASSERT(sc->dev == (dev_t)arg);
241 241 *rp = (void *)sc->dip;
242 242 } else if (cmd == DDI_INFO_DEVT2INSTANCE)
243 243 *rp = (void *) (unsigned long) minor;
244 244 else
245 245 ASSERT(0);
246 246
247 247 return (DDI_SUCCESS);
248 248 }
249 249
250 250 static int
251 251 t4_devo_probe(dev_info_t *dip)
252 252 {
253 253 int rc, id, *reg;
254 254 uint_t n, pf;
255 255
256 256 id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
257 257 "device-id", 0xffff);
258 258 if (id == 0xffff)
259 259 return (DDI_PROBE_DONTCARE);
260 260
261 261 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
262 262 "reg", &reg, &n);
263 263 if (rc != DDI_SUCCESS)
264 264 return (DDI_PROBE_DONTCARE);
265 265
266 266 pf = PCI_REG_FUNC_G(reg[0]);
267 267 ddi_prop_free(reg);
268 268
269 269 /* Prevent driver attachment on any PF except 0 on the FPGA */
270 270 if (id == 0xa000 && pf != 0)
271 271 return (DDI_PROBE_FAILURE);
272 272
273 273 return (DDI_PROBE_DONTCARE);
274 274 }
275 275
276 276 static int
277 277 t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
278 278 {
279 279 struct adapter *sc = NULL;
280 280 struct sge *s;
281 281 int i, instance, rc = DDI_SUCCESS, rqidx, tqidx, q;
282 282 int irq = 0, nxg, n100g, n40g, n25g, n10g, n1g;
283 283 #ifdef TCP_OFFLOAD_ENABLE
284 284 int ofld_rqidx, ofld_tqidx;
285 285 #endif
286 286 char name[16];
287 287 struct driver_properties *prp;
288 288 struct intrs_and_queues iaq;
289 289 ddi_device_acc_attr_t da = {
290 290 .devacc_attr_version = DDI_DEVICE_ATTR_V0,
291 291 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
292 292 .devacc_attr_dataorder = DDI_UNORDERED_OK_ACC
293 293 };
294 294 ddi_device_acc_attr_t da1 = {
295 295 .devacc_attr_version = DDI_DEVICE_ATTR_V0,
296 296 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
297 297 .devacc_attr_dataorder = DDI_MERGING_OK_ACC
298 298 };
299 299
300 300 if (cmd != DDI_ATTACH)
301 301 return (DDI_FAILURE);
302 302
303 303 /*
304 304 * Allocate space for soft state.
305 305 */
306 306 instance = ddi_get_instance(dip);
307 307 rc = ddi_soft_state_zalloc(t4_list, instance);
308 308 if (rc != DDI_SUCCESS) {
309 309 cxgb_printf(dip, CE_WARN,
310 310 "failed to allocate soft state: %d", rc);
311 311 return (DDI_FAILURE);
312 312 }
313 313
314 314 sc = ddi_get_soft_state(t4_list, instance);
315 315 sc->dip = dip;
316 316 sc->dev = makedevice(ddi_driver_major(dip), instance);
317 317 mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL);
318 318 cv_init(&sc->cv, NULL, CV_DRIVER, NULL);
319 319 mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL);
320 + TAILQ_INIT(&sc->sfl);
320 321
321 322 mutex_enter(&t4_adapter_list_lock);
322 323 SLIST_INSERT_HEAD(&t4_adapter_list, sc, link);
323 324 mutex_exit(&t4_adapter_list_lock);
324 325
325 326 sc->pf = getpf(sc);
326 327 if (sc->pf > 8) {
327 328 rc = EINVAL;
328 329 cxgb_printf(dip, CE_WARN,
329 330 "failed to determine PCI PF# of device");
330 331 goto done;
331 332 }
332 333 sc->mbox = sc->pf;
333 334
334 335 /* Initialize the driver properties */
335 336 prp = &sc->props;
336 337 (void)init_driver_props(sc, prp);
337 338
338 339 /*
339 340 * Enable access to the PCI config space.
340 341 */
341 342 rc = pci_config_setup(dip, &sc->pci_regh);
342 343 if (rc != DDI_SUCCESS) {
343 344 cxgb_printf(dip, CE_WARN,
344 345 "failed to enable PCI config space access: %d", rc);
345 346 goto done;
346 347 }
347 348
348 349 /* TODO: Set max read request to 4K */
349 350
350 351 /*
351 352 * Enable MMIO access.
352 353 */
353 354 rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh);
354 355 if (rc != DDI_SUCCESS) {
355 356 cxgb_printf(dip, CE_WARN,
356 357 "failed to map device registers: %d", rc);
357 358 goto done;
358 359 }
359 360
360 361 (void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map));
361 362
362 363 /*
363 364 * Initialize cpl handler.
364 365 */
365 366 for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++) {
366 367 sc->cpl_handler[i] = cpl_not_handled;
367 368 }
368 369
369 370 for (i = 0; i < ARRAY_SIZE(sc->fw_msg_handler); i++) {
370 371 sc->fw_msg_handler[i] = fw_msg_not_handled;
371 372 }
372 373
373 374 for (i = 0; i < NCHAN; i++) {
374 375 (void) snprintf(name, sizeof (name), "%s-%d",
375 376 "reclaim", i);
376 377 sc->tq[i] = ddi_taskq_create(sc->dip,
377 378 name, 1, TASKQ_DEFAULTPRI, 0);
378 379
379 380 if (sc->tq[i] == NULL) {
380 381 cxgb_printf(dip, CE_WARN,
381 382 "failed to create task queues");
382 383 rc = DDI_FAILURE;
383 384 goto done;
384 385 }
385 386 }
386 387
387 388 /*
388 389 * Prepare the adapter for operation.
389 390 */
390 391 rc = -t4_prep_adapter(sc, false);
391 392 if (rc != 0) {
392 393 cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc);
393 394 goto done;
394 395 }
395 396
396 397 /*
397 398 * Enable BAR1 access.
398 399 */
399 400 sc->doorbells |= DOORBELL_KDB;
400 401 rc = ddi_regs_map_setup(dip, 2, &sc->reg1p, 0, 0, &da1, &sc->reg1h);
401 402 if (rc != DDI_SUCCESS) {
402 403 cxgb_printf(dip, CE_WARN,
403 404 "failed to map BAR1 device registers: %d", rc);
404 405 goto done;
405 406 } else {
406 407 if (is_t5(sc->params.chip)) {
407 408 sc->doorbells |= DOORBELL_UDB;
408 409 if (prp->wc) {
409 410 /*
410 411 * Enable write combining on BAR2. This is the
411 412 * userspace doorbell BAR and is split into 128B
412 413 * (UDBS_SEG_SIZE) doorbell regions, each associated
413 414 * with an egress queue. The first 64B has the doorbell
414 415 * and the second 64B can be used to submit a tx work
415 416 * request with an implicit doorbell.
416 417 */
417 418 sc->doorbells &= ~DOORBELL_UDB;
418 419 sc->doorbells |= (DOORBELL_WCWR |
419 420 DOORBELL_UDBWC);
420 421 t4_write_reg(sc, A_SGE_STAT_CFG,
421 422 V_STATSOURCE_T5(7) | V_STATMODE(0));
422 423 }
423 424 }
424 425 }
425 426
426 427 /*
427 428 * Do this really early. Note that minor number = instance.
428 429 */
429 430 (void) snprintf(name, sizeof (name), "%s,%d", T4_NEXUS_NAME, instance);
430 431 rc = ddi_create_minor_node(dip, name, S_IFCHR, instance,
431 432 DDI_NT_NEXUS, 0);
432 433 if (rc != DDI_SUCCESS) {
433 434 cxgb_printf(dip, CE_WARN,
434 435 "failed to create device node: %d", rc);
435 436 rc = DDI_SUCCESS; /* carry on */
436 437 }
437 438
438 439 /* Do this early. Memory window is required for loading config file. */
439 440 setup_memwin(sc);
440 441
441 442 /* Prepare the firmware for operation */
442 443 rc = prep_firmware(sc);
443 444 if (rc != 0)
444 445 goto done; /* error message displayed already */
445 446
446 447 rc = adap__pre_init_tweaks(sc);
447 448 if (rc != 0)
448 449 goto done;
449 450
450 451 rc = get_params__pre_init(sc);
451 452 if (rc != 0)
452 453 goto done; /* error message displayed already */
453 454
454 455 t4_sge_init(sc);
455 456
456 457 if (sc->flags & MASTER_PF) {
457 458 /* get basic stuff going */
458 459 rc = -t4_fw_initialize(sc, sc->mbox);
459 460 if (rc != 0) {
460 461 cxgb_printf(sc->dip, CE_WARN,
461 462 "early init failed: %d.\n", rc);
462 463 goto done;
463 464 }
464 465 }
465 466
466 467 rc = get_params__post_init(sc);
467 468 if (rc != 0)
468 469 goto done; /* error message displayed already */
469 470
470 471 rc = set_params__post_init(sc);
471 472 if (rc != 0)
472 473 goto done; /* error message displayed already */
473 474
474 475 /*
475 476 * TODO: This is the place to call t4_set_filter_mode()
476 477 */
477 478
478 479 /* tweak some settings */
479 480 t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
480 481 V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
481 482 V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
482 483 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
483 484
484 485 /*
485 486 * Work-around for bug 2619
486 487 * Set DisableVlan field in TP_RSS_CONFIG_VRT register so that the
487 488 * VLAN tag extraction is disabled.
488 489 */
489 490 t4_set_reg_field(sc, A_TP_RSS_CONFIG_VRT, F_DISABLEVLAN, F_DISABLEVLAN);
490 491
491 492 /* Store filter mode */
492 493 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
493 494 A_TP_VLAN_PRI_MAP);
494 495
495 496 /*
496 497 * First pass over all the ports - allocate VIs and initialize some
497 498 * basic parameters like mac address, port type, etc. We also figure
498 499 * out whether a port is 10G or 1G and use that information when
499 500 * calculating how many interrupts to attempt to allocate.
500 501 */
501 502 n100g = n40g = n25g = n10g = n1g = 0;
502 503 for_each_port(sc, i) {
503 504 struct port_info *pi;
504 505
505 506 pi = kmem_zalloc(sizeof (*pi), KM_SLEEP);
506 507 sc->port[i] = pi;
507 508
508 509 /* These must be set before t4_port_init */
509 510 pi->adapter = sc;
510 511 /* LINTED: E_ASSIGN_NARROW_CONV */
511 512 pi->port_id = i;
512 513 }
513 514
514 515 /* Allocate the vi and initialize parameters like mac addr */
515 516 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0);
516 517 if (rc) {
517 518 cxgb_printf(dip, CE_WARN,
518 519 "unable to initialize port: %d", rc);
519 520 goto done;
520 521 }
521 522
522 523 for_each_port(sc, i) {
523 524 struct port_info *pi = sc->port[i];
524 525
525 526 mutex_init(&pi->lock, NULL, MUTEX_DRIVER, NULL);
526 527 pi->mtu = ETHERMTU;
527 528
528 529 if (is_100G_port(pi)) {
529 530 n100g++;
530 531 pi->tmr_idx = prp->tmr_idx_10g;
531 532 pi->pktc_idx = prp->pktc_idx_10g;
532 533 } else if (is_40G_port(pi)) {
533 534 n40g++;
534 535 pi->tmr_idx = prp->tmr_idx_10g;
535 536 pi->pktc_idx = prp->pktc_idx_10g;
536 537 } else if (is_25G_port(pi)) {
537 538 n25g++;
538 539 pi->tmr_idx = prp->tmr_idx_10g;
539 540 pi->pktc_idx = prp->pktc_idx_10g;
540 541 } else if (is_10G_port(pi)) {
541 542 n10g++;
542 543 pi->tmr_idx = prp->tmr_idx_10g;
543 544 pi->pktc_idx = prp->pktc_idx_10g;
544 545 } else {
545 546 n1g++;
546 547 pi->tmr_idx = prp->tmr_idx_1g;
547 548 pi->pktc_idx = prp->pktc_idx_1g;
548 549 }
549 550
550 551 pi->xact_addr_filt = -1;
551 552 t4_mc_init(pi);
552 553
553 554 setbit(&sc->registered_device_map, i);
554 555 }
555 556
556 557 nxg = n10g + n25g + n40g + n100g;
557 558 (void) remove_extra_props(sc, nxg, n1g);
558 559
559 560 if (sc->registered_device_map == 0) {
560 561 cxgb_printf(dip, CE_WARN, "no usable ports");
561 562 rc = DDI_FAILURE;
562 563 goto done;
563 564 }
564 565
565 566 rc = cfg_itype_and_nqueues(sc, nxg, n1g, &iaq);
566 567 if (rc != 0)
567 568 goto done; /* error message displayed already */
568 569
569 570 sc->intr_type = iaq.intr_type;
570 571 sc->intr_count = iaq.nirq;
571 572
572 573 if (sc->props.multi_rings && (sc->intr_type != DDI_INTR_TYPE_MSIX)) {
573 574 sc->props.multi_rings = 0;
574 575 cxgb_printf(dip, CE_WARN,
575 576 "Multiple rings disabled as interrupt type is not MSI-X");
576 577 }
577 578
578 579 if (sc->props.multi_rings && iaq.intr_fwd) {
579 580 sc->props.multi_rings = 0;
580 581 cxgb_printf(dip, CE_WARN,
581 582 "Multiple rings disabled as interrupts are forwarded");
582 583 }
583 584
584 585 if (!sc->props.multi_rings) {
585 586 iaq.ntxq10g = 1;
586 587 iaq.ntxq1g = 1;
587 588 }
588 589 s = &sc->sge;
589 590 s->nrxq = nxg * iaq.nrxq10g + n1g * iaq.nrxq1g;
590 591 s->ntxq = nxg * iaq.ntxq10g + n1g * iaq.ntxq1g;
591 592 s->neq = s->ntxq + s->nrxq; /* the fl in an rxq is an eq */
592 593 #ifdef TCP_OFFLOAD_ENABLE
593 594 /* control queues, 1 per port + 1 mgmtq */
594 595 s->neq += sc->params.nports + 1;
595 596 #endif
596 597 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
597 598 if (iaq.intr_fwd != 0)
598 599 sc->flags |= INTR_FWD;
599 600 #ifdef TCP_OFFLOAD_ENABLE
600 601 if (is_offload(sc) != 0) {
601 602
602 603 s->nofldrxq = nxg * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
603 604 s->nofldtxq = nxg * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
604 605 s->neq += s->nofldtxq + s->nofldrxq;
605 606 s->niq += s->nofldrxq;
606 607
607 608 s->ofld_rxq = kmem_zalloc(s->nofldrxq *
608 609 sizeof (struct sge_ofld_rxq), KM_SLEEP);
609 610 s->ofld_txq = kmem_zalloc(s->nofldtxq *
610 611 sizeof (struct sge_wrq), KM_SLEEP);
611 612 s->ctrlq = kmem_zalloc(sc->params.nports *
612 613 sizeof (struct sge_wrq), KM_SLEEP);
613 614
614 615 }
615 616 #endif
616 617 s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP);
617 618 s->txq = kmem_zalloc(s->ntxq * sizeof (struct sge_txq), KM_SLEEP);
618 619 s->iqmap = kmem_zalloc(s->niq * sizeof (struct sge_iq *), KM_SLEEP);
619 620 s->eqmap = kmem_zalloc(s->neq * sizeof (struct sge_eq *), KM_SLEEP);
620 621
621 622 sc->intr_handle = kmem_zalloc(sc->intr_count *
622 623 sizeof (ddi_intr_handle_t), KM_SLEEP);
623 624
624 625 /*
625 626 * Second pass over the ports. This time we know the number of rx and
626 627 * tx queues that each port should get.
627 628 */
628 629 rqidx = tqidx = 0;
629 630 #ifdef TCP_OFFLOAD_ENABLE
630 631 ofld_rqidx = ofld_tqidx = 0;
631 632 #endif
632 633 for_each_port(sc, i) {
633 634 struct port_info *pi = sc->port[i];
634 635
635 636 if (pi == NULL)
636 637 continue;
637 638
638 639 t4_mc_cb_init(pi);
639 640 /* LINTED: E_ASSIGN_NARROW_CONV */
640 641 pi->first_rxq = rqidx;
641 642 /* LINTED: E_ASSIGN_NARROW_CONV */
642 643 pi->nrxq = (is_10XG_port(pi)) ? iaq.nrxq10g
643 644 : iaq.nrxq1g;
644 645 /* LINTED: E_ASSIGN_NARROW_CONV */
645 646 pi->first_txq = tqidx;
646 647 /* LINTED: E_ASSIGN_NARROW_CONV */
647 648 pi->ntxq = (is_10XG_port(pi)) ? iaq.ntxq10g
648 649 : iaq.ntxq1g;
649 650
650 651 rqidx += pi->nrxq;
651 652 tqidx += pi->ntxq;
652 653
653 654 #ifdef TCP_OFFLOAD_ENABLE
654 655 if (is_offload(sc) != 0) {
655 656 /* LINTED: E_ASSIGN_NARROW_CONV */
656 657 pi->first_ofld_rxq = ofld_rqidx;
657 658 pi->nofldrxq = max(1, pi->nrxq / 4);
658 659
659 660 /* LINTED: E_ASSIGN_NARROW_CONV */
660 661 pi->first_ofld_txq = ofld_tqidx;
661 662 pi->nofldtxq = max(1, pi->ntxq / 2);
662 663
663 664 ofld_rqidx += pi->nofldrxq;
664 665 ofld_tqidx += pi->nofldtxq;
665 666 }
666 667 #endif
667 668
668 669 /*
669 670 * Enable hw checksumming and LSO for all ports by default.
670 671 * They can be disabled using ndd (hw_csum and hw_lso).
671 672 */
672 673 pi->features |= (CXGBE_HW_CSUM | CXGBE_HW_LSO);
673 674 }
674 675
675 676 #ifdef TCP_OFFLOAD_ENABLE
676 677 sc->l2t = t4_init_l2t(sc);
677 678 #endif
678 679
679 680 /*
680 681 * Setup Interrupts.
681 682 */
682 683
683 684 i = 0;
684 685 rc = ddi_intr_alloc(dip, sc->intr_handle, sc->intr_type, 0,
685 686 sc->intr_count, &i, DDI_INTR_ALLOC_STRICT);
686 687 if (rc != DDI_SUCCESS) {
687 688 cxgb_printf(dip, CE_WARN,
688 689 "failed to allocate %d interrupt(s) of type %d: %d, %d",
689 690 sc->intr_count, sc->intr_type, rc, i);
690 691 goto done;
691 692 }
692 693 ASSERT(sc->intr_count == i); /* allocation was STRICT */
693 694 (void) ddi_intr_get_cap(sc->intr_handle[0], &sc->intr_cap);
694 695 (void) ddi_intr_get_pri(sc->intr_handle[0], &sc->intr_pri);
695 696 if (sc->intr_count == 1) {
696 697 ASSERT(sc->flags & INTR_FWD);
697 698 (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_all, sc,
698 699 &s->fwq);
699 700 } else {
700 701 /* Multiple interrupts. The first one is always error intr */
701 702 (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_err, sc,
702 703 NULL);
703 704 irq++;
704 705
705 706 /* The second one is always the firmware event queue */
706 707 (void) ddi_intr_add_handler(sc->intr_handle[1], t4_intr, sc,
707 708 &s->fwq);
708 709 irq++;
709 710 /*
710 711 * Note that if INTR_FWD is set then either the NIC rx
711 712 * queues or (exclusive or) the TOE rx queues will be taking
712 713 * direct interrupts.
713 714 *
714 715 * There is no need to check for is_offload(sc) as nofldrxq
715 716 * will be 0 if offload is disabled.
716 717 */
717 718 for_each_port(sc, i) {
718 719 struct port_info *pi = sc->port[i];
719 720 struct sge_rxq *rxq;
720 721 #ifdef TCP_OFFLOAD_ENABLE
721 722 struct sge_ofld_rxq *ofld_rxq;
722 723
723 724 /*
724 725 * Skip over the NIC queues if they aren't taking direct
725 726 * interrupts.
726 727 */
727 728 if ((sc->flags & INTR_FWD) &&
728 729 pi->nofldrxq > pi->nrxq)
729 730 goto ofld_queues;
730 731 #endif
731 732 rxq = &s->rxq[pi->first_rxq];
732 733 for (q = 0; q < pi->nrxq; q++, rxq++) {
733 734 (void) ddi_intr_add_handler(
734 735 sc->intr_handle[irq], t4_intr, sc,
735 736 &rxq->iq);
736 737 irq++;
737 738 }
738 739
739 740 #ifdef TCP_OFFLOAD_ENABLE
740 741 /*
741 742 * Skip over the offload queues if they aren't taking
742 743 * direct interrupts.
743 744 */
744 745 if ((sc->flags & INTR_FWD))
745 746 continue;
746 747 ofld_queues:
747 748 ofld_rxq = &s->ofld_rxq[pi->first_ofld_rxq];
748 749 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
749 750 (void) ddi_intr_add_handler(
750 751 sc->intr_handle[irq], t4_intr, sc,
751 752 &ofld_rxq->iq);
752 753 irq++;
753 754 }
754 755 #endif
755 756 }
756 757
757 758 }
758 759 sc->flags |= INTR_ALLOCATED;
759 760
760 761 ASSERT(rc == DDI_SUCCESS);
761 762 ddi_report_dev(dip);
762 763
763 764 /*
764 765 * Hardware/Firmware/etc. Version/Revision IDs.
765 766 */
766 767 t4_dump_version_info(sc);
767 768
768 769 if (n100g) {
769 770 cxgb_printf(dip, CE_NOTE,
770 771 "%dx100G (%d rxq, %d txq total) %d %s.",
771 772 n100g, rqidx, tqidx, sc->intr_count,
772 773 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
773 774 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
774 775 "fixed interrupt");
775 776 } else if (n40g) {
776 777 cxgb_printf(dip, CE_NOTE,
777 778 "%dx40G (%d rxq, %d txq total) %d %s.",
778 779 n40g, rqidx, tqidx, sc->intr_count,
779 780 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
780 781 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
781 782 "fixed interrupt");
782 783 } else if (n25g) {
783 784 cxgb_printf(dip, CE_NOTE,
784 785 "%dx25G (%d rxq, %d txq total) %d %s.",
785 786 n25g, rqidx, tqidx, sc->intr_count,
786 787 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
787 788 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
788 789 "fixed interrupt");
789 790 } else if (n10g && n1g) {
790 791 cxgb_printf(dip, CE_NOTE,
791 792 "%dx10G %dx1G (%d rxq, %d txq total) %d %s.",
792 793 n10g, n1g, rqidx, tqidx, sc->intr_count,
793 794 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
794 795 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
795 796 "fixed interrupt");
796 797 } else {
797 798 cxgb_printf(dip, CE_NOTE,
798 799 "%dx%sG (%d rxq, %d txq per port) %d %s.",
799 800 n10g ? n10g : n1g,
800 801 n10g ? "10" : "1",
801 802 n10g ? iaq.nrxq10g : iaq.nrxq1g,
802 803 n10g ? iaq.ntxq10g : iaq.ntxq1g,
803 804 sc->intr_count,
804 805 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
805 806 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
806 807 "fixed interrupt");
807 808 }
808 809
809 810 sc->ksp = setup_kstats(sc);
810 811 sc->ksp_stat = setup_wc_kstats(sc);
811 812 sc->params.drv_memwin = MEMWIN_NIC;
812 813
813 814 done:
814 815 if (rc != DDI_SUCCESS) {
815 816 (void) t4_devo_detach(dip, DDI_DETACH);
816 817
817 818 /* rc may have errno style errors or DDI errors */
818 819 rc = DDI_FAILURE;
819 820 }
820 821
821 822 return (rc);
822 823 }
823 824
824 825 static int
825 826 t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
826 827 {
827 828 int instance, i;
828 829 struct adapter *sc;
829 830 struct port_info *pi;
830 831 struct sge *s;
831 832
832 833 if (cmd != DDI_DETACH)
833 834 return (DDI_FAILURE);
834 835
835 836 instance = ddi_get_instance(dip);
836 837 sc = ddi_get_soft_state(t4_list, instance);
837 838 if (sc == NULL)
838 839 return (DDI_SUCCESS);
839 840
840 841 if (sc->flags & FULL_INIT_DONE) {
841 842 t4_intr_disable(sc);
842 843 for_each_port(sc, i) {
843 844 pi = sc->port[i];
844 845 if (pi && pi->flags & PORT_INIT_DONE)
845 846 (void) port_full_uninit(pi);
846 847 }
847 848 (void) adapter_full_uninit(sc);
848 849 }
849 850
850 851 /* Safe to call no matter what */
851 852 ddi_prop_remove_all(dip);
852 853 ddi_remove_minor_node(dip, NULL);
853 854
854 855 for (i = 0; i < NCHAN; i++) {
855 856 if (sc->tq[i]) {
856 857 ddi_taskq_wait(sc->tq[i]);
857 858 ddi_taskq_destroy(sc->tq[i]);
858 859 }
859 860 }
860 861
861 862 if (sc->ksp != NULL)
862 863 kstat_delete(sc->ksp);
863 864 if (sc->ksp_stat != NULL)
864 865 kstat_delete(sc->ksp_stat);
865 866
866 867 s = &sc->sge;
867 868 if (s->rxq != NULL)
868 869 kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq));
869 870 #ifdef TCP_OFFLOAD_ENABLE
870 871 if (s->ofld_txq != NULL)
871 872 kmem_free(s->ofld_txq, s->nofldtxq * sizeof (struct sge_wrq));
872 873 if (s->ofld_rxq != NULL)
873 874 kmem_free(s->ofld_rxq,
874 875 s->nofldrxq * sizeof (struct sge_ofld_rxq));
875 876 if (s->ctrlq != NULL)
876 877 kmem_free(s->ctrlq,
877 878 sc->params.nports * sizeof (struct sge_wrq));
878 879 #endif
879 880 if (s->txq != NULL)
880 881 kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq));
881 882 if (s->iqmap != NULL)
882 883 kmem_free(s->iqmap, s->niq * sizeof (struct sge_iq *));
883 884 if (s->eqmap != NULL)
884 885 kmem_free(s->eqmap, s->neq * sizeof (struct sge_eq *));
885 886
886 887 if (s->rxbuf_cache != NULL)
887 888 rxbuf_cache_destroy(s->rxbuf_cache);
888 889
889 890 if (sc->flags & INTR_ALLOCATED) {
890 891 for (i = 0; i < sc->intr_count; i++) {
891 892 (void) ddi_intr_remove_handler(sc->intr_handle[i]);
892 893 (void) ddi_intr_free(sc->intr_handle[i]);
893 894 }
894 895 sc->flags &= ~INTR_ALLOCATED;
895 896 }
896 897
897 898 if (sc->intr_handle != NULL) {
898 899 kmem_free(sc->intr_handle,
899 900 sc->intr_count * sizeof (*sc->intr_handle));
900 901 }
901 902
902 903 for_each_port(sc, i) {
903 904 pi = sc->port[i];
904 905 if (pi != NULL) {
905 906 mutex_destroy(&pi->lock);
906 907 kmem_free(pi, sizeof (*pi));
907 908 clrbit(&sc->registered_device_map, i);
908 909 }
909 910 }
910 911
911 912 if (sc->flags & FW_OK)
912 913 (void) t4_fw_bye(sc, sc->mbox);
913 914
914 915 if (sc->reg1h != NULL)
915 916 ddi_regs_map_free(&sc->reg1h);
916 917
917 918 if (sc->regh != NULL)
918 919 ddi_regs_map_free(&sc->regh);
919 920
920 921 if (sc->pci_regh != NULL)
921 922 pci_config_teardown(&sc->pci_regh);
922 923
923 924 mutex_enter(&t4_adapter_list_lock);
924 925 SLIST_REMOVE_HEAD(&t4_adapter_list, link);
925 926 mutex_exit(&t4_adapter_list_lock);
926 927
927 928 mutex_destroy(&sc->lock);
928 929 cv_destroy(&sc->cv);
929 930 mutex_destroy(&sc->sfl_lock);
930 931
931 932 #ifdef DEBUG
932 933 bzero(sc, sizeof (*sc));
933 934 #endif
934 935 ddi_soft_state_free(t4_list, instance);
935 936
936 937 return (DDI_SUCCESS);
937 938 }
938 939
939 940 static int
940 941 t4_devo_quiesce(dev_info_t *dip)
941 942 {
942 943 int instance;
943 944 struct adapter *sc;
944 945
945 946 instance = ddi_get_instance(dip);
946 947 sc = ddi_get_soft_state(t4_list, instance);
947 948 if (sc == NULL)
948 949 return (DDI_SUCCESS);
949 950
950 951 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
951 952 t4_intr_disable(sc);
952 953 t4_write_reg(sc, A_PL_RST, F_PIORSTMODE | F_PIORST);
953 954
954 955 return (DDI_SUCCESS);
955 956 }
956 957
957 958 static int
958 959 t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg,
959 960 void *result)
960 961 {
961 962 char s[4];
962 963 struct port_info *pi;
963 964 dev_info_t *child = (dev_info_t *)arg;
964 965
965 966 switch (op) {
966 967 case DDI_CTLOPS_REPORTDEV:
967 968 pi = ddi_get_parent_data(rdip);
968 969 pi->instance = ddi_get_instance(dip);
969 970 pi->child_inst = ddi_get_instance(rdip);
970 971 cmn_err(CE_CONT, "?%s%d is port %s on %s%d\n",
971 972 ddi_node_name(rdip), ddi_get_instance(rdip),
972 973 ddi_get_name_addr(rdip), ddi_driver_name(dip),
973 974 ddi_get_instance(dip));
974 975 return (DDI_SUCCESS);
975 976
976 977 case DDI_CTLOPS_INITCHILD:
977 978 pi = ddi_get_parent_data(child);
978 979 if (pi == NULL)
979 980 return (DDI_NOT_WELL_FORMED);
980 981 (void) snprintf(s, sizeof (s), "%d", pi->port_id);
981 982 ddi_set_name_addr(child, s);
982 983 return (DDI_SUCCESS);
983 984
984 985 case DDI_CTLOPS_UNINITCHILD:
985 986 ddi_set_name_addr(child, NULL);
986 987 return (DDI_SUCCESS);
987 988
988 989 case DDI_CTLOPS_ATTACH:
989 990 case DDI_CTLOPS_DETACH:
990 991 return (DDI_SUCCESS);
991 992
992 993 default:
993 994 return (ddi_ctlops(dip, rdip, op, arg, result));
994 995 }
995 996 }
996 997
997 998 static int
998 999 t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, void *arg,
999 1000 dev_info_t **cdipp)
1000 1001 {
1001 1002 int instance, i;
1002 1003 struct adapter *sc;
1003 1004
1004 1005 instance = ddi_get_instance(dip);
1005 1006 sc = ddi_get_soft_state(t4_list, instance);
1006 1007
1007 1008 if (op == BUS_CONFIG_ONE) {
1008 1009 char *c;
1009 1010
1010 1011 /*
1011 1012 * arg is something like "cxgb@0" where 0 is the port_id hanging
1012 1013 * off this nexus.
1013 1014 */
1014 1015
1015 1016 c = arg;
1016 1017 while (*(c + 1))
1017 1018 c++;
1018 1019
1019 1020 /* There should be exactly 1 digit after '@' */
1020 1021 if (*(c - 1) != '@')
1021 1022 return (NDI_FAILURE);
1022 1023
1023 1024 i = *c - '0';
1024 1025
1025 1026 if (add_child_node(sc, i) != 0)
1026 1027 return (NDI_FAILURE);
1027 1028
1028 1029 flags |= NDI_ONLINE_ATTACH;
1029 1030
1030 1031 } else if (op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER) {
1031 1032 /* Allocate and bind all child device nodes */
1032 1033 for_each_port(sc, i)
1033 1034 (void) add_child_node(sc, i);
1034 1035 flags |= NDI_ONLINE_ATTACH;
1035 1036 }
1036 1037
1037 1038 return (ndi_busop_bus_config(dip, flags, op, arg, cdipp, 0));
1038 1039 }
1039 1040
1040 1041 static int
1041 1042 t4_bus_unconfig(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
1042 1043 void *arg)
1043 1044 {
1044 1045 int instance, i, rc;
1045 1046 struct adapter *sc;
1046 1047
1047 1048 instance = ddi_get_instance(dip);
1048 1049 sc = ddi_get_soft_state(t4_list, instance);
1049 1050
1050 1051 if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ALL ||
1051 1052 op == BUS_UNCONFIG_DRIVER)
1052 1053 flags |= NDI_UNCONFIG;
1053 1054
1054 1055 rc = ndi_busop_bus_unconfig(dip, flags, op, arg);
1055 1056 if (rc != 0)
1056 1057 return (rc);
1057 1058
1058 1059 if (op == BUS_UNCONFIG_ONE) {
1059 1060 char *c;
1060 1061
1061 1062 c = arg;
1062 1063 while (*(c + 1))
1063 1064 c++;
1064 1065
1065 1066 if (*(c - 1) != '@')
1066 1067 return (NDI_SUCCESS);
1067 1068
1068 1069 i = *c - '0';
1069 1070
1070 1071 rc = remove_child_node(sc, i);
1071 1072
1072 1073 } else if (op == BUS_UNCONFIG_ALL || op == BUS_UNCONFIG_DRIVER) {
1073 1074
1074 1075 for_each_port(sc, i)
1075 1076 (void) remove_child_node(sc, i);
1076 1077 }
1077 1078
1078 1079 return (rc);
1079 1080 }
1080 1081
1081 1082 /* ARGSUSED */
1082 1083 static int
1083 1084 t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1084 1085 {
1085 1086 struct adapter *sc;
1086 1087
1087 1088 if (otyp != OTYP_CHR)
1088 1089 return (EINVAL);
1089 1090
1090 1091 sc = ddi_get_soft_state(t4_list, getminor(*devp));
1091 1092 if (sc == NULL)
1092 1093 return (ENXIO);
1093 1094
1094 1095 return (atomic_cas_uint(&sc->open, 0, EBUSY));
1095 1096 }
1096 1097
1097 1098 /* ARGSUSED */
1098 1099 static int
1099 1100 t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp)
1100 1101 {
1101 1102 struct adapter *sc;
1102 1103
1103 1104 sc = ddi_get_soft_state(t4_list, getminor(dev));
1104 1105 if (sc == NULL)
1105 1106 return (EINVAL);
1106 1107
1107 1108 (void) atomic_swap_uint(&sc->open, 0);
1108 1109 return (0);
1109 1110 }
1110 1111
1111 1112 /* ARGSUSED */
1112 1113 static int
1113 1114 t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, int *rp)
1114 1115 {
1115 1116 int instance;
1116 1117 struct adapter *sc;
1117 1118 void *data = (void *)d;
1118 1119
1119 1120 if (crgetuid(credp) != 0)
1120 1121 return (EPERM);
1121 1122
1122 1123 instance = getminor(dev);
1123 1124 sc = ddi_get_soft_state(t4_list, instance);
1124 1125 if (sc == NULL)
1125 1126 return (EINVAL);
1126 1127
1127 1128 return (t4_ioctl(sc, cmd, data, mode));
1128 1129 }
1129 1130
1130 1131 static unsigned int
1131 1132 getpf(struct adapter *sc)
1132 1133 {
1133 1134 int rc, *data;
1134 1135 uint_t n, pf;
1135 1136
1136 1137 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
1137 1138 DDI_PROP_DONTPASS, "reg", &data, &n);
1138 1139 if (rc != DDI_SUCCESS) {
1139 1140 cxgb_printf(sc->dip, CE_WARN,
1140 1141 "failed to lookup \"reg\" property: %d", rc);
1141 1142 return (0xff);
1142 1143 }
1143 1144
1144 1145 pf = PCI_REG_FUNC_G(data[0]);
1145 1146 ddi_prop_free(data);
1146 1147
1147 1148 return (pf);
1148 1149 }
1149 1150
1150 1151
1151 1152 static struct fw_info *
1152 1153 find_fw_info(int chip)
1153 1154 {
1154 1155 u32 i;
1155 1156
1156 1157 fi[0].chip = CHELSIO_T4;
1157 1158 fi[0].fw_hdr.chip = FW_HDR_CHIP_T4;
1158 1159 fi[0].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T4));
1159 1160 fi[0].fw_hdr.intfver_nic = FW_INTFVER(T4, NIC);
1160 1161 fi[0].fw_hdr.intfver_vnic = FW_INTFVER(T4, VNIC);
1161 1162 fi[0].fw_hdr.intfver_ofld = FW_INTFVER(T4, OFLD);
1162 1163 fi[0].fw_hdr.intfver_ri = FW_INTFVER(T4, RI);
1163 1164 fi[0].fw_hdr.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU);
1164 1165 fi[0].fw_hdr.intfver_iscsi = FW_INTFVER(T4, ISCSI);
1165 1166 fi[0].fw_hdr.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU);
1166 1167 fi[0].fw_hdr.intfver_fcoe = FW_INTFVER(T4, FCOE);
1167 1168
1168 1169 fi[1].chip = CHELSIO_T5;
1169 1170 fi[1].fw_hdr.chip = FW_HDR_CHIP_T5;
1170 1171 fi[1].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T5));
1171 1172 fi[1].fw_hdr.intfver_nic = FW_INTFVER(T5, NIC);
1172 1173 fi[1].fw_hdr.intfver_vnic = FW_INTFVER(T5, VNIC);
1173 1174 fi[1].fw_hdr.intfver_ofld = FW_INTFVER(T5, OFLD);
1174 1175 fi[1].fw_hdr.intfver_ri = FW_INTFVER(T5, RI);
1175 1176 fi[1].fw_hdr.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU);
1176 1177 fi[1].fw_hdr.intfver_iscsi = FW_INTFVER(T5, ISCSI);
1177 1178 fi[1].fw_hdr.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU);
1178 1179 fi[1].fw_hdr.intfver_fcoe = FW_INTFVER(T5, FCOE);
1179 1180
1180 1181 fi[2].chip = CHELSIO_T6;
1181 1182 fi[2].fw_hdr.chip = FW_HDR_CHIP_T6;
1182 1183 fi[2].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T6));
1183 1184 fi[2].fw_hdr.intfver_nic = FW_INTFVER(T6, NIC);
1184 1185 fi[2].fw_hdr.intfver_vnic = FW_INTFVER(T6, VNIC);
1185 1186 fi[2].fw_hdr.intfver_ofld = FW_INTFVER(T6, OFLD);
1186 1187 fi[2].fw_hdr.intfver_ri = FW_INTFVER(T6, RI);
1187 1188 fi[2].fw_hdr.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU);
1188 1189 fi[2].fw_hdr.intfver_iscsi = FW_INTFVER(T6, ISCSI);
1189 1190 fi[2].fw_hdr.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU);
1190 1191 fi[2].fw_hdr.intfver_fcoe = FW_INTFVER(T6, FCOE);
1191 1192
1192 1193 for (i = 0; i < ARRAY_SIZE(fi); i++) {
1193 1194 if (fi[i].chip == chip)
1194 1195 return &fi[i];
1195 1196 }
1196 1197
1197 1198 return NULL;
1198 1199 }
1199 1200
1200 1201 /*
1201 1202 * Install a compatible firmware (if required), establish contact with it,
1202 1203 * become the master, and reset the device.
1203 1204 */
1204 1205 static int
1205 1206 prep_firmware(struct adapter *sc)
1206 1207 {
1207 1208 int rc;
1208 1209 int fw_size;
1209 1210 int reset = 1;
1210 1211 enum dev_state state;
1211 1212 unsigned char *fw_data;
1212 1213 struct fw_info *fw_info;
1213 1214 struct fw_hdr *card_fw;
1214 1215
1215 1216 struct driver_properties *p = &sc->props;
1216 1217
1217 1218 /* Contact firmware, request master */
1218 1219 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
1219 1220 if (rc < 0) {
1220 1221 rc = -rc;
1221 1222 cxgb_printf(sc->dip, CE_WARN,
1222 1223 "failed to connect to the firmware: %d.", rc);
1223 1224 return (rc);
1224 1225 }
1225 1226
1226 1227 if (rc == sc->mbox)
1227 1228 sc->flags |= MASTER_PF;
1228 1229
1229 1230 /* We may need FW version info for later reporting */
1230 1231 t4_get_version_info(sc);
1231 1232 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(sc->params.chip));
1232 1233 /* allocate memory to read the header of the firmware on the
1233 1234 * card
1234 1235 */
1235 1236 if (!fw_info) {
1236 1237 cxgb_printf(sc->dip, CE_WARN,
1237 1238 "unable to look up firmware information for chip %d.\n",
1238 1239 CHELSIO_CHIP_VERSION(sc->params.chip));
1239 1240 return EINVAL;
1240 1241 }
1241 1242 card_fw = kmem_zalloc(sizeof(*card_fw), KM_SLEEP);
1242 1243 if(!card_fw) {
1243 1244 cxgb_printf(sc->dip, CE_WARN,
1244 1245 "Memory allocation for card FW header failed\n");
1245 1246 return ENOMEM;
1246 1247 }
1247 1248 switch(CHELSIO_CHIP_VERSION(sc->params.chip)) {
1248 1249 case CHELSIO_T4:
1249 1250 fw_data = t4fw_data;
1250 1251 fw_size = t4fw_size;
1251 1252 break;
1252 1253 case CHELSIO_T5:
1253 1254 fw_data = t5fw_data;
1254 1255 fw_size = t5fw_size;
1255 1256 break;
1256 1257 case CHELSIO_T6:
1257 1258 fw_data = t6fw_data;
1258 1259 fw_size = t6fw_size;
1259 1260 break;
1260 1261 default:
1261 1262 cxgb_printf(sc->dip, CE_WARN, "Adapter type not supported\n");
1262 1263 kmem_free(card_fw, sizeof(*card_fw));
1263 1264 return EINVAL;
1264 1265 }
1265 1266
1266 1267 rc = -t4_prep_fw(sc, fw_info, fw_data, fw_size, card_fw,
1267 1268 p->t4_fw_install, state, &reset);
1268 1269
1269 1270 kmem_free(card_fw, sizeof(*card_fw));
1270 1271
1271 1272 if (rc != 0) {
1272 1273 cxgb_printf(sc->dip, CE_WARN,
1273 1274 "failed to install firmware: %d", rc);
1274 1275 return (rc);
1275 1276 } else {
1276 1277 /* refresh */
1277 1278 (void) t4_check_fw_version(sc);
1278 1279 }
1279 1280
1280 1281 /* Reset device */
1281 1282 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1282 1283 if (rc != 0) {
1283 1284 cxgb_printf(sc->dip, CE_WARN,
1284 1285 "firmware reset failed: %d.", rc);
1285 1286 if (rc != ETIMEDOUT && rc != EIO)
1286 1287 (void) t4_fw_bye(sc, sc->mbox);
1287 1288 return (rc);
1288 1289 }
1289 1290
1290 1291 /* Partition adapter resources as specified in the config file. */
1291 1292 if (sc->flags & MASTER_PF) {
1292 1293 /* Handle default vs special T4 config file */
1293 1294
1294 1295 rc = partition_resources(sc);
1295 1296 if (rc != 0)
1296 1297 goto err; /* error message displayed already */
1297 1298 }
1298 1299
1299 1300 sc->flags |= FW_OK;
1300 1301 return (0);
1301 1302 err:
1302 1303 return (rc);
1303 1304
1304 1305 }
1305 1306
1306 1307 static const struct memwin t4_memwin[] = {
1307 1308 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1308 1309 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1309 1310 { MEMWIN2_BASE, MEMWIN2_APERTURE }
1310 1311 };
1311 1312
1312 1313 static const struct memwin t5_memwin[] = {
1313 1314 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1314 1315 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1315 1316 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
1316 1317 };
1317 1318
1318 1319 #define FW_PARAM_DEV(param) \
1319 1320 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
1320 1321 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
1321 1322 #define FW_PARAM_PFVF(param) \
1322 1323 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
1323 1324 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
1324 1325
1325 1326 /*
1326 1327 * Verify that the memory range specified by the memtype/offset/len pair is
1327 1328 * valid and lies entirely within the memtype specified. The global address of
1328 1329 * the start of the range is returned in addr.
1329 1330 */
1330 1331 int
1331 1332 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1332 1333 uint32_t *addr)
1333 1334 {
1334 1335 uint32_t em, addr_len, maddr, mlen;
1335 1336
1336 1337 /* Memory can only be accessed in naturally aligned 4 byte units */
1337 1338 if (off & 3 || len & 3 || len == 0)
1338 1339 return (EINVAL);
1339 1340
1340 1341 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1341 1342 switch (mtype) {
1342 1343 case MEM_EDC0:
1343 1344 if (!(em & F_EDRAM0_ENABLE))
1344 1345 return (EINVAL);
1345 1346 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1346 1347 maddr = G_EDRAM0_BASE(addr_len) << 20;
1347 1348 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1348 1349 break;
1349 1350 case MEM_EDC1:
1350 1351 if (!(em & F_EDRAM1_ENABLE))
1351 1352 return (EINVAL);
1352 1353 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1353 1354 maddr = G_EDRAM1_BASE(addr_len) << 20;
1354 1355 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1355 1356 break;
1356 1357 case MEM_MC:
1357 1358 if (!(em & F_EXT_MEM_ENABLE))
1358 1359 return (EINVAL);
1359 1360 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1360 1361 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1361 1362 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1362 1363 break;
1363 1364 case MEM_MC1:
1364 1365 if (is_t4(sc->params.chip) || !(em & F_EXT_MEM1_ENABLE))
1365 1366 return (EINVAL);
1366 1367 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1367 1368 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1368 1369 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1369 1370 break;
1370 1371 default:
1371 1372 return (EINVAL);
1372 1373 }
1373 1374
1374 1375 if (mlen > 0 && off < mlen && off + len <= mlen) {
1375 1376 *addr = maddr + off; /* global address */
1376 1377 return (0);
1377 1378 }
1378 1379
1379 1380 return (EFAULT);
1380 1381 }
1381 1382
1382 1383 void
1383 1384 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1384 1385 {
1385 1386 const struct memwin *mw;
1386 1387
1387 1388 if (is_t4(sc->params.chip)) {
1388 1389 mw = &t4_memwin[win];
1389 1390 } else {
1390 1391 mw = &t5_memwin[win];
1391 1392 }
1392 1393
1393 1394 if (base != NULL)
1394 1395 *base = mw->base;
1395 1396 if (aperture != NULL)
1396 1397 *aperture = mw->aperture;
1397 1398 }
1398 1399
1399 1400 /*
1400 1401 * Upload configuration file to card's memory.
1401 1402 */
1402 1403 static int
1403 1404 upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma)
1404 1405 {
1405 1406 int rc = 0, cflen;
1406 1407 u_int i, n;
1407 1408 uint32_t param, val, addr, mtype, maddr;
1408 1409 uint32_t off, mw_base, mw_aperture;
1409 1410 const uint32_t *cfdata;
1410 1411
1411 1412 /* Figure out where the firmware wants us to upload it. */
1412 1413 param = FW_PARAM_DEV(CF);
1413 1414 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1414 1415 if (rc != 0) {
1415 1416 /* Firmwares without config file support will fail this way */
1416 1417 cxgb_printf(sc->dip, CE_WARN,
1417 1418 "failed to query config file location: %d.\n", rc);
1418 1419 return (rc);
1419 1420 }
1420 1421 *mt = mtype = G_FW_PARAMS_PARAM_Y(val);
1421 1422 *ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;
1422 1423
1423 1424 switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
1424 1425 case CHELSIO_T4:
1425 1426 cflen = t4cfg_size & ~3;
1426 1427 /* LINTED: E_BAD_PTR_CAST_ALIGN */
1427 1428 cfdata = (const uint32_t *)t4cfg_data;
1428 1429 break;
1429 1430 case CHELSIO_T5:
1430 1431 cflen = t5cfg_size & ~3;
1431 1432 /* LINTED: E_BAD_PTR_CAST_ALIGN */
1432 1433 cfdata = (const uint32_t *)t5cfg_data;
1433 1434 break;
1434 1435 case CHELSIO_T6:
1435 1436 cflen = t6cfg_size & ~3;
1436 1437 /* LINTED: E_BAD_PTR_CAST_ALIGN */
1437 1438 cfdata = (const uint32_t *)t6cfg_data;
1438 1439 break;
1439 1440 default:
1440 1441 cxgb_printf(sc->dip, CE_WARN,
1441 1442 "Invalid Adapter detected\n");
1442 1443 return EINVAL;
1443 1444 }
1444 1445
1445 1446 if (cflen > FLASH_CFG_MAX_SIZE) {
1446 1447 cxgb_printf(sc->dip, CE_WARN,
1447 1448 "config file too long (%d, max allowed is %d). ",
1448 1449 cflen, FLASH_CFG_MAX_SIZE);
1449 1450 return (EFBIG);
1450 1451 }
1451 1452
1452 1453 rc = validate_mt_off_len(sc, mtype, maddr, cflen, &addr);
1453 1454 if (rc != 0) {
1454 1455
1455 1456 cxgb_printf(sc->dip, CE_WARN,
1456 1457 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
1457 1458 "Will try to use the config on the card, if any.\n",
1458 1459 __func__, mtype, maddr, cflen, rc);
1459 1460 return (EFAULT);
1460 1461 }
1461 1462
1462 1463 memwin_info(sc, 2, &mw_base, &mw_aperture);
1463 1464 while (cflen) {
1464 1465 off = position_memwin(sc, 2, addr);
1465 1466 n = min(cflen, mw_aperture - off);
1466 1467 for (i = 0; i < n; i += 4)
1467 1468 t4_write_reg(sc, mw_base + off + i, *cfdata++);
1468 1469 cflen -= n;
1469 1470 addr += n;
1470 1471 }
1471 1472
1472 1473 return (rc);
1473 1474 }
1474 1475
1475 1476 /*
1476 1477 * Partition chip resources for use between various PFs, VFs, etc. This is done
1477 1478 * by uploading the firmware configuration file to the adapter and instructing
1478 1479 * the firmware to process it.
1479 1480 */
1480 1481 static int
1481 1482 partition_resources(struct adapter *sc)
1482 1483 {
1483 1484 int rc;
1484 1485 struct fw_caps_config_cmd caps;
1485 1486 uint32_t mtype, maddr, finicsum, cfcsum;
1486 1487
1487 1488 rc = upload_config_file(sc, &mtype, &maddr);
1488 1489 if (rc != 0) {
1489 1490 mtype = FW_MEMTYPE_CF_FLASH;
1490 1491 maddr = t4_flash_cfg_addr(sc);
1491 1492 }
1492 1493
1493 1494 bzero(&caps, sizeof (caps));
1494 1495 caps.op_to_write = BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1495 1496 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1496 1497 caps.cfvalid_to_len16 = BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID |
1497 1498 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
1498 1499 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
1499 1500 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
1500 1501 if (rc != 0) {
1501 1502 cxgb_printf(sc->dip, CE_WARN,
1502 1503 "failed to pre-process config file: %d.\n", rc);
1503 1504 return (rc);
1504 1505 }
1505 1506
1506 1507 finicsum = ntohl(caps.finicsum);
1507 1508 cfcsum = ntohl(caps.cfcsum);
1508 1509 if (finicsum != cfcsum) {
1509 1510 cxgb_printf(sc->dip, CE_WARN,
1510 1511 "WARNING: config file checksum mismatch: %08x %08x\n",
1511 1512 finicsum, cfcsum);
1512 1513 }
1513 1514 sc->cfcsum = cfcsum;
1514 1515
1515 1516 /* TODO: Need to configure this correctly */
1516 1517 caps.toecaps = htons(FW_CAPS_CONFIG_TOE);
1517 1518 caps.iscsicaps = 0;
1518 1519 caps.rdmacaps = 0;
1519 1520 caps.fcoecaps = 0;
1520 1521 /* TODO: Disable VNIC cap for now */
1521 1522 caps.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
1522 1523
1523 1524 caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1524 1525 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1525 1526 caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
1526 1527 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), NULL);
1527 1528 if (rc != 0) {
1528 1529 cxgb_printf(sc->dip, CE_WARN,
1529 1530 "failed to process config file: %d.\n", rc);
1530 1531 return (rc);
1531 1532 }
1532 1533
1533 1534 return (0);
1534 1535 }
1535 1536
1536 1537 /*
1537 1538 * Tweak configuration based on module parameters, etc. Most of these have
1538 1539 * defaults assigned to them by Firmware Configuration Files (if we're using
1539 1540 * them) but need to be explicitly set if we're using hard-coded
1540 1541 * initialization. But even in the case of using Firmware Configuration
1541 1542 * Files, we'd like to expose the ability to change these via module
1542 1543 * parameters so these are essentially common tweaks/settings for
1543 1544 * Configuration Files and hard-coded initialization ...
1544 1545 */
1545 1546 static int
1546 1547 adap__pre_init_tweaks(struct adapter *sc)
1547 1548 {
1548 1549 int rx_dma_offset = 2; /* Offset of RX packets into DMA buffers */
1549 1550
1550 1551 /*
1551 1552 * Fix up various Host-Dependent Parameters like Page Size, Cache
1552 1553 * Line Size, etc. The firmware default is for a 4KB Page Size and
1553 1554 * 64B Cache Line Size ...
1554 1555 */
1555 1556 (void) t4_fixup_host_params_compat(sc, PAGE_SIZE, CACHE_LINE, T5_LAST_REV);
1556 1557
1557 1558 t4_set_reg_field(sc, A_SGE_CONTROL,
1558 1559 V_PKTSHIFT(M_PKTSHIFT), V_PKTSHIFT(rx_dma_offset));
1559 1560
1560 1561 return 0;
1561 1562 }
1562 1563 /*
1563 1564 * Retrieve parameters that are needed (or nice to have) prior to calling
1564 1565 * t4_sge_init and t4_fw_initialize.
1565 1566 */
1566 1567 static int
1567 1568 get_params__pre_init(struct adapter *sc)
1568 1569 {
1569 1570 int rc;
1570 1571 uint32_t param[2], val[2];
1571 1572 struct fw_devlog_cmd cmd;
1572 1573 struct devlog_params *dlog = &sc->params.devlog;
1573 1574
1574 1575 /*
1575 1576 * Grab the raw VPD parameters.
1576 1577 */
1577 1578 rc = -t4_get_raw_vpd_params(sc, &sc->params.vpd);
1578 1579 if (rc != 0) {
1579 1580 cxgb_printf(sc->dip, CE_WARN,
1580 1581 "failed to query VPD parameters (pre_init): %d.\n", rc);
1581 1582 return (rc);
1582 1583 }
1583 1584
1584 1585 param[0] = FW_PARAM_DEV(PORTVEC);
1585 1586 param[1] = FW_PARAM_DEV(CCLK);
1586 1587 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1587 1588 if (rc != 0) {
1588 1589 cxgb_printf(sc->dip, CE_WARN,
1589 1590 "failed to query parameters (pre_init): %d.\n", rc);
1590 1591 return (rc);
1591 1592 }
1592 1593
1593 1594 sc->params.portvec = val[0];
1594 1595 sc->params.nports = 0;
1595 1596 while (val[0]) {
1596 1597 sc->params.nports++;
1597 1598 val[0] &= val[0] - 1;
1598 1599 }
1599 1600
1600 1601 sc->params.vpd.cclk = val[1];
1601 1602
1602 1603 /* Read device log parameters. */
1603 1604 bzero(&cmd, sizeof (cmd));
1604 1605 cmd.op_to_write = htonl(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1605 1606 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1606 1607 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1607 1608 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof (cmd), &cmd);
1608 1609 if (rc != 0) {
1609 1610 cxgb_printf(sc->dip, CE_WARN,
1610 1611 "failed to get devlog parameters: %d.\n", rc);
1611 1612 bzero(dlog, sizeof (*dlog));
1612 1613 rc = 0; /* devlog isn't critical for device operation */
1613 1614 } else {
1614 1615 val[0] = ntohl(cmd.memtype_devlog_memaddr16_devlog);
1615 1616 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
1616 1617 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
1617 1618 dlog->size = ntohl(cmd.memsize_devlog);
1618 1619 }
1619 1620
1620 1621 return (rc);
1621 1622 }
1622 1623
1623 1624 /*
1624 1625 * Retrieve various parameters that are of interest to the driver. The device
1625 1626 * has been initialized by the firmware at this point.
1626 1627 */
1627 1628 static int
1628 1629 get_params__post_init(struct adapter *sc)
1629 1630 {
1630 1631 int rc;
1631 1632 uint32_t param[7], val[7];
1632 1633 struct fw_caps_config_cmd caps;
1633 1634
1634 1635 param[0] = FW_PARAM_PFVF(IQFLINT_START);
1635 1636 param[1] = FW_PARAM_PFVF(EQ_START);
1636 1637 param[2] = FW_PARAM_PFVF(FILTER_START);
1637 1638 param[3] = FW_PARAM_PFVF(FILTER_END);
1638 1639 param[4] = FW_PARAM_PFVF(L2T_START);
1639 1640 param[5] = FW_PARAM_PFVF(L2T_END);
1640 1641 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1641 1642 if (rc != 0) {
1642 1643 cxgb_printf(sc->dip, CE_WARN,
1643 1644 "failed to query parameters (post_init): %d.\n", rc);
1644 1645 return (rc);
1645 1646 }
1646 1647
1647 1648 /* LINTED: E_ASSIGN_NARROW_CONV */
1648 1649 sc->sge.iq_start = val[0];
1649 1650 sc->sge.eq_start = val[1];
1650 1651 sc->tids.ftid_base = val[2];
1651 1652 sc->tids.nftids = val[3] - val[2] + 1;
1652 1653 sc->vres.l2t.start = val[4];
1653 1654 sc->vres.l2t.size = val[5] - val[4] + 1;
1654 1655
1655 1656 /* get capabilities */
1656 1657 bzero(&caps, sizeof (caps));
1657 1658 caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1658 1659 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1659 1660 caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
1660 1661 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
1661 1662 if (rc != 0) {
1662 1663 cxgb_printf(sc->dip, CE_WARN,
1663 1664 "failed to get card capabilities: %d.\n", rc);
1664 1665 return (rc);
1665 1666 }
1666 1667
1667 1668 if (caps.toecaps != 0) {
1668 1669 /* query offload-related parameters */
1669 1670 param[0] = FW_PARAM_DEV(NTID);
1670 1671 param[1] = FW_PARAM_PFVF(SERVER_START);
1671 1672 param[2] = FW_PARAM_PFVF(SERVER_END);
1672 1673 param[3] = FW_PARAM_PFVF(TDDP_START);
1673 1674 param[4] = FW_PARAM_PFVF(TDDP_END);
1674 1675 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1675 1676 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1676 1677 if (rc != 0) {
1677 1678 cxgb_printf(sc->dip, CE_WARN,
1678 1679 "failed to query TOE parameters: %d.\n", rc);
1679 1680 return (rc);
1680 1681 }
1681 1682 sc->tids.ntids = val[0];
1682 1683 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1683 1684 sc->tids.stid_base = val[1];
1684 1685 sc->tids.nstids = val[2] - val[1] + 1;
1685 1686 sc->vres.ddp.start = val[3];
1686 1687 sc->vres.ddp.size = val[4] - val[3] + 1;
1687 1688 sc->params.ofldq_wr_cred = val[5];
1688 1689 sc->params.offload = 1;
1689 1690 }
1690 1691
1691 1692 /* These are finalized by FW initialization, load their values now */
1692 1693 val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
1693 1694 sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
1694 1695 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
1695 1696 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
1696 1697
1697 1698 return (rc);
1698 1699 }
1699 1700
1700 1701 static int
1701 1702 set_params__post_init(struct adapter *sc)
1702 1703 {
1703 1704 uint32_t param, val;
1704 1705
1705 1706 /* ask for encapsulated CPLs */
1706 1707 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
1707 1708 val = 1;
1708 1709 (void) t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1709 1710
1710 1711 return (0);
1711 1712 }
1712 1713
1713 1714 /* TODO: verify */
1714 1715 static void
1715 1716 setup_memwin(struct adapter *sc)
1716 1717 {
1717 1718 pci_regspec_t *data;
1718 1719 int rc;
1719 1720 uint_t n;
1720 1721 uintptr_t bar0;
1721 1722 uintptr_t mem_win0_base, mem_win1_base, mem_win2_base;
1722 1723 uintptr_t mem_win2_aperture;
1723 1724
1724 1725 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
1725 1726 DDI_PROP_DONTPASS, "assigned-addresses", (int **)&data, &n);
1726 1727 if (rc != DDI_SUCCESS) {
1727 1728 cxgb_printf(sc->dip, CE_WARN,
1728 1729 "failed to lookup \"assigned-addresses\" property: %d", rc);
1729 1730 return;
1730 1731 }
1731 1732 n /= sizeof (*data);
1732 1733
1733 1734 bar0 = ((uint64_t)data[0].pci_phys_mid << 32) | data[0].pci_phys_low;
1734 1735 ddi_prop_free(data);
1735 1736
1736 1737 if (is_t4(sc->params.chip)) {
1737 1738 mem_win0_base = bar0 + MEMWIN0_BASE;
1738 1739 mem_win1_base = bar0 + MEMWIN1_BASE;
1739 1740 mem_win2_base = bar0 + MEMWIN2_BASE;
1740 1741 mem_win2_aperture = MEMWIN2_APERTURE;
1741 1742 } else {
1742 1743 /* For T5, only relative offset inside the PCIe BAR is passed */
1743 1744 mem_win0_base = MEMWIN0_BASE;
1744 1745 mem_win1_base = MEMWIN1_BASE;
1745 1746 mem_win2_base = MEMWIN2_BASE_T5;
1746 1747 mem_win2_aperture = MEMWIN2_APERTURE_T5;
1747 1748 }
1748 1749
1749 1750 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
1750 1751 mem_win0_base | V_BIR(0) |
1751 1752 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
1752 1753
1753 1754 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
1754 1755 mem_win1_base | V_BIR(0) |
1755 1756 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
1756 1757
1757 1758 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
1758 1759 mem_win2_base | V_BIR(0) |
1759 1760 V_WINDOW(ilog2(mem_win2_aperture) - 10));
1760 1761
1761 1762 /* flush */
1762 1763 (void) t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1763 1764 }
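
The ilog2(aperture) - 10 expression above implies the hardware encodes a
window's size as log2(bytes) minus 10, i.e. encoding 0 means a 1KB window;
treating that as an assumption, the arithmetic for a hypothetical 64KB
aperture works out as:

	/*
	 * Illustrative only: ilog2(65536) = 16, so the driver writes
	 * V_WINDOW(16 - 10) = V_WINDOW(6), and the hardware decodes
	 * 2^(6 + 10) = 65536 bytes.
	 */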
1764 1765
1765 1766 /*
1766 1767 * Positions the memory window such that it can be used to access the specified
1767 1768 * address in the chip's address space. The return value is the offset of addr
1768 1769 * from the start of the window.
1769 1770 */
1770 1771 uint32_t
1771 1772 position_memwin(struct adapter *sc, int n, uint32_t addr)
1772 1773 {
1773 1774 uint32_t start, pf;
1774 1775 uint32_t reg;
1775 1776
1776 1777 if (addr & 3) {
1777 1778 cxgb_printf(sc->dip, CE_WARN,
1778 1779 "addr (0x%x) is not at a 4B boundary.\n", addr);
1779 1780 return (EFAULT);
1780 1781 }
1781 1782
1782 1783 if (is_t4(sc->params.chip)) {
1783 1784 pf = 0;
1784 1785 start = addr & ~0xf; /* start must be 16B aligned */
1785 1786 } else {
1786 1787 pf = V_PFNUM(sc->pf);
1787 1788 start = addr & ~0x7f; /* start must be 128B aligned */
1788 1789 }
1789 1790 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1790 1791
1791 1792 t4_write_reg(sc, reg, start | pf);
1792 1793 (void) t4_read_reg(sc, reg);
1793 1794
1794 1795 return (addr - start);
1795 1796 }
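
A sketch of how a caller might use position_memwin() (hypothetical helper,
not in this file): position window 0 over an adapter address and read one
word through it. It assumes the window contents appear in BAR0 at the
window's base offset, per the T5-style layout in setup_memwin().

	static uint32_t
	read_adapter_word(struct adapter *sc, uint32_t addr)
	{
		uint32_t off;

		off = position_memwin(sc, 0, addr);	/* addr: 4B aligned */
		return (t4_read_reg(sc, MEMWIN0_BASE + off));
	}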
1796 1797
1797 1798
1798 1799 /*
1799 1800 * Reads the named property and fills up the "data" array (which has at least
1800 1801 * "count" elements). We first try and lookup the property for our dev_t and
1801 1802 * then retry with DDI_DEV_T_ANY if it's not found.
1802 1803 *
1803 1804 * Returns non-zero if the property was found and "data" has been updated.
1804 1805 */
1805 1806 static int
1806 1807 prop_lookup_int_array(struct adapter *sc, char *name, int *data, uint_t count)
1807 1808 {
1808 1809 dev_info_t *dip = sc->dip;
1809 1810 dev_t dev = sc->dev;
1810 1811 int rc, *d;
1811 1812 uint_t i, n;
1812 1813
1813 1814 rc = ddi_prop_lookup_int_array(dev, dip, DDI_PROP_DONTPASS,
1814 1815 name, &d, &n);
1815 1816 if (rc == DDI_PROP_SUCCESS)
1816 1817 goto found;
1817 1818
1818 1819 if (rc != DDI_PROP_NOT_FOUND) {
1819 1820 cxgb_printf(dip, CE_WARN,
1820 1821 "failed to lookup property %s for minor %d: %d.",
1821 1822 name, getminor(dev), rc);
1822 1823 return (0);
1823 1824 }
1824 1825
1825 1826 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1826 1827 name, &d, &n);
1827 1828 if (rc == DDI_PROP_SUCCESS)
1828 1829 goto found;
1829 1830
1830 1831 if (rc != DDI_PROP_NOT_FOUND) {
1831 1832 cxgb_printf(dip, CE_WARN,
1832 1833 "failed to lookup property %s: %d.", name, rc);
1833 1834 return (0);
1834 1835 }
1835 1836
1836 1837 return (0);
1837 1838
1838 1839 found:
1839 1840 if (n > count) {
1840 1841 cxgb_printf(dip, CE_NOTE,
1841 1842 "property %s has too many elements (%d), ignoring extras",
1842 1843 name, n);
1843 1844 }
1844 1845
1845 1846 for (i = 0; i < n && i < count; i++)
1846 1847 data[i] = d[i];
1847 1848 ddi_prop_free(d);
1848 1849
1849 1850 return (1);
1850 1851 }
1851 1852
1852 1853 static int
1853 1854 prop_lookup_int(struct adapter *sc, char *name, int defval)
1854 1855 {
1855 1856 int rc;
1856 1857
1857 1858 rc = ddi_prop_get_int(sc->dev, sc->dip, DDI_PROP_DONTPASS, name, -1);
1858 1859 if (rc != -1)
1859 1860 return (rc);
1860 1861
1861 1862 return (ddi_prop_get_int(DDI_DEV_T_ANY, sc->dip, DDI_PROP_DONTPASS,
1862 1863 name, defval));
1863 1864 }
1864 1865
1865 1866 static int
1866 1867 init_driver_props(struct adapter *sc, struct driver_properties *p)
1867 1868 {
1868 1869 dev_t dev = sc->dev;
1869 1870 dev_info_t *dip = sc->dip;
1870 1871 int i, *data;
1871 1872 uint_t tmr[SGE_NTIMERS] = {5, 10, 20, 50, 100, 200};
1872 1873 uint_t cnt[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
1873 1874
1874 1875 /*
1875 1876 * Holdoff timer
1876 1877 */
1877 1878 data = &p->timer_val[0];
1878 1879 for (i = 0; i < SGE_NTIMERS; i++)
1879 1880 data[i] = tmr[i];
1880 1881 (void) prop_lookup_int_array(sc, "holdoff-timer-values", data,
1881 1882 SGE_NTIMERS);
1882 1883 for (i = 0; i < SGE_NTIMERS; i++) {
1883 1884 int limit = 200U;
1884 1885 if (data[i] > limit) {
1885 1886 cxgb_printf(dip, CE_WARN,
1886 1887 "holdoff timer %d is too high (%d), lowered to %d.",
1887 1888 i, data[i], limit);
1888 1889 data[i] = limit;
1889 1890 }
1890 1891 }
1891 1892 (void) ddi_prop_update_int_array(dev, dip, "holdoff-timer-values",
1892 1893 data, SGE_NTIMERS);
1893 1894
1894 1895 /*
1895 1896 * Holdoff packet counter
1896 1897 */
1897 1898 data = &p->counter_val[0];
1898 1899 for (i = 0; i < SGE_NCOUNTERS; i++)
1899 1900 data[i] = cnt[i];
1900 1901 (void) prop_lookup_int_array(sc, "holdoff-pkt-counter-values", data,
1901 1902 SGE_NCOUNTERS);
1902 1903 for (i = 0; i < SGE_NCOUNTERS; i++) {
1903 1904 int limit = M_THRESHOLD_0;
1904 1905 if (data[i] > limit) {
1905 1906 cxgb_printf(dip, CE_WARN,
1906 1907 "holdoff pkt-counter %d is too high (%d), "
1907 1908 "lowered to %d.", i, data[i], limit);
1908 1909 data[i] = limit;
1909 1910 }
1910 1911 }
1911 1912 (void) ddi_prop_update_int_array(dev, dip, "holdoff-pkt-counter-values",
1912 1913 data, SGE_NCOUNTERS);
1913 1914
1914 1915 /*
1915 1916 * Maximum # of tx and rx queues to use for each
1916 1917 * 100G, 40G, 25G, 10G and 1G port.
1917 1918 */
1918 1919 p->max_ntxq_10g = prop_lookup_int(sc, "max-ntxq-10G-port", 8);
1919 1920 (void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port",
1920 1921 p->max_ntxq_10g);
1921 1922
1922 1923 p->max_nrxq_10g = prop_lookup_int(sc, "max-nrxq-10G-port", 8);
1923 1924 (void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port",
1924 1925 p->max_nrxq_10g);
1925 1926
1926 1927 p->max_ntxq_1g = prop_lookup_int(sc, "max-ntxq-1G-port", 2);
1927 1928 (void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port",
1928 1929 p->max_ntxq_1g);
1929 1930
1930 1931 p->max_nrxq_1g = prop_lookup_int(sc, "max-nrxq-1G-port", 2);
1931 1932 (void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port",
1932 1933 p->max_nrxq_1g);
1933 1934
1934 1935 #ifdef TCP_OFFLOAD_ENABLE
1935 1936 p->max_nofldtxq_10g = prop_lookup_int(sc, "max-nofldtxq-10G-port", 8);
1936 1937 (void) ddi_prop_update_int(dev, dip, "max-nofldtxq-10G-port",
1937 1938 p->max_nofldtxq_10g);
1938 1939
1939 1940 p->max_nofldrxq_10g = prop_lookup_int(sc, "max-nofldrxq-10G-port", 2);
1940 1941 (void) ddi_prop_update_int(dev, dip, "max-nofldrxq-10G-port",
1941 1942 p->max_nofldrxq_10g);
1942 1943
1943 1944 p->max_nofldtxq_1g = prop_lookup_int(sc, "max-nofldtxq-1G-port", 2);
1944 1945 (void) ddi_prop_update_int(dev, dip, "max-nofldtxq-1G-port",
1945 1946 p->max_nofldtxq_1g);
1946 1947
1947 1948 p->max_nofldrxq_1g = prop_lookup_int(sc, "max-nofldrxq-1G-port", 1);
1948 1949 (void) ddi_prop_update_int(dev, dip, "max-nofldrxq-1G-port",
1949 1950 p->max_nofldrxq_1g);
1950 1951 #endif
1951 1952
1952 1953 /*
1953 1954 * Holdoff parameters for 10G and 1G ports.
1954 1955 */
1955 1956 p->tmr_idx_10g = prop_lookup_int(sc, "holdoff-timer-idx-10G", 0);
1956 1957 (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-10G",
1957 1958 p->tmr_idx_10g);
1958 1959
1959 1960 p->pktc_idx_10g = prop_lookup_int(sc, "holdoff-pktc-idx-10G", 2);
1960 1961 (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-10G",
1961 1962 p->pktc_idx_10g);
1962 1963
1963 1964 p->tmr_idx_1g = prop_lookup_int(sc, "holdoff-timer-idx-1G", 0);
1964 1965 (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-1G",
1965 1966 p->tmr_idx_1g);
1966 1967
1967 1968 p->pktc_idx_1g = prop_lookup_int(sc, "holdoff-pktc-idx-1G", 2);
1968 1969 (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-1G",
1969 1970 p->pktc_idx_1g);
1970 1971
1971 1972 /*
1972 1973 * Size (number of entries) of each tx and rx queue.
1973 1974 */
1974 1975 i = prop_lookup_int(sc, "qsize-txq", TX_EQ_QSIZE);
1975 1976 p->qsize_txq = max(i, 128);
1976 1977 if (p->qsize_txq != i) {
1977 1978 cxgb_printf(dip, CE_WARN,
1978 1979 "using %d instead of %d as the tx queue size",
1979 1980 p->qsize_txq, i);
1980 1981 }
1981 1982 (void) ddi_prop_update_int(dev, dip, "qsize-txq", p->qsize_txq);
1982 1983
1983 1984 i = prop_lookup_int(sc, "qsize-rxq", RX_IQ_QSIZE);
1984 1985 p->qsize_rxq = max(i, 128);
1985 1986 while (p->qsize_rxq & 7)
1986 1987 p->qsize_rxq--;
1987 1988 if (p->qsize_rxq != i) {
1988 1989 cxgb_printf(dip, CE_WARN,
1989 1990 "using %d instead of %d as the rx queue size",
1990 1991 p->qsize_rxq, i);
1991 1992 }
1992 1993 (void) ddi_prop_update_int(dev, dip, "qsize-rxq", p->qsize_rxq);
1993 1994
1994 1995 /*
1995 1996 * Interrupt types allowed.
1996 1997 * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively. See sys/ddi_intr.h
1997 1998 */
1998 1999 p->intr_types = prop_lookup_int(sc, "interrupt-types",
1999 2000 DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
2000 2001 (void) ddi_prop_update_int(dev, dip, "interrupt-types", p->intr_types);
2001 2002
2002 2003 /*
2003 2004 * Forwarded interrupt queues. Create this property to force the driver
2004 2005 * to use forwarded interrupt queues.
2005 2006 */
2006 2007 if (ddi_prop_exists(dev, dip, DDI_PROP_DONTPASS,
2007 2008 "interrupt-forwarding") != 0 ||
2008 2009 ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
2009 2010 "interrupt-forwarding") != 0) {
2010 2011 UNIMPLEMENTED();
2011 2012 (void) ddi_prop_create(dev, dip, DDI_PROP_CANSLEEP,
2012 2013 "interrupt-forwarding", NULL, 0);
2013 2014 }
2014 2015
2015 2016 /*
2016 2017 * Write combining
2017 2018 * 0 to disable, 1 to enable
2018 2019 */
2019 2020 p->wc = prop_lookup_int(sc, "write-combine", 1);
2020 2021 cxgb_printf(dip, CE_WARN, "write-combine: using %d", p->wc);
2021 2022 if (p->wc != 0 && p->wc != 1) {
2022 2023 cxgb_printf(dip, CE_WARN,
2023 2024 "write-combine: using 1 instead of %d", p->wc);
2024 2025 p->wc = 1;
2025 2026 }
2026 2027 (void) ddi_prop_update_int(dev, dip, "write-combine", p->wc);
2027 2028
2028 2029 p->t4_fw_install = prop_lookup_int(sc, "t4_fw_install", 1);
2029 2030 if (p->t4_fw_install != 0 && p->t4_fw_install != 2)
2030 2031 p->t4_fw_install = 1;
2031 2032 (void) ddi_prop_update_int(dev, dip, "t4_fw_install", p->t4_fw_install);
2032 2033
2033 2034 /* Multiple Rings */
2034 2035 p->multi_rings = prop_lookup_int(sc, "multi-rings", 1);
2035 2036 if (p->multi_rings != 0 && p->multi_rings != 1) {
2036 2037 cxgb_printf(dip, CE_NOTE,
2037 2038 "multi-rings: using value 1 instead of %d", p->multi_rings);
2038 2039 p->multi_rings = 1;
2039 2040 }
2040 2041
2041 2042 (void) ddi_prop_update_int(dev, dip, "multi-rings", p->multi_rings);
2042 2043
2043 2044 return (0);
2044 2045 }
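
Every property consulted above can also come from the administrator. A
hypothetical fragment of the nexus driver's .conf file (assuming the
conventional /kernel/drv/t4nex.conf location; values are examples only):

	# Fewer rx queues and deeper rings on 10G ports.
	max-nrxq-10G-port = 4;
	qsize-rxq = 1024;
	# Allow MSI-X only (bit 2, per sys/ddi_intr.h).
	interrupt-types = 4;
	# Disable write combining.
	write-combine = 0;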
2045 2046
2046 2047 static int
2047 2048 remove_extra_props(struct adapter *sc, int n10g, int n1g)
2048 2049 {
2049 2050 if (n10g == 0) {
2050 2051 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-10G-port");
2051 2052 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-10G-port");
2052 2053 (void) ddi_prop_remove(sc->dev, sc->dip,
2053 2054 "holdoff-timer-idx-10G");
2054 2055 (void) ddi_prop_remove(sc->dev, sc->dip,
2055 2056 "holdoff-pktc-idx-10G");
2056 2057 }
2057 2058
2058 2059 if (n1g == 0) {
2059 2060 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-1G-port");
2060 2061 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-1G-port");
2061 2062 (void) ddi_prop_remove(sc->dev, sc->dip,
2062 2063 "holdoff-timer-idx-1G");
2063 2064 (void) ddi_prop_remove(sc->dev, sc->dip, "holdoff-pktc-idx-1G");
2064 2065 }
2065 2066
2066 2067 return (0);
2067 2068 }
2068 2069
2069 2070 static int
2070 2071 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
2071 2072 struct intrs_and_queues *iaq)
2072 2073 {
2073 2074 struct driver_properties *p = &sc->props;
2074 2075 int rc, itype, itypes, navail, nc, nrxq10g, nrxq1g, n;
2075 2076 int nofldrxq10g = 0, nofldrxq1g = 0;
2076 2077
2077 2078 bzero(iaq, sizeof (*iaq));
2078 2079 nc = ncpus; /* our snapshot of the number of CPUs */
2079 2080 iaq->ntxq10g = min(nc, p->max_ntxq_10g);
2080 2081 iaq->ntxq1g = min(nc, p->max_ntxq_1g);
2081 2082 iaq->nrxq10g = nrxq10g = min(nc, p->max_nrxq_10g);
2082 2083 iaq->nrxq1g = nrxq1g = min(nc, p->max_nrxq_1g);
2083 2084 #ifdef TCP_OFFLOAD_ENABLE
2084 2085 iaq->nofldtxq10g = min(nc, p->max_nofldtxq_10g);
2085 2086 iaq->nofldtxq1g = min(nc, p->max_nofldtxq_1g);
2086 2087 iaq->nofldrxq10g = nofldrxq10g = min(nc, p->max_nofldrxq_10g);
2087 2088 iaq->nofldrxq1g = nofldrxq1g = min(nc, p->max_nofldrxq_1g);
2088 2089 #endif
2089 2090
2090 2091 rc = ddi_intr_get_supported_types(sc->dip, &itypes);
2091 2092 if (rc != DDI_SUCCESS) {
2092 2093 cxgb_printf(sc->dip, CE_WARN,
2093 2094 "failed to determine supported interrupt types: %d", rc);
2094 2095 return (rc);
2095 2096 }
2096 2097
2097 2098 for (itype = DDI_INTR_TYPE_MSIX; itype; itype >>= 1) {
2098 2099 ASSERT(itype == DDI_INTR_TYPE_MSIX ||
2099 2100 itype == DDI_INTR_TYPE_MSI ||
2100 2101 itype == DDI_INTR_TYPE_FIXED);
2101 2102
2102 2103 if ((itype & itypes & p->intr_types) == 0)
2103 2104 continue; /* not supported or not allowed */
2104 2105
2105 2106 navail = 0;
2106 2107 rc = ddi_intr_get_navail(sc->dip, itype, &navail);
2107 2108 if (rc != DDI_SUCCESS || navail == 0) {
2108 2109 cxgb_printf(sc->dip, CE_WARN,
2109 2110 "failed to get # of interrupts for type %d: %d",
2110 2111 itype, rc);
2111 2112 continue; /* carry on */
2112 2113 }
2113 2114
2114 2115 iaq->intr_type = itype;
2115 2116 if (navail == 0)
2116 2117 continue;
2117 2118
2118 2119 /*
2119 2120 * Best option: an interrupt vector for errors, one for the
2120 2121 * firmware event queue, and one each for each rxq (NIC as well
2121 2122 * as offload).
2122 2123 */
2123 2124 iaq->nirq = T4_EXTRA_INTR;
2124 2125 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
2125 2126 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
2126 2127
2127 2128 if (iaq->nirq <= navail &&
2128 2129 (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2129 2130 iaq->intr_fwd = 0;
2130 2131 goto allocate;
2131 2132 }
2132 2133
2133 2134 /*
2134 2135 * Second best option: an interrupt vector for errors, one for
2135 2136 * the firmware event queue, and one each for either NIC or
2136 2137 * offload rxq's.
2137 2138 */
2138 2139 iaq->nirq = T4_EXTRA_INTR;
2139 2140 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
2140 2141 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
2141 2142 if (iaq->nirq <= navail &&
2142 2143 (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2143 2144 iaq->intr_fwd = 1;
2144 2145 goto allocate;
2145 2146 }
2146 2147
2147 2148 /*
2148 2149 * Next best option: an interrupt vector for errors, one for the
2149 2150 * firmware event queue, and at least one per port. At this
2150 2151 * point we know we'll have to downsize nrxq or nofldrxq to fit
2151 2152 * what's available to us.
2152 2153 */
2153 2154 iaq->nirq = T4_EXTRA_INTR;
2154 2155 iaq->nirq += n10g + n1g;
2155 2156 if (iaq->nirq <= navail) {
2156 2157 int leftover = navail - iaq->nirq;
2157 2158
2158 2159 if (n10g > 0) {
2159 2160 int target = max(nrxq10g, nofldrxq10g);
2160 2161
2161 2162 n = 1;
2162 2163 while (n < target && leftover >= n10g) {
2163 2164 leftover -= n10g;
2164 2165 iaq->nirq += n10g;
2165 2166 n++;
2166 2167 }
2167 2168 iaq->nrxq10g = min(n, nrxq10g);
2168 2169 #ifdef TCP_OFFLOAD_ENABLE
2169 2170 iaq->nofldrxq10g = min(n, nofldrxq10g);
2170 2171 #endif
2171 2172 }
2172 2173
2173 2174 if (n1g > 0) {
2174 2175 int target = max(nrxq1g, nofldrxq1g);
2175 2176
2176 2177 n = 1;
2177 2178 while (n < target && leftover >= n1g) {
2178 2179 leftover -= n1g;
2179 2180 iaq->nirq += n1g;
2180 2181 n++;
2181 2182 }
2182 2183 iaq->nrxq1g = min(n, nrxq1g);
2183 2184 #ifdef TCP_OFFLOAD_ENABLE
2184 2185 iaq->nofldrxq1g = min(n, nofldrxq1g);
2185 2186 #endif
2186 2187 }
2187 2188
2188 2189 /* We have arrived at the minimum vector count needed to give
2189 2190 * each port one interrupt. In the non-offload case every
2190 2191 * rxq now gets its own vector; in the offload case a vector
2191 2192 * may be shared by a NIC rxq and an offload rxq. Hence
2192 2193 * enable interrupt forwarding only for the offload
2193 2194 * case.
2194 2195 */
2195 2196 #ifdef TCP_OFFLOAD_ENABLE
2196 2197 if (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq)) {
2197 2198 iaq->intr_fwd = 1;
2198 2199 #else
2199 2200 if (itype != DDI_INTR_TYPE_MSI) {
2200 2201 #endif
2201 2202 goto allocate;
2202 2203 }
2203 2204 }
2204 2205
2205 2206 /*
2206 2207 * Least desirable option: one interrupt vector for everything.
2207 2208 */
2208 2209 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2209 2210 #ifdef TCP_OFFLOAD_ENABLE
2210 2211 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2211 2212 #endif
2212 2213 iaq->intr_fwd = 1;
2213 2214
2214 2215 allocate:
2215 2216 return (0);
2216 2217 }
2217 2218
2218 2219 cxgb_printf(sc->dip, CE_WARN,
2219 2220 "failed to find a usable interrupt type. supported=%d, allowed=%d",
2220 2221 itypes, p->intr_types);
2221 2222 return (DDI_FAILURE);
2222 2223 }
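
A worked example with assumed numbers makes the fallback ladder concrete:
2 x 10G ports, no 1G ports, nrxq10g = 8, offload disabled, and
T4_EXTRA_INTR taken to be 2 (one vector for errors, one for the firmware
event queue).

	/*
	 * Best case: nirq = 2 + 2*8 = 18 vectors.  With only 8 MSI-X
	 * vectors available, the per-port fallback runs instead:
	 * nirq starts at 2 + 2 = 4 and leftover = 4, then each pass
	 * adds one rxq per port while leftover >= n10g:
	 *   n = 2: leftover 2, nirq 6;   n = 3: leftover 0, nirq 8.
	 * Result: nrxq10g is downsized to 3 and all 8 vectors are used.
	 */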
2223 2224
2224 2225 static int
2225 2226 add_child_node(struct adapter *sc, int idx)
2226 2227 {
2227 2228 int rc;
2228 2229 struct port_info *pi;
2229 2230
2230 2231 if (idx < 0 || idx >= sc->params.nports)
2231 2232 return (EINVAL);
2232 2233
2233 2234 pi = sc->port[idx];
2234 2235 if (pi == NULL)
2235 2236 return (ENODEV); /* t4_port_init failed earlier */
2236 2237
2237 2238 PORT_LOCK(pi);
2238 2239 if (pi->dip != NULL) {
2239 2240 rc = 0; /* EEXIST really, but then bus_config fails */
2240 2241 goto done;
2241 2242 }
2242 2243
2243 2244 rc = ndi_devi_alloc(sc->dip, T4_PORT_NAME, DEVI_SID_NODEID, &pi->dip);
2244 2245 if (rc != DDI_SUCCESS || pi->dip == NULL) {
2245 2246 rc = ENOMEM;
2246 2247 goto done;
2247 2248 }
2248 2249
2249 2250 (void) ddi_set_parent_data(pi->dip, pi);
2250 2251 (void) ndi_devi_bind_driver(pi->dip, 0);
2251 2252 rc = 0;
2252 2253 done:
2253 2254 PORT_UNLOCK(pi);
2254 2255 return (rc);
2255 2256 }
2256 2257
2257 2258 static int
2258 2259 remove_child_node(struct adapter *sc, int idx)
2259 2260 {
2260 2261 int rc;
2261 2262 struct port_info *pi;
2262 2263
2263 2264 if (idx < 0 || idx >= sc->params.nports)
2264 2265 return (EINVAL);
2265 2266
2266 2267 pi = sc->port[idx];
2267 2268 if (pi == NULL)
2268 2269 return (ENODEV);
2269 2270
2270 2271 PORT_LOCK(pi);
2271 2272 if (pi->dip == NULL) {
2272 2273 rc = ENODEV;
2273 2274 goto done;
2274 2275 }
2275 2276
2276 2277 rc = ndi_devi_free(pi->dip);
2277 2278 if (rc == 0)
2278 2279 pi->dip = NULL;
2279 2280 done:
2280 2281 PORT_UNLOCK(pi);
2281 2282 return (rc);
2282 2283 }
2283 2284
2284 2285 #define KS_UINIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG)
2285 2286 #define KS_CINIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR)
2286 2287 #define KS_U_SET(x, y) kstatp->x.value.ul = (y)
2287 2288 #define KS_C_SET(x, ...) \
2288 2289 (void) snprintf(kstatp->x.value.c, 16, __VA_ARGS__)
2289 2290
2290 2291 /*
2291 2292 * t4nex:X:config
2292 2293 */
2293 2294 struct t4_kstats {
2294 2295 kstat_named_t chip_ver;
2295 2296 kstat_named_t fw_vers;
2296 2297 kstat_named_t tp_vers;
2297 2298 kstat_named_t driver_version;
2298 2299 kstat_named_t serial_number;
2299 2300 kstat_named_t ec_level;
2300 2301 kstat_named_t id;
2301 2302 kstat_named_t bus_type;
2302 2303 kstat_named_t bus_width;
2303 2304 kstat_named_t bus_speed;
2304 2305 kstat_named_t core_clock;
2305 2306 kstat_named_t port_cnt;
2306 2307 kstat_named_t port_type;
2307 2308 kstat_named_t pci_vendor_id;
2308 2309 kstat_named_t pci_device_id;
2309 2310 };
2310 2311 static kstat_t *
2311 2312 setup_kstats(struct adapter *sc)
2312 2313 {
2313 2314 kstat_t *ksp;
2314 2315 struct t4_kstats *kstatp;
2315 2316 int ndata;
2316 2317 struct pci_params *p = &sc->params.pci;
2317 2318 struct vpd_params *v = &sc->params.vpd;
2318 2319 uint16_t pci_vendor, pci_device;
2319 2320
2320 2321 ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t);
2321 2322
2322 2323 ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config",
2323 2324 "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2324 2325 if (ksp == NULL) {
2325 2326 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2326 2327 return (NULL);
2327 2328 }
2328 2329
2329 2330 kstatp = (struct t4_kstats *)ksp->ks_data;
2330 2331
2331 2332 KS_UINIT(chip_ver);
2332 2333 KS_CINIT(fw_vers);
2333 2334 KS_CINIT(tp_vers);
2334 2335 KS_CINIT(driver_version);
2335 2336 KS_CINIT(serial_number);
2336 2337 KS_CINIT(ec_level);
2337 2338 KS_CINIT(id);
2338 2339 KS_CINIT(bus_type);
2339 2340 KS_CINIT(bus_width);
2340 2341 KS_CINIT(bus_speed);
2341 2342 KS_UINIT(core_clock);
2342 2343 KS_UINIT(port_cnt);
2343 2344 KS_CINIT(port_type);
2344 2345 KS_CINIT(pci_vendor_id);
2345 2346 KS_CINIT(pci_device_id);
2346 2347
2347 2348 KS_U_SET(chip_ver, sc->params.chip);
2348 2349 KS_C_SET(fw_vers, "%d.%d.%d.%d",
2349 2350 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2350 2351 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2351 2352 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2352 2353 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2353 2354 KS_C_SET(tp_vers, "%d.%d.%d.%d",
2354 2355 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
2355 2356 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
2356 2357 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
2357 2358 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
2358 2359 KS_C_SET(driver_version, DRV_VERSION);
2359 2360 KS_C_SET(serial_number, "%s", v->sn);
2360 2361 KS_C_SET(ec_level, "%s", v->ec);
2361 2362 KS_C_SET(id, "%s", v->id);
2362 2363 KS_C_SET(bus_type, "pci-express");
2363 2364 KS_C_SET(bus_width, "x%d lanes", p->width);
2364 2365 KS_C_SET(bus_speed, "%d", p->speed);
2365 2366 KS_U_SET(core_clock, v->cclk);
2366 2367 KS_U_SET(port_cnt, sc->params.nports);
2367 2368
2368 2369 t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor);
2369 2370 KS_C_SET(pci_vendor_id, "0x%x", pci_vendor);
2370 2371
2371 2372 t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device);
2372 2373 KS_C_SET(pci_device_id, "0x%x", pci_device);
2373 2374
2374 2375 KS_C_SET(port_type, "%s/%s/%s/%s",
2375 2376 print_port_speed(sc->port[0]),
2376 2377 print_port_speed(sc->port[1]),
2377 2378 print_port_speed(sc->port[2]),
2378 2379 print_port_speed(sc->port[3]));
2379 2380
2380 2381 /* Do NOT set ksp->ks_update. These kstats do not change. */
2381 2382
2382 2383 /* Install the kstat */
2383 2384 ksp->ks_private = (void *)sc;
2384 2385 kstat_install(ksp);
2385 2386
2386 2387 return (ksp);
2387 2388 }
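
Once installed, the config kstat is visible from userland, e.g. for
instance 0 (output elided):

	# kstat -m t4nex -i 0 -n config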
2388 2389
2389 2390 /*
2390 2391 * t4nex:X:stat
2391 2392 */
2392 2393 struct t4_wc_kstats {
2393 2394 kstat_named_t write_coal_success;
2394 2395 kstat_named_t write_coal_failure;
2395 2396 };
2396 2397 static kstat_t *
2397 2398 setup_wc_kstats(struct adapter *sc)
2398 2399 {
2399 2400 kstat_t *ksp;
2400 2401 struct t4_wc_kstats *kstatp;
2401 2402 int ndata;
2402 2403
2403 2404 ndata = sizeof (struct t4_wc_kstats) / sizeof (kstat_named_t);
2404 2405 ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "stats",
2405 2406 "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2406 2407 if (ksp == NULL) {
2407 2408 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2408 2409 return (NULL);
2409 2410 }
2410 2411
2411 2412 kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2412 2413
2413 2414 KS_UINIT(write_coal_success);
2414 2415 KS_UINIT(write_coal_failure);
2415 2416
2416 2417 ksp->ks_update = update_wc_kstats;
2417 2418 /* Install the kstat */
2418 2419 ksp->ks_private = (void *)sc;
2419 2420 kstat_install(ksp);
2420 2421
2421 2422 return (ksp);
2422 2423 }
2423 2424
2424 2425 static int
2425 2426 update_wc_kstats(kstat_t *ksp, int rw)
2426 2427 {
2427 2428 struct t4_wc_kstats *kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2428 2429 struct adapter *sc = ksp->ks_private;
2429 2430 uint32_t wc_total, wc_success, wc_failure;
2430 2431
2431 2432 if (rw == KSTAT_WRITE)
2432 2433 return (0);
2433 2434
2434 2435 if (is_t5(sc->params.chip)) {
2435 2436 wc_total = t4_read_reg(sc, A_SGE_STAT_TOTAL);
2436 2437 wc_failure = t4_read_reg(sc, A_SGE_STAT_MATCH);
2437 2438 wc_success = wc_total - wc_failure;
2438 2439 } else {
2439 2440 wc_success = 0;
2440 2441 wc_failure = 0;
2441 2442 }
2442 2443
2443 2444 KS_U_SET(write_coal_success, wc_success);
2444 2445 KS_U_SET(write_coal_failure, wc_failure);
2445 2446
2446 2447 return (0);
2447 2448 }
2448 2449
2449 2450 int
2450 2451 adapter_full_init(struct adapter *sc)
2451 2452 {
2452 2453 int i, rc = 0;
2453 2454
2454 2455 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2455 2456
2456 2457 rc = t4_setup_adapter_queues(sc);
2457 2458 if (rc != 0)
2458 2459 goto done;
2459 2460
2460 2461 if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
2461 2462 (void) ddi_intr_block_enable(sc->intr_handle, sc->intr_count);
2462 2463 else {
2463 2464 for (i = 0; i < sc->intr_count; i++)
2464 2465 (void) ddi_intr_enable(sc->intr_handle[i]);
2465 2466 }
2466 2467 t4_intr_enable(sc);
2467 2468 sc->flags |= FULL_INIT_DONE;
2468 2469
2469 2470 #ifdef TCP_OFFLOAD_ENABLE
2470 2471 /* TODO: wrong place to enable TOE capability */
2471 2472 if (is_offload(sc) != 0) {
2472 2473 for_each_port(sc, i) {
2473 2474 struct port_info *pi = sc->port[i];
2474 2475 rc = toe_capability(pi, 1);
2475 2476 if (rc != 0) {
2476 2477 cxgb_printf(pi->dip, CE_WARN,
2477 2478 "Failed to activate toe capability: %d",
2478 2479 rc);
2479 2480 rc = 0; /* not a fatal error */
2480 2481 }
2481 2482 }
2482 2483 }
2483 2484 #endif
2484 2485
2485 2486 done:
2486 2487 if (rc != 0)
2487 2488 (void) adapter_full_uninit(sc);
2488 2489
2489 2490 return (rc);
2490 2491 }
2491 2492
2492 2493 int
2493 2494 adapter_full_uninit(struct adapter *sc)
2494 2495 {
2495 2496 int i, rc = 0;
2496 2497
2497 2498 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2498 2499
2499 2500 if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
2500 2501 (void) ddi_intr_block_disable(sc->intr_handle, sc->intr_count);
2501 2502 else {
2502 2503 for (i = 0; i < sc->intr_count; i++)
2503 2504 (void) ddi_intr_disable(sc->intr_handle[i]);
2504 2505 }
2505 2506
2506 2507 rc = t4_teardown_adapter_queues(sc);
2507 2508 if (rc != 0)
2508 2509 return (rc);
2509 2510
2510 2511 sc->flags &= ~FULL_INIT_DONE;
2511 2512
2512 2513 return (0);
2513 2514 }
2514 2515
2515 2516 int
2516 2517 port_full_init(struct port_info *pi)
2517 2518 {
2518 2519 struct adapter *sc = pi->adapter;
2519 2520 uint16_t *rss;
2520 2521 struct sge_rxq *rxq;
2521 2522 int rc, i;
2522 2523
2523 2524 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2524 2525 ASSERT((pi->flags & PORT_INIT_DONE) == 0);
2525 2526
2526 2527 /*
2527 2528 * Allocate tx/rx/fl queues for this port.
2528 2529 */
2529 2530 rc = t4_setup_port_queues(pi);
2530 2531 if (rc != 0)
2531 2532 goto done; /* error message displayed already */
2532 2533
2533 2534 /*
2534 2535 * Setup RSS for this port.
2535 2536 */
2536 2537 rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP);
2537 2538 for_each_rxq(pi, i, rxq) {
2538 2539 rss[i] = rxq->iq.abs_id;
2539 2540 }
2540 2541 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2541 2542 pi->rss_size, rss, pi->nrxq);
2542 2543 kmem_free(rss, pi->nrxq * sizeof (*rss));
2543 2544 if (rc != 0) {
2544 2545 cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc);
2545 2546 goto done;
2546 2547 }
2547 2548
2548 2549 pi->flags |= PORT_INIT_DONE;
2549 2550 done:
2550 2551 if (rc != 0)
2551 2552 (void) port_full_uninit(pi);
2552 2553
2553 2554 return (rc);
2554 2555 }
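
t4_config_rss_range() is handed an nrxq-entry list for a rss_size-entry
indirection table; the shared code cycles the list across the table, so
conceptually (stated as an assumption about the firmware's behavior):

	/* With nrxq = 4 and rss_size = 128, each rxq owns 32 buckets. */
	for (i = 0; i < pi->rss_size; i++)
		table[i] = rss[i % pi->nrxq];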
2555 2556
2556 2557 /*
2557 2558 * Idempotent.
2558 2559 */
2559 2560 int
2560 2561 port_full_uninit(struct port_info *pi)
2561 2562 {
2562 2563
2563 2564 ASSERT(pi->flags & PORT_INIT_DONE);
2564 2565
2565 2566 (void) t4_teardown_port_queues(pi);
2566 2567 pi->flags &= ~PORT_INIT_DONE;
2567 2568
2568 2569 return (0);
2569 2570 }
2570 2571
2571 2572 void
2572 2573 enable_port_queues(struct port_info *pi)
2573 2574 {
2574 2575 struct adapter *sc = pi->adapter;
2575 2576 int i;
2576 2577 struct sge_iq *iq;
2577 2578 struct sge_rxq *rxq;
2578 2579 #ifdef TCP_OFFLOAD_ENABLE
2579 2580 struct sge_ofld_rxq *ofld_rxq;
2580 2581 #endif
2581 2582
2582 2583 ASSERT(pi->flags & PORT_INIT_DONE);
2583 2584
2584 2585 /*
2585 2586 * TODO: whatever was queued up after we set iq->state to IQS_DISABLED
2586 2587 * back in disable_port_queues will be processed now, after an unbounded
2587 2588 * delay. This can't be good.
2588 2589 */
2589 2590
2590 2591 #ifdef TCP_OFFLOAD_ENABLE
2591 2592 for_each_ofld_rxq(pi, i, ofld_rxq) {
2592 2593 iq = &ofld_rxq->iq;
2593 2594 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2594 2595 IQS_DISABLED)
2595 2596 panic("%s: iq %p wasn't disabled", __func__,
2596 2597 (void *)iq);
2597 2598 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2598 2599 V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2599 2600 }
2600 2601 #endif
2601 2602
2602 2603 for_each_rxq(pi, i, rxq) {
2603 2604 iq = &rxq->iq;
2604 2605 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2605 2606 IQS_DISABLED)
2606 2607 panic("%s: iq %p wasn't disabled", __func__,
2607 2608 (void *)iq);
2608 2609 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2609 2610 V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2610 2611 }
2611 2612 }
2612 2613
2613 2614 void
2614 2615 disable_port_queues(struct port_info *pi)
2615 2616 {
2616 2617 int i;
2617 2618 struct adapter *sc = pi->adapter;
2618 2619 struct sge_rxq *rxq;
2619 2620 #ifdef TCP_OFFLOAD_ENABLE
2620 2621 struct sge_ofld_rxq *ofld_rxq;
2621 2622 #endif
2622 2623
2623 2624 ASSERT(pi->flags & PORT_INIT_DONE);
2624 2625
2625 2626 /*
2626 2627 * TODO: need proper implementation for all tx queues (ctrl, eth, ofld).
2627 2628 */
2628 2629
2629 2630 #ifdef TCP_OFFLOAD_ENABLE
2630 2631 for_each_ofld_rxq(pi, i, ofld_rxq) {
2631 2632 while (atomic_cas_uint(&ofld_rxq->iq.state, IQS_IDLE,
2632 2633 IQS_DISABLED) != IQS_IDLE)
2633 2634 msleep(1);
2634 2635 }
2635 2636 #endif
2636 2637
2637 2638 for_each_rxq(pi, i, rxq) {
2638 2639 while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE,
2639 2640 IQS_DISABLED) != IQS_IDLE)
2640 2641 msleep(1);
2641 2642 }
2642 2643
2643 2644 mutex_enter(&sc->sfl_lock);
2644 2645 #ifdef TCP_OFFLOAD_ENABLE
2645 2646 for_each_ofld_rxq(pi, i, ofld_rxq)
2646 2647 ofld_rxq->fl.flags |= FL_DOOMED;
2647 2648 #endif
2648 2649 for_each_rxq(pi, i, rxq)
2649 2650 rxq->fl.flags |= FL_DOOMED;
2650 2651 mutex_exit(&sc->sfl_lock);
2651 2652 /* TODO: need to wait for all fl's to be removed from sc->sfl */
2652 2653 }
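
The IQS_* handshake above only works because the rx path claims a queue
with the same CAS protocol. A hedged sketch of the consumer side (the real
handler is in t4_sge.c; this is the shape, not the code):

	if (atomic_cas_uint(&iq->state, IQS_IDLE, IQS_BUSY) == IQS_IDLE) {
		/* service the queue ... */
		(void) atomic_cas_uint(&iq->state, IQS_BUSY, IQS_IDLE);
	}
	/* A queue parked in IQS_DISABLED never passes the first CAS. */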
2653 2654
2654 2655 void
2655 2656 t4_fatal_err(struct adapter *sc)
2656 2657 {
2657 2658 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2658 2659 t4_intr_disable(sc);
2659 2660 cxgb_printf(sc->dip, CE_WARN,
2660 2661 "encountered fatal error, adapter stopped.");
2661 2662 }
2662 2663
2663 2664 int
2664 2665 t4_os_find_pci_capability(struct adapter *sc, int cap)
2665 2666 {
2666 2667 uint16_t stat;
2667 2668 uint8_t cap_ptr, cap_id;
2668 2669
2669 2670 t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat);
2670 2671 if ((stat & PCI_STAT_CAP) == 0)
2671 2672 return (0); /* does not implement capabilities */
2672 2673
2673 2674 t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr);
2674 2675 while (cap_ptr) {
2675 2676 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id);
2676 2677 if (cap_id == cap)
2677 2678 return (cap_ptr); /* found */
2678 2679 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr);
2679 2680 }
2680 2681
2681 2682 return (0); /* not found */
2682 2683 }
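
Typical use is to locate a standard capability by ID, with 0 meaning "not
present"; e.g. the PCI Express capability (PCI_CAP_ID_PCI_E from
sys/pci.h):

	int cap;

	cap = t4_os_find_pci_capability(sc, PCI_CAP_ID_PCI_E);
	if (cap != 0) {
		/* cap is the capability's offset in config space. */
	}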
2683 2684
2684 2685 void
2685 2686 t4_os_portmod_changed(const struct adapter *sc, int idx)
2686 2687 {
2687 2688 static const char *mod_str[] = {
2688 2689 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
2689 2690 };
2690 2691 const struct port_info *pi = sc->port[idx];
2691 2692
2692 2693 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
2693 2694 cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged.");
2694 2695 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
2695 2696 cxgb_printf(pi->dip, CE_NOTE,
2696 2697 "unknown transceiver inserted.\n");
2697 2698 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
2698 2699 cxgb_printf(pi->dip, CE_NOTE,
2699 2700 "unsupported transceiver inserted.\n");
2700 2701 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str))
2701 2702 cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.\n",
2702 2703 mod_str[pi->mod_type]);
2703 2704 else
2704 2705 cxgb_printf(pi->dip, CE_NOTE, "transceiver (type %d) inserted.",
2705 2706 pi->mod_type);
2706 2707 }
2707 2708
2708 2709 /* ARGSUSED */
2709 2710 static int
2710 2711 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
2711 2712 {
2712 2713 if (m != NULL)
2713 2714 freemsg(m);
2714 2715 return (0);
2715 2716 }
2716 2717
2717 2718 int
2718 2719 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
2719 2720 {
2720 2721 cpl_handler_t *loc, new;
2721 2722
2722 2723 if (opcode >= ARRAY_SIZE(sc->cpl_handler))
2723 2724 return (EINVAL);
2724 2725
2725 2726 new = h ? h : cpl_not_handled;
2726 2727 loc = &sc->cpl_handler[opcode];
2727 2728 (void) atomic_swap_ptr((void *)loc, (void *)new);
2728 2729
2729 2730 return (0);
2730 2731 }
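
A hedged example of installing a CPL handler (placeholder opcode and body;
any opcode below ARRAY_SIZE(sc->cpl_handler) is dispatched the same way):

	static int
	my_set_tcb_rpl(struct sge_iq *iq, const struct rss_header *rss,
	    mblk_t *m)
	{
		/* consume the reply */
		if (m != NULL)
			freemsg(m);
		return (0);
	}

	(void) t4_register_cpl_handler(sc, CPL_SET_TCB_RPL, my_set_tcb_rpl);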
2731 2732
2732 2733 static int
2733 2734 fw_msg_not_handled(struct adapter *sc, const __be64 *data)
2734 2735 {
2735 2736 struct cpl_fw6_msg *cpl;
2736 2737
2737 2738 cpl = __containerof((void *)data, struct cpl_fw6_msg, data);
2738 2739
2739 2740 cxgb_printf(sc->dip, CE_WARN, "%s fw_msg type %d", __func__, cpl->type);
2740 2741 return (0);
2741 2742 }
2742 2743
2743 2744 int
2744 2745 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
2745 2746 {
2746 2747 fw_msg_handler_t *loc, new;
2747 2748
2748 2749 if (type >= ARRAY_SIZE(sc->fw_msg_handler))
2749 2750 return (EINVAL);
2750 2751
2751 2752 /*
2752 2753 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
2753 2754 * handler dispatch table. Reject any attempt to install a handler for
2754 2755 * this subtype.
2755 2756 */
2756 2757 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
2757 2758 return (EINVAL);
2758 2759
2759 2760 new = h ? h : fw_msg_not_handled;
2760 2761 loc = &sc->fw_msg_handler[type];
2761 2762 (void) atomic_swap_ptr(loc, (void *)new);
2762 2763
2763 2764 return (0);
2764 2765 }
2765 2766
2766 2767 #ifdef TCP_OFFLOAD_ENABLE
2767 2768 static int
2768 2769 toe_capability(struct port_info *pi, int enable)
2769 2770 {
2770 2771 int rc;
2771 2772 struct adapter *sc = pi->adapter;
2772 2773
2773 2774 if (!is_offload(sc))
2774 2775 return (ENODEV);
2775 2776
2776 2777 if (enable != 0) {
2777 2778 if (isset(&sc->offload_map, pi->port_id) != 0)
2778 2779 return (0);
2779 2780
2780 2781 if (sc->offload_map == 0) {
2781 2782 rc = activate_uld(sc, ULD_TOM, &sc->tom);
2782 2783 if (rc != 0)
2783 2784 return (rc);
2784 2785 }
2785 2786
2786 2787 setbit(&sc->offload_map, pi->port_id);
2787 2788 } else {
2788 2789 if (!isset(&sc->offload_map, pi->port_id))
2789 2790 return (0);
2790 2791
2791 2792 clrbit(&sc->offload_map, pi->port_id);
2792 2793
2793 2794 if (sc->offload_map == 0) {
2794 2795 rc = deactivate_uld(&sc->tom);
2795 2796 if (rc != 0) {
2796 2797 setbit(&sc->offload_map, pi->port_id);
2797 2798 return (rc);
2798 2799 }
2799 2800 }
2800 2801 }
2801 2802
2802 2803 return (0);
2803 2804 }
2804 2805
2805 2806 /*
2806 2807 * Add an upper layer driver to the global list.
2807 2808 */
2808 2809 int
2809 2810 t4_register_uld(struct uld_info *ui)
2810 2811 {
2811 2812 int rc = 0;
2812 2813 struct uld_info *u;
2813 2814
2814 2815 mutex_enter(&t4_uld_list_lock);
2815 2816 SLIST_FOREACH(u, &t4_uld_list, link) {
2816 2817 if (u->uld_id == ui->uld_id) {
2817 2818 rc = EEXIST;
2818 2819 goto done;
2819 2820 }
2820 2821 }
2821 2822
2822 2823 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
2823 2824 ui->refcount = 0;
2824 2825 done:
2825 2826 mutex_exit(&t4_uld_list_lock);
2826 2827 return (rc);
2827 2828 }
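
A sketch of how an upper layer driver would plug in; the callback names are
hypothetical and the field shapes follow the uses in activate_uld() and
deactivate_uld() below:

	static struct uld_info tom_uld = {
		.uld_id = ULD_TOM,
		.attach = tom_attach,	/* int (*)(struct adapter *, void **) */
		.detach = tom_detach,	/* int (*)(void *) */
	};

	(void) t4_register_uld(&tom_uld);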
2828 2829
2829 2830 int
2830 2831 t4_unregister_uld(struct uld_info *ui)
2831 2832 {
2832 2833 int rc = EINVAL;
2833 2834 struct uld_info *u;
2834 2835
2835 2836 mutex_enter(&t4_uld_list_lock);
2836 2837
2837 2838 SLIST_FOREACH(u, &t4_uld_list, link) {
2838 2839 if (u == ui) {
2839 2840 if (ui->refcount > 0) {
2840 2841 rc = EBUSY;
2841 2842 goto done;
2842 2843 }
2843 2844
2844 2845 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
2845 2846 rc = 0;
2846 2847 goto done;
2847 2848 }
2848 2849 }
2849 2850 done:
2850 2851 mutex_exit(&t4_uld_list_lock);
2851 2852 return (rc);
2852 2853 }
2853 2854
2854 2855 static int
2855 2856 activate_uld(struct adapter *sc, int id, struct uld_softc *usc)
2856 2857 {
2857 2858 int rc = EAGAIN;
2858 2859 struct uld_info *ui;
2859 2860
2860 2861 mutex_enter(&t4_uld_list_lock);
2861 2862
2862 2863 SLIST_FOREACH(ui, &t4_uld_list, link) {
2863 2864 if (ui->uld_id == id) {
2864 2865 rc = ui->attach(sc, &usc->softc);
2865 2866 if (rc == 0) {
2866 2867 ASSERT(usc->softc != NULL);
2867 2868 ui->refcount++;
2868 2869 usc->uld = ui;
2869 2870 }
2870 2871 goto done;
2871 2872 }
2872 2873 }
2873 2874 done:
2874 2875 mutex_exit(&t4_uld_list_lock);
2875 2876
2876 2877 return (rc);
2877 2878 }
2878 2879
2879 2880 static int
2880 2881 deactivate_uld(struct uld_softc *usc)
2881 2882 {
2882 2883 int rc;
2883 2884
2884 2885 mutex_enter(&t4_uld_list_lock);
2885 2886
2886 2887 if (usc->uld == NULL || usc->softc == NULL) {
2887 2888 rc = EINVAL;
2888 2889 goto done;
2889 2890 }
2890 2891
2891 2892 rc = usc->uld->detach(usc->softc);
2892 2893 if (rc == 0) {
2893 2894 ASSERT(usc->uld->refcount > 0);
2894 2895 usc->uld->refcount--;
2895 2896 usc->uld = NULL;
2896 2897 usc->softc = NULL;
2897 2898 }
2898 2899 done:
2899 2900 mutex_exit(&t4_uld_list_lock);
2900 2901
2901 2902 return (rc);
2902 2903 }
2903 2904
2904 2905 void
2905 2906 t4_iterate(void (*func)(int, void *), void *arg)
2906 2907 {
2907 2908 struct adapter *sc;
2908 2909
2909 2910 mutex_enter(&t4_adapter_list_lock);
2910 2911 SLIST_FOREACH(sc, &t4_adapter_list, link) {
2911 2912 /*
2912 2913 * func should not make any assumptions about what state sc is
2913 2914 * in - the only guarantee is that sc->sc_lock is a valid lock.
2914 2915 */
2915 2916 func(ddi_get_instance(sc->dip), arg);
2916 2917 }
2917 2918 mutex_exit(&t4_adapter_list_lock);
2918 2919 }
2919 2920
2920 2921 #endif
[ 2591 lines elided ]