webrev: 9724 qede needs updates for newer GCC
(review diff for usr/src/uts/common/io/qede/qede_gld.c; page-viewer controls removed)
--- old/usr/src/uts/common/io/qede/qede_gld.c
+++ new/usr/src/uts/common/io/qede/qede_gld.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, v.1, (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://opensource.org/licenses/CDDL-1.0.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2014-2017 Cavium, Inc.
24 24 * The contents of this file are subject to the terms of the Common Development
25 25 * and Distribution License, v.1, (the "License").
 * [25 lines of the unchanged license/copyright comment elided by the diff viewer]
26 26
27 27 * You may not use this file except in compliance with the License.
28 28
29 29 * You can obtain a copy of the License at available
30 30 * at http://opensource.org/licenses/CDDL-1.0
31 31
32 32 * See the License for the specific language governing permissions and
33 33 * limitations under the License.
34 34 */
35 35
36 +/*
37 + * Copyright 2018 Joyent, Inc.
38 + */
36 39
37 40 #include "qede.h"
38 41
/*
 * Fastpath lock helpers.  NOTE(review): each expansion carries a
 * trailing semicolon, so these are safe only as stand-alone
 * statements (an unbraced if/else body would break) — confirm
 * call sites before restructuring.
 */
 39 42 #define FP_LOCK(ptr) \
 40 43 	mutex_enter(&ptr->fp_lock);
 41 44 #define FP_UNLOCK(ptr) \
 42 45 	mutex_exit(&ptr->fp_lock);
43 46
44 47 int
45 48 qede_ucst_find(qede_t *qede, const uint8_t *mac_addr)
46 49 {
47 50 int slot;
48 51
49 52 for(slot = 0; slot < qede->ucst_total; slot++) {
50 53 if (bcmp(qede->ucst_mac[slot].mac_addr.ether_addr_octet,
51 54 mac_addr, ETHERADDRL) == 0) {
52 55 return (slot);
53 56 }
54 57 }
55 58 return (-1);
56 59
57 60 }
58 61
59 62 static int
60 63 qede_set_mac_addr(qede_t *qede, uint8_t *mac_addr, uint8_t fl)
61 64 {
62 65 struct ecore_filter_ucast params;
63 66
64 67 memset(¶ms, 0, sizeof (params));
65 68
66 69 params.opcode = fl;
67 70 params.type = ECORE_FILTER_MAC;
68 71 params.is_rx_filter = true;
69 72 params.is_tx_filter = true;
70 73 COPY_ETH_ADDRESS(mac_addr, params.mac);
71 74
72 75 return (ecore_filter_ucast_cmd(&qede->edev,
73 76 ¶ms, ECORE_SPQ_MODE_EBLOCK, NULL));
74 77
75 78
76 79 }
77 80 static int
78 81 qede_add_macaddr(qede_t *qede, uint8_t *mac_addr)
79 82 {
80 83 int i, ret = 0;
81 84
82 85 i = qede_ucst_find(qede, mac_addr);
83 86 if (i != -1) {
84 87 /* LINTED E_ARGUMENT_MISMATCH */
85 88 qede_info(qede, "mac addr already added %d\n",
86 89 qede->ucst_avail);
87 90 return (0);
88 91 }
89 92 if (qede->ucst_avail == 0) {
90 93 qede_info(qede, "add macaddr ignored \n");
91 94 return (ENOSPC);
92 95 }
93 96 for (i = 0; i < qede->ucst_total; i++) {
94 97 if (qede->ucst_mac[i].set == 0) {
95 98 break;
96 99 }
97 100 }
98 101 if (i >= qede->ucst_total) {
99 102 qede_info(qede, "add macaddr ignored no space");
100 103 return (ENOSPC);
101 104 }
102 105 ret = qede_set_mac_addr(qede, (uint8_t *)mac_addr, ECORE_FILTER_ADD);
103 106 if (ret == 0) {
104 107 bcopy(mac_addr,
105 108 qede->ucst_mac[i].mac_addr.ether_addr_octet,
106 109 ETHERADDRL);
107 110 qede->ucst_mac[i].set = 1;
108 111 qede->ucst_avail--;
109 112 /* LINTED E_ARGUMENT_MISMATCH */
110 113 qede_info(qede, " add macaddr passed for addr "
111 114 "%02x:%02x:%02x:%02x:%02x:%02x",
112 115 mac_addr[0], mac_addr[1],
113 116 mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
114 117 } else {
115 118 /* LINTED E_ARGUMENT_MISMATCH */
116 119 qede_info(qede, "add macaddr failed for addr "
117 120 "%02x:%02x:%02x:%02x:%02x:%02x",
118 121 mac_addr[0], mac_addr[1],
119 122 mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
120 123
121 124 }
122 125 if (qede->ucst_avail == (qede->ucst_total -1)) {
123 126 u8 bcast_addr[] =
124 127 {
125 128 0xff, 0xff, 0xff, 0xff, 0xff,
126 129 0xff
127 130 };
128 131 for (i = 0; i < qede->ucst_total; i++) {
129 132 if (qede->ucst_mac[i].set == 0)
130 133 break;
131 134 }
132 135 ret = qede_set_mac_addr(qede,
133 136 (uint8_t *)bcast_addr, ECORE_FILTER_ADD);
134 137 if (ret == 0) {
135 138 bcopy(bcast_addr,
136 139 qede->ucst_mac[i].mac_addr.ether_addr_octet,
137 140 ETHERADDRL);
138 141 qede->ucst_mac[i].set = 1;
139 142 qede->ucst_avail--;
140 143 } else {
141 144
142 145 /* LINTED E_ARGUMENT_MISMATCH */
143 146 qede_info(qede, "add macaddr failed for addr "
144 147 "%02x:%02x:%02x:%02x:%02x:%02x",
145 148 mac_addr[0], mac_addr[1],
146 149 mac_addr[2], mac_addr[3], mac_addr[4],
147 150 mac_addr[5]);
148 151 }
149 152
150 153 }
151 154
152 155 return (ret);
153 156
154 157 }
155 158
156 159 #ifndef ILLUMOS
157 160 static int
158 161 qede_add_mac_addr(void *arg, const uint8_t *mac_addr, const uint64_t flags)
159 162 #else
160 163 static int
161 164 qede_add_mac_addr(void *arg, const uint8_t *mac_addr)
162 165 #endif
163 166 {
164 167 qede_mac_group_t *rx_group = (qede_mac_group_t *)arg;
165 168 qede_t *qede = rx_group->qede;
166 169 int ret = DDI_SUCCESS;
167 170
168 171 /* LINTED E_ARGUMENT_MISMATCH */
169 172 qede_info(qede, " mac addr :" MAC_STRING, MACTOSTR(mac_addr));
170 173
171 174 mutex_enter(&qede->gld_lock);
172 175 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
173 176 mutex_exit(&qede->gld_lock);
174 177 return (ECANCELED);
175 178 }
176 179 ret = qede_add_macaddr(qede, (uint8_t *)mac_addr);
177 180
178 181 mutex_exit(&qede->gld_lock);
179 182
180 183
181 184 return (ret);
182 185 }
183 186
184 187 static int
185 188 qede_rem_macaddr(qede_t *qede, uint8_t *mac_addr)
186 189 {
187 190 int ret = 0;
188 191 int i;
189 192
190 193 i = qede_ucst_find(qede, mac_addr);
191 194 if (i == -1) {
192 195 /* LINTED E_ARGUMENT_MISMATCH */
193 196 qede_info(qede,
194 197 "mac addr not there to remove",
195 198 MAC_STRING, MACTOSTR(mac_addr));
196 199 return (0);
197 200 }
198 201 if (qede->ucst_mac[i].set == 0) {
199 202 return (EINVAL);
200 203 }
201 204 ret = qede_set_mac_addr(qede, (uint8_t *)mac_addr, ECORE_FILTER_REMOVE);
202 205 if (ret == 0) {
203 206 bzero(qede->ucst_mac[i].mac_addr.ether_addr_octet,ETHERADDRL);
204 207 qede->ucst_mac[i].set = 0;
205 208 qede->ucst_avail++;
206 209 } else {
207 210 /* LINTED E_ARGUMENT_MISMATCH */
208 211 qede_info(qede, "mac addr remove failed",
209 212 MAC_STRING, MACTOSTR(mac_addr));
210 213 }
211 214 return (ret);
212 215
213 216 }
214 217
215 218
216 219 static int
217 220 qede_rem_mac_addr(void *arg, const uint8_t *mac_addr)
218 221 {
219 222 qede_mac_group_t *rx_group = (qede_mac_group_t *)arg;
220 223 qede_t *qede = rx_group->qede;
221 224 int ret = DDI_SUCCESS;
222 225
223 226 /* LINTED E_ARGUMENT_MISMATCH */
224 227 qede_info(qede, "mac addr remove:" MAC_STRING, MACTOSTR(mac_addr));
225 228 mutex_enter(&qede->gld_lock);
226 229 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
227 230 mutex_exit(&qede->gld_lock);
228 231 return (ECANCELED);
229 232 }
230 233 ret = qede_rem_macaddr(qede, (uint8_t *)mac_addr);
231 234 mutex_exit(&qede->gld_lock);
232 235 return (ret);
233 236 }
234 237
235 238
236 239 static int
237 240 qede_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
238 241 {
239 242 int ret = 0;
240 243
241 244 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
242 245 qede_tx_ring_t *tx_ring = fp->tx_ring[0];
243 246 qede_t *qede = fp->qede;
244 247
245 248
246 249 if (qede->qede_state == QEDE_STATE_SUSPENDED)
247 250 return (ECANCELED);
248 251
249 252 switch (stat) {
250 253 case MAC_STAT_OBYTES:
251 254 *val = tx_ring->tx_byte_count;
252 255 break;
253 256
254 257 case MAC_STAT_OPACKETS:
255 258 *val = tx_ring->tx_pkt_count;
256 259 break;
257 260
258 261 default:
259 262 *val = 0;
260 263 ret = ENOTSUP;
261 264 }
262 265
263 266 return (ret);
264 267 }
265 268
266 269 #ifndef ILLUMOS
267 270 static mblk_t *
268 271 qede_rx_ring_poll(void *arg, int poll_bytes, int poll_pkts)
269 272 {
270 273 #else
271 274 static mblk_t *
272 275 qede_rx_ring_poll(void *arg, int poll_bytes)
273 276 {
274 277 /* XXX pick a value at the moment */
275 278 int poll_pkts = 100;
276 279 #endif
277 280 qede_fastpath_t *fp = (qede_fastpath_t *)arg;
278 281 mblk_t *mp = NULL;
279 282 int work_done = 0;
280 283 qede_t *qede = fp->qede;
281 284
282 285 if (poll_bytes == 0) {
283 286 return (NULL);
284 287 }
285 288
286 289 mutex_enter(&fp->fp_lock);
287 290 qede->intrSbPollCnt[fp->vect_info->vect_index]++;
288 291
289 292 mp = qede_process_fastpath(fp, poll_bytes, poll_pkts, &work_done);
290 293 if (mp != NULL) {
291 294 fp->rx_ring->rx_poll_cnt++;
292 295 } else if ((mp == NULL) && (work_done == 0)) {
293 296 qede->intrSbPollNoChangeCnt[fp->vect_info->vect_index]++;
294 297 }
295 298
296 299 mutex_exit(&fp->fp_lock);
297 300 return (mp);
298 301 }
299 302
300 303 #ifndef ILLUMOS
301 304 static int
302 305 qede_rx_ring_intr_enable(mac_ring_driver_t rh)
303 306 #else
304 307 static int
305 308 qede_rx_ring_intr_enable(mac_intr_handle_t rh)
306 309 #endif
307 310 {
308 311 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
309 312
310 313 mutex_enter(&fp->qede->drv_lock);
311 314 if (!fp->sb_phys && (fp->sb_dma_handle == NULL)) {
312 315 mutex_exit(&fp->qede->drv_lock);
313 316 return (DDI_FAILURE);
314 317 }
315 318
316 319 fp->rx_ring->intrEnableCnt++;
317 320 qede_enable_hw_intr(fp);
318 321 fp->disabled_by_poll = 0;
319 322 mutex_exit(&fp->qede->drv_lock);
320 323
321 324 return (DDI_SUCCESS);
322 325 }
323 326
324 327 #ifndef ILLUMOS
325 328 static int
326 329 qede_rx_ring_intr_disable(mac_ring_driver_t rh)
327 330 #else
328 331 static int
329 332 qede_rx_ring_intr_disable(mac_intr_handle_t rh)
330 333 #endif
331 334 {
332 335 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
333 336
334 337 mutex_enter(&fp->qede->drv_lock);
335 338 if (!fp->sb_phys && (fp->sb_dma_handle == NULL)) {
336 339 mutex_exit(&fp->qede->drv_lock);
337 340 return (DDI_FAILURE);
338 341 }
339 342 fp->rx_ring->intrDisableCnt++;
340 343 qede_disable_hw_intr(fp);
341 344 fp->disabled_by_poll = 1;
342 345 mutex_exit(&fp->qede->drv_lock);
343 346 return (DDI_SUCCESS);
344 347 }
345 348
346 349 static int
347 350 qede_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
348 351 {
349 352
350 353 int ret = 0;
351 354
352 355 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
353 356 qede_t *qede = fp->qede;
354 357 qede_rx_ring_t *rx_ring = fp->rx_ring;
355 358
356 359 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
357 360 return (ECANCELED);
358 361 }
359 362
360 363 switch (stat) {
361 364 case MAC_STAT_RBYTES:
362 365 *val = rx_ring->rx_byte_cnt;
363 366 break;
364 367 case MAC_STAT_IPACKETS:
365 368 *val = rx_ring->rx_pkt_cnt;
366 369 break;
367 370 default:
368 371 *val = 0;
369 372 ret = ENOTSUP;
370 373 break;
371 374 }
372 375
373 376 return (ret);
374 377 }
375 378
376 379 static int
377 380 qede_get_global_ring_index(qede_t *qede, int gindex, int rindex)
378 381 {
379 382 qede_fastpath_t *fp;
380 383 qede_rx_ring_t *rx_ring;
381 384 int i = 0;
382 385
383 386 for (i = 0; i < qede->num_fp; i++) {
384 387 fp = &qede->fp_array[i];
385 388 rx_ring = fp->rx_ring;
386 389
387 390 if (rx_ring->group_index == gindex) {
388 391 rindex--;
389 392 }
390 393 if (rindex < 0) {
391 394 return (i);
392 395 }
393 396 }
394 397
395 398 return (-1);
396 399 }
397 400
398 401 static void
399 402 qede_rx_ring_stop(mac_ring_driver_t rh)
400 403 {
401 404 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
402 405 qede_rx_ring_t *rx_ring = fp->rx_ring;
403 406
404 407 qede_print("!%s(%d): called", __func__,fp->qede->instance);
405 408 mutex_enter(&fp->fp_lock);
406 409 rx_ring->mac_ring_started = B_FALSE;
407 410 mutex_exit(&fp->fp_lock);
408 411 }
409 412
410 413 static int
411 414 qede_rx_ring_start(mac_ring_driver_t rh, u64 mr_gen_num)
412 415 {
413 416 qede_fastpath_t *fp = (qede_fastpath_t *)rh;
414 417 qede_rx_ring_t *rx_ring = fp->rx_ring;
415 418
416 419 qede_print("!%s(%d): called", __func__,fp->qede->instance);
417 420 mutex_enter(&fp->fp_lock);
418 421 rx_ring->mr_gen_num = mr_gen_num;
419 422 rx_ring->mac_ring_started = B_TRUE;
420 423 rx_ring->intrDisableCnt = 0;
421 424 rx_ring->intrEnableCnt = 0;
422 425 fp->disabled_by_poll = 0;
423 426
424 427 mutex_exit(&fp->fp_lock);
425 428
426 429 return (DDI_SUCCESS);
427 430 }
428 431
429 432 /* Callback function from mac layer to register rings */
430 433 void
431 434 qede_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
432 435 const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
433 436 {
434 437 qede_t *qede = (qede_t *)arg;
435 438 mac_intr_t *mintr = &infop->mri_intr;
436 439
437 440 switch (rtype) {
438 441 case MAC_RING_TYPE_RX: {
439 442 /*
440 443 * Index passed as a param is the ring index within the
441 444 * given group index. If multiple groups are supported
442 445 * then need to search into all groups to find out the
443 446 * global ring index for the passed group relative
444 447 * ring index
445 448 */
446 449 int global_ring_index = qede_get_global_ring_index(qede,
447 450 group_index, ring_index);
448 451 qede_fastpath_t *fp;
449 452 qede_rx_ring_t *rx_ring;
450 453 int i;
451 454
452 455 /*
453 456 * global_ring_index < 0 means group index passed
454 457 * was registered by our driver
455 458 */
456 459 ASSERT(global_ring_index >= 0);
457 460
458 461 if (rh == NULL) {
459 462 cmn_err(CE_WARN, "!rx ring(%d) ring handle NULL",
460 463 global_ring_index);
461 464 }
462 465
463 466 fp = &qede->fp_array[global_ring_index];
464 467 rx_ring = fp->rx_ring;
465 468 fp->qede = qede;
466 469
467 470 rx_ring->mac_ring_handle = rh;
468 471
469 472 qede_info(qede, "rx_ring %d mac_ring_handle %p",
470 473 rx_ring->rss_id, rh);
471 474
472 475 /* mri_driver passed as arg to mac_ring* callbacks */
473 476 infop->mri_driver = (mac_ring_driver_t)fp;
474 477 /*
475 478 * mri_start callback will supply a mac rings generation
476 479 * number which is needed while indicating packets
477 480 * upstream via mac_ring_rx() call
478 481 */
479 482 infop->mri_start = qede_rx_ring_start;
480 483 infop->mri_stop = qede_rx_ring_stop;
481 484 infop->mri_poll = qede_rx_ring_poll;
482 485 infop->mri_stat = qede_rx_ring_stat;
483 486
484 487 mintr->mi_handle = (mac_intr_handle_t)fp;
485 488 mintr->mi_enable = qede_rx_ring_intr_enable;
486 489 mintr->mi_disable = qede_rx_ring_intr_disable;
487 490 if (qede->intr_ctx.intr_type_in_use &
488 491 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
489 492 mintr->mi_ddi_handle =
490 493 qede->intr_ctx.
491 494 intr_hdl_array[global_ring_index + qede->num_hwfns];
492 495 }
493 496 break;
494 497 }
495 498 case MAC_RING_TYPE_TX: {
496 499 qede_fastpath_t *fp;
497 500 qede_tx_ring_t *tx_ring;
498 501 int i, tc;
499 502
500 503 ASSERT(ring_index < qede->num_fp);
501 504
502 505 fp = &qede->fp_array[ring_index];
503 506 fp->qede = qede;
504 507 tx_ring = fp->tx_ring[0];
505 508 tx_ring->mac_ring_handle = rh;
506 509 qede_info(qede, "tx_ring %d mac_ring_handle %p",
507 510 tx_ring->tx_queue_index, rh);
508 511 infop->mri_driver = (mac_ring_driver_t)fp;
509 512 infop->mri_start = NULL;
510 513 infop->mri_stop = NULL;
511 514 infop->mri_tx = qede_ring_tx;
512 515 infop->mri_stat = qede_tx_ring_stat;
513 516 if (qede->intr_ctx.intr_type_in_use &
514 517 (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
515 518 mintr->mi_ddi_handle =
516 519 qede->intr_ctx.
517 520 intr_hdl_array[ring_index + qede->num_hwfns];
518 521 }
519 522 break;
520 523 }
521 524 default:
522 525 break;
523 526 }
524 527 }
525 528
526 529 /*
527 530 * Callback function from mac layer to register group
528 531 */
529 532 void
530 533 qede_fill_group(void *arg, mac_ring_type_t rtype, const int index,
531 534 mac_group_info_t *infop, mac_group_handle_t gh)
532 535 {
533 536 qede_t *qede = (qede_t *)arg;
534 537
535 538 switch (rtype) {
536 539 case MAC_RING_TYPE_RX: {
537 540 qede_mac_group_t *rx_group;
538 541
539 542 rx_group = &qede->rx_groups[index];
540 543 rx_group->group_handle = gh;
541 544 rx_group->group_index = index;
542 545 rx_group->qede = qede;
543 546 infop->mgi_driver = (mac_group_driver_t)rx_group;
544 547 infop->mgi_start = NULL;
545 548 infop->mgi_stop = NULL;
546 549 #ifndef ILLUMOS
547 550 infop->mgi_addvlan = NULL;
548 551 infop->mgi_remvlan = NULL;
549 552 infop->mgi_getsriov_info = NULL;
550 553 infop->mgi_setmtu = NULL;
551 554 #endif
552 555 infop->mgi_addmac = qede_add_mac_addr;
553 556 infop->mgi_remmac = qede_rem_mac_addr;
554 557 infop->mgi_count = qede->num_fp;
555 558 #ifndef ILLUMOS
556 559 if (index == 0) {
557 560 infop->mgi_flags = MAC_GROUP_DEFAULT;
558 561 }
559 562 #endif
560 563
561 564 break;
562 565 }
563 566 case MAC_RING_TYPE_TX: {
564 567 qede_mac_group_t *tx_group;
565 568
566 569 tx_group = &qede->tx_groups[index];
567 570 tx_group->group_handle = gh;
568 571 tx_group->group_index = index;
569 572 tx_group->qede = qede;
570 573
571 574 infop->mgi_driver = (mac_group_driver_t)tx_group;
572 575 infop->mgi_start = NULL;
573 576 infop->mgi_stop = NULL;
574 577 infop->mgi_addmac = NULL;
575 578 infop->mgi_remmac = NULL;
576 579 #ifndef ILLUMOS
577 580 infop->mgi_addvlan = NULL;
578 581 infop->mgi_remvlan = NULL;
579 582 infop->mgi_setmtu = NULL;
580 583 infop->mgi_getsriov_info = NULL;
581 584 #endif
582 585
583 586 infop->mgi_count = qede->num_fp;
584 587
585 588 #ifndef ILLUMOS
586 589 if (index == 0) {
587 590 infop->mgi_flags = MAC_GROUP_DEFAULT;
588 591 }
589 592 #endif
590 593 break;
591 594 }
592 595 default:
593 596 break;
594 597 }
595 598 }
596 599
597 600 #ifdef ILLUMOS
598 601 static int
599 602 qede_transceiver_info(void *arg, uint_t id, mac_transceiver_info_t *infop)
600 603 {
601 604 qede_t *qede = arg;
602 605 struct ecore_dev *edev = &qede->edev;
603 606 struct ecore_hwfn *hwfn;
604 607 struct ecore_ptt *ptt;
605 608 uint32_t transceiver_state;
606 609
607 610 if (id >= edev->num_hwfns || arg == NULL || infop == NULL)
608 611 return (EINVAL);
609 612
610 613 hwfn = &edev->hwfns[id];
611 614 ptt = ecore_ptt_acquire(hwfn);
612 615 if (ptt == NULL) {
613 616 return (EIO);
614 617 }
615 618 /*
616 619 * Use the underlying raw API to get this information. While the
617 620 * ecore_phy routines have some ways of getting to this information, it
618 621 * ends up writing the raw data as ASCII characters which doesn't help
619 622 * us one bit.
620 623 */
621 624 transceiver_state = ecore_rd(hwfn, ptt, hwfn->mcp_info->port_addr +
622 625 OFFSETOF(struct public_port, transceiver_data));
623 626 transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
624 627 ecore_ptt_release(hwfn, ptt);
625 628
626 629 if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) != 0) {
627 630 mac_transceiver_info_set_present(infop, B_TRUE);
628 631 /*
629 632 * Based on our testing, the ETH_TRANSCEIVER_STATE_VALID flag is
630 633 * not set, so we cannot rely on it. Instead, we have found that
631 634 * the ETH_TRANSCEIVER_STATE_UPDATING will be set when we cannot
632 635 * use the transceiver.
633 636 */
634 637 if ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) != 0) {
635 638 mac_transceiver_info_set_usable(infop, B_FALSE);
636 639 } else {
637 640 mac_transceiver_info_set_usable(infop, B_TRUE);
638 641 }
639 642 } else {
640 643 mac_transceiver_info_set_present(infop, B_FALSE);
641 644 mac_transceiver_info_set_usable(infop, B_FALSE);
642 645 }
643 646
644 647 return (0);
645 648 }
646 649
647 650 static int
648 651 qede_transceiver_read(void *arg, uint_t id, uint_t page, void *buf,
649 652 size_t nbytes, off_t offset, size_t *nread)
650 653 {
651 654 qede_t *qede = arg;
652 655 struct ecore_dev *edev = &qede->edev;
653 656 struct ecore_hwfn *hwfn;
654 657 uint32_t port, lane;
655 658 struct ecore_ptt *ptt;
656 659 enum _ecore_status_t ret;
657 660
658 661 if (id >= edev->num_hwfns || buf == NULL || nbytes == 0 || nread == NULL ||
659 662 (page != 0xa0 && page != 0xa2) || offset < 0)
660 663 return (EINVAL);
661 664
662 665 /*
663 666 * Both supported pages have a length of 256 bytes, ensure nothing asks
664 667 * us to go beyond that.
665 668 */
666 669 if (nbytes > 256 || offset >= 256 || (offset + nbytes > 256)) {
667 670 return (EINVAL);
668 671 }
669 672
670 673 hwfn = &edev->hwfns[id];
671 674 ptt = ecore_ptt_acquire(hwfn);
672 675 if (ptt == NULL) {
673 676 return (EIO);
674 677 }
675 678
676 679 ret = ecore_mcp_phy_sfp_read(hwfn, ptt, hwfn->port_id, page, offset,
677 680 nbytes, buf);
678 681 ecore_ptt_release(hwfn, ptt);
679 682 if (ret != ECORE_SUCCESS) {
680 683 return (EIO);
681 684 }
682 685 *nread = nbytes;
683 686 return (0);
684 687 }
685 688 #endif /* ILLUMOS */
686 689
687 690
688 691 static int
689 692 qede_mac_stats(void * arg,
690 693 uint_t stat,
691 694 uint64_t * value)
692 695 {
693 696 qede_t * qede = (qede_t *)arg;
694 697 struct ecore_eth_stats vstats;
695 698 struct ecore_dev *edev = &qede->edev;
696 699 struct qede_link_cfg lnkcfg;
697 700 int rc = 0;
698 701 qede_fastpath_t *fp = &qede->fp_array[0];
699 702 qede_rx_ring_t *rx_ring;
700 703 qede_tx_ring_t *tx_ring;
701 704
702 705 if ((qede == NULL) || (value == NULL)) {
703 706 return EINVAL;
704 707 }
705 708
706 709
707 710 mutex_enter(&qede->gld_lock);
708 711
709 712 if(qede->qede_state != QEDE_STATE_STARTED) {
710 713 mutex_exit(&qede->gld_lock);
711 714 return EAGAIN;
712 715 }
713 716
714 717 *value = 0;
715 718
716 719 memset(&vstats, 0, sizeof(struct ecore_eth_stats));
717 720 ecore_get_vport_stats(edev, &vstats);
718 721
719 722
720 723 memset(&qede->curcfg, 0, sizeof(struct qede_link_cfg));
721 724 qede_get_link_info(&edev->hwfns[0], &qede->curcfg);
722 725
723 726
724 727
725 728 switch (stat)
726 729 {
727 730 case MAC_STAT_IFSPEED:
728 731 *value = (qede->props.link_speed * 1000000ULL);
729 732 break;
730 733 case MAC_STAT_MULTIRCV:
731 734 *value = vstats.common.rx_mcast_pkts;
732 735 break;
733 736 case MAC_STAT_BRDCSTRCV:
734 737 *value = vstats.common.rx_bcast_pkts;
735 738 break;
736 739 case MAC_STAT_MULTIXMT:
737 740 *value = vstats.common.tx_mcast_pkts;
738 741 break;
739 742 case MAC_STAT_BRDCSTXMT:
740 743 *value = vstats.common.tx_bcast_pkts;
741 744 break;
742 745 case MAC_STAT_NORCVBUF:
743 746 *value = vstats.common.no_buff_discards;
744 747 break;
745 748 case MAC_STAT_NOXMTBUF:
746 749 *value = 0;
747 750 break;
748 751 case MAC_STAT_IERRORS:
749 752 case ETHER_STAT_MACRCV_ERRORS:
750 753 *value = vstats.common.mac_filter_discards +
751 754 vstats.common.packet_too_big_discard +
752 755 vstats.common.rx_crc_errors;
753 756 break;
754 757
755 758 case MAC_STAT_OERRORS:
756 759 break;
757 760
758 761 case MAC_STAT_COLLISIONS:
759 762 *value = vstats.bb.tx_total_collisions;
760 763 break;
761 764
762 765 case MAC_STAT_RBYTES:
763 766 *value = vstats.common.rx_ucast_bytes +
764 767 vstats.common.rx_mcast_bytes +
765 768 vstats.common.rx_bcast_bytes;
766 769 break;
767 770
768 771 case MAC_STAT_IPACKETS:
769 772 *value = vstats.common.rx_ucast_pkts +
770 773 vstats.common.rx_mcast_pkts +
771 774 vstats.common.rx_bcast_pkts;
772 775 break;
773 776
774 777 case MAC_STAT_OBYTES:
775 778 *value = vstats.common.tx_ucast_bytes +
776 779 vstats.common.tx_mcast_bytes +
777 780 vstats.common.tx_bcast_bytes;
778 781 break;
779 782
780 783 case MAC_STAT_OPACKETS:
781 784 *value = vstats.common.tx_ucast_pkts +
782 785 vstats.common.tx_mcast_pkts +
783 786 vstats.common.tx_bcast_pkts;
784 787 break;
785 788
786 789 case ETHER_STAT_ALIGN_ERRORS:
787 790 *value = vstats.common.rx_align_errors;
788 791 break;
789 792
790 793 case ETHER_STAT_FCS_ERRORS:
791 794 *value = vstats.common.rx_crc_errors;
792 795 break;
793 796
794 797 case ETHER_STAT_FIRST_COLLISIONS:
795 798 break;
796 799
797 800 case ETHER_STAT_MULTI_COLLISIONS:
798 801 break;
799 802
800 803 case ETHER_STAT_DEFER_XMTS:
801 804 break;
802 805
803 806 case ETHER_STAT_TX_LATE_COLLISIONS:
804 807 break;
805 808
806 809 case ETHER_STAT_EX_COLLISIONS:
807 810 break;
808 811
809 812 case ETHER_STAT_MACXMT_ERRORS:
810 813 *value = 0;
811 814 break;
812 815
813 816 case ETHER_STAT_CARRIER_ERRORS:
814 817 break;
815 818
816 819 case ETHER_STAT_TOOLONG_ERRORS:
817 820 *value = vstats.common.rx_oversize_packets;
818 821 break;
819 822
820 823 #if (MAC_VERSION > 1)
821 824 case ETHER_STAT_TOOSHORT_ERRORS:
822 825 *value = vstats.common.rx_undersize_packets;
823 826 break;
824 827 #endif
825 828
826 829 case ETHER_STAT_XCVR_ADDR:
827 830 *value = 0;
828 831 break;
829 832
830 833 case ETHER_STAT_XCVR_ID:
831 834 *value = 0;
832 835 break;
833 836
834 837 case ETHER_STAT_XCVR_INUSE:
835 838 switch (qede->props.link_speed) {
836 839 default:
837 840 *value = XCVR_UNDEFINED;
838 841 }
839 842 break;
840 843 #if (MAC_VERSION > 1)
841 844 case ETHER_STAT_CAP_10GFDX:
842 845 *value = 0;
843 846 break;
844 847 #endif
845 848 case ETHER_STAT_CAP_100FDX:
846 849 *value = 0;
847 850 break;
848 851 case ETHER_STAT_CAP_100HDX:
849 852 *value = 0;
850 853 break;
851 854 case ETHER_STAT_CAP_ASMPAUSE:
852 855 *value = 1;
853 856 break;
854 857 case ETHER_STAT_CAP_PAUSE:
855 858 *value = 1;
856 859 break;
857 860 case ETHER_STAT_CAP_AUTONEG:
858 861 *value = 1;
859 862 break;
860 863
861 864 #if (MAC_VERSION > 1)
862 865 case ETHER_STAT_CAP_REMFAULT:
863 866 *value = 0;
864 867 break;
865 868 #endif
866 869
867 870 #if (MAC_VERSION > 1)
868 871 case ETHER_STAT_ADV_CAP_10GFDX:
869 872 *value = 0;
870 873 break;
871 874 #endif
872 875 case ETHER_STAT_ADV_CAP_ASMPAUSE:
873 876 *value = 1;
874 877 break;
875 878
876 879 case ETHER_STAT_ADV_CAP_PAUSE:
877 880 *value = 1;
878 881 break;
879 882
880 883 case ETHER_STAT_ADV_CAP_AUTONEG:
881 884 *value = qede->curcfg.adv_capab.autoneg;
882 885 break;
883 886
884 887 #if (MAC_VERSION > 1)
885 888 case ETHER_STAT_ADV_REMFAULT:
886 889 *value = 0;
887 890 break;
888 891 #endif
889 892
890 893 case ETHER_STAT_LINK_AUTONEG:
891 894 *value = qede->curcfg.autoneg;
892 895 break;
893 896
894 897 case ETHER_STAT_LINK_DUPLEX:
895 898 *value = (qede->props.link_duplex == DUPLEX_FULL) ?
896 899 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
897 900 break;
898 901 /*
899 902 * Supported speeds. These indicate what hardware is capable of.
900 903 */
901 904 case ETHER_STAT_CAP_1000HDX:
902 905 *value = qede->curcfg.supp_capab.param_1000hdx;
903 906 break;
904 907
905 908 case ETHER_STAT_CAP_1000FDX:
906 909 *value = qede->curcfg.supp_capab.param_1000fdx;
907 910 break;
908 911
909 912 case ETHER_STAT_CAP_10GFDX:
910 913 *value = qede->curcfg.supp_capab.param_10000fdx;
911 914 break;
912 915
913 916 case ETHER_STAT_CAP_25GFDX:
914 917 *value = qede->curcfg.supp_capab.param_25000fdx;
915 918 break;
916 919
917 920 case ETHER_STAT_CAP_40GFDX:
918 921 *value = qede->curcfg.supp_capab.param_40000fdx;
919 922 break;
920 923
921 924 case ETHER_STAT_CAP_50GFDX:
922 925 *value = qede->curcfg.supp_capab.param_50000fdx;
923 926 break;
924 927
925 928 case ETHER_STAT_CAP_100GFDX:
926 929 *value = qede->curcfg.supp_capab.param_100000fdx;
927 930 break;
928 931
929 932 /*
930 933 * Advertised speeds. These indicate what hardware is currently sending.
931 934 */
932 935 case ETHER_STAT_ADV_CAP_1000HDX:
933 936 *value = qede->curcfg.adv_capab.param_1000hdx;
934 937 break;
935 938
936 939 case ETHER_STAT_ADV_CAP_1000FDX:
937 940 *value = qede->curcfg.adv_capab.param_1000fdx;
938 941 break;
939 942
940 943 case ETHER_STAT_ADV_CAP_10GFDX:
941 944 *value = qede->curcfg.adv_capab.param_10000fdx;
942 945 break;
943 946
944 947 case ETHER_STAT_ADV_CAP_25GFDX:
945 948 *value = qede->curcfg.adv_capab.param_25000fdx;
946 949 break;
947 950
948 951 case ETHER_STAT_ADV_CAP_40GFDX:
949 952 *value = qede->curcfg.adv_capab.param_40000fdx;
950 953 break;
951 954
952 955 case ETHER_STAT_ADV_CAP_50GFDX:
953 956 *value = qede->curcfg.adv_capab.param_50000fdx;
954 957 break;
955 958
956 959 case ETHER_STAT_ADV_CAP_100GFDX:
957 960 *value = qede->curcfg.adv_capab.param_100000fdx;
958 961 break;
959 962
960 963 default:
961 964 rc = ENOTSUP;
962 965 }
963 966
964 967 mutex_exit(&qede->gld_lock);
965 968 return (rc);
966 969 }
967 970
968 971 /* (flag) TRUE = on, FALSE = off */
969 972 static int
970 973 qede_mac_promiscuous(void *arg,
971 974 boolean_t on)
972 975 {
973 976 qede_t *qede = (qede_t *)arg;
974 977 qede_print("!%s(%d): called", __func__,qede->instance);
975 978 int ret = DDI_SUCCESS;
976 979 enum qede_filter_rx_mode_type mode;
977 980
978 981 mutex_enter(&qede->drv_lock);
979 982
980 983 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
981 984 ret = ECANCELED;
982 985 goto exit;
983 986 }
984 987
985 988 if (on) {
986 989 qede_info(qede, "Entering promiscuous mode");
987 990 mode = QEDE_FILTER_RX_MODE_PROMISC;
988 991 qede->params.promisc_fl = B_TRUE;
989 992 } else {
990 993 qede_info(qede, "Leaving promiscuous mode");
991 994 if(qede->params.multi_promisc_fl == B_TRUE) {
992 995 mode = QEDE_FILTER_RX_MODE_MULTI_PROMISC;
993 996 } else {
994 997 mode = QEDE_FILTER_RX_MODE_REGULAR;
995 998 }
996 999 qede->params.promisc_fl = B_FALSE;
997 1000 }
998 1001
999 1002 ret = qede_set_filter_rx_mode(qede, mode);
1000 1003
1001 1004 exit:
1002 1005 mutex_exit(&qede->drv_lock);
1003 1006 return (ret);
1004 1007 }
1005 1008
1006 1009 int qede_set_rx_mac_mcast(qede_t *qede, enum ecore_filter_opcode opcode,
1007 1010 uint8_t *mac, int mc_cnt)
1008 1011 {
1009 1012 struct ecore_filter_mcast cmd;
1010 1013 int i;
1011 1014 memset(&cmd, 0, sizeof(cmd));
1012 1015 cmd.opcode = opcode;
1013 1016 cmd.num_mc_addrs = mc_cnt;
1014 1017
1015 1018 for (i = 0; i < mc_cnt; i++, mac += ETH_ALLEN) {
1016 1019 COPY_ETH_ADDRESS(mac, cmd.mac[i]);
1017 1020 }
1018 1021
1019 1022
1020 1023 return (ecore_filter_mcast_cmd(&qede->edev, &cmd,
1021 1024 ECORE_SPQ_MODE_CB, NULL));
1022 1025
1023 1026 }
1024 1027
1025 1028 int
1026 1029 qede_set_filter_rx_mode(qede_t * qede, enum qede_filter_rx_mode_type type)
1027 1030 {
1028 1031 struct ecore_filter_accept_flags flg;
1029 1032
1030 1033 memset(&flg, 0, sizeof(flg));
1031 1034
1032 1035 flg.update_rx_mode_config = 1;
1033 1036 flg.update_tx_mode_config = 1;
1034 1037 flg.rx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
1035 1038 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
1036 1039 flg.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED |
1037 1040 ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
1038 1041
1039 1042 if (type == QEDE_FILTER_RX_MODE_PROMISC)
1040 1043 flg.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED |
1041 1044 ECORE_ACCEPT_MCAST_UNMATCHED;
1042 1045 else if (type == QEDE_FILTER_RX_MODE_MULTI_PROMISC)
1043 1046 flg.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
1044 1047 qede_info(qede, "rx_mode rx_filter=0x%x tx_filter=0x%x type=0x%x\n",
1045 1048 flg.rx_accept_filter, flg.tx_accept_filter, type);
1046 1049 return (ecore_filter_accept_cmd(&qede->edev, 0, flg,
1047 1050 0, /* update_accept_any_vlan */
1048 1051 0, /* accept_any_vlan */
1049 1052 ECORE_SPQ_MODE_CB, NULL));
1050 1053 }
1051 1054
1052 1055 int
1053 1056 qede_multicast(qede_t *qede, boolean_t flag, const uint8_t *ptr_mcaddr)
1054 1057 {
1055 1058 int i, ret = DDI_SUCCESS;
1056 1059 qede_mcast_list_entry_t *ptr_mlist;
1057 1060 qede_mcast_list_entry_t *ptr_entry;
1058 1061 int mc_cnt;
1059 1062 unsigned char *mc_macs, *tmpmc;
1060 1063 size_t size;
1061 1064 boolean_t mcmac_exists = B_FALSE;
1062 1065 enum qede_filter_rx_mode_type mode;
1063 1066
1064 1067 if (!ptr_mcaddr) {
1065 1068 cmn_err(CE_NOTE, "Removing all multicast");
1066 1069 } else {
1067 1070 cmn_err(CE_NOTE,
1068 1071 "qede=%p %s multicast: %02x:%02x:%02x:%02x:%02x:%02x",
1069 1072 qede, (flag) ? "Adding" : "Removing", ptr_mcaddr[0],
1070 1073 ptr_mcaddr[1],ptr_mcaddr[2],ptr_mcaddr[3],ptr_mcaddr[4],
1071 1074 ptr_mcaddr[5]);
1072 1075 }
1073 1076
1074 1077
1075 1078 if (flag && (ptr_mcaddr == NULL)) {
1076 1079 cmn_err(CE_WARN, "ERROR: Multicast address not specified");
1077 1080 return EINVAL;
1078 1081 }
1079 1082
1080 1083
1081 1084 /* exceeds addition of mcaddr above limit */
1082 1085 if (flag && (qede->mc_cnt >= MAX_MC_SOFT_LIMIT)) {
1083 1086 qede_info(qede, "Cannot add more than MAX_MC_SOFT_LIMIT");
1084 1087 return ENOENT;
1085 1088 }
1086 1089
1087 1090 size = MAX_MC_SOFT_LIMIT * ETH_ALLEN;
1088 1091
1089 1092 mc_macs = kmem_zalloc(size, KM_NOSLEEP);
1090 1093 if (!mc_macs) {
1091 1094 cmn_err(CE_WARN, "ERROR: Failed to allocate for mc_macs");
1092 1095 return EINVAL;
1093 1096 }
1094 1097
1095 1098 tmpmc = mc_macs;
1096 1099
1097 1100 /* remove all multicast - as flag not set and mcaddr not specified*/
1098 1101 if (!flag && (ptr_mcaddr == NULL)) {
1099 1102 QEDE_LIST_FOR_EACH_ENTRY(ptr_entry,
1100 1103 &qede->mclist.head, qede_mcast_list_entry_t, mclist_entry)
1101 1104 {
1102 1105 if (ptr_entry != NULL) {
1103 1106 QEDE_LIST_REMOVE(&ptr_entry->mclist_entry,
1104 1107 &qede->mclist.head);
1105 1108 kmem_free(ptr_entry,
1106 1109 sizeof (qede_mcast_list_entry_t) + ETH_ALLEN);
1107 1110 }
1108 1111 }
1109 1112
1110 1113 ret = qede_set_rx_mac_mcast(qede,
1111 1114 ECORE_FILTER_REMOVE, mc_macs, 1);
1112 1115 qede->mc_cnt = 0;
1113 1116 goto exit;
1114 1117 }
1115 1118
1116 1119 QEDE_LIST_FOR_EACH_ENTRY(ptr_entry,
1117 1120 &qede->mclist.head, qede_mcast_list_entry_t, mclist_entry)
1118 1121 {
1119 1122 if ((ptr_entry != NULL) &&
1120 1123 IS_ETH_ADDRESS_EQUAL(ptr_mcaddr, ptr_entry->mac)) {
1121 1124 mcmac_exists = B_TRUE;
1122 1125 break;
1123 1126 }
1124 1127 }
1125 1128 if (flag && mcmac_exists) {
1126 1129 ret = DDI_SUCCESS;
1127 1130 goto exit;
1128 1131 } else if (!flag && !mcmac_exists) {
1129 1132 ret = DDI_SUCCESS;
1130 1133 goto exit;
1131 1134 }
1132 1135
1133 1136 if (flag) {
1134 1137 ptr_entry = kmem_zalloc((sizeof (qede_mcast_list_entry_t) +
1135 1138 ETH_ALLEN), KM_NOSLEEP);
1136 1139 ptr_entry->mac = (uint8_t *)ptr_entry +
1137 1140 sizeof (qede_mcast_list_entry_t);
1138 1141 COPY_ETH_ADDRESS(ptr_mcaddr, ptr_entry->mac);
1139 1142 QEDE_LIST_ADD(&ptr_entry->mclist_entry, &qede->mclist.head);
1140 1143 } else {
1141 1144 QEDE_LIST_REMOVE(&ptr_entry->mclist_entry, &qede->mclist.head);
1142 1145 kmem_free(ptr_entry, sizeof(qede_mcast_list_entry_t) +
1143 1146 ETH_ALLEN);
1144 1147 }
1145 1148
1146 1149 mc_cnt = 0;
1147 1150 QEDE_LIST_FOR_EACH_ENTRY(ptr_entry, &qede->mclist.head,
1148 1151 qede_mcast_list_entry_t, mclist_entry) {
1149 1152 COPY_ETH_ADDRESS(ptr_entry->mac, tmpmc);
1150 1153 tmpmc += ETH_ALLEN;
1151 1154 mc_cnt++;
1152 1155 }
1153 1156 qede->mc_cnt = mc_cnt;
1154 1157 if (mc_cnt <=64) {
1155 1158 ret = qede_set_rx_mac_mcast(qede, ECORE_FILTER_ADD,
1156 1159 (unsigned char *)mc_macs, mc_cnt);
1157 1160 if ((qede->params.multi_promisc_fl == B_TRUE) &&
1158 1161 (qede->params.promisc_fl == B_FALSE)) {
1159 1162 mode = QEDE_FILTER_RX_MODE_REGULAR;
1160 1163 ret = qede_set_filter_rx_mode(qede, mode);
1161 1164 }
1162 1165 qede->params.multi_promisc_fl = B_FALSE;
1163 1166 } else {
1164 1167 if ((qede->params.multi_promisc_fl == B_FALSE) &&
1165 1168 (qede->params.promisc_fl = B_FALSE)) {
1166 1169 ret = qede_set_filter_rx_mode(qede,
1167 1170 QEDE_FILTER_RX_MODE_MULTI_PROMISC);
1168 1171 }
1169 1172 qede->params.multi_promisc_fl = B_TRUE;
1170 1173 qede_info(qede, "mode is MULTI_PROMISC");
1171 1174 }
1172 1175 exit:
1173 1176 kmem_free(mc_macs, size);
1174 1177 qede_info(qede, "multicast ret %d mc_cnt %d\n", ret, qede->mc_cnt);
1175 1178 return (ret);
1176 1179 }
1177 1180
1178 1181 /*
1179 1182 * This function is used to enable or disable multicast packet reception for
1180 1183 * particular multicast addresses.
1181 1184 * (flag) TRUE = add, FALSE = remove
1182 1185 */
1183 1186 static int
1184 1187 qede_mac_multicast(void *arg,
1185 1188 boolean_t flag,
1186 1189 const uint8_t * mcast_addr)
1187 1190 {
1188 1191 qede_t *qede = (qede_t *)arg;
1189 1192 int ret = DDI_SUCCESS;
1190 1193
1191 1194
1192 1195 mutex_enter(&qede->gld_lock);
1193 1196 if(qede->qede_state != QEDE_STATE_STARTED) {
1194 1197 mutex_exit(&qede->gld_lock);
1195 1198 return (EAGAIN);
1196 1199 }
1197 1200 ret = qede_multicast(qede, flag, mcast_addr);
1198 1201
1199 1202 mutex_exit(&qede->gld_lock);
1200 1203
1201 1204 return (ret);
1202 1205 }
1203 1206 int
1204 1207 qede_clear_filters(qede_t *qede)
1205 1208 {
1206 1209 int ret = 0;
1207 1210 int i;
1208 1211 if ((qede->params.promisc_fl == B_TRUE) ||
1209 1212 (qede->params.multi_promisc_fl == B_TRUE)) {
1210 1213 ret = qede_set_filter_rx_mode(qede,
1211 1214 QEDE_FILTER_RX_MODE_REGULAR);
1212 1215 if (ret) {
1213 1216 qede_info(qede,
1214 1217 "qede_clear_filters failed to set rx_mode");
1215 1218 }
1216 1219 }
1217 1220 for (i=0; i < qede->ucst_total; i++)
1218 1221 {
1219 1222 if (qede->ucst_mac[i].set) {
1220 1223 qede_rem_macaddr(qede,
1221 1224 qede->ucst_mac[i].mac_addr.ether_addr_octet);
1222 1225 }
1223 1226 }
1224 1227 qede_multicast(qede, B_FALSE, NULL);
1225 1228 return (ret);
1226 1229 }
1227 1230
1228 1231
1229 1232 #ifdef NO_CROSSBOW
/*
 * NO_CROSSBOW stub for the unicast entry point: accepts the address and
 * does nothing.  The cast of arg to qede_t * was unused and triggered a
 * set-but-unused warning with newer GCC; mark both args unused instead.
 */
static int
qede_mac_unicast(void *arg,
    const uint8_t * mac_addr)
{
	(void) arg;
	(void) mac_addr;
	return (0);
}
1237 1240
1238 1241
1239 1242 static mblk_t *
1240 1243 qede_mac_tx(void *arg,
1241 1244 mblk_t * mblk)
1242 1245 {
1243 1246 qede_t *qede = (qede_t *)arg;
1244 1247 qede_fastpath_t *fp = &qede->fp_array[0];
1245 1248
1246 1249 mblk = qede_ring_tx((void *)fp, mblk);
1247 1250
1248 1251 return (mblk);
1249 1252 }
1250 1253 #endif /* NO_CROSSBOW */
1251 1254
1252 1255
/*
 * Loopback modes reported by the LB_GET_INFO ioctl.  External loopback is
 * advertised here but rejected by qede_set_loopback_mode().
 */
static lb_property_t loopmodes[] = {
	{ normal, "normal", QEDE_LOOP_NONE },
	{ internal, "internal", QEDE_LOOP_INTERNAL },
	{ external, "external", QEDE_LOOP_EXTERNAL },
};
1258 1261
/*
 * Set Loopback mode.
 *
 * For NONE and INTERNAL: force the link down, wait for it to drop, program
 * the new loopback_mode into the MCP link parameters, bring the link back
 * up and wait for it to rise.  External loopback is not supported.
 * Returns IOC_REPLY on success, IOC_INVAL for unknown/unsupported modes.
 */
static enum ioc_reply
qede_set_loopback_mode(qede_t *qede, uint32_t mode)
{
	int i = 0;
	struct ecore_dev *edev = &qede->edev;
	struct ecore_hwfn *hwfn;
	struct ecore_ptt *ptt = NULL;
	struct ecore_mcp_link_params *link_params;

	hwfn = &edev->hwfns[0];
	link_params = ecore_mcp_get_link_params(hwfn);
	ptt = ecore_ptt_acquire(hwfn);

	switch(mode) {
	default:
		qede_info(qede, "unknown loopback mode !!");
		ecore_ptt_release(hwfn, ptt);
		return IOC_INVAL;

	case QEDE_LOOP_NONE:
		ecore_mcp_set_link(hwfn, ptt, 0);

		/* Wait up to ~5s (5000 x 1ms) for the link to drop. */
		while (qede->params.link_state && i < 5000) {
			OSAL_MSLEEP(1);
			i++;
		}
		i = 0;

		link_params->loopback_mode = ETH_LOOPBACK_NONE;
		qede->loop_back_mode = QEDE_LOOP_NONE;
		(void) ecore_mcp_set_link(hwfn, ptt, 1);
		ecore_ptt_release(hwfn, ptt);

		/* Wait up to ~5s for the link to come back up. */
		while (!qede->params.link_state && i < 5000) {
			OSAL_MSLEEP(1);
			i++;
		}
		return IOC_REPLY;

	case QEDE_LOOP_INTERNAL:
		qede_print("!%s(%d) : loopback mode (INTERNAL) is set!",
		    __func__, qede->instance);
		ecore_mcp_set_link(hwfn, ptt, 0);

		/* Wait up to ~5s for the link to drop. */
		while(qede->params.link_state && i < 5000) {
			OSAL_MSLEEP(1);
			i++;
		}
		i = 0;
		link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
		qede->loop_back_mode = QEDE_LOOP_INTERNAL;
		(void) ecore_mcp_set_link(hwfn, ptt, 1);
		ecore_ptt_release(hwfn, ptt);

		/* Wait up to ~5s for the link to come back up. */
		while(!qede->params.link_state && i < 5000) {
			OSAL_MSLEEP(1);
			i++;
		}
		return IOC_REPLY;

	case QEDE_LOOP_EXTERNAL:
		qede_print("!%s(%d) : External loopback mode is not supported",
		    __func__, qede->instance);
		ecore_ptt_release(hwfn, ptt);
		return IOC_INVAL;
	}
}
1330 1333
1331 1334 static int
1332 1335 qede_ioctl_pcicfg_rd(qede_t *qede, u32 addr, void *data,
1333 1336 int len)
1334 1337 {
1335 1338 u32 crb, actual_crb;
1336 1339 uint32_t ret = 0;
1337 1340 int cap_offset = 0, cap_id = 0, next_cap = 0;
1338 1341 ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;
1339 1342 qede_ioctl_data_t * data1 = (qede_ioctl_data_t *) data;
1340 1343
1341 1344 cap_offset = pci_config_get8(pci_cfg_handle, PCI_CONF_CAP_PTR);
1342 1345 while (cap_offset != 0) {
1343 1346 /* Check for an invalid PCI read. */
1344 1347 if (cap_offset == PCI_EINVAL8) {
1345 1348 return DDI_FAILURE;
1346 1349 }
1347 1350 cap_id = pci_config_get8(pci_cfg_handle, cap_offset);
1348 1351 if (cap_id == PCI_CAP_ID_PCI_E) {
1349 1352 /* PCIe expr capab struct found */
1350 1353 break;
1351 1354 } else {
1352 1355 next_cap = pci_config_get8(pci_cfg_handle,
1353 1356 cap_offset + 1);
1354 1357 cap_offset = next_cap;
1355 1358 }
1356 1359 }
1357 1360
1358 1361 switch (len) {
1359 1362 case 1:
1360 1363 ret = pci_config_get8(qede->pci_cfg_handle, addr);
1361 1364 (void) memcpy(data, &ret, sizeof(uint8_t));
1362 1365 break;
1363 1366 case 2:
1364 1367 ret = pci_config_get16(qede->pci_cfg_handle, addr);
1365 1368 (void) memcpy(data, &ret, sizeof(uint16_t));
1366 1369 break;
1367 1370 case 4:
1368 1371 ret = pci_config_get32(qede->pci_cfg_handle, addr);
1369 1372 (void) memcpy(data, &ret, sizeof(uint32_t));
1370 1373 break;
1371 1374 default:
1372 1375 cmn_err(CE_WARN, "bad length for pci config read\n");
1373 1376 return (1);
1374 1377 }
1375 1378 return (0);
1376 1379 }
1377 1380
/*
 * Write `len` (1, 2 or 4) bytes to PCI config space at offset `addr`.
 * The user payload is carried in data1->uabc.  Returns 0 on success,
 * 1 on a bad length.
 */
static int
qede_ioctl_pcicfg_wr(qede_t *qede, u32 addr, void *data,
    int len)
{
	uint16_t ret = 0;
	int cap_offset = 0, cap_id = 0, next_cap = 0;
	qede_ioctl_data_t * data1 = (qede_ioctl_data_t *) data;
	ddi_acc_handle_t pci_cfg_handle = qede->pci_cfg_handle;
#if 1
	/*
	 * Scan the PCI capability list for the PCIe capability; the result
	 * is not used by the write paths below.
	 */
	cap_offset = pci_config_get8(pci_cfg_handle, PCI_CONF_CAP_PTR);
	while (cap_offset != 0) {
		cap_id = pci_config_get8(pci_cfg_handle, cap_offset);
		if (cap_id == PCI_CAP_ID_PCI_E) {
			/* PCIe expr capab struct found */
			break;
		} else {
			next_cap = pci_config_get8(pci_cfg_handle,
			    cap_offset + 1);
			cap_offset = next_cap;
		}
	}
#endif

	switch(len) {
	case 1:
		/*
		 * NOTE(review): this writes the low byte of the `data`
		 * pointer itself, not the user payload; presumably it was
		 * meant to be *(char *)data1->uabc — confirm before fixing.
		 */
		pci_config_put8(qede->pci_cfg_handle, addr,
		    *(char *)&(data));
		break;
	case 2:
		/* Read-modify-write: ORs the user value into the register. */
		ret = pci_config_get16(qede->pci_cfg_handle, addr);
		ret = ret | *(uint16_t *)data1->uabc;

		pci_config_put16(qede->pci_cfg_handle, addr,
		    ret);
		break;
	case 4:
		pci_config_put32(qede->pci_cfg_handle, addr, *(uint32_t *)data1->uabc);
		break;

	default:
		return (1);
	}
	return (0);
}
1422 1425
1423 1426 static int
1424 1427 qede_ioctl_rd_wr_reg(qede_t *qede, void *data)
1425 1428 {
1426 1429 struct ecore_hwfn *p_hwfn;
1427 1430 struct ecore_dev *edev = &qede->edev;
1428 1431 struct ecore_ptt *ptt;
1429 1432 qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
1430 1433 uint32_t ret = 0;
1431 1434 uint8_t cmd = (uint8_t) data1->unused1;
1432 1435 uint32_t addr = data1->off;
1433 1436 uint32_t val = *(uint32_t *)&data1->uabc[1];
1434 1437 uint32_t hwfn_index = *(uint32_t *)&data1->uabc[5];
1435 1438 uint32_t *reg_addr;
1436 1439
1437 1440 if (hwfn_index > qede->num_hwfns) {
1438 1441 cmn_err(CE_WARN, "invalid hwfn index from application\n");
1439 1442 return (EINVAL);
1440 1443 }
1441 1444 p_hwfn = &edev->hwfns[hwfn_index];
1442 1445
1443 1446 switch(cmd) {
1444 1447 case QEDE_REG_READ:
1445 1448 ret = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, addr);
1446 1449 (void) memcpy(data1->uabc, &ret, sizeof(uint32_t));
1447 1450 break;
1448 1451
1449 1452 case QEDE_REG_WRITE:
1450 1453 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, addr, val);
1451 1454 break;
1452 1455
1453 1456 default:
1454 1457 cmn_err(CE_WARN,
1455 1458 "wrong command in register read/write from application\n");
1456 1459 break;
↓ open down ↓ |
132 lines elided |
↑ open up ↑ |
1457 1460 }
1458 1461 return (ret);
1459 1462 }
1460 1463
/*
 * Handle the QEDE_RW_NVRAM ioctl: read, stage/write, or manage device
 * NVRAM contents.  Read data and staged write data larger than one mblk
 * are spread across the ioctl's mblk chain; hdr_size (24) bytes of each
 * leading mblk are ioctl header, and the final mblk's last 4 bytes are
 * not payload.  Always returns DDI_SUCCESS; ecore call results are
 * deliberately ignored.
 */
static int
qede_ioctl_rd_wr_nvram(qede_t *qede, mblk_t *mp)
{
	qede_nvram_data_t *data1 = (qede_nvram_data_t *)(mp->b_cont->b_rptr);
	qede_nvram_data_t *data2, *next_data;
	struct ecore_dev *edev = &qede->edev;
	uint32_t hdr_size = 24, bytes_to_copy, copy_len = 0;
	uint32_t copy_len1 = 0;
	uint32_t addr = data1->off;
	uint32_t size = data1->size, i, buf_size;
	uint8_t cmd, cmd2;
	uint8_t *buf, *tmp_buf;
	mblk_t *mp1;

	cmd = (uint8_t)data1->unused1;

	switch(cmd) {
	case QEDE_NVRAM_CMD_READ:
		buf = kmem_zalloc(size, GFP_KERNEL);
		if(buf == NULL) {
			cmn_err(CE_WARN, "memory allocation failed"
			    " in nvram read ioctl\n");
			return (DDI_FAILURE);
		}
		(void) ecore_mcp_nvm_read(edev, addr, buf, data1->size);

		/* First mblk: skip the ioctl header before the payload. */
		copy_len = (MBLKL(mp->b_cont)) - hdr_size;
		if(copy_len > size) {
			/* Whole result fits in the first mblk. */
			(void) memcpy(data1->uabc, buf, size);
			kmem_free(buf, size);
			//OSAL_FREE(edev, buf);
			break;
		}
		(void) memcpy(data1->uabc, buf, copy_len);
		bytes_to_copy = size - copy_len;
		tmp_buf = ((uint8_t *)buf) + copy_len;
		copy_len1 = copy_len;
		mp1 = mp->b_cont;
		mp1 = mp1->b_cont;

		/* Spill the remainder across the rest of the mblk chain. */
		while (mp1) {
			copy_len = MBLKL(mp1);
			if(mp1->b_cont == NULL) {
				copy_len = MBLKL(mp1) - 4;
			}
			data2 = (qede_nvram_data_t *)mp1->b_rptr;
			if (copy_len > bytes_to_copy) {
				(void) memcpy(data2->uabc, tmp_buf,
				    bytes_to_copy);
				kmem_free(buf, size);
				//OSAL_FREE(edev, buf);
				break;
			}
			(void) memcpy(data2->uabc, tmp_buf, copy_len);
			tmp_buf = tmp_buf + copy_len;
			/*
			 * NOTE(review): doubling copy_len here (and the
			 * bytes_to_copy arithmetic below) looks suspect —
			 * copy_len1 appears to be the intended accumulator.
			 * Confirm against the companion user-space tool.
			 */
			copy_len += copy_len;
			mp1 = mp1->b_cont;
			bytes_to_copy = bytes_to_copy - copy_len;
		}

		kmem_free(buf, size);
		//OSAL_FREE(edev, buf);
		break;

	case QEDE_NVRAM_CMD_WRITE:
		cmd2 = (uint8_t )data1->cmd2;
		size = data1->size;
		addr = data1->off;
		buf_size = size; //data1->buf_size;
		//buf_size = data1->buf_size;

		switch(cmd2){
		case START_NVM_WRITE:
			/* Allocate the staging buffer for a new write. */
			buf = kmem_zalloc(size, GFP_KERNEL);
			//buf = qede->reserved_buf;
			qede->nvm_buf_size = data1->size;
			if(buf == NULL) {
				cmn_err(CE_WARN,
				    "memory allocation failed in START_NVM_WRITE\n");
				return DDI_FAILURE;
			}
			qede->nvm_buf_start = buf;
			cmn_err(CE_NOTE,
			    "buf = %p, size = %x\n", qede->nvm_buf_start, size);
			qede->nvm_buf = buf;
			qede->copy_len = 0;
			//tmp_buf = buf + addr;
			break;

		case ACCUMULATE_NVM_BUF:
			/* Append this ioctl's payload to the staging buffer. */
			tmp_buf = qede->nvm_buf;
			copy_len = MBLKL(mp->b_cont) - hdr_size;
			if(copy_len > buf_size) {
				if (buf_size < qede->nvm_buf_size) {
					(void) memcpy(tmp_buf, data1->uabc, buf_size);
					qede->copy_len = qede->copy_len +
					    buf_size;
				} else {
					(void) memcpy(tmp_buf,
					    data1->uabc, qede->nvm_buf_size);
					qede->copy_len =
					    qede->copy_len + qede->nvm_buf_size;
				}
				tmp_buf = tmp_buf + buf_size;
				qede->nvm_buf = tmp_buf;
				//qede->copy_len = qede->copy_len + buf_size;
				cmn_err(CE_NOTE,
				    "buf_size from app = %x\n", copy_len);
				break;
			}
			(void) memcpy(tmp_buf, data1->uabc, copy_len);
			tmp_buf = tmp_buf + copy_len;
			bytes_to_copy = buf_size - copy_len;
			mp1 = mp->b_cont;
			mp1 = mp1->b_cont;
			copy_len1 = copy_len;

			/* Gather the remaining payload from the mblk chain. */
			while (mp1) {
				copy_len = MBLKL(mp1);
				if (mp1->b_cont == NULL) {
					copy_len = MBLKL(mp1) - 4;
				}
				next_data = (qede_nvram_data_t *) mp1->b_rptr;
				if (copy_len > bytes_to_copy){
					(void) memcpy(tmp_buf, next_data->uabc,
					    bytes_to_copy);
					qede->copy_len = qede->copy_len +
					    bytes_to_copy;
					break;
				}
				(void) memcpy(tmp_buf, next_data->uabc,
				    copy_len);
				qede->copy_len = qede->copy_len + copy_len;
				tmp_buf = tmp_buf + copy_len;
				copy_len = copy_len1 + copy_len;
				bytes_to_copy = bytes_to_copy - copy_len;
				mp1 = mp1->b_cont;
			}
			qede->nvm_buf = tmp_buf;
			break;

		case STOP_NVM_WRITE:
			//qede->nvm_buf = tmp_buf;
			break;
		case READ_BUF:
			/* Debug aid: dump the staging buffer to the console. */
			tmp_buf = (uint8_t *)qede->nvm_buf_start;
			for(i = 0; i < size ; i++){
				cmn_err(CE_NOTE,
				    "buff (%d) : %d\n", i, *tmp_buf);
				tmp_buf ++;
			}
			break;
		}
		break;
	case QEDE_NVRAM_CMD_PUT_FILE_DATA:
		/* Flush the accumulated staging buffer to NVRAM. */
		tmp_buf = qede->nvm_buf_start;
		(void) ecore_mcp_nvm_write(edev, ECORE_PUT_FILE_DATA,
		    addr, tmp_buf, size);
		kmem_free(qede->nvm_buf_start, size);
		//OSAL_FREE(edev, tmp_buf);
		cmn_err(CE_NOTE, "total size = %x, copied size = %x\n",
		    qede->nvm_buf_size, qede->copy_len);
		tmp_buf = NULL;
		qede->nvm_buf = NULL;
		qede->nvm_buf_start = NULL;
		break;

	case QEDE_NVRAM_CMD_SET_SECURE_MODE:
		(void) ecore_mcp_nvm_set_secure_mode(edev, addr);
		break;

	case QEDE_NVRAM_CMD_DEL_FILE:
		(void) ecore_mcp_nvm_del_file(edev, addr);
		break;

	case QEDE_NVRAM_CMD_PUT_FILE_BEGIN:
		(void) ecore_mcp_nvm_put_file_begin(edev, addr);
		break;

	case QEDE_NVRAM_CMD_GET_NVRAM_RESP:
		buf = kmem_zalloc(size, KM_SLEEP);
		(void) ecore_mcp_nvm_resp(edev, buf);
		(void)memcpy(data1->uabc, buf, size);
		kmem_free(buf, size);
		break;

	default:
		cmn_err(CE_WARN,
		    "wrong command in NVRAM read/write from application\n");
		break;
	}
	return (DDI_SUCCESS);
}
1663 1657
1664 1658 static int
1665 1659 qede_get_func_info(qede_t *qede, void *data)
1666 1660 {
1667 1661 qede_link_output_t link_op;
1668 1662 qede_func_info_t func_info;
1669 1663 qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
1670 1664 struct ecore_dev *edev = &qede->edev;
1671 1665 struct ecore_hwfn *hwfn;
1672 1666 struct ecore_mcp_link_params params;
1673 1667 struct ecore_mcp_link_state link;
1674 1668
1675 1669 hwfn = &edev->hwfns[0];
1676 1670
1677 1671 if(hwfn == NULL){
1678 1672 cmn_err(CE_WARN, "(%s) : cannot acquire hwfn\n",
1679 1673 __func__);
1680 1674 return (DDI_FAILURE);
1681 1675 }
1682 1676 memcpy(¶ms, &hwfn->mcp_info->link_input, sizeof(params));
1683 1677 memcpy(&link, &hwfn->mcp_info->link_output, sizeof(link));
1684 1678
1685 1679 if(link.link_up) {
1686 1680 link_op.link_up = true;
1687 1681 }
1688 1682
1689 1683 link_op.supported_caps = SUPPORTED_FIBRE;
1690 1684 if(params.speed.autoneg) {
1691 1685 link_op.supported_caps |= SUPPORTED_Autoneg;
1692 1686 }
1693 1687
1694 1688 if(params.pause.autoneg ||
1695 1689 (params.pause.forced_rx && params.pause.forced_tx)) {
1696 1690 link_op.supported_caps |= SUPPORTED_Asym_Pause;
1697 1691 }
1698 1692
1699 1693 if (params.pause.autoneg || params.pause.forced_rx ||
1700 1694 params.pause.forced_tx) {
1701 1695 link_op.supported_caps |= SUPPORTED_Pause;
1702 1696 }
1703 1697
1704 1698 if (params.speed.advertised_speeds &
1705 1699 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
1706 1700 link_op.supported_caps |= SUPPORTED_1000baseT_Half |
1707 1701 SUPPORTED_1000baseT_Full;
1708 1702 }
1709 1703
1710 1704 if (params.speed.advertised_speeds &
1711 1705 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
1712 1706 link_op.supported_caps |= SUPPORTED_10000baseKR_Full;
1713 1707 }
1714 1708
1715 1709 if (params.speed.advertised_speeds &
1716 1710 NVM_CFG1_PORT_DRV_LINK_SPEED_40G) {
1717 1711 link_op.supported_caps |= SUPPORTED_40000baseLR4_Full;
1718 1712 }
1719 1713
1720 1714 link_op.advertised_caps = link_op.supported_caps;
1721 1715
1722 1716 if(link.link_up) {
1723 1717 link_op.speed = link.speed;
1724 1718 } else {
1725 1719 link_op.speed = 0;
1726 1720 }
1727 1721
1728 1722 link_op.duplex = DUPLEX_FULL;
1729 1723 link_op.port = PORT_FIBRE;
1730 1724
1731 1725 link_op.autoneg = params.speed.autoneg;
1732 1726
1733 1727 /* Link partner capabilities */
1734 1728 if (link.partner_adv_speed &
1735 1729 ECORE_LINK_PARTNER_SPEED_1G_HD) {
1736 1730 link_op.lp_caps |= SUPPORTED_1000baseT_Half;
1737 1731 }
1738 1732
1739 1733 if (link.partner_adv_speed &
1740 1734 ECORE_LINK_PARTNER_SPEED_1G_FD) {
1741 1735 link_op.lp_caps |= SUPPORTED_1000baseT_Full;
1742 1736 }
1743 1737
1744 1738 if (link.partner_adv_speed &
1745 1739 ECORE_LINK_PARTNER_SPEED_10G) {
1746 1740 link_op.lp_caps |= SUPPORTED_10000baseKR_Full;
1747 1741 }
1748 1742
1749 1743 if (link.partner_adv_speed &
1750 1744 ECORE_LINK_PARTNER_SPEED_20G) {
1751 1745 link_op.lp_caps |= SUPPORTED_20000baseKR2_Full;
1752 1746 }
1753 1747
1754 1748 if (link.partner_adv_speed &
1755 1749 ECORE_LINK_PARTNER_SPEED_40G) {
1756 1750 link_op.lp_caps |= SUPPORTED_40000baseLR4_Full;
1757 1751 }
1758 1752
1759 1753 if (link.an_complete) {
1760 1754 link_op.lp_caps |= SUPPORTED_Autoneg;
1761 1755 }
1762 1756
1763 1757 if (link.partner_adv_pause) {
1764 1758 link_op.lp_caps |= SUPPORTED_Pause;
1765 1759 }
1766 1760
1767 1761 if (link.partner_adv_pause == ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE ||
1768 1762 link.partner_adv_pause == ECORE_LINK_PARTNER_BOTH_PAUSE) {
1769 1763 link_op.lp_caps |= SUPPORTED_Asym_Pause;
1770 1764 }
1771 1765
1772 1766 func_info.supported = link_op.supported_caps;
1773 1767 func_info.advertising = link_op.advertised_caps;
1774 1768 func_info.speed = link_op.speed;
1775 1769 func_info.duplex = link_op.duplex;
1776 1770 func_info.port = qede->pci_func & 0x1;
1777 1771 func_info.autoneg = link_op.autoneg;
1778 1772
1779 1773 (void) memcpy(data1->uabc, &func_info, sizeof(qede_func_info_t));
1780 1774
1781 1775 return (0);
1782 1776 }
1783 1777
1784 1778 static int
1785 1779 qede_do_ioctl(qede_t *qede, queue_t *q, mblk_t *mp)
1786 1780 {
1787 1781 qede_ioctl_data_t *up_data;
1788 1782 qede_driver_info_t driver_info;
1789 1783 struct ecore_dev *edev = &qede->edev;
1790 1784 struct ecore_hwfn *hwfn;
1791 1785 struct ecore_ptt *ptt = NULL;
1792 1786 struct mcp_file_att attrib;
1793 1787 uint32_t flash_size;
1794 1788 uint32_t mcp_resp, mcp_param, txn_size;
1795 1789 uint32_t cmd, size, ret = 0;
1796 1790 uint64_t off;
1797 1791 int * up_data1;
1798 1792 void * ptr;
1799 1793 mblk_t *mp1 = mp;
1800 1794 char mac_addr[32];
1801 1795
1802 1796 up_data = (qede_ioctl_data_t *)(mp->b_cont->b_rptr);
1803 1797
1804 1798 cmd = up_data->cmd;
1805 1799 off = up_data->off;
1806 1800 size = up_data->size;
1807 1801
1808 1802 switch (cmd) {
1809 1803 case QEDE_DRV_INFO:
1810 1804 hwfn = &edev->hwfns[0];
1811 1805 ptt = ecore_ptt_acquire(hwfn);
1812 1806
1813 1807 snprintf(driver_info.drv_name, MAX_QEDE_NAME_LEN, "%s", "qede");
1814 1808 snprintf(driver_info.drv_version, QEDE_STR_SIZE,
1815 1809 "v:%s", qede->version);
1816 1810 snprintf(driver_info.mfw_version, QEDE_STR_SIZE,
1817 1811 "%s", qede->versionMFW);
1818 1812 snprintf(driver_info.stormfw_version, QEDE_STR_SIZE,
1819 1813 "%s", qede->versionFW);
1820 1814 snprintf(driver_info.bus_info, QEDE_STR_SIZE,
1821 1815 "%s", qede->bus_dev_func);
1822 1816
1823 1817
1824 1818 /*
1825 1819 * calling ecore_mcp_nvm_rd_cmd to find the flash length, i
1826 1820 * 0x08 is equivalent of NVM_TYPE_MFW_TRACE1
1827 1821 */
1828 1822 ecore_mcp_get_flash_size(hwfn, ptt, &flash_size);
1829 1823 driver_info.eeprom_dump_len = flash_size;
1830 1824 (void) memcpy(up_data->uabc, &driver_info,
1831 1825 sizeof (qede_driver_info_t));
1832 1826 up_data->size = sizeof (qede_driver_info_t);
1833 1827
1834 1828 ecore_ptt_release(hwfn, ptt);
1835 1829 break;
1836 1830
1837 1831 case QEDE_RD_PCICFG:
1838 1832 ret = qede_ioctl_pcicfg_rd(qede, off, up_data->uabc, size);
1839 1833 break;
1840 1834
1841 1835 case QEDE_WR_PCICFG:
1842 1836 ret = qede_ioctl_pcicfg_wr(qede, off, up_data, size);
1843 1837 break;
1844 1838
1845 1839 case QEDE_RW_REG:
1846 1840 ret = qede_ioctl_rd_wr_reg(qede, (void *)up_data);
1847 1841 break;
1848 1842
1849 1843 case QEDE_RW_NVRAM:
1850 1844 ret = qede_ioctl_rd_wr_nvram(qede, mp1);
1851 1845 break;
1852 1846
1853 1847 case QEDE_FUNC_INFO:
1854 1848 ret = qede_get_func_info(qede, (void *)up_data);
1855 1849 break;
1856 1850
1857 1851 case QEDE_MAC_ADDR:
1858 1852 snprintf(mac_addr, sizeof(mac_addr),
1859 1853 "%02x:%02x:%02x:%02x:%02x:%02x",
1860 1854 qede->ether_addr[0], qede->ether_addr[1],
1861 1855 qede->ether_addr[2], qede->ether_addr[3],
1862 1856 qede->ether_addr[4], qede->ether_addr[5]);
1863 1857 (void) memcpy(up_data->uabc, &mac_addr, sizeof(mac_addr));
1864 1858 break;
1865 1859
1866 1860 }
1867 1861 //if (cmd == QEDE_RW_NVRAM) {
1868 1862 // miocack (q, mp, (sizeof(qede_ioctl_data_t)), 0);
1869 1863 // return IOC_REPLY;
1870 1864 //}
1871 1865 miocack (q, mp, (sizeof(qede_ioctl_data_t)), ret);
1872 1866 //miocack (q, mp, 0, ret);
1873 1867 return (IOC_REPLY);
1874 1868 }
1875 1869
1876 1870 static void
1877 1871 qede_ioctl(qede_t *qede, int cmd, queue_t *q, mblk_t *mp)
1878 1872 {
1879 1873 void *ptr;
1880 1874
1881 1875 switch(cmd) {
1882 1876 case QEDE_CMD:
1883 1877 (void) qede_do_ioctl(qede, q, mp);
1884 1878 break;
1885 1879 default :
1886 1880 cmn_err(CE_WARN, "qede ioctl command %x not supported\n", cmd);
1887 1881 break;
1888 1882 }
1889 1883 return;
1890 1884 }
/*
 * Handle the generic LB_* loopback ioctls: report available modes
 * (loopmodes[]), report the current mode, or request a mode change via
 * qede_set_loopback_mode().  Returns IOC_REPLY when the reply mblk has
 * been filled in, IOC_INVAL on malformed requests.
 */
enum ioc_reply
qede_loopback_ioctl(qede_t *qede, queue_t *wq, mblk_t *mp,
    struct iocblk *iocp)
{
	lb_info_sz_t *lb_info_size;
	lb_property_t *lb_prop;
	uint32_t *lb_mode;
	int cmd;

	/*
	 * Validate format of ioctl
	 */
	if(mp->b_cont == NULL) {
		return IOC_INVAL;
	}

	cmd = iocp->ioc_cmd;

	switch(cmd) {
	default:
		qede_print("!%s(%d): unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		return IOC_INVAL;
	case LB_GET_INFO_SIZE:
		/* Caller asks how big the mode table is. */
		if (iocp->ioc_count != sizeof(lb_info_sz_t)) {
			qede_info(qede, "error: ioc_count %d, sizeof %d",
			    iocp->ioc_count, sizeof(lb_info_sz_t));
			return IOC_INVAL;
		}
		lb_info_size = (void *)mp->b_cont->b_rptr;
		*lb_info_size = sizeof(loopmodes);
		return IOC_REPLY;
	case LB_GET_INFO:
		/* Copy out the supported loopback mode table. */
		if (iocp->ioc_count != sizeof (loopmodes)) {
			qede_info(qede, "error: iocp->ioc_count %d, sizepof %d",
			    iocp->ioc_count, sizeof (loopmodes));
			return (IOC_INVAL);
		}
		lb_prop = (void *)mp->b_cont->b_rptr;
		bcopy(loopmodes, lb_prop, sizeof (loopmodes));
		return IOC_REPLY;
	case LB_GET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			qede_info(qede, "iocp->ioc_count %d, sizeof : %d\n",
			    iocp->ioc_count, sizeof (uint32_t));
			return (IOC_INVAL);
		}
		lb_mode = (void *)mp->b_cont->b_rptr;
		*lb_mode = qede->loop_back_mode;
		return IOC_REPLY;
	case LB_SET_MODE:
		if (iocp->ioc_count != sizeof (uint32_t)) {
			qede_info(qede, "iocp->ioc_count %d, sizeof : %d\n",
			    iocp->ioc_count, sizeof (uint32_t));
			return (IOC_INVAL);
		}
		lb_mode = (void *)mp->b_cont->b_rptr;
		return (qede_set_loopback_mode(qede,*lb_mode));
	}
}
1951 1945
/*
 * GLDv3 mc_ioctl(9E) entry point.  Rejects ioctls while suspending or
 * suspended, enforces net_config privilege for QEDE_CMD and LB_SET_MODE,
 * then dispatches to the loopback or driver-private handlers and sends
 * the reply according to the returned ioc_reply status.
 */
static void
qede_mac_ioctl(void *    arg,
    queue_t * wq,
    mblk_t *  mp)
{
	int err, cmd;
	qede_t * qede = (qede_t *)arg;
	struct iocblk *iocp = (struct iocblk *) (uintptr_t)mp->b_rptr;
	enum ioc_reply status = IOC_DONE;
	boolean_t need_privilege = B_TRUE;

	iocp->ioc_error = 0;
	cmd = iocp->ioc_cmd;

	mutex_enter(&qede->drv_lock);
	if ((qede->qede_state == QEDE_STATE_SUSPENDING) ||
	    (qede->qede_state == QEDE_STATE_SUSPENDED)) {
		mutex_exit(&qede->drv_lock);
		miocnak(wq, mp, 0, EINVAL);
		return;
	}

	switch(cmd) {
	case QEDE_CMD:
		break;
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
		/* Read-only loopback queries need no privilege. */
		need_privilege = B_FALSE;
		/* FALLTHROUGH */
	case LB_SET_MODE:
		break;
	default:
		qede_print("!%s(%d) unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		miocnak(wq, mp, 0, EINVAL);
		mutex_exit(&qede->drv_lock);
		return;
	}

	if(need_privilege) {
		err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
		if(err){
			qede_info(qede, "secpolicy() failed");
			miocnak(wq, mp, 0, err);
			mutex_exit(&qede->drv_lock);
			return;
		}
	}

	switch (cmd) {
	default:
		/*
		 * NOTE(review): this path returns without naking the
		 * message (the first switch already screened unknown
		 * commands, so it should be unreachable) — confirm.
		 */
		qede_print("!%s(%d) : unknown ioctl command %x\n",
		    __func__, qede->instance, cmd);
		status = IOC_INVAL;
		mutex_exit(&qede->drv_lock);
		return;
	case LB_GET_INFO_SIZE:
	case LB_GET_INFO:
	case LB_GET_MODE:
	case LB_SET_MODE:
		status = qede_loopback_ioctl(qede, wq, mp, iocp);
		break;
	case QEDE_CMD:
		/* qede_ioctl() acks the message itself. */
		qede_ioctl(qede, cmd, wq, mp);
		status = IOC_DONE;
		break;
	}

	switch(status){
	default:
		qede_print("!%s(%d) : invalid status from ioctl",
		    __func__,qede->instance);
		break;
	case IOC_DONE:
		/*
		 * OK, Reply already sent
		 */

		break;
	case IOC_REPLY:
		mp->b_datap->db_type = iocp->ioc_error == 0 ?
		    M_IOCACK : M_IOCNAK;
		qreply(wq, mp);
		break;
	case IOC_INVAL:
		mutex_exit(&qede->drv_lock);
		//miocack(wq, mp, 0, 0);
		miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
		    EINVAL : iocp->ioc_error);
		return;
	}
	mutex_exit(&qede->drv_lock);
}
2045 2039
2046 2040 extern ddi_dma_attr_t qede_buf2k_dma_attr_txbuf;
2047 2041 extern ddi_dma_attr_t qede_dma_attr_rxbuf;
2048 2042 extern ddi_dma_attr_t qede_dma_attr_desc;
2049 2043
2050 2044 static boolean_t
2051 2045 qede_mac_get_capability(void *arg,
2052 2046 mac_capab_t capability,
2053 2047 void * cap_data)
2054 2048 {
2055 2049 qede_t * qede = (qede_t *)arg;
2056 2050 uint32_t *txflags = cap_data;
2057 2051 boolean_t ret = B_FALSE;
2058 2052
2059 2053 switch (capability) {
2060 2054 case MAC_CAPAB_HCKSUM: {
2061 2055 u32 *tx_flags = cap_data;
2062 2056 /*
2063 2057 * Check if checksum is enabled on
2064 2058 * tx and advertise the cksum capab
2065 2059 * to mac layer accordingly. On Rx
2066 2060 * side checksummed packets are
2067 2061 * reveiced anyway
2068 2062 */
2069 2063 qede_info(qede, "%s tx checksum offload",
2070 2064 (qede->checksum == DEFAULT_CKSUM_OFFLOAD) ?
2071 2065 "Enabling":
2072 2066 "Disabling");
2073 2067
2074 2068 if (qede->checksum != DEFAULT_CKSUM_OFFLOAD) {
2075 2069 ret = B_FALSE;
2076 2070 break;
2077 2071 }
2078 2072 /*
2079 2073 * Hardware does not support ICMPv6 checksumming. Right now the
2080 2074 * GLDv3 doesn't provide us a way to specify that we don't
2081 2075 * support that. As such, we cannot indicate
2082 2076 * HCKSUM_INET_FULL_V6.
2083 2077 */
2084 2078
2085 2079 *tx_flags = HCKSUM_INET_FULL_V4 |
2086 2080 HCKSUM_IPHDRCKSUM;
2087 2081 ret = B_TRUE;
2088 2082 break;
2089 2083 }
2090 2084 case MAC_CAPAB_LSO: {
2091 2085 mac_capab_lso_t *cap_lso = (mac_capab_lso_t *)cap_data;
2092 2086
2093 2087 qede_info(qede, "%s large segmentation offload",
2094 2088 qede->lso_enable ? "Enabling": "Disabling");
2095 2089 if (qede->lso_enable) {
2096 2090 cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2097 2091 cap_lso->lso_basic_tcp_ipv4.lso_max = QEDE_LSO_MAXLEN;
2098 2092 ret = B_TRUE;
2099 2093 }
2100 2094 break;
2101 2095 }
2102 2096 case MAC_CAPAB_RINGS: {
2103 2097 #ifndef NO_CROSSBOW
2104 2098 mac_capab_rings_t *cap_rings = cap_data;
2105 2099 #ifndef ILLUMOS
2106 2100 cap_rings->mr_version = MAC_RINGS_VERSION_1;
2107 2101 #endif
2108 2102
2109 2103 switch (cap_rings->mr_type) {
2110 2104 case MAC_RING_TYPE_RX:
2111 2105 #ifndef ILLUMOS
2112 2106 cap_rings->mr_flags = MAC_RINGS_VLAN_TRANSPARENT;
2113 2107 #endif
2114 2108 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2115 2109 //cap_rings->mr_rnum = 1; /* qede variable */
2116 2110 cap_rings->mr_rnum = qede->num_fp; /* qede variable */
2117 2111 cap_rings->mr_gnum = 1;
2118 2112 cap_rings->mr_rget = qede_fill_ring;
2119 2113 cap_rings->mr_gget = qede_fill_group;
2120 2114 cap_rings->mr_gaddring = NULL;
2121 2115 cap_rings->mr_gremring = NULL;
2122 2116 #ifndef ILLUMOS
2123 2117 cap_rings->mr_ggetringtc = NULL;
2124 2118 #endif
2125 2119 ret = B_TRUE;
2126 2120 break;
2127 2121 case MAC_RING_TYPE_TX:
2128 2122 #ifndef ILLUMOS
2129 2123 cap_rings->mr_flags = MAC_RINGS_VLAN_TRANSPARENT;
2130 2124 #endif
2131 2125 cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2132 2126 //cap_rings->mr_rnum = 1;
2133 2127 cap_rings->mr_rnum = qede->num_fp;
2134 2128 cap_rings->mr_gnum = 0;
2135 2129 cap_rings->mr_rget = qede_fill_ring;
2136 2130 cap_rings->mr_gget = qede_fill_group;
2137 2131 cap_rings->mr_gaddring = NULL;
2138 2132 cap_rings->mr_gremring = NULL;
2139 2133 #ifndef ILLUMOS
2140 2134 cap_rings->mr_ggetringtc = NULL;
2141 2135 #endif
2142 2136 ret = B_TRUE;
2143 2137 break;
2144 2138 default:
2145 2139 ret = B_FALSE;
2146 2140 break;
2147 2141 }
2148 2142 #endif
2149 2143 break; /* CASE MAC_CAPAB_RINGS */
2150 2144 }
2151 2145 #ifdef ILLUMOS
2152 2146 case MAC_CAPAB_TRANSCEIVER: {
2153 2147 mac_capab_transceiver_t *mct = cap_data;
2154 2148
2155 2149 mct->mct_flags = 0;
2156 2150 mct->mct_ntransceivers = qede->edev.num_hwfns;
2157 2151 mct->mct_info = qede_transceiver_info;
2158 2152 mct->mct_read = qede_transceiver_read;
2159 2153
2160 2154 ret = B_TRUE;
2161 2155 break;
2162 2156 }
2163 2157 #endif
2164 2158 default:
2165 2159 break;
2166 2160 }
2167 2161
2168 2162 return (ret);
2169 2163 }
2170 2164
2171 2165 int
2172 2166 qede_configure_link(qede_t *qede, bool op);
2173 2167
2174 2168 static int
2175 2169 qede_mac_set_property(void * arg,
2176 2170 const char * pr_name,
2177 2171 mac_prop_id_t pr_num,
2178 2172 uint_t pr_valsize,
2179 2173 const void * pr_val)
2180 2174 {
2181 2175 qede_t * qede = (qede_t *)arg;
2182 2176 struct ecore_mcp_link_params *link_params;
2183 2177 struct ecore_dev *edev = &qede->edev;
2184 2178 struct ecore_hwfn *hwfn;
2185 2179 int ret_val = 0, i;
2186 2180 uint32_t option;
2187 2181
2188 2182 mutex_enter(&qede->gld_lock);
2189 2183 switch (pr_num)
2190 2184 {
2191 2185 case MAC_PROP_MTU:
2192 2186 bcopy(pr_val, &option, sizeof (option));
2193 2187
2194 2188 if(option == qede->mtu) {
2195 2189 ret_val = 0;
2196 2190 break;
2197 2191 }
2198 2192 if ((option != DEFAULT_JUMBO_MTU) &&
2199 2193 (option != DEFAULT_MTU)) {
2200 2194 ret_val = EINVAL;
2201 2195 break;
2202 2196 }
2203 2197 if(qede->qede_state == QEDE_STATE_STARTED) {
2204 2198 ret_val = EBUSY;
2205 2199 break;
2206 2200 }
2207 2201
2208 2202 ret_val = mac_maxsdu_update(qede->mac_handle, qede->mtu);
2209 2203 if (ret_val == 0) {
2210 2204
2211 2205 qede->mtu = option;
2212 2206 if (option == DEFAULT_JUMBO_MTU) {
2213 2207 qede->jumbo_enable = B_TRUE;
2214 2208 } else {
2215 2209 qede->jumbo_enable = B_FALSE;
2216 2210 }
2217 2211
2218 2212 hwfn = ECORE_LEADING_HWFN(edev);
2219 2213 hwfn->hw_info.mtu = qede->mtu;
2220 2214 ret_val = ecore_mcp_ov_update_mtu(hwfn,
2221 2215 hwfn->p_main_ptt,
2222 2216 hwfn->hw_info.mtu);
2223 2217 if (ret_val != ECORE_SUCCESS) {
2224 2218 qede_print("!%s(%d): MTU change %d option %d"
2225 2219 "FAILED",
2226 2220 __func__,qede->instance, qede->mtu, option);
2227 2221 break;
2228 2222 }
2229 2223 qede_print("!%s(%d): MTU changed %d MTU option"
2230 2224 " %d hwfn %d",
2231 2225 __func__,qede->instance, qede->mtu,
2232 2226 option, hwfn->hw_info.mtu);
2233 2227 }
2234 2228 break;
2235 2229
2236 2230 case MAC_PROP_EN_10GFDX_CAP:
2237 2231 hwfn = &edev->hwfns[0];
2238 2232 link_params = ecore_mcp_get_link_params(hwfn);
2239 2233 if (*(uint8_t *) pr_val) {
2240 2234 link_params->speed.autoneg = 0;
2241 2235 link_params->speed.forced_speed = 10000;
2242 2236 link_params->speed.advertised_speeds =
2243 2237 NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
2244 2238 qede->forced_speed_10G = *(uint8_t *)pr_val;
2245 2239 }
2246 2240 else {
2247 2241 memcpy(link_params,
2248 2242 &qede->link_input_params.default_link_params,
2249 2243 sizeof (struct ecore_mcp_link_params));
2250 2244 qede->forced_speed_10G = *(uint8_t *)pr_val;
2251 2245 }
2252 2246 if (qede->qede_state == QEDE_STATE_STARTED) {
2253 2247 qede_configure_link(qede,1);
2254 2248 } else {
2255 2249 mutex_exit(&qede->gld_lock);
2256 2250 return (0);
2257 2251 }
2258 2252 break;
2259 2253 default:
2260 2254 ret_val = ENOTSUP;
2261 2255 break;
2262 2256 }
2263 2257 mutex_exit(&qede->gld_lock);
2264 2258 return (ret_val);
2265 2259 }
2266 2260
2267 2261 static void
2268 2262 qede_mac_stop(void *arg)
2269 2263 {
2270 2264 qede_t *qede = (qede_t *)arg;
2271 2265 int status;
2272 2266
2273 2267 qede_print("!%s(%d): called",
2274 2268 __func__,qede->instance);
2275 2269 mutex_enter(&qede->drv_lock);
2276 2270 status = qede_stop(qede);
2277 2271 if (status != DDI_SUCCESS) {
2278 2272 qede_print("!%s(%d): qede_stop "
2279 2273 "FAILED",
2280 2274 __func__,qede->instance);
2281 2275 }
2282 2276
2283 2277 mac_link_update(qede->mac_handle, LINK_STATE_UNKNOWN);
2284 2278 mutex_exit(&qede->drv_lock);
2285 2279 }
2286 2280
2287 2281 static int
2288 2282 qede_mac_start(void *arg)
2289 2283 {
2290 2284 qede_t *qede = (qede_t *)arg;
2291 2285 int status;
2292 2286
2293 2287 qede_print("!%s(%d): called", __func__,qede->instance);
2294 2288 if (!mutex_tryenter(&qede->drv_lock)) {
2295 2289 return (EAGAIN);
2296 2290 }
2297 2291
2298 2292 if (qede->qede_state == QEDE_STATE_SUSPENDED) {
2299 2293 mutex_exit(&qede->drv_lock);
2300 2294 return (ECANCELED);
2301 2295 }
2302 2296
2303 2297 status = qede_start(qede);
2304 2298 if (status != DDI_SUCCESS) {
2305 2299 mutex_exit(&qede->drv_lock);
2306 2300 return (EIO);
2307 2301 }
2308 2302
2309 2303 mutex_exit(&qede->drv_lock);
2310 2304
2311 2305 #ifdef DBLK_DMA_PREMAP
2312 2306 qede->pm_handle = mac_pmh_tx_get(qede->mac_handle);
2313 2307 #endif
2314 2308 return (0);
2315 2309 }
2316 2310
2317 2311 static int
2318 2312 qede_mac_get_property(void *arg,
2319 2313 const char *pr_name,
2320 2314 mac_prop_id_t pr_num,
2321 2315 uint_t pr_valsize,
2322 2316 void *pr_val)
2323 2317 {
2324 2318 qede_t *qede = (qede_t *)arg;
2325 2319 struct ecore_dev *edev = &qede->edev;
2326 2320 link_state_t link_state;
2327 2321 link_duplex_t link_duplex;
2328 2322 uint64_t link_speed;
2329 2323 link_flowctrl_t link_flowctrl;
2330 2324 struct qede_link_cfg link_cfg;
2331 2325 qede_link_cfg_t *hw_cfg = &qede->hwinit;
2332 2326 int ret_val = 0;
2333 2327
2334 2328 memset(&link_cfg, 0, sizeof (struct qede_link_cfg));
2335 2329 qede_get_link_info(&edev->hwfns[0], &link_cfg);
2336 2330
2337 2331
2338 2332
2339 2333 switch (pr_num)
2340 2334 {
2341 2335 case MAC_PROP_MTU:
2342 2336
2343 2337 ASSERT(pr_valsize >= sizeof(uint32_t));
2344 2338 bcopy(&qede->mtu, pr_val, sizeof(uint32_t));
2345 2339 break;
2346 2340
2347 2341 case MAC_PROP_DUPLEX:
2348 2342
2349 2343 ASSERT(pr_valsize >= sizeof(link_duplex_t));
2350 2344 link_duplex = (qede->props.link_duplex) ?
2351 2345 LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
2352 2346 bcopy(&link_duplex, pr_val, sizeof(link_duplex_t));
2353 2347 break;
2354 2348
2355 2349 case MAC_PROP_SPEED:
2356 2350
2357 2351 ASSERT(pr_valsize >= sizeof(link_speed));
2358 2352
2359 2353 link_speed = (qede->props.link_speed * 1000000ULL);
2360 2354 bcopy(&link_speed, pr_val, sizeof(link_speed));
2361 2355 break;
2362 2356
2363 2357 case MAC_PROP_STATUS:
2364 2358
2365 2359 ASSERT(pr_valsize >= sizeof(link_state_t));
2366 2360
2367 2361 link_state = (qede->params.link_state) ?
2368 2362 LINK_STATE_UP : LINK_STATE_DOWN;
2369 2363 bcopy(&link_state, pr_val, sizeof(link_state_t));
2370 2364 qede_info(qede, "mac_prop_status %d\n", link_state);
2371 2365 break;
2372 2366
2373 2367 case MAC_PROP_AUTONEG:
2374 2368
2375 2369 *(uint8_t *)pr_val = link_cfg.autoneg;
2376 2370 break;
2377 2371
2378 2372 case MAC_PROP_FLOWCTRL:
2379 2373
2380 2374 ASSERT(pr_valsize >= sizeof(link_flowctrl_t));
2381 2375
2382 2376 /*
2383 2377 * illumos does not have the notion of LINK_FLOWCTRL_AUTO at this time.
2384 2378 */
2385 2379 #ifndef ILLUMOS
2386 2380 if (link_cfg.pause_cfg & QEDE_LINK_PAUSE_AUTONEG_ENABLE) {
2387 2381 link_flowctrl = LINK_FLOWCTRL_AUTO;
2388 2382 }
2389 2383 #endif
2390 2384
2391 2385 if (!(link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
2392 2386 !(link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
2393 2387 link_flowctrl = LINK_FLOWCTRL_NONE;
2394 2388 }
2395 2389 if ((link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
2396 2390 !(link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
2397 2391 link_flowctrl = LINK_FLOWCTRL_RX;
2398 2392 }
2399 2393 if (!(link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
2400 2394 (link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
2401 2395 link_flowctrl = LINK_FLOWCTRL_TX;
2402 2396 }
2403 2397 if ((link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
2404 2398 (link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
2405 2399 link_flowctrl = LINK_FLOWCTRL_BI;
2406 2400 }
2407 2401
2408 2402 bcopy(&link_flowctrl, pr_val, sizeof (link_flowctrl_t));
2409 2403 break;
2410 2404
2411 2405 case MAC_PROP_ADV_10GFDX_CAP:
2412 2406 *(uint8_t *)pr_val = link_cfg.adv_capab.param_10000fdx;
2413 2407 break;
2414 2408
2415 2409 case MAC_PROP_EN_10GFDX_CAP:
2416 2410 *(uint8_t *)pr_val = qede->forced_speed_10G;
2417 2411 break;
2418 2412
2419 2413 case MAC_PROP_PRIVATE:
2420 2414 default:
2421 2415 return (ENOTSUP);
2422 2416
2423 2417 }
2424 2418
2425 2419 return (0);
2426 2420 }
2427 2421
2428 2422 static void
2429 2423 qede_mac_property_info(void *arg,
2430 2424 const char *pr_name,
2431 2425 mac_prop_id_t pr_num,
2432 2426 mac_prop_info_handle_t prh)
2433 2427 {
2434 2428 qede_t *qede = (qede_t *)arg;
2435 2429 qede_link_props_t *def_cfg = &qede_def_link_props;
2436 2430 link_flowctrl_t link_flowctrl;
2437 2431
2438 2432
2439 2433 switch (pr_num)
2440 2434 {
2441 2435
2442 2436 case MAC_PROP_STATUS:
2443 2437 case MAC_PROP_SPEED:
2444 2438 case MAC_PROP_DUPLEX:
2445 2439 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
2446 2440 break;
2447 2441
2448 2442 case MAC_PROP_MTU:
2449 2443
2450 2444 mac_prop_info_set_range_uint32(prh,
2451 2445 MIN_MTU,
2452 2446 MAX_MTU);
2453 2447 break;
2454 2448
2455 2449 case MAC_PROP_AUTONEG:
2456 2450
2457 2451 mac_prop_info_set_default_uint8(prh, def_cfg->autoneg);
2458 2452 break;
2459 2453
2460 2454 case MAC_PROP_FLOWCTRL:
2461 2455
2462 2456 if (!def_cfg->pause) {
2463 2457 link_flowctrl = LINK_FLOWCTRL_NONE;
2464 2458 } else {
2465 2459 link_flowctrl = LINK_FLOWCTRL_BI;
2466 2460 }
2467 2461
2468 2462 mac_prop_info_set_default_link_flowctrl(prh, link_flowctrl);
2469 2463 break;
2470 2464
2471 2465 case MAC_PROP_EN_10GFDX_CAP:
2472 2466 mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
2473 2467 break;
2474 2468
2475 2469 case MAC_PROP_ADV_10GFDX_CAP:
2476 2470 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
2477 2471 break;
2478 2472
2479 2473 default:
2480 2474 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
2481 2475 break;
2482 2476
2483 2477 }
2484 2478 }
2485 2479
/*
 * GLDv3 entry point vector handed to mac_register(9F).  The leading
 * flag word advertises which optional callbacks are implemented; the
 * function pointers follow in mac_callbacks_t order.  Member-name
 * comments below follow the illumos mac_callbacks_t layout — the
 * non-illumos layout may differ (hence the #ifdef'd tx slot).
 */
static mac_callbacks_t qede_callbacks =
{
	(
	    MC_IOCTL
	    /* | MC_RESOURCES */
	    | MC_SETPROP
	    | MC_GETPROP
	    | MC_PROPINFO
	    | MC_GETCAPAB
	),
	qede_mac_stats,		/* mc_getstat */
	qede_mac_start,		/* mc_start */
	qede_mac_stop,		/* mc_stop */
	qede_mac_promiscuous,	/* mc_setpromisc */
	qede_mac_multicast,	/* mc_multicst */
	NULL,			/* mc_unicst (rings in use) */
#ifndef NO_CROSSBOW
	NULL,			/* mc_tx (per-ring tx via MAC_CAPAB_RINGS) */
#else
	qede_mac_tx,
#endif
	NULL, /* qede_mac_resources, */
	qede_mac_ioctl,		/* mc_ioctl */
	qede_mac_get_capability,	/* mc_getcapab */
	NULL,			/* mc_open */
	NULL,			/* mc_close */
	qede_mac_set_property,	/* mc_setprop */
	qede_mac_get_property,	/* mc_getprop */
#ifdef MC_PROPINFO
	qede_mac_property_info	/* mc_propinfo */
#endif
};
2518 2512
2519 2513 boolean_t
2520 2514 qede_gld_init(qede_t *qede)
2521 2515 {
2522 2516 int status, ret;
2523 2517 mac_register_t *macp;
2524 2518
2525 2519 macp = mac_alloc(MAC_VERSION);
2526 2520 if (macp == NULL) {
2527 2521 cmn_err(CE_NOTE, "%s: mac_alloc() failed\n", __func__);
2528 2522 return (B_FALSE);
2529 2523 }
2530 2524
2531 2525 macp->m_driver = qede;
2532 2526 macp->m_dip = qede->dip;
2533 2527 macp->m_instance = qede->instance;
2534 2528 macp->m_priv_props = NULL;
2535 2529 macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2536 2530 macp->m_src_addr = qede->ether_addr;
2537 2531 macp->m_callbacks = &qede_callbacks;
2538 2532 macp->m_min_sdu = 0;
2539 2533 macp->m_max_sdu = qede->mtu;
2540 2534 macp->m_margin = VLAN_TAGSZ;
2541 2535 #ifdef ILLUMOS
2542 2536 macp->m_v12n = MAC_VIRT_LEVEL1;
2543 2537 #endif
2544 2538
2545 2539 status = mac_register(macp, &qede->mac_handle);
2546 2540 if (status != 0) {
2547 2541 cmn_err(CE_NOTE, "%s: mac_register() failed\n", __func__);
2548 2542 }
2549 2543
2550 2544 mac_free(macp);
2551 2545 if (status == 0) {
2552 2546 return (B_TRUE);
2553 2547 }
2554 2548 return (B_FALSE);
2555 2549 }
2556 2550
2557 2551 boolean_t qede_gld_fini(qede_t * qede)
2558 2552 {
2559 2553 return (B_TRUE);
2560 2554 }
2561 2555
2562 2556
2563 2557 void qede_link_update(qede_t * qede,
2564 2558 link_state_t state)
2565 2559 {
2566 2560 mac_link_update(qede->mac_handle, state);
2567 2561 }
2568 2562
↓ open down ↓ |
907 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX