1 /*
   2 * CDDL HEADER START
   3 *
   4 * The contents of this file are subject to the terms of the
   5 * Common Development and Distribution License, v.1,  (the "License").
   6 * You may not use this file except in compliance with the License.
   7 *
   8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9 * or http://opensource.org/licenses/CDDL-1.0.
  10 * See the License for the specific language governing permissions
  11 * and limitations under the License.
  12 *
  13 * When distributing Covered Code, include this CDDL HEADER in each
  14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15 * If applicable, add the following below this CDDL HEADER, with the
  16 * fields enclosed by brackets "[]" replaced with your own identifying
  17 * information: Portions Copyright [yyyy] [name of copyright owner]
  18 *
  19 * CDDL HEADER END
  20 */
  21 
/*
 * Copyright 2014-2017 Cavium, Inc.
 * The contents of this file are subject to the terms of the Common Development
 * and Distribution License, v.1, (the "License").
 *
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the License at
 * http://opensource.org/licenses/CDDL-1.0
 *
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
  35 
  36 /*
  37  * Copyright 2018 Joyent, Inc.
  38  */
  39 
  40 #include "qede.h"
  41 
#define FP_LOCK(ptr)    \
        mutex_enter(&(ptr)->fp_lock);
#define FP_UNLOCK(ptr)  \
        mutex_exit(&(ptr)->fp_lock);
  46 
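/*
 * Return the index of mac_addr in the driver's unicast filter table,
 * or -1 if the address is not programmed.
 */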
  47 int
  48 qede_ucst_find(qede_t *qede, const uint8_t *mac_addr)
  49 {
  50         int slot;
  51 
        for (slot = 0; slot < qede->ucst_total; slot++) {
  53                 if (bcmp(qede->ucst_mac[slot].mac_addr.ether_addr_octet,
  54                     mac_addr, ETHERADDRL) == 0) {
  55                         return (slot);
  56                 }
  57         }
        return (-1);
}
  61 
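/*
 * Add or remove a single unicast MAC filter in the device.  fl is
 * ECORE_FILTER_ADD or ECORE_FILTER_REMOVE; the filter applies to both
 * the rx and tx paths.
 */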
  62 static int
  63 qede_set_mac_addr(qede_t *qede, uint8_t *mac_addr, uint8_t fl)
  64 {
  65         struct ecore_filter_ucast params;
  66 
  67         memset(&params, 0, sizeof (params));
  68 
  69         params.opcode = fl;
  70         params.type = ECORE_FILTER_MAC;
  71         params.is_rx_filter = true;
  72         params.is_tx_filter = true;
  73         COPY_ETH_ADDRESS(mac_addr, params.mac);
  74 
        return (ecore_filter_ucast_cmd(&qede->edev,
            &params, ECORE_SPQ_MODE_EBLOCK, NULL));
}
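
/*
 * Add mac_addr to the first free slot in the software unicast table and
 * program it into the hardware.  When the first address is installed,
 * the broadcast address is programmed into a filter slot as well.
 */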
static int
qede_add_macaddr(qede_t *qede, uint8_t *mac_addr)
  82 {
  83         int i, ret = 0;
  84 
  85         i = qede_ucst_find(qede, mac_addr);
  86         if (i != -1) {
  87                 /* LINTED E_ARGUMENT_MISMATCH */
  88                 qede_info(qede, "mac addr already added %d\n", 
  89                     qede->ucst_avail);
  90                 return (0);
  91         }
  92         if (qede->ucst_avail == 0) {
  93                 qede_info(qede, "add macaddr ignored \n");
  94                 return (ENOSPC);
  95         }
  96         for (i = 0; i < qede->ucst_total; i++) {
  97                 if (qede->ucst_mac[i].set == 0) {
  98                         break;
  99                 }
 100         }
 101         if (i >= qede->ucst_total) {
 102                 qede_info(qede, "add macaddr ignored no space");
 103                 return (ENOSPC);
 104         }
 105         ret = qede_set_mac_addr(qede, (uint8_t *)mac_addr, ECORE_FILTER_ADD);
 106         if (ret == 0) {
 107                 bcopy(mac_addr, 
 108                     qede->ucst_mac[i].mac_addr.ether_addr_octet,
 109                     ETHERADDRL);
 110                 qede->ucst_mac[i].set = 1;
 111                 qede->ucst_avail--;
 112                 /* LINTED E_ARGUMENT_MISMATCH */
 113                 qede_info(qede,  " add macaddr passed for addr "
 114                     "%02x:%02x:%02x:%02x:%02x:%02x",
 115                     mac_addr[0], mac_addr[1],
 116                     mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
 117         } else {
 118                 /* LINTED E_ARGUMENT_MISMATCH */
 119                 qede_info(qede,  "add macaddr failed for addr "
 120                     "%02x:%02x:%02x:%02x:%02x:%02x",
 121                     mac_addr[0], mac_addr[1],
 122                     mac_addr[2], mac_addr[3], mac_addr[4], mac_addr[5]);
 123 
 124         }
        if (qede->ucst_avail == (qede->ucst_total - 1)) {
                u8 bcast_addr[] = {
                        0xff, 0xff, 0xff, 0xff, 0xff, 0xff
                };

                for (i = 0; i < qede->ucst_total; i++) {
                        if (qede->ucst_mac[i].set == 0)
                                break;
                }
                ret = qede_set_mac_addr(qede,
                    (uint8_t *)bcast_addr, ECORE_FILTER_ADD);
                if (ret == 0) {
                        bcopy(bcast_addr,
                            qede->ucst_mac[i].mac_addr.ether_addr_octet,
                            ETHERADDRL);
                        qede->ucst_mac[i].set = 1;
                        qede->ucst_avail--;
                } else {
                        /* LINTED E_ARGUMENT_MISMATCH */
                        qede_info(qede, "add macaddr failed for broadcast addr "
                            "%02x:%02x:%02x:%02x:%02x:%02x",
                            bcast_addr[0], bcast_addr[1], bcast_addr[2],
                            bcast_addr[3], bcast_addr[4], bcast_addr[5]);
                }
        }

        return (ret);
}
 158 
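/*
 * GLDv3 mgi_addmac callback: add a unicast address on behalf of an
 * rx group.
 */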
 159 #ifndef ILLUMOS
 160 static int
 161 qede_add_mac_addr(void *arg, const uint8_t *mac_addr, const uint64_t flags)
 162 #else
 163 static int
 164 qede_add_mac_addr(void *arg, const uint8_t *mac_addr)
 165 #endif
 166 {
 167         qede_mac_group_t *rx_group = (qede_mac_group_t *)arg;
 168         qede_t *qede = rx_group->qede;
 169         int ret = DDI_SUCCESS;
 170 
 171         /* LINTED E_ARGUMENT_MISMATCH */
 172         qede_info(qede, " mac addr :" MAC_STRING,  MACTOSTR(mac_addr));
 173         
 174         mutex_enter(&qede->gld_lock);
 175         if (qede->qede_state == QEDE_STATE_SUSPENDED) {
 176                 mutex_exit(&qede->gld_lock);
 177                 return (ECANCELED);
 178         }
 179         ret = qede_add_macaddr(qede, (uint8_t *)mac_addr);
 180 
 181         mutex_exit(&qede->gld_lock);
 182 
 183 
 184         return (ret);
 185 }
 186 
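/*
 * Remove mac_addr from the hardware filters and release its slot in the
 * software unicast table.
 */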
 187 static int
 188 qede_rem_macaddr(qede_t *qede, uint8_t *mac_addr)
 189 {
 190         int ret = 0;
 191         int i;
 192 
 193         i = qede_ucst_find(qede, mac_addr);
 194         if (i == -1) {
                /* LINTED E_ARGUMENT_MISMATCH */
                qede_info(qede,
                    "mac addr not there to remove " MAC_STRING,
                    MACTOSTR(mac_addr));
 199                 return (0);
 200         }
 201         if (qede->ucst_mac[i].set == 0) {
 202                 return (EINVAL);
 203         }       
 204         ret = qede_set_mac_addr(qede, (uint8_t *)mac_addr, ECORE_FILTER_REMOVE);
 205         if (ret == 0) {
                bzero(qede->ucst_mac[i].mac_addr.ether_addr_octet,
                    ETHERADDRL);
 207                 qede->ucst_mac[i].set = 0;
 208                 qede->ucst_avail++;
 209         } else {
                /* LINTED E_ARGUMENT_MISMATCH */
                qede_info(qede, "mac addr remove failed " MAC_STRING,
                    MACTOSTR(mac_addr));
 213         }
        return (ret);
}
 217 
 218 
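/*
 * GLDv3 mgi_remmac callback: remove a unicast address from an rx group.
 */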
 219 static int
 220 qede_rem_mac_addr(void *arg, const uint8_t *mac_addr)
 221 {
 222         qede_mac_group_t *rx_group = (qede_mac_group_t *)arg;
 223         qede_t *qede = rx_group->qede;
 224         int ret = DDI_SUCCESS;
 225 
 226         /* LINTED E_ARGUMENT_MISMATCH */
 227         qede_info(qede, "mac addr remove:" MAC_STRING, MACTOSTR(mac_addr));
 228         mutex_enter(&qede->gld_lock);
 229         if (qede->qede_state == QEDE_STATE_SUSPENDED) {
 230                 mutex_exit(&qede->gld_lock);
 231                 return (ECANCELED);
 232         }
 233         ret = qede_rem_macaddr(qede, (uint8_t *)mac_addr);
 234         mutex_exit(&qede->gld_lock);
 235         return (ret);
 236 }
 237 
 238 
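/* Per-ring statistics callback (mri_stat) for a tx ring */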
 239 static int
 240 qede_tx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
 241 {
 242         int ret = 0;
 243 
 244         qede_fastpath_t *fp = (qede_fastpath_t *)rh;
 245         qede_tx_ring_t *tx_ring = fp->tx_ring[0];
 246         qede_t *qede = fp->qede;
 247 
 248 
 249         if (qede->qede_state == QEDE_STATE_SUSPENDED)
 250                 return (ECANCELED);
 251 
 252         switch (stat) {
 253         case MAC_STAT_OBYTES:
 254                 *val = tx_ring->tx_byte_count;
 255                 break;
 256 
 257         case MAC_STAT_OPACKETS:
 258                 *val = tx_ring->tx_pkt_count;
 259                 break;
 260 
 261         default:
 262                 *val = 0;
 263                 ret = ENOTSUP;
 264         }
 265 
 266         return (ret);
 267 }
 268 
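/*
 * mri_poll callback: pull up to poll_bytes (and poll_pkts packets) worth
 * of received frames off the fastpath while its interrupt is disabled.
 */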
 269 #ifndef ILLUMOS
 270 static mblk_t *
 271 qede_rx_ring_poll(void *arg, int poll_bytes, int poll_pkts)
 272 {
 273 #else
 274 static mblk_t *
 275 qede_rx_ring_poll(void *arg, int poll_bytes)
 276 {
        /* XXX arbitrary packet budget for now */
        int poll_pkts = 100;
 279 #endif
 280         qede_fastpath_t *fp = (qede_fastpath_t *)arg;
 281         mblk_t *mp = NULL;
 282         int work_done = 0;
 283         qede_t *qede = fp->qede;
 284 
 285         if (poll_bytes == 0) {
 286                 return (NULL);
 287         }
 288 
 289         mutex_enter(&fp->fp_lock);
 290         qede->intrSbPollCnt[fp->vect_info->vect_index]++;
 291 
 292         mp = qede_process_fastpath(fp, poll_bytes, poll_pkts, &work_done);
 293         if (mp != NULL) {
 294                 fp->rx_ring->rx_poll_cnt++;
 295         } else if ((mp == NULL) && (work_done == 0)) {
 296                 qede->intrSbPollNoChangeCnt[fp->vect_info->vect_index]++;
 297         }
 298 
 299         mutex_exit(&fp->fp_lock);
 300         return (mp);
 301 }
 302 
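/*
 * mi_enable callback: re-enable the fastpath's hardware interrupt when
 * the mac layer takes the rx ring out of polling mode.
 */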
 303 #ifndef ILLUMOS
 304 static int
 305 qede_rx_ring_intr_enable(mac_ring_driver_t rh)
 306 #else
 307 static int
 308 qede_rx_ring_intr_enable(mac_intr_handle_t rh)
 309 #endif
 310 {
 311         qede_fastpath_t *fp = (qede_fastpath_t *)rh;
 312 
 313         mutex_enter(&fp->qede->drv_lock);
 314         if (!fp->sb_phys && (fp->sb_dma_handle == NULL)) {
 315                 mutex_exit(&fp->qede->drv_lock);
 316                 return (DDI_FAILURE);
 317         }
 318 
 319         fp->rx_ring->intrEnableCnt++;
 320         qede_enable_hw_intr(fp);
 321         fp->disabled_by_poll = 0;
 322         mutex_exit(&fp->qede->drv_lock);
 323 
 324         return (DDI_SUCCESS);
 325 }
 326 
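/*
 * mi_disable callback: mask the fastpath's hardware interrupt so the
 * mac layer can poll the rx ring.
 */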
 327 #ifndef ILLUMOS
 328 static int
 329 qede_rx_ring_intr_disable(mac_ring_driver_t rh)
 330 #else
 331 static int
 332 qede_rx_ring_intr_disable(mac_intr_handle_t rh)
 333 #endif
 334 {
 335         qede_fastpath_t *fp = (qede_fastpath_t *)rh;
 336 
 337         mutex_enter(&fp->qede->drv_lock);
 338         if (!fp->sb_phys && (fp->sb_dma_handle == NULL)) {
 339                 mutex_exit(&fp->qede->drv_lock);
 340                 return (DDI_FAILURE);
 341         }
 342         fp->rx_ring->intrDisableCnt++;
 343         qede_disable_hw_intr(fp);
 344         fp->disabled_by_poll = 1;
 345         mutex_exit(&fp->qede->drv_lock);
 346         return (DDI_SUCCESS);
 347 }
 348 
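/* Per-ring statistics callback (mri_stat) for an rx ring */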
 349 static int
 350 qede_rx_ring_stat(mac_ring_driver_t rh, uint_t stat, uint64_t *val)
 351 {
 352 
 353         int ret = 0;
 354 
 355         qede_fastpath_t *fp = (qede_fastpath_t *)rh;
 356         qede_t *qede = fp->qede;
 357         qede_rx_ring_t *rx_ring = fp->rx_ring;
 358 
 359         if (qede->qede_state == QEDE_STATE_SUSPENDED) {
 360                 return (ECANCELED);
 361         }
 362 
 363         switch (stat) {
 364         case MAC_STAT_RBYTES:
 365                 *val = rx_ring->rx_byte_cnt;
 366                 break;
 367         case MAC_STAT_IPACKETS:
 368                 *val = rx_ring->rx_pkt_cnt;
 369                 break;
 370         default:
 371                 *val = 0;
 372                 ret = ENOTSUP;
 373                 break;  
 374         }
 375 
 376         return (ret);
 377 }
 378 
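/*
 * Translate a (group index, ring index within group) pair from the mac
 * layer into an index into qede->fp_array.  Returns -1 if the group has
 * no such ring.
 */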
 379 static int
 380 qede_get_global_ring_index(qede_t *qede, int gindex, int rindex)
 381 {
 382         qede_fastpath_t *fp;
 383         qede_rx_ring_t *rx_ring;
 384         int i = 0;
 385 
 386         for (i = 0; i < qede->num_fp; i++) {
 387                 fp = &qede->fp_array[i];
 388                 rx_ring = fp->rx_ring;
 389 
 390                 if (rx_ring->group_index == gindex) {
 391                         rindex--;
 392                 }
 393                 if (rindex < 0) {
 394                         return (i);
 395                 }
 396         }
 397 
 398         return (-1);
 399 }
 400 
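/* mri_stop callback: mark the rx ring as stopped */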
 401 static void
 402 qede_rx_ring_stop(mac_ring_driver_t rh)
 403 {
 404         qede_fastpath_t *fp = (qede_fastpath_t *)rh;
 405         qede_rx_ring_t *rx_ring = fp->rx_ring;
 406 
 407         qede_print("!%s(%d): called", __func__,fp->qede->instance);
 408         mutex_enter(&fp->fp_lock);
 409         rx_ring->mac_ring_started = B_FALSE;
 410         mutex_exit(&fp->fp_lock);
 411 }
 412 
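/*
 * mri_start callback: record the mac ring generation number (needed when
 * passing packets up via mac_ring_rx()) and mark the ring as started.
 */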
 413 static int
 414 qede_rx_ring_start(mac_ring_driver_t rh, u64 mr_gen_num)
 415 {
 416         qede_fastpath_t *fp = (qede_fastpath_t *)rh;
 417         qede_rx_ring_t *rx_ring = fp->rx_ring;
 418 
 419         qede_print("!%s(%d): called", __func__,fp->qede->instance);
 420         mutex_enter(&fp->fp_lock);
 421         rx_ring->mr_gen_num = mr_gen_num;
 422         rx_ring->mac_ring_started = B_TRUE;
 423         rx_ring->intrDisableCnt = 0;
 424         rx_ring->intrEnableCnt  = 0;
 425         fp->disabled_by_poll = 0;
 426 
 427         mutex_exit(&fp->fp_lock);
 428 
 429         return (DDI_SUCCESS);
 430 }
 431 
 432 /* Callback function from mac layer to register rings */
 433 void
 434 qede_fill_ring(void *arg, mac_ring_type_t rtype, const int group_index,
 435     const int ring_index, mac_ring_info_t *infop, mac_ring_handle_t rh)
 436 {
 437         qede_t *qede = (qede_t *)arg;
 438         mac_intr_t *mintr = &infop->mri_intr;
 439 
 440         switch (rtype) {
 441         case MAC_RING_TYPE_RX: {
 442                 /*
 443                  * Index passed as a param is the ring index within the
 444                  * given group index. If multiple groups are supported
 445                  * then need to search into all groups to find out the
 446                  * global ring index for the passed group relative
 447                  * ring index
 448                  */
 449                 int global_ring_index = qede_get_global_ring_index(qede,
 450                     group_index, ring_index);
 451                 qede_fastpath_t *fp;
 452                 qede_rx_ring_t *rx_ring;
 453                 int i;
 454 
                /*
                 * A negative global_ring_index means the group index
                 * passed in was not registered by our driver.
                 */
                ASSERT(global_ring_index >= 0);
 460 
 461                 if (rh == NULL) {
 462                         cmn_err(CE_WARN, "!rx ring(%d) ring handle NULL",
 463                             global_ring_index);
 464                 }
 465 
 466                 fp = &qede->fp_array[global_ring_index];
 467                 rx_ring = fp->rx_ring;
 468                 fp->qede = qede;
 469 
 470                 rx_ring->mac_ring_handle = rh;
 471 
 472                 qede_info(qede, "rx_ring %d mac_ring_handle %p",
 473                     rx_ring->rss_id, rh);
 474 
 475                 /* mri_driver passed as arg to mac_ring* callbacks */
 476                 infop->mri_driver = (mac_ring_driver_t)fp;
 477                 /*
 478                  * mri_start callback will supply a mac rings generation
 479                  * number which is needed while indicating packets
 480                  * upstream via mac_ring_rx() call
 481                  */
 482                 infop->mri_start = qede_rx_ring_start;
 483                 infop->mri_stop = qede_rx_ring_stop;
 484                 infop->mri_poll = qede_rx_ring_poll;
 485                 infop->mri_stat = qede_rx_ring_stat;
 486 
 487                 mintr->mi_handle = (mac_intr_handle_t)fp;
 488                 mintr->mi_enable = qede_rx_ring_intr_enable;
 489                 mintr->mi_disable = qede_rx_ring_intr_disable;
 490                 if (qede->intr_ctx.intr_type_in_use &
 491                     (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
 492                         mintr->mi_ddi_handle =
 493                             qede->intr_ctx.
 494                             intr_hdl_array[global_ring_index + qede->num_hwfns];
 495                 }
 496                 break;
 497         }
 498         case MAC_RING_TYPE_TX: {
 499                 qede_fastpath_t *fp;
 500                 qede_tx_ring_t *tx_ring;
 501                 int i, tc;
 502 
 503                 ASSERT(ring_index < qede->num_fp);
 504                 
 505                 fp = &qede->fp_array[ring_index];
 506                 fp->qede = qede;
 507                 tx_ring = fp->tx_ring[0];
 508                 tx_ring->mac_ring_handle = rh;
 509                 qede_info(qede, "tx_ring %d mac_ring_handle %p",
 510                     tx_ring->tx_queue_index, rh);
 511                 infop->mri_driver = (mac_ring_driver_t)fp;
 512                 infop->mri_start = NULL;
 513                 infop->mri_stop = NULL;
 514                 infop->mri_tx = qede_ring_tx;
 515                 infop->mri_stat = qede_tx_ring_stat;
 516                 if (qede->intr_ctx.intr_type_in_use &
 517                     (DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI)) {
 518                         mintr->mi_ddi_handle =
 519                             qede->intr_ctx.
 520                             intr_hdl_array[ring_index + qede->num_hwfns]; 
 521                 }
 522                 break;
 523         }
 524         default:
 525                 break;
 526         }
 527 }
 528 
 529 /*
 530  * Callback function from mac layer to register group
 531  */
 532 void
 533 qede_fill_group(void *arg, mac_ring_type_t rtype, const int index,
 534     mac_group_info_t *infop, mac_group_handle_t gh)
 535 {
 536         qede_t *qede = (qede_t *)arg;
 537 
 538         switch (rtype) {
 539         case MAC_RING_TYPE_RX: {
 540                 qede_mac_group_t *rx_group;
 541 
 542                 rx_group = &qede->rx_groups[index];
 543                 rx_group->group_handle = gh;
 544                 rx_group->group_index = index;
 545                 rx_group->qede = qede;
 546                 infop->mgi_driver = (mac_group_driver_t)rx_group;
 547                 infop->mgi_start = NULL;
 548                 infop->mgi_stop = NULL;
 549 #ifndef ILLUMOS
 550                 infop->mgi_addvlan = NULL;
 551                 infop->mgi_remvlan = NULL;
 552                 infop->mgi_getsriov_info = NULL;
 553                 infop->mgi_setmtu = NULL;
 554 #endif
 555                 infop->mgi_addmac = qede_add_mac_addr;
 556                 infop->mgi_remmac = qede_rem_mac_addr;
 557                 infop->mgi_count =  qede->num_fp;
 558 #ifndef ILLUMOS
 559                 if (index == 0) {
 560                         infop->mgi_flags = MAC_GROUP_DEFAULT;
 561                 }
 562 #endif
 563 
 564                 break;
 565         }
 566         case MAC_RING_TYPE_TX: {
 567                 qede_mac_group_t *tx_group;
 568 
 569                 tx_group = &qede->tx_groups[index];
 570                 tx_group->group_handle = gh;
 571                 tx_group->group_index = index;
 572                 tx_group->qede = qede;
 573 
 574                 infop->mgi_driver = (mac_group_driver_t)tx_group;
 575                 infop->mgi_start = NULL;
 576                 infop->mgi_stop = NULL;
 577                 infop->mgi_addmac = NULL;
 578                 infop->mgi_remmac = NULL;
 579 #ifndef ILLUMOS
 580                 infop->mgi_addvlan = NULL;
 581                 infop->mgi_remvlan = NULL;
 582                 infop->mgi_setmtu = NULL;
 583                 infop->mgi_getsriov_info = NULL;
 584 #endif
 585 
 586                 infop->mgi_count = qede->num_fp;
 587 
 588 #ifndef ILLUMOS
 589                 if (index == 0) {
 590                         infop->mgi_flags = MAC_GROUP_DEFAULT;
 591                 }
 592 #endif
 593                 break;
 594         }
 595         default:
 596                 break;
 597         }
 598 }
 599 
 600 #ifdef ILLUMOS
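/*
 * Report whether the transceiver behind hwfn "id" is present and usable,
 * based on the transceiver state word in the port's shared memory.
 */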
 601 static int
 602 qede_transceiver_info(void *arg, uint_t id, mac_transceiver_info_t *infop)
 603 {
 604         qede_t *qede = arg;
 605         struct ecore_dev *edev = &qede->edev;
 606         struct ecore_hwfn *hwfn;
 607         struct ecore_ptt *ptt;
 608         uint32_t transceiver_state;
 609 
 610         if (id >= edev->num_hwfns || arg == NULL || infop == NULL)
 611                 return (EINVAL);
 612 
 613         hwfn = &edev->hwfns[id];
 614         ptt = ecore_ptt_acquire(hwfn);
 615         if (ptt == NULL) {
 616                 return (EIO);
 617         }
 618         /*
 619          * Use the underlying raw API to get this information. While the
 620          * ecore_phy routines have some ways of getting to this information, it
 621          * ends up writing the raw data as ASCII characters which doesn't help
 622          * us one bit.
 623          */
 624         transceiver_state = ecore_rd(hwfn, ptt, hwfn->mcp_info->port_addr +
 625             offsetof(struct public_port, transceiver_data));
 626         transceiver_state = GET_FIELD(transceiver_state, ETH_TRANSCEIVER_STATE);
 627         ecore_ptt_release(hwfn, ptt);
 628 
 629         if ((transceiver_state & ETH_TRANSCEIVER_STATE_PRESENT) != 0) {
 630                 mac_transceiver_info_set_present(infop, B_TRUE);
 631                 /*
 632                  * Based on our testing, the ETH_TRANSCEIVER_STATE_VALID flag is
 633                  * not set, so we cannot rely on it. Instead, we have found that
 634                  * the ETH_TRANSCEIVER_STATE_UPDATING will be set when we cannot
 635                  * use the transceiver.
 636                  */
 637                 if ((transceiver_state & ETH_TRANSCEIVER_STATE_UPDATING) != 0) {
 638                         mac_transceiver_info_set_usable(infop, B_FALSE);
 639                 } else {
 640                         mac_transceiver_info_set_usable(infop, B_TRUE);
 641                 }
 642         } else {
 643                 mac_transceiver_info_set_present(infop, B_FALSE);
 644                 mac_transceiver_info_set_usable(infop, B_FALSE);
 645         }
 646 
 647         return (0);
 648 }
 649 
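/*
 * Read up to 256 bytes from transceiver i2c page 0xa0 or 0xa2 through the
 * management firmware's SFP read interface.
 */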
 650 static int
 651 qede_transceiver_read(void *arg, uint_t id, uint_t page, void *buf,
 652     size_t nbytes, off_t offset, size_t *nread)
 653 {
 654         qede_t *qede = arg;
 655         struct ecore_dev *edev = &qede->edev;
 656         struct ecore_hwfn *hwfn;
 657         uint32_t port, lane;
 658         struct ecore_ptt *ptt;
 659         enum _ecore_status_t ret;
 660 
 661         if (id >= edev->num_hwfns || buf == NULL || nbytes == 0 || nread == NULL ||
 662             (page != 0xa0 && page != 0xa2) || offset < 0)
 663                 return (EINVAL);
 664 
 665         /*
 666          * Both supported pages have a length of 256 bytes, ensure nothing asks
 667          * us to go beyond that.
 668          */
 669         if (nbytes > 256 || offset >= 256 || (offset + nbytes > 256)) {
 670                return (EINVAL);
 671         }
 672 
 673         hwfn = &edev->hwfns[id];
 674         ptt = ecore_ptt_acquire(hwfn);
 675         if (ptt == NULL) {
 676                 return (EIO);
 677         }
 678 
 679         ret = ecore_mcp_phy_sfp_read(hwfn, ptt, hwfn->port_id, page, offset,
 680             nbytes, buf);
 681         ecore_ptt_release(hwfn, ptt);
 682         if (ret != ECORE_SUCCESS) {
 683                 return (EIO);
 684         }
 685         *nread = nbytes;
 686         return (0);
 687 }
 688 #endif /* ILLUMOS */
 689 
 690 
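/*
 * Driver-wide statistics callback for the mac layer; most values come
 * from the ecore vport statistics block.
 */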
static int
qede_mac_stats(void *arg, uint_t stat, uint64_t *value)
 695 {
        qede_t *qede = (qede_t *)arg;
 697         struct ecore_eth_stats vstats;
 698         struct ecore_dev *edev = &qede->edev;
 699         struct qede_link_cfg lnkcfg;
 700         int rc = 0;
 701         qede_fastpath_t *fp = &qede->fp_array[0];
 702         qede_rx_ring_t *rx_ring;
 703         qede_tx_ring_t *tx_ring;
 704 
        if ((qede == NULL) || (value == NULL)) {
                return (EINVAL);
        }

        mutex_enter(&qede->gld_lock);

        if (qede->qede_state != QEDE_STATE_STARTED) {
                mutex_exit(&qede->gld_lock);
                return (EAGAIN);
        }
 716 
        *value = 0;

        memset(&vstats, 0, sizeof (struct ecore_eth_stats));
        ecore_get_vport_stats(edev, &vstats);

        memset(&qede->curcfg, 0, sizeof (struct qede_link_cfg));
        qede_get_link_info(&edev->hwfns[0], &qede->curcfg);

        switch (stat) {
 730         case MAC_STAT_IFSPEED:
 731                 *value = (qede->props.link_speed * 1000000ULL);
 732                 break;
 733         case MAC_STAT_MULTIRCV:
 734                 *value = vstats.common.rx_mcast_pkts;
 735                 break;
 736         case MAC_STAT_BRDCSTRCV:
 737                 *value = vstats.common.rx_bcast_pkts;
 738                 break;
 739         case MAC_STAT_MULTIXMT:
 740                 *value = vstats.common.tx_mcast_pkts;
 741                 break;
 742         case MAC_STAT_BRDCSTXMT:
 743                 *value = vstats.common.tx_bcast_pkts;
 744                 break;
 745         case MAC_STAT_NORCVBUF:
 746                 *value = vstats.common.no_buff_discards;
 747                 break;
 748         case MAC_STAT_NOXMTBUF:
 749                 *value = 0;
 750                 break;
 751         case MAC_STAT_IERRORS:
 752         case ETHER_STAT_MACRCV_ERRORS:
 753                 *value = vstats.common.mac_filter_discards + 
 754                     vstats.common.packet_too_big_discard + 
 755                     vstats.common.rx_crc_errors;        
 756                 break;
 757         
 758         case MAC_STAT_OERRORS:
 759                 break;
 760 
 761         case MAC_STAT_COLLISIONS:
 762                 *value = vstats.bb.tx_total_collisions;
 763                 break;
 764 
 765         case MAC_STAT_RBYTES:
 766                 *value = vstats.common.rx_ucast_bytes + 
 767                     vstats.common.rx_mcast_bytes + 
 768                     vstats.common.rx_bcast_bytes;
 769                 break;
 770 
 771         case MAC_STAT_IPACKETS:
 772                 *value = vstats.common.rx_ucast_pkts + 
 773                     vstats.common.rx_mcast_pkts + 
 774                     vstats.common.rx_bcast_pkts; 
 775                 break;
 776 
 777         case MAC_STAT_OBYTES:
 778                 *value = vstats.common.tx_ucast_bytes + 
 779                     vstats.common.tx_mcast_bytes + 
 780                     vstats.common.tx_bcast_bytes;
 781                 break;
 782 
 783         case MAC_STAT_OPACKETS:
 784                 *value = vstats.common.tx_ucast_pkts + 
 785                     vstats.common.tx_mcast_pkts + 
 786                     vstats.common.tx_bcast_pkts;
 787                 break;
 788 
 789         case ETHER_STAT_ALIGN_ERRORS:
 790                 *value = vstats.common.rx_align_errors;
 791                 break;
 792         
 793         case ETHER_STAT_FCS_ERRORS:
 794                 *value = vstats.common.rx_crc_errors;
 795                 break;
 796 
 797         case ETHER_STAT_FIRST_COLLISIONS:
 798                 break;
 799 
 800         case ETHER_STAT_MULTI_COLLISIONS:
 801                 break;
 802 
 803         case ETHER_STAT_DEFER_XMTS:
 804                 break;
 805 
 806         case ETHER_STAT_TX_LATE_COLLISIONS:
 807                 break;
 808 
 809         case ETHER_STAT_EX_COLLISIONS:
 810                 break;
 811 
 812         case ETHER_STAT_MACXMT_ERRORS:
 813                 *value = 0;
 814                 break;
 815 
 816         case ETHER_STAT_CARRIER_ERRORS:
 817                 break;
 818 
 819         case ETHER_STAT_TOOLONG_ERRORS:
 820                 *value = vstats.common.rx_oversize_packets;
 821                 break;
 822 
 823 #if (MAC_VERSION > 1)
 824         case ETHER_STAT_TOOSHORT_ERRORS:
 825                 *value = vstats.common.rx_undersize_packets;
 826                 break;
 827 #endif
 828 
 829         case ETHER_STAT_XCVR_ADDR:
 830                 *value = 0;
 831                 break;
 832 
 833         case ETHER_STAT_XCVR_ID:
 834                 *value = 0;
 835                 break;
 836 
 837         case ETHER_STAT_XCVR_INUSE:
 838                 switch (qede->props.link_speed) {
 839                 default:
 840                         *value = XCVR_UNDEFINED;
 841                 }
 842                 break;
 848         case ETHER_STAT_CAP_100FDX:
 849                 *value = 0;
 850                 break;  
 851         case ETHER_STAT_CAP_100HDX:
 852                 *value = 0;
 853                 break;  
 854         case ETHER_STAT_CAP_ASMPAUSE:
 855                 *value = 1;
 856                 break;
 857         case ETHER_STAT_CAP_PAUSE:      
 858                 *value = 1;
 859                 break;
 860         case ETHER_STAT_CAP_AUTONEG:
 861                 *value = 1;
 862                 break;
 863         
 864 #if (MAC_VERSION > 1)
 865         case ETHER_STAT_CAP_REMFAULT:
 866                 *value = 0;
 867                 break;
 868 #endif
 869 
        case ETHER_STAT_ADV_CAP_ASMPAUSE:
 876                 *value = 1;
 877                 break;
 878 
 879         case ETHER_STAT_ADV_CAP_PAUSE:
 880                 *value = 1;
 881                 break;
 882 
 883         case ETHER_STAT_ADV_CAP_AUTONEG:
 884                 *value = qede->curcfg.adv_capab.autoneg;
 885                 break;
 886 
 887 #if (MAC_VERSION > 1)
 888         case ETHER_STAT_ADV_REMFAULT:
 889                 *value = 0;
 890                 break;
 891 #endif  
 892 
 893         case ETHER_STAT_LINK_AUTONEG:
 894                 *value  = qede->curcfg.autoneg;
 895                 break;
 896 
 897         case ETHER_STAT_LINK_DUPLEX:
 898                 *value = (qede->props.link_duplex == DUPLEX_FULL) ?
 899                                     LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
 900                 break;
 901         /*
 902          * Supported speeds. These indicate what hardware is capable of.
 903          */
 904         case ETHER_STAT_CAP_1000HDX:
 905                 *value = qede->curcfg.supp_capab.param_1000hdx;
 906                 break;
 907 
 908         case ETHER_STAT_CAP_1000FDX:
 909                 *value = qede->curcfg.supp_capab.param_1000fdx;
 910                 break;
 911 
 912         case ETHER_STAT_CAP_10GFDX:
 913                 *value = qede->curcfg.supp_capab.param_10000fdx;
 914                 break;
 915 
 916         case ETHER_STAT_CAP_25GFDX:
 917                 *value = qede->curcfg.supp_capab.param_25000fdx;
 918                 break;
 919 
 920         case ETHER_STAT_CAP_40GFDX:
 921                 *value = qede->curcfg.supp_capab.param_40000fdx;
 922                 break;
 923 
 924         case ETHER_STAT_CAP_50GFDX:
 925                 *value = qede->curcfg.supp_capab.param_50000fdx;
 926                 break;
 927 
 928         case ETHER_STAT_CAP_100GFDX:
 929                 *value = qede->curcfg.supp_capab.param_100000fdx;
 930                 break;
 931 
        /*
         * Advertised speeds. These indicate what the hardware is currently
         * advertising to the link partner.
         */
 935         case ETHER_STAT_ADV_CAP_1000HDX:
 936                 *value = qede->curcfg.adv_capab.param_1000hdx;
 937                 break;
 938 
 939         case ETHER_STAT_ADV_CAP_1000FDX:
 940                 *value = qede->curcfg.adv_capab.param_1000fdx;
 941                 break;
 942 
 943         case ETHER_STAT_ADV_CAP_10GFDX:
 944                 *value = qede->curcfg.adv_capab.param_10000fdx;
 945                 break;
 946 
 947         case ETHER_STAT_ADV_CAP_25GFDX:
 948                 *value = qede->curcfg.adv_capab.param_25000fdx;
 949                 break;
 950 
 951         case ETHER_STAT_ADV_CAP_40GFDX:
 952                 *value = qede->curcfg.adv_capab.param_40000fdx;
 953                 break;
 954 
 955         case ETHER_STAT_ADV_CAP_50GFDX:
 956                 *value = qede->curcfg.adv_capab.param_50000fdx;
 957                 break;
 958 
 959         case ETHER_STAT_ADV_CAP_100GFDX:
 960                 *value = qede->curcfg.adv_capab.param_100000fdx;
 961                 break;
 962 
 963         default:
 964                 rc = ENOTSUP;
 965         }
 966 
 967         mutex_exit(&qede->gld_lock);
 968         return (rc);
 969 }
 970 
 971 /* (flag) TRUE = on, FALSE = off */
 972 static int
 973 qede_mac_promiscuous(void *arg,
 974     boolean_t on)
 975 {
        qede_t *qede = (qede_t *)arg;
        int ret = DDI_SUCCESS;
        enum qede_filter_rx_mode_type mode;

        qede_print("!%s(%d): called", __func__, qede->instance);

 981         mutex_enter(&qede->drv_lock);
 982         
 983         if (qede->qede_state == QEDE_STATE_SUSPENDED) {
 984                 ret = ECANCELED;
 985                 goto exit;
 986         }
 987 
 988         if (on) {
 989                 qede_info(qede, "Entering promiscuous mode");
 990                 mode = QEDE_FILTER_RX_MODE_PROMISC;
 991                 qede->params.promisc_fl = B_TRUE;
 992         } else {
 993                 qede_info(qede, "Leaving promiscuous mode");
                if (qede->params.multi_promisc_fl == B_TRUE) {
                        mode = QEDE_FILTER_RX_MODE_MULTI_PROMISC;
                } else {
                        mode = QEDE_FILTER_RX_MODE_REGULAR;
                }
 999                 qede->params.promisc_fl = B_FALSE;
1000         }
1001 
1002         ret = qede_set_filter_rx_mode(qede, mode);
1003 
1004 exit:
1005         mutex_exit(&qede->drv_lock);
1006         return (ret);
1007 }
1008 
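/*
 * Program or remove a list of mc_cnt multicast MAC addresses in the
 * device, according to opcode.
 */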
int
qede_set_rx_mac_mcast(qede_t *qede, enum ecore_filter_opcode opcode,
    uint8_t *mac, int mc_cnt)
{
        struct ecore_filter_mcast cmd;
        int i;

        memset(&cmd, 0, sizeof (cmd));
        cmd.opcode = opcode;
        cmd.num_mc_addrs = mc_cnt;

        for (i = 0; i < mc_cnt; i++, mac += ETH_ALLEN) {
                COPY_ETH_ADDRESS(mac, cmd.mac[i]);
        }

        return (ecore_filter_mcast_cmd(&qede->edev, &cmd,
            ECORE_SPQ_MODE_CB, NULL));
}
1027 
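/*
 * Set the vport rx/tx accept flags for regular, promiscuous or
 * multicast-promiscuous operation.
 */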
int
qede_set_filter_rx_mode(qede_t *qede, enum qede_filter_rx_mode_type type)
1030 {
1031         struct ecore_filter_accept_flags flg;
1032 
1033         memset(&flg, 0, sizeof(flg));
1034 
1035         flg.update_rx_mode_config      = 1;
1036         flg.update_tx_mode_config      = 1;
1037         flg.rx_accept_filter           = ECORE_ACCEPT_UCAST_MATCHED | 
1038             ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
1039         flg.tx_accept_filter = ECORE_ACCEPT_UCAST_MATCHED | 
1040             ECORE_ACCEPT_MCAST_MATCHED | ECORE_ACCEPT_BCAST;
1041 
1042         if (type == QEDE_FILTER_RX_MODE_PROMISC)
1043                 flg.rx_accept_filter |= ECORE_ACCEPT_UCAST_UNMATCHED | 
1044                     ECORE_ACCEPT_MCAST_UNMATCHED;
1045         else if (type == QEDE_FILTER_RX_MODE_MULTI_PROMISC)
1046                 flg.rx_accept_filter |= ECORE_ACCEPT_MCAST_UNMATCHED;
1047         qede_info(qede, "rx_mode rx_filter=0x%x tx_filter=0x%x type=0x%x\n", 
1048             flg.rx_accept_filter, flg.tx_accept_filter, type);
1049         return (ecore_filter_accept_cmd(&qede->edev, 0, flg,
1050                         0, /* update_accept_any_vlan */
1051                         0, /* accept_any_vlan */
1052                         ECORE_SPQ_MODE_CB, NULL));
1053 }
1054 
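/*
 * Add (flag == B_TRUE) or remove a multicast address from the driver's
 * multicast list and reprogram the device.  A NULL address with the flag
 * cleared flushes the entire list.  Once more than 64 addresses are in
 * use, the device is switched to multicast-promiscuous mode instead.
 */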
1055 int 
1056 qede_multicast(qede_t *qede, boolean_t flag, const uint8_t *ptr_mcaddr)
1057 {
1058         int i, ret = DDI_SUCCESS;
1059         qede_mcast_list_entry_t *ptr_mlist;
1060         qede_mcast_list_entry_t *ptr_entry;
1061         int mc_cnt;
1062         unsigned char *mc_macs, *tmpmc;
1063         size_t size;
1064         boolean_t mcmac_exists = B_FALSE;
1065         enum qede_filter_rx_mode_type mode;
1066 
        if (ptr_mcaddr == NULL) {
                cmn_err(CE_NOTE, "Removing all multicast");
        } else {
                cmn_err(CE_NOTE,
                    "qede=%p %s multicast: %02x:%02x:%02x:%02x:%02x:%02x",
                    qede, (flag) ? "Adding" : "Removing", ptr_mcaddr[0],
                    ptr_mcaddr[1], ptr_mcaddr[2], ptr_mcaddr[3], ptr_mcaddr[4],
                    ptr_mcaddr[5]);
        }

        if (flag && (ptr_mcaddr == NULL)) {
                cmn_err(CE_WARN, "ERROR: Multicast address not specified");
                return (EINVAL);
        }

        /* Check whether the multicast list has hit the soft limit */
        if (flag && (qede->mc_cnt >= MAX_MC_SOFT_LIMIT)) {
                qede_info(qede, "Cannot add more than MAX_MC_SOFT_LIMIT");
                return (ENOENT);
        }

        size = MAX_MC_SOFT_LIMIT * ETH_ALLEN;

        mc_macs = kmem_zalloc(size, KM_NOSLEEP);
        if (mc_macs == NULL) {
                cmn_err(CE_WARN, "ERROR: Failed to allocate for mc_macs");
                return (EINVAL);
        }
1097 
1098         tmpmc = mc_macs;
1099 
        /* Remove all multicast: flag not set and mcaddr not specified */
        if (!flag && (ptr_mcaddr == NULL)) {
                QEDE_LIST_FOR_EACH_ENTRY(ptr_entry,
                    &qede->mclist.head, qede_mcast_list_entry_t, mclist_entry)
                {
                        if (ptr_entry != NULL) {
                                QEDE_LIST_REMOVE(&ptr_entry->mclist_entry,
                                    &qede->mclist.head);
                                kmem_free(ptr_entry,
                                    sizeof (qede_mcast_list_entry_t) +
                                    ETH_ALLEN);
                        }
                }

                ret = qede_set_rx_mac_mcast(qede,
                    ECORE_FILTER_REMOVE, mc_macs, 1);
                qede->mc_cnt = 0;
                goto exit;
        }
1118 
1119         QEDE_LIST_FOR_EACH_ENTRY(ptr_entry, 
1120             &qede->mclist.head, qede_mcast_list_entry_t, mclist_entry)
1121         {
1122                 if ((ptr_entry != NULL) && 
1123                     IS_ETH_ADDRESS_EQUAL(ptr_mcaddr, ptr_entry->mac)) {
1124                         mcmac_exists = B_TRUE;
1125                         break;
1126                 }
1127         }
1128         if (flag && mcmac_exists) {
1129                 ret = DDI_SUCCESS;
1130                 goto exit;
1131         } else if (!flag && !mcmac_exists) {
1132                 ret = DDI_SUCCESS;
1133                 goto exit;
1134         }
1135 
        if (flag) {
                ptr_entry = kmem_zalloc((sizeof (qede_mcast_list_entry_t) +
                    ETH_ALLEN), KM_NOSLEEP);
                if (ptr_entry == NULL) {
                        cmn_err(CE_WARN,
                            "ERROR: Failed to allocate mcast list entry");
                        ret = EINVAL;
                        goto exit;
                }
                ptr_entry->mac = (uint8_t *)ptr_entry +
                    sizeof (qede_mcast_list_entry_t);
                COPY_ETH_ADDRESS(ptr_mcaddr, ptr_entry->mac);
                QEDE_LIST_ADD(&ptr_entry->mclist_entry, &qede->mclist.head);
        } else {
                QEDE_LIST_REMOVE(&ptr_entry->mclist_entry, &qede->mclist.head);
                kmem_free(ptr_entry, sizeof (qede_mcast_list_entry_t) +
                    ETH_ALLEN);
        }
1148 
1149         mc_cnt = 0;
1150         QEDE_LIST_FOR_EACH_ENTRY(ptr_entry, &qede->mclist.head, 
1151             qede_mcast_list_entry_t, mclist_entry) {
1152                 COPY_ETH_ADDRESS(ptr_entry->mac, tmpmc);
1153                 tmpmc += ETH_ALLEN;
1154                 mc_cnt++;
1155         }
1156         qede->mc_cnt = mc_cnt;
        if (mc_cnt <= 64) {
1158                 ret = qede_set_rx_mac_mcast(qede, ECORE_FILTER_ADD, 
1159                     (unsigned char *)mc_macs, mc_cnt);
1160                 if ((qede->params.multi_promisc_fl == B_TRUE) && 
1161                     (qede->params.promisc_fl == B_FALSE)) {
1162                         mode = QEDE_FILTER_RX_MODE_REGULAR;
1163                         ret = qede_set_filter_rx_mode(qede, mode);
1164                 }
1165                 qede->params.multi_promisc_fl = B_FALSE;
1166         } else {
1167                 if ((qede->params.multi_promisc_fl == B_FALSE) && 
1168                     (qede->params.promisc_fl == B_FALSE)) {
1169                         ret = qede_set_filter_rx_mode(qede, 
1170                             QEDE_FILTER_RX_MODE_MULTI_PROMISC);
1171                 }
1172                 qede->params.multi_promisc_fl = B_TRUE;
1173                 qede_info(qede, "mode is MULTI_PROMISC");
1174         }
exit:
        kmem_free(mc_macs, size);
        qede_info(qede, "multicast ret %d mc_cnt %d\n", ret, qede->mc_cnt);
        return (ret);
}
1180 
1181 /*
1182  * This function is used to enable or disable multicast packet reception for
1183  * particular multicast addresses.
1184  * (flag) TRUE = add, FALSE = remove
1185  */
static int
qede_mac_multicast(void *arg, boolean_t flag, const uint8_t *mcast_addr)
{
        qede_t *qede = (qede_t *)arg;
        int ret = DDI_SUCCESS;

        mutex_enter(&qede->gld_lock);
        if (qede->qede_state != QEDE_STATE_STARTED) {
                mutex_exit(&qede->gld_lock);
                return (EAGAIN);
        }
        ret = qede_multicast(qede, flag, mcast_addr);

        mutex_exit(&qede->gld_lock);

        return (ret);
}
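
/*
 * Restore the regular rx mode and remove all programmed unicast and
 * multicast filters.
 */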
int
qede_clear_filters(qede_t *qede)
{
        int ret = 0;
        int i;

1211         if ((qede->params.promisc_fl == B_TRUE) || 
1212             (qede->params.multi_promisc_fl == B_TRUE)) {
1213                 ret = qede_set_filter_rx_mode(qede, 
1214                     QEDE_FILTER_RX_MODE_REGULAR);
1215                 if (ret) {
1216                         qede_info(qede, 
1217                             "qede_clear_filters failed to set rx_mode");
1218                 }
1219         }
        for (i = 0; i < qede->ucst_total; i++) {
1222                 if (qede->ucst_mac[i].set) {
1223                         qede_rem_macaddr(qede, 
1224                             qede->ucst_mac[i].mac_addr.ether_addr_octet);
1225                 }
1226         }
1227         qede_multicast(qede, B_FALSE, NULL);
1228         return (ret);
1229 }
1230 
1231 
#ifdef NO_CROSSBOW
static int
qede_mac_unicast(void *arg, const uint8_t *mac_addr)
{
        qede_t *qede = (qede_t *)arg;
        return (0);
}


static mblk_t *
qede_mac_tx(void *arg, mblk_t *mblk)
{
        qede_t *qede = (qede_t *)arg;
        qede_fastpath_t *fp = &qede->fp_array[0];

        mblk = qede_ring_tx((void *)fp, mblk);

        return (mblk);
}
#endif /* NO_CROSSBOW */
1254 
1255 
1256 static lb_property_t loopmodes[] = {
1257         { normal,       "normal",       QEDE_LOOP_NONE                },
1258         { internal,     "internal",     QEDE_LOOP_INTERNAL            },
1259         { external,     "external",     QEDE_LOOP_EXTERNAL            },
1260 };
1261 
/*
 * Set Loopback mode
 */
static enum ioc_reply
1267 qede_set_loopback_mode(qede_t *qede, uint32_t mode)
1268 {
1269         int i = 0;
1270         struct ecore_dev *edev = &qede->edev;
1271         struct ecore_hwfn *hwfn;
1272         struct ecore_ptt *ptt = NULL;
1273         struct ecore_mcp_link_params *link_params;
1274 
1275         hwfn = &edev->hwfns[0];
1276         link_params = ecore_mcp_get_link_params(hwfn);
1277         ptt = ecore_ptt_acquire(hwfn);
1278 
        switch (mode) {
        default:
                qede_info(qede, "unknown loopback mode !!");
                ecore_ptt_release(hwfn, ptt);
                return (IOC_INVAL);
1284 
1285         case QEDE_LOOP_NONE:
1286                 ecore_mcp_set_link(hwfn, ptt, 0);
1287 
1288                 while (qede->params.link_state && i < 5000) {
1289                         OSAL_MSLEEP(1);
1290                         i++;
1291                 }
1292                 i = 0;
1293 
1294                 link_params->loopback_mode = ETH_LOOPBACK_NONE;
1295                 qede->loop_back_mode = QEDE_LOOP_NONE;
1296                 (void) ecore_mcp_set_link(hwfn, ptt, 1);
1297                 ecore_ptt_release(hwfn, ptt);
1298 
1299                 while (!qede->params.link_state && i < 5000) {
1300                         OSAL_MSLEEP(1);
1301                         i++;
1302                 }
                return (IOC_REPLY);
1304 
        case QEDE_LOOP_INTERNAL:
                qede_print("!%s(%d) : loopback mode (INTERNAL) is set!",
                    __func__, qede->instance);
                ecore_mcp_set_link(hwfn, ptt, 0);

                while (qede->params.link_state && i < 5000) {
                        OSAL_MSLEEP(1);
                        i++;
                }
1314                 i = 0;
1315                 link_params->loopback_mode = ETH_LOOPBACK_INT_PHY;
1316                 qede->loop_back_mode = QEDE_LOOP_INTERNAL;
1317                 (void) ecore_mcp_set_link(hwfn, ptt, 1);
1318                 ecore_ptt_release(hwfn, ptt);
1319 
                while (!qede->params.link_state && i < 5000) {
                        OSAL_MSLEEP(1);
                        i++;
                }
                return (IOC_REPLY);
1325 
1326         case QEDE_LOOP_EXTERNAL:
1327                 qede_print("!%s(%d) : External loopback mode is not supported",
1328                     __func__, qede->instance);
1329                 ecore_ptt_release(hwfn, ptt);
                return (IOC_INVAL);
1331         }
1332 }
1333 
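/*
 * Ioctl helper: read 1, 2 or 4 bytes from PCI config space at addr.
 */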
1334 static int
1335 qede_ioctl_pcicfg_rd(qede_t *qede, u32 addr, void *data,
1336     int len)
1337 {
1338         u32 crb, actual_crb; 
1339         uint32_t ret = 0;
1340         int cap_offset = 0, cap_id = 0, next_cap = 0;
1341         ddi_acc_handle_t pci_cfg_handle  = qede->pci_cfg_handle;
1342         qede_ioctl_data_t * data1 = (qede_ioctl_data_t *) data;
1343         
1344         cap_offset = pci_config_get8(pci_cfg_handle, PCI_CONF_CAP_PTR);
1345         while (cap_offset != 0) {
1346                 /* Check for an invalid PCI read. */
1347                 if (cap_offset == PCI_EINVAL8) {
1348                         return DDI_FAILURE;
1349                 }
1350                 cap_id = pci_config_get8(pci_cfg_handle, cap_offset);
1351                 if (cap_id == PCI_CAP_ID_PCI_E) {
1352                         /* PCIe expr capab struct found */
1353                         break;
1354                 } else {
1355                         next_cap = pci_config_get8(pci_cfg_handle,
1356                             cap_offset + 1);
1357                         cap_offset = next_cap;
1358                 }
1359         }
1360 
1361         switch (len) {
1362         case 1:
1363                 ret = pci_config_get8(qede->pci_cfg_handle, addr);
1364                 (void) memcpy(data, &ret, sizeof(uint8_t));
1365                 break;
1366         case 2:
1367                 ret = pci_config_get16(qede->pci_cfg_handle, addr);
1368                 (void) memcpy(data, &ret, sizeof(uint16_t));
1369                 break;
1370         case 4:
1371                 ret = pci_config_get32(qede->pci_cfg_handle, addr);
1372                 (void) memcpy(data, &ret, sizeof(uint32_t));
1373                 break;
1374         default:
1375                 cmn_err(CE_WARN, "bad length for pci config read\n");
1376                 return (1);
1377         }
1378         return (0);
1379 }
1380 
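/*
 * Ioctl helper: write 1, 2 or 4 bytes from the ioctl payload into PCI
 * config space at addr (the 2-byte case ORs the new value into the
 * existing register contents).
 */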
1381 static int
1382 qede_ioctl_pcicfg_wr(qede_t *qede, u32 addr, void *data,
1383     int len)
1384 {
1385         uint16_t ret = 0;
1386         int cap_offset = 0, cap_id = 0, next_cap = 0;
1387         qede_ioctl_data_t * data1 = (qede_ioctl_data_t *) data;
1388         ddi_acc_handle_t pci_cfg_handle  = qede->pci_cfg_handle;
1389 #if 1
1390         cap_offset = pci_config_get8(pci_cfg_handle, PCI_CONF_CAP_PTR);
1391         while (cap_offset != 0) {
1392                 cap_id = pci_config_get8(pci_cfg_handle, cap_offset);
1393                 if (cap_id == PCI_CAP_ID_PCI_E) {
1394                         /* PCIe expr capab struct found */
1395                         break;
1396                 } else {
1397                         next_cap = pci_config_get8(pci_cfg_handle, 
1398                             cap_offset + 1);
1399                         cap_offset = next_cap;
1400                 }
1401         }
1402 #endif
1403 
        switch (len) {
        case 1:
                pci_config_put8(qede->pci_cfg_handle, addr,
                    *(uint8_t *)data1->uabc);
                break;
1409         case 2:
1410                 ret = pci_config_get16(qede->pci_cfg_handle, addr);
1411                 ret = ret | *(uint16_t *)data1->uabc;
1412 
1413                 pci_config_put16(qede->pci_cfg_handle, addr, 
1414                     ret);
1415                 break;
1416         case 4:
1417                 pci_config_put32(qede->pci_cfg_handle, addr, *(uint32_t *)data1->uabc);
1418                 break;
1419                 
1420         default:
1421                 return (1);
1422         }
1423         return (0);
1424 }
1425 
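/*
 * Ioctl helper: read or write a 32-bit device register through the given
 * hwfn's main PTT window.
 */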
1426 static int
1427 qede_ioctl_rd_wr_reg(qede_t *qede, void *data)
1428 {
1429         struct ecore_hwfn *p_hwfn;
1430         struct ecore_dev *edev = &qede->edev;
1431         struct ecore_ptt *ptt;
1432         qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
1433         uint32_t ret = 0;
1434         uint8_t cmd = (uint8_t) data1->unused1;
1435         uint32_t addr = data1->off;
1436         uint32_t val = *(uint32_t *)&data1->uabc[1];
1437         uint32_t hwfn_index = *(uint32_t *)&data1->uabc[5];      
1438         uint32_t *reg_addr;
1439 
        if (hwfn_index >= qede->num_hwfns) {
                cmn_err(CE_WARN, "invalid hwfn index from application\n");
                return (EINVAL);
        }
1444         p_hwfn = &edev->hwfns[hwfn_index];
1445         
        switch (cmd) {
1447         case QEDE_REG_READ:
1448                 ret = ecore_rd(p_hwfn, p_hwfn->p_main_ptt, addr);
1449                 (void) memcpy(data1->uabc, &ret, sizeof(uint32_t));
1450                 break;
1451                 
1452         case QEDE_REG_WRITE:
1453                 ecore_wr(p_hwfn, p_hwfn->p_main_ptt, addr, val);
1454                 break;
1455 
1456         default:
1457                 cmn_err(CE_WARN, 
1458                     "wrong command in register read/write from application\n");
1459                 break;
1460         }
1461         return (ret);
1462 }
1463 
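/*
 * Ioctl helper: read NVRAM contents into, or accumulate NVRAM write data
 * from, a user buffer that arrives split across multiple mblks.
 */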
1464 static int
1465 qede_ioctl_rd_wr_nvram(qede_t *qede, mblk_t *mp)
1466 {
1467         qede_nvram_data_t *data1 = (qede_nvram_data_t *)(mp->b_cont->b_rptr); 
1468         qede_nvram_data_t *data2, *next_data;
1469         struct ecore_dev *edev = &qede->edev;
1470         uint32_t hdr_size = 24, bytes_to_copy, copy_len = 0;
1471         uint32_t copy_len1 = 0;
1472         uint32_t addr = data1->off;
1473         uint32_t size = data1->size, i, buf_size;
1474         uint8_t cmd, cmd2;
1475         uint8_t *buf, *tmp_buf;
1476         mblk_t *mp1;
1477 
1478         cmd = (uint8_t)data1->unused1;
1479 
        switch (cmd) {
        case QEDE_NVRAM_CMD_READ:
                buf = kmem_zalloc(size, GFP_KERNEL);
                if (buf == NULL) {
                        cmn_err(CE_WARN, "memory allocation failed"
                            " in nvram read ioctl\n");
                        return (DDI_FAILURE);
                }
1488                 (void) ecore_mcp_nvm_read(edev, addr, buf, data1->size);
1489 
                copy_len = (MBLKL(mp->b_cont)) - hdr_size;
                if (copy_len > size) {
1492                         (void) memcpy(data1->uabc, buf, size);
1493                         kmem_free(buf, size);
1494                         //OSAL_FREE(edev, buf);
1495                         break;
1496                 }
1497                 (void) memcpy(data1->uabc, buf, copy_len);
1498                 bytes_to_copy = size - copy_len;
1499                 tmp_buf = ((uint8_t *)buf) + copy_len;
1500                 copy_len1 = copy_len;
1501                 mp1 = mp->b_cont;
1502                 mp1 = mp1->b_cont;
1503 
                while (mp1) {
                        copy_len = MBLKL(mp1);
                        if (mp1->b_cont == NULL) {
                                copy_len = MBLKL(mp1) - 4;
                        }
1509                         data2 = (qede_nvram_data_t *)mp1->b_rptr;
1510                         if (copy_len > bytes_to_copy) {
1511                                 (void) memcpy(data2->uabc, tmp_buf, 
1512                                     bytes_to_copy);
1513                                 kmem_free(buf, size);
1514                                 //OSAL_FREE(edev, buf);
1515                                 break;
1516                         }
1517                         (void) memcpy(data2->uabc, tmp_buf, copy_len);
1518                         tmp_buf = tmp_buf + copy_len;
1519                         copy_len += copy_len;
1520                         mp1 = mp1->b_cont;
1521                         bytes_to_copy = bytes_to_copy - copy_len;
1522                 }
1523                         
1524                 kmem_free(buf, size);
1525                 //OSAL_FREE(edev, buf);
1526                 break;
1527         
1528         case QEDE_NVRAM_CMD_WRITE:
1529                 cmd2 = (uint8_t )data1->cmd2;
1530                 size = data1->size;
1531                 addr = data1->off;
1532                 buf_size =  size; //data1->buf_size;
1533                 //buf_size =  data1->buf_size;
1534 
                switch (cmd2) {
1536                 case START_NVM_WRITE:
1537                         buf = kmem_zalloc(size, GFP_KERNEL);
1538                         //buf = qede->reserved_buf;
1539                         qede->nvm_buf_size = data1->size;
                        if (buf == NULL) {
                                cmn_err(CE_WARN,
                                    "memory allocation failed in START_NVM_WRITE\n");
                                return (DDI_FAILURE);
                        }
1545                         qede->nvm_buf_start = buf;
1546                         cmn_err(CE_NOTE, 
1547                             "buf = %p, size = %x\n", qede->nvm_buf_start, size);
1548                         qede->nvm_buf = buf;
1549                         qede->copy_len = 0;
1550                         //tmp_buf = buf + addr;
1551                         break;
1552                         
1553                 case ACCUMULATE_NVM_BUF:
                        tmp_buf = qede->nvm_buf;
                        copy_len = MBLKL(mp->b_cont) - hdr_size;
                        if (copy_len > buf_size) {
                                if (buf_size < qede->nvm_buf_size) {
                                        (void) memcpy(tmp_buf, data1->uabc,
                                            buf_size);
                                        qede->copy_len = qede->copy_len +
                                            buf_size;
                                } else {
                                        (void) memcpy(tmp_buf,
                                            data1->uabc, qede->nvm_buf_size);
                                        qede->copy_len =
                                            qede->copy_len + qede->nvm_buf_size;
                                }
                                tmp_buf = tmp_buf + buf_size;
                                qede->nvm_buf = tmp_buf;
                                cmn_err(CE_NOTE,
                                    "buf_size from app = %x\n", copy_len);
                                break;
                        }
                        (void) memcpy(tmp_buf, data1->uabc, copy_len);
                        tmp_buf = tmp_buf + copy_len;
                        bytes_to_copy = buf_size - copy_len;
                        mp1 = mp->b_cont;
                        mp1 = mp1->b_cont;
                        copy_len1 = copy_len;
1580                         
                        while (mp1) {
                                copy_len = MBLKL(mp1);
                                if (mp1->b_cont == NULL) {
                                        copy_len = MBLKL(mp1) - 4;
                                }
                                next_data = (qede_nvram_data_t *)mp1->b_rptr;
                                if (copy_len > bytes_to_copy) {
                                        (void) memcpy(tmp_buf, next_data->uabc,
                                            bytes_to_copy);
                                        qede->copy_len = qede->copy_len +
                                            bytes_to_copy;
                                        break;
                                }
                                (void) memcpy(tmp_buf, next_data->uabc,
                                    copy_len);
                                qede->copy_len = qede->copy_len + copy_len;
                                tmp_buf = tmp_buf + copy_len;
                                /* Decrement by the amount copied this pass */
                                bytes_to_copy = bytes_to_copy - copy_len;
                                mp1 = mp1->b_cont;
                        }
                        qede->nvm_buf = tmp_buf;
1603                         break;
1604 
                case STOP_NVM_WRITE:
                        break;
                case READ_BUF:
                        tmp_buf = (uint8_t *)qede->nvm_buf_start;
                        for (i = 0; i < size; i++) {
                                cmn_err(CE_NOTE,
                                    "buff (%d) : %d\n", i, *tmp_buf);
                                tmp_buf++;
                        }
                        break;
1616                 }
1617                 break;
1618         case QEDE_NVRAM_CMD_PUT_FILE_DATA:
                tmp_buf = qede->nvm_buf_start;
                (void) ecore_mcp_nvm_write(edev, ECORE_PUT_FILE_DATA,
                    addr, tmp_buf, size);
                /* Free with the size recorded at allocation time */
                kmem_free(qede->nvm_buf_start, qede->nvm_buf_size);
                cmn_err(CE_NOTE, "total size = %x, copied size = %x\n",
                    qede->nvm_buf_size, qede->copy_len);
                tmp_buf = NULL;
                qede->nvm_buf = NULL;
                qede->nvm_buf_start = NULL;
1629                 break;
1630 
1631         case QEDE_NVRAM_CMD_SET_SECURE_MODE:
1632                 (void) ecore_mcp_nvm_set_secure_mode(edev, addr);
1633                 break;
1634 
1635         case QEDE_NVRAM_CMD_DEL_FILE:
1636                 (void) ecore_mcp_nvm_del_file(edev, addr);
1637                 break;
1638 
1639         case QEDE_NVRAM_CMD_PUT_FILE_BEGIN:
1640                 (void) ecore_mcp_nvm_put_file_begin(edev, addr);
1641                 break;
1642 
1643         case QEDE_NVRAM_CMD_GET_NVRAM_RESP:
1644                 buf = kmem_zalloc(size, KM_SLEEP);
1645                 (void) ecore_mcp_nvm_resp(edev, buf);
                (void) memcpy(data1->uabc, buf, size);
1647                 kmem_free(buf, size);
1648                 break;
1649 
1650         default:
                cmn_err(CE_WARN,
                    "unknown NVRAM ioctl sub-command %x from application\n",
                    cmd);
1653                 break;
1654         }
1655         return (DDI_SUCCESS);   
1656 }
1657 
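/*
 * Gather link capabilities, link-partner capabilities and the current link
 * state for this function and copy the result into the caller's
 * qede_func_info_t buffer.
 */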
1658 static int
1659 qede_get_func_info(qede_t *qede, void *data)
1660 {
1661         qede_link_output_t link_op;
1662         qede_func_info_t func_info;
1663         qede_ioctl_data_t *data1 = (qede_ioctl_data_t *)data;
1664         struct ecore_dev *edev = &qede->edev;
1665         struct ecore_hwfn *hwfn;
1666         struct ecore_mcp_link_params params;
1667         struct ecore_mcp_link_state link;

        hwfn = &edev->hwfns[0];

        if (hwfn == NULL) {
                cmn_err(CE_WARN, "(%s) : cannot acquire hwfn\n",
                    __func__);
                return (DDI_FAILURE);
        }

        /*
         * Start from a zeroed state so capability bits are only OR-ed in
         * below and no stack garbage is reported to the caller.
         */
        (void) memset(&link_op, 0, sizeof (link_op));
        (void) memset(&func_info, 0, sizeof (func_info));

        memcpy(&params, &hwfn->mcp_info->link_input, sizeof (params));
        memcpy(&link, &hwfn->mcp_info->link_output, sizeof (link));

        if (link.link_up) {
                link_op.link_up = true;
        }
1682 
1683         link_op.supported_caps = SUPPORTED_FIBRE;
        if (params.speed.autoneg) {
                link_op.supported_caps |= SUPPORTED_Autoneg;
        }

        if (params.pause.autoneg ||
1689             (params.pause.forced_rx && params.pause.forced_tx)) {
1690                 link_op.supported_caps |= SUPPORTED_Asym_Pause;
1691         }
1692 
1693         if (params.pause.autoneg || params.pause.forced_rx ||
1694              params.pause.forced_tx) {
1695                 link_op.supported_caps |= SUPPORTED_Pause;
1696         }
1697         
1698         if (params.speed.advertised_speeds &
1699             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_1G) {
1700                 link_op.supported_caps |= SUPPORTED_1000baseT_Half |
1701                     SUPPORTED_1000baseT_Full;
1702         }
1703 
1704         if (params.speed.advertised_speeds &
1705             NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G) {
1706                 link_op.supported_caps |= SUPPORTED_10000baseKR_Full;
1707         }
1708         
1709         if (params.speed.advertised_speeds &
1710             NVM_CFG1_PORT_DRV_LINK_SPEED_40G) {
1711                 link_op.supported_caps |= SUPPORTED_40000baseLR4_Full;
1712         }
1713         
1714         link_op.advertised_caps = link_op.supported_caps;
1715 
        if (link.link_up) {
1717                 link_op.speed = link.speed;
1718         } else {
1719                 link_op.speed = 0;
1720         }
1721 
1722         link_op.duplex = DUPLEX_FULL;
1723         link_op.port = PORT_FIBRE;
1724         
1725         link_op.autoneg = params.speed.autoneg;
1726 
1727         /* Link partner capabilities */
1728         if (link.partner_adv_speed &
1729             ECORE_LINK_PARTNER_SPEED_1G_HD) {
1730                 link_op.lp_caps |= SUPPORTED_1000baseT_Half;
1731         }
1732         
1733         if (link.partner_adv_speed &
1734             ECORE_LINK_PARTNER_SPEED_1G_FD) {
1735                 link_op.lp_caps |= SUPPORTED_1000baseT_Full;
1736         }
1737         
1738         if (link.partner_adv_speed &
1739             ECORE_LINK_PARTNER_SPEED_10G) {
1740                 link_op.lp_caps |= SUPPORTED_10000baseKR_Full;
1741         }
1742         
1743         if (link.partner_adv_speed &
1744             ECORE_LINK_PARTNER_SPEED_20G) {
1745                 link_op.lp_caps |= SUPPORTED_20000baseKR2_Full;
1746         }
1747         
1748         if (link.partner_adv_speed &
1749             ECORE_LINK_PARTNER_SPEED_40G) {
1750                 link_op.lp_caps |= SUPPORTED_40000baseLR4_Full;
1751         }
1752         
1753         if (link.an_complete) {
1754                 link_op.lp_caps |= SUPPORTED_Autoneg;
1755         }
1756         
1757         if (link.partner_adv_pause) {
1758                 link_op.lp_caps |= SUPPORTED_Pause;
1759         }
1760         
1761         if (link.partner_adv_pause == ECORE_LINK_PARTNER_ASYMMETRIC_PAUSE ||
1762             link.partner_adv_pause == ECORE_LINK_PARTNER_BOTH_PAUSE) {
1763                 link_op.lp_caps |= SUPPORTED_Asym_Pause;
1764         }
1765 
1766         func_info.supported = link_op.supported_caps;
1767         func_info.advertising = link_op.advertised_caps;
1768         func_info.speed = link_op.speed;
1769         func_info.duplex = link_op.duplex;
1770         func_info.port = qede->pci_func & 0x1;
1771         func_info.autoneg = link_op.autoneg;    
1772         
1773         (void) memcpy(data1->uabc, &func_info, sizeof(qede_func_info_t));
1774         
1775         return (0);
1776 }
1777 
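/*
 * Handler for the QEDE_CMD ioctl: decode the sub-command carried in the
 * qede_ioctl_data_t payload (driver info, PCI config and register access,
 * NVRAM access, function info, MAC address) and acknowledge the message.
 */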
1778 static int 
1779 qede_do_ioctl(qede_t *qede, queue_t *q, mblk_t *mp)
1780 {
        qede_ioctl_data_t *up_data;
        qede_driver_info_t driver_info;
        struct ecore_dev *edev = &qede->edev;
        struct ecore_hwfn *hwfn;
        struct ecore_ptt *ptt = NULL;
        uint32_t flash_size;
        uint32_t cmd, size, ret = 0;
        uint64_t off;
        mblk_t *mp1 = mp;
        char mac_addr[32];
1795         
1796         up_data = (qede_ioctl_data_t *)(mp->b_cont->b_rptr);
1797         
1798         cmd = up_data->cmd;
1799         off = up_data->off;
1800         size = up_data->size;
1801         
1802         switch (cmd) {
1803         case QEDE_DRV_INFO:
                hwfn = &edev->hwfns[0];
                ptt = ecore_ptt_acquire(hwfn);
                if (ptt == NULL) {
                        qede_info(qede, "could not acquire ptt");
                        ret = EIO;
                        break;
                }

                snprintf(driver_info.drv_name, MAX_QEDE_NAME_LEN, "%s", "qede");
                snprintf(driver_info.drv_version, QEDE_STR_SIZE,
                    "v:%s", qede->version);
                snprintf(driver_info.mfw_version, QEDE_STR_SIZE,
                    "%s", qede->versionMFW);
                snprintf(driver_info.stormfw_version, QEDE_STR_SIZE,
                    "%s", qede->versionFW);
                snprintf(driver_info.bus_info, QEDE_STR_SIZE,
                    "%s", qede->bus_dev_func);

                /*
                 * Use ecore_mcp_get_flash_size() to determine the flash
                 * length reported as the EEPROM dump length.
                 */
                ecore_mcp_get_flash_size(hwfn, ptt, &flash_size);
                driver_info.eeprom_dump_len = flash_size;
1824                 (void) memcpy(up_data->uabc, &driver_info, 
1825                     sizeof (qede_driver_info_t));
1826                 up_data->size = sizeof (qede_driver_info_t);
1827 
1828                 ecore_ptt_release(hwfn, ptt);
1829                 break;
1830 
1831         case QEDE_RD_PCICFG:
1832                 ret = qede_ioctl_pcicfg_rd(qede, off, up_data->uabc, size);
1833                 break;
1834 
1835         case QEDE_WR_PCICFG:
1836                 ret = qede_ioctl_pcicfg_wr(qede, off, up_data, size);
1837                 break;
1838         
1839         case QEDE_RW_REG:
1840                 ret = qede_ioctl_rd_wr_reg(qede, (void *)up_data);
1841                 break;
1842 
1843         case QEDE_RW_NVRAM:
1844                 ret = qede_ioctl_rd_wr_nvram(qede, mp1);
1845                 break;
1846 
1847         case QEDE_FUNC_INFO:
1848                 ret = qede_get_func_info(qede, (void *)up_data);
1849                 break;
1850 
1851         case QEDE_MAC_ADDR:
1852                 snprintf(mac_addr, sizeof(mac_addr),
1853                         "%02x:%02x:%02x:%02x:%02x:%02x", 
1854                         qede->ether_addr[0], qede->ether_addr[1],
1855                         qede->ether_addr[2], qede->ether_addr[3],
1856                         qede->ether_addr[4], qede->ether_addr[5]);
1857                 (void) memcpy(up_data->uabc, &mac_addr, sizeof(mac_addr));
1858                 break;

        default:
                qede_info(qede, "unknown ioctl command %x", cmd);
                ret = ENOTSUP;
                break;
        }
        miocack(q, mp, sizeof (qede_ioctl_data_t), ret);
1867         return (IOC_REPLY);
1868 }
1869 
1870 static void
1871 qede_ioctl(qede_t *qede, int cmd, queue_t *q, mblk_t *mp)
1872 {
        switch (cmd) {
        case QEDE_CMD:
                (void) qede_do_ioctl(qede, q, mp);
                break;
        default:
                cmn_err(CE_WARN, "qede ioctl command %x not supported\n", cmd);
                break;
        }
}
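
/*
 * Loopback (LB_*) ioctl handling: report the supported loopback modes and
 * get/set the current mode on behalf of qede_mac_ioctl().
 */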
1885 enum ioc_reply
1886 qede_loopback_ioctl(qede_t *qede, queue_t *wq, mblk_t *mp,
1887     struct iocblk *iocp)
1888 {
1889         lb_info_sz_t *lb_info_size;
1890         lb_property_t *lb_prop;
1891         uint32_t *lb_mode;
1892         int cmd;
1893 
1894         /*
1895          * Validate format of ioctl
1896          */
        if (mp->b_cont == NULL) {
                return (IOC_INVAL);
        }
1900         
1901         cmd = iocp->ioc_cmd;
1902 
        switch (cmd) {
        default:
                qede_print("!%s(%d): unknown ioctl command %x\n",
                    __func__, qede->instance, cmd);
                return (IOC_INVAL);
        case LB_GET_INFO_SIZE:
                if (iocp->ioc_count != sizeof (lb_info_sz_t)) {
                        qede_info(qede, "error: ioc_count %d, sizeof %d",
                            (int)iocp->ioc_count, (int)sizeof (lb_info_sz_t));
                        return (IOC_INVAL);
                }
                lb_info_size = (void *)mp->b_cont->b_rptr;
                *lb_info_size = sizeof (loopmodes);
                return (IOC_REPLY);
        case LB_GET_INFO:
                if (iocp->ioc_count != sizeof (loopmodes)) {
                        qede_info(qede, "error: ioc_count %d, sizeof %d",
                            (int)iocp->ioc_count, (int)sizeof (loopmodes));
                        return (IOC_INVAL);
                }
                lb_prop = (void *)mp->b_cont->b_rptr;
                bcopy(loopmodes, lb_prop, sizeof (loopmodes));
                return (IOC_REPLY);
        case LB_GET_MODE:
                if (iocp->ioc_count != sizeof (uint32_t)) {
                        qede_info(qede, "error: ioc_count %d, sizeof %d",
                            (int)iocp->ioc_count, (int)sizeof (uint32_t));
                        return (IOC_INVAL);
                }
                lb_mode = (void *)mp->b_cont->b_rptr;
                *lb_mode = qede->loop_back_mode;
                return (IOC_REPLY);
        case LB_SET_MODE:
                if (iocp->ioc_count != sizeof (uint32_t)) {
                        qede_info(qede, "error: ioc_count %d, sizeof %d",
                            (int)iocp->ioc_count, (int)sizeof (uint32_t));
                        return (IOC_INVAL);
                }
                lb_mode = (void *)mp->b_cont->b_rptr;
                return (qede_set_loopback_mode(qede, *lb_mode));
        }
1944 }
1945 
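/*
 * GLDv3 mc_ioctl entry point: reject ioctls while suspending/suspended,
 * enforce privilege for commands that need it, dispatch to the QEDE_CMD or
 * loopback handlers, and send the reply (or nak) on the write queue.
 */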
1946 static void
qede_mac_ioctl(void *arg, queue_t *wq, mblk_t *mp)
{
        int err, cmd;
        qede_t *qede = (qede_t *)arg;
        struct iocblk *iocp = (struct iocblk *)(uintptr_t)mp->b_rptr;
        enum ioc_reply status = IOC_DONE;
        boolean_t need_privilege = B_TRUE;
1956 
1957         iocp->ioc_error = 0;
1958         cmd = iocp->ioc_cmd;
1959 
1960         mutex_enter(&qede->drv_lock);
1961         if ((qede->qede_state == QEDE_STATE_SUSPENDING) ||
1962            (qede->qede_state == QEDE_STATE_SUSPENDED)) {
1963                 mutex_exit(&qede->drv_lock);
1964                 miocnak(wq, mp, 0, EINVAL);
1965                 return;
1966         }
1967 
        switch (cmd) {
                case QEDE_CMD:
                        break;
                case LB_GET_INFO_SIZE:
                case LB_GET_INFO:
                case LB_GET_MODE:
                        need_privilege = B_FALSE;
                        /* FALLTHROUGH */
                case LB_SET_MODE:
                        break;
                default:
                        qede_print("!%s(%d) unknown ioctl command %x\n",
                            __func__, qede->instance, cmd);
                        miocnak(wq, mp, 0, EINVAL);
                        mutex_exit(&qede->drv_lock);
                        return;
        }
1984         
        if (need_privilege) {
                err = secpolicy_net_config(iocp->ioc_cr, B_FALSE);
                if (err) {
1988                         qede_info(qede, "secpolicy() failed");
1989                         miocnak(wq, mp, 0, err);
1990                         mutex_exit(&qede->drv_lock);
1991                         return;
1992                 }
1993         }
1994 
1995         switch (cmd) {
1996                 default:
1997                         qede_print("!%s(%d) : unknown ioctl command %x\n", 
1998                             __func__, qede->instance, cmd);
1999                         status = IOC_INVAL;
2000                         mutex_exit(&qede->drv_lock);
2001                         return;
2002                 case LB_GET_INFO_SIZE:
2003                 case LB_GET_INFO:
2004                 case LB_GET_MODE:
2005                 case LB_SET_MODE:
2006                         status = qede_loopback_ioctl(qede, wq, mp, iocp);
2007                         break;
2008                 case QEDE_CMD:
2009                         qede_ioctl(qede, cmd, wq, mp);
2010                         status = IOC_DONE; 
2011                         break;
2012         }
2013 
        switch (status) {
2015                 default:
2016                         qede_print("!%s(%d) : invalid status from ioctl",
2017                             __func__,qede->instance);
2018                         break;
2019                 case IOC_DONE:
2020                         /*
2021                          * OK, Reply already sent
2022                          */
2023                         
2024                         break;
2025                 case IOC_REPLY:
2026                         mp->b_datap->db_type = iocp->ioc_error == 0 ?
2027                                 M_IOCACK : M_IOCNAK;
2028                         qreply(wq, mp);
2029                         break;
                case IOC_INVAL:
                        mutex_exit(&qede->drv_lock);
                        miocnak(wq, mp, 0, iocp->ioc_error == 0 ?
                            EINVAL : iocp->ioc_error);
2035                         return; 
2036         }
2037         mutex_exit(&qede->drv_lock);
2038 }
2039 
2040 extern ddi_dma_attr_t qede_buf2k_dma_attr_txbuf;
2041 extern ddi_dma_attr_t qede_dma_attr_rxbuf;
2042 extern ddi_dma_attr_t qede_dma_attr_desc;
2043 
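/*
 * GLDv3 mc_getcapab entry point: advertise checksum offload, LSO, ring and
 * (on illumos) transceiver capabilities based on the current driver
 * configuration.
 */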
2044 static boolean_t
qede_mac_get_capability(void *arg, mac_capab_t capability, void *cap_data)
{
        qede_t *qede = (qede_t *)arg;
        boolean_t ret = B_FALSE;
2052 
2053         switch (capability) {
2054         case MAC_CAPAB_HCKSUM: {
2055                 u32 *tx_flags = cap_data;
                /*
                 * Check if checksum offload is enabled on tx and
                 * advertise the checksum capability to the mac layer
                 * accordingly. On the rx side, checksummed packets
                 * are received anyway.
                 */
2063                 qede_info(qede, "%s tx checksum offload",
2064                     (qede->checksum == DEFAULT_CKSUM_OFFLOAD) ?
2065                     "Enabling":
2066                     "Disabling");
2067 
2068                 if (qede->checksum != DEFAULT_CKSUM_OFFLOAD) {
2069                         ret = B_FALSE;
2070                         break;
2071                 }
2072                 /*
2073                  * Hardware does not support ICMPv6 checksumming. Right now the
2074                  * GLDv3 doesn't provide us a way to specify that we don't
2075                  * support that. As such, we cannot indicate
2076                  * HCKSUM_INET_FULL_V6.
2077                  */
2078 
2079                 *tx_flags = HCKSUM_INET_FULL_V4 |
2080                     HCKSUM_IPHDRCKSUM;
2081                 ret = B_TRUE;
2082                 break;
2083         }
2084         case MAC_CAPAB_LSO: {
2085                 mac_capab_lso_t *cap_lso = (mac_capab_lso_t *)cap_data;
2086 
2087                 qede_info(qede, "%s large segmentation offload",
2088                     qede->lso_enable ? "Enabling": "Disabling");
2089                 if (qede->lso_enable) {
2090                         cap_lso->lso_flags = LSO_TX_BASIC_TCP_IPV4;
2091                         cap_lso->lso_basic_tcp_ipv4.lso_max = QEDE_LSO_MAXLEN;
2092                         ret = B_TRUE;
2093                 }
2094                 break;
2095         }
2096         case MAC_CAPAB_RINGS: {
2097 #ifndef NO_CROSSBOW
2098                 mac_capab_rings_t *cap_rings = cap_data;
2099 #ifndef ILLUMOS
2100                 cap_rings->mr_version = MAC_RINGS_VERSION_1;
2101 #endif
2102 
2103                 switch (cap_rings->mr_type) {
2104                 case MAC_RING_TYPE_RX:
2105 #ifndef ILLUMOS
2106                         cap_rings->mr_flags = MAC_RINGS_VLAN_TRANSPARENT;
2107 #endif
2108                         cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2109                         //cap_rings->mr_rnum = 1; /* qede variable */
2110                         cap_rings->mr_rnum = qede->num_fp; /* qede variable */
2111                         cap_rings->mr_gnum = 1;
2112                         cap_rings->mr_rget = qede_fill_ring;
2113                         cap_rings->mr_gget = qede_fill_group;
2114                         cap_rings->mr_gaddring = NULL;
2115                         cap_rings->mr_gremring = NULL;
2116 #ifndef ILLUMOS
2117                         cap_rings->mr_ggetringtc = NULL;
2118 #endif
2119                         ret = B_TRUE;
2120                         break;
2121                 case MAC_RING_TYPE_TX:
2122 #ifndef ILLUMOS
2123                         cap_rings->mr_flags = MAC_RINGS_VLAN_TRANSPARENT;
2124 #endif
2125                         cap_rings->mr_group_type = MAC_GROUP_TYPE_STATIC;
2126                         //cap_rings->mr_rnum = 1;
2127                         cap_rings->mr_rnum = qede->num_fp;
2128                         cap_rings->mr_gnum = 0;
2129                         cap_rings->mr_rget = qede_fill_ring;
2130                         cap_rings->mr_gget = qede_fill_group;
2131                         cap_rings->mr_gaddring = NULL;
2132                         cap_rings->mr_gremring = NULL;
2133 #ifndef ILLUMOS
2134                         cap_rings->mr_ggetringtc = NULL;
2135 #endif
2136                         ret = B_TRUE;
2137                         break;
2138                 default:
2139                         ret = B_FALSE;
2140                         break;
2141                 }
2142 #endif
2143                 break; /* CASE MAC_CAPAB_RINGS */
2144         }
2145 #ifdef ILLUMOS
2146         case MAC_CAPAB_TRANSCEIVER: {
2147                 mac_capab_transceiver_t *mct = cap_data;
2148 
2149                 mct->mct_flags = 0;
2150                 mct->mct_ntransceivers = qede->edev.num_hwfns;
2151                 mct->mct_info = qede_transceiver_info;
2152                 mct->mct_read = qede_transceiver_read;
2153 
2154                 ret = B_TRUE;
2155                 break;
2156         }
2157 #endif
2158         default:
2159                 break;
2160         }
2161 
        return (ret);
2163 }
2164 
2165 int
2166 qede_configure_link(qede_t *qede, bool op);
2167 
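/*
 * GLDv3 mc_setprop entry point. Only MAC_PROP_MTU and
 * MAC_PROP_EN_10GFDX_CAP are currently settable; everything else returns
 * ENOTSUP.
 */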
2168 static int
qede_mac_set_property(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, const void *pr_val)
{
        qede_t *qede = (qede_t *)arg;
2176         struct ecore_mcp_link_params *link_params;
2177         struct ecore_dev *edev = &qede->edev;
2178         struct ecore_hwfn *hwfn;
2179         int ret_val = 0, i;
2180         uint32_t option;
2181 
2182         mutex_enter(&qede->gld_lock);
        switch (pr_num) {
        case MAC_PROP_MTU:
                bcopy(pr_val, &option, sizeof (option));

                if (option == qede->mtu) {
                        ret_val = 0;
                        break;
                }
                if ((option != DEFAULT_JUMBO_MTU) &&
                    (option != DEFAULT_MTU)) {
                        ret_val = EINVAL;
                        break;
                }
                if (qede->qede_state == QEDE_STATE_STARTED) {
                        ret_val = EBUSY;
                        break;
                }

                /* Report the new MTU, not the stale one, to the mac layer */
                ret_val = mac_maxsdu_update(qede->mac_handle, option);
2203                 if (ret_val == 0) {
2204 
2205                         qede->mtu = option;
2206                         if (option == DEFAULT_JUMBO_MTU) {
2207                                 qede->jumbo_enable = B_TRUE;
2208                         } else {
2209                                 qede->jumbo_enable = B_FALSE;
2210                         }
2211 
2212                         hwfn = ECORE_LEADING_HWFN(edev);
2213                         hwfn->hw_info.mtu = qede->mtu;
2214                         ret_val = ecore_mcp_ov_update_mtu(hwfn, 
2215                             hwfn->p_main_ptt,
2216                             hwfn->hw_info.mtu);
2217                         if (ret_val != ECORE_SUCCESS) {
2218                                 qede_print("!%s(%d): MTU change %d option %d"
2219                                     "FAILED",
2220                                     __func__,qede->instance, qede->mtu, option);
2221                                 break;
2222                         }
2223                         qede_print("!%s(%d): MTU changed  %d MTU option"
2224                             " %d hwfn %d",
2225                             __func__,qede->instance, qede->mtu, 
2226                             option, hwfn->hw_info.mtu);
2227                 }
2228                 break;
2229 
2230         case MAC_PROP_EN_10GFDX_CAP:
2231                 hwfn = &edev->hwfns[0];
2232                 link_params = ecore_mcp_get_link_params(hwfn);
                if (*(uint8_t *)pr_val) {
                        link_params->speed.autoneg = 0;
                        link_params->speed.forced_speed = 10000;
                        link_params->speed.advertised_speeds =
                            NVM_CFG1_PORT_DRV_SPEED_CAPABILITY_MASK_10G;
                        qede->forced_speed_10G = *(uint8_t *)pr_val;
                } else {
                        memcpy(link_params,
                            &qede->link_input_params.default_link_params,
                            sizeof (struct ecore_mcp_link_params));
                        qede->forced_speed_10G = *(uint8_t *)pr_val;
                }
                if (qede->qede_state == QEDE_STATE_STARTED) {
                        (void) qede_configure_link(qede, 1);
                } else {
                        mutex_exit(&qede->gld_lock);
                        return (0);
                }
                break;
2253         default:
2254                 ret_val = ENOTSUP;
2255                 break;
2256         }
2257         mutex_exit(&qede->gld_lock);
2258         return (ret_val); 
2259 }
2260 
2261 static void
2262 qede_mac_stop(void *arg)
2263 {
        qede_t *qede = (qede_t *)arg;
2265         int status;
2266 
2267         qede_print("!%s(%d): called",
2268             __func__,qede->instance);
2269         mutex_enter(&qede->drv_lock);
2270         status = qede_stop(qede);
2271         if (status != DDI_SUCCESS) {
2272                 qede_print("!%s(%d): qede_stop "
2273                     "FAILED",
2274                 __func__,qede->instance);
2275         }
2276 
2277         mac_link_update(qede->mac_handle, LINK_STATE_UNKNOWN);
2278         mutex_exit(&qede->drv_lock);
2279 }
2280 
2281 static int
2282 qede_mac_start(void *arg)
2283 {
2284         qede_t *qede = (qede_t *)arg;
2285         int status;
2286 
2287         qede_print("!%s(%d): called", __func__,qede->instance);
2288         if (!mutex_tryenter(&qede->drv_lock)) {
2289                 return (EAGAIN);
2290         }
2291 
2292         if (qede->qede_state == QEDE_STATE_SUSPENDED) {
2293                 mutex_exit(&qede->drv_lock);
2294                 return (ECANCELED);
2295         }
2296 
2297         status = qede_start(qede);
2298         if (status != DDI_SUCCESS) {
2299                 mutex_exit(&qede->drv_lock);
2300                 return (EIO);
2301         }
2302 
2303         mutex_exit(&qede->drv_lock);
2304 
2305 #ifdef  DBLK_DMA_PREMAP
2306         qede->pm_handle = mac_pmh_tx_get(qede->mac_handle);
2307 #endif
2308         return (0);
2309 }
2310 
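/*
 * GLDv3 mc_getprop entry point: report MTU, link speed/duplex/state,
 * autonegotiation, flow control and 10G capability bits derived from the
 * current link configuration.
 */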
2311 static int
qede_mac_get_property(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    uint_t pr_valsize, void *pr_val)
{
        qede_t *qede = (qede_t *)arg;
        struct ecore_dev *edev = &qede->edev;
        link_state_t    link_state;
        link_duplex_t   link_duplex;
        uint64_t        link_speed;
        link_flowctrl_t link_flowctrl;
        struct qede_link_cfg link_cfg;
        int ret_val = 0;
2327 
        memset(&link_cfg, 0, sizeof (struct qede_link_cfg));
        qede_get_link_info(&edev->hwfns[0], &link_cfg);

        switch (pr_num) {
2335         case MAC_PROP_MTU:
2336 
2337                 ASSERT(pr_valsize >= sizeof(uint32_t));
2338                 bcopy(&qede->mtu, pr_val, sizeof(uint32_t));
2339                 break;
2340 
2341         case MAC_PROP_DUPLEX:
2342 
2343                 ASSERT(pr_valsize >= sizeof(link_duplex_t));
2344                 link_duplex = (qede->props.link_duplex) ?
2345                                           LINK_DUPLEX_FULL : LINK_DUPLEX_HALF;
2346                 bcopy(&link_duplex, pr_val, sizeof(link_duplex_t));
2347                 break;
2348 
2349         case MAC_PROP_SPEED:
2350 
2351                 ASSERT(pr_valsize >= sizeof(link_speed));
2352 
2353                 link_speed = (qede->props.link_speed * 1000000ULL);
2354                 bcopy(&link_speed, pr_val, sizeof(link_speed));
2355                 break;
2356 
2357         case MAC_PROP_STATUS:
2358 
2359                 ASSERT(pr_valsize >= sizeof(link_state_t));
2360 
2361                 link_state = (qede->params.link_state) ?
2362                                         LINK_STATE_UP : LINK_STATE_DOWN;
2363                 bcopy(&link_state, pr_val, sizeof(link_state_t));
2364                 qede_info(qede, "mac_prop_status %d\n", link_state);
2365                 break;  
2366 
2367         case MAC_PROP_AUTONEG:
2368 
2369                 *(uint8_t *)pr_val = link_cfg.autoneg;
2370                 break;
2371 
2372         case MAC_PROP_FLOWCTRL:
2373 
2374                 ASSERT(pr_valsize >= sizeof(link_flowctrl_t));
2375 
2376 /*
2377  * illumos does not have the notion of LINK_FLOWCTRL_AUTO at this time.
2378  */
2379 #ifndef ILLUMOS
                if (link_cfg.pause_cfg & QEDE_LINK_PAUSE_AUTONEG_ENABLE) {
                        link_flowctrl = LINK_FLOWCTRL_AUTO;
                }
#endif

                if (!(link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
                    !(link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
                        link_flowctrl = LINK_FLOWCTRL_NONE;
                }
                if ((link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
                    !(link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
                        link_flowctrl = LINK_FLOWCTRL_RX;
                }
                if (!(link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
                    (link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
                        link_flowctrl = LINK_FLOWCTRL_TX;
                }
                if ((link_cfg.pause_cfg & QEDE_LINK_PAUSE_RX_ENABLE) &&
                    (link_cfg.pause_cfg & QEDE_LINK_PAUSE_TX_ENABLE)) {
                        link_flowctrl = LINK_FLOWCTRL_BI;
                }
2401 
2402                 bcopy(&link_flowctrl, pr_val, sizeof (link_flowctrl_t));
2403                 break;
2404 
2405         case MAC_PROP_ADV_10GFDX_CAP:
2406                 *(uint8_t *)pr_val = link_cfg.adv_capab.param_10000fdx;
2407                 break;
2408 
2409         case MAC_PROP_EN_10GFDX_CAP:
2410                 *(uint8_t *)pr_val = qede->forced_speed_10G;
2411                 break;
2412 
2413         case MAC_PROP_PRIVATE:
2414         default:
2415                 return (ENOTSUP);
2416 
2417         }
2418                 
2419         return (0);
2420 }
2421 
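/*
 * GLDv3 mc_propinfo entry point: report permissions, ranges and defaults
 * for the properties handled above.
 */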
2422 static void
qede_mac_property_info(void *arg, const char *pr_name, mac_prop_id_t pr_num,
    mac_prop_info_handle_t prh)
{
        qede_t *qede = (qede_t *)arg;
        qede_link_props_t *def_cfg = &qede_def_link_props;
        link_flowctrl_t link_flowctrl;

        switch (pr_num) {
2435 
2436         case MAC_PROP_STATUS:
2437         case MAC_PROP_SPEED:
2438         case MAC_PROP_DUPLEX:
2439                 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
2440                 break;
2441 
2442         case MAC_PROP_MTU:
2443 
2444                 mac_prop_info_set_range_uint32(prh,
2445                     MIN_MTU,
2446                     MAX_MTU);
2447                 break;
2448 
2449         case MAC_PROP_AUTONEG:
2450 
2451                 mac_prop_info_set_default_uint8(prh, def_cfg->autoneg);
2452                 break;
2453  
2454         case MAC_PROP_FLOWCTRL:
2455 
2456                 if (!def_cfg->pause) {
2457                         link_flowctrl = LINK_FLOWCTRL_NONE;
2458                 } else {
2459                         link_flowctrl = LINK_FLOWCTRL_BI;
2460                 }
2461 
2462                 mac_prop_info_set_default_link_flowctrl(prh, link_flowctrl);
2463                 break;
2464 
2465         case MAC_PROP_EN_10GFDX_CAP:
2466                 mac_prop_info_set_perm(prh, MAC_PROP_PERM_RW);
2467                 break;
2468 
2469         case MAC_PROP_ADV_10GFDX_CAP:
2470                 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
2471                 break;
2472 
2473         default:
2474                 mac_prop_info_set_perm(prh, MAC_PROP_PERM_READ);
2475                 break;
2476 
        }
2478 }
2479 
2480 static mac_callbacks_t qede_callbacks =
2481 {
2482     (
2483       MC_IOCTL
2484 /*    | MC_RESOURCES */
2485     | MC_SETPROP
2486     | MC_GETPROP
2487     | MC_PROPINFO
2488     | MC_GETCAPAB
2489     ),
2490     qede_mac_stats,
2491     qede_mac_start,
2492     qede_mac_stop,
2493     qede_mac_promiscuous,
2494     qede_mac_multicast,
2495     NULL,
2496 #ifndef NO_CROSSBOW
2497     NULL,
2498 #else
2499     qede_mac_tx,
2500 #endif
2501     NULL,       /* qede_mac_resources, */
2502     qede_mac_ioctl,
2503     qede_mac_get_capability,
2504     NULL,
2505     NULL,
2506     qede_mac_set_property,
2507     qede_mac_get_property,
2508 #ifdef MC_PROPINFO
2509     qede_mac_property_info
2510 #endif
2511 };
2512 
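/*
 * Register this instance with the GLDv3 framework. Returns B_TRUE on a
 * successful mac_register(), B_FALSE otherwise.
 */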
2513 boolean_t
2514 qede_gld_init(qede_t *qede)
2515 {
        int status;
2517         mac_register_t *macp;
2518 
2519         macp = mac_alloc(MAC_VERSION);
2520         if (macp == NULL) {
2521                 cmn_err(CE_NOTE, "%s: mac_alloc() failed\n", __func__);
2522                 return (B_FALSE);
2523         }
2524 
2525         macp->m_driver = qede;
2526         macp->m_dip = qede->dip;
2527         macp->m_instance = qede->instance;
2528         macp->m_priv_props = NULL;
2529         macp->m_type_ident = MAC_PLUGIN_IDENT_ETHER;
2530         macp->m_src_addr = qede->ether_addr;
2531         macp->m_callbacks = &qede_callbacks;
2532         macp->m_min_sdu = 0;
2533         macp->m_max_sdu = qede->mtu;
2534         macp->m_margin = VLAN_TAGSZ;
2535 #ifdef  ILLUMOS
2536         macp->m_v12n = MAC_VIRT_LEVEL1;
2537 #endif
2538 
2539         status = mac_register(macp, &qede->mac_handle);
2540         if (status != 0) {
2541                 cmn_err(CE_NOTE, "%s: mac_register() failed\n", __func__);
2542         }
2543 
2544         mac_free(macp);
2545         if (status == 0) {
2546                 return (B_TRUE);
2547         }
2548         return (B_FALSE);
2549 }
2550 
boolean_t
qede_gld_fini(qede_t *qede)
{
        return (B_TRUE);
}

void
qede_link_update(qede_t *qede, link_state_t state)
{
        mac_link_update(qede->mac_handle, state);
}
2562