/******************************************************************************

  Copyright (c) 2001-2015, Intel Corporation
  All rights reserved.

  Redistribution and use in source and binary forms, with or without
  modification, are permitted provided that the following conditions are met:

   1. Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

   2. Redistributions in binary form must reproduce the above copyright
      notice, this list of conditions and the following disclaimer in the
      documentation and/or other materials provided with the distribution.

   3. Neither the name of the Intel Corporation nor the names of its
      contributors may be used to endorse or promote products derived from
      this software without specific prior written permission.

  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
  AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
  IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
  ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
  LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
  POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/
/*$FreeBSD$*/


#include "ixgbe_type.h"
#include "ixgbe_dcb.h"
#include "ixgbe_dcb_82599.h"

/**
 * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes to gather statistics for
 *
 * This function returns the status data for each of the Traffic Classes in use.
 */
s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw,
                                 struct ixgbe_hw_stats *stats,
                                 u8 tc_count)
{
        int tc;

        DEBUGFUNC("dcb_get_tc_stats");

        if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
                return IXGBE_ERR_PARAM;

        /* Statistics pertaining to each traffic class */
        for (tc = 0; tc < tc_count; tc++) {
                /* Transmitted Packets */
                stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc));
                /* Transmitted Bytes (read low first to prevent missed carry) */
                stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc));
                stats->qbtc[tc] +=
                        (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32);
                /* Received Packets */
                stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc));
                /* Received Bytes (read low first to prevent missed carry) */
                stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc));
                stats->qbrc[tc] +=
                        (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32);

                /* Received Dropped Packet */
                stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc));
        }

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data
 * @hw: pointer to hardware structure
 * @stats: pointer to statistics structure
 * @tc_count: number of traffic classes to gather statistics for
 *
 * This function returns the CBFC status data for each of the Traffic Classes.
 */
s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw,
                                  struct ixgbe_hw_stats *stats,
                                  u8 tc_count)
{
        int tc;

        DEBUGFUNC("dcb_get_pfc_stats");

        if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS)
                return IXGBE_ERR_PARAM;

        for (tc = 0; tc < tc_count; tc++) {
                /* Priority XOFF Transmitted */
                stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc));
                /* Priority XOFF Received */
                stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc));
        }

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Rx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
                                      u16 *max, u8 *bwg_id, u8 *tsa,
                                      u8 *map)
{
        u32 reg = 0;
        u32 credit_refill = 0;
        u32 credit_max = 0;
        u8  i = 0;

        /*
         * Disable the arbiter before changing parameters
         * (always enable recycle mode; WSP)
         */
        reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

        /*
         * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
         * corresponding bits set for the UPs that need to be mapped to that
         * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
         * up_to_tc_bitmap value for that TC will be 11000000 in binary.
         */
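        /*
         * Here the mapping comes from map[], where map[up] holds the TC for
         * user priority 'up'; each entry is packed into its own
         * IXGBE_RTRUP2TC_UP_SHIFT-bit field of RTRUP2TC.
         */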
        reg = 0;
        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
                reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT));

        IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg);

        /* Configure traffic class credits and priority */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                credit_refill = refill[i];
                credit_max = max[i];
                reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT);

                reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT;

                if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTRPT4C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg);
        }

        /*
         * Configure Rx packet plane (recycle mode; WSP) and
         * enable arbiter
         */
        reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC;
        IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 *
 * Configure Tx Descriptor Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
                                           u16 *max, u8 *bwg_id, u8 *tsa)
{
        u32 reg, max_credits;
        u8  i;

        /* Clear the per-Tx queue credits; we use per-TC instead */
        for (i = 0; i < 128; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_RTTDQSEL, i);
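                /* RTTDQSEL selects the queue whose per-queue credit
                 * register (RTTDT1C) the next write targets. */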
                IXGBE_WRITE_REG(hw, IXGBE_RTTDT1C, 0);
        }

        /* Configure traffic class credits and priority */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                max_credits = max[i];
                reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT;
                reg |= refill[i];
                reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT;

                if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
                        reg |= IXGBE_RTTDT2C_GSP;

                if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTTDT2C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg);
        }

        /*
         * Configure Tx descriptor plane (recycle mode; WSP) and
         * enable arbiter
         */
        reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter
 * @hw: pointer to hardware structure
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Tx Packet Arbiter and credits for each traffic class.
 */
s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill,
                                           u16 *max, u8 *bwg_id, u8 *tsa,
                                           u8 *map)
{
        u32 reg;
        u8 i;

        /*
         * Disable the arbiter before changing parameters
         * (always enable recycle mode; SP; arb delay)
         */
        reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
              (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT) |
              IXGBE_RTTPCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

        /*
         * Map all UPs to TCs. up_to_tc_bitmap for each TC has the
         * corresponding bits set for the UPs that need to be mapped to that
         * TC, e.g. if priorities 6 and 7 are to be mapped to a TC then the
         * up_to_tc_bitmap value for that TC will be 11000000 in binary.
         */
        reg = 0;
        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++)
                reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT));

        IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg);

        /* Configure traffic class credits and priority */
        for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                reg = refill[i];
                reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT;
                reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT;

                if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee)
                        reg |= IXGBE_RTTPT2C_GSP;

                if (tsa[i] == ixgbe_dcb_tsa_strict)
                        reg |= IXGBE_RTTPT2C_LSP;

                IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg);
        }

        /*
         * Configure Tx packet plane (recycle mode; SP; arb delay) and
         * enable arbiter
         */
        reg = IXGBE_RTTPCS_TPPAC | IXGBE_RTTPCS_TPRM |
              (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT);
        IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_pfc_82599 - Configure priority flow control
 * @hw: pointer to hardware structure
 * @pfc_en: enabled pfc bitmask
 * @map: priority to tc assignments indexed by priority
 *
 * Configure Priority Flow Control (PFC) for each traffic class.
 */
s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map)
{
        u32 i, j, fcrtl, reg;
        u8 max_tc = 0;

        /* Enable Transmit Priority Flow Control */
        IXGBE_WRITE_REG(hw, IXGBE_FCCFG, IXGBE_FCCFG_TFCE_PRIORITY);

        /* Enable Receive Priority Flow Control */
        reg = IXGBE_READ_REG(hw, IXGBE_MFLCN);
        reg |= IXGBE_MFLCN_DPF;

        /*
         * X540 supports per TC Rx priority flow control.  So
         * clear all TCs and only enable those that should be
         * enabled.
         */
        reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE);

        if (hw->mac.type >= ixgbe_mac_X540)
                reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT;

        if (pfc_en)
                reg |= IXGBE_MFLCN_RPFCE;

        IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg);

        for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) {
                if (map[i] > max_tc)
                        max_tc = map[i];
        }

        /* Configure PFC Tx thresholds per TC */
        for (i = 0; i <= max_tc; i++) {
                int enabled = 0;

                for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) {
                        if ((map[j] == i) && (pfc_en & (1 << j))) {
                                enabled = 1;
                                break;
                        }
                }

                if (enabled) {
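                        /*
                         * The water marks appear to be kept in kilobytes;
                         * shifting left by 10 converts them to the byte
                         * units of the FCRTH/FCRTL threshold fields.
                         */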
                        reg = (hw->fc.high_water[i] << 10) | IXGBE_FCRTH_FCEN;
                        fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl);
                } else {
                        /*
                         * In order to prevent Tx hangs when the internal Tx
                         * switch is enabled we must set the high water mark
                         * to the Rx packet buffer size - 24KB.  This allows
                         * the Tx switch to function even under heavy Rx
                         * workloads.
                         */
                        reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576;
                        IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
                }

                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg);
        }

        for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) {
                IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0);
                IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0);
        }

        /* Configure pause time (2 TCs per register) */
        reg = hw->fc.pause_time | (hw->fc.pause_time << 16);
        for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++)
                IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg);

        /* Configure flow control refresh threshold value */
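        /* (half the pause time, so XOFF is refreshed before the link
         * partner's pause timer can expire) */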
        IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_tc_stats_82599 - Config traffic class statistics
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure queue statistics registers; all queues belonging to the same
 * traffic class use a single set of queue statistics counters.
 */
s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw,
                                    struct ixgbe_dcb_config *dcb_config)
{
        u32 reg = 0;
        u8  i   = 0;
        u8 tc_count = 8;
        bool vt_mode = FALSE;

        if (dcb_config != NULL) {
                tc_count = dcb_config->num_tcs.pg_tcs;
                vt_mode = dcb_config->vt_mode;
        }

        if (!((tc_count == 8 && vt_mode == FALSE) || tc_count == 4))
                return IXGBE_ERR_PARAM;

        if (tc_count == 8 && vt_mode == FALSE) {
                /*
                 * Receive Queues stats setting
                 * 32 RQSMR registers, each configuring 4 queues.
                 *
                 * Set all 16 queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 */
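                /*
                 * Each byte of RQSMR picks the stat counter for one of the
                 * four queues that register covers, so 0x01010101 * (i / 4)
                 * replicates the TC index into all four fields.
                 */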
                for (i = 0; i < 32; i++) {
                        reg = 0x01010101 * (i / 4);
                        IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
                }
                /*
                 * Transmit Queues stats setting
                 * 32 TQSM registers, each controlling 4 queues.
                 *
                 * Set all queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 * Tx queues are allocated non-uniformly to TCs:
                 * 32, 32, 16, 16, 8, 8, 8, 8.
                 */
                for (i = 0; i < 32; i++) {
                        if (i < 8)
                                reg = 0x00000000;
                        else if (i < 16)
                                reg = 0x01010101;
                        else if (i < 20)
                                reg = 0x02020202;
                        else if (i < 24)
                                reg = 0x03030303;
                        else if (i < 26)
                                reg = 0x04040404;
                        else if (i < 28)
                                reg = 0x05050505;
                        else if (i < 30)
                                reg = 0x06060606;
                        else
                                reg = 0x07070707;
                        IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
                }
        } else if (tc_count == 4 && vt_mode == FALSE) {
                /*
                 * Receive Queues stats setting
                 * 32 RQSMR registers, each configuring 4 queues.
                 *
                 * Set all 16 queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++) {
                        /* In 4 TC mode, odd 16-queue ranges are not used. */
                        if (i % 8 > 3)
                                continue;
                        reg = 0x01010101 * (i / 8);
                        IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg);
                }
                /*
                 * Transmit Queues stats setting
                 * 32 TQSM registers, each controlling 4 queues.
                 *
                 * Set all queues of each TC to the same stat
                 * with TC 'n' going to stat 'n'.
                 * Tx queues are allocated non-uniformly to TCs:
                 * 64, 32, 16, 16.
                 */
                for (i = 0; i < 32; i++) {
                        if (i < 16)
                                reg = 0x00000000;
                        else if (i < 24)
                                reg = 0x01010101;
                        else if (i < 28)
                                reg = 0x02020202;
                        else
                                reg = 0x03030303;
                        IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg);
                }
        } else if (tc_count == 4 && vt_mode == TRUE) {
                /*
                 * Receive Queues stats setting
                 * 32 RQSMR registers, each configuring 4 queues.
                 *
                 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
                 * pool. Set all 32 queues of each TC across pools to the same
                 * stat with TC 'n' going to stat 'n'.
                 */
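                /*
                 * Each 4-queue group holds one queue per TC in this mode, so
                 * 0x03020100 maps queue 'k' of every group to stat 'k'.
                 */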
                for (i = 0; i < 32; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100);
                /*
                 * Transmit Queues stats setting
                 * 32 TQSM registers, each controlling 4 queues.
                 *
                 * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each
                 * pool. Set all 32 queues of each TC across pools to the same
                 * stat with TC 'n' going to stat 'n'.
                 */
                for (i = 0; i < 32; i++)
                        IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100);
        }

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_config_82599 - Configure general DCB parameters
 * @hw: pointer to hardware structure
 * @dcb_config: pointer to ixgbe_dcb_config structure
 *
 * Configure general DCB parameters.
 */
s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw,
                           struct ixgbe_dcb_config *dcb_config)
{
        u32 reg;
        u32 q;

        /* Disable the Tx desc arbiter so that MTQC can be changed */
        reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        reg |= IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        reg = IXGBE_READ_REG(hw, IXGBE_MRQC);
        if (dcb_config->num_tcs.pg_tcs == 8) {
                /* Enable DCB for Rx with 8 TCs */
                switch (reg & IXGBE_MRQC_MRQE_MASK) {
                case 0:
                case IXGBE_MRQC_RT4TCEN:
                        /* RSS disabled cases */
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RT8TCEN;
                        break;
                case IXGBE_MRQC_RSSEN:
                case IXGBE_MRQC_RTRSS4TCEN:
                        /* RSS enabled cases */
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RTRSS8TCEN;
                        break;
                default:
                        /*
                         * Unsupported value, assume stale data,
                         * overwrite no RSS
                         */
                        ASSERT(0);
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RT8TCEN;
                }
        }
        if (dcb_config->num_tcs.pg_tcs == 4) {
                /* We support both VT-on and VT-off with 4 TCs. */
                if (dcb_config->vt_mode)
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_VMDQRT4TCEN;
                else
                        reg = (reg & ~IXGBE_MRQC_MRQE_MASK) |
                              IXGBE_MRQC_RTRSS4TCEN;
        }
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg);

        /* Enable DCB for Tx with 8 TCs */
        if (dcb_config->num_tcs.pg_tcs == 8)
                reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ;
        else {
                /* We support both VT-on and VT-off with 4 TCs. */
                reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ;
                if (dcb_config->vt_mode)
                        reg |= IXGBE_MTQC_VT_ENA;
        }
        IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg);

        /* Disable drop for all queues */
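        /* (a QDE write with the WRITE bit set and the ENABLE bit clear
         * turns drop-enable off for the indexed queue) */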
        for (q = 0; q < 128; q++)
                IXGBE_WRITE_REG(hw, IXGBE_QDE,
                                (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT)));

        /* Enable the Tx desc arbiter */
        reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS);
        reg &= ~IXGBE_RTTDCS_ARBDIS;
        IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg);

        /* Enable Security TX Buffer IFG for DCB */
        reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG);
        reg |= IXGBE_SECTX_DCB;
        IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg);

        return IXGBE_SUCCESS;
}

/**
 * ixgbe_dcb_hw_config_82599 - Configure and enable DCB
 * @hw: pointer to hardware structure
 * @link_speed: unused, kept for interface compatibility
 * @refill: refill credits indexed by traffic class
 * @max: max credits indexed by traffic class
 * @bwg_id: bandwidth grouping indexed by traffic class
 * @tsa: transmission selection algorithm indexed by traffic class
 * @map: priority to tc assignments indexed by priority
 *
 * Configure dcb settings and enable dcb mode.
 */
s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed,
                              u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa,
                              u8 *map)
{
        UNREFERENCED_1PARAMETER(link_speed);

        ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa,
                                          map);
        ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id,
                                               tsa);
        ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id,
                                               tsa, map);

        return IXGBE_SUCCESS;
}