1 /* 2 * This file and its contents are supplied under the terms of the 3 * Common Development and Distribution License ("CDDL"), version 1.0. 4 * You may only use this file in accordance with the terms of version 5 * 1.0 of the CDDL. 6 * 7 * A full copy of the text of the CDDL should have accompanied this 8 * source. A copy of the CDDL is also available via the Internet at 9 * http://www.illumos.org/license/CDDL. 10 */ 11 12 /* 13 * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved. 14 * Copyright 2019 Joyent, Inc. 15 * Copyright 2017 Tegile Systems, Inc. All rights reserved. 16 */ 17 18 /* 19 * i40e - Intel 10/40 Gb Ethernet driver 20 * 21 * The i40e driver is the main software device driver for the Intel 40 Gb family 22 * of devices. Note that these devices come in many flavors with both 40 GbE 23 * ports and 10 GbE ports. This device is the successor to the 82599 family of 24 * devices (ixgbe). 25 * 26 * Unlike previous generations of Intel 1 GbE and 10 GbE devices, the 40 GbE 27 * devices defined in the XL710 controller (previously known as Fortville) are a 28 * rather different beast and have a small switch embedded inside of them. In 29 * addition, the way that most of the programming is done has been overhauled. 30 * As opposed to just using PCIe memory mapped registers, it also has an 31 * administrative queue which is used to communicate with firmware running on 32 * the chip. 33 * 34 * Each physical function in the hardware shows up as a device that this driver 35 * will bind to. The hardware splits many resources evenly across all of the 36 * physical functions present on the device, while other resources are instead 37 * shared across the entire card and its up to the device driver to 38 * intelligently partition them. 39 * 40 * ------------ 41 * Organization 42 * ------------ 43 * 44 * This driver is made up of several files which have their own theory 45 * statements spread across them. We'll touch on the high level purpose of each 46 * file here, and then we'll get into more discussion on how the device is 47 * generally modelled with respect to the interfaces in illumos. 48 * 49 * i40e_gld.c: This file contains all of the bindings to MAC and the networking 50 * stack. 51 * 52 * i40e_intr.c: This file contains all of the interrupt service routines and 53 * contains logic to enable and disable interrupts on the hardware. 54 * It also contains the logic to map hardware resources such as the 55 * rings to and from interrupts and controls their ability to fire. 56 * 57 * There is a big theory statement on interrupts present there. 58 * 59 * i40e_main.c: The file that you're currently in. It interfaces with the 60 * traditional OS DDI interfaces and is in charge of configuring 61 * the device. 62 * 63 * i40e_osdep.[ch]: These files contain interfaces and definitions needed to 64 * work with Intel's common code for the device. 65 * 66 * i40e_stats.c: This file contains the general work and logic around our 67 * kstats. A theory statement on their organization and use of the 68 * hardware exists there. 69 * 70 * i40e_sw.h: This header file contains all of the primary structure definitions 71 * and constants that are used across the entire driver. 72 * 73 * i40e_transceiver.c: This file contains all of the logic for sending and 74 * receiving data. It contains all of the ring and DMA 75 * allocation logic, as well as, the actual interfaces to 76 * send and receive data. 
 *
 * A big theory statement on ring management, descriptors,
 * and how it ties into the OS is present there.
 *
 * --------------
 * General Design
 * --------------
 *
 * Before we go too far into the general way we've laid out data structures and
 * the like, it's worth taking some time to explain how the hardware is
 * organized. This organization informs a lot of how we do things at this time
 * in the driver.
 *
 * Each physical device consists of one or more ports, which are considered
 * physical functions in the PCI sense and thus each get enumerated by the
 * system, resulting in an instance being created and attached to. While many
 * resources are unique to each physical function, i.e. each instance of the
 * device, many others are shared across all of them. Several resources have an
 * amount reserved for each Virtual Station Interface (VSI) and then a static
 * pool of resources, available for all functions on the card.
 *
 * The most important resources in hardware are its transmit and receive queue
 * pairs (i40e_trqpair_t). These should be thought of as rings in GLDv3
 * parlance. There are a set number of these on each device; however, they are
 * statically partitioned among all of the different physical functions.
 *
 * 'Fortville' (the code name for this device family) is basically a switch. To
 * map MAC addresses and other things to queues, we end up having to create
 * Virtual Station Interfaces (VSIs) and establish forwarding rules that direct
 * traffic to a queue. A VSI owns a collection of queues and has a series of
 * forwarding rules that point to it. One way to think of this is to treat it
 * like MAC does a VNIC. What MAC calls a group (a collection of rings and
 * classification resources) corresponds to a VSI in i40e.
 *
 * The set of VSIs is shared across the entire device, though there may be some
 * amount that are reserved to each PF. Because the GLDv3 does not let us change
 * the number of groups dynamically, we instead statically divide this amount
 * evenly between all the functions that exist. In addition, we have the same
 * problem with the MAC address forwarding rules. A static number of these
 * exists, shared across all of the functions.
 *
 * To handle both of these resources, what we end up doing is going through and
 * determining which functions belong to the same device. Nominally one might do
 * this by having a nexus driver; however, a prime requirement for a nexus
 * driver is identifying the various children and activating them. While it is
 * possible to get this information from NVRAM, we would end up duplicating a
 * lot of the PCI enumeration logic. Really, at the end of the day, the device
 * doesn't give us the traditional identification properties we want from a
 * nexus driver.
 *
 * Instead, we rely on some properties that are guaranteed to be unique. While
 * it might be tempting to leverage the PBA or serial number of the device from
 * NVRAM, there is nothing that says that two devices can't be mis-programmed to
 * have the same values in NVRAM. Instead, we uniquely identify a group of
 * functions based on their parent in the /devices tree, their PCI bus, and
 * their PCI device identifiers. Using any one of these on its own would not be
 * sufficient.
 *
 * For each unique PCI device that we encounter, we'll create an i40e_device_t.
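 *
 * As a purely illustrative example (the numbers below are made up), a
 * dual-port card might enumerate as:
 *
 *	PF 0: parent = <root port dev_info_t>, bus = 0x83, dev = 0x0, func = 0
 *	PF 1: parent = <root port dev_info_t>, bus = 0x83, dev = 0x0, func = 1
 *
 * The two functions share the same parent, bus, and device number and differ
 * only in their function number, so both instances resolve to a single
 * i40e_device_t (see i40e_device_find()); the card-wide resources hung off of
 * it are then divided between the functions as described below.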
 * From there, because we don't have a good way to tell the GLDv3 about sharing
 * resources between everything, we'll end up just dividing the resources
 * evenly between all of the functions. Longer term, if we don't have to declare
 * to the GLDv3 that these resources are shared, then we'll maintain a pool and
 * have each PF allocate from the pool on the device. That way, if only two of
 * four ports are being used, for example, all of the resources can still be
 * used.
 *
 * -------------------------------------------
 * Transmit and Receive Queue Pair Allocations
 * -------------------------------------------
 *
 * NVRAM ends up assigning each PF its own share of the transmit and receive LAN
 * queue pairs; we have no way of modifying this share, only observing it. From
 * there, it's up to us to map these queues to VSIs and VFs. Since we don't
 * support any VFs at this time, we only focus on assignments to VSIs.
 *
 * At the moment, we use a static mapping of transmit/receive queue pairs to a
 * given VSI (i.e. rings to a group). In the fullness of time, we want to make
 * this fully dynamic and take advantage of documented, but not yet available,
 * functionality for adding filters based on VXLAN and other encapsulation
 * technologies.
 *
 * -------------------------------------
 * Broadcast, Multicast, and Promiscuous
 * -------------------------------------
 *
 * As part of the GLDv3, we need to make sure that we can handle receiving
 * broadcast and multicast traffic, as well as enabling promiscuous mode when
 * requested. GLDv3 requires that all broadcast and multicast traffic be
 * retrieved by the default group, i.e. the first one. This is the same thing as
 * the default VSI.
 *
 * To receive broadcast traffic, we enable it through the admin queue, rather
 * than use one of our filters for it. For multicast traffic, we reserve a
 * certain number of the hash filters and assign them to a given PF. When we
 * exceed those, we then switch to using promiscuous mode for multicast traffic.
 *
 * More specifically, once we exceed the number of filters (indicated by the
 * fact that i40e_t`i40e_resources.ifr_nmcastfilt ==
 * i40e_t`i40e_resources.ifr_nmcastfilt_used), we instead need to toggle
 * promiscuous mode. If promiscuous mode is toggled, then we keep track of the
 * number of MACs added to it by incrementing i40e_t`i40e_mcast_promisc_count.
 * Promiscuous mode will stay enabled until that count reaches zero, indicating
 * that we once again have a corresponding filter entry for every multicast
 * address that has been added.
 *
 * Because MAC itself wants to toggle promiscuous mode, which includes both
 * unicast and multicast traffic, we go through and keep track of that
 * ourselves. That is maintained through the use of the i40e_t`i40e_promisc_on
 * member.
 *
 * --------------
 * VSI Management
 * --------------
 *
 * The PFs share 384 VSIs. The firmware creates one VSI per PF by default.
 * During chip start we retrieve the SEID of this VSI and assign it as the
 * default VSI for our VEB (one VEB per PF). We then add additional VSIs to
 * the VEB up to the determined number of rx groups: i40e_t`i40e_num_rx_groups.
 * We currently cap this number at I40E_GROUP_MAX to a) make sure all PFs can
 * allocate the same number of VSIs, and b) keep the interrupt multiplexing
 * under control.
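 *
 * As a simplified sketch of how these quantities relate in the MSI-X case
 * (illustrative pseudo-code only, not the literal implementation; see
 * i40e_alloc_intrs() and i40e_chip_start() for the real logic):
 *
 *	ngroups = I40E_GROUP_MAX;	one VSI backs each rx group
 *	per_vsi = queue pairs per VSI, bounded by the available MSI-X
 *		  vectors (minus vector 0, which is reserved for the admin
 *		  queue) and the per-traffic-class queue limit
 *	i40e_t`i40e_num_trqpairs = per_vsi * ngroups;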
In the future, when we improve the interrupt allocation, we 198 * may want to revisit this cap to make better use of the available VSIs. The 199 * VSI allocation and configuration can be found in i40e_chip_start(). 200 * 201 * ---------------- 202 * Structure Layout 203 * ---------------- 204 * 205 * The following images relates the core data structures together. The primary 206 * structure in the system is the i40e_t. It itself contains multiple rings, 207 * i40e_trqpair_t's which contain the various transmit and receive data. The 208 * receive data is stored outside of the i40e_trqpair_t and instead in the 209 * i40e_rx_data_t. The i40e_t has a corresponding i40e_device_t which keeps 210 * track of per-physical device state. Finally, for every active descriptor, 211 * there is a corresponding control block, which is where the 212 * i40e_rx_control_block_t and the i40e_tx_control_block_t come from. 213 * 214 * +-----------------------+ +-----------------------+ 215 * | Global i40e_t list | | Global Device list | 216 * | | +--| | 217 * | i40e_glist | | | i40e_dlist | 218 * +-----------------------+ | +-----------------------+ 219 * | v 220 * | +------------------------+ +-----------------------+ 221 * | | Device-wide Structure |----->| Device-wide Structure |--> ... 222 * | | i40e_device_t | | i40e_device_t | 223 * | | | +-----------------------+ 224 * | | dev_info_t * ------+--> Parent in devices tree. 225 * | | uint_t ------+--> PCI bus number 226 * | | uint_t ------+--> PCI device number 227 * | | uint_t ------+--> Number of functions 228 * | | i40e_switch_rsrcs_t ---+--> Captured total switch resources 229 * | | list_t ------+-------------+ 230 * | +------------------------+ | 231 * | ^ | 232 * | +--------+ | 233 * | | v 234 * | +---------------------------+ | +-------------------+ 235 * +->| GLDv3 Device, per PF |-----|-->| GLDv3 Device (PF) |--> ... 236 * | i40e_t | | | i40e_t | 237 * | **Primary Structure** | | +-------------------+ 238 * | | | 239 * | i40e_device_t * --+-----+ 240 * | i40e_state_t --+---> Device State 241 * | i40e_hw_t --+---> Intel common code structure 242 * | mac_handle_t --+---> GLDv3 handle to MAC 243 * | ddi_periodic_t --+---> Link activity timer 244 * | i40e_vsi_t * --+---> Array of VSIs 245 * | i40e_func_rsrc_t --+---> Available hardware resources 246 * | i40e_switch_rsrc_t * --+---> Switch resource snapshot 247 * | i40e_sdu --+---> Current MTU 248 * | i40e_frame_max --+---> Current HW frame size 249 * | i40e_uaddr_t * --+---> Array of assigned unicast MACs 250 * | i40e_maddr_t * --+---> Array of assigned multicast MACs 251 * | i40e_mcast_promisccount --+---> Active multicast state 252 * | i40e_promisc_on --+---> Current promiscuous mode state 253 * | uint_t --+---> Number of transmit/receive pairs 254 * | i40e_rx_group_t * --+---> Array of Rx groups 255 * | kstat_t * --+---> PF kstats 256 * | i40e_pf_stats_t --+---> PF kstat backing data 257 * | i40e_trqpair_t * --+---------+ 258 * +---------------------------+ | 259 * | 260 * v 261 * +-------------------------------+ +-----------------------------+ 262 * | Transmit/Receive Queue Pair |-------| Transmit/Receive Queue Pair |->... 
263 * | i40e_trqpair_t | | i40e_trqpair_t | 264 * + Ring Data Structure | +-----------------------------+ 265 * | | 266 * | mac_ring_handle_t +--> MAC RX ring handle 267 * | mac_ring_handle_t +--> MAC TX ring handle 268 * | i40e_rxq_stat_t --+--> RX Queue stats 269 * | i40e_txq_stat_t --+--> TX Queue stats 270 * | uint32_t (tx ring size) +--> TX Ring Size 271 * | uint32_t (tx free list size) +--> TX Free List Size 272 * | i40e_dma_buffer_t --------+--> TX Descriptor ring DMA 273 * | i40e_tx_desc_t * --------+--> TX descriptor ring 274 * | volatile unt32_t * +--> TX Write back head 275 * | uint32_t -------+--> TX ring head 276 * | uint32_t -------+--> TX ring tail 277 * | uint32_t -------+--> Num TX desc free 278 * | i40e_tx_control_block_t * --+--> TX control block array ---+ 279 * | i40e_tx_control_block_t ** --+--> TCB work list ----+ 280 * | i40e_tx_control_block_t ** --+--> TCB free list ---+ 281 * | uint32_t -------+--> Free TCB count | 282 * | i40e_rx_data_t * -------+--+ v 283 * +-------------------------------+ | +---------------------------+ 284 * | | Per-TX Frame Metadata | 285 * | | i40e_tx_control_block_t | 286 * +--------------------+ | | 287 * | mblk to transmit <--+--- mblk_t * | 288 * | type of transmit <--+--- i40e_tx_type_t | 289 * | TX DMA handle <--+--- ddi_dma_handle_t | 290 * v TX DMA buffer <--+--- i40e_dma_buffer_t | 291 * +------------------------------+ +---------------------------+ 292 * | Core Receive Data | 293 * | i40e_rx_data_t | 294 * | | 295 * | i40e_dma_buffer_t --+--> RX descriptor DMA Data 296 * | i40e_rx_desc_t --+--> RX descriptor ring 297 * | uint32_t --+--> Next free desc. 298 * | i40e_rx_control_block_t * --+--> RX Control Block Array ---+ 299 * | i40e_rx_control_block_t ** --+--> RCB work list ---+ 300 * | i40e_rx_control_block_t ** --+--> RCB free list ---+ 301 * +------------------------------+ | 302 * ^ | 303 * | +---------------------------+ | 304 * | | Per-RX Frame Metadata |<---------------+ 305 * | | i40e_rx_control_block_t | 306 * | | | 307 * | | mblk_t * ----+--> Received mblk_t data 308 * | | uint32_t ----+--> Reference count 309 * | | i40e_dma_buffer_t ----+--> Receive data DMA info 310 * | | frtn_t ----+--> mblk free function info 311 * +-----+-- i40e_rx_data_t * | 312 * +---------------------------+ 313 * 314 * ------------- 315 * Lock Ordering 316 * ------------- 317 * 318 * In order to ensure that we don't deadlock, the following represents the 319 * lock order being used. When grabbing locks, follow the following order. Lower 320 * numbers are more important. Thus, the i40e_glock which is number 0, must be 321 * taken before any other locks in the driver. On the other hand, the 322 * i40e_t`i40e_stat_lock, has the highest number because it's the least 323 * important lock. Note, that just because one lock is higher than another does 324 * not mean that all intermediary locks are required. 325 * 326 * 0) i40e_glock 327 * 1) i40e_t`i40e_general_lock 328 * 329 * 2) i40e_trqpair_t`itrq_rx_lock 330 * 3) i40e_trqpair_t`itrq_tx_lock 331 * 4) i40e_t`i40e_rx_pending_lock 332 * 5) i40e_trqpair_t`itrq_tcb_lock 333 * 334 * 6) i40e_t`i40e_stat_lock 335 * 336 * Rules and expectations: 337 * 338 * 1) A thread holding locks belong to one PF should not hold locks belonging to 339 * a second. If for some reason this becomes necessary, locks should be grabbed 340 * based on the list order in the i40e_device_t, which implies that the 341 * i40e_glock is held. 
342 * 343 * 2) When grabbing locks between multiple transmit and receive queues, the 344 * locks for the lowest number transmit/receive queue should be grabbed first. 345 * 346 * 3) When grabbing both the transmit and receive lock for a given queue, always 347 * grab i40e_trqpair_t`itrq_rx_lock before the i40e_trqpair_t`itrq_tx_lock. 348 * 349 * 4) The following pairs of locks are not expected to be held at the same time: 350 * 351 * o i40e_t`i40e_rx_pending_lock and i40e_trqpair_t`itrq_tcb_lock 352 * 353 * ----------- 354 * Future Work 355 * ----------- 356 * 357 * At the moment the i40e_t driver is rather bare bones, allowing us to start 358 * getting data flowing and folks using it while we develop additional features. 359 * While bugs have been filed to cover this future work, the following gives an 360 * overview of expected work: 361 * 362 * o DMA binding and breaking up the locking in ring recycling. 363 * o Enhanced detection of device errors 364 * o Participation in IRM 365 * o FMA device reset 366 * o Stall detection, temperature error detection, etc. 367 * o More dynamic resource pools 368 */ 369 370 #include "i40e_sw.h" 371 372 static char i40e_ident[] = "Intel 10/40Gb Ethernet v1.0.3"; 373 374 /* 375 * The i40e_glock primarily protects the lists below and the i40e_device_t 376 * structures. 377 */ 378 static kmutex_t i40e_glock; 379 static list_t i40e_glist; 380 static list_t i40e_dlist; 381 382 /* 383 * Access attributes for register mapping. 384 */ 385 static ddi_device_acc_attr_t i40e_regs_acc_attr = { 386 DDI_DEVICE_ATTR_V1, 387 DDI_STRUCTURE_LE_ACC, 388 DDI_STRICTORDER_ACC, 389 DDI_FLAGERR_ACC 390 }; 391 392 /* 393 * Logging function for this driver. 394 */ 395 static void 396 i40e_dev_err(i40e_t *i40e, int level, boolean_t console, const char *fmt, 397 va_list ap) 398 { 399 char buf[1024]; 400 401 (void) vsnprintf(buf, sizeof (buf), fmt, ap); 402 403 if (i40e == NULL) { 404 cmn_err(level, (console) ? "%s: %s" : "!%s: %s", 405 I40E_MODULE_NAME, buf); 406 } else { 407 dev_err(i40e->i40e_dip, level, (console) ? "%s" : "!%s", 408 buf); 409 } 410 } 411 412 /* 413 * Because there's the stupid trailing-comma problem with the C preprocessor 414 * and variable arguments, I need to instantiate these. Pardon the redundant 415 * code. 416 */ 417 /*PRINTFLIKE2*/ 418 void 419 i40e_error(i40e_t *i40e, const char *fmt, ...) 420 { 421 va_list ap; 422 423 va_start(ap, fmt); 424 i40e_dev_err(i40e, CE_WARN, B_FALSE, fmt, ap); 425 va_end(ap); 426 } 427 428 /*PRINTFLIKE2*/ 429 void 430 i40e_log(i40e_t *i40e, const char *fmt, ...) 431 { 432 va_list ap; 433 434 va_start(ap, fmt); 435 i40e_dev_err(i40e, CE_NOTE, B_FALSE, fmt, ap); 436 va_end(ap); 437 } 438 439 /*PRINTFLIKE2*/ 440 void 441 i40e_notice(i40e_t *i40e, const char *fmt, ...) 442 { 443 va_list ap; 444 445 va_start(ap, fmt); 446 i40e_dev_err(i40e, CE_NOTE, B_TRUE, fmt, ap); 447 va_end(ap); 448 } 449 450 /* 451 * Various parts of the driver need to know if the controller is from the X722 452 * family, which has a few additional capabilities and different programming 453 * means. We don't consider virtual functions as part of this as they are quite 454 * different and will require substantially more work. 
455 */ 456 static boolean_t 457 i40e_is_x722(i40e_t *i40e) 458 { 459 return (i40e->i40e_hw_space.mac.type == I40E_MAC_X722); 460 } 461 462 static void 463 i40e_device_rele(i40e_t *i40e) 464 { 465 i40e_device_t *idp = i40e->i40e_device; 466 467 if (idp == NULL) 468 return; 469 470 mutex_enter(&i40e_glock); 471 VERIFY(idp->id_nreg > 0); 472 list_remove(&idp->id_i40e_list, i40e); 473 idp->id_nreg--; 474 if (idp->id_nreg == 0) { 475 list_remove(&i40e_dlist, idp); 476 list_destroy(&idp->id_i40e_list); 477 kmem_free(idp->id_rsrcs, sizeof (i40e_switch_rsrc_t) * 478 idp->id_rsrcs_alloc); 479 kmem_free(idp, sizeof (i40e_device_t)); 480 } 481 i40e->i40e_device = NULL; 482 mutex_exit(&i40e_glock); 483 } 484 485 static i40e_device_t * 486 i40e_device_find(i40e_t *i40e, dev_info_t *parent, uint_t bus, uint_t device) 487 { 488 i40e_device_t *idp; 489 mutex_enter(&i40e_glock); 490 for (idp = list_head(&i40e_dlist); idp != NULL; 491 idp = list_next(&i40e_dlist, idp)) { 492 if (idp->id_parent == parent && idp->id_pci_bus == bus && 493 idp->id_pci_device == device) { 494 break; 495 } 496 } 497 498 if (idp != NULL) { 499 VERIFY(idp->id_nreg < idp->id_nfuncs); 500 idp->id_nreg++; 501 } else { 502 i40e_hw_t *hw = &i40e->i40e_hw_space; 503 ASSERT(hw->num_ports > 0); 504 ASSERT(hw->num_partitions > 0); 505 506 /* 507 * The Intel common code doesn't exactly keep the number of PCI 508 * functions. But it calculates it during discovery of 509 * partitions and ports. So what we do is undo the calculation 510 * that it does originally, as functions are evenly spread 511 * across ports in the rare case of partitions. 512 */ 513 idp = kmem_alloc(sizeof (i40e_device_t), KM_SLEEP); 514 idp->id_parent = parent; 515 idp->id_pci_bus = bus; 516 idp->id_pci_device = device; 517 idp->id_nfuncs = hw->num_ports * hw->num_partitions; 518 idp->id_nreg = 1; 519 idp->id_rsrcs_alloc = i40e->i40e_switch_rsrc_alloc; 520 idp->id_rsrcs_act = i40e->i40e_switch_rsrc_actual; 521 idp->id_rsrcs = kmem_alloc(sizeof (i40e_switch_rsrc_t) * 522 idp->id_rsrcs_alloc, KM_SLEEP); 523 bcopy(i40e->i40e_switch_rsrcs, idp->id_rsrcs, 524 sizeof (i40e_switch_rsrc_t) * idp->id_rsrcs_alloc); 525 list_create(&idp->id_i40e_list, sizeof (i40e_t), 526 offsetof(i40e_t, i40e_dlink)); 527 528 list_insert_tail(&i40e_dlist, idp); 529 } 530 531 list_insert_tail(&idp->id_i40e_list, i40e); 532 mutex_exit(&i40e_glock); 533 534 return (idp); 535 } 536 537 static void 538 i40e_link_state_set(i40e_t *i40e, link_state_t state) 539 { 540 if (i40e->i40e_link_state == state) 541 return; 542 543 i40e->i40e_link_state = state; 544 mac_link_update(i40e->i40e_mac_hdl, i40e->i40e_link_state); 545 } 546 547 /* 548 * This is a basic link check routine. Mostly we're using this just to see 549 * if we can get any accurate information about the state of the link being 550 * up or down, as well as updating the link state, speed, etc. information. 551 */ 552 void 553 i40e_link_check(i40e_t *i40e) 554 { 555 i40e_hw_t *hw = &i40e->i40e_hw_space; 556 boolean_t ls; 557 int ret; 558 559 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock)); 560 561 hw->phy.get_link_info = B_TRUE; 562 if ((ret = i40e_get_link_status(hw, &ls)) != I40E_SUCCESS) { 563 i40e->i40e_s_link_status_errs++; 564 i40e->i40e_s_link_status_lasterr = ret; 565 return; 566 } 567 568 /* 569 * Firmware abstracts all of the mac and phy information for us, so we 570 * can use i40e_get_link_status to determine the current state. 
571 */ 572 if (ls == B_TRUE) { 573 enum i40e_aq_link_speed speed; 574 575 speed = i40e_get_link_speed(hw); 576 577 /* 578 * Translate from an i40e value to a value in Mbits/s. 579 */ 580 switch (speed) { 581 case I40E_LINK_SPEED_100MB: 582 i40e->i40e_link_speed = 100; 583 break; 584 case I40E_LINK_SPEED_1GB: 585 i40e->i40e_link_speed = 1000; 586 break; 587 case I40E_LINK_SPEED_10GB: 588 i40e->i40e_link_speed = 10000; 589 break; 590 case I40E_LINK_SPEED_20GB: 591 i40e->i40e_link_speed = 20000; 592 break; 593 case I40E_LINK_SPEED_40GB: 594 i40e->i40e_link_speed = 40000; 595 break; 596 case I40E_LINK_SPEED_25GB: 597 i40e->i40e_link_speed = 25000; 598 break; 599 default: 600 i40e->i40e_link_speed = 0; 601 break; 602 } 603 604 /* 605 * At this time, hardware does not support half-duplex 606 * operation, hence why we don't ask the hardware about our 607 * current speed. 608 */ 609 i40e->i40e_link_duplex = LINK_DUPLEX_FULL; 610 i40e_link_state_set(i40e, LINK_STATE_UP); 611 } else { 612 i40e->i40e_link_speed = 0; 613 i40e->i40e_link_duplex = 0; 614 i40e_link_state_set(i40e, LINK_STATE_DOWN); 615 } 616 } 617 618 static void 619 i40e_rem_intrs(i40e_t *i40e) 620 { 621 int i, rc; 622 623 for (i = 0; i < i40e->i40e_intr_count; i++) { 624 rc = ddi_intr_free(i40e->i40e_intr_handles[i]); 625 if (rc != DDI_SUCCESS) { 626 i40e_log(i40e, "failed to free interrupt %d: %d", 627 i, rc); 628 } 629 } 630 631 kmem_free(i40e->i40e_intr_handles, i40e->i40e_intr_size); 632 i40e->i40e_intr_handles = NULL; 633 } 634 635 static void 636 i40e_rem_intr_handlers(i40e_t *i40e) 637 { 638 int i, rc; 639 640 for (i = 0; i < i40e->i40e_intr_count; i++) { 641 rc = ddi_intr_remove_handler(i40e->i40e_intr_handles[i]); 642 if (rc != DDI_SUCCESS) { 643 i40e_log(i40e, "failed to remove interrupt %d: %d", 644 i, rc); 645 } 646 } 647 } 648 649 /* 650 * illumos Fault Management Architecture (FMA) support. 651 */ 652 653 int 654 i40e_check_acc_handle(ddi_acc_handle_t handle) 655 { 656 ddi_fm_error_t de; 657 658 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION); 659 ddi_fm_acc_err_clear(handle, DDI_FME_VERSION); 660 return (de.fme_status); 661 } 662 663 int 664 i40e_check_dma_handle(ddi_dma_handle_t handle) 665 { 666 ddi_fm_error_t de; 667 668 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION); 669 return (de.fme_status); 670 } 671 672 /* 673 * Fault service error handling callback function. 
674 */ 675 /* ARGSUSED */ 676 static int 677 i40e_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data) 678 { 679 pci_ereport_post(dip, err, NULL); 680 return (err->fme_status); 681 } 682 683 static void 684 i40e_fm_init(i40e_t *i40e) 685 { 686 ddi_iblock_cookie_t iblk; 687 688 i40e->i40e_fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY, 689 i40e->i40e_dip, DDI_PROP_DONTPASS, "fm_capable", 690 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE | 691 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE); 692 693 if (i40e->i40e_fm_capabilities < 0) { 694 i40e->i40e_fm_capabilities = 0; 695 } else if (i40e->i40e_fm_capabilities > 0xf) { 696 i40e->i40e_fm_capabilities = DDI_FM_EREPORT_CAPABLE | 697 DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE | 698 DDI_FM_ERRCB_CAPABLE; 699 } 700 701 /* 702 * Only register with IO Fault Services if we have some capability 703 */ 704 if (i40e->i40e_fm_capabilities & DDI_FM_ACCCHK_CAPABLE) { 705 i40e_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC; 706 } else { 707 i40e_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC; 708 } 709 710 if (i40e->i40e_fm_capabilities) { 711 ddi_fm_init(i40e->i40e_dip, &i40e->i40e_fm_capabilities, &iblk); 712 713 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) || 714 DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) { 715 pci_ereport_setup(i40e->i40e_dip); 716 } 717 718 if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) { 719 ddi_fm_handler_register(i40e->i40e_dip, 720 i40e_fm_error_cb, (void*)i40e); 721 } 722 } 723 724 if (i40e->i40e_fm_capabilities & DDI_FM_DMACHK_CAPABLE) { 725 i40e_init_dma_attrs(i40e, B_TRUE); 726 } else { 727 i40e_init_dma_attrs(i40e, B_FALSE); 728 } 729 } 730 731 static void 732 i40e_fm_fini(i40e_t *i40e) 733 { 734 if (i40e->i40e_fm_capabilities) { 735 736 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) || 737 DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) 738 pci_ereport_teardown(i40e->i40e_dip); 739 740 if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) 741 ddi_fm_handler_unregister(i40e->i40e_dip); 742 743 ddi_fm_fini(i40e->i40e_dip); 744 } 745 } 746 747 void 748 i40e_fm_ereport(i40e_t *i40e, char *detail) 749 { 750 uint64_t ena; 751 char buf[FM_MAX_CLASS]; 752 753 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail); 754 ena = fm_ena_generate(0, FM_ENA_FMT1); 755 if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities)) { 756 ddi_fm_ereport_post(i40e->i40e_dip, buf, ena, DDI_NOSLEEP, 757 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL); 758 } 759 } 760 761 /* 762 * Here we're trying to set the SEID of the default VSI. In general, 763 * when we come through and look at this shortly after attach, we 764 * expect there to only be a single element present, which is the 765 * default VSI. Importantly, each PF seems to not see any other 766 * devices, in part because of the simple switch mode that we're 767 * using. If for some reason, we see more artifacts, we'll need to 768 * revisit what we're doing here. 
769 */ 770 static boolean_t 771 i40e_set_def_vsi_seid(i40e_t *i40e) 772 { 773 i40e_hw_t *hw = &i40e->i40e_hw_space; 774 struct i40e_aqc_get_switch_config_resp *sw_config; 775 uint8_t aq_buf[I40E_AQ_LARGE_BUF]; 776 uint16_t next = 0; 777 int rc; 778 779 /* LINTED: E_BAD_PTR_CAST_ALIGN */ 780 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 781 rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next, 782 NULL); 783 if (rc != I40E_SUCCESS) { 784 i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d", 785 rc, hw->aq.asq_last_status); 786 return (B_FALSE); 787 } 788 789 if (LE_16(sw_config->header.num_reported) != 1) { 790 i40e_error(i40e, "encountered multiple (%d) switching units " 791 "during attach, not proceeding", 792 LE_16(sw_config->header.num_reported)); 793 return (B_FALSE); 794 } 795 796 I40E_DEF_VSI_SEID(i40e) = sw_config->element[0].seid; 797 return (B_TRUE); 798 } 799 800 /* 801 * Get the SEID of the uplink MAC. 802 */ 803 static int 804 i40e_get_mac_seid(i40e_t *i40e) 805 { 806 i40e_hw_t *hw = &i40e->i40e_hw_space; 807 struct i40e_aqc_get_switch_config_resp *sw_config; 808 uint8_t aq_buf[I40E_AQ_LARGE_BUF]; 809 uint16_t next = 0; 810 int rc; 811 812 /* LINTED: E_BAD_PTR_CAST_ALIGN */ 813 sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf; 814 rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next, 815 NULL); 816 if (rc != I40E_SUCCESS) { 817 i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d", 818 rc, hw->aq.asq_last_status); 819 return (-1); 820 } 821 822 return (LE_16(sw_config->element[0].uplink_seid)); 823 } 824 825 /* 826 * We need to fill the i40e_hw_t structure with the capabilities of this PF. We 827 * must also provide the memory for it; however, we don't need to keep it around 828 * to the call to the common code. It takes it and parses it into an internal 829 * structure. 830 */ 831 static boolean_t 832 i40e_get_hw_capabilities(i40e_t *i40e, i40e_hw_t *hw) 833 { 834 struct i40e_aqc_list_capabilities_element_resp *buf; 835 int rc; 836 size_t len; 837 uint16_t needed; 838 int nelems = I40E_HW_CAP_DEFAULT; 839 840 len = nelems * sizeof (*buf); 841 842 for (;;) { 843 ASSERT(len > 0); 844 buf = kmem_alloc(len, KM_SLEEP); 845 rc = i40e_aq_discover_capabilities(hw, buf, len, 846 &needed, i40e_aqc_opc_list_func_capabilities, NULL); 847 kmem_free(buf, len); 848 849 if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM && 850 nelems == I40E_HW_CAP_DEFAULT) { 851 if (nelems == needed) { 852 i40e_error(i40e, "Capability discovery failed " 853 "due to byzantine common code"); 854 return (B_FALSE); 855 } 856 len = needed; 857 continue; 858 } else if (rc != I40E_SUCCESS || 859 hw->aq.asq_last_status != I40E_AQ_RC_OK) { 860 i40e_error(i40e, "Capability discovery failed: %d", rc); 861 return (B_FALSE); 862 } 863 864 break; 865 } 866 867 return (B_TRUE); 868 } 869 870 /* 871 * Obtain the switch's capabilities as seen by this PF and keep it around for 872 * our later use. 
873 */ 874 static boolean_t 875 i40e_get_switch_resources(i40e_t *i40e) 876 { 877 i40e_hw_t *hw = &i40e->i40e_hw_space; 878 uint8_t cnt = 2; 879 uint8_t act; 880 size_t size; 881 i40e_switch_rsrc_t *buf; 882 883 for (;;) { 884 enum i40e_status_code ret; 885 size = cnt * sizeof (i40e_switch_rsrc_t); 886 ASSERT(size > 0); 887 if (size > UINT16_MAX) 888 return (B_FALSE); 889 buf = kmem_alloc(size, KM_SLEEP); 890 891 ret = i40e_aq_get_switch_resource_alloc(hw, &act, buf, 892 cnt, NULL); 893 if (ret == I40E_ERR_ADMIN_QUEUE_ERROR && 894 hw->aq.asq_last_status == I40E_AQ_RC_EINVAL) { 895 kmem_free(buf, size); 896 cnt += I40E_SWITCH_CAP_DEFAULT; 897 continue; 898 } else if (ret != I40E_SUCCESS) { 899 kmem_free(buf, size); 900 i40e_error(i40e, 901 "failed to retrieve switch statistics: %d", ret); 902 return (B_FALSE); 903 } 904 905 break; 906 } 907 908 i40e->i40e_switch_rsrc_alloc = cnt; 909 i40e->i40e_switch_rsrc_actual = act; 910 i40e->i40e_switch_rsrcs = buf; 911 912 return (B_TRUE); 913 } 914 915 static void 916 i40e_cleanup_resources(i40e_t *i40e) 917 { 918 if (i40e->i40e_uaddrs != NULL) { 919 kmem_free(i40e->i40e_uaddrs, sizeof (i40e_uaddr_t) * 920 i40e->i40e_resources.ifr_nmacfilt); 921 i40e->i40e_uaddrs = NULL; 922 } 923 924 if (i40e->i40e_maddrs != NULL) { 925 kmem_free(i40e->i40e_maddrs, sizeof (i40e_maddr_t) * 926 i40e->i40e_resources.ifr_nmcastfilt); 927 i40e->i40e_maddrs = NULL; 928 } 929 930 if (i40e->i40e_switch_rsrcs != NULL) { 931 size_t sz = sizeof (i40e_switch_rsrc_t) * 932 i40e->i40e_switch_rsrc_alloc; 933 ASSERT(sz > 0); 934 kmem_free(i40e->i40e_switch_rsrcs, sz); 935 i40e->i40e_switch_rsrcs = NULL; 936 } 937 938 if (i40e->i40e_device != NULL) 939 i40e_device_rele(i40e); 940 } 941 942 static boolean_t 943 i40e_get_available_resources(i40e_t *i40e) 944 { 945 dev_info_t *parent; 946 uint16_t bus, device, func; 947 uint_t nregs; 948 int *regs, i; 949 i40e_device_t *idp; 950 i40e_hw_t *hw = &i40e->i40e_hw_space; 951 952 parent = ddi_get_parent(i40e->i40e_dip); 953 954 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, i40e->i40e_dip, 0, "reg", 955 ®s, &nregs) != DDI_PROP_SUCCESS) { 956 return (B_FALSE); 957 } 958 959 if (nregs < 1) { 960 ddi_prop_free(regs); 961 return (B_FALSE); 962 } 963 964 bus = PCI_REG_BUS_G(regs[0]); 965 device = PCI_REG_DEV_G(regs[0]); 966 func = PCI_REG_FUNC_G(regs[0]); 967 ddi_prop_free(regs); 968 969 i40e->i40e_hw_space.bus.func = func; 970 i40e->i40e_hw_space.bus.device = device; 971 972 if (i40e_get_switch_resources(i40e) == B_FALSE) { 973 return (B_FALSE); 974 } 975 976 /* 977 * To calculate the total amount of a resource we have available, we 978 * need to add how many our i40e_t thinks it has guaranteed, if any, and 979 * then we need to go through and divide the number of available on the 980 * device, which was snapshotted before anyone should have allocated 981 * anything, and use that to derive how many are available from the 982 * pool. Longer term, we may want to turn this into something that's 983 * more of a pool-like resource that everything can share (though that 984 * may require some more assistance from MAC). 985 * 986 * Though for transmit and receive queue pairs, we just have to ask 987 * firmware instead. 
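	 *
	 * To make the pool-derived portion concrete, here is a purely
	 * illustrative example (the numbers are made up): if a resource type
	 * reports guaranteed = 16 for this PF, and the device-wide snapshot
	 * shows total_unalloced = 96 on a card with 4 functions (id_nfuncs),
	 * then the code below advertises 16 + 96 / 4 = 40 of that resource
	 * for this PF.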
988 */ 989 idp = i40e_device_find(i40e, parent, bus, device); 990 i40e->i40e_device = idp; 991 i40e->i40e_resources.ifr_nvsis = 0; 992 i40e->i40e_resources.ifr_nvsis_used = 0; 993 i40e->i40e_resources.ifr_nmacfilt = 0; 994 i40e->i40e_resources.ifr_nmacfilt_used = 0; 995 i40e->i40e_resources.ifr_nmcastfilt = 0; 996 i40e->i40e_resources.ifr_nmcastfilt_used = 0; 997 998 for (i = 0; i < i40e->i40e_switch_rsrc_actual; i++) { 999 i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i]; 1000 1001 switch (srp->resource_type) { 1002 case I40E_AQ_RESOURCE_TYPE_VSI: 1003 i40e->i40e_resources.ifr_nvsis += 1004 LE_16(srp->guaranteed); 1005 i40e->i40e_resources.ifr_nvsis_used = LE_16(srp->used); 1006 break; 1007 case I40E_AQ_RESOURCE_TYPE_MACADDR: 1008 i40e->i40e_resources.ifr_nmacfilt += 1009 LE_16(srp->guaranteed); 1010 i40e->i40e_resources.ifr_nmacfilt_used = 1011 LE_16(srp->used); 1012 break; 1013 case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH: 1014 i40e->i40e_resources.ifr_nmcastfilt += 1015 LE_16(srp->guaranteed); 1016 i40e->i40e_resources.ifr_nmcastfilt_used = 1017 LE_16(srp->used); 1018 break; 1019 default: 1020 break; 1021 } 1022 } 1023 1024 for (i = 0; i < idp->id_rsrcs_act; i++) { 1025 i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i]; 1026 switch (srp->resource_type) { 1027 case I40E_AQ_RESOURCE_TYPE_VSI: 1028 i40e->i40e_resources.ifr_nvsis += 1029 LE_16(srp->total_unalloced) / idp->id_nfuncs; 1030 break; 1031 case I40E_AQ_RESOURCE_TYPE_MACADDR: 1032 i40e->i40e_resources.ifr_nmacfilt += 1033 LE_16(srp->total_unalloced) / idp->id_nfuncs; 1034 break; 1035 case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH: 1036 i40e->i40e_resources.ifr_nmcastfilt += 1037 LE_16(srp->total_unalloced) / idp->id_nfuncs; 1038 default: 1039 break; 1040 } 1041 } 1042 1043 i40e->i40e_resources.ifr_nrx_queue = hw->func_caps.num_rx_qp; 1044 i40e->i40e_resources.ifr_ntx_queue = hw->func_caps.num_tx_qp; 1045 1046 i40e->i40e_uaddrs = kmem_zalloc(sizeof (i40e_uaddr_t) * 1047 i40e->i40e_resources.ifr_nmacfilt, KM_SLEEP); 1048 i40e->i40e_maddrs = kmem_zalloc(sizeof (i40e_maddr_t) * 1049 i40e->i40e_resources.ifr_nmcastfilt, KM_SLEEP); 1050 1051 /* 1052 * Initialize these as multicast addresses to indicate it's invalid for 1053 * sanity purposes. Think of it like 0xdeadbeef. 
1054 */ 1055 for (i = 0; i < i40e->i40e_resources.ifr_nmacfilt; i++) 1056 i40e->i40e_uaddrs[i].iua_mac[0] = 0x01; 1057 1058 return (B_TRUE); 1059 } 1060 1061 static boolean_t 1062 i40e_enable_interrupts(i40e_t *i40e) 1063 { 1064 int i, rc; 1065 1066 if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) { 1067 rc = ddi_intr_block_enable(i40e->i40e_intr_handles, 1068 i40e->i40e_intr_count); 1069 if (rc != DDI_SUCCESS) { 1070 i40e_error(i40e, "Interrupt block-enable failed: %d", 1071 rc); 1072 return (B_FALSE); 1073 } 1074 } else { 1075 for (i = 0; i < i40e->i40e_intr_count; i++) { 1076 rc = ddi_intr_enable(i40e->i40e_intr_handles[i]); 1077 if (rc != DDI_SUCCESS) { 1078 i40e_error(i40e, 1079 "Failed to enable interrupt %d: %d", i, rc); 1080 while (--i >= 0) { 1081 (void) ddi_intr_disable( 1082 i40e->i40e_intr_handles[i]); 1083 } 1084 return (B_FALSE); 1085 } 1086 } 1087 } 1088 1089 return (B_TRUE); 1090 } 1091 1092 static boolean_t 1093 i40e_disable_interrupts(i40e_t *i40e) 1094 { 1095 int i, rc; 1096 1097 if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) { 1098 rc = ddi_intr_block_disable(i40e->i40e_intr_handles, 1099 i40e->i40e_intr_count); 1100 if (rc != DDI_SUCCESS) { 1101 i40e_error(i40e, 1102 "Interrupt block-disabled failed: %d", rc); 1103 return (B_FALSE); 1104 } 1105 } else { 1106 for (i = 0; i < i40e->i40e_intr_count; i++) { 1107 rc = ddi_intr_disable(i40e->i40e_intr_handles[i]); 1108 if (rc != DDI_SUCCESS) { 1109 i40e_error(i40e, 1110 "Failed to disable interrupt %d: %d", 1111 i, rc); 1112 return (B_FALSE); 1113 } 1114 } 1115 } 1116 1117 return (B_TRUE); 1118 } 1119 1120 /* 1121 * Free receive & transmit rings. 1122 */ 1123 static void 1124 i40e_free_trqpairs(i40e_t *i40e) 1125 { 1126 i40e_trqpair_t *itrq; 1127 1128 if (i40e->i40e_rx_groups != NULL) { 1129 kmem_free(i40e->i40e_rx_groups, 1130 sizeof (i40e_rx_group_t) * i40e->i40e_num_rx_groups); 1131 i40e->i40e_rx_groups = NULL; 1132 } 1133 1134 if (i40e->i40e_trqpairs != NULL) { 1135 for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) { 1136 itrq = &i40e->i40e_trqpairs[i]; 1137 mutex_destroy(&itrq->itrq_rx_lock); 1138 mutex_destroy(&itrq->itrq_tx_lock); 1139 mutex_destroy(&itrq->itrq_tcb_lock); 1140 1141 /* 1142 * Should have already been cleaned up by start/stop, 1143 * etc. 1144 */ 1145 ASSERT(itrq->itrq_txkstat == NULL); 1146 ASSERT(itrq->itrq_rxkstat == NULL); 1147 } 1148 1149 kmem_free(i40e->i40e_trqpairs, 1150 sizeof (i40e_trqpair_t) * i40e->i40e_num_trqpairs); 1151 i40e->i40e_trqpairs = NULL; 1152 } 1153 1154 cv_destroy(&i40e->i40e_rx_pending_cv); 1155 mutex_destroy(&i40e->i40e_rx_pending_lock); 1156 mutex_destroy(&i40e->i40e_general_lock); 1157 } 1158 1159 /* 1160 * Allocate transmit and receive rings, as well as other data structures that we 1161 * need. 1162 */ 1163 static boolean_t 1164 i40e_alloc_trqpairs(i40e_t *i40e) 1165 { 1166 void *mutexpri = DDI_INTR_PRI(i40e->i40e_intr_pri); 1167 1168 /* 1169 * Now that we have the priority for the interrupts, initialize 1170 * all relevant locks. 
1171 */ 1172 mutex_init(&i40e->i40e_general_lock, NULL, MUTEX_DRIVER, mutexpri); 1173 mutex_init(&i40e->i40e_rx_pending_lock, NULL, MUTEX_DRIVER, mutexpri); 1174 cv_init(&i40e->i40e_rx_pending_cv, NULL, CV_DRIVER, NULL); 1175 1176 i40e->i40e_trqpairs = kmem_zalloc(sizeof (i40e_trqpair_t) * 1177 i40e->i40e_num_trqpairs, KM_SLEEP); 1178 for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) { 1179 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i]; 1180 1181 itrq->itrq_i40e = i40e; 1182 mutex_init(&itrq->itrq_rx_lock, NULL, MUTEX_DRIVER, mutexpri); 1183 mutex_init(&itrq->itrq_tx_lock, NULL, MUTEX_DRIVER, mutexpri); 1184 mutex_init(&itrq->itrq_tcb_lock, NULL, MUTEX_DRIVER, mutexpri); 1185 itrq->itrq_index = i; 1186 } 1187 1188 i40e->i40e_rx_groups = kmem_zalloc(sizeof (i40e_rx_group_t) * 1189 i40e->i40e_num_rx_groups, KM_SLEEP); 1190 1191 for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) { 1192 i40e_rx_group_t *rxg = &i40e->i40e_rx_groups[i]; 1193 1194 rxg->irg_index = i; 1195 rxg->irg_i40e = i40e; 1196 } 1197 1198 return (B_TRUE); 1199 } 1200 1201 1202 1203 /* 1204 * Unless a .conf file already overrode i40e_t structure values, they will 1205 * be 0, and need to be set in conjunction with the now-available HW report. 1206 */ 1207 /* ARGSUSED */ 1208 static void 1209 i40e_hw_to_instance(i40e_t *i40e, i40e_hw_t *hw) 1210 { 1211 if (i40e->i40e_num_trqpairs_per_vsi == 0) { 1212 if (i40e_is_x722(i40e)) { 1213 i40e->i40e_num_trqpairs_per_vsi = 1214 I40E_722_MAX_TC_QUEUES; 1215 } else { 1216 i40e->i40e_num_trqpairs_per_vsi = 1217 I40E_710_MAX_TC_QUEUES; 1218 } 1219 } 1220 1221 if (i40e->i40e_num_rx_groups == 0) { 1222 i40e->i40e_num_rx_groups = I40E_GROUP_MAX; 1223 } 1224 } 1225 1226 /* 1227 * Free any resources required by, or setup by, the Intel common code. 1228 */ 1229 static void 1230 i40e_common_code_fini(i40e_t *i40e) 1231 { 1232 i40e_hw_t *hw = &i40e->i40e_hw_space; 1233 int rc; 1234 1235 rc = i40e_shutdown_lan_hmc(hw); 1236 if (rc != I40E_SUCCESS) 1237 i40e_error(i40e, "failed to shutdown LAN hmc: %d", rc); 1238 1239 rc = i40e_shutdown_adminq(hw); 1240 if (rc != I40E_SUCCESS) 1241 i40e_error(i40e, "failed to shutdown admin queue: %d", rc); 1242 } 1243 1244 /* 1245 * Initialize and call Intel common-code routines, includes some setup 1246 * the common code expects from the driver. Also prints on failure, so 1247 * the caller doesn't have to. 
1248 */ 1249 static boolean_t 1250 i40e_common_code_init(i40e_t *i40e, i40e_hw_t *hw) 1251 { 1252 int rc; 1253 1254 i40e_clear_hw(hw); 1255 rc = i40e_pf_reset(hw); 1256 if (rc != 0) { 1257 i40e_error(i40e, "failed to reset hardware: %d", rc); 1258 i40e_fm_ereport(i40e, DDI_FM_DEVICE_NO_RESPONSE); 1259 return (B_FALSE); 1260 } 1261 1262 rc = i40e_init_shared_code(hw); 1263 if (rc != 0) { 1264 i40e_error(i40e, "failed to initialize i40e core: %d", rc); 1265 return (B_FALSE); 1266 } 1267 1268 hw->aq.num_arq_entries = I40E_DEF_ADMINQ_SIZE; 1269 hw->aq.num_asq_entries = I40E_DEF_ADMINQ_SIZE; 1270 hw->aq.arq_buf_size = I40E_ADMINQ_BUFSZ; 1271 hw->aq.asq_buf_size = I40E_ADMINQ_BUFSZ; 1272 1273 rc = i40e_init_adminq(hw); 1274 if (rc != 0) { 1275 i40e_error(i40e, "failed to initialize firmware admin queue: " 1276 "%d, potential firmware version mismatch", rc); 1277 i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE); 1278 return (B_FALSE); 1279 } 1280 1281 if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR && 1282 hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) { 1283 i40e_log(i40e, "The driver for the device detected a newer " 1284 "version of the NVM image (%d.%d) than expected (%d.%d).\n" 1285 "Please install the most recent version of the network " 1286 "driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver, 1287 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR); 1288 } else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR || 1289 hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) { 1290 i40e_log(i40e, "The driver for the device detected an older" 1291 " version of the NVM image (%d.%d) than expected (%d.%d)." 1292 "\nPlease update the NVM image.\n", 1293 hw->aq.api_maj_ver, hw->aq.api_min_ver, 1294 I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR - 1); 1295 } 1296 1297 i40e_clear_pxe_mode(hw); 1298 1299 /* 1300 * We need to call this so that the common code can discover 1301 * capabilities of the hardware, which it uses throughout the rest. 
1302 */ 1303 if (!i40e_get_hw_capabilities(i40e, hw)) { 1304 i40e_error(i40e, "failed to obtain hardware capabilities"); 1305 return (B_FALSE); 1306 } 1307 1308 if (i40e_get_available_resources(i40e) == B_FALSE) { 1309 i40e_error(i40e, "failed to obtain hardware resources"); 1310 return (B_FALSE); 1311 } 1312 1313 i40e_hw_to_instance(i40e, hw); 1314 1315 rc = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp, 1316 hw->func_caps.num_rx_qp, 0, 0); 1317 if (rc != 0) { 1318 i40e_error(i40e, "failed to initialize hardware memory cache: " 1319 "%d", rc); 1320 return (B_FALSE); 1321 } 1322 1323 rc = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY); 1324 if (rc != 0) { 1325 i40e_error(i40e, "failed to configure hardware memory cache: " 1326 "%d", rc); 1327 return (B_FALSE); 1328 } 1329 1330 (void) i40e_aq_stop_lldp(hw, TRUE, NULL); 1331 1332 rc = i40e_get_mac_addr(hw, hw->mac.addr); 1333 if (rc != I40E_SUCCESS) { 1334 i40e_error(i40e, "failed to retrieve hardware mac address: %d", 1335 rc); 1336 return (B_FALSE); 1337 } 1338 1339 rc = i40e_validate_mac_addr(hw->mac.addr); 1340 if (rc != 0) { 1341 i40e_error(i40e, "failed to validate internal mac address: " 1342 "%d", rc); 1343 return (B_FALSE); 1344 } 1345 bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL); 1346 if ((rc = i40e_get_port_mac_addr(hw, hw->mac.port_addr)) != 1347 I40E_SUCCESS) { 1348 i40e_error(i40e, "failed to retrieve port mac address: %d", 1349 rc); 1350 return (B_FALSE); 1351 } 1352 1353 /* 1354 * We need to obtain the Default Virtual Station SEID (VSI) 1355 * before we can perform other operations on the device. 1356 */ 1357 if (!i40e_set_def_vsi_seid(i40e)) { 1358 i40e_error(i40e, "failed to obtain Default VSI SEID"); 1359 return (B_FALSE); 1360 } 1361 1362 return (B_TRUE); 1363 } 1364 1365 static void 1366 i40e_unconfigure(dev_info_t *devinfo, i40e_t *i40e) 1367 { 1368 int rc; 1369 1370 if (i40e->i40e_attach_progress & I40E_ATTACH_ENABLE_INTR) 1371 (void) i40e_disable_interrupts(i40e); 1372 1373 if ((i40e->i40e_attach_progress & I40E_ATTACH_LINK_TIMER) && 1374 i40e->i40e_periodic_id != 0) { 1375 ddi_periodic_delete(i40e->i40e_periodic_id); 1376 i40e->i40e_periodic_id = 0; 1377 } 1378 1379 if (i40e->i40e_attach_progress & I40E_ATTACH_MAC) { 1380 rc = mac_unregister(i40e->i40e_mac_hdl); 1381 if (rc != 0) { 1382 i40e_error(i40e, "failed to unregister from mac: %d", 1383 rc); 1384 } 1385 } 1386 1387 if (i40e->i40e_attach_progress & I40E_ATTACH_STATS) { 1388 i40e_stats_fini(i40e); 1389 } 1390 1391 if (i40e->i40e_attach_progress & I40E_ATTACH_ADD_INTR) 1392 i40e_rem_intr_handlers(i40e); 1393 1394 if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_RINGSLOCKS) 1395 i40e_free_trqpairs(i40e); 1396 1397 if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_INTR) 1398 i40e_rem_intrs(i40e); 1399 1400 if (i40e->i40e_attach_progress & I40E_ATTACH_COMMON_CODE) 1401 i40e_common_code_fini(i40e); 1402 1403 i40e_cleanup_resources(i40e); 1404 1405 if (i40e->i40e_attach_progress & I40E_ATTACH_PROPS) 1406 (void) ddi_prop_remove_all(devinfo); 1407 1408 if (i40e->i40e_attach_progress & I40E_ATTACH_REGS_MAP && 1409 i40e->i40e_osdep_space.ios_reg_handle != NULL) { 1410 ddi_regs_map_free(&i40e->i40e_osdep_space.ios_reg_handle); 1411 i40e->i40e_osdep_space.ios_reg_handle = NULL; 1412 } 1413 1414 if ((i40e->i40e_attach_progress & I40E_ATTACH_PCI_CONFIG) && 1415 i40e->i40e_osdep_space.ios_cfg_handle != NULL) { 1416 pci_config_teardown(&i40e->i40e_osdep_space.ios_cfg_handle); 1417 i40e->i40e_osdep_space.ios_cfg_handle = NULL; 1418 } 1419 1420 if 
(i40e->i40e_attach_progress & I40E_ATTACH_FM_INIT) 1421 i40e_fm_fini(i40e); 1422 1423 if (i40e->i40e_attach_progress & I40E_ATTACH_UFM_INIT) 1424 ddi_ufm_fini(i40e->i40e_ufmh); 1425 1426 kmem_free(i40e->i40e_aqbuf, I40E_ADMINQ_BUFSZ); 1427 kmem_free(i40e, sizeof (i40e_t)); 1428 1429 ddi_set_driver_private(devinfo, NULL); 1430 } 1431 1432 static boolean_t 1433 i40e_final_init(i40e_t *i40e) 1434 { 1435 i40e_hw_t *hw = &i40e->i40e_hw_space; 1436 struct i40e_osdep *osdep = OS_DEP(hw); 1437 uint8_t pbanum[I40E_PBANUM_STRLEN]; 1438 enum i40e_status_code irc; 1439 char buf[I40E_DDI_PROP_LEN]; 1440 1441 pbanum[0] = '\0'; 1442 irc = i40e_read_pba_string(hw, pbanum, sizeof (pbanum)); 1443 if (irc != I40E_SUCCESS) { 1444 i40e_log(i40e, "failed to read PBA string: %d", irc); 1445 } else { 1446 (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip, 1447 "printed-board-assembly", (char *)pbanum); 1448 } 1449 1450 #ifdef DEBUG 1451 ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.fw_maj_ver, 1452 hw->aq.fw_min_ver) < sizeof (buf)); 1453 ASSERT(snprintf(NULL, 0, "%x", hw->aq.fw_build) < sizeof (buf)); 1454 ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.api_maj_ver, 1455 hw->aq.api_min_ver) < sizeof (buf)); 1456 #endif 1457 1458 (void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.fw_maj_ver, 1459 hw->aq.fw_min_ver); 1460 (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip, 1461 "firmware-version", buf); 1462 (void) snprintf(buf, sizeof (buf), "%x", hw->aq.fw_build); 1463 (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip, 1464 "firmware-build", buf); 1465 (void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.api_maj_ver, 1466 hw->aq.api_min_ver); 1467 (void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip, 1468 "api-version", buf); 1469 1470 if (!i40e_set_hw_bus_info(hw)) 1471 return (B_FALSE); 1472 1473 if (i40e_check_acc_handle(osdep->ios_reg_handle) != DDI_FM_OK) { 1474 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST); 1475 return (B_FALSE); 1476 } 1477 1478 return (B_TRUE); 1479 } 1480 1481 static void 1482 i40e_identify_hardware(i40e_t *i40e) 1483 { 1484 i40e_hw_t *hw = &i40e->i40e_hw_space; 1485 struct i40e_osdep *osdep = &i40e->i40e_osdep_space; 1486 1487 hw->vendor_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_VENID); 1488 hw->device_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_DEVID); 1489 hw->revision_id = pci_config_get8(osdep->ios_cfg_handle, 1490 PCI_CONF_REVID); 1491 hw->subsystem_device_id = 1492 pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBSYSID); 1493 hw->subsystem_vendor_id = 1494 pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBVENID); 1495 1496 /* 1497 * Note that we set the hardware's bus information later on, in 1498 * i40e_get_available_resources(). The common code doesn't seem to 1499 * require that it be set in any ways, it seems to be mostly for 1500 * book-keeping. 
1501 */ 1502 } 1503 1504 static boolean_t 1505 i40e_regs_map(i40e_t *i40e) 1506 { 1507 dev_info_t *devinfo = i40e->i40e_dip; 1508 i40e_hw_t *hw = &i40e->i40e_hw_space; 1509 struct i40e_osdep *osdep = &i40e->i40e_osdep_space; 1510 off_t memsize; 1511 int ret; 1512 1513 if (ddi_dev_regsize(devinfo, I40E_ADAPTER_REGSET, &memsize) != 1514 DDI_SUCCESS) { 1515 i40e_error(i40e, "Used invalid register set to map PCIe regs"); 1516 return (B_FALSE); 1517 } 1518 1519 if ((ret = ddi_regs_map_setup(devinfo, I40E_ADAPTER_REGSET, 1520 (caddr_t *)&hw->hw_addr, 0, memsize, &i40e_regs_acc_attr, 1521 &osdep->ios_reg_handle)) != DDI_SUCCESS) { 1522 i40e_error(i40e, "failed to map device registers: %d", ret); 1523 return (B_FALSE); 1524 } 1525 1526 osdep->ios_reg_size = memsize; 1527 return (B_TRUE); 1528 } 1529 1530 /* 1531 * Update parameters required when a new MTU has been configured. Calculate the 1532 * maximum frame size, as well as, size our DMA buffers which we size in 1533 * increments of 1K. 1534 */ 1535 void 1536 i40e_update_mtu(i40e_t *i40e) 1537 { 1538 uint32_t rx, tx; 1539 1540 i40e->i40e_frame_max = i40e->i40e_sdu + 1541 sizeof (struct ether_vlan_header) + ETHERFCSL; 1542 1543 rx = i40e->i40e_frame_max + I40E_BUF_IPHDR_ALIGNMENT; 1544 i40e->i40e_rx_buf_size = ((rx >> 10) + 1545 ((rx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10; 1546 1547 tx = i40e->i40e_frame_max; 1548 i40e->i40e_tx_buf_size = ((tx >> 10) + 1549 ((tx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10; 1550 } 1551 1552 static int 1553 i40e_get_prop(i40e_t *i40e, char *prop, int min, int max, int def) 1554 { 1555 int val; 1556 1557 val = ddi_prop_get_int(DDI_DEV_T_ANY, i40e->i40e_dip, DDI_PROP_DONTPASS, 1558 prop, def); 1559 if (val > max) 1560 val = max; 1561 if (val < min) 1562 val = min; 1563 return (val); 1564 } 1565 1566 static void 1567 i40e_init_properties(i40e_t *i40e) 1568 { 1569 i40e->i40e_sdu = i40e_get_prop(i40e, "default_mtu", 1570 I40E_MIN_MTU, I40E_MAX_MTU, I40E_DEF_MTU); 1571 1572 i40e->i40e_intr_force = i40e_get_prop(i40e, "intr_force", 1573 I40E_INTR_NONE, I40E_INTR_LEGACY, I40E_INTR_NONE); 1574 1575 i40e->i40e_mr_enable = i40e_get_prop(i40e, "mr_enable", 1576 B_FALSE, B_TRUE, B_TRUE); 1577 1578 i40e->i40e_tx_ring_size = i40e_get_prop(i40e, "tx_ring_size", 1579 I40E_MIN_TX_RING_SIZE, I40E_MAX_TX_RING_SIZE, 1580 I40E_DEF_TX_RING_SIZE); 1581 if ((i40e->i40e_tx_ring_size % I40E_DESC_ALIGN) != 0) { 1582 i40e->i40e_tx_ring_size = P2ROUNDUP(i40e->i40e_tx_ring_size, 1583 I40E_DESC_ALIGN); 1584 } 1585 1586 i40e->i40e_tx_block_thresh = i40e_get_prop(i40e, "tx_resched_threshold", 1587 I40E_MIN_TX_BLOCK_THRESH, 1588 i40e->i40e_tx_ring_size - I40E_TX_MAX_COOKIE, 1589 I40E_DEF_TX_BLOCK_THRESH); 1590 1591 i40e->i40e_rx_ring_size = i40e_get_prop(i40e, "rx_ring_size", 1592 I40E_MIN_RX_RING_SIZE, I40E_MAX_RX_RING_SIZE, 1593 I40E_DEF_RX_RING_SIZE); 1594 if ((i40e->i40e_rx_ring_size % I40E_DESC_ALIGN) != 0) { 1595 i40e->i40e_rx_ring_size = P2ROUNDUP(i40e->i40e_rx_ring_size, 1596 I40E_DESC_ALIGN); 1597 } 1598 1599 i40e->i40e_rx_limit_per_intr = i40e_get_prop(i40e, "rx_limit_per_intr", 1600 I40E_MIN_RX_LIMIT_PER_INTR, I40E_MAX_RX_LIMIT_PER_INTR, 1601 I40E_DEF_RX_LIMIT_PER_INTR); 1602 1603 i40e->i40e_tx_hcksum_enable = i40e_get_prop(i40e, "tx_hcksum_enable", 1604 B_FALSE, B_TRUE, B_TRUE); 1605 1606 i40e->i40e_tx_lso_enable = i40e_get_prop(i40e, "tx_lso_enable", 1607 B_FALSE, B_TRUE, B_TRUE); 1608 1609 i40e->i40e_rx_hcksum_enable = i40e_get_prop(i40e, "rx_hcksum_enable", 1610 B_FALSE, B_TRUE, B_TRUE); 1611 1612 i40e->i40e_rx_dma_min = 
i40e_get_prop(i40e, "rx_dma_threshold", 1613 I40E_MIN_RX_DMA_THRESH, I40E_MAX_RX_DMA_THRESH, 1614 I40E_DEF_RX_DMA_THRESH); 1615 1616 i40e->i40e_tx_dma_min = i40e_get_prop(i40e, "tx_dma_threshold", 1617 I40E_MIN_TX_DMA_THRESH, I40E_MAX_TX_DMA_THRESH, 1618 I40E_DEF_TX_DMA_THRESH); 1619 1620 i40e->i40e_tx_itr = i40e_get_prop(i40e, "tx_intr_throttle", 1621 I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_TX_ITR); 1622 1623 i40e->i40e_rx_itr = i40e_get_prop(i40e, "rx_intr_throttle", 1624 I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_RX_ITR); 1625 1626 i40e->i40e_other_itr = i40e_get_prop(i40e, "other_intr_throttle", 1627 I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_OTHER_ITR); 1628 1629 if (!i40e->i40e_mr_enable) { 1630 i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX; 1631 i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX; 1632 } 1633 1634 i40e_update_mtu(i40e); 1635 } 1636 1637 /* 1638 * There are a few constraints on interrupts that we're currently imposing, some 1639 * of which are restrictions from hardware. For a fuller treatment, see 1640 * i40e_intr.c. 1641 * 1642 * Currently, to use MSI-X we require two interrupts be available though in 1643 * theory we should participate in IRM and happily use more interrupts. 1644 * 1645 * Hardware only supports a single MSI being programmed and therefore if we 1646 * don't have MSI-X interrupts available at this time, then we ratchet down the 1647 * number of rings and groups available. Obviously, we only bother with a single 1648 * fixed interrupt. 1649 */ 1650 static boolean_t 1651 i40e_alloc_intr_handles(i40e_t *i40e, dev_info_t *devinfo, int intr_type) 1652 { 1653 i40e_hw_t *hw = &i40e->i40e_hw_space; 1654 ddi_acc_handle_t rh = i40e->i40e_osdep_space.ios_reg_handle; 1655 int request, count, actual, rc, min; 1656 uint32_t reg; 1657 1658 switch (intr_type) { 1659 case DDI_INTR_TYPE_FIXED: 1660 case DDI_INTR_TYPE_MSI: 1661 request = 1; 1662 min = 1; 1663 break; 1664 case DDI_INTR_TYPE_MSIX: 1665 min = 2; 1666 if (!i40e->i40e_mr_enable) { 1667 request = 2; 1668 break; 1669 } 1670 reg = I40E_READ_REG(hw, I40E_GLPCI_CNF2); 1671 /* 1672 * Should this read fail, we will drop back to using 1673 * MSI or fixed interrupts. 
1674 */ 1675 if (i40e_check_acc_handle(rh) != DDI_FM_OK) { 1676 ddi_fm_service_impact(i40e->i40e_dip, 1677 DDI_SERVICE_DEGRADED); 1678 return (B_FALSE); 1679 } 1680 request = (reg & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >> 1681 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT; 1682 request++; /* the register value is n - 1 */ 1683 break; 1684 default: 1685 panic("bad interrupt type passed to i40e_alloc_intr_handles: " 1686 "%d", intr_type); 1687 return (B_FALSE); 1688 } 1689 1690 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count); 1691 if (rc != DDI_SUCCESS || count < min) { 1692 i40e_log(i40e, "Get interrupt number failed, " 1693 "returned %d, count %d", rc, count); 1694 return (B_FALSE); 1695 } 1696 1697 rc = ddi_intr_get_navail(devinfo, intr_type, &count); 1698 if (rc != DDI_SUCCESS || count < min) { 1699 i40e_log(i40e, "Get AVAILABLE interrupt number failed, " 1700 "returned %d, count %d", rc, count); 1701 return (B_FALSE); 1702 } 1703 1704 actual = 0; 1705 i40e->i40e_intr_count = 0; 1706 i40e->i40e_intr_count_max = 0; 1707 i40e->i40e_intr_count_min = 0; 1708 1709 i40e->i40e_intr_size = request * sizeof (ddi_intr_handle_t); 1710 ASSERT(i40e->i40e_intr_size != 0); 1711 i40e->i40e_intr_handles = kmem_alloc(i40e->i40e_intr_size, KM_SLEEP); 1712 1713 rc = ddi_intr_alloc(devinfo, i40e->i40e_intr_handles, intr_type, 0, 1714 min(request, count), &actual, DDI_INTR_ALLOC_NORMAL); 1715 if (rc != DDI_SUCCESS) { 1716 i40e_log(i40e, "Interrupt allocation failed with %d.", rc); 1717 goto alloc_handle_fail; 1718 } 1719 1720 i40e->i40e_intr_count = actual; 1721 i40e->i40e_intr_count_max = request; 1722 i40e->i40e_intr_count_min = min; 1723 1724 if (actual < min) { 1725 i40e_log(i40e, "actual (%d) is less than minimum (%d).", 1726 actual, min); 1727 goto alloc_handle_fail; 1728 } 1729 1730 /* 1731 * Record the priority and capabilities for our first vector. Once 1732 * we have it, that's our priority until detach time. Even if we 1733 * eventually participate in IRM, our priority shouldn't change. 1734 */ 1735 rc = ddi_intr_get_pri(i40e->i40e_intr_handles[0], &i40e->i40e_intr_pri); 1736 if (rc != DDI_SUCCESS) { 1737 i40e_log(i40e, 1738 "Getting interrupt priority failed with %d.", rc); 1739 goto alloc_handle_fail; 1740 } 1741 1742 rc = ddi_intr_get_cap(i40e->i40e_intr_handles[0], &i40e->i40e_intr_cap); 1743 if (rc != DDI_SUCCESS) { 1744 i40e_log(i40e, 1745 "Getting interrupt capabilities failed with %d.", rc); 1746 goto alloc_handle_fail; 1747 } 1748 1749 i40e->i40e_intr_type = intr_type; 1750 return (B_TRUE); 1751 1752 alloc_handle_fail: 1753 1754 i40e_rem_intrs(i40e); 1755 return (B_FALSE); 1756 } 1757 1758 static boolean_t 1759 i40e_alloc_intrs(i40e_t *i40e, dev_info_t *devinfo) 1760 { 1761 int intr_types, rc; 1762 uint_t max_trqpairs; 1763 1764 if (i40e_is_x722(i40e)) { 1765 max_trqpairs = I40E_722_MAX_TC_QUEUES; 1766 } else { 1767 max_trqpairs = I40E_710_MAX_TC_QUEUES; 1768 } 1769 1770 rc = ddi_intr_get_supported_types(devinfo, &intr_types); 1771 if (rc != DDI_SUCCESS) { 1772 i40e_error(i40e, "failed to get supported interrupt types: %d", 1773 rc); 1774 return (B_FALSE); 1775 } 1776 1777 i40e->i40e_intr_type = 0; 1778 i40e->i40e_num_rx_groups = I40E_GROUP_MAX; 1779 1780 /* 1781 * We need to determine the number of queue pairs per traffic 1782 * class. We only have one traffic class (TC0), so we'll base 1783 * this off the number of interrupts provided. Furthermore, 1784 * since we only use one traffic class, the number of queues 1785 * per traffic class and per VSI are the same. 
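 *
 * For example (illustrative numbers only): if 8 MSI-X vectors end up
 * being allocated, one is reserved for the admin queue and other
 * causes, leaving 7 for the rings; the code below rounds that up to 8
 * queue pairs per VSI and programs i40e_num_trqpairs as
 * 8 * i40e_num_rx_groups.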
1786 */ 1787 if ((intr_types & DDI_INTR_TYPE_MSIX) && 1788 (i40e->i40e_intr_force <= I40E_INTR_MSIX) && 1789 (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSIX))) { 1790 uint32_t n; 1791 1792 /* 1793 * While we want the number of queue pairs to match 1794 * the number of interrupts, we must keep stay in 1795 * bounds of the maximum number of queues per traffic 1796 * class. We subtract one from i40e_intr_count to 1797 * account for interrupt zero; which is currently 1798 * restricted to admin queue commands and other 1799 * interrupt causes. 1800 */ 1801 n = MIN(i40e->i40e_intr_count - 1, max_trqpairs); 1802 ASSERT3U(n, >, 0); 1803 1804 /* 1805 * Round up to the nearest power of two to ensure that 1806 * the QBASE aligns with the TC size which must be 1807 * programmed as a power of two. See the queue mapping 1808 * description in section 7.4.9.5.5.1. 1809 * 1810 * If i40e_intr_count - 1 is not a power of two then 1811 * some queue pairs on the same VSI will have to share 1812 * an interrupt. 1813 * 1814 * We may want to revisit this logic in a future where 1815 * we have more interrupts and more VSIs. Otherwise, 1816 * each VSI will use as many interrupts as possible. 1817 * Using more QPs per VSI means better RSS for each 1818 * group, but at the same time may require more 1819 * sharing of interrupts across VSIs. This may be a 1820 * good candidate for a .conf tunable. 1821 */ 1822 n = 0x1 << ddi_fls(n); 1823 i40e->i40e_num_trqpairs_per_vsi = n; 1824 ASSERT3U(i40e->i40e_num_rx_groups, >, 0); 1825 i40e->i40e_num_trqpairs = i40e->i40e_num_trqpairs_per_vsi * 1826 i40e->i40e_num_rx_groups; 1827 return (B_TRUE); 1828 } 1829 1830 /* 1831 * We only use multiple transmit/receive pairs when MSI-X interrupts are 1832 * available due to the fact that the device basically only supports a 1833 * single MSI interrupt. 1834 */ 1835 i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX; 1836 i40e->i40e_num_trqpairs_per_vsi = i40e->i40e_num_trqpairs; 1837 i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX; 1838 1839 if ((intr_types & DDI_INTR_TYPE_MSI) && 1840 (i40e->i40e_intr_force <= I40E_INTR_MSI)) { 1841 if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSI)) 1842 return (B_TRUE); 1843 } 1844 1845 if (intr_types & DDI_INTR_TYPE_FIXED) { 1846 if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_FIXED)) 1847 return (B_TRUE); 1848 } 1849 1850 return (B_FALSE); 1851 } 1852 1853 /* 1854 * Map different interrupts to MSI-X vectors. 1855 */ 1856 static boolean_t 1857 i40e_map_intrs_to_vectors(i40e_t *i40e) 1858 { 1859 if (i40e->i40e_intr_type != DDI_INTR_TYPE_MSIX) { 1860 return (B_TRUE); 1861 } 1862 1863 /* 1864 * Each queue pair is mapped to a single interrupt, so 1865 * transmit and receive interrupts for a given queue share the 1866 * same vector. Vector zero is reserved for the admin queue. 
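 *
 * For example, with four vectors (three available for the rings) and
 * eight queue pairs, pairs 0 through 7 are assigned vectors
 * 1, 2, 3, 1, 2, 3, 1, 2 respectively.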
1867 */ 1868 for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) { 1869 uint_t vector = i % (i40e->i40e_intr_count - 1); 1870 1871 i40e->i40e_trqpairs[i].itrq_rx_intrvec = vector + 1; 1872 i40e->i40e_trqpairs[i].itrq_tx_intrvec = vector + 1; 1873 } 1874 1875 return (B_TRUE); 1876 } 1877 1878 static boolean_t 1879 i40e_add_intr_handlers(i40e_t *i40e) 1880 { 1881 int rc, vector; 1882 1883 switch (i40e->i40e_intr_type) { 1884 case DDI_INTR_TYPE_MSIX: 1885 for (vector = 0; vector < i40e->i40e_intr_count; vector++) { 1886 rc = ddi_intr_add_handler( 1887 i40e->i40e_intr_handles[vector], 1888 (ddi_intr_handler_t *)i40e_intr_msix, i40e, 1889 (void *)(uintptr_t)vector); 1890 if (rc != DDI_SUCCESS) { 1891 i40e_log(i40e, "Add interrupt handler (MSI-X) " 1892 "failed: return %d, vector %d", rc, vector); 1893 for (vector--; vector >= 0; vector--) { 1894 (void) ddi_intr_remove_handler( 1895 i40e->i40e_intr_handles[vector]); 1896 } 1897 return (B_FALSE); 1898 } 1899 } 1900 break; 1901 case DDI_INTR_TYPE_MSI: 1902 rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0], 1903 (ddi_intr_handler_t *)i40e_intr_msi, i40e, NULL); 1904 if (rc != DDI_SUCCESS) { 1905 i40e_log(i40e, "Add interrupt handler (MSI) failed: " 1906 "return %d", rc); 1907 return (B_FALSE); 1908 } 1909 break; 1910 case DDI_INTR_TYPE_FIXED: 1911 rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0], 1912 (ddi_intr_handler_t *)i40e_intr_legacy, i40e, NULL); 1913 if (rc != DDI_SUCCESS) { 1914 i40e_log(i40e, "Add interrupt handler (legacy) failed:" 1915 " return %d", rc); 1916 return (B_FALSE); 1917 } 1918 break; 1919 default: 1920 /* Cast to pacify lint */ 1921 panic("i40e_intr_type %p contains an unknown type: %d", 1922 (void *)i40e, i40e->i40e_intr_type); 1923 } 1924 1925 return (B_TRUE); 1926 } 1927 1928 /* 1929 * Perform periodic checks. Longer term, we should be thinking about additional 1930 * things here: 1931 * 1932 * o Stall Detection 1933 * o Temperature sensor detection 1934 * o Device resetting 1935 * o Statistics updating to avoid wraparound 1936 */ 1937 static void 1938 i40e_timer(void *arg) 1939 { 1940 i40e_t *i40e = arg; 1941 1942 mutex_enter(&i40e->i40e_general_lock); 1943 i40e_link_check(i40e); 1944 mutex_exit(&i40e->i40e_general_lock); 1945 } 1946 1947 /* 1948 * Get the hardware state, and scribble away anything that needs scribbling. 1949 */ 1950 static void 1951 i40e_get_hw_state(i40e_t *i40e, i40e_hw_t *hw) 1952 { 1953 int rc; 1954 1955 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock)); 1956 1957 (void) i40e_aq_get_link_info(hw, TRUE, NULL, NULL); 1958 i40e_link_check(i40e); 1959 1960 /* 1961 * Try and determine our PHY. Note that we may have to retry to and 1962 * delay to detect fiber correctly. 
1963 */ 1964 rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE, &i40e->i40e_phy, 1965 NULL); 1966 if (rc == I40E_ERR_UNKNOWN_PHY) { 1967 i40e_msec_delay(200); 1968 rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE, 1969 &i40e->i40e_phy, NULL); 1970 } 1971 1972 if (rc != I40E_SUCCESS) { 1973 if (rc == I40E_ERR_UNKNOWN_PHY) { 1974 i40e_error(i40e, "encountered unknown PHY type, " 1975 "not attaching."); 1976 } else { 1977 i40e_error(i40e, "error getting physical capabilities: " 1978 "%d, %d", rc, hw->aq.asq_last_status); 1979 } 1980 } 1981 1982 rc = i40e_update_link_info(hw); 1983 if (rc != I40E_SUCCESS) { 1984 i40e_error(i40e, "failed to update link information: %d", rc); 1985 } 1986 1987 /* 1988 * In general, we don't want to mask off (as in stop from being a cause) 1989 * any of the interrupts that the phy might be able to generate. 1990 */ 1991 rc = i40e_aq_set_phy_int_mask(hw, 0, NULL); 1992 if (rc != I40E_SUCCESS) { 1993 i40e_error(i40e, "failed to update phy link mask: %d", rc); 1994 } 1995 } 1996 1997 /* 1998 * Go through and re-initialize any existing filters that we may have set up for 1999 * this device. Note that we would only expect them to exist if hardware had 2000 * already been initialized and we had just reset it. While we're not 2001 * implementing this yet, we're keeping this around for when we add reset 2002 * capabilities, so this isn't forgotten. 2003 */ 2004 /* ARGSUSED */ 2005 static void 2006 i40e_init_macaddrs(i40e_t *i40e, i40e_hw_t *hw) 2007 { 2008 } 2009 2010 /* 2011 * Set the properties which have common values across all the VSIs. 2012 * Consult the "Add VSI" command section (7.4.9.5.5.1) for a 2013 * complete description of these properties. 2014 */ 2015 static void 2016 i40e_set_shared_vsi_props(i40e_t *i40e, 2017 struct i40e_aqc_vsi_properties_data *info, uint_t vsi_idx) 2018 { 2019 uint_t tc_queues; 2020 uint16_t vsi_qp_base; 2021 2022 /* 2023 * It's important that we use bitwise-OR here; callers to this 2024 * function might enable other sections before calling this 2025 * function. 2026 */ 2027 info->valid_sections |= LE_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID | 2028 I40E_AQ_VSI_PROP_VLAN_VALID); 2029 2030 /* 2031 * Calculate the starting QP index for this VSI. This base is 2032 * relative to the PF queue space; so a value of 0 for PF#1 2033 * represents the absolute index PFLAN_QALLOC_FIRSTQ for PF#1. 2034 */ 2035 vsi_qp_base = vsi_idx * i40e->i40e_num_trqpairs_per_vsi; 2036 info->mapping_flags = LE_16(I40E_AQ_VSI_QUE_MAP_CONTIG); 2037 info->queue_mapping[0] = 2038 LE_16((vsi_qp_base << I40E_AQ_VSI_QUEUE_SHIFT) & 2039 I40E_AQ_VSI_QUEUE_MASK); 2040 2041 /* 2042 * tc_queues determines the size of the traffic class, where 2043 * the size is 2^^tc_queues to a maximum of 64 for the X710 2044 * and 128 for the X722. 2045 * 2046 * Some examples: 2047 * i40e_num_trqpairs_per_vsi == 1 => tc_queues = 0, 2^^0 = 1. 2048 * i40e_num_trqpairs_per_vsi == 7 => tc_queues = 3, 2^^3 = 8. 2049 * i40e_num_trqpairs_per_vsi == 8 => tc_queues = 3, 2^^3 = 8. 2050 * i40e_num_trqpairs_per_vsi == 9 => tc_queues = 4, 2^^4 = 16. 2051 * i40e_num_trqpairs_per_vsi == 17 => tc_queues = 5, 2^^5 = 32. 2052 * i40e_num_trqpairs_per_vsi == 64 => tc_queues = 6, 2^^6 = 64. 2053 */ 2054 tc_queues = ddi_fls(i40e->i40e_num_trqpairs_per_vsi - 1); 2055 2056 /* 2057 * The TC queue mapping is in relation to the VSI queue space. 2058 * Since we are only using one traffic class (TC0) we always 2059 * start at queue offset 0. 
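 *
 * Put differently, tc_queues is ceil(log2(i40e_num_trqpairs_per_vsi)),
 * so with 8 queue pairs per VSI the mapping word below encodes a queue
 * offset of 0 and a traffic class size of 2^^3 = 8.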
2060 */ 2061 info->tc_mapping[0] = 2062 LE_16(((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) & 2063 I40E_AQ_VSI_TC_QUE_OFFSET_MASK) | 2064 ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) & 2065 I40E_AQ_VSI_TC_QUE_NUMBER_MASK)); 2066 2067 /* 2068 * I40E_AQ_VSI_PVLAN_MODE_ALL ("VLAN driver insertion mode") 2069 * 2070 * Allow tagged and untagged packets to be sent to this 2071 * VSI from the host. 2072 * 2073 * I40E_AQ_VSI_PVLAN_EMOD_NOTHING ("VLAN and UP expose mode") 2074 * 2075 * Leave the tag on the frame and place no VLAN 2076 * information in the descriptor. We want this mode 2077 * because our MAC layer will take care of the VLAN tag, 2078 * if there is one. 2079 */ 2080 info->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL | 2081 I40E_AQ_VSI_PVLAN_EMOD_NOTHING; 2082 } 2083 2084 /* 2085 * Delete the VSI at this index, if one exists. We assume there is no 2086 * action we can take if this command fails but to log the failure. 2087 */ 2088 static void 2089 i40e_delete_vsi(i40e_t *i40e, uint_t idx) 2090 { 2091 i40e_hw_t *hw = &i40e->i40e_hw_space; 2092 uint16_t seid = i40e->i40e_vsis[idx].iv_seid; 2093 2094 if (seid != 0) { 2095 int rc; 2096 2097 rc = i40e_aq_delete_element(hw, seid, NULL); 2098 2099 if (rc != I40E_SUCCESS) { 2100 i40e_error(i40e, "Failed to delete VSI %d: %d", 2101 rc, hw->aq.asq_last_status); 2102 } 2103 2104 i40e->i40e_vsis[idx].iv_seid = 0; 2105 } 2106 } 2107 2108 /* 2109 * Add a new VSI. 2110 */ 2111 static boolean_t 2112 i40e_add_vsi(i40e_t *i40e, i40e_hw_t *hw, uint_t idx) 2113 { 2114 struct i40e_vsi_context ctx; 2115 i40e_rx_group_t *rxg; 2116 int rc; 2117 2118 /* 2119 * The default VSI is created by the controller. This function 2120 * creates new, non-defualt VSIs only. 2121 */ 2122 ASSERT3U(idx, !=, 0); 2123 2124 bzero(&ctx, sizeof (struct i40e_vsi_context)); 2125 ctx.uplink_seid = i40e->i40e_veb_seid; 2126 ctx.pf_num = hw->pf_id; 2127 ctx.flags = I40E_AQ_VSI_TYPE_PF; 2128 ctx.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL; 2129 i40e_set_shared_vsi_props(i40e, &ctx.info, idx); 2130 2131 rc = i40e_aq_add_vsi(hw, &ctx, NULL); 2132 if (rc != I40E_SUCCESS) { 2133 i40e_error(i40e, "i40e_aq_add_vsi() failed %d: %d", rc, 2134 hw->aq.asq_last_status); 2135 return (B_FALSE); 2136 } 2137 2138 rxg = &i40e->i40e_rx_groups[idx]; 2139 rxg->irg_vsi_seid = ctx.seid; 2140 i40e->i40e_vsis[idx].iv_number = ctx.vsi_number; 2141 i40e->i40e_vsis[idx].iv_seid = ctx.seid; 2142 i40e->i40e_vsis[idx].iv_stats_id = LE_16(ctx.info.stat_counter_idx); 2143 2144 if (i40e_stat_vsi_init(i40e, idx) == B_FALSE) 2145 return (B_FALSE); 2146 2147 return (B_TRUE); 2148 } 2149 2150 /* 2151 * Configure the hardware for the Default Virtual Station Interface (VSI). 
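 *
 * Unlike the non-default VSIs created by i40e_add_vsi(), the default
 * VSI already exists (the controller creates it), so we fetch its
 * current parameters and update them in place rather than adding a new
 * switch element.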
2152 */ 2153 static boolean_t 2154 i40e_config_def_vsi(i40e_t *i40e, i40e_hw_t *hw) 2155 { 2156 struct i40e_vsi_context ctx; 2157 i40e_rx_group_t *def_rxg; 2158 int err; 2159 struct i40e_aqc_remove_macvlan_element_data filt; 2160 2161 bzero(&ctx, sizeof (struct i40e_vsi_context)); 2162 ctx.seid = I40E_DEF_VSI_SEID(i40e); 2163 ctx.pf_num = hw->pf_id; 2164 err = i40e_aq_get_vsi_params(hw, &ctx, NULL); 2165 if (err != I40E_SUCCESS) { 2166 i40e_error(i40e, "get VSI params failed with %d", err); 2167 return (B_FALSE); 2168 } 2169 2170 ctx.info.valid_sections = 0; 2171 i40e->i40e_vsis[0].iv_number = ctx.vsi_number; 2172 i40e->i40e_vsis[0].iv_stats_id = LE_16(ctx.info.stat_counter_idx); 2173 if (i40e_stat_vsi_init(i40e, 0) == B_FALSE) 2174 return (B_FALSE); 2175 2176 i40e_set_shared_vsi_props(i40e, &ctx.info, I40E_DEF_VSI_IDX); 2177 2178 err = i40e_aq_update_vsi_params(hw, &ctx, NULL); 2179 if (err != I40E_SUCCESS) { 2180 i40e_error(i40e, "Update VSI params failed with %d", err); 2181 return (B_FALSE); 2182 } 2183 2184 def_rxg = &i40e->i40e_rx_groups[0]; 2185 def_rxg->irg_vsi_seid = I40E_DEF_VSI_SEID(i40e); 2186 2187 /* 2188 * We have seen three different behaviors in regards to the 2189 * Default VSI and its implicit L2 MAC+VLAN filter. 2190 * 2191 * 1. It has an implicit filter for the factory MAC address 2192 * and this filter counts against 'ifr_nmacfilt_used'. 2193 * 2194 * 2. It has an implicit filter for the factory MAC address 2195 * and this filter DOES NOT count against 'ifr_nmacfilt_used'. 2196 * 2197 * 3. It DOES NOT have an implicit filter. 2198 * 2199 * All three of these cases are accounted for below. If we 2200 * fail to remove the L2 filter (ENOENT) then we assume there 2201 * wasn't one. Otherwise, if we successfully remove the 2202 * filter, we make sure to update the 'ifr_nmacfilt_used' 2203 * count accordingly. 2204 * 2205 * We remove this filter to prevent duplicate delivery of 2206 * packets destined for the primary MAC address as DLS will 2207 * create the same filter on a non-default VSI for the primary 2208 * MAC client. 2209 * 2210 * If you change the following code please test it across as 2211 * many X700 series controllers and firmware revisions as you 2212 * can. 2213 */ 2214 bzero(&filt, sizeof (filt)); 2215 bcopy(hw->mac.port_addr, filt.mac_addr, ETHERADDRL); 2216 filt.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH; 2217 filt.vlan_tag = 0; 2218 2219 ASSERT3U(i40e->i40e_resources.ifr_nmacfilt_used, <=, 1); 2220 i40e_log(i40e, "Num L2 filters: %u", 2221 i40e->i40e_resources.ifr_nmacfilt_used); 2222 2223 err = i40e_aq_remove_macvlan(hw, I40E_DEF_VSI_SEID(i40e), &filt, 1, 2224 NULL); 2225 if (err == I40E_SUCCESS) { 2226 i40e_log(i40e, 2227 "Removed L2 filter from Default VSI with SEID %u", 2228 I40E_DEF_VSI_SEID(i40e)); 2229 } else if (hw->aq.asq_last_status == ENOENT) { 2230 i40e_log(i40e, 2231 "No L2 filter for Default VSI with SEID %u", 2232 I40E_DEF_VSI_SEID(i40e)); 2233 } else { 2234 i40e_error(i40e, "Failed to remove L2 filter from" 2235 " Default VSI with SEID %u: %d (%d)", 2236 I40E_DEF_VSI_SEID(i40e), err, hw->aq.asq_last_status); 2237 2238 return (B_FALSE); 2239 } 2240 2241 /* 2242 * As mentioned above, the controller created an implicit L2 2243 * filter for the primary MAC. We want to remove both the 2244 * filter and decrement the filter count. However, not all 2245 * controllers count this implicit filter against the total 2246 * MAC filter count. So here we are making sure it is either 2247 * one or zero. 
If it is one, then we know it is for the 2248 * implicit filter and we should decrement since we just 2249 * removed the filter above. If it is zero then we know the 2250 * controller that does not count the implicit filter, and it 2251 * was enough to just remove it; we leave the count alone. 2252 * But if it is neither, then we have never seen a controller 2253 * like this before and we should fail to attach. 2254 * 2255 * It is unfortunate that this code must exist but the 2256 * behavior of this implicit L2 filter and its corresponding 2257 * count were dicovered through empirical testing. The 2258 * programming manuals hint at this filter but do not 2259 * explicitly call out the exact behavior. 2260 */ 2261 if (i40e->i40e_resources.ifr_nmacfilt_used == 1) { 2262 i40e->i40e_resources.ifr_nmacfilt_used--; 2263 } else { 2264 if (i40e->i40e_resources.ifr_nmacfilt_used != 0) { 2265 i40e_error(i40e, "Unexpected L2 filter count: %u" 2266 " (expected 0)", 2267 i40e->i40e_resources.ifr_nmacfilt_used); 2268 return (B_FALSE); 2269 } 2270 } 2271 2272 return (B_TRUE); 2273 } 2274 2275 static boolean_t 2276 i40e_config_rss_key_x722(i40e_t *i40e, i40e_hw_t *hw) 2277 { 2278 for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) { 2279 uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1]; 2280 struct i40e_aqc_get_set_rss_key_data key; 2281 const char *u8seed; 2282 enum i40e_status_code status; 2283 uint16_t vsi_number = i40e->i40e_vsis[i].iv_number; 2284 2285 (void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed)); 2286 u8seed = (char *)seed; 2287 2288 CTASSERT(sizeof (key) >= (sizeof (key.standard_rss_key) + 2289 sizeof (key.extended_hash_key))); 2290 2291 bcopy(u8seed, key.standard_rss_key, 2292 sizeof (key.standard_rss_key)); 2293 bcopy(&u8seed[sizeof (key.standard_rss_key)], 2294 key.extended_hash_key, sizeof (key.extended_hash_key)); 2295 2296 ASSERT3U(vsi_number, !=, 0); 2297 status = i40e_aq_set_rss_key(hw, vsi_number, &key); 2298 2299 if (status != I40E_SUCCESS) { 2300 i40e_error(i40e, "failed to set RSS key for VSI %u: %d", 2301 vsi_number, status); 2302 return (B_FALSE); 2303 } 2304 } 2305 2306 return (B_TRUE); 2307 } 2308 2309 /* 2310 * Configure the RSS key. For the X710 controller family, this is set on a 2311 * per-PF basis via registers. For the X722, this is done on a per-VSI basis 2312 * through the admin queue. 2313 */ 2314 static boolean_t 2315 i40e_config_rss_key(i40e_t *i40e, i40e_hw_t *hw) 2316 { 2317 if (i40e_is_x722(i40e)) { 2318 if (!i40e_config_rss_key_x722(i40e, hw)) 2319 return (B_FALSE); 2320 } else { 2321 uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1]; 2322 2323 (void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed)); 2324 for (uint_t i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) 2325 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed[i]); 2326 } 2327 2328 return (B_TRUE); 2329 } 2330 2331 /* 2332 * Populate the LUT. The size of each entry in the LUT depends on the controller 2333 * family, with the X722 using a known 7-bit width. On the X710 controller, this 2334 * is programmed through its control registers where as on the X722 this is 2335 * configured through the admin queue. Also of note, the X722 allows the LUT to 2336 * be set on a per-PF or VSI basis. At this time we use the PF setting. If we 2337 * decide to use the per-VSI LUT in the future, then we will need to modify the 2338 * i40e_add_vsi() function to set the RSS LUT bits in the queueing section. 
2339 * 2340 * We populate the LUT in a round robin fashion with the rx queue indices from 0 2341 * to i40e_num_trqpairs_per_vsi - 1. 2342 */ 2343 static boolean_t 2344 i40e_config_rss_hlut(i40e_t *i40e, i40e_hw_t *hw) 2345 { 2346 uint32_t *hlut; 2347 uint8_t lut_mask; 2348 uint_t i; 2349 boolean_t ret = B_FALSE; 2350 2351 /* 2352 * We always configure the PF with a table size of 512 bytes in 2353 * i40e_chip_start(). 2354 */ 2355 hlut = kmem_alloc(I40E_HLUT_TABLE_SIZE, KM_NOSLEEP); 2356 if (hlut == NULL) { 2357 i40e_error(i40e, "i40e_config_rss() buffer allocation failed"); 2358 return (B_FALSE); 2359 } 2360 2361 /* 2362 * The width of the X722 is apparently defined to be 7 bits, regardless 2363 * of the capability. 2364 */ 2365 if (i40e_is_x722(i40e)) { 2366 lut_mask = (1 << 7) - 1; 2367 } else { 2368 lut_mask = (1 << hw->func_caps.rss_table_entry_width) - 1; 2369 } 2370 2371 for (i = 0; i < I40E_HLUT_TABLE_SIZE; i++) { 2372 ((uint8_t *)hlut)[i] = 2373 (i % i40e->i40e_num_trqpairs_per_vsi) & lut_mask; 2374 } 2375 2376 if (i40e_is_x722(i40e)) { 2377 enum i40e_status_code status; 2378 2379 status = i40e_aq_set_rss_lut(hw, 0, B_TRUE, (uint8_t *)hlut, 2380 I40E_HLUT_TABLE_SIZE); 2381 2382 if (status != I40E_SUCCESS) { 2383 i40e_error(i40e, "failed to set RSS LUT %d: %d", 2384 status, hw->aq.asq_last_status); 2385 goto out; 2386 } 2387 } else { 2388 for (i = 0; i < I40E_HLUT_TABLE_SIZE >> 2; i++) { 2389 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), hlut[i]); 2390 } 2391 } 2392 ret = B_TRUE; 2393 out: 2394 kmem_free(hlut, I40E_HLUT_TABLE_SIZE); 2395 return (ret); 2396 } 2397 2398 /* 2399 * Set up RSS. 2400 * 1. Seed the hash key. 2401 * 2. Enable PCTYPEs for the hash filter. 2402 * 3. Populate the LUT. 2403 */ 2404 static boolean_t 2405 i40e_config_rss(i40e_t *i40e, i40e_hw_t *hw) 2406 { 2407 uint64_t hena; 2408 2409 /* 2410 * 1. Seed the hash key 2411 */ 2412 if (!i40e_config_rss_key(i40e, hw)) 2413 return (B_FALSE); 2414 2415 /* 2416 * 2. Configure PCTYPES 2417 */ 2418 hena = (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | 2419 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | 2420 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | 2421 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | 2422 (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) | 2423 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | 2424 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | 2425 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | 2426 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | 2427 (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) | 2428 (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD); 2429 2430 /* 2431 * Add additional types supported by the X722 controller. 2432 */ 2433 if (i40e_is_x722(i40e)) { 2434 hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | 2435 (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | 2436 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | 2437 (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | 2438 (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) | 2439 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK); 2440 } 2441 2442 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena); 2443 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32)); 2444 2445 /* 2446 * 3. Populate LUT 2447 */ 2448 return (i40e_config_rss_hlut(i40e, hw)); 2449 } 2450 2451 /* 2452 * Wrapper to kick the chipset on. 
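 *
 * This restarts link auto-negotiation on older firmware, records the
 * current hardware and PHY state, disables Ethernet flow control, sets
 * up the filter control and chip-level interrupt state, creates the VEB
 * and the per-group VSIs, and finally seeds the RSS configuration.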
2453 */ 2454 static boolean_t 2455 i40e_chip_start(i40e_t *i40e) 2456 { 2457 i40e_hw_t *hw = &i40e->i40e_hw_space; 2458 struct i40e_filter_control_settings filter; 2459 int rc; 2460 uint8_t err; 2461 2462 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) || 2463 (hw->aq.fw_maj_ver < 4)) { 2464 i40e_msec_delay(75); 2465 if (i40e_aq_set_link_restart_an(hw, TRUE, NULL) != 2466 I40E_SUCCESS) { 2467 i40e_error(i40e, "failed to restart link: admin queue " 2468 "error: %d", hw->aq.asq_last_status); 2469 return (B_FALSE); 2470 } 2471 } 2472 2473 /* Determine hardware state */ 2474 i40e_get_hw_state(i40e, hw); 2475 2476 /* For now, we always disable Ethernet Flow Control. */ 2477 hw->fc.requested_mode = I40E_FC_NONE; 2478 rc = i40e_set_fc(hw, &err, B_TRUE); 2479 if (rc != I40E_SUCCESS) { 2480 i40e_error(i40e, "Setting flow control failed, returned %d" 2481 " with error: 0x%x", rc, err); 2482 return (B_FALSE); 2483 } 2484 2485 /* Initialize mac addresses. */ 2486 i40e_init_macaddrs(i40e, hw); 2487 2488 /* 2489 * Set up the filter control. If the hash lut size is changed from 2490 * I40E_HASH_LUT_SIZE_512 then I40E_HLUT_TABLE_SIZE and 2491 * i40e_config_rss_hlut() will need to be updated. 2492 */ 2493 bzero(&filter, sizeof (filter)); 2494 filter.enable_ethtype = TRUE; 2495 filter.enable_macvlan = TRUE; 2496 filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; 2497 2498 rc = i40e_set_filter_control(hw, &filter); 2499 if (rc != I40E_SUCCESS) { 2500 i40e_error(i40e, "i40e_set_filter_control() returned %d", rc); 2501 return (B_FALSE); 2502 } 2503 2504 i40e_intr_chip_init(i40e); 2505 2506 rc = i40e_get_mac_seid(i40e); 2507 if (rc == -1) { 2508 i40e_error(i40e, "failed to obtain MAC Uplink SEID"); 2509 return (B_FALSE); 2510 } 2511 i40e->i40e_mac_seid = (uint16_t)rc; 2512 2513 /* 2514 * Create a VEB in order to support multiple VSIs. Each VSI 2515 * functions as a MAC group. This call sets the PF's MAC as 2516 * the uplink port and the PF's default VSI as the default 2517 * downlink port. 2518 */ 2519 rc = i40e_aq_add_veb(hw, i40e->i40e_mac_seid, I40E_DEF_VSI_SEID(i40e), 2520 0x1, B_TRUE, &i40e->i40e_veb_seid, B_FALSE, NULL); 2521 if (rc != I40E_SUCCESS) { 2522 i40e_error(i40e, "i40e_aq_add_veb() failed %d: %d", rc, 2523 hw->aq.asq_last_status); 2524 return (B_FALSE); 2525 } 2526 2527 if (!i40e_config_def_vsi(i40e, hw)) 2528 return (B_FALSE); 2529 2530 for (uint_t i = 1; i < i40e->i40e_num_rx_groups; i++) { 2531 if (!i40e_add_vsi(i40e, hw, i)) 2532 return (B_FALSE); 2533 } 2534 2535 if (!i40e_config_rss(i40e, hw)) 2536 return (B_FALSE); 2537 2538 i40e_flush(hw); 2539 2540 return (B_TRUE); 2541 } 2542 2543 /* 2544 * Take care of tearing down the rx ring. See 8.3.3.1.2 for more information. 2545 */ 2546 static void 2547 i40e_shutdown_rx_rings(i40e_t *i40e) 2548 { 2549 int i; 2550 uint32_t reg; 2551 2552 i40e_hw_t *hw = &i40e->i40e_hw_space; 2553 2554 /* 2555 * Step 1. The interrupt linked list (see i40e_intr.c for more 2556 * information) should have already been cleared before calling this 2557 * function. 2558 */ 2559 #ifdef DEBUG 2560 if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) { 2561 for (i = 1; i < i40e->i40e_intr_count; i++) { 2562 reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1)); 2563 VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL); 2564 } 2565 } else { 2566 reg = I40E_READ_REG(hw, I40E_PFINT_LNKLST0); 2567 VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL); 2568 } 2569 2570 #endif /* DEBUG */ 2571 2572 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2573 /* 2574 * Step 1. Request the queue by clearing QENA_REQ. 
It may not be 2575 * set due to unwinding from failures and a partially enabled 2576 * ring set. 2577 */ 2578 reg = I40E_READ_REG(hw, I40E_QRX_ENA(i)); 2579 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK)) 2580 continue; 2581 VERIFY((reg & I40E_QRX_ENA_QENA_REQ_MASK) == 2582 I40E_QRX_ENA_QENA_REQ_MASK); 2583 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; 2584 I40E_WRITE_REG(hw, I40E_QRX_ENA(i), reg); 2585 } 2586 2587 /* 2588 * Step 2. Wait for the disable to take, by having QENA_STAT in the FPM 2589 * be cleared. Note that we could still receive data in the queue during 2590 * this time. We don't actually wait for this now and instead defer this 2591 * to i40e_shutdown_rings_wait(), after we've interleaved disabling the 2592 * TX queues as well. 2593 */ 2594 } 2595 2596 static void 2597 i40e_shutdown_tx_rings(i40e_t *i40e) 2598 { 2599 int i; 2600 uint32_t reg; 2601 2602 i40e_hw_t *hw = &i40e->i40e_hw_space; 2603 2604 /* 2605 * Step 1. The interrupt linked list should already have been cleared. 2606 */ 2607 #ifdef DEBUG 2608 if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) { 2609 for (i = 1; i < i40e->i40e_intr_count; i++) { 2610 reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1)); 2611 VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL); 2612 } 2613 } else { 2614 reg = I40E_READ_REG(hw, I40E_PFINT_LNKLST0); 2615 VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL); 2616 2617 } 2618 #endif /* DEBUG */ 2619 2620 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2621 /* 2622 * Step 2. Set the SET_QDIS flag for every queue. 2623 */ 2624 i40e_pre_tx_queue_cfg(hw, i, B_FALSE); 2625 } 2626 2627 /* 2628 * Step 3. Wait at least 400 usec (can be done once for all queues). 2629 */ 2630 drv_usecwait(500); 2631 2632 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2633 /* 2634 * Step 4. Clear the QENA_REQ flag which tells hardware to 2635 * quiesce. If QENA_REQ is not already set then that means that 2636 * we likely already tried to disable this queue. 2637 */ 2638 reg = I40E_READ_REG(hw, I40E_QTX_ENA(i)); 2639 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK)) 2640 continue; 2641 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK; 2642 I40E_WRITE_REG(hw, I40E_QTX_ENA(i), reg); 2643 } 2644 2645 /* 2646 * Step 5. Wait for all drains to finish. This will be done by the 2647 * hardware removing the QENA_STAT flag from the queue. Rather than 2648 * waiting here, we interleave it with all the others in 2649 * i40e_shutdown_rings_wait(). 2650 */ 2651 } 2652 2653 /* 2654 * Wait for all the rings to be shut down. e.g. Steps 2 and 5 from the above 2655 * functions. 
2656 */ 2657 static boolean_t 2658 i40e_shutdown_rings_wait(i40e_t *i40e) 2659 { 2660 int i, try; 2661 i40e_hw_t *hw = &i40e->i40e_hw_space; 2662 2663 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2664 uint32_t reg; 2665 2666 for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) { 2667 reg = I40E_READ_REG(hw, I40E_QRX_ENA(i)); 2668 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) 2669 break; 2670 i40e_msec_delay(I40E_RING_WAIT_PAUSE); 2671 } 2672 2673 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) != 0) { 2674 i40e_error(i40e, "timed out disabling rx queue %d", 2675 i); 2676 return (B_FALSE); 2677 } 2678 2679 for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) { 2680 reg = I40E_READ_REG(hw, I40E_QTX_ENA(i)); 2681 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) 2682 break; 2683 i40e_msec_delay(I40E_RING_WAIT_PAUSE); 2684 } 2685 2686 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) != 0) { 2687 i40e_error(i40e, "timed out disabling tx queue %d", 2688 i); 2689 return (B_FALSE); 2690 } 2691 } 2692 2693 return (B_TRUE); 2694 } 2695 2696 static boolean_t 2697 i40e_shutdown_rings(i40e_t *i40e) 2698 { 2699 i40e_shutdown_rx_rings(i40e); 2700 i40e_shutdown_tx_rings(i40e); 2701 return (i40e_shutdown_rings_wait(i40e)); 2702 } 2703 2704 static void 2705 i40e_setup_rx_descs(i40e_trqpair_t *itrq) 2706 { 2707 int i; 2708 i40e_rx_data_t *rxd = itrq->itrq_rxdata; 2709 2710 for (i = 0; i < rxd->rxd_ring_size; i++) { 2711 i40e_rx_control_block_t *rcb; 2712 i40e_rx_desc_t *rdesc; 2713 2714 rcb = rxd->rxd_work_list[i]; 2715 rdesc = &rxd->rxd_desc_ring[i]; 2716 2717 rdesc->read.pkt_addr = 2718 CPU_TO_LE64((uintptr_t)rcb->rcb_dma.dmab_dma_address); 2719 rdesc->read.hdr_addr = 0; 2720 } 2721 } 2722 2723 static boolean_t 2724 i40e_setup_rx_hmc(i40e_trqpair_t *itrq) 2725 { 2726 i40e_rx_data_t *rxd = itrq->itrq_rxdata; 2727 i40e_t *i40e = itrq->itrq_i40e; 2728 i40e_hw_t *hw = &i40e->i40e_hw_space; 2729 2730 struct i40e_hmc_obj_rxq rctx; 2731 int err; 2732 2733 bzero(&rctx, sizeof (struct i40e_hmc_obj_rxq)); 2734 rctx.base = rxd->rxd_desc_area.dmab_dma_address / 2735 I40E_HMC_RX_CTX_UNIT; 2736 rctx.qlen = rxd->rxd_ring_size; 2737 VERIFY(i40e->i40e_rx_buf_size >= I40E_HMC_RX_DBUFF_MIN); 2738 VERIFY(i40e->i40e_rx_buf_size <= I40E_HMC_RX_DBUFF_MAX); 2739 rctx.dbuff = i40e->i40e_rx_buf_size >> I40E_RXQ_CTX_DBUFF_SHIFT; 2740 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT; 2741 rctx.dtype = I40E_HMC_RX_DTYPE_NOSPLIT; 2742 rctx.dsize = I40E_HMC_RX_DSIZE_32BYTE; 2743 rctx.crcstrip = I40E_HMC_RX_CRCSTRIP_ENABLE; 2744 rctx.fc_ena = I40E_HMC_RX_FC_DISABLE; 2745 rctx.l2tsel = I40E_HMC_RX_L2TAGORDER; 2746 rctx.hsplit_0 = I40E_HMC_RX_HDRSPLIT_DISABLE; 2747 rctx.hsplit_1 = I40E_HMC_RX_HDRSPLIT_DISABLE; 2748 rctx.showiv = I40E_HMC_RX_INVLAN_DONTSTRIP; 2749 rctx.rxmax = i40e->i40e_frame_max; 2750 rctx.tphrdesc_ena = I40E_HMC_RX_TPH_DISABLE; 2751 rctx.tphwdesc_ena = I40E_HMC_RX_TPH_DISABLE; 2752 rctx.tphdata_ena = I40E_HMC_RX_TPH_DISABLE; 2753 rctx.tphhead_ena = I40E_HMC_RX_TPH_DISABLE; 2754 rctx.lrxqthresh = I40E_HMC_RX_LOWRXQ_NOINTR; 2755 2756 /* 2757 * This must be set to 0x1, see Table 8-12 in section 8.3.3.2.2. 
2758 */ 2759 rctx.prefena = I40E_HMC_RX_PREFENA; 2760 2761 err = i40e_clear_lan_rx_queue_context(hw, itrq->itrq_index); 2762 if (err != I40E_SUCCESS) { 2763 i40e_error(i40e, "failed to clear rx queue %d context: %d", 2764 itrq->itrq_index, err); 2765 return (B_FALSE); 2766 } 2767 2768 err = i40e_set_lan_rx_queue_context(hw, itrq->itrq_index, &rctx); 2769 if (err != I40E_SUCCESS) { 2770 i40e_error(i40e, "failed to set rx queue %d context: %d", 2771 itrq->itrq_index, err); 2772 return (B_FALSE); 2773 } 2774 2775 return (B_TRUE); 2776 } 2777 2778 /* 2779 * Take care of setting up the descriptor rings and actually programming the 2780 * device. See 8.3.3.1.1 for the full list of steps we need to do to enable the 2781 * rx rings. 2782 */ 2783 static boolean_t 2784 i40e_setup_rx_rings(i40e_t *i40e) 2785 { 2786 int i; 2787 i40e_hw_t *hw = &i40e->i40e_hw_space; 2788 2789 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2790 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i]; 2791 i40e_rx_data_t *rxd = itrq->itrq_rxdata; 2792 uint32_t reg; 2793 2794 /* 2795 * Step 1. Program all receive ring descriptors. 2796 */ 2797 i40e_setup_rx_descs(itrq); 2798 2799 /* 2800 * Step 2. Program the queue's FPM/HMC context. 2801 */ 2802 if (i40e_setup_rx_hmc(itrq) == B_FALSE) 2803 return (B_FALSE); 2804 2805 /* 2806 * Step 3. Clear the queue's tail pointer and set it to the end 2807 * of the space. 2808 */ 2809 I40E_WRITE_REG(hw, I40E_QRX_TAIL(i), 0); 2810 I40E_WRITE_REG(hw, I40E_QRX_TAIL(i), rxd->rxd_ring_size - 1); 2811 2812 /* 2813 * Step 4. Enable the queue via the QENA_REQ. 2814 */ 2815 reg = I40E_READ_REG(hw, I40E_QRX_ENA(i)); 2816 VERIFY0(reg & (I40E_QRX_ENA_QENA_REQ_MASK | 2817 I40E_QRX_ENA_QENA_STAT_MASK)); 2818 reg |= I40E_QRX_ENA_QENA_REQ_MASK; 2819 I40E_WRITE_REG(hw, I40E_QRX_ENA(i), reg); 2820 } 2821 2822 /* 2823 * Note, we wait for every queue to be enabled before we start checking. 2824 * This will hopefully cause most queues to be enabled at this point. 2825 */ 2826 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2827 uint32_t j, reg; 2828 2829 /* 2830 * Step 5. Verify that QENA_STAT has been set. It's promised 2831 * that this should occur within about 10 us, but like other 2832 * systems, we give the card a bit more time. 
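 * In practice we poll up to I40E_RING_WAIT_NTRIES times, pausing
 * I40E_RING_WAIT_PAUSE milliseconds between reads, the same bound the
 * tx ring setup below uses (roughly 100 ms per queue).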
2833 */ 2834 for (j = 0; j < I40E_RING_WAIT_NTRIES; j++) { 2835 reg = I40E_READ_REG(hw, I40E_QRX_ENA(i)); 2836 2837 if (reg & I40E_QRX_ENA_QENA_STAT_MASK) 2838 break; 2839 i40e_msec_delay(I40E_RING_WAIT_PAUSE); 2840 } 2841 2842 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) { 2843 i40e_error(i40e, "failed to enable rx queue %d, timed " 2844 "out.", i); 2845 return (B_FALSE); 2846 } 2847 } 2848 2849 return (B_TRUE); 2850 } 2851 2852 static boolean_t 2853 i40e_setup_tx_hmc(i40e_trqpair_t *itrq) 2854 { 2855 i40e_t *i40e = itrq->itrq_i40e; 2856 i40e_hw_t *hw = &i40e->i40e_hw_space; 2857 2858 struct i40e_hmc_obj_txq tctx; 2859 struct i40e_vsi_context context; 2860 int err; 2861 2862 bzero(&tctx, sizeof (struct i40e_hmc_obj_txq)); 2863 tctx.new_context = I40E_HMC_TX_NEW_CONTEXT; 2864 tctx.base = itrq->itrq_desc_area.dmab_dma_address / 2865 I40E_HMC_TX_CTX_UNIT; 2866 tctx.fc_ena = I40E_HMC_TX_FC_DISABLE; 2867 tctx.timesync_ena = I40E_HMC_TX_TS_DISABLE; 2868 tctx.fd_ena = I40E_HMC_TX_FD_DISABLE; 2869 tctx.alt_vlan_ena = I40E_HMC_TX_ALT_VLAN_DISABLE; 2870 tctx.head_wb_ena = I40E_HMC_TX_WB_ENABLE; 2871 tctx.qlen = itrq->itrq_tx_ring_size; 2872 tctx.tphrdesc_ena = I40E_HMC_TX_TPH_DISABLE; 2873 tctx.tphrpacket_ena = I40E_HMC_TX_TPH_DISABLE; 2874 tctx.tphwdesc_ena = I40E_HMC_TX_TPH_DISABLE; 2875 tctx.head_wb_addr = itrq->itrq_desc_area.dmab_dma_address + 2876 sizeof (i40e_tx_desc_t) * itrq->itrq_tx_ring_size; 2877 2878 /* 2879 * This field isn't actually documented, like crc, but it suggests that 2880 * it should be zeroed. We leave both of these here because of that for 2881 * now. We should check with Intel on why these are here even. 2882 */ 2883 tctx.crc = 0; 2884 tctx.rdylist_act = 0; 2885 2886 /* 2887 * We're supposed to assign the rdylist field with the value of the 2888 * traffic class index for the first device. We query the VSI parameters 2889 * again to get what the handle is. Note that every queue is always 2890 * assigned to traffic class zero, because we don't actually use them. 2891 */ 2892 bzero(&context, sizeof (struct i40e_vsi_context)); 2893 context.seid = I40E_DEF_VSI_SEID(i40e); 2894 context.pf_num = hw->pf_id; 2895 err = i40e_aq_get_vsi_params(hw, &context, NULL); 2896 if (err != I40E_SUCCESS) { 2897 i40e_error(i40e, "get VSI params failed with %d", err); 2898 return (B_FALSE); 2899 } 2900 tctx.rdylist = LE_16(context.info.qs_handle[0]); 2901 2902 err = i40e_clear_lan_tx_queue_context(hw, itrq->itrq_index); 2903 if (err != I40E_SUCCESS) { 2904 i40e_error(i40e, "failed to clear tx queue %d context: %d", 2905 itrq->itrq_index, err); 2906 return (B_FALSE); 2907 } 2908 2909 err = i40e_set_lan_tx_queue_context(hw, itrq->itrq_index, &tctx); 2910 if (err != I40E_SUCCESS) { 2911 i40e_error(i40e, "failed to set tx queue %d context: %d", 2912 itrq->itrq_index, err); 2913 return (B_FALSE); 2914 } 2915 2916 return (B_TRUE); 2917 } 2918 2919 /* 2920 * Take care of setting up the descriptor rings and actually programming the 2921 * device. See 8.4.3.1.1 for what we need to do here. 2922 */ 2923 static boolean_t 2924 i40e_setup_tx_rings(i40e_t *i40e) 2925 { 2926 int i; 2927 i40e_hw_t *hw = &i40e->i40e_hw_space; 2928 2929 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2930 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i]; 2931 uint32_t reg; 2932 2933 /* 2934 * Step 1. Clear the queue disable flag and verify that the 2935 * index is set correctly. 2936 */ 2937 i40e_pre_tx_queue_cfg(hw, i, B_TRUE); 2938 2939 /* 2940 * Step 2. Prepare the queue's FPM/HMC context. 
2941 */ 2942 if (i40e_setup_tx_hmc(itrq) == B_FALSE) 2943 return (B_FALSE); 2944 2945 /* 2946 * Step 3. Verify that it's clear that this PF owns this queue. 2947 */ 2948 reg = I40E_QTX_CTL_PF_QUEUE; 2949 reg |= (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) & 2950 I40E_QTX_CTL_PF_INDX_MASK; 2951 I40E_WRITE_REG(hw, I40E_QTX_CTL(itrq->itrq_index), reg); 2952 i40e_flush(hw); 2953 2954 /* 2955 * Step 4. Set the QENA_REQ flag. 2956 */ 2957 reg = I40E_READ_REG(hw, I40E_QTX_ENA(i)); 2958 VERIFY0(reg & (I40E_QTX_ENA_QENA_REQ_MASK | 2959 I40E_QTX_ENA_QENA_STAT_MASK)); 2960 reg |= I40E_QTX_ENA_QENA_REQ_MASK; 2961 I40E_WRITE_REG(hw, I40E_QTX_ENA(i), reg); 2962 } 2963 2964 /* 2965 * Note, we wait for every queue to be enabled before we start checking. 2966 * This will hopefully cause most queues to be enabled at this point. 2967 */ 2968 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 2969 uint32_t j, reg; 2970 2971 /* 2972 * Step 5. Verify that QENA_STAT has been set. It's promised 2973 * that this should occur within about 10 us, but like BSD, 2974 * we'll try for up to 100 ms for this queue. 2975 */ 2976 for (j = 0; j < I40E_RING_WAIT_NTRIES; j++) { 2977 reg = I40E_READ_REG(hw, I40E_QTX_ENA(i)); 2978 2979 if (reg & I40E_QTX_ENA_QENA_STAT_MASK) 2980 break; 2981 i40e_msec_delay(I40E_RING_WAIT_PAUSE); 2982 } 2983 2984 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) { 2985 i40e_error(i40e, "failed to enable tx queue %d, timed " 2986 "out", i); 2987 return (B_FALSE); 2988 } 2989 } 2990 2991 return (B_TRUE); 2992 } 2993 2994 void 2995 i40e_stop(i40e_t *i40e, boolean_t free_allocations) 2996 { 2997 uint_t i; 2998 i40e_hw_t *hw = &i40e->i40e_hw_space; 2999 3000 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock)); 3001 3002 /* 3003 * Shutdown and drain the tx and rx pipeline. We do this using the 3004 * following steps. 3005 * 3006 * 1) Shutdown interrupts to all the queues (trying to keep the admin 3007 * queue alive). 3008 * 3009 * 2) Remove all of the interrupt tx and rx causes by setting the 3010 * interrupt linked lists to zero. 3011 * 3012 * 2) Shutdown the tx and rx rings. Because i40e_shutdown_rings() should 3013 * wait for all the queues to be disabled, once we reach that point 3014 * it should be safe to free associated data. 3015 * 3016 * 4) Wait 50ms after all that is done. This ensures that the rings are 3017 * ready for programming again and we don't have to think about this 3018 * in other parts of the driver. 3019 * 3020 * 5) Disable remaining chip interrupts, (admin queue, etc.) 3021 * 3022 * 6) Verify that FM is happy with all the register accesses we 3023 * performed. 3024 */ 3025 i40e_intr_io_disable_all(i40e); 3026 i40e_intr_io_clear_cause(i40e); 3027 3028 if (i40e_shutdown_rings(i40e) == B_FALSE) { 3029 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST); 3030 } 3031 3032 delay(50 * drv_usectohz(1000)); 3033 3034 /* 3035 * We don't delete the default VSI because it replaces the VEB 3036 * after VEB deletion (see the "Delete Element" section). 3037 * Furthermore, since the default VSI is provided by the 3038 * firmware, we never attempt to delete it. 
3039 */ 3040 for (i = 1; i < i40e->i40e_num_rx_groups; i++) { 3041 i40e_delete_vsi(i40e, i); 3042 } 3043 3044 if (i40e->i40e_veb_seid != 0) { 3045 int rc = i40e_aq_delete_element(hw, i40e->i40e_veb_seid, NULL); 3046 3047 if (rc != I40E_SUCCESS) { 3048 i40e_error(i40e, "Failed to delete VEB %d: %d", rc, 3049 hw->aq.asq_last_status); 3050 } 3051 3052 i40e->i40e_veb_seid = 0; 3053 } 3054 3055 i40e_intr_chip_fini(i40e); 3056 3057 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 3058 mutex_enter(&i40e->i40e_trqpairs[i].itrq_rx_lock); 3059 mutex_enter(&i40e->i40e_trqpairs[i].itrq_tx_lock); 3060 } 3061 3062 /* 3063 * We should consider refactoring this to be part of the ring start / 3064 * stop routines at some point. 3065 */ 3066 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 3067 i40e_stats_trqpair_fini(&i40e->i40e_trqpairs[i]); 3068 } 3069 3070 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) != 3071 DDI_FM_OK) { 3072 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST); 3073 } 3074 3075 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 3076 i40e_tx_cleanup_ring(&i40e->i40e_trqpairs[i]); 3077 } 3078 3079 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 3080 mutex_exit(&i40e->i40e_trqpairs[i].itrq_rx_lock); 3081 mutex_exit(&i40e->i40e_trqpairs[i].itrq_tx_lock); 3082 } 3083 3084 for (i = 0; i < i40e->i40e_num_rx_groups; i++) { 3085 i40e_stat_vsi_fini(i40e, i); 3086 } 3087 3088 i40e->i40e_link_speed = 0; 3089 i40e->i40e_link_duplex = 0; 3090 i40e_link_state_set(i40e, LINK_STATE_UNKNOWN); 3091 3092 if (free_allocations) { 3093 i40e_free_ring_mem(i40e, B_FALSE); 3094 } 3095 } 3096 3097 boolean_t 3098 i40e_start(i40e_t *i40e, boolean_t alloc) 3099 { 3100 i40e_hw_t *hw = &i40e->i40e_hw_space; 3101 boolean_t rc = B_TRUE; 3102 int i, err; 3103 3104 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock)); 3105 3106 if (alloc) { 3107 if (i40e_alloc_ring_mem(i40e) == B_FALSE) { 3108 i40e_error(i40e, 3109 "Failed to allocate ring memory"); 3110 return (B_FALSE); 3111 } 3112 } 3113 3114 /* 3115 * This should get refactored to be part of ring start and stop at 3116 * some point, along with most of the logic here. 3117 */ 3118 for (i = 0; i < i40e->i40e_num_trqpairs; i++) { 3119 if (i40e_stats_trqpair_init(&i40e->i40e_trqpairs[i]) == 3120 B_FALSE) { 3121 int j; 3122 3123 for (j = 0; j < i; j++) { 3124 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[j]; 3125 i40e_stats_trqpair_fini(itrq); 3126 } 3127 return (B_FALSE); 3128 } 3129 } 3130 3131 if (!i40e_chip_start(i40e)) { 3132 i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE); 3133 rc = B_FALSE; 3134 goto done; 3135 } 3136 3137 if (i40e_setup_rx_rings(i40e) == B_FALSE) { 3138 rc = B_FALSE; 3139 goto done; 3140 } 3141 3142 if (i40e_setup_tx_rings(i40e) == B_FALSE) { 3143 rc = B_FALSE; 3144 goto done; 3145 } 3146 3147 /* 3148 * Enable broadcast traffic; however, do not enable multicast traffic. 3149 * That's handle exclusively through MAC's mc_multicst routines. 3150 */ 3151 err = i40e_aq_set_vsi_broadcast(hw, I40E_DEF_VSI_SEID(i40e), B_TRUE, 3152 NULL); 3153 if (err != I40E_SUCCESS) { 3154 i40e_error(i40e, "failed to set default VSI: %d", err); 3155 rc = B_FALSE; 3156 goto done; 3157 } 3158 3159 err = i40e_aq_set_mac_config(hw, i40e->i40e_frame_max, B_TRUE, 0, NULL); 3160 if (err != I40E_SUCCESS) { 3161 i40e_error(i40e, "failed to set MAC config: %d", err); 3162 rc = B_FALSE; 3163 goto done; 3164 } 3165 3166 /* 3167 * Finally, make sure that we're happy from an FM perspective. 
3168 */ 3169 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) != 3170 DDI_FM_OK) { 3171 rc = B_FALSE; 3172 goto done; 3173 } 3174 3175 /* Clear state bits prior to final interrupt enabling. */ 3176 atomic_and_32(&i40e->i40e_state, 3177 ~(I40E_ERROR | I40E_STALL | I40E_OVERTEMP)); 3178 3179 i40e_intr_io_enable_all(i40e); 3180 3181 done: 3182 if (rc == B_FALSE) { 3183 i40e_stop(i40e, B_FALSE); 3184 if (alloc == B_TRUE) { 3185 i40e_free_ring_mem(i40e, B_TRUE); 3186 } 3187 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST); 3188 } 3189 3190 return (rc); 3191 } 3192 3193 /* 3194 * We may have loaned up descriptors to the stack. As such, if we still have 3195 * them outstanding, then we will not continue with detach. 3196 */ 3197 static boolean_t 3198 i40e_drain_rx(i40e_t *i40e) 3199 { 3200 mutex_enter(&i40e->i40e_rx_pending_lock); 3201 while (i40e->i40e_rx_pending > 0) { 3202 if (cv_reltimedwait(&i40e->i40e_rx_pending_cv, 3203 &i40e->i40e_rx_pending_lock, 3204 drv_usectohz(I40E_DRAIN_RX_WAIT), TR_CLOCK_TICK) == -1) { 3205 mutex_exit(&i40e->i40e_rx_pending_lock); 3206 return (B_FALSE); 3207 } 3208 } 3209 mutex_exit(&i40e->i40e_rx_pending_lock); 3210 3211 return (B_TRUE); 3212 } 3213 3214 /* 3215 * DDI UFM Callbacks 3216 */ 3217 static int 3218 i40e_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 3219 ddi_ufm_image_t *img) 3220 { 3221 if (imgno != 0) 3222 return (EINVAL); 3223 3224 ddi_ufm_image_set_desc(img, "Firmware"); 3225 ddi_ufm_image_set_nslots(img, 1); 3226 3227 return (0); 3228 } 3229 3230 static int 3231 i40e_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno, 3232 uint_t slotno, ddi_ufm_slot_t *slot) 3233 { 3234 i40e_t *i40e = (i40e_t *)arg; 3235 char *fw_ver = NULL, *fw_bld = NULL, *api_ver = NULL; 3236 nvlist_t *misc = NULL; 3237 uint_t flags = DDI_PROP_DONTPASS; 3238 int err; 3239 3240 if (imgno != 0 || slotno != 0 || 3241 ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags, 3242 "firmware-version", &fw_ver) != DDI_PROP_SUCCESS || 3243 ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags, 3244 "firmware-build", &fw_bld) != DDI_PROP_SUCCESS || 3245 ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags, 3246 "api-version", &api_ver) != DDI_PROP_SUCCESS) { 3247 err = EINVAL; 3248 goto err; 3249 } 3250 3251 ddi_ufm_slot_set_attrs(slot, DDI_UFM_ATTR_ACTIVE); 3252 ddi_ufm_slot_set_version(slot, fw_ver); 3253 3254 (void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP); 3255 if ((err = nvlist_add_string(misc, "firmware-build", fw_bld)) != 0 || 3256 (err = nvlist_add_string(misc, "api-version", api_ver)) != 0) { 3257 goto err; 3258 } 3259 ddi_ufm_slot_set_misc(slot, misc); 3260 3261 ddi_prop_free(fw_ver); 3262 ddi_prop_free(fw_bld); 3263 ddi_prop_free(api_ver); 3264 3265 return (0); 3266 err: 3267 nvlist_free(misc); 3268 if (fw_ver != NULL) 3269 ddi_prop_free(fw_ver); 3270 if (fw_bld != NULL) 3271 ddi_prop_free(fw_bld); 3272 if (api_ver != NULL) 3273 ddi_prop_free(api_ver); 3274 3275 return (err); 3276 } 3277 3278 static int 3279 i40e_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps) 3280 { 3281 *caps = DDI_UFM_CAP_REPORT; 3282 3283 return (0); 3284 } 3285 3286 static ddi_ufm_ops_t i40e_ufm_ops = { 3287 NULL, 3288 i40e_ufm_fill_image, 3289 i40e_ufm_fill_slot, 3290 i40e_ufm_getcaps 3291 }; 3292 3293 static int 3294 i40e_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd) 3295 { 3296 i40e_t *i40e; 3297 struct i40e_osdep *osdep; 3298 i40e_hw_t *hw; 3299 int instance; 3300 3301 if (cmd != DDI_ATTACH) 3302 return 
(DDI_FAILURE); 3303 3304 instance = ddi_get_instance(devinfo); 3305 i40e = kmem_zalloc(sizeof (i40e_t), KM_SLEEP); 3306 3307 i40e->i40e_aqbuf = kmem_zalloc(I40E_ADMINQ_BUFSZ, KM_SLEEP); 3308 i40e->i40e_instance = instance; 3309 i40e->i40e_dip = devinfo; 3310 3311 hw = &i40e->i40e_hw_space; 3312 osdep = &i40e->i40e_osdep_space; 3313 hw->back = osdep; 3314 osdep->ios_i40e = i40e; 3315 3316 ddi_set_driver_private(devinfo, i40e); 3317 3318 i40e_fm_init(i40e); 3319 i40e->i40e_attach_progress |= I40E_ATTACH_FM_INIT; 3320 3321 if (pci_config_setup(devinfo, &osdep->ios_cfg_handle) != DDI_SUCCESS) { 3322 i40e_error(i40e, "Failed to map PCI configurations."); 3323 goto attach_fail; 3324 } 3325 i40e->i40e_attach_progress |= I40E_ATTACH_PCI_CONFIG; 3326 3327 i40e_identify_hardware(i40e); 3328 3329 if (!i40e_regs_map(i40e)) { 3330 i40e_error(i40e, "Failed to map device registers."); 3331 goto attach_fail; 3332 } 3333 i40e->i40e_attach_progress |= I40E_ATTACH_REGS_MAP; 3334 3335 i40e_init_properties(i40e); 3336 i40e->i40e_attach_progress |= I40E_ATTACH_PROPS; 3337 3338 if (!i40e_common_code_init(i40e, hw)) 3339 goto attach_fail; 3340 i40e->i40e_attach_progress |= I40E_ATTACH_COMMON_CODE; 3341 3342 /* 3343 * When we participate in IRM, we should make sure that we register 3344 * ourselves with it before callbacks. 3345 */ 3346 if (!i40e_alloc_intrs(i40e, devinfo)) { 3347 i40e_error(i40e, "Failed to allocate interrupts."); 3348 goto attach_fail; 3349 } 3350 i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_INTR; 3351 3352 if (!i40e_alloc_trqpairs(i40e)) { 3353 i40e_error(i40e, 3354 "Failed to allocate receive & transmit rings."); 3355 goto attach_fail; 3356 } 3357 i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_RINGSLOCKS; 3358 3359 if (!i40e_map_intrs_to_vectors(i40e)) { 3360 i40e_error(i40e, "Failed to map interrupts to vectors."); 3361 goto attach_fail; 3362 } 3363 3364 if (!i40e_add_intr_handlers(i40e)) { 3365 i40e_error(i40e, "Failed to add the interrupt handlers."); 3366 goto attach_fail; 3367 } 3368 i40e->i40e_attach_progress |= I40E_ATTACH_ADD_INTR; 3369 3370 if (!i40e_final_init(i40e)) { 3371 i40e_error(i40e, "Final initialization failed."); 3372 goto attach_fail; 3373 } 3374 i40e->i40e_attach_progress |= I40E_ATTACH_INIT; 3375 3376 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) != 3377 DDI_FM_OK) { 3378 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST); 3379 goto attach_fail; 3380 } 3381 3382 if (!i40e_stats_init(i40e)) { 3383 i40e_error(i40e, "Stats initialization failed."); 3384 goto attach_fail; 3385 } 3386 i40e->i40e_attach_progress |= I40E_ATTACH_STATS; 3387 3388 if (!i40e_register_mac(i40e)) { 3389 i40e_error(i40e, "Failed to register to MAC/GLDv3"); 3390 goto attach_fail; 3391 } 3392 i40e->i40e_attach_progress |= I40E_ATTACH_MAC; 3393 3394 i40e->i40e_periodic_id = ddi_periodic_add(i40e_timer, i40e, 3395 I40E_CYCLIC_PERIOD, DDI_IPL_0); 3396 if (i40e->i40e_periodic_id == 0) { 3397 i40e_error(i40e, "Failed to add the link-check timer"); 3398 goto attach_fail; 3399 } 3400 i40e->i40e_attach_progress |= I40E_ATTACH_LINK_TIMER; 3401 3402 if (!i40e_enable_interrupts(i40e)) { 3403 i40e_error(i40e, "Failed to enable DDI interrupts"); 3404 goto attach_fail; 3405 } 3406 i40e->i40e_attach_progress |= I40E_ATTACH_ENABLE_INTR; 3407 3408 if (ddi_ufm_init(i40e->i40e_dip, DDI_UFM_CURRENT_VERSION, &i40e_ufm_ops, 3409 &i40e->i40e_ufmh, i40e) != 0) { 3410 i40e_error(i40e, "failed to initialize UFM subsystem"); 3411 goto attach_fail; 3412 } 3413 ddi_ufm_update(i40e->i40e_ufmh); 3414 
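	/*
	 * The update call above lets the UFM subsystem know that the
	 * firmware information exposed through i40e_ufm_ops is current;
	 * the framework calls back into those ops when it needs the data.
	 */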
i40e->i40e_attach_progress |= I40E_ATTACH_UFM_INIT; 3415 3416 atomic_or_32(&i40e->i40e_state, I40E_INITIALIZED); 3417 3418 mutex_enter(&i40e_glock); 3419 list_insert_tail(&i40e_glist, i40e); 3420 mutex_exit(&i40e_glock); 3421 3422 return (DDI_SUCCESS); 3423 3424 attach_fail: 3425 i40e_unconfigure(devinfo, i40e); 3426 return (DDI_FAILURE); 3427 } 3428 3429 static int 3430 i40e_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd) 3431 { 3432 i40e_t *i40e; 3433 3434 if (cmd != DDI_DETACH) 3435 return (DDI_FAILURE); 3436 3437 i40e = (i40e_t *)ddi_get_driver_private(devinfo); 3438 if (i40e == NULL) { 3439 i40e_log(NULL, "i40e_detach() called with no i40e pointer!"); 3440 return (DDI_FAILURE); 3441 } 3442 3443 if (i40e_drain_rx(i40e) == B_FALSE) { 3444 i40e_log(i40e, "timed out draining DMA resources, %d buffers " 3445 "remain", i40e->i40e_rx_pending); 3446 return (DDI_FAILURE); 3447 } 3448 3449 mutex_enter(&i40e_glock); 3450 list_remove(&i40e_glist, i40e); 3451 mutex_exit(&i40e_glock); 3452 3453 i40e_unconfigure(devinfo, i40e); 3454 3455 return (DDI_SUCCESS); 3456 } 3457 3458 static struct cb_ops i40e_cb_ops = { 3459 nulldev, /* cb_open */ 3460 nulldev, /* cb_close */ 3461 nodev, /* cb_strategy */ 3462 nodev, /* cb_print */ 3463 nodev, /* cb_dump */ 3464 nodev, /* cb_read */ 3465 nodev, /* cb_write */ 3466 nodev, /* cb_ioctl */ 3467 nodev, /* cb_devmap */ 3468 nodev, /* cb_mmap */ 3469 nodev, /* cb_segmap */ 3470 nochpoll, /* cb_chpoll */ 3471 ddi_prop_op, /* cb_prop_op */ 3472 NULL, /* cb_stream */ 3473 D_MP | D_HOTPLUG, /* cb_flag */ 3474 CB_REV, /* cb_rev */ 3475 nodev, /* cb_aread */ 3476 nodev /* cb_awrite */ 3477 }; 3478 3479 static struct dev_ops i40e_dev_ops = { 3480 DEVO_REV, /* devo_rev */ 3481 0, /* devo_refcnt */ 3482 NULL, /* devo_getinfo */ 3483 nulldev, /* devo_identify */ 3484 nulldev, /* devo_probe */ 3485 i40e_attach, /* devo_attach */ 3486 i40e_detach, /* devo_detach */ 3487 nodev, /* devo_reset */ 3488 &i40e_cb_ops, /* devo_cb_ops */ 3489 NULL, /* devo_bus_ops */ 3490 ddi_power, /* devo_power */ 3491 ddi_quiesce_not_supported /* devo_quiesce */ 3492 }; 3493 3494 static struct modldrv i40e_modldrv = { 3495 &mod_driverops, 3496 i40e_ident, 3497 &i40e_dev_ops 3498 }; 3499 3500 static struct modlinkage i40e_modlinkage = { 3501 MODREV_1, 3502 &i40e_modldrv, 3503 NULL 3504 }; 3505 3506 /* 3507 * Module Initialization Functions. 3508 */ 3509 int 3510 _init(void) 3511 { 3512 int status; 3513 3514 list_create(&i40e_glist, sizeof (i40e_t), offsetof(i40e_t, i40e_glink)); 3515 list_create(&i40e_dlist, sizeof (i40e_device_t), 3516 offsetof(i40e_device_t, id_link)); 3517 mutex_init(&i40e_glock, NULL, MUTEX_DRIVER, NULL); 3518 mac_init_ops(&i40e_dev_ops, I40E_MODULE_NAME); 3519 3520 status = mod_install(&i40e_modlinkage); 3521 if (status != DDI_SUCCESS) { 3522 mac_fini_ops(&i40e_dev_ops); 3523 mutex_destroy(&i40e_glock); 3524 list_destroy(&i40e_dlist); 3525 list_destroy(&i40e_glist); 3526 } 3527 3528 return (status); 3529 } 3530 3531 int 3532 _info(struct modinfo *modinfop) 3533 { 3534 return (mod_info(&i40e_modlinkage, modinfop)); 3535 } 3536 3537 int 3538 _fini(void) 3539 { 3540 int status; 3541 3542 status = mod_remove(&i40e_modlinkage); 3543 if (status == DDI_SUCCESS) { 3544 mac_fini_ops(&i40e_dev_ops); 3545 mutex_destroy(&i40e_glock); 3546 list_destroy(&i40e_dlist); 3547 list_destroy(&i40e_glist); 3548 } 3549 3550 return (status); 3551 }
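
/*
 * Illustrative sketch only: the tunables consumed by i40e_get_prop() in
 * i40e_init_properties() can be overridden from i40e.conf using standard
 * driver.conf property syntax. The values shown here are hypothetical
 * examples, not recommendations; the supported minimums, maximums, and
 * defaults live in i40e_sw.h.
 *
 *	default_mtu = 1500;
 *	tx_ring_size = 1024;
 *	rx_ring_size = 1024;
 *	rx_limit_per_intr = 256;
 *	tx_lso_enable = 1;
 *	mr_enable = 1;
 */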