/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
 * Copyright 2016 Joyent, Inc.
 */

/*
 * i40e - Intel 10/40 Gb Ethernet driver
 *
 * The i40e driver is the main software device driver for the Intel 40 Gb family
 * of devices. Note that these devices come in many flavors with both 40 GbE
 * ports and 10 GbE ports. This device is the successor to the 82599 family of
 * devices (ixgbe).
 *
 * Unlike previous generations of Intel 1 GbE and 10 GbE devices, the 40 GbE
 * devices defined in the XL710 controller (previously known as Fortville) are a
 * rather different beast and have a small switch embedded inside of them. In
 * addition, the way that most of the programming is done has been overhauled.
 * As opposed to just using PCIe memory mapped registers, it also has an
 * administrative queue which is used to communicate with firmware running on
 * the chip.
 *
 * Each physical function in the hardware shows up as a device that this driver
 * will bind to. The hardware splits many resources evenly across all of the
 * physical functions present on the device, while other resources are instead
 * shared across the entire card and it's up to the device driver to
 * intelligently partition them.
 *
 * ------------
 * Organization
 * ------------
 *
 * This driver is made up of several files which have their own theory
 * statements spread across them. We'll touch on the high level purpose of each
 * file here, and then we'll get into more discussion on how the device is
 * generally modelled with respect to the interfaces in illumos.
 *
 * i40e_gld.c: This file contains all of the bindings to MAC and the networking
 *             stack.
 *
 * i40e_intr.c: This file contains all of the interrupt service routines and
 *              contains logic to enable and disable interrupts on the hardware.
 *              It also contains the logic to map hardware resources such as the
 *              rings to and from interrupts and controls their ability to fire.
 *
 *              There is a big theory statement on interrupts present there.
 *
 * i40e_main.c: The file that you're currently in. It interfaces with the
 *              traditional OS DDI interfaces and is in charge of configuring
 *              the device.
 *
 * i40e_osdep.[ch]: These files contain interfaces and definitions needed to
 *                  work with Intel's common code for the device.
 *
 * i40e_stats.c: This file contains the general work and logic around our
 *               kstats. A theory statement on their organization and use of the
 *               hardware exists there.
 *
 * i40e_sw.h: This header file contains all of the primary structure definitions
 *            and constants that are used across the entire driver.
 *
 * i40e_transceiver.c: This file contains all of the logic for sending and
 *                     receiving data. It contains all of the ring and DMA
 *                     allocation logic, as well as, the actual interfaces to
 *                     send and receive data.
 *
 *                     A big theory statement on ring management, descriptors,
 *                     and how it ties into the OS is present there.
 *
 * --------------
 * General Design
 * --------------
 *
 * Before we go too far into the general way we've laid out data structures and
 * the like, it's worth taking some time to explain how the hardware is
 * organized. This organization informs a lot of how we do things at this time
 * in the driver.
 *
 * Each physical device consists of a number of one or more ports, which are
 * considered physical functions in the PCI sense and thus each get enumerated
 * by the system, resulting in an instance being created and attached to. While
 * there are many resources that are unique to each physical function e.g.
 * instance of the device, there are many that are shared across all of them.
 * Several resources have an amount reserved for each Virtual Station Interface
 * (VSI) and then a static pool of resources, available for all functions on the
 * card.
 *
 * The most important resource in hardware are its transmit and receive queue
 * pairs (i40e_trqpair_t). These should be thought of as rings in GLDv3
 * parlance. There are a set number of these on each device; however, they are
 * statically partitioned among all of the different physical functions.
 *
 * 'Fortville' (the code name for this device family) is basically a switch. To
 * map MAC addresses and other things to queues, we end up having to create
 * Virtual Station Interfaces (VSIs) and establish forwarding rules that direct
 * traffic to a queue. A VSI owns a collection of queues and has a series of
 * forwarding rules that point to it. One way to think of this is to treat it
 * like MAC does a VNIC. When MAC refers to a group, a collection of rings and
 * classification resources, that is a VSI in i40e.
 *
 * The set of VSIs is shared across the entire device, though there may be some
 * amount that are reserved to each PF. Because the GLDv3 does not let us change
 * the number of groups dynamically, we instead statically divide this amount
 * evenly between all the functions that exist. In addition, we have the same
 * problem with the mac address forwarding rules. There are a static number that
 * exist shared across all the functions.
 *
 * To handle both of these resources, what we end up doing is going through and
 * determining which functions belong to the same device. Nominally one might do
 * this by having a nexus driver; however, a prime requirement for a nexus
 * driver is identifying the various children and activating them. While it is
 * possible to get this information from NVRAM, we would end up duplicating a
 * lot of the PCI enumeration logic. Really, at the end of the day, the device
 * doesn't give us the traditional identification properties we want from a
 * nexus driver.
 *
 * Instead, we rely on some properties that are guaranteed to be unique. While
 * it might be tempting to leverage the PBA or serial number of the device from
 * NVRAM, there is nothing that says that two devices can't be mis-programmed to
 * have the same values in NVRAM. Instead, we uniquely identify a group of
 * functions based on their parent in the /devices tree, their PCI bus and PCI
 * function identifiers. Using either on their own may not be sufficient.
 *
 * For each unique PCI device that we encounter, we'll create a i40e_device_t.
 * From there, because we don't have a good way to tell the GLDv3 about sharing
 * resources between everything, we'll end up just dividing the resources
 * evenly between all of the functions. Longer term, if we don't have to declare
 * to the GLDv3 that these resources are shared, then we'll maintain a pool and
 * have each PF allocate from the pool in the device, thus if only two of four
 * ports are being used, for example, then all of the resources can still be
 * used.
 *
 * -------------------------------------------
 * Transmit and Receive Queue Pair Allocations
 * -------------------------------------------
 *
 * NVRAM ends up assigning each PF its own share of the transmit and receive LAN
 * queue pairs, we have no way of modifying it, only observing it. From there,
 * it's up to us to map these queues to VSIs and VFs. Since we don't support any
 * VFs at this time, we only focus on assignments to VSIs.
 *
 * At the moment, we used a static mapping of transmit/receive queue pairs to a
 * given VSI (e.g. rings to a group). Though in the fullness of time, we want to
 * make this something which is fully dynamic and take advantage of documented,
 * but not yet available functionality for adding filters based on VXLAN and
 * other encapsulation technologies.
 *
 * -------------------------------------
 * Broadcast, Multicast, and Promiscuous
 * -------------------------------------
 *
 * As part of the GLDv3, we need to make sure that we can handle receiving
 * broadcast and multicast traffic, as well as enabling promiscuous mode when
 * requested. GLDv3 requires that all broadcast and multicast traffic be
 * retrieved by the default group, e.g. the first one. This is the same thing as
 * the default VSI.
 *
 * To receive broadcast traffic, we enable it through the admin queue, rather
 * than use one of our filters for it. For multicast traffic, we reserve a
 * certain number of the hash filters and assign them to a given PF. When we
 * exceed those, we then switch to using promiscuous mode for multicast traffic.
 *
 * More specifically, once we exceed the number of filters (indicated because
 * the i40e_t`i40e_resources.ifr_nmcastfilt ==
 * i40e_t`i40e_resources.ifr_nmcastfilt_used), we then instead need to toggle
 * promiscuous mode. If promiscuous mode is toggled then we keep track of the
 * number of MACs added to it by incrementing i40e_t`i40e_mcast_promisc_count.
 * That will stay enabled until that count reaches zero indicating that we have
 * only added multicast addresses that we have a corresponding entry for.
180 * 181 * Because MAC itself wants to toggle promiscuous mode, which includes both 182 * unicast and multicast traffic, we go through and keep track of that 183 * ourselves. That is maintained through the use of the i40e_t`i40e_promisc_on 184 * member. 185 * 186 * -------------- 187 * VSI Management 188 * -------------- 189 * 190 * At this time, we currently only support a single MAC group, and thus a single 191 * VSI. This VSI is considered the default VSI and should be the only one that 192 * exists after a reset. Currently it is stored as the member 193 * i40e_t`i40e_vsi_id. While this works for the moment and for an initial 194 * driver, it's not sufficient for the longer-term path of the driver. Instead, 195 * we'll want to actually have a unique i40e_vsi_t structure which is used 196 * everywhere. Note that this means that every place that uses the 197 * i40e_t`i40e_vsi_id will need to be refactored. 198 * 199 * ---------------- 200 * Structure Layout 201 * ---------------- 202 * 203 * The following images relates the core data structures together. The primary 204 * structure in the system is the i40e_t. It itself contains multiple rings, 205 * i40e_trqpair_t's which contain the various transmit and receive data. The 206 * receive data is stored outside of the i40e_trqpair_t and instead in the 207 * i40e_rx_data_t. The i40e_t has a corresponding i40e_device_t which keeps 208 * track of per-physical device state. Finally, for every active descriptor, 209 * there is a corresponding control block, which is where the 210 * i40e_rx_control_block_t and the i40e_tx_control_block_t come from. 211 * 212 * +-----------------------+ +-----------------------+ 213 * | Global i40e_t list | | Global Device list | 214 * | | +--| | 215 * | i40e_glist | | | i40e_dlist | 216 * +-----------------------+ | +-----------------------+ 217 * | v 218 * | +------------------------+ +-----------------------+ 219 * | | Device-wide Structure |----->| Device-wide Structure |--> ... 
220 * | | i40e_device_t | | i40e_device_t | 221 * | | | +-----------------------+ 222 * | | dev_info_t * ------+--> Parent in devices tree. 223 * | | uint_t ------+--> PCI bus number 224 * | | uint_t ------+--> PCI device number 225 * | | uint_t ------+--> Number of functions 226 * | | i40e_switch_rsrcs_t ---+--> Captured total switch resources 227 * | | list_t ------+-------------+ 228 * | +------------------------+ | 229 * | ^ | 230 * | +--------+ | 231 * | | v 232 * | +---------------------------+ | +-------------------+ 233 * +->| GLDv3 Device, per PF |-----|-->| GLDv3 Device (PF) |--> ... 234 * | i40e_t | | | i40e_t | 235 * | **Primary Structure** | | +-------------------+ 236 * | | | 237 * | i40e_device_t * --+-----+ 238 * | i40e_state_t --+---> Device State 239 * | i40e_hw_t --+---> Intel common code structure 240 * | mac_handle_t --+---> GLDv3 handle to MAC 241 * | ddi_periodic_t --+---> Link activity timer 242 * | int (vsi_id) --+---> VSI ID, main identifier 243 * | i40e_func_rsrc_t --+---> Available hardware resources 244 * | i40e_switch_rsrc_t * --+---> Switch resource snapshot 245 * | i40e_sdu --+---> Current MTU 246 * | i40e_frame_max --+---> Current HW frame size 247 * | i40e_uaddr_t * --+---> Array of assigned unicast MACs 248 * | i40e_maddr_t * --+---> Array of assigned multicast MACs 249 * | i40e_mcast_promisccount --+---> Active multicast state 250 * | i40e_promisc_on --+---> Current promiscuous mode state 251 * | int --+---> Number of transmit/receive pairs 252 * | kstat_t * --+---> PF kstats 253 * | kstat_t * --+---> VSI kstats 254 * | i40e_pf_stats_t --+---> PF kstat backing data 255 * | i40e_vsi_stats_t --+---> VSI kstat backing data 256 * | i40e_trqpair_t * --+---------+ 257 * +---------------------------+ | 258 * | 259 * v 260 * +-------------------------------+ +-----------------------------+ 261 * | Transmit/Receive Queue Pair |-------| Transmit/Receive Queue Pair |->... 
262 * | i40e_trqpair_t | | i40e_trqpair_t | 263 * + Ring Data Structure | +-----------------------------+ 264 * | | 265 * | mac_ring_handle_t +--> MAC RX ring handle 266 * | mac_ring_handle_t +--> MAC TX ring handle 267 * | i40e_rxq_stat_t --+--> RX Queue stats 268 * | i40e_txq_stat_t --+--> TX Queue stats 269 * | uint32_t (tx ring size) +--> TX Ring Size 270 * | uint32_t (tx free list size) +--> TX Free List Size 271 * | i40e_dma_buffer_t --------+--> TX Descriptor ring DMA 272 * | i40e_tx_desc_t * --------+--> TX descriptor ring 273 * | volatile unt32_t * +--> TX Write back head 274 * | uint32_t -------+--> TX ring head 275 * | uint32_t -------+--> TX ring tail 276 * | uint32_t -------+--> Num TX desc free 277 * | i40e_tx_control_block_t * --+--> TX control block array ---+ 278 * | i40e_tx_control_block_t ** --+--> TCB work list ----+ 279 * | i40e_tx_control_block_t ** --+--> TCB free list ---+ 280 * | uint32_t -------+--> Free TCB count | 281 * | i40e_rx_data_t * -------+--+ v 282 * +-------------------------------+ | +---------------------------+ 283 * | | Per-TX Frame Metadata | 284 * | | i40e_tx_control_block_t | 285 * +--------------------+ | | 286 * | mblk to transmit <--+--- mblk_t * | 287 * | type of transmit <--+--- i40e_tx_type_t | 288 * | TX DMA handle <--+--- ddi_dma_handle_t | 289 * v TX DMA buffer <--+--- i40e_dma_buffer_t | 290 * +------------------------------+ +---------------------------+ 291 * | Core Receive Data | 292 * | i40e_rx_data_t | 293 * | | 294 * | i40e_dma_buffer_t --+--> RX descriptor DMA Data 295 * | i40e_rx_desc_t --+--> RX descriptor ring 296 * | uint32_t --+--> Next free desc. 
297 * | i40e_rx_control_block_t * --+--> RX Control Block Array ---+ 298 * | i40e_rx_control_block_t ** --+--> RCB work list ---+ 299 * | i40e_rx_control_block_t ** --+--> RCB free list ---+ 300 * +------------------------------+ | 301 * ^ | 302 * | +---------------------------+ | 303 * | | Per-RX Frame Metadata |<---------------+ 304 * | | i40e_rx_control_block_t | 305 * | | | 306 * | | mblk_t * ----+--> Received mblk_t data 307 * | | uint32_t ----+--> Reference count 308 * | | i40e_dma_buffer_t ----+--> Receive data DMA info 309 * | | frtn_t ----+--> mblk free function info 310 * +-----+-- i40e_rx_data_t * | 311 * +---------------------------+ 312 * 313 * ------------- 314 * Lock Ordering 315 * ------------- 316 * 317 * In order to ensure that we don't deadlock, the following represents the 318 * lock order being used. When grabbing locks, follow the following order. Lower 319 * numbers are more important. Thus, the i40e_glock which is number 0, must be 320 * taken before any other locks in the driver. On the other hand, the 321 * i40e_t`i40e_stat_lock, has the highest number because it's the least 322 * important lock. Note, that just because one lock is higher than another does 323 * not mean that all intermediary locks are required. 324 * 325 * 0) i40e_glock 326 * 1) i40e_t`i40e_general_lock 327 * 328 * 2) i40e_trqpair_t`itrq_rx_lock 329 * 3) i40e_trqpair_t`itrq_tx_lock 330 * 4) i40e_t`i40e_rx_pending_lock 331 * 5) i40e_trqpair_t`itrq_tcb_lock 332 * 333 * 6) i40e_t`i40e_stat_lock 334 * 335 * Rules and expectations: 336 * 337 * 1) A thread holding locks belong to one PF should not hold locks belonging to 338 * a second. If for some reason this becomes necessary, locks should be grabbed 339 * based on the list order in the i40e_device_t, which implies that the 340 * i40e_glock is held. 341 * 342 * 2) When grabbing locks between multiple transmit and receive queues, the 343 * locks for the lowest number transmit/receive queue should be grabbed first. 
 *
 * 3) When grabbing both the transmit and receive lock for a given queue, always
 * grab i40e_trqpair_t`itrq_rx_lock before the i40e_trqpair_t`itrq_tx_lock.
 *
 * 4) The following pairs of locks are not expected to be held at the same time:
 *
 *  o i40e_t`i40e_rx_pending_lock and i40e_trqpair_t`itrq_tcb_lock
 *
 * -----------
 * Future Work
 * -----------
 *
 * At the moment the i40e_t driver is rather bare bones, allowing us to start
 * getting data flowing and folks using it while we develop additional features.
 * While bugs have been filed to cover this future work, the following gives an
 * overview of expected work:
 *
 *  o TSO support
 *  o RSS / multiple ring support
 *  o Multiple group support
 *  o DMA binding and breaking up the locking in ring recycling.
 *  o Enhanced detection of device errors
 *  o Participation in IRM
 *  o FMA device reset
 *  o Stall detection, temperature error detection, etc.
 *  o More dynamic resource pools
 */

#include "i40e_sw.h"

static char i40e_ident[] = "Intel 10/40Gb Ethernet v1.0.0";

/*
 * The i40e_glock primarily protects the lists below and the i40e_device_t
 * structures.
 */
static kmutex_t i40e_glock;
static list_t i40e_glist;
static list_t i40e_dlist;

/*
 * Access attributes for register mapping.
 */
static ddi_device_acc_attr_t i40e_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Logging function for this driver.
 *
 * Formats fmt/ap into a local buffer and emits it either via cmn_err() (when
 * no i40e_t is available, e.g. very early in attach) or via dev_err() against
 * the instance's dev_info. The leading '!' suppresses console output unless
 * the caller asked for console delivery.
 */
static void
i40e_dev_err(i40e_t *i40e, int level, boolean_t console, const char *fmt,
    va_list ap)
{
	char buf[1024];

	(void) vsnprintf(buf, sizeof (buf), fmt, ap);

	if (i40e == NULL) {
		cmn_err(level, (console) ? "%s: %s" : "!%s: %s",
		    I40E_MODULE_NAME, buf);
	} else {
		dev_err(i40e->i40e_dip, level, (console) ? "%s" : "!%s",
		    buf);
	}
}

/*
 * Because there's the stupid trailing-comma problem with the C preprocessor
 * and variable arguments, I need to instantiate these. Pardon the redundant
 * code.
 */
/*PRINTFLIKE2*/
void
i40e_error(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_WARN, B_FALSE, fmt, ap);
	va_end(ap);
}

/*PRINTFLIKE2*/
void
i40e_log(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_NOTE, B_FALSE, fmt, ap);
	va_end(ap);
}

/*PRINTFLIKE2*/
void
i40e_notice(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_NOTE, B_TRUE, fmt, ap);
	va_end(ap);
}

/*
 * Drop this instance's registration on its shared i40e_device_t. When the last
 * registered instance goes away (id_nreg reaches zero), the device-wide
 * structure and its captured switch resource snapshot are torn down and freed.
 */
static void
i40e_device_rele(i40e_t *i40e)
{
	i40e_device_t *idp = i40e->i40e_device;

	if (idp == NULL)
		return;

	mutex_enter(&i40e_glock);
	VERIFY(idp->id_nreg > 0);
	list_remove(&idp->id_i40e_list, i40e);
	idp->id_nreg--;
	if (idp->id_nreg == 0) {
		list_remove(&i40e_dlist, idp);
		list_destroy(&idp->id_i40e_list);
		kmem_free(idp->id_rsrcs, sizeof (i40e_switch_rsrc_t) *
		    idp->id_rsrcs_alloc);
		kmem_free(idp, sizeof (i40e_device_t));
	}
	i40e->i40e_device = NULL;
	mutex_exit(&i40e_glock);
}

/*
 * Look up the i40e_device_t shared by all PFs that have the same parent
 * dev_info, PCI bus, and PCI device, creating it (and snapshotting this
 * instance's switch resources into it) if this is the first function seen.
 * In either case the instance is registered on the device's list before
 * returning.
 */
static i40e_device_t *
i40e_device_find(i40e_t *i40e, dev_info_t *parent, uint_t bus, uint_t device)
{
	i40e_device_t *idp;
	mutex_enter(&i40e_glock);
	for (idp = list_head(&i40e_dlist); idp != NULL;
	    idp = list_next(&i40e_dlist, idp)) {
		if (idp->id_parent == parent && idp->id_pci_bus == bus &&
		    idp->id_pci_device == device) {
			break;
		}
	}

	if (idp != NULL) {
		VERIFY(idp->id_nreg < idp->id_nfuncs);
		idp->id_nreg++;
	} else {
		i40e_hw_t *hw = &i40e->i40e_hw_space;
		ASSERT(hw->num_ports > 0);
		ASSERT(hw->num_partitions > 0);

		/*
		 * The Intel common code doesn't exactly keep the number of PCI
		 * functions. But it calculates it during discovery of
		 * partitions and ports. So what we do is undo the calculation
		 * that it does originally, as functions are evenly spread
		 * across ports in the rare case of partitions.
		 */
		idp = kmem_alloc(sizeof (i40e_device_t), KM_SLEEP);
		idp->id_parent = parent;
		idp->id_pci_bus = bus;
		idp->id_pci_device = device;
		idp->id_nfuncs = hw->num_ports * hw->num_partitions;
		idp->id_nreg = 1;
		idp->id_rsrcs_alloc = i40e->i40e_switch_rsrc_alloc;
		idp->id_rsrcs_act = i40e->i40e_switch_rsrc_actual;
		idp->id_rsrcs = kmem_alloc(sizeof (i40e_switch_rsrc_t) *
		    idp->id_rsrcs_alloc, KM_SLEEP);
		bcopy(i40e->i40e_switch_rsrcs, idp->id_rsrcs,
		    sizeof (i40e_switch_rsrc_t) * idp->id_rsrcs_alloc);
		list_create(&idp->id_i40e_list, sizeof (i40e_t),
		    offsetof(i40e_t, i40e_dlink));

		list_insert_tail(&i40e_dlist, idp);
	}

	list_insert_tail(&idp->id_i40e_list, i40e);
	mutex_exit(&i40e_glock);

	return (idp);
}

/*
 * Update the cached link state and, only when it actually changes, notify MAC
 * via mac_link_update().
 */
static void
i40e_link_state_set(i40e_t *i40e, link_state_t state)
{
	if (i40e->i40e_link_state == state)
		return;

	i40e->i40e_link_state = state;
	mac_link_update(i40e->i40e_mac_hdl, i40e->i40e_link_state);
}

/*
 * This is a basic link check routine. Mostly we're using this just to see
 * if we can get any accurate information about the state of the link being
 * up or down, as well as updating the link state, speed, etc. information.
 *
 * Callers must hold i40e_general_lock (asserted below).
 */
void
i40e_link_check(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	boolean_t ls;
	int ret;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	/* Force the common code to query firmware rather than use a cache. */
	hw->phy.get_link_info = B_TRUE;
	if ((ret = i40e_get_link_status(hw, &ls)) != I40E_SUCCESS) {
		/* Record the failure for kstats and bail without updating. */
		i40e->i40e_s_link_status_errs++;
		i40e->i40e_s_link_status_lasterr = ret;
		return;
	}

	/*
	 * Firmware abstracts all of the mac and phy information for us, so we
	 * can use i40e_get_link_status to determine the current state.
	 */
	if (ls == B_TRUE) {
		enum i40e_aq_link_speed speed;

		speed = i40e_get_link_speed(hw);

		/*
		 * Translate from an i40e value to a value in Mbits/s.
		 */
		switch (speed) {
		case I40E_LINK_SPEED_100MB:
			i40e->i40e_link_speed = 100;
			break;
		case I40E_LINK_SPEED_1GB:
			i40e->i40e_link_speed = 1000;
			break;
		case I40E_LINK_SPEED_10GB:
			i40e->i40e_link_speed = 10000;
			break;
		case I40E_LINK_SPEED_20GB:
			i40e->i40e_link_speed = 20000;
			break;
		case I40E_LINK_SPEED_40GB:
			i40e->i40e_link_speed = 40000;
			break;
		default:
			i40e->i40e_link_speed = 0;
			break;
		}

		/*
		 * At this time, hardware does not support half-duplex
		 * operation, hence why we don't ask the hardware about our
		 * current speed.
		 */
		i40e->i40e_link_duplex = LINK_DUPLEX_FULL;
		i40e_link_state_set(i40e, LINK_STATE_UP);
	} else {
		i40e->i40e_link_speed = 0;
		i40e->i40e_link_duplex = 0;
		i40e_link_state_set(i40e, LINK_STATE_DOWN);
	}
}

/*
 * Free every allocated interrupt and then release the handle array itself.
 * Failures to free an individual interrupt are logged but not fatal.
 */
static void
i40e_rem_intrs(i40e_t *i40e)
{
	int i, rc;

	for (i = 0; i < i40e->i40e_intr_count; i++) {
		rc = ddi_intr_free(i40e->i40e_intr_handles[i]);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "failed to free interrupt %d: %d",
			    i, rc);
		}
	}

	kmem_free(i40e->i40e_intr_handles, i40e->i40e_intr_size);
	i40e->i40e_intr_handles = NULL;
}

/*
 * Detach the registered handler from each interrupt; the handles themselves
 * are released separately by i40e_rem_intrs().
 */
static void
i40e_rem_intr_handlers(i40e_t *i40e)
{
	int i, rc;

	for (i = 0; i < i40e->i40e_intr_count; i++) {
		rc = ddi_intr_remove_handler(i40e->i40e_intr_handles[i]);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "failed to remove interrupt %d: %d",
			    i, rc);
		}
	}
}

/*
 * illumos Fault Management Architecture (FMA) support.
 */

int
i40e_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}

int
i40e_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}

/*
 * Fault service error handling callback function.
 */
/* ARGSUSED */
static int
i40e_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

/*
 * Set up FMA for this instance: read the "fm_capable" property (clamped to the
 * capabilities this driver actually supports), adjust the register access
 * attributes accordingly, register with the I/O fault services, and pick
 * DMA attributes with or without error reporting.
 */
static void
i40e_fm_init(i40e_t *i40e)
{
	ddi_iblock_cookie_t iblk;

	i40e->i40e_fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY,
	    i40e->i40e_dip, DDI_PROP_DONTPASS, "fm_capable",
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);

	if (i40e->i40e_fm_capabilities < 0) {
		i40e->i40e_fm_capabilities = 0;
	} else if (i40e->i40e_fm_capabilities > 0xf) {
		i40e->i40e_fm_capabilities = DDI_FM_EREPORT_CAPABLE |
		    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
		    DDI_FM_ERRCB_CAPABLE;
	}

	/*
	 * Only register with IO Fault Services if we have some capability
	 */
	if (i40e->i40e_fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		i40e_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		i40e_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (i40e->i40e_fm_capabilities) {
		ddi_fm_init(i40e->i40e_dip, &i40e->i40e_fm_capabilities, &iblk);

		if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
			pci_ereport_setup(i40e->i40e_dip);
		}

		if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
			ddi_fm_handler_register(i40e->i40e_dip,
			    i40e_fm_error_cb, (void*)i40e);
		}
	}

	if (i40e->i40e_fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		i40e_init_dma_attrs(i40e, B_TRUE);
	} else {
		i40e_init_dma_attrs(i40e, B_FALSE);
	}
}

/*
 * Undo i40e_fm_init(): tear down ereport/error-callback registration and
 * detach from the fault management framework, in the reverse order of setup.
 */
static void
i40e_fm_fini(i40e_t *i40e)
{
	if (i40e->i40e_fm_capabilities) {

		if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities))
			pci_ereport_teardown(i40e->i40e_dip);

		if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities))
			ddi_fm_handler_unregister(i40e->i40e_dip);

		ddi_fm_fini(i40e->i40e_dip);
	}
}

/*
 * Post a DDI_FM_DEVICE-class ereport with the given detail suffix, if this
 * instance is ereport-capable.
 */
void
i40e_fm_ereport(i40e_t *i40e, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities)) {
		ddi_fm_ereport_post(i40e->i40e_dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}

/*
 * Here we're trying to get the ID of the default VSI. In general, when we come
 * through and look at this shortly after attach, we expect there to only be a
 * single element present, which is the default VSI. Importantly, each PF seems
 * to not see any other devices, in part because of the simple switch mode that
 * we're using. If for some reason, we see more artifact, we'll need to revisit
 * what we're doing here.
 *
 * Returns the SEID of the sole reported switch element, or -1 on failure or
 * when more than one element is reported.
 */
static int
i40e_get_vsi_id(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	uint8_t aq_buf[I40E_AQ_LARGE_BUF];
	uint16_t next = 0;
	int rc;

	/* LINTED: E_BAD_PTR_CAST_ALIGN */
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next,
	    NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d",
		    rc, hw->aq.asq_last_status);
		return (-1);
	}

	if (LE_16(sw_config->header.num_reported) != 1) {
		i40e_error(i40e, "encountered multiple (%d) switching units "
		    "during attach, not proceeding",
		    LE_16(sw_config->header.num_reported));
		return (-1);
	}

	return (sw_config->element[0].seid);
}

/*
 * We need to fill the i40e_hw_t structure with the capabilities of this PF. We
 * must also provide the memory for it; however, we don't need to keep it around
 * to the call to the common code. It takes it and parses it into an internal
 * structure.
 */
static boolean_t
i40e_get_hw_capabilities(i40e_t *i40e, i40e_hw_t *hw)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	int rc;
	size_t len;
	uint16_t needed;
	int nelems = I40E_HW_CAP_DEFAULT;

	len = nelems * sizeof (*buf);

	for (;;) {
		ASSERT(len > 0);
		buf = kmem_alloc(len, KM_SLEEP);
		rc = i40e_aq_discover_capabilities(hw, buf, len,
		    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
		kmem_free(buf, len);

		/*
		 * On ENOMEM the firmware tells us (via 'needed') how large a
		 * buffer it wants; retry once with that size.
		 * NOTE(review): 'needed' is presumed to be a byte count
		 * reported by the common code — confirm against the Intel
		 * shared-code definition of i40e_aq_discover_capabilities().
		 */
		if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM &&
		    nelems == I40E_HW_CAP_DEFAULT) {
			if (nelems == needed) {
				i40e_error(i40e, "Capability discovery failed "
				    "due to byzantine common code");
				return (B_FALSE);
			}
			len = needed;
			continue;
		} else if (rc != I40E_SUCCESS ||
		    hw->aq.asq_last_status != I40E_AQ_RC_OK) {
			i40e_error(i40e, "Capability discovery failed: %d", rc);
			return (B_FALSE);
		}

		break;
	}

	return (B_TRUE);
}

/*
 * Obtain the switch's capabilities as seen by this PF and keep it around for
 * our later use.
 */
static boolean_t
i40e_get_switch_resources(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint8_t cnt = 2;	/* Initial guess at element count */
	uint8_t act;		/* Actual count reported by firmware */
	size_t size;
	i40e_switch_rsrc_t *buf;

	/*
	 * Grow the request until firmware stops complaining (EINVAL means the
	 * buffer was too small). On success the allocated buffer is handed
	 * off to the i40e_t; on failure it has been freed here.
	 *
	 * NOTE(review): cnt is uint8_t and grows by I40E_SWITCH_CAP_DEFAULT
	 * each pass; if firmware kept returning EINVAL it would eventually
	 * wrap -- presumably firmware converges long before that; confirm.
	 */
	for (;;) {
		enum i40e_status_code ret;
		size = cnt * sizeof (i40e_switch_rsrc_t);
		ASSERT(size > 0);
		if (size > UINT16_MAX)
			return (B_FALSE);
		buf = kmem_alloc(size, KM_SLEEP);

		ret = i40e_aq_get_switch_resource_alloc(hw, &act, buf,
		    cnt, NULL);
		if (ret == I40E_ERR_ADMIN_QUEUE_ERROR &&
		    hw->aq.asq_last_status == I40E_AQ_RC_EINVAL) {
			kmem_free(buf, size);
			cnt += I40E_SWITCH_CAP_DEFAULT;
			continue;
		} else if (ret != I40E_SUCCESS) {
			kmem_free(buf, size);
			i40e_error(i40e,
			    "failed to retrieve switch statistics: %d", ret);
			return (B_FALSE);
		}

		break;
	}

	/* Stash the snapshot; freed later via i40e_cleanup_resources(). */
	i40e->i40e_switch_rsrc_alloc = cnt;
	i40e->i40e_switch_rsrc_actual = act;
	i40e->i40e_switch_rsrcs = buf;

	return (B_TRUE);
}

/*
 * Release everything that i40e_get_available_resources() and friends set up:
 * the unicast/multicast filter tracking arrays, the switch resource snapshot,
 * and our hold on the shared i40e_device_t. Each pointer is NULLed after the
 * free so this is safe to call from partial-attach unwind paths.
 */
static void
i40e_cleanup_resources(i40e_t *i40e)
{
	if (i40e->i40e_uaddrs != NULL) {
		kmem_free(i40e->i40e_uaddrs, sizeof (i40e_uaddr_t) *
		    i40e->i40e_resources.ifr_nmacfilt);
		i40e->i40e_uaddrs = NULL;
	}

	if (i40e->i40e_maddrs != NULL) {
		kmem_free(i40e->i40e_maddrs, sizeof (i40e_maddr_t) *
		    i40e->i40e_resources.ifr_nmcastfilt);
		i40e->i40e_maddrs = NULL;
	}

	if (i40e->i40e_switch_rsrcs != NULL) {
		size_t sz = sizeof (i40e_switch_rsrc_t) *
		    i40e->i40e_switch_rsrc_alloc;
		ASSERT(sz > 0);
		kmem_free(i40e->i40e_switch_rsrcs, sz);
		i40e->i40e_switch_rsrcs = NULL;
	}

	if (i40e->i40e_device != NULL)
		i40e_device_rele(i40e);
}

/*
 * Discover how many VSIs, MAC filters, and multicast filters this PF may use,
 * and how many RX/TX queue pairs firmware granted us. Also allocates the
 * tracking arrays sized from those counts.
 */
static boolean_t
i40e_get_available_resources(i40e_t *i40e)
{
	dev_info_t *parent;
	uint16_t bus, device, func;
	uint_t nregs;
	int *regs, i;
	i40e_device_t *idp;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	parent = ddi_get_parent(i40e->i40e_dip);

	/* Our PCI b/d/f comes from the first entry of the "reg" property. */
	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, i40e->i40e_dip, 0, "reg",
	    &regs, &nregs) != DDI_PROP_SUCCESS) {
		return (B_FALSE);
	}

	if (nregs < 1) {
		ddi_prop_free(regs);
		return (B_FALSE);
	}

	bus = PCI_REG_BUS_G(regs[0]);
	device = PCI_REG_DEV_G(regs[0]);
	func = PCI_REG_FUNC_G(regs[0]);
	ddi_prop_free(regs);

	i40e->i40e_hw_space.bus.func = func;
	i40e->i40e_hw_space.bus.device = device;

	if (i40e_get_switch_resources(i40e) == B_FALSE) {
		return (B_FALSE);
	}

	/*
	 * To calculate the total amount of a resource we have available, we
	 * need to add how many our i40e_t thinks it has guaranteed, if any, and
	 * then we need to go through and divide the number of available on the
	 * device, which was snapshotted before anyone should have allocated
	 * anything, and use that to derive how many are available from the
	 * pool. Longer term, we may want to turn this into something that's
	 * more of a pool-like resource that everything can share (though that
	 * may require some more assistance from MAC).
	 *
	 * Though for transmit and receive queue pairs, we just have to ask
	 * firmware instead.
	 */
	idp = i40e_device_find(i40e, parent, bus, device);
	i40e->i40e_device = idp;
	i40e->i40e_resources.ifr_nvsis = 0;
	i40e->i40e_resources.ifr_nvsis_used = 0;
	i40e->i40e_resources.ifr_nmacfilt = 0;
	i40e->i40e_resources.ifr_nmacfilt_used = 0;
	i40e->i40e_resources.ifr_nmcastfilt = 0;
	i40e->i40e_resources.ifr_nmcastfilt_used = 0;

	/* First pass: sum up what this PF is guaranteed. */
	for (i = 0; i < i40e->i40e_switch_rsrc_actual; i++) {
		i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];

		switch (srp->resource_type) {
		case I40E_AQ_RESOURCE_TYPE_VSI:
			i40e->i40e_resources.ifr_nvsis +=
			    LE_16(srp->guaranteed);
			i40e->i40e_resources.ifr_nvsis_used = LE_16(srp->used);
			break;
		case I40E_AQ_RESOURCE_TYPE_MACADDR:
			i40e->i40e_resources.ifr_nmacfilt +=
			    LE_16(srp->guaranteed);
			i40e->i40e_resources.ifr_nmacfilt_used =
			    LE_16(srp->used);
			break;
		case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
			i40e->i40e_resources.ifr_nmcastfilt +=
			    LE_16(srp->guaranteed);
			i40e->i40e_resources.ifr_nmcastfilt_used =
			    LE_16(srp->used);
			break;
		default:
			break;
		}
	}

	/*
	 * Second pass: split the device-wide unallocated pool evenly across
	 * the physical functions and add our share.
	 *
	 * NOTE(review): the loop bound comes from the device-wide snapshot
	 * (idp->id_rsrcs_act) while the array indexed is this PF's own list
	 * -- presumably the two are always the same length; confirm.
	 */
	for (i = 0; i < idp->id_rsrcs_act; i++) {
		i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];
		switch (srp->resource_type) {
		case I40E_AQ_RESOURCE_TYPE_VSI:
			i40e->i40e_resources.ifr_nvsis +=
			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
			break;
		case I40E_AQ_RESOURCE_TYPE_MACADDR:
			i40e->i40e_resources.ifr_nmacfilt +=
			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
			break;
		case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
			i40e->i40e_resources.ifr_nmcastfilt +=
			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
			/* FALLTHROUGH */
		default:
			break;
		}
	}

	i40e->i40e_resources.ifr_nrx_queue = hw->func_caps.num_rx_qp;
	i40e->i40e_resources.ifr_ntx_queue = hw->func_caps.num_tx_qp;

	i40e->i40e_uaddrs = kmem_zalloc(sizeof (i40e_uaddr_t) *
	    i40e->i40e_resources.ifr_nmacfilt, KM_SLEEP);
	i40e->i40e_maddrs = kmem_zalloc(sizeof (i40e_maddr_t) *
	    i40e->i40e_resources.ifr_nmcastfilt, KM_SLEEP);

	/*
	 * Initialize these as multicast addresses to indicate it's invalid for
	 * sanity purposes. Think of it like 0xdeadbeef.
	 */
	for (i = 0; i < i40e->i40e_resources.ifr_nmacfilt; i++)
		i40e->i40e_uaddrs[i].iua_mac[0] = 0x01;

	return (B_TRUE);
}

/*
 * Enable all allocated interrupt vectors, using block-enable when the
 * capability is present and unwinding any partial enable on failure.
 */
static boolean_t
i40e_enable_interrupts(i40e_t *i40e)
{
	int i, rc;

	if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
		rc = ddi_intr_block_enable(i40e->i40e_intr_handles,
		    i40e->i40e_intr_count);
		if (rc != DDI_SUCCESS) {
			i40e_error(i40e, "Interrupt block-enable failed: %d",
			    rc);
			return (B_FALSE);
		}
	} else {
		for (i = 0; i < i40e->i40e_intr_count; i++) {
			rc = ddi_intr_enable(i40e->i40e_intr_handles[i]);
			if (rc != DDI_SUCCESS) {
				i40e_error(i40e,
				    "Failed to enable interrupt %d: %d", i, rc);
				/* Roll back the vectors already enabled. */
				while (--i >= 0) {
					(void) ddi_intr_disable(
					    i40e->i40e_intr_handles[i]);
				}
				return (B_FALSE);
			}
		}
	}

	return (B_TRUE);
}

/*
 * Mirror of i40e_enable_interrupts(): disable every vector, block-wise if
 * possible. Stops at the first failure without unwinding.
 */
static boolean_t
i40e_disable_interrupts(i40e_t *i40e)
{
	int i, rc;

	if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
		rc = ddi_intr_block_disable(i40e->i40e_intr_handles,
		    i40e->i40e_intr_count);
		if (rc != DDI_SUCCESS) {
			i40e_error(i40e,
			    "Interrupt block-disabled failed: %d", rc);
			return (B_FALSE);
		}
	} else {
		for (i = 0; i < i40e->i40e_intr_count; i++) {
			rc = ddi_intr_disable(i40e->i40e_intr_handles[i]);
			if (rc != DDI_SUCCESS) {
				i40e_error(i40e,
				    "Failed to disable interrupt %d: %d",
				    i, rc);
				return (B_FALSE);
			}
		}
	}

	return (B_TRUE);
}

/*
 * Free receive & transmit rings.
 */
static void
i40e_free_trqpairs(i40e_t *i40e)
{
	int i;
	i40e_trqpair_t *itrq;

	if (i40e->i40e_trqpairs != NULL) {
		for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
			itrq = &i40e->i40e_trqpairs[i];
			mutex_destroy(&itrq->itrq_rx_lock);
			mutex_destroy(&itrq->itrq_tx_lock);
			mutex_destroy(&itrq->itrq_tcb_lock);

			/*
			 * Should have already been cleaned up by start/stop,
			 * etc.
			 */
			ASSERT(itrq->itrq_txkstat == NULL);
			ASSERT(itrq->itrq_rxkstat == NULL);
		}

		kmem_free(i40e->i40e_trqpairs,
		    sizeof (i40e_trqpair_t) * i40e->i40e_num_trqpairs);
		i40e->i40e_trqpairs = NULL;
	}

	/* These were set up by i40e_alloc_trqpairs(); tear down in reverse. */
	cv_destroy(&i40e->i40e_rx_pending_cv);
	mutex_destroy(&i40e->i40e_rx_pending_lock);
	mutex_destroy(&i40e->i40e_general_lock);
}

/*
 * Allocate transmit and receive rings, as well as other data structures that we
 * need.
 *
 * Must be called after interrupt allocation, since every mutex is initialized
 * at our interrupt priority. Currently always returns B_TRUE (KM_SLEEP
 * allocation cannot fail); undone by i40e_free_trqpairs().
 */
static boolean_t
i40e_alloc_trqpairs(i40e_t *i40e)
{
	int i;
	void *mutexpri = DDI_INTR_PRI(i40e->i40e_intr_pri);

	/*
	 * Now that we have the priority for the interrupts, initialize
	 * all relevant locks.
	 */
	mutex_init(&i40e->i40e_general_lock, NULL, MUTEX_DRIVER, mutexpri);
	mutex_init(&i40e->i40e_rx_pending_lock, NULL, MUTEX_DRIVER, mutexpri);
	cv_init(&i40e->i40e_rx_pending_cv, NULL, CV_DRIVER, NULL);

	i40e->i40e_trqpairs = kmem_zalloc(sizeof (i40e_trqpair_t) *
	    i40e->i40e_num_trqpairs, KM_SLEEP);
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];

		itrq->itrq_i40e = i40e;
		mutex_init(&itrq->itrq_rx_lock, NULL, MUTEX_DRIVER, mutexpri);
		mutex_init(&itrq->itrq_tx_lock, NULL, MUTEX_DRIVER, mutexpri);
		mutex_init(&itrq->itrq_tcb_lock, NULL, MUTEX_DRIVER, mutexpri);
		itrq->itrq_index = i;
	}

	return (B_TRUE);
}



/*
 * Unless a .conf file already overrode i40e_t structure values, they will
 * be 0, and need to be set in conjunction with the now-available HW report.
 *
 * However, at the moment, we cap all of these resources as we only support a
 * single receive ring and a single group.
 */
/* ARGSUSED */
static void
i40e_hw_to_instance(i40e_t *i40e, i40e_hw_t *hw)
{
	if (i40e->i40e_num_trqpairs == 0) {
		i40e->i40e_num_trqpairs = I40E_TRQPAIR_MAX;
	}

	if (i40e->i40e_num_rx_groups == 0) {
		i40e->i40e_num_rx_groups = I40E_GROUP_MAX;
	}
}

/*
 * Free any resources required by, or setup by, the Intel common code.
 */
static void
i40e_common_code_fini(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	int rc;

	/* Failures here are logged but not fatal: we're tearing down anyway. */
	rc = i40e_shutdown_lan_hmc(hw);
	if (rc != I40E_SUCCESS)
		i40e_error(i40e, "failed to shutdown LAN hmc: %d", rc);

	rc = i40e_shutdown_adminq(hw);
	if (rc != I40E_SUCCESS)
		i40e_error(i40e, "failed to shutdown admin queue: %d", rc);
}

/*
 * Initialize and call Intel common-code routines, includes some setup
 * the common code expects from the driver. Also prints on failure, so
 * the caller doesn't have to.
 *
 * Sequence: PF reset, shared code init, admin queue bring-up, firmware API
 * version sanity check, capability/resource discovery, LAN HMC setup, and
 * finally MAC address and VSI ID retrieval. Returns B_FALSE on the first
 * failure; i40e_common_code_fini() handles the unwind from the caller.
 */
static boolean_t
i40e_common_code_init(i40e_t *i40e, i40e_hw_t *hw)
{
	int rc;

	i40e_clear_hw(hw);
	rc = i40e_pf_reset(hw);
	if (rc != 0) {
		i40e_error(i40e, "failed to reset hardware: %d", rc);
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_NO_RESPONSE);
		return (B_FALSE);
	}

	rc = i40e_init_shared_code(hw);
	if (rc != 0) {
		i40e_error(i40e, "failed to initialize i40e core: %d", rc);
		return (B_FALSE);
	}

	/* Admin queue geometry must be set before i40e_init_adminq(). */
	hw->aq.num_arq_entries = I40E_DEF_ADMINQ_SIZE;
	hw->aq.num_asq_entries = I40E_DEF_ADMINQ_SIZE;
	hw->aq.arq_buf_size = I40E_ADMINQ_BUFSZ;
	hw->aq.asq_buf_size = I40E_ADMINQ_BUFSZ;

	rc = i40e_init_adminq(hw);
	if (rc != 0) {
		i40e_error(i40e, "failed to initialize firmware admin queue: "
		    "%d, potential firmware version mismatch", rc);
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
		return (B_FALSE);
	}

	/* Warn (but proceed) when the NVM API is newer or older than ours. */
	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
		i40e_notice(i40e, "The driver for the device detected a newer "
		    "version of the NVM image (%d.%d) than expected (%d.%d).\n"
		    "Please install the most recent version of the network "
		    "driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver,
		    I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
	} else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) {
		i40e_notice(i40e, "The driver for the device detected an older"
		    " version of the NVM image (%d.%d) than expected (%d.%d)."
		    "\nPlease update the NVM image.\n",
		    hw->aq.api_maj_ver, hw->aq.api_min_ver,
		    I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR - 1);
	}

	i40e_clear_pxe_mode(hw);

	/*
	 * We need to call this so that the common code can discover
	 * capabilities of the hardware, which it uses throughout the rest.
	 */
	if (!i40e_get_hw_capabilities(i40e, hw)) {
		i40e_error(i40e, "failed to obtain hardware capabilities");
		return (B_FALSE);
	}

	if (i40e_get_available_resources(i40e) == B_FALSE) {
		i40e_error(i40e, "failed to obtain hardware resources");
		return (B_FALSE);
	}

	i40e_hw_to_instance(i40e, hw);

	rc = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (rc != 0) {
		i40e_error(i40e, "failed to initialize hardware memory cache: "
		    "%d", rc);
		return (B_FALSE);
	}

	rc = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (rc != 0) {
		i40e_error(i40e, "failed to configure hardware memory cache: "
		    "%d", rc);
		return (B_FALSE);
	}

	/* Best-effort: we don't care if stopping LLDP fails. */
	(void) i40e_aq_stop_lldp(hw, TRUE, NULL);

	rc = i40e_get_mac_addr(hw, hw->mac.addr);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to retrieve hardware mac address: %d",
		    rc);
		return (B_FALSE);
	}

	rc = i40e_validate_mac_addr(hw->mac.addr);
	if (rc != 0) {
		i40e_error(i40e, "failed to validate internal mac address: "
		    "%d", rc);
		return (B_FALSE);
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
	if ((rc = i40e_get_port_mac_addr(hw, hw->mac.port_addr)) !=
	    I40E_SUCCESS) {
		i40e_error(i40e, "failed to retrieve port mac address: %d",
		    rc);
		return (B_FALSE);
	}

	/*
	 * We need to obtain the Virtual Station ID (VSI) before we can
	 * perform other operations on the device.
	 */
	i40e->i40e_vsi_id = i40e_get_vsi_id(i40e);
	if (i40e->i40e_vsi_id == -1) {
		i40e_error(i40e, "failed to obtain VSI ID");
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Undo attach in reverse order, consulting i40e_attach_progress to only tear
 * down the stages that actually completed. Safe to call from both a failed
 * attach and a normal detach; frees the i40e_t itself at the end.
 */
static void
i40e_unconfigure(dev_info_t *devinfo, i40e_t *i40e)
{
	int rc;

	if (i40e->i40e_attach_progress & I40E_ATTACH_ENABLE_INTR)
		(void) i40e_disable_interrupts(i40e);

	if ((i40e->i40e_attach_progress & I40E_ATTACH_LINK_TIMER) &&
	    i40e->i40e_periodic_id != 0) {
		ddi_periodic_delete(i40e->i40e_periodic_id);
		i40e->i40e_periodic_id = 0;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_MAC) {
		rc = mac_unregister(i40e->i40e_mac_hdl);
		if (rc != 0) {
			i40e_error(i40e, "failed to unregister from mac: %d",
			    rc);
		}
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_STATS) {
		i40e_stats_fini(i40e);
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_ADD_INTR)
		i40e_rem_intr_handlers(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_RINGSLOCKS)
		i40e_free_trqpairs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_INTR)
		i40e_rem_intrs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_COMMON_CODE)
		i40e_common_code_fini(i40e);

	/* Unconditional: i40e_cleanup_resources() checks each pointer. */
	i40e_cleanup_resources(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_PROPS)
		(void) ddi_prop_remove_all(devinfo);

	if (i40e->i40e_attach_progress & I40E_ATTACH_REGS_MAP &&
	    i40e->i40e_osdep_space.ios_reg_handle != NULL) {
		ddi_regs_map_free(&i40e->i40e_osdep_space.ios_reg_handle);
		i40e->i40e_osdep_space.ios_reg_handle = NULL;
	}

	if ((i40e->i40e_attach_progress & I40E_ATTACH_PCI_CONFIG) &&
	    i40e->i40e_osdep_space.ios_cfg_handle != NULL) {
		pci_config_teardown(&i40e->i40e_osdep_space.ios_cfg_handle);
		i40e->i40e_osdep_space.ios_cfg_handle = NULL;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_FM_INIT)
		i40e_fm_fini(i40e);

	kmem_free(i40e->i40e_aqbuf, I40E_ADMINQ_BUFSZ);
	kmem_free(i40e, sizeof (i40e_t));

	ddi_set_driver_private(devinfo, NULL);
}

/*
 * Last stage of attach: publish identifying properties (PBA, firmware and API
 * versions), record PCIe bus info, and verify register access via FMA.
 */
static boolean_t
i40e_final_init(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = OS_DEP(hw);
	uint8_t pbanum[I40E_PBANUM_STRLEN];
	enum i40e_status_code irc;
	char buf[I40E_DDI_PROP_LEN];

	pbanum[0] = '\0';
	irc = i40e_read_pba_string(hw, pbanum, sizeof (pbanum));
	if (irc != I40E_SUCCESS) {
		/* Non-fatal: we just won't publish the PBA property. */
		i40e_log(i40e, "failed to read PBA string: %d", irc);
	} else {
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
		    "printed-board-assembly", (char *)pbanum);
	}

#ifdef DEBUG
	/* Prove at debug time that the formatted versions fit in buf. */
	ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.fw_maj_ver,
	    hw->aq.fw_min_ver) < sizeof (buf));
	ASSERT(snprintf(NULL, 0, "%x", hw->aq.fw_build) < sizeof (buf));
	ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.api_maj_ver,
	    hw->aq.api_min_ver) < sizeof (buf));
#endif

	(void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.fw_maj_ver,
	    hw->aq.fw_min_ver);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "firmware-version", buf);
	(void) snprintf(buf, sizeof (buf), "%x", hw->aq.fw_build);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "firmware-build", buf);
	(void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.api_maj_ver,
	    hw->aq.api_min_ver);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "api-version", buf);

	if (!i40e_set_hw_bus_info(hw))
		return (B_FALSE);

	if (i40e_check_acc_handle(osdep->ios_reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Read the PCI identity registers into the common-code hw structure and have
 * the common code derive the MAC type from them.
 */
static boolean_t
i40e_identify_hardware(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = &i40e->i40e_osdep_space;

	hw->vendor_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_VENID);
	hw->device_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_DEVID);
	hw->revision_id = pci_config_get8(osdep->ios_cfg_handle,
	    PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Note that we set the hardware's bus information later on, in
	 * i40e_get_available_resources(). The common code doesn't seem to
	 * require that it be set in any ways, it seems to be mostly for
	 * book-keeping.
	 */

	/* Call common code to set the MAC type for this adapter. */
	if (i40e_set_mac_type(hw) != I40E_SUCCESS)
		return (B_FALSE);

	return (B_TRUE);
}

/*
 * Map the device's main BAR (register set I40E_ADAPTER_REGSET) so that the
 * common code can access the hardware through hw->hw_addr.
 */
static boolean_t
i40e_regs_map(i40e_t *i40e)
{
	dev_info_t *devinfo = i40e->i40e_dip;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = &i40e->i40e_osdep_space;
	off_t memsize;
	int ret;

	if (ddi_dev_regsize(devinfo, I40E_ADAPTER_REGSET, &memsize) !=
	    DDI_SUCCESS) {
		i40e_error(i40e, "Used invalid register set to map PCIe regs");
		return (B_FALSE);
	}

	if ((ret = ddi_regs_map_setup(devinfo, I40E_ADAPTER_REGSET,
	    (caddr_t *)&hw->hw_addr, 0, memsize, &i40e_regs_acc_attr,
	    &osdep->ios_reg_handle)) != DDI_SUCCESS) {
		i40e_error(i40e, "failed to map device registers: %d", ret);
		return (B_FALSE);
	}

	osdep->ios_reg_size = memsize;
	return (B_TRUE);
}

/*
 * Update parameters required when a new MTU has been configured. Calculate the
 * maximum frame size, as well as, size our DMA buffers which we size in
 * increments of 1K.
 */
void
i40e_update_mtu(i40e_t *i40e)
{
	uint32_t rx, tx;

	i40e->i40e_frame_max = i40e->i40e_sdu +
	    sizeof (struct ether_vlan_header) + ETHERFCSL;

	/* Round each buffer size up to the next 1 KiB boundary. */
	rx = i40e->i40e_frame_max + I40E_BUF_IPHDR_ALIGNMENT;
	i40e->i40e_rx_buf_size = ((rx >> 10) +
	    ((rx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10;

	tx = i40e->i40e_frame_max;
	i40e->i40e_tx_buf_size = ((tx >> 10) +
	    ((tx & (((uint32_t)1 << 10) -1)) > 0 ? 1 : 0)) << 10;
}

/*
 * Fetch an integer driver property, clamped to [min, max], falling back to
 * def when the property is absent.
 */
static int
i40e_get_prop(i40e_t *i40e, char *prop, int min, int max, int def)
{
	int val;

	val = ddi_prop_get_int(DDI_DEV_T_ANY, i40e->i40e_dip, DDI_PROP_DONTPASS,
	    prop, def);
	if (val > max)
		val = max;
	if (val < min)
		val = min;
	return (val);
}

/*
 * Pull all of the driver's tunables out of the .conf file (or use defaults)
 * and derive the values that depend on them (ring alignment, MTU sizing).
 */
static void
i40e_init_properties(i40e_t *i40e)
{
	i40e->i40e_sdu = i40e_get_prop(i40e, "default_mtu",
	    I40E_MIN_MTU, I40E_MAX_MTU, I40E_DEF_MTU);

	i40e->i40e_intr_force = i40e_get_prop(i40e, "intr_force",
	    I40E_INTR_NONE, I40E_INTR_LEGACY, I40E_INTR_NONE);

	i40e->i40e_mr_enable = i40e_get_prop(i40e, "mr_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	/* Ring sizes must be a multiple of the descriptor alignment. */
	i40e->i40e_tx_ring_size = i40e_get_prop(i40e, "tx_ring_size",
	    I40E_MIN_TX_RING_SIZE, I40E_MAX_TX_RING_SIZE,
	    I40E_DEF_TX_RING_SIZE);
	if ((i40e->i40e_tx_ring_size % I40E_DESC_ALIGN) != 0) {
		i40e->i40e_tx_ring_size = P2ROUNDUP(i40e->i40e_tx_ring_size,
		    I40E_DESC_ALIGN);
	}

	i40e->i40e_tx_block_thresh = i40e_get_prop(i40e, "tx_resched_threshold",
	    I40E_MIN_TX_BLOCK_THRESH,
	    i40e->i40e_tx_ring_size - I40E_TX_MAX_COOKIE,
	    I40E_DEF_TX_BLOCK_THRESH);

	i40e->i40e_rx_ring_size = i40e_get_prop(i40e, "rx_ring_size",
	    I40E_MIN_RX_RING_SIZE, I40E_MAX_RX_RING_SIZE,
	    I40E_DEF_RX_RING_SIZE);
	if ((i40e->i40e_rx_ring_size % I40E_DESC_ALIGN) != 0) {
		i40e->i40e_rx_ring_size = P2ROUNDUP(i40e->i40e_rx_ring_size,
		    I40E_DESC_ALIGN);
	}

	i40e->i40e_rx_limit_per_intr = i40e_get_prop(i40e, "rx_limit_per_intr",
	    I40E_MIN_RX_LIMIT_PER_INTR, I40E_MAX_RX_LIMIT_PER_INTR,
	    I40E_DEF_RX_LIMIT_PER_INTR);

	i40e->i40e_tx_hcksum_enable = i40e_get_prop(i40e, "tx_hcksum_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_rx_hcksum_enable = i40e_get_prop(i40e, "rx_hcksum_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_rx_dma_min = i40e_get_prop(i40e, "rx_dma_threshold",
	    I40E_MIN_RX_DMA_THRESH, I40E_MAX_RX_DMA_THRESH,
	    I40E_DEF_RX_DMA_THRESH);

	i40e->i40e_tx_dma_min = i40e_get_prop(i40e, "tx_dma_threshold",
	    I40E_MIN_TX_DMA_THRESH, I40E_MAX_TX_DMA_THRESH,
	    I40E_DEF_TX_DMA_THRESH);

	i40e->i40e_tx_itr = i40e_get_prop(i40e, "tx_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_TX_ITR);

	i40e->i40e_rx_itr = i40e_get_prop(i40e, "rx_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_RX_ITR);

	i40e->i40e_other_itr = i40e_get_prop(i40e, "other_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_OTHER_ITR);

	if (!i40e->i40e_mr_enable) {
		i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
		i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;
	}

	i40e_update_mtu(i40e);
}

/*
 * There are a few constraints on interrupts that we're currently imposing, some
 * of which are restrictions from hardware. For a fuller treatment, see
 * i40e_intr.c.
 *
 * Currently, to use MSI-X we require two interrupts be available though in
 * theory we should participate in IRM and happily use more interrupts.
 *
 * Hardware only supports a single MSI being programmed and therefore if we
 * don't have MSI-X interrupts available at this time, then we ratchet down the
 * number of rings and groups available. Obviously, we only bother with a single
 * fixed interrupt.
 */
static boolean_t
i40e_alloc_intr_handles(i40e_t *i40e, dev_info_t *devinfo, int intr_type)
{
	/*
	 * NOTE(review): the local 'min' shares a name with the kernel's
	 * min(), which is invoked below as min(request, count) -- this only
	 * works if that is a function-like macro; confirm against systm.h.
	 */
	int request, count, actual, rc, min;

	switch (intr_type) {
	case DDI_INTR_TYPE_FIXED:
	case DDI_INTR_TYPE_MSI:
		request = 1;
		min = 1;
		break;
	case DDI_INTR_TYPE_MSIX:
		/*
		 * At the moment, we always request two MSI-X while we still
		 * only support a single interrupt. The upper bound on what's
		 * supported by a given device is defined by MSI_X_PF_N in
		 * GLPCI_CNF2. When we evolve, we should read it to determine
		 * what the real max is.
		 */
		ASSERT(i40e->i40e_num_trqpairs == 1);
		request = 2;
		min = 2;
		break;
	default:
		panic("bad interrupt type passed to i40e_alloc_intr_handles: "
		    "%d", intr_type);
		return (B_FALSE);
	}

	rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
	if (rc != DDI_SUCCESS || count < min) {
		i40e_log(i40e, "Get interrupt number failed, "
		    "returned %d, count %d", rc, count);
		return (B_FALSE);
	}

	rc = ddi_intr_get_navail(devinfo, intr_type, &count);
	if (rc != DDI_SUCCESS || count < min) {
		i40e_log(i40e, "Get AVAILABLE interrupt number failed, "
		    "returned %d, count %d", rc, count);
		return (B_FALSE);
	}

	actual = 0;
	i40e->i40e_intr_count = 0;
	i40e->i40e_intr_count_max = 0;
	i40e->i40e_intr_count_min = 0;

	i40e->i40e_intr_size = request * sizeof (ddi_intr_handle_t);
	ASSERT(i40e->i40e_intr_size != 0);
	i40e->i40e_intr_handles = kmem_alloc(i40e->i40e_intr_size, KM_SLEEP);

	rc = ddi_intr_alloc(devinfo, i40e->i40e_intr_handles, intr_type, 0,
	    min(request, count), &actual, DDI_INTR_ALLOC_NORMAL);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e, "Interrupt allocation failed with %d.", rc);
		goto alloc_handle_fail;
	}

	i40e->i40e_intr_count = actual;
	i40e->i40e_intr_count_max = request;
	i40e->i40e_intr_count_min = min;

	if (actual < min) {
		i40e_log(i40e, "actual (%d) is less than minimum (%d).",
		    actual, min);
		goto alloc_handle_fail;
	}

	/*
	 * Record the priority and capabilities for our first vector. Once
	 * we have it, that's our priority until detach time. Even if we
	 * eventually participate in IRM, our priority shouldn't change.
	 */
	rc = ddi_intr_get_pri(i40e->i40e_intr_handles[0], &i40e->i40e_intr_pri);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e,
		    "Getting interrupt priority failed with %d.", rc);
		goto alloc_handle_fail;
	}

	rc = ddi_intr_get_cap(i40e->i40e_intr_handles[0], &i40e->i40e_intr_cap);
	if (rc != DDI_SUCCESS) {
		i40e_log(i40e,
		    "Getting interrupt capabilities failed with %d.", rc);
		goto alloc_handle_fail;
	}

	i40e->i40e_intr_type = intr_type;
	return (B_TRUE);

alloc_handle_fail:
	/* i40e_rem_intrs() frees any vectors allocated plus the handle array */
	i40e_rem_intrs(i40e);
	return (B_FALSE);
}

/*
 * Try interrupt types in order of preference (MSI-X, MSI, fixed), honoring
 * any intr_force tunable that caps the allowed type.
 */
static boolean_t
i40e_alloc_intrs(i40e_t *i40e, dev_info_t *devinfo)
{
	int intr_types, rc;

	rc = ddi_intr_get_supported_types(devinfo, &intr_types);
	if (rc != DDI_SUCCESS) {
		i40e_error(i40e, "failed to get supported interrupt types: %d",
		    rc);
		return (B_FALSE);
	}

	i40e->i40e_intr_type = 0;

	if ((intr_types & DDI_INTR_TYPE_MSIX) &&
	    i40e->i40e_intr_force <= I40E_INTR_MSIX) {
		if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSIX))
			return (B_TRUE);
	}

	/*
	 * We only use multiple transmit/receive pairs when MSI-X interrupts are
	 * available due to the fact that the device basically only supports a
	 * single MSI interrupt.
	 */
	i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
	i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;

	if ((intr_types & DDI_INTR_TYPE_MSI) &&
	    (i40e->i40e_intr_force <= I40E_INTR_MSI)) {
		if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSI))
			return (B_TRUE);
	}

	if (intr_types & DDI_INTR_TYPE_FIXED) {
		if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_FIXED))
			return (B_TRUE);
	}

	return (B_FALSE);
}

/*
 * Map different interrupts to MSI-X vectors.
 */
static boolean_t
i40e_map_intrs_to_vectors(i40e_t *i40e)
{
	/* Only MSI-X has more than one vector; nothing to map otherwise. */
	if (i40e->i40e_intr_type != DDI_INTR_TYPE_MSIX) {
		return (B_TRUE);
	}

	/*
	 * At the moment, we only have one queue and one interrupt thus both are
	 * on that one interrupt. However, longer term we need to go back to
	 * using the ixgbe style map of queues to vectors or walk the linked
	 * list from the device to know what to go handle. Therefore for the
	 * moment, since we need to map our single set of rings to the one
	 * I/O interrupt that exists for MSI-X.
	 */
	ASSERT(i40e->i40e_intr_count == 2);
	ASSERT(i40e->i40e_num_trqpairs == 1);

	/* Vector 0 is reserved for non-queue causes; rings go on vector 1. */
	i40e->i40e_trqpairs[0].itrq_rx_intrvec = 1;
	i40e->i40e_trqpairs[0].itrq_tx_intrvec = 1;

	return (B_TRUE);
}

/*
 * Attach the appropriate ISR to every allocated vector based on the interrupt
 * type chosen earlier, unwinding already-added MSI-X handlers on failure.
 */
static boolean_t
i40e_add_intr_handlers(i40e_t *i40e)
{
	int rc, vector;

	switch (i40e->i40e_intr_type) {
	case DDI_INTR_TYPE_MSIX:
		for (vector = 0; vector < i40e->i40e_intr_count; vector++) {
			/* The vector number is passed as the ISR's arg2. */
			rc = ddi_intr_add_handler(
			    i40e->i40e_intr_handles[vector],
			    (ddi_intr_handler_t *)i40e_intr_msix, i40e,
			    (void *)(uintptr_t)vector);
			if (rc != DDI_SUCCESS) {
				i40e_log(i40e, "Add interrupt handler (MSI-X) "
				    "failed: return %d, vector %d", rc, vector);
				for (vector--; vector >= 0; vector--) {
					(void) ddi_intr_remove_handler(
					    i40e->i40e_intr_handles[vector]);
				}
				return (B_FALSE);
			}
		}
		break;
	case DDI_INTR_TYPE_MSI:
		rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
		    (ddi_intr_handler_t *)i40e_intr_msi, i40e, NULL);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "Add interrupt handler (MSI) failed: "
			    "return %d", rc);
			return (B_FALSE);
		}
		break;
	case DDI_INTR_TYPE_FIXED:
		rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
		    (ddi_intr_handler_t *)i40e_intr_legacy, i40e, NULL);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "Add interrupt handler (legacy) failed:"
			    " return %d", rc);
			return (B_FALSE);
		}
		break;
	default:
		/* Cast to pacify lint */
		panic("i40e_intr_type %p contains an unknown type: %d",
		    (void *)i40e, i40e->i40e_intr_type);
	}

	return (B_TRUE);
}

/*
 * Perform periodic checks. Longer term, we should be thinking about additional
 * things here:
 *
 * o Stall Detection
 * o Temperature sensor detection
 * o Device resetting
 * o Statistics updating to avoid wraparound
 */
static void
i40e_timer(void *arg)
{
	i40e_t *i40e = arg;

	mutex_enter(&i40e->i40e_general_lock);
	i40e_link_check(i40e);
	mutex_exit(&i40e->i40e_general_lock);
}

/*
 * Get the hardware state, and scribble away anything that needs scribbling.
 */
static void
i40e_get_hw_state(i40e_t *i40e, i40e_hw_t *hw)
{
	int rc;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	(void) i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
	i40e_link_check(i40e);

	/*
	 * Try and determine our PHY. Note that we may have to retry and
	 * delay to detect fiber correctly.
	 */
	rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE, &i40e->i40e_phy,
	    NULL);
	if (rc == I40E_ERR_UNKNOWN_PHY) {
		i40e_msec_delay(200);
		rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE,
		    &i40e->i40e_phy, NULL);
	}

	/* Failures here are logged only; we press on regardless. */
	if (rc != I40E_SUCCESS) {
		if (rc == I40E_ERR_UNKNOWN_PHY) {
			i40e_error(i40e, "encountered unknown PHY type, "
			    "not attaching.");
		} else {
			i40e_error(i40e, "error getting physical capabilities: "
			    "%d, %d", rc, hw->aq.asq_last_status);
		}
	}

	rc = i40e_update_link_info(hw);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to update link information: %d", rc);
	}

	/*
	 * In general, we don't want to mask off (as in stop from being a cause)
	 * any of the interrupts that the phy might be able to generate.
	 */
	rc = i40e_aq_set_phy_int_mask(hw, 0, NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to update phy link mask: %d", rc);
	}
}

/*
 * Go through and re-initialize any existing filters that we may have set up for
 * this device. Note that we would only expect them to exist if hardware had
 * already been initialized and we had just reset it. While we're not
 * implementing this yet, we're keeping this around for when we add reset
 * capabilities, so this isn't forgotten.
 */
/* ARGSUSED */
static void
i40e_init_macaddrs(i40e_t *i40e, i40e_hw_t *hw)
{
}

/*
 * Configure the hardware for the Virtual Station Interface (VSI). Currently
 * we only support one, but in the future we could instantiate more than one
 * per attach-point.
 */
static boolean_t
i40e_config_vsi(i40e_t *i40e, i40e_hw_t *hw)
{
	struct i40e_vsi_context context;
	int err;

	/* Read back the current VSI parameters so we modify a known state. */
	bzero(&context, sizeof (struct i40e_vsi_context));
	context.seid = i40e->i40e_vsi_id;
	context.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &context, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "get VSI params failed with %d", err);
		return (B_FALSE);
	}

	/*
	 * Set the queue and traffic class bits. Keep it simple for now.
	 */
	context.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
	context.info.mapping_flags = I40E_AQ_VSI_QUE_MAP_CONTIG;
	context.info.queue_mapping[0] = I40E_ASSIGN_ALL_QUEUES;
	context.info.tc_mapping[0] = I40E_TRAFFIC_CLASS_NO_QUEUES;

	/* Accept all VLANs and leave tags untouched on receive. */
	context.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
	context.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
	    I40E_AQ_VSI_PVLAN_EMOD_NOTHING;

	/*
	 * NOTE(review): LE16_TO_CPU on a value headed TO hardware looks
	 * backwards (CPU_TO_LE16 would be expected); the two are identical on
	 * little-endian hosts -- confirm before running big-endian.
	 */
	context.flags = LE16_TO_CPU(I40E_AQ_VSI_TYPE_PF);

	/* The stat counter index came back from the get-params call above. */
	i40e->i40e_vsi_stat_id = LE16_TO_CPU(context.info.stat_counter_idx);
	if (i40e_stat_vsi_init(i40e) == B_FALSE)
		return (B_FALSE);

	err = i40e_aq_update_vsi_params(hw, &context, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "Update VSI params failed with %d", err);
		return (B_FALSE);
	}


	return (B_TRUE);
}

/*
 * Wrapper to kick the chipset on.
 */
static boolean_t
i40e_chip_start(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_filter_control_settings filter;
	int rc;

	/*
	 * On firmware older than 4.33, explicitly restart link
	 * auto-negotiation after the delay.
	 */
	if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
	    (hw->aq.fw_maj_ver < 4)) {
		i40e_msec_delay(75);
		if (i40e_aq_set_link_restart_an(hw, TRUE, NULL) !=
		    I40E_SUCCESS) {
			i40e_error(i40e, "failed to restart link: admin queue "
			    "error: %d", hw->aq.asq_last_status);
			return (B_FALSE);
		}
	}

	/* Determine hardware state */
	i40e_get_hw_state(i40e, hw);

	/* Initialize mac addresses. */
	i40e_init_macaddrs(i40e, hw);

	/*
	 * Set up the filter control.
	 */
	bzero(&filter, sizeof (filter));
	filter.enable_ethtype = TRUE;
	filter.enable_macvlan = TRUE;

	rc = i40e_set_filter_control(hw, &filter);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_set_filter_control() returned %d", rc);
		return (B_FALSE);
	}

	i40e_intr_chip_init(i40e);

	if (!i40e_config_vsi(i40e, hw))
		return (B_FALSE);

	i40e_flush(hw);

	return (B_TRUE);
}

/*
 * Take care of tearing down the rx ring. See 8.3.3.1.2 for more information.
 */
static void
i40e_shutdown_rx_rings(i40e_t *i40e)
{
	int i;
	uint32_t reg;

	i40e_hw_t *hw = &i40e->i40e_hw_space;

	/*
	 * Step 1. The interrupt linked list (see i40e_intr.c for more
	 * information) should have already been cleared before calling this
	 * function.
	 */
#ifdef DEBUG
	if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) {
		for (i = 1; i < i40e->i40e_intr_count; i++) {
			reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1));
			VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
		}
	} else {
		reg = I40E_READ_REG(hw, I40E_PFINT_LNKLST0);
		VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
	}

#endif	/* DEBUG */

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		/*
		 * Step 1. Request the queue by clearing QENA_REQ. It may not be
		 * set due to unwinding from failures and a partially enabled
		 * ring set.
		 */
		reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
		if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK))
			continue;
		VERIFY((reg & I40E_QRX_ENA_QENA_REQ_MASK) ==
		    I40E_QRX_ENA_QENA_REQ_MASK);
		reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
		I40E_WRITE_REG(hw, I40E_QRX_ENA(i), reg);
	}

	/*
	 * Step 2. Wait for the disable to take, by having QENA_STAT in the FPM
	 * be cleared. Note that we could still receive data in the queue during
	 * this time. We don't actually wait for this now and instead defer this
	 * to i40e_shutdown_rings_wait(), after we've interleaved disabling the
	 * TX queues as well.
	 */
}

/*
 * Tear down the tx rings. The quiesce sequence mirrors
 * i40e_shutdown_rx_rings(); the final wait for QENA_STAT to clear is
 * likewise deferred to i40e_shutdown_rings_wait().
 */
static void
i40e_shutdown_tx_rings(i40e_t *i40e)
{
	int i;
	uint32_t reg;

	i40e_hw_t *hw = &i40e->i40e_hw_space;

	/*
	 * Step 1. The interrupt linked list should already have been cleared.
	 */
#ifdef DEBUG
	if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) {
		for (i = 1; i < i40e->i40e_intr_count; i++) {
			reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1));
			VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
		}
	} else {
		reg = I40E_READ_REG(hw, I40E_PFINT_LNKLST0);
		VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);

	}
#endif	/* DEBUG */

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		/*
		 * Step 2.
		 * Set the SET_QDIS flag for every queue.
		 */
		i40e_pre_tx_queue_cfg(hw, i, B_FALSE);
	}

	/*
	 * Step 3. Wait at least 400 usec (can be done once for all queues).
	 */
	drv_usecwait(500);

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		/*
		 * Step 4. Clear the QENA_REQ flag which tells hardware to
		 * quiesce. If QENA_REQ is not already set then that means that
		 * we likely already tried to disable this queue.
		 */
		reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
		if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK))
			continue;
		reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
		I40E_WRITE_REG(hw, I40E_QTX_ENA(i), reg);
	}

	/*
	 * Step 5. Wait for all drains to finish. This will be done by the
	 * hardware removing the QENA_STAT flag from the queue. Rather than
	 * waiting here, we interleave it with all the others in
	 * i40e_shutdown_rings_wait().
	 */
}

/*
 * Wait for all the rings to be shut down. e.g. Steps 2 and 5 from the above
 * functions.
 */
static boolean_t
i40e_shutdown_rings_wait(i40e_t *i40e)
{
	int i, try;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		uint32_t reg;

		/* Poll for the rx queue's QENA_STAT bit to clear. */
		for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
			reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
			if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
				break;
			i40e_msec_delay(I40E_RING_WAIT_PAUSE);
		}

		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) != 0) {
			i40e_error(i40e, "timed out disabling rx queue %d",
			    i);
			return (B_FALSE);
		}

		/* Likewise for the paired tx queue. */
		for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
			reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
			if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
				break;
			i40e_msec_delay(I40E_RING_WAIT_PAUSE);
		}

		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) != 0) {
			i40e_error(i40e, "timed out disabling tx queue %d",
			    i);
			return (B_FALSE);
		}
	}

	return (B_TRUE);
}

/*
 * Disable all rx and tx rings, then wait for them to quiesce.
 */
static boolean_t
i40e_shutdown_rings(i40e_t *i40e)
{
	i40e_shutdown_rx_rings(i40e);
	i40e_shutdown_tx_rings(i40e);
	return (i40e_shutdown_rings_wait(i40e));
}

/*
 * Program every rx descriptor with the DMA address of its work-list entry's
 * packet buffer. hdr_addr is left at zero as header split is not used.
 */
static void
i40e_setup_rx_descs(i40e_trqpair_t *itrq)
{
	int i;
	i40e_rx_data_t *rxd = itrq->itrq_rxdata;

	for (i = 0; i < rxd->rxd_ring_size; i++) {
		i40e_rx_control_block_t *rcb;
		i40e_rx_desc_t *rdesc;

		rcb = rxd->rxd_work_list[i];
		rdesc = &rxd->rxd_desc_ring[i];

		rdesc->read.pkt_addr =
		    CPU_TO_LE64((uintptr_t)rcb->rcb_dma.dmab_dma_address);
		rdesc->read.hdr_addr = 0;
	}
}

/*
 * Program the FPM/HMC context for a single rx queue.
 */
static boolean_t
i40e_setup_rx_hmc(i40e_trqpair_t *itrq)
{
	i40e_rx_data_t *rxd = itrq->itrq_rxdata;
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	struct i40e_hmc_obj_rxq rctx;
	int err;

	bzero(&rctx, sizeof (struct i40e_hmc_obj_rxq));
	rctx.base = rxd->rxd_desc_area.dmab_dma_address /
	    I40E_HMC_RX_CTX_UNIT;
	rctx.qlen = rxd->rxd_ring_size;
	VERIFY(i40e->i40e_rx_buf_size >= I40E_HMC_RX_DBUFF_MIN);
	VERIFY(i40e->i40e_rx_buf_size <= I40E_HMC_RX_DBUFF_MAX);
	rctx.dbuff = i40e->i40e_rx_buf_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
	/* No header buffers: header split is disabled below. */
	rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
	rctx.dtype = I40E_HMC_RX_DTYPE_NOSPLIT;
	rctx.dsize = I40E_HMC_RX_DSIZE_32BYTE;
	rctx.crcstrip = I40E_HMC_RX_CRCSTRIP_ENABLE;
	rctx.fc_ena = I40E_HMC_RX_FC_DISABLE;
	rctx.l2tsel = I40E_HMC_RX_L2TAGORDER;
	rctx.hsplit_0 = I40E_HMC_RX_HDRSPLIT_DISABLE;
	rctx.hsplit_1 = I40E_HMC_RX_HDRSPLIT_DISABLE;
	rctx.showiv = I40E_HMC_RX_INVLAN_DONTSTRIP;
	rctx.rxmax = i40e->i40e_frame_max;
	rctx.tphrdesc_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphwdesc_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphdata_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.tphhead_ena = I40E_HMC_RX_TPH_DISABLE;
	rctx.lrxqthresh = I40E_HMC_RX_LOWRXQ_NOINTR;

	/*
	 * This must be set to 0x1, see Table 8-12 in section 8.3.3.2.2.
	 */
	rctx.prefena = I40E_HMC_RX_PREFENA;

	err = i40e_clear_lan_rx_queue_context(hw, itrq->itrq_index);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to clear rx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	err = i40e_set_lan_rx_queue_context(hw, itrq->itrq_index, &rctx);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set rx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Take care of setting up the descriptor rings and actually programming the
 * device. See 8.3.3.1.1 for the full list of steps we need to do to enable the
 * rx rings.
 */
static boolean_t
i40e_setup_rx_rings(i40e_t *i40e)
{
	int i;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];
		i40e_rx_data_t *rxd = itrq->itrq_rxdata;
		uint32_t reg;

		/*
		 * Step 1. Program all receive ring descriptors.
		 */
		i40e_setup_rx_descs(itrq);

		/*
		 * Step 2. Program the queue's FPM/HMC context.
		 */
		if (i40e_setup_rx_hmc(itrq) == B_FALSE)
			return (B_FALSE);

		/*
		 * Step 3. Clear the queue's tail pointer and set it to the end
		 * of the space.
		 */
		I40E_WRITE_REG(hw, I40E_QRX_TAIL(i), 0);
		I40E_WRITE_REG(hw, I40E_QRX_TAIL(i), rxd->rxd_ring_size - 1);

		/*
		 * Step 4. Enable the queue via the QENA_REQ.
		 */
		reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
		VERIFY0(reg & (I40E_QRX_ENA_QENA_REQ_MASK |
		    I40E_QRX_ENA_QENA_STAT_MASK));
		reg |= I40E_QRX_ENA_QENA_REQ_MASK;
		I40E_WRITE_REG(hw, I40E_QRX_ENA(i), reg);
	}

	/*
	 * Note, we wait for every queue to be enabled before we start checking.
	 * This will hopefully cause most queues to be enabled at this point.
	 */
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		uint32_t j, reg;

		/*
		 * Step 5. Verify that QENA_STAT has been set. It's promised
		 * that this should occur within about 10 us, but like other
		 * systems, we give the card a bit more time.
		 */
		for (j = 0; j < I40E_RING_WAIT_NTRIES; j++) {
			reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));

			if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
				break;
			i40e_msec_delay(I40E_RING_WAIT_PAUSE);
		}

		if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
			i40e_error(i40e, "failed to enable rx queue %d, timed "
			    "out.", i);
			return (B_FALSE);
		}
	}

	return (B_TRUE);
}

/*
 * Program the FPM/HMC context for a single tx queue.
 */
static boolean_t
i40e_setup_tx_hmc(i40e_trqpair_t *itrq)
{
	i40e_t *i40e = itrq->itrq_i40e;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	struct i40e_hmc_obj_txq tctx;
	struct i40e_vsi_context context;
	int err;

	bzero(&tctx, sizeof (struct i40e_hmc_obj_txq));
	tctx.new_context = I40E_HMC_TX_NEW_CONTEXT;
	tctx.base = itrq->itrq_desc_area.dmab_dma_address /
	    I40E_HMC_TX_CTX_UNIT;
	tctx.fc_ena = I40E_HMC_TX_FC_DISABLE;
	tctx.timesync_ena = I40E_HMC_TX_TS_DISABLE;
	tctx.fd_ena = I40E_HMC_TX_FD_DISABLE;
	tctx.alt_vlan_ena = I40E_HMC_TX_ALT_VLAN_DISABLE;
	tctx.head_wb_ena = I40E_HMC_TX_WB_ENABLE;
	tctx.qlen = itrq->itrq_tx_ring_size;
	tctx.tphrdesc_ena = I40E_HMC_TX_TPH_DISABLE;
	tctx.tphrpacket_ena = I40E_HMC_TX_TPH_DISABLE;
	tctx.tphwdesc_ena = I40E_HMC_TX_TPH_DISABLE;
	/* Head write-back lands just past the last tx descriptor. */
	tctx.head_wb_addr = itrq->itrq_desc_area.dmab_dma_address +
	    sizeof (i40e_tx_desc_t) * itrq->itrq_tx_ring_size;

	/*
	 * This field isn't actually documented, like crc, but it suggests that
	 * it should be zeroed. We leave both of these here because of that for
	 * now. We should check with Intel on why these are here even.
	 */
	tctx.crc = 0;
	tctx.rdylist_act = 0;

	/*
	 * We're supposed to assign the rdylist field with the value of the
	 * traffic class index for the first device. We query the VSI parameters
	 * again to get what the handle is. Note that every queue is always
	 * assigned to traffic class zero, because we don't actually use them.
	 */
	bzero(&context, sizeof (struct i40e_vsi_context));
	context.seid = i40e->i40e_vsi_id;
	context.pf_num = hw->pf_id;
	err = i40e_aq_get_vsi_params(hw, &context, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "get VSI params failed with %d", err);
		return (B_FALSE);
	}
	tctx.rdylist = LE_16(context.info.qs_handle[0]);

	err = i40e_clear_lan_tx_queue_context(hw, itrq->itrq_index);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to clear tx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	err = i40e_set_lan_tx_queue_context(hw, itrq->itrq_index, &tctx);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set tx queue %d context: %d",
		    itrq->itrq_index, err);
		return (B_FALSE);
	}

	return (B_TRUE);
}

/*
 * Take care of setting up the descriptor rings and actually programming the
 * device. See 8.4.3.1.1 for what we need to do here.
 */
static boolean_t
i40e_setup_tx_rings(i40e_t *i40e)
{
	int i;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];
		uint32_t reg;

		/*
		 * Step 1. Clear the queue disable flag and verify that the
		 * index is set correctly.
		 */
		i40e_pre_tx_queue_cfg(hw, i, B_TRUE);

		/*
		 * Step 2. Prepare the queue's FPM/HMC context.
		 */
		if (i40e_setup_tx_hmc(itrq) == B_FALSE)
			return (B_FALSE);

		/*
		 * Step 3. Verify that it's clear that this PF owns this queue.
		 */
		reg = I40E_QTX_CTL_PF_QUEUE;
		reg |= (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
		    I40E_QTX_CTL_PF_INDX_MASK;
		I40E_WRITE_REG(hw, I40E_QTX_CTL(itrq->itrq_index), reg);
		i40e_flush(hw);

		/*
		 * Step 4. Set the QENA_REQ flag.
		 */
		reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
		VERIFY0(reg & (I40E_QTX_ENA_QENA_REQ_MASK |
		    I40E_QTX_ENA_QENA_STAT_MASK));
		reg |= I40E_QTX_ENA_QENA_REQ_MASK;
		I40E_WRITE_REG(hw, I40E_QTX_ENA(i), reg);
	}

	/*
	 * Note, we wait for every queue to be enabled before we start checking.
	 * This will hopefully cause most queues to be enabled at this point.
	 */
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		uint32_t j, reg;

		/*
		 * Step 5. Verify that QENA_STAT has been set. It's promised
		 * that this should occur within about 10 us, but like BSD,
		 * we'll try for up to 100 ms for this queue.
		 */
		for (j = 0; j < I40E_RING_WAIT_NTRIES; j++) {
			reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));

			if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
				break;
			i40e_msec_delay(I40E_RING_WAIT_PAUSE);
		}

		if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
			i40e_error(i40e, "failed to enable tx queue %d, timed "
			    "out", i);
			return (B_FALSE);
		}
	}

	return (B_TRUE);
}

/*
 * Stop the device: quiesce interrupts and rings, tear down per-ring kstats
 * and tx buffers, and reset the reported link state. Must be called with
 * i40e_general_lock held. If free_allocations is set, ring memory is
 * released as well.
 */
void
i40e_stop(i40e_t *i40e, boolean_t free_allocations)
{
	int i;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	/*
	 * Shutdown and drain the tx and rx pipeline. We do this using the
	 * following steps.
	 *
	 * 1) Shutdown interrupts to all the queues (trying to keep the admin
	 * queue alive).
	 *
	 * 2) Remove all of the interrupt tx and rx causes by setting the
	 * interrupt linked lists to zero.
	 *
	 * 3) Shutdown the tx and rx rings. Because i40e_shutdown_rings() should
	 * wait for all the queues to be disabled, once we reach that point
	 * it should be safe to free associated data.
	 *
	 * 4) Wait 50ms after all that is done. This ensures that the rings are
	 * ready for programming again and we don't have to think about this
	 * in other parts of the driver.
	 *
	 * 5) Disable remaining chip interrupts, (admin queue, etc.)
	 *
	 * 6) Verify that FM is happy with all the register accesses we
	 * performed.
	 */
	i40e_intr_io_disable_all(i40e);
	i40e_intr_io_clear_cause(i40e);

	if (i40e_shutdown_rings(i40e) == B_FALSE) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
	}

	delay(50 * drv_usectohz(1000));

	i40e_intr_chip_fini(i40e);

	/* Hold every ring's locks while tearing down ring state. */
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		mutex_enter(&i40e->i40e_trqpairs[i].itrq_rx_lock);
		mutex_enter(&i40e->i40e_trqpairs[i].itrq_tx_lock);
	}

	/*
	 * We should consider refactoring this to be part of the ring start /
	 * stop routines at some point.
	 */
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e_stats_trqpair_fini(&i40e->i40e_trqpairs[i]);
	}

	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
	}

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e_tx_cleanup_ring(&i40e->i40e_trqpairs[i]);
	}

	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		mutex_exit(&i40e->i40e_trqpairs[i].itrq_rx_lock);
		mutex_exit(&i40e->i40e_trqpairs[i].itrq_tx_lock);
	}

	i40e_stat_vsi_fini(i40e);

	/* The link is no longer valid; report it as unknown. */
	i40e->i40e_link_speed = 0;
	i40e->i40e_link_duplex = 0;
	i40e_link_state_set(i40e, LINK_STATE_UNKNOWN);

	if (free_allocations) {
		i40e_free_ring_mem(i40e, B_FALSE);
	}
}

/*
 * Bring the device to a running state: per-ring kstats, chip start, ring
 * programming, broadcast and MAC configuration, finishing by enabling I/O
 * interrupts. Must be called with i40e_general_lock held. On failure the
 * device is stopped again and DDI_SERVICE_LOST is reported.
 */
boolean_t
i40e_start(i40e_t *i40e, boolean_t alloc)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	boolean_t rc = B_TRUE;
	int i, err;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	if (alloc) {
		if (i40e_alloc_ring_mem(i40e) == B_FALSE) {
			i40e_error(i40e,
			    "Failed to allocate ring memory");
			return (B_FALSE);
		}
	}

	/*
	 * This should get refactored to be part of ring start and stop at
	 * some point, along with most of the logic here.
	 */
	for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		if (i40e_stats_trqpair_init(&i40e->i40e_trqpairs[i]) ==
		    B_FALSE) {
			int j;

			/* Unwind the kstats we managed to set up so far. */
			for (j = 0; j < i; j++) {
				i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[j];
				i40e_stats_trqpair_fini(itrq);
			}
			return (B_FALSE);
		}
	}

	if (!i40e_chip_start(i40e)) {
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
		rc = B_FALSE;
		goto done;
	}

	if (i40e_setup_rx_rings(i40e) == B_FALSE) {
		rc = B_FALSE;
		goto done;
	}

	if (i40e_setup_tx_rings(i40e) == B_FALSE) {
		rc = B_FALSE;
		goto done;
	}

	/*
	 * Enable broadcast traffic; however, do not enable multicast traffic.
	 * That's handled exclusively through MAC's mc_multicst routines.
	 */
	err = i40e_aq_set_vsi_broadcast(hw, i40e->i40e_vsi_id, B_TRUE, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set default VSI: %d", err);
		rc = B_FALSE;
		goto done;
	}

	err = i40e_aq_set_mac_config(hw, i40e->i40e_frame_max, B_TRUE, 0, NULL);
	if (err != I40E_SUCCESS) {
		i40e_error(i40e, "failed to set MAC config: %d", err);
		rc = B_FALSE;
		goto done;
	}

	/*
	 * Finally, make sure that we're happy from an FM perspective.
	 */
	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
	    DDI_FM_OK) {
		rc = B_FALSE;
		goto done;
	}

	/* Clear state bits prior to final interrupt enabling. */
	atomic_and_32(&i40e->i40e_state,
	    ~(I40E_ERROR | I40E_STALL | I40E_OVERTEMP));

	i40e_intr_io_enable_all(i40e);

done:
	if (rc == B_FALSE) {
		i40e_stop(i40e, B_FALSE);
		if (alloc == B_TRUE) {
			i40e_free_ring_mem(i40e, B_TRUE);
		}
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
	}

	return (rc);
}

/*
 * We may have loaned up descriptors to the stack.
 * As such, if we still have
 * them outstanding, then we will not continue with detach.
 */
static boolean_t
i40e_drain_rx(i40e_t *i40e)
{
	mutex_enter(&i40e->i40e_rx_pending_lock);
	while (i40e->i40e_rx_pending > 0) {
		/*
		 * Bounded wait: if the stack hasn't returned the loaned
		 * buffers within I40E_DRAIN_RX_WAIT, give up and fail.
		 */
		if (cv_reltimedwait(&i40e->i40e_rx_pending_cv,
		    &i40e->i40e_rx_pending_lock,
		    drv_usectohz(I40E_DRAIN_RX_WAIT), TR_CLOCK_TICK) == -1) {
			mutex_exit(&i40e->i40e_rx_pending_lock);
			return (B_FALSE);
		}
	}
	mutex_exit(&i40e->i40e_rx_pending_lock);

	return (B_TRUE);
}

/*
 * DDI attach(9E) entry point: allocate the per-instance i40e_t and walk
 * through hardware identification, register mapping, common code
 * initialization, interrupt and ring allocation, kstats, and MAC
 * registration. Each completed stage is recorded in i40e_attach_progress
 * so that i40e_unconfigure() can unwind exactly what was done on failure.
 */
static int
i40e_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
{
	i40e_t *i40e;
	struct i40e_osdep *osdep;
	i40e_hw_t *hw;
	int instance;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	instance = ddi_get_instance(devinfo);
	i40e = kmem_zalloc(sizeof (i40e_t), KM_SLEEP);

	i40e->i40e_aqbuf = kmem_zalloc(I40E_ADMINQ_BUFSZ, KM_SLEEP);
	i40e->i40e_instance = instance;
	i40e->i40e_dip = devinfo;

	/* Wire the Intel common code's hw handle to our osdep state. */
	hw = &i40e->i40e_hw_space;
	osdep = &i40e->i40e_osdep_space;
	hw->back = osdep;
	osdep->ios_i40e = i40e;

	ddi_set_driver_private(devinfo, i40e);

	i40e_fm_init(i40e);
	i40e->i40e_attach_progress |= I40E_ATTACH_FM_INIT;

	if (pci_config_setup(devinfo, &osdep->ios_cfg_handle) != DDI_SUCCESS) {
		i40e_error(i40e, "Failed to map PCI configurations.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_PCI_CONFIG;

	if (!i40e_identify_hardware(i40e)) {
		i40e_error(i40e, "Failed to identify hardware");
		goto attach_fail;
	}

	if (!i40e_regs_map(i40e)) {
		i40e_error(i40e, "Failed to map device registers.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_REGS_MAP;

	i40e_init_properties(i40e);
	i40e->i40e_attach_progress |= I40E_ATTACH_PROPS;

	if (!i40e_common_code_init(i40e, hw))
		goto attach_fail;
	i40e->i40e_attach_progress |= I40E_ATTACH_COMMON_CODE;

	/*
	 * When we participate in IRM, we should make sure that we register
	 * ourselves with it before callbacks.
	 */
	if (!i40e_alloc_intrs(i40e, devinfo)) {
		i40e_error(i40e, "Failed to allocate interrupts.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_INTR;

	if (!i40e_alloc_trqpairs(i40e)) {
		i40e_error(i40e,
		    "Failed to allocate receive & transmit rings.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_RINGSLOCKS;

	if (!i40e_map_intrs_to_vectors(i40e)) {
		i40e_error(i40e, "Failed to map interrupts to vectors.");
		goto attach_fail;
	}

	if (!i40e_add_intr_handlers(i40e)) {
		i40e_error(i40e, "Failed to add the interrupt handlers.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ADD_INTR;

	if (!i40e_final_init(i40e)) {
		i40e_error(i40e, "Final initialization failed.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_INIT;

	if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
	    DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
		goto attach_fail;
	}

	if (!i40e_stats_init(i40e)) {
		i40e_error(i40e, "Stats initialization failed.");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_STATS;

	if (!i40e_register_mac(i40e)) {
		i40e_error(i40e, "Failed to register to MAC/GLDv3");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_MAC;

	/* Periodic timer used for link-state checking. */
	i40e->i40e_periodic_id = ddi_periodic_add(i40e_timer, i40e,
	    I40E_CYCLIC_PERIOD, DDI_IPL_0);
	if (i40e->i40e_periodic_id == 0) {
		i40e_error(i40e, "Failed to add the link-check timer");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_LINK_TIMER;

	if
	    (!i40e_enable_interrupts(i40e)) {
		i40e_error(i40e, "Failed to enable DDI interrupts");
		goto attach_fail;
	}
	i40e->i40e_attach_progress |= I40E_ATTACH_ENABLE_INTR;

	atomic_or_32(&i40e->i40e_state, I40E_INITIALIZED);

	/* Publish this instance on the driver's global list. */
	mutex_enter(&i40e_glock);
	list_insert_tail(&i40e_glist, i40e);
	mutex_exit(&i40e_glock);

	return (DDI_SUCCESS);

attach_fail:
	i40e_unconfigure(devinfo, i40e);
	return (DDI_FAILURE);
}

/*
 * DDI detach(9E) entry point. Detach is refused while rx buffers are still
 * loaned to the networking stack (see i40e_drain_rx()).
 */
static int
i40e_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
{
	i40e_t *i40e;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	i40e = (i40e_t *)ddi_get_driver_private(devinfo);
	if (i40e == NULL) {
		i40e_log(NULL, "i40e_detach() called with no i40e pointer!");
		return (DDI_FAILURE);
	}

	if (i40e_drain_rx(i40e) == B_FALSE) {
		i40e_log(i40e, "timed out draining DMA resources, %d buffers "
		    "remain", i40e->i40e_rx_pending);
		return (DDI_FAILURE);
	}

	mutex_enter(&i40e_glock);
	list_remove(&i40e_glist, i40e);
	mutex_exit(&i40e_glock);

	i40e_unconfigure(devinfo, i40e);

	return (DDI_SUCCESS);
}

static struct cb_ops i40e_cb_ops = {
	nulldev,		/* cb_open */
	nulldev,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	nodev,			/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};

static struct dev_ops i40e_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	NULL,			/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	i40e_attach,		/* devo_attach */
	i40e_detach,		/* devo_detach */
	nodev,			/* devo_reset */
	&i40e_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	ddi_power,		/* devo_power */
	ddi_quiesce_not_supported	/* devo_quiesce */
};

static struct modldrv i40e_modldrv = {
	&mod_driverops,
	i40e_ident,
	&i40e_dev_ops
};

static struct modlinkage i40e_modlinkage = {
	MODREV_1,
	{ &i40e_modldrv, NULL }
};

/*
 * Module Initialization Functions.
 */
int
_init(void)
{
	int status;

	list_create(&i40e_glist, sizeof (i40e_t), offsetof(i40e_t, i40e_glink));
	list_create(&i40e_dlist, sizeof (i40e_device_t),
	    offsetof(i40e_device_t, id_link));
	mutex_init(&i40e_glock, NULL, MUTEX_DRIVER, NULL);
	mac_init_ops(&i40e_dev_ops, I40E_MODULE_NAME);

	status = mod_install(&i40e_modlinkage);
	if (status != DDI_SUCCESS) {
		/* Unwind the global state if the install failed. */
		mac_fini_ops(&i40e_dev_ops);
		mutex_destroy(&i40e_glock);
		list_destroy(&i40e_dlist);
		list_destroy(&i40e_glist);
	}

	return (status);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&i40e_modlinkage, modinfop));
}

int
_fini(void)
{
	int status;

	status = mod_remove(&i40e_modlinkage);
	if (status == DDI_SUCCESS) {
		mac_fini_ops(&i40e_dev_ops);
		mutex_destroy(&i40e_glock);
		list_destroy(&i40e_dlist);
		list_destroy(&i40e_glist);
	}

	return (status);
}