/*
 * This file and its contents are supplied under the terms of the
 * Common Development and Distribution License ("CDDL"), version 1.0.
 * You may only use this file in accordance with the terms of version
 * 1.0 of the CDDL.
 *
 * A full copy of the text of the CDDL should have accompanied this
 * source. A copy of the CDDL is also available via the Internet at
 * http://www.illumos.org/license/CDDL.
 */

/*
 * Copyright 2015 OmniTI Computer Consulting, Inc. All rights reserved.
 * Copyright 2019 Joyent, Inc.
 * Copyright 2017 Tegile Systems, Inc. All rights reserved.
 */

/*
 * i40e - Intel 10/40 Gb Ethernet driver
 *
 * The i40e driver is the main software device driver for the Intel 40 Gb family
 * of devices. Note that these devices come in many flavors with both 40 GbE
 * ports and 10 GbE ports. This device is the successor to the 82599 family of
 * devices (ixgbe).
 *
 * Unlike previous generations of Intel 1 GbE and 10 GbE devices, the 40 GbE
 * devices defined in the XL710 controller (previously known as Fortville) are a
 * rather different beast and have a small switch embedded inside of them. In
 * addition, the way that most of the programming is done has been overhauled.
 * In addition to PCIe memory-mapped registers, the device also has an
 * administrative queue which is used to communicate with firmware running on
 * the chip.
 *
 * Each physical function in the hardware shows up as a device that this driver
 * will bind to. The hardware splits many resources evenly across all of the
 * physical functions present on the device, while other resources are instead
 * shared across the entire card and it's up to the device driver to
 * intelligently partition them.
 *
 * ------------
 * Organization
 * ------------
 *
 * This driver is made up of several files which have their own theory
 * statements spread across them. We'll touch on the high level purpose of each
 * file here, and then we'll get into more discussion on how the device is
 * generally modelled with respect to the interfaces in illumos.
 *
 * i40e_gld.c: This file contains all of the bindings to MAC and the networking
 *             stack.
 *
 * i40e_intr.c: This file contains all of the interrupt service routines and
 *              contains logic to enable and disable interrupts on the hardware.
 *              It also contains the logic to map hardware resources such as the
 *              rings to and from interrupts and controls their ability to fire.
 *
 *              There is a big theory statement on interrupts present there.
 *
 * i40e_main.c: The file that you're currently in. It interfaces with the
 *              traditional OS DDI interfaces and is in charge of configuring
 *              the device.
 *
 * i40e_osdep.[ch]: These files contain interfaces and definitions needed to
 *                  work with Intel's common code for the device.
 *
 * i40e_stats.c: This file contains the general work and logic around our
 *               kstats. A theory statement on their organization and use of the
 *               hardware exists there.
 *
 * i40e_sw.h: This header file contains all of the primary structure definitions
 *            and constants that are used across the entire driver.
 *
 * i40e_transceiver.c: This file contains all of the logic for sending and
 *                     receiving data. It contains all of the ring and DMA
 *                     allocation logic, as well as the actual interfaces to
 *                     send and receive data.
 *
 *                     A big theory statement on ring management, descriptors,
 *                     and how it ties into the OS is present there.
 *
 * --------------
 * General Design
 * --------------
 *
 * Before we go too far into the general way we've laid out data structures and
 * the like, it's worth taking some time to explain how the hardware is
 * organized. This organization informs a lot of how we do things at this time
 * in the driver.
 *
 * Each physical device consists of one or more ports, which are considered
 * physical functions in the PCI sense and thus each get enumerated by the
 * system, resulting in an instance being created and attached to. While there
 * are many resources that are unique to each physical function, e.g. each
 * instance of the device, there are many that are shared across all of them.
 * Several resources have an amount reserved for each Virtual Station Interface
 * (VSI) and then a static pool of resources available for all functions on the
 * card.
 *
 * The most important resources in hardware are its transmit and receive queue
 * pairs (i40e_trqpair_t). These should be thought of as rings in GLDv3
 * parlance. There are a set number of these on each device; however, they are
 * statically partitioned among all of the different physical functions.
 *
 * 'Fortville' (the code name for this device family) is basically a switch. To
 * map MAC addresses and other things to queues, we end up having to create
 * Virtual Station Interfaces (VSIs) and establish forwarding rules that direct
 * traffic to a queue. A VSI owns a collection of queues and has a series of
 * forwarding rules that point to it. One way to think of this is to treat it
 * like MAC does a VNIC. When MAC refers to a group, a collection of rings and
 * classification resources, that is a VSI in i40e.
 *
 * The set of VSIs is shared across the entire device, though there may be some
 * amount that are reserved to each PF. Because the GLDv3 does not let us change
 * the number of groups dynamically, we instead statically divide this amount
 * evenly between all the functions that exist. In addition, we have the same
 * problem with the MAC address forwarding rules. There are a static number that
 * exist shared across all the functions.
 *
 * To handle both of these resources, what we end up doing is going through and
 * determining which functions belong to the same device. Nominally one might do
 * this by having a nexus driver; however, a prime requirement for a nexus
 * driver is identifying the various children and activating them. While it is
 * possible to get this information from NVRAM, we would end up duplicating a
 * lot of the PCI enumeration logic. Really, at the end of the day, the device
 * doesn't give us the traditional identification properties we want from a
 * nexus driver.
 *
 * Instead, we rely on some properties that are guaranteed to be unique. While
 * it might be tempting to leverage the PBA or serial number of the device from
 * NVRAM, there is nothing that says that two devices can't be mis-programmed to
 * have the same values in NVRAM. Instead, we uniquely identify a group of
 * functions based on their parent in the /devices tree, plus their PCI bus and
 * PCI device identifiers. Using any one of these on its own may not be
 * sufficient.
 *
 * For each unique PCI device that we encounter, we'll create an i40e_device_t.
 * From there, because we don't have a good way to tell the GLDv3 about sharing
 * resources between everything, we'll end up just dividing the resources
 * evenly between all of the functions. Longer term, if we don't have to declare
 * to the GLDv3 that these resources are shared, then we'll maintain a pool and
 * have each PF allocate from the pool in the device; thus, if only two of four
 * ports are being used, for example, then all of the resources can still be
 * used.
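 *
 * As a concrete illustration of that division (the counts here are
 * hypothetical, chosen only for the arithmetic): with 384 VSIs on the card
 * and four functions, each PF receives its guaranteed share plus
 * total_unalloced / id_nfuncs of the shared pool, so 128 unallocated VSIs
 * would contribute 32 additional VSIs per PF. See
 * i40e_get_available_resources() below for the actual computation.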
 *
 * -------------------------------------------
 * Transmit and Receive Queue Pair Allocations
 * -------------------------------------------
 *
 * NVRAM ends up assigning each PF its own share of the transmit and receive LAN
 * queue pairs; we have no way of modifying this assignment, only observing it.
 * From there, it's up to us to map these queues to VSIs and VFs. Since we don't
 * support any VFs at this time, we only focus on assignments to VSIs.
 *
 * At the moment, we use a static mapping of transmit/receive queue pairs to a
 * given VSI (e.g. rings to a group), though in the fullness of time we want to
 * make this something which is fully dynamic and take advantage of documented,
 * but not yet available, functionality for adding filters based on VXLAN and
 * other encapsulation technologies.
 *
 * -------------------------------------
 * Broadcast, Multicast, and Promiscuous
 * -------------------------------------
 *
 * As part of the GLDv3, we need to make sure that we can handle receiving
 * broadcast and multicast traffic, as well as enabling promiscuous mode when
 * requested. GLDv3 requires that all broadcast and multicast traffic be
 * retrieved by the default group, e.g. the first one. This is the same thing as
 * the default VSI.
 *
 * To receive broadcast traffic, we enable it through the admin queue, rather
 * than use one of our filters for it. For multicast traffic, we reserve a
 * certain number of the hash filters and assign them to a given PF. When we
 * exceed those, we then switch to using promiscuous mode for multicast traffic.
 *
 * More specifically, once we exceed the number of filters (indicated because
 * the i40e_t`i40e_resources.ifr_nmcastfilt ==
 * i40e_t`i40e_resources.ifr_nmcastfilt_used), we instead need to toggle
 * promiscuous mode. If promiscuous mode is toggled then we keep track of the
 * number of MACs added to it by incrementing i40e_t`i40e_mcast_promisc_count.
 * It will stay enabled until that count reaches zero, indicating that we have
 * only added multicast addresses for which we have a corresponding filter
 * entry.
 *
 * Because MAC itself wants to toggle promiscuous mode, which includes both
 * unicast and multicast traffic, we go through and keep track of that
 * ourselves. That is maintained through the use of the i40e_t`i40e_promisc_on
 * member.
 *
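 * The multicast path sketched in C (a simplified illustration of the
 * bookkeeping above, not the literal GLDv3 entry point; seid here stands
 * in for the default VSI's SEID, and the admin queue call shown is the
 * common code's multicast-promiscuous interface):
 *
 *	if (i40e->i40e_resources.ifr_nmcastfilt_used <
 *	    i40e->i40e_resources.ifr_nmcastfilt) {
 *		i40e->i40e_resources.ifr_nmcastfilt_used++;
 *	} else {
 *		if (i40e->i40e_mcast_promisc_count == 0)
 *			(void) i40e_aq_set_vsi_multicast_promiscuous(hw,
 *			    seid, B_TRUE, NULL);
 *		i40e->i40e_mcast_promisc_count++;
 *	}
 *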
 * --------------
 * VSI Management
 * --------------
 *
 * The PFs share 384 VSIs. The firmware creates one VSI per PF by default.
 * During chip start we retrieve the SEID of this VSI and assign it as the
 * default VSI for our VEB (one VEB per PF). We then add additional VSIs to
 * the VEB up to the determined number of rx groups: i40e_t`i40e_num_rx_groups.
 * We currently cap this number at I40E_GROUP_MAX to (a) make sure all PFs can
 * allocate the same number of VSIs, and (b) keep the interrupt multiplexing
 * under control. In the future, when we improve the interrupt allocation, we
 * may want to revisit this cap to make better use of the available VSIs. The
 * VSI allocation and configuration can be found in i40e_chip_start().
 *
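 * In sketch form (details elided; i40e_aq_add_vsi() is the common code's
 * interface for this and veb_seid stands in for our VEB's SEID), each
 * additional VSI is created via the admin queue roughly as follows:
 *
 *	struct i40e_vsi_context ctx;
 *
 *	bzero(&ctx, sizeof (ctx));
 *	ctx.uplink_seid = veb_seid;
 *	...
 *	if (i40e_aq_add_vsi(hw, &ctx, NULL) != I40E_SUCCESS)
 *		return (B_FALSE);
 *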
 * ----------------
 * Structure Layout
 * ----------------
 *
 * The following diagram relates the core data structures together. The primary
 * structure in the system is the i40e_t. It itself contains multiple rings,
 * i40e_trqpair_t's, which contain the various transmit and receive data. The
 * receive data is stored outside of the i40e_trqpair_t and instead in the
 * i40e_rx_data_t. The i40e_t has a corresponding i40e_device_t which keeps
 * track of per-physical device state. Finally, for every active descriptor,
 * there is a corresponding control block, which is where the
 * i40e_rx_control_block_t and the i40e_tx_control_block_t come from.
 *
 * +-----------------------+       +-----------------------+
 * | Global i40e_t list    |       | Global Device list    |
 * |                       |    +--|                       |
 * | i40e_glist            |    |  | i40e_dlist            |
 * +-----------------------+    |  +-----------------------+
 *     |                        v
 *     |    +------------------------+      +-----------------------+
 *     |    | Device-wide Structure  |----->| Device-wide Structure |--> ...
 *     |    | i40e_device_t          |      | i40e_device_t         |
 *     |    |                        |      +-----------------------+
 *     |    | dev_info_t *     ------+--> Parent in devices tree.
 *     |    | uint_t           ------+--> PCI bus number
 *     |    | uint_t           ------+--> PCI device number
 *     |    | uint_t           ------+--> Number of functions
 *     |    | i40e_switch_rsrcs_t ---+--> Captured total switch resources
 *     |    | list_t           ------+-------------+
 *     |    +------------------------+             |
 *     |               ^                           |
 *     |               +--------------------+      |
 *     |                                    |      v
 *     |  +---------------------------+     |   +-------------------+
 *     +->| GLDv3 Device, per PF      |-----|-->| GLDv3 Device (PF) |--> ...
 *        | i40e_t                    |     |   | i40e_t            |
 *        | **Primary Structure**     |     |   +-------------------+
 *        |                           |     |
 *        | i40e_device_t *         --+-----+
 *        | i40e_state_t            --+---> Device State
 *        | i40e_hw_t               --+---> Intel common code structure
 *        | mac_handle_t            --+---> GLDv3 handle to MAC
 *        | ddi_periodic_t          --+---> Link activity timer
 *        | i40e_vsi_t *            --+---> Array of VSIs
 *        | i40e_func_rsrc_t        --+---> Available hardware resources
 *        | i40e_switch_rsrc_t *    --+---> Switch resource snapshot
 *        | i40e_sdu                --+---> Current MTU
 *        | i40e_frame_max          --+---> Current HW frame size
 *        | i40e_uaddr_t *          --+---> Array of assigned unicast MACs
 *        | i40e_maddr_t *          --+---> Array of assigned multicast MACs
 *        | i40e_mcast_promisc_count -+---> Active multicast state
 *        | i40e_promisc_on         --+---> Current promiscuous mode state
 *        | uint_t                  --+---> Number of transmit/receive pairs
 *        | i40e_rx_group_t *       --+---> Array of Rx groups
 *        | kstat_t *               --+---> PF kstats
 *        | i40e_pf_stats_t         --+---> PF kstat backing data
 *        | i40e_trqpair_t *        --+---------+
 *        +---------------------------+         |
 *                                              |
 *                                              v
 * +-------------------------------+       +-----------------------------+
 * | Transmit/Receive Queue Pair   |-------| Transmit/Receive Queue Pair |-> ...
 * | i40e_trqpair_t                |       | i40e_trqpair_t              |
 * | Ring Data Structure           |       +-----------------------------+
 * |                               |
 * | mac_ring_handle_t           --+--> MAC RX ring handle
 * | mac_ring_handle_t           --+--> MAC TX ring handle
 * | i40e_rxq_stat_t             --+--> RX Queue stats
 * | i40e_txq_stat_t             --+--> TX Queue stats
 * | uint32_t (tx ring size)     --+--> TX Ring Size
 * | uint32_t (tx free list size) -+--> TX Free List Size
 * | i40e_dma_buffer_t           --+--> TX Descriptor ring DMA
 * | i40e_tx_desc_t *            --+--> TX descriptor ring
 * | volatile uint32_t *         --+--> TX Write back head
 * | uint32_t                    --+--> TX ring head
 * | uint32_t                    --+--> TX ring tail
 * | uint32_t                    --+--> Num TX desc free
 * | i40e_tx_control_block_t *   --+--> TX control block array  ---+
 * | i40e_tx_control_block_t **  --+--> TCB work list          ----+
 * | i40e_tx_control_block_t **  --+--> TCB free list           ---+
 * | uint32_t                    --+--> Free TCB count              |
 * | i40e_rx_data_t *            --+--+                             v
 * +-------------------------------+  |     +---------------------------+
 *                                    |     | Per-TX Frame Metadata     |
 *                                    |     | i40e_tx_control_block_t   |
 *       +----------------------------+     |                           |
 *       |        mblk to transmit <--+--- mblk_t *                     |
 *       |        type of transmit <--+--- i40e_tx_type_t               |
 *       |          TX DMA handle  <--+--- ddi_dma_handle_t             |
 *       v          TX DMA buffer  <--+--- i40e_dma_buffer_t            |
 * +------------------------------+         +---------------------------+
 * | Core Receive Data            |
 * | i40e_rx_data_t               |
 * |                              |
 * | i40e_dma_buffer_t          --+--> RX descriptor DMA Data
 * | i40e_rx_desc_t             --+--> RX descriptor ring
 * | uint32_t                   --+--> Next free desc.
 * | i40e_rx_control_block_t *  --+--> RX Control Block Array  ---+
 * | i40e_rx_control_block_t ** --+--> RCB work list           ---+
 * | i40e_rx_control_block_t ** --+--> RCB free list           ---+
 * +------------------------------+                               |
 *    ^                                                           |
 *    |      +---------------------------+                        |
 *    |      | Per-RX Frame Metadata     |<-----------------------+
 *    |      | i40e_rx_control_block_t   |
 *    |      |                           |
 *    |      | mblk_t *              ----+--> Received mblk_t data
 *    |      | uint32_t              ----+--> Reference count
 *    |      | i40e_dma_buffer_t     ----+--> Receive data DMA info
 *    |      | frtn_t                ----+--> mblk free function info
 *    +------+-- i40e_rx_data_t *        |
 *           +---------------------------+
 *
 * -------------
 * Lock Ordering
 * -------------
 *
 * In order to ensure that we don't deadlock, the following represents the
 * lock order being used. When grabbing locks, follow this order. Lower
 * numbers are more important. Thus, the i40e_glock, which is number 0, must be
 * taken before any other locks in the driver. On the other hand, the
 * i40e_t`i40e_stat_lock has the highest number because it's the least
 * important lock. Note that just because one lock is higher than another does
 * not mean that all intermediary locks are required.
 *
 * 0) i40e_glock
 * 1) i40e_t`i40e_general_lock
 *
 * 2) i40e_trqpair_t`itrq_rx_lock
 * 3) i40e_trqpair_t`itrq_tx_lock
 * 4) i40e_t`i40e_rx_pending_lock
 * 5) i40e_trqpair_t`itrq_tcb_lock
 *
 * 6) i40e_t`i40e_stat_lock
 *
 * Rules and expectations:
 *
 * 1) A thread holding locks belonging to one PF should not hold locks
 * belonging to a second. If for some reason this becomes necessary, locks
 * should be grabbed based on the list order in the i40e_device_t, which
 * implies that the i40e_glock is held.
 *
 * 2) When grabbing locks between multiple transmit and receive queues, the
 * locks for the lowest-numbered transmit/receive queue should be grabbed
 * first.
 *
 * 3) When grabbing both the transmit and receive lock for a given queue, always
 * grab i40e_trqpair_t`itrq_rx_lock before the i40e_trqpair_t`itrq_tx_lock.
 *
 * 4) The following pairs of locks are not expected to be held at the same time:
 *
 * o i40e_t`i40e_rx_pending_lock and i40e_trqpair_t`itrq_tcb_lock
 *
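 * As a minimal sketch of the numbered order and rule 3, a thread that needs
 * the general lock and both ring locks for a single queue pair would acquire
 * them as follows, releasing them in the reverse order:
 *
 *	mutex_enter(&i40e->i40e_general_lock);
 *	mutex_enter(&itrq->itrq_rx_lock);
 *	mutex_enter(&itrq->itrq_tx_lock);
 *	...
 *	mutex_exit(&itrq->itrq_tx_lock);
 *	mutex_exit(&itrq->itrq_rx_lock);
 *	mutex_exit(&i40e->i40e_general_lock);
 *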
 * -----------
 * Future Work
 * -----------
 *
 * At the moment the i40e driver is rather bare bones, allowing us to start
 * getting data flowing and folks using it while we develop additional features.
 * While bugs have been filed to cover this future work, the following gives an
 * overview of expected work:
 *
 *  o DMA binding and breaking up the locking in ring recycling.
 *  o Enhanced detection of device errors
 *  o Participation in IRM
 *  o FMA device reset
 *  o Stall detection, temperature error detection, etc.
 *  o More dynamic resource pools
 */

#include "i40e_sw.h"

static char i40e_ident[] = "Intel 10/40Gb Ethernet v1.0.3";

/*
 * The i40e_glock primarily protects the lists below and the i40e_device_t
 * structures.
 */
static kmutex_t i40e_glock;
static list_t i40e_glist;
static list_t i40e_dlist;

/*
 * Access attributes for register mapping.
 */
static ddi_device_acc_attr_t i40e_regs_acc_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_FLAGERR_ACC
};

/*
 * Logging function for this driver.
 */
static void
i40e_dev_err(i40e_t *i40e, int level, boolean_t console, const char *fmt,
    va_list ap)
{
	char buf[1024];

	(void) vsnprintf(buf, sizeof (buf), fmt, ap);

	if (i40e == NULL) {
		cmn_err(level, (console) ? "%s: %s" : "!%s: %s",
		    I40E_MODULE_NAME, buf);
	} else {
		dev_err(i40e->i40e_dip, level, (console) ? "%s" : "!%s",
		    buf);
	}
}

/*
 * Because there's the stupid trailing-comma problem with the C preprocessor
 * and variable arguments, I need to instantiate these. Pardon the redundant
 * code.
 */
/*PRINTFLIKE2*/
void
i40e_error(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_WARN, B_FALSE, fmt, ap);
	va_end(ap);
}

/*PRINTFLIKE2*/
void
i40e_log(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_NOTE, B_FALSE, fmt, ap);
	va_end(ap);
}

/*PRINTFLIKE2*/
void
i40e_notice(i40e_t *i40e, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	i40e_dev_err(i40e, CE_NOTE, B_TRUE, fmt, ap);
	va_end(ap);
}

/*
 * Various parts of the driver need to know if the controller is from the X722
 * family, which has a few additional capabilities and different programming
 * means. We don't consider virtual functions as part of this as they are quite
 * different and will require substantially more work.
 */
static boolean_t
i40e_is_x722(i40e_t *i40e)
{
	return (i40e->i40e_hw_space.mac.type == I40E_MAC_X722);
}

static void
i40e_device_rele(i40e_t *i40e)
{
	i40e_device_t *idp = i40e->i40e_device;

	if (idp == NULL)
		return;

	mutex_enter(&i40e_glock);
	VERIFY(idp->id_nreg > 0);
	list_remove(&idp->id_i40e_list, i40e);
	idp->id_nreg--;
	if (idp->id_nreg == 0) {
		list_remove(&i40e_dlist, idp);
		list_destroy(&idp->id_i40e_list);
		kmem_free(idp->id_rsrcs, sizeof (i40e_switch_rsrc_t) *
		    idp->id_rsrcs_alloc);
		kmem_free(idp, sizeof (i40e_device_t));
	}
	i40e->i40e_device = NULL;
	mutex_exit(&i40e_glock);
}

static i40e_device_t *
i40e_device_find(i40e_t *i40e, dev_info_t *parent, uint_t bus, uint_t device)
{
	i40e_device_t *idp;
	mutex_enter(&i40e_glock);
	for (idp = list_head(&i40e_dlist); idp != NULL;
	    idp = list_next(&i40e_dlist, idp)) {
		if (idp->id_parent == parent && idp->id_pci_bus == bus &&
		    idp->id_pci_device == device) {
			break;
		}
	}

	if (idp != NULL) {
		VERIFY(idp->id_nreg < idp->id_nfuncs);
		idp->id_nreg++;
	} else {
		i40e_hw_t *hw = &i40e->i40e_hw_space;
		ASSERT(hw->num_ports > 0);
		ASSERT(hw->num_partitions > 0);

		/*
		 * The Intel common code doesn't keep the number of PCI
		 * functions directly; however, it calculates it during the
		 * discovery of partitions and ports. So what we do here is
		 * undo that original calculation, as functions are evenly
		 * spread across ports even in the rare case of partitions.
		 */
		idp = kmem_alloc(sizeof (i40e_device_t), KM_SLEEP);
		idp->id_parent = parent;
		idp->id_pci_bus = bus;
		idp->id_pci_device = device;
		idp->id_nfuncs = hw->num_ports * hw->num_partitions;
		idp->id_nreg = 1;
		idp->id_rsrcs_alloc = i40e->i40e_switch_rsrc_alloc;
		idp->id_rsrcs_act = i40e->i40e_switch_rsrc_actual;
		idp->id_rsrcs = kmem_alloc(sizeof (i40e_switch_rsrc_t) *
		    idp->id_rsrcs_alloc, KM_SLEEP);
		bcopy(i40e->i40e_switch_rsrcs, idp->id_rsrcs,
		    sizeof (i40e_switch_rsrc_t) * idp->id_rsrcs_alloc);
		list_create(&idp->id_i40e_list, sizeof (i40e_t),
		    offsetof(i40e_t, i40e_dlink));

		list_insert_tail(&i40e_dlist, idp);
	}

	list_insert_tail(&idp->id_i40e_list, i40e);
	mutex_exit(&i40e_glock);

	return (idp);
}

static void
i40e_link_state_set(i40e_t *i40e, link_state_t state)
{
	if (i40e->i40e_link_state == state)
		return;

	i40e->i40e_link_state = state;
	mac_link_update(i40e->i40e_mac_hdl, i40e->i40e_link_state);
}

/*
 * This is a basic link check routine. Mostly we're using this just to see
 * if we can get any accurate information about the state of the link being
 * up or down, as well as updating the link state, speed, etc. information.
 */
void
i40e_link_check(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	boolean_t ls;
	int ret;

	ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));

	hw->phy.get_link_info = B_TRUE;
	if ((ret = i40e_get_link_status(hw, &ls)) != I40E_SUCCESS) {
		i40e->i40e_s_link_status_errs++;
		i40e->i40e_s_link_status_lasterr = ret;
		return;
	}

	/*
	 * Firmware abstracts all of the mac and phy information for us, so we
	 * can use i40e_get_link_status to determine the current state.
	 */
	if (ls == B_TRUE) {
		enum i40e_aq_link_speed speed;

		speed = i40e_get_link_speed(hw);

		/*
		 * Translate from an i40e value to a value in Mbits/s.
		 */
		switch (speed) {
		case I40E_LINK_SPEED_100MB:
			i40e->i40e_link_speed = 100;
			break;
		case I40E_LINK_SPEED_1GB:
			i40e->i40e_link_speed = 1000;
			break;
		case I40E_LINK_SPEED_10GB:
			i40e->i40e_link_speed = 10000;
			break;
		case I40E_LINK_SPEED_20GB:
			i40e->i40e_link_speed = 20000;
			break;
		case I40E_LINK_SPEED_40GB:
			i40e->i40e_link_speed = 40000;
			break;
		case I40E_LINK_SPEED_25GB:
			i40e->i40e_link_speed = 25000;
			break;
		default:
			i40e->i40e_link_speed = 0;
			break;
		}

		/*
		 * At this time, hardware does not support half-duplex
		 * operation, hence why we don't ask the hardware about our
		 * current duplex setting.
		 */
		i40e->i40e_link_duplex = LINK_DUPLEX_FULL;
		i40e_link_state_set(i40e, LINK_STATE_UP);
	} else {
		i40e->i40e_link_speed = 0;
		i40e->i40e_link_duplex = 0;
		i40e_link_state_set(i40e, LINK_STATE_DOWN);
	}
}

static void
i40e_rem_intrs(i40e_t *i40e)
{
	int i, rc;

	for (i = 0; i < i40e->i40e_intr_count; i++) {
		rc = ddi_intr_free(i40e->i40e_intr_handles[i]);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "failed to free interrupt %d: %d",
			    i, rc);
		}
	}

	kmem_free(i40e->i40e_intr_handles, i40e->i40e_intr_size);
	i40e->i40e_intr_handles = NULL;
}

static void
i40e_rem_intr_handlers(i40e_t *i40e)
{
	int i, rc;

	for (i = 0; i < i40e->i40e_intr_count; i++) {
		rc = ddi_intr_remove_handler(i40e->i40e_intr_handles[i]);
		if (rc != DDI_SUCCESS) {
			i40e_log(i40e, "failed to remove interrupt %d: %d",
			    i, rc);
		}
	}
}

/*
 * illumos Fault Management Architecture (FMA) support.
 */

int
i40e_check_acc_handle(ddi_acc_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(handle, DDI_FME_VERSION);
	return (de.fme_status);
}

int
i40e_check_dma_handle(ddi_dma_handle_t handle)
{
	ddi_fm_error_t de;

	ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
	return (de.fme_status);
}

/*
 * Fault service error handling callback function.
 */
/* ARGSUSED */
static int
i40e_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}

static void
i40e_fm_init(i40e_t *i40e)
{
	ddi_iblock_cookie_t iblk;

	i40e->i40e_fm_capabilities = ddi_prop_get_int(DDI_DEV_T_ANY,
	    i40e->i40e_dip, DDI_PROP_DONTPASS, "fm_capable",
	    DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
	    DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);

	if (i40e->i40e_fm_capabilities < 0) {
		i40e->i40e_fm_capabilities = 0;
	} else if (i40e->i40e_fm_capabilities > 0xf) {
		i40e->i40e_fm_capabilities = DDI_FM_EREPORT_CAPABLE |
		    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE |
		    DDI_FM_ERRCB_CAPABLE;
	}

	/*
	 * Only register with IO Fault Services if we have some capability.
	 */
	if (i40e->i40e_fm_capabilities & DDI_FM_ACCCHK_CAPABLE) {
		i40e_regs_acc_attr.devacc_attr_access = DDI_FLAGERR_ACC;
	} else {
		i40e_regs_acc_attr.devacc_attr_access = DDI_DEFAULT_ACC;
	}

	if (i40e->i40e_fm_capabilities) {
		ddi_fm_init(i40e->i40e_dip, &i40e->i40e_fm_capabilities, &iblk);

		if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
			pci_ereport_setup(i40e->i40e_dip);
		}

		if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities)) {
			ddi_fm_handler_register(i40e->i40e_dip,
			    i40e_fm_error_cb, (void *)i40e);
		}
	}

	if (i40e->i40e_fm_capabilities & DDI_FM_DMACHK_CAPABLE) {
		i40e_init_dma_attrs(i40e, B_TRUE);
	} else {
		i40e_init_dma_attrs(i40e, B_FALSE);
	}
}

static void
i40e_fm_fini(i40e_t *i40e)
{
	if (i40e->i40e_fm_capabilities) {

		if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities) ||
		    DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities))
			pci_ereport_teardown(i40e->i40e_dip);

		if (DDI_FM_ERRCB_CAP(i40e->i40e_fm_capabilities))
			ddi_fm_handler_unregister(i40e->i40e_dip);

		ddi_fm_fini(i40e->i40e_dip);
	}
}

void
i40e_fm_ereport(i40e_t *i40e, char *detail)
{
	uint64_t ena;
	char buf[FM_MAX_CLASS];

	(void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
	ena = fm_ena_generate(0, FM_ENA_FMT1);
	if (DDI_FM_EREPORT_CAP(i40e->i40e_fm_capabilities)) {
		ddi_fm_ereport_post(i40e->i40e_dip, buf, ena, DDI_NOSLEEP,
		    FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERS0, NULL);
	}
}
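
/*
 * Callers post an ereport when they detect a device problem; for example,
 * i40e_common_code_init() below does the following when the PF reset fails:
 *
 *	i40e_fm_ereport(i40e, DDI_FM_DEVICE_NO_RESPONSE);
 */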

/*
 * Here we're trying to set the SEID of the default VSI. In general,
 * when we come through and look at this shortly after attach, we
 * expect there to only be a single element present, which is the
 * default VSI. Importantly, each PF seems to not see any other
 * devices, in part because of the simple switch mode that we're
 * using. If, for some reason, we see more artifacts, we'll need to
 * revisit what we're doing here.
 */
static boolean_t
i40e_set_def_vsi_seid(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	uint8_t aq_buf[I40E_AQ_LARGE_BUF];
	uint16_t next = 0;
	int rc;

	/* LINTED: E_BAD_PTR_CAST_ALIGN */
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next,
	    NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d",
		    rc, hw->aq.asq_last_status);
		return (B_FALSE);
	}

	if (LE_16(sw_config->header.num_reported) != 1) {
		i40e_error(i40e, "encountered multiple (%d) switching units "
		    "during attach, not proceeding",
		    LE_16(sw_config->header.num_reported));
		return (B_FALSE);
	}

	I40E_DEF_VSI_SEID(i40e) = sw_config->element[0].seid;
	return (B_TRUE);
}

/*
 * Get the SEID of the uplink MAC.
 */
static int
i40e_get_mac_seid(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_aqc_get_switch_config_resp *sw_config;
	uint8_t aq_buf[I40E_AQ_LARGE_BUF];
	uint16_t next = 0;
	int rc;

	/* LINTED: E_BAD_PTR_CAST_ALIGN */
	sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
	rc = i40e_aq_get_switch_config(hw, sw_config, sizeof (aq_buf), &next,
	    NULL);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "i40e_aq_get_switch_config() failed %d: %d",
		    rc, hw->aq.asq_last_status);
		return (-1);
	}

	return (LE_16(sw_config->element[0].uplink_seid));
}

/*
 * We need to fill the i40e_hw_t structure with the capabilities of this PF. We
 * must also provide the memory for it; however, we don't need to keep it
 * around past the call to the common code, which takes it and parses it into
 * an internal structure.
 */
static boolean_t
i40e_get_hw_capabilities(i40e_t *i40e, i40e_hw_t *hw)
{
	struct i40e_aqc_list_capabilities_element_resp *buf;
	int rc;
	size_t len;
	uint16_t needed;
	int nelems = I40E_HW_CAP_DEFAULT;

	len = nelems * sizeof (*buf);

	for (;;) {
		ASSERT(len > 0);
		buf = kmem_alloc(len, KM_SLEEP);
		rc = i40e_aq_discover_capabilities(hw, buf, len,
		    &needed, i40e_aqc_opc_list_func_capabilities, NULL);
		kmem_free(buf, len);

		if (hw->aq.asq_last_status == I40E_AQ_RC_ENOMEM &&
		    nelems == I40E_HW_CAP_DEFAULT) {
			if (nelems == needed) {
				i40e_error(i40e, "Capability discovery failed "
				    "due to byzantine common code");
				return (B_FALSE);
			}
			len = needed;
			continue;
		} else if (rc != I40E_SUCCESS ||
		    hw->aq.asq_last_status != I40E_AQ_RC_OK) {
			i40e_error(i40e, "Capability discovery failed: %d", rc);
			return (B_FALSE);
		}

		break;
	}

	return (B_TRUE);
}

/*
 * Obtain the switch's capabilities as seen by this PF and keep it around for
 * our later use.
 */
static boolean_t
i40e_get_switch_resources(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	uint8_t cnt = 2;
	uint8_t act;
	size_t size;
	i40e_switch_rsrc_t *buf;

	for (;;) {
		enum i40e_status_code ret;
		size = cnt * sizeof (i40e_switch_rsrc_t);
		ASSERT(size > 0);
		if (size > UINT16_MAX)
			return (B_FALSE);
		buf = kmem_alloc(size, KM_SLEEP);

		ret = i40e_aq_get_switch_resource_alloc(hw, &act, buf,
		    cnt, NULL);
		if (ret == I40E_ERR_ADMIN_QUEUE_ERROR &&
		    hw->aq.asq_last_status == I40E_AQ_RC_EINVAL) {
			kmem_free(buf, size);
			cnt += I40E_SWITCH_CAP_DEFAULT;
			continue;
		} else if (ret != I40E_SUCCESS) {
			kmem_free(buf, size);
			i40e_error(i40e,
			    "failed to retrieve switch resources: %d", ret);
			return (B_FALSE);
		}

		break;
	}

	i40e->i40e_switch_rsrc_alloc = cnt;
	i40e->i40e_switch_rsrc_actual = act;
	i40e->i40e_switch_rsrcs = buf;

	return (B_TRUE);
}

static void
i40e_cleanup_resources(i40e_t *i40e)
{
	if (i40e->i40e_uaddrs != NULL) {
		kmem_free(i40e->i40e_uaddrs, sizeof (i40e_uaddr_t) *
		    i40e->i40e_resources.ifr_nmacfilt);
		i40e->i40e_uaddrs = NULL;
	}

	if (i40e->i40e_maddrs != NULL) {
		kmem_free(i40e->i40e_maddrs, sizeof (i40e_maddr_t) *
		    i40e->i40e_resources.ifr_nmcastfilt);
		i40e->i40e_maddrs = NULL;
	}

	if (i40e->i40e_switch_rsrcs != NULL) {
		size_t sz = sizeof (i40e_switch_rsrc_t) *
		    i40e->i40e_switch_rsrc_alloc;
		ASSERT(sz > 0);
		kmem_free(i40e->i40e_switch_rsrcs, sz);
		i40e->i40e_switch_rsrcs = NULL;
	}

	if (i40e->i40e_device != NULL)
		i40e_device_rele(i40e);
}

static boolean_t
i40e_get_available_resources(i40e_t *i40e)
{
	dev_info_t *parent;
	uint16_t bus, device, func;
	uint_t nregs;
	int *regs, i;
	i40e_device_t *idp;
	i40e_hw_t *hw = &i40e->i40e_hw_space;

	parent = ddi_get_parent(i40e->i40e_dip);

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, i40e->i40e_dip, 0, "reg",
	    &regs, &nregs) != DDI_PROP_SUCCESS) {
		return (B_FALSE);
	}

	if (nregs < 1) {
		ddi_prop_free(regs);
		return (B_FALSE);
	}

	bus = PCI_REG_BUS_G(regs[0]);
	device = PCI_REG_DEV_G(regs[0]);
	func = PCI_REG_FUNC_G(regs[0]);
	ddi_prop_free(regs);

	i40e->i40e_hw_space.bus.func = func;
	i40e->i40e_hw_space.bus.device = device;

	if (i40e_get_switch_resources(i40e) == B_FALSE) {
		return (B_FALSE);
	}

	/*
	 * To calculate the total amount of a resource we have available, we
	 * need to add how many our i40e_t thinks it has guaranteed, if any, and
	 * then we need to go through and divide the number of available on the
	 * device, which was snapshotted before anyone should have allocated
	 * anything, and use that to derive how many are available from the
	 * pool. Longer term, we may want to turn this into something that's
	 * more of a pool-like resource that everything can share (though that
	 * may require some more assistance from MAC).
	 *
	 * Though for transmit and receive queue pairs, we just have to ask
	 * firmware instead.
	 */
	idp = i40e_device_find(i40e, parent, bus, device);
	i40e->i40e_device = idp;
	i40e->i40e_resources.ifr_nvsis = 0;
	i40e->i40e_resources.ifr_nvsis_used = 0;
	i40e->i40e_resources.ifr_nmacfilt = 0;
	i40e->i40e_resources.ifr_nmacfilt_used = 0;
	i40e->i40e_resources.ifr_nmcastfilt = 0;
	i40e->i40e_resources.ifr_nmcastfilt_used = 0;

	for (i = 0; i < i40e->i40e_switch_rsrc_actual; i++) {
		i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];

		switch (srp->resource_type) {
		case I40E_AQ_RESOURCE_TYPE_VSI:
			i40e->i40e_resources.ifr_nvsis +=
			    LE_16(srp->guaranteed);
			i40e->i40e_resources.ifr_nvsis_used = LE_16(srp->used);
			break;
		case I40E_AQ_RESOURCE_TYPE_MACADDR:
			i40e->i40e_resources.ifr_nmacfilt +=
			    LE_16(srp->guaranteed);
			i40e->i40e_resources.ifr_nmacfilt_used =
			    LE_16(srp->used);
			break;
		case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
			i40e->i40e_resources.ifr_nmcastfilt +=
			    LE_16(srp->guaranteed);
			i40e->i40e_resources.ifr_nmcastfilt_used =
			    LE_16(srp->used);
			break;
		default:
			break;
		}
	}

	for (i = 0; i < idp->id_rsrcs_act; i++) {
		i40e_switch_rsrc_t *srp = &i40e->i40e_switch_rsrcs[i];
		switch (srp->resource_type) {
		case I40E_AQ_RESOURCE_TYPE_VSI:
			i40e->i40e_resources.ifr_nvsis +=
			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
			break;
		case I40E_AQ_RESOURCE_TYPE_MACADDR:
			i40e->i40e_resources.ifr_nmacfilt +=
			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
			break;
		case I40E_AQ_RESOURCE_TYPE_MULTICAST_HASH:
			i40e->i40e_resources.ifr_nmcastfilt +=
			    LE_16(srp->total_unalloced) / idp->id_nfuncs;
			break;
		default:
			break;
		}
	}

	i40e->i40e_resources.ifr_nrx_queue = hw->func_caps.num_rx_qp;
	i40e->i40e_resources.ifr_ntx_queue = hw->func_caps.num_tx_qp;

	i40e->i40e_uaddrs = kmem_zalloc(sizeof (i40e_uaddr_t) *
	    i40e->i40e_resources.ifr_nmacfilt, KM_SLEEP);
	i40e->i40e_maddrs = kmem_zalloc(sizeof (i40e_maddr_t) *
	    i40e->i40e_resources.ifr_nmcastfilt, KM_SLEEP);

	/*
	 * Initialize these as multicast addresses to indicate that they're
	 * invalid, for sanity purposes. Think of it like 0xdeadbeef.
	 */
	for (i = 0; i < i40e->i40e_resources.ifr_nmacfilt; i++)
		i40e->i40e_uaddrs[i].iua_mac[0] = 0x01;

	return (B_TRUE);
}

static boolean_t
i40e_enable_interrupts(i40e_t *i40e)
{
	int i, rc;

	if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
		rc = ddi_intr_block_enable(i40e->i40e_intr_handles,
		    i40e->i40e_intr_count);
		if (rc != DDI_SUCCESS) {
			i40e_error(i40e, "Interrupt block-enable failed: %d",
			    rc);
			return (B_FALSE);
		}
	} else {
		for (i = 0; i < i40e->i40e_intr_count; i++) {
			rc = ddi_intr_enable(i40e->i40e_intr_handles[i]);
			if (rc != DDI_SUCCESS) {
				i40e_error(i40e,
				    "Failed to enable interrupt %d: %d", i, rc);
				while (--i >= 0) {
					(void) ddi_intr_disable(
					    i40e->i40e_intr_handles[i]);
				}
				return (B_FALSE);
			}
		}
	}

	return (B_TRUE);
}

static boolean_t
i40e_disable_interrupts(i40e_t *i40e)
{
	int i, rc;

	if (i40e->i40e_intr_cap & DDI_INTR_FLAG_BLOCK) {
		rc = ddi_intr_block_disable(i40e->i40e_intr_handles,
		    i40e->i40e_intr_count);
		if (rc != DDI_SUCCESS) {
			i40e_error(i40e,
			    "Interrupt block-disable failed: %d", rc);
			return (B_FALSE);
		}
	} else {
		for (i = 0; i < i40e->i40e_intr_count; i++) {
			rc = ddi_intr_disable(i40e->i40e_intr_handles[i]);
			if (rc != DDI_SUCCESS) {
				i40e_error(i40e,
				    "Failed to disable interrupt %d: %d",
				    i, rc);
				return (B_FALSE);
			}
		}
	}

	return (B_TRUE);
}

/*
 * Free receive & transmit rings.
 */
static void
i40e_free_trqpairs(i40e_t *i40e)
{
	i40e_trqpair_t *itrq;

	if (i40e->i40e_rx_groups != NULL) {
		kmem_free(i40e->i40e_rx_groups,
		    sizeof (i40e_rx_group_t) * i40e->i40e_num_rx_groups);
		i40e->i40e_rx_groups = NULL;
	}

	if (i40e->i40e_trqpairs != NULL) {
		for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
			itrq = &i40e->i40e_trqpairs[i];
			mutex_destroy(&itrq->itrq_rx_lock);
			mutex_destroy(&itrq->itrq_tx_lock);
			mutex_destroy(&itrq->itrq_tcb_lock);

			/*
			 * Should have already been cleaned up by start/stop,
			 * etc.
			 */
			ASSERT(itrq->itrq_txkstat == NULL);
			ASSERT(itrq->itrq_rxkstat == NULL);
		}

		kmem_free(i40e->i40e_trqpairs,
		    sizeof (i40e_trqpair_t) * i40e->i40e_num_trqpairs);
		i40e->i40e_trqpairs = NULL;
	}

	cv_destroy(&i40e->i40e_rx_pending_cv);
	mutex_destroy(&i40e->i40e_rx_pending_lock);
	mutex_destroy(&i40e->i40e_general_lock);
}

/*
 * Allocate transmit and receive rings, as well as other data structures that
 * we need.
 */
static boolean_t
i40e_alloc_trqpairs(i40e_t *i40e)
{
	void *mutexpri = DDI_INTR_PRI(i40e->i40e_intr_pri);

	/*
	 * Now that we have the priority for the interrupts, initialize
	 * all relevant locks.
	 */
	mutex_init(&i40e->i40e_general_lock, NULL, MUTEX_DRIVER, mutexpri);
	mutex_init(&i40e->i40e_rx_pending_lock, NULL, MUTEX_DRIVER, mutexpri);
	cv_init(&i40e->i40e_rx_pending_cv, NULL, CV_DRIVER, NULL);

	i40e->i40e_trqpairs = kmem_zalloc(sizeof (i40e_trqpair_t) *
	    i40e->i40e_num_trqpairs, KM_SLEEP);
	for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
		i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];

		itrq->itrq_i40e = i40e;
		mutex_init(&itrq->itrq_rx_lock, NULL, MUTEX_DRIVER, mutexpri);
		mutex_init(&itrq->itrq_tx_lock, NULL, MUTEX_DRIVER, mutexpri);
		mutex_init(&itrq->itrq_tcb_lock, NULL, MUTEX_DRIVER, mutexpri);
		itrq->itrq_index = i;
	}

	i40e->i40e_rx_groups = kmem_zalloc(sizeof (i40e_rx_group_t) *
	    i40e->i40e_num_rx_groups, KM_SLEEP);

	for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) {
		i40e_rx_group_t *rxg = &i40e->i40e_rx_groups[i];

		rxg->irg_index = i;
		rxg->irg_i40e = i40e;
	}

	return (B_TRUE);
}

/*
 * Unless a .conf file already overrode i40e_t structure values, they will
 * be 0, and need to be set in conjunction with the now-available HW report.
 */
/* ARGSUSED */
static void
i40e_hw_to_instance(i40e_t *i40e, i40e_hw_t *hw)
{
	if (i40e->i40e_num_trqpairs_per_vsi == 0) {
		if (i40e_is_x722(i40e)) {
			i40e->i40e_num_trqpairs_per_vsi =
			    I40E_722_MAX_TC_QUEUES;
		} else {
			i40e->i40e_num_trqpairs_per_vsi =
			    I40E_710_MAX_TC_QUEUES;
		}
	}

	if (i40e->i40e_num_rx_groups == 0) {
		i40e->i40e_num_rx_groups = I40E_GROUP_MAX;
	}
}

/*
 * Free any resources required by, or set up by, the Intel common code.
 */
static void
i40e_common_code_fini(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	int rc;

	rc = i40e_shutdown_lan_hmc(hw);
	if (rc != I40E_SUCCESS)
		i40e_error(i40e, "failed to shutdown LAN hmc: %d", rc);

	rc = i40e_shutdown_adminq(hw);
	if (rc != I40E_SUCCESS)
		i40e_error(i40e, "failed to shutdown admin queue: %d", rc);
}

/*
 * Initialize and call Intel common-code routines, including some setup
 * the common code expects from the driver. Also prints on failure, so
 * the caller doesn't have to.
 */
static boolean_t
i40e_common_code_init(i40e_t *i40e, i40e_hw_t *hw)
{
	int rc;

	i40e_clear_hw(hw);
	rc = i40e_pf_reset(hw);
	if (rc != 0) {
		i40e_error(i40e, "failed to reset hardware: %d", rc);
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_NO_RESPONSE);
		return (B_FALSE);
	}

	rc = i40e_init_shared_code(hw);
	if (rc != 0) {
		i40e_error(i40e, "failed to initialize i40e core: %d", rc);
		return (B_FALSE);
	}

	hw->aq.num_arq_entries = I40E_DEF_ADMINQ_SIZE;
	hw->aq.num_asq_entries = I40E_DEF_ADMINQ_SIZE;
	hw->aq.arq_buf_size = I40E_ADMINQ_BUFSZ;
	hw->aq.asq_buf_size = I40E_ADMINQ_BUFSZ;

	rc = i40e_init_adminq(hw);
	if (rc != 0) {
		i40e_error(i40e, "failed to initialize firmware admin queue: "
		    "%d, potential firmware version mismatch", rc);
		i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
		return (B_FALSE);
	}

	if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
	    hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR) {
		i40e_log(i40e, "The driver for the device detected a newer "
		    "version of the NVM image (%d.%d) than expected (%d.%d).\n"
		    "Please install the most recent version of the network "
		    "driver.\n", hw->aq.api_maj_ver, hw->aq.api_min_ver,
		    I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR);
	} else if (hw->aq.api_maj_ver < I40E_FW_API_VERSION_MAJOR ||
	    hw->aq.api_min_ver < (I40E_FW_API_VERSION_MINOR - 1)) {
		i40e_log(i40e, "The driver for the device detected an older"
		    " version of the NVM image (%d.%d) than expected (%d.%d)."
		    "\nPlease update the NVM image.\n",
		    hw->aq.api_maj_ver, hw->aq.api_min_ver,
		    I40E_FW_API_VERSION_MAJOR, I40E_FW_API_VERSION_MINOR - 1);
	}

	i40e_clear_pxe_mode(hw);

	/*
	 * We need to call this so that the common code can discover
	 * capabilities of the hardware, which it uses throughout the rest.
	 */
	if (!i40e_get_hw_capabilities(i40e, hw)) {
		i40e_error(i40e, "failed to obtain hardware capabilities");
		return (B_FALSE);
	}

	if (i40e_get_available_resources(i40e) == B_FALSE) {
		i40e_error(i40e, "failed to obtain hardware resources");
		return (B_FALSE);
	}

	i40e_hw_to_instance(i40e, hw);

	rc = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
	    hw->func_caps.num_rx_qp, 0, 0);
	if (rc != 0) {
		i40e_error(i40e, "failed to initialize hardware memory cache: "
		    "%d", rc);
		return (B_FALSE);
	}

	rc = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
	if (rc != 0) {
		i40e_error(i40e, "failed to configure hardware memory cache: "
		    "%d", rc);
		return (B_FALSE);
	}

	(void) i40e_aq_stop_lldp(hw, TRUE, NULL);

	rc = i40e_get_mac_addr(hw, hw->mac.addr);
	if (rc != I40E_SUCCESS) {
		i40e_error(i40e, "failed to retrieve hardware mac address: %d",
		    rc);
		return (B_FALSE);
	}

	rc = i40e_validate_mac_addr(hw->mac.addr);
	if (rc != 0) {
		i40e_error(i40e, "failed to validate internal mac address: "
		    "%d", rc);
		return (B_FALSE);
	}
	bcopy(hw->mac.addr, hw->mac.perm_addr, ETHERADDRL);
	if ((rc = i40e_get_port_mac_addr(hw, hw->mac.port_addr)) !=
	    I40E_SUCCESS) {
		i40e_error(i40e, "failed to retrieve port mac address: %d",
		    rc);
		return (B_FALSE);
	}

	/*
	 * We need to obtain the SEID of the default VSI (Virtual Station
	 * Interface) before we can perform other operations on the device.
	 */
	if (!i40e_set_def_vsi_seid(i40e)) {
		i40e_error(i40e, "failed to obtain Default VSI SEID");
		return (B_FALSE);
	}

	return (B_TRUE);
}

static void
i40e_unconfigure(dev_info_t *devinfo, i40e_t *i40e)
{
	int rc;

	if (i40e->i40e_attach_progress & I40E_ATTACH_ENABLE_INTR)
		(void) i40e_disable_interrupts(i40e);

	if ((i40e->i40e_attach_progress & I40E_ATTACH_LINK_TIMER) &&
	    i40e->i40e_periodic_id != 0) {
		ddi_periodic_delete(i40e->i40e_periodic_id);
		i40e->i40e_periodic_id = 0;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_MAC) {
		rc = mac_unregister(i40e->i40e_mac_hdl);
		if (rc != 0) {
			i40e_error(i40e, "failed to unregister from mac: %d",
			    rc);
		}
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_STATS) {
		i40e_stats_fini(i40e);
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_ADD_INTR)
		i40e_rem_intr_handlers(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_RINGSLOCKS)
		i40e_free_trqpairs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_ALLOC_INTR)
		i40e_rem_intrs(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_COMMON_CODE)
		i40e_common_code_fini(i40e);

	i40e_cleanup_resources(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_PROPS)
		(void) ddi_prop_remove_all(devinfo);

	if ((i40e->i40e_attach_progress & I40E_ATTACH_REGS_MAP) &&
	    i40e->i40e_osdep_space.ios_reg_handle != NULL) {
		ddi_regs_map_free(&i40e->i40e_osdep_space.ios_reg_handle);
		i40e->i40e_osdep_space.ios_reg_handle = NULL;
	}

	if ((i40e->i40e_attach_progress & I40E_ATTACH_PCI_CONFIG) &&
	    i40e->i40e_osdep_space.ios_cfg_handle != NULL) {
		pci_config_teardown(&i40e->i40e_osdep_space.ios_cfg_handle);
		i40e->i40e_osdep_space.ios_cfg_handle = NULL;
	}

	if (i40e->i40e_attach_progress & I40E_ATTACH_FM_INIT)
		i40e_fm_fini(i40e);

	if (i40e->i40e_attach_progress & I40E_ATTACH_UFM_INIT)
		ddi_ufm_fini(i40e->i40e_ufmh);

	kmem_free(i40e->i40e_aqbuf, I40E_ADMINQ_BUFSZ);
	kmem_free(i40e, sizeof (i40e_t));

	ddi_set_driver_private(devinfo, NULL);
}

static boolean_t
i40e_final_init(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = OS_DEP(hw);
	uint8_t pbanum[I40E_PBANUM_STRLEN];
	enum i40e_status_code irc;
	char buf[I40E_DDI_PROP_LEN];

	pbanum[0] = '\0';
	irc = i40e_read_pba_string(hw, pbanum, sizeof (pbanum));
	if (irc != I40E_SUCCESS) {
		i40e_log(i40e, "failed to read PBA string: %d", irc);
	} else {
		(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
		    "printed-board-assembly", (char *)pbanum);
	}

#ifdef DEBUG
	ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.fw_maj_ver,
	    hw->aq.fw_min_ver) < sizeof (buf));
	ASSERT(snprintf(NULL, 0, "%x", hw->aq.fw_build) < sizeof (buf));
	ASSERT(snprintf(NULL, 0, "%d.%d", hw->aq.api_maj_ver,
	    hw->aq.api_min_ver) < sizeof (buf));
#endif

	(void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.fw_maj_ver,
	    hw->aq.fw_min_ver);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "firmware-version", buf);
	(void) snprintf(buf, sizeof (buf), "%x", hw->aq.fw_build);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "firmware-build", buf);
	(void) snprintf(buf, sizeof (buf), "%d.%d", hw->aq.api_maj_ver,
	    hw->aq.api_min_ver);
	(void) ddi_prop_update_string(DDI_DEV_T_NONE, i40e->i40e_dip,
	    "api-version", buf);

	if (!i40e_set_hw_bus_info(hw))
		return (B_FALSE);

	if (i40e_check_acc_handle(osdep->ios_reg_handle) != DDI_FM_OK) {
		ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}

	return (B_TRUE);
}

static void
i40e_identify_hardware(i40e_t *i40e)
{
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = &i40e->i40e_osdep_space;

	hw->vendor_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_VENID);
	hw->device_id = pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_DEVID);
	hw->revision_id = pci_config_get8(osdep->ios_cfg_handle,
	    PCI_CONF_REVID);
	hw->subsystem_device_id =
	    pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBSYSID);
	hw->subsystem_vendor_id =
	    pci_config_get16(osdep->ios_cfg_handle, PCI_CONF_SUBVENID);

	/*
	 * Note that we set the hardware's bus information later on, in
	 * i40e_get_available_resources(). The common code doesn't seem to
	 * require that it be set in any way; it seems to be mostly for
	 * book-keeping.
	 */
}

static boolean_t
i40e_regs_map(i40e_t *i40e)
{
	dev_info_t *devinfo = i40e->i40e_dip;
	i40e_hw_t *hw = &i40e->i40e_hw_space;
	struct i40e_osdep *osdep = &i40e->i40e_osdep_space;
	off_t memsize;
	int ret;

	if (ddi_dev_regsize(devinfo, I40E_ADAPTER_REGSET, &memsize) !=
	    DDI_SUCCESS) {
		i40e_error(i40e, "Used invalid register set to map PCIe regs");
		return (B_FALSE);
	}

	if ((ret = ddi_regs_map_setup(devinfo, I40E_ADAPTER_REGSET,
	    (caddr_t *)&hw->hw_addr, 0, memsize, &i40e_regs_acc_attr,
	    &osdep->ios_reg_handle)) != DDI_SUCCESS) {
		i40e_error(i40e, "failed to map device registers: %d", ret);
		return (B_FALSE);
	}

	osdep->ios_reg_size = memsize;
	return (B_TRUE);
}

/*
 * Update parameters required when a new MTU has been configured. Calculate the
 * maximum frame size, as well as the size of our DMA buffers, which we size in
 * increments of 1 KB.
 */
void
i40e_update_mtu(i40e_t *i40e)
{
	uint32_t rx, tx;

	i40e->i40e_frame_max = i40e->i40e_sdu +
	    sizeof (struct ether_vlan_header) + ETHERFCSL;

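	/*
	 * Round each buffer size up to the next 1 KB boundary: shift out the
	 * low 10 bits and round up if any of them were set. For example, the
	 * 1522 byte frame maximum that results from the default 1500 byte
	 * MTU yields 2 KB buffers.
	 */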
	rx = i40e->i40e_frame_max + I40E_BUF_IPHDR_ALIGNMENT;
	i40e->i40e_rx_buf_size = ((rx >> 10) +
	    ((rx & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;

	tx = i40e->i40e_frame_max;
	i40e->i40e_tx_buf_size = ((tx >> 10) +
	    ((tx & (((uint32_t)1 << 10) - 1)) > 0 ? 1 : 0)) << 10;
}

static int
i40e_get_prop(i40e_t *i40e, char *prop, int min, int max, int def)
{
	int val;

	val = ddi_prop_get_int(DDI_DEV_T_ANY, i40e->i40e_dip, DDI_PROP_DONTPASS,
	    prop, def);
	if (val > max)
		val = max;
	if (val < min)
		val = min;
	return (val);
}
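
/*
 * For example, a hypothetical i40e.conf line such as
 *
 *	default_mtu = 9000;
 *
 * would be looked up by i40e_init_properties() below and clamped into the
 * [I40E_MIN_MTU, I40E_MAX_MTU] range by the min/max arguments here.
 */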

static void
i40e_init_properties(i40e_t *i40e)
{
	i40e->i40e_sdu = i40e_get_prop(i40e, "default_mtu",
	    I40E_MIN_MTU, I40E_MAX_MTU, I40E_DEF_MTU);

	i40e->i40e_intr_force = i40e_get_prop(i40e, "intr_force",
	    I40E_INTR_NONE, I40E_INTR_LEGACY, I40E_INTR_NONE);

	i40e->i40e_mr_enable = i40e_get_prop(i40e, "mr_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_tx_ring_size = i40e_get_prop(i40e, "tx_ring_size",
	    I40E_MIN_TX_RING_SIZE, I40E_MAX_TX_RING_SIZE,
	    I40E_DEF_TX_RING_SIZE);
	if ((i40e->i40e_tx_ring_size % I40E_DESC_ALIGN) != 0) {
		i40e->i40e_tx_ring_size = P2ROUNDUP(i40e->i40e_tx_ring_size,
		    I40E_DESC_ALIGN);
	}

	i40e->i40e_tx_block_thresh = i40e_get_prop(i40e, "tx_resched_threshold",
	    I40E_MIN_TX_BLOCK_THRESH,
	    i40e->i40e_tx_ring_size - I40E_TX_MAX_COOKIE,
	    I40E_DEF_TX_BLOCK_THRESH);

	i40e->i40e_rx_ring_size = i40e_get_prop(i40e, "rx_ring_size",
	    I40E_MIN_RX_RING_SIZE, I40E_MAX_RX_RING_SIZE,
	    I40E_DEF_RX_RING_SIZE);
	if ((i40e->i40e_rx_ring_size % I40E_DESC_ALIGN) != 0) {
		i40e->i40e_rx_ring_size = P2ROUNDUP(i40e->i40e_rx_ring_size,
		    I40E_DESC_ALIGN);
	}

	i40e->i40e_rx_limit_per_intr = i40e_get_prop(i40e, "rx_limit_per_intr",
	    I40E_MIN_RX_LIMIT_PER_INTR, I40E_MAX_RX_LIMIT_PER_INTR,
	    I40E_DEF_RX_LIMIT_PER_INTR);

	i40e->i40e_tx_hcksum_enable = i40e_get_prop(i40e, "tx_hcksum_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_tx_lso_enable = i40e_get_prop(i40e, "tx_lso_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_rx_hcksum_enable = i40e_get_prop(i40e, "rx_hcksum_enable",
	    B_FALSE, B_TRUE, B_TRUE);

	i40e->i40e_rx_dma_min = i40e_get_prop(i40e, "rx_dma_threshold",
	    I40E_MIN_RX_DMA_THRESH, I40E_MAX_RX_DMA_THRESH,
	    I40E_DEF_RX_DMA_THRESH);

	i40e->i40e_tx_dma_min = i40e_get_prop(i40e, "tx_dma_threshold",
	    I40E_MIN_TX_DMA_THRESH, I40E_MAX_TX_DMA_THRESH,
	    I40E_DEF_TX_DMA_THRESH);

	i40e->i40e_tx_itr = i40e_get_prop(i40e, "tx_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_TX_ITR);

	i40e->i40e_rx_itr = i40e_get_prop(i40e, "rx_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_RX_ITR);

	i40e->i40e_other_itr = i40e_get_prop(i40e, "other_intr_throttle",
	    I40E_MIN_ITR, I40E_MAX_ITR, I40E_DEF_OTHER_ITR);

	if (!i40e->i40e_mr_enable) {
		i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
		i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;
	}

	i40e_update_mtu(i40e);
}

1637 /*
1638 * There are a few constraints on interrupts that we're currently imposing, some
1639 * of which are restrictions from hardware. For a fuller treatment, see
1640 * i40e_intr.c.
1641 *
1642 * Currently, to use MSI-X we require two interrupts be available though in
1643 * theory we should participate in IRM and happily use more interrupts.
1644 *
1645 * Hardware only supports a single MSI being programmed and therefore if we
1646 * don't have MSI-X interrupts available at this time, then we ratchet down the
1647 * number of rings and groups available. Obviously, we only bother with a single
1648 * fixed interrupt.
1649 */
1650 static boolean_t
1651 i40e_alloc_intr_handles(i40e_t *i40e, dev_info_t *devinfo, int intr_type)
1652 {
1653 i40e_hw_t *hw = &i40e->i40e_hw_space;
1654 ddi_acc_handle_t rh = i40e->i40e_osdep_space.ios_reg_handle;
1655 int request, count, actual, rc, min;
1656 uint32_t reg;
1657
1658 switch (intr_type) {
1659 case DDI_INTR_TYPE_FIXED:
1660 case DDI_INTR_TYPE_MSI:
1661 request = 1;
1662 min = 1;
1663 break;
1664 case DDI_INTR_TYPE_MSIX:
1665 min = 2;
1666 if (!i40e->i40e_mr_enable) {
1667 request = 2;
1668 break;
1669 }
1670 reg = I40E_READ_REG(hw, I40E_GLPCI_CNF2);
1671 /*
1672 * Should this read fail, we will drop back to using
1673 * MSI or fixed interrupts.
1674 */
1675 if (i40e_check_acc_handle(rh) != DDI_FM_OK) {
1676 ddi_fm_service_impact(i40e->i40e_dip,
1677 DDI_SERVICE_DEGRADED);
1678 return (B_FALSE);
1679 }
1680 request = (reg & I40E_GLPCI_CNF2_MSI_X_PF_N_MASK) >>
1681 I40E_GLPCI_CNF2_MSI_X_PF_N_SHIFT;
1682 request++; /* the register value is n - 1 */
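		/*
		 * E.g., a hypothetical field value of 7 here would yield
		 * request == 8 vectors for this PF.
		 */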
1683 break;
1684 default:
1685 panic("bad interrupt type passed to i40e_alloc_intr_handles: "
1686 "%d", intr_type);
1687 }
1688
1689 rc = ddi_intr_get_nintrs(devinfo, intr_type, &count);
1690 if (rc != DDI_SUCCESS || count < min) {
1691 i40e_log(i40e, "Get interrupt number failed, "
1692 "returned %d, count %d", rc, count);
1693 return (B_FALSE);
1694 }
1695
1696 rc = ddi_intr_get_navail(devinfo, intr_type, &count);
1697 if (rc != DDI_SUCCESS || count < min) {
1698 i40e_log(i40e, "Get AVAILABLE interrupt number failed, "
1699 "returned %d, count %d", rc, count);
1700 return (B_FALSE);
1701 }
1702
1703 actual = 0;
1704 i40e->i40e_intr_count = 0;
1705 i40e->i40e_intr_count_max = 0;
1706 i40e->i40e_intr_count_min = 0;
1707
1708 i40e->i40e_intr_size = request * sizeof (ddi_intr_handle_t);
1709 ASSERT(i40e->i40e_intr_size != 0);
1710 i40e->i40e_intr_handles = kmem_alloc(i40e->i40e_intr_size, KM_SLEEP);
1711
1712 rc = ddi_intr_alloc(devinfo, i40e->i40e_intr_handles, intr_type, 0,
1713 min(request, count), &actual, DDI_INTR_ALLOC_NORMAL);
1714 if (rc != DDI_SUCCESS) {
1715 i40e_log(i40e, "Interrupt allocation failed with %d.", rc);
1716 goto alloc_handle_fail;
1717 }
1718
1719 i40e->i40e_intr_count = actual;
1720 i40e->i40e_intr_count_max = request;
1721 i40e->i40e_intr_count_min = min;
1722
1723 if (actual < min) {
1724 i40e_log(i40e, "actual (%d) is less than minimum (%d).",
1725 actual, min);
1726 goto alloc_handle_fail;
1727 }
1728
1729 /*
1730 * Record the priority and capabilities for our first vector. Once
1731 * we have it, that's our priority until detach time. Even if we
1732 * eventually participate in IRM, our priority shouldn't change.
1733 */
1734 rc = ddi_intr_get_pri(i40e->i40e_intr_handles[0], &i40e->i40e_intr_pri);
1735 if (rc != DDI_SUCCESS) {
1736 i40e_log(i40e,
1737 "Getting interrupt priority failed with %d.", rc);
1738 goto alloc_handle_fail;
1739 }
1740
1741 rc = ddi_intr_get_cap(i40e->i40e_intr_handles[0], &i40e->i40e_intr_cap);
1742 if (rc != DDI_SUCCESS) {
1743 i40e_log(i40e,
1744 "Getting interrupt capabilities failed with %d.", rc);
1745 goto alloc_handle_fail;
1746 }
1747
1748 i40e->i40e_intr_type = intr_type;
1749 return (B_TRUE);
1750
1751 alloc_handle_fail:
1752
1753 i40e_rem_intrs(i40e);
1754 return (B_FALSE);
1755 }
1756
1757 static boolean_t
1758 i40e_alloc_intrs(i40e_t *i40e, dev_info_t *devinfo)
1759 {
1760 int intr_types, rc;
1761 uint_t max_trqpairs;
1762
1763 if (i40e_is_x722(i40e)) {
1764 max_trqpairs = I40E_722_MAX_TC_QUEUES;
1765 } else {
1766 max_trqpairs = I40E_710_MAX_TC_QUEUES;
1767 }
1768
1769 rc = ddi_intr_get_supported_types(devinfo, &intr_types);
1770 if (rc != DDI_SUCCESS) {
1771 i40e_error(i40e, "failed to get supported interrupt types: %d",
1772 rc);
1773 return (B_FALSE);
1774 }
1775
1776 i40e->i40e_intr_type = 0;
1777 i40e->i40e_num_rx_groups = I40E_GROUP_MAX;
1778
1779 /*
1780 * We need to determine the number of queue pairs per traffic
1781 * class. We only have one traffic class (TC0), so we'll base
1782 * this off the number of interrupts provided. Furthermore,
1783 * since we only use one traffic class, the number of queues
1784 * per traffic class and per VSI are the same.
1785 */
1786 if ((intr_types & DDI_INTR_TYPE_MSIX) &&
1787 (i40e->i40e_intr_force <= I40E_INTR_MSIX) &&
1788 (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSIX))) {
1789 uint32_t n;
1790
1791 /*
1792 * While we want the number of queue pairs to match
		 * the number of interrupts, we must stay within the
		 * bounds of the maximum number of queues per traffic
		 * class. We subtract one from i40e_intr_count to
		 * account for interrupt zero, which is currently
1797 * restricted to admin queue commands and other
1798 * interrupt causes.
1799 */
1800 n = MIN(i40e->i40e_intr_count - 1, max_trqpairs);
1801 ASSERT3U(n, >, 0);
1802
1803 /*
1804 * Round up to the nearest power of two to ensure that
1805 * the QBASE aligns with the TC size which must be
1806 * programmed as a power of two. See the queue mapping
1807 * description in section 7.4.9.5.5.1.
1808 *
1809 * If i40e_intr_count - 1 is not a power of two then
1810 * some queue pairs on the same VSI will have to share
1811 * an interrupt.
1812 *
1813 * We may want to revisit this logic in a future where
1814 * we have more interrupts and more VSIs. Otherwise,
1815 * each VSI will use as many interrupts as possible.
1816 * Using more QPs per VSI means better RSS for each
1817 * group, but at the same time may require more
1818 * sharing of interrupts across VSIs. This may be a
1819 * good candidate for a .conf tunable.
1820 */
1821 n = 0x1 << ddi_fls(n);
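		/*
		 * A worked sketch with hypothetical counts: suppose we were
		 * granted 6 MSI-X vectors. One is reserved for the admin
		 * queue, so n = MIN(5, max_trqpairs), normally 5. Since
		 * ddi_fls() returns the 1-based position of the most
		 * significant set bit, ddi_fls(5) == 3 and n becomes
		 * 1 << 3 == 8 queue pairs per VSI; those 8 pairs then share
		 * the 5 data vectors via the modulo mapping in
		 * i40e_map_intrs_to_vectors().
		 */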
1822 i40e->i40e_num_trqpairs_per_vsi = n;
1823 ASSERT3U(i40e->i40e_num_rx_groups, >, 0);
1824 i40e->i40e_num_trqpairs = i40e->i40e_num_trqpairs_per_vsi *
1825 i40e->i40e_num_rx_groups;
1826 return (B_TRUE);
1827 }
1828
1829 /*
1830 * We only use multiple transmit/receive pairs when MSI-X interrupts are
1831 * available due to the fact that the device basically only supports a
1832 * single MSI interrupt.
1833 */
1834 i40e->i40e_num_trqpairs = I40E_TRQPAIR_NOMSIX;
1835 i40e->i40e_num_trqpairs_per_vsi = i40e->i40e_num_trqpairs;
1836 i40e->i40e_num_rx_groups = I40E_GROUP_NOMSIX;
1837
1838 if ((intr_types & DDI_INTR_TYPE_MSI) &&
1839 (i40e->i40e_intr_force <= I40E_INTR_MSI)) {
1840 if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_MSI))
1841 return (B_TRUE);
1842 }
1843
1844 if (intr_types & DDI_INTR_TYPE_FIXED) {
1845 if (i40e_alloc_intr_handles(i40e, devinfo, DDI_INTR_TYPE_FIXED))
1846 return (B_TRUE);
1847 }
1848
1849 return (B_FALSE);
1850 }
1851
1852 /*
1853 * Map different interrupts to MSI-X vectors.
1854 */
1855 static boolean_t
1856 i40e_map_intrs_to_vectors(i40e_t *i40e)
1857 {
1858 if (i40e->i40e_intr_type != DDI_INTR_TYPE_MSIX) {
1859 return (B_TRUE);
1860 }
1861
1862 /*
1863 * Each queue pair is mapped to a single interrupt, so
1864 * transmit and receive interrupts for a given queue share the
1865 * same vector. Vector zero is reserved for the admin queue.
1866 */
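	/*
	 * For example (hypothetical count), with i40e_intr_count == 6 the
	 * data vectors are 1 through 5 and the mapping below yields:
	 *
	 *	queue 0 -> vector 1	queue 4 -> vector 5
	 *	queue 1 -> vector 2	queue 5 -> vector 1 (shared)
	 */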
1867 for (uint_t i = 0; i < i40e->i40e_num_trqpairs; i++) {
1868 uint_t vector = i % (i40e->i40e_intr_count - 1);
1869
1870 i40e->i40e_trqpairs[i].itrq_rx_intrvec = vector + 1;
1871 i40e->i40e_trqpairs[i].itrq_tx_intrvec = vector + 1;
1872 }
1873
1874 return (B_TRUE);
1875 }
1876
1877 static boolean_t
1878 i40e_add_intr_handlers(i40e_t *i40e)
1879 {
1880 int rc, vector;
1881
1882 switch (i40e->i40e_intr_type) {
1883 case DDI_INTR_TYPE_MSIX:
1884 for (vector = 0; vector < i40e->i40e_intr_count; vector++) {
1885 rc = ddi_intr_add_handler(
1886 i40e->i40e_intr_handles[vector],
1887 (ddi_intr_handler_t *)i40e_intr_msix, i40e,
1888 (void *)(uintptr_t)vector);
1889 if (rc != DDI_SUCCESS) {
1890 i40e_log(i40e, "Add interrupt handler (MSI-X) "
1891 "failed: return %d, vector %d", rc, vector);
1892 for (vector--; vector >= 0; vector--) {
1893 (void) ddi_intr_remove_handler(
1894 i40e->i40e_intr_handles[vector]);
1895 }
1896 return (B_FALSE);
1897 }
1898 }
1899 break;
1900 case DDI_INTR_TYPE_MSI:
1901 rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
1902 (ddi_intr_handler_t *)i40e_intr_msi, i40e, NULL);
1903 if (rc != DDI_SUCCESS) {
1904 i40e_log(i40e, "Add interrupt handler (MSI) failed: "
1905 "return %d", rc);
1906 return (B_FALSE);
1907 }
1908 break;
1909 case DDI_INTR_TYPE_FIXED:
1910 rc = ddi_intr_add_handler(i40e->i40e_intr_handles[0],
1911 (ddi_intr_handler_t *)i40e_intr_legacy, i40e, NULL);
1912 if (rc != DDI_SUCCESS) {
1913 i40e_log(i40e, "Add interrupt handler (legacy) failed:"
1914 " return %d", rc);
1915 return (B_FALSE);
1916 }
1917 break;
1918 default:
1919 /* Cast to pacify lint */
1920 panic("i40e_intr_type %p contains an unknown type: %d",
1921 (void *)i40e, i40e->i40e_intr_type);
1922 }
1923
1924 return (B_TRUE);
1925 }
1926
1927 /*
1928 * Perform periodic checks. Longer term, we should be thinking about additional
1929 * things here:
1930 *
1931 * o Stall Detection
1932 * o Temperature sensor detection
1933 * o Device resetting
1934 * o Statistics updating to avoid wraparound
1935 */
1936 static void
1937 i40e_timer(void *arg)
1938 {
1939 i40e_t *i40e = arg;
1940
1941 mutex_enter(&i40e->i40e_general_lock);
1942 i40e_link_check(i40e);
1943 mutex_exit(&i40e->i40e_general_lock);
1944 }
1945
1946 /*
1947 * Get the hardware state, and scribble away anything that needs scribbling.
1948 */
1949 static void
1950 i40e_get_hw_state(i40e_t *i40e, i40e_hw_t *hw)
1951 {
1952 int rc;
1953
1954 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
1955
1956 (void) i40e_aq_get_link_info(hw, TRUE, NULL, NULL);
1957 i40e_link_check(i40e);
1958
1959 /*
	 * Try to determine our PHY. Note that we may have to retry and
	 * delay to detect fiber correctly.
1962 */
1963 rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE, &i40e->i40e_phy,
1964 NULL);
1965 if (rc == I40E_ERR_UNKNOWN_PHY) {
1966 i40e_msec_delay(200);
1967 rc = i40e_aq_get_phy_capabilities(hw, B_FALSE, B_TRUE,
1968 &i40e->i40e_phy, NULL);
1969 }
1970
1971 if (rc != I40E_SUCCESS) {
1972 if (rc == I40E_ERR_UNKNOWN_PHY) {
1973 i40e_error(i40e, "encountered unknown PHY type, "
1974 "not attaching.");
1975 } else {
1976 i40e_error(i40e, "error getting physical capabilities: "
1977 "%d, %d", rc, hw->aq.asq_last_status);
1978 }
1979 }
1980
1981 rc = i40e_update_link_info(hw);
1982 if (rc != I40E_SUCCESS) {
1983 i40e_error(i40e, "failed to update link information: %d", rc);
1984 }
1985
1986 /*
1987 * In general, we don't want to mask off (as in stop from being a cause)
1988 * any of the interrupts that the phy might be able to generate.
1989 */
1990 rc = i40e_aq_set_phy_int_mask(hw, 0, NULL);
1991 if (rc != I40E_SUCCESS) {
1992 i40e_error(i40e, "failed to update phy link mask: %d", rc);
1993 }
1994 }
1995
1996 /*
1997 * Go through and re-initialize any existing filters that we may have set up for
1998 * this device. Note that we would only expect them to exist if hardware had
1999 * already been initialized and we had just reset it. While we're not
2000 * implementing this yet, we're keeping this around for when we add reset
2001 * capabilities, so this isn't forgotten.
2002 */
2003 /* ARGSUSED */
2004 static void
2005 i40e_init_macaddrs(i40e_t *i40e, i40e_hw_t *hw)
2006 {
2007 }
2008
2009 /*
2010 * Set the properties which have common values across all the VSIs.
2011 * Consult the "Add VSI" command section (7.4.9.5.5.1) for a
2012 * complete description of these properties.
2013 */
2014 static void
2015 i40e_set_shared_vsi_props(i40e_t *i40e,
2016 struct i40e_aqc_vsi_properties_data *info, uint_t vsi_idx)
2017 {
2018 uint_t tc_queues;
2019 uint16_t vsi_qp_base;
2020
2021 /*
2022 * It's important that we use bitwise-OR here; callers to this
2023 * function might enable other sections before calling this
2024 * function.
2025 */
2026 info->valid_sections |= LE_16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID |
2027 I40E_AQ_VSI_PROP_VLAN_VALID);
2028
2029 /*
2030 * Calculate the starting QP index for this VSI. This base is
2031 * relative to the PF queue space; so a value of 0 for PF#1
2032 * represents the absolute index PFLAN_QALLOC_FIRSTQ for PF#1.
2033 */
2034 vsi_qp_base = vsi_idx * i40e->i40e_num_trqpairs_per_vsi;
2035 info->mapping_flags = LE_16(I40E_AQ_VSI_QUE_MAP_CONTIG);
2036 info->queue_mapping[0] =
2037 LE_16((vsi_qp_base << I40E_AQ_VSI_QUEUE_SHIFT) &
2038 I40E_AQ_VSI_QUEUE_MASK);
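	/*
	 * For example (hypothetical sizing), with 8 queue pairs per VSI a
	 * vsi_idx of 2 yields a vsi_qp_base of 16; that VSI's queues start
	 * at PF-relative queue 16.
	 */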
2039
2040 /*
2041 * tc_queues determines the size of the traffic class, where
2042 * the size is 2^^tc_queues to a maximum of 64 for the X710
2043 * and 128 for the X722.
2044 *
2045 * Some examples:
2046 * i40e_num_trqpairs_per_vsi == 1 => tc_queues = 0, 2^^0 = 1.
2047 * i40e_num_trqpairs_per_vsi == 7 => tc_queues = 3, 2^^3 = 8.
2048 * i40e_num_trqpairs_per_vsi == 8 => tc_queues = 3, 2^^3 = 8.
2049 * i40e_num_trqpairs_per_vsi == 9 => tc_queues = 4, 2^^4 = 16.
2050 * i40e_num_trqpairs_per_vsi == 17 => tc_queues = 5, 2^^5 = 32.
2051 * i40e_num_trqpairs_per_vsi == 64 => tc_queues = 6, 2^^6 = 64.
2052 */
2053 tc_queues = ddi_fls(i40e->i40e_num_trqpairs_per_vsi - 1);
2054
2055 /*
2056 * The TC queue mapping is in relation to the VSI queue space.
2057 * Since we are only using one traffic class (TC0) we always
2058 * start at queue offset 0.
2059 */
2060 info->tc_mapping[0] =
2061 LE_16(((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) &
2062 I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
2063 ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) &
2064 I40E_AQ_VSI_TC_QUE_NUMBER_MASK));
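	/*
	 * E.g., with tc_queues == 3 the encoded value is simply
	 * (3 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT): an 8-queue TC starting at
	 * VSI-relative queue offset 0.
	 */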
2065
2066 /*
2067 * I40E_AQ_VSI_PVLAN_MODE_ALL ("VLAN driver insertion mode")
2068 *
2069 * Allow tagged and untagged packets to be sent to this
2070 * VSI from the host.
2071 *
2072 * I40E_AQ_VSI_PVLAN_EMOD_NOTHING ("VLAN and UP expose mode")
2073 *
2074 * Leave the tag on the frame and place no VLAN
2075 * information in the descriptor. We want this mode
2076 * because our MAC layer will take care of the VLAN tag,
2077 * if there is one.
2078 */
2079 info->port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
2080 I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
2081 }
2082
2083 /*
2084 * Delete the VSI at this index, if one exists. We assume there is no
2085 * action we can take if this command fails but to log the failure.
2086 */
2087 static void
2088 i40e_delete_vsi(i40e_t *i40e, uint_t idx)
2089 {
2090 i40e_hw_t *hw = &i40e->i40e_hw_space;
2091 uint16_t seid = i40e->i40e_vsis[idx].iv_seid;
2092
2093 if (seid != 0) {
2094 int rc;
2095
2096 rc = i40e_aq_delete_element(hw, seid, NULL);
2097
2098 if (rc != I40E_SUCCESS) {
			i40e_error(i40e, "Failed to delete VSI %u: %d (%d)",
			    seid, rc, hw->aq.asq_last_status);
2101 }
2102
2103 i40e->i40e_vsis[idx].iv_seid = 0;
2104 }
2105 }
2106
2107 /*
2108 * Add a new VSI.
2109 */
2110 static boolean_t
2111 i40e_add_vsi(i40e_t *i40e, i40e_hw_t *hw, uint_t idx)
2112 {
2113 struct i40e_vsi_context ctx;
2114 i40e_rx_group_t *rxg;
2115 int rc;
2116
2117 /*
2118 * The default VSI is created by the controller. This function
	 * creates new, non-default VSIs only.
2120 */
2121 ASSERT3U(idx, !=, 0);
2122
2123 bzero(&ctx, sizeof (struct i40e_vsi_context));
2124 ctx.uplink_seid = i40e->i40e_veb_seid;
2125 ctx.pf_num = hw->pf_id;
2126 ctx.flags = I40E_AQ_VSI_TYPE_PF;
2127 ctx.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
2128 i40e_set_shared_vsi_props(i40e, &ctx.info, idx);
2129
2130 rc = i40e_aq_add_vsi(hw, &ctx, NULL);
2131 if (rc != I40E_SUCCESS) {
2132 i40e_error(i40e, "i40e_aq_add_vsi() failed %d: %d", rc,
2133 hw->aq.asq_last_status);
2134 return (B_FALSE);
2135 }
2136
2137 rxg = &i40e->i40e_rx_groups[idx];
2138 rxg->irg_vsi_seid = ctx.seid;
2139 i40e->i40e_vsis[idx].iv_number = ctx.vsi_number;
2140 i40e->i40e_vsis[idx].iv_seid = ctx.seid;
2141 i40e->i40e_vsis[idx].iv_stats_id = LE_16(ctx.info.stat_counter_idx);
2142
2143 if (i40e_stat_vsi_init(i40e, idx) == B_FALSE)
2144 return (B_FALSE);
2145
2146 return (B_TRUE);
2147 }
2148
2149 /*
2150 * Configure the hardware for the Default Virtual Station Interface (VSI).
2151 */
2152 static boolean_t
2153 i40e_config_def_vsi(i40e_t *i40e, i40e_hw_t *hw)
2154 {
2155 struct i40e_vsi_context ctx;
2156 i40e_rx_group_t *def_rxg;
2157 int err;
2158 struct i40e_aqc_remove_macvlan_element_data filt;
2159
2160 bzero(&ctx, sizeof (struct i40e_vsi_context));
2161 ctx.seid = I40E_DEF_VSI_SEID(i40e);
2162 ctx.pf_num = hw->pf_id;
2163 err = i40e_aq_get_vsi_params(hw, &ctx, NULL);
2164 if (err != I40E_SUCCESS) {
2165 i40e_error(i40e, "get VSI params failed with %d", err);
2166 return (B_FALSE);
2167 }
2168
2169 ctx.info.valid_sections = 0;
2170 i40e->i40e_vsis[0].iv_number = ctx.vsi_number;
2171 i40e->i40e_vsis[0].iv_stats_id = LE_16(ctx.info.stat_counter_idx);
2172 if (i40e_stat_vsi_init(i40e, 0) == B_FALSE)
2173 return (B_FALSE);
2174
2175 i40e_set_shared_vsi_props(i40e, &ctx.info, I40E_DEF_VSI_IDX);
2176
2177 err = i40e_aq_update_vsi_params(hw, &ctx, NULL);
2178 if (err != I40E_SUCCESS) {
2179 i40e_error(i40e, "Update VSI params failed with %d", err);
2180 return (B_FALSE);
2181 }
2182
2183 def_rxg = &i40e->i40e_rx_groups[0];
2184 def_rxg->irg_vsi_seid = I40E_DEF_VSI_SEID(i40e);
2185
2186 /*
	 * We have seen three different behaviors with regard to the
2188 * Default VSI and its implicit L2 MAC+VLAN filter.
2189 *
2190 * 1. It has an implicit filter for the factory MAC address
2191 * and this filter counts against 'ifr_nmacfilt_used'.
2192 *
2193 * 2. It has an implicit filter for the factory MAC address
2194 * and this filter DOES NOT count against 'ifr_nmacfilt_used'.
2195 *
2196 * 3. It DOES NOT have an implicit filter.
2197 *
2198 * All three of these cases are accounted for below. If we
2199 * fail to remove the L2 filter (ENOENT) then we assume there
2200 * wasn't one. Otherwise, if we successfully remove the
2201 * filter, we make sure to update the 'ifr_nmacfilt_used'
2202 * count accordingly.
2203 *
2204 * We remove this filter to prevent duplicate delivery of
2205 * packets destined for the primary MAC address as DLS will
2206 * create the same filter on a non-default VSI for the primary
2207 * MAC client.
2208 *
2209 * If you change the following code please test it across as
2210 * many X700 series controllers and firmware revisions as you
2211 * can.
2212 */
2213 bzero(&filt, sizeof (filt));
2214 bcopy(hw->mac.port_addr, filt.mac_addr, ETHERADDRL);
2215 filt.flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
2216 filt.vlan_tag = 0;
2217
2218 ASSERT3U(i40e->i40e_resources.ifr_nmacfilt_used, <=, 1);
2219 i40e_log(i40e, "Num L2 filters: %u",
2220 i40e->i40e_resources.ifr_nmacfilt_used);
2221
2222 err = i40e_aq_remove_macvlan(hw, I40E_DEF_VSI_SEID(i40e), &filt, 1,
2223 NULL);
2224 if (err == I40E_SUCCESS) {
2225 i40e_log(i40e,
2226 "Removed L2 filter from Default VSI with SEID %u",
2227 I40E_DEF_VSI_SEID(i40e));
2228 } else if (hw->aq.asq_last_status == ENOENT) {
2229 i40e_log(i40e,
2230 "No L2 filter for Default VSI with SEID %u",
2231 I40E_DEF_VSI_SEID(i40e));
2232 } else {
2233 i40e_error(i40e, "Failed to remove L2 filter from"
2234 " Default VSI with SEID %u: %d (%d)",
2235 I40E_DEF_VSI_SEID(i40e), err, hw->aq.asq_last_status);
2236
2237 return (B_FALSE);
2238 }
2239
2240 /*
2241 * As mentioned above, the controller created an implicit L2
2242 * filter for the primary MAC. We want to remove both the
2243 * filter and decrement the filter count. However, not all
2244 * controllers count this implicit filter against the total
2245 * MAC filter count. So here we are making sure it is either
2246 * one or zero. If it is one, then we know it is for the
2247 * implicit filter and we should decrement since we just
2248 * removed the filter above. If it is zero then we know the
	 * controller does not count the implicit filter, and removing
	 * it was enough; we leave the count alone.
2251 * But if it is neither, then we have never seen a controller
2252 * like this before and we should fail to attach.
2253 *
2254 * It is unfortunate that this code must exist but the
2255 * behavior of this implicit L2 filter and its corresponding
	 * count were discovered through empirical testing. The
2257 * programming manuals hint at this filter but do not
2258 * explicitly call out the exact behavior.
2259 */
2260 if (i40e->i40e_resources.ifr_nmacfilt_used == 1) {
2261 i40e->i40e_resources.ifr_nmacfilt_used--;
2262 } else {
2263 if (i40e->i40e_resources.ifr_nmacfilt_used != 0) {
2264 i40e_error(i40e, "Unexpected L2 filter count: %u"
2265 " (expected 0)",
2266 i40e->i40e_resources.ifr_nmacfilt_used);
2267 return (B_FALSE);
2268 }
2269 }
2270
2271 return (B_TRUE);
2272 }
2273
2274 static boolean_t
2275 i40e_config_rss_key_x722(i40e_t *i40e, i40e_hw_t *hw)
2276 {
2277 for (uint_t i = 0; i < i40e->i40e_num_rx_groups; i++) {
2278 uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1];
2279 struct i40e_aqc_get_set_rss_key_data key;
2280 const char *u8seed;
2281 enum i40e_status_code status;
2282 uint16_t vsi_number = i40e->i40e_vsis[i].iv_number;
2283
2284 (void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed));
2285 u8seed = (char *)seed;
2286
2287 CTASSERT(sizeof (key) >= (sizeof (key.standard_rss_key) +
2288 sizeof (key.extended_hash_key)));
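		/*
		 * For reference: the seed is (I40E_PFQF_HKEY_MAX_INDEX + 1)
		 * 32-bit words, which covers the standard key plus the
		 * extended key copied below (sizes per the shared-code
		 * definition of the key structure).
		 */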
2289
2290 bcopy(u8seed, key.standard_rss_key,
2291 sizeof (key.standard_rss_key));
2292 bcopy(&u8seed[sizeof (key.standard_rss_key)],
2293 key.extended_hash_key, sizeof (key.extended_hash_key));
2294
2295 ASSERT3U(vsi_number, !=, 0);
2296 status = i40e_aq_set_rss_key(hw, vsi_number, &key);
2297
2298 if (status != I40E_SUCCESS) {
2299 i40e_error(i40e, "failed to set RSS key for VSI %u: %d",
2300 vsi_number, status);
2301 return (B_FALSE);
2302 }
2303 }
2304
2305 return (B_TRUE);
2306 }
2307
2308 /*
2309 * Configure the RSS key. For the X710 controller family, this is set on a
2310 * per-PF basis via registers. For the X722, this is done on a per-VSI basis
2311 * through the admin queue.
2312 */
2313 static boolean_t
2314 i40e_config_rss_key(i40e_t *i40e, i40e_hw_t *hw)
2315 {
2316 if (i40e_is_x722(i40e)) {
2317 if (!i40e_config_rss_key_x722(i40e, hw))
2318 return (B_FALSE);
2319 } else {
2320 uint32_t seed[I40E_PFQF_HKEY_MAX_INDEX + 1];
2321
2322 (void) random_get_pseudo_bytes((uint8_t *)seed, sizeof (seed));
2323 for (uint_t i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
2324 i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed[i]);
2325 }
2326
2327 return (B_TRUE);
2328 }
2329
2330 /*
2331 * Populate the LUT. The size of each entry in the LUT depends on the controller
2332 * family, with the X722 using a known 7-bit width. On the X710 controller, this
 * is programmed through its control registers, whereas on the X722 this is
2334 * configured through the admin queue. Also of note, the X722 allows the LUT to
2335 * be set on a per-PF or VSI basis. At this time we use the PF setting. If we
2336 * decide to use the per-VSI LUT in the future, then we will need to modify the
2337 * i40e_add_vsi() function to set the RSS LUT bits in the queueing section.
2338 *
 * We populate the LUT in a round-robin fashion with the rx queue indices from 0
2340 * to i40e_num_trqpairs_per_vsi - 1.
2341 */
2342 static boolean_t
2343 i40e_config_rss_hlut(i40e_t *i40e, i40e_hw_t *hw)
2344 {
2345 uint32_t *hlut;
2346 uint8_t lut_mask;
2347 uint_t i;
2348 boolean_t ret = B_FALSE;
2349
2350 /*
2351 * We always configure the PF with a table size of 512 bytes in
2352 * i40e_chip_start().
2353 */
2354 hlut = kmem_alloc(I40E_HLUT_TABLE_SIZE, KM_NOSLEEP);
2355 if (hlut == NULL) {
		i40e_error(i40e,
		    "i40e_config_rss_hlut() buffer allocation failed");
2357 return (B_FALSE);
2358 }
2359
2360 /*
2361 * The width of the X722 is apparently defined to be 7 bits, regardless
2362 * of the capability.
2363 */
2364 if (i40e_is_x722(i40e)) {
2365 lut_mask = (1 << 7) - 1;
2366 } else {
2367 lut_mask = (1 << hw->func_caps.rss_table_entry_width) - 1;
2368 }
2369
2370 for (i = 0; i < I40E_HLUT_TABLE_SIZE; i++) {
2371 ((uint8_t *)hlut)[i] =
2372 (i % i40e->i40e_num_trqpairs_per_vsi) & lut_mask;
2373 }
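	/*
	 * E.g., with i40e_num_trqpairs_per_vsi == 3 the table holds the
	 * repeating pattern 0, 1, 2, 0, 1, 2, ... across all
	 * I40E_HLUT_TABLE_SIZE entries.
	 */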
2374
2375 if (i40e_is_x722(i40e)) {
2376 enum i40e_status_code status;
2377
2378 status = i40e_aq_set_rss_lut(hw, 0, B_TRUE, (uint8_t *)hlut,
2379 I40E_HLUT_TABLE_SIZE);
2380
2381 if (status != I40E_SUCCESS) {
2382 i40e_error(i40e, "failed to set RSS LUT %d: %d",
2383 status, hw->aq.asq_last_status);
2384 goto out;
2385 }
2386 } else {
2387 for (i = 0; i < I40E_HLUT_TABLE_SIZE >> 2; i++) {
2388 I40E_WRITE_REG(hw, I40E_PFQF_HLUT(i), hlut[i]);
2389 }
2390 }
2391 ret = B_TRUE;
2392 out:
2393 kmem_free(hlut, I40E_HLUT_TABLE_SIZE);
2394 return (ret);
2395 }
2396
2397 /*
2398 * Set up RSS.
2399 * 1. Seed the hash key.
2400 * 2. Enable PCTYPEs for the hash filter.
2401 * 3. Populate the LUT.
2402 */
2403 static boolean_t
2404 i40e_config_rss(i40e_t *i40e, i40e_hw_t *hw)
2405 {
2406 uint64_t hena;
2407
2408 /*
2409 * 1. Seed the hash key
2410 */
2411 if (!i40e_config_rss_key(i40e, hw))
2412 return (B_FALSE);
2413
2414 /*
2415 * 2. Configure PCTYPES
2416 */
2417 hena = (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
2418 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
2419 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
2420 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
2421 (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV4) |
2422 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
2423 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
2424 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
2425 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
2426 (1ULL << I40E_FILTER_PCTYPE_FRAG_IPV6) |
2427 (1ULL << I40E_FILTER_PCTYPE_L2_PAYLOAD);
2428
2429 /*
2430 * Add additional types supported by the X722 controller.
2431 */
2432 if (i40e_is_x722(i40e)) {
2433 hena |= (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) |
2434 (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) |
2435 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) |
2436 (1ULL << I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) |
2437 (1ULL << I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP) |
2438 (1ULL << I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK);
2439 }
2440
2441 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (uint32_t)hena);
2442 i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (uint32_t)(hena >> 32));
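	/*
	 * Note that hena is 64 bits wide; the two writes above program the
	 * low and high 32-bit halves, respectively.
	 */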
2443
2444 /*
2445 * 3. Populate LUT
2446 */
2447 return (i40e_config_rss_hlut(i40e, hw));
2448 }
2449
2450 /*
2451 * Wrapper to kick the chipset on.
2452 */
2453 static boolean_t
2454 i40e_chip_start(i40e_t *i40e)
2455 {
2456 i40e_hw_t *hw = &i40e->i40e_hw_space;
2457 struct i40e_filter_control_settings filter;
2458 int rc;
2459 uint8_t err;
2460
2461 if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
2462 (hw->aq.fw_maj_ver < 4)) {
2463 i40e_msec_delay(75);
2464 if (i40e_aq_set_link_restart_an(hw, TRUE, NULL) !=
2465 I40E_SUCCESS) {
2466 i40e_error(i40e, "failed to restart link: admin queue "
2467 "error: %d", hw->aq.asq_last_status);
2468 return (B_FALSE);
2469 }
2470 }
2471
2472 /* Determine hardware state */
2473 i40e_get_hw_state(i40e, hw);
2474
2475 /* For now, we always disable Ethernet Flow Control. */
2476 hw->fc.requested_mode = I40E_FC_NONE;
2477 rc = i40e_set_fc(hw, &err, B_TRUE);
2478 if (rc != I40E_SUCCESS) {
2479 i40e_error(i40e, "Setting flow control failed, returned %d"
2480 " with error: 0x%x", rc, err);
2481 return (B_FALSE);
2482 }
2483
2484 /* Initialize mac addresses. */
2485 i40e_init_macaddrs(i40e, hw);
2486
2487 /*
2488 * Set up the filter control. If the hash lut size is changed from
2489 * I40E_HASH_LUT_SIZE_512 then I40E_HLUT_TABLE_SIZE and
2490 * i40e_config_rss_hlut() will need to be updated.
2491 */
2492 bzero(&filter, sizeof (filter));
2493 filter.enable_ethtype = TRUE;
2494 filter.enable_macvlan = TRUE;
2495 filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
2496
2497 rc = i40e_set_filter_control(hw, &filter);
2498 if (rc != I40E_SUCCESS) {
2499 i40e_error(i40e, "i40e_set_filter_control() returned %d", rc);
2500 return (B_FALSE);
2501 }
2502
2503 i40e_intr_chip_init(i40e);
2504
2505 rc = i40e_get_mac_seid(i40e);
2506 if (rc == -1) {
2507 i40e_error(i40e, "failed to obtain MAC Uplink SEID");
2508 return (B_FALSE);
2509 }
2510 i40e->i40e_mac_seid = (uint16_t)rc;
2511
2512 /*
2513 * Create a VEB in order to support multiple VSIs. Each VSI
2514 * functions as a MAC group. This call sets the PF's MAC as
2515 * the uplink port and the PF's default VSI as the default
2516 * downlink port.
2517 */
2518 rc = i40e_aq_add_veb(hw, i40e->i40e_mac_seid, I40E_DEF_VSI_SEID(i40e),
2519 0x1, B_TRUE, &i40e->i40e_veb_seid, B_FALSE, NULL);
2520 if (rc != I40E_SUCCESS) {
2521 i40e_error(i40e, "i40e_aq_add_veb() failed %d: %d", rc,
2522 hw->aq.asq_last_status);
2523 return (B_FALSE);
2524 }
2525
2526 if (!i40e_config_def_vsi(i40e, hw))
2527 return (B_FALSE);
2528
2529 for (uint_t i = 1; i < i40e->i40e_num_rx_groups; i++) {
2530 if (!i40e_add_vsi(i40e, hw, i))
2531 return (B_FALSE);
2532 }
2533
2534 if (!i40e_config_rss(i40e, hw))
2535 return (B_FALSE);
2536
2537 i40e_flush(hw);
2538
2539 return (B_TRUE);
2540 }
2541
2542 /*
2543 * Take care of tearing down the rx ring. See 8.3.3.1.2 for more information.
2544 */
2545 static void
2546 i40e_shutdown_rx_rings(i40e_t *i40e)
2547 {
2548 int i;
2549 uint32_t reg;
2550
2551 i40e_hw_t *hw = &i40e->i40e_hw_space;
2552
2553 /*
2554 * Step 1. The interrupt linked list (see i40e_intr.c for more
2555 * information) should have already been cleared before calling this
2556 * function.
2557 */
2558 #ifdef DEBUG
2559 if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) {
2560 for (i = 1; i < i40e->i40e_intr_count; i++) {
2561 reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1));
2562 VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
2563 }
2564 } else {
2565 reg = I40E_READ_REG(hw, I40E_PFINT_LNKLST0);
2566 VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
2567 }
2568
2569 #endif /* DEBUG */
2570
2571 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
		/*
		 * Step 2. Request that the queue be disabled by clearing
		 * QENA_REQ. It may not be set due to unwinding from failures
		 * and a partially enabled ring set.
		 */
2577 reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
2578 if (!(reg & I40E_QRX_ENA_QENA_REQ_MASK))
2579 continue;
2580 VERIFY((reg & I40E_QRX_ENA_QENA_REQ_MASK) ==
2581 I40E_QRX_ENA_QENA_REQ_MASK);
2582 reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
2583 I40E_WRITE_REG(hw, I40E_QRX_ENA(i), reg);
2584 }
2585
2586 /*
	 * Step 3. Wait for the disable to take effect, indicated by QENA_STAT
	 * in the FPM being cleared. Note that we could still receive data in
	 * the queue during this time. We don't actually wait for this now and
	 * instead defer it to i40e_shutdown_rings_wait(), after we've
	 * interleaved disabling the TX queues as well.
2592 */
2593 }
2594
2595 static void
2596 i40e_shutdown_tx_rings(i40e_t *i40e)
2597 {
2598 int i;
2599 uint32_t reg;
2600
2601 i40e_hw_t *hw = &i40e->i40e_hw_space;
2602
2603 /*
2604 * Step 1. The interrupt linked list should already have been cleared.
2605 */
2606 #ifdef DEBUG
2607 if (i40e->i40e_intr_type == DDI_INTR_TYPE_MSIX) {
2608 for (i = 1; i < i40e->i40e_intr_count; i++) {
2609 reg = I40E_READ_REG(hw, I40E_PFINT_LNKLSTN(i - 1));
2610 VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
2611 }
2612 } else {
2613 reg = I40E_READ_REG(hw, I40E_PFINT_LNKLST0);
2614 VERIFY3U(reg, ==, I40E_QUEUE_TYPE_EOL);
2615
2616 }
2617 #endif /* DEBUG */
2618
2619 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
2620 /*
2621 * Step 2. Set the SET_QDIS flag for every queue.
2622 */
2623 i40e_pre_tx_queue_cfg(hw, i, B_FALSE);
2624 }
2625
2626 /*
2627 * Step 3. Wait at least 400 usec (can be done once for all queues).
2628 */
2629 drv_usecwait(500);
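	/* 500 usec gives a little margin over the 400 usec minimum. */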
2630
2631 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
2632 /*
2633 * Step 4. Clear the QENA_REQ flag which tells hardware to
		 * quiesce. If QENA_REQ is not already set, then we likely
		 * already tried to disable this queue.
2636 */
2637 reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
2638 if (!(reg & I40E_QTX_ENA_QENA_REQ_MASK))
2639 continue;
2640 reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
2641 I40E_WRITE_REG(hw, I40E_QTX_ENA(i), reg);
2642 }
2643
2644 /*
2645 * Step 5. Wait for all drains to finish. This will be done by the
2646 * hardware removing the QENA_STAT flag from the queue. Rather than
2647 * waiting here, we interleave it with all the others in
2648 * i40e_shutdown_rings_wait().
2649 */
2650 }
2651
2652 /*
2653 * Wait for all the rings to be shut down. e.g. Steps 2 and 5 from the above
2654 * functions.
2655 */
2656 static boolean_t
2657 i40e_shutdown_rings_wait(i40e_t *i40e)
2658 {
2659 int i, try;
2660 i40e_hw_t *hw = &i40e->i40e_hw_space;
2661
2662 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
2663 uint32_t reg;
2664
2665 for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
2666 reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
2667 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0)
2668 break;
2669 i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2670 }
2671
2672 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) != 0) {
2673 i40e_error(i40e, "timed out disabling rx queue %d",
2674 i);
2675 return (B_FALSE);
2676 }
2677
2678 for (try = 0; try < I40E_RING_WAIT_NTRIES; try++) {
2679 reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
2680 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0)
2681 break;
2682 i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2683 }
2684
2685 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) != 0) {
2686 i40e_error(i40e, "timed out disabling tx queue %d",
2687 i);
2688 return (B_FALSE);
2689 }
2690 }
2691
2692 return (B_TRUE);
2693 }
2694
2695 static boolean_t
2696 i40e_shutdown_rings(i40e_t *i40e)
2697 {
2698 i40e_shutdown_rx_rings(i40e);
2699 i40e_shutdown_tx_rings(i40e);
2700 return (i40e_shutdown_rings_wait(i40e));
2701 }
2702
2703 static void
2704 i40e_setup_rx_descs(i40e_trqpair_t *itrq)
2705 {
2706 int i;
2707 i40e_rx_data_t *rxd = itrq->itrq_rxdata;
2708
2709 for (i = 0; i < rxd->rxd_ring_size; i++) {
2710 i40e_rx_control_block_t *rcb;
2711 i40e_rx_desc_t *rdesc;
2712
2713 rcb = rxd->rxd_work_list[i];
2714 rdesc = &rxd->rxd_desc_ring[i];
2715
2716 rdesc->read.pkt_addr =
2717 CPU_TO_LE64((uintptr_t)rcb->rcb_dma.dmab_dma_address);
2718 rdesc->read.hdr_addr = 0;
2719 }
2720 }
2721
2722 static boolean_t
2723 i40e_setup_rx_hmc(i40e_trqpair_t *itrq)
2724 {
2725 i40e_rx_data_t *rxd = itrq->itrq_rxdata;
2726 i40e_t *i40e = itrq->itrq_i40e;
2727 i40e_hw_t *hw = &i40e->i40e_hw_space;
2728
2729 struct i40e_hmc_obj_rxq rctx;
2730 int err;
2731
2732 bzero(&rctx, sizeof (struct i40e_hmc_obj_rxq));
2733 rctx.base = rxd->rxd_desc_area.dmab_dma_address /
2734 I40E_HMC_RX_CTX_UNIT;
2735 rctx.qlen = rxd->rxd_ring_size;
2736 VERIFY(i40e->i40e_rx_buf_size >= I40E_HMC_RX_DBUFF_MIN);
2737 VERIFY(i40e->i40e_rx_buf_size <= I40E_HMC_RX_DBUFF_MAX);
2738 rctx.dbuff = i40e->i40e_rx_buf_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
2739 rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
2740 rctx.dtype = I40E_HMC_RX_DTYPE_NOSPLIT;
2741 rctx.dsize = I40E_HMC_RX_DSIZE_32BYTE;
2742 rctx.crcstrip = I40E_HMC_RX_CRCSTRIP_ENABLE;
2743 rctx.fc_ena = I40E_HMC_RX_FC_DISABLE;
2744 rctx.l2tsel = I40E_HMC_RX_L2TAGORDER;
2745 rctx.hsplit_0 = I40E_HMC_RX_HDRSPLIT_DISABLE;
2746 rctx.hsplit_1 = I40E_HMC_RX_HDRSPLIT_DISABLE;
2747 rctx.showiv = I40E_HMC_RX_INVLAN_DONTSTRIP;
2748 rctx.rxmax = i40e->i40e_frame_max;
2749 rctx.tphrdesc_ena = I40E_HMC_RX_TPH_DISABLE;
2750 rctx.tphwdesc_ena = I40E_HMC_RX_TPH_DISABLE;
2751 rctx.tphdata_ena = I40E_HMC_RX_TPH_DISABLE;
2752 rctx.tphhead_ena = I40E_HMC_RX_TPH_DISABLE;
2753 rctx.lrxqthresh = I40E_HMC_RX_LOWRXQ_NOINTR;
2754
2755 /*
2756 * This must be set to 0x1, see Table 8-12 in section 8.3.3.2.2.
2757 */
2758 rctx.prefena = I40E_HMC_RX_PREFENA;
2759
2760 err = i40e_clear_lan_rx_queue_context(hw, itrq->itrq_index);
2761 if (err != I40E_SUCCESS) {
2762 i40e_error(i40e, "failed to clear rx queue %d context: %d",
2763 itrq->itrq_index, err);
2764 return (B_FALSE);
2765 }
2766
2767 err = i40e_set_lan_rx_queue_context(hw, itrq->itrq_index, &rctx);
2768 if (err != I40E_SUCCESS) {
2769 i40e_error(i40e, "failed to set rx queue %d context: %d",
2770 itrq->itrq_index, err);
2771 return (B_FALSE);
2772 }
2773
2774 return (B_TRUE);
2775 }
2776
2777 /*
2778 * Take care of setting up the descriptor rings and actually programming the
2779 * device. See 8.3.3.1.1 for the full list of steps we need to do to enable the
2780 * rx rings.
2781 */
2782 static boolean_t
2783 i40e_setup_rx_rings(i40e_t *i40e)
2784 {
2785 int i;
2786 i40e_hw_t *hw = &i40e->i40e_hw_space;
2787
2788 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
2789 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];
2790 i40e_rx_data_t *rxd = itrq->itrq_rxdata;
2791 uint32_t reg;
2792
2793 /*
2794 * Step 1. Program all receive ring descriptors.
2795 */
2796 i40e_setup_rx_descs(itrq);
2797
2798 /*
2799 * Step 2. Program the queue's FPM/HMC context.
2800 */
2801 if (i40e_setup_rx_hmc(itrq) == B_FALSE)
2802 return (B_FALSE);
2803
2804 /*
2805 * Step 3. Clear the queue's tail pointer and set it to the end
2806 * of the space.
2807 */
2808 I40E_WRITE_REG(hw, I40E_QRX_TAIL(i), 0);
2809 I40E_WRITE_REG(hw, I40E_QRX_TAIL(i), rxd->rxd_ring_size - 1);
2810
2811 /*
2812 * Step 4. Enable the queue via the QENA_REQ.
2813 */
2814 reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
2815 VERIFY0(reg & (I40E_QRX_ENA_QENA_REQ_MASK |
2816 I40E_QRX_ENA_QENA_STAT_MASK));
2817 reg |= I40E_QRX_ENA_QENA_REQ_MASK;
2818 I40E_WRITE_REG(hw, I40E_QRX_ENA(i), reg);
2819 }
2820
2821 /*
	 * Note, we issue the enable for every queue before we start checking
	 * any of them, which will hopefully let most queues finish enabling
	 * by the time we poll them below.
2824 */
2825 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
2826 uint32_t j, reg;
2827
2828 /*
2829 * Step 5. Verify that QENA_STAT has been set. It's promised
2830 * that this should occur within about 10 us, but like other
2831 * systems, we give the card a bit more time.
2832 */
2833 for (j = 0; j < I40E_RING_WAIT_NTRIES; j++) {
2834 reg = I40E_READ_REG(hw, I40E_QRX_ENA(i));
2835
2836 if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
2837 break;
2838 i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2839 }
2840
2841 if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
2842 i40e_error(i40e, "failed to enable rx queue %d, timed "
2843 "out.", i);
2844 return (B_FALSE);
2845 }
2846 }
2847
2848 return (B_TRUE);
2849 }
2850
2851 static boolean_t
2852 i40e_setup_tx_hmc(i40e_trqpair_t *itrq)
2853 {
2854 i40e_t *i40e = itrq->itrq_i40e;
2855 i40e_hw_t *hw = &i40e->i40e_hw_space;
2856
2857 struct i40e_hmc_obj_txq tctx;
2858 struct i40e_vsi_context context;
2859 int err;
2860
2861 bzero(&tctx, sizeof (struct i40e_hmc_obj_txq));
2862 tctx.new_context = I40E_HMC_TX_NEW_CONTEXT;
2863 tctx.base = itrq->itrq_desc_area.dmab_dma_address /
2864 I40E_HMC_TX_CTX_UNIT;
2865 tctx.fc_ena = I40E_HMC_TX_FC_DISABLE;
2866 tctx.timesync_ena = I40E_HMC_TX_TS_DISABLE;
2867 tctx.fd_ena = I40E_HMC_TX_FD_DISABLE;
2868 tctx.alt_vlan_ena = I40E_HMC_TX_ALT_VLAN_DISABLE;
2869 tctx.head_wb_ena = I40E_HMC_TX_WB_ENABLE;
2870 tctx.qlen = itrq->itrq_tx_ring_size;
2871 tctx.tphrdesc_ena = I40E_HMC_TX_TPH_DISABLE;
2872 tctx.tphrpacket_ena = I40E_HMC_TX_TPH_DISABLE;
2873 tctx.tphwdesc_ena = I40E_HMC_TX_TPH_DISABLE;
2874 tctx.head_wb_addr = itrq->itrq_desc_area.dmab_dma_address +
2875 sizeof (i40e_tx_desc_t) * itrq->itrq_tx_ring_size;
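	/*
	 * That is, the head write-back area lives in the same DMA
	 * allocation, immediately past the last tx descriptor.
	 */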
2876
2877 /*
	 * Like crc, this field isn't actually documented, but the common code
	 * suggests that it should be zeroed. We leave both of these here for
	 * now. We should check with Intel on why these fields even exist.
2881 */
2882 tctx.crc = 0;
2883 tctx.rdylist_act = 0;
2884
2885 /*
	 * We're supposed to assign the rdylist field the value of the
2887 * traffic class index for the first device. We query the VSI parameters
2888 * again to get what the handle is. Note that every queue is always
2889 * assigned to traffic class zero, because we don't actually use them.
2890 */
2891 bzero(&context, sizeof (struct i40e_vsi_context));
2892 context.seid = I40E_DEF_VSI_SEID(i40e);
2893 context.pf_num = hw->pf_id;
2894 err = i40e_aq_get_vsi_params(hw, &context, NULL);
2895 if (err != I40E_SUCCESS) {
2896 i40e_error(i40e, "get VSI params failed with %d", err);
2897 return (B_FALSE);
2898 }
2899 tctx.rdylist = LE_16(context.info.qs_handle[0]);
2900
2901 err = i40e_clear_lan_tx_queue_context(hw, itrq->itrq_index);
2902 if (err != I40E_SUCCESS) {
2903 i40e_error(i40e, "failed to clear tx queue %d context: %d",
2904 itrq->itrq_index, err);
2905 return (B_FALSE);
2906 }
2907
2908 err = i40e_set_lan_tx_queue_context(hw, itrq->itrq_index, &tctx);
2909 if (err != I40E_SUCCESS) {
2910 i40e_error(i40e, "failed to set tx queue %d context: %d",
2911 itrq->itrq_index, err);
2912 return (B_FALSE);
2913 }
2914
2915 return (B_TRUE);
2916 }
2917
2918 /*
2919 * Take care of setting up the descriptor rings and actually programming the
2920 * device. See 8.4.3.1.1 for what we need to do here.
2921 */
2922 static boolean_t
2923 i40e_setup_tx_rings(i40e_t *i40e)
2924 {
2925 int i;
2926 i40e_hw_t *hw = &i40e->i40e_hw_space;
2927
2928 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
2929 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[i];
2930 uint32_t reg;
2931
2932 /*
2933 * Step 1. Clear the queue disable flag and verify that the
2934 * index is set correctly.
2935 */
2936 i40e_pre_tx_queue_cfg(hw, i, B_TRUE);
2937
2938 /*
2939 * Step 2. Prepare the queue's FPM/HMC context.
2940 */
2941 if (i40e_setup_tx_hmc(itrq) == B_FALSE)
2942 return (B_FALSE);
2943
2944 /*
		 * Step 3. Make it clear that this PF owns this queue.
2946 */
2947 reg = I40E_QTX_CTL_PF_QUEUE;
2948 reg |= (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
2949 I40E_QTX_CTL_PF_INDX_MASK;
2950 I40E_WRITE_REG(hw, I40E_QTX_CTL(itrq->itrq_index), reg);
2951 i40e_flush(hw);
2952
2953 /*
2954 * Step 4. Set the QENA_REQ flag.
2955 */
2956 reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
2957 VERIFY0(reg & (I40E_QTX_ENA_QENA_REQ_MASK |
2958 I40E_QTX_ENA_QENA_STAT_MASK));
2959 reg |= I40E_QTX_ENA_QENA_REQ_MASK;
2960 I40E_WRITE_REG(hw, I40E_QTX_ENA(i), reg);
2961 }
2962
2963 /*
	 * Note, we issue the enable for every queue before we start checking
	 * any of them, which will hopefully let most queues finish enabling
	 * by the time we poll them below.
2966 */
2967 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
2968 uint32_t j, reg;
2969
2970 /*
2971 * Step 5. Verify that QENA_STAT has been set. It's promised
2972 * that this should occur within about 10 us, but like BSD,
2973 * we'll try for up to 100 ms for this queue.
2974 */
2975 for (j = 0; j < I40E_RING_WAIT_NTRIES; j++) {
2976 reg = I40E_READ_REG(hw, I40E_QTX_ENA(i));
2977
2978 if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
2979 break;
2980 i40e_msec_delay(I40E_RING_WAIT_PAUSE);
2981 }
2982
2983 if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
2984 i40e_error(i40e, "failed to enable tx queue %d, timed "
2985 "out", i);
2986 return (B_FALSE);
2987 }
2988 }
2989
2990 return (B_TRUE);
2991 }
2992
2993 void
2994 i40e_stop(i40e_t *i40e, boolean_t free_allocations)
2995 {
2996 uint_t i;
2997 i40e_hw_t *hw = &i40e->i40e_hw_space;
2998
2999 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
3000
3001 /*
3002 * Shutdown and drain the tx and rx pipeline. We do this using the
3003 * following steps.
3004 *
3005 * 1) Shutdown interrupts to all the queues (trying to keep the admin
3006 * queue alive).
3007 *
3008 * 2) Remove all of the interrupt tx and rx causes by setting the
3009 * interrupt linked lists to zero.
3010 *
	 * 3) Shutdown the tx and rx rings. Because i40e_shutdown_rings() should
3012 * wait for all the queues to be disabled, once we reach that point
3013 * it should be safe to free associated data.
3014 *
3015 * 4) Wait 50ms after all that is done. This ensures that the rings are
3016 * ready for programming again and we don't have to think about this
3017 * in other parts of the driver.
3018 *
3019 * 5) Disable remaining chip interrupts, (admin queue, etc.)
3020 *
3021 * 6) Verify that FM is happy with all the register accesses we
3022 * performed.
3023 */
3024 i40e_intr_io_disable_all(i40e);
3025 i40e_intr_io_clear_cause(i40e);
3026
3027 if (i40e_shutdown_rings(i40e) == B_FALSE) {
3028 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3029 }
3030
3031 delay(50 * drv_usectohz(1000));
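	/* delay() takes ticks; drv_usectohz(1000) is one millisecond. */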
3032
3033 /*
3034 * We don't delete the default VSI because it replaces the VEB
3035 * after VEB deletion (see the "Delete Element" section).
3036 * Furthermore, since the default VSI is provided by the
3037 * firmware, we never attempt to delete it.
3038 */
3039 for (i = 1; i < i40e->i40e_num_rx_groups; i++) {
3040 i40e_delete_vsi(i40e, i);
3041 }
3042
3043 if (i40e->i40e_veb_seid != 0) {
3044 int rc = i40e_aq_delete_element(hw, i40e->i40e_veb_seid, NULL);
3045
3046 if (rc != I40E_SUCCESS) {
3047 i40e_error(i40e, "Failed to delete VEB %d: %d", rc,
3048 hw->aq.asq_last_status);
3049 }
3050
3051 i40e->i40e_veb_seid = 0;
3052 }
3053
3054 i40e_intr_chip_fini(i40e);
3055
3056 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
3057 mutex_enter(&i40e->i40e_trqpairs[i].itrq_rx_lock);
3058 mutex_enter(&i40e->i40e_trqpairs[i].itrq_tx_lock);
3059 }
3060
3061 /*
3062 * We should consider refactoring this to be part of the ring start /
3063 * stop routines at some point.
3064 */
3065 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
3066 i40e_stats_trqpair_fini(&i40e->i40e_trqpairs[i]);
3067 }
3068
3069 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
3070 DDI_FM_OK) {
3071 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3072 }
3073
3074 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
3075 i40e_tx_cleanup_ring(&i40e->i40e_trqpairs[i]);
3076 }
3077
3078 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
3079 mutex_exit(&i40e->i40e_trqpairs[i].itrq_rx_lock);
3080 mutex_exit(&i40e->i40e_trqpairs[i].itrq_tx_lock);
3081 }
3082
3083 for (i = 0; i < i40e->i40e_num_rx_groups; i++) {
3084 i40e_stat_vsi_fini(i40e, i);
3085 }
3086
3087 i40e->i40e_link_speed = 0;
3088 i40e->i40e_link_duplex = 0;
3089 i40e_link_state_set(i40e, LINK_STATE_UNKNOWN);
3090
3091 if (free_allocations) {
3092 i40e_free_ring_mem(i40e, B_FALSE);
3093 }
3094 }
3095
3096 boolean_t
3097 i40e_start(i40e_t *i40e, boolean_t alloc)
3098 {
3099 i40e_hw_t *hw = &i40e->i40e_hw_space;
3100 boolean_t rc = B_TRUE;
3101 int i, err;
3102
3103 ASSERT(MUTEX_HELD(&i40e->i40e_general_lock));
3104
3105 if (alloc) {
3106 if (i40e_alloc_ring_mem(i40e) == B_FALSE) {
3107 i40e_error(i40e,
3108 "Failed to allocate ring memory");
3109 return (B_FALSE);
3110 }
3111 }
3112
3113 /*
3114 * This should get refactored to be part of ring start and stop at
3115 * some point, along with most of the logic here.
3116 */
3117 for (i = 0; i < i40e->i40e_num_trqpairs; i++) {
3118 if (i40e_stats_trqpair_init(&i40e->i40e_trqpairs[i]) ==
3119 B_FALSE) {
3120 int j;
3121
3122 for (j = 0; j < i; j++) {
3123 i40e_trqpair_t *itrq = &i40e->i40e_trqpairs[j];
3124 i40e_stats_trqpair_fini(itrq);
3125 }
3126 return (B_FALSE);
3127 }
3128 }
3129
3130 if (!i40e_chip_start(i40e)) {
3131 i40e_fm_ereport(i40e, DDI_FM_DEVICE_INVAL_STATE);
3132 rc = B_FALSE;
3133 goto done;
3134 }
3135
3136 if (i40e_setup_rx_rings(i40e) == B_FALSE) {
3137 rc = B_FALSE;
3138 goto done;
3139 }
3140
3141 if (i40e_setup_tx_rings(i40e) == B_FALSE) {
3142 rc = B_FALSE;
3143 goto done;
3144 }
3145
3146 /*
3147 * Enable broadcast traffic; however, do not enable multicast traffic.
	 * That's handled exclusively through MAC's mc_multicst routines.
3149 */
3150 err = i40e_aq_set_vsi_broadcast(hw, I40E_DEF_VSI_SEID(i40e), B_TRUE,
3151 NULL);
3152 if (err != I40E_SUCCESS) {
3153 i40e_error(i40e, "failed to set default VSI: %d", err);
3154 rc = B_FALSE;
3155 goto done;
3156 }
3157
3158 err = i40e_aq_set_mac_config(hw, i40e->i40e_frame_max, B_TRUE, 0, NULL);
3159 if (err != I40E_SUCCESS) {
3160 i40e_error(i40e, "failed to set MAC config: %d", err);
3161 rc = B_FALSE;
3162 goto done;
3163 }
3164
3165 /*
3166 * Finally, make sure that we're happy from an FM perspective.
3167 */
3168 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_reg_handle) !=
3169 DDI_FM_OK) {
3170 rc = B_FALSE;
3171 goto done;
3172 }
3173
3174 /* Clear state bits prior to final interrupt enabling. */
3175 atomic_and_32(&i40e->i40e_state,
3176 ~(I40E_ERROR | I40E_STALL | I40E_OVERTEMP));
3177
3178 i40e_intr_io_enable_all(i40e);
3179
3180 done:
3181 if (rc == B_FALSE) {
3182 i40e_stop(i40e, B_FALSE);
3183 if (alloc == B_TRUE) {
3184 i40e_free_ring_mem(i40e, B_TRUE);
3185 }
3186 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3187 }
3188
3189 return (rc);
3190 }
3191
3192 /*
 * We may have loaned rx descriptors up to the stack. As such, if we still have
3194 * them outstanding, then we will not continue with detach.
3195 */
3196 static boolean_t
3197 i40e_drain_rx(i40e_t *i40e)
3198 {
3199 mutex_enter(&i40e->i40e_rx_pending_lock);
3200 while (i40e->i40e_rx_pending > 0) {
3201 if (cv_reltimedwait(&i40e->i40e_rx_pending_cv,
3202 &i40e->i40e_rx_pending_lock,
3203 drv_usectohz(I40E_DRAIN_RX_WAIT), TR_CLOCK_TICK) == -1) {
3204 mutex_exit(&i40e->i40e_rx_pending_lock);
3205 return (B_FALSE);
3206 }
3207 }
3208 mutex_exit(&i40e->i40e_rx_pending_lock);
3209
3210 return (B_TRUE);
3211 }
3212
3213 /*
3214 * DDI UFM Callbacks
3215 */
3216 static int
3217 i40e_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
3218 ddi_ufm_image_t *img)
3219 {
3220 if (imgno != 0)
3221 return (EINVAL);
3222
3223 ddi_ufm_image_set_desc(img, "Firmware");
3224 ddi_ufm_image_set_nslots(img, 1);
3225
3226 return (0);
3227 }
3228
3229 static int
3230 i40e_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
3231 uint_t slotno, ddi_ufm_slot_t *slot)
3232 {
3233 i40e_t *i40e = (i40e_t *)arg;
3234 char *fw_ver = NULL, *fw_bld = NULL, *api_ver = NULL;
3235 nvlist_t *misc = NULL;
3236 uint_t flags = DDI_PROP_DONTPASS;
3237 int err;
3238
3239 if (imgno != 0 || slotno != 0 ||
3240 ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
3241 "firmware-version", &fw_ver) != DDI_PROP_SUCCESS ||
3242 ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
3243 "firmware-build", &fw_bld) != DDI_PROP_SUCCESS ||
3244 ddi_prop_lookup_string(DDI_DEV_T_ANY, i40e->i40e_dip, flags,
3245 "api-version", &api_ver) != DDI_PROP_SUCCESS) {
3246 err = EINVAL;
3247 goto err;
3248 }
3249
3250 ddi_ufm_slot_set_attrs(slot, DDI_UFM_ATTR_ACTIVE);
3251 ddi_ufm_slot_set_version(slot, fw_ver);
3252
3253 (void) nvlist_alloc(&misc, NV_UNIQUE_NAME, KM_SLEEP);
3254 if ((err = nvlist_add_string(misc, "firmware-build", fw_bld)) != 0 ||
3255 (err = nvlist_add_string(misc, "api-version", api_ver)) != 0) {
3256 goto err;
3257 }
3258 ddi_ufm_slot_set_misc(slot, misc);
3259
3260 ddi_prop_free(fw_ver);
3261 ddi_prop_free(fw_bld);
3262 ddi_prop_free(api_ver);
3263
3264 return (0);
3265 err:
3266 nvlist_free(misc);
3267 if (fw_ver != NULL)
3268 ddi_prop_free(fw_ver);
3269 if (fw_bld != NULL)
3270 ddi_prop_free(fw_bld);
3271 if (api_ver != NULL)
3272 ddi_prop_free(api_ver);
3273
3274 return (err);
3275 }
3276
3277 static int
3278 i40e_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
3279 {
3280 *caps = DDI_UFM_CAP_REPORT;
3281
3282 return (0);
3283 }
3284
3285 static ddi_ufm_ops_t i40e_ufm_ops = {
3286 NULL,
3287 i40e_ufm_fill_image,
3288 i40e_ufm_fill_slot,
3289 i40e_ufm_getcaps
3290 };
3291
3292 static int
3293 i40e_attach(dev_info_t *devinfo, ddi_attach_cmd_t cmd)
3294 {
3295 i40e_t *i40e;
3296 struct i40e_osdep *osdep;
3297 i40e_hw_t *hw;
3298 int instance;
3299
3300 if (cmd != DDI_ATTACH)
3301 return (DDI_FAILURE);
3302
3303 instance = ddi_get_instance(devinfo);
3304 i40e = kmem_zalloc(sizeof (i40e_t), KM_SLEEP);
3305
3306 i40e->i40e_aqbuf = kmem_zalloc(I40E_ADMINQ_BUFSZ, KM_SLEEP);
3307 i40e->i40e_instance = instance;
3308 i40e->i40e_dip = devinfo;
3309
3310 hw = &i40e->i40e_hw_space;
3311 osdep = &i40e->i40e_osdep_space;
3312 hw->back = osdep;
3313 osdep->ios_i40e = i40e;
3314
3315 ddi_set_driver_private(devinfo, i40e);
3316
3317 i40e_fm_init(i40e);
3318 i40e->i40e_attach_progress |= I40E_ATTACH_FM_INIT;
3319
3320 if (pci_config_setup(devinfo, &osdep->ios_cfg_handle) != DDI_SUCCESS) {
3321 i40e_error(i40e, "Failed to map PCI configurations.");
3322 goto attach_fail;
3323 }
3324 i40e->i40e_attach_progress |= I40E_ATTACH_PCI_CONFIG;
3325
3326 i40e_identify_hardware(i40e);
3327
3328 if (!i40e_regs_map(i40e)) {
3329 i40e_error(i40e, "Failed to map device registers.");
3330 goto attach_fail;
3331 }
3332 i40e->i40e_attach_progress |= I40E_ATTACH_REGS_MAP;
3333
3334 i40e_init_properties(i40e);
3335 i40e->i40e_attach_progress |= I40E_ATTACH_PROPS;
3336
3337 if (!i40e_common_code_init(i40e, hw))
3338 goto attach_fail;
3339 i40e->i40e_attach_progress |= I40E_ATTACH_COMMON_CODE;
3340
3341 /*
3342 * When we participate in IRM, we should make sure that we register
	 * ourselves with it before registering our interrupt callbacks.
3344 */
3345 if (!i40e_alloc_intrs(i40e, devinfo)) {
3346 i40e_error(i40e, "Failed to allocate interrupts.");
3347 goto attach_fail;
3348 }
3349 i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_INTR;
3350
3351 if (!i40e_alloc_trqpairs(i40e)) {
3352 i40e_error(i40e,
3353 "Failed to allocate receive & transmit rings.");
3354 goto attach_fail;
3355 }
3356 i40e->i40e_attach_progress |= I40E_ATTACH_ALLOC_RINGSLOCKS;
3357
3358 if (!i40e_map_intrs_to_vectors(i40e)) {
3359 i40e_error(i40e, "Failed to map interrupts to vectors.");
3360 goto attach_fail;
3361 }
3362
3363 if (!i40e_add_intr_handlers(i40e)) {
3364 i40e_error(i40e, "Failed to add the interrupt handlers.");
3365 goto attach_fail;
3366 }
3367 i40e->i40e_attach_progress |= I40E_ATTACH_ADD_INTR;
3368
3369 if (!i40e_final_init(i40e)) {
3370 i40e_error(i40e, "Final initialization failed.");
3371 goto attach_fail;
3372 }
3373 i40e->i40e_attach_progress |= I40E_ATTACH_INIT;
3374
3375 if (i40e_check_acc_handle(i40e->i40e_osdep_space.ios_cfg_handle) !=
3376 DDI_FM_OK) {
3377 ddi_fm_service_impact(i40e->i40e_dip, DDI_SERVICE_LOST);
3378 goto attach_fail;
3379 }
3380
3381 if (!i40e_stats_init(i40e)) {
3382 i40e_error(i40e, "Stats initialization failed.");
3383 goto attach_fail;
3384 }
3385 i40e->i40e_attach_progress |= I40E_ATTACH_STATS;
3386
3387 if (!i40e_register_mac(i40e)) {
3388 i40e_error(i40e, "Failed to register to MAC/GLDv3");
3389 goto attach_fail;
3390 }
3391 i40e->i40e_attach_progress |= I40E_ATTACH_MAC;
3392
3393 i40e->i40e_periodic_id = ddi_periodic_add(i40e_timer, i40e,
3394 I40E_CYCLIC_PERIOD, DDI_IPL_0);
3395 if (i40e->i40e_periodic_id == 0) {
3396 i40e_error(i40e, "Failed to add the link-check timer");
3397 goto attach_fail;
3398 }
3399 i40e->i40e_attach_progress |= I40E_ATTACH_LINK_TIMER;
3400
3401 if (!i40e_enable_interrupts(i40e)) {
3402 i40e_error(i40e, "Failed to enable DDI interrupts");
3403 goto attach_fail;
3404 }
3405 i40e->i40e_attach_progress |= I40E_ATTACH_ENABLE_INTR;
3406
3407 if (ddi_ufm_init(i40e->i40e_dip, DDI_UFM_CURRENT_VERSION, &i40e_ufm_ops,
3408 &i40e->i40e_ufmh, i40e) != 0) {
3409 i40e_error(i40e, "failed to initialize UFM subsystem");
3410 goto attach_fail;
3411 }
3412 ddi_ufm_update(i40e->i40e_ufmh);
3413 i40e->i40e_attach_progress |= I40E_ATTACH_UFM_INIT;
3414
3415 atomic_or_32(&i40e->i40e_state, I40E_INITIALIZED);
3416
3417 mutex_enter(&i40e_glock);
3418 list_insert_tail(&i40e_glist, i40e);
3419 mutex_exit(&i40e_glock);
3420
3421 return (DDI_SUCCESS);
3422
3423 attach_fail:
3424 i40e_unconfigure(devinfo, i40e);
3425 return (DDI_FAILURE);
3426 }
3427
3428 static int
3429 i40e_detach(dev_info_t *devinfo, ddi_detach_cmd_t cmd)
3430 {
3431 i40e_t *i40e;
3432
3433 if (cmd != DDI_DETACH)
3434 return (DDI_FAILURE);
3435
3436 i40e = (i40e_t *)ddi_get_driver_private(devinfo);
3437 if (i40e == NULL) {
3438 i40e_log(NULL, "i40e_detach() called with no i40e pointer!");
3439 return (DDI_FAILURE);
3440 }
3441
3442 if (i40e_drain_rx(i40e) == B_FALSE) {
3443 i40e_log(i40e, "timed out draining DMA resources, %d buffers "
3444 "remain", i40e->i40e_rx_pending);
3445 return (DDI_FAILURE);
3446 }
3447
3448 mutex_enter(&i40e_glock);
3449 list_remove(&i40e_glist, i40e);
3450 mutex_exit(&i40e_glock);
3451
3452 i40e_unconfigure(devinfo, i40e);
3453
3454 return (DDI_SUCCESS);
3455 }
3456
3457 static struct cb_ops i40e_cb_ops = {
3458 nulldev, /* cb_open */
3459 nulldev, /* cb_close */
3460 nodev, /* cb_strategy */
3461 nodev, /* cb_print */
3462 nodev, /* cb_dump */
3463 nodev, /* cb_read */
3464 nodev, /* cb_write */
3465 nodev, /* cb_ioctl */
3466 nodev, /* cb_devmap */
3467 nodev, /* cb_mmap */
3468 nodev, /* cb_segmap */
3469 nochpoll, /* cb_chpoll */
3470 ddi_prop_op, /* cb_prop_op */
3471 NULL, /* cb_stream */
3472 D_MP | D_HOTPLUG, /* cb_flag */
3473 CB_REV, /* cb_rev */
3474 nodev, /* cb_aread */
3475 nodev /* cb_awrite */
3476 };
3477
3478 static struct dev_ops i40e_dev_ops = {
3479 DEVO_REV, /* devo_rev */
3480 0, /* devo_refcnt */
3481 NULL, /* devo_getinfo */
3482 nulldev, /* devo_identify */
3483 nulldev, /* devo_probe */
3484 i40e_attach, /* devo_attach */
3485 i40e_detach, /* devo_detach */
3486 nodev, /* devo_reset */
3487 &i40e_cb_ops, /* devo_cb_ops */
3488 NULL, /* devo_bus_ops */
3489 ddi_power, /* devo_power */
3490 ddi_quiesce_not_supported /* devo_quiesce */
3491 };
3492
3493 static struct modldrv i40e_modldrv = {
3494 &mod_driverops,
3495 i40e_ident,
3496 &i40e_dev_ops
3497 };
3498
3499 static struct modlinkage i40e_modlinkage = {
3500 MODREV_1,
3501 &i40e_modldrv,
3502 NULL
3503 };
3504
3505 /*
3506 * Module Initialization Functions.
3507 */
3508 int
3509 _init(void)
3510 {
3511 int status;
3512
3513 list_create(&i40e_glist, sizeof (i40e_t), offsetof(i40e_t, i40e_glink));
3514 list_create(&i40e_dlist, sizeof (i40e_device_t),
3515 offsetof(i40e_device_t, id_link));
3516 mutex_init(&i40e_glock, NULL, MUTEX_DRIVER, NULL);
3517 mac_init_ops(&i40e_dev_ops, I40E_MODULE_NAME);
3518
3519 status = mod_install(&i40e_modlinkage);
3520 if (status != DDI_SUCCESS) {
3521 mac_fini_ops(&i40e_dev_ops);
3522 mutex_destroy(&i40e_glock);
3523 list_destroy(&i40e_dlist);
3524 list_destroy(&i40e_glist);
3525 }
3526
3527 return (status);
3528 }
3529
3530 int
3531 _info(struct modinfo *modinfop)
3532 {
3533 return (mod_info(&i40e_modlinkage, modinfop));
3534 }
3535
3536 int
3537 _fini(void)
3538 {
3539 int status;
3540
3541 status = mod_remove(&i40e_modlinkage);
3542 if (status == DDI_SUCCESS) {
3543 mac_fini_ops(&i40e_dev_ops);
3544 mutex_destroy(&i40e_glock);
3545 list_destroy(&i40e_dlist);
3546 list_destroy(&i40e_glist);
3547 }
3548
3549 return (status);
3550 }