1 /*
   2  * This file and its contents are supplied under the terms of the
   3  * Common Development and Distribution License ("CDDL"), version 1.0.
   4  * You may only use this file in accordance with the terms of version
   5  * 1.0 of the CDDL.
   6  *
   7  * A full copy of the text of the CDDL should have accompanied this
   8  * source.  A copy of the CDDL is also available via the Internet at
   9  * http://www.illumos.org/license/CDDL.
  10  */
  11 
  12 /*
  13  * Copyright (c) 2017, Joyent, Inc.
  14  * Copyright (c) 2018, Western Digital Corporation.
  15  */
  16 
  17 /*
  18  * Extensible Host Controller Interface (xHCI) USB Driver
  19  *
  20  * The xhci driver is an HCI driver for USB: it bridges the gap between client
  21  * device drivers and the controller hardware, implementing the actual way that
  22  * we talk to devices. xHCI provides access to USB 3.x capable devices and all
  23  * prior generations. Like other host controllers, it both provides the way to
  24  * talk to devices and also is treated like a hub (often called the root hub).
  25  *
  26  * This driver is part of the USBA (USB Architecture). It implements the HCDI
  27  * (host controller device interface) end of USBA. These entry points are used
  28  * by the USBA on behalf of client device drivers to access their devices. The
  29  * driver also provides notifications to deal with hot plug events, which are
  30  * quite common in USB.
  31  *
  32  * ----------------
  33  * USB Introduction
  34  * ----------------
  35  *
  36  * To properly understand the xhci driver and the design of the USBA HCDI
  37  * interfaces it implements, it helps to have a bit of background into how USB
  38  * devices are structured and understand how they work at a high-level.
  39  *
  40  * USB devices, like PCI devices, are broken down into different classes of
  41  * device. For example, with USB you have hubs, human-input devices (keyboards,
  42  * mice, etc.), mass storage, etc. Every device also has a vendor and device ID.
  43  * Many client drivers bind to an entire class of device, for example, the hubd
  44  * driver (to hubs) or scsa2usb (USB storage). However, there are other drivers
  45  * that bind to explicit IDs such as usbsprl (specific USB to Serial devices).
  46  *
  47  * USB SPEEDS AND VERSIONS
  48  *
  49  * USB devices are often referred to in two different ways. One way they're
  50  * described is with the USB version that they conform to. In the wild, you're
  51  * most likely going to see USB 1.1, 2.0, 2.1, and 3.0. However, you may also
  52  * see devices referred to as 'full-', 'low-', 'high-', and 'super-' speed
  53  * devices.
  54  *
  55  * The latter description describes the maximum theoretical speed of a given
  56  * device. For example, a super-speed device theoretically caps out around 5
  57  * Gbit/s, whereas a low-speed device caps out at 1.5 Mbit/s.
  58  *
  59  * In general, each speed usually corresponds to a specific USB protocol
  60  * generation. For example, all USB 3.0 devices are super-speed devices. All
  61  * 'high-speed' devices are USB 2.x devices. Full-speed devices are special in
  62  * that they can either be USB 1.x or USB 2.x devices. Low-speed devices are
  63  * only a USB 1.x thing; they did not jump the fire line to USB 2.x.
  64  *
  65  * USB 3.0 devices and ports generally have the wiring for both USB 2.0 and USB
  66  * 3.0. When a USB 3.x device is plugged into a USB 2.0 port or hub, then it
  67  * will report its version as USB 2.1, to indicate that it is actually a USB 3.x
  68  * device.
  69  *
  70  * USB ENDPOINTS
  71  *
  72  * A given USB device is made up of endpoints. A request, or transfer, is made
  73  * to a specific USB endpoint. These endpoints can provide different services
  74  * and have different expectations around the size of the data that'll be used
  75  * in a given request and the periodicity of requests. Endpoints themselves are
  76  * either used to make one-shot requests, for example, making requests to a mass
  77  * storage device for a given sector, or for making periodic requests where you
  78  * end up polling on the endpoint, for example, polling on a USB keyboard for
  79  * keystrokes.
  80  *
  81  * Each endpoint encodes two different pieces of information: a direction and a
  82  * type. There are two different directions: IN and OUT. These refer to the
  83  * general direction that data moves relative to the operating system. For
  84  * example, an IN transfer transfers data in to the operating system, from the
  85  * device. An OUT transfer transfers data from the operating system, out to the
  86  * device.
  87  *
  88  * There are four different kinds of endpoints:
  89  *
  90  *      BULK            These transfers are large transfers of data to or from
  91  *                      a device. The most common use for bulk transfers is for
  92  *                      mass storage devices. Though they are often also used by
  93  *                      network devices and more. Bulk endpoints do not have an
  94  *                      explicit time component to them. They are always used
  95  *                      for one-shot transfers.
  96  *
  97  *      CONTROL         These transfers are used to manipulate devices
  98  *                      themselves and are used for USB protocol level
  99  *                      operations (whether device-specific, class-specific, or
 100  *                      generic across all of USB). Unlike other transfers,
 101  *                      control transfers are always bi-directional and are
 102  *                      built from several stages (setup, data, and status).
 103  *
 104  *      INTERRUPT       Interrupt transfers are used for small transfers that
 105  *                      happen infrequently, but need reasonable latency. A good
 106  *                      example of interrupt transfers is to receive input from
 107  *                      a USB keyboard. Interrupt-IN transfers are generally
 108  *                      polled. Meaning that a client (device driver) opens up
 109  *                      an interrupt-IN pipe to poll on it, and receives
 110  *                      periodic updates whenever there is information
 111  *                      available. However, Interrupt transfers can be used
 112  *                      as one-shot transfers both going IN and OUT.
 113  *
 114  *      ISOCHRONOUS     These transfers are things that happen once per
 115  *                      time-interval at a very regular rate. A good example of
 116  *                      these transfers are for audio and video. A device may
 117  *                      describe an interval as 10ms at which point it will read
 118  *                      or write the next batch of data every 10ms and transform
 119  *                      it for the user. There are no one-shot Isochronous-IN
 120  *                      transfers. There are one-shot Isochronous-OUT transfers,
 121  *                      but these are used by device drivers to always provide
 122  *                      the system with sufficient data.
 123  *
 124  * To find out information about the endpoints, USB devices have a series of
 125  * descriptors that cover different aspects of the device. For example, there
 126  * are endpoint descriptors which cover the properties of endpoints such as the
 127  * maximum packet size or polling interval.
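 *
 * Purely as an illustration (this is not a real xhci code path), this is
 * roughly how a direction and type fall out of an endpoint descriptor
 * (usb_ep_descr(9S)) using the standard USBA mask constants:
 *
 *        uint8_t dir = ep->bEndpointAddress & USB_EP_DIR_MASK;
 *        uint8_t type = ep->bmAttributes & USB_EP_ATTR_MASK;
 *        boolean_t is_bulk_in = (type == USB_EP_ATTR_BULK &&
 *            dir == USB_EP_DIR_IN);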
 128  *
 129  * Descriptors exist at all levels of USB. For example, there are general
 130  * descriptors for every device. The USB device descriptor is described in
 131  * usb_dev_descr(9S). Host controllers will look at these descriptors to ensure
 132  * that they program the device correctly; however, they are more often used by
 133  * client device drivers. There are also descriptors that exist at a class
 134  * level. For example, the hub class has a class-specific descriptor which
 135  * describes properties of the hub. That information is requested and used
 136  * by the hub driver.
 137  *
 138  * All of the different descriptors are gathered by the system and placed into a
 139  * tree which USBA sometimes calls the 'Configuration Cloud'. Client device
 140  * drivers gain access to this cloud and then use them to open endpoints, which
 141  * are called pipes in USBA (and some revisions of the USB specification).
 142  *
 143  * Each pipe gives access to a specific endpoint on the device which can be used
 144  * to perform transfers of a specific type and direction. For example, a mass
 145  * storage device often has three different endpoints, the default control
 146  * endpoint (which every device has), a Bulk-IN endpoint, and a Bulk-OUT
 147  * endpoint. The device driver ends up with three open pipes. One to the default
 148  * control endpoint to configure the device, and then the other two are used to
 149  * perform I/O.
 150  *
 151  * These pipe operations translate more or less directly into calls to a host
 152  * controller driver. A request to open a pipe takes an endpoint descriptor that
 153  * describes the properties of the pipe, and the host controller driver (this
 154  * driver) goes through and does any work necessary to allow the client device
 155  * driver to access it. Once the pipe is open, it either makes one-shot
 156  * transfers specific to the transfer type or it starts performing a periodic
 157  * poll of an endpoint.
 158  *
 159  * All of these different actions translate into requests to the host
 160  * controller. The host controller driver itself is in charge of making sure
 161  * that all of the required resources for polling are allocated along with a
 162  * request and that it then delivers the client driver's periodic callbacks.
 163  *
 164  * HUBS AND HOST CONTROLLERS
 165  *
 166  * Every device is always plugged into a hub, even if the device is itself a
 167  * hub. This continues until we reach what we call the root-hub. The root-hub is
 168  * special in that it is not an actual USB hub, but is integrated into the host
 169  * controller and is manipulated in its own way. For example, the host
 170  * controller is used to turn on and off a given port's power. This may happen
 171  * over any interface, though the most common way is through PCI.
 172  *
 173  * In addition to the normal character device that exists for a host controller
 174  * driver, as part of attaching, the host controller binds to an instance of the
 175  * hubd driver. While the root-hub is a bit of a fiction, everyone models the
 176  * root-hub as the same as any other hub that's plugged in. The hub kernel
 177  * module doesn't know that the hub isn't a physical device that's been plugged
 178  * in. The host controller driver simulates that view by taking hub requests
 179  * that are made and translating them into corresponding requests that are
 180  * understood by the host controller, for example, reading and writing to a
 181  * memory mapped register.
 182  *
 183  * The hub driver polls for changes in device state using an Interrupt-IN
 184  * request, which is the same as is done for the root-hub. This allows the host
 185  * controller driver to not have to know about the implementation of device hot
 186  * plug, merely react to requests from a hub, the same as if it were an external
 187  * device. When the hub driver detects a change, it will go through the
 188  * corresponding state machine and attach or detach the corresponding client
 189  * device driver, depending if the device was inserted or removed.
 190  *
 191  * We detect the changes for the Interrupt-IN primarily based on the port state
 192  * change events that are delivered to the event ring. Whenever any event is
 193  * fired, we use this to update the hub driver about _all_ ports with
 194  * outstanding events. This more closely matches how a hub is supposed to behave
 195  * and makes it less likely that the hub driver ends up leaving a change flag
 196  * on a port uncleared.
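 *
 * For illustration only: the data returned on that interrupt-IN pipe is the
 * standard hub status-change bitmap, where bit 0 represents the hub itself and
 * bit N represents port N. A hedged sketch of building it (the helper named
 * here is hypothetical):
 *
 *        uint32_t bits = 0;
 *        for (uint_t port = 1; port <= nports; port++) {
 *                if (xhci_port_has_change(xhcip, port))
 *                        bits |= 1U << port;
 *        }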
 197  *
 198  * PACKET SIZES AND BURSTING
 199  *
 200  * A given USB endpoint has an explicit packet size and a number of packets that
 201  * can be sent per time interval. These concepts are abstracted away from client
 202  * device drivers usually, though they sometimes inform the upper bounds of what
 203  * a device can perform.
 204  *
 205  * The host controller uses this information to transform arbitrary transfer
 206  * requests into USB protocol packets. One of the nice things about the host
 207  * controllers is that they abstract away all of the signaling and semantics of
 208  * the actual USB protocols, allowing for life to be slightly easier in the
 209  * operating system.
 210  *
 211  * That said, if the host controller is not programmed correctly, these can end
 212  * up causing transaction errors and other problems in response to the data that
 213  * the host controller is trying to send or receive.
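 *
 * As a rough worked example of the scale involved: a super-speed bulk endpoint
 * advertising a 1024-byte maximum packet size and a maximum burst of 16
 * packets can move up to 16 * 1024 = 16384 bytes in a single burst, while a
 * full-speed bulk endpoint is limited to 64-byte packets.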
 214  *
 215  * ------------
 216  * Organization
 217  * ------------
 218  *
 219  * The driver is made up of the following files. Many of these have their own
 220  * theory statements to describe what they do. Here, we touch on the purpose of
 221  * each of these files.
 222  *
 223  * xhci_command.c:      This file contains the logic to issue commands to the
 224  *                      controller as well as the actual functions that the
 225  *                      other parts of the driver use to cause those commands.
 226  *
 227  * xhci_context.c:      This file manages the context data structures that the
 228  *                      controller uses to track controller-wide and per-device
 229  *                      state. See the xHCI Overview and Structure Layout
 230  *                      section for more information.
 231  *
 232  * xhci_dma.c:          This manages the allocation of DMA memory and DMA
 233  *                      attributes for the controller, whether memory is for a
 234  *                      transfer or something else. This file also deals with
 235  *                      all the logic of getting data in and out of DMA buffers.
 236  *
 237  * xhci_endpoint.c:     This manages all of the logic of handling endpoints or
 238  *                      pipes. It deals with endpoint configuration, I/O
 239  *                      scheduling, timeouts, and callbacks to USBA.
 240  *
 241  * xhci_event.c:        This manages callbacks from the hardware to the driver.
 242  *                      This covers command completion notifications and I/O
 243  *                      notifications.
 244  *
 245  * xhci_hub.c:          This manages the virtual root-hub. It basically
 246  *                      translates all of the USB-level hub requests into
 247  *                      their xhci-specific equivalents. It also contains the
 248  *                      functions to register this hub with USBA.
 249  *
 250  * xhci_intr.c:         This manages the underlying interrupt allocation,
 251  *                      interrupt moderation, and interrupt routines.
 252  *
 253  * xhci_quirks.c:       This manages information about buggy hardware that's
 254  *                      been collected and experienced primarily from other
 255  *                      systems.
 256  *
 257  * xhci_ring.c:         This manages the abstraction of a ring in xhci, which is
 258  *                      the primary means of communication between the driver
 259  *                      and the hardware, whether for the controller or a device.
 260  *
 261  * xhci_usba.c:         This implements all of the HCDI functions required by
 262  *                      USBA. This is the main entry point that drivers and the
 263  *                      kernel frameworks will reach to start any operation.
 264  *                      Many functions here will end up in the command and
 265  *                      endpoint code.
 266  *
 267  * xhci.c:              This provides the main kernel DDI interfaces and
 268  *                      performs device initialization.
 269  *
 270  * xhci.h:              This is the primary header file which defines
 271  *                      illumos-specific data structures and constants to manage
 272  *                      the system.
 273  *
 274  * xhcireg.h:           This header file defines all of the register offsets,
 275  *                      masks, and related macros. It also contains all of the
 276  *                      constants that are used in various structures as defined
 277  *                      by the specification, such as command offsets, etc.
 278  *
 279  * xhci_ioctl.h:        This contains a few private ioctls that are used by a
 280  *                      private debugging command.
 281  *
 282  * cmd/xhci/xhci_portsc:        This is a private utility that can be useful for
 283  *                              debugging xhci state. It is the only consumer of
 284  *                              xhci_ioctl.h and the private ioctls.
 285  *
 286  * ----------------------------------
 287  * xHCI Overview and Structure Layout
 288  * ----------------------------------
 289  *
 290  * The design and structure of this driver follows from the way that the xHCI
 291  * specification tells us that we have to work with hardware. First we'll give a
 292  * rough summary of how that works, though the xHCI 1.1 specification should be
 293  * referenced when going through this.
 294  *
 295  * There are three primary parts of the hardware -- registers, contexts, and
 296  * rings. The registers are memory mapped registers that come in four sets,
 297  * though all are found within the first BAR. These are used to program and
 298  * control the hardware and aspects of the devices. Beyond more traditional
 299  * device programming there are two primary sets of registers that are
 300  * important:
 301  *
 302  *   o Port Status and Control Registers (XHCI_PORTSC)
 303  *   o Doorbell Array (XHCI_DOORBELL)
 304  *
 305  * The port status and control registers are used to get and manipulate the
 306  * status of a given device. For example, turning on and off the power to it.
 307  * The Doorbell Array is used to kick off I/O operations and start the
 308  * processing of an I/O ring.
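 *
 * A hedged sketch of what 'ringing a doorbell' amounts to, assuming the
 * XHCI_DOORBELL() offset macro from xhcireg.h; 'slot' and 'dci' are a device's
 * slot number and an endpoint's device context index:
 *
 *        xhci_put32(xhcip, XHCI_R_DOOR, XHCI_DOORBELL(0), 0);
 *        xhci_put32(xhcip, XHCI_R_DOOR, XHCI_DOORBELL(slot), dci);
 *
 * The first write kicks the command ring (doorbell zero); the second kicks a
 * specific endpoint on a specific device.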
 309  *
 310  * The contexts are data structures that represent various pieces of information
 311  * in the controller. These contexts are generally filled out by the driver and
 312  * then acknowledged and consumed by the hardware. There are controller-wide
 313  * contexts (mostly managed in xhci_context.c) that are used to point to the
 314  * contexts that exist for each device in the system. The primary context is
 315  * called the Device Context Base Address Array (DCBAA).
 316  *
 317  * Each device in the system is allocated a 'slot', which is used to index into
 318  * the DCBAA. Slots are assigned based on issuing commands to the controller.
 319  * There are a fixed number of slots that determine the maximum number of
 320  * devices that can end up being supported in the system. Note this includes all
 321  * the devices plugged into the USB device tree, not just devices plugged into
 322  * ports on the chassis.
 323  *
 324  * For each device, there is a context structure that describes properties of
 325  * the device. For example, what speed is the device, is it a hub, etc. The
 326  * context has entries for the device as a whole and for each endpoint on the
 327  * device. As endpoints are enabled, their context information, which describes
 328  * things like the maximum packet size, is filled in and enabled. The mapping
 329  * between these contexts looks like:
 330  *
 331  *
 332  *      DCBAA
 333  *    +--------+                    Device Context
 334  *    | Slot 0 |------------------>+--------------+
 335  *    +--------+                   | Slot Context |
 336  *    |  ...   |                   +--------------+       +----------+
 337  *    +--------+   +------+        |  Endpoint 0  |------>| I/O Ring |
 338  *    | Slot n |-->| NULL |        | Context (Bi) |       +----------+
 339  *    +--------+   +------+        +--------------+
 340  *                                 |  Endpoint 1  |
 341  *                                 | Context (Out)|
 342  *                                 +--------------+
 343  *                                 |  Endpoint 1  |
 344  *                                 | Context (In) |
 345  *                                 +--------------+
 346  *                                 |      ...     |
 347  *                                 +--------------+
 348  *                                 | Endpoint 15  |
 349  *                                 | Context (In) |
 350  *                                 +--------------+
 351  *
 352  * These contexts are always owned by the controller, though we can read them
 353  * after various operations complete. Commands that toggle device state use a
 354  * specific input context, which is a variant of the device context. The only
 355  * difference is that it has an input context structure ahead of it to say which
 356  * sections of the device context should be evaluated.
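 *
 * A hedged sketch of filling in that input control portion (the field names
 * here are hypothetical; bit 0 selects the slot context and bit N selects the
 * context for device context index N):
 *
 *        input->ictx_add_flags = LE_32((1U << 0) | (1U << dci));
 *        input->ictx_drop_flags = LE_32(0);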
 357  *
 358  * Each active endpoint points us to an I/O ring, which leads us to the third
 359  * main data structure that's used by the device: rings. Rings are made up of
 360  * transfer request blocks (TRBs), which are joined together to form a given
 361  * transfer description (TD) which represents a single I/O request.
 362  *
 363  * These rings are used to issue I/O to individual endpoints, to issue commands
 364  * to the controller, and to receive notification of changes and completions.
 365  * Issued commands go on the special ring called the command ring while the
 366  * change and completion notifications go on the event ring.  More details are
 367  * available in xhci_ring.c. Each of these structures is represented by an
 368  * xhci_ring_t.
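 *
 * Each TRB is a fixed 16-byte, little-endian structure. The driver's view of
 * it is roughly the following (see xhci.h for the authoritative definition):
 *
 *        typedef struct xhci_trb {
 *                uint64_t        trb_addr;       (pointer or immediate data)
 *                uint32_t        trb_status;     (length, interrupter, status)
 *                uint32_t        trb_flags;      (type, cycle bit, flags)
 *        } xhci_trb_t;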
 369  *
 370  * Each ring can be made up of one or more disjoint regions of DMA; however, we
 371  * only use a single one. This also impacts some additional registers and
 372  * structures that exist. The event ring has an indirection table called the
 373  * Event Ring Segment Table (ERST). Each entry in the table (a segment)
 374  * describes a chunk of the event ring.
 375  *
 376  * One other thing worth calling out is the scratchpad. The scratchpad is a way
 377  * for the controller to be given arbitrary memory by the OS that it can use.
 378  * There are two parts to the scratchpad. The first part is an array whose
 379  * entries contain pointers to the actual addresses for the pages. The second
 380  * part that we allocate are the actual pages themselves.
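 *
 * A hedged sketch of how the scratchpad is published to the controller (the
 * names are hypothetical; per the specification, entry 0 of the DCBAA points
 * at the scratchpad array, and each array entry holds the DMA address of one
 * page):
 *
 *        for (i = 0; i < nscratch; i++)
 *                sp_array[i] = LE_64(sp_page_dma[i]);
 *        dcbaa_va[0] = LE_64(sp_array_dma);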
 381  *
 382  * -----------------------------
 383  * Endpoint State and Management
 384  * -----------------------------
 385  *
 386  * Endpoint management is one of the key parts of the xhci driver: every
 387  * endpoint is a pipe that a device driver uses, so endpoints are our primary
 388  * currency. Endpoints are enabled and disabled when the client device drivers
 389  * open and close a pipe. When an endpoint is enabled, we have to fill in an
 390  * endpoint's context structure with information about the endpoint. These
 391  * basically tell the controller important properties which it uses to ensure
 392  * that there is adequate bandwidth for the device.
 393  *
 394  * Each endpoint has its own ring as described in the previous section. We place
 395  * TRBs (transfer request blocks) onto a given ring to request I/O be performed.
 396  * Responses are placed on the event ring; in other words, the rings associated
 397  * with an endpoint are purely for producing I/O.
 398  *
 399  * Endpoints have a defined state machine as described in xHCI 1.1 / 4.8.3.
 400  * These states generally correspond with the state of the endpoint to process
 401  * I/O and handle timeouts. The driver basically follows a similar state machine
 402  * as described there. There are some deviations. For example, what they
 403  * describe as 'running' we break into both the Idle and Running states below.
 404  * We also have a notion of timed out and quiescing. The following image
 405  * summarizes the states and transitions:
 406  *
 407  *     +------+                                +-----------+
 408  *     | Idle |---------*--------------------->|  Running  |<-+
 409  *     +------+         . I/O queued on        +-----------+  |
 410  *        ^               ring and timeout        |  |  |     |
 411  *        |               scheduled.              |  |  |     |
 412  *        |                                       |  |  |     |
 413  *        +-----*---------------------------------+  |  |     |
 414  *        |     . No I/Os remain                     |  |     |
 415  *        |                                          |  |     |
 416  *        |                +------*------------------+  |     |
 417  *        |                |      . Timeout             |     |
 418  *        |                |        fires for           |     |
 419  *        |                |        I/O                 |     |
 420  *        |                v                            v     |
 421  *        |          +-----------+                +--------+  |
 422  *        |          | Timed Out |                | Halted |  |
 423  *        |          +-----------+                +--------+  |
 424  *        |             |                           |         |
 425  *        |             |   +-----------+           |         |
 426  *        |             +-->| Quiescing |<----------+         |
 427  *        |                 +-----------+                     |
 428  *        |   No TRBs.           |                . TRBs      |
 429  *        |   remain .           |                . Remain    |
 430  *        +----------*----<------+-------->-------*-----------+
 431  *
 432  * Normally, a given endpoint will oscillate between having TRBs scheduled and
 433  * not. Every time a new I/O is added to the endpoint, we'll ring the doorbell,
 434  * making sure that we're processing the ring, presuming that the endpoint isn't
 435  * in one of the error states.
 436  *
 437  * To detect device hangs, we have an active timeout(9F) per active endpoint
 438  * that ticks at a one second rate while we still have TRBs outstanding on an
 439  * endpoint. Once all outstanding TRBs have been processed, the timeout will
 440  * stop itself and there will be no active checking until the endpoint has I/O
 441  * scheduled on it again.
 442  *
 443  * There are two primary ways that things can go wrong on the endpoint. We can
 444  * either have a timeout or an event that transitions the endpoint to the Halted
 445  * state. In the halted state, we need to issue explicit commands to reset the
 446  * endpoint before removing the I/O.
 447  *
 448  * The way we handle both a timeout and a halted condition is similar, but the
 449  * way they are triggered is different. When we detect a halted condition, we
 450  * don't immediately clean it up, but instead wait for the client device driver
 451  * (or USBA on its behalf) to issue a pipe reset. When we detect a timeout, we
 452  * immediately take action (assuming no other action is ongoing).
 453  *
 454  * In both cases, we quiesce the device, which takes care of dealing with taking
 455  * the endpoint from whatever state it may be in and taking the appropriate
 456  * actions based on the state machine in xHCI 1.1 / 4.8.3. The end of quiescing
 457  * leaves the device stopped, which allows us to update the ring's pointer and
 458  * remove any TRBs that are causing problems.
 459  *
 460  * As part of all this, we ensure that we can only be quiescing the device from
 461  * a given path at a time. Any requests to schedule I/O during this time will
 462  * generally fail.
 463  *
 464  * The following image describes the state machine for the timeout logic. It
 465  * ties into the image above.
 466  *
 467  *         +----------+                            +---------+
 468  *         | Disabled |-----*--------------------->| Enabled |<--+
 469  *         +----------+     . TRBs scheduled       +---------+   *. 1 sec timer
 470  *             ^              and no active          |  |  |     |  fires and
 471  *             |              timer.                 |  |  |     |  another
 472  *             |                                     |  |  +--+--+  quiesce, in
 473  *             |                                     |  |     |     a bad state,
 474  *             +------*------------------------------+  |     ^     or decrement
 475  *             |      . 1 sec timer                     |     |     I/O timeout
 476  *             |        fires and                       |     |
 477  *             |        no TRBs or                      |     +--------------+
 478  *             |        endpoint shutdown               |                    |
 479  *             |                                        *. . timer counter   |
 480  *             ^                                        |    reaches zero    |
 481  *             |                                        v                    |
 482  *             |                                +--------------+             |
 483  *             +-------------*---------------<--| Quiesce ring |->---*-------+
 484  *                           . No more          | and fail I/O |     . restart
 485  *                             I/Os             +--------------+       timer as
 486  *                                                                     more I/Os
 487  *
 488  * As we described above, when there are active TRBs and I/Os, a 1 second
 489  * timeout(9F) will be active. Each second, we decrement a counter on the
 490  * current, active I/O until either a new I/O takes the head, or the counter
 491  * reaches zero. If the counter reaches zero, then we go through, quiesce the
 492  * ring, and then clean things up.
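 *
 * A hedged sketch of that once-a-second check (the structure member and
 * function names here are illustrative, not exact):
 *
 *        xhci_transfer_t *head = list_head(&xep->xep_transfers);
 *        if (head == NULL)
 *                return;                 (nothing queued, timer goes away)
 *        if (--head->xt_timeout > 0) {
 *                xep->xep_timeout = timeout(xhci_endpoint_tick, xep,
 *                    drv_usectohz(MICROSEC));
 *                return;
 *        }
 *        (counter hit zero: quiesce the ring and fail the head I/O)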
 493  *
 494  * ------------------
 495  * Periodic Endpoints
 496  * ------------------
 497  *
 498  * It's worth calling out periodic endpoints explicitly, as they operate
 499  * somewhat differently. Periodic endpoints are limited to Interrupt-IN and
 500  * Isochronous-IN. The USBA often uses the term polling for these. That's
 501  * because the client only needs to make a single API call; however, they'll
 502  * receive multiple callbacks until either an error occurs or polling is
 503  * requested to be terminated.
 504  *
 505  * When we have one of these periodic requests, we end up always rescheduling
 506  * I/O requests, as well as having a specific number of pre-existing I/O
 507  * requests to cover the periodic needs, in case of latency spikes. Normally,
 508  * when replying to a request, we use the request handle that we were given.
 509  * However, when we have a periodic request, we're required to duplicate the
 510  * handle before giving them data.
 511  *
 512  * However, the duplication is a bit tricky. For everything that was duplicated,
 513  * the framework expects us to submit data. Because of that, we don't duplicate
 514  * them until they are needed. This minimizes the likelihood that we have
 515  * outstanding requests to deal with when we encounter a fatal polling failure.
 516  *
 517  * Most of the polling setup logic happens in xhci_usba.c in
 518  * xhci_hcdi_periodic_init(). The consumption and duplication is handled in
 519  * xhci_endpoint.c.
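 *
 * A hedged sketch of the duplication step for an interrupt-IN pipe, assuming
 * the usba_hcdi_dup_intr_req() helper (error handling omitted):
 *
 *        usb_intr_req_t *dup = usba_hcdi_dup_intr_req(dip, orig,
 *            orig->intr_len, USB_FLAGS_NOSLEEP);
 *        if (dup != NULL) {
 *                (copy the received bytes into dup->intr_data)
 *                usba_hcdi_cb(ph, (usb_opaque_t)dup, USB_CR_OK);
 *        }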
 520  *
 521  * ----------------
 522  * Structure Layout
 523  * ----------------
 524  *
 525  * The following images relate the core data structures. The primary structure
 526  * in the system is the xhci_t. This is the per-controller data structure that
 527  * exists for each instance of the driver. From there, each device in the system
 528  * is represented by an xhci_device_t and each endpoint is represented by an
 529  * xhci_endpoint_t. For each client that opens a given endpoint, there is an
 530  * xhci_pipe_t. For each I/O related ring, there is an xhci_ring_t in the
 531  * system.
 532  *
 533  *     +------------------------+
 534  *     | Per-Controller         |
 535  *     | Structure              |
 536  *     | xhci_t                 |
 537  *     |                        |
 538  *     | uint_t              ---+--> Capability regs offset
 539  *     | uint_t              ---+--> Operational regs offset
 540  *     | uint_t              ---+--> Runtime regs offset
 541  *     | uint_t              ---+--> Doorbell regs offset
 542  *     | xhci_state_flags_t  ---+--> Device state flags
 543  *     | xhci_quirks_t       ---+--> Device quirk flags
 544  *     | xhci_capability_t   ---+--> Controller capability structure
 545  *     | xhci_dcbaa_t        ---+----------------------------------+
 546  *     | xhci_scratchpad_t   ---+---------+                        |
 547  *     | xhci_event_ring_t   ---+------+  |                        v
 548  *     | xhci_command_ring_t ---+----+ |  |              +---------------------+
 549  *     | xhci_usba_t         ---+--+ | |  |              | Device Context      |
 550  *     +------------------------+  | | |  |              | Base Address        |
 551  *                                 | | |  |              | Array Structure     |
 552  *                                 | | |  |              | xhci_dcbaa_t        |
 553  * +-------------------------------+ | |  |              |                     |
 554  * | +-------------------------------+ |  |  DCBAA KVA <-+--      uint64_t *   |
 555  * | |    +----------------------------+  | DMA Buffer <-+-- xhci_dma_buffer_t |
 556  * | |    v                               |              +---------------------+
 557  * | | +--------------------------+       +-----------------------+
 558  * | | | Event Ring               |                               |
 559  * | | | Management               |                               |
 560  * | | | xhci_event_ring_t        |                               v
 561  * | | |                          |   Event Ring         +----------------------+
 562  * | | | xhci_event_segment_t * --|-> Segment VA         | Scratchpad (Extra    |
 563  * | | | xhci_dma_buffer_t      --|-> Segment DMA Buf.   | Controller Memory)   |
 564  * | | | xhci_ring_t            --|--+                   |  xhci_scratchpad_t   |
 565  * | | +--------------------------+  |      Scratchpad   |                      |
 566  * | |                               |  Base Array KVA <-+-      uint64_t *     |
 567  * | +------------+                  |  Array DMA Buf. <-+-  xhci_dma_buffer_t  |
 568  * |              v                  |  Scratchpad DMA <-+- xhci_dma_buffer_t * |
 569  * |   +---------------------------+ |  Buffer per page  +----------------------+
 570  * |   | Command Ring              | |
 571  * |   | xhci_command_ring_t       | +------------------------------+
 572  * |   |                           |                                |
 573  * |   | xhci_ring_t             --+-> Command Ring --->------------+
 574  * |   | list_t                  --+-> Command List                 v
 575  * |   | timeout_id_t            --+-> Timeout State     +---------------------+
 576  * |   | xhci_command_ring_state_t +-> State Flags       | I/O Ring            |
 577  * |   +---------------------------+                     | xhci_ring_t         |
 578  * |                                                     |                     |
 579  * |                                     Ring DMA Buf. <-+-- xhci_dma_buffer_t |
 580  * |                                       Ring Length <-+--        uint_t     |
 581  * |                                    Ring Entry KVA <-+--    xhci_trb_t *   |
 582  * |    +---------------------------+        Ring Head <-+--        uint_t     |
 583  * +--->| USBA State                |        Ring Tail <-+--        uint_t     |
 584  *      | xhci_usba_t               |       Ring Cycle <-+--        uint_t     |
 585  *      |                           |                    +---------------------+
 586  *      | usba_hcdi_ops_t *        -+-> USBA Ops Vector                        ^
 587  *      | usb_dev_dscr_t           -+-> USB Virtual Device Descriptor          |
 588  *      | usb_ss_hub_descr_t       -+-> USB Virtual Hub Descriptor             |
 589  *      | usba_pipe_handle_data_t * +-> Interrupt polling client               |
 590  *      | usb_intr_req_t           -+-> Interrupt polling request              |
 591  *      | uint32_t                --+-> Interrupt polling device mask          |
 592  *      | list_t                  --+-> Pipe List (Active Users)               |
 593  *      | list_t                  --+-------------------+                     |
 594  *      +---------------------------+                   |                     ^
 595  *                                                      |                     |
 596  *                                                      v                     |
 597  *     +-------------------------------+             +---------------+        |
 598  *     | USB Device                    |------------>| USB Device    |--> ... |
 599  *     | xhci_device_t                 |             | xhci_device_t |        |
 600  *     |                               |             +---------------+        |
 601  *     | usb_port_t                  --+-> USB Port plugged into              |
 602  *     | uint8_t                     --+-> Slot Number                        |
 603  *     | boolean_t                   --+-> Address Assigned                   |
 604  *     | usba_device_t *             --+-> USBA Device State                  |
 605  *     | xhci_dma_buffer_t           --+-> Input Context DMA Buffer           |
 606  *     | xhci_input_context_t *      --+-> Input Context KVA                  |
 607  *     | xhci_slot_context_t *       --+-> Input Slot Context KVA             |
 608  *     | xhci_endpoint_context_t *[] --+-> Input Endpoint Context KVA         |
 609  *     | xhci_dma_buffer_t           --+-> Output Context DMA Buffer          |
 610  *     | xhci_slot_context_t *       --+-> Output Slot Context KVA            ^
 611  *     | xhci_endpoint_context_t *[] --+-> Output Endpoint Context KVA        |
 612  *     | xhci_endpoint_t *[]         --+-> Endpoint Tracking ---+             |
 613  *     +-------------------------------+                        |             |
 614  *                                                              |             |
 615  *                                                              v             |
 616  *     +------------------------------+            +-----------------+        |
 617  *     | Endpoint Data                |----------->| Endpoint Data   |--> ... |
 618  *     | xhci_endpoint_t              |            | xhci_endpoint_t |        |
 619  *     |                              |            +-----------------+        |
 620  *     | int                        --+-> Endpoint Number                     |
 621  *     | int                        --+-> Endpoint Type                       |
 622  *     | xhci_endpoint_state_t      --+-> Endpoint State                      |
 623  *     | timeout_id_t               --+-> Endpoint Timeout State              |
 624  *     | usba_pipe_handle_data_t *  --+-> USBA Client Handle                  |
 625  *     | xhci_ring_t                --+-> Endpoint I/O Ring  -------->--------+
 626  *     | list_t                     --+-> Transfer List --------+
 627  *     +------------------------------+                         |
 628  *                                                              v
 629  *     +-------------------------+                  +--------------------+
 630  *     | Transfer Structure      |----------------->| Transfer Structure |-> ...
 631  *     | xhci_transfer_t         |                  | xhci_transfer_t    |
 632  *     |                         |                  +--------------------+
 633  *     | xhci_dma_buffer_t     --+-> I/O DMA Buffer
 634  *     | uint_t                --+-> Number of TRBs
 635  *     | uint_t                --+-> Short transfer data
 636  *     | uint_t                --+-> Timeout seconds remaining
 637  *     | usb_cr_t              --+-> USB Transfer return value
 638  *     | boolean_t             --+-> Data direction
 639  *     | xhci_trb_t *          --+-> Host-order transfer requests for I/O
 640  *     | usb_isoc_pkt_descr_t * -+-> Isochronous only response data
 641  *     | usb_opaque_t          --+-> USBA Request Handle
 642  *     +-------------------------+
 643  *
 644  * -------------
 645  * Lock Ordering
 646  * -------------
 647  *
 648  * There are three different tiers of locks that exist in the driver. First,
 649  * there is a lock for each controller: xhci_t`xhci_lock. This protects all the
 650  * data for that instance of the controller. If there are multiple instances of
 651  * the xHCI controller in the system, each one is independent and protected
 652  * separately. Instances do not share any data.
 653  *
 654  * From there, there are two other, specific locks in the system:
 655  *
 656  *   o xhci_command_ring_t`xcr_lock
 657  *   o xhci_device_t`xd_imtx
 658  *
 659  * There is only one xcr_lock per controller, like the xhci_lock. It protects
 660  * the state of the command ring. However, there is one xd_imtx per device.
 661  * Recall that each device is scoped to a given controller. This protects the
 662  * input slot context for a given device.
 663  *
 664  * There are a few important rules to keep in mind here that are true
 665  * universally throughout the driver:
 666  *
 667  * 1) Always grab the xhci_t`xhci_lock before grabbing any of the other locks.
 668  * 2) A given xhci_device_t`xd_imtx must be taken before grabbing the
 669  *    xhci_command_ring_t`xcr_lock.
 670  * 3) A given thread can only hold one of the given xhci_device_t`xd_imtx locks
 671  *    at a given time. In other words, we should never be manipulating the input
 672  *    context of two different devices at once (see the sketch below).
 673  * 4) It is safe to hold the xhci_device_t`xd_imtx while tearing down the
 674  *    endpoint timer. Conversely, the endpoint specific logic should never enter
 675  *    this lock.
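 *
 * To illustrate rules 1 through 3, a thread that (hypothetically) needed all
 * three locks would take and release them like this; the path from the xhci_t
 * to its command ring structure is assumed for the example:
 *
 *        mutex_enter(&xhcip->xhci_lock);
 *        mutex_enter(&xd->xd_imtx);
 *        mutex_enter(&xhcip->xhci_command.xcr_lock);
 *        (... do the work that needs all three ...)
 *        mutex_exit(&xhcip->xhci_command.xcr_lock);
 *        mutex_exit(&xd->xd_imtx);
 *        mutex_exit(&xhcip->xhci_lock);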
 676  *
 677  * --------------------
 678  * Relationship to EHCI
 679  * --------------------
 680  *
 681  * On some Intel chipsets, a given physical port on the system may be routed to
 682  * one of the EHCI or xHCI controllers. This association can be dynamically
 683  * changed by writing to platform specific registers as handled by the quirk
 684  * logic in xhci_quirks.c.
 685  *
 686  * As these ports may support USB 3.x speeds, we always route all such ports to
 687  * the xHCI controller, when supported. In addition, to minimize disruptions
 688  * from devices being enumerated and attached to the EHCI driver and then
 689  * disappearing, we generally attempt to load the xHCI controller before the
 690  * EHCI controller. This logic is not done in the driver; however, it is done in
 691  * other parts of the kernel like in uts/common/io/consconfig_dacf.c in the
 692  * function consconfig_load_drivers().
 693  *
 694  * -----------
 695  * Future Work
 696  * -----------
 697  *
 698  * The primary future work in this driver spans two different, but related
 699  * areas. The first area is around controller resets and how they tie into FM.
 700  * Presently, we do not have a good way to handle controllers coming and going
 701  * in the broader USB stack or properly reconfigure the device after a reset.
 702  * Secondly, we don't handle the suspend and resume of devices and drivers.
 703  */
 704 
 705 #include <sys/param.h>
 706 #include <sys/modctl.h>
 707 #include <sys/conf.h>
 708 #include <sys/devops.h>
 709 #include <sys/ddi.h>
 710 #include <sys/sunddi.h>
 711 #include <sys/cmn_err.h>
 712 #include <sys/ddifm.h>
 713 #include <sys/pci.h>
 714 #include <sys/class.h>
 715 #include <sys/policy.h>
 716 
 717 #include <sys/usb/hcd/xhci/xhci.h>
 718 #include <sys/usb/hcd/xhci/xhci_ioctl.h>
 719 
 720 /*
 721  * We want to use the first BAR to access its registers. The regs[] array is
 722  * ordered based on the rules for the PCI supplement to IEEE 1275. So regs[1]
 723  * will always be the first BAR.
 724  */
 725 #define XHCI_REG_NUMBER 1
 726 
 727 /*
 728  * This task queue exists as a global taskq that is used for resetting the
 729  * device in the face of FM or runtime errors. Each instance of the device
 730  * (xhci_t) happens to have a single taskq_dispatch_ent already allocated so we
 731  * know that we should always be able to dispatch such an event.
 732  */
 733 static taskq_t *xhci_taskq;
 734 
 735 /*
 736  * Global soft state for per-instance data. Note that we must use the soft state
 737  * routines and cannot use the ddi_set_driver_private() routines. The USB
 738  * framework presumes that it can use the dip's private data.
 739  */
 740 void *xhci_soft_state;
 741 
 742 /*
 743  * This is the time in us that we wait after a controller reset before we
 744  * consider reading any register. There are some controllers that want at least
 745  * 1 ms; therefore, we default to 10 ms.
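 *
 * Consumers of this value are expected to wait it out with something like
 * delay(drv_usectohz(xhci_reset_delay)) before touching the registers again.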
 746  */
 747 clock_t xhci_reset_delay = 10000;
 748 
 749 void
 750 xhci_error(xhci_t *xhcip, const char *fmt, ...)
 751 {
 752         va_list ap;
 753 
 754         va_start(ap, fmt);
 755         if (xhcip != NULL && xhcip->xhci_dip != NULL) {
 756                 vdev_err(xhcip->xhci_dip, CE_WARN, fmt, ap);
 757         } else {
 758                 vcmn_err(CE_WARN, fmt, ap);
 759         }
 760         va_end(ap);
 761 }
 762 
 763 void
 764 xhci_log(xhci_t *xhcip, const char *fmt, ...)
 765 {
 766         va_list ap;
 767 
 768         va_start(ap, fmt);
 769         if (xhcip != NULL && xhcip->xhci_dip != NULL) {
 770                 vdev_err(xhcip->xhci_dip, CE_NOTE, fmt, ap);
 771         } else {
 772                 vcmn_err(CE_NOTE, fmt, ap);
 773         }
 774         va_end(ap);
 775 }
 776 
 777 /*
 778  * USBA is in charge of creating device nodes for us. USBA explicitly ORs in the
 779  * constant HUBD_IS_ROOT_HUB, so we have to undo that when we're looking at
 780  * things here. A simple bitwise-and will take care of this. And hey, it could
 781  * always be more complex, USBA could clone!
 782  */
 783 static dev_info_t *
 784 xhci_get_dip(dev_t dev)
 785 {
 786         xhci_t *xhcip;
 787         int instance = getminor(dev) & ~HUBD_IS_ROOT_HUB;
 788 
 789         xhcip = ddi_get_soft_state(xhci_soft_state, instance);
 790         if (xhcip != NULL)
 791                 return (xhcip->xhci_dip);
 792         return (NULL);
 793 }
 794 
 795 uint8_t
 796 xhci_get8(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
 797 {
 798         uintptr_t addr, roff;
 799 
 800         switch (rtt) {
 801         case XHCI_R_CAP:
 802                 roff = xhcip->xhci_regs_capoff;
 803                 break;
 804         case XHCI_R_OPER:
 805                 roff = xhcip->xhci_regs_operoff;
 806                 break;
 807         case XHCI_R_RUN:
 808                 roff = xhcip->xhci_regs_runoff;
 809                 break;
 810         case XHCI_R_DOOR:
 811                 roff = xhcip->xhci_regs_dooroff;
 812                 break;
 813         default:
 814                 panic("called %s with bad reg type: %d", __func__, rtt);
 815         }
 816         ASSERT(roff != PCI_EINVAL32);
 817         addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
 818 
 819         return (ddi_get8(xhcip->xhci_regs_handle, (void *)addr));
 820 }
 821 
 822 uint16_t
 823 xhci_get16(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
 824 {
 825         uintptr_t addr, roff;
 826 
 827         switch (rtt) {
 828         case XHCI_R_CAP:
 829                 roff = xhcip->xhci_regs_capoff;
 830                 break;
 831         case XHCI_R_OPER:
 832                 roff = xhcip->xhci_regs_operoff;
 833                 break;
 834         case XHCI_R_RUN:
 835                 roff = xhcip->xhci_regs_runoff;
 836                 break;
 837         case XHCI_R_DOOR:
 838                 roff = xhcip->xhci_regs_dooroff;
 839                 break;
 840         default:
 841                 panic("called %s with bad reg type: %d", __func__, rtt);
 842         }
 843         ASSERT(roff != PCI_EINVAL32);
 844         addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
 845 
 846         return (ddi_get16(xhcip->xhci_regs_handle, (void *)addr));
 847 }
 848 
 849 uint32_t
 850 xhci_get32(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
 851 {
 852         uintptr_t addr, roff;
 853 
 854         switch (rtt) {
 855         case XHCI_R_CAP:
 856                 roff = xhcip->xhci_regs_capoff;
 857                 break;
 858         case XHCI_R_OPER:
 859                 roff = xhcip->xhci_regs_operoff;
 860                 break;
 861         case XHCI_R_RUN:
 862                 roff = xhcip->xhci_regs_runoff;
 863                 break;
 864         case XHCI_R_DOOR:
 865                 roff = xhcip->xhci_regs_dooroff;
 866                 break;
 867         default:
 868                 panic("called %s with bad reg type: %d", __func__, rtt);
 869         }
 870         ASSERT(roff != PCI_EINVAL32);
 871         addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
 872 
 873         return (ddi_get32(xhcip->xhci_regs_handle, (void *)addr));
 874 }
 875 
 876 uint64_t
 877 xhci_get64(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
 878 {
 879         uintptr_t addr, roff;
 880 
 881         switch (rtt) {
 882         case XHCI_R_CAP:
 883                 roff = xhcip->xhci_regs_capoff;
 884                 break;
 885         case XHCI_R_OPER:
 886                 roff = xhcip->xhci_regs_operoff;
 887                 break;
 888         case XHCI_R_RUN:
 889                 roff = xhcip->xhci_regs_runoff;
 890                 break;
 891         case XHCI_R_DOOR:
 892                 roff = xhcip->xhci_regs_dooroff;
 893                 break;
 894         default:
 895                 panic("called %s with bad reg type: %d", __func__, rtt);
 896         }
 897         ASSERT(roff != PCI_EINVAL32);
 898         addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
 899 
 900         return (ddi_get64(xhcip->xhci_regs_handle, (void *)addr));
 901 }
 902 
 903 void
 904 xhci_put8(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint8_t val)
 905 {
 906         uintptr_t addr, roff;
 907 
 908         switch (rtt) {
 909         case XHCI_R_CAP:
 910                 roff = xhcip->xhci_regs_capoff;
 911                 break;
 912         case XHCI_R_OPER:
 913                 roff = xhcip->xhci_regs_operoff;
 914                 break;
 915         case XHCI_R_RUN:
 916                 roff = xhcip->xhci_regs_runoff;
 917                 break;
 918         case XHCI_R_DOOR:
 919                 roff = xhcip->xhci_regs_dooroff;
 920                 break;
 921         default:
 922                 panic("called %s with bad reg type: %d", __func__, rtt);
 923         }
 924         ASSERT(roff != PCI_EINVAL32);
 925         addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
 926 
 927         ddi_put8(xhcip->xhci_regs_handle, (void *)addr, val);
 928 }
 929 
 930 void
 931 xhci_put16(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint16_t val)
 932 {
 933         uintptr_t addr, roff;
 934 
 935         switch (rtt) {
 936         case XHCI_R_CAP:
 937                 roff = xhcip->xhci_regs_capoff;
 938                 break;
 939         case XHCI_R_OPER:
 940                 roff = xhcip->xhci_regs_operoff;
 941                 break;
 942         case XHCI_R_RUN:
 943                 roff = xhcip->xhci_regs_runoff;
 944                 break;
 945         case XHCI_R_DOOR:
 946                 roff = xhcip->xhci_regs_dooroff;
 947                 break;
 948         default:
 949                 panic("called %s with bad reg type: %d", __func__, rtt);
 950         }
 951         ASSERT(roff != PCI_EINVAL32);
 952         addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
 953 
 954         ddi_put16(xhcip->xhci_regs_handle, (void *)addr, val);
 955 }
 956 
 957 void
 958 xhci_put32(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint32_t val)
 959 {
 960         uintptr_t addr, roff;
 961 
 962         switch (rtt) {
 963         case XHCI_R_CAP:
 964                 roff = xhcip->xhci_regs_capoff;
 965                 break;
 966         case XHCI_R_OPER:
 967                 roff = xhcip->xhci_regs_operoff;
 968                 break;
 969         case XHCI_R_RUN:
 970                 roff = xhcip->xhci_regs_runoff;
 971                 break;
 972         case XHCI_R_DOOR:
 973                 roff = xhcip->xhci_regs_dooroff;
 974                 break;
 975         default:
 976                 panic("called %s with bad reg type: %d", __func__, rtt);
 977         }
 978         ASSERT(roff != PCI_EINVAL32);
 979         addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
 980 
 981         ddi_put32(xhcip->xhci_regs_handle, (void *)addr, val);
 982 }
 983 
 984 void
 985 xhci_put64(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint64_t val)
 986 {
 987         uintptr_t addr, roff;
 988 
 989         switch (rtt) {
 990         case XHCI_R_CAP:
 991                 roff = xhcip->xhci_regs_capoff;
 992                 break;
 993         case XHCI_R_OPER:
 994                 roff = xhcip->xhci_regs_operoff;
 995                 break;
 996         case XHCI_R_RUN:
 997                 roff = xhcip->xhci_regs_runoff;
 998                 break;
 999         case XHCI_R_DOOR:
1000                 roff = xhcip->xhci_regs_dooroff;
1001                 break;
1002         default:
1003                 panic("called %s with bad reg type: %d", __func__, rtt);
1004         }
1005         ASSERT(roff != PCI_EINVAL32);
1006         addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
1007 
1008         ddi_put64(xhcip->xhci_regs_handle, (void *)addr, val);
1009 }
1010 
1011 int
1012 xhci_check_regs_acc(xhci_t *xhcip)
1013 {
1014         ddi_fm_error_t de;
1015 
1016         /*
1017          * Treat cases where we can't check as fine so we can treat the code
1018          * more simply.
1019          */
1020         if (quiesce_active || !DDI_FM_ACC_ERR_CAP(xhcip->xhci_fm_caps))
1021                 return (DDI_FM_OK);
1022 
1023         ddi_fm_acc_err_get(xhcip->xhci_regs_handle, &de, DDI_FME_VERSION);
1024         ddi_fm_acc_err_clear(xhcip->xhci_regs_handle, DDI_FME_VERSION);
1025         return (de.fme_status);
1026 }
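
/*
 * The usual pattern for consuming the register accessors above is to read a
 * value and then call xhci_check_regs_acc() before trusting it, e.g. (the
 * register name here is only for illustration):
 *
 *        uint32_t sts = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBSTS);
 *        if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
 *                ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
 *                return (EIO);
 *        }
 */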
1027 
1028 /*
1029  * As a leaf PCIe driver, we just post the ereport and continue on.
1030  */
1031 /* ARGSUSED */
1032 static int
1033 xhci_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
1034 {
1035         pci_ereport_post(dip, err, NULL);
1036         return (err->fme_status);
1037 }
1038 
1039 static void
1040 xhci_fm_fini(xhci_t *xhcip)
1041 {
1042         if (xhcip->xhci_fm_caps == 0)
1043                 return;
1044 
1045         if (DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps))
1046                 ddi_fm_handler_unregister(xhcip->xhci_dip);
1047 
1048         if (DDI_FM_EREPORT_CAP(xhcip->xhci_fm_caps) ||
1049             DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps))
1050                 pci_ereport_teardown(xhcip->xhci_dip);
1051 
1052         ddi_fm_fini(xhcip->xhci_dip);
1053 }
1054 
1055 static void
1056 xhci_fm_init(xhci_t *xhcip)
1057 {
1058         ddi_iblock_cookie_t iblk;
1059         int def = DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1060             DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE;
1061 
1062         xhcip->xhci_fm_caps = ddi_prop_get_int(DDI_DEV_T_ANY, xhcip->xhci_dip,
1063             DDI_PROP_DONTPASS, "fm_capable", def);
1064 
1065         if (xhcip->xhci_fm_caps < 0) {
1066                 xhcip->xhci_fm_caps = 0;
1067         } else if (xhcip->xhci_fm_caps & ~def) {
1068                 xhcip->xhci_fm_caps &= def;
1069         }
1070 
1071         if (xhcip->xhci_fm_caps == 0)
1072                 return;
1073 
1074         ddi_fm_init(xhcip->xhci_dip, &xhcip->xhci_fm_caps, &iblk);
1075         if (DDI_FM_EREPORT_CAP(xhcip->xhci_fm_caps) ||
1076             DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps)) {
1077                 pci_ereport_setup(xhcip->xhci_dip);
1078         }
1079 
1080         if (DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps)) {
1081                 ddi_fm_handler_register(xhcip->xhci_dip,
1082                     xhci_fm_error_cb, xhcip);
1083         }
1084 }
1085 
1086 static int
1087 xhci_reg_poll(xhci_t *xhcip, xhci_reg_type_t rt, int reg, uint32_t mask,
1088     uint32_t targ, uint_t tries, int delay_ms)
1089 {
1090         uint_t i;
1091 
1092         for (i = 0; i < tries; i++) {
1093                 uint32_t val = xhci_get32(xhcip, rt, reg);
1094                 if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1095                         ddi_fm_service_impact(xhcip->xhci_dip,
1096                             DDI_SERVICE_LOST);
1097                         return (EIO);
1098                 }
1099 
1100                 if ((val & mask) == targ)
1101                         return (0);
1102 
1103                 delay(drv_usectohz(delay_ms * 1000));
1104         }
1105         return (ETIMEDOUT);
1106 }
1107 
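/*
 * Map the controller's memory-mapped register set (register set
 * XHCI_REG_NUMBER) with little-endian, strictly ordered access attributes,
 * using a flagged-error access handle when FM access checking is enabled.
 */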
1108 static boolean_t
1109 xhci_regs_map(xhci_t *xhcip)
1110 {
1111         off_t memsize;
1112         int ret;
1113         ddi_device_acc_attr_t da;
1114 
1115         if (ddi_dev_regsize(xhcip->xhci_dip, XHCI_REG_NUMBER, &memsize) !=
1116             DDI_SUCCESS) {
1117                 xhci_error(xhcip, "failed to get register set size");
1118                 return (B_FALSE);
1119         }
1120 
1121         bzero(&da, sizeof (ddi_device_acc_attr_t));
1122         da.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1123         da.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1124         da.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1125         if (DDI_FM_ACC_ERR_CAP(xhcip->xhci_fm_caps)) {
1126                 da.devacc_attr_access = DDI_FLAGERR_ACC;
1127         } else {
1128                 da.devacc_attr_access = DDI_DEFAULT_ACC;
1129         }
1130 
1131         ret = ddi_regs_map_setup(xhcip->xhci_dip, XHCI_REG_NUMBER,
1132             &xhcip->xhci_regs_base, 0, memsize, &da, &xhcip->xhci_regs_handle);
1133 
1134         if (ret != DDI_SUCCESS) {
1135                 xhci_error(xhcip, "failed to map device registers: %d", ret);
1136                 return (B_FALSE);
1137         }
1138 
1139         return (B_TRUE);
1140 }
1141 
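/*
 * Cache the offsets of the operational, runtime, and doorbell register spaces,
 * all of which are described relative to the capability registers. The runtime
 * and doorbell offsets have their reserved low-order bits masked off.
 */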
1142 static boolean_t
1143 xhci_regs_init(xhci_t *xhcip)
1144 {
1145         /*
1146          * The capabilities always begin at offset zero.
1147          */
1148         xhcip->xhci_regs_capoff = 0;
1149         xhcip->xhci_regs_operoff = xhci_get8(xhcip, XHCI_R_CAP, XHCI_CAPLENGTH);
1150         xhcip->xhci_regs_runoff = xhci_get32(xhcip, XHCI_R_CAP, XHCI_RTSOFF);
1151         xhcip->xhci_regs_runoff &= ~0x1f;
1152         xhcip->xhci_regs_dooroff = xhci_get32(xhcip, XHCI_R_CAP, XHCI_DBOFF);
1153         xhcip->xhci_regs_dooroff &= ~0x3;
1154 
1155         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1156                 xhci_error(xhcip, "failed to initialize controller register "
1157                     "offsets: encountered FM register error");
1158                 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1159                 return (B_FALSE);
1160         }
1161 
1162         return (B_TRUE);
1163 }
1164 
1165 /*
1166  * Read various parameters from PCI configuration space and from the Capability
1167  * registers that we'll need to register the device. We cache all of the
1168  * Capability registers.
1169  */
1170 static boolean_t
1171 xhci_read_params(xhci_t *xhcip)
1172 {
1173         uint8_t usb;
1174         uint16_t vers;
1175         uint32_t struc1, struc2, struc3, cap1, cap2, pgsz;
1176         uint32_t psize, pbit, capreg;
1177         xhci_capability_t *xcap;
1178         unsigned long ps;
1179 
1180         /*
1181          * While it's tempting to do a 16-bit read at offset 0x2, unfortunately,
1182          * a few emulated systems don't support reading at offset 0x2 for the
1183          * version. Instead we need to read the caplength register and get the
1184          * upper two bytes.
1185          */
1186         capreg = xhci_get32(xhcip, XHCI_R_CAP, XHCI_CAPLENGTH);
1187         vers = XHCI_VERSION_MASK(capreg);
1188         usb = pci_config_get8(xhcip->xhci_cfg_handle, PCI_XHCI_USBREV);
1189         struc1 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS1);
1190         struc2 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS2);
1191         struc3 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS3);
1192         cap1 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCCPARAMS1);
1193         cap2 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCCPARAMS2);
1194         pgsz = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PAGESIZE);
1195         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1196                 xhci_error(xhcip, "failed to read controller parameters: "
1197                     "encountered FM register error");
1198                 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1199                 return (B_FALSE);
1200         }
1201 
1202         xcap = &xhcip->xhci_caps;
1203         xcap->xcap_usb_vers = usb;
1204         xcap->xcap_hci_vers = vers;
1205         xcap->xcap_max_slots = XHCI_HCS1_DEVSLOT_MAX(struc1);
1206         xcap->xcap_max_intrs = XHCI_HCS1_IRQ_MAX(struc1);
1207         xcap->xcap_max_ports = XHCI_HCS1_N_PORTS(struc1);
1208         if (xcap->xcap_max_ports > MAX_PORTS) {
1209                 xhci_error(xhcip, "Root hub has %d ports, but system only "
1210                     "supports %d, limiting to %d\n", xcap->xcap_max_ports,
1211                     MAX_PORTS, MAX_PORTS);
1212                 xcap->xcap_max_ports = MAX_PORTS;
1213         }
1214 
1215         xcap->xcap_ist_micro = XHCI_HCS2_IST_MICRO(struc2);
1216         xcap->xcap_ist = XHCI_HCS2_IST(struc2);
1217         xcap->xcap_max_esrt = XHCI_HCS2_ERST_MAX(struc2);
1218         xcap->xcap_scratch_restore = XHCI_HCS2_SPR(struc2);
1219         xcap->xcap_max_scratch = XHCI_HCS2_SPB_MAX(struc2);
1220 
1221         xcap->xcap_u1_lat = XHCI_HCS3_U1_DEL(struc3);
1222         xcap->xcap_u2_lat = XHCI_HCS3_U2_DEL(struc3);
1223 
1224         xcap->xcap_flags = XHCI_HCC1_FLAGS_MASK(cap1);
1225         xcap->xcap_max_psa = XHCI_HCC1_PSA_SZ_MAX(cap1);
1226         xcap->xcap_xecp_off = XHCI_HCC1_XECP(cap1);
1227         xcap->xcap_flags2 = XHCI_HCC2_FLAGS_MASK(cap2);
1228 
1229         /*
1230          * We don't have documentation for what changed prior to xHCI 0.96,
1231          * so we simply refuse to support anything older than that. We also
1232          * ignore anything with a major version greater than 1.
1233          */
1234         if (xcap->xcap_hci_vers < 0x96 || xcap->xcap_hci_vers >= 0x200) {
1235                 xhci_error(xhcip, "Encountered unsupported xHCI version 0.%2x",
1236                     xcap->xcap_hci_vers);
1237                 return (B_FALSE);
1238         }
1239 
1240         /*
1241          * Determine the smallest page size that the controller supports and
1242          * make sure that it matches our page size. We only check here for the
1243          * presence of 4k and 8k pages. The page size is used extensively
1244          * throughout the code and the specification. While we could support
1245          * other page sizes here, we don't currently support any systems that
1246          * use them, so it doesn't make much sense.
1247          */
1248         ps = PAGESIZE;
1249         if (ps == 0x1000) {
1250                 pbit = XHCI_PAGESIZE_4K;
1251                 psize = 0x1000;
1252         } else if (ps == 0x2000) {
1253                 pbit = XHCI_PAGESIZE_8K;
1254                 psize = 0x2000;
1255         } else {
1256                 xhci_error(xhcip, "Encountered host page size that the driver "
1257                     "doesn't know how to handle: %lx\n", ps);
1258                 return (B_FALSE);
1259         }
1260 
1261         if (!(pgsz & pbit)) {
1262                 xhci_error(xhcip, "Encountered controller that didn't support "
1263                     "the host page size (%d), supports: %x", psize, pgsz);
1264                 return (B_FALSE);
1265         }
1266         xcap->xcap_pagesize = psize;
1267 
1268         return (B_TRUE);
1269 }
1270 
1271 /*
1272  * Apply workarounds for known controller quirks and issues. These reports
1273  * come from other operating systems and have been collected over time.
1274  */
1275 static boolean_t
1276 xhci_identify(xhci_t *xhcip)
1277 {
1278         xhci_quirks_populate(xhcip);
1279 
1280         if (xhcip->xhci_quirks & XHCI_QUIRK_NO_MSI) {
1281                 xhcip->xhci_caps.xcap_intr_types = DDI_INTR_TYPE_FIXED;
1282         } else {
1283                 xhcip->xhci_caps.xcap_intr_types = DDI_INTR_TYPE_FIXED |
1284                     DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX;
1285         }
1286 
1287         if (xhcip->xhci_quirks & XHCI_QUIRK_32_ONLY) {
1288                 xhcip->xhci_caps.xcap_flags &= ~XCAP_AC64;
1289         }
1290 
1291         return (B_TRUE);
1292 }
1293 
1294 static boolean_t
1295 xhci_alloc_intr_handle(xhci_t *xhcip, int type)
1296 {
1297         int ret;
1298 
1299         /*
1300          * Normally a well-behaved driver would more carefully request a
1301          * number of interrupts based on the number available, etc. But since
1302          * we only ever want a single interrupt (XHCI_NINTR), we simply ask
1303          * for one.
1304          */
1305         ret = ddi_intr_alloc(xhcip->xhci_dip, &xhcip->xhci_intr_hdl, type, 0,
1306             XHCI_NINTR, &xhcip->xhci_intr_num, DDI_INTR_ALLOC_NORMAL);
1307         if (ret != DDI_SUCCESS) {
1308                 xhci_log(xhcip, "!failed to allocate interrupts of type %d: %d",
1309                     type, ret);
1310                 return (B_FALSE);
1311         }
1312         xhcip->xhci_intr_type = type;
1313 
1314         return (B_TRUE);
1315 }
1316 
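/*
 * Allocate our single interrupt, preferring MSI-X, then MSI, and finally fixed
 * interrupts, after masking off any types that quirk handling has ruled out.
 */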
1317 static boolean_t
1318 xhci_alloc_intrs(xhci_t *xhcip)
1319 {
1320         int intr_types, ret;
1321 
1322         if (XHCI_NINTR > xhcip->xhci_caps.xcap_max_intrs) {
1323                 xhci_error(xhcip, "controller does not support the minimum "
1324                     "number of interrupts required (%d), supports %d",
1325                     XHCI_NINTR, xhcip->xhci_caps.xcap_max_intrs);
1326                 return (B_FALSE);
1327         }
1328 
1329         if ((ret = ddi_intr_get_supported_types(xhcip->xhci_dip,
1330             &intr_types)) != DDI_SUCCESS) {
1331                 xhci_error(xhcip, "failed to get supported interrupt types: "
1332                     "%d", ret);
1333                 return (B_FALSE);
1334         }
1335 
1336         /*
1337          * Mask off interrupt types we've already ruled out due to quirks or
1338          * other reasons.
1339          */
1340         intr_types &= xhcip->xhci_caps.xcap_intr_types;
1341         if (intr_types & DDI_INTR_TYPE_MSIX) {
1342                 if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_MSIX))
1343                         return (B_TRUE);
1344         }
1345 
1346         if (intr_types & DDI_INTR_TYPE_MSI) {
1347                 if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_MSI))
1348                         return (B_TRUE);
1349         }
1350 
1351         if (intr_types & DDI_INTR_TYPE_FIXED) {
1352                 if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_FIXED))
1353                         return (B_TRUE);
1354         }
1355 
1356         xhci_error(xhcip, "failed to allocate an interrupt, supported types: "
1357             "0x%x", intr_types);
1358         return (B_FALSE);
1359 }
1360 
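/*
 * Fetch the interrupt priority (used later to initialize xhci_lock) and the
 * interrupt capabilities, then register xhci_intr() as our interrupt handler.
 */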
1361 static boolean_t
1362 xhci_add_intr_handler(xhci_t *xhcip)
1363 {
1364         int ret;
1365 
1366         if ((ret = ddi_intr_get_pri(xhcip->xhci_intr_hdl,
1367             &xhcip->xhci_intr_pri)) != DDI_SUCCESS) {
1368                 xhci_error(xhcip, "failed to get interrupt priority: %d", ret);
1369                 return (B_FALSE);
1370         }
1371 
1372         if ((ret = ddi_intr_get_cap(xhcip->xhci_intr_hdl,
1373             &xhcip->xhci_intr_caps)) != DDI_SUCCESS) {
1374                 xhci_error(xhcip, "failed to get interrupt capabilities: %d",
1375                     ret);
1376                 return (B_FALSE);
1377         }
1378 
1379         if ((ret = ddi_intr_add_handler(xhcip->xhci_intr_hdl, xhci_intr, xhcip,
1380             (uintptr_t)0)) != DDI_SUCCESS) {
1381                 xhci_error(xhcip, "failed to add interrupt handler: %d", ret);
1382                 return (B_FALSE);
1383         }
1384         return (B_TRUE);
1385 }
1386 
1387 /*
1388  * Find an extended capability with identifier 'id'. The 'init' argument gives
1389  * the offset to resume searching from (UINT32_MAX searches from the start).
1390  * See xHCI 1.1 / 7 for more information. This works much like PCI capabilities.
1391  */
1392 static boolean_t
1393 xhci_find_ext_cap(xhci_t *xhcip, uint32_t id, uint32_t init, uint32_t *outp)
1394 {
1395         uint32_t off;
1396         uint8_t next = 0;
1397 
1398         /*
1399          * If we have no offset, we're done.
1400          */
1401         if (xhcip->xhci_caps.xcap_xecp_off == 0)
1402                 return (B_FALSE);
1403 
1404         off = xhcip->xhci_caps.xcap_xecp_off << 2;
1405         do {
1406                 uint32_t cap_hdr;
1407 
1408                 off += next << 2;
1409                 cap_hdr = xhci_get32(xhcip, XHCI_R_CAP, off);
1410                 if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1411                         xhci_error(xhcip, "failed to read xhci extended "
1412                             "capabilities at offset 0x%x: encountered FM "
1413                             "register error", off);
1414                         ddi_fm_service_impact(xhcip->xhci_dip,
1415                             DDI_SERVICE_LOST);
1416                         break;
1417                 }
1418 
1419                 if (cap_hdr == PCI_EINVAL32)
1420                         break;
1421                 if (XHCI_XECP_ID(cap_hdr) == id &&
1422                     (init == UINT32_MAX || off > init)) {
1423                         *outp = off;
1424                         return (B_TRUE);
1425                 }
1426                 next = XHCI_XECP_NEXT(cap_hdr);
1427                 /*
1428                  * Watch out for overflow if we somehow end up with a
1429                  * capability space larger than 2 GiB.
1430                  */
1431                 if (next << 2 > (INT32_MAX - off))
1432                         return (B_FALSE);
1433         } while (next != 0);
1434 
1435         return (B_FALSE);
1436 }
1437 
1438 /*
1439  * Mostly for informational purposes, we walk the extended capabilities to
1440  * augment the devinfo tree with the number of ports that support USB 2 and
1441  * USB 3. Note that these counts may overlap. Many ports support both USB 2
1442  * and USB 3 and are wired up to the same physical connector, even though they
1443  * show up as separate 'ports' in the xhci sense.
1444  */
1445 static boolean_t
1446 xhci_port_count(xhci_t *xhcip)
1447 {
1448         uint_t nusb2 = 0, nusb3 = 0;
1449         uint32_t off = UINT32_MAX;
1450 
1451         while (xhci_find_ext_cap(xhcip, XHCI_ID_PROTOCOLS, off, &off) ==
1452             B_TRUE) {
1453                 uint32_t rvers, rport;
1454 
1455                 /*
1456                  * See xHCI 1.1 / 7.2 for the format of this. The first uint32_t
1457                  * has version information while the third uint32_t has the port
1458                  * count.
1459                  */
1460                 rvers = xhci_get32(xhcip, XHCI_R_CAP, off);
1461                 rport = xhci_get32(xhcip, XHCI_R_CAP, off + 8);
1462                 if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1463                         xhci_error(xhcip, "failed to read xhci port counts: "
1464                             "encountered fatal FM register error");
1465                         ddi_fm_service_impact(xhcip->xhci_dip,
1466                             DDI_SERVICE_LOST);
1467                         return (B_FALSE);
1468                 }
1469 
1470                 rvers = XHCI_XECP_PROT_MAJOR(rvers);
1471                 rport = XHCI_XECP_PROT_PCOUNT(rport);
1472 
1473                 if (rvers == 3) {
1474                         nusb3 += rport;
1475                 } else if (rvers <= 2) {
1476                         nusb2 += rport;
1477                 } else {
1478                         xhci_error(xhcip, "encountered port capabilities with "
1479                             "unknown major USB version: %d\n", rvers);
1480                 }
1481         }
1482 
1483         (void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1484             "usb2-capable-ports", nusb2);
1485         (void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
1486             "usb3-capable-ports", nusb3);
1487 
1488         return (B_TRUE);
1489 }
1490 
1491 /*
1492  * Take over control from the BIOS or other firmware, if applicable.
1493  */
1494 static boolean_t
1495 xhci_controller_takeover(xhci_t *xhcip)
1496 {
1497         int ret;
1498         uint32_t val, off;
1499 
1500         /*
1501          * If we can't find the legacy capability, then there's nothing to do.
1502          */
1503         if (xhci_find_ext_cap(xhcip, XHCI_ID_USB_LEGACY, UINT32_MAX, &off) ==
1504             B_FALSE)
1505                 return (B_TRUE);
1506         val = xhci_get32(xhcip, XHCI_R_CAP, off);
1507         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1508                 xhci_error(xhcip, "failed to read BIOS take over registers: "
1509                     "encountered fatal FM register error");
1510                 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1511                 return (B_FALSE);
1512         }
1513 
1514         if (val & XHCI_BIOS_OWNED) {
1515                 val |= XHCI_OS_OWNED;
1516                 xhci_put32(xhcip, XHCI_R_CAP, off, val);
1517                 if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1518                         xhci_error(xhcip, "failed to write BIOS take over "
1519                             "registers: encountered fatal FM register error");
1520                         ddi_fm_service_impact(xhcip->xhci_dip,
1521                             DDI_SERVICE_LOST);
1522                         return (B_FALSE);
1523                 }
1524 
1525                 /*
1526                  * Wait up to 5 seconds for things to change. While this number
1527                  * isn't specified in the xHCI spec, it seems to be the de facto
1528                  * value that various systems are using today. We'll use a 10ms
1529                  * interval to check.
1530                  */
1531                 ret = xhci_reg_poll(xhcip, XHCI_R_CAP, off,
1532                     XHCI_BIOS_OWNED | XHCI_OS_OWNED, XHCI_OS_OWNED, 500, 10);
1533                 if (ret == EIO)
1534                         return (B_FALSE);
1535                 if (ret == ETIMEDOUT) {
1536                         xhci_log(xhcip, "!timed out waiting for firmware to "
1537                             "hand off, taking over");
1538                         val &= ~XHCI_BIOS_OWNED;
1539                         xhci_put32(xhcip, XHCI_R_CAP, off, val);
1540                         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1541                                 xhci_error(xhcip, "failed to write forced "
1542                                     "takeover: encountered fatal FM register "
1543                                     "error");
1544                                 ddi_fm_service_impact(xhcip->xhci_dip,
1545                                     DDI_SERVICE_LOST);
1546                                 return (B_FALSE);
1547                         }
1548                 }
1549         }
1550 
1551         val = xhci_get32(xhcip, XHCI_R_CAP, off + XHCI_XECP_LEGCTLSTS);
1552         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1553                 xhci_error(xhcip, "failed to read legacy control registers: "
1554                     "encountered fatal FM register error");
1555                 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1556                 return (B_FALSE);
1557         }
1558         val &= XHCI_XECP_SMI_MASK;
1559         val |= XHCI_XECP_CLEAR_SMI;
1560         xhci_put32(xhcip, XHCI_R_CAP, off + XHCI_XECP_LEGCTLSTS, val);
1561         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1562                 xhci_error(xhcip, "failed to write legacy control registers: "
1563                     "encountered fatal FM register error");
1564                 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1565                 return (B_FALSE);
1566         }
1567 
1568         return (B_TRUE);
1569 }
1570 
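/*
 * Ask the controller to stop by clearing the Run/Stop and interrupt enable
 * bits in USBCMD, then wait for the HCHalted bit to be set in USBSTS.
 */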
1571 static int
1572 xhci_controller_stop(xhci_t *xhcip)
1573 {
1574         uint32_t cmdreg;
1575 
1576         cmdreg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
1577         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1578                 xhci_error(xhcip, "failed to read USB Command register: "
1579                     "encountered fatal FM register error");
1580                 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1581                 return (EIO);
1582         }
1583 
1584         cmdreg &= ~(XHCI_CMD_RS | XHCI_CMD_INTE);
1585         xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, cmdreg);
1586         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1587                 xhci_error(xhcip, "failed to write USB Command register: "
1588                     "encountered fatal FM register error");
1589                 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1590                 return (EIO);
1591         }
1592 
1593         /*
1594          * Wait for this to occur (up to 500ms: 50 tries at 10ms intervals).
1595          * The specification says that this should stop within 16ms, but we
1596          * give ourselves a bit more time just in case.
1597          */
1598         return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS, XHCI_STS_HCH,
1599             XHCI_STS_HCH, 50, 10));
1600 }
1601 
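/*
 * Reset the controller by setting the Host Controller Reset bit in USBCMD and
 * waiting for both the reset bit and the Controller Not Ready status to clear.
 */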
1602 static int
1603 xhci_controller_reset(xhci_t *xhcip)
1604 {
1605         int ret;
1606         uint32_t cmdreg;
1607 
1608         cmdreg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
1609         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1610                 xhci_error(xhcip, "failed to read USB Command register for "
1611                     "reset: encountered fatal FM register error");
1612                 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1613                 return (EIO);
1614         }
1615 
1616         cmdreg |= XHCI_CMD_HCRST;
1617         xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, cmdreg);
1618         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1619                 xhci_error(xhcip, "failed to write USB Command register for "
1620                     "reset: encountered fatal FM register error");
1621                 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1622                 return (EIO);
1623         }
1624 
1625         /*
1626          * Some controllers apparently don't want to be touched for at least 1ms
1627          * after we initiate the reset. Therefore give all controllers this
1628          * moment to breathe.
1629          */
1630         delay(drv_usectohz(xhci_reset_delay));
1631 
1632         /*
1633          * To tell that the reset has completed, we first verify that the
1634          * USBCMD register no longer has the reset bit asserted. However, once
1635          * that's done we also have to verify that CNR (Controller Not Ready)
1636          * in USBSTS is no longer asserted.
1637          */
1638         if ((ret = xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBCMD,
1639             XHCI_CMD_HCRST, 0, 500, 10)) != 0)
1640                 return (ret);
1641 
1642         return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS,
1643             XHCI_STS_CNR, 0, 500, 10));
1644 }
1645 
1646 /*
1647  * Take care of all the required initialization before we can actually enable
1648  * the controller. This means that we need to:
1649  *
1650  *    o Program the maximum number of slots
1651  *    o Program the DCBAAP and allocate the scratchpad
1652  *    o Program the Command Ring
1653  *    o Initialize the Event Ring
1654  *    o Enable interrupts (set imod)
1655  */
1656 static int
1657 xhci_controller_configure(xhci_t *xhcip)
1658 {
1659         int ret;
1660         uint32_t config;
1661 
1662         config = xhci_get32(xhcip, XHCI_R_OPER, XHCI_CONFIG);
1663         config &= ~XHCI_CONFIG_SLOTS_MASK;
1664         config |= xhcip->xhci_caps.xcap_max_slots;
1665         xhci_put32(xhcip, XHCI_R_OPER, XHCI_CONFIG, config);
1666         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1667                 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1668                 return (EIO);
1669         }
1670 
1671         if ((ret = xhci_context_init(xhcip)) != 0) {
1672                 const char *reason;
1673                 if (ret == EIO) {
1674                         reason = "fatal FM I/O error occurred";
1675                 } else if (ret == ENOMEM) {
1676                         reason = "unable to allocate DMA memory";
1677                 } else {
1678                         reason = "unexpected error occurred";
1679                 }
1680 
1681                 xhci_error(xhcip, "failed to initialize xhci context "
1682                     "registers: %s (%d)", reason, ret);
1683                 return (ret);
1684         }
1685 
1686         if ((ret = xhci_command_ring_init(xhcip)) != 0) {
1687                 xhci_error(xhcip, "failed to initialize commands: %d", ret);
1688                 return (ret);
1689         }
1690 
1691         if ((ret = xhci_event_init(xhcip)) != 0) {
1692                 xhci_error(xhcip, "failed to initialize events: %d", ret);
1693                 return (ret);
1694         }
1695 
1696         if ((ret = xhci_intr_conf(xhcip)) != 0) {
1697                 xhci_error(xhcip, "failed to configure interrupts: %d", ret);
1698                 return (ret);
1699         }
1700 
1701         return (0);
1702 }
1703 
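/*
 * Start the controller by setting the Run/Stop bit in USBCMD and waiting for
 * the HCHalted bit in USBSTS to clear.
 */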
1704 static int
1705 xhci_controller_start(xhci_t *xhcip)
1706 {
1707         uint32_t reg;
1708 
1709         reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
1710         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1711                 xhci_error(xhcip, "failed to read USB Command register for "
1712                     "start: encountered fatal FM register error");
1713                 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1714                 return (EIO);
1715         }
1716 
1717         reg |= XHCI_CMD_RS;
1718         xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, reg);
1719         if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1720                 xhci_error(xhcip, "failed to write USB Command register for "
1721                     "start: encountered fatal FM register error");
1722                 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1723                 return (EIO);
1724         }
1725 
1726         return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS,
1727             XHCI_STS_HCH, 0, 500, 10));
1728 }
1729 
1730 /* ARGSUSED */
1731 static void
1732 xhci_reset_task(void *arg)
1733 {
1734         /*
1735          * Longer term, we'd like to properly perform a controller reset.
1736          * However, that requires a bit more assistance from USBA to work
1737          * properly and tear down devices. In the meantime, we panic.
1738          */
1739         panic("XHCI runtime reset required");
1740 }
1741 
1742 /*
1743  * This function is called when we've detected a fatal FM condition that has
1744  * resulted in a loss of service and we need to force a reset of the controller
1745  * as a whole. Only one such reset may be ongoing at a time.
1746  */
1747 void
1748 xhci_fm_runtime_reset(xhci_t *xhcip)
1749 {
1750         boolean_t locked = B_FALSE;
1751 
1752         if (mutex_owned(&xhcip->xhci_lock)) {
1753                 locked = B_TRUE;
1754         } else {
1755                 mutex_enter(&xhcip->xhci_lock);
1756         }
1757 
1758         /*
1759          * If we're already in the error state then a reset is already ongoing
1760          * and there is nothing for us to do here.
1761          */
1762         if (xhcip->xhci_state & XHCI_S_ERROR) {
1763                 goto out;
1764         }
1765 
1766         xhcip->xhci_state |= XHCI_S_ERROR;
1767         ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1768         taskq_dispatch_ent(xhci_taskq, xhci_reset_task, xhcip, 0,
1769             &xhcip->xhci_tqe);
1770 out:
1771         if (!locked) {
1772                 mutex_exit(&xhcip->xhci_lock);
1773         }
1774 }
1775 
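/*
 * The next three functions implement our private ioctls, which operate
 * directly on the PORTSC registers: XHCI_IOCTL_PORTSC copies out the current
 * PORTSC value for every root hub port, XHCI_IOCTL_CLEAR acknowledges a port's
 * change bits, and XHCI_IOCTL_SETPLS forces a port link state transition.
 */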
1776 static int
1777 xhci_ioctl_portsc(xhci_t *xhcip, intptr_t arg)
1778 {
1779         int i;
1780         xhci_ioctl_portsc_t xhi;
1781 
1782         bzero(&xhi, sizeof (xhci_ioctl_portsc_t));
1783         xhi.xhi_nports = xhcip->xhci_caps.xcap_max_ports;
1784         for (i = 1; i <= xhcip->xhci_caps.xcap_max_ports; i++) {
1785                 xhi.xhi_portsc[i] = xhci_get32(xhcip, XHCI_R_OPER,
1786                     XHCI_PORTSC(i));
1787         }
1788 
1789         if (ddi_copyout(&xhi, (void *)(uintptr_t)arg, sizeof (xhi), 0) != 0)
1790                 return (EFAULT);
1791 
1792         return (0);
1793 }
1794 
1795 static int
1796 xhci_ioctl_clear(xhci_t *xhcip, intptr_t arg)
1797 {
1798         uint32_t reg;
1799         xhci_ioctl_clear_t xic;
1800 
1801         if (ddi_copyin((const void *)(uintptr_t)arg, &xic, sizeof (xic),
1802             0) != 0)
1803                 return (EFAULT);
1804 
1805         if (xic.xic_port == 0 || xic.xic_port >
1806             xhcip->xhci_caps.xcap_max_ports)
1807                 return (EINVAL);
1808 
1809         reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xic.xic_port));
1810         reg &= ~XHCI_PS_CLEAR;
1811         reg |= XHCI_PS_CSC | XHCI_PS_PEC | XHCI_PS_WRC | XHCI_PS_OCC |
1812             XHCI_PS_PRC | XHCI_PS_PLC | XHCI_PS_CEC;
1813         xhci_put32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xic.xic_port), reg);
1814 
1815         return (0);
1816 }
1817 
1818 static int
1819 xhci_ioctl_setpls(xhci_t *xhcip, intptr_t arg)
1820 {
1821         uint32_t reg;
1822         xhci_ioctl_setpls_t xis;
1823 
1824         if (ddi_copyin((const void *)(uintptr_t)arg, &xis, sizeof (xis),
1825             0) != 0)
1826                 return (EFAULT);
1827 
1828         if (xis.xis_port == 0 || xis.xis_port >
1829             xhcip->xhci_caps.xcap_max_ports)
1830                 return (EINVAL);
1831 
1832         if (xis.xis_pls & ~0xf)
1833                 return (EINVAL);
1834 
1835         reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xis.xis_port));
1836         reg &= ~XHCI_PS_CLEAR;
1837         reg |= XHCI_PS_PLS_SET(xis.xis_pls);
1838         reg |= XHCI_PS_LWS;
1839         xhci_put32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xis.xis_port), reg);
1840 
1841         return (0);
1842 }
1843 
1844 static int
1845 xhci_open(dev_t *devp, int flags, int otyp, cred_t *credp)
1846 {
1847         dev_info_t *dip = xhci_get_dip(*devp);
1848 
1849         return (usba_hubdi_open(dip, devp, flags, otyp, credp));
1850 }
1851 
1852 static int
1853 xhci_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1854     int *rvalp)
1855 {
1856         dev_info_t *dip = xhci_get_dip(dev);
1857 
1858         if (cmd == XHCI_IOCTL_PORTSC ||
1859             cmd == XHCI_IOCTL_CLEAR ||
1860             cmd == XHCI_IOCTL_SETPLS) {
1861                 xhci_t *xhcip = ddi_get_soft_state(xhci_soft_state,
1862                     getminor(dev) & ~HUBD_IS_ROOT_HUB);
1863 
1864                 if (secpolicy_xhci(credp) != 0 ||
1865                     crgetzoneid(credp) != GLOBAL_ZONEID)
1866                         return (EPERM);
1867 
1868                 if (mode & FKIOCTL)
1869                         return (ENOTSUP);
1870 
1871                 if (!(mode & FWRITE))
1872                         return (EBADF);
1873 
1874                 if (cmd == XHCI_IOCTL_PORTSC)
1875                         return (xhci_ioctl_portsc(xhcip, arg));
1876                 else if (cmd == XHCI_IOCTL_CLEAR)
1877                         return (xhci_ioctl_clear(xhcip, arg));
1878                 else
1879                         return (xhci_ioctl_setpls(xhcip, arg));
1880         }
1881 
1882         return (usba_hubdi_ioctl(dip, dev, cmd, arg, mode, credp, rvalp));
1883 }
1884 
1885 static int
1886 xhci_close(dev_t dev, int flag, int otyp, cred_t *credp)
1887 {
1888         dev_info_t *dip = xhci_get_dip(dev);
1889 
1890         return (usba_hubdi_close(dip, dev, flag, otyp, credp));
1891 }
1892 
1893 /*
1894  * We try to clean up everything that we can. The only thing that we let stop us
1895  * at this time is a failure to remove the root hub, which is realistically the
1896  * equivalent of our EBUSY case.
1897  */
1898 static int
1899 xhci_cleanup(xhci_t *xhcip)
1900 {
1901         int ret, inst;
1902 
1903         if (xhcip->xhci_seq & XHCI_ATTACH_ROOT_HUB) {
1904                 if ((ret = xhci_root_hub_fini(xhcip)) != 0)
1905                         return (ret);
1906         }
1907 
1908         if (xhcip->xhci_seq & XHCI_ATTACH_USBA) {
1909                 xhci_hcd_fini(xhcip);
1910         }
1911 
1912         if (xhcip->xhci_seq & XHCI_ATTACH_STARTED) {
1913                 mutex_enter(&xhcip->xhci_lock);
1914                 while (xhcip->xhci_state & XHCI_S_ERROR)
1915                         cv_wait(&xhcip->xhci_statecv, &xhcip->xhci_lock);
1916                 mutex_exit(&xhcip->xhci_lock);
1917 
1918                 (void) xhci_controller_stop(xhcip);
1919         }
1920 
1921         /*
1922          * Always release the context, command, and event data. They handle the
1923          * fact that they may be in an arbitrary state or unallocated.
1924          */
1925         xhci_event_fini(xhcip);
1926         xhci_command_ring_fini(xhcip);
1927         xhci_context_fini(xhcip);
1928 
1929         if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ENABLE) {
1930                 (void) xhci_ddi_intr_disable(xhcip);
1931         }
1932 
1933         if (xhcip->xhci_seq & XHCI_ATTACH_SYNCH) {
1934                 cv_destroy(&xhcip->xhci_statecv);
1935                 mutex_destroy(&xhcip->xhci_lock);
1936         }
1937 
1938         if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ADD) {
1939                 if ((ret = ddi_intr_remove_handler(xhcip->xhci_intr_hdl)) !=
1940                     DDI_SUCCESS) {
1941                         xhci_error(xhcip, "failed to remove interrupt "
1942                             "handler: %d", ret);
1943                 }
1944         }
1945 
1946         if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ALLOC) {
1947                 if ((ret = ddi_intr_free(xhcip->xhci_intr_hdl)) !=
1948                     DDI_SUCCESS) {
1949                         xhci_error(xhcip, "failed to free interrupts: %d", ret);
1950                 }
1951         }
1952 
1953         if (xhcip->xhci_seq & XHCI_ATTACH_REGS_MAP) {
1954                 ddi_regs_map_free(&xhcip->xhci_regs_handle);
1955                 xhcip->xhci_regs_handle = NULL;
1956         }
1957 
1958         if (xhcip->xhci_seq & XHCI_ATTACH_PCI_CONFIG) {
1959                 pci_config_teardown(&xhcip->xhci_cfg_handle);
1960                 xhcip->xhci_cfg_handle = NULL;
1961         }
1962 
1963         if (xhcip->xhci_seq & XHCI_ATTACH_FM) {
1964                 xhci_fm_fini(xhcip);
1965                 xhcip->xhci_fm_caps = 0;
1966         }
1967 
1968         inst = ddi_get_instance(xhcip->xhci_dip);
1969         xhcip->xhci_dip = NULL;
1970         ddi_soft_state_free(xhci_soft_state, inst);
1971 
1972         return (DDI_SUCCESS);
1973 }
1974 
1975 /* QUIESCE(9E) to support fast reboot */
1976 int
1977 xhci_quiesce(dev_info_t *dip)
1978 {
1979         xhci_t *xhcip;
1980 
1981         xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
1982 
1983         return (xhci_controller_stop(xhcip) == 0 &&
1984             xhci_controller_reset(xhcip) == 0 ? DDI_SUCCESS : DDI_FAILURE);
1985 }
1986 
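/*
 * attach(9E) entry point. Each successfully completed step is recorded in
 * xhci_seq so that xhci_cleanup() can unwind a partially completed attach: FM
 * setup, PCI config access, register mapping, parameter and quirk discovery,
 * interrupt allocation, firmware hand-off, controller stop/reset/configure/
 * start, and finally registration with the USBA and creation of the root hub.
 */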
1987 static int
1988 xhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1989 {
1990         int ret, inst, route;
1991         xhci_t *xhcip;
1992 
1993         if (cmd != DDI_ATTACH)
1994                 return (DDI_FAILURE);
1995 
1996         inst = ddi_get_instance(dip);
1997         if (ddi_soft_state_zalloc(xhci_soft_state, inst) != 0)
1998                 return (DDI_FAILURE);
1999         xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
2000         xhcip->xhci_dip = dip;
2001 
2002         xhcip->xhci_regs_capoff = PCI_EINVAL32;
2003         xhcip->xhci_regs_operoff = PCI_EINVAL32;
2004         xhcip->xhci_regs_runoff = PCI_EINVAL32;
2005         xhcip->xhci_regs_dooroff = PCI_EINVAL32;
2006 
2007         xhci_fm_init(xhcip);
2008         xhcip->xhci_seq |= XHCI_ATTACH_FM;
2009 
2010         if (pci_config_setup(xhcip->xhci_dip, &xhcip->xhci_cfg_handle) !=
2011             DDI_SUCCESS) {
2012                 goto err;
2013         }
2014         xhcip->xhci_seq |= XHCI_ATTACH_PCI_CONFIG;
2015         xhcip->xhci_vendor_id = pci_config_get16(xhcip->xhci_cfg_handle,
2016             PCI_CONF_VENID);
2017         xhcip->xhci_device_id = pci_config_get16(xhcip->xhci_cfg_handle,
2018             PCI_CONF_DEVID);
2019 
2020         if (xhci_regs_map(xhcip) == B_FALSE) {
2021                 goto err;
2022         }
2023 
2024         xhcip->xhci_seq |= XHCI_ATTACH_REGS_MAP;
2025 
2026         if (xhci_regs_init(xhcip) == B_FALSE)
2027                 goto err;
2028 
2029         if (xhci_read_params(xhcip) == B_FALSE)
2030                 goto err;
2031 
2032         if (xhci_identify(xhcip) == B_FALSE)
2033                 goto err;
2034 
2035         if (xhci_alloc_intrs(xhcip) == B_FALSE)
2036                 goto err;
2037         xhcip->xhci_seq |= XHCI_ATTACH_INTR_ALLOC;
2038 
2039         if (xhci_add_intr_handler(xhcip) == B_FALSE)
2040                 goto err;
2041         xhcip->xhci_seq |= XHCI_ATTACH_INTR_ADD;
2042 
2043         mutex_init(&xhcip->xhci_lock, NULL, MUTEX_DRIVER,
2044             (void *)(uintptr_t)xhcip->xhci_intr_pri);
2045         cv_init(&xhcip->xhci_statecv, NULL, CV_DRIVER, NULL);
2046         xhcip->xhci_seq |= XHCI_ATTACH_SYNCH;
2047 
2048         if (xhci_port_count(xhcip) == B_FALSE)
2049                 goto err;
2050 
2051         if (xhci_controller_takeover(xhcip) == B_FALSE)
2052                 goto err;
2053 
2054         /*
2055          * We don't enable interrupts until after we take over the controller
2056          * from the BIOS. We've observed cases where enabling them any earlier
2057          * can cause spurious interrupts.
2058          */
2059         if (xhci_ddi_intr_enable(xhcip) == B_FALSE)
2060                 goto err;
2061         xhcip->xhci_seq |= XHCI_ATTACH_INTR_ENABLE;
2062 
2063         if ((ret = xhci_controller_stop(xhcip)) != 0) {
2064                 xhci_error(xhcip, "failed to stop controller: %s",
2065                     ret == EIO ? "encountered FM register error" :
2066                     "timed out while waiting for controller");
2067                 goto err;
2068         }
2069 
2070         if ((ret = xhci_controller_reset(xhcip)) != 0) {
2071                 xhci_error(xhcip, "failed to reset controller: %s",
2072                     ret == EIO ? "encountered FM register error" :
2073                     "timed out while waiting for controller");
2074                 goto err;
2075         }
2076 
2077         if ((ret = xhci_controller_configure(xhcip)) != 0) {
2078                 xhci_error(xhcip, "failed to configure controller: %d", ret);
2079                 goto err;
2080         }
2081 
2082         /*
2083          * Some systems support having ports routed to both an ehci and xhci
2084          * controller. If we support it and the user hasn't requested otherwise
2085          * via a driver.conf tuning, we reroute it now.
2086          */
2087         route = ddi_prop_get_int(DDI_DEV_T_ANY, xhcip->xhci_dip,
2088             DDI_PROP_DONTPASS, "xhci-reroute", XHCI_PROP_REROUTE_DEFAULT);
2089         if (route != XHCI_PROP_REROUTE_DISABLE &&
2090             (xhcip->xhci_quirks & XHCI_QUIRK_INTC_EHCI))
2091                 (void) xhci_reroute_intel(xhcip);
2092 
2093         if ((ret = xhci_controller_start(xhcip)) != 0) {
2094                 xhci_log(xhcip, "failed to start controller: %s",
2095                     ret == EIO ? "encountered FM register error" :
2096                     "timed out while waiting for controller");
2097                 goto err;
2098         }
2099         xhcip->xhci_seq |= XHCI_ATTACH_STARTED;
2100 
2101         /*
2102          * Finally, register ourselves with the USB framework itself.
2103          */
2104         if ((ret = xhci_hcd_init(xhcip)) != 0) {
2105                 xhci_error(xhcip, "failed to register hcd with usba");
2106                 goto err;
2107         }
2108         xhcip->xhci_seq |= XHCI_ATTACH_USBA;
2109 
2110         if ((ret = xhci_root_hub_init(xhcip)) != 0) {
2111                 xhci_error(xhcip, "failed to load the root hub driver");
2112                 goto err;
2113         }
2114         xhcip->xhci_seq |= XHCI_ATTACH_ROOT_HUB;
2115 
2116         return (DDI_SUCCESS);
2117 
2118 err:
2119         (void) xhci_cleanup(xhcip);
2120         return (DDI_FAILURE);
2121 }
2122 
2123 static int
2124 xhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2125 {
2126         xhci_t *xhcip;
2127 
2128         if (cmd != DDI_DETACH)
2129                 return (DDI_FAILURE);
2130 
2131         xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
2132         if (xhcip == NULL) {
2133                 dev_err(dip, CE_WARN, "detach called without soft state!");
2134                 return (DDI_FAILURE);
2135         }
2136 
2137         return (xhci_cleanup(xhcip));
2138 }
2139 
2140 /* ARGSUSED */
2141 static int
2142 xhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **outp)
2143 {
2144         dev_t dev;
2145         int inst;
2146 
2147         switch (cmd) {
2148         case DDI_INFO_DEVT2DEVINFO:
2149                 dev = (dev_t)arg;
2150                 *outp = xhci_get_dip(dev);
2151                 if (*outp == NULL)
2152                         return (DDI_FAILURE);
2153                 break;
2154         case DDI_INFO_DEVT2INSTANCE:
2155                 dev = (dev_t)arg;
2156                 inst = getminor(dev) & ~HUBD_IS_ROOT_HUB;
2157                 *outp = (void *)(uintptr_t)inst;
2158                 break;
2159         default:
2160                 return (DDI_FAILURE);
2161         }
2162 
2163         return (DDI_SUCCESS);
2164 }
2165 
2166 static struct cb_ops xhci_cb_ops = {
2167         xhci_open,              /* cb_open */
2168         xhci_close,             /* cb_close */
2169         nodev,                  /* cb_strategy */
2170         nodev,                  /* cb_print */
2171         nodev,                  /* cb_dump */
2172         nodev,                  /* cb_read */
2173         nodev,                  /* cb_write */
2174         xhci_ioctl,             /* cb_ioctl */
2175         nodev,                  /* cb_devmap */
2176         nodev,                  /* cb_mmap */
2177         nodev,                  /* cb_segmap */
2178         nochpoll,               /* cb_chpoll */
2179         ddi_prop_op,            /* cb_prop_op */
2180         NULL,                   /* cb_stream */
2181         D_MP | D_HOTPLUG,       /* cb_flag */
2182         CB_REV,                 /* cb_rev */
2183         nodev,                  /* cb_aread */
2184         nodev                   /* cb_awrite */
2185 };
2186 
2187 static struct dev_ops xhci_dev_ops = {
2188         DEVO_REV,                       /* devo_rev */
2189         0,                              /* devo_refcnt */
2190         xhci_getinfo,                   /* devo_getinfo */
2191         nulldev,                        /* devo_identify */
2192         nulldev,                        /* devo_probe */
2193         xhci_attach,                    /* devo_attach */
2194         xhci_detach,                    /* devo_detach */
2195         nodev,                          /* devo_reset */
2196         &xhci_cb_ops,                   /* devo_cb_ops */
2197         &usba_hubdi_busops,             /* devo_bus_ops */
2198         usba_hubdi_root_hub_power,      /* devo_power */
2199         xhci_quiesce                    /* devo_quiesce */
2200 };
2201 
2202 static struct modldrv xhci_modldrv = {
2203         &mod_driverops,
2204         "USB xHCI Driver",
2205         &xhci_dev_ops
2206 };
2207 
2208 static struct modlinkage xhci_modlinkage = {
2209         MODREV_1,
2210         &xhci_modldrv,
2211         NULL
2212 };
2213 
2214 int
2215 _init(void)
2216 {
2217         int ret;
2218 
2219         if ((ret = ddi_soft_state_init(&xhci_soft_state, sizeof (xhci_t),
2220             0)) != 0) {
2221                 return (ret);
2222         }
2223 
2224         xhci_taskq = taskq_create("xhci_taskq", 1, minclsyspri, 0, 0, 0);
2225         if (xhci_taskq == NULL) {
2226                 ddi_soft_state_fini(&xhci_soft_state);
2227                 return (ENOMEM);
2228         }
2229 
2230         if ((ret = mod_install(&xhci_modlinkage)) != 0) {
2231                 taskq_destroy(xhci_taskq);
2232                 xhci_taskq = NULL;
2233         }
2234 
2235         return (ret);
2236 }
2237 
2238 int
2239 _info(struct modinfo *modinfop)
2240 {
2241         return (mod_info(&xhci_modlinkage, modinfop));
2242 }
2243 
2244 int
2245 _fini(void)
2246 {
2247         int ret;
2248 
2249         if ((ret = mod_remove(&xhci_modlinkage)) != 0)
2250                 return (ret);
2251 
2252         if (xhci_taskq != NULL) {
2253                 taskq_destroy(xhci_taskq);
2254                 xhci_taskq = NULL;
2255         }
2256 
2257         ddi_soft_state_fini(&xhci_soft_state);
2258 
2259         return (0);
2260 }