Print this page
Add xhci_quiesce to support fast reboot.
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/usb/hcd/xhci/xhci.c
+++ new/usr/src/uts/common/io/usb/hcd/xhci/xhci.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
↓ open down ↓ |
3 lines elided |
↑ open up ↑ |
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright (c) 2017, Joyent, Inc.
14 + * Copyright (c) 2018, Western Digital Corporation.
14 15 */
15 16
16 17 /*
17 18 * Extensible Host Controller Interface (xHCI) USB Driver
18 19 *
19 20 * The xhci driver is an HCI driver for USB that bridges the gap between client
20 21 * device drivers and implements the actual way that we talk to devices. The
21 22 * xhci specification provides access to USB 3.x capable devices, as well as all
22 23 * prior generations. Like other host controllers, it both provides the way to
23 24 * talk to devices and also is treated like a hub (often called the root hub).
24 25 *
25 26 * This driver is part of the USBA (USB Architecture). It implements the HCDI
26 27 * (host controller device interface) end of USBA. These entry points are used
27 28 * by the USBA on behalf of client device drivers to access their devices. The
28 29 * driver also provides notifications to deal with hot plug events, which are
29 30 * quite common in USB.
30 31 *
31 32 * ----------------
32 33 * USB Introduction
33 34 * ----------------
34 35 *
35 36 * To properly understand the xhci driver and the design of the USBA HCDI
36 37 * interfaces it implements, it helps to have a bit of background into how USB
37 38 * devices are structured and understand how they work at a high-level.
38 39 *
39 40 * USB devices, like PCI devices, are broken down into different classes of
40 41 * device. For example, with USB you have hubs, human-input devices (keyboards,
41 42 * mice, etc.), mass storage, etc. Every device also has a vendor and device ID.
42 43 * Many client drivers bind to an entire class of device, for example, the hubd
43 44 * driver (to hubs) or scsa2usb (USB storage). However, there are other drivers
44 45 * that bind to explicit IDs such as usbsprl (specific USB to Serial devices).
45 46 *
46 47 * USB SPEEDS AND VERSIONS
47 48 *
48 49 * USB devices are often referred to in two different ways. One way they're
49 50 * described is with the USB version that they conform to. In the wild, you're
50 51 * most likely going to see USB 1.1, 2.0, 2.1, and 3.0. However, you may also
51 52 * see devices referred to as 'full-', 'low-', 'high-', and 'super-' speed
52 53 * devices.
53 54 *
54 55 * The latter description describes the maximum theoretical speed of a given
55 56 * device. For example, a super-speed device theoretically caps out around 5
56 57 * Gbit/s, whereas a low-speed device caps out at 1.5 Mbit/s.
57 58 *
58 59 * In general, each speed usually corresponds to a specific USB protocol
59 60 * generation. For example, all USB 3.0 devices are super-speed devices. All
60 61 * 'high-speed' devices are USB 2.x devices. Full-speed devices are special in
61 62 * that they can either be USB 1.x or USB 2.x devices. Low-speed devices are
62 63 * only a USB 1.x thing, they did not jump the fire line to USB 2.x.
63 64 *
64 65 * USB 3.0 devices and ports generally have the wiring for both USB 2.0 and USB
65 66 * 3.0. When a USB 3.x device is plugged into a USB 2.0 port or hub, then it
66 67 * will report its version as USB 2.1, to indicate that it is actually a USB 3.x
67 68 * device.
68 69 *
69 70 * USB ENDPOINTS
70 71 *
71 72 * A given USB device is made up of endpoints. A request, or transfer, is made
72 73 * to a specific USB endpoint. These endpoints can provide different services
73 74 * and have different expectations around the size of the data that'll be used
74 75 * in a given request and the periodicity of requests. Endpoints themselves are
75 76 * either used to make one-shot requests, for example, making requests to a mass
76 77 * storage device for a given sector, or for making periodic requests where you
77 78 * end up polling on the endpoint, for example, polling on a USB keyboard for
78 79 * keystrokes.
↓ open down ↓ |
55 lines elided |
↑ open up ↑ |
79 80 *
80 81 * Each endpoint encodes two different pieces of information: a direction and a
81 82 * type. There are two different directions: IN and OUT. These refer to the
82 83 * general direction that data moves relative to the operating system. For
83 84 * example, an IN transfer transfers data in to the operating system, from the
84 85 * device. An OUT transfer transfers data from the operating system, out to the
85 86 * device.
86 87 *
87 88 * There are four different kinds of endpoints:
88 89 *
89 - * BULK These transfers are large transfers of data to or from
90 - * a device. The most common use for bulk transfers is for
91 - * mass storage devices. Though they are often also used by
92 - * network devices and more. Bulk endpoints do not have an
93 - * explicit time component to them. They are always used
94 - * for one-shot transfers.
90 + * BULK These transfers are large transfers of data to or from
91 + * a device. The most common use for bulk transfers is for
92 + * mass storage devices. Though they are often also used by
93 + * network devices and more. Bulk endpoints do not have an
94 + * explicit time component to them. They are always used
95 + * for one-shot transfers.
95 96 *
96 - * CONTROL These transfers are used to manipulate devices
97 - * themselves and are used for USB protocol level
98 - * operations (whether device-specific, class-specific, or
99 - * generic across all of USB). Unlike other transfers,
100 - * control transfers are always bi-directional and use
101 - * different kinds of transfers.
97 + * CONTROL These transfers are used to manipulate devices
98 + * themselves and are used for USB protocol level
99 + * operations (whether device-specific, class-specific, or
100 + * generic across all of USB). Unlike other transfers,
101 + * control transfers are always bi-directional and use
102 + * different kinds of transfers.
102 103 *
103 - * INTERRUPT Interrupt transfers are used for small transfers that
104 - * happen infrequently, but need reasonable latency. A good
105 - * example of interrupt transfers is to receive input from
106 - * a USB keyboard. Interrupt-IN transfers are generally
107 - * polled. Meaning that a client (device driver) opens up
108 - * an interrupt-IN pipe to poll on it, and receives
109 - * periodic updates whenever there is information
110 - * available. However, Interrupt transfers can be used
111 - * as one-shot transfers both going IN and OUT.
104 + * INTERRUPT Interrupt transfers are used for small transfers that
105 + * happen infrequently, but need reasonable latency. A good
106 + * example of interrupt transfers is to receive input from
107 + * a USB keyboard. Interrupt-IN transfers are generally
108 + * polled. Meaning that a client (device driver) opens up
109 + * an interrupt-IN pipe to poll on it, and receives
110 + * periodic updates whenever there is information
111 + * available. However, Interrupt transfers can be used
112 + * as one-shot transfers both going IN and OUT.
112 113 *
113 - * ISOCHRONOUS These transfers are things that happen once per
114 - * time-interval at a very regular rate. A good example of
115 - * these transfers are for audio and video. A device may
116 - * describe an interval as 10ms at which point it will read
117 - * or write the next batch of data every 10ms and transform
118 - * it for the user. There are no one-shot Isochronous-IN
119 - * transfers. There are one-shot Isochronous-OUT transfers,
120 - * but these are used by device drivers to always provide
121 - * the system with sufficient data.
114 + * ISOCHRONOUS These transfers are things that happen once per
115 + * time-interval at a very regular rate. A good example of
116 + * these transfers are for audio and video. A device may
117 + * describe an interval as 10ms at which point it will read
118 + * or write the next batch of data every 10ms and transform
119 + * it for the user. There are no one-shot Isochronous-IN
120 + * transfers. There are one-shot Isochronous-OUT transfers,
121 + * but these are used by device drivers to always provide
122 + * the system with sufficient data.
122 123 *
123 124 * To find out information about the endpoints, USB devices have a series of
124 125 * descriptors that cover different aspects of the device. For example, there
125 126 * are endpoint descriptors which cover the properties of endpoints such as the
126 127 * maximum packet size or polling interval.
127 128 *
128 129 * Descriptors exist at all levels of USB. For example, there are general
129 130 * descriptors for every device. The USB device descriptor is described in
130 131 * usb_dev_descr(9S). Host controllers will look at these descriptors to ensure
131 132 * that they program the device correctly; however, they are more often used by
132 133 * client device drivers. There are also descriptors that exist at a class
133 134 * level. For example, the hub class has a class-specific descriptor which
134 135 * describes properties of the hub. That information is requested for and used
135 136 * by the hub driver.
136 137 *
137 138 * All of the different descriptors are gathered by the system and placed into a
138 139 * tree which USBA sometimes calls the 'Configuration Cloud'. Client device
139 140 * drivers gain access to this cloud and then use them to open endpoints, which
140 141 * are called pipes in USBA (and some revisions of the USB specification).
141 142 *
142 143 * Each pipe gives access to a specific endpoint on the device which can be used
143 144 * to perform transfers of a specific type and direction. For example, a mass
144 145 * storage device often has three different endpoints, the default control
145 146 * endpoint (which every device has), a Bulk-IN endpoint, and a Bulk-OUT
146 147 * endpoint. The device driver ends up with three open pipes. One to the default
147 148 * control endpoint to configure the device, and then the other two are used to
148 149 * perform I/O.
149 150 *
150 151 * These routines translate more or less directly into calls to a host
151 152 * controller driver. A request to open a pipe takes an endpoint descriptor that
152 153 * describes the properties of the pipe, and the host controller driver (this
153 154 * driver) goes through and does any work necessary to allow the client device
154 155 * driver to access it. Once the pipe is open, it either makes one-shot
155 156 * transfers specific to the transfer type or it starts performing a periodic
156 157 * poll of an endpoint.
157 158 *
158 159 * All of these different actions translate into requests to the host
159 160 * controller. The host controller driver itself is in charge of making sure
160 161 * that all of the required resources for polling are allocated with a request
161 162 * and then proceeds to give the driver's periodic callbacks.
162 163 *
163 164 * HUBS AND HOST CONTROLLERS
164 165 *
165 166 * Every device is always plugged into a hub, even if the device is itself a
166 167 * hub. This continues until we reach what we call the root-hub. The root-hub is
167 168 * special in that it is not an actual USB hub, but is integrated into the host
168 169 * controller and is manipulated in its own way. For example, the host
169 170 * controller is used to turn on and off a given port's power. This may happen
170 171 * over any interface, though the most common way is through PCI.
171 172 *
172 173 * In addition to the normal character device that exists for a host controller
173 174 * driver, as part of attaching, the host controller binds to an instance of the
174 175 * hubd driver. While the root-hub is a bit of a fiction, everyone models the
175 176 * root-hub as the same as any other hub that's plugged in. The hub kernel
176 177 * module doesn't know that the hub isn't a physical device that's been plugged
177 178 * in. The host controller driver simulates that view by taking hub requests
178 179 * that are made and translating them into corresponding requests that are
179 180 * understood by the host controller, for example, reading and writing to a
180 181 * memory mapped register.
181 182 *
182 183 * The hub driver polls for changes in device state using an Interrupt-IN
183 184 * request, which is the same as is done for the root-hub. This allows the host
184 185 * controller driver to not have to know about the implementation of device hot
185 186 * plug, merely react to requests from a hub, the same as if it were an external
186 187 * device. When the hub driver detects a change, it will go through the
187 188 * corresponding state machine and attach or detach the corresponding client
188 189 * device driver, depending if the device was inserted or removed.
189 190 *
190 191 * We detect the changes for the Interrupt-IN primarily based on the port state
191 192 * change events that are delivered to the event ring. Whenever any event is
192 193 * fired, we use this to update the hub driver about _all_ ports with
193 194 * outstanding events. This more closely matches how a hub is supposed to behave
194 195 * and leaves things less likely for the hub driver to end up without clearing a
195 196 * flag on a port.
196 197 *
197 198 * PACKET SIZES AND BURSTING
198 199 *
199 200 * A given USB endpoint has an explicit packet size and a number of packets that
200 201 * can be sent per time interval. These concepts are abstracted away from client
201 202 * device drivers usually, though they sometimes inform the upper bounds of what
202 203 * a device can perform.
203 204 *
204 205 * The host controller uses this information to transform arbitrary transfer
205 206 * requests into USB protocol packets. One of the nice things about the host
206 207 * controllers is that they abstract away all of the signaling and semantics of
207 208 * the actual USB protocols, allowing for life to be slightly easier in the
208 209 * operating system.
209 210 *
210 211 * That said, if the host controller is not programmed correctly, these can end
211 212 * up causing transaction errors and other problems in response to the data that
212 213 * the host controller is trying to send or receive.
↓ open down ↓ |
81 lines elided |
↑ open up ↑ |
213 214 *
214 215 * ------------
215 216 * Organization
216 217 * ------------
217 218 *
218 219 * The driver is made up of the following files. Many of these have their own
219 220 * theory statements to describe what they do. Here, we touch on the
220 221 * purpose of each of these files.
221 222 *
222 223 * xhci_command.c: This file contains the logic to issue commands to the
223 - * controller as well as the actual functions that the
224 - * other parts of the driver use to cause those commands.
224 + * controller as well as the actual functions that the
225 + * other parts of the driver use to cause those commands.
225 226 *
226 227 * xhci_context.c: This file manages various data structures used by the
227 - * controller to manage the controller's and device's
228 - * context data structures. See more in the xHCI Overview
229 - * and General Design for more information.
228 + * controller to manage the controller's and device's
229 + * context data structures. See more in the xHCI Overview
230 + * and General Design for more information.
230 231 *
231 232 * xhci_dma.c: This manages the allocation of DMA memory and DMA
232 - * attributes for controller, whether memory is for a
233 - * transfer or something else. This file also deals with
234 - * all the logic of getting data in and out of DMA buffers.
233 + * attributes for controller, whether memory is for a
234 + * transfer or something else. This file also deals with
235 + * all the logic of getting data in and out of DMA buffers.
235 236 *
236 237 * xhci_endpoint.c: This manages all of the logic of handling endpoints or
237 - * pipes. It deals with endpoint configuration, I/O
238 - * scheduling, timeouts, and callbacks to USBA.
238 + * pipes. It deals with endpoint configuration, I/O
239 + * scheduling, timeouts, and callbacks to USBA.
239 240 *
240 241 * xhci_event.c: This manages callbacks from the hardware to the driver.
241 - * This covers command completion notifications and I/O
242 - * notifications.
242 + * This covers command completion notifications and I/O
243 + * notifications.
243 244 *
244 245 * xhci_hub.c: This manages the virtual root-hub. It basically
245 - * implements and translates all of the USB level requests
245 - * into xhci specific implementations. It also contains the
247 - * functions to register this hub with USBA.
246 + * implements and translates all of the USB level requests
247 + * into xhci specific implementations. It also contains the
248 + * functions to register this hub with USBA.
248 249 *
249 250 * xhci_intr.c: This manages the underlying interrupt allocation,
250 - * interrupt moderation, and interrupt routines.
251 + * interrupt moderation, and interrupt routines.
251 252 *
252 253 * xhci_quirks.c: This manages information about buggy hardware that's
253 - * been collected and experienced primarily from other
254 - * systems.
254 + * been collected and experienced primarily from other
255 + * systems.
255 256 *
256 257 * xhci_ring.c: This manages the abstraction of a ring in xhci, which is
257 - * the primary means of communication between the driver and the
258 - * hardware, whether for the controller or a device.
258 + * the primary means of communication between the driver and the
259 + * hardware, whether for the controller or a device.
259 260 *
260 261 * xhci_usba.c: This implements all of the HCDI functions required by
261 - * USBA. This is the main entry point that drivers and the
262 - * kernel frameworks will reach to start any operation.
263 - * Many functions here will end up in the command and
264 - * endpoint code.
262 + * USBA. This is the main entry point that drivers and the
263 + * kernel frameworks will reach to start any operation.
264 + * Many functions here will end up in the command and
265 + * endpoint code.
265 266 *
266 267 * xhci.c: This provides the main kernel DDI interfaces and
267 - * performs device initialization.
268 + * performs device initialization.
268 269 *
269 270 * xhci.h: This is the primary header file which defines
270 - * illumos-specific data structures and constants to manage
271 - * the system.
271 + * illumos-specific data structures and constants to manage
272 + * the system.
272 273 *
273 274 * xhcireg.h: This header file defines all of the register offsets,
274 - * masks, and related macros. It also contains all of the
275 - * constants that are used in various structures as defined
276 - * by the specification, such as command offsets, etc.
275 + * masks, and related macros. It also contains all of the
276 + * constants that are used in various structures as defined
277 + * by the specification, such as command offsets, etc.
277 278 *
278 279 * xhci_ioctl.h: This contains a few private ioctls that are used by a
279 - * private debugging command. These are private.
280 + * private debugging command. These are private.
280 281 *
281 282 * cmd/xhci/xhci_portsc: This is a private utility that can be useful for
282 - * debugging xhci state. It is the only consumer of
283 - * xhci_ioctl.h and the private ioctls.
283 + * debugging xhci state. It is the only consumer of
284 + * xhci_ioctl.h and the private ioctls.
284 285 *
285 286 * ----------------------------------
286 287 * xHCI Overview and Structure Layout
287 288 * ----------------------------------
288 289 *
289 290 * The design and structure of this driver follows from the way that the xHCI
290 291 * specification tells us that we have to work with hardware. First we'll give a
291 292 * rough summary of how that works, though the xHCI 1.1 specification should be
292 293 * referenced when going through this.
293 294 *
294 295 * There are three primary parts of the hardware -- registers, contexts, and
295 296 * rings. The registers are memory mapped registers that come in four sets,
296 297 * though all are found within the first BAR. These are used to program and
297 298 * control the hardware and aspects of the devices. Beyond more traditional
298 299 * device programming there are two primary sets of registers that are
299 300 * important:
300 301 *
301 302 * o Port Status and Control Registers (XHCI_PORTSC)
302 303 * o Doorbell Array (XHCI_DOORBELL)
303 304 *
304 305 * The port status and control registers are used to get and manipulate the
305 306 * status of a given device. For example, turning on and off the power to it.
306 307 * The Doorbell Array is used to kick off I/O operations and start the
307 308 * processing of an I/O ring.
308 309 *
309 310 * The contexts are data structures that represent various pieces of information
310 311 * in the controller. These contexts are generally filled out by the driver and
311 312 * then acknowledged and consumed by the hardware. There are controller-wide
312 313 * contexts (mostly managed in xhci_context.c) that are used to point to the
313 314 * contexts that exist for each device in the system. The primary context is
314 315 * called the Device Context Base Address Array (DCBAA).
315 316 *
316 317 * Each device in the system is allocated a 'slot', which is used to index into
317 318 * the DCBAA. Slots are assigned based on issuing commands to the controller.
318 319 * There are a fixed number of slots that determine the maximum number of
319 320 * devices that can end up being supported in the system. Note this includes all
320 321 * the devices plugged into the USB device tree, not just devices plugged into
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
321 322 * ports on the chassis.
322 323 *
323 324 * For each device, there is a context structure that describes properties of
324 325 * the device. For example, what speed is the device, is it a hub, etc. The
325 326 * context has slots for the device and for each endpoint on the device. As
326 327 * endpoints are enabled, their context information which describes things like
327 328 * the maximum packet size, is filled in and enabled. The mapping between these
328 329 * contexts look like:
329 330 *
330 331 *
331 - * DCBAA
332 - * +--------+ Device Context
332 + * DCBAA
333 + * +--------+ Device Context
333 334 * | Slot 0 |------------------>+--------------+
334 - * +--------+ | Slot Context |
335 - * | ... | +--------------+ +----------+
336 - * +--------+ +------+ | Endpoint 0 |------>| I/O Ring |
337 - * | Slot n |-->| NULL | | Context (Bi) | +----------+
338 - * +--------+ +------+ +--------------+
339 - * | Endpoint 1 |
340 - * | Context (Out)|
341 - * +--------------+
342 - * | Endpoint 1 |
343 - * | Context (In) |
344 - * +--------------+
345 - * | ... |
346 - * +--------------+
347 - * | Endpoint 15 |
348 - * | Context (In) |
349 - * +--------------+
335 + * +--------+ | Slot Context |
336 + * | ... | +--------------+ +----------+
337 + * +--------+ +------+ | Endpoint 0 |------>| I/O Ring |
338 + * | Slot n |-->| NULL | | Context (Bi) | +----------+
339 + * +--------+ +------+ +--------------+
340 + * | Endpoint 1 |
341 + * | Context (Out)|
342 + * +--------------+
343 + * | Endpoint 1 |
344 + * | Context (In) |
345 + * +--------------+
346 + * | ... |
347 + * +--------------+
348 + * | Endpoint 15 |
349 + * | Context (In) |
350 + * +--------------+
350 351 *
351 352 * These contexts are always owned by the controller, though we can read them
352 353 * after various operations complete. Commands that toggle device state use a
353 354 * specific input context, which is a variant of the device context. The only
354 355 * difference is that it has an input context structure ahead of it to say which
355 356 * sections of the device context should be evaluated.
356 357 *
357 358 * Each active endpoint points us to an I/O ring, which leads us to the third
358 359 * main data structure that's used by the device: rings. Rings are made up of
359 360 * transfer request blocks (TRBs), which are joined together to form a given
360 361 * transfer description (TD) which represents a single I/O request.
361 362 *
362 363 * These rings are used to issue I/O to individual endpoints, to issue commands
363 364 * to the controller, and to receive notification of changes and completions.
364 365 * Issued commands go on the special ring called the command ring while the
365 366 * change and completion notifications go on the event ring. More details are
366 367 * available in xhci_ring.c. Each of these structures is represented by an
367 368 * xhci_ring_t.
368 369 *
369 370 * Each ring can be made up of one or more disjoint regions of DMA; however, we
370 371 * only use a single one. This also impacts some additional registers and
371 372 * structures that exist. The event ring has an indirection table called the
372 373 * Event Ring Segment Table (ERST). Each entry in the table (a segment)
373 374 * describes a chunk of the event ring.
374 375 *
375 376 * One other thing worth calling out is the scratchpad. The scratchpad is a way
376 377 * for the controller to be given arbitrary memory by the OS that it can use.
377 378 * There are two parts to the scratchpad. The first part is an array whose
378 379 * entries contain pointers to the actual addresses for the pages. The second
379 380 * part that we allocate are the actual pages themselves.
380 381 *
381 382 * -----------------------------
382 383 * Endpoint State and Management
383 384 * -----------------------------
384 385 *
385 386 * Endpoint management is one of the key parts to the xhci driver as every
386 387 * endpoint is a pipe that a device driver uses, so they are our primary
387 388 * currency. Endpoints are enabled and disabled when the client device drivers
388 389 * open and close a pipe. When an endpoint is enabled, we have to fill in an
389 390 * endpoint's context structure with information about the endpoint. These
390 391 * basically tell the controller important properties which it uses to ensure
391 392 * that there is adequate bandwidth for the device.
392 393 *
393 394 * Each endpoint has its own ring as described in the previous section. We place
394 395 * TRBs (transfer request blocks) onto a given ring to request I/O be performed.
395 396 * Responses are placed on the event ring, in other words, the rings associated
↓ open down ↓ |
36 lines elided |
↑ open up ↑ |
396 397 * with an endpoint are purely for producing I/O.
397 398 *
398 399 * Endpoints have a defined state machine as described in xHCI 1.1 / 4.8.3.
399 400 * These states generally correspond with the state of the endpoint to process
400 401 * I/O and handle timeouts. The driver basically follows a similar state machine
401 402 * as described there. There are some deviations. For example, what they
402 403 * describe as 'running' we break into both the Idle and Running states below.
403 404 * We also have a notion of timed out and quiescing. The following image
404 405 * summarizes the states and transitions:
405 406 *
406 - * +------+ +-----------+
407 + * +------+ +-----------+
407 408 * | Idle |---------*--------------------->| Running |<-+
408 - * +------+ . I/O queued on +-----------+ |
409 - * ^ ring and timeout | | | |
410 - * | scheduled. | | | |
411 - * | | | | |
412 - * +-----*---------------------------------+ | | |
413 - * | . No I/Os remain | | |
414 - * | | | |
415 - * | +------*------------------+ | |
416 - * | | . Timeout | |
417 - * | | fires for | |
418 - * | | I/O | |
419 - * | v v |
420 - * | +-----------+ +--------+ |
421 - * | | Timed Out | | Halted | |
422 - * | +-----------+ +--------+ |
423 - * | | | |
424 - * | | +-----------+ | |
425 - * | +-->| Quiescing |<----------+ |
426 - * | +-----------+ |
427 - * | No TRBs. | . TRBs |
428 - * | remain . | . Remain |
429 - * +----------*----<------+-------->-------*-----------+
409 + * +------+ . I/O queued on +-----------+ |
410 + * ^ ring and timeout | | | |
411 + * | scheduled. | | | |
412 + * | | | | |
413 + * +-----*---------------------------------+ | | |
414 + * | . No I/Os remain | | |
415 + * | | | |
416 + * | +------*------------------+ | |
417 + * | | . Timeout | |
418 + * | | fires for | |
419 + * | | I/O | |
420 + * | v v |
421 + * | +-----------+ +--------+ |
422 + * | | Timed Out | | Halted | |
423 + * | +-----------+ +--------+ |
424 + * | | | |
425 + * | | +-----------+ | |
426 + * | +-->| Quiescing |<----------+ |
427 + * | +-----------+ |
428 + * | No TRBs. | . TRBs |
429 + * | remain . | . Remain |
430 + * +----------*----<------+-------->-------*-----------+
430 431 *
431 432 * Normally, a given endpoint will oscillate between having TRBs scheduled and
432 433 * not. Every time a new I/O is added to the endpoint, we'll ring the doorbell,
433 434 * making sure that we're processing the ring, presuming that the endpoint isn't
434 435 * in one of the error states.
435 436 *
436 437 * To detect device hangs, we have an active timeout(9F) per active endpoint
437 438 * that ticks at a one second rate while we still have TRBs outstanding on an
438 439 * endpoint. Once all outstanding TRBs have been processed, the timeout will
439 440 * stop itself and there will be no active checking until the endpoint has I/O
440 441 * scheduled on it again.
441 442 *
442 443 * There are two primary ways that things can go wrong on the endpoint. We can
443 444 * either have a timeout or an event that transitions the endpoint to the Halted
444 445 * state. In the halted state, we need to issue explicit commands to reset the
445 446 * endpoint before removing the I/O.
446 447 *
447 448 * The way we handle both a timeout and a halted condition is similar, but the
448 449 * way they are triggered is different. When we detect a halted condition, we
449 450 * don't immediately clean it up, and wait for the client device driver (or USBA
450 451 * on its behalf) to issue a pipe reset. When we detect a timeout, we
451 452 * immediately take action (assuming no other action is ongoing).
452 453 *
453 454 * In both cases, we quiesce the device, which takes care of dealing with taking
454 455 * the endpoint from whatever state it may be in and taking the appropriate
455 456 * actions based on the state machine in xHCI 1.1 / 4.8.3. The end of quiescing
↓ open down ↓ |
16 lines elided |
↑ open up ↑ |
456 457 * leaves the device stopped, which allows us to update the ring's pointer and
457 458 * remove any TRBs that are causing problems.
458 459 *
459 460 * As part of all this, we ensure that we can only be quiescing the device from
460 461 * a given path at a time. Any requests to schedule I/O during this time will
461 462 * generally fail.
462 463 *
463 464 * The following image describes the state machine for the timeout logic. It
464 465 * ties into the image above.
465 466 *
466 - * +----------+ +---------+
467 - * | Disabled |-----*--------------------->| Enabled |<--+
468 - * +----------+ . TRBs scheduled +---------+ *. 1 sec timer
469 - * ^ and no active | | | | fires and
470 - * | timer. | | | | another
471 - * | | | +--+--+ quiesce, in
472 - * | | | | a bad state,
473 - * +------*------------------------------+ | ^ or decrement
474 - * | . 1 sec timer | | I/O timeout
475 - * | fires and | |
476 - * | no TRBs or | +--------------+
477 - * | endpoint shutdown | |
478 - * | *. . timer counter |
479 - * ^ | reaches zero |
480 - * | v |
481 - * | +--------------+ |
482 - * +-------------*---------------<--| Quiesce ring |->---*-------+
483 - * . No more | and fail I/O | . restart
484 - * I/Os +--------------+ timer as
485 - * more I/Os
467 + * +----------+ +---------+
468 + * | Disabled |-----*--------------------->| Enabled |<--+
469 + * +----------+ . TRBs scheduled +---------+ *. 1 sec timer
470 + * ^ and no active | | | | fires and
471 + * | timer. | | | | another
472 + * | | | +--+--+ quiesce, in
473 + * | | | | a bad state,
474 + * +------*------------------------------+ | ^ or decrement
475 + * | . 1 sec timer | | I/O timeout
476 + * | fires and | |
477 + * | no TRBs or | +--------------+
478 + * | endpoint shutdown | |
479 + * | *. . timer counter |
480 + * ^ | reaches zero |
481 + * | v |
482 + * | +--------------+ |
483 + * +-------------*---------------<--| Quiesce ring |->---*-------+
484 + * . No more | and fail I/O | . restart
485 + * I/Os +--------------+ timer as
486 + * more I/Os
486 487 *
487 488 * As we described above, when there are active TRBs and I/Os, a 1 second
488 489 * timeout(9F) will be active. Each second, we decrement a counter on the
489 490 * current, active I/O until either a new I/O takes the head, or the counter
490 491 * reaches zero. If the counter reaches zero, then we go through, quiesce the
491 492 * ring, and then clean things up.
492 493 *
493 494 * ------------------
494 495 * Periodic Endpoints
495 496 * ------------------
496 497 *
497 498 * It's worth calling out periodic endpoints explicitly, as they operate
498 499 * somewhat differently. Periodic endpoints are limited to Interrupt-IN and
499 500 * Isochronous-IN. The USBA often uses the term polling for these. That's
500 501 * because the client only needs to make a single API call; however, they'll
501 502 * receive multiple callbacks until either an error occurs or polling is
502 503 * requested to be terminated.
503 504 *
504 505 * When we have one of these periodic requests, we end up always rescheduling
505 506 * I/O requests, as well as, having a specific number of pre-existing I/O
506 507 * requests to cover the periodic needs, in case of latency spikes. Normally,
507 508 * when replying to a request, we use the request handle that we were given.
508 509 * However, when we have a periodic request, we're required to duplicate the
509 510 * handle before giving them data.
510 511 *
511 512 * However, the duplication is a bit tricky. For everything that was duplicated,
512 513 * the framework expects us to submit data. Because of that, we don't duplicate
513 514 * them until they are needed. This minimizes the likelihood that we have
514 515 * outstanding requests to deal with when we encounter a fatal polling failure.
515 516 *
516 517 * Most of the polling setup logic happens in xhci_usba.c in
517 518 * xhci_hcdi_periodic_init(). The consumption and duplication is handled in
518 519 * xhci_endpoint.c.
519 520 *
520 521 * ----------------
521 522 * Structure Layout
522 523 * ----------------
↓ open down ↓ |
27 lines elided |
↑ open up ↑ |
523 524 *
524 525 * The following images relate the core data structures. The primary structure
525 526 * in the system is the xhci_t. This is the per-controller data structure that
526 527 * exists for each instance of the driver. From there, each device in the system
527 528 * is represented by an xhci_device_t and each endpoint is represented by an
528 529 * xhci_endpoint_t. For each client that opens a given endpoint, there is an
529 530 * xhci_pipe_t. For each I/O related ring, there is an xhci_ring_t in the
530 531 * system.
531 532 *
532 533 * +------------------------+
533 - * | Per-Controller |
534 - * | Structure |
535 - * | xhci_t |
536 - * | |
537 - * | uint_t ---+--> Capability regs offset
538 - * | uint_t ---+--> Operational regs offset
539 - * | uint_t ---+--> Runtime regs offset
540 - * | uint_t ---+--> Doorbell regs offset
534 + * | Per-Controller |
535 + * | Structure |
536 + * | xhci_t |
537 + * | |
538 + * | uint_t ---+--> Capability regs offset
539 + * | uint_t ---+--> Operational regs offset
540 + * | uint_t ---+--> Runtime regs offset
541 + * | uint_t ---+--> Doorbell regs offset
541 542 * | xhci_state_flags_t ---+--> Device state flags
542 - * | xhci_quirks_t ---+--> Device quirk flags
543 + * | xhci_quirks_t ---+--> Device quirk flags
543 544 * | xhci_capability_t ---+--> Controller capability structure
544 - * | xhci_dcbaa_t ---+----------------------------------+
545 - * | xhci_scratchpad_t ---+---------+ |
546 - * | xhci_command_ing_t ---+------+ | v
547 - * | xhci_event_ring_t ---+----+ | | +---------------------+
548 - * | xhci_usba_t ---+--+ | | | | Device Context |
549 - * +------------------------+ | | | | | Base Address |
550 - * | | | | | Array Structure |
551 - * | | | | | xhci_dcbaa_t |
552 - * +-------------------------------+ | | | | |
553 - * | +-------------------------------+ | | DCBAA KVA <-+-- uint64_t * |
554 - * | | +----------------------------+ | DMA Buffer <-+-- xhci_dma_buffer_t |
555 - * | | v | +---------------------+
556 - * | | +--------------------------+ +-----------------------+
557 - * | | | Event Ring | |
558 - * | | | Management | |
559 - * | | | xhci_event_ring_t | v
560 - * | | | | Event Ring +----------------------+
561 - * | | | xhci_event_segment_t * --|-> Segment VA | Scratchpad (Extra |
562 - * | | | xhci_dma_buffer_t --|-> Segment DMA Buf. | Controller Memory) |
563 - * | | | xhci_ring_t --|--+ | xhci_scratchpad_t |
564 - * | | +--------------------------+ | Scratchpad | |
565 - * | | | Base Array KVA <-+- uint64_t * |
566 - * | +------------+ | Array DMA Buf. <-+- xhci_dma_buffer_t |
567 - * | v | Scratchpad DMA <-+- xhci_dma_buffer_t * |
568 - * | +---------------------------+ | Buffer per page +----------------------+
569 - * | | Command Ring | |
570 - * | | xhci_command_ring_t | +------------------------------+
571 - * | | | |
572 - * | | xhci_ring_t --+-> Command Ring --->------------+
573 - * | | list_t --+-> Command List v
574 - * | | timeout_id_t --+-> Timeout State +---------------------+
575 - * | | xhci_command_ring_state_t +-> State Flags | I/O Ring |
576 - * | +---------------------------+ | xhci_ring_t |
577 - * | | |
578 - * | Ring DMA Buf. <-+-- xhci_dma_buffer_t |
579 - * | Ring Length <-+-- uint_t |
580 - * | Ring Entry KVA <-+-- xhci_trb_t * |
581 - * | +---------------------------+ Ring Head <-+-- uint_t |
582 - * +--->| USBA State | Ring Tail <-+-- uint_t |
583 - * | xhci_usba_t | Ring Cycle <-+-- uint_t |
584 - * | | +---------------------+
585 - * | usba_hcdi_ops_t * -+-> USBA Ops Vector ^
586 - * | usb_dev_dscr_t -+-> USB Virtual Device Descriptor |
587 - * | usb_ss_hub_descr_t -+-> USB Virtual Hub Descriptor |
588 - * | usba_pipe_handle_data_t * +-> Interrupt polling client |
589 - * | usb_intr_req_t -+-> Interrupt polling request |
590 - * | uint32_t --+-> Interrupt polling device mask |
591 - * | list_t --+-> Pipe List (Active Users) |
592 - * | list_t --+-------------------+ |
593 - * +---------------------------+ | ^
594 - * | |
595 - * v |
596 - * +-------------------------------+ +---------------+ |
597 - * | USB Device |------------>| USB Device |--> ... |
598 - * | xhci_device_t | | xhci_device_t | |
599 - * | | +---------------+ |
600 - * | usb_port_t --+-> USB Port plugged into |
601 - * | uint8_t --+-> Slot Number |
602 - * | boolean_t --+-> Address Assigned |
603 - * | usba_device_t * --+-> USBA Device State |
604 - * | xhci_dma_buffer_t --+-> Input Context DMA Buffer |
605 - * | xhci_input_context_t * --+-> Input Context KVA |
606 - * | xhci_slot_contex_t * --+-> Input Slot Context KVA |
607 - * | xhci_endpoint_context_t *[] --+-> Input Endpoint Context KVA |
608 - * | xhci_dma_buffer_t --+-> Output Context DMA Buffer |
609 - * | xhci_slot_context_t * --+-> Output Slot Context KVA ^
610 - * | xhci_endpoint_context_t *[] --+-> Output Endpoint Context KVA |
611 - * | xhci_endpoint_t *[] --+-> Endpoint Tracking ---+ |
612 - * +-------------------------------+ | |
613 - * | |
614 - * v |
615 - * +------------------------------+ +-----------------+ |
616 - * | Endpoint Data |----------->| Endpoint Data |--> ... |
617 - * | xhci_endpoint_t | | xhci_endpoint_t | |
618 - * | | +-----------------+ |
619 - * | int --+-> Endpoint Number |
620 - * | int --+-> Endpoint Type |
621 - * | xhci_endpoint_state_t --+-> Endpoint State |
622 - * | timeout_id_t --+-> Endpoint Timeout State |
623 - * | usba_pipe_handle_data_t * --+-> USBA Client Handle |
624 - * | xhci_ring_t --+-> Endpoint I/O Ring -------->--------+
625 - * | list_t --+-> Transfer List --------+
626 - * +------------------------------+ |
627 - * v
628 - * +-------------------------+ +--------------------+
629 - * | Transfer Structure |----------------->| Transfer Structure |-> ...
630 - * | xhci_transfer_t | | xhci_transfer_t |
631 - * | | +--------------------+
545 + * | xhci_dcbaa_t ---+----------------------------------+
546 + * | xhci_scratchpad_t ---+---------+ |
547 + * | xhci_command_ing_t ---+------+ | v
548 + * | xhci_event_ring_t ---+----+ | | +---------------------+
549 + * | xhci_usba_t ---+--+ | | | | Device Context |
550 + * +------------------------+ | | | | | Base Address |
551 + * | | | | | Array Structure |
552 + * | | | | | xhci_dcbaa_t |
553 + * +-------------------------------+ | | | | |
554 + * | +-------------------------------+ | | DCBAA KVA <-+-- uint64_t * |
555 + * | | +----------------------------+ | DMA Buffer <-+-- xhci_dma_buffer_t |
556 + * | | v | +---------------------+
557 + * | | +--------------------------+ +-----------------------+
558 + * | | | Event Ring | |
559 + * | | | Management | |
560 + * | | | xhci_event_ring_t | v
561 + * | | | | Event Ring +----------------------+
562 + * | | | xhci_event_segment_t * --|-> Segment VA | Scratchpad (Extra |
563 + * | | | xhci_dma_buffer_t --|-> Segment DMA Buf. | Controller Memory) |
564 + * | | | xhci_ring_t --|--+ | xhci_scratchpad_t |
565 + * | | +--------------------------+ | Scratchpad | |
566 + * | | | Base Array KVA <-+- uint64_t * |
567 + * | +------------+ | Array DMA Buf. <-+- xhci_dma_buffer_t |
568 + * | v | Scratchpad DMA <-+- xhci_dma_buffer_t * |
569 + * | +---------------------------+ | Buffer per page +----------------------+
570 + * | | Command Ring | |
571 + * | | xhci_command_ring_t | +------------------------------+
572 + * | | | |
573 + * | | xhci_ring_t --+-> Command Ring --->------------+
574 + * | | list_t --+-> Command List v
575 + * | | timeout_id_t --+-> Timeout State +---------------------+
576 + * | | xhci_command_ring_state_t +-> State Flags | I/O Ring |
577 + * | +---------------------------+ | xhci_ring_t |
578 + * | | |
579 + * | Ring DMA Buf. <-+-- xhci_dma_buffer_t |
580 + * | Ring Length <-+-- uint_t |
581 + * | Ring Entry KVA <-+-- xhci_trb_t * |
582 + * | +---------------------------+ Ring Head <-+-- uint_t |
583 + * +--->| USBA State | Ring Tail <-+-- uint_t |
584 + * | xhci_usba_t | Ring Cycle <-+-- uint_t |
585 + * | | +---------------------+
586 + * | usba_hcdi_ops_t * -+-> USBA Ops Vector ^
587 + * | usb_dev_dscr_t -+-> USB Virtual Device Descriptor |
588 + * | usb_ss_hub_descr_t -+-> USB Virtual Hub Descriptor |
589 + * | usba_pipe_handle_data_t * +-> Interrupt polling client |
590 + * | usb_intr_req_t -+-> Interrupt polling request |
591 + * | uint32_t --+-> Interrupt polling device mask |
592 + * | list_t --+-> Pipe List (Active Users) |
593 + * | list_t --+-------------------+ |
594 + * +---------------------------+ | ^
595 + * | |
596 + * v |
597 + * +-------------------------------+ +---------------+ |
598 + * | USB Device |------------>| USB Device |--> ... |
599 + * | xhci_device_t | | xhci_device_t | |
600 + * | | +---------------+ |
601 + * | usb_port_t --+-> USB Port plugged into |
602 + * | uint8_t --+-> Slot Number |
603 + * | boolean_t --+-> Address Assigned |
604 + * | usba_device_t * --+-> USBA Device State |
605 + * | xhci_dma_buffer_t --+-> Input Context DMA Buffer |
606 + * | xhci_input_context_t * --+-> Input Context KVA |
607 + * | xhci_slot_contex_t * --+-> Input Slot Context KVA |
608 + * | xhci_endpoint_context_t *[] --+-> Input Endpoint Context KVA |
609 + * | xhci_dma_buffer_t --+-> Output Context DMA Buffer |
610 + * | xhci_slot_context_t * --+-> Output Slot Context KVA ^
611 + * | xhci_endpoint_context_t *[] --+-> Output Endpoint Context KVA |
612 + * | xhci_endpoint_t *[] --+-> Endpoint Tracking ---+ |
613 + * +-------------------------------+ | |
614 + * | |
615 + * v |
616 + * +------------------------------+ +-----------------+ |
617 + * | Endpoint Data |----------->| Endpoint Data |--> ... |
618 + * | xhci_endpoint_t | | xhci_endpoint_t | |
619 + * | | +-----------------+ |
620 + * | int --+-> Endpoint Number |
621 + * | int --+-> Endpoint Type |
622 + * | xhci_endpoint_state_t --+-> Endpoint State |
623 + * | timeout_id_t --+-> Endpoint Timeout State |
624 + * | usba_pipe_handle_data_t * --+-> USBA Client Handle |
625 + * | xhci_ring_t --+-> Endpoint I/O Ring -------->--------+
626 + * | list_t --+-> Transfer List --------+
627 + * +------------------------------+ |
628 + * v
629 + * +-------------------------+ +--------------------+
630 + * | Transfer Structure |----------------->| Transfer Structure |-> ...
631 + * | xhci_transfer_t | | xhci_transfer_t |
632 + * | | +--------------------+
632 633 * | xhci_dma_buffer_t --+-> I/O DMA Buffer
633 - * | uint_t --+-> Number of TRBs
634 - * | uint_t --+-> Short transfer data
635 - * | uint_t --+-> Timeout seconds remaining
636 - * | usb_cr_t --+-> USB Transfer return value
637 - * | boolean_t --+-> Data direction
638 - * | xhci_trb_t * --+-> Host-order transfer requests for I/O
634 + * | uint_t --+-> Number of TRBs
635 + * | uint_t --+-> Short transfer data
636 + * | uint_t --+-> Timeout seconds remaining
637 + * | usb_cr_t --+-> USB Transfer return value
638 + * | boolean_t --+-> Data direction
639 + * | xhci_trb_t * --+-> Host-order transfer requests for I/O
639 640 * | usb_isoc_pkt_descr_t * -+-> Isochronous only response data
640 - * | usb_opaque_t --+-> USBA Request Handle
641 + * | usb_opaque_t --+-> USBA Request Handle
641 642 * +-------------------------+
642 643 *
643 644 * -------------
644 645 * Lock Ordering
645 646 * -------------
646 647 *
647 648 * There are three different tiers of locks that exist in the driver. First,
648 649 * there is a lock for each controller: xhci_t`xhci_lock. This protects all the
649 650 * data for that instance of the controller. If there are multiple instances of
650 651 * the xHCI controller in the system, each one is independent and protected
651 652 * separately. The two do not share any data.
652 653 *
653 654 * From there, there are two other, specific locks in the system:
654 655 *
655 656 * o xhci_command_ring_t`xcr_lock
656 657 * o xhci_device_t`xd_imtx
657 658 *
658 659 * There is only one xcr_lock per controller, like the xhci_lock. It protects
659 660 * the state of the command ring. However, there is one xd_imtx per device.
660 661 * Recall that each device is scoped to a given controller. This protects the
661 662 * input slot context for a given device.
662 663 *
663 664 * There are a few important rules to keep in mind here that are true
664 665 * universally throughout the driver:
665 666 *
666 667 * 1) Always grab the xhci_t`xhci_lock, before grabbing any of the other locks.
667 668 * 2) A given xhci_device_t`xd_imtx, must be taken before grabbing the
668 669 * xhci_command_ring_t`xcr_lock.
669 670 * 3) A given thread can only hold one of the given xhci_device_t`xd_imtx locks
670 671 * at a given time. In other words, we should never be manipulating the input
671 672 * context of two different devices at once.
672 673 * 4) It is safe to hold the xhci_device_t`xd_imtx while tearing down the
673 674 * endpoint timer. Conversely, the endpoint specific logic should never enter
674 675 * this lock.
675 676 *
676 677 * --------------------
677 678 * Relationship to EHCI
678 679 * --------------------
679 680 *
680 681 * On some Intel chipsets, a given physical port on the system may be routed to
681 682 * one of the EHCI or xHCI controllers. This association can be dynamically
682 683 * changed by writing to platform specific registers as handled by the quirk
683 684 * logic in xhci_quirk.c.
684 685 *
685 686 * As these ports may support USB 3.x speeds, we always route all such ports to
686 687 * the xHCI controller, when supported. In addition, to minimize disruptions
687 688 * from devices being enumerated and attached to the EHCI driver and then
688 689 * disappearing, we generally attempt to load the xHCI controller before the
689 690 * EHCI controller. This logic is not done in the driver; however, it is done in
690 691 * other parts of the kernel like in uts/common/io/consconfig_dacf.c in the
691 692 * function consconfig_load_drivers().
692 693 *
693 694 * -----------
694 695 * Future Work
695 696 * -----------
696 697 *
697 698 * The primary future work in this driver spans two different, but related
698 699 * areas. The first area is around controller resets and how they tie into FM.
699 700 * Presently, we do not have a good way to handle controllers coming and going
700 701 * in the broader USB stack or properly reconfigure the device after a reset.
701 702 * Secondly, we don't handle the suspend and resume of devices and drivers.
702 703 */
703 704
704 705 #include <sys/param.h>
705 706 #include <sys/modctl.h>
706 707 #include <sys/conf.h>
707 708 #include <sys/devops.h>
708 709 #include <sys/ddi.h>
709 710 #include <sys/sunddi.h>
710 711 #include <sys/cmn_err.h>
711 712 #include <sys/ddifm.h>
712 713 #include <sys/pci.h>
713 714 #include <sys/class.h>
714 715 #include <sys/policy.h>
715 716
716 717 #include <sys/usb/hcd/xhci/xhci.h>
717 718 #include <sys/usb/hcd/xhci/xhci_ioctl.h>
718 719
719 720 /*
720 721 * We want to use the first BAR to access its registers. The regs[] array is
721 722 * ordered based on the rules for the PCI supplement to IEEE 1275. So regs[1]
722 723 * will always be the first BAR.
723 724 */
724 725 #define XHCI_REG_NUMBER 1
725 726
726 727 /*
727 728 * This task queue exists as a global taskq that is used for resetting the
728 729 * device in the face of FM or runtime errors. Each instance of the device
729 730 * (xhci_t) happens to have a single taskq_dispatch_ent already allocated so we
730 731 * know that we should always be able to dispatch such an event.
731 732 */
732 733 static taskq_t *xhci_taskq;
733 734
734 735 /*
735 736 * Global soft state for per-instance data. Note that we must use the soft state
736 737 * routines and cannot use the ddi_set_driver_private() routines. The USB
737 738 * framework presumes that it can use the dip's private data.
738 739 */
739 740 void *xhci_soft_state;
740 741
741 742 /*
742 743 * This is the time in us that we wait after a controller resets before we
743 744 * consider reading any register. There are some controllers that want at least
744 745 * 1 ms, therefore we default to 10 ms.
745 746 */
746 747 clock_t xhci_reset_delay = 10000;
747 748
748 749 void
749 750 xhci_error(xhci_t *xhcip, const char *fmt, ...)
750 751 {
751 752 va_list ap;
752 753
753 754 va_start(ap, fmt);
754 755 if (xhcip != NULL && xhcip->xhci_dip != NULL) {
755 756 vdev_err(xhcip->xhci_dip, CE_WARN, fmt, ap);
756 757 } else {
757 758 vcmn_err(CE_WARN, fmt, ap);
758 759 }
759 760 va_end(ap);
760 761 }
761 762
762 763 void
763 764 xhci_log(xhci_t *xhcip, const char *fmt, ...)
764 765 {
765 766 va_list ap;
766 767
767 768 va_start(ap, fmt);
768 769 if (xhcip != NULL && xhcip->xhci_dip != NULL) {
769 770 vdev_err(xhcip->xhci_dip, CE_NOTE, fmt, ap);
770 771 } else {
771 772 vcmn_err(CE_NOTE, fmt, ap);
772 773 }
773 774 va_end(ap);
774 775 }
775 776
776 777 /*
777 778 * USBA is in charge of creating device nodes for us. USBA explicitly ORs in the
778 779 * constant HUBD_IS_ROOT_HUB, so we have to undo that when we're looking at
779 780 * things here. A simple bitwise-and will take care of this. And hey, it could
780 781 * always be more complex, USBA could clone!
781 782 */
782 783 static dev_info_t *
783 784 xhci_get_dip(dev_t dev)
784 785 {
785 786 xhci_t *xhcip;
786 787 int instance = getminor(dev) & ~HUBD_IS_ROOT_HUB;
787 788
788 789 xhcip = ddi_get_soft_state(xhci_soft_state, instance);
789 790 if (xhcip != NULL)
790 791 return (xhcip->xhci_dip);
791 792 return (NULL);
792 793 }
793 794
794 795 uint8_t
795 796 xhci_get8(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
796 797 {
797 798 uintptr_t addr, roff;
798 799
799 800 switch (rtt) {
800 801 case XHCI_R_CAP:
801 802 roff = xhcip->xhci_regs_capoff;
802 803 break;
803 804 case XHCI_R_OPER:
804 805 roff = xhcip->xhci_regs_operoff;
805 806 break;
806 807 case XHCI_R_RUN:
807 808 roff = xhcip->xhci_regs_runoff;
808 809 break;
809 810 case XHCI_R_DOOR:
810 811 roff = xhcip->xhci_regs_dooroff;
811 812 break;
812 813 default:
813 814 panic("called %s with bad reg type: %d", __func__, rtt);
814 815 }
815 816 ASSERT(roff != PCI_EINVAL32);
816 817 addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
817 818
818 819 return (ddi_get8(xhcip->xhci_regs_handle, (void *)addr));
819 820 }
820 821
821 822 uint16_t
822 823 xhci_get16(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
823 824 {
824 825 uintptr_t addr, roff;
825 826
826 827 switch (rtt) {
827 828 case XHCI_R_CAP:
828 829 roff = xhcip->xhci_regs_capoff;
829 830 break;
830 831 case XHCI_R_OPER:
831 832 roff = xhcip->xhci_regs_operoff;
832 833 break;
833 834 case XHCI_R_RUN:
834 835 roff = xhcip->xhci_regs_runoff;
835 836 break;
836 837 case XHCI_R_DOOR:
837 838 roff = xhcip->xhci_regs_dooroff;
838 839 break;
839 840 default:
840 841 panic("called %s with bad reg type: %d", __func__, rtt);
841 842 }
842 843 ASSERT(roff != PCI_EINVAL32);
843 844 addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
844 845
845 846 return (ddi_get16(xhcip->xhci_regs_handle, (void *)addr));
846 847 }
847 848
848 849 uint32_t
849 850 xhci_get32(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
850 851 {
851 852 uintptr_t addr, roff;
852 853
853 854 switch (rtt) {
854 855 case XHCI_R_CAP:
855 856 roff = xhcip->xhci_regs_capoff;
856 857 break;
857 858 case XHCI_R_OPER:
858 859 roff = xhcip->xhci_regs_operoff;
859 860 break;
860 861 case XHCI_R_RUN:
861 862 roff = xhcip->xhci_regs_runoff;
862 863 break;
863 864 case XHCI_R_DOOR:
864 865 roff = xhcip->xhci_regs_dooroff;
865 866 break;
866 867 default:
867 868 panic("called %s with bad reg type: %d", __func__, rtt);
868 869 }
869 870 ASSERT(roff != PCI_EINVAL32);
870 871 addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
871 872
872 873 return (ddi_get32(xhcip->xhci_regs_handle, (void *)addr));
873 874 }
874 875
875 876 uint64_t
876 877 xhci_get64(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off)
877 878 {
878 879 uintptr_t addr, roff;
879 880
880 881 switch (rtt) {
881 882 case XHCI_R_CAP:
882 883 roff = xhcip->xhci_regs_capoff;
883 884 break;
884 885 case XHCI_R_OPER:
885 886 roff = xhcip->xhci_regs_operoff;
886 887 break;
887 888 case XHCI_R_RUN:
888 889 roff = xhcip->xhci_regs_runoff;
889 890 break;
890 891 case XHCI_R_DOOR:
891 892 roff = xhcip->xhci_regs_dooroff;
892 893 break;
893 894 default:
894 895 panic("called %s with bad reg type: %d", __func__, rtt);
895 896 }
896 897 ASSERT(roff != PCI_EINVAL32);
897 898 addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
898 899
899 900 return (ddi_get64(xhcip->xhci_regs_handle, (void *)addr));
900 901 }
901 902
902 903 void
903 904 xhci_put8(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint8_t val)
904 905 {
905 906 uintptr_t addr, roff;
906 907
907 908 switch (rtt) {
908 909 case XHCI_R_CAP:
909 910 roff = xhcip->xhci_regs_capoff;
910 911 break;
911 912 case XHCI_R_OPER:
912 913 roff = xhcip->xhci_regs_operoff;
913 914 break;
914 915 case XHCI_R_RUN:
915 916 roff = xhcip->xhci_regs_runoff;
916 917 break;
917 918 case XHCI_R_DOOR:
918 919 roff = xhcip->xhci_regs_dooroff;
919 920 break;
920 921 default:
921 922 panic("called %s with bad reg type: %d", __func__, rtt);
922 923 }
923 924 ASSERT(roff != PCI_EINVAL32);
924 925 addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
925 926
926 927 ddi_put8(xhcip->xhci_regs_handle, (void *)addr, val);
927 928 }
928 929
929 930 void
930 931 xhci_put16(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint16_t val)
931 932 {
932 933 uintptr_t addr, roff;
933 934
934 935 switch (rtt) {
935 936 case XHCI_R_CAP:
936 937 roff = xhcip->xhci_regs_capoff;
937 938 break;
938 939 case XHCI_R_OPER:
939 940 roff = xhcip->xhci_regs_operoff;
940 941 break;
941 942 case XHCI_R_RUN:
942 943 roff = xhcip->xhci_regs_runoff;
943 944 break;
944 945 case XHCI_R_DOOR:
945 946 roff = xhcip->xhci_regs_dooroff;
946 947 break;
947 948 default:
948 949 panic("called %s with bad reg type: %d", __func__, rtt);
949 950 }
950 951 ASSERT(roff != PCI_EINVAL32);
951 952 addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
952 953
953 954 ddi_put16(xhcip->xhci_regs_handle, (void *)addr, val);
954 955 }
955 956
956 957 void
957 958 xhci_put32(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint32_t val)
958 959 {
959 960 uintptr_t addr, roff;
960 961
961 962 switch (rtt) {
962 963 case XHCI_R_CAP:
963 964 roff = xhcip->xhci_regs_capoff;
964 965 break;
965 966 case XHCI_R_OPER:
966 967 roff = xhcip->xhci_regs_operoff;
967 968 break;
968 969 case XHCI_R_RUN:
969 970 roff = xhcip->xhci_regs_runoff;
970 971 break;
971 972 case XHCI_R_DOOR:
972 973 roff = xhcip->xhci_regs_dooroff;
973 974 break;
974 975 default:
975 976 panic("called %s with bad reg type: %d", __func__, rtt);
976 977 }
977 978 ASSERT(roff != PCI_EINVAL32);
978 979 addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
979 980
980 981 ddi_put32(xhcip->xhci_regs_handle, (void *)addr, val);
981 982 }
982 983
983 984 void
984 985 xhci_put64(xhci_t *xhcip, xhci_reg_type_t rtt, uintptr_t off, uint64_t val)
985 986 {
986 987 uintptr_t addr, roff;
987 988
988 989 switch (rtt) {
989 990 case XHCI_R_CAP:
990 991 roff = xhcip->xhci_regs_capoff;
991 992 break;
992 993 case XHCI_R_OPER:
993 994 roff = xhcip->xhci_regs_operoff;
994 995 break;
995 996 case XHCI_R_RUN:
996 997 roff = xhcip->xhci_regs_runoff;
997 998 break;
998 999 case XHCI_R_DOOR:
999 1000 roff = xhcip->xhci_regs_dooroff;
1000 1001 break;
1001 1002 default:
1002 1003 panic("called %s with bad reg type: %d", __func__, rtt);
1003 1004 }
1004 1005 ASSERT(roff != PCI_EINVAL32);
1005 1006 addr = roff + off + (uintptr_t)xhcip->xhci_regs_base;
↓ open down ↓ |
355 lines elided |
↑ open up ↑ |
1006 1007
1007 1008 ddi_put64(xhcip->xhci_regs_handle, (void *)addr, val);
1008 1009 }
1009 1010
/*
 * Check the register access handle for a latched FM access error and return
 * the resulting status (DDI_FM_OK on success). Any latched error is cleared
 * as a side effect, so the next check starts from a clean slate.
 */
int
xhci_check_regs_acc(xhci_t *xhcip)
{
	ddi_fm_error_t de;

	/*
	 * Treat cases where we can't check as fine so we can treat the code
	 * more simply. That is the case when FM access checking is not
	 * enabled for this instance, and also while a system quiesce (e.g.
	 * for fast reboot) is in progress. NOTE(review): presumably the FM
	 * framework must not be invoked from quiesce context -- confirm.
	 */
	if (quiesce_active || !DDI_FM_ACC_ERR_CAP(xhcip->xhci_fm_caps))
		return (DDI_FM_OK);

	ddi_fm_acc_err_get(xhcip->xhci_regs_handle, &de, DDI_FME_VERSION);
	ddi_fm_acc_err_clear(xhcip->xhci_regs_handle, DDI_FME_VERSION);
	return (de.fme_status);
}
1026 1027
/*
 * FM error callback. As a leaf PCIe driver, we just post the ereport and
 * continue on; recovery, if any, is driven elsewhere.
 */
/* ARGSUSED */
static int
xhci_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
{
	pci_ereport_post(dip, err, NULL);
	return (err->fme_status);
}
1037 1038
1038 1039 static void
1039 1040 xhci_fm_fini(xhci_t *xhcip)
1040 1041 {
1041 1042 if (xhcip->xhci_fm_caps == 0)
1042 1043 return;
1043 1044
1044 1045 if (DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps))
1045 1046 ddi_fm_handler_unregister(xhcip->xhci_dip);
1046 1047
1047 1048 if (DDI_FM_EREPORT_CAP(xhcip->xhci_fm_caps) ||
1048 1049 DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps))
1049 1050 pci_ereport_teardown(xhcip->xhci_dip);
1050 1051
1051 1052 ddi_fm_fini(xhcip->xhci_dip);
1052 1053 }
1053 1054
1054 1055 static void
1055 1056 xhci_fm_init(xhci_t *xhcip)
1056 1057 {
1057 1058 ddi_iblock_cookie_t iblk;
1058 1059 int def = DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
1059 1060 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE;
1060 1061
1061 1062 xhcip->xhci_fm_caps = ddi_prop_get_int(DDI_DEV_T_ANY, xhcip->xhci_dip,
1062 1063 DDI_PROP_DONTPASS, "fm_capable", def);
1063 1064
1064 1065 if (xhcip->xhci_fm_caps < 0) {
1065 1066 xhcip->xhci_fm_caps = 0;
1066 1067 } else if (xhcip->xhci_fm_caps & ~def) {
1067 1068 xhcip->xhci_fm_caps &= def;
1068 1069 }
1069 1070
1070 1071 if (xhcip->xhci_fm_caps == 0)
1071 1072 return;
1072 1073
1073 1074 ddi_fm_init(xhcip->xhci_dip, &xhcip->xhci_fm_caps, &iblk);
1074 1075 if (DDI_FM_EREPORT_CAP(xhcip->xhci_fm_caps) ||
1075 1076 DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps)) {
1076 1077 pci_ereport_setup(xhcip->xhci_dip);
1077 1078 }
1078 1079
1079 1080 if (DDI_FM_ERRCB_CAP(xhcip->xhci_fm_caps)) {
1080 1081 ddi_fm_handler_register(xhcip->xhci_dip,
1081 1082 xhci_fm_error_cb, xhcip);
1082 1083 }
1083 1084 }
1084 1085
1085 1086 static int
1086 1087 xhci_reg_poll(xhci_t *xhcip, xhci_reg_type_t rt, int reg, uint32_t mask,
1087 1088 uint32_t targ, uint_t tries, int delay_ms)
1088 1089 {
1089 1090 uint_t i;
1090 1091
1091 1092 for (i = 0; i < tries; i++) {
1092 1093 uint32_t val = xhci_get32(xhcip, rt, reg);
1093 1094 if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1094 1095 ddi_fm_service_impact(xhcip->xhci_dip,
1095 1096 DDI_SERVICE_LOST);
1096 1097 return (EIO);
1097 1098 }
1098 1099
1099 1100 if ((val & mask) == targ)
1100 1101 return (0);
1101 1102
1102 1103 delay(drv_usectohz(delay_ms * 1000));
1103 1104 }
1104 1105 return (ETIMEDOUT);
1105 1106 }
1106 1107
1107 1108 static boolean_t
1108 1109 xhci_regs_map(xhci_t *xhcip)
1109 1110 {
1110 1111 off_t memsize;
1111 1112 int ret;
1112 1113 ddi_device_acc_attr_t da;
1113 1114
1114 1115 if (ddi_dev_regsize(xhcip->xhci_dip, XHCI_REG_NUMBER, &memsize) !=
1115 1116 DDI_SUCCESS) {
1116 1117 xhci_error(xhcip, "failed to get register set size");
1117 1118 return (B_FALSE);
1118 1119 }
1119 1120
1120 1121 bzero(&da, sizeof (ddi_device_acc_attr_t));
1121 1122 da.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1122 1123 da.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1123 1124 da.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1124 1125 if (DDI_FM_ACC_ERR_CAP(xhcip->xhci_fm_caps)) {
1125 1126 da.devacc_attr_access = DDI_FLAGERR_ACC;
1126 1127 } else {
1127 1128 da.devacc_attr_access = DDI_DEFAULT_ACC;
1128 1129 }
1129 1130
1130 1131 ret = ddi_regs_map_setup(xhcip->xhci_dip, XHCI_REG_NUMBER,
1131 1132 &xhcip->xhci_regs_base, 0, memsize, &da, &xhcip->xhci_regs_handle);
1132 1133
1133 1134 if (ret != DDI_SUCCESS) {
1134 1135 xhci_error(xhcip, "failed to map device registers: %d", ret);
1135 1136 return (B_FALSE);
1136 1137 }
1137 1138
1138 1139 return (B_TRUE);
1139 1140 }
1140 1141
/*
 * Record the offsets of the four register groups relative to the base of the
 * mapped BAR. Returns B_TRUE on success; B_FALSE if an FM access error was
 * latched while reading the offset registers.
 */
static boolean_t
xhci_regs_init(xhci_t *xhcip)
{
	/*
	 * The capabilities always begin at offset zero.
	 */
	xhcip->xhci_regs_capoff = 0;
	xhcip->xhci_regs_operoff = xhci_get8(xhcip, XHCI_R_CAP, XHCI_CAPLENGTH);
	xhcip->xhci_regs_runoff = xhci_get32(xhcip, XHCI_R_CAP, XHCI_RTSOFF);
	/* The low 5 bits of RTSOFF are reserved per the xHCI spec. */
	xhcip->xhci_regs_runoff &= ~0x1f;
	xhcip->xhci_regs_dooroff = xhci_get32(xhcip, XHCI_R_CAP, XHCI_DBOFF);
	/* Likewise, the low 2 bits of DBOFF are reserved. */
	xhcip->xhci_regs_dooroff &= ~0x3;

	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to initialize controller register "
		    "offsets: encountered FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}

	return (B_TRUE);
}
1163 1164
/*
 * Read various parameters from PCI configuration space and from the Capability
 * registers that we'll need to register the device. We cache all of the
 * Capability registers. Returns B_FALSE on a fatal FM access error or when
 * the controller is a version or configuration we can't support.
 */
static boolean_t
xhci_read_params(xhci_t *xhcip)
{
	uint8_t usb;
	uint16_t vers;
	uint32_t struc1, struc2, struc3, cap1, cap2, pgsz;
	uint32_t psize, pbit, capreg;
	xhci_capability_t *xcap;
	unsigned long ps;

	/*
	 * While it's tempting to do a 16-bit read at offset 0x2, unfortunately,
	 * a few emulated systems don't support reading at offset 0x2 for the
	 * version. Instead we need to read the caplength register and get the
	 * upper two bytes.
	 */
	capreg = xhci_get32(xhcip, XHCI_R_CAP, XHCI_CAPLENGTH);
	vers = XHCI_VERSION_MASK(capreg);
	usb = pci_config_get8(xhcip->xhci_cfg_handle, PCI_XHCI_USBREV);
	struc1 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS1);
	struc2 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS2);
	struc3 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCSPARAMS3);
	cap1 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCCPARAMS1);
	cap2 = xhci_get32(xhcip, XHCI_R_CAP, XHCI_HCCPARAMS2);
	pgsz = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PAGESIZE);
	/*
	 * Check for FM access errors from the whole batch of reads above
	 * before acting on any of the values.
	 */
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to read controller parameters: "
		    "encountered FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}

	/*
	 * Decode and cache the capability fields for the rest of the driver.
	 */
	xcap = &xhcip->xhci_caps;
	xcap->xcap_usb_vers = usb;
	xcap->xcap_hci_vers = vers;
	xcap->xcap_max_slots = XHCI_HCS1_DEVSLOT_MAX(struc1);
	xcap->xcap_max_intrs = XHCI_HCS1_IRQ_MAX(struc1);
	xcap->xcap_max_ports = XHCI_HCS1_N_PORTS(struc1);
	/*
	 * Clamp the port count to what the driver's static port bookkeeping
	 * supports; we log this rather than failing attach.
	 */
	if (xcap->xcap_max_ports > MAX_PORTS) {
		xhci_error(xhcip, "Root hub has %d ports, but system only "
		    "supports %d, limiting to %d\n", xcap->xcap_max_ports,
		    MAX_PORTS, MAX_PORTS);
		xcap->xcap_max_ports = MAX_PORTS;
	}

	xcap->xcap_ist_micro = XHCI_HCS2_IST_MICRO(struc2);
	xcap->xcap_ist = XHCI_HCS2_IST(struc2);
	xcap->xcap_max_esrt = XHCI_HCS2_ERST_MAX(struc2);
	xcap->xcap_scratch_restore = XHCI_HCS2_SPR(struc2);
	xcap->xcap_max_scratch = XHCI_HCS2_SPB_MAX(struc2);

	xcap->xcap_u1_lat = XHCI_HCS3_U1_DEL(struc3);
	xcap->xcap_u2_lat = XHCI_HCS3_U2_DEL(struc3);

	xcap->xcap_flags = XHCI_HCC1_FLAGS_MASK(cap1);
	xcap->xcap_max_psa = XHCI_HCC1_PSA_SZ_MAX(cap1);
	xcap->xcap_xecp_off = XHCI_HCC1_XECP(cap1);
	xcap->xcap_flags2 = XHCI_HCC2_FLAGS_MASK(cap2);

	/*
	 * We don't have documentation for what changed from before xHCI 0.96,
	 * so we just refuse to support versions before 0.96. We also will
	 * ignore anything with a major version greater than 1.
	 */
	if (xcap->xcap_hci_vers < 0x96 || xcap->xcap_hci_vers >= 0x200) {
		xhci_error(xhcip, "Encountered unsupported xHCI version 0.%2x",
		    xcap->xcap_hci_vers);
		return (B_FALSE);
	}

	/*
	 * Determine the smallest size page that the controller supports and
	 * make sure that it matches our pagesize. We basically check here for
	 * the presence of 4k and 8k pages. The basis of the pagesize is used
	 * extensively throughout the code and specification. While we could
	 * support other page sizes here, given that we don't support systems
	 * with it at this time, it doesn't make much sense.
	 */
	ps = PAGESIZE;
	if (ps == 0x1000) {
		pbit = XHCI_PAGESIZE_4K;
		psize = 0x1000;
	} else if (ps == 0x2000) {
		pbit = XHCI_PAGESIZE_8K;
		psize = 0x2000;
	} else {
		xhci_error(xhcip, "Encountered host page size that the driver "
		    "doesn't know how to handle: %lx\n", ps);
		return (B_FALSE);
	}

	/* The PAGESIZE register is a bitmap of supported page sizes. */
	if (!(pgsz & pbit)) {
		xhci_error(xhcip, "Encountered controller that didn't support "
		    "the host page size (%d), supports: %x", psize, pgsz);
		return (B_FALSE);
	}
	xcap->xcap_pagesize = psize;

	return (B_TRUE);
}
1269 1270
1270 1271 /*
1271 1272 * Apply known workarounds and issues. These reports come from other
1272 1273 * Operating Systems and have been collected over time.
1273 1274 */
1274 1275 static boolean_t
1275 1276 xhci_identify(xhci_t *xhcip)
1276 1277 {
1277 1278 xhci_quirks_populate(xhcip);
1278 1279
1279 1280 if (xhcip->xhci_quirks & XHCI_QUIRK_NO_MSI) {
1280 1281 xhcip->xhci_caps.xcap_intr_types = DDI_INTR_TYPE_FIXED;
1281 1282 } else {
1282 1283 xhcip->xhci_caps.xcap_intr_types = DDI_INTR_TYPE_FIXED |
1283 1284 DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX;
1284 1285 }
1285 1286
1286 1287 if (xhcip->xhci_quirks & XHCI_QUIRK_32_ONLY) {
1287 1288 xhcip->xhci_caps.xcap_flags &= ~XCAP_AC64;
1288 1289 }
1289 1290
1290 1291 return (B_TRUE);
1291 1292 }
1292 1293
1293 1294 static boolean_t
1294 1295 xhci_alloc_intr_handle(xhci_t *xhcip, int type)
1295 1296 {
1296 1297 int ret;
1297 1298
1298 1299 /*
1299 1300 * Normally a well-behaving driver would more carefully request an
1300 1301 * amount of interrupts based on the number available, etc. But since we
1301 1302 * only actually want a single interrupt, we're just going to go ahead
1302 1303 * and ask for a single interrupt.
1303 1304 */
1304 1305 ret = ddi_intr_alloc(xhcip->xhci_dip, &xhcip->xhci_intr_hdl, type, 0,
1305 1306 XHCI_NINTR, &xhcip->xhci_intr_num, DDI_INTR_ALLOC_NORMAL);
1306 1307 if (ret != DDI_SUCCESS) {
1307 1308 xhci_log(xhcip, "!failed to allocate interrupts of type %d: %d",
1308 1309 type, ret);
1309 1310 return (B_FALSE);
1310 1311 }
1311 1312 xhcip->xhci_intr_type = type;
1312 1313
1313 1314 return (B_TRUE);
1314 1315 }
1315 1316
1316 1317 static boolean_t
1317 1318 xhci_alloc_intrs(xhci_t *xhcip)
1318 1319 {
1319 1320 int intr_types, ret;
1320 1321
1321 1322 if (XHCI_NINTR > xhcip->xhci_caps.xcap_max_intrs) {
1322 1323 xhci_error(xhcip, "controller does not support the minimum "
1323 1324 "number of interrupts required (%d), supports %d",
1324 1325 XHCI_NINTR, xhcip->xhci_caps.xcap_max_intrs);
1325 1326 return (B_FALSE);
1326 1327 }
1327 1328
1328 1329 if ((ret = ddi_intr_get_supported_types(xhcip->xhci_dip,
1329 1330 &intr_types)) != DDI_SUCCESS) {
1330 1331 xhci_error(xhcip, "failed to get supported interrupt types: "
1331 1332 "%d", ret);
1332 1333 return (B_FALSE);
1333 1334 }
1334 1335
1335 1336 /*
1336 1337 * Mask off interrupt types we've already ruled out due to quirks or
1337 1338 * other reasons.
1338 1339 */
1339 1340 intr_types &= xhcip->xhci_caps.xcap_intr_types;
1340 1341 if (intr_types & DDI_INTR_TYPE_MSIX) {
1341 1342 if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_MSIX))
1342 1343 return (B_TRUE);
1343 1344 }
1344 1345
1345 1346 if (intr_types & DDI_INTR_TYPE_MSI) {
1346 1347 if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_MSI))
1347 1348 return (B_TRUE);
1348 1349 }
1349 1350
1350 1351 if (intr_types & DDI_INTR_TYPE_FIXED) {
1351 1352 if (xhci_alloc_intr_handle(xhcip, DDI_INTR_TYPE_FIXED))
1352 1353 return (B_TRUE);
1353 1354 }
1354 1355
1355 1356 xhci_error(xhcip, "failed to allocate an interrupt, supported types: "
1356 1357 "0x%x", intr_types);
1357 1358 return (B_FALSE);
1358 1359 }
1359 1360
1360 1361 static boolean_t
1361 1362 xhci_add_intr_handler(xhci_t *xhcip)
1362 1363 {
1363 1364 int ret;
1364 1365
1365 1366 if ((ret = ddi_intr_get_pri(xhcip->xhci_intr_hdl,
1366 1367 &xhcip->xhci_intr_pri)) != DDI_SUCCESS) {
1367 1368 xhci_error(xhcip, "failed to get interrupt priority: %d", ret);
1368 1369 return (B_FALSE);
1369 1370 }
1370 1371
1371 1372 if ((ret = ddi_intr_get_cap(xhcip->xhci_intr_hdl,
1372 1373 &xhcip->xhci_intr_caps)) != DDI_SUCCESS) {
1373 1374 xhci_error(xhcip, "failed to get interrupt capabilities: %d",
1374 1375 ret);
1375 1376 return (B_FALSE);
1376 1377 }
1377 1378
1378 1379 if ((ret = ddi_intr_add_handler(xhcip->xhci_intr_hdl, xhci_intr, xhcip,
1379 1380 (uintptr_t)0)) != DDI_SUCCESS) {
1380 1381 xhci_error(xhcip, "failed to add interrupt handler: %d", ret);
1381 1382 return (B_FALSE);
1382 1383 }
1383 1384 return (B_TRUE);
1384 1385 }
1385 1386
/*
 * Find a capability with an identifier whose value is 'id'. The 'init' argument
 * gives us the offset to start searching at. See xHCI 1.1 / 7 for more
 * information. This is more or less exactly like PCI capabilities.
 */
static boolean_t
xhci_find_ext_cap(xhci_t *xhcip, uint32_t id, uint32_t init, uint32_t *outp)
{
	uint32_t off;
	uint8_t next = 0;

	/*
	 * If we have no offset, we're done.
	 */
	if (xhcip->xhci_caps.xcap_xecp_off == 0)
		return (B_FALSE);

	/* The XECP offset is in 32-bit words, hence the shift by two. */
	off = xhcip->xhci_caps.xcap_xecp_off << 2;
	do {
		uint32_t cap_hdr;

		off += next << 2;
		cap_hdr = xhci_get32(xhcip, XHCI_R_CAP, off);
		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
			xhci_error(xhcip, "failed to read xhci extended "
			    "capabilities at offset 0x%x: encountered FM "
			    "register error", off);
			ddi_fm_service_impact(xhcip->xhci_dip,
			    DDI_SERVICE_LOST);
			break;
		}

		/* An all-ones read indicates invalid register space. */
		if (cap_hdr == PCI_EINVAL32)
			break;
		/*
		 * An 'init' of UINT32_MAX means match anywhere; otherwise
		 * only match capabilities strictly past 'init' so callers
		 * can iterate by passing the previous result back in.
		 */
		if (XHCI_XECP_ID(cap_hdr) == id &&
		    (init == UINT32_MAX || off > init)) {
			*outp = off;
			return (B_TRUE);
		}
		next = XHCI_XECP_NEXT(cap_hdr);
		/*
		 * Watch out for overflow if we somehow end up with a more than
		 * 2 GiB space.
		 */
		if (next << 2 > (INT32_MAX - off))
			return (B_FALSE);
	} while (next != 0);

	return (B_FALSE);
}
1436 1437
/*
 * For mostly information purposes, we'd like to walk to augment the devinfo
 * tree with the number of ports that support USB 2 and USB 3. Note though that
 * these ports may be overlapping. Many ports can support both USB 2 and USB 3
 * and are wired up to the same physical port, even though they show up as
 * separate 'ports' in the xhci sense.
 */
static boolean_t
xhci_port_count(xhci_t *xhcip)
{
	uint_t nusb2 = 0, nusb3 = 0;
	uint32_t off = UINT32_MAX;

	/*
	 * Iterate every "Supported Protocol" extended capability; each one
	 * describes a contiguous range of ports for one USB major version.
	 */
	while (xhci_find_ext_cap(xhcip, XHCI_ID_PROTOCOLS, off, &off) ==
	    B_TRUE) {
		uint32_t rvers, rport;

		/*
		 * See xHCI 1.1 / 7.2 for the format of this. The first uint32_t
		 * has version information while the third uint32_t has the port
		 * count.
		 */
		rvers = xhci_get32(xhcip, XHCI_R_CAP, off);
		rport = xhci_get32(xhcip, XHCI_R_CAP, off + 8);
		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
			xhci_error(xhcip, "failed to read xhci port counts: "
			    "encountered fatal FM register error");
			ddi_fm_service_impact(xhcip->xhci_dip,
			    DDI_SERVICE_LOST);
			return (B_FALSE);
		}

		rvers = XHCI_XECP_PROT_MAJOR(rvers);
		rport = XHCI_XECP_PROT_PCOUNT(rport);

		if (rvers == 3) {
			nusb3 += rport;
		} else if (rvers <= 2) {
			nusb2 += rport;
		} else {
			xhci_error(xhcip, "encountered port capabilities with "
			    "unknown major USB version: %d\n", rvers);
		}
	}

	/* Export the totals as devinfo properties for observability. */
	(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
	    "usb2-capable-ports", nusb2);
	(void) ddi_prop_update_int(DDI_DEV_T_NONE, xhcip->xhci_dip,
	    "usb3-capable-ports", nusb3);

	return (B_TRUE);
}
1489 1490
/*
 * Take over control from the BIOS or other firmware, if applicable. This uses
 * the USB Legacy Support extended capability: we set the OS-owned semaphore
 * bit and wait for firmware to drop its BIOS-owned bit, forcibly clearing it
 * if firmware never responds.
 */
static boolean_t
xhci_controller_takeover(xhci_t *xhcip)
{
	int ret;
	uint32_t val, off;

	/*
	 * If we can't find the legacy capability, then there's nothing to do.
	 */
	if (xhci_find_ext_cap(xhcip, XHCI_ID_USB_LEGACY, UINT32_MAX, &off) ==
	    B_FALSE)
		return (B_TRUE);
	val = xhci_get32(xhcip, XHCI_R_CAP, off);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to read BIOS take over registers: "
		    "encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}

	if (val & XHCI_BIOS_OWNED) {
		/* Request ownership by setting the OS-owned semaphore. */
		val |= XHCI_OS_OWNED;
		xhci_put32(xhcip, XHCI_R_CAP, off, val);
		if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
			xhci_error(xhcip, "failed to write BIOS take over "
			    "registers: encountered fatal FM register error");
			ddi_fm_service_impact(xhcip->xhci_dip,
			    DDI_SERVICE_LOST);
			return (B_FALSE);
		}

		/*
		 * Wait up to 5 seconds for things to change. While this number
		 * isn't specified in the xHCI spec, it seems to be the de facto
		 * value that various systems are using today. We'll use a 10ms
		 * interval to check.
		 */
		ret = xhci_reg_poll(xhcip, XHCI_R_CAP, off,
		    XHCI_BIOS_OWNED | XHCI_OS_OWNED, XHCI_OS_OWNED, 500, 10);
		if (ret == EIO)
			return (B_FALSE);
		if (ret == ETIMEDOUT) {
			/* Firmware never let go; clear its bit ourselves. */
			xhci_log(xhcip, "!timed out waiting for firmware to "
			    "hand off, taking over");
			val &= ~XHCI_BIOS_OWNED;
			xhci_put32(xhcip, XHCI_R_CAP, off, val);
			if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
				xhci_error(xhcip, "failed to write forced "
				    "takeover: encountered fatal FM register "
				    "error");
				ddi_fm_service_impact(xhcip->xhci_dip,
				    DDI_SERVICE_LOST);
				return (B_FALSE);
			}
		}
	}

	/*
	 * Mask the legacy SMI enables and acknowledge (clear) any pending SMI
	 * status bits in the legacy control/status register.
	 */
	val = xhci_get32(xhcip, XHCI_R_CAP, off + XHCI_XECP_LEGCTLSTS);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to read legacy control registers: "
		    "encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}
	val &= XHCI_XECP_SMI_MASK;
	val |= XHCI_XECP_CLEAR_SMI;
	xhci_put32(xhcip, XHCI_R_CAP, off + XHCI_XECP_LEGCTLSTS, val);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to write legacy control registers: "
		    "encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (B_FALSE);
	}

	return (B_TRUE);
}
1569 1570
/*
 * Ask the controller to halt: clear the Run/Stop and interrupt-enable bits
 * in USBCMD, then wait for the controller to acknowledge the halt via the
 * HCHalted bit in USBSTS. Returns 0 on success, or EIO/ETIMEDOUT.
 */
static int
xhci_controller_stop(xhci_t *xhcip)
{
	uint32_t cmdreg;

	cmdreg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to read USB Command register: "
		    "encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	cmdreg &= ~(XHCI_CMD_RS | XHCI_CMD_INTE);
	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, cmdreg);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to write USB Command register: "
		    "encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	/*
	 * Wait up to 50ms for this to occur. The specification says that this
	 * should stop within 16ms, but we give ourselves a bit more time just
	 * in case.
	 */
	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS, XHCI_STS_HCH,
	    XHCI_STS_HCH, 50, 10));
}
1600 1601
/*
 * Perform a full host controller reset by setting HCRST in USBCMD, then wait
 * for both the reset bit and the Controller Not Ready flag to clear. Returns
 * 0 on success, or EIO/ETIMEDOUT.
 */
static int
xhci_controller_reset(xhci_t *xhcip)
{
	int ret;
	uint32_t cmdreg;

	cmdreg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to read USB Command register for "
		    "reset: encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	cmdreg |= XHCI_CMD_HCRST;
	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, cmdreg);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to write USB Command register for "
		    "reset: encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	/*
	 * Some controllers apparently don't want to be touched for at least 1ms
	 * after we initiate the reset. Therefore give all controllers this
	 * moment to breathe.
	 */
	delay(drv_usectohz(xhci_reset_delay));

	/*
	 * To tell that the reset has completed we first verify that the reset
	 * has finished and that the USBCMD register no longer has the reset bit
	 * asserted. However, once that's done we have to go verify that CNR
	 * (Controller Not Ready) is no longer asserted.
	 */
	if ((ret = xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBCMD,
	    XHCI_CMD_HCRST, 0, 500, 10)) != 0)
		return (ret);

	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS,
	    XHCI_STS_CNR, 0, 500, 10));
}
1644 1645
1645 1646 /*
1646 1647 * Take care of all the required initialization before we can actually enable
1647 1648 * the controller. This means that we need to:
1648 1649 *
1649 1650 * o Program the maximum number of slots
1650 1651 * o Program the DCBAAP and allocate the scratchpad
1651 1652 * o Program the Command Ring
1652 1653 * o Initialize the Event Ring
1653 1654 * o Enable interrupts (set imod)
1654 1655 */
1655 1656 static int
1656 1657 xhci_controller_configure(xhci_t *xhcip)
1657 1658 {
1658 1659 int ret;
1659 1660 uint32_t config;
1660 1661
1661 1662 config = xhci_get32(xhcip, XHCI_R_OPER, XHCI_CONFIG);
1662 1663 config &= ~XHCI_CONFIG_SLOTS_MASK;
1663 1664 config |= xhcip->xhci_caps.xcap_max_slots;
1664 1665 xhci_put32(xhcip, XHCI_R_OPER, XHCI_CONFIG, config);
1665 1666 if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
1666 1667 ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
1667 1668 return (EIO);
1668 1669 }
1669 1670
1670 1671 if ((ret = xhci_context_init(xhcip)) != 0) {
1671 1672 const char *reason;
1672 1673 if (ret == EIO) {
1673 1674 reason = "fatal FM I/O error occurred";
1674 1675 } else if (ret == ENOMEM) {
1675 1676 reason = "unable to allocate DMA memory";
1676 1677 } else {
1677 1678 reason = "unexpected error occurred";
1678 1679 }
1679 1680
1680 1681 xhci_error(xhcip, "failed to initialize xhci context "
1681 1682 "registers: %s (%d)", reason, ret);
1682 1683 return (ret);
1683 1684 }
1684 1685
1685 1686 if ((ret = xhci_command_ring_init(xhcip)) != 0) {
1686 1687 xhci_error(xhcip, "failed to initialize commands: %d", ret);
1687 1688 return (ret);
1688 1689 }
1689 1690
1690 1691 if ((ret = xhci_event_init(xhcip)) != 0) {
1691 1692 xhci_error(xhcip, "failed to initialize events: %d", ret);
1692 1693 return (ret);
1693 1694 }
1694 1695
1695 1696 if ((ret = xhci_intr_conf(xhcip)) != 0) {
1696 1697 xhci_error(xhcip, "failed to configure interrupts: %d", ret);
1697 1698 return (ret);
1698 1699 }
1699 1700
1700 1701 return (0);
1701 1702 }
1702 1703
/*
 * Start the controller: set the Run/Stop bit in USBCMD and wait for the
 * HCHalted bit in USBSTS to clear. Returns 0 on success, or EIO/ETIMEDOUT.
 */
static int
xhci_controller_start(xhci_t *xhcip)
{
	uint32_t reg;

	reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_USBCMD);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to read USB Command register for "
		    "start: encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	reg |= XHCI_CMD_RS;
	xhci_put32(xhcip, XHCI_R_OPER, XHCI_USBCMD, reg);
	if (xhci_check_regs_acc(xhcip) != DDI_FM_OK) {
		xhci_error(xhcip, "failed to write USB Command register for "
		    "start: encountered fatal FM register error");
		ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
		return (EIO);
	}

	/* Wait up to 5 seconds (500 x 10ms) for the halt bit to clear. */
	return (xhci_reg_poll(xhcip, XHCI_R_OPER, XHCI_USBSTS,
	    XHCI_STS_HCH, 0, 500, 10));
}
1728 1729
/*
 * Taskq callback dispatched by xhci_fm_runtime_reset() when a fatal FM error
 * demands a full controller reset.
 */
/* ARGSUSED */
static void
xhci_reset_task(void *arg)
{
	/*
	 * Longer term, we'd like to properly perform a controller reset.
	 * However, that requires a bit more assistance from USBA to work
	 * properly and tear down devices. In the meantime, we panic.
	 */
	panic("XHCI runtime reset required");
}
1740 1741
/*
 * This function is called when we've detected a fatal FM condition that has
 * resulted in a loss of service and we need to force a reset of the controller
 * as a whole. Only one such reset may be ongoing at a time.
 */
void
xhci_fm_runtime_reset(xhci_t *xhcip)
{
	boolean_t locked = B_FALSE;

	/*
	 * Callers may or may not already hold xhci_lock; only take it (and
	 * later drop it) if we don't already own it.
	 */
	if (mutex_owned(&xhcip->xhci_lock)) {
		locked = B_TRUE;
	} else {
		mutex_enter(&xhcip->xhci_lock);
	}

	/*
	 * If we're already in the error state then a reset is already ongoing
	 * and there is nothing for us to do here.
	 */
	if (xhcip->xhci_state & XHCI_S_ERROR) {
		goto out;
	}

	xhcip->xhci_state |= XHCI_S_ERROR;
	ddi_fm_service_impact(xhcip->xhci_dip, DDI_SERVICE_LOST);
	/* Defer the actual reset work to the driver's taskq. */
	taskq_dispatch_ent(xhci_taskq, xhci_reset_task, xhcip, 0,
	    &xhcip->xhci_tqe);
out:
	if (!locked) {
		mutex_exit(&xhcip->xhci_lock);
	}
}
1774 1775
1775 1776 static int
1776 1777 xhci_ioctl_portsc(xhci_t *xhcip, intptr_t arg)
1777 1778 {
1778 1779 int i;
1779 1780 xhci_ioctl_portsc_t xhi;
1780 1781
1781 1782 bzero(&xhi, sizeof (xhci_ioctl_portsc_t));
1782 1783 xhi.xhi_nports = xhcip->xhci_caps.xcap_max_ports;
1783 1784 for (i = 1; i <= xhcip->xhci_caps.xcap_max_ports; i++) {
1784 1785 xhi.xhi_portsc[i] = xhci_get32(xhcip, XHCI_R_OPER,
1785 1786 XHCI_PORTSC(i));
1786 1787 }
1787 1788
1788 1789 if (ddi_copyout(&xhi, (void *)(uintptr_t)arg, sizeof (xhi), 0) != 0)
1789 1790 return (EFAULT);
1790 1791
1791 1792 return (0);
1792 1793 }
1793 1794
1794 1795 static int
1795 1796 xhci_ioctl_clear(xhci_t *xhcip, intptr_t arg)
1796 1797 {
1797 1798 uint32_t reg;
1798 1799 xhci_ioctl_clear_t xic;
1799 1800
1800 1801 if (ddi_copyin((const void *)(uintptr_t)arg, &xic, sizeof (xic),
1801 1802 0) != 0)
1802 1803 return (EFAULT);
1803 1804
1804 1805 if (xic.xic_port == 0 || xic.xic_port >
1805 1806 xhcip->xhci_caps.xcap_max_ports)
1806 1807 return (EINVAL);
1807 1808
1808 1809 reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xic.xic_port));
1809 1810 reg &= ~XHCI_PS_CLEAR;
1810 1811 reg |= XHCI_PS_CSC | XHCI_PS_PEC | XHCI_PS_WRC | XHCI_PS_OCC |
1811 1812 XHCI_PS_PRC | XHCI_PS_PLC | XHCI_PS_CEC;
1812 1813 xhci_put32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xic.xic_port), reg);
1813 1814
1814 1815 return (0);
1815 1816 }
1816 1817
1817 1818 static int
1818 1819 xhci_ioctl_setpls(xhci_t *xhcip, intptr_t arg)
1819 1820 {
1820 1821 uint32_t reg;
1821 1822 xhci_ioctl_setpls_t xis;
1822 1823
1823 1824 if (ddi_copyin((const void *)(uintptr_t)arg, &xis, sizeof (xis),
1824 1825 0) != 0)
1825 1826 return (EFAULT);
1826 1827
1827 1828 if (xis.xis_port == 0 || xis.xis_port >
1828 1829 xhcip->xhci_caps.xcap_max_ports)
1829 1830 return (EINVAL);
1830 1831
1831 1832 if (xis.xis_pls & ~0xf)
1832 1833 return (EINVAL);
1833 1834
1834 1835 reg = xhci_get32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xis.xis_port));
1835 1836 reg &= ~XHCI_PS_CLEAR;
1836 1837 reg |= XHCI_PS_PLS_SET(xis.xis_pls);
1837 1838 reg |= XHCI_PS_LWS;
1838 1839 xhci_put32(xhcip, XHCI_R_OPER, XHCI_PORTSC(xis.xis_port), reg);
1839 1840
1840 1841 return (0);
1841 1842 }
1842 1843
1843 1844 static int
1844 1845 xhci_open(dev_t *devp, int flags, int otyp, cred_t *credp)
1845 1846 {
1846 1847 dev_info_t *dip = xhci_get_dip(*devp);
1847 1848
1848 1849 return (usba_hubdi_open(dip, devp, flags, otyp, credp));
1849 1850 }
1850 1851
1851 1852 static int
1852 1853 xhci_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
1853 1854 int *rvalp)
1854 1855 {
1855 1856 dev_info_t *dip = xhci_get_dip(dev);
1856 1857
1857 1858 if (cmd == XHCI_IOCTL_PORTSC ||
1858 1859 cmd == XHCI_IOCTL_CLEAR ||
1859 1860 cmd == XHCI_IOCTL_SETPLS) {
1860 1861 xhci_t *xhcip = ddi_get_soft_state(xhci_soft_state,
1861 1862 getminor(dev) & ~HUBD_IS_ROOT_HUB);
1862 1863
1863 1864 if (secpolicy_xhci(credp) != 0 ||
1864 1865 crgetzoneid(credp) != GLOBAL_ZONEID)
1865 1866 return (EPERM);
1866 1867
1867 1868 if (mode & FKIOCTL)
1868 1869 return (ENOTSUP);
1869 1870
1870 1871 if (!(mode & FWRITE))
1871 1872 return (EBADF);
1872 1873
1873 1874 if (cmd == XHCI_IOCTL_PORTSC)
1874 1875 return (xhci_ioctl_portsc(xhcip, arg));
1875 1876 else if (cmd == XHCI_IOCTL_CLEAR)
1876 1877 return (xhci_ioctl_clear(xhcip, arg));
1877 1878 else
1878 1879 return (xhci_ioctl_setpls(xhcip, arg));
1879 1880 }
1880 1881
1881 1882 return (usba_hubdi_ioctl(dip, dev, cmd, arg, mode, credp, rvalp));
1882 1883 }
1883 1884
1884 1885 static int
1885 1886 xhci_close(dev_t dev, int flag, int otyp, cred_t *credp)
1886 1887 {
1887 1888 dev_info_t *dip = xhci_get_dip(dev);
1888 1889
1889 1890 return (usba_hubdi_close(dip, dev, flag, otyp, credp));
1890 1891 }
1891 1892
/*
 * We try to clean up everything that we can. The only thing that we let stop us
 * at this time is a failure to remove the root hub, which is realistically the
 * equivalent of our EBUSY case. Teardown is driven by the xhci_seq attach
 * sequence flags, undoing attach steps roughly in reverse order.
 */
static int
xhci_cleanup(xhci_t *xhcip)
{
	int ret, inst;

	if (xhcip->xhci_seq & XHCI_ATTACH_ROOT_HUB) {
		if ((ret = xhci_root_hub_fini(xhcip)) != 0)
			return (ret);
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_USBA) {
		xhci_hcd_fini(xhcip);
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_STARTED) {
		/*
		 * If a runtime reset is in flight, wait for it to finish
		 * before we try to halt the controller.
		 */
		mutex_enter(&xhcip->xhci_lock);
		while (xhcip->xhci_state & XHCI_S_ERROR)
			cv_wait(&xhcip->xhci_statecv, &xhcip->xhci_lock);
		mutex_exit(&xhcip->xhci_lock);

		(void) xhci_controller_stop(xhcip);
	}

	/*
	 * Always release the context, command, and event data. They handle the
	 * fact that they may be in an arbitrary state or unallocated.
	 */
	xhci_event_fini(xhcip);
	xhci_command_ring_fini(xhcip);
	xhci_context_fini(xhcip);

	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ENABLE) {
		(void) xhci_ddi_intr_disable(xhcip);
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_SYNCH) {
		cv_destroy(&xhcip->xhci_statecv);
		mutex_destroy(&xhcip->xhci_lock);
	}

	/* Interrupt removal failures are logged but don't stop teardown. */
	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ADD) {
		if ((ret = ddi_intr_remove_handler(xhcip->xhci_intr_hdl)) !=
		    DDI_SUCCESS) {
			xhci_error(xhcip, "failed to remove interrupt "
			    "handler: %d", ret);
		}
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_INTR_ALLOC) {
		if ((ret = ddi_intr_free(xhcip->xhci_intr_hdl)) !=
		    DDI_SUCCESS) {
			xhci_error(xhcip, "failed to free interrupts: %d", ret);
		}
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_REGS_MAP) {
		ddi_regs_map_free(&xhcip->xhci_regs_handle);
		xhcip->xhci_regs_handle = NULL;
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_PCI_CONFIG) {
		pci_config_teardown(&xhcip->xhci_cfg_handle);
		xhcip->xhci_cfg_handle = NULL;
	}

	if (xhcip->xhci_seq & XHCI_ATTACH_FM) {
		xhci_fm_fini(xhcip);
		xhcip->xhci_fm_caps = 0;
	}

	/* Fetch the instance before the soft state (and dip) go away. */
	inst = ddi_get_instance(xhcip->xhci_dip);
	xhcip->xhci_dip = NULL;
	ddi_soft_state_free(xhci_soft_state, inst);

	return (DDI_SUCCESS);
}
1973 1974
1975 +/* QUIESCE(9E) to support fast reboot */
1976 +int
1977 +xhci_quiesce(dev_info_t *dip)
1978 +{
1979 + xhci_t *xhcip;
1980 +
1981 + xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
1982 +
1983 + return (xhci_controller_stop(xhcip) == 0 &&
1984 + xhci_controller_reset(xhcip) == 0 ? DDI_SUCCESS : DDI_FAILURE);
1985 +}
1986 +
1974 1987 static int
1975 1988 xhci_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
1976 1989 {
1977 1990 int ret, inst, route;
1978 1991 xhci_t *xhcip;
1979 1992
1980 1993 if (cmd != DDI_ATTACH)
1981 1994 return (DDI_FAILURE);
1982 1995
1983 1996 inst = ddi_get_instance(dip);
1984 1997 if (ddi_soft_state_zalloc(xhci_soft_state, inst) != 0)
1985 1998 return (DDI_FAILURE);
1986 1999 xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
1987 2000 xhcip->xhci_dip = dip;
1988 2001
1989 2002 xhcip->xhci_regs_capoff = PCI_EINVAL32;
1990 2003 xhcip->xhci_regs_operoff = PCI_EINVAL32;
1991 2004 xhcip->xhci_regs_runoff = PCI_EINVAL32;
1992 2005 xhcip->xhci_regs_dooroff = PCI_EINVAL32;
1993 2006
1994 2007 xhci_fm_init(xhcip);
1995 2008 xhcip->xhci_seq |= XHCI_ATTACH_FM;
1996 2009
1997 2010 if (pci_config_setup(xhcip->xhci_dip, &xhcip->xhci_cfg_handle) !=
1998 2011 DDI_SUCCESS) {
1999 2012 goto err;
2000 2013 }
2001 2014 xhcip->xhci_seq |= XHCI_ATTACH_PCI_CONFIG;
2002 2015 xhcip->xhci_vendor_id = pci_config_get16(xhcip->xhci_cfg_handle,
2003 2016 PCI_CONF_VENID);
2004 2017 xhcip->xhci_device_id = pci_config_get16(xhcip->xhci_cfg_handle,
2005 2018 PCI_CONF_DEVID);
2006 2019
2007 2020 if (xhci_regs_map(xhcip) == B_FALSE) {
2008 2021 goto err;
2009 2022 }
2010 2023
2011 2024 xhcip->xhci_seq |= XHCI_ATTACH_REGS_MAP;
2012 2025
2013 2026 if (xhci_regs_init(xhcip) == B_FALSE)
2014 2027 goto err;
2015 2028
2016 2029 if (xhci_read_params(xhcip) == B_FALSE)
2017 2030 goto err;
2018 2031
2019 2032 if (xhci_identify(xhcip) == B_FALSE)
2020 2033 goto err;
2021 2034
2022 2035 if (xhci_alloc_intrs(xhcip) == B_FALSE)
2023 2036 goto err;
2024 2037 xhcip->xhci_seq |= XHCI_ATTACH_INTR_ALLOC;
2025 2038
2026 2039 if (xhci_add_intr_handler(xhcip) == B_FALSE)
2027 2040 goto err;
2028 2041 xhcip->xhci_seq |= XHCI_ATTACH_INTR_ADD;
2029 2042
2030 2043 mutex_init(&xhcip->xhci_lock, NULL, MUTEX_DRIVER,
2031 2044 (void *)(uintptr_t)xhcip->xhci_intr_pri);
2032 2045 cv_init(&xhcip->xhci_statecv, NULL, CV_DRIVER, NULL);
2033 2046 xhcip->xhci_seq |= XHCI_ATTACH_SYNCH;
2034 2047
2035 2048 if (xhci_port_count(xhcip) == B_FALSE)
2036 2049 goto err;
2037 2050
2038 2051 if (xhci_controller_takeover(xhcip) == B_FALSE)
2039 2052 goto err;
2040 2053
2041 2054 /*
2042 2055 * We don't enable interrupts until after we take over the controller
2043 2056 * from the BIOS. We've observed cases where this can cause spurious
2044 2057 * interrupts.
2045 2058 */
2046 2059 if (xhci_ddi_intr_enable(xhcip) == B_FALSE)
2047 2060 goto err;
2048 2061 xhcip->xhci_seq |= XHCI_ATTACH_INTR_ENABLE;
2049 2062
2050 2063 if ((ret = xhci_controller_stop(xhcip)) != 0) {
2051 2064 xhci_error(xhcip, "failed to stop controller: %s",
2052 2065 ret == EIO ? "encountered FM register error" :
2053 2066 "timed out while waiting for controller");
2054 2067 goto err;
2055 2068 }
2056 2069
2057 2070 if ((ret = xhci_controller_reset(xhcip)) != 0) {
2058 2071 xhci_error(xhcip, "failed to reset controller: %s",
2059 2072 ret == EIO ? "encountered FM register error" :
2060 2073 "timed out while waiting for controller");
2061 2074 goto err;
2062 2075 }
2063 2076
2064 2077 if ((ret = xhci_controller_configure(xhcip)) != 0) {
2065 2078 xhci_error(xhcip, "failed to configure controller: %d", ret);
2066 2079 goto err;
2067 2080 }
2068 2081
2069 2082 /*
2070 2083 * Some systems support having ports routed to both an ehci and xhci
2071 2084 * controller. If we support it and the user hasn't requested otherwise
2072 2085 * via a driver.conf tuning, we reroute it now.
2073 2086 */
2074 2087 route = ddi_prop_get_int(DDI_DEV_T_ANY, xhcip->xhci_dip,
2075 2088 DDI_PROP_DONTPASS, "xhci-reroute", XHCI_PROP_REROUTE_DEFAULT);
2076 2089 if (route != XHCI_PROP_REROUTE_DISABLE &&
2077 2090 (xhcip->xhci_quirks & XHCI_QUIRK_INTC_EHCI))
2078 2091 (void) xhci_reroute_intel(xhcip);
2079 2092
2080 2093 if ((ret = xhci_controller_start(xhcip)) != 0) {
2081 2094 xhci_log(xhcip, "failed to reset controller: %s",
2082 2095 ret == EIO ? "encountered FM register error" :
2083 2096 "timed out while waiting for controller");
2084 2097 goto err;
2085 2098 }
2086 2099 xhcip->xhci_seq |= XHCI_ATTACH_STARTED;
2087 2100
2088 2101 /*
2089 2102 * Finally, register ourselves with the USB framework itself.
2090 2103 */
2091 2104 if ((ret = xhci_hcd_init(xhcip)) != 0) {
2092 2105 xhci_error(xhcip, "failed to register hcd with usba");
2093 2106 goto err;
2094 2107 }
2095 2108 xhcip->xhci_seq |= XHCI_ATTACH_USBA;
2096 2109
2097 2110 if ((ret = xhci_root_hub_init(xhcip)) != 0) {
2098 2111 xhci_error(xhcip, "failed to load the root hub driver");
2099 2112 goto err;
2100 2113 }
2101 2114 xhcip->xhci_seq |= XHCI_ATTACH_ROOT_HUB;
2102 2115
2103 2116 return (DDI_SUCCESS);
2104 2117
2105 2118 err:
2106 2119 (void) xhci_cleanup(xhcip);
2107 2120 return (DDI_FAILURE);
2108 2121 }
2109 2122
2110 2123 static int
2111 2124 xhci_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2112 2125 {
2113 2126 xhci_t *xhcip;
2114 2127
2115 2128 if (cmd != DDI_DETACH)
2116 2129 return (DDI_FAILURE);
2117 2130
2118 2131 xhcip = ddi_get_soft_state(xhci_soft_state, ddi_get_instance(dip));
2119 2132 if (xhcip == NULL) {
2120 2133 dev_err(dip, CE_WARN, "detach called without soft state!");
2121 2134 return (DDI_FAILURE);
2122 2135 }
2123 2136
2124 2137 return (xhci_cleanup(xhcip));
2125 2138 }
2126 2139
2127 2140 /* ARGSUSED */
2128 2141 static int
2129 2142 xhci_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **outp)
2130 2143 {
2131 2144 dev_t dev;
2132 2145 int inst;
2133 2146
2134 2147 switch (cmd) {
2135 2148 case DDI_INFO_DEVT2DEVINFO:
2136 2149 dev = (dev_t)arg;
2137 2150 *outp = xhci_get_dip(dev);
2138 2151 if (*outp == NULL)
2139 2152 return (DDI_FAILURE);
2140 2153 break;
2141 2154 case DDI_INFO_DEVT2INSTANCE:
2142 2155 dev = (dev_t)arg;
2143 2156 inst = getminor(dev) & ~HUBD_IS_ROOT_HUB;
2144 2157 *outp = (void *)(uintptr_t)inst;
2145 2158 break;
2146 2159 default:
2147 2160 return (DDI_FAILURE);
2148 2161 }
2149 2162
2150 2163 return (DDI_SUCCESS);
2151 2164 }
2152 2165
/*
 * Character device entry points. Only open, close, and ioctl are backed by
 * real implementations (used for the root hub / hubd interaction); all
 * other entry points are explicitly unsupported via nodev/nochpoll.
 */
static struct cb_ops xhci_cb_ops = {
	xhci_open,		/* cb_open */
	xhci_close,		/* cb_close */
	nodev,			/* cb_strategy */
	nodev,			/* cb_print */
	nodev,			/* cb_dump */
	nodev,			/* cb_read */
	nodev,			/* cb_write */
	xhci_ioctl,		/* cb_ioctl */
	nodev,			/* cb_devmap */
	nodev,			/* cb_mmap */
	nodev,			/* cb_segmap */
	nochpoll,		/* cb_chpoll */
	ddi_prop_op,		/* cb_prop_op */
	NULL,			/* cb_stream */
	D_MP | D_HOTPLUG,	/* cb_flag */
	CB_REV,			/* cb_rev */
	nodev,			/* cb_aread */
	nodev			/* cb_awrite */
};
2173 2186
/*
 * Driver operations table. Bus operations and power management are
 * delegated to the USBA hubd framework since the controller doubles as
 * the root hub. xhci_quiesce is provided so the controller can be halted
 * for fast reboot (quiesce(9E)).
 */
static struct dev_ops xhci_dev_ops = {
	DEVO_REV,			/* devo_rev */
	0,				/* devo_refcnt */
	xhci_getinfo,			/* devo_getinfo */
	nulldev,			/* devo_identify */
	nulldev,			/* devo_probe */
	xhci_attach,			/* devo_attach */
	xhci_detach,			/* devo_detach */
	nodev,				/* devo_reset */
	&xhci_cb_ops,			/* devo_cb_ops */
	&usba_hubdi_busops,		/* devo_bus_ops */
	usba_hubdi_root_hub_power,	/* devo_power */
	xhci_quiesce			/* devo_quiesce */
};
2188 2201
/* Module linkage: describes this driver to the kernel module framework. */
static struct modldrv xhci_modldrv = {
	&mod_driverops,
	"USB xHCI Driver",
	&xhci_dev_ops
};
2194 2207
/* Single-module linkage list terminated by NULL, per modlinkage(9S). */
static struct modlinkage xhci_modlinkage = {
	MODREV_1,
	&xhci_modldrv,
	NULL
};
2200 2213
2201 2214 int
2202 2215 _init(void)
2203 2216 {
2204 2217 int ret;
2205 2218
2206 2219 if ((ret = ddi_soft_state_init(&xhci_soft_state, sizeof (xhci_t),
2207 2220 0)) != 0) {
2208 2221 return (ret);
2209 2222 }
2210 2223
2211 2224 xhci_taskq = taskq_create("xhci_taskq", 1, minclsyspri, 0, 0, 0);
2212 2225 if (xhci_taskq == NULL) {
2213 2226 ddi_soft_state_fini(&xhci_soft_state);
2214 2227 return (ENOMEM);
2215 2228 }
2216 2229
2217 2230 if ((ret = mod_install(&xhci_modlinkage)) != 0) {
2218 2231 taskq_destroy(xhci_taskq);
2219 2232 xhci_taskq = NULL;
2220 2233 }
2221 2234
2222 2235 return (ret);
2223 2236 }
2224 2237
/* Module info entry point: report our linkage to modinfo(8). */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&xhci_modlinkage, modinfop));
}
2230 2243
2231 2244 int
2232 2245 _fini(void)
2233 2246 {
2234 2247 int ret;
2235 2248
2236 2249 if ((ret = mod_remove(&xhci_modlinkage)) != 0)
2237 2250 return (ret);
2238 2251
2239 2252 if (xhci_taskq != NULL) {
2240 2253 taskq_destroy(xhci_taskq);
2241 2254 xhci_taskq = NULL;
2242 2255 }
2243 2256
2244 2257 ddi_soft_state_fini(&xhci_soft_state);
2245 2258
2246 2259 return (0);
2247 2260 }
↓ open down ↓ |
51 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX