Customize reset for HPE iLO
--- old/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c
+++ new/usr/src/uts/common/io/usb/hcd/ehci/ehci_util.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright (c) 2018, Joyent, Inc.
25 25 */
26 26
27 27 /*
28 28 * EHCI Host Controller Driver (EHCI)
29 29 *
30 30 * The EHCI driver is a software driver which interfaces to the Universal
31 31 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
32 32 * the Host Controller is defined by the EHCI Host Controller Interface.
33 33 *
34 34 * This module contains the main EHCI driver code which handles all USB
35 35 * transfers, bandwidth allocations and other general functionalities.
36 36 */
37 37
38 38 #include <sys/usb/hcd/ehci/ehcid.h>
39 39 #include <sys/usb/hcd/ehci/ehci_isoch.h>
40 40 #include <sys/usb/hcd/ehci/ehci_xfer.h>
41 41
42 42 /*
43 43 * EHCI MSI tunable:
44 44 *
45 45 * By default MSI is enabled on all supported platforms except for the
46 46 * EHCI controller of ULI1575 South bridge.
47 47 */
48 48 boolean_t ehci_enable_msi = B_TRUE;
49 49
50 50 /* Pointer to the state structure */
51 51 extern void *ehci_statep;
52 52
53 53 extern void ehci_handle_endpoint_reclaimation(ehci_state_t *);
54 54
55 55 extern uint_t ehci_vt62x2_workaround;
56 56 extern int force_ehci_off;
57 57
58 58 /* Adjustable variables for the size of the pools */
59 59 int ehci_qh_pool_size = EHCI_QH_POOL_SIZE;
60 60 int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE;
61 61
62 62 /*
63 63 * Initialize the values that determine the order in which the 32ms
64 64 * intr QH lists are executed by the host controller in the lattice tree.
65 65 */
66 66 static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] =
67 67 {0x00, 0x10, 0x08, 0x18,
68 68 0x04, 0x14, 0x0c, 0x1c,
69 69 0x02, 0x12, 0x0a, 0x1a,
70 70 0x06, 0x16, 0x0e, 0x1e,
71 71 0x01, 0x11, 0x09, 0x19,
72 72 0x05, 0x15, 0x0d, 0x1d,
73 73 0x03, 0x13, 0x0b, 0x1b,
74 74 0x07, 0x17, 0x0f, 0x1f};
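The table above is simply the 5-bit bit-reversal of the indices 0 through 31, which spreads the 32 interrupt QH lists evenly across the periodic schedule. A minimal sketch that reproduces it (ehci_bit_reverse5() is a hypothetical helper for illustration, not part of the driver):

static uchar_t
ehci_bit_reverse5(uchar_t n)
{
	uchar_t i, r = 0;

	for (i = 0; i < 5; i++) {
		r = (r << 1) | (n & 1);	/* shift in the low bit of n */
		n >>= 1;
	}

	return (r);	/* e.g. ehci_index[3] == ehci_bit_reverse5(3) == 0x18 */
}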
75 75
76 76 /*
77 77 * Initialize the values which are used to calculate start split mask
78 78 * for the low/full/high speed interrupt and isochronous endpoints.
79 79 */
80 80 static uint_t ehci_start_split_mask[15] = {
81 81 /*
82 82 * For full/low speed USB devices, and for high speed
83 83 * devices with a polling interval greater than or equal
84 84 * to 8 microframes (125us each).
85 85 */
86 86 0x01, /* 00000001 */
87 87 0x02, /* 00000010 */
88 88 0x04, /* 00000100 */
89 89 0x08, /* 00001000 */
90 90 0x10, /* 00010000 */
91 91 0x20, /* 00100000 */
92 92 0x40, /* 01000000 */
93 93 0x80, /* 10000000 */
94 94
95 95 /* Only for high speed devices with a polling interval of 4 microframes */
96 96 0x11, /* 00010001 */
97 97 0x22, /* 00100010 */
98 98 0x44, /* 01000100 */
99 99 0x88, /* 10001000 */
100 100
101 101 /* Only for high speed devices with a polling interval of 2 microframes */
102 102 0x55, /* 01010101 */
103 103 0xaa, /* 10101010 */
104 104
105 105 /* Only for high speed devices with a polling interval of 1 microframe */
106 106 0xff /* 11111111 */
107 107 };
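As a rough illustration of how this layout can be indexed, the sketch below maps a high-speed polling interval (in microframes) to the candidate range of start-split masks. ehci_ss_mask_candidates() and its parameters are assumptions for illustration only, not the driver's actual ehci_find_bestfit_hs_mask() logic.

static void
ehci_ss_mask_candidates(int interval, int *first, int *count)
{
	if (interval >= 8) {		/* one transaction per 8+ uframes */
		*first = 0; *count = 8;
	} else if (interval == 4) {	/* transactions 4 uframes apart */
		*first = 8; *count = 4;
	} else if (interval == 2) {	/* transactions 2 uframes apart */
		*first = 12; *count = 2;
	} else {			/* interval == 1: every uframe */
		*first = 14; *count = 1;
	}
}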
108 108
109 109 /*
110 110 * Initialize the values which are used to calculate complete split mask
111 111 * for the low/full speed interrupt and isochronous endpoints.
112 112 */
113 113 static uint_t ehci_intr_complete_split_mask[7] = {
114 114 /* Only full/low speed devices */
115 115 0x1c, /* 00011100 */
116 116 0x38, /* 00111000 */
117 117 0x70, /* 01110000 */
118 118 0xe0, /* 11100000 */
119 119 0x00, /* Need FSTN feature */
120 120 0x00, /* Need FSTN feature */
121 121 0x00 /* Need FSTN feature */
122 122 };
123 123
124 124
125 125 /*
126 126 * EHCI Internal Function Prototypes
127 127 */
128 128
129 129 /* Host Controller Driver (HCD) initialization functions */
130 130 void ehci_set_dma_attributes(ehci_state_t *ehcip);
131 131 int ehci_allocate_pools(ehci_state_t *ehcip);
132 132 void ehci_decode_ddi_dma_addr_bind_handle_result(
133 133 ehci_state_t *ehcip,
134 134 int result);
135 135 int ehci_map_regs(ehci_state_t *ehcip);
136 136 int ehci_register_intrs_and_init_mutex(
137 137 ehci_state_t *ehcip);
138 138 static int ehci_add_intrs(ehci_state_t *ehcip,
139 139 int intr_type);
140 140 int ehci_init_ctlr(ehci_state_t *ehcip,
141 141 int init_type);
142 142 static int ehci_take_control(ehci_state_t *ehcip);
143 143 static int ehci_init_periodic_frame_lst_table(
144 144 ehci_state_t *ehcip);
145 145 static void ehci_build_interrupt_lattice(
146 146 ehci_state_t *ehcip);
147 147 usba_hcdi_ops_t *ehci_alloc_hcdi_ops(ehci_state_t *ehcip);
148 148
149 149 /* Host Controller Driver (HCD) deinitialization functions */
150 150 int ehci_cleanup(ehci_state_t *ehcip);
151 151 static void ehci_rem_intrs(ehci_state_t *ehcip);
152 152 int ehci_cpr_suspend(ehci_state_t *ehcip);
153 153 int ehci_cpr_resume(ehci_state_t *ehcip);
154 154
155 155 /* Bandwidth Allocation functions */
156 156 int ehci_allocate_bandwidth(ehci_state_t *ehcip,
157 157 usba_pipe_handle_data_t *ph,
158 158 uint_t *pnode,
159 159 uchar_t *smask,
160 160 uchar_t *cmask);
161 161 static int ehci_allocate_high_speed_bandwidth(
162 162 ehci_state_t *ehcip,
163 163 usba_pipe_handle_data_t *ph,
164 164 uint_t *hnode,
165 165 uchar_t *smask,
166 166 uchar_t *cmask);
167 167 static int ehci_allocate_classic_tt_bandwidth(
168 168 ehci_state_t *ehcip,
169 169 usba_pipe_handle_data_t *ph,
170 170 uint_t pnode);
171 171 void ehci_deallocate_bandwidth(ehci_state_t *ehcip,
172 172 usba_pipe_handle_data_t *ph,
173 173 uint_t pnode,
174 174 uchar_t smask,
175 175 uchar_t cmask);
176 176 static void ehci_deallocate_high_speed_bandwidth(
177 177 ehci_state_t *ehcip,
178 178 usba_pipe_handle_data_t *ph,
179 179 uint_t hnode,
180 180 uchar_t smask,
181 181 uchar_t cmask);
182 182 static void ehci_deallocate_classic_tt_bandwidth(
183 183 ehci_state_t *ehcip,
184 184 usba_pipe_handle_data_t *ph,
185 185 uint_t pnode);
186 186 static int ehci_compute_high_speed_bandwidth(
187 187 ehci_state_t *ehcip,
188 188 usb_ep_descr_t *endpoint,
189 189 usb_port_status_t port_status,
190 190 uint_t *sbandwidth,
191 191 uint_t *cbandwidth);
192 192 static int ehci_compute_classic_bandwidth(
193 193 usb_ep_descr_t *endpoint,
194 194 usb_port_status_t port_status,
195 195 uint_t *bandwidth);
196 196 int ehci_adjust_polling_interval(
197 197 ehci_state_t *ehcip,
198 198 usb_ep_descr_t *endpoint,
199 199 usb_port_status_t port_status);
200 200 static int ehci_adjust_high_speed_polling_interval(
201 201 ehci_state_t *ehcip,
202 202 usb_ep_descr_t *endpoint);
203 203 static uint_t ehci_lattice_height(uint_t interval);
204 204 static uint_t ehci_lattice_parent(uint_t node);
205 205 static uint_t ehci_find_periodic_node(
206 206 uint_t leaf,
207 207 int interval);
208 208 static uint_t ehci_leftmost_leaf(uint_t node,
209 209 uint_t height);
210 210 static uint_t ehci_pow_2(uint_t x);
211 211 static uint_t ehci_log_2(uint_t x);
212 212 static int ehci_find_bestfit_hs_mask(
213 213 ehci_state_t *ehcip,
214 214 uchar_t *smask,
215 215 uint_t *pnode,
216 216 usb_ep_descr_t *endpoint,
217 217 uint_t bandwidth,
218 218 int interval);
219 219 static int ehci_find_bestfit_ls_intr_mask(
220 220 ehci_state_t *ehcip,
221 221 uchar_t *smask,
222 222 uchar_t *cmask,
223 223 uint_t *pnode,
224 224 uint_t sbandwidth,
225 225 uint_t cbandwidth,
226 226 int interval);
227 227 static int ehci_find_bestfit_sitd_in_mask(
228 228 ehci_state_t *ehcip,
229 229 uchar_t *smask,
230 230 uchar_t *cmask,
231 231 uint_t *pnode,
232 232 uint_t sbandwidth,
233 233 uint_t cbandwidth,
234 234 int interval);
235 235 static int ehci_find_bestfit_sitd_out_mask(
236 236 ehci_state_t *ehcip,
237 237 uchar_t *smask,
238 238 uint_t *pnode,
239 239 uint_t sbandwidth,
240 240 int interval);
241 241 static uint_t ehci_calculate_bw_availability_mask(
242 242 ehci_state_t *ehcip,
243 243 uint_t bandwidth,
244 244 int leaf,
245 245 int leaf_count,
246 246 uchar_t *bw_mask);
247 247 static void ehci_update_bw_availability(
248 248 ehci_state_t *ehcip,
249 249 int bandwidth,
250 250 int leftmost_leaf,
251 251 int leaf_count,
252 252 uchar_t mask);
253 253
254 254 /* Miscellaneous functions */
255 255 ehci_state_t *ehci_obtain_state(
256 256 dev_info_t *dip);
257 257 int ehci_state_is_operational(
258 258 ehci_state_t *ehcip);
259 259 int ehci_do_soft_reset(
260 260 ehci_state_t *ehcip);
261 261 usb_req_attrs_t ehci_get_xfer_attrs(ehci_state_t *ehcip,
262 262 ehci_pipe_private_t *pp,
263 263 ehci_trans_wrapper_t *tw);
264 264 usb_frame_number_t ehci_get_current_frame_number(
265 265 ehci_state_t *ehcip);
266 266 static void ehci_cpr_cleanup(
267 267 ehci_state_t *ehcip);
268 268 int ehci_wait_for_sof(
269 269 ehci_state_t *ehcip);
270 270 void ehci_toggle_scheduler(
271 271 ehci_state_t *ehcip);
272 272 void ehci_print_caps(ehci_state_t *ehcip);
273 273 void ehci_print_regs(ehci_state_t *ehcip);
274 274 void ehci_print_qh(ehci_state_t *ehcip,
275 275 ehci_qh_t *qh);
276 276 void ehci_print_qtd(ehci_state_t *ehcip,
277 277 ehci_qtd_t *qtd);
278 278 void ehci_create_stats(ehci_state_t *ehcip);
279 279 void ehci_destroy_stats(ehci_state_t *ehcip);
280 280 void ehci_do_intrs_stats(ehci_state_t *ehcip,
281 281 int val);
282 282 void ehci_do_byte_stats(ehci_state_t *ehcip,
283 283 size_t len,
284 284 uint8_t attr,
285 285 uint8_t addr);
286 286
287 287 /*
288 288 * check if this ehci controller can support PM
289 289 */
290 290 int
291 291 ehci_hcdi_pm_support(dev_info_t *dip)
292 292 {
293 293 ehci_state_t *ehcip = ddi_get_soft_state(ehci_statep,
294 294 ddi_get_instance(dip));
295 295
296 296 if (((ehcip->ehci_vendor_id == PCI_VENDOR_NEC_COMBO) &&
297 297 (ehcip->ehci_device_id == PCI_DEVICE_NEC_COMBO)) ||
298 298
299 299 ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
300 300 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) ||
301 301
302 302 (ehcip->ehci_vendor_id == PCI_VENDOR_VIA)) {
303 303
304 304 return (USB_SUCCESS);
305 305 }
306 306
307 307 return (USB_FAILURE);
308 308 }
309 309
310 310 void
311 311 ehci_dma_attr_workaround(ehci_state_t *ehcip)
312 312 {
313 313 /*
314 314 * Some Nvidia chips cannot handle a qh dma address above 2G.
315 315 * Bit 31 of the dma address might be dropped, which can
316 316 * cause a system crash or other unpredictable results. So force
317 317 * the dma address to be allocated below 2G to make ehci work.
318 318 */
319 319 if (PCI_VENDOR_NVIDIA == ehcip->ehci_vendor_id) {
320 320 switch (ehcip->ehci_device_id) {
321 321 case PCI_DEVICE_NVIDIA_CK804:
322 322 case PCI_DEVICE_NVIDIA_MCP04:
323 323 USB_DPRINTF_L2(PRINT_MASK_ATTA,
324 324 ehcip->ehci_log_hdl,
325 325 "ehci_dma_attr_workaround: NVIDIA dma "
326 326 "workaround enabled, force dma address "
327 327 "to be allocated below 2G");
328 328 ehcip->ehci_dma_attr.dma_attr_addr_hi =
329 329 0x7fffffffull;
330 330 break;
331 331 default:
332 332 break;
333 333
334 334 }
335 335 }
336 336 }
337 337
338 338 /*
339 339 * Host Controller Driver (HCD) initialization functions
340 340 */
341 341
342 342 /*
343 343 * ehci_set_dma_attributes:
344 344 *
345 345 * Set the limits in the DMA attributes structure. Most of the values used
346 346 * in the DMA limit structures are the default values as specified by the
347 347 * Writing PCI device drivers document.
348 348 */
349 349 void
350 350 ehci_set_dma_attributes(ehci_state_t *ehcip)
351 351 {
352 352 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
353 353 "ehci_set_dma_attributes:");
354 354
355 355 /* Initialize the DMA attributes */
356 356 ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0;
357 357 ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
358 358 ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull;
359 359
360 360 /* 32 bit addressing */
361 361 ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX;
362 362
363 363 /* Byte alignment */
364 364 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
365 365
366 366 /*
367 367 * Since the PCI specification requires only byte alignment,
368 368 * the burst size field should be set to 1 for PCI devices.
369 369 */
370 370 ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1;
371 371
372 372 ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1;
373 373 ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER;
374 374 ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull;
375 375 ehcip->ehci_dma_attr.dma_attr_sgllen = 1;
376 376 ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR;
377 377 ehcip->ehci_dma_attr.dma_attr_flags = 0;
378 378 ehci_dma_attr_workaround(ehcip);
379 379 }
380 380
381 381
382 382 /*
383 383 * ehci_allocate_pools:
384 384 *
385 385 * Allocate the system memory for the Endpoint Descriptor (QH) and for the
386 386 * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned
387 387 * to a 16 byte boundary.
388 388 */
389 389 int
390 390 ehci_allocate_pools(ehci_state_t *ehcip)
391 391 {
392 392 ddi_device_acc_attr_t dev_attr;
393 393 size_t real_length;
394 394 int result;
395 395 uint_t ccount;
396 396 int i;
397 397
398 398 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
399 399 "ehci_allocate_pools:");
400 400
401 401 /* The host controller will be little endian */
402 402 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
403 403 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
404 404 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
405 405
406 406 /* Byte alignment */
407 407 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT;
408 408
409 409 /* Allocate the QTD pool DMA handle */
410 410 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
411 411 DDI_DMA_SLEEP, 0,
412 412 &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) {
413 413
414 414 goto failure;
415 415 }
416 416
417 417 /* Allocate the memory for the QTD pool */
418 418 if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle,
419 419 ehci_qtd_pool_size * sizeof (ehci_qtd_t),
420 420 &dev_attr,
421 421 DDI_DMA_CONSISTENT,
422 422 DDI_DMA_SLEEP,
423 423 0,
424 424 (caddr_t *)&ehcip->ehci_qtd_pool_addr,
425 425 &real_length,
426 426 &ehcip->ehci_qtd_pool_mem_handle)) {
427 427
428 428 goto failure;
429 429 }
430 430
431 431 /* Map the QTD pool into the I/O address space */
432 432 result = ddi_dma_addr_bind_handle(
433 433 ehcip->ehci_qtd_pool_dma_handle,
434 434 NULL,
435 435 (caddr_t)ehcip->ehci_qtd_pool_addr,
436 436 real_length,
437 437 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
438 438 DDI_DMA_SLEEP,
439 439 NULL,
440 440 &ehcip->ehci_qtd_pool_cookie,
441 441 &ccount);
442 442
443 443 bzero((void *)ehcip->ehci_qtd_pool_addr,
444 444 ehci_qtd_pool_size * sizeof (ehci_qtd_t));
445 445
446 446 /* Process the result */
447 447 if (result == DDI_DMA_MAPPED) {
448 448 /* The cookie count should be 1 */
449 449 if (ccount != 1) {
450 450 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
451 451 "ehci_allocate_pools: More than 1 cookie");
452 452
453 453 goto failure;
454 454 }
455 455 } else {
456 456 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
457 457 "ehci_allocate_pools: Result = %d", result);
458 458
459 459 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
460 460
461 461 goto failure;
462 462 }
463 463
464 464 /*
465 465 * DMA addresses for QTD pools are bound
466 466 */
467 467 ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND;
468 468
469 469 /* Initialize the QTD pool */
470 470 for (i = 0; i < ehci_qtd_pool_size; i ++) {
471 471 Set_QTD(ehcip->ehci_qtd_pool_addr[i].
472 472 qtd_state, EHCI_QTD_FREE);
473 473 }
474 474
475 475 /* Allocate the QH pool DMA handle */
476 476 if (ddi_dma_alloc_handle(ehcip->ehci_dip,
477 477 &ehcip->ehci_dma_attr,
478 478 DDI_DMA_SLEEP,
479 479 0,
480 480 &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) {
481 481 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
482 482 "ehci_allocate_pools: ddi_dma_alloc_handle failed");
483 483
484 484 goto failure;
485 485 }
486 486
487 487 /* Allocate the memory for the QH pool */
488 488 if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle,
489 489 ehci_qh_pool_size * sizeof (ehci_qh_t),
490 490 &dev_attr,
491 491 DDI_DMA_CONSISTENT,
492 492 DDI_DMA_SLEEP,
493 493 0,
494 494 (caddr_t *)&ehcip->ehci_qh_pool_addr,
495 495 &real_length,
496 496 &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) {
497 497 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
498 498 "ehci_allocate_pools: ddi_dma_mem_alloc failed");
499 499
500 500 goto failure;
501 501 }
502 502
503 503 result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle,
504 504 NULL,
505 505 (caddr_t)ehcip->ehci_qh_pool_addr,
506 506 real_length,
507 507 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
508 508 DDI_DMA_SLEEP,
509 509 NULL,
510 510 &ehcip->ehci_qh_pool_cookie,
511 511 &ccount);
512 512
513 513 bzero((void *)ehcip->ehci_qh_pool_addr,
514 514 ehci_qh_pool_size * sizeof (ehci_qh_t));
515 515
516 516 /* Process the result */
517 517 if (result == DDI_DMA_MAPPED) {
518 518 /* The cookie count should be 1 */
519 519 if (ccount != 1) {
520 520 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
521 521 "ehci_allocate_pools: More than 1 cookie");
522 522
523 523 goto failure;
524 524 }
525 525 } else {
526 526 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
527 527
528 528 goto failure;
529 529 }
530 530
531 531 /*
532 532 * DMA addresses for QH pools are bound
533 533 */
534 534 ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND;
535 535
536 536 /* Initialize the QH pool */
537 537 for (i = 0; i < ehci_qh_pool_size; i ++) {
538 538 Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE);
539 539 }
540 540
541 541 /* Byte alignment */
542 542 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
543 543
544 544 return (DDI_SUCCESS);
545 545
546 546 failure:
547 547 /* Byte alignment */
548 548 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
549 549
550 550 return (DDI_FAILURE);
551 551 }
552 552
553 553
554 554 /*
555 555 * ehci_decode_ddi_dma_addr_bind_handle_result:
556 556 *
557 557 * Process the return values of ddi_dma_addr_bind_handle()
558 558 */
559 559 void
560 560 ehci_decode_ddi_dma_addr_bind_handle_result(
561 561 ehci_state_t *ehcip,
562 562 int result)
563 563 {
564 564 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
565 565 "ehci_decode_ddi_dma_addr_bind_handle_result:");
566 566
567 567 switch (result) {
568 568 case DDI_DMA_PARTIAL_MAP:
569 569 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
570 570 "Partial transfers not allowed");
571 571 break;
572 572 case DDI_DMA_INUSE:
573 573 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
574 574 "Handle is in use");
575 575 break;
576 576 case DDI_DMA_NORESOURCES:
577 577 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
578 578 "No resources");
579 579 break;
580 580 case DDI_DMA_NOMAPPING:
581 581 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
582 582 "No mapping");
583 583 break;
584 584 case DDI_DMA_TOOBIG:
585 585 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
586 586 "Object is too big");
587 587 break;
588 588 default:
589 589 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
590 590 "Unknown dma error");
591 591 }
592 592 }
593 593
594 594
595 595 /*
596 596 * ehci_map_regs:
597 597 *
598 598 * The Host Controller (HC) contains a set of on-chip operational registers
599 599 * which should be mapped into a non-cacheable portion of the system
600 600 * addressable space.
601 601 */
602 602 int
603 603 ehci_map_regs(ehci_state_t *ehcip)
604 604 {
605 605 ddi_device_acc_attr_t attr;
606 606 uint16_t cmd_reg;
607 607 uint_t length;
608 608
609 609 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:");
610 610
611 611 /* Check to make sure we have memory access */
612 612 if (pci_config_setup(ehcip->ehci_dip,
613 613 &ehcip->ehci_config_handle) != DDI_SUCCESS) {
614 614
615 615 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
616 616 "ehci_map_regs: Config error");
617 617
618 618 return (DDI_FAILURE);
619 619 }
620 620
621 621 /* Make sure Memory Access Enable is set */
622 622 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
623 623
624 624 if (!(cmd_reg & PCI_COMM_MAE)) {
625 625
626 626 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
627 627 "ehci_map_regs: Memory base address access disabled");
628 628
629 629 return (DDI_FAILURE);
630 630 }
631 631
632 632 /* The host controller will be little endian */
633 633 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
634 634 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
635 635 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
636 636
637 637 /* Map in EHCI Capability registers */
638 638 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
639 639 (caddr_t *)&ehcip->ehci_capsp, 0,
640 640 sizeof (ehci_caps_t), &attr,
641 641 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
642 642
643 643 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
644 644 "ehci_map_regs: Map setup error");
645 645
646 646 return (DDI_FAILURE);
647 647 }
648 648
649 649 length = ddi_get8(ehcip->ehci_caps_handle,
650 650 (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length);
651 651
652 652 /* Free the original mapping */
653 653 ddi_regs_map_free(&ehcip->ehci_caps_handle);
654 654
655 655 /* Re-map in EHCI Capability and Operational registers */
656 656 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
657 657 (caddr_t *)&ehcip->ehci_capsp, 0,
658 658 length + sizeof (ehci_regs_t), &attr,
659 659 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
660 660
661 661 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
662 662 "ehci_map_regs: Map setup error");
663 663
664 664 return (DDI_FAILURE);
665 665 }
666 666
667 667 /* Get the pointer to EHCI Operational Register */
668 668 ehcip->ehci_regsp = (ehci_regs_t *)
669 669 ((uintptr_t)ehcip->ehci_capsp + length);
670 670
671 671 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
672 672 "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n",
673 673 (void *)ehcip->ehci_capsp, (void *)ehcip->ehci_regsp);
674 674
675 675 return (DDI_SUCCESS);
676 676 }
677 677
678 678 /*
679 679 * The following simulated polling is for debugging purposes only.
680 680 * It is activated on x86 by setting usb-polling=true in GRUB or ehci.conf.
681 681 */
682 682 static int
683 683 ehci_is_polled(dev_info_t *dip)
684 684 {
685 685 int ret;
686 686 char *propval;
687 687
688 688 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
689 689 "usb-polling", &propval) != DDI_SUCCESS)
690 690
691 691 return (0);
692 692
693 693 ret = (strcmp(propval, "true") == 0);
694 694 ddi_prop_free(propval);
695 695
696 696 return (ret);
697 697 }
698 698
699 699 static void
700 700 ehci_poll_intr(void *arg)
701 701 {
702 702 /* poll every msec */
703 703 for (;;) {
704 704 (void) ehci_intr(arg, NULL);
705 705 delay(drv_usectohz(1000));
706 706 }
707 707 }
708 708
709 709 /*
710 710 * ehci_register_intrs_and_init_mutex:
711 711 *
712 712 * Register interrupts and initialize the mutex and condition variables
713 713 */
714 714 int
715 715 ehci_register_intrs_and_init_mutex(ehci_state_t *ehcip)
716 716 {
717 717 int intr_types;
718 718
719 719 #if defined(__x86)
720 720 uint8_t iline;
721 721 #endif
722 722
723 723 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
724 724 "ehci_register_intrs_and_init_mutex:");
725 725
726 726 /*
727 727 * There is a known MSI hardware bug with the EHCI controller
728 728 * of ULI1575 southbridge. Hence MSI is disabled for this chip.
729 729 */
730 730 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
731 731 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
732 732 ehcip->ehci_msi_enabled = B_FALSE;
733 733 } else {
734 734 /* Set the MSI enable flag from the global EHCI MSI tunable */
735 735 ehcip->ehci_msi_enabled = ehci_enable_msi;
736 736 }
737 737
738 738 /* launch polling thread instead of enabling pci interrupt */
739 739 if (ehci_is_polled(ehcip->ehci_dip)) {
740 740 extern pri_t maxclsyspri;
741 741
742 742 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
743 743 "ehci_register_intrs_and_init_mutex: "
744 744 "running in simulated polled mode");
745 745
746 746 (void) thread_create(NULL, 0, ehci_poll_intr, ehcip, 0, &p0,
747 747 TS_RUN, maxclsyspri);
748 748
749 749 goto skip_intr;
750 750 }
751 751
752 752 #if defined(__x86)
753 753 /*
754 754 * Make sure that the interrupt pin is connected to the
755 755 * interrupt controller on x86. Interrupt line 255 means
756 756 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43).
757 757 * If we would return failure when interrupt line equals 255, then
758 758 * high speed devices will be routed to companion host controllers.
759 759 * However, it is not necessary to return failure here, and
760 760 * o/uhci codes don't check the interrupt line either.
761 761 * But it's good to log a message here for debug purposes.
762 762 */
763 763 iline = pci_config_get8(ehcip->ehci_config_handle,
764 764 PCI_CONF_ILINE);
765 765
766 766 if (iline == 255) {
767 767 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
768 768 "ehci_register_intrs_and_init_mutex: "
769 769 "interrupt line value out of range (%d)",
770 770 iline);
771 771 }
772 772 #endif /* __x86 */
773 773
774 774 /* Get supported interrupt types */
775 775 if (ddi_intr_get_supported_types(ehcip->ehci_dip,
776 776 &intr_types) != DDI_SUCCESS) {
777 777 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
778 778 "ehci_register_intrs_and_init_mutex: "
779 779 "ddi_intr_get_supported_types failed");
780 780
781 781 return (DDI_FAILURE);
782 782 }
783 783
784 784 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
785 785 "ehci_register_intrs_and_init_mutex: "
786 786 "supported interrupt types 0x%x", intr_types);
787 787
788 788 if ((intr_types & DDI_INTR_TYPE_MSI) && ehcip->ehci_msi_enabled) {
789 789 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_MSI)
790 790 != DDI_SUCCESS) {
791 791 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
792 792 "ehci_register_intrs_and_init_mutex: MSI "
793 793 "registration failed, trying FIXED interrupt \n");
794 794 } else {
795 795 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
796 796 "ehci_register_intrs_and_init_mutex: "
797 797 "Using MSI interrupt type\n");
798 798
799 799 ehcip->ehci_intr_type = DDI_INTR_TYPE_MSI;
800 800 ehcip->ehci_flags |= EHCI_INTR;
801 801 }
802 802 }
803 803
804 804 if ((!(ehcip->ehci_flags & EHCI_INTR)) &&
805 805 (intr_types & DDI_INTR_TYPE_FIXED)) {
806 806 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_FIXED)
807 807 != DDI_SUCCESS) {
808 808 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
809 809 "ehci_register_intrs_and_init_mutex: "
810 810 "FIXED interrupt registration failed\n");
811 811
812 812 return (DDI_FAILURE);
813 813 }
814 814
815 815 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
816 816 "ehci_register_intrs_and_init_mutex: "
817 817 "Using FIXED interrupt type\n");
818 818
819 819 ehcip->ehci_intr_type = DDI_INTR_TYPE_FIXED;
820 820 ehcip->ehci_flags |= EHCI_INTR;
821 821 }
822 822
823 823 skip_intr:
824 824 /* Create prototype for advance on async schedule */
825 825 cv_init(&ehcip->ehci_async_schedule_advance_cv,
826 826 NULL, CV_DRIVER, NULL);
827 827
828 828 return (DDI_SUCCESS);
829 829 }
830 830
831 831
832 832 /*
833 833 * ehci_add_intrs:
834 834 *
835 835 * Register FIXED or MSI interrupts.
836 836 */
837 837 static int
838 838 ehci_add_intrs(ehci_state_t *ehcip, int intr_type)
839 839 {
840 840 int actual, avail, intr_size, count = 0;
841 841 int i, flag, ret;
842 842
843 843 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
844 844 "ehci_add_intrs: interrupt type 0x%x", intr_type);
845 845
846 846 /* Get number of interrupts */
847 847 ret = ddi_intr_get_nintrs(ehcip->ehci_dip, intr_type, &count);
848 848 if ((ret != DDI_SUCCESS) || (count == 0)) {
849 849 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
850 850 "ehci_add_intrs: ddi_intr_get_nintrs() failure, "
851 851 "ret: %d, count: %d", ret, count);
852 852
853 853 return (DDI_FAILURE);
854 854 }
855 855
856 856 /* Get number of available interrupts */
857 857 ret = ddi_intr_get_navail(ehcip->ehci_dip, intr_type, &avail);
858 858 if ((ret != DDI_SUCCESS) || (avail == 0)) {
859 859 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
860 860 "ehci_add_intrs: ddi_intr_get_navail() failure, "
861 861 "ret: %d, count: %d", ret, count);
862 862
863 863 return (DDI_FAILURE);
864 864 }
865 865
866 866 if (avail < count) {
867 867 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
868 868 "ehci_add_intrs: nintrs() "
869 869 "returned %d, navail returned %d\n", count, avail);
870 870 }
871 871
872 872 /* Allocate an array of interrupt handles */
873 873 intr_size = count * sizeof (ddi_intr_handle_t);
874 874 ehcip->ehci_htable = kmem_zalloc(intr_size, KM_SLEEP);
875 875
876 876 flag = (intr_type == DDI_INTR_TYPE_MSI) ?
877 877 DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;
878 878
879 879 /* call ddi_intr_alloc() */
880 880 ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable,
881 881 intr_type, 0, count, &actual, flag);
882 882
883 883 if ((ret != DDI_SUCCESS) || (actual == 0)) {
884 884 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
885 885 "ehci_add_intrs: ddi_intr_alloc() failed %d", ret);
886 886
887 887 kmem_free(ehcip->ehci_htable, intr_size);
888 888
889 889 return (DDI_FAILURE);
890 890 }
891 891
892 892 if (actual < count) {
893 893 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
894 894 "ehci_add_intrs: Requested: %d, Received: %d\n",
895 895 count, actual);
896 896
897 897 for (i = 0; i < actual; i++)
898 898 (void) ddi_intr_free(ehcip->ehci_htable[i]);
899 899
900 900 kmem_free(ehcip->ehci_htable, intr_size);
901 901
902 902 return (DDI_FAILURE);
903 903 }
904 904
905 905 ehcip->ehci_intr_cnt = actual;
906 906
907 907 if ((ret = ddi_intr_get_pri(ehcip->ehci_htable[0],
908 908 &ehcip->ehci_intr_pri)) != DDI_SUCCESS) {
909 909 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
910 910 "ehci_add_intrs: ddi_intr_get_pri() failed %d", ret);
911 911
912 912 for (i = 0; i < actual; i++)
913 913 (void) ddi_intr_free(ehcip->ehci_htable[i]);
914 914
915 915 kmem_free(ehcip->ehci_htable, intr_size);
916 916
917 917 return (DDI_FAILURE);
918 918 }
919 919
920 920 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
921 921 "ehci_add_intrs: Supported Interrupt priority 0x%x",
922 922 ehcip->ehci_intr_pri);
923 923
924 924 /* Test for high level mutex */
925 925 if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) {
926 926 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
927 927 "ehci_add_intrs: Hi level interrupt not supported");
928 928
929 929 for (i = 0; i < actual; i++)
930 930 (void) ddi_intr_free(ehcip->ehci_htable[i]);
931 931
932 932 kmem_free(ehcip->ehci_htable, intr_size);
933 933
934 934 return (DDI_FAILURE);
935 935 }
936 936
937 937 /* Initialize the mutex */
938 938 mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER,
939 939 DDI_INTR_PRI(ehcip->ehci_intr_pri));
940 940
941 941 /* Call ddi_intr_add_handler() */
942 942 for (i = 0; i < actual; i++) {
943 943 if ((ret = ddi_intr_add_handler(ehcip->ehci_htable[i],
944 944 ehci_intr, (caddr_t)ehcip,
945 945 (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
946 946 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
947 947 "ehci_add_intrs:ddi_intr_add_handler() "
948 948 "failed %d", ret);
949 949
950 950 for (i = 0; i < actual; i++)
951 951 (void) ddi_intr_free(ehcip->ehci_htable[i]);
952 952
953 953 mutex_destroy(&ehcip->ehci_int_mutex);
954 954 kmem_free(ehcip->ehci_htable, intr_size);
955 955
956 956 return (DDI_FAILURE);
957 957 }
958 958 }
959 959
960 960 if ((ret = ddi_intr_get_cap(ehcip->ehci_htable[0],
961 961 &ehcip->ehci_intr_cap)) != DDI_SUCCESS) {
962 962 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
963 963 "ehci_add_intrs: ddi_intr_get_cap() failed %d", ret);
964 964
965 965 for (i = 0; i < actual; i++) {
966 966 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
967 967 (void) ddi_intr_free(ehcip->ehci_htable[i]);
968 968 }
969 969
970 970 mutex_destroy(&ehcip->ehci_int_mutex);
971 971 kmem_free(ehcip->ehci_htable, intr_size);
972 972
973 973 return (DDI_FAILURE);
974 974 }
975 975
976 976 /* Enable all interrupts */
977 977 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
978 978 /* Call ddi_intr_block_enable() for MSI interrupts */
979 979 (void) ddi_intr_block_enable(ehcip->ehci_htable,
980 980 ehcip->ehci_intr_cnt);
981 981 } else {
982 982 /* Call ddi_intr_enable for MSI or FIXED interrupts */
983 983 for (i = 0; i < ehcip->ehci_intr_cnt; i++)
984 984 (void) ddi_intr_enable(ehcip->ehci_htable[i]);
985 985 }
986 986
987 987 return (DDI_SUCCESS);
988 988 }
989 989
990 +/*
991 + * ehci_wait_reset
992 + *
993 + * Wait the specified time for the chip to reset, with an
994 + * extended wait of ~250ms total for the HPE iLO chip.
995 + */
996 +static void
997 +ehci_wait_reset(ehci_state_t *ehcip, clock_t microsecs)
998 +{
999 + /* Wait the specified time for the reset to complete */
1000 + drv_usecwait(microsecs);
990 1001
1002 + if (ehcip->ehci_vendor_id == PCI_VENDOR_HP) {
1003 + for (int i = 10; i < 250; i += 10) {
1005 + /* Wait an additional 10ms for the reset to complete */
1005 + drv_usecwait(EHCI_RESET_TIMEWAIT);
1006 + }
1007 + }
1008 +}
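For comparison, a polled variant could bound the extra delay by checking the HCRESET bit, which the controller clears when the reset completes; the sketch below is illustrative only (ehci_wait_reset_polled() and its 25-iteration bound are hypothetical, not part of this change).

static void
ehci_wait_reset_polled(ehci_state_t *ehcip, clock_t microsecs)
{
	int i;

	for (i = 0; i < 25; i++) {
		/* HCRESET self-clears once the host controller reset is done */
		if (!(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RESET))
			break;

		drv_usecwait(microsecs);
	}
}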
1009 +
991 1010 /*
992 1011 * ehci_init_hardware
993 1012 *
994 1013 * take control from BIOS, reset EHCI host controller, and check version, etc.
995 1014 */
996 1015 int
997 1016 ehci_init_hardware(ehci_state_t *ehcip)
998 1017 {
999 1018 int revision;
1000 1019 uint16_t cmd_reg;
1001 1020 int abort_on_BIOS_take_over_failure;
1002 1021
1003 1022 /* Take control from the BIOS */
1004 1023 if (ehci_take_control(ehcip) != USB_SUCCESS) {
1005 1024
1006 1025 /* read .conf file properties */
1007 1026 abort_on_BIOS_take_over_failure =
1008 1027 ddi_prop_get_int(DDI_DEV_T_ANY,
1009 1028 ehcip->ehci_dip, DDI_PROP_DONTPASS,
1010 1029 "abort-on-BIOS-take-over-failure", 0);
1011 1030
1012 1031 if (abort_on_BIOS_take_over_failure) {
1013 1032
1014 1033 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1015 1034 "Unable to take control from BIOS.");
1016 1035
1017 1036 return (DDI_FAILURE);
1018 1037 }
1019 1038
1020 1039 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1021 1040 "Unable to take control from BIOS. Failure is ignored.");
1022 1041 }
1023 1042
1024 1043 /* set Memory Master Enable */
1025 1044 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
1026 1045 cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME);
1027 1046 pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg);
1028 1047
1029 1048 /* Reset the EHCI host controller */
1030 1049 Set_OpReg(ehci_command,
1031 1050 Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET);
1032 1051
1033 1052 /* Wait for the reset to complete (at least 10ms) */
1034 - drv_usecwait(EHCI_RESET_TIMEWAIT);
1053 + ehci_wait_reset(ehcip, EHCI_RESET_TIMEWAIT);
1035 1054
1036 1055 ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED);
1037 1056
1038 1057 /* Verify the version number */
1039 1058 revision = Get_16Cap(ehci_version);
1040 1059
1041 1060 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1042 1061 "ehci_init_hardware: Revision 0x%x", revision);
1043 1062
1044 1063 /*
1045 1064 * EHCI driver supports EHCI host controllers compliant to
1046 1065 * 0.95 and higher revisions of EHCI specifications.
1047 1066 */
1048 1067 if (revision < EHCI_REVISION_0_95) {
1049 1068
1050 1069 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1051 1070 "Revision 0x%x is not supported", revision);
1052 1071
1053 1072 return (DDI_FAILURE);
1054 1073 }
1055 1074
1056 1075 if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) {
1057 1076
1058 1077 /* Initialize the Frame list base address area */
1059 1078 if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) {
1060 1079
1061 1080 return (DDI_FAILURE);
1062 1081 }
1063 1082
1064 1083 /*
1065 1084 * For performance reasons, do not insert anything into the
1066 1085 * asynchronous list or activate the asynch list schedule until
1067 1086 * there is a valid QH.
1068 1087 */
1069 1088 ehcip->ehci_head_of_async_sched_list = NULL;
1070 1089
1071 1090 if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) &&
1072 1091 (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) {
1073 1092 /*
1074 1093 * The driver is unable to reliably stop the asynch
1075 1094 * list schedule on VIA VT6202 controllers, so we
1076 1095 * always keep a dummy QH on the list.
1077 1096 */
1078 1097 ehci_qh_t *dummy_async_qh =
1079 1098 ehci_alloc_qh(ehcip, NULL,
1080 1099 EHCI_INTERRUPT_MODE_FLAG);
1081 1100
1082 1101 Set_QH(dummy_async_qh->qh_link_ptr,
1083 1102 ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) &
1084 1103 EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH));
1085 1104
1086 1105 /* Set this QH to be the "head" of the circular list */
1087 1106 Set_QH(dummy_async_qh->qh_ctrl,
1088 1107 Get_QH(dummy_async_qh->qh_ctrl) |
1089 1108 EHCI_QH_CTRL_RECLAIM_HEAD);
1090 1109
1091 1110 Set_QH(dummy_async_qh->qh_next_qtd,
1092 1111 EHCI_QH_NEXT_QTD_PTR_VALID);
1093 1112 Set_QH(dummy_async_qh->qh_alt_next_qtd,
1094 1113 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1095 1114
1096 1115 ehcip->ehci_head_of_async_sched_list = dummy_async_qh;
1097 1116 ehcip->ehci_open_async_count++;
1098 1117 ehcip->ehci_async_req_count++;
1099 1118 }
1100 1119 }
1101 1120
1102 1121 return (DDI_SUCCESS);
1103 1122 }
1104 1123
1105 1124
1106 1125 /*
1107 1126 * ehci_init_workaround
1108 1127 *
1109 1128 * some workarounds during initializing ehci
1110 1129 */
1111 1130 int
1112 1131 ehci_init_workaround(ehci_state_t *ehcip)
1113 1132 {
1114 1133 /*
1115 1134 * Acer Labs Inc. M5273 EHCI controller does not send
1116 1135 * interrupts unless the Root hub ports are routed to the EHCI
1117 1136 * host controller; so route the ports now, before we test for
1118 1137 * the presence of SOFs interrupts.
1119 1138 * the presence of SOF interrupts.
1120 1139 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1121 1140 /* Route all Root hub ports to EHCI host controller */
1122 1141 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1123 1142 }
1124 1143
1125 1144 /*
1126 1145 * VIA chips have some issues and may not work reliably.
1127 1146 * Revisions >= 0x80 are part of a southbridge and appear
1128 1147 * to be reliable with the workaround.
1129 1148 * For revisions < 0x80: if we were bound using the class
1130 1149 * code, complain; else proceed. This allows the user to
1131 1150 * bind ehci specifically to this chip and avoid the
1132 1151 * warnings.
1133 1152 */
1134 1153 if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) {
1135 1154
1136 1155 if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) {
1137 1156
1138 1157 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1139 1158 "ehci_init_workaround: Applying VIA workarounds "
1140 1159 "for the 6212 chip.");
1141 1160
1142 1161 } else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name,
1143 1162 "pciclass,0c0320") == 0) {
1144 1163
1145 1164 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1146 1165 "Due to recently discovered incompatibilities");
1147 1166 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1148 1167 "with this USB controller, USB2.x transfer");
1149 1168 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1150 1169 "support has been disabled. This device will");
1151 1170 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1152 1171 "continue to function as a USB1.x controller.");
1153 1172 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1154 1173 "If you are interested in enabling USB2.x");
1155 1174 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1156 1175 "support please, refer to the ehci(7D) man page.");
1157 1176 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1158 1177 "Please also refer to www.sun.com/io for");
1159 1178 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1160 1179 "Solaris Ready products and to");
1161 1180 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1162 1181 "www.sun.com/bigadmin/hcl for additional");
1163 1182 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1164 1183 "compatible USB products.");
1165 1184
1166 1185 return (DDI_FAILURE);
1167 1186
1168 1187 } else if (ehci_vt62x2_workaround) {
1169 1188
1170 1189 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1171 1190 "Applying VIA workarounds");
1172 1191 }
1173 1192 }
1174 1193
1175 1194 return (DDI_SUCCESS);
1176 1195 }
1177 1196
1178 1197
1179 1198 /*
1180 1199 * ehci_init_check_status
1181 1200 *
1182 1201 * Check if EHCI host controller is running
1183 1202 */
1184 1203 int
1185 1204 ehci_init_check_status(ehci_state_t *ehcip)
1186 1205 {
1187 1206 clock_t sof_time_wait;
1188 1207
1189 1208 /*
1190 1209 * Get the number of clock ticks to wait.
1191 1210 * This is based on the maximum time it takes for a frame list rollover
1192 1211 * and maximum time wait for SOFs to begin.
1193 1212 */
1194 1213 sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) +
1195 1214 EHCI_SOF_TIMEWAIT);
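As a rough worked example, assuming the usual 1024-entry periodic frame list with 1ms frames, the rollover term alone comes to 1024 * 1000us, roughly 1.02 seconds, to which EHCI_SOF_TIMEWAIT is added before the wait below gives up on seeing SOF interrupts.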
1196 1215
1197 1216 /* Tell the ISR to broadcast ehci_async_schedule_advance_cv */
1198 1217 ehcip->ehci_flags |= EHCI_CV_INTR;
1199 1218
1200 1219 /* We need to add a delay to allow the chip time to start running */
1201 1220 (void) cv_reltimedwait(&ehcip->ehci_async_schedule_advance_cv,
1202 1221 &ehcip->ehci_int_mutex, sof_time_wait, TR_CLOCK_TICK);
1203 1222
1204 1223 /*
1205 1224 * Check EHCI host controller is running, otherwise return failure.
1206 1225 */
1207 1226 if ((ehcip->ehci_flags & EHCI_CV_INTR) ||
1208 1227 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
1209 1228
1210 1229 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1211 1230 "No SOF interrupts have been received; this USB EHCI host "
1212 1231 "controller is unusable");
1213 1232
1214 1233 /*
1215 1234 * Route all Root hub ports to Classic host
1216 1235 * controller, in case this is an unusable ALI M5273
1217 1236 * EHCI controller.
1218 1237 */
1219 1238 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1220 1239 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1221 1240 }
1222 1241
1223 1242 return (DDI_FAILURE);
1224 1243 }
1225 1244
1226 1245 return (DDI_SUCCESS);
1227 1246 }
1228 1247
1229 1248
1230 1249 /*
1231 1250 * ehci_init_ctlr:
1232 1251 *
1233 1252 * Initialize the Host Controller (HC).
1234 1253 */
1235 1254 int
1236 1255 ehci_init_ctlr(ehci_state_t *ehcip, int init_type)
1237 1256 {
1238 1257 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:");
1239 1258
1240 1259 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1241 1260
1242 1261 if (ehci_init_hardware(ehcip) != DDI_SUCCESS) {
1243 1262
1244 1263 return (DDI_FAILURE);
1245 1264 }
1246 1265 }
1247 1266
1248 1267 /*
1249 1268 * Check for Asynchronous schedule park capability feature. If this
1250 1269 * feature is supported, then program the ehci command register with
1251 1270 * the appropriate values.
1252 1271 */
1253 1272 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) {
1254 1273
1255 1274 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1256 1275 "ehci_init_ctlr: Async park mode is supported");
1257 1276
1258 1277 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1259 1278 (EHCI_CMD_ASYNC_PARK_ENABLE |
1260 1279 EHCI_CMD_ASYNC_PARK_COUNT_3)));
1261 1280 }
1262 1281
1263 1282 /*
1264 1283 * Check for programmable periodic frame list feature. If this
1265 1284 * feature is supported, then program the ehci command register with
1266 1285 * 1024 frame list value.
1267 1286 */
1268 1287 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) {
1269 1288
1270 1289 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1271 1290 "ehci_init_ctlr: Variable programmable periodic "
1272 1291 "frame list is supported");
1273 1292
1274 1293 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1275 1294 EHCI_CMD_FRAME_1024_SIZE));
1276 1295 }
1277 1296
1278 1297 /*
1279 1298 * Currently EHCI driver doesn't support 64 bit addressing.
1280 1299 *
1281 1300 * If we are using 64 bit addressing capability, then, program
1282 1301 * ehci_ctrl_segment register with 4 Gigabyte segment where all
1283 1302 * of the interface data structures are allocated.
1284 1303 */
1285 1304 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) {
1286 1305
1287 1306 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1288 1307 "ehci_init_ctlr: EHCI driver doesn't support "
1289 1308 "64 bit addressing");
1290 1309 }
1291 1310
1292 1311 /* 64 bit addressing is not supported */
1293 1312 Set_OpReg(ehci_ctrl_segment, 0x00000000);
1294 1313
1295 1314 /* Turn on/off the schedulers */
1296 1315 ehci_toggle_scheduler(ehcip);
1297 1316
1298 1317 /* Set host controller soft state to operational */
1299 1318 ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE;
1300 1319
1301 1320 /*
1302 1321 * Set the Periodic Frame List Base Address register with the
1303 1322 * starting physical address of the Periodic Frame List.
1304 1323 */
1305 1324 Set_OpReg(ehci_periodic_list_base,
1306 1325 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
1307 1326 EHCI_PERIODIC_LIST_BASE));
1308 1327
1309 1328 /*
1310 1329 * Set ehci_interrupt to enable all interrupts except Root
1311 1330 * Hub Status change interrupt.
1312 1331 */
1313 1332 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
1314 1333 EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR |
1315 1334 EHCI_INTR_USB);
1316 1335
1317 1336 /*
1318 1337 * Set the desired interrupt threshold and turn on EHCI host controller.
1319 1338 */
1320 1339 Set_OpReg(ehci_command,
1321 1340 ((Get_OpReg(ehci_command) & ~EHCI_CMD_INTR_THRESHOLD) |
1322 1341 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
1323 1342
1324 1343 ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN);
1325 1344
1326 1345 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1327 1346
1328 1347 if (ehci_init_workaround(ehcip) != DDI_SUCCESS) {
1329 1348
1330 1349 /* Set host controller soft state to error */
1331 1350 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1332 1351
1333 1352 return (DDI_FAILURE);
1334 1353 }
1335 1354
1336 1355 if (ehci_init_check_status(ehcip) != DDI_SUCCESS) {
1337 1356
1338 1357 /* Set host controller soft state to error */
1339 1358 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1340 1359
1341 1360 return (DDI_FAILURE);
1342 1361 }
1343 1362
1344 1363 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1345 1364 "ehci_init_ctlr: SOF's have started");
1346 1365 }
1347 1366
1348 1367 /* Route all Root hub ports to EHCI host controller */
1349 1368 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1350 1369
1351 1370 return (DDI_SUCCESS);
1352 1371 }
1353 1372
1354 1373 /*
1355 1374 * ehci_take_control:
1356 1375 *
1357 1376 * Handshake to take EHCI control from BIOS if necessary. It's only valid for
1358 1377 * x86 machines, because sparc doesn't have a BIOS.
1359 1378 * On x86 machines, the take-control process includes
1360 1379 * o get the base address of the extended capability list
1361 1380 * o find out the capability for handoff synchronization in the list.
1362 1381 * o check if BIOS has owned the host controller.
1363 1382 * o set the OS Owned semaphore bit, ask the BIOS to release the ownership.
1364 1383 * o wait for a constant time and check if BIOS has relinquished control.
1365 1384 */
1366 1385 /* ARGSUSED */
1367 1386 static int
1368 1387 ehci_take_control(ehci_state_t *ehcip)
1369 1388 {
1370 1389 #if defined(__x86)
1371 1390 uint32_t extended_cap;
1372 1391 uint32_t extended_cap_offset;
1373 1392 uint32_t extended_cap_id;
1374 1393 uint_t retry;
1375 1394
1376 1395 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1377 1396 "ehci_take_control:");
1378 1397
1379 1398 /*
1380 1399 * According to EHCI Spec 2.2.4, get the EECP base address from the HCCPARAMS
1381 1400 * register.
1382 1401 */
1383 1402 extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
1384 1403 EHCI_HCC_EECP_SHIFT;
1385 1404
1386 1405 /*
1387 1406 * According to EHCI Spec 2.2.4, if the extended capability offset is
1388 1407 * less than 40h then it's not valid. This means we don't need to
1389 1408 * worry about BIOS handoff.
1390 1409 */
1391 1410 if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {
1392 1411
1393 1412 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1394 1413 "ehci_take_control: Hardware doesn't support legacy.");
1395 1414
1396 1415 goto success;
1397 1416 }
1398 1417
1399 1418 /*
1400 1419 * According to EHCI Spec 2.1.7, a zero offset indicates the
1401 1420 * end of the extended capability list.
1402 1421 */
1403 1422 while (extended_cap_offset) {
1404 1423
1405 1424 /* Get the extended capability value. */
1406 1425 extended_cap = pci_config_get32(ehcip->ehci_config_handle,
1407 1426 extended_cap_offset);
1408 1427
1409 1428 /*
1410 1429 * It's possible that we'll receive an invalid PCI read here due
1411 1430 * to something going wrong due to platform firmware. This has
1412 1431 * been observed in the wild depending on the version of ACPI in
1413 1432 * use. If this happens, we'll assume that the capability does
1414 1433 * not exist and that we do not need to take control from the
1415 1434 * BIOS.
1416 1435 */
1417 1436 if (extended_cap == PCI_EINVAL32) {
1418 1437 extended_cap_id = EHCI_EX_CAP_ID_RESERVED;
1419 1438 break;
1420 1439 }
1421 1440
1422 1441 /* Get the capability ID */
1423 1442 extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
1424 1443 EHCI_EX_CAP_ID_SHIFT;
1425 1444
1426 1445 /* Check if the card supports legacy */
1427 1446 if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1428 1447 break;
1429 1448 }
1430 1449
1431 1450 /* Get the offset of the next capability */
1432 1451 extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
1433 1452 EHCI_EX_CAP_NEXT_PTR_SHIFT;
1434 1453
1435 1454 }
1436 1455
1437 1456 /*
1438 1457 * Unable to find legacy support in hardware's extended capability list.
1439 1458 * This means we don't need to worry about BIOS handoff.
1440 1459 */
1441 1460 if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1442 1461
1443 1462 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1444 1463 "ehci_take_control: Hardware doesn't support legacy");
1445 1464
1446 1465 goto success;
1447 1466 }
1448 1467
1449 1468 /* Check if BIOS has owned it. */
1450 1469 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1451 1470
1452 1471 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1453 1472 "ehci_take_control: BIOS does not own EHCI");
1454 1473
1455 1474 goto success;
1456 1475 }
1457 1476
1458 1477 /*
1459 1478 * According to EHCI Spec 5.1, the OS driver initiates an ownership
1460 1479 * request by setting the OS Owned semaphore to a one. The OS
1461 1480 * waits for the BIOS Owned bit to go to a zero before attempting
1462 1481 * to use the EHCI controller. The time that OS must wait for BIOS
1463 1482 * to respond to the request for ownership is beyond the scope of
1464 1483 * this specification.
1465 1484 * It waits up to EHCI_TAKEOVER_WAIT_COUNT*EHCI_TAKEOVER_DELAY ms
1466 1485 * for BIOS to release the ownership.
1467 1486 */
1468 1487 extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
1469 1488 pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
1470 1489 extended_cap);
1471 1490
1472 1491 for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {
1473 1492
1474 1493 /* wait a special interval */
1475 1494 #ifndef __lock_lint
1476 1495 delay(drv_usectohz(EHCI_TAKEOVER_DELAY));
1477 1496 #endif
1478 1497 /* Check to see if the BIOS has released the ownership */
1479 1498 extended_cap = pci_config_get32(
1480 1499 ehcip->ehci_config_handle, extended_cap_offset);
1481 1500
1482 1501 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1483 1502
1484 1503 USB_DPRINTF_L3(PRINT_MASK_ATTA,
1485 1504 ehcip->ehci_log_hdl,
1486 1505 "ehci_take_control: BIOS has released "
1487 1506 "the ownership. retry = %d", retry);
1488 1507
1489 1508 goto success;
1490 1509 }
1491 1510
1492 1511 }
1493 1512
1494 1513 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1495 1514 "ehci_take_control: take control from BIOS failed.");
1496 1515
1497 1516 return (USB_FAILURE);
1498 1517
1499 1518 success:
1500 1519
1501 1520 #endif /* __x86 */
1502 1521 return (USB_SUCCESS);
1503 1522 }
1504 1523
1505 1524
1506 1525 /*
1507 1526 * ehci_init_periodic_frame_lst_table:
1508 1527 *
1509 1528 * Allocate the system memory and initialize Host Controller
1510 1529 * Periodic Frame List table area. The start of the Periodic
1511 1530 * Frame List Table area must be 4096-byte aligned.
1512 1531 */
1513 1532 static int
1514 1533 ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
1515 1534 {
1516 1535 ddi_device_acc_attr_t dev_attr;
1517 1536 size_t real_length;
1518 1537 uint_t ccount;
1519 1538 int result;
1520 1539
1521 1540 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1522 1541
1523 1542 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1524 1543 "ehci_init_periodic_frame_lst_table:");
1525 1544
1526 1545 /* The host controller will be little endian */
1527 1546 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1528 1547 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1529 1548 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1530 1549
1531 1550 /* Force the required 4K restrictive alignment */
1532 1551 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;
1533 1552
1534 1553 /* Create space for the Periodic Frame List */
1535 1554 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
1536 1555 DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {
1537 1556
1538 1557 goto failure;
1539 1558 }
1540 1559
1541 1560 if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
1542 1561 sizeof (ehci_periodic_frame_list_t),
1543 1562 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
1544 1563 0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
1545 1564 &real_length, &ehcip->ehci_pflt_mem_handle)) {
1546 1565
1547 1566 goto failure;
1548 1567 }
1549 1568
1550 1569 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1551 1570 "ehci_init_periodic_frame_lst_table: "
1552 1571 "Real length %lu", real_length);
1553 1572
1554 1573 /* Map the whole Periodic Frame List into the I/O address space */
1555 1574 result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
1556 1575 NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
1557 1576 real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1558 1577 DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);
1559 1578
1560 1579 if (result == DDI_DMA_MAPPED) {
1561 1580 /* The cookie count should be 1 */
1562 1581 if (ccount != 1) {
1563 1582 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1564 1583 "ehci_init_periodic_frame_lst_table: "
1565 1584 "More than 1 cookie");
1566 1585
1567 1586 goto failure;
1568 1587 }
1569 1588 } else {
1570 1589 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
1571 1590
1572 1591 goto failure;
1573 1592 }
1574 1593
1575 1594 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1576 1595 "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
1577 1596 (void *)ehcip->ehci_periodic_frame_list_tablep,
1578 1597 ehcip->ehci_pflt_cookie.dmac_address);
1579 1598
1580 1599 /*
1581 1600 * DMA addresses for Periodic Frame List are bound.
1582 1601 */
1583 1602 ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;
1584 1603
1585 1604 bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);
1586 1605
1587 1606 /* Initialize the Periodic Frame List */
1588 1607 ehci_build_interrupt_lattice(ehcip);
1589 1608
1590 1609 /* Reset Byte Alignment to Default */
1591 1610 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1592 1611
1593 1612 return (DDI_SUCCESS);
1594 1613 failure:
1595 1614 /* Byte alignment */
1596 1615 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1597 1616
1598 1617 return (DDI_FAILURE);
1599 1618 }
1600 1619
1601 1620
1602 1621 /*
1603 1622 * ehci_build_interrupt_lattice:
1604 1623 *
1605 1624 * Construct the interrupt lattice tree using static Endpoint Descriptors
1606 1625 * (QH). This interrupt lattice tree will have total of 32 interrupt QH
1607 1626 * lists and the Host Controller (HC) processes one interrupt QH list in
1608 1627 * every frame. The Host Controller traverses the periodic schedule by
1609 1628 * constructing an array offset reference from the Periodic List Base Address
1610 1629 * register and bits 12 to 3 of Frame Index register. It fetches the element
1611 1630 * and begins traversing the graph of linked schedule data structures.
1612 1631 */
1613 1632 static void
1614 1633 ehci_build_interrupt_lattice(ehci_state_t *ehcip)
1615 1634 {
1616 1635 ehci_qh_t *list_array = ehcip->ehci_qh_pool_addr;
1617 1636 ushort_t ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS];
1618 1637 ehci_periodic_frame_list_t *periodic_frame_list =
1619 1638 ehcip->ehci_periodic_frame_list_tablep;
1620 1639 ushort_t *temp, num_of_nodes;
1621 1640 uintptr_t addr;
1622 1641 int i, j, k;
1623 1642
1624 1643 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1625 1644 "ehci_build_interrupt_lattice:");
1626 1645
1627 1646 /*
1628 1647 * Reserve the first 63 Endpoint Descriptor (QH) structures
1629 1648 	 * in the pool as static endpoints; these are required for
1630 1649 	 * constructing the interrupt lattice tree.
1631 1650 */
1632 1651 for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) {
1633 1652 Set_QH(list_array[i].qh_state, EHCI_QH_STATIC);
1634 1653 Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED);
1635 1654 Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID);
1636 1655 Set_QH(list_array[i].qh_alt_next_qtd,
1637 1656 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1638 1657 }
1639 1658
1640 1659 /*
1641 1660 	 * Make sure that the last Endpoint on the periodic frame list terminates
1642 1661 	 * the periodic schedule.
1643 1662 */
1644 1663 Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
1645 1664
1646 1665 /* Build the interrupt lattice tree */
1647 1666 for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) {
1648 1667 /*
1649 1668 * The next pointer in the host controller endpoint
1650 1669 * descriptor must contain an iommu address. Calculate
1651 1670 * the offset into the cpu address and add this to the
1652 1671 * starting iommu address.
1653 1672 */
1654 1673 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]);
1655 1674
1656 1675 Set_QH(list_array[2*i + 1].qh_link_ptr,
1657 1676 addr | EHCI_QH_LINK_REF_QH);
1658 1677 Set_QH(list_array[2*i + 2].qh_link_ptr,
1659 1678 addr | EHCI_QH_LINK_REF_QH);
1660 1679 }
1661 1680
1662 1681 /* Build the tree bottom */
1663 1682 temp = (unsigned short *)
1664 1683 kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP);
1665 1684
1666 1685 num_of_nodes = 1;
1667 1686
1668 1687 /*
1669 1688 * Initialize the values which are used for setting up head pointers
1670 1689 	 * for the 32ms scheduling lists, which start from the Periodic Frame
1671 1690 * List.
1672 1691 */
1673 1692 for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) {
1674 1693 for (j = 0, k = 0; k < num_of_nodes; k++, j++) {
1675 1694 ehci_index[j++] = temp[k];
1676 1695 ehci_index[j] = temp[k] + ehci_pow_2(i);
1677 1696 }
1678 1697
1679 1698 num_of_nodes *= 2;
1680 1699 for (k = 0; k < num_of_nodes; k++)
1681 1700 temp[k] = ehci_index[k];
1682 1701 }
1683 1702
1684 1703 kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2));
1685 1704
1686 1705 /*
1687 1706 * Initialize the interrupt list in the Periodic Frame List Table
1688 1707 * so that it points to the bottom of the tree.
1689 1708 */
1690 1709 for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) {
1691 1710 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)
1692 1711 (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1]));
1693 1712
1694 1713 ASSERT(addr);
1695 1714
1696 1715 for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) {
1697 1716 Set_PFLT(periodic_frame_list->
1698 1717 ehci_periodic_frame_list_table[ehci_index[j++]],
1699 1718 (uint32_t)(addr | EHCI_QH_LINK_REF_QH));
1700 1719 }
1701 1720 }
1702 1721 }
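
To make the traversal described above concrete, here is a minimal standalone sketch, assuming a 1024-entry frame list and 32 interrupt lists; the constant names and the sample FRINDEX value are illustrative stand-ins for the driver's macros, and the driver additionally permutes the 32 slots through its ehci_index[] table.

#include <stdio.h>

#define PFL_ENTRIES	1024	/* stand-in for EHCI_NUM_PERIODIC_FRAME_LISTS */
#define INTR_LISTS	32	/* stand-in for EHCI_NUM_INTR_QH_LISTS */

/* Bits 12:3 of FRINDEX give the frame number; bits 2:0 give the micro-frame. */
static unsigned int
pfl_entry_from_frindex(unsigned int frindex)
{
	return ((frindex >> 3) & (PFL_ENTRIES - 1));
}

/*
 * Each of the 32 interrupt QH lists is reached once every 32 frames
 * (the driver orders the slots via its ehci_index[] permutation).
 */
static unsigned int
intr_list_from_frame(unsigned int frame)
{
	return (frame % INTR_LISTS);
}

int
main(void)
{
	unsigned int frindex = 0x1a3c;	/* arbitrary sample register value */
	unsigned int frame = pfl_entry_from_frindex(frindex);

	printf("FRINDEX 0x%x -> frame list entry %u -> 32ms list %u\n",
	    frindex, frame, intr_list_from_frame(frame));
	return (0);
}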
1703 1722
1704 1723
1705 1724 /*
1706 1725 * ehci_alloc_hcdi_ops:
1707 1726 *
1708 1727 * The HCDI interfaces or entry points are the software interfaces used by
1709 1728 * the Universal Serial Bus Driver (USBA) to access the services of the
1710 1729 * Host Controller Driver (HCD). During HCD initialization, inform USBA
1711 1730 * about all available HCDI interfaces or entry points.
1712 1731 */
1713 1732 usba_hcdi_ops_t *
1714 1733 ehci_alloc_hcdi_ops(ehci_state_t *ehcip)
1715 1734 {
1716 1735 usba_hcdi_ops_t *usba_hcdi_ops;
1717 1736
1718 1737 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1719 1738 "ehci_alloc_hcdi_ops:");
1720 1739
1721 1740 usba_hcdi_ops = usba_alloc_hcdi_ops();
1722 1741
1723 1742 usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;
1724 1743
1725 1744 usba_hcdi_ops->usba_hcdi_pm_support = ehci_hcdi_pm_support;
1726 1745 usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
1727 1746 usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;
1728 1747
1729 1748 usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;
1730 1749 usba_hcdi_ops->usba_hcdi_pipe_reset_data_toggle =
1731 1750 ehci_hcdi_pipe_reset_data_toggle;
1732 1751
1733 1752 usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
1734 1753 usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
1735 1754 usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
1736 1755 usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;
1737 1756
1738 1757 usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
1739 1758 ehci_hcdi_bulk_transfer_size;
1740 1759
1741 1760 usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
1742 1761 ehci_hcdi_pipe_stop_intr_polling;
1743 1762 usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
1744 1763 ehci_hcdi_pipe_stop_isoc_polling;
1745 1764
1746 1765 usba_hcdi_ops->usba_hcdi_get_current_frame_number =
1747 1766 ehci_hcdi_get_current_frame_number;
1748 1767 usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
1749 1768 ehci_hcdi_get_max_isoc_pkts;
1750 1769
1751 1770 usba_hcdi_ops->usba_hcdi_console_input_init =
1752 1771 ehci_hcdi_polled_input_init;
1753 1772 usba_hcdi_ops->usba_hcdi_console_input_enter =
1754 1773 ehci_hcdi_polled_input_enter;
1755 1774 usba_hcdi_ops->usba_hcdi_console_read =
1756 1775 ehci_hcdi_polled_read;
1757 1776 usba_hcdi_ops->usba_hcdi_console_input_exit =
1758 1777 ehci_hcdi_polled_input_exit;
1759 1778 usba_hcdi_ops->usba_hcdi_console_input_fini =
1760 1779 ehci_hcdi_polled_input_fini;
1761 1780
1762 1781 usba_hcdi_ops->usba_hcdi_console_output_init =
1763 1782 ehci_hcdi_polled_output_init;
1764 1783 usba_hcdi_ops->usba_hcdi_console_output_enter =
1765 1784 ehci_hcdi_polled_output_enter;
1766 1785 usba_hcdi_ops->usba_hcdi_console_write =
1767 1786 ehci_hcdi_polled_write;
1768 1787 usba_hcdi_ops->usba_hcdi_console_output_exit =
1769 1788 ehci_hcdi_polled_output_exit;
1770 1789 usba_hcdi_ops->usba_hcdi_console_output_fini =
1771 1790 ehci_hcdi_polled_output_fini;
1772 1791 return (usba_hcdi_ops);
1773 1792 }
1774 1793
1775 1794
1776 1795 /*
1777 1796 * Host Controller Driver (HCD) deinitialization functions
1778 1797 */
1779 1798
1780 1799 /*
1781 1800 * ehci_cleanup:
1782 1801 *
1783 1802 * Cleanup on attach failure or detach
1784 1803 */
1785 1804 int
1786 1805 ehci_cleanup(ehci_state_t *ehcip)
1787 1806 {
1788 1807 ehci_trans_wrapper_t *tw;
1789 1808 ehci_pipe_private_t *pp;
1790 1809 ehci_qtd_t *qtd;
1791 1810 int i, ctrl, rval;
1792 1811 int flags = ehcip->ehci_flags;
1793 1812
1794 1813 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");
1795 1814
1796 1815 if (flags & EHCI_RHREG) {
1797 1816 /* Unload the root hub driver */
1798 1817 if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {
1799 1818
1800 1819 return (DDI_FAILURE);
1801 1820 }
1802 1821 }
1803 1822
1804 1823 if (flags & EHCI_USBAREG) {
1805 1824 /* Unregister this HCD instance with USBA */
1806 1825 usba_hcdi_unregister(ehcip->ehci_dip);
1807 1826 }
1808 1827
1809 1828 if (flags & EHCI_INTR) {
1810 1829
1811 1830 mutex_enter(&ehcip->ehci_int_mutex);
1812 1831
1813 1832 /* Disable all EHCI QH list processing */
1814 1833 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
1815 1834 ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
1816 1835 EHCI_CMD_PERIODIC_SCHED_ENABLE)));
1817 1836
1818 1837 /* Disable all EHCI interrupts */
1819 1838 Set_OpReg(ehci_interrupt, 0);
1820 1839
1821 1840 /* wait for the next SOF */
1822 1841 (void) ehci_wait_for_sof(ehcip);
1823 1842
1824 1843 /* Route all Root hub ports to Classic host controller */
1825 1844 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1826 1845
1827 1846 /* Stop the EHCI host controller */
1828 1847 Set_OpReg(ehci_command,
1829 1848 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
1830 1849
1831 1850 mutex_exit(&ehcip->ehci_int_mutex);
1832 1851
1833 1852 	/* Wait for some time */
1834 1853 delay(drv_usectohz(EHCI_TIMEWAIT));
1835 1854
1836 1855 ehci_rem_intrs(ehcip);
1837 1856 }
1838 1857
1839 1858 /* Unmap the EHCI registers */
1840 1859 if (ehcip->ehci_caps_handle) {
1841 1860 ddi_regs_map_free(&ehcip->ehci_caps_handle);
1842 1861 }
1843 1862
1844 1863 if (ehcip->ehci_config_handle) {
1845 1864 pci_config_teardown(&ehcip->ehci_config_handle);
1846 1865 }
1847 1866
1848 1867 /* Free all the buffers */
1849 1868 if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
1850 1869 for (i = 0; i < ehci_qtd_pool_size; i ++) {
1851 1870 qtd = &ehcip->ehci_qtd_pool_addr[i];
1852 1871 ctrl = Get_QTD(ehcip->
1853 1872 ehci_qtd_pool_addr[i].qtd_state);
1854 1873
1855 1874 if ((ctrl != EHCI_QTD_FREE) &&
1856 1875 (ctrl != EHCI_QTD_DUMMY) &&
1857 1876 (qtd->qtd_trans_wrapper)) {
1858 1877
1859 1878 mutex_enter(&ehcip->ehci_int_mutex);
1860 1879
1861 1880 tw = (ehci_trans_wrapper_t *)
1862 1881 EHCI_LOOKUP_ID((uint32_t)
1863 1882 Get_QTD(qtd->qtd_trans_wrapper));
1864 1883
1865 1884 /* Obtain the pipe private structure */
1866 1885 pp = tw->tw_pipe_private;
1867 1886
1868 1887 				/* Stop the transfer timer */
1869 1888 ehci_stop_xfer_timer(ehcip, tw,
1870 1889 EHCI_REMOVE_XFER_ALWAYS);
1871 1890
1872 1891 ehci_deallocate_tw(ehcip, pp, tw);
1873 1892
1874 1893 mutex_exit(&ehcip->ehci_int_mutex);
1875 1894 }
1876 1895 }
1877 1896
1878 1897 /*
1879 1898 * If EHCI_QTD_POOL_BOUND flag is set, then unbind
1880 1899 * the handle for QTD pools.
1881 1900 */
1882 1901 if ((ehcip->ehci_dma_addr_bind_flag &
1883 1902 EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {
1884 1903
1885 1904 rval = ddi_dma_unbind_handle(
1886 1905 ehcip->ehci_qtd_pool_dma_handle);
1887 1906
1888 1907 ASSERT(rval == DDI_SUCCESS);
1889 1908 }
1890 1909 ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
1891 1910 }
1892 1911
1893 1912 /* Free the QTD pool */
1894 1913 if (ehcip->ehci_qtd_pool_dma_handle) {
1895 1914 ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
1896 1915 }
1897 1916
1898 1917 if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
1899 1918 /*
1900 1919 * If EHCI_QH_POOL_BOUND flag is set, then unbind
1901 1920 * the handle for QH pools.
1902 1921 */
1903 1922 if ((ehcip->ehci_dma_addr_bind_flag &
1904 1923 EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {
1905 1924
1906 1925 rval = ddi_dma_unbind_handle(
1907 1926 ehcip->ehci_qh_pool_dma_handle);
1908 1927
1909 1928 ASSERT(rval == DDI_SUCCESS);
1910 1929 }
1911 1930
1912 1931 ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
1913 1932 }
1914 1933
1915 1934 /* Free the QH pool */
1916 1935 if (ehcip->ehci_qh_pool_dma_handle) {
1917 1936 ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
1918 1937 }
1919 1938
1920 1939 /* Free the Periodic frame list table (PFLT) area */
1921 1940 if (ehcip->ehci_periodic_frame_list_tablep &&
1922 1941 ehcip->ehci_pflt_mem_handle) {
1923 1942 /*
1924 1943 * If EHCI_PFLT_DMA_BOUND flag is set, then unbind
1925 1944 * the handle for PFLT.
1926 1945 */
1927 1946 if ((ehcip->ehci_dma_addr_bind_flag &
1928 1947 EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {
1929 1948
1930 1949 rval = ddi_dma_unbind_handle(
1931 1950 ehcip->ehci_pflt_dma_handle);
1932 1951
1933 1952 ASSERT(rval == DDI_SUCCESS);
1934 1953 }
1935 1954
1936 1955 ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
1937 1956 }
1938 1957
1939 1958 (void) ehci_isoc_cleanup(ehcip);
1940 1959
1941 1960 if (ehcip->ehci_pflt_dma_handle) {
1942 1961 ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
1943 1962 }
1944 1963
1945 1964 if (flags & EHCI_INTR) {
1946 1965 /* Destroy the mutex */
1947 1966 mutex_destroy(&ehcip->ehci_int_mutex);
1948 1967
1949 1968 /* Destroy the async schedule advance condition variable */
1950 1969 cv_destroy(&ehcip->ehci_async_schedule_advance_cv);
1951 1970 }
1952 1971
1953 1972 /* clean up kstat structs */
1954 1973 ehci_destroy_stats(ehcip);
1955 1974
1956 1975 /* Free ehci hcdi ops */
1957 1976 if (ehcip->ehci_hcdi_ops) {
1958 1977 usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
1959 1978 }
1960 1979
1961 1980 if (flags & EHCI_ZALLOC) {
1962 1981
1963 1982 usb_free_log_hdl(ehcip->ehci_log_hdl);
1964 1983
1965 1984 /* Remove all properties that might have been created */
1966 1985 ddi_prop_remove_all(ehcip->ehci_dip);
1967 1986
1968 1987 /* Free the soft state */
1969 1988 ddi_soft_state_free(ehci_statep,
1970 1989 ddi_get_instance(ehcip->ehci_dip));
1971 1990 }
1972 1991
1973 1992 return (DDI_SUCCESS);
1974 1993 }
1975 1994
1976 1995
1977 1996 /*
1978 1997 * ehci_rem_intrs:
1979 1998 *
1980 1999 * Unregister FIXED or MSI interrupts
1981 2000 */
1982 2001 static void
1983 2002 ehci_rem_intrs(ehci_state_t *ehcip)
1984 2003 {
1985 2004 int i;
1986 2005
1987 2006 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1988 2007 "ehci_rem_intrs: interrupt type 0x%x", ehcip->ehci_intr_type);
1989 2008
1990 2009 /* Disable all interrupts */
1991 2010 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
1992 2011 (void) ddi_intr_block_disable(ehcip->ehci_htable,
1993 2012 ehcip->ehci_intr_cnt);
1994 2013 } else {
1995 2014 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
1996 2015 (void) ddi_intr_disable(ehcip->ehci_htable[i]);
1997 2016 }
1998 2017 }
1999 2018
2000 2019 /* Call ddi_intr_remove_handler() */
2001 2020 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
2002 2021 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
2003 2022 (void) ddi_intr_free(ehcip->ehci_htable[i]);
2004 2023 }
2005 2024
2006 2025 kmem_free(ehcip->ehci_htable,
2007 2026 ehcip->ehci_intr_cnt * sizeof (ddi_intr_handle_t));
2008 2027 }
2009 2028
2010 2029
2011 2030 /*
2012 2031 * ehci_cpr_suspend
2013 2032 */
2014 2033 int
2015 2034 ehci_cpr_suspend(ehci_state_t *ehcip)
2016 2035 {
2017 2036 int i;
2018 2037
2019 2038 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2020 2039 "ehci_cpr_suspend:");
2021 2040
2022 2041 /* Call into the root hub and suspend it */
2023 2042 if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {
2024 2043
2025 2044 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2026 2045 "ehci_cpr_suspend: root hub fails to suspend");
2027 2046
2028 2047 return (DDI_FAILURE);
2029 2048 }
2030 2049
2031 2050 /* Only root hub's intr pipe should be open at this time */
2032 2051 mutex_enter(&ehcip->ehci_int_mutex);
2033 2052
2034 2053 ASSERT(ehcip->ehci_open_pipe_count == 0);
2035 2054
2036 2055 /* Just wait till all resources are reclaimed */
2037 2056 i = 0;
2038 2057 while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
2039 2058 ehci_handle_endpoint_reclaimation(ehcip);
2040 2059 (void) ehci_wait_for_sof(ehcip);
2041 2060 }
2042 2061 ASSERT(ehcip->ehci_reclaim_list == NULL);
2043 2062
2044 2063 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2045 2064 "ehci_cpr_suspend: Disable HC QH list processing");
2046 2065
2047 2066 /* Disable all EHCI QH list processing */
2048 2067 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
2049 2068 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));
2050 2069
2051 2070 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2052 2071 "ehci_cpr_suspend: Disable HC interrupts");
2053 2072
2054 2073 /* Disable all EHCI interrupts */
2055 2074 Set_OpReg(ehci_interrupt, 0);
2056 2075
2057 2076 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2058 2077 "ehci_cpr_suspend: Wait for the next SOF");
2059 2078
2060 2079 /* Wait for the next SOF */
2061 2080 if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {
2062 2081
2063 2082 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2064 2083 "ehci_cpr_suspend: ehci host controller suspend failed");
2065 2084
2066 2085 mutex_exit(&ehcip->ehci_int_mutex);
2067 2086 return (DDI_FAILURE);
2068 2087 }
2069 2088
2070 2089 /*
2071 2090 	 * Stop the ehci host controller if no usb keyboard
2072 2091 	 * is connected, or if force_ehci_off is set.
2073 2092 */
2074 2093 if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) {
2075 2094 Set_OpReg(ehci_command,
2076 2095 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
2077 2096
2078 2097 }
2079 2098
2080 2099 /* Set host controller soft state to suspend */
2081 2100 ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;
2082 2101
2083 2102 mutex_exit(&ehcip->ehci_int_mutex);
2084 2103
2085 2104 return (DDI_SUCCESS);
2086 2105 }
2087 2106
2088 2107
2089 2108 /*
2090 2109 * ehci_cpr_resume
2091 2110 */
2092 2111 int
2093 2112 ehci_cpr_resume(ehci_state_t *ehcip)
2094 2113 {
2095 2114 mutex_enter(&ehcip->ehci_int_mutex);
2096 2115
2097 2116 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2098 2117 "ehci_cpr_resume: Restart the controller");
2099 2118
2100 2119 /* Cleanup ehci specific information across cpr */
2101 2120 ehci_cpr_cleanup(ehcip);
2102 2121
2103 2122 /* Restart the controller */
2104 2123 if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) {
2105 2124
2106 2125 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2107 2126 "ehci_cpr_resume: ehci host controller resume failed ");
2108 2127
2109 2128 mutex_exit(&ehcip->ehci_int_mutex);
2110 2129
2111 2130 return (DDI_FAILURE);
2112 2131 }
2113 2132
2114 2133 mutex_exit(&ehcip->ehci_int_mutex);
2115 2134
2116 2135 /* Now resume the root hub */
2117 2136 if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {
2118 2137
2119 2138 return (DDI_FAILURE);
2120 2139 }
2121 2140
2122 2141 return (DDI_SUCCESS);
2123 2142 }
2124 2143
2125 2144
2126 2145 /*
2127 2146 * Bandwidth Allocation functions
2128 2147 */
2129 2148
2130 2149 /*
2131 2150 * ehci_allocate_bandwidth:
2132 2151 *
2133 2152 * Figure out whether or not this interval may be supported. Return the index
2134 2153 * into the lattice if it can be supported. Return allocation failure if it
2135 2154 * can not be supported.
2136 2155 */
2137 2156 int
2138 2157 ehci_allocate_bandwidth(
2139 2158 ehci_state_t *ehcip,
2140 2159 usba_pipe_handle_data_t *ph,
2141 2160 uint_t *pnode,
2142 2161 uchar_t *smask,
2143 2162 uchar_t *cmask)
2144 2163 {
2145 2164 int error = USB_SUCCESS;
2146 2165
2147 2166 /* This routine is protected by the ehci_int_mutex */
2148 2167 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2149 2168
2150 2169 /* Reset the pnode to the last checked pnode */
2151 2170 *pnode = 0;
2152 2171
2153 2172 /* Allocate high speed bandwidth */
2154 2173 if ((error = ehci_allocate_high_speed_bandwidth(ehcip,
2155 2174 ph, pnode, smask, cmask)) != USB_SUCCESS) {
2156 2175
2157 2176 return (error);
2158 2177 }
2159 2178
2160 2179 /*
2161 2180 * For low/full speed usb devices, allocate classic TT bandwidth
2162 2181 	 * in addition to high speed bandwidth.
2163 2182 */
2164 2183 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2165 2184
2166 2185 /* Allocate classic TT bandwidth */
2167 2186 if ((error = ehci_allocate_classic_tt_bandwidth(
2168 2187 ehcip, ph, *pnode)) != USB_SUCCESS) {
2169 2188
2170 2189 /* Deallocate high speed bandwidth */
2171 2190 ehci_deallocate_high_speed_bandwidth(
2172 2191 ehcip, ph, *pnode, *smask, *cmask);
2173 2192 }
2174 2193 }
2175 2194
2176 2195 return (error);
2177 2196 }
2178 2197
2179 2198
2180 2199 /*
2181 2200 * ehci_allocate_high_speed_bandwidth:
2182 2201 *
2183 2202 * Allocate high speed bandwidth for the low/full/high speed interrupt and
2184 2203 * isochronous endpoints.
2185 2204 */
2186 2205 static int
2187 2206 ehci_allocate_high_speed_bandwidth(
2188 2207 ehci_state_t *ehcip,
2189 2208 usba_pipe_handle_data_t *ph,
2190 2209 uint_t *pnode,
2191 2210 uchar_t *smask,
2192 2211 uchar_t *cmask)
2193 2212 {
2194 2213 uint_t sbandwidth, cbandwidth;
2195 2214 int interval;
2196 2215 usb_ep_descr_t *endpoint = &ph->p_ep;
2197 2216 usba_device_t *child_ud;
2198 2217 usb_port_status_t port_status;
2199 2218 int error;
2200 2219
2201 2220 /* This routine is protected by the ehci_int_mutex */
2202 2221 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2203 2222
2204 2223 /* Get child's usba device structure */
2205 2224 child_ud = ph->p_usba_device;
2206 2225
2207 2226 mutex_enter(&child_ud->usb_mutex);
2208 2227
2209 2228 /* Get the current usb device's port status */
2210 2229 port_status = ph->p_usba_device->usb_port_status;
2211 2230
2212 2231 mutex_exit(&child_ud->usb_mutex);
2213 2232
2214 2233 /*
2215 2234 * Calculate the length in bytes of a transaction on this
2216 2235 * periodic endpoint. Return failure if maximum packet is
2217 2236 * zero.
2218 2237 */
2219 2238 error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2220 2239 port_status, &sbandwidth, &cbandwidth);
2221 2240 if (error != USB_SUCCESS) {
2222 2241
2223 2242 return (error);
2224 2243 }
2225 2244
2226 2245 /*
2227 2246 * Adjust polling interval to be a power of 2.
2228 2247 * If this interval can't be supported, return
2229 2248 * allocation failure.
2230 2249 */
2231 2250 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2232 2251 if (interval == USB_FAILURE) {
2233 2252
2234 2253 return (USB_FAILURE);
2235 2254 }
2236 2255
2237 2256 if (port_status == USBA_HIGH_SPEED_DEV) {
2238 2257 /* Allocate bandwidth for high speed devices */
2239 2258 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2240 2259 USB_EP_ATTR_ISOCH) {
2241 2260 error = USB_SUCCESS;
2242 2261 } else {
2243 2262
2244 2263 error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
2245 2264 endpoint, sbandwidth, interval);
2246 2265 }
2247 2266
2248 2267 *cmask = 0x00;
2249 2268
2250 2269 } else {
2251 2270 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2252 2271 USB_EP_ATTR_INTR) {
2253 2272
2254 2273 /* Allocate bandwidth for low speed interrupt */
2255 2274 error = ehci_find_bestfit_ls_intr_mask(ehcip,
2256 2275 smask, cmask, pnode, sbandwidth, cbandwidth,
2257 2276 interval);
2258 2277 } else {
2259 2278 if ((endpoint->bEndpointAddress &
2260 2279 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2261 2280
2262 2281 /* Allocate bandwidth for sitd in */
2263 2282 error = ehci_find_bestfit_sitd_in_mask(ehcip,
2264 2283 smask, cmask, pnode, sbandwidth, cbandwidth,
2265 2284 interval);
2266 2285 } else {
2267 2286
2268 2287 /* Allocate bandwidth for sitd out */
2269 2288 error = ehci_find_bestfit_sitd_out_mask(ehcip,
2270 2289 smask, pnode, sbandwidth, interval);
2271 2290 *cmask = 0x00;
2272 2291 }
2273 2292 }
2274 2293 }
2275 2294
2276 2295 if (error != USB_SUCCESS) {
2277 2296 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2278 2297 "ehci_allocate_high_speed_bandwidth: Reached maximum "
2279 2298 "bandwidth value and cannot allocate bandwidth for a "
2280 2299 "given high-speed periodic endpoint");
2281 2300
2282 2301 return (USB_NO_BANDWIDTH);
2283 2302 }
2284 2303
2285 2304 return (error);
2286 2305 }
2287 2306
2288 2307
2289 2308 /*
2290 2309  * ehci_allocate_classic_tt_bandwidth:
2291 2310 *
2292 2311 * Allocate classic TT bandwidth for the low/full speed interrupt and
2293 2312 * isochronous endpoints.
2294 2313 */
2295 2314 static int
2296 2315 ehci_allocate_classic_tt_bandwidth(
2297 2316 ehci_state_t *ehcip,
2298 2317 usba_pipe_handle_data_t *ph,
2299 2318 uint_t pnode)
2300 2319 {
2301 2320 uint_t bandwidth, min;
2302 2321 uint_t height, leftmost, list;
2303 2322 usb_ep_descr_t *endpoint = &ph->p_ep;
2304 2323 usba_device_t *child_ud, *parent_ud;
2305 2324 usb_port_status_t port_status;
2306 2325 int i, interval;
2307 2326
2308 2327 /* This routine is protected by the ehci_int_mutex */
2309 2328 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2310 2329
2311 2330 /* Get child's usba device structure */
2312 2331 child_ud = ph->p_usba_device;
2313 2332
2314 2333 mutex_enter(&child_ud->usb_mutex);
2315 2334
2316 2335 /* Get the current usb device's port status */
2317 2336 port_status = child_ud->usb_port_status;
2318 2337
2319 2338 /* Get the parent high speed hub's usba device structure */
2320 2339 parent_ud = child_ud->usb_hs_hub_usba_dev;
2321 2340
2322 2341 mutex_exit(&child_ud->usb_mutex);
2323 2342
2324 2343 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2325 2344 "ehci_allocate_classic_tt_bandwidth: "
2326 2345 "child_ud 0x%p parent_ud 0x%p",
2327 2346 (void *)child_ud, (void *)parent_ud);
2328 2347
2329 2348 /*
2330 2349 * Calculate the length in bytes of a transaction on this
2331 2350 * periodic endpoint. Return failure if maximum packet is
2332 2351 * zero.
2333 2352 */
2334 2353 if (ehci_compute_classic_bandwidth(endpoint,
2335 2354 port_status, &bandwidth) != USB_SUCCESS) {
2336 2355
2337 2356 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2338 2357 "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
2339 2358 "with zero endpoint maximum packet size is not supported");
2340 2359
2341 2360 return (USB_NOT_SUPPORTED);
2342 2361 }
2343 2362
2344 2363 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2345 2364 "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);
2346 2365
2347 2366 mutex_enter(&parent_ud->usb_mutex);
2348 2367
2349 2368 /*
2350 2369 * If the length in bytes plus the allocated bandwidth exceeds
2351 2370 * the maximum, return bandwidth allocation failure.
2352 2371 */
2353 2372 if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
2354 2373 FS_PERIODIC_BANDWIDTH) {
2355 2374
2356 2375 mutex_exit(&parent_ud->usb_mutex);
2357 2376
2358 2377 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2359 2378 "ehci_allocate_classic_tt_bandwidth: Reached maximum "
2360 2379 "bandwidth value and cannot allocate bandwidth for a "
2361 2380 "given low/full speed periodic endpoint");
2362 2381
2363 2382 return (USB_NO_BANDWIDTH);
2364 2383 }
2365 2384
2366 2385 mutex_exit(&parent_ud->usb_mutex);
2367 2386
2368 2387 /* Adjust polling interval to be a power of 2 */
2369 2388 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2370 2389
2371 2390 /* Find the height in the tree */
2372 2391 height = ehci_lattice_height(interval);
2373 2392
2374 2393 /* Find the leftmost leaf in the subtree specified by the node. */
2375 2394 leftmost = ehci_leftmost_leaf(pnode, height);
2376 2395
2377 2396 mutex_enter(&parent_ud->usb_mutex);
2378 2397
2379 2398 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2380 2399 list = ehci_index[leftmost + i];
2381 2400
2382 2401 if ((parent_ud->usb_hs_hub_bandwidth[list] +
2383 2402 bandwidth) > FS_PERIODIC_BANDWIDTH) {
2384 2403
2385 2404 mutex_exit(&parent_ud->usb_mutex);
2386 2405
2387 2406 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2388 2407 "ehci_allocate_classic_tt_bandwidth: Reached "
2389 2408 "maximum bandwidth value and cannot allocate "
2390 2409 "bandwidth for low/full periodic endpoint");
2391 2410
2392 2411 return (USB_NO_BANDWIDTH);
2393 2412 }
2394 2413 }
2395 2414
2396 2415 /*
2397 2416 * All the leaves for this node must be updated with the bandwidth.
2398 2417 */
2399 2418 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2400 2419 list = ehci_index[leftmost + i];
2401 2420 parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
2402 2421 }
2403 2422
2404 2423 /* Find the leaf with the smallest allocated bandwidth */
2405 2424 min = parent_ud->usb_hs_hub_bandwidth[0];
2406 2425
2407 2426 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2408 2427 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2409 2428 min = parent_ud->usb_hs_hub_bandwidth[i];
2410 2429 }
2411 2430 }
2412 2431
2413 2432 /* Save the minimum for later use */
2414 2433 parent_ud->usb_hs_hub_min_bandwidth = min;
2415 2434
2416 2435 mutex_exit(&parent_ud->usb_mutex);
2417 2436
2418 2437 return (USB_SUCCESS);
2419 2438 }
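
A minimal sketch of the per-leaf accounting done above (check every touched list against the budget, commit, then recompute the minimum). The list count, the budget value, and the consecutive-leaf indexing are simplifying assumptions; the driver walks the leaves through its ehci_index[] permutation and uses FS_PERIODIC_BANDWIDTH as the limit.

#include <stdio.h>

#define NUM_LISTS	32	/* stand-in for EHCI_NUM_INTR_QH_LISTS */
#define BW_LIMIT	1157	/* assumed full-speed budget per list, in bytes */

static unsigned int tt_bw[NUM_LISTS];	/* like usb_hs_hub_bandwidth[] */
static unsigned int tt_min_bw;		/* like usb_hs_hub_min_bandwidth */

/* Reserve 'bw' bytes in every list the endpoint touches, or fail as a whole. */
static int
tt_reserve(unsigned int leftmost, unsigned int interval, unsigned int bw)
{
	unsigned int i, list, count = NUM_LISTS / interval;

	/* First pass: refuse if any touched list would exceed the budget. */
	for (i = 0; i < count; i++) {
		list = (leftmost + i) % NUM_LISTS;
		if (tt_bw[list] + bw > BW_LIMIT)
			return (-1);	/* no bandwidth */
	}

	/* Second pass: commit, then recompute the minimum across all lists. */
	for (i = 0; i < count; i++)
		tt_bw[(leftmost + i) % NUM_LISTS] += bw;

	tt_min_bw = tt_bw[0];
	for (i = 1; i < NUM_LISTS; i++) {
		if (tt_bw[i] < tt_min_bw)
			tt_min_bw = tt_bw[i];
	}

	return (0);
}

int
main(void)
{
	/* An 8ms endpoint touches 32/8 = 4 lists starting at leaf 4. */
	int rv = tt_reserve(4, 8, 46);

	printf("reserve: %d, min now %u\n", rv, tt_min_bw);
	return (0);
}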
2420 2439
2421 2440
2422 2441 /*
2423 2442 * ehci_deallocate_bandwidth:
2424 2443 *
2425 2444 * Deallocate bandwidth for the given node in the lattice and the length
2426 2445 * of transfer.
2427 2446 */
2428 2447 void
2429 2448 ehci_deallocate_bandwidth(
2430 2449 ehci_state_t *ehcip,
2431 2450 usba_pipe_handle_data_t *ph,
2432 2451 uint_t pnode,
2433 2452 uchar_t smask,
2434 2453 uchar_t cmask)
2435 2454 {
2436 2455 /* This routine is protected by the ehci_int_mutex */
2437 2456 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2438 2457
2439 2458 ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);
2440 2459
2441 2460 /*
2442 2461 * For low/full speed usb devices, deallocate classic TT bandwidth
2443 2462 	 * in addition to high speed bandwidth.
2444 2463 */
2445 2464 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2446 2465
2447 2466 /* Deallocate classic TT bandwidth */
2448 2467 ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
2449 2468 }
2450 2469 }
2451 2470
2452 2471
2453 2472 /*
2454 2473 * ehci_deallocate_high_speed_bandwidth:
2455 2474 *
2456 2475  * Deallocate high speed bandwidth of an interrupt or isochronous endpoint.
2457 2476 */
2458 2477 static void
2459 2478 ehci_deallocate_high_speed_bandwidth(
2460 2479 ehci_state_t *ehcip,
2461 2480 usba_pipe_handle_data_t *ph,
2462 2481 uint_t pnode,
2463 2482 uchar_t smask,
2464 2483 uchar_t cmask)
2465 2484 {
2466 2485 uint_t height, leftmost;
2467 2486 uint_t list_count;
2468 2487 uint_t sbandwidth, cbandwidth;
2469 2488 int interval;
2470 2489 usb_ep_descr_t *endpoint = &ph->p_ep;
2471 2490 usba_device_t *child_ud;
2472 2491 usb_port_status_t port_status;
2473 2492
2474 2493 /* This routine is protected by the ehci_int_mutex */
2475 2494 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2476 2495
2477 2496 /* Get child's usba device structure */
2478 2497 child_ud = ph->p_usba_device;
2479 2498
2480 2499 mutex_enter(&child_ud->usb_mutex);
2481 2500
2482 2501 /* Get the current usb device's port status */
2483 2502 port_status = ph->p_usba_device->usb_port_status;
2484 2503
2485 2504 mutex_exit(&child_ud->usb_mutex);
2486 2505
2487 2506 (void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2488 2507 port_status, &sbandwidth, &cbandwidth);
2489 2508
2490 2509 /* Adjust polling interval to be a power of 2 */
2491 2510 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2492 2511
2493 2512 /* Find the height in the tree */
2494 2513 height = ehci_lattice_height(interval);
2495 2514
2496 2515 /*
2497 2516 * Find the leftmost leaf in the subtree specified by the node
2498 2517 */
2499 2518 leftmost = ehci_leftmost_leaf(pnode, height);
2500 2519
2501 2520 list_count = EHCI_NUM_INTR_QH_LISTS/interval;
2502 2521
2503 2522 /* Delete the bandwidth from the appropriate lists */
2504 2523 if (port_status == USBA_HIGH_SPEED_DEV) {
2505 2524
2506 2525 ehci_update_bw_availability(ehcip, -sbandwidth,
2507 2526 leftmost, list_count, smask);
2508 2527 } else {
2509 2528 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2510 2529 USB_EP_ATTR_INTR) {
2511 2530
2512 2531 ehci_update_bw_availability(ehcip, -sbandwidth,
2513 2532 leftmost, list_count, smask);
2514 2533 ehci_update_bw_availability(ehcip, -cbandwidth,
2515 2534 leftmost, list_count, cmask);
2516 2535 } else {
2517 2536 if ((endpoint->bEndpointAddress &
2518 2537 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2519 2538
2520 2539 ehci_update_bw_availability(ehcip, -sbandwidth,
2521 2540 leftmost, list_count, smask);
2522 2541 ehci_update_bw_availability(ehcip,
2523 2542 -MAX_UFRAME_SITD_XFER, leftmost,
2524 2543 list_count, cmask);
2525 2544 } else {
2526 2545
2527 2546 ehci_update_bw_availability(ehcip,
2528 2547 -MAX_UFRAME_SITD_XFER, leftmost,
2529 2548 list_count, smask);
2530 2549 }
2531 2550 }
2532 2551 }
2533 2552 }
2534 2553
2535 2554 /*
2536 2555 * ehci_deallocate_classic_tt_bandwidth:
2537 2556 *
2538 2557  * Deallocate classic TT bandwidth of an interrupt or isochronous endpoint.
2539 2558 */
2540 2559 static void
2541 2560 ehci_deallocate_classic_tt_bandwidth(
2542 2561 ehci_state_t *ehcip,
2543 2562 usba_pipe_handle_data_t *ph,
2544 2563 uint_t pnode)
2545 2564 {
2546 2565 uint_t bandwidth, height, leftmost, list, min;
2547 2566 int i, interval;
2548 2567 usb_ep_descr_t *endpoint = &ph->p_ep;
2549 2568 usba_device_t *child_ud, *parent_ud;
2550 2569 usb_port_status_t port_status;
2551 2570
2552 2571 /* This routine is protected by the ehci_int_mutex */
2553 2572 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2554 2573
2555 2574 /* Get child's usba device structure */
2556 2575 child_ud = ph->p_usba_device;
2557 2576
2558 2577 mutex_enter(&child_ud->usb_mutex);
2559 2578
2560 2579 /* Get the current usb device's port status */
2561 2580 port_status = child_ud->usb_port_status;
2562 2581
2563 2582 /* Get the parent high speed hub's usba device structure */
2564 2583 parent_ud = child_ud->usb_hs_hub_usba_dev;
2565 2584
2566 2585 mutex_exit(&child_ud->usb_mutex);
2567 2586
2568 2587 /* Obtain the bandwidth */
2569 2588 (void) ehci_compute_classic_bandwidth(endpoint,
2570 2589 port_status, &bandwidth);
2571 2590
2572 2591 /* Adjust polling interval to be a power of 2 */
2573 2592 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2574 2593
2575 2594 /* Find the height in the tree */
2576 2595 height = ehci_lattice_height(interval);
2577 2596
2578 2597 /* Find the leftmost leaf in the subtree specified by the node */
2579 2598 leftmost = ehci_leftmost_leaf(pnode, height);
2580 2599
2581 2600 mutex_enter(&parent_ud->usb_mutex);
2582 2601
2583 2602 /* Delete the bandwidth from the appropriate lists */
2584 2603 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2585 2604 list = ehci_index[leftmost + i];
2586 2605 parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
2587 2606 }
2588 2607
2589 2608 /* Find the leaf with the smallest allocated bandwidth */
2590 2609 min = parent_ud->usb_hs_hub_bandwidth[0];
2591 2610
2592 2611 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2593 2612 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2594 2613 min = parent_ud->usb_hs_hub_bandwidth[i];
2595 2614 }
2596 2615 }
2597 2616
2598 2617 /* Save the minimum for later use */
2599 2618 parent_ud->usb_hs_hub_min_bandwidth = min;
2600 2619
2601 2620 mutex_exit(&parent_ud->usb_mutex);
2602 2621 }
2603 2622
2604 2623
2605 2624 /*
2606 2625 * ehci_compute_high_speed_bandwidth:
2607 2626 *
2608 2627 * Given a periodic endpoint (interrupt or isochronous) determine the total
2609 2628 * bandwidth for one transaction. The EHCI host controller traverses the
2610 2629 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2611 2630 * services an endpoint, only a single transaction attempt is made. The HC
2612 2631 * moves to the next Endpoint Descriptor after the first transaction attempt
2613 2632 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2614 2633 * Transfer Descriptor is inserted into the lattice, we will only count the
2615 2634 * number of bytes for one transaction.
2616 2635 *
2617 2636  * The following formulas are used for calculating bandwidth in terms of
2618 2637  * bytes for a single USB high speed transaction. The protocol overheads
2619 2638  * will be different for each type of USB transfer and all these formulas
2620 2639  * and protocol overheads are derived from section 5.11.3 of the
2621 2640  * USB 2.0 Specification.
2622 2641 *
2623 2642 * High-Speed:
2624 2643 * Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay
2625 2644 *
2626 2645 * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub)
2627 2646 *
2628 2647 * Protocol overhead + Split transaction overhead +
2629 2648 * ((MaxPktSz * 7)/6) + Host_Delay;
2630 2649 */
2631 2650 /* ARGSUSED */
2632 2651 static int
2633 2652 ehci_compute_high_speed_bandwidth(
2634 2653 ehci_state_t *ehcip,
2635 2654 usb_ep_descr_t *endpoint,
2636 2655 usb_port_status_t port_status,
2637 2656 uint_t *sbandwidth,
2638 2657 uint_t *cbandwidth)
2639 2658 {
2640 2659 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2641 2660
2642 2661 /* Return failure if endpoint maximum packet is zero */
2643 2662 if (maxpacketsize == 0) {
2644 2663 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2645 2664 "ehci_allocate_high_speed_bandwidth: Periodic endpoint "
2646 2665 "with zero endpoint maximum packet size is not supported");
2647 2666
2648 2667 return (USB_NOT_SUPPORTED);
2649 2668 }
2650 2669
2651 2670 /* Add bit-stuffing overhead */
2652 2671 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2653 2672
2654 2673 /* Add Host Controller specific delay to required bandwidth */
2655 2674 *sbandwidth = EHCI_HOST_CONTROLLER_DELAY;
2656 2675
2657 2676 /* Add xfer specific protocol overheads */
2658 2677 if ((endpoint->bmAttributes &
2659 2678 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2660 2679 /* High speed interrupt transaction */
2661 2680 *sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD;
2662 2681 } else {
2663 2682 /* Isochronous transaction */
2664 2683 *sbandwidth += HS_ISOC_PROTO_OVERHEAD;
2665 2684 }
2666 2685
2667 2686 /*
2668 2687 * For low/full speed devices, add split transaction specific
2669 2688 * overheads.
2670 2689 */
2671 2690 if (port_status != USBA_HIGH_SPEED_DEV) {
2672 2691 /*
2673 2692 * Add start and complete split transaction
2674 2693 * tokens overheads.
2675 2694 */
2676 2695 *cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD;
2677 2696 *sbandwidth += START_SPLIT_OVERHEAD;
2678 2697
2679 2698 /* Add data overhead depending on data direction */
2680 2699 if ((endpoint->bEndpointAddress &
2681 2700 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2682 2701 *cbandwidth += maxpacketsize;
2683 2702 } else {
2684 2703 if ((endpoint->bmAttributes &
2685 2704 USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) {
2686 2705 				/* There are no complete splits for OUT */
2687 2706 *cbandwidth = 0;
2688 2707 }
2689 2708 *sbandwidth += maxpacketsize;
2690 2709 }
2691 2710 } else {
2692 2711 uint_t xactions;
2693 2712
2694 2713 /* Get the max transactions per microframe */
2695 2714 xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >>
2696 2715 USB_EP_MAX_XACTS_SHIFT) + 1;
2697 2716
2698 2717 /* High speed transaction */
2699 2718 *sbandwidth += maxpacketsize;
2700 2719
2701 2720 /* Calculate bandwidth per micro-frame */
2702 2721 *sbandwidth *= xactions;
2703 2722
2704 2723 *cbandwidth = 0;
2705 2724 }
2706 2725
2707 2726 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2708 2727 "ehci_allocate_high_speed_bandwidth: "
2709 2728 "Start split bandwidth %d Complete split bandwidth %d",
2710 2729 *sbandwidth, *cbandwidth);
2711 2730
2712 2731 return (USB_SUCCESS);
2713 2732 }
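
A small numeric illustration of the high speed formula given above (protocol overhead plus bit-stuffed payload plus host delay, scaled by the transactions per micro-frame). The overhead and delay values are assumed placeholders, not the driver's EHCI_HOST_CONTROLLER_DELAY or HS_NON_ISOC_PROTO_OVERHEAD values.

#include <stdio.h>

/* Assumed placeholder overheads, in bytes; the driver's macros may differ. */
#define PROTO_OVERHEAD	55	/* per-transaction protocol overhead */
#define HOST_DELAY	18	/* host controller delay */

/*
 * Bandwidth in bytes per micro-frame for one high speed periodic endpoint:
 *	(protocol overhead + (maxpkt * 7) / 6 + host delay) * transactions
 */
static unsigned int
hs_bandwidth(unsigned int maxpkt, unsigned int xactions)
{
	return ((PROTO_OVERHEAD + (maxpkt * 7) / 6 + HOST_DELAY) * xactions);
}

int
main(void)
{
	/* A 64-byte interrupt endpoint, one transaction per micro-frame. */
	printf("bandwidth = %u bytes/uframe\n", hs_bandwidth(64, 1));
	return (0);
}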
2714 2733
2715 2734
2716 2735 /*
2717 2736 * ehci_compute_classic_bandwidth:
2718 2737 *
2719 2738 * Given a periodic endpoint (interrupt or isochronous) determine the total
2720 2739 * bandwidth for one transaction. The EHCI host controller traverses the
2721 2740 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2722 2741 * services an endpoint, only a single transaction attempt is made. The HC
2723 2742 * moves to the next Endpoint Descriptor after the first transaction attempt
2724 2743 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2725 2744 * Transfer Descriptor is inserted into the lattice, we will only count the
2726 2745 * number of bytes for one transaction.
2727 2746 *
2728 2747  * The following formulas are used for calculating bandwidth in terms of
2729 2748  * bytes for a single low/full speed (classic) USB transaction. The protocol
2730 2749  * overheads will be different for each type of USB transfer and all these
2731 2750  * formulas and protocol overheads are derived from section 5.11.3 of the
2732 2751  * USB 2.0 Specification.
2733 2752 *
2734 2753 * Low-Speed:
2735 2754 * Protocol overhead + Hub LS overhead +
2736 2755 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay
2737 2756 *
2738 2757 * Full-Speed:
2739 2758 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay
2740 2759 */
2741 2760 /* ARGSUSED */
2742 2761 static int
2743 2762 ehci_compute_classic_bandwidth(
2744 2763 usb_ep_descr_t *endpoint,
2745 2764 usb_port_status_t port_status,
2746 2765 uint_t *bandwidth)
2747 2766 {
2748 2767 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2749 2768
2750 2769 /*
2751 2770 * If endpoint maximum packet is zero, then return immediately.
2752 2771 */
2753 2772 if (maxpacketsize == 0) {
2754 2773
2755 2774 return (USB_NOT_SUPPORTED);
2756 2775 }
2757 2776
2758 2777 /* Add TT delay to required bandwidth */
2759 2778 *bandwidth = TT_DELAY;
2760 2779
2761 2780 /* Add bit-stuffing overhead */
2762 2781 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2763 2782
2764 2783 switch (port_status) {
2765 2784 case USBA_LOW_SPEED_DEV:
2766 2785 /* Low speed interrupt transaction */
2767 2786 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
2768 2787 HUB_LOW_SPEED_PROTO_OVERHEAD +
2769 2788 (LOW_SPEED_CLOCK * maxpacketsize));
2770 2789 break;
2771 2790 case USBA_FULL_SPEED_DEV:
2772 2791 /* Full speed transaction */
2773 2792 *bandwidth += maxpacketsize;
2774 2793
2775 2794 /* Add xfer specific protocol overheads */
2776 2795 if ((endpoint->bmAttributes &
2777 2796 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2778 2797 /* Full speed interrupt transaction */
2779 2798 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
2780 2799 } else {
2781 2800 /* Isochronous and input transaction */
2782 2801 if ((endpoint->bEndpointAddress &
2783 2802 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2784 2803 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
2785 2804 } else {
2786 2805 /* Isochronous and output transaction */
2787 2806 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
2788 2807 }
2789 2808 }
2790 2809 break;
2791 2810 }
2792 2811
2793 2812 return (USB_SUCCESS);
2794 2813 }
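
For the classic formulas above, a comparable sketch for a low speed interrupt endpoint; the TT delay, protocol overheads, and clock ratio are assumed placeholder values rather than the driver's TT_DELAY, LOW_SPEED_PROTO_OVERHEAD, HUB_LOW_SPEED_PROTO_OVERHEAD, and LOW_SPEED_CLOCK macros.

#include <stdio.h>

/* Assumed placeholder values, in bytes; the driver's macros may differ. */
#define TT_DELAY_BYTES		18	/* transaction translator think time */
#define LS_PROTO_OVERHEAD	14	/* low speed protocol overhead */
#define HUB_LS_OVERHEAD		3	/* hub low speed overhead */
#define LS_CLOCK		8	/* low speed bus runs 8x slower */

/*
 * Low speed interrupt bandwidth, per the comment above:
 *	protocol overhead + hub LS overhead +
 *	(low speed clock * (maxpkt * 7) / 6) + TT delay
 */
static unsigned int
ls_intr_bandwidth(unsigned int maxpkt)
{
	return (TT_DELAY_BYTES + LS_PROTO_OVERHEAD + HUB_LS_OVERHEAD +
	    LS_CLOCK * ((maxpkt * 7) / 6));
}

int
main(void)
{
	printf("bandwidth = %u bytes/frame\n", ls_intr_bandwidth(8));
	return (0);
}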
2795 2814
2796 2815
2797 2816 /*
2798 2817 * ehci_adjust_polling_interval:
2799 2818 *
2800 2819  * Adjust the polling interval according to the usb device speed.
2801 2820 */
2802 2821 /* ARGSUSED */
2803 2822 int
2804 2823 ehci_adjust_polling_interval(
2805 2824 ehci_state_t *ehcip,
2806 2825 usb_ep_descr_t *endpoint,
2807 2826 usb_port_status_t port_status)
2808 2827 {
2809 2828 uint_t interval;
2810 2829 int i = 0;
2811 2830
2812 2831 /* Get the polling interval */
2813 2832 interval = endpoint->bInterval;
2814 2833
2815 2834 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2816 2835 "ehci_adjust_polling_interval: Polling interval 0x%x", interval);
2817 2836
2818 2837 /*
2819 2838 	 * According to the USB 2.0 Specification, a high-speed endpoint's
2820 2839 	 * polling interval is specified in terms of 125us units (micro-
2821 2840 	 * frames), whereas full/low speed endpoints' polling intervals are
2822 2841 	 * specified in milliseconds.
2823 2842 *
2824 2843 	 * High speed interrupt/isochronous endpoints can specify a
2825 2844 	 * desired polling interval between 1 and 16 micro-frames,
2826 2845 	 * whereas full/low speed endpoints can specify between 1 and 255
2827 2846 	 * milliseconds.
2828 2847 */
2829 2848 switch (port_status) {
2830 2849 case USBA_LOW_SPEED_DEV:
2831 2850 /*
2832 2851 * Low speed endpoints are limited to specifying
2833 2852 * only 8ms to 255ms in this driver. If a device
2834 2853 * reports a polling interval that is less than 8ms,
2835 2854 * it will use 8 ms instead.
2836 2855 */
2837 2856 if (interval < LS_MIN_POLL_INTERVAL) {
2838 2857
2839 2858 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2840 2859 "Low speed endpoint's poll interval of %d ms "
2841 2860 "is below threshold. Rounding up to %d ms",
2842 2861 interval, LS_MIN_POLL_INTERVAL);
2843 2862
2844 2863 interval = LS_MIN_POLL_INTERVAL;
2845 2864 }
2846 2865
2847 2866 /*
2848 2867 * Return an error if the polling interval is greater
2849 2868 * than 255ms.
2850 2869 */
2851 2870 if (interval > LS_MAX_POLL_INTERVAL) {
2852 2871
2853 2872 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2854 2873 "Low speed endpoint's poll interval is "
2855 2874 "greater than %d ms", LS_MAX_POLL_INTERVAL);
2856 2875
2857 2876 return (USB_FAILURE);
2858 2877 }
2859 2878 break;
2860 2879
2861 2880 case USBA_FULL_SPEED_DEV:
2862 2881 /*
2863 2882 		 * Return an error if the polling interval is less
2864 2883 		 * than 1ms or greater than 255ms.
2865 2884 		 */
2866 2885 		if ((interval < FS_MIN_POLL_INTERVAL) ||
2867 2886 (interval > FS_MAX_POLL_INTERVAL)) {
2868 2887
2869 2888 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2870 2889 "Full speed endpoint's poll interval must "
2871 2890 "be between %d and %d ms", FS_MIN_POLL_INTERVAL,
2872 2891 FS_MAX_POLL_INTERVAL);
2873 2892
2874 2893 return (USB_FAILURE);
2875 2894 }
2876 2895 break;
2877 2896 case USBA_HIGH_SPEED_DEV:
2878 2897 /*
2879 2898 		 * Return an error if the polling interval is less than 1
2880 2899 		 * or greater than 16. Convert this value to 125us
2881 2900 		 * units using 2^(bInterval - 1); refer to the USB 2.0 spec,
2882 2901 		 * page 51, for details.
2883 2902 		 */
2884 2903 		if ((interval < HS_MIN_POLL_INTERVAL) ||
2885 2904 (interval > HS_MAX_POLL_INTERVAL)) {
2886 2905
2887 2906 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2888 2907 "High speed endpoint's poll interval "
2889 2908 "must be between %d and %d units",
2890 2909 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL);
2891 2910
2892 2911 return (USB_FAILURE);
2893 2912 }
2894 2913
2895 2914 /* Adjust high speed device polling interval */
2896 2915 interval =
2897 2916 ehci_adjust_high_speed_polling_interval(ehcip, endpoint);
2898 2917
2899 2918 break;
2900 2919 }
2901 2920
2902 2921 /*
2903 2922 	 * If the polling interval is greater than 32ms,
2904 2923 	 * clamp it to 32ms.
2905 2924 */
2906 2925 if (interval > EHCI_NUM_INTR_QH_LISTS) {
2907 2926 interval = EHCI_NUM_INTR_QH_LISTS;
2908 2927 }
2909 2928
2910 2929 /*
2911 2930 	 * Find the largest power of 2 that is less
2912 2931 	 * than or equal to the interval.
2913 2932 */
2914 2933 while ((ehci_pow_2(i)) <= interval) {
2915 2934 i++;
2916 2935 }
2917 2936
2918 2937 return (ehci_pow_2((i - 1)));
2919 2938 }
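
A minimal sketch of the clamping and power-of-two rounding done at the end of the function above, assuming the same 32-list limit as EHCI_NUM_INTR_QH_LISTS; the example intervals are arbitrary.

#include <stdio.h>

#define MAX_INTERVAL	32	/* stand-in for EHCI_NUM_INTR_QH_LISTS */

/* Clamp to 32ms, then round down to the largest power of 2 <= interval. */
static unsigned int
round_interval(unsigned int interval)
{
	unsigned int i = 0;

	if (interval == 0)
		interval = 1;
	if (interval > MAX_INTERVAL)
		interval = MAX_INTERVAL;
	while ((1u << i) <= interval)
		i++;
	return (1u << (i - 1));
}

int
main(void)
{
	/* 10ms rounds down to 8ms; 255ms clamps to 32ms. */
	printf("10 -> %u, 255 -> %u\n", round_interval(10), round_interval(255));
	return (0);
}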
2920 2939
2921 2940
2922 2941 /*
2923 2942 * ehci_adjust_high_speed_polling_interval:
2924 2943 */
2925 2944 /* ARGSUSED */
2926 2945 static int
2927 2946 ehci_adjust_high_speed_polling_interval(
2928 2947 ehci_state_t *ehcip,
2929 2948 usb_ep_descr_t *endpoint)
2930 2949 {
2931 2950 uint_t interval;
2932 2951
2933 2952 /* Get the polling interval */
2934 2953 interval = ehci_pow_2(endpoint->bInterval - 1);
2935 2954
2936 2955 /*
2937 2956 	 * Convert the polling interval from micro-frames
2938 2957 	 * (125us units) to milliseconds.
2939 2958 */
2940 2959 if (interval <= EHCI_MAX_UFRAMES) {
2941 2960 interval = 1;
2942 2961 } else {
2943 2962 interval = interval/EHCI_MAX_UFRAMES;
2944 2963 }
2945 2964
2946 2965 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2947 2966 "ehci_adjust_high_speed_polling_interval: "
2948 2967 "High speed adjusted interval 0x%x", interval);
2949 2968
2950 2969 return (interval);
2951 2970 }
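
The 2^(bInterval - 1) conversion above, shown standalone: a bInterval of 4 means 2^3 = 8 micro-frames, which is exactly one 1ms frame. EHCI_MAX_UFRAMES is assumed to be 8 here, and the helper name is local to this sketch.

#include <stdio.h>

#define UFRAMES_PER_MS	8	/* assumed stand-in for EHCI_MAX_UFRAMES */

/* Convert a high speed bInterval (1..16) to a polling interval in ms. */
static unsigned int
hs_binterval_to_ms(unsigned int binterval)
{
	unsigned int uframes = 1u << (binterval - 1);	/* 2^(bInterval-1) */

	return ((uframes <= UFRAMES_PER_MS) ? 1 : uframes / UFRAMES_PER_MS);
}

int
main(void)
{
	printf("bInterval 1 -> %u ms, 4 -> %u ms, 8 -> %u ms\n",
	    hs_binterval_to_ms(1), hs_binterval_to_ms(4), hs_binterval_to_ms(8));
	return (0);
}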
2952 2971
2953 2972
2954 2973 /*
2955 2974 * ehci_lattice_height:
2956 2975 *
2957 2976  * Given the requested interval, find the height in the tree at which the
2958 2977  * nodes for this interval fall. The height is measured as the number of
2959 2978  * nodes from the leaf to the level specified by the interval. The root of
2960 2979  * the tree is at height TREE_HEIGHT.
2961 2980 */
2962 2981 static uint_t
2963 2982 ehci_lattice_height(uint_t interval)
2964 2983 {
2965 2984 return (TREE_HEIGHT - (ehci_log_2(interval)));
2966 2985 }
2967 2986
2968 2987
2969 2988 /*
2970 2989 * ehci_lattice_parent:
2971 2990 *
2972 2991 * Given a node in the lattice, find the index of the parent node
2973 2992 */
2974 2993 static uint_t
2975 2994 ehci_lattice_parent(uint_t node)
2976 2995 {
2977 2996 if ((node % 2) == 0) {
2978 2997
2979 2998 return ((node/2) - 1);
2980 2999 } else {
2981 3000
2982 3001 return ((node + 1)/2 - 1);
2983 3002 }
2984 3003 }
2985 3004
2986 3005
2987 3006 /*
2988 3007 * ehci_find_periodic_node:
2989 3008 *
2990 3009 * Based on the "real" array leaf node and interval, get the periodic node.
2991 3010 */
2992 3011 static uint_t
2993 3012 ehci_find_periodic_node(uint_t leaf, int interval)
2994 3013 {
2995 3014 uint_t lattice_leaf;
2996 3015 uint_t height = ehci_lattice_height(interval);
2997 3016 uint_t pnode;
2998 3017 int i;
2999 3018
3000 3019 /* Get the leaf number in the lattice */
3001 3020 lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1;
3002 3021
3003 3022 /* Get the node in the lattice based on the height and leaf */
3004 3023 pnode = lattice_leaf;
3005 3024 for (i = 0; i < height; i++) {
3006 3025 pnode = ehci_lattice_parent(pnode);
3007 3026 }
3008 3027
3009 3028 return (pnode);
3010 3029 }
3011 3030
3012 3031
3013 3032 /*
3014 3033 * ehci_leftmost_leaf:
3015 3034 *
3016 3035 * Find the leftmost leaf in the subtree specified by the node. Height refers
3017 3036 * to number of nodes from the bottom of the tree to the node, including the
3018 3037 * node.
3019 3038 *
3020 3039 * The formula for a zero based tree is:
3021 3040 * 2^H * Node + 2^H - 1
3022 3041  * The leaves of the tree form an array; to convert the node number to an
3023 3042  *     array index, subtract the number of nodes not in the array:
3024 3043 * 2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) =
3025 3044 * 2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS =
3026 3045 * 2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS
3027 3046 * 0
3028 3047 * 1 2
3029 3048 * 0 1 2 3
3030 3049 */
3031 3050 static uint_t
3032 3051 ehci_leftmost_leaf(
3033 3052 uint_t node,
3034 3053 uint_t height)
3035 3054 {
3036 3055 return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS);
3037 3056 }
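
A worked round trip through the lattice math above, assuming 32 interrupt lists and TREE_HEIGHT = 5: starting from array leaf 6 with an 8ms interval, walk up to the lattice node and back down to its leftmost leaf. The helper names are local to the sketch.

#include <stdio.h>

#define NUM_LISTS	32	/* stand-in for EHCI_NUM_INTR_QH_LISTS */
#define TREE_HGT	5	/* assumed TREE_HEIGHT: log2(NUM_LISTS) */

static unsigned int
log2u(unsigned int x)
{
	unsigned int i = 0;

	while (x > 1) {
		x >>= 1;
		i++;
	}
	return (i);
}

/* Parent of a node in the zero-based lattice numbering. */
static unsigned int
parent(unsigned int node)
{
	return (((node % 2) == 0) ? (node / 2 - 1) : ((node + 1) / 2 - 1));
}

/* Leftmost array leaf of the subtree rooted at 'node', 'height' levels up. */
static unsigned int
leftmost_leaf(unsigned int node, unsigned int height)
{
	return ((1u << height) * (node + 1) - NUM_LISTS);
}

int
main(void)
{
	unsigned int interval = 8, leaf = 6;
	unsigned int height = TREE_HGT - log2u(interval);
	unsigned int node = leaf + NUM_LISTS - 1;	/* lattice leaf number */
	unsigned int i;

	for (i = 0; i < height; i++)
		node = parent(node);

	/* Prints: leaf 6, interval 8 -> node 8, leftmost leaf 4 */
	printf("leaf %u, interval %u -> node %u, leftmost leaf %u\n",
	    leaf, interval, node, leftmost_leaf(node, height));
	return (0);
}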
3038 3057
3039 3058
3040 3059 /*
3041 3060 * ehci_pow_2:
3042 3061 *
3043 3062 * Compute 2 to the power
3044 3063 */
3045 3064 static uint_t
3046 3065 ehci_pow_2(uint_t x)
3047 3066 {
3048 3067 if (x == 0) {
3049 3068
3050 3069 return (1);
3051 3070 } else {
3052 3071
3053 3072 return (2 << (x - 1));
3054 3073 }
3055 3074 }
3056 3075
3057 3076
3058 3077 /*
3059 3078 * ehci_log_2:
3060 3079 *
3061 3080 * Compute log base 2 of x
3062 3081 */
3063 3082 static uint_t
3064 3083 ehci_log_2(uint_t x)
3065 3084 {
3066 3085 int i = 0;
3067 3086
3068 3087 while (x != 1) {
3069 3088 x = x >> 1;
3070 3089 i++;
3071 3090 }
3072 3091
3073 3092 return (i);
3074 3093 }
3075 3094
3076 3095
3077 3096 /*
3078 3097 * ehci_find_bestfit_hs_mask:
3079 3098 *
3080 3099 * Find the smask and cmask in the bandwidth allocation, and update the
3081 3100 * bandwidth allocation.
3082 3101 */
3083 3102 static int
3084 3103 ehci_find_bestfit_hs_mask(
3085 3104 ehci_state_t *ehcip,
3086 3105 uchar_t *smask,
3087 3106 uint_t *pnode,
3088 3107 usb_ep_descr_t *endpoint,
3089 3108 uint_t bandwidth,
3090 3109 int interval)
3091 3110 {
3092 3111 int i;
3093 3112 uint_t elements, index;
3094 3113 int array_leaf, best_array_leaf;
3095 3114 uint_t node_bandwidth, best_node_bandwidth;
3096 3115 uint_t leaf_count;
3097 3116 uchar_t bw_mask;
3098 3117 uchar_t best_smask;
3099 3118
3100 3119 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3101 3120 "ehci_find_bestfit_hs_mask: ");
3102 3121
3103 3122 /* Get all the valid smasks */
3104 3123 switch (ehci_pow_2(endpoint->bInterval - 1)) {
3105 3124 case EHCI_INTR_1US_POLL:
3106 3125 index = EHCI_1US_MASK_INDEX;
3107 3126 elements = EHCI_INTR_1US_POLL;
3108 3127 break;
3109 3128 case EHCI_INTR_2US_POLL:
3110 3129 index = EHCI_2US_MASK_INDEX;
3111 3130 elements = EHCI_INTR_2US_POLL;
3112 3131 break;
3113 3132 case EHCI_INTR_4US_POLL:
3114 3133 index = EHCI_4US_MASK_INDEX;
3115 3134 elements = EHCI_INTR_4US_POLL;
3116 3135 break;
3117 3136 case EHCI_INTR_XUS_POLL:
3118 3137 default:
3119 3138 index = EHCI_XUS_MASK_INDEX;
3120 3139 elements = EHCI_INTR_XUS_POLL;
3121 3140 break;
3122 3141 }
3123 3142
3124 3143 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3125 3144
3126 3145 /*
3127 3146 	 * Because of the way the leaves are set up, we will automatically
3128 3147 * hit the leftmost leaf of every possible node with this interval.
3129 3148 */
3130 3149 best_smask = 0x00;
3131 3150 best_node_bandwidth = 0;
3132 3151 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3133 3152 /* Find the bandwidth mask */
3134 3153 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip,
3135 3154 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask);
3136 3155
3137 3156 /*
3138 3157 * If this node cannot support our requirements skip to the
3139 3158 * next leaf.
3140 3159 */
3141 3160 if (bw_mask == 0x00) {
3142 3161 continue;
3143 3162 }
3144 3163
3145 3164 /*
3146 3165 * Now make sure our bandwidth requirements can be
3147 3166 		 * satisfied with one of the smasks in this node.
3148 3167 */
3149 3168 *smask = 0x00;
3150 3169 for (i = index; i < (index + elements); i++) {
3151 3170 /* Check the start split mask value */
3152 3171 			if (ehci_start_split_mask[i] & bw_mask) {
3153 3172 				*smask = ehci_start_split_mask[i];
3154 3173 break;
3155 3174 }
3156 3175 }
3157 3176
3158 3177 /*
3159 3178 * If an appropriate smask is found save the information if:
3160 3179 * o best_smask has not been found yet.
3161 3180 * - or -
3162 3181 * o This is the node with the least amount of bandwidth
3163 3182 */
3164 3183 if ((*smask != 0x00) &&
3165 3184 ((best_smask == 0x00) ||
3166 3185 (best_node_bandwidth > node_bandwidth))) {
3167 3186
3168 3187 best_node_bandwidth = node_bandwidth;
3169 3188 best_array_leaf = array_leaf;
3170 3189 best_smask = *smask;
3171 3190 }
3172 3191 }
3173 3192
3174 3193 /*
3175 3194 	 * If we find a node that can handle the bandwidth, populate the
3176 3195 	 * appropriate variables and return success.
3177 3196 */
3178 3197 if (best_smask) {
3179 3198 *smask = best_smask;
3180 3199 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3181 3200 interval);
3182 3201 ehci_update_bw_availability(ehcip, bandwidth,
3183 3202 ehci_index[best_array_leaf], leaf_count, best_smask);
3184 3203
3185 3204 return (USB_SUCCESS);
3186 3205 }
3187 3206
3188 3207 return (USB_FAILURE);
3189 3208 }
3190 3209
3191 3210
3192 3211 /*
3193 3212 * ehci_find_bestfit_ls_intr_mask:
3194 3213 *
3195 3214 * Find the smask and cmask in the bandwidth allocation.
3196 3215 */
3197 3216 static int
3198 3217 ehci_find_bestfit_ls_intr_mask(
3199 3218 ehci_state_t *ehcip,
3200 3219 uchar_t *smask,
3201 3220 uchar_t *cmask,
3202 3221 uint_t *pnode,
3203 3222 uint_t sbandwidth,
3204 3223 uint_t cbandwidth,
3205 3224 int interval)
3206 3225 {
3207 3226 int i;
3208 3227 uint_t elements, index;
3209 3228 int array_leaf, best_array_leaf;
3210 3229 uint_t node_sbandwidth, node_cbandwidth;
3211 3230 uint_t best_node_bandwidth;
3212 3231 uint_t leaf_count;
3213 3232 uchar_t bw_smask, bw_cmask;
3214 3233 uchar_t best_smask, best_cmask;
3215 3234
3216 3235 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3217 3236 "ehci_find_bestfit_ls_intr_mask: ");
3218 3237
3219 3238 /* For low and full speed devices */
3220 3239 index = EHCI_XUS_MASK_INDEX;
3221 3240 elements = EHCI_INTR_4MS_POLL;
3222 3241
3223 3242 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3224 3243
3225 3244 /*
3226 3245 	 * Because of the way the leaves are set up, we will automatically
3227 3246 * hit the leftmost leaf of every possible node with this interval.
3228 3247 */
3229 3248 best_smask = 0x00;
3230 3249 best_node_bandwidth = 0;
3231 3250 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3232 3251 /* Find the bandwidth mask */
3233 3252 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3234 3253 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3235 3254 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3236 3255 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask);
3237 3256
3238 3257 /*
3239 3258 * If this node cannot support our requirements skip to the
3240 3259 * next leaf.
3241 3260 */
3242 3261 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3243 3262 continue;
3244 3263 }
3245 3264
3246 3265 /*
3247 3266 * Now make sure our bandwidth requirements can be
3248 3267 		 * satisfied with one of the smasks in this node.
3249 3268 */
3250 3269 *smask = 0x00;
3251 3270 *cmask = 0x00;
3252 3271 for (i = index; i < (index + elements); i++) {
3253 3272 /* Check the start split mask value */
3254 3273 			if ((ehci_start_split_mask[i] & bw_smask) &&
3255 3274 			    (ehci_intr_complete_split_mask[i] & bw_cmask)) {
3256 3275 				*smask = ehci_start_split_mask[i];
3257 3276 				*cmask = ehci_intr_complete_split_mask[i];
3258 3277 break;
3259 3278 }
3260 3279 }
3261 3280
3262 3281 /*
3263 3282 * If an appropriate smask is found save the information if:
3264 3283 * o best_smask has not been found yet.
3265 3284 * - or -
3266 3285 * o This is the node with the least amount of bandwidth
3267 3286 */
3268 3287 if ((*smask != 0x00) &&
3269 3288 ((best_smask == 0x00) ||
3270 3289 (best_node_bandwidth >
3271 3290 (node_sbandwidth + node_cbandwidth)))) {
3272 3291 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3273 3292 best_array_leaf = array_leaf;
3274 3293 best_smask = *smask;
3275 3294 best_cmask = *cmask;
3276 3295 }
3277 3296 }
3278 3297
3279 3298 /*
3280 3299 	 * If we find a node that can handle the bandwidth, populate the
3281 3300 * appropriate variables and return success.
3282 3301 */
3283 3302 if (best_smask) {
3284 3303 *smask = best_smask;
3285 3304 *cmask = best_cmask;
3286 3305 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3287 3306 interval);
3288 3307 ehci_update_bw_availability(ehcip, sbandwidth,
3289 3308 ehci_index[best_array_leaf], leaf_count, best_smask);
3290 3309 ehci_update_bw_availability(ehcip, cbandwidth,
3291 3310 ehci_index[best_array_leaf], leaf_count, best_cmask);
3292 3311
3293 3312 return (USB_SUCCESS);
3294 3313 }
3295 3314
3296 3315 return (USB_FAILURE);
3297 3316 }
3298 3317
3299 3318
3300 3319 /*
3301 3320 * ehci_find_bestfit_sitd_in_mask:
3302 3321 *
3303 3322 * Find the smask and cmask in the bandwidth allocation.
3304 3323 */
3305 3324 static int
3306 3325 ehci_find_bestfit_sitd_in_mask(
3307 3326 ehci_state_t *ehcip,
3308 3327 uchar_t *smask,
3309 3328 uchar_t *cmask,
3310 3329 uint_t *pnode,
3311 3330 uint_t sbandwidth,
3312 3331 uint_t cbandwidth,
3313 3332 int interval)
3314 3333 {
3315 3334 int i, uFrames, found;
3316 3335 int array_leaf, best_array_leaf;
3317 3336 uint_t node_sbandwidth, node_cbandwidth;
3318 3337 uint_t best_node_bandwidth;
3319 3338 uint_t leaf_count;
3320 3339 uchar_t bw_smask, bw_cmask;
3321 3340 uchar_t best_smask, best_cmask;
3322 3341
3323 3342 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3324 3343 "ehci_find_bestfit_sitd_in_mask: ");
3325 3344
3326 3345 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3327 3346
3328 3347 /*
3329 3348 	 * Because of the way the leaves are set up, we will automatically
3330 3349 * hit the leftmost leaf of every possible node with this interval.
3331 3350 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
3332 3351 */
3333 3352 /*
3334 3353 * Need to add an additional 2 uFrames, if the "L"ast
3335 3354 * complete split is before uFrame 6. See section
3336 3355 * 11.8.4 in USB 2.0 Spec. Currently we do not support
3337 3356 	 * the "Back Ptr", which means we support an IN of
3338 3357 	 * ~4*MAX_UFRAME_SITD_XFER bandwidth.
3339 3358 */
3340 3359 uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2;
3341 3360 if (cbandwidth % MAX_UFRAME_SITD_XFER) {
3342 3361 uFrames++;
3343 3362 }
3344 3363 if (uFrames > 6) {
3345 3364
3346 3365 return (USB_FAILURE);
3347 3366 }
3348 3367 *smask = 0x1;
3349 3368 *cmask = 0x00;
3350 3369 for (i = 0; i < uFrames; i++) {
3351 3370 *cmask = *cmask << 1;
3352 3371 *cmask |= 0x1;
3353 3372 }
3354 3373 	/* cmask must start 2 uFrames after the smask */
3355 3374 *cmask = *cmask << 2;
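	/*
	 * For example (illustrative numbers): a cbandwidth of
	 * 2 * MAX_UFRAME_SITD_XFER + 1 yields uFrames = (2 + 2) + 1 = 5
	 * complete-split slots, so the masks built above are
	 * *smask = 0x01 and *cmask = 0x7C (uFrames 2 through 6), which is
	 * the starting position for the per-leaf search below.
	 */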
3356 3375
3357 3376 found = 0;
3358 3377 best_smask = 0x00;
3359 3378 best_node_bandwidth = 0;
3360 3379 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3361 3380 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3362 3381 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3363 3382 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3364 3383 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3365 3384 &bw_cmask);
3366 3385
3367 3386 /*
3368 3387 * If this node cannot support our requirements skip to the
3369 3388 * next leaf.
3370 3389 */
3371 3390 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3372 3391 continue;
3373 3392 }
3374 3393
3375 3394 for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) {
3376 3395 if ((*smask & bw_smask) && (*cmask & bw_cmask)) {
3377 3396 found = 1;
3378 3397 break;
3379 3398 }
3380 3399 *smask = *smask << 1;
3381 3400 *cmask = *cmask << 1;
3382 3401 }
3383 3402
3384 3403 /*
3385 3404 * If an appropriate smask is found save the information if:
3386 3405 * o best_smask has not been found yet.
3387 3406 * - or -
3388 3407 * o This is the node with the least amount of bandwidth
3389 3408 */
3390 3409 if (found &&
3391 3410 ((best_smask == 0x00) ||
3392 3411 (best_node_bandwidth >
3393 3412 (node_sbandwidth + node_cbandwidth)))) {
3394 3413 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3395 3414 best_array_leaf = array_leaf;
3396 3415 best_smask = *smask;
3397 3416 best_cmask = *cmask;
3398 3417 }
3399 3418 }
3400 3419
3401 3420 /*
3402 3421 	 * If we find a node that can handle the bandwidth, populate the
3403 3422 * appropriate variables and return success.
3404 3423 */
3405 3424 if (best_smask) {
3406 3425 *smask = best_smask;
3407 3426 *cmask = best_cmask;
3408 3427 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3409 3428 interval);
3410 3429 ehci_update_bw_availability(ehcip, sbandwidth,
3411 3430 ehci_index[best_array_leaf], leaf_count, best_smask);
3412 3431 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3413 3432 ehci_index[best_array_leaf], leaf_count, best_cmask);
3414 3433
3415 3434 return (USB_SUCCESS);
3416 3435 }
3417 3436
3418 3437 return (USB_FAILURE);
3419 3438 }
3420 3439
3421 3440
3422 3441 /*
3423 3442 * ehci_find_bestfit_sitd_out_mask:
3424 3443 *
3425 3444 * Find the smask in the bandwidth allocation.
3426 3445 */
3427 3446 static int
3428 3447 ehci_find_bestfit_sitd_out_mask(
3429 3448 ehci_state_t *ehcip,
3430 3449 uchar_t *smask,
3431 3450 uint_t *pnode,
3432 3451 uint_t sbandwidth,
3433 3452 int interval)
3434 3453 {
3435 3454 int i, uFrames, found;
3436 3455 int array_leaf, best_array_leaf;
3437 3456 uint_t node_sbandwidth;
3438 3457 uint_t best_node_bandwidth;
3439 3458 uint_t leaf_count;
3440 3459 uchar_t bw_smask;
3441 3460 uchar_t best_smask;
3442 3461
3443 3462 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3444 3463 "ehci_find_bestfit_sitd_out_mask: ");
3445 3464
3446 3465 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3447 3466
3448 3467 /*
3449 3468 	 * Because of the way the leaves are set up, we will automatically
3450 3469 * hit the leftmost leaf of every possible node with this interval.
3451 3470 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
3452 3471 */
3453 3472 *smask = 0x00;
3454 3473 uFrames = sbandwidth / MAX_UFRAME_SITD_XFER;
3455 3474 if (sbandwidth % MAX_UFRAME_SITD_XFER) {
3456 3475 uFrames++;
3457 3476 }
3458 3477 for (i = 0; i < uFrames; i++) {
3459 3478 *smask = *smask << 1;
3460 3479 *smask |= 0x1;
3461 3480 }
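	/*
	 * For example (illustrative numbers): an sbandwidth of
	 * 2 * MAX_UFRAME_SITD_XFER + 1 needs 3 start-split uFrames, so the
	 * initial *smask is 0x07; the loop below slides it left until it
	 * fits, never testing a position that would place a start split in
	 * the 8th uFrame (bit 7).
	 */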
3462 3481
3463 3482 found = 0;
3464 3483 best_smask = 0x00;
3465 3484 best_node_bandwidth = 0;
3466 3485 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3467 3486 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3468 3487 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3469 3488 &bw_smask);
3470 3489
3471 3490 /*
3472 3491 * If this node cannot support our requirements skip to the
3473 3492 * next leaf.
3474 3493 */
3475 3494 if (bw_smask == 0x00) {
3476 3495 continue;
3477 3496 }
3478 3497
3479 3498 /* You cannot have a start split on the 8th uFrame */
3480 3499 for (i = 0; (*smask & 0x80) == 0; i++) {
3481 3500 if (*smask & bw_smask) {
3482 3501 found = 1;
3483 3502 break;
3484 3503 }
3485 3504 *smask = *smask << 1;
3486 3505 }
3487 3506
3488 3507 /*
3489 3508 * If an appropriate smask is found save the information if:
3490 3509 * o best_smask has not been found yet.
3491 3510 * - or -
3492 3511 * o This is the node with the least amount of bandwidth
3493 3512 */
3494 3513 if (found &&
3495 3514 ((best_smask == 0x00) ||
3496 3515 (best_node_bandwidth > node_sbandwidth))) {
3497 3516 best_node_bandwidth = node_sbandwidth;
3498 3517 best_array_leaf = array_leaf;
3499 3518 best_smask = *smask;
3500 3519 }
3501 3520 }
3502 3521
3503 3522 /*
3504 3523 	 * If we find a node that can handle the bandwidth, populate the
3505 3524 * appropriate variables and return success.
3506 3525 */
3507 3526 if (best_smask) {
3508 3527 *smask = best_smask;
3509 3528 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3510 3529 interval);
3511 3530 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3512 3531 ehci_index[best_array_leaf], leaf_count, best_smask);
3513 3532
3514 3533 return (USB_SUCCESS);
3515 3534 }
3516 3535
3517 3536 return (USB_FAILURE);
3518 3537 }
3519 3538
3520 3539
3521 3540 /*
3522 3541 * ehci_calculate_bw_availability_mask:
3523 3542 *
3524 3543 * Returns the "total bandwidth used" in this node.
3525 3544 * Populates bw_mask with the uFrames that can support the bandwidth.
3526 3545 *
3527 3546  * If none of the uFrames can support this bandwidth, then bw_mask
3528 3547  * will be set to 0x00 and the "total bandwidth used" will be invalid.
3529 3548 */
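/*
 * For example (illustrative values): if adding `bandwidth' to uFrame 3 of
 * any frame in this node would exceed HS_PERIODIC_BANDWIDTH, bit 3 is
 * cleared in *bw_mask; a resulting mask of 0xF7 therefore means every
 * uFrame except uFrame 3 can still absorb the request.
 */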
3530 3549 static uint_t
3531 3550 ehci_calculate_bw_availability_mask(
3532 3551 ehci_state_t *ehcip,
3533 3552 uint_t bandwidth,
3534 3553 int leaf,
3535 3554 int leaf_count,
3536 3555 uchar_t *bw_mask)
3537 3556 {
3538 3557 int i, j;
3539 3558 uchar_t bw_uframe;
3540 3559 int uframe_total;
3541 3560 ehci_frame_bandwidth_t *fbp;
3542 3561 uint_t total_bandwidth = 0;
3543 3562
3544 3563 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3545 3564 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d",
3546 3565 leaf, leaf_count);
3547 3566
3548 3567 /* Start by saying all uFrames are available */
3549 3568 *bw_mask = 0xFF;
3550 3569
3551 3570 	for (i = 0; (i < leaf_count) && (*bw_mask != 0x00); i++) {
3552 3571 fbp = &ehcip->ehci_frame_bandwidth[leaf + i];
3553 3572
3554 3573 total_bandwidth += fbp->ehci_allocated_frame_bandwidth;
3555 3574
3556 3575 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3557 3576 /*
3558 3577 * If the uFrame in bw_mask is available check to see if
3559 3578 * it can support the additional bandwidth.
3560 3579 */
3561 3580 bw_uframe = (*bw_mask & (0x1 << j));
3562 3581 uframe_total =
3563 3582 fbp->ehci_micro_frame_bandwidth[j] +
3564 3583 bandwidth;
3565 3584 if ((bw_uframe) &&
3566 3585 (uframe_total > HS_PERIODIC_BANDWIDTH)) {
3567 3586 *bw_mask = *bw_mask & ~bw_uframe;
3568 3587 }
3569 3588 }
3570 3589 }
3571 3590
3572 3591 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3573 3592 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x",
3574 3593 *bw_mask);
3575 3594
3576 3595 return (total_bandwidth);
3577 3596 }
3578 3597
3579 3598
3580 3599 /*
3581 3600 * ehci_update_bw_availability:
3582 3601 *
3583 3602 * The leftmost leaf needs to be in terms of array position and
3584 3603 * not the actual lattice position.
3585 3604 */
3586 3605 static void
3587 3606 ehci_update_bw_availability(
3588 3607 ehci_state_t *ehcip,
3589 3608 int bandwidth,
3590 3609 int leftmost_leaf,
3591 3610 int leaf_count,
3592 3611 uchar_t mask)
3593 3612 {
3594 3613 int i, j;
3595 3614 ehci_frame_bandwidth_t *fbp;
3596 3615 int uFrame_bandwidth[8];
3597 3616
3598 3617 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3599 3618 "ehci_update_bw_availability: "
3600 3619 "leaf %d count %d bandwidth 0x%x mask 0x%x",
3601 3620 leftmost_leaf, leaf_count, bandwidth, mask);
3602 3621
3603 3622 ASSERT(leftmost_leaf < 32);
3604 3623 ASSERT(leftmost_leaf >= 0);
3605 3624
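	/*
	 * Expand the mask bits into per-uFrame charges; for example
	 * (illustrative values), a mask of 0x05 charges `bandwidth' against
	 * uFrames 0 and 2 of every affected leaf and leaves the remaining
	 * uFrames untouched.
	 */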
3606 3625 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3607 3626 if (mask & 0x1) {
3608 3627 uFrame_bandwidth[j] = bandwidth;
3609 3628 } else {
3610 3629 uFrame_bandwidth[j] = 0;
3611 3630 }
3612 3631
3613 3632 mask = mask >> 1;
3614 3633 }
3615 3634
3616 3635 	/* Update all the affected leaves with the bandwidth */
3617 3636 for (i = 0; i < leaf_count; i++) {
3618 3637 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i];
3619 3638
3620 3639 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3621 3640 fbp->ehci_micro_frame_bandwidth[j] +=
3622 3641 uFrame_bandwidth[j];
3623 3642 fbp->ehci_allocated_frame_bandwidth +=
3624 3643 uFrame_bandwidth[j];
3625 3644 }
3626 3645 }
3627 3646 }
3628 3647
3629 3648 /*
3630 3649 * Miscellaneous functions
3631 3650 */
3632 3651
3633 3652 /*
3634 3653 * ehci_obtain_state:
3635 3654 *
3636 3655 * NOTE: This function is also called from POLLED MODE.
3637 3656 */
3638 3657 ehci_state_t *
3639 3658 ehci_obtain_state(dev_info_t *dip)
3640 3659 {
3641 3660 int instance = ddi_get_instance(dip);
3642 3661
3643 3662 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance);
3644 3663
3645 3664 ASSERT(state != NULL);
3646 3665
3647 3666 return (state);
3648 3667 }
3649 3668
3650 3669
3651 3670 /*
3652 3671 * ehci_state_is_operational:
3653 3672 *
3654 3673 * Check the Host controller state and return proper values.
3655 3674 */
3656 3675 int
3657 3676 ehci_state_is_operational(ehci_state_t *ehcip)
3658 3677 {
3659 3678 int val;
3660 3679
3661 3680 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3662 3681
3663 3682 switch (ehcip->ehci_hc_soft_state) {
3664 3683 case EHCI_CTLR_INIT_STATE:
3665 3684 case EHCI_CTLR_SUSPEND_STATE:
3666 3685 val = USB_FAILURE;
3667 3686 break;
3668 3687 case EHCI_CTLR_OPERATIONAL_STATE:
3669 3688 val = USB_SUCCESS;
3670 3689 break;
3671 3690 case EHCI_CTLR_ERROR_STATE:
3672 3691 val = USB_HC_HARDWARE_ERROR;
3673 3692 break;
3674 3693 default:
3675 3694 val = USB_FAILURE;
3676 3695 break;
3677 3696 }
3678 3697
3679 3698 return (val);
3680 3699 }
3681 3700
3682 3701
3683 3702 /*
3684 3703  * ehci_do_soft_reset:
3685 3704  *
3686 3705  * Do a soft reset of the ehci host controller.
3687 3706 */
3688 3707 int
3689 3708 ehci_do_soft_reset(ehci_state_t *ehcip)
3690 3709 {
3691 3710 usb_frame_number_t before_frame_number, after_frame_number;
3692 3711 ehci_regs_t *ehci_save_regs;
3693 3712
3694 3713 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3695 3714
3696 3715 /* Increment host controller error count */
3697 3716 ehcip->ehci_hc_error++;
3698 3717
3699 3718 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3700 3719 "ehci_do_soft_reset:"
3701 3720 "Reset ehci host controller 0x%x", ehcip->ehci_hc_error);
3702 3721
3703 3722 /*
3704 3723 * Allocate space for saving current Host Controller
3705 3724 * registers. Don't do any recovery if allocation
3706 3725 * fails.
3707 3726 */
3708 3727 ehci_save_regs = (ehci_regs_t *)
3709 3728 kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP);
3710 3729
3711 3730 if (ehci_save_regs == NULL) {
3712 3731 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3713 3732 "ehci_do_soft_reset: kmem_zalloc failed");
3714 3733
3715 3734 return (USB_FAILURE);
3716 3735 }
3717 3736
3718 3737 /* Save current ehci registers */
3719 3738 ehci_save_regs->ehci_command = Get_OpReg(ehci_command);
3720 3739 ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt);
3721 3740 ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment);
3722 3741 ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr);
3723 3742 ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag);
3724 3743 ehci_save_regs->ehci_periodic_list_base =
3725 3744 Get_OpReg(ehci_periodic_list_base);
3726 3745
3727 3746 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3728 3747 "ehci_do_soft_reset: Save reg = 0x%p", (void *)ehci_save_regs);
3729 3748
3730 3749 /* Disable all list processing and interrupts */
3731 3750 Set_OpReg(ehci_command, Get_OpReg(ehci_command) &
3732 3751 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE));
3733 3752
3734 3753 /* Disable all EHCI interrupts */
3735 3754 Set_OpReg(ehci_interrupt, 0);
3736 3755
3737 3756 	/* Wait for a few milliseconds */
3738 3757 drv_usecwait(EHCI_SOF_TIMEWAIT);
3739 3758
3740 3759 /* Do light soft reset of ehci host controller */
3741 3760 Set_OpReg(ehci_command,
3742 3761 Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET);
3743 3762
3744 3763 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3745 3764 "ehci_do_soft_reset: Reset in progress");
3746 3765
3747 3766 /* Wait for reset to complete */
3748 3767 drv_usecwait(EHCI_RESET_TIMEWAIT);
3749 3768
3750 3769 /*
3751 3770 * Restore previous saved EHCI register value
3752 3771 	 * Restore the previously saved EHCI register values
3753 3772 	 * to the current EHCI registers.
3754 3773 Set_OpReg(ehci_ctrl_segment, (uint32_t)
3755 3774 ehci_save_regs->ehci_ctrl_segment);
3756 3775
3757 3776 Set_OpReg(ehci_periodic_list_base, (uint32_t)
3758 3777 ehci_save_regs->ehci_periodic_list_base);
3759 3778
3760 3779 Set_OpReg(ehci_async_list_addr, (uint32_t)
3761 3780 ehci_save_regs->ehci_async_list_addr);
3762 3781
3763 3782 /*
3764 3783 * For some reason this register might get nulled out by
3765 3784 	 * the Uli M1575 South Bridge. To work around the hardware
3766 3785 * problem, check the value after write and retry if the
3767 3786 * last write fails.
3768 3787 */
3769 3788 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
3770 3789 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
3771 3790 (ehci_save_regs->ehci_async_list_addr !=
3772 3791 Get_OpReg(ehci_async_list_addr))) {
3773 3792 int retry = 0;
3774 3793
3775 3794 Set_OpRegRetry(ehci_async_list_addr, (uint32_t)
3776 3795 ehci_save_regs->ehci_async_list_addr, retry);
3777 3796 if (retry >= EHCI_MAX_RETRY) {
3778 3797 USB_DPRINTF_L2(PRINT_MASK_ATTA,
3779 3798 ehcip->ehci_log_hdl, "ehci_do_soft_reset:"
3780 3799 " ASYNCLISTADDR write failed.");
3781 3800
3782 3801 return (USB_FAILURE);
3783 3802 }
3784 3803 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
3785 3804 "ehci_do_soft_reset: ASYNCLISTADDR "
3786 3805 "write failed, retry=%d", retry);
3787 3806 }
3788 3807
3789 3808 Set_OpReg(ehci_config_flag, (uint32_t)
3790 3809 ehci_save_regs->ehci_config_flag);
3791 3810
3792 3811 /* Enable both Asynchronous and Periodic Schedule if necessary */
3793 3812 ehci_toggle_scheduler(ehcip);
3794 3813
3795 3814 /*
3796 3815 * Set ehci_interrupt to enable all interrupts except Root
3797 3816 * Hub Status change and frame list rollover interrupts.
3798 3817 */
3799 3818 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
3800 3819 EHCI_INTR_FRAME_LIST_ROLLOVER |
3801 3820 EHCI_INTR_USB_ERROR |
3802 3821 EHCI_INTR_USB);
3803 3822
3804 3823 /*
3805 3824 	 * Deallocate the space that was allocated for saving
3806 3825 	 * the HC registers.
3807 3826 */
3808 3827 kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t));
3809 3828
3810 3829 /*
3811 3830 * Set the desired interrupt threshold, frame list size (if
3812 3831 	 * applicable) and turn on the EHCI host controller.
3813 3832 */
3814 3833 Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) &
3815 3834 ~EHCI_CMD_INTR_THRESHOLD) |
3816 3835 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
3817 3836
3818 3837 /* Wait 10ms for EHCI to start sending SOF */
3819 - drv_usecwait(EHCI_RESET_TIMEWAIT);
3838 + ehci_wait_reset(ehcip, EHCI_RESET_TIMEWAIT);
3820 3839
3821 3840 /*
3822 3841 * Get the current usb frame number before waiting for
3823 3842 	 * a few milliseconds.
3824 3843 */
3825 3844 before_frame_number = ehci_get_current_frame_number(ehcip);
3826 3845
3827 3846 	/* Wait for a few milliseconds */
3828 3847 drv_usecwait(EHCI_SOF_TIMEWAIT);
3829 3848
3830 3849 /*
3831 3850 * Get the current usb frame number after waiting for
3832 3851 	 * a few milliseconds.
3833 3852 */
3834 3853 after_frame_number = ehci_get_current_frame_number(ehcip);
3835 3854
3836 3855 USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3837 3856 "ehci_do_soft_reset: Before Frame Number 0x%llx "
3838 3857 "After Frame Number 0x%llx",
3839 3858 (unsigned long long)before_frame_number,
3840 3859 (unsigned long long)after_frame_number);
3841 3860
3842 3861 if ((after_frame_number <= before_frame_number) &&
3843 3862 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
3844 3863
3845 3864 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3846 3865 "ehci_do_soft_reset: Soft reset failed");
3847 3866
3848 3867 return (USB_FAILURE);
3849 3868 }
3850 3869
3851 3870 return (USB_SUCCESS);
3852 3871 }
3853 3872
3854 3873
3855 3874 /*
3856 3875 * ehci_get_xfer_attrs:
3857 3876 *
3858 3877 * Get the attributes of a particular xfer.
3859 3878 *
3860 3879 * NOTE: This function is also called from POLLED MODE.
3861 3880 */
3862 3881 usb_req_attrs_t
3863 3882 ehci_get_xfer_attrs(
3864 3883 ehci_state_t *ehcip,
3865 3884 ehci_pipe_private_t *pp,
3866 3885 ehci_trans_wrapper_t *tw)
3867 3886 {
3868 3887 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
3869 3888 usb_req_attrs_t attrs = USB_ATTRS_NONE;
3870 3889
3871 3890 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3872 3891 "ehci_get_xfer_attrs:");
3873 3892
3874 3893 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
3875 3894 case USB_EP_ATTR_CONTROL:
3876 3895 attrs = ((usb_ctrl_req_t *)
3877 3896 tw->tw_curr_xfer_reqp)->ctrl_attributes;
3878 3897 break;
3879 3898 case USB_EP_ATTR_BULK:
3880 3899 attrs = ((usb_bulk_req_t *)
3881 3900 tw->tw_curr_xfer_reqp)->bulk_attributes;
3882 3901 break;
3883 3902 case USB_EP_ATTR_INTR:
3884 3903 attrs = ((usb_intr_req_t *)
3885 3904 tw->tw_curr_xfer_reqp)->intr_attributes;
3886 3905 break;
3887 3906 }
3888 3907
3889 3908 return (attrs);
3890 3909 }
3891 3910
3892 3911
3893 3912 /*
3894 3913 * ehci_get_current_frame_number:
3895 3914 *
3896 3915 * Get the current software based usb frame number.
3897 3916 */
3898 3917 usb_frame_number_t
3899 3918 ehci_get_current_frame_number(ehci_state_t *ehcip)
3900 3919 {
3901 3920 usb_frame_number_t usb_frame_number;
3902 3921 usb_frame_number_t ehci_fno, micro_frame_number;
3903 3922
3904 3923 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3905 3924
3906 3925 ehci_fno = ehcip->ehci_fno;
3907 3926 micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF;
3908 3927
3909 3928 /*
3910 3929 * Calculate current software based usb frame number.
3911 3930 *
3912 3931 * This code accounts for the fact that frame number is
3913 3932 * updated by the Host Controller before the ehci driver
3914 3933 * gets an FrameListRollover interrupt that will adjust
3915 3934 * Frame higher part.
3916 3935 *
3917 3936 * Refer ehci specification 1.0, section 2.3.2, page 21.
3918 3937 */
3919 3938 micro_frame_number = ((micro_frame_number & 0x1FFF) |
3920 3939 ehci_fno) + (((micro_frame_number & 0x3FFF) ^
3921 3940 ehci_fno) & 0x2000);
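	/*
	 * For example (illustrative values): if the hardware index reads
	 * 0x2005 just after crossing bit 13 while ehci_fno is still 0, the
	 * expression above yields (0x0005 | 0) + 0x2000 = 0x2005; once the
	 * rollover interrupt sets ehci_fno to 0x2000, the XOR term becomes
	 * 0 and the result is unchanged.
	 */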
3922 3941
3923 3942 /*
3924 3943 * Micro Frame number is equivalent to 125 usec. Eight
3925 3944 	 * One Micro Frame is equivalent to 125 usec. Eight
3926 3945 	 * Micro Frame numbers are equivalent to one millisecond
3927 3946 */
3928 3947 usb_frame_number = micro_frame_number >>
3929 3948 EHCI_uFRAMES_PER_USB_FRAME_SHIFT;
3930 3949
3931 3950 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3932 3951 "ehci_get_current_frame_number: "
3933 3952 "Current usb uframe number = 0x%llx "
3934 3953 "Current usb frame number = 0x%llx",
3935 3954 (unsigned long long)micro_frame_number,
3936 3955 (unsigned long long)usb_frame_number);
3937 3956
3938 3957 return (usb_frame_number);
3939 3958 }
3940 3959
3941 3960
3942 3961 /*
3943 3962 * ehci_cpr_cleanup:
3944 3963 *
3945 3964  * Clean up ehci state and other ehci-specific information across
3946 3965 * Check Point Resume (CPR).
3947 3966 */
3948 3967 static void
3949 3968 ehci_cpr_cleanup(ehci_state_t *ehcip)
3950 3969 {
3951 3970 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3952 3971
3953 3972 /* Reset software part of usb frame number */
3954 3973 ehcip->ehci_fno = 0;
3955 3974 }
3956 3975
3957 3976
3958 3977 /*
3959 3978 * ehci_wait_for_sof:
3960 3979 *
3961 3980  * Wait for a couple of SOF interrupts
3962 3981 */
3963 3982 int
3964 3983 ehci_wait_for_sof(ehci_state_t *ehcip)
3965 3984 {
3966 3985 usb_frame_number_t before_frame_number, after_frame_number;
3967 3986 int error = USB_SUCCESS;
3968 3987
3969 3988 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3970 3989 ehcip->ehci_log_hdl, "ehci_wait_for_sof");
3971 3990
3972 3991 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3973 3992
3974 3993 error = ehci_state_is_operational(ehcip);
3975 3994
3976 3995 if (error != USB_SUCCESS) {
3977 3996
3978 3997 return (error);
3979 3998 }
3980 3999
3981 4000 /* Get the current usb frame number before waiting for two SOFs */
3982 4001 before_frame_number = ehci_get_current_frame_number(ehcip);
3983 4002
3984 4003 mutex_exit(&ehcip->ehci_int_mutex);
3985 4004
3986 4005 	/* Wait for a few milliseconds */
3987 4006 delay(drv_usectohz(EHCI_SOF_TIMEWAIT));
3988 4007
3989 4008 mutex_enter(&ehcip->ehci_int_mutex);
3990 4009
3991 4010 	/* Get the current usb frame number after waking up */
3992 4011 after_frame_number = ehci_get_current_frame_number(ehcip);
3993 4012
3994 4013 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3995 4014 "ehci_wait_for_sof: framenumber: before 0x%llx "
3996 4015 "after 0x%llx",
3997 4016 (unsigned long long)before_frame_number,
3998 4017 (unsigned long long)after_frame_number);
3999 4018
4000 4019 /* Return failure, if usb frame number has not been changed */
4001 4020 if (after_frame_number <= before_frame_number) {
4002 4021
4003 4022 if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) {
4004 4023
4005 4024 USB_DPRINTF_L0(PRINT_MASK_LISTS,
4006 4025 ehcip->ehci_log_hdl, "No SOF interrupts");
4007 4026
4008 4027 /* Set host controller soft state to error */
4009 4028 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
4010 4029
4011 4030 return (USB_FAILURE);
4012 4031 }
4013 4032
4014 4033 }
4015 4034
4016 4035 return (USB_SUCCESS);
4017 4036 }
4018 4037
4019 4038 /*
4020 4039  * Toggle the async/periodic schedule based on the opened pipe count.
4021 4040  * During pipe cleanup (in the pipe reset case), the pipe's QH is temporarily
4022 4041  * disabled, but the TW on the pipe is not freed. In this case, we need
4023 4042  * to disable the async/periodic schedule for some non-compatible hardware.
4024 4043  * Otherwise, the hardware will overwrite the software's configuration of
4025 4044  * the QH.
4026 4045 */
4027 4046 void
4028 4047 ehci_toggle_scheduler_on_pipe(ehci_state_t *ehcip)
4029 4048 {
4030 4049 uint_t temp_reg, cmd_reg;
4031 4050
4032 4051 cmd_reg = Get_OpReg(ehci_command);
4033 4052 temp_reg = cmd_reg;
4034 4053
4035 4054 /*
4036 4055 * Enable/Disable asynchronous scheduler, and
4037 4056 * turn on/off async list door bell
4038 4057 */
4039 4058 if (ehcip->ehci_open_async_count) {
4040 4059 if ((ehcip->ehci_async_req_count > 0) &&
4041 4060 ((cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE) == 0)) {
4042 4061 /*
4043 4062 * For some reason this address might get nulled out by
4044 4063 * the ehci chip. Set it here just in case it is null.
4045 4064 */
4046 4065 Set_OpReg(ehci_async_list_addr,
4047 4066 ehci_qh_cpu_to_iommu(ehcip,
4048 4067 ehcip->ehci_head_of_async_sched_list));
4049 4068
4050 4069 /*
4051 4070 * For some reason this register might get nulled out by
4052 4071 			 * the Uli M1575 Southbridge. To work around the HW
4053 4072 * problem, check the value after write and retry if the
4054 4073 * last write fails.
4055 4074 *
4056 4075 * If the ASYNCLISTADDR remains "stuck" after
4057 4076 * EHCI_MAX_RETRY retries, then the M1575 is broken
4058 4077 * and is stuck in an inconsistent state and is about
4059 4078 * to crash the machine with a trn_oor panic when it
4060 4079 * does a DMA read from 0x0. It is better to panic
4061 4080 * now rather than wait for the trn_oor crash; this
4062 4081 * way Customer Service will have a clean signature
4063 4082 * that indicts the M1575 chip rather than a
4064 4083 * mysterious and hard-to-diagnose trn_oor panic.
4065 4084 */
4066 4085 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4067 4086 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4068 4087 (ehci_qh_cpu_to_iommu(ehcip,
4069 4088 ehcip->ehci_head_of_async_sched_list) !=
4070 4089 Get_OpReg(ehci_async_list_addr))) {
4071 4090 int retry = 0;
4072 4091
4073 4092 Set_OpRegRetry(ehci_async_list_addr,
4074 4093 ehci_qh_cpu_to_iommu(ehcip,
4075 4094 ehcip->ehci_head_of_async_sched_list),
4076 4095 retry);
4077 4096 if (retry >= EHCI_MAX_RETRY)
4078 4097 cmn_err(CE_PANIC,
4079 4098 "ehci_toggle_scheduler_on_pipe: "
4080 4099 "ASYNCLISTADDR write failed.");
4081 4100
4082 4101 USB_DPRINTF_L2(PRINT_MASK_ATTA,
4083 4102 ehcip->ehci_log_hdl,
4084 4103 "ehci_toggle_scheduler_on_pipe:"
4085 4104 " ASYNCLISTADDR write failed, retry=%d",
4086 4105 retry);
4087 4106 }
4088 4107
4089 4108 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4090 4109 }
4091 4110 } else {
4092 4111 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4093 4112 }
4094 4113
4095 4114 if (ehcip->ehci_open_periodic_count) {
4096 4115 if ((ehcip->ehci_periodic_req_count > 0) &&
4097 4116 ((cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE) == 0)) {
4098 4117 /*
4099 4118 			 * For some reason this address gets nulled out by
4100 4119 * the ehci chip. Set it here just in case it is null.
4101 4120 */
4102 4121 Set_OpReg(ehci_periodic_list_base,
4103 4122 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4104 4123 0xFFFFF000));
4105 4124 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4106 4125 }
4107 4126 } else {
4108 4127 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4109 4128 }
4110 4129
4111 4130 /* Just an optimization */
4112 4131 if (temp_reg != cmd_reg) {
4113 4132 Set_OpReg(ehci_command, cmd_reg);
4114 4133 }
4115 4134 }
4116 4135
4117 4136
4118 4137 /*
4119 4138 * ehci_toggle_scheduler:
4120 4139 *
4121 4140  * Turn the scheduler on or off based on the pipe open count.
4122 4141 */
4123 4142 void
4124 4143 ehci_toggle_scheduler(ehci_state_t *ehcip)
4125 4144 {
4126 4145 uint_t temp_reg, cmd_reg;
4127 4146
4128 4147 /*
4129 4148 	 * As a performance optimization, only change the schedule enable
4130 4149 	 * bits when an async or periodic request count is 0 or 1.
4131 4150 	 *
4132 4151 	 * The related bits are already set correctly if
4133 4152 	 *	async and periodic req counts are > 1
4134 4153 	 *	OR async req count > 1 & no periodic pipe
4135 4154 	 *	OR periodic req count > 1 & no async pipe
4136 4155 */
4137 4156 if (((ehcip->ehci_async_req_count > 1) &&
4138 4157 (ehcip->ehci_periodic_req_count > 1)) ||
4139 4158 ((ehcip->ehci_async_req_count > 1) &&
4140 4159 (ehcip->ehci_open_periodic_count == 0)) ||
4141 4160 ((ehcip->ehci_periodic_req_count > 1) &&
4142 4161 (ehcip->ehci_open_async_count == 0))) {
4143 4162 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4144 4163 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4145 4164 "async/periodic bits no need to change");
4146 4165
4147 4166 return;
4148 4167 }
4149 4168
4150 4169 cmd_reg = Get_OpReg(ehci_command);
4151 4170 temp_reg = cmd_reg;
4152 4171
4153 4172 /*
4154 4173 * Enable/Disable asynchronous scheduler, and
4155 4174 * turn on/off async list door bell
4156 4175 */
4157 4176 if (ehcip->ehci_async_req_count > 1) {
4158 4177 /* we already enable the async bit */
4159 4178 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4160 4179 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4161 4180 "async bit already enabled: cmd_reg=0x%x", cmd_reg);
4162 4181 } else if (ehcip->ehci_async_req_count == 1) {
4163 4182 if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) {
4164 4183 /*
4165 4184 * For some reason this address might get nulled out by
4166 4185 * the ehci chip. Set it here just in case it is null.
4167 4186 * If it's not null, we should not reset the
4168 4187 * ASYNCLISTADDR, because it's updated by hardware to
4169 4188 * point to the next queue head to be executed.
4170 4189 */
4171 4190 if (!Get_OpReg(ehci_async_list_addr)) {
4172 4191 Set_OpReg(ehci_async_list_addr,
4173 4192 ehci_qh_cpu_to_iommu(ehcip,
4174 4193 ehcip->ehci_head_of_async_sched_list));
4175 4194 }
4176 4195
4177 4196 /*
4178 4197 * For some reason this register might get nulled out by
4179 4198 * the Uli M1575 Southbridge. To workaround the HW
4180 4199 			 * the Uli M1575 Southbridge. To work around the HW
4181 4200 * last write fails.
4182 4201 *
4183 4202 * If the ASYNCLISTADDR remains "stuck" after
4184 4203 * EHCI_MAX_RETRY retries, then the M1575 is broken
4185 4204 * and is stuck in an inconsistent state and is about
4186 4205 * to crash the machine with a trn_oor panic when it
4187 4206 * does a DMA read from 0x0. It is better to panic
4188 4207 * now rather than wait for the trn_oor crash; this
4189 4208 * way Customer Service will have a clean signature
4190 4209 * that indicts the M1575 chip rather than a
4191 4210 * mysterious and hard-to-diagnose trn_oor panic.
4192 4211 */
4193 4212 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4194 4213 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4195 4214 (ehci_qh_cpu_to_iommu(ehcip,
4196 4215 ehcip->ehci_head_of_async_sched_list) !=
4197 4216 Get_OpReg(ehci_async_list_addr))) {
4198 4217 int retry = 0;
4199 4218
4200 4219 Set_OpRegRetry(ehci_async_list_addr,
4201 4220 ehci_qh_cpu_to_iommu(ehcip,
4202 4221 ehcip->ehci_head_of_async_sched_list),
4203 4222 retry);
4204 4223 if (retry >= EHCI_MAX_RETRY)
4205 4224 cmn_err(CE_PANIC,
4206 4225 "ehci_toggle_scheduler: "
4207 4226 "ASYNCLISTADDR write failed.");
4208 4227
4209 4228 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4210 4229 ehcip->ehci_log_hdl,
4211 4230 "ehci_toggle_scheduler: ASYNCLISTADDR "
4212 4231 "write failed, retry=%d", retry);
4213 4232 }
4214 4233 }
4215 4234 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4216 4235 } else {
4217 4236 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4218 4237 }
4219 4238
4220 4239 if (ehcip->ehci_periodic_req_count > 1) {
4221 4240 /* we already enable the periodic bit. */
4222 4241 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4223 4242 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4224 4243 "periodic bit already enabled: cmd_reg=0x%x", cmd_reg);
4225 4244 } else if (ehcip->ehci_periodic_req_count == 1) {
4226 4245 if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) {
4227 4246 /*
4228 4247 			 * For some reason this address gets nulled out by
4229 4248 * the ehci chip. Set it here just in case it is null.
4230 4249 */
4231 4250 Set_OpReg(ehci_periodic_list_base,
4232 4251 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4233 4252 0xFFFFF000));
4234 4253 }
4235 4254 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4236 4255 } else {
4237 4256 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4238 4257 }
4239 4258
4240 4259 /* Just an optimization */
4241 4260 if (temp_reg != cmd_reg) {
4242 4261 Set_OpReg(ehci_command, cmd_reg);
4243 4262
4244 4263 /* To make sure the command register is updated correctly */
4245 4264 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4246 4265 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
4247 4266 int retry = 0;
4248 4267
4249 4268 Set_OpRegRetry(ehci_command, cmd_reg, retry);
4250 4269 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4251 4270 ehcip->ehci_log_hdl,
4252 4271 "ehci_toggle_scheduler: CMD write failed, retry=%d",
4253 4272 retry);
4254 4273 }
4255 4274
4256 4275 }
4257 4276 }
4258 4277
4259 4278 /*
4260 4279 * ehci print functions
4261 4280 */
4262 4281
4263 4282 /*
4264 4283 * ehci_print_caps:
4265 4284 */
4266 4285 void
4267 4286 ehci_print_caps(ehci_state_t *ehcip)
4268 4287 {
4269 4288 uint_t i;
4270 4289
4271 4290 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4272 4291 "\n\tUSB 2.0 Host Controller Characteristics\n");
4273 4292
4274 4293 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4275 4294 "Caps Length: 0x%x Version: 0x%x\n",
4276 4295 Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version));
4277 4296
4278 4297 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4279 4298 "Structural Parameters\n");
4280 4299 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4281 4300 "Port indicators: %s", (Get_Cap(ehci_hcs_params) &
4282 4301 EHCI_HCS_PORT_INDICATOR) ? "Yes" : "No");
4283 4302 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4284 4303 "No of Classic host controllers: 0x%x",
4285 4304 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS)
4286 4305 >> EHCI_HCS_NUM_COMP_CTRL_SHIFT);
4287 4306 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4288 4307 "No of ports per Classic host controller: 0x%x",
4289 4308 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC)
4290 4309 >> EHCI_HCS_NUM_PORTS_CC_SHIFT);
4291 4310 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4292 4311 "Port routing rules: %s", (Get_Cap(ehci_hcs_params) &
4293 4312 EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No");
4294 4313 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4295 4314 "Port power control: %s", (Get_Cap(ehci_hcs_params) &
4296 4315 EHCI_HCS_PORT_POWER_CONTROL) ? "Yes" : "No");
4297 4316 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4298 4317 "No of root hub ports: 0x%x\n",
4299 4318 Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS);
4300 4319
4301 4320 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4302 4321 "Capability Parameters\n");
4303 4322 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4304 4323 "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) &
4305 4324 EHCI_HCC_EECP) ? "Yes" : "No");
4306 4325 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4307 4326 "Isoch schedule threshold: 0x%x",
4308 4327 Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD);
4309 4328 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4310 4329 "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) &
4311 4330 EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No");
4312 4331 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4313 4332 "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) &
4314 4333 EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024");
4315 4334 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4316 4335 "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) &
4317 4336 EHCI_HCC_64BIT_ADDR_CAP) ? "Yes" : "No");
4318 4337
4319 4338 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4320 4339 "Classic Port Route Description");
4321 4340
4322 4341 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4323 4342 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4324 4343 "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i]));
4325 4344 }
4326 4345 }
4327 4346
4328 4347
4329 4348 /*
4330 4349 * ehci_print_regs:
4331 4350 */
4332 4351 void
4333 4352 ehci_print_regs(ehci_state_t *ehcip)
4334 4353 {
4335 4354 uint_t i;
4336 4355
4337 4356 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4338 4357 "\n\tEHCI%d Operational Registers\n",
4339 4358 ddi_get_instance(ehcip->ehci_dip));
4340 4359
4341 4360 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4342 4361 "Command: 0x%x Status: 0x%x",
4343 4362 Get_OpReg(ehci_command), Get_OpReg(ehci_status));
4344 4363 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4345 4364 "Interrupt: 0x%x Frame Index: 0x%x",
4346 4365 Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index));
4347 4366 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4348 4367 "Control Segment: 0x%x Periodic List Base: 0x%x",
4349 4368 Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base));
4350 4369 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4351 4370 "Async List Addr: 0x%x Config Flag: 0x%x",
4352 4371 Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag));
4353 4372
4354 4373 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4355 4374 "Root Hub Port Status");
4356 4375
4357 4376 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4358 4377 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4359 4378 "\tPort Status 0x%x: 0x%x ", i,
4360 4379 Get_OpReg(ehci_rh_port_status[i]));
4361 4380 }
4362 4381 }
4363 4382
4364 4383
4365 4384 /*
4366 4385 * ehci_print_qh:
4367 4386 */
4368 4387 void
4369 4388 ehci_print_qh(
4370 4389 ehci_state_t *ehcip,
4371 4390 ehci_qh_t *qh)
4372 4391 {
4373 4392 uint_t i;
4374 4393
4375 4394 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4376 4395 "ehci_print_qh: qh = 0x%p", (void *)qh);
4377 4396
4378 4397 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4379 4398 "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr));
4380 4399 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4381 4400 "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl));
4382 4401 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4383 4402 "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl));
4384 4403 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4385 4404 "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd));
4386 4405 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4387 4406 "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd));
4388 4407 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4389 4408 "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd));
4390 4409 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4391 4410 "\tqh_status: 0x%x ", Get_QH(qh->qh_status));
4392 4411
4393 4412 for (i = 0; i < 5; i++) {
4394 4413 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4395 4414 "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i]));
4396 4415 }
4397 4416
4398 4417 for (i = 0; i < 5; i++) {
4399 4418 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4400 4419 "\tqh_buf_high[%d]: 0x%x ",
4401 4420 i, Get_QH(qh->qh_buf_high[i]));
4402 4421 }
4403 4422
4404 4423 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4405 4424 "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd));
4406 4425 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4407 4426 "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev));
4408 4427 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4409 4428 "\tqh_state: 0x%x ", Get_QH(qh->qh_state));
4410 4429 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4411 4430 "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next));
4412 4431 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4413 4432 "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame));
4414 4433 }
4415 4434
4416 4435
4417 4436 /*
4418 4437 * ehci_print_qtd:
4419 4438 */
4420 4439 void
4421 4440 ehci_print_qtd(
4422 4441 ehci_state_t *ehcip,
4423 4442 ehci_qtd_t *qtd)
4424 4443 {
4425 4444 uint_t i;
4426 4445
4427 4446 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4428 4447 "ehci_print_qtd: qtd = 0x%p", (void *)qtd);
4429 4448
4430 4449 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4431 4450 "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd));
4432 4451 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4433 4452 "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd));
4434 4453 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4435 4454 "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl));
4436 4455
4437 4456 for (i = 0; i < 5; i++) {
4438 4457 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4439 4458 "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i]));
4440 4459 }
4441 4460
4442 4461 for (i = 0; i < 5; i++) {
4443 4462 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4444 4463 "\tqtd_buf_high[%d]: 0x%x ",
4445 4464 i, Get_QTD(qtd->qtd_buf_high[i]));
4446 4465 }
4447 4466
4448 4467 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4449 4468 "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper));
4450 4469 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4451 4470 "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd));
4452 4471 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4453 4472 "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next));
4454 4473 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4455 4474 "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev));
4456 4475 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4457 4476 "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state));
4458 4477 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4459 4478 "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase));
4460 4479 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4461 4480 "\tqtd_xfer_offs: 0x%x ", Get_QTD(qtd->qtd_xfer_offs));
4462 4481 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4463 4482 "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len));
4464 4483 }
4465 4484
4466 4485 /*
4467 4486 * ehci kstat functions
4468 4487 */
4469 4488
4470 4489 /*
4471 4490 * ehci_create_stats:
4472 4491 *
4473 4492 * Allocate and initialize the ehci kstat structures
4474 4493 */
4475 4494 void
4476 4495 ehci_create_stats(ehci_state_t *ehcip)
4477 4496 {
4478 4497 char kstatname[KSTAT_STRLEN];
4479 4498 const char *dname = ddi_driver_name(ehcip->ehci_dip);
4480 4499 char *usbtypes[USB_N_COUNT_KSTATS] =
4481 4500 {"ctrl", "isoch", "bulk", "intr"};
4482 4501 uint_t instance = ehcip->ehci_instance;
4483 4502 ehci_intrs_stats_t *isp;
4484 4503 int i;
4485 4504
4486 4505 if (EHCI_INTRS_STATS(ehcip) == NULL) {
4487 4506 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
4488 4507 dname, instance);
4489 4508 EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance,
4490 4509 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
4491 4510 sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t),
4492 4511 KSTAT_FLAG_PERSISTENT);
4493 4512
4494 4513 if (EHCI_INTRS_STATS(ehcip)) {
4495 4514 isp = EHCI_INTRS_STATS_DATA(ehcip);
4496 4515 kstat_named_init(&isp->ehci_sts_total,
4497 4516 "Interrupts Total", KSTAT_DATA_UINT64);
4498 4517 kstat_named_init(&isp->ehci_sts_not_claimed,
4499 4518 "Not Claimed", KSTAT_DATA_UINT64);
4500 4519 kstat_named_init(&isp->ehci_sts_async_sched_status,
4501 4520 "Async schedule status", KSTAT_DATA_UINT64);
4502 4521 kstat_named_init(&isp->ehci_sts_periodic_sched_status,
4503 4522 "Periodic sched status", KSTAT_DATA_UINT64);
4504 4523 kstat_named_init(&isp->ehci_sts_empty_async_schedule,
4505 4524 "Empty async schedule", KSTAT_DATA_UINT64);
4506 4525 kstat_named_init(&isp->ehci_sts_host_ctrl_halted,
4507 4526 "Host controller Halted", KSTAT_DATA_UINT64);
4508 4527 kstat_named_init(&isp->ehci_sts_async_advance_intr,
4509 4528 "Intr on async advance", KSTAT_DATA_UINT64);
4510 4529 kstat_named_init(&isp->ehci_sts_host_system_error_intr,
4511 4530 "Host system error", KSTAT_DATA_UINT64);
4512 4531 kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr,
4513 4532 "Frame list rollover", KSTAT_DATA_UINT64);
4514 4533 kstat_named_init(&isp->ehci_sts_rh_port_change_intr,
4515 4534 "Port change detect", KSTAT_DATA_UINT64);
4516 4535 kstat_named_init(&isp->ehci_sts_usb_error_intr,
4517 4536 "USB error interrupt", KSTAT_DATA_UINT64);
4518 4537 kstat_named_init(&isp->ehci_sts_usb_intr,
4519 4538 "USB interrupt", KSTAT_DATA_UINT64);
4520 4539
4521 4540 EHCI_INTRS_STATS(ehcip)->ks_private = ehcip;
4522 4541 EHCI_INTRS_STATS(ehcip)->ks_update = nulldev;
4523 4542 kstat_install(EHCI_INTRS_STATS(ehcip));
4524 4543 }
4525 4544 }
4526 4545
4527 4546 if (EHCI_TOTAL_STATS(ehcip) == NULL) {
4528 4547 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
4529 4548 dname, instance);
4530 4549 EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance,
4531 4550 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
4532 4551 KSTAT_FLAG_PERSISTENT);
4533 4552
4534 4553 if (EHCI_TOTAL_STATS(ehcip)) {
4535 4554 kstat_install(EHCI_TOTAL_STATS(ehcip));
4536 4555 }
4537 4556 }
4538 4557
4539 4558 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4540 4559 if (ehcip->ehci_count_stats[i] == NULL) {
4541 4560 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
4542 4561 dname, instance, usbtypes[i]);
4543 4562 ehcip->ehci_count_stats[i] = kstat_create("usba",
4544 4563 instance, kstatname, "usb_byte_count",
4545 4564 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
4546 4565
4547 4566 if (ehcip->ehci_count_stats[i]) {
4548 4567 kstat_install(ehcip->ehci_count_stats[i]);
4549 4568 }
4550 4569 }
4551 4570 }
4552 4571 }
4553 4572
4554 4573
4555 4574 /*
4556 4575 * ehci_destroy_stats:
4557 4576 *
4558 4577 * Clean up ehci kstat structures
4559 4578 */
4560 4579 void
4561 4580 ehci_destroy_stats(ehci_state_t *ehcip)
4562 4581 {
4563 4582 int i;
4564 4583
4565 4584 if (EHCI_INTRS_STATS(ehcip)) {
4566 4585 kstat_delete(EHCI_INTRS_STATS(ehcip));
4567 4586 EHCI_INTRS_STATS(ehcip) = NULL;
4568 4587 }
4569 4588
4570 4589 if (EHCI_TOTAL_STATS(ehcip)) {
4571 4590 kstat_delete(EHCI_TOTAL_STATS(ehcip));
4572 4591 EHCI_TOTAL_STATS(ehcip) = NULL;
4573 4592 }
4574 4593
4575 4594 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4576 4595 if (ehcip->ehci_count_stats[i]) {
4577 4596 kstat_delete(ehcip->ehci_count_stats[i]);
4578 4597 ehcip->ehci_count_stats[i] = NULL;
4579 4598 }
4580 4599 }
4581 4600 }
4582 4601
4583 4602
4584 4603 /*
4585 4604 * ehci_do_intrs_stats:
4586 4605 *
4587 4606 * ehci status information
4588 4607 */
4589 4608 void
4590 4609 ehci_do_intrs_stats(
4591 4610 ehci_state_t *ehcip,
4592 4611 int val)
4593 4612 {
4594 4613 if (EHCI_INTRS_STATS(ehcip)) {
4595 4614 EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++;
4596 4615 switch (val) {
4597 4616 case EHCI_STS_ASYNC_SCHED_STATUS:
4598 4617 EHCI_INTRS_STATS_DATA(ehcip)->
4599 4618 ehci_sts_async_sched_status.value.ui64++;
4600 4619 break;
4601 4620 case EHCI_STS_PERIODIC_SCHED_STATUS:
4602 4621 EHCI_INTRS_STATS_DATA(ehcip)->
4603 4622 ehci_sts_periodic_sched_status.value.ui64++;
4604 4623 break;
4605 4624 case EHCI_STS_EMPTY_ASYNC_SCHEDULE:
4606 4625 EHCI_INTRS_STATS_DATA(ehcip)->
4607 4626 ehci_sts_empty_async_schedule.value.ui64++;
4608 4627 break;
4609 4628 case EHCI_STS_HOST_CTRL_HALTED:
4610 4629 EHCI_INTRS_STATS_DATA(ehcip)->
4611 4630 ehci_sts_host_ctrl_halted.value.ui64++;
4612 4631 break;
4613 4632 case EHCI_STS_ASYNC_ADVANCE_INTR:
4614 4633 EHCI_INTRS_STATS_DATA(ehcip)->
4615 4634 ehci_sts_async_advance_intr.value.ui64++;
4616 4635 break;
4617 4636 case EHCI_STS_HOST_SYSTEM_ERROR_INTR:
4618 4637 EHCI_INTRS_STATS_DATA(ehcip)->
4619 4638 ehci_sts_host_system_error_intr.value.ui64++;
4620 4639 break;
4621 4640 case EHCI_STS_FRM_LIST_ROLLOVER_INTR:
4622 4641 EHCI_INTRS_STATS_DATA(ehcip)->
4623 4642 ehci_sts_frm_list_rollover_intr.value.ui64++;
4624 4643 break;
4625 4644 case EHCI_STS_RH_PORT_CHANGE_INTR:
4626 4645 EHCI_INTRS_STATS_DATA(ehcip)->
4627 4646 ehci_sts_rh_port_change_intr.value.ui64++;
4628 4647 break;
4629 4648 case EHCI_STS_USB_ERROR_INTR:
4630 4649 EHCI_INTRS_STATS_DATA(ehcip)->
4631 4650 ehci_sts_usb_error_intr.value.ui64++;
4632 4651 break;
4633 4652 case EHCI_STS_USB_INTR:
4634 4653 EHCI_INTRS_STATS_DATA(ehcip)->
4635 4654 ehci_sts_usb_intr.value.ui64++;
4636 4655 break;
4637 4656 default:
4638 4657 EHCI_INTRS_STATS_DATA(ehcip)->
4639 4658 ehci_sts_not_claimed.value.ui64++;
4640 4659 break;
4641 4660 }
4642 4661 }
4643 4662 }
4644 4663
4645 4664
4646 4665 /*
4647 4666 * ehci_do_byte_stats:
4648 4667 *
4649 4668 * ehci data xfer information
4650 4669 */
4651 4670 void
4652 4671 ehci_do_byte_stats(
4653 4672 ehci_state_t *ehcip,
4654 4673 size_t len,
4655 4674 uint8_t attr,
4656 4675 uint8_t addr)
4657 4676 {
4658 4677 uint8_t type = attr & USB_EP_ATTR_MASK;
4659 4678 uint8_t dir = addr & USB_EP_DIR_MASK;
4660 4679
4661 4680 if (dir == USB_EP_DIR_IN) {
4662 4681 EHCI_TOTAL_STATS_DATA(ehcip)->reads++;
4663 4682 EHCI_TOTAL_STATS_DATA(ehcip)->nread += len;
4664 4683 switch (type) {
4665 4684 case USB_EP_ATTR_CONTROL:
4666 4685 EHCI_CTRL_STATS(ehcip)->reads++;
4667 4686 EHCI_CTRL_STATS(ehcip)->nread += len;
4668 4687 break;
4669 4688 case USB_EP_ATTR_BULK:
4670 4689 EHCI_BULK_STATS(ehcip)->reads++;
4671 4690 EHCI_BULK_STATS(ehcip)->nread += len;
4672 4691 break;
4673 4692 case USB_EP_ATTR_INTR:
4674 4693 EHCI_INTR_STATS(ehcip)->reads++;
4675 4694 EHCI_INTR_STATS(ehcip)->nread += len;
4676 4695 break;
4677 4696 case USB_EP_ATTR_ISOCH:
4678 4697 EHCI_ISOC_STATS(ehcip)->reads++;
4679 4698 EHCI_ISOC_STATS(ehcip)->nread += len;
4680 4699 break;
4681 4700 }
4682 4701 } else if (dir == USB_EP_DIR_OUT) {
4683 4702 EHCI_TOTAL_STATS_DATA(ehcip)->writes++;
4684 4703 EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len;
4685 4704 switch (type) {
4686 4705 case USB_EP_ATTR_CONTROL:
4687 4706 EHCI_CTRL_STATS(ehcip)->writes++;
4688 4707 EHCI_CTRL_STATS(ehcip)->nwritten += len;
4689 4708 break;
4690 4709 case USB_EP_ATTR_BULK:
4691 4710 EHCI_BULK_STATS(ehcip)->writes++;
4692 4711 EHCI_BULK_STATS(ehcip)->nwritten += len;
4693 4712 break;
4694 4713 case USB_EP_ATTR_INTR:
4695 4714 EHCI_INTR_STATS(ehcip)->writes++;
4696 4715 EHCI_INTR_STATS(ehcip)->nwritten += len;
4697 4716 break;
4698 4717 case USB_EP_ATTR_ISOCH:
4699 4718 EHCI_ISOC_STATS(ehcip)->writes++;
4700 4719 EHCI_ISOC_STATS(ehcip)->nwritten += len;
4701 4720 break;
4702 4721 }
4703 4722 }
4704 4723 }