Customize reset for HPE iLO
1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright (c) 2018, Joyent, Inc.
25 */
26
27 /*
28 * EHCI Host Controller Driver (EHCI)
29 *
30 * The EHCI driver is a software driver which interfaces to the Universal
31 * Serial Bus layer (USBA) and the Host Controller (HC). The interface to
32 * the Host Controller is defined by the EHCI Host Controller Interface.
33 *
34 * This module contains the main EHCI driver code which handles all USB
35 * transfers, bandwidth allocations and other general functionalities.
36 */
37
38 #include <sys/usb/hcd/ehci/ehcid.h>
39 #include <sys/usb/hcd/ehci/ehci_isoch.h>
40 #include <sys/usb/hcd/ehci/ehci_xfer.h>
41
42 /*
43 * EHCI MSI tunable:
44 *
45 * By default MSI is enabled on all supported platforms except for the
46 * EHCI controller of ULI1575 South bridge.
47 */
48 boolean_t ehci_enable_msi = B_TRUE;
49
50 /* Pointer to the state structure */
51 extern void *ehci_statep;
52
53 extern void ehci_handle_endpoint_reclaimation(ehci_state_t *);
54
55 extern uint_t ehci_vt62x2_workaround;
56 extern int force_ehci_off;
57
58 /* Adjustable variables for the size of the pools */
59 int ehci_qh_pool_size = EHCI_QH_POOL_SIZE;
60 int ehci_qtd_pool_size = EHCI_QTD_POOL_SIZE;
61
62 /*
63 * Table giving the order in which the 32ms interrupt QH lists are
64 * executed by the host controller in the lattice tree.
65 */
66 static uchar_t ehci_index[EHCI_NUM_INTR_QH_LISTS] =
67 {0x00, 0x10, 0x08, 0x18,
68 0x04, 0x14, 0x0c, 0x1c,
69 0x02, 0x12, 0x0a, 0x1a,
70 0x06, 0x16, 0x0e, 0x1e,
71 0x01, 0x11, 0x09, 0x19,
72 0x05, 0x15, 0x0d, 0x1d,
73 0x03, 0x13, 0x0b, 0x1b,
74 0x07, 0x17, 0x0f, 0x1f};
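/*
 * Note: the table above is simply the 5-bit bit-reversal of the indices
 * 0 through 31 (for example, index 3 = 00011b reversed is 11000b = 0x18).
 * Visiting the 32ms lists in bit-reversed order spreads queue heads with
 * the same polling interval as evenly as possible across the 32 frames.
 */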
75
76 /*
77 * Values used to calculate the start split mask for the
78 * low/full/high speed interrupt and isochronous endpoints.
79 */
80 static uint_t ehci_start_split_mask[15] = {
81 /*
82 * For high/full/low speed usb devices; for high speed
83 * devices only when the polling interval is greater than
84 * or equal to 8 micro-frames (i.e. 1ms, at 125us each).
85 */
86 0x01, /* 00000001 */
87 0x02, /* 00000010 */
88 0x04, /* 00000100 */
89 0x08, /* 00001000 */
90 0x10, /* 00010000 */
91 0x20, /* 00100000 */
92 0x40, /* 01000000 */
93 0x80, /* 10000000 */
94
95 /* Only for high speed devices with a polling interval of 4 micro-frames */
96 0x11, /* 00010001 */
97 0x22, /* 00100010 */
98 0x44, /* 01000100 */
99 0x88, /* 10001000 */
100
101 /* Only for high speed devices with a polling interval of 2 micro-frames */
102 0x55, /* 01010101 */
103 0xaa, /* 10101010 */
104
105 /* Only for high speed devices with a polling interval of 1 micro-frame */
106 0xff /* 11111111 */
107 };
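/*
 * In each start split mask above, bit N corresponds to micro-frame N of
 * the 1ms frame (eight 125us micro-frames per frame).  For example, the
 * mask 0x11 issues start splits in micro-frames 0 and 4, i.e. twice per
 * frame, matching a polling interval of 4 micro-frames (500us).
 */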
108
109 /*
110 * Values used to calculate the complete split mask for the
111 * low/full speed interrupt and isochronous endpoints.
112 */
113 static uint_t ehci_intr_complete_split_mask[7] = {
114 /* Only full/low speed devices */
115 0x1c, /* 00011100 */
116 0x38, /* 00111000 */
117 0x70, /* 01110000 */
118 0xe0, /* 11100000 */
119 0x00, /* Need FSTN feature */
120 0x00, /* Need FSTN feature */
121 0x00 /* Need FSTN feature */
122 };
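/*
 * Each complete split mask schedules complete split transactions in the
 * micro-frames indicated by its set bits; for example 0x1c covers
 * micro-frames 2, 3 and 4, the window following a start split issued in
 * micro-frame 0.  The zero entries mark cases that would need the FSTN
 * feature noted above.
 */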
123
124
125 /*
126 * EHCI Internal Function Prototypes
127 */
128
129 /* Host Controller Driver (HCD) initialization functions */
130 void ehci_set_dma_attributes(ehci_state_t *ehcip);
131 int ehci_allocate_pools(ehci_state_t *ehcip);
132 void ehci_decode_ddi_dma_addr_bind_handle_result(
133 ehci_state_t *ehcip,
134 int result);
135 int ehci_map_regs(ehci_state_t *ehcip);
136 int ehci_register_intrs_and_init_mutex(
137 ehci_state_t *ehcip);
138 static int ehci_add_intrs(ehci_state_t *ehcip,
139 int intr_type);
140 int ehci_init_ctlr(ehci_state_t *ehcip,
141 int init_type);
142 static int ehci_take_control(ehci_state_t *ehcip);
143 static int ehci_init_periodic_frame_lst_table(
144 ehci_state_t *ehcip);
145 static void ehci_build_interrupt_lattice(
146 ehci_state_t *ehcip);
147 usba_hcdi_ops_t *ehci_alloc_hcdi_ops(ehci_state_t *ehcip);
148
149 /* Host Controller Driver (HCD) deinitialization functions */
150 int ehci_cleanup(ehci_state_t *ehcip);
151 static void ehci_rem_intrs(ehci_state_t *ehcip);
152 int ehci_cpr_suspend(ehci_state_t *ehcip);
153 int ehci_cpr_resume(ehci_state_t *ehcip);
154
155 /* Bandwidth Allocation functions */
156 int ehci_allocate_bandwidth(ehci_state_t *ehcip,
157 usba_pipe_handle_data_t *ph,
158 uint_t *pnode,
159 uchar_t *smask,
160 uchar_t *cmask);
161 static int ehci_allocate_high_speed_bandwidth(
162 ehci_state_t *ehcip,
163 usba_pipe_handle_data_t *ph,
164 uint_t *hnode,
165 uchar_t *smask,
166 uchar_t *cmask);
167 static int ehci_allocate_classic_tt_bandwidth(
168 ehci_state_t *ehcip,
169 usba_pipe_handle_data_t *ph,
170 uint_t pnode);
171 void ehci_deallocate_bandwidth(ehci_state_t *ehcip,
172 usba_pipe_handle_data_t *ph,
173 uint_t pnode,
174 uchar_t smask,
175 uchar_t cmask);
176 static void ehci_deallocate_high_speed_bandwidth(
177 ehci_state_t *ehcip,
178 usba_pipe_handle_data_t *ph,
179 uint_t hnode,
180 uchar_t smask,
181 uchar_t cmask);
182 static void ehci_deallocate_classic_tt_bandwidth(
183 ehci_state_t *ehcip,
184 usba_pipe_handle_data_t *ph,
185 uint_t pnode);
186 static int ehci_compute_high_speed_bandwidth(
187 ehci_state_t *ehcip,
188 usb_ep_descr_t *endpoint,
189 usb_port_status_t port_status,
190 uint_t *sbandwidth,
191 uint_t *cbandwidth);
192 static int ehci_compute_classic_bandwidth(
193 usb_ep_descr_t *endpoint,
194 usb_port_status_t port_status,
195 uint_t *bandwidth);
196 int ehci_adjust_polling_interval(
197 ehci_state_t *ehcip,
198 usb_ep_descr_t *endpoint,
199 usb_port_status_t port_status);
200 static int ehci_adjust_high_speed_polling_interval(
201 ehci_state_t *ehcip,
202 usb_ep_descr_t *endpoint);
203 static uint_t ehci_lattice_height(uint_t interval);
204 static uint_t ehci_lattice_parent(uint_t node);
205 static uint_t ehci_find_periodic_node(
206 uint_t leaf,
207 int interval);
208 static uint_t ehci_leftmost_leaf(uint_t node,
209 uint_t height);
210 static uint_t ehci_pow_2(uint_t x);
211 static uint_t ehci_log_2(uint_t x);
212 static int ehci_find_bestfit_hs_mask(
213 ehci_state_t *ehcip,
214 uchar_t *smask,
215 uint_t *pnode,
216 usb_ep_descr_t *endpoint,
217 uint_t bandwidth,
218 int interval);
219 static int ehci_find_bestfit_ls_intr_mask(
220 ehci_state_t *ehcip,
221 uchar_t *smask,
222 uchar_t *cmask,
223 uint_t *pnode,
224 uint_t sbandwidth,
225 uint_t cbandwidth,
226 int interval);
227 static int ehci_find_bestfit_sitd_in_mask(
228 ehci_state_t *ehcip,
229 uchar_t *smask,
230 uchar_t *cmask,
231 uint_t *pnode,
232 uint_t sbandwidth,
233 uint_t cbandwidth,
234 int interval);
235 static int ehci_find_bestfit_sitd_out_mask(
236 ehci_state_t *ehcip,
237 uchar_t *smask,
238 uint_t *pnode,
239 uint_t sbandwidth,
240 int interval);
241 static uint_t ehci_calculate_bw_availability_mask(
242 ehci_state_t *ehcip,
243 uint_t bandwidth,
244 int leaf,
245 int leaf_count,
246 uchar_t *bw_mask);
247 static void ehci_update_bw_availability(
248 ehci_state_t *ehcip,
249 int bandwidth,
250 int leftmost_leaf,
251 int leaf_count,
252 uchar_t mask);
253
254 /* Miscellaneous functions */
255 ehci_state_t *ehci_obtain_state(
256 dev_info_t *dip);
257 int ehci_state_is_operational(
258 ehci_state_t *ehcip);
259 int ehci_do_soft_reset(
260 ehci_state_t *ehcip);
261 usb_req_attrs_t ehci_get_xfer_attrs(ehci_state_t *ehcip,
262 ehci_pipe_private_t *pp,
263 ehci_trans_wrapper_t *tw);
264 usb_frame_number_t ehci_get_current_frame_number(
265 ehci_state_t *ehcip);
266 static void ehci_cpr_cleanup(
267 ehci_state_t *ehcip);
268 int ehci_wait_for_sof(
269 ehci_state_t *ehcip);
270 void ehci_toggle_scheduler(
271 ehci_state_t *ehcip);
272 void ehci_print_caps(ehci_state_t *ehcip);
273 void ehci_print_regs(ehci_state_t *ehcip);
274 void ehci_print_qh(ehci_state_t *ehcip,
275 ehci_qh_t *qh);
276 void ehci_print_qtd(ehci_state_t *ehcip,
277 ehci_qtd_t *qtd);
278 void ehci_create_stats(ehci_state_t *ehcip);
279 void ehci_destroy_stats(ehci_state_t *ehcip);
280 void ehci_do_intrs_stats(ehci_state_t *ehcip,
281 int val);
282 void ehci_do_byte_stats(ehci_state_t *ehcip,
283 size_t len,
284 uint8_t attr,
285 uint8_t addr);
286
287 /*
288 * check if this ehci controller can support PM
289 */
290 int
291 ehci_hcdi_pm_support(dev_info_t *dip)
292 {
293 ehci_state_t *ehcip = ddi_get_soft_state(ehci_statep,
294 ddi_get_instance(dip));
295
296 if (((ehcip->ehci_vendor_id == PCI_VENDOR_NEC_COMBO) &&
297 (ehcip->ehci_device_id == PCI_DEVICE_NEC_COMBO)) ||
298
299 ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
300 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) ||
301
302 (ehcip->ehci_vendor_id == PCI_VENDOR_VIA)) {
303
304 return (USB_SUCCESS);
305 }
306
307 return (USB_FAILURE);
308 }
309
310 void
311 ehci_dma_attr_workaround(ehci_state_t *ehcip)
312 {
313 /*
314 * Some Nvidia chips cannot handle a qh dma address above 2GB.
315 * Bit 31 of the dma address might be dropped, which can cause a
316 * system crash or other unpredictable results. So force the dma
317 * address to be allocated below 2GB to make ehci work.
318 */
319 if (PCI_VENDOR_NVIDIA == ehcip->ehci_vendor_id) {
320 switch (ehcip->ehci_device_id) {
321 case PCI_DEVICE_NVIDIA_CK804:
322 case PCI_DEVICE_NVIDIA_MCP04:
323 USB_DPRINTF_L2(PRINT_MASK_ATTA,
324 ehcip->ehci_log_hdl,
325 "ehci_dma_attr_workaround: NVIDIA dma "
326 "workaround enabled, force dma address "
327 "to be allocated below 2G");
328 ehcip->ehci_dma_attr.dma_attr_addr_hi =
329 0x7fffffffull;
330 break;
331 default:
332 break;
333
334 }
335 }
336 }
337
338 /*
339 * Host Controller Driver (HCD) initialization functions
340 */
341
342 /*
343 * ehci_set_dma_attributes:
344 *
345 * Set the limits in the DMA attributes structure. Most of the values used
346 * in the DMA limit structures are the default values as specified by the
347 * Writing PCI device drivers document.
348 */
349 void
350 ehci_set_dma_attributes(ehci_state_t *ehcip)
351 {
352 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
353 "ehci_set_dma_attributes:");
354
355 /* Initialize the DMA attributes */
356 ehcip->ehci_dma_attr.dma_attr_version = DMA_ATTR_V0;
357 ehcip->ehci_dma_attr.dma_attr_addr_lo = 0x00000000ull;
358 ehcip->ehci_dma_attr.dma_attr_addr_hi = 0xfffffffeull;
359
360 /* 32 bit addressing */
361 ehcip->ehci_dma_attr.dma_attr_count_max = EHCI_DMA_ATTR_COUNT_MAX;
362
363 /* Byte alignment */
364 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
365
366 /*
367 * Since the PCI specification only requires byte alignment, the
368 * burst size field should be set to 1 for PCI devices.
369 */
370 ehcip->ehci_dma_attr.dma_attr_burstsizes = 0x1;
371
372 ehcip->ehci_dma_attr.dma_attr_minxfer = 0x1;
373 ehcip->ehci_dma_attr.dma_attr_maxxfer = EHCI_DMA_ATTR_MAX_XFER;
374 ehcip->ehci_dma_attr.dma_attr_seg = 0xffffffffull;
375 ehcip->ehci_dma_attr.dma_attr_sgllen = 1;
376 ehcip->ehci_dma_attr.dma_attr_granular = EHCI_DMA_ATTR_GRANULAR;
377 ehcip->ehci_dma_attr.dma_attr_flags = 0;
378 ehci_dma_attr_workaround(ehcip);
379 }
380
381
382 /*
383 * ehci_allocate_pools:
384 *
385 * Allocate the system memory for the Endpoint Descriptor (QH) and for the
386 * Transfer Descriptor (QTD) pools. Both QH and QTD structures must be aligned
387 * to a 16 byte boundary.
388 */
389 int
390 ehci_allocate_pools(ehci_state_t *ehcip)
391 {
392 ddi_device_acc_attr_t dev_attr;
393 size_t real_length;
394 int result;
395 uint_t ccount;
396 int i;
397
398 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
399 "ehci_allocate_pools:");
400
401 /* The host controller will be little endian */
402 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
403 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
404 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
405
406 /* Byte alignment */
407 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_TD_QH_ALIGNMENT;
408
409 /* Allocate the QTD pool DMA handle */
410 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
411 DDI_DMA_SLEEP, 0,
412 &ehcip->ehci_qtd_pool_dma_handle) != DDI_SUCCESS) {
413
414 goto failure;
415 }
416
417 /* Allocate the memory for the QTD pool */
418 if (ddi_dma_mem_alloc(ehcip->ehci_qtd_pool_dma_handle,
419 ehci_qtd_pool_size * sizeof (ehci_qtd_t),
420 &dev_attr,
421 DDI_DMA_CONSISTENT,
422 DDI_DMA_SLEEP,
423 0,
424 (caddr_t *)&ehcip->ehci_qtd_pool_addr,
425 &real_length,
426 &ehcip->ehci_qtd_pool_mem_handle)) {
427
428 goto failure;
429 }
430
431 /* Map the QTD pool into the I/O address space */
432 result = ddi_dma_addr_bind_handle(
433 ehcip->ehci_qtd_pool_dma_handle,
434 NULL,
435 (caddr_t)ehcip->ehci_qtd_pool_addr,
436 real_length,
437 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
438 DDI_DMA_SLEEP,
439 NULL,
440 &ehcip->ehci_qtd_pool_cookie,
441 &ccount);
442
443 bzero((void *)ehcip->ehci_qtd_pool_addr,
444 ehci_qtd_pool_size * sizeof (ehci_qtd_t));
445
446 /* Process the result */
447 if (result == DDI_DMA_MAPPED) {
448 /* The cookie count should be 1 */
449 if (ccount != 1) {
450 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
451 "ehci_allocate_pools: More than 1 cookie");
452
453 goto failure;
454 }
455 } else {
456 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
457 "ehci_allocate_pools: Result = %d", result);
458
459 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
460
461 goto failure;
462 }
463
464 /*
465 * DMA addresses for QTD pools are bound
466 */
467 ehcip->ehci_dma_addr_bind_flag |= EHCI_QTD_POOL_BOUND;
468
469 /* Initialize the QTD pool */
470 for (i = 0; i < ehci_qtd_pool_size; i ++) {
471 Set_QTD(ehcip->ehci_qtd_pool_addr[i].
472 qtd_state, EHCI_QTD_FREE);
473 }
474
475 /* Allocate the QH pool DMA handle */
476 if (ddi_dma_alloc_handle(ehcip->ehci_dip,
477 &ehcip->ehci_dma_attr,
478 DDI_DMA_SLEEP,
479 0,
480 &ehcip->ehci_qh_pool_dma_handle) != DDI_SUCCESS) {
481 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
482 "ehci_allocate_pools: ddi_dma_alloc_handle failed");
483
484 goto failure;
485 }
486
487 /* Allocate the memory for the QH pool */
488 if (ddi_dma_mem_alloc(ehcip->ehci_qh_pool_dma_handle,
489 ehci_qh_pool_size * sizeof (ehci_qh_t),
490 &dev_attr,
491 DDI_DMA_CONSISTENT,
492 DDI_DMA_SLEEP,
493 0,
494 (caddr_t *)&ehcip->ehci_qh_pool_addr,
495 &real_length,
496 &ehcip->ehci_qh_pool_mem_handle) != DDI_SUCCESS) {
497 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
498 "ehci_allocate_pools: ddi_dma_mem_alloc failed");
499
500 goto failure;
501 }
502
503 result = ddi_dma_addr_bind_handle(ehcip->ehci_qh_pool_dma_handle,
504 NULL,
505 (caddr_t)ehcip->ehci_qh_pool_addr,
506 real_length,
507 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
508 DDI_DMA_SLEEP,
509 NULL,
510 &ehcip->ehci_qh_pool_cookie,
511 &ccount);
512
513 bzero((void *)ehcip->ehci_qh_pool_addr,
514 ehci_qh_pool_size * sizeof (ehci_qh_t));
515
516 /* Process the result */
517 if (result == DDI_DMA_MAPPED) {
518 /* The cookie count should be 1 */
519 if (ccount != 1) {
520 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
521 "ehci_allocate_pools: More than 1 cookie");
522
523 goto failure;
524 }
525 } else {
526 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
527
528 goto failure;
529 }
530
531 /*
532 * DMA addresses for QH pools are bound
533 */
534 ehcip->ehci_dma_addr_bind_flag |= EHCI_QH_POOL_BOUND;
535
536 /* Initialize the QH pool */
537 for (i = 0; i < ehci_qh_pool_size; i ++) {
538 Set_QH(ehcip->ehci_qh_pool_addr[i].qh_state, EHCI_QH_FREE);
539 }
540
541 /* Byte alignment */
542 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
543
544 return (DDI_SUCCESS);
545
546 failure:
547 /* Byte alignment */
548 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
549
550 return (DDI_FAILURE);
551 }
552
553
554 /*
555 * ehci_decode_ddi_dma_addr_bind_handle_result:
556 *
557 * Process the return values of ddi_dma_addr_bind_handle()
558 */
559 void
560 ehci_decode_ddi_dma_addr_bind_handle_result(
561 ehci_state_t *ehcip,
562 int result)
563 {
564 USB_DPRINTF_L2(PRINT_MASK_ALLOC, ehcip->ehci_log_hdl,
565 "ehci_decode_ddi_dma_addr_bind_handle_result:");
566
567 switch (result) {
568 case DDI_DMA_PARTIAL_MAP:
569 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
570 "Partial transfers not allowed");
571 break;
572 case DDI_DMA_INUSE:
573 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
574 "Handle is in use");
575 break;
576 case DDI_DMA_NORESOURCES:
577 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
578 "No resources");
579 break;
580 case DDI_DMA_NOMAPPING:
581 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
582 "No mapping");
583 break;
584 case DDI_DMA_TOOBIG:
585 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
586 "Object is too big");
587 break;
588 default:
589 USB_DPRINTF_L2(PRINT_MASK_ALL, ehcip->ehci_log_hdl,
590 "Unknown dma error");
591 }
592 }
593
594
595 /*
596 * ehci_map_regs:
597 *
598 * The Host Controller (HC) contains a set of on-chip operational registers
599 * which should be mapped into a non-cacheable portion of the system
600 * addressable space.
601 */
602 int
603 ehci_map_regs(ehci_state_t *ehcip)
604 {
605 ddi_device_acc_attr_t attr;
606 uint16_t cmd_reg;
607 uint_t length;
608
609 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_map_regs:");
610
611 /* Check to make sure we have memory access */
612 if (pci_config_setup(ehcip->ehci_dip,
613 &ehcip->ehci_config_handle) != DDI_SUCCESS) {
614
615 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
616 "ehci_map_regs: Config error");
617
618 return (DDI_FAILURE);
619 }
620
621 /* Make sure Memory Access Enable is set */
622 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
623
624 if (!(cmd_reg & PCI_COMM_MAE)) {
625
626 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
627 "ehci_map_regs: Memory base address access disabled");
628
629 return (DDI_FAILURE);
630 }
631
632 /* The host controller will be little endian */
633 attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
634 attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
635 attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
636
637 /* Map in EHCI Capability registers */
638 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
639 (caddr_t *)&ehcip->ehci_capsp, 0,
640 sizeof (ehci_caps_t), &attr,
641 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
642
643 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
644 "ehci_map_regs: Map setup error");
645
646 return (DDI_FAILURE);
647 }
648
649 length = ddi_get8(ehcip->ehci_caps_handle,
650 (uint8_t *)&ehcip->ehci_capsp->ehci_caps_length);
651
652 /* Free the original mapping */
653 ddi_regs_map_free(&ehcip->ehci_caps_handle);
654
655 /* Re-map in EHCI Capability and Operational registers */
656 if (ddi_regs_map_setup(ehcip->ehci_dip, 1,
657 (caddr_t *)&ehcip->ehci_capsp, 0,
658 length + sizeof (ehci_regs_t), &attr,
659 &ehcip->ehci_caps_handle) != DDI_SUCCESS) {
660
661 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
662 "ehci_map_regs: Map setup error");
663
664 return (DDI_FAILURE);
665 }
666
667 /* Get the pointer to EHCI Operational Register */
668 ehcip->ehci_regsp = (ehci_regs_t *)
669 ((uintptr_t)ehcip->ehci_capsp + length);
670
671 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
672 "ehci_map_regs: Capsp 0x%p Regsp 0x%p\n",
673 (void *)ehcip->ehci_capsp, (void *)ehcip->ehci_regsp);
674
675 return (DDI_SUCCESS);
676 }
677
678 /*
679 * The following simulated polling is for debugging purposes only.
680 * It is activated on x86 by setting usb-polling=true in GRUB or ehci.conf.
681 */
682 static int
683 ehci_is_polled(dev_info_t *dip)
684 {
685 int ret;
686 char *propval;
687
688 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
689 "usb-polling", &propval) != DDI_SUCCESS)
690
691 return (0);
692
693 ret = (strcmp(propval, "true") == 0);
694 ddi_prop_free(propval);
695
696 return (ret);
697 }
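/*
 * Illustrative only: given the property lookup above, simulated polling
 * would typically be requested by adding a driver.conf line such as
 *
 *	usb-polling="true";
 *
 * to ehci.conf (or the equivalent boot property) and rebooting.
 */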
698
699 static void
700 ehci_poll_intr(void *arg)
701 {
702 /* poll every msec */
703 for (;;) {
704 (void) ehci_intr(arg, NULL);
705 delay(drv_usectohz(1000));
706 }
707 }
708
709 /*
710 * ehci_register_intrs_and_init_mutex:
711 *
712 * Register interrupts and initialize the mutex and condition variables
713 */
714 int
715 ehci_register_intrs_and_init_mutex(ehci_state_t *ehcip)
716 {
717 int intr_types;
718
719 #if defined(__x86)
720 uint8_t iline;
721 #endif
722
723 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
724 "ehci_register_intrs_and_init_mutex:");
725
726 /*
727 * There is a known MSI hardware bug with the EHCI controller
728 * of ULI1575 southbridge. Hence MSI is disabled for this chip.
729 */
730 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
731 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
732 ehcip->ehci_msi_enabled = B_FALSE;
733 } else {
734 /* Set the MSI enable flag from the global EHCI MSI tunable */
735 ehcip->ehci_msi_enabled = ehci_enable_msi;
736 }
737
738 /* launch polling thread instead of enabling pci interrupt */
739 if (ehci_is_polled(ehcip->ehci_dip)) {
740 extern pri_t maxclsyspri;
741
742 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
743 "ehci_register_intrs_and_init_mutex: "
744 "running in simulated polled mode");
745
746 (void) thread_create(NULL, 0, ehci_poll_intr, ehcip, 0, &p0,
747 TS_RUN, maxclsyspri);
748
749 goto skip_intr;
750 }
751
752 #if defined(__x86)
753 /*
754 * Make sure that the interrupt pin is connected to the
755 * interrupt controller on x86. Interrupt line 255 means
756 * "unknown" or "not connected" (PCI spec 6.2.4, footnote 43).
757 * If we returned failure when the interrupt line equals 255,
758 * high speed devices would be routed to the companion host controllers.
759 * However, it is not necessary to return failure here, and the
760 * ohci/uhci drivers don't check the interrupt line either.
761 * But it's good to log a message here for debug purposes.
762 */
763 iline = pci_config_get8(ehcip->ehci_config_handle,
764 PCI_CONF_ILINE);
765
766 if (iline == 255) {
767 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
768 "ehci_register_intrs_and_init_mutex: "
769 "interrupt line value out of range (%d)",
770 iline);
771 }
772 #endif /* __x86 */
773
774 /* Get supported interrupt types */
775 if (ddi_intr_get_supported_types(ehcip->ehci_dip,
776 &intr_types) != DDI_SUCCESS) {
777 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
778 "ehci_register_intrs_and_init_mutex: "
779 "ddi_intr_get_supported_types failed");
780
781 return (DDI_FAILURE);
782 }
783
784 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
785 "ehci_register_intrs_and_init_mutex: "
786 "supported interrupt types 0x%x", intr_types);
787
788 if ((intr_types & DDI_INTR_TYPE_MSI) && ehcip->ehci_msi_enabled) {
789 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_MSI)
790 != DDI_SUCCESS) {
791 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
792 "ehci_register_intrs_and_init_mutex: MSI "
793 "registration failed, trying FIXED interrupt \n");
794 } else {
795 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
796 "ehci_register_intrs_and_init_mutex: "
797 "Using MSI interrupt type\n");
798
799 ehcip->ehci_intr_type = DDI_INTR_TYPE_MSI;
800 ehcip->ehci_flags |= EHCI_INTR;
801 }
802 }
803
804 if ((!(ehcip->ehci_flags & EHCI_INTR)) &&
805 (intr_types & DDI_INTR_TYPE_FIXED)) {
806 if (ehci_add_intrs(ehcip, DDI_INTR_TYPE_FIXED)
807 != DDI_SUCCESS) {
808 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
809 "ehci_register_intrs_and_init_mutex: "
810 "FIXED interrupt registration failed\n");
811
812 return (DDI_FAILURE);
813 }
814
815 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
816 "ehci_register_intrs_and_init_mutex: "
817 "Using FIXED interrupt type\n");
818
819 ehcip->ehci_intr_type = DDI_INTR_TYPE_FIXED;
820 ehcip->ehci_flags |= EHCI_INTR;
821 }
822
823 skip_intr:
824 /* Create the cv used to wait for async schedule advance */
825 cv_init(&ehcip->ehci_async_schedule_advance_cv,
826 NULL, CV_DRIVER, NULL);
827
828 return (DDI_SUCCESS);
829 }
830
831
832 /*
833 * ehci_add_intrs:
834 *
835 * Register FIXED or MSI interrupts.
836 */
837 static int
838 ehci_add_intrs(ehci_state_t *ehcip, int intr_type)
839 {
840 int actual, avail, intr_size, count = 0;
841 int i, flag, ret;
842
843 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
844 "ehci_add_intrs: interrupt type 0x%x", intr_type);
845
846 /* Get number of interrupts */
847 ret = ddi_intr_get_nintrs(ehcip->ehci_dip, intr_type, &count);
848 if ((ret != DDI_SUCCESS) || (count == 0)) {
849 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
850 "ehci_add_intrs: ddi_intr_get_nintrs() failure, "
851 "ret: %d, count: %d", ret, count);
852
853 return (DDI_FAILURE);
854 }
855
856 /* Get number of available interrupts */
857 ret = ddi_intr_get_navail(ehcip->ehci_dip, intr_type, &avail);
858 if ((ret != DDI_SUCCESS) || (avail == 0)) {
859 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
860 "ehci_add_intrs: ddi_intr_get_navail() failure, "
861 "ret: %d, count: %d", ret, count);
862
863 return (DDI_FAILURE);
864 }
865
866 if (avail < count) {
867 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
868 "ehci_add_intrs: ehci_add_intrs: nintrs () "
869 "returned %d, navail returned %d\n", count, avail);
870 }
871
872 /* Allocate an array of interrupt handles */
873 intr_size = count * sizeof (ddi_intr_handle_t);
874 ehcip->ehci_htable = kmem_zalloc(intr_size, KM_SLEEP);
875
876 flag = (intr_type == DDI_INTR_TYPE_MSI) ?
877 DDI_INTR_ALLOC_STRICT:DDI_INTR_ALLOC_NORMAL;
878
879 /* call ddi_intr_alloc() */
880 ret = ddi_intr_alloc(ehcip->ehci_dip, ehcip->ehci_htable,
881 intr_type, 0, count, &actual, flag);
882
883 if ((ret != DDI_SUCCESS) || (actual == 0)) {
884 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
885 "ehci_add_intrs: ddi_intr_alloc() failed %d", ret);
886
887 kmem_free(ehcip->ehci_htable, intr_size);
888
889 return (DDI_FAILURE);
890 }
891
892 if (actual < count) {
893 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
894 "ehci_add_intrs: Requested: %d, Received: %d\n",
895 count, actual);
896
897 for (i = 0; i < actual; i++)
898 (void) ddi_intr_free(ehcip->ehci_htable[i]);
899
900 kmem_free(ehcip->ehci_htable, intr_size);
901
902 return (DDI_FAILURE);
903 }
904
905 ehcip->ehci_intr_cnt = actual;
906
907 if ((ret = ddi_intr_get_pri(ehcip->ehci_htable[0],
908 &ehcip->ehci_intr_pri)) != DDI_SUCCESS) {
909 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
910 "ehci_add_intrs: ddi_intr_get_pri() failed %d", ret);
911
912 for (i = 0; i < actual; i++)
913 (void) ddi_intr_free(ehcip->ehci_htable[i]);
914
915 kmem_free(ehcip->ehci_htable, intr_size);
916
917 return (DDI_FAILURE);
918 }
919
920 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
921 "ehci_add_intrs: Supported Interrupt priority 0x%x",
922 ehcip->ehci_intr_pri);
923
924 /* Test for high level mutex */
925 if (ehcip->ehci_intr_pri >= ddi_intr_get_hilevel_pri()) {
926 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
927 "ehci_add_intrs: Hi level interrupt not supported");
928
929 for (i = 0; i < actual; i++)
930 (void) ddi_intr_free(ehcip->ehci_htable[i]);
931
932 kmem_free(ehcip->ehci_htable, intr_size);
933
934 return (DDI_FAILURE);
935 }
936
937 /* Initialize the mutex */
938 mutex_init(&ehcip->ehci_int_mutex, NULL, MUTEX_DRIVER,
939 DDI_INTR_PRI(ehcip->ehci_intr_pri));
940
941 /* Call ddi_intr_add_handler() */
942 for (i = 0; i < actual; i++) {
943 if ((ret = ddi_intr_add_handler(ehcip->ehci_htable[i],
944 ehci_intr, (caddr_t)ehcip,
945 (caddr_t)(uintptr_t)i)) != DDI_SUCCESS) {
946 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
947 "ehci_add_intrs:ddi_intr_add_handler() "
948 "failed %d", ret);
949
950 for (i = 0; i < actual; i++)
951 (void) ddi_intr_free(ehcip->ehci_htable[i]);
952
953 mutex_destroy(&ehcip->ehci_int_mutex);
954 kmem_free(ehcip->ehci_htable, intr_size);
955
956 return (DDI_FAILURE);
957 }
958 }
959
960 if ((ret = ddi_intr_get_cap(ehcip->ehci_htable[0],
961 &ehcip->ehci_intr_cap)) != DDI_SUCCESS) {
962 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
963 "ehci_add_intrs: ddi_intr_get_cap() failed %d", ret);
964
965 for (i = 0; i < actual; i++) {
966 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
967 (void) ddi_intr_free(ehcip->ehci_htable[i]);
968 }
969
970 mutex_destroy(&ehcip->ehci_int_mutex);
971 kmem_free(ehcip->ehci_htable, intr_size);
972
973 return (DDI_FAILURE);
974 }
975
976 /* Enable all interrupts */
977 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
978 /* Call ddi_intr_block_enable() for MSI interrupts */
979 (void) ddi_intr_block_enable(ehcip->ehci_htable,
980 ehcip->ehci_intr_cnt);
981 } else {
982 /* Call ddi_intr_enable for MSI or FIXED interrupts */
983 for (i = 0; i < ehcip->ehci_intr_cnt; i++)
984 (void) ddi_intr_enable(ehcip->ehci_htable[i]);
985 }
986
987 return (DDI_SUCCESS);
988 }
989
990 /*
991 * ehci_wait_reset
992 *
993 * Wait the specified time for the chip reset to complete, with a
994 * workaround of roughly 250ms total wait for the HPE iLO chip.
995 */
996 static void
997 ehci_wait_reset(ehci_state_t *ehcip, clock_t microsecs)
998 {
999 /* Wait specified times for reset to complete */
1000 drv_usecwait(microsecs);
1001
1002 if (ehcip->ehci_vendor_id == PCI_VENDOR_HP) {
1003 for (int i = 10; i < 250; i += 10) {
1004 /* Wait an additional 10ms for the reset to complete */
1005 drv_usecwait(EHCI_RESET_TIMEWAIT);
1006 }
1007 }
1008 }
1009
1010 /*
1011 * ehci_init_hardware
1012 *
1013 * take control from BIOS, reset EHCI host controller, and check version, etc.
1014 */
1015 int
1016 ehci_init_hardware(ehci_state_t *ehcip)
1017 {
1018 int revision;
1019 uint16_t cmd_reg;
1020 int abort_on_BIOS_take_over_failure;
1021
1022 /* Take control from the BIOS */
1023 if (ehci_take_control(ehcip) != USB_SUCCESS) {
1024
1025 /* read .conf file properties */
1026 abort_on_BIOS_take_over_failure =
1027 ddi_prop_get_int(DDI_DEV_T_ANY,
1028 ehcip->ehci_dip, DDI_PROP_DONTPASS,
1029 "abort-on-BIOS-take-over-failure", 0);
1030
1031 if (abort_on_BIOS_take_over_failure) {
1032
1033 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1034 "Unable to take control from BIOS.");
1035
1036 return (DDI_FAILURE);
1037 }
1038
1039 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1040 "Unable to take control from BIOS. Failure is ignored.");
1041 }
1042
1043 /* set Memory Master Enable */
1044 cmd_reg = pci_config_get16(ehcip->ehci_config_handle, PCI_CONF_COMM);
1045 cmd_reg |= (PCI_COMM_MAE | PCI_COMM_ME);
1046 pci_config_put16(ehcip->ehci_config_handle, PCI_CONF_COMM, cmd_reg);
1047
1048 /* Reset the EHCI host controller */
1049 Set_OpReg(ehci_command,
1050 Get_OpReg(ehci_command) | EHCI_CMD_HOST_CTRL_RESET);
1051
1052 /* Wait at least 10ms (longer on HPE iLO) for reset to complete */
1053 ehci_wait_reset(ehcip, EHCI_RESET_TIMEWAIT);
1054
1055 ASSERT(Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED);
1056
1057 /* Verify the version number */
1058 revision = Get_16Cap(ehci_version);
1059
1060 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1061 "ehci_init_hardware: Revision 0x%x", revision);
1062
1063 /*
1064 * The EHCI driver supports EHCI host controllers compliant with
1065 * revision 0.95 and higher of the EHCI specification.
1066 */
1067 if (revision < EHCI_REVISION_0_95) {
1068
1069 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1070 "Revision 0x%x is not supported", revision);
1071
1072 return (DDI_FAILURE);
1073 }
1074
1075 if (ehcip->ehci_hc_soft_state == EHCI_CTLR_INIT_STATE) {
1076
1077 /* Initialize the Frame list base address area */
1078 if (ehci_init_periodic_frame_lst_table(ehcip) != DDI_SUCCESS) {
1079
1080 return (DDI_FAILURE);
1081 }
1082
1083 /*
1084 * For performance reasons, do not insert anything into the
1085 * asynchronous list or activate the asynch list schedule until
1086 * there is a valid QH.
1087 */
1088 ehcip->ehci_head_of_async_sched_list = NULL;
1089
1090 if ((ehcip->ehci_vendor_id == PCI_VENDOR_VIA) &&
1091 (ehci_vt62x2_workaround & EHCI_VIA_ASYNC_SCHEDULE)) {
1092 /*
1093 * The driver is unable to reliably stop the asynch
1094 * list schedule on VIA VT6202 controllers, so we
1095 * always keep a dummy QH on the list.
1096 */
1097 ehci_qh_t *dummy_async_qh =
1098 ehci_alloc_qh(ehcip, NULL,
1099 EHCI_INTERRUPT_MODE_FLAG);
1100
1101 Set_QH(dummy_async_qh->qh_link_ptr,
1102 ((ehci_qh_cpu_to_iommu(ehcip, dummy_async_qh) &
1103 EHCI_QH_LINK_PTR) | EHCI_QH_LINK_REF_QH));
1104
1105 /* Set this QH to be the "head" of the circular list */
1106 Set_QH(dummy_async_qh->qh_ctrl,
1107 Get_QH(dummy_async_qh->qh_ctrl) |
1108 EHCI_QH_CTRL_RECLAIM_HEAD);
1109
1110 Set_QH(dummy_async_qh->qh_next_qtd,
1111 EHCI_QH_NEXT_QTD_PTR_VALID);
1112 Set_QH(dummy_async_qh->qh_alt_next_qtd,
1113 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1114
1115 ehcip->ehci_head_of_async_sched_list = dummy_async_qh;
1116 ehcip->ehci_open_async_count++;
1117 ehcip->ehci_async_req_count++;
1118 }
1119 }
1120
1121 return (DDI_SUCCESS);
1122 }
1123
1124
1125 /*
1126 * ehci_init_workaround
1127 *
1128 * some workarounds during initializing ehci
1129 */
1130 int
1131 ehci_init_workaround(ehci_state_t *ehcip)
1132 {
1133 /*
1134 * Acer Labs Inc. M5273 EHCI controller does not send
1135 * interrupts unless the Root hub ports are routed to the EHCI
1136 * host controller; so route the ports now, before we test for
1137 * the presence of SOFs interrupts.
1138 */
1139 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1140 /* Route all Root hub ports to EHCI host controller */
1141 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1142 }
1143
1144 /*
1145 * VIA chips have some issues and may not work reliably.
1146 * Revisions >= 0x80 are part of a southbridge and appear
1147 * to be reliable with the workaround.
1148 * For revisions < 0x80, if we were bound using the PCI class
1149 * code, complain; otherwise proceed. This allows the user to
1150 * bind ehci specifically to this chip and avoid the
1151 * warnings.
1152 */
1153 if (ehcip->ehci_vendor_id == PCI_VENDOR_VIA) {
1154
1155 if (ehcip->ehci_rev_id >= PCI_VIA_REVISION_6212) {
1156
1157 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1158 "ehci_init_workaround: Applying VIA workarounds "
1159 "for the 6212 chip.");
1160
1161 } else if (strcmp(DEVI(ehcip->ehci_dip)->devi_binding_name,
1162 "pciclass,0c0320") == 0) {
1163
1164 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1165 "Due to recently discovered incompatibilities");
1166 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1167 "with this USB controller, USB2.x transfer");
1168 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1169 "support has been disabled. This device will");
1170 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1171 "continue to function as a USB1.x controller.");
1172 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1173 "If you are interested in enabling USB2.x");
1174 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1175 "support please, refer to the ehci(7D) man page.");
1176 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1177 "Please also refer to www.sun.com/io for");
1178 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1179 "Solaris Ready products and to");
1180 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1181 "www.sun.com/bigadmin/hcl for additional");
1182 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1183 "compatible USB products.");
1184
1185 return (DDI_FAILURE);
1186
1187 } else if (ehci_vt62x2_workaround) {
1188
1189 USB_DPRINTF_L1(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1190 "Applying VIA workarounds");
1191 }
1192 }
1193
1194 return (DDI_SUCCESS);
1195 }
1196
1197
1198 /*
1199 * ehci_init_check_status
1200 *
1201 * Check if EHCI host controller is running
1202 */
1203 int
1204 ehci_init_check_status(ehci_state_t *ehcip)
1205 {
1206 clock_t sof_time_wait;
1207
1208 /*
1209 * Get the number of clock ticks to wait.
1210 * This is based on the maximum time it takes for a frame list rollover
1211 * and the maximum time to wait for SOFs to begin.
1212 */
1213 sof_time_wait = drv_usectohz((EHCI_NUM_PERIODIC_FRAME_LISTS * 1000) +
1214 EHCI_SOF_TIMEWAIT);
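/*
 * For example, assuming a 1024-entry periodic frame list (one frame per
 * millisecond), this works out to roughly 1.024 seconds plus
 * EHCI_SOF_TIMEWAIT of waiting before the check below concludes that
 * the controller never started.
 */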
1215
1216 /* Tell the ISR to broadcast ehci_async_schedule_advance_cv */
1217 ehcip->ehci_flags |= EHCI_CV_INTR;
1218
1219 /* We need to add a delay to allow the chip time to start running */
1220 (void) cv_reltimedwait(&ehcip->ehci_async_schedule_advance_cv,
1221 &ehcip->ehci_int_mutex, sof_time_wait, TR_CLOCK_TICK);
1222
1223 /*
1224 * Check that the EHCI host controller is running; otherwise return failure.
1225 */
1226 if ((ehcip->ehci_flags & EHCI_CV_INTR) ||
1227 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
1228
1229 USB_DPRINTF_L0(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1230 "No SOF interrupts have been received, this USB EHCI host"
1231 "controller is unusable");
1232
1233 /*
1234 * Route all Root hub ports to Classic host
1235 * controller, in case this is an unusable ALI M5273
1236 * EHCI controller.
1237 */
1238 if (ehcip->ehci_vendor_id == PCI_VENDOR_ALI) {
1239 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1240 }
1241
1242 return (DDI_FAILURE);
1243 }
1244
1245 return (DDI_SUCCESS);
1246 }
1247
1248
1249 /*
1250 * ehci_init_ctlr:
1251 *
1252 * Initialize the Host Controller (HC).
1253 */
1254 int
1255 ehci_init_ctlr(ehci_state_t *ehcip, int init_type)
1256 {
1257 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_init_ctlr:");
1258
1259 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1260
1261 if (ehci_init_hardware(ehcip) != DDI_SUCCESS) {
1262
1263 return (DDI_FAILURE);
1264 }
1265 }
1266
1267 /*
1268 * Check for the Asynchronous schedule park capability feature. If this
1269 * feature is supported, program the ehci command register with the
1270 * appropriate values.
1271 */
1272 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_ASYNC_SCHED_PARK_CAP) {
1273
1274 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1275 "ehci_init_ctlr: Async park mode is supported");
1276
1277 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1278 (EHCI_CMD_ASYNC_PARK_ENABLE |
1279 EHCI_CMD_ASYNC_PARK_COUNT_3)));
1280 }
1281
1282 /*
1283 * Check for the programmable periodic frame list feature. If this
1284 * feature is supported, program the ehci command register with the
1285 * 1024-entry frame list size.
1286 */
1287 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_PROG_FRAME_LIST_FLAG) {
1288
1289 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1290 "ehci_init_ctlr: Variable programmable periodic "
1291 "frame list is supported");
1292
1293 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) |
1294 EHCI_CMD_FRAME_1024_SIZE));
1295 }
1296
1297 /*
1298 * Currently the EHCI driver doesn't support 64 bit addressing.
1299 *
1300 * If the 64 bit addressing capability were used, the ehci_ctrl_segment
1301 * register would be programmed with the 4 Gigabyte segment where all
1302 * of the interface data structures are allocated.
1303 */
1304 if (Get_Cap(ehci_hcc_params) & EHCI_HCC_64BIT_ADDR_CAP) {
1305
1306 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1307 "ehci_init_ctlr: EHCI driver doesn't support "
1308 "64 bit addressing");
1309 }
1310
1311 /* 64 bit addressing is not supported */
1312 Set_OpReg(ehci_ctrl_segment, 0x00000000);
1313
1314 /* Turn on/off the schedulers */
1315 ehci_toggle_scheduler(ehcip);
1316
1317 /* Set host controller soft state to operational */
1318 ehcip->ehci_hc_soft_state = EHCI_CTLR_OPERATIONAL_STATE;
1319
1320 /*
1321 * Set the Periodic Frame List Base Address register with the
1322 * starting physical address of the Periodic Frame List.
1323 */
1324 Set_OpReg(ehci_periodic_list_base,
1325 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
1326 EHCI_PERIODIC_LIST_BASE));
1327
1328 /*
1329 * Set ehci_interrupt to enable all interrupts except Root
1330 * Hub Status change interrupt.
1331 */
1332 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
1333 EHCI_INTR_FRAME_LIST_ROLLOVER | EHCI_INTR_USB_ERROR |
1334 EHCI_INTR_USB);
1335
1336 /*
1337 * Set the desired interrupt threshold and turn on EHCI host controller.
1338 */
1339 Set_OpReg(ehci_command,
1340 ((Get_OpReg(ehci_command) & ~EHCI_CMD_INTR_THRESHOLD) |
1341 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
1342
1343 ASSERT(Get_OpReg(ehci_command) & EHCI_CMD_HOST_CTRL_RUN);
1344
1345 if (init_type == EHCI_NORMAL_INITIALIZATION) {
1346
1347 if (ehci_init_workaround(ehcip) != DDI_SUCCESS) {
1348
1349 /* Set host controller soft state to error */
1350 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1351
1352 return (DDI_FAILURE);
1353 }
1354
1355 if (ehci_init_check_status(ehcip) != DDI_SUCCESS) {
1356
1357 /* Set host controller soft state to error */
1358 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
1359
1360 return (DDI_FAILURE);
1361 }
1362
1363 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1364 "ehci_init_ctlr: SOF's have started");
1365 }
1366
1367 /* Route all Root hub ports to EHCI host controller */
1368 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_EHCI);
1369
1370 return (DDI_SUCCESS);
1371 }
1372
1373 /*
1374 * ehci_take_control:
1375 *
1376 * Handshake to take EHCI control from the BIOS if necessary. It is only
1377 * needed on x86 machines, because sparc doesn't have a BIOS.
1378 * On x86 machines, the take control process includes
1379 * o get the base address of the extended capability list
1380 * o find out the capability for handoff synchronization in the list.
1381 * o check if BIOS has owned the host controller.
1382 * o set the OS Owned semaphore bit, ask the BIOS to release the ownership.
1383 * o wait for a constant time and check if BIOS has relinquished control.
1384 */
1385 /* ARGSUSED */
1386 static int
1387 ehci_take_control(ehci_state_t *ehcip)
1388 {
1389 #if defined(__x86)
1390 uint32_t extended_cap;
1391 uint32_t extended_cap_offset;
1392 uint32_t extended_cap_id;
1393 uint_t retry;
1394
1395 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1396 "ehci_take_control:");
1397
1398 /*
1399 * According to EHCI Spec 2.2.4, get the EECP base address from the HCCPARAMS
1400 * register.
1401 */
1402 extended_cap_offset = (Get_Cap(ehci_hcc_params) & EHCI_HCC_EECP) >>
1403 EHCI_HCC_EECP_SHIFT;
1404
1405 /*
1406 * According to EHCI Spec 2.2.4, if the extended capability offset is
1407 * less than 40h then it is not valid. This means we don't need to
1408 * worry about BIOS handoff.
1409 */
1410 if (extended_cap_offset < EHCI_HCC_EECP_MIN_OFFSET) {
1411
1412 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1413 "ehci_take_control: Hardware doesn't support legacy.");
1414
1415 goto success;
1416 }
1417
1418 /*
1419 * According to EHCI Spec 2.1.7, a zero offset indicates the
1420 * end of the extended capability list.
1421 */
1422 while (extended_cap_offset) {
1423
1424 /* Get the extended capability value. */
1425 extended_cap = pci_config_get32(ehcip->ehci_config_handle,
1426 extended_cap_offset);
1427
1428 /*
1429 * It's possible that we'll receive an invalid PCI read here due
1430 * to something going wrong in the platform firmware. This has
1431 * been observed in the wild depending on the version of ACPI in
1432 * use. If this happens, we'll assume that the capability does
1433 * not exist and that we do not need to take control from the
1434 * BIOS.
1435 */
1436 if (extended_cap == PCI_EINVAL32) {
1437 extended_cap_id = EHCI_EX_CAP_ID_RESERVED;
1438 break;
1439 }
1440
1441 /* Get the capability ID */
1442 extended_cap_id = (extended_cap & EHCI_EX_CAP_ID) >>
1443 EHCI_EX_CAP_ID_SHIFT;
1444
1445 /* Check if the card supports legacy */
1446 if (extended_cap_id == EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1447 break;
1448 }
1449
1450 /* Get the offset of the next capability */
1451 extended_cap_offset = (extended_cap & EHCI_EX_CAP_NEXT_PTR) >>
1452 EHCI_EX_CAP_NEXT_PTR_SHIFT;
1453
1454 }
1455
1456 /*
1457 * Unable to find legacy support in hardware's extended capability list.
1458 * This means we don't need to worry about BIOS handoff.
1459 */
1460 if (extended_cap_id != EHCI_EX_CAP_ID_BIOS_HANDOFF) {
1461
1462 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1463 "ehci_take_control: Hardware doesn't support legacy");
1464
1465 goto success;
1466 }
1467
1468 /* Check if the BIOS currently owns the controller. */
1469 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1470
1471 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1472 "ehci_take_control: BIOS does not own EHCI");
1473
1474 goto success;
1475 }
1476
1477 /*
1478 * According to EHCI Spec 5.1, the OS driver initiates an ownership
1479 * request by setting the OS Owned semaphore to a one. The OS
1480 * waits for the BIOS Owned bit to go to a zero before attempting
1481 * to use the EHCI controller. The time that OS must wait for BIOS
1482 * to respond to the request for ownership is beyond the scope of
1483 * this specification.
1484 * It waits up to EHCI_TAKEOVER_WAIT_COUNT*EHCI_TAKEOVER_DELAY ms
1485 * for BIOS to release the ownership.
1486 */
1487 extended_cap |= EHCI_LEGSUP_OS_OWNED_SEM;
1488 pci_config_put32(ehcip->ehci_config_handle, extended_cap_offset,
1489 extended_cap);
1490
1491 for (retry = 0; retry < EHCI_TAKEOVER_WAIT_COUNT; retry++) {
1492
1493 /* wait a special interval */
1494 #ifndef __lock_lint
1495 delay(drv_usectohz(EHCI_TAKEOVER_DELAY));
1496 #endif
1497 /* Check to see if the BIOS has released the ownership */
1498 extended_cap = pci_config_get32(
1499 ehcip->ehci_config_handle, extended_cap_offset);
1500
1501 if (!(extended_cap & EHCI_LEGSUP_BIOS_OWNED_SEM)) {
1502
1503 USB_DPRINTF_L3(PRINT_MASK_ATTA,
1504 ehcip->ehci_log_hdl,
1505 "ehci_take_control: BIOS has released "
1506 "the ownership. retry = %d", retry);
1507
1508 goto success;
1509 }
1510
1511 }
1512
1513 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1514 "ehci_take_control: take control from BIOS failed.");
1515
1516 return (USB_FAILURE);
1517
1518 success:
1519
1520 #endif /* __x86 */
1521 return (USB_SUCCESS);
1522 }
1523
1524
1525 /*
1526 * ehci_init_periodic_frame_lst_table:
1527 *
1528 * Allocate the system memory and initialize the Host Controller
1529 * Periodic Frame List table area. The start of the Periodic
1530 * Frame List Table area must be 4096-byte aligned.
1531 */
1532 static int
1533 ehci_init_periodic_frame_lst_table(ehci_state_t *ehcip)
1534 {
1535 ddi_device_acc_attr_t dev_attr;
1536 size_t real_length;
1537 uint_t ccount;
1538 int result;
1539
1540 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
1541
1542 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1543 "ehci_init_periodic_frame_lst_table:");
1544
1545 /* The host controller will be little endian */
1546 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
1547 dev_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
1548 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
1549
1550 /* Force the required 4K restrictive alignment */
1551 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_PFL_ALIGNMENT;
1552
1553 /* Create space for the Periodic Frame List */
1554 if (ddi_dma_alloc_handle(ehcip->ehci_dip, &ehcip->ehci_dma_attr,
1555 DDI_DMA_SLEEP, 0, &ehcip->ehci_pflt_dma_handle) != DDI_SUCCESS) {
1556
1557 goto failure;
1558 }
1559
1560 if (ddi_dma_mem_alloc(ehcip->ehci_pflt_dma_handle,
1561 sizeof (ehci_periodic_frame_list_t),
1562 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
1563 0, (caddr_t *)&ehcip->ehci_periodic_frame_list_tablep,
1564 &real_length, &ehcip->ehci_pflt_mem_handle)) {
1565
1566 goto failure;
1567 }
1568
1569 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1570 "ehci_init_periodic_frame_lst_table: "
1571 "Real length %lu", real_length);
1572
1573 /* Map the whole Periodic Frame List into the I/O address space */
1574 result = ddi_dma_addr_bind_handle(ehcip->ehci_pflt_dma_handle,
1575 NULL, (caddr_t)ehcip->ehci_periodic_frame_list_tablep,
1576 real_length, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1577 DDI_DMA_SLEEP, NULL, &ehcip->ehci_pflt_cookie, &ccount);
1578
1579 if (result == DDI_DMA_MAPPED) {
1580 /* The cookie count should be 1 */
1581 if (ccount != 1) {
1582 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1583 "ehci_init_periodic_frame_lst_table: "
1584 "More than 1 cookie");
1585
1586 goto failure;
1587 }
1588 } else {
1589 ehci_decode_ddi_dma_addr_bind_handle_result(ehcip, result);
1590
1591 goto failure;
1592 }
1593
1594 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1595 "ehci_init_periodic_frame_lst_table: virtual 0x%p physical 0x%x",
1596 (void *)ehcip->ehci_periodic_frame_list_tablep,
1597 ehcip->ehci_pflt_cookie.dmac_address);
1598
1599 /*
1600 * DMA addresses for Periodic Frame List are bound.
1601 */
1602 ehcip->ehci_dma_addr_bind_flag |= EHCI_PFLT_DMA_BOUND;
1603
1604 bzero((void *)ehcip->ehci_periodic_frame_list_tablep, real_length);
1605
1606 /* Initialize the Periodic Frame List */
1607 ehci_build_interrupt_lattice(ehcip);
1608
1609 /* Reset Byte Alignment to Default */
1610 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1611
1612 return (DDI_SUCCESS);
1613 failure:
1614 /* Byte alignment */
1615 ehcip->ehci_dma_attr.dma_attr_align = EHCI_DMA_ATTR_ALIGNMENT;
1616
1617 return (DDI_FAILURE);
1618 }
1619
1620
1621 /*
1622 * ehci_build_interrupt_lattice:
1623 *
1624 * Construct the interrupt lattice tree using static Endpoint Descriptors
1625 * (QH). This interrupt lattice tree will have total of 32 interrupt QH
1626 * lists and the Host Controller (HC) processes one interrupt QH list in
1627 * every frame. The Host Controller traverses the periodic schedule by
1628 * constructing an array offset reference from the Periodic List Base Address
1629 * register and bits 12 to 3 of Frame Index register. It fetches the element
1630 * and begins traversing the graph of linked schedule data structures.
1631 */
1632 static void
1633 ehci_build_interrupt_lattice(ehci_state_t *ehcip)
1634 {
1635 ehci_qh_t *list_array = ehcip->ehci_qh_pool_addr;
1636 ushort_t ehci_index[EHCI_NUM_PERIODIC_FRAME_LISTS];
1637 ehci_periodic_frame_list_t *periodic_frame_list =
1638 ehcip->ehci_periodic_frame_list_tablep;
1639 ushort_t *temp, num_of_nodes;
1640 uintptr_t addr;
1641 int i, j, k;
1642
1643 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1644 "ehci_build_interrupt_lattice:");
1645
1646 /*
1647 * Reserve the first 63 Endpoint Descriptor (QH) structures
1648 * in the pool as static endpoints; these are required for
1649 * constructing the interrupt lattice tree.
1650 */
1651 for (i = 0; i < EHCI_NUM_STATIC_NODES; i++) {
1652 Set_QH(list_array[i].qh_state, EHCI_QH_STATIC);
1653 Set_QH(list_array[i].qh_status, EHCI_QH_STS_HALTED);
1654 Set_QH(list_array[i].qh_next_qtd, EHCI_QH_NEXT_QTD_PTR_VALID);
1655 Set_QH(list_array[i].qh_alt_next_qtd,
1656 EHCI_QH_ALT_NEXT_QTD_PTR_VALID);
1657 }
1658
1659 /*
1660 * Make sure that the last Endpoint on the periodic frame list
1661 * terminates the periodic schedule.
1662 */
1663 Set_QH(list_array[0].qh_link_ptr, EHCI_QH_LINK_PTR_VALID);
1664
1665 /* Build the interrupt lattice tree */
1666 for (i = 0; i < (EHCI_NUM_STATIC_NODES / 2); i++) {
1667 /*
1668 * The next pointer in the host controller endpoint
1669 * descriptor must contain an iommu address. Calculate
1670 * the offset into the cpu address and add this to the
1671 * starting iommu address.
1672 */
1673 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)&list_array[i]);
1674
1675 Set_QH(list_array[2*i + 1].qh_link_ptr,
1676 addr | EHCI_QH_LINK_REF_QH);
1677 Set_QH(list_array[2*i + 2].qh_link_ptr,
1678 addr | EHCI_QH_LINK_REF_QH);
1679 }
1680
1681 /* Build the tree bottom */
1682 temp = (unsigned short *)
1683 kmem_zalloc(EHCI_NUM_PERIODIC_FRAME_LISTS * 2, KM_SLEEP);
1684
1685 num_of_nodes = 1;
1686
1687 /*
1688 * Initialize the values which are used for setting up head pointers
1689 * for the 32ms scheduling lists which start from the Periodic Frame
1690 * List.
1691 */
1692 for (i = 0; i < ehci_log_2(EHCI_NUM_PERIODIC_FRAME_LISTS); i++) {
1693 for (j = 0, k = 0; k < num_of_nodes; k++, j++) {
1694 ehci_index[j++] = temp[k];
1695 ehci_index[j] = temp[k] + ehci_pow_2(i);
1696 }
1697
1698 num_of_nodes *= 2;
1699 for (k = 0; k < num_of_nodes; k++)
1700 temp[k] = ehci_index[k];
1701 }
1702
1703 kmem_free((void *)temp, (EHCI_NUM_PERIODIC_FRAME_LISTS * 2));
1704
1705 /*
1706 * Initialize the interrupt list in the Periodic Frame List Table
1707 * so that it points to the bottom of the tree.
1708 */
1709 for (i = 0, j = 0; i < ehci_pow_2(TREE_HEIGHT); i++) {
1710 addr = ehci_qh_cpu_to_iommu(ehcip, (ehci_qh_t *)
1711 (&list_array[((EHCI_NUM_STATIC_NODES + 1) / 2) + i - 1]));
1712
1713 ASSERT(addr);
1714
1715 for (k = 0; k < ehci_pow_2(TREE_HEIGHT); k++) {
1716 Set_PFLT(periodic_frame_list->
1717 ehci_periodic_frame_list_table[ehci_index[j++]],
1718 (uint32_t)(addr | EHCI_QH_LINK_REF_QH));
1719 }
1720 }
1721 }
1722
1723
1724 /*
1725 * ehci_alloc_hcdi_ops:
1726 *
1727 * The HCDI interfaces or entry points are the software interfaces used by
1728 * the Universal Serial Bus Driver (USBA) to access the services of the
1729 * Host Controller Driver (HCD). During HCD initialization, inform USBA
1730 * about all available HCDI interfaces or entry points.
1731 */
1732 usba_hcdi_ops_t *
1733 ehci_alloc_hcdi_ops(ehci_state_t *ehcip)
1734 {
1735 usba_hcdi_ops_t *usba_hcdi_ops;
1736
1737 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
1738 "ehci_alloc_hcdi_ops:");
1739
1740 usba_hcdi_ops = usba_alloc_hcdi_ops();
1741
1742 usba_hcdi_ops->usba_hcdi_ops_version = HCDI_OPS_VERSION;
1743
1744 usba_hcdi_ops->usba_hcdi_pm_support = ehci_hcdi_pm_support;
1745 usba_hcdi_ops->usba_hcdi_pipe_open = ehci_hcdi_pipe_open;
1746 usba_hcdi_ops->usba_hcdi_pipe_close = ehci_hcdi_pipe_close;
1747
1748 usba_hcdi_ops->usba_hcdi_pipe_reset = ehci_hcdi_pipe_reset;
1749 usba_hcdi_ops->usba_hcdi_pipe_reset_data_toggle =
1750 ehci_hcdi_pipe_reset_data_toggle;
1751
1752 usba_hcdi_ops->usba_hcdi_pipe_ctrl_xfer = ehci_hcdi_pipe_ctrl_xfer;
1753 usba_hcdi_ops->usba_hcdi_pipe_bulk_xfer = ehci_hcdi_pipe_bulk_xfer;
1754 usba_hcdi_ops->usba_hcdi_pipe_intr_xfer = ehci_hcdi_pipe_intr_xfer;
1755 usba_hcdi_ops->usba_hcdi_pipe_isoc_xfer = ehci_hcdi_pipe_isoc_xfer;
1756
1757 usba_hcdi_ops->usba_hcdi_bulk_transfer_size =
1758 ehci_hcdi_bulk_transfer_size;
1759
1760 usba_hcdi_ops->usba_hcdi_pipe_stop_intr_polling =
1761 ehci_hcdi_pipe_stop_intr_polling;
1762 usba_hcdi_ops->usba_hcdi_pipe_stop_isoc_polling =
1763 ehci_hcdi_pipe_stop_isoc_polling;
1764
1765 usba_hcdi_ops->usba_hcdi_get_current_frame_number =
1766 ehci_hcdi_get_current_frame_number;
1767 usba_hcdi_ops->usba_hcdi_get_max_isoc_pkts =
1768 ehci_hcdi_get_max_isoc_pkts;
1769
1770 usba_hcdi_ops->usba_hcdi_console_input_init =
1771 ehci_hcdi_polled_input_init;
1772 usba_hcdi_ops->usba_hcdi_console_input_enter =
1773 ehci_hcdi_polled_input_enter;
1774 usba_hcdi_ops->usba_hcdi_console_read =
1775 ehci_hcdi_polled_read;
1776 usba_hcdi_ops->usba_hcdi_console_input_exit =
1777 ehci_hcdi_polled_input_exit;
1778 usba_hcdi_ops->usba_hcdi_console_input_fini =
1779 ehci_hcdi_polled_input_fini;
1780
1781 usba_hcdi_ops->usba_hcdi_console_output_init =
1782 ehci_hcdi_polled_output_init;
1783 usba_hcdi_ops->usba_hcdi_console_output_enter =
1784 ehci_hcdi_polled_output_enter;
1785 usba_hcdi_ops->usba_hcdi_console_write =
1786 ehci_hcdi_polled_write;
1787 usba_hcdi_ops->usba_hcdi_console_output_exit =
1788 ehci_hcdi_polled_output_exit;
1789 usba_hcdi_ops->usba_hcdi_console_output_fini =
1790 ehci_hcdi_polled_output_fini;
1791 return (usba_hcdi_ops);
1792 }
1793
1794
1795 /*
1796 * Host Controller Driver (HCD) deinitialization functions
1797 */
1798
1799 /*
1800 * ehci_cleanup:
1801 *
1802 * Cleanup on attach failure or detach
1803 */
1804 int
1805 ehci_cleanup(ehci_state_t *ehcip)
1806 {
1807 ehci_trans_wrapper_t *tw;
1808 ehci_pipe_private_t *pp;
1809 ehci_qtd_t *qtd;
1810 int i, ctrl, rval;
1811 int flags = ehcip->ehci_flags;
1812
1813 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl, "ehci_cleanup:");
1814
1815 if (flags & EHCI_RHREG) {
1816 /* Unload the root hub driver */
1817 if (ehci_unload_root_hub_driver(ehcip) != USB_SUCCESS) {
1818
1819 return (DDI_FAILURE);
1820 }
1821 }
1822
1823 if (flags & EHCI_USBAREG) {
1824 /* Unregister this HCD instance with USBA */
1825 usba_hcdi_unregister(ehcip->ehci_dip);
1826 }
1827
1828 if (flags & EHCI_INTR) {
1829
1830 mutex_enter(&ehcip->ehci_int_mutex);
1831
1832 /* Disable all EHCI QH list processing */
1833 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
1834 ~(EHCI_CMD_ASYNC_SCHED_ENABLE |
1835 EHCI_CMD_PERIODIC_SCHED_ENABLE)));
1836
1837 /* Disable all EHCI interrupts */
1838 Set_OpReg(ehci_interrupt, 0);
1839
1840 /* wait for the next SOF */
1841 (void) ehci_wait_for_sof(ehcip);
1842
1843 /* Route all Root hub ports to Classic host controller */
1844 Set_OpReg(ehci_config_flag, EHCI_CONFIG_FLAG_CLASSIC);
1845
1846 /* Stop the EHCI host controller */
1847 Set_OpReg(ehci_command,
1848 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
1849
1850 mutex_exit(&ehcip->ehci_int_mutex);
1851
1852 		/* Wait for some time */
1853 delay(drv_usectohz(EHCI_TIMEWAIT));
1854
1855 ehci_rem_intrs(ehcip);
1856 }
1857
1858 /* Unmap the EHCI registers */
1859 if (ehcip->ehci_caps_handle) {
1860 ddi_regs_map_free(&ehcip->ehci_caps_handle);
1861 }
1862
1863 if (ehcip->ehci_config_handle) {
1864 pci_config_teardown(&ehcip->ehci_config_handle);
1865 }
1866
1867 /* Free all the buffers */
1868 if (ehcip->ehci_qtd_pool_addr && ehcip->ehci_qtd_pool_mem_handle) {
1869 for (i = 0; i < ehci_qtd_pool_size; i ++) {
1870 qtd = &ehcip->ehci_qtd_pool_addr[i];
1871 ctrl = Get_QTD(ehcip->
1872 ehci_qtd_pool_addr[i].qtd_state);
1873
1874 if ((ctrl != EHCI_QTD_FREE) &&
1875 (ctrl != EHCI_QTD_DUMMY) &&
1876 (qtd->qtd_trans_wrapper)) {
1877
1878 mutex_enter(&ehcip->ehci_int_mutex);
1879
1880 tw = (ehci_trans_wrapper_t *)
1881 EHCI_LOOKUP_ID((uint32_t)
1882 Get_QTD(qtd->qtd_trans_wrapper));
1883
1884 /* Obtain the pipe private structure */
1885 pp = tw->tw_pipe_private;
1886
1887 				/* Stop the transfer timer */
1888 ehci_stop_xfer_timer(ehcip, tw,
1889 EHCI_REMOVE_XFER_ALWAYS);
1890
1891 ehci_deallocate_tw(ehcip, pp, tw);
1892
1893 mutex_exit(&ehcip->ehci_int_mutex);
1894 }
1895 }
1896
1897 /*
1898 * If EHCI_QTD_POOL_BOUND flag is set, then unbind
1899 * the handle for QTD pools.
1900 */
1901 if ((ehcip->ehci_dma_addr_bind_flag &
1902 EHCI_QTD_POOL_BOUND) == EHCI_QTD_POOL_BOUND) {
1903
1904 rval = ddi_dma_unbind_handle(
1905 ehcip->ehci_qtd_pool_dma_handle);
1906
1907 ASSERT(rval == DDI_SUCCESS);
1908 }
1909 ddi_dma_mem_free(&ehcip->ehci_qtd_pool_mem_handle);
1910 }
1911
1912 /* Free the QTD pool */
1913 if (ehcip->ehci_qtd_pool_dma_handle) {
1914 ddi_dma_free_handle(&ehcip->ehci_qtd_pool_dma_handle);
1915 }
1916
1917 if (ehcip->ehci_qh_pool_addr && ehcip->ehci_qh_pool_mem_handle) {
1918 /*
1919 * If EHCI_QH_POOL_BOUND flag is set, then unbind
1920 * the handle for QH pools.
1921 */
1922 if ((ehcip->ehci_dma_addr_bind_flag &
1923 EHCI_QH_POOL_BOUND) == EHCI_QH_POOL_BOUND) {
1924
1925 rval = ddi_dma_unbind_handle(
1926 ehcip->ehci_qh_pool_dma_handle);
1927
1928 ASSERT(rval == DDI_SUCCESS);
1929 }
1930
1931 ddi_dma_mem_free(&ehcip->ehci_qh_pool_mem_handle);
1932 }
1933
1934 /* Free the QH pool */
1935 if (ehcip->ehci_qh_pool_dma_handle) {
1936 ddi_dma_free_handle(&ehcip->ehci_qh_pool_dma_handle);
1937 }
1938
1939 /* Free the Periodic frame list table (PFLT) area */
1940 if (ehcip->ehci_periodic_frame_list_tablep &&
1941 ehcip->ehci_pflt_mem_handle) {
1942 /*
1943 * If EHCI_PFLT_DMA_BOUND flag is set, then unbind
1944 * the handle for PFLT.
1945 */
1946 if ((ehcip->ehci_dma_addr_bind_flag &
1947 EHCI_PFLT_DMA_BOUND) == EHCI_PFLT_DMA_BOUND) {
1948
1949 rval = ddi_dma_unbind_handle(
1950 ehcip->ehci_pflt_dma_handle);
1951
1952 ASSERT(rval == DDI_SUCCESS);
1953 }
1954
1955 ddi_dma_mem_free(&ehcip->ehci_pflt_mem_handle);
1956 }
1957
1958 (void) ehci_isoc_cleanup(ehcip);
1959
1960 if (ehcip->ehci_pflt_dma_handle) {
1961 ddi_dma_free_handle(&ehcip->ehci_pflt_dma_handle);
1962 }
1963
1964 if (flags & EHCI_INTR) {
1965 /* Destroy the mutex */
1966 mutex_destroy(&ehcip->ehci_int_mutex);
1967
1968 /* Destroy the async schedule advance condition variable */
1969 cv_destroy(&ehcip->ehci_async_schedule_advance_cv);
1970 }
1971
1972 /* clean up kstat structs */
1973 ehci_destroy_stats(ehcip);
1974
1975 /* Free ehci hcdi ops */
1976 if (ehcip->ehci_hcdi_ops) {
1977 usba_free_hcdi_ops(ehcip->ehci_hcdi_ops);
1978 }
1979
1980 if (flags & EHCI_ZALLOC) {
1981
1982 usb_free_log_hdl(ehcip->ehci_log_hdl);
1983
1984 /* Remove all properties that might have been created */
1985 ddi_prop_remove_all(ehcip->ehci_dip);
1986
1987 /* Free the soft state */
1988 ddi_soft_state_free(ehci_statep,
1989 ddi_get_instance(ehcip->ehci_dip));
1990 }
1991
1992 return (DDI_SUCCESS);
1993 }
1994
1995
1996 /*
1997 * ehci_rem_intrs:
1998 *
1999 * Unregister FIXED or MSI interrupts
2000 */
2001 static void
2002 ehci_rem_intrs(ehci_state_t *ehcip)
2003 {
2004 int i;
2005
2006 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2007 "ehci_rem_intrs: interrupt type 0x%x", ehcip->ehci_intr_type);
2008
2009 /* Disable all interrupts */
2010 if (ehcip->ehci_intr_cap & DDI_INTR_FLAG_BLOCK) {
2011 (void) ddi_intr_block_disable(ehcip->ehci_htable,
2012 ehcip->ehci_intr_cnt);
2013 } else {
2014 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
2015 (void) ddi_intr_disable(ehcip->ehci_htable[i]);
2016 }
2017 }
2018
2019 	/* Remove the interrupt handlers and free the interrupt handles */
2020 for (i = 0; i < ehcip->ehci_intr_cnt; i++) {
2021 (void) ddi_intr_remove_handler(ehcip->ehci_htable[i]);
2022 (void) ddi_intr_free(ehcip->ehci_htable[i]);
2023 }
2024
2025 kmem_free(ehcip->ehci_htable,
2026 ehcip->ehci_intr_cnt * sizeof (ddi_intr_handle_t));
2027 }
2028
2029
2030 /*
2031 * ehci_cpr_suspend
2032 */
2033 int
2034 ehci_cpr_suspend(ehci_state_t *ehcip)
2035 {
2036 int i;
2037
2038 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2039 "ehci_cpr_suspend:");
2040
2041 /* Call into the root hub and suspend it */
2042 if (usba_hubdi_detach(ehcip->ehci_dip, DDI_SUSPEND) != DDI_SUCCESS) {
2043
2044 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2045 "ehci_cpr_suspend: root hub fails to suspend");
2046
2047 return (DDI_FAILURE);
2048 }
2049
2050 /* Only root hub's intr pipe should be open at this time */
2051 mutex_enter(&ehcip->ehci_int_mutex);
2052
2053 ASSERT(ehcip->ehci_open_pipe_count == 0);
2054
2055 /* Just wait till all resources are reclaimed */
2056 i = 0;
2057 while ((ehcip->ehci_reclaim_list != NULL) && (i++ < 3)) {
2058 ehci_handle_endpoint_reclaimation(ehcip);
2059 (void) ehci_wait_for_sof(ehcip);
2060 }
2061 ASSERT(ehcip->ehci_reclaim_list == NULL);
2062
2063 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2064 "ehci_cpr_suspend: Disable HC QH list processing");
2065
2066 /* Disable all EHCI QH list processing */
2067 Set_OpReg(ehci_command, (Get_OpReg(ehci_command) &
2068 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE)));
2069
2070 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2071 "ehci_cpr_suspend: Disable HC interrupts");
2072
2073 /* Disable all EHCI interrupts */
2074 Set_OpReg(ehci_interrupt, 0);
2075
2076 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2077 "ehci_cpr_suspend: Wait for the next SOF");
2078
2079 /* Wait for the next SOF */
2080 if (ehci_wait_for_sof(ehcip) != USB_SUCCESS) {
2081
2082 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2083 "ehci_cpr_suspend: ehci host controller suspend failed");
2084
2085 mutex_exit(&ehcip->ehci_int_mutex);
2086 return (DDI_FAILURE);
2087 }
2088
2089 /*
2090 	 * Stop the ehci host controller if no usb keyboard
2091 	 * is connected or if force_ehci_off is set.
2092 */
2093 if (ehcip->ehci_polled_kbd_count == 0 || force_ehci_off != 0) {
2094 Set_OpReg(ehci_command,
2095 Get_OpReg(ehci_command) & ~EHCI_CMD_HOST_CTRL_RUN);
2096
2097 }
2098
2099 /* Set host controller soft state to suspend */
2100 ehcip->ehci_hc_soft_state = EHCI_CTLR_SUSPEND_STATE;
2101
2102 mutex_exit(&ehcip->ehci_int_mutex);
2103
2104 return (DDI_SUCCESS);
2105 }
2106
2107
2108 /*
2109 * ehci_cpr_resume
2110 */
2111 int
2112 ehci_cpr_resume(ehci_state_t *ehcip)
2113 {
2114 mutex_enter(&ehcip->ehci_int_mutex);
2115
2116 USB_DPRINTF_L4(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2117 "ehci_cpr_resume: Restart the controller");
2118
2119 /* Cleanup ehci specific information across cpr */
2120 ehci_cpr_cleanup(ehcip);
2121
2122 /* Restart the controller */
2123 if (ehci_init_ctlr(ehcip, EHCI_NORMAL_INITIALIZATION) != DDI_SUCCESS) {
2124
2125 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
2126 "ehci_cpr_resume: ehci host controller resume failed ");
2127
2128 mutex_exit(&ehcip->ehci_int_mutex);
2129
2130 return (DDI_FAILURE);
2131 }
2132
2133 mutex_exit(&ehcip->ehci_int_mutex);
2134
2135 /* Now resume the root hub */
2136 if (usba_hubdi_attach(ehcip->ehci_dip, DDI_RESUME) != DDI_SUCCESS) {
2137
2138 return (DDI_FAILURE);
2139 }
2140
2141 return (DDI_SUCCESS);
2142 }
2143
2144
2145 /*
2146 * Bandwidth Allocation functions
2147 */
2148
2149 /*
2150 * ehci_allocate_bandwidth:
2151 *
2152 * Figure out whether or not this interval may be supported. Return the index
2153 * into the lattice if it can be supported. Return allocation failure if it
2154  * cannot be supported.
2155 */
2156 int
2157 ehci_allocate_bandwidth(
2158 ehci_state_t *ehcip,
2159 usba_pipe_handle_data_t *ph,
2160 uint_t *pnode,
2161 uchar_t *smask,
2162 uchar_t *cmask)
2163 {
2164 int error = USB_SUCCESS;
2165
2166 /* This routine is protected by the ehci_int_mutex */
2167 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2168
2169 /* Reset the pnode to the last checked pnode */
2170 *pnode = 0;
2171
2172 /* Allocate high speed bandwidth */
2173 if ((error = ehci_allocate_high_speed_bandwidth(ehcip,
2174 ph, pnode, smask, cmask)) != USB_SUCCESS) {
2175
2176 return (error);
2177 }
2178
2179 /*
2180 * For low/full speed usb devices, allocate classic TT bandwidth
2181 	 * in addition to the high speed bandwidth.
2182 */
2183 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2184
2185 /* Allocate classic TT bandwidth */
2186 if ((error = ehci_allocate_classic_tt_bandwidth(
2187 ehcip, ph, *pnode)) != USB_SUCCESS) {
2188
2189 /* Deallocate high speed bandwidth */
2190 ehci_deallocate_high_speed_bandwidth(
2191 ehcip, ph, *pnode, *smask, *cmask);
2192 }
2193 }
2194
2195 return (error);
2196 }
2197
2198
2199 /*
2200 * ehci_allocate_high_speed_bandwidth:
2201 *
2202 * Allocate high speed bandwidth for the low/full/high speed interrupt and
2203 * isochronous endpoints.
2204 */
2205 static int
2206 ehci_allocate_high_speed_bandwidth(
2207 ehci_state_t *ehcip,
2208 usba_pipe_handle_data_t *ph,
2209 uint_t *pnode,
2210 uchar_t *smask,
2211 uchar_t *cmask)
2212 {
2213 uint_t sbandwidth, cbandwidth;
2214 int interval;
2215 usb_ep_descr_t *endpoint = &ph->p_ep;
2216 usba_device_t *child_ud;
2217 usb_port_status_t port_status;
2218 int error;
2219
2220 /* This routine is protected by the ehci_int_mutex */
2221 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2222
2223 /* Get child's usba device structure */
2224 child_ud = ph->p_usba_device;
2225
2226 mutex_enter(&child_ud->usb_mutex);
2227
2228 /* Get the current usb device's port status */
2229 port_status = ph->p_usba_device->usb_port_status;
2230
2231 mutex_exit(&child_ud->usb_mutex);
2232
2233 /*
2234 * Calculate the length in bytes of a transaction on this
2235 * periodic endpoint. Return failure if maximum packet is
2236 * zero.
2237 */
2238 error = ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2239 port_status, &sbandwidth, &cbandwidth);
2240 if (error != USB_SUCCESS) {
2241
2242 return (error);
2243 }
2244
2245 /*
2246 * Adjust polling interval to be a power of 2.
2247 * If this interval can't be supported, return
2248 * allocation failure.
2249 */
2250 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2251 if (interval == USB_FAILURE) {
2252
2253 return (USB_FAILURE);
2254 }
2255
2256 if (port_status == USBA_HIGH_SPEED_DEV) {
2257 /* Allocate bandwidth for high speed devices */
2258 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2259 USB_EP_ATTR_ISOCH) {
2260 error = USB_SUCCESS;
2261 } else {
2262
2263 error = ehci_find_bestfit_hs_mask(ehcip, smask, pnode,
2264 endpoint, sbandwidth, interval);
2265 }
2266
2267 *cmask = 0x00;
2268
2269 } else {
2270 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2271 USB_EP_ATTR_INTR) {
2272
2273 /* Allocate bandwidth for low speed interrupt */
2274 error = ehci_find_bestfit_ls_intr_mask(ehcip,
2275 smask, cmask, pnode, sbandwidth, cbandwidth,
2276 interval);
2277 } else {
2278 if ((endpoint->bEndpointAddress &
2279 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2280
2281 /* Allocate bandwidth for sitd in */
2282 error = ehci_find_bestfit_sitd_in_mask(ehcip,
2283 smask, cmask, pnode, sbandwidth, cbandwidth,
2284 interval);
2285 } else {
2286
2287 /* Allocate bandwidth for sitd out */
2288 error = ehci_find_bestfit_sitd_out_mask(ehcip,
2289 smask, pnode, sbandwidth, interval);
2290 *cmask = 0x00;
2291 }
2292 }
2293 }
2294
2295 if (error != USB_SUCCESS) {
2296 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2297 "ehci_allocate_high_speed_bandwidth: Reached maximum "
2298 "bandwidth value and cannot allocate bandwidth for a "
2299 "given high-speed periodic endpoint");
2300
2301 return (USB_NO_BANDWIDTH);
2302 }
2303
2304 return (error);
2305 }
2306
2307
2308 /*
2309  * ehci_allocate_classic_tt_bandwidth:
2310 *
2311 * Allocate classic TT bandwidth for the low/full speed interrupt and
2312 * isochronous endpoints.
2313 */
2314 static int
2315 ehci_allocate_classic_tt_bandwidth(
2316 ehci_state_t *ehcip,
2317 usba_pipe_handle_data_t *ph,
2318 uint_t pnode)
2319 {
2320 uint_t bandwidth, min;
2321 uint_t height, leftmost, list;
2322 usb_ep_descr_t *endpoint = &ph->p_ep;
2323 usba_device_t *child_ud, *parent_ud;
2324 usb_port_status_t port_status;
2325 int i, interval;
2326
2327 /* This routine is protected by the ehci_int_mutex */
2328 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2329
2330 /* Get child's usba device structure */
2331 child_ud = ph->p_usba_device;
2332
2333 mutex_enter(&child_ud->usb_mutex);
2334
2335 /* Get the current usb device's port status */
2336 port_status = child_ud->usb_port_status;
2337
2338 /* Get the parent high speed hub's usba device structure */
2339 parent_ud = child_ud->usb_hs_hub_usba_dev;
2340
2341 mutex_exit(&child_ud->usb_mutex);
2342
2343 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2344 "ehci_allocate_classic_tt_bandwidth: "
2345 "child_ud 0x%p parent_ud 0x%p",
2346 (void *)child_ud, (void *)parent_ud);
2347
2348 /*
2349 * Calculate the length in bytes of a transaction on this
2350 * periodic endpoint. Return failure if maximum packet is
2351 * zero.
2352 */
2353 if (ehci_compute_classic_bandwidth(endpoint,
2354 port_status, &bandwidth) != USB_SUCCESS) {
2355
2356 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2357 "ehci_allocate_classic_tt_bandwidth: Periodic endpoint "
2358 "with zero endpoint maximum packet size is not supported");
2359
2360 return (USB_NOT_SUPPORTED);
2361 }
2362
2363 USB_DPRINTF_L3(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2364 "ehci_allocate_classic_tt_bandwidth: bandwidth %d", bandwidth);
2365
2366 mutex_enter(&parent_ud->usb_mutex);
2367
2368 /*
2369 * If the length in bytes plus the allocated bandwidth exceeds
2370 * the maximum, return bandwidth allocation failure.
2371 */
2372 if ((parent_ud->usb_hs_hub_min_bandwidth + bandwidth) >
2373 FS_PERIODIC_BANDWIDTH) {
2374
2375 mutex_exit(&parent_ud->usb_mutex);
2376
2377 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2378 "ehci_allocate_classic_tt_bandwidth: Reached maximum "
2379 "bandwidth value and cannot allocate bandwidth for a "
2380 "given low/full speed periodic endpoint");
2381
2382 return (USB_NO_BANDWIDTH);
2383 }
2384
2385 mutex_exit(&parent_ud->usb_mutex);
2386
2387 /* Adjust polling interval to be a power of 2 */
2388 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2389
2390 /* Find the height in the tree */
2391 height = ehci_lattice_height(interval);
2392
2393 /* Find the leftmost leaf in the subtree specified by the node. */
2394 leftmost = ehci_leftmost_leaf(pnode, height);
2395
2396 mutex_enter(&parent_ud->usb_mutex);
2397
2398 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2399 list = ehci_index[leftmost + i];
2400
2401 if ((parent_ud->usb_hs_hub_bandwidth[list] +
2402 bandwidth) > FS_PERIODIC_BANDWIDTH) {
2403
2404 mutex_exit(&parent_ud->usb_mutex);
2405
2406 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2407 "ehci_allocate_classic_tt_bandwidth: Reached "
2408 "maximum bandwidth value and cannot allocate "
2409 "bandwidth for low/full periodic endpoint");
2410
2411 return (USB_NO_BANDWIDTH);
2412 }
2413 }
2414
2415 /*
2416 * All the leaves for this node must be updated with the bandwidth.
2417 */
2418 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2419 list = ehci_index[leftmost + i];
2420 parent_ud->usb_hs_hub_bandwidth[list] += bandwidth;
2421 }
2422
2423 /* Find the leaf with the smallest allocated bandwidth */
2424 min = parent_ud->usb_hs_hub_bandwidth[0];
2425
2426 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2427 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2428 min = parent_ud->usb_hs_hub_bandwidth[i];
2429 }
2430 }
2431
2432 /* Save the minimum for later use */
2433 parent_ud->usb_hs_hub_min_bandwidth = min;
2434
2435 mutex_exit(&parent_ud->usb_mutex);
2436
2437 return (USB_SUCCESS);
2438 }
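
/*
 * Worked example (illustrative numbers): for a full speed interrupt
 * endpoint whose adjusted polling interval is 8ms, the loops above walk
 * EHCI_NUM_INTR_QH_LISTS/interval = 32/8 = 4 leaves of the chosen
 * subtree. Each of those leaves first has its projected load checked
 * against FS_PERIODIC_BANDWIDTH, then has `bandwidth' added to its
 * usb_hs_hub_bandwidth entry, and usb_hs_hub_min_bandwidth is
 * recomputed as the minimum over all 32 leaves so the next allocation
 * can do its quick check cheaply.
 */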
2439
2440
2441 /*
2442 * ehci_deallocate_bandwidth:
2443 *
2444 * Deallocate bandwidth for the given node in the lattice and the length
2445 * of transfer.
2446 */
2447 void
2448 ehci_deallocate_bandwidth(
2449 ehci_state_t *ehcip,
2450 usba_pipe_handle_data_t *ph,
2451 uint_t pnode,
2452 uchar_t smask,
2453 uchar_t cmask)
2454 {
2455 /* This routine is protected by the ehci_int_mutex */
2456 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2457
2458 ehci_deallocate_high_speed_bandwidth(ehcip, ph, pnode, smask, cmask);
2459
2460 /*
2461 * For low/full speed usb devices, deallocate classic TT bandwidth
2462 	 * in addition to the high speed bandwidth.
2463 */
2464 if (ph->p_usba_device->usb_port_status != USBA_HIGH_SPEED_DEV) {
2465
2466 /* Deallocate classic TT bandwidth */
2467 ehci_deallocate_classic_tt_bandwidth(ehcip, ph, pnode);
2468 }
2469 }
2470
2471
2472 /*
2473 * ehci_deallocate_high_speed_bandwidth:
2474 *
2475  * Deallocate high speed bandwidth of an interrupt or isochronous endpoint.
2476 */
2477 static void
2478 ehci_deallocate_high_speed_bandwidth(
2479 ehci_state_t *ehcip,
2480 usba_pipe_handle_data_t *ph,
2481 uint_t pnode,
2482 uchar_t smask,
2483 uchar_t cmask)
2484 {
2485 uint_t height, leftmost;
2486 uint_t list_count;
2487 uint_t sbandwidth, cbandwidth;
2488 int interval;
2489 usb_ep_descr_t *endpoint = &ph->p_ep;
2490 usba_device_t *child_ud;
2491 usb_port_status_t port_status;
2492
2493 /* This routine is protected by the ehci_int_mutex */
2494 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2495
2496 /* Get child's usba device structure */
2497 child_ud = ph->p_usba_device;
2498
2499 mutex_enter(&child_ud->usb_mutex);
2500
2501 /* Get the current usb device's port status */
2502 port_status = ph->p_usba_device->usb_port_status;
2503
2504 mutex_exit(&child_ud->usb_mutex);
2505
2506 (void) ehci_compute_high_speed_bandwidth(ehcip, endpoint,
2507 port_status, &sbandwidth, &cbandwidth);
2508
2509 /* Adjust polling interval to be a power of 2 */
2510 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2511
2512 /* Find the height in the tree */
2513 height = ehci_lattice_height(interval);
2514
2515 /*
2516 * Find the leftmost leaf in the subtree specified by the node
2517 */
2518 leftmost = ehci_leftmost_leaf(pnode, height);
2519
2520 list_count = EHCI_NUM_INTR_QH_LISTS/interval;
2521
2522 /* Delete the bandwidth from the appropriate lists */
2523 if (port_status == USBA_HIGH_SPEED_DEV) {
2524
2525 ehci_update_bw_availability(ehcip, -sbandwidth,
2526 leftmost, list_count, smask);
2527 } else {
2528 if ((endpoint->bmAttributes & USB_EP_ATTR_MASK) ==
2529 USB_EP_ATTR_INTR) {
2530
2531 ehci_update_bw_availability(ehcip, -sbandwidth,
2532 leftmost, list_count, smask);
2533 ehci_update_bw_availability(ehcip, -cbandwidth,
2534 leftmost, list_count, cmask);
2535 } else {
2536 if ((endpoint->bEndpointAddress &
2537 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2538
2539 ehci_update_bw_availability(ehcip, -sbandwidth,
2540 leftmost, list_count, smask);
2541 ehci_update_bw_availability(ehcip,
2542 -MAX_UFRAME_SITD_XFER, leftmost,
2543 list_count, cmask);
2544 } else {
2545
2546 ehci_update_bw_availability(ehcip,
2547 -MAX_UFRAME_SITD_XFER, leftmost,
2548 list_count, smask);
2549 }
2550 }
2551 }
2552 }
2553
2554 /*
2555 * ehci_deallocate_classic_tt_bandwidth:
2556 *
2557  * Deallocate classic TT bandwidth of an interrupt or isochronous endpoint.
2558 */
2559 static void
2560 ehci_deallocate_classic_tt_bandwidth(
2561 ehci_state_t *ehcip,
2562 usba_pipe_handle_data_t *ph,
2563 uint_t pnode)
2564 {
2565 uint_t bandwidth, height, leftmost, list, min;
2566 int i, interval;
2567 usb_ep_descr_t *endpoint = &ph->p_ep;
2568 usba_device_t *child_ud, *parent_ud;
2569 usb_port_status_t port_status;
2570
2571 /* This routine is protected by the ehci_int_mutex */
2572 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
2573
2574 /* Get child's usba device structure */
2575 child_ud = ph->p_usba_device;
2576
2577 mutex_enter(&child_ud->usb_mutex);
2578
2579 /* Get the current usb device's port status */
2580 port_status = child_ud->usb_port_status;
2581
2582 /* Get the parent high speed hub's usba device structure */
2583 parent_ud = child_ud->usb_hs_hub_usba_dev;
2584
2585 mutex_exit(&child_ud->usb_mutex);
2586
2587 /* Obtain the bandwidth */
2588 (void) ehci_compute_classic_bandwidth(endpoint,
2589 port_status, &bandwidth);
2590
2591 /* Adjust polling interval to be a power of 2 */
2592 interval = ehci_adjust_polling_interval(ehcip, endpoint, port_status);
2593
2594 /* Find the height in the tree */
2595 height = ehci_lattice_height(interval);
2596
2597 /* Find the leftmost leaf in the subtree specified by the node */
2598 leftmost = ehci_leftmost_leaf(pnode, height);
2599
2600 mutex_enter(&parent_ud->usb_mutex);
2601
2602 /* Delete the bandwidth from the appropriate lists */
2603 for (i = 0; i < (EHCI_NUM_INTR_QH_LISTS/interval); i++) {
2604 list = ehci_index[leftmost + i];
2605 parent_ud->usb_hs_hub_bandwidth[list] -= bandwidth;
2606 }
2607
2608 /* Find the leaf with the smallest allocated bandwidth */
2609 min = parent_ud->usb_hs_hub_bandwidth[0];
2610
2611 for (i = 1; i < EHCI_NUM_INTR_QH_LISTS; i++) {
2612 if (parent_ud->usb_hs_hub_bandwidth[i] < min) {
2613 min = parent_ud->usb_hs_hub_bandwidth[i];
2614 }
2615 }
2616
2617 /* Save the minimum for later use */
2618 parent_ud->usb_hs_hub_min_bandwidth = min;
2619
2620 mutex_exit(&parent_ud->usb_mutex);
2621 }
2622
2623
2624 /*
2625 * ehci_compute_high_speed_bandwidth:
2626 *
2627 * Given a periodic endpoint (interrupt or isochronous) determine the total
2628 * bandwidth for one transaction. The EHCI host controller traverses the
2629 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2630 * services an endpoint, only a single transaction attempt is made. The HC
2631 * moves to the next Endpoint Descriptor after the first transaction attempt
2632 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2633 * Transfer Descriptor is inserted into the lattice, we will only count the
2634 * number of bytes for one transaction.
2635 *
2636  * The following formulas are used to calculate the bandwidth, in bytes,
2637  * of a single USB high speed transaction. The protocol overheads differ
2638  * for each type of USB transfer, and both the formulas and the protocol
2639  * overheads are derived from section 5.11.3 of the USB 2.0
2640  * Specification.
2641 *
2642 * High-Speed:
2643 * Protocol overhead + ((MaxPktSz * 7)/6) + Host_Delay
2644 *
2645 * Split Transaction: (Low/Full speed devices connected behind usb2.0 hub)
2646 *
2647 * Protocol overhead + Split transaction overhead +
2648 * ((MaxPktSz * 7)/6) + Host_Delay;
2649 */
2650 /* ARGSUSED */
2651 static int
2652 ehci_compute_high_speed_bandwidth(
2653 ehci_state_t *ehcip,
2654 usb_ep_descr_t *endpoint,
2655 usb_port_status_t port_status,
2656 uint_t *sbandwidth,
2657 uint_t *cbandwidth)
2658 {
2659 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2660
2661 /* Return failure if endpoint maximum packet is zero */
2662 if (maxpacketsize == 0) {
2663 USB_DPRINTF_L2(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2664 	    "ehci_compute_high_speed_bandwidth: Periodic endpoint "
2665 "with zero endpoint maximum packet size is not supported");
2666
2667 return (USB_NOT_SUPPORTED);
2668 }
2669
2670 /* Add bit-stuffing overhead */
2671 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2672
2673 /* Add Host Controller specific delay to required bandwidth */
2674 *sbandwidth = EHCI_HOST_CONTROLLER_DELAY;
2675
2676 /* Add xfer specific protocol overheads */
2677 if ((endpoint->bmAttributes &
2678 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2679 /* High speed interrupt transaction */
2680 *sbandwidth += HS_NON_ISOC_PROTO_OVERHEAD;
2681 } else {
2682 /* Isochronous transaction */
2683 *sbandwidth += HS_ISOC_PROTO_OVERHEAD;
2684 }
2685
2686 /*
2687 * For low/full speed devices, add split transaction specific
2688 * overheads.
2689 */
2690 if (port_status != USBA_HIGH_SPEED_DEV) {
2691 /*
2692 * Add start and complete split transaction
2693 * tokens overheads.
2694 */
2695 *cbandwidth = *sbandwidth + COMPLETE_SPLIT_OVERHEAD;
2696 *sbandwidth += START_SPLIT_OVERHEAD;
2697
2698 /* Add data overhead depending on data direction */
2699 if ((endpoint->bEndpointAddress &
2700 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2701 *cbandwidth += maxpacketsize;
2702 } else {
2703 if ((endpoint->bmAttributes &
2704 USB_EP_ATTR_MASK) == USB_EP_ATTR_ISOCH) {
2705 				/* There are no complete splits for out */
2706 *cbandwidth = 0;
2707 }
2708 *sbandwidth += maxpacketsize;
2709 }
2710 } else {
2711 uint_t xactions;
2712
2713 /* Get the max transactions per microframe */
2714 xactions = ((maxpacketsize & USB_EP_MAX_XACTS_MASK) >>
2715 USB_EP_MAX_XACTS_SHIFT) + 1;
2716
2717 /* High speed transaction */
2718 *sbandwidth += maxpacketsize;
2719
2720 /* Calculate bandwidth per micro-frame */
2721 *sbandwidth *= xactions;
2722
2723 *cbandwidth = 0;
2724 }
2725
2726 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2727 	    "ehci_compute_high_speed_bandwidth: "
2728 "Start split bandwidth %d Complete split bandwidth %d",
2729 *sbandwidth, *cbandwidth);
2730
2731 return (USB_SUCCESS);
2732 }
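
/*
 * Worked example (illustrative numbers): a high speed interrupt
 * endpoint with wMaxPacketSize = 64 and one transaction per micro-frame
 * has a bit-stuffed payload of (64 * 7) / 6 = 74 bytes, so the routine
 * returns *sbandwidth = EHCI_HOST_CONTROLLER_DELAY +
 * HS_NON_ISOC_PROTO_OVERHEAD + 74 and *cbandwidth = 0. For a full
 * speed interrupt endpoint behind a high speed hub (the split
 * transaction case), START_SPLIT_OVERHEAD is added to the start split
 * and COMPLETE_SPLIT_OVERHEAD to the complete split, with the payload
 * bytes charged to the complete split for an IN endpoint and to the
 * start split for an OUT endpoint.
 */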
2733
2734
2735 /*
2736 * ehci_compute_classic_bandwidth:
2737 *
2738 * Given a periodic endpoint (interrupt or isochronous) determine the total
2739 * bandwidth for one transaction. The EHCI host controller traverses the
2740 * endpoint descriptor lists on a first-come-first-serve basis. When the HC
2741 * services an endpoint, only a single transaction attempt is made. The HC
2742 * moves to the next Endpoint Descriptor after the first transaction attempt
2743 * rather than finishing the entire Transfer Descriptor. Therefore, when a
2744 * Transfer Descriptor is inserted into the lattice, we will only count the
2745 * number of bytes for one transaction.
2746 *
2747  * The following formulas are used to calculate the bandwidth, in bytes,
2748  * of a single USB low/full speed (classic) transaction. The protocol
2749  * overheads differ for each type of USB transfer, and both the formulas
2750  * and the protocol overheads are derived from section 5.11.3 of the
2751  * USB 2.0 Specification.
2752 *
2753 * Low-Speed:
2754 * Protocol overhead + Hub LS overhead +
2755 * (Low Speed clock * ((MaxPktSz * 7)/6)) + TT_Delay
2756 *
2757 * Full-Speed:
2758 * Protocol overhead + ((MaxPktSz * 7)/6) + TT_Delay
2759 */
2760 /* ARGSUSED */
2761 static int
2762 ehci_compute_classic_bandwidth(
2763 usb_ep_descr_t *endpoint,
2764 usb_port_status_t port_status,
2765 uint_t *bandwidth)
2766 {
2767 ushort_t maxpacketsize = endpoint->wMaxPacketSize;
2768
2769 /*
2770 * If endpoint maximum packet is zero, then return immediately.
2771 */
2772 if (maxpacketsize == 0) {
2773
2774 return (USB_NOT_SUPPORTED);
2775 }
2776
2777 /* Add TT delay to required bandwidth */
2778 *bandwidth = TT_DELAY;
2779
2780 /* Add bit-stuffing overhead */
2781 maxpacketsize = (ushort_t)((maxpacketsize * 7) / 6);
2782
2783 switch (port_status) {
2784 case USBA_LOW_SPEED_DEV:
2785 /* Low speed interrupt transaction */
2786 *bandwidth += (LOW_SPEED_PROTO_OVERHEAD +
2787 HUB_LOW_SPEED_PROTO_OVERHEAD +
2788 (LOW_SPEED_CLOCK * maxpacketsize));
2789 break;
2790 case USBA_FULL_SPEED_DEV:
2791 /* Full speed transaction */
2792 *bandwidth += maxpacketsize;
2793
2794 /* Add xfer specific protocol overheads */
2795 if ((endpoint->bmAttributes &
2796 USB_EP_ATTR_MASK) == USB_EP_ATTR_INTR) {
2797 /* Full speed interrupt transaction */
2798 *bandwidth += FS_NON_ISOC_PROTO_OVERHEAD;
2799 } else {
2800 /* Isochronous and input transaction */
2801 if ((endpoint->bEndpointAddress &
2802 USB_EP_DIR_MASK) == USB_EP_DIR_IN) {
2803 *bandwidth += FS_ISOC_INPUT_PROTO_OVERHEAD;
2804 } else {
2805 /* Isochronous and output transaction */
2806 *bandwidth += FS_ISOC_OUTPUT_PROTO_OVERHEAD;
2807 }
2808 }
2809 break;
2810 }
2811
2812 return (USB_SUCCESS);
2813 }
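
/*
 * Worked example (illustrative numbers): a full speed interrupt
 * endpoint with wMaxPacketSize = 8 has a bit-stuffed payload of
 * (8 * 7) / 6 = 9 bytes, so *bandwidth = TT_DELAY +
 * FS_NON_ISOC_PROTO_OVERHEAD + 9 bytes of classic bus time on the
 * transaction translator. A low speed endpoint with the same maximum
 * packet size is instead charged TT_DELAY + LOW_SPEED_PROTO_OVERHEAD +
 * HUB_LOW_SPEED_PROTO_OVERHEAD + (LOW_SPEED_CLOCK * 9), since each low
 * speed byte occupies LOW_SPEED_CLOCK times as much classic bus time.
 */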
2814
2815
2816 /*
2817 * ehci_adjust_polling_interval:
2818 *
2819  * Adjust the polling interval according to the usb device speed.
2820 */
2821 /* ARGSUSED */
2822 int
2823 ehci_adjust_polling_interval(
2824 ehci_state_t *ehcip,
2825 usb_ep_descr_t *endpoint,
2826 usb_port_status_t port_status)
2827 {
2828 uint_t interval;
2829 int i = 0;
2830
2831 /* Get the polling interval */
2832 interval = endpoint->bInterval;
2833
2834 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2835 "ehci_adjust_polling_interval: Polling interval 0x%x", interval);
2836
2837 /*
2838 	 * According to the USB 2.0 Specification, a high-speed endpoint's
2839 	 * polling interval is specified in terms of 125us micro-frames,
2840 	 * whereas a full/low speed endpoint's polling interval is
2841 	 * specified in milliseconds.
2842 	 *
2843 	 * A high speed interrupt/isochronous endpoint can specify a
2844 	 * desired polling interval between 1 and 16 micro-frame units,
2845 	 * whereas full/low speed endpoints can specify between 1 and 255
2846 	 * milliseconds.
2847 */
2848 switch (port_status) {
2849 case USBA_LOW_SPEED_DEV:
2850 /*
2851 * Low speed endpoints are limited to specifying
2852 * only 8ms to 255ms in this driver. If a device
2853 * reports a polling interval that is less than 8ms,
2854 * it will use 8 ms instead.
2855 */
2856 if (interval < LS_MIN_POLL_INTERVAL) {
2857
2858 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2859 "Low speed endpoint's poll interval of %d ms "
2860 "is below threshold. Rounding up to %d ms",
2861 interval, LS_MIN_POLL_INTERVAL);
2862
2863 interval = LS_MIN_POLL_INTERVAL;
2864 }
2865
2866 /*
2867 * Return an error if the polling interval is greater
2868 * than 255ms.
2869 */
2870 if (interval > LS_MAX_POLL_INTERVAL) {
2871
2872 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2873 "Low speed endpoint's poll interval is "
2874 "greater than %d ms", LS_MAX_POLL_INTERVAL);
2875
2876 return (USB_FAILURE);
2877 }
2878 break;
2879
2880 case USBA_FULL_SPEED_DEV:
2881 /*
2882 		 * Return an error if the polling interval is less
2883 		 * than 1ms or greater than 255ms.
2884 		 */
2885 		if ((interval < FS_MIN_POLL_INTERVAL) ||
2886 		    (interval > FS_MAX_POLL_INTERVAL)) {
2887
2888 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2889 "Full speed endpoint's poll interval must "
2890 "be between %d and %d ms", FS_MIN_POLL_INTERVAL,
2891 FS_MAX_POLL_INTERVAL);
2892
2893 return (USB_FAILURE);
2894 }
2895 break;
2896 case USBA_HIGH_SPEED_DEV:
2897 /*
2898 		 * Return an error if the polling interval is less than 1
2899 		 * or greater than 16. Convert this value to 125us
2900 		 * units using 2^(bInterval - 1). Refer to the USB 2.0
2901 		 * spec, page 51, for details.
2902 		 */
2903 		if ((interval < HS_MIN_POLL_INTERVAL) ||
2904 		    (interval > HS_MAX_POLL_INTERVAL)) {
2905
2906 USB_DPRINTF_L1(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2907 "High speed endpoint's poll interval "
2908 "must be between %d and %d units",
2909 HS_MIN_POLL_INTERVAL, HS_MAX_POLL_INTERVAL);
2910
2911 return (USB_FAILURE);
2912 }
2913
2914 /* Adjust high speed device polling interval */
2915 interval =
2916 ehci_adjust_high_speed_polling_interval(ehcip, endpoint);
2917
2918 break;
2919 }
2920
2921 /*
2922 	 * If the polling interval is greater than 32ms,
2923 	 * use 32ms instead.
2924 */
2925 if (interval > EHCI_NUM_INTR_QH_LISTS) {
2926 interval = EHCI_NUM_INTR_QH_LISTS;
2927 }
2928
2929 /*
2930 	 * Find the largest power of 2 that is less
2931 	 * than or equal to the interval.
2932 */
2933 while ((ehci_pow_2(i)) <= interval) {
2934 i++;
2935 }
2936
2937 return (ehci_pow_2((i - 1)));
2938 }
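
/*
 * Worked example: a full speed endpoint reporting bInterval = 10 (ms)
 * passes the range check unchanged and is then rounded down to the
 * largest power of two that fits, i.e. 8ms, which selects one of the
 * 8ms subtrees of the 32-list interrupt lattice. A high speed endpoint
 * reporting bInterval = 7 is first converted by
 * ehci_adjust_high_speed_polling_interval() to 2^(7 - 1) = 64
 * micro-frames = 8ms, and is rounded to the same 8ms result.
 */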
2939
2940
2941 /*
2942 * ehci_adjust_high_speed_polling_interval:
2943 */
2944 /* ARGSUSED */
2945 static int
2946 ehci_adjust_high_speed_polling_interval(
2947 ehci_state_t *ehcip,
2948 usb_ep_descr_t *endpoint)
2949 {
2950 uint_t interval;
2951
2952 /* Get the polling interval */
2953 interval = ehci_pow_2(endpoint->bInterval - 1);
2954
2955 /*
2956 	 * Convert the polling interval from micro-frames
2957 	 * (125us units) to milliseconds.
2958 */
2959 if (interval <= EHCI_MAX_UFRAMES) {
2960 interval = 1;
2961 } else {
2962 interval = interval/EHCI_MAX_UFRAMES;
2963 }
2964
2965 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
2966 "ehci_adjust_high_speed_polling_interval: "
2967 "High speed adjusted interval 0x%x", interval);
2968
2969 return (interval);
2970 }
2971
2972
2973 /*
2974 * ehci_lattice_height:
2975 *
2976  * Given the requested polling interval, find the height in the tree at
2977  * which the nodes for this interval fall. The height is measured as the
2978  * number of nodes from the leaf up to the level specified by the
2979  * interval. The root of the tree is at height TREE_HEIGHT.
2980 */
2981 static uint_t
2982 ehci_lattice_height(uint_t interval)
2983 {
2984 return (TREE_HEIGHT - (ehci_log_2(interval)));
2985 }
2986
2987
2988 /*
2989 * ehci_lattice_parent:
2990 *
2991 * Given a node in the lattice, find the index of the parent node
2992 */
2993 static uint_t
2994 ehci_lattice_parent(uint_t node)
2995 {
2996 if ((node % 2) == 0) {
2997
2998 return ((node/2) - 1);
2999 } else {
3000
3001 return ((node + 1)/2 - 1);
3002 }
3003 }
3004
3005
3006 /*
3007 * ehci_find_periodic_node:
3008 *
3009 * Based on the "real" array leaf node and interval, get the periodic node.
3010 */
3011 static uint_t
3012 ehci_find_periodic_node(uint_t leaf, int interval)
3013 {
3014 uint_t lattice_leaf;
3015 uint_t height = ehci_lattice_height(interval);
3016 uint_t pnode;
3017 int i;
3018
3019 /* Get the leaf number in the lattice */
3020 lattice_leaf = leaf + EHCI_NUM_INTR_QH_LISTS - 1;
3021
3022 /* Get the node in the lattice based on the height and leaf */
3023 pnode = lattice_leaf;
3024 for (i = 0; i < height; i++) {
3025 pnode = ehci_lattice_parent(pnode);
3026 }
3027
3028 return (pnode);
3029 }
3030
3031
3032 /*
3033 * ehci_leftmost_leaf:
3034 *
3035 * Find the leftmost leaf in the subtree specified by the node. Height refers
3036 * to number of nodes from the bottom of the tree to the node, including the
3037 * node.
3038 *
3039 * The formula for a zero based tree is:
3040 * 2^H * Node + 2^H - 1
3041 * The leaf of the tree is an array, convert the number for the array.
3042 * Subtract the size of nodes not in the array
3043 * 2^H * Node + 2^H - 1 - (EHCI_NUM_INTR_QH_LISTS - 1) =
3044 * 2^H * Node + 2^H - EHCI_NUM_INTR_QH_LISTS =
3045 * 2^H * (Node + 1) - EHCI_NUM_INTR_QH_LISTS
3046  *	          0
3047  *	       1     2
3048  *	      0 1   2 3
3049 */
3050 static uint_t
3051 ehci_leftmost_leaf(
3052 uint_t node,
3053 uint_t height)
3054 {
3055 return ((ehci_pow_2(height) * (node + 1)) - EHCI_NUM_INTR_QH_LISTS);
3056 }
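
/*
 * Worked example, assuming TREE_HEIGHT is 5 for the 32-leaf lattice
 * (interior nodes 0..30, leaves 31..62 mapping onto array positions
 * 0..31): for array leaf 0 and an 8ms interval,
 * ehci_lattice_height(8) = 5 - 3 = 2. ehci_find_periodic_node() starts
 * at lattice leaf 0 + 32 - 1 = 31 and follows two parent links
 * (31 -> 15 -> 7), so the periodic node is 7. Going back down,
 * ehci_leftmost_leaf(7, 2) = (2^2 * (7 + 1)) - 32 = 0, and that node
 * spans the 32/8 = 4 consecutive array leaves 0 through 3.
 */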
3057
3058
3059 /*
3060 * ehci_pow_2:
3061 *
3062 * Compute 2 to the power
3063 */
3064 static uint_t
3065 ehci_pow_2(uint_t x)
3066 {
3067 if (x == 0) {
3068
3069 return (1);
3070 } else {
3071
3072 return (2 << (x - 1));
3073 }
3074 }
3075
3076
3077 /*
3078 * ehci_log_2:
3079 *
3080 * Compute log base 2 of x
3081 */
3082 static uint_t
3083 ehci_log_2(uint_t x)
3084 {
3085 int i = 0;
3086
3087 while (x != 1) {
3088 x = x >> 1;
3089 i++;
3090 }
3091
3092 return (i);
3093 }
3094
3095
3096 /*
3097 * ehci_find_bestfit_hs_mask:
3098 *
3099 * Find the smask and cmask in the bandwidth allocation, and update the
3100 * bandwidth allocation.
3101 */
3102 static int
3103 ehci_find_bestfit_hs_mask(
3104 ehci_state_t *ehcip,
3105 uchar_t *smask,
3106 uint_t *pnode,
3107 usb_ep_descr_t *endpoint,
3108 uint_t bandwidth,
3109 int interval)
3110 {
3111 int i;
3112 uint_t elements, index;
3113 int array_leaf, best_array_leaf;
3114 uint_t node_bandwidth, best_node_bandwidth;
3115 uint_t leaf_count;
3116 uchar_t bw_mask;
3117 uchar_t best_smask;
3118
3119 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3120 "ehci_find_bestfit_hs_mask: ");
3121
3122 /* Get all the valid smasks */
3123 switch (ehci_pow_2(endpoint->bInterval - 1)) {
3124 case EHCI_INTR_1US_POLL:
3125 index = EHCI_1US_MASK_INDEX;
3126 elements = EHCI_INTR_1US_POLL;
3127 break;
3128 case EHCI_INTR_2US_POLL:
3129 index = EHCI_2US_MASK_INDEX;
3130 elements = EHCI_INTR_2US_POLL;
3131 break;
3132 case EHCI_INTR_4US_POLL:
3133 index = EHCI_4US_MASK_INDEX;
3134 elements = EHCI_INTR_4US_POLL;
3135 break;
3136 case EHCI_INTR_XUS_POLL:
3137 default:
3138 index = EHCI_XUS_MASK_INDEX;
3139 elements = EHCI_INTR_XUS_POLL;
3140 break;
3141 }
3142
3143 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3144
3145 /*
3146 * Because of the way the leaves are setup, we will automatically
3147 * hit the leftmost leaf of every possible node with this interval.
3148 */
3149 best_smask = 0x00;
3150 best_node_bandwidth = 0;
3151 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3152 /* Find the bandwidth mask */
3153 node_bandwidth = ehci_calculate_bw_availability_mask(ehcip,
3154 bandwidth, ehci_index[array_leaf], leaf_count, &bw_mask);
3155
3156 /*
3157 * If this node cannot support our requirements skip to the
3158 * next leaf.
3159 */
3160 if (bw_mask == 0x00) {
3161 continue;
3162 }
3163
3164 /*
3165 * Now make sure our bandwidth requirements can be
3166 * satisfied with one of smasks in this node.
3167 */
3168 *smask = 0x00;
3169 for (i = index; i < (index + elements); i++) {
3170 /* Check the start split mask value */
3171 			if (ehci_start_split_mask[i] & bw_mask) {
3172 				*smask = ehci_start_split_mask[i];
3173 break;
3174 }
3175 }
3176
3177 /*
3178 * If an appropriate smask is found save the information if:
3179 * o best_smask has not been found yet.
3180 * - or -
3181 * o This is the node with the least amount of bandwidth
3182 */
3183 if ((*smask != 0x00) &&
3184 ((best_smask == 0x00) ||
3185 (best_node_bandwidth > node_bandwidth))) {
3186
3187 best_node_bandwidth = node_bandwidth;
3188 best_array_leaf = array_leaf;
3189 best_smask = *smask;
3190 }
3191 }
3192
3193 /*
3194 * If we find node that can handle the bandwidth populate the
3195 * appropriate variables and return success.
3196 */
3197 if (best_smask) {
3198 *smask = best_smask;
3199 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3200 interval);
3201 ehci_update_bw_availability(ehcip, bandwidth,
3202 ehci_index[best_array_leaf], leaf_count, best_smask);
3203
3204 return (USB_SUCCESS);
3205 }
3206
3207 return (USB_FAILURE);
3208 }
3209
3210
3211 /*
3212 * ehci_find_bestfit_ls_intr_mask:
3213 *
3214 * Find the smask and cmask in the bandwidth allocation.
3215 */
3216 static int
3217 ehci_find_bestfit_ls_intr_mask(
3218 ehci_state_t *ehcip,
3219 uchar_t *smask,
3220 uchar_t *cmask,
3221 uint_t *pnode,
3222 uint_t sbandwidth,
3223 uint_t cbandwidth,
3224 int interval)
3225 {
3226 int i;
3227 uint_t elements, index;
3228 int array_leaf, best_array_leaf;
3229 uint_t node_sbandwidth, node_cbandwidth;
3230 uint_t best_node_bandwidth;
3231 uint_t leaf_count;
3232 uchar_t bw_smask, bw_cmask;
3233 uchar_t best_smask, best_cmask;
3234
3235 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3236 "ehci_find_bestfit_ls_intr_mask: ");
3237
3238 /* For low and full speed devices */
3239 index = EHCI_XUS_MASK_INDEX;
3240 elements = EHCI_INTR_4MS_POLL;
3241
3242 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3243
3244 /*
3245 * Because of the way the leaves are setup, we will automatically
3246 * hit the leftmost leaf of every possible node with this interval.
3247 */
3248 best_smask = 0x00;
3249 best_node_bandwidth = 0;
3250 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3251 /* Find the bandwidth mask */
3252 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3253 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3254 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3255 cbandwidth, ehci_index[array_leaf], leaf_count, &bw_cmask);
3256
3257 /*
3258 * If this node cannot support our requirements skip to the
3259 * next leaf.
3260 */
3261 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3262 continue;
3263 }
3264
3265 /*
3266 * Now make sure our bandwidth requirements can be
3267 * satisfied with one of smasks in this node.
3268 */
3269 *smask = 0x00;
3270 *cmask = 0x00;
3271 for (i = index; i < (index + elements); i++) {
3272 /* Check the start split mask value */
3273 			if ((ehci_start_split_mask[i] & bw_smask) &&
3274 			    (ehci_intr_complete_split_mask[i] & bw_cmask)) {
3275 				*smask = ehci_start_split_mask[i];
3276 				*cmask = ehci_intr_complete_split_mask[i];
3277 break;
3278 }
3279 }
3280
3281 /*
3282 * If an appropriate smask is found save the information if:
3283 * o best_smask has not been found yet.
3284 * - or -
3285 * o This is the node with the least amount of bandwidth
3286 */
3287 if ((*smask != 0x00) &&
3288 ((best_smask == 0x00) ||
3289 (best_node_bandwidth >
3290 (node_sbandwidth + node_cbandwidth)))) {
3291 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3292 best_array_leaf = array_leaf;
3293 best_smask = *smask;
3294 best_cmask = *cmask;
3295 }
3296 }
3297
3298 /*
3299 * If we find node that can handle the bandwidth populate the
3300 * appropriate variables and return success.
3301 */
3302 if (best_smask) {
3303 *smask = best_smask;
3304 *cmask = best_cmask;
3305 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3306 interval);
3307 ehci_update_bw_availability(ehcip, sbandwidth,
3308 ehci_index[best_array_leaf], leaf_count, best_smask);
3309 ehci_update_bw_availability(ehcip, cbandwidth,
3310 ehci_index[best_array_leaf], leaf_count, best_cmask);
3311
3312 return (USB_SUCCESS);
3313 }
3314
3315 return (USB_FAILURE);
3316 }
3317
3318
3319 /*
3320 * ehci_find_bestfit_sitd_in_mask:
3321 *
3322 * Find the smask and cmask in the bandwidth allocation.
3323 */
3324 static int
3325 ehci_find_bestfit_sitd_in_mask(
3326 ehci_state_t *ehcip,
3327 uchar_t *smask,
3328 uchar_t *cmask,
3329 uint_t *pnode,
3330 uint_t sbandwidth,
3331 uint_t cbandwidth,
3332 int interval)
3333 {
3334 int i, uFrames, found;
3335 int array_leaf, best_array_leaf;
3336 uint_t node_sbandwidth, node_cbandwidth;
3337 uint_t best_node_bandwidth;
3338 uint_t leaf_count;
3339 uchar_t bw_smask, bw_cmask;
3340 uchar_t best_smask, best_cmask;
3341
3342 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3343 "ehci_find_bestfit_sitd_in_mask: ");
3344
3345 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3346
3347 /*
3348 * Because of the way the leaves are setup, we will automatically
3349 * hit the leftmost leaf of every possible node with this interval.
3350 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
3351 */
3352 /*
3353 	 * Need to add an additional 2 uFrames, if the "L"ast
3354 	 * complete split is before uFrame 6. See section
3355 	 * 11.8.4 in the USB 2.0 Spec. Currently we do not support
3356 	 * the "Back Ptr", which means we can only support an IN
3357 	 * bandwidth of ~4*MAX_UFRAME_SITD_XFER.
3358 */
3359 uFrames = (cbandwidth / MAX_UFRAME_SITD_XFER) + 2;
3360 if (cbandwidth % MAX_UFRAME_SITD_XFER) {
3361 uFrames++;
3362 }
3363 if (uFrames > 6) {
3364
3365 return (USB_FAILURE);
3366 }
3367 *smask = 0x1;
3368 *cmask = 0x00;
3369 for (i = 0; i < uFrames; i++) {
3370 *cmask = *cmask << 1;
3371 *cmask |= 0x1;
3372 }
3373 /* cmask must start 2 frames after the smask */
3374 *cmask = *cmask << 2;
3375
3376 found = 0;
3377 best_smask = 0x00;
3378 best_node_bandwidth = 0;
3379 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3380 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3381 sbandwidth, ehci_index[array_leaf], leaf_count, &bw_smask);
3382 node_cbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3383 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3384 &bw_cmask);
3385
3386 /*
3387 * If this node cannot support our requirements skip to the
3388 * next leaf.
3389 */
3390 if ((bw_smask == 0x00) || (bw_cmask == 0x00)) {
3391 continue;
3392 }
3393
3394 for (i = 0; i < (EHCI_MAX_UFRAMES - uFrames - 2); i++) {
3395 if ((*smask & bw_smask) && (*cmask & bw_cmask)) {
3396 found = 1;
3397 break;
3398 }
3399 *smask = *smask << 1;
3400 *cmask = *cmask << 1;
3401 }
3402
3403 /*
3404 * If an appropriate smask is found save the information if:
3405 * o best_smask has not been found yet.
3406 * - or -
3407 * o This is the node with the least amount of bandwidth
3408 */
3409 if (found &&
3410 ((best_smask == 0x00) ||
3411 (best_node_bandwidth >
3412 (node_sbandwidth + node_cbandwidth)))) {
3413 best_node_bandwidth = node_sbandwidth + node_cbandwidth;
3414 best_array_leaf = array_leaf;
3415 best_smask = *smask;
3416 best_cmask = *cmask;
3417 }
3418 }
3419
3420 /*
3421 * If we find node that can handle the bandwidth populate the
3422 * appropriate variables and return success.
3423 */
3424 if (best_smask) {
3425 *smask = best_smask;
3426 *cmask = best_cmask;
3427 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3428 interval);
3429 ehci_update_bw_availability(ehcip, sbandwidth,
3430 ehci_index[best_array_leaf], leaf_count, best_smask);
3431 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3432 ehci_index[best_array_leaf], leaf_count, best_cmask);
3433
3434 return (USB_SUCCESS);
3435 }
3436
3437 return (USB_FAILURE);
3438 }
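
/*
 * Worked example, assuming MAX_UFRAME_SITD_XFER is the largest split
 * isochronous payload per micro-frame (188 bytes): an IN endpoint with
 * cbandwidth = 200 gets uFrames = 200/188 + 2 = 3, plus one more for
 * the 12-byte remainder, i.e. 4 complete split micro-frames. The
 * initial masks are then smask = 0x01 and cmask = 0x0F << 2 = 0x3C,
 * a start split in micro-frame 0 with complete splits in micro-frames
 * 2 through 5; the search loop may slide this window toward later
 * micro-frames if the per-micro-frame budget requires it.
 */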
3439
3440
3441 /*
3442 * ehci_find_bestfit_sitd_out_mask:
3443 *
3444 * Find the smask in the bandwidth allocation.
3445 */
3446 static int
3447 ehci_find_bestfit_sitd_out_mask(
3448 ehci_state_t *ehcip,
3449 uchar_t *smask,
3450 uint_t *pnode,
3451 uint_t sbandwidth,
3452 int interval)
3453 {
3454 int i, uFrames, found;
3455 int array_leaf, best_array_leaf;
3456 uint_t node_sbandwidth;
3457 uint_t best_node_bandwidth;
3458 uint_t leaf_count;
3459 uchar_t bw_smask;
3460 uchar_t best_smask;
3461
3462 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3463 "ehci_find_bestfit_sitd_out_mask: ");
3464
3465 leaf_count = EHCI_NUM_INTR_QH_LISTS/interval;
3466
3467 /*
3468 * Because of the way the leaves are setup, we will automatically
3469 * hit the leftmost leaf of every possible node with this interval.
3470 * You may only send MAX_UFRAME_SITD_XFER raw bits per uFrame.
3471 */
3472 *smask = 0x00;
3473 uFrames = sbandwidth / MAX_UFRAME_SITD_XFER;
3474 if (sbandwidth % MAX_UFRAME_SITD_XFER) {
3475 uFrames++;
3476 }
3477 for (i = 0; i < uFrames; i++) {
3478 *smask = *smask << 1;
3479 *smask |= 0x1;
3480 }
3481
3482 found = 0;
3483 best_smask = 0x00;
3484 best_node_bandwidth = 0;
3485 for (array_leaf = 0; array_leaf < interval; array_leaf++) {
3486 node_sbandwidth = ehci_calculate_bw_availability_mask(ehcip,
3487 MAX_UFRAME_SITD_XFER, ehci_index[array_leaf], leaf_count,
3488 &bw_smask);
3489
3490 /*
3491 * If this node cannot support our requirements skip to the
3492 * next leaf.
3493 */
3494 if (bw_smask == 0x00) {
3495 continue;
3496 }
3497
3498 /* You cannot have a start split on the 8th uFrame */
3499 for (i = 0; (*smask & 0x80) == 0; i++) {
3500 if (*smask & bw_smask) {
3501 found = 1;
3502 break;
3503 }
3504 *smask = *smask << 1;
3505 }
3506
3507 /*
3508 * If an appropriate smask is found save the information if:
3509 * o best_smask has not been found yet.
3510 * - or -
3511 * o This is the node with the least amount of bandwidth
3512 */
3513 if (found &&
3514 ((best_smask == 0x00) ||
3515 (best_node_bandwidth > node_sbandwidth))) {
3516 best_node_bandwidth = node_sbandwidth;
3517 best_array_leaf = array_leaf;
3518 best_smask = *smask;
3519 }
3520 }
3521
3522 /*
3523 * If we find node that can handle the bandwidth populate the
3524 * appropriate variables and return success.
3525 */
3526 if (best_smask) {
3527 *smask = best_smask;
3528 *pnode = ehci_find_periodic_node(ehci_index[best_array_leaf],
3529 interval);
3530 ehci_update_bw_availability(ehcip, MAX_UFRAME_SITD_XFER,
3531 ehci_index[best_array_leaf], leaf_count, best_smask);
3532
3533 return (USB_SUCCESS);
3534 }
3535
3536 return (USB_FAILURE);
3537 }
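
/*
 * Worked example, assuming MAX_UFRAME_SITD_XFER is 188 bytes: an OUT
 * endpoint with sbandwidth = 400 needs 400/188 = 2 full micro-frames
 * plus one more for the 24-byte remainder, so the initial mask is
 * smask = 0x07 (start splits in micro-frames 0..2). The search loop
 * may slide the window toward later micro-frames, but stops before a
 * start split could land in micro-frame 7.
 */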
3538
3539
3540 /*
3541 * ehci_calculate_bw_availability_mask:
3542 *
3543 * Returns the "total bandwidth used" in this node.
3544 * Populates bw_mask with the uFrames that can support the bandwidth.
3545 *
3546 * If all the Frames cannot support this bandwidth, then bw_mask
3547  * If none of the uFrames can support this bandwidth, then bw_mask
3548  * is set to 0x00 and the "total bandwidth used" is invalid.
3549 static uint_t
3550 ehci_calculate_bw_availability_mask(
3551 ehci_state_t *ehcip,
3552 uint_t bandwidth,
3553 int leaf,
3554 int leaf_count,
3555 uchar_t *bw_mask)
3556 {
3557 int i, j;
3558 uchar_t bw_uframe;
3559 int uframe_total;
3560 ehci_frame_bandwidth_t *fbp;
3561 uint_t total_bandwidth = 0;
3562
3563 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3564 "ehci_calculate_bw_availability_mask: leaf %d leaf count %d",
3565 leaf, leaf_count);
3566
3567 /* Start by saying all uFrames are available */
3568 *bw_mask = 0xFF;
3569
3570 	for (i = 0; (i < leaf_count) && (*bw_mask != 0x00); i++) {
3571 fbp = &ehcip->ehci_frame_bandwidth[leaf + i];
3572
3573 total_bandwidth += fbp->ehci_allocated_frame_bandwidth;
3574
3575 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3576 /*
3577 * If the uFrame in bw_mask is available check to see if
3578 * it can support the additional bandwidth.
3579 */
3580 bw_uframe = (*bw_mask & (0x1 << j));
3581 uframe_total =
3582 fbp->ehci_micro_frame_bandwidth[j] +
3583 bandwidth;
3584 if ((bw_uframe) &&
3585 (uframe_total > HS_PERIODIC_BANDWIDTH)) {
3586 *bw_mask = *bw_mask & ~bw_uframe;
3587 }
3588 }
3589 }
3590
3591 USB_DPRINTF_L4(PRINT_MASK_BW, ehcip->ehci_log_hdl,
3592 "ehci_calculate_bw_availability_mask: bandwidth mask 0x%x",
3593 *bw_mask);
3594
3595 return (total_bandwidth);
3596 }
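
/*
 * Worked example (illustrative numbers): with a requested bandwidth of
 * 100 bytes and leaf_count = 4, the routine sums
 * ehci_allocated_frame_bandwidth over the 4 leaves and, for each
 * micro-frame j of each leaf, clears bit j of *bw_mask whenever
 * ehci_micro_frame_bandwidth[j] + 100 would exceed
 * HS_PERIODIC_BANDWIDTH. A returned mask of 0xFC, for instance, means
 * micro-frames 0 and 1 are already too full in at least one leaf,
 * while micro-frames 2..7 can still absorb the additional 100 bytes.
 */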
3597
3598
3599 /*
3600 * ehci_update_bw_availability:
3601 *
3602 * The leftmost leaf needs to be in terms of array position and
3603 * not the actual lattice position.
3604 */
3605 static void
3606 ehci_update_bw_availability(
3607 ehci_state_t *ehcip,
3608 int bandwidth,
3609 int leftmost_leaf,
3610 int leaf_count,
3611 uchar_t mask)
3612 {
3613 int i, j;
3614 ehci_frame_bandwidth_t *fbp;
3615 int uFrame_bandwidth[8];
3616
3617 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3618 "ehci_update_bw_availability: "
3619 "leaf %d count %d bandwidth 0x%x mask 0x%x",
3620 leftmost_leaf, leaf_count, bandwidth, mask);
3621
3622 ASSERT(leftmost_leaf < 32);
3623 ASSERT(leftmost_leaf >= 0);
3624
3625 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3626 if (mask & 0x1) {
3627 uFrame_bandwidth[j] = bandwidth;
3628 } else {
3629 uFrame_bandwidth[j] = 0;
3630 }
3631
3632 mask = mask >> 1;
3633 }
3634
3635 	/* Update all the affected leaves with the bandwidth */
3636 for (i = 0; i < leaf_count; i++) {
3637 fbp = &ehcip->ehci_frame_bandwidth[leftmost_leaf + i];
3638
3639 for (j = 0; j < EHCI_MAX_UFRAMES; j++) {
3640 fbp->ehci_micro_frame_bandwidth[j] +=
3641 uFrame_bandwidth[j];
3642 fbp->ehci_allocated_frame_bandwidth +=
3643 uFrame_bandwidth[j];
3644 }
3645 }
3646 }
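
/*
 * Worked example: a call with bandwidth = 100, leftmost_leaf = 0,
 * leaf_count = 4 and mask = 0x05 charges 100 bytes to micro-frames 0
 * and 2 of leaves 0 through 3, adding 200 bytes to each of those
 * leaves' ehci_allocated_frame_bandwidth totals. Deallocation reuses
 * the same routine with a negative bandwidth value, as in
 * ehci_deallocate_high_speed_bandwidth() above.
 */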
3647
3648 /*
3649 * Miscellaneous functions
3650 */
3651
3652 /*
3653 * ehci_obtain_state:
3654 *
3655 * NOTE: This function is also called from POLLED MODE.
3656 */
3657 ehci_state_t *
3658 ehci_obtain_state(dev_info_t *dip)
3659 {
3660 int instance = ddi_get_instance(dip);
3661
3662 ehci_state_t *state = ddi_get_soft_state(ehci_statep, instance);
3663
3664 ASSERT(state != NULL);
3665
3666 return (state);
3667 }
3668
3669
3670 /*
3671 * ehci_state_is_operational:
3672 *
3673 * Check the Host controller state and return proper values.
3674 */
3675 int
3676 ehci_state_is_operational(ehci_state_t *ehcip)
3677 {
3678 int val;
3679
3680 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3681
3682 switch (ehcip->ehci_hc_soft_state) {
3683 case EHCI_CTLR_INIT_STATE:
3684 case EHCI_CTLR_SUSPEND_STATE:
3685 val = USB_FAILURE;
3686 break;
3687 case EHCI_CTLR_OPERATIONAL_STATE:
3688 val = USB_SUCCESS;
3689 break;
3690 case EHCI_CTLR_ERROR_STATE:
3691 val = USB_HC_HARDWARE_ERROR;
3692 break;
3693 default:
3694 val = USB_FAILURE;
3695 break;
3696 }
3697
3698 return (val);
3699 }
3700
3701
3702 /*
3703 * ehci_do_soft_reset
3704 *
3705 * Do soft reset of ehci host controller.
3706 */
3707 int
3708 ehci_do_soft_reset(ehci_state_t *ehcip)
3709 {
3710 usb_frame_number_t before_frame_number, after_frame_number;
3711 ehci_regs_t *ehci_save_regs;
3712
3713 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3714
3715 /* Increment host controller error count */
3716 ehcip->ehci_hc_error++;
3717
3718 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3719 "ehci_do_soft_reset:"
3720 "Reset ehci host controller 0x%x", ehcip->ehci_hc_error);
3721
3722 /*
3723 * Allocate space for saving current Host Controller
3724 * registers. Don't do any recovery if allocation
3725 * fails.
3726 */
3727 ehci_save_regs = (ehci_regs_t *)
3728 kmem_zalloc(sizeof (ehci_regs_t), KM_NOSLEEP);
3729
3730 if (ehci_save_regs == NULL) {
3731 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3732 "ehci_do_soft_reset: kmem_zalloc failed");
3733
3734 return (USB_FAILURE);
3735 }
3736
3737 /* Save current ehci registers */
3738 ehci_save_regs->ehci_command = Get_OpReg(ehci_command);
3739 ehci_save_regs->ehci_interrupt = Get_OpReg(ehci_interrupt);
3740 ehci_save_regs->ehci_ctrl_segment = Get_OpReg(ehci_ctrl_segment);
3741 ehci_save_regs->ehci_async_list_addr = Get_OpReg(ehci_async_list_addr);
3742 ehci_save_regs->ehci_config_flag = Get_OpReg(ehci_config_flag);
3743 ehci_save_regs->ehci_periodic_list_base =
3744 Get_OpReg(ehci_periodic_list_base);
3745
3746 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3747 "ehci_do_soft_reset: Save reg = 0x%p", (void *)ehci_save_regs);
3748
3749 /* Disable all list processing and interrupts */
3750 Set_OpReg(ehci_command, Get_OpReg(ehci_command) &
3751 ~(EHCI_CMD_ASYNC_SCHED_ENABLE | EHCI_CMD_PERIODIC_SCHED_ENABLE));
3752
3753 /* Disable all EHCI interrupts */
3754 Set_OpReg(ehci_interrupt, 0);
3755
3756 	/* Wait for a few milliseconds */
3757 drv_usecwait(EHCI_SOF_TIMEWAIT);
3758
3759 /* Do light soft reset of ehci host controller */
3760 Set_OpReg(ehci_command,
3761 Get_OpReg(ehci_command) | EHCI_CMD_LIGHT_HC_RESET);
3762
3763 USB_DPRINTF_L3(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3764 "ehci_do_soft_reset: Reset in progress");
3765
3766 /* Wait for reset to complete */
3767 drv_usecwait(EHCI_RESET_TIMEWAIT);
3768
3769 /*
3770 * Restore previous saved EHCI register value
3771 * into the current EHCI registers.
3772 */
3773 Set_OpReg(ehci_ctrl_segment, (uint32_t)
3774 ehci_save_regs->ehci_ctrl_segment);
3775
3776 Set_OpReg(ehci_periodic_list_base, (uint32_t)
3777 ehci_save_regs->ehci_periodic_list_base);
3778
3779 Set_OpReg(ehci_async_list_addr, (uint32_t)
3780 ehci_save_regs->ehci_async_list_addr);
3781
3782 /*
3783 * For some reason this register might get nulled out by
3784 	 * the Uli M1575 South Bridge. To work around the hardware
3785 * problem, check the value after write and retry if the
3786 * last write fails.
3787 */
3788 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
3789 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
3790 (ehci_save_regs->ehci_async_list_addr !=
3791 Get_OpReg(ehci_async_list_addr))) {
3792 int retry = 0;
3793
3794 Set_OpRegRetry(ehci_async_list_addr, (uint32_t)
3795 ehci_save_regs->ehci_async_list_addr, retry);
3796 if (retry >= EHCI_MAX_RETRY) {
3797 USB_DPRINTF_L2(PRINT_MASK_ATTA,
3798 ehcip->ehci_log_hdl, "ehci_do_soft_reset:"
3799 " ASYNCLISTADDR write failed.");
3800
3801 return (USB_FAILURE);
3802 }
3803 USB_DPRINTF_L2(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
3804 "ehci_do_soft_reset: ASYNCLISTADDR "
3805 "write failed, retry=%d", retry);
3806 }
3807
3808 Set_OpReg(ehci_config_flag, (uint32_t)
3809 ehci_save_regs->ehci_config_flag);
3810
3811 /* Enable both Asynchronous and Periodic Schedule if necessary */
3812 ehci_toggle_scheduler(ehcip);
3813
3814 /*
3815 	 * Set ehci_interrupt to enable all interrupts except the Root
3816 	 * Hub Status change and async schedule advance interrupts.
3817 */
3818 Set_OpReg(ehci_interrupt, EHCI_INTR_HOST_SYSTEM_ERROR |
3819 EHCI_INTR_FRAME_LIST_ROLLOVER |
3820 EHCI_INTR_USB_ERROR |
3821 EHCI_INTR_USB);
3822
3823 /*
3824 * Deallocate the space that allocated for saving
3825 * HC registers.
3826 */
3827 kmem_free((void *) ehci_save_regs, sizeof (ehci_regs_t));
3828
3829 /*
3830 * Set the desired interrupt threshold, frame list size (if
3831 	 * applicable) and turn on the EHCI host controller.
3832 */
3833 Set_OpReg(ehci_command, ((Get_OpReg(ehci_command) &
3834 ~EHCI_CMD_INTR_THRESHOLD) |
3835 (EHCI_CMD_01_INTR | EHCI_CMD_HOST_CTRL_RUN)));
3836
3837 /* Wait 10ms for EHCI to start sending SOF */
3838 ehci_wait_reset(ehcip, EHCI_RESET_TIMEWAIT);
3839
3840 /*
3841 * Get the current usb frame number before waiting for
3842	 * a few milliseconds.
3843 */
3844 before_frame_number = ehci_get_current_frame_number(ehcip);
3845
3846	/* Wait for a few milliseconds */
3847 drv_usecwait(EHCI_SOF_TIMEWAIT);
3848
3849 /*
3850 * Get the current usb frame number after waiting for
3851	 * a few milliseconds.
3852 */
3853 after_frame_number = ehci_get_current_frame_number(ehcip);
3854
3855 USB_DPRINTF_L4(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3856 "ehci_do_soft_reset: Before Frame Number 0x%llx "
3857 "After Frame Number 0x%llx",
3858 (unsigned long long)before_frame_number,
3859 (unsigned long long)after_frame_number);
3860
3861 if ((after_frame_number <= before_frame_number) &&
3862 (Get_OpReg(ehci_status) & EHCI_STS_HOST_CTRL_HALTED)) {
3863
3864 USB_DPRINTF_L2(PRINT_MASK_INTR, ehcip->ehci_log_hdl,
3865 "ehci_do_soft_reset: Soft reset failed");
3866
3867 return (USB_FAILURE);
3868 }
3869
3870 return (USB_SUCCESS);
3871 }
3872
3873
3874 /*
3875 * ehci_get_xfer_attrs:
3876 *
3877 * Get the attributes of a particular xfer.
3878 *
3879 * NOTE: This function is also called from POLLED MODE.
3880 */
3881 usb_req_attrs_t
3882 ehci_get_xfer_attrs(
3883 ehci_state_t *ehcip,
3884 ehci_pipe_private_t *pp,
3885 ehci_trans_wrapper_t *tw)
3886 {
3887 usb_ep_descr_t *eptd = &pp->pp_pipe_handle->p_ep;
3888 usb_req_attrs_t attrs = USB_ATTRS_NONE;
3889
3890 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3891 "ehci_get_xfer_attrs:");
3892
3893 switch (eptd->bmAttributes & USB_EP_ATTR_MASK) {
3894 case USB_EP_ATTR_CONTROL:
3895 attrs = ((usb_ctrl_req_t *)
3896 tw->tw_curr_xfer_reqp)->ctrl_attributes;
3897 break;
3898 case USB_EP_ATTR_BULK:
3899 attrs = ((usb_bulk_req_t *)
3900 tw->tw_curr_xfer_reqp)->bulk_attributes;
3901 break;
3902 case USB_EP_ATTR_INTR:
3903 attrs = ((usb_intr_req_t *)
3904 tw->tw_curr_xfer_reqp)->intr_attributes;
3905 break;
3906 }
3907
3908 return (attrs);
3909 }
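/*
 * Editor's illustration: the attributes returned here are whatever the
 * client driver set on its request structure. For example (hypothetical
 * variable name), a bulk request submitted with
 *
 *	bulk_reqp->bulk_attributes =
 *	    USB_ATTRS_SHORT_XFER_OK | USB_ATTRS_AUTOCLEARING;
 *
 * would make ehci_get_xfer_attrs() return that same mask, which the
 * completion path can then consult (e.g. to decide whether a short
 * transfer should be treated as an error). USB_ATTRS_* are the standard
 * USBA usb_req_attrs_t flags.
 */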
3910
3911
3912 /*
3913 * ehci_get_current_frame_number:
3914 *
3915	 * Get the current software-based usb frame number.
3916 */
3917 usb_frame_number_t
3918 ehci_get_current_frame_number(ehci_state_t *ehcip)
3919 {
3920 usb_frame_number_t usb_frame_number;
3921 usb_frame_number_t ehci_fno, micro_frame_number;
3922
3923 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3924
3925 ehci_fno = ehcip->ehci_fno;
3926 micro_frame_number = Get_OpReg(ehci_frame_index) & 0x3FFF;
3927
3928 /*
3929	 * Calculate the current software-based usb frame number.
3930	 *
3931	 * This code accounts for the fact that the frame number is
3932	 * updated by the Host Controller before the ehci driver
3933	 * gets a FrameListRollover interrupt that adjusts the
3934	 * upper part of the frame number.
3935	 *
3936	 * Refer to the EHCI specification 1.0, section 2.3.2, page 21.
3937 */
3938 micro_frame_number = ((micro_frame_number & 0x1FFF) |
3939 ehci_fno) + (((micro_frame_number & 0x3FFF) ^
3940 ehci_fno) & 0x2000);
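/*
 * Editor's worked example (assumes ehci_fno keeps its low 13 bits clear
 * and is advanced by the FrameListRollover handler): with ehci_fno ==
 * 0x2000 and a hardware index that has just wrapped to 0x0010, the
 * expression above yields
 *
 *	((0x0010 & 0x1FFF) | 0x2000) +
 *	    (((0x0010 & 0x3FFF) ^ 0x2000) & 0x2000)
 *	    = 0x2010 + 0x2000 = 0x4010
 *
 * so the count keeps increasing even though the rollover interrupt has
 * not yet updated ehci_fno.
 */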
3941
3942 /*
3943	 * A micro frame is equivalent to 125 usec. Eight
3944	 * micro frames are equivalent to one millisecond,
3945	 * or one usb frame.
3946 */
3947 usb_frame_number = micro_frame_number >>
3948 EHCI_uFRAMES_PER_USB_FRAME_SHIFT;
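/*
 * Editor's note: assuming EHCI_uFRAMES_PER_USB_FRAME_SHIFT is 3 (eight
 * 125 usec microframes per 1 ms usb frame), the microframe count 0x4010
 * from the example above corresponds to usb frame 0x4010 >> 3 = 0x802.
 */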
3949
3950 USB_DPRINTF_L4(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
3951 "ehci_get_current_frame_number: "
3952 "Current usb uframe number = 0x%llx "
3953 "Current usb frame number = 0x%llx",
3954 (unsigned long long)micro_frame_number,
3955 (unsigned long long)usb_frame_number);
3956
3957 return (usb_frame_number);
3958 }
3959
3960
3961 /*
3962 * ehci_cpr_cleanup:
3963 *
3964	 * Clean up ehci state and other ehci-specific information across
3965	 * Checkpoint Resume (CPR).
3966 */
3967 static void
3968 ehci_cpr_cleanup(ehci_state_t *ehcip)
3969 {
3970 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3971
3972 /* Reset software part of usb frame number */
3973 ehcip->ehci_fno = 0;
3974 }
3975
3976
3977 /*
3978 * ehci_wait_for_sof:
3979 *
3980	 * Wait for a couple of SOF interrupts
3981 */
3982 int
3983 ehci_wait_for_sof(ehci_state_t *ehcip)
3984 {
3985 usb_frame_number_t before_frame_number, after_frame_number;
3986 int error = USB_SUCCESS;
3987
3988 USB_DPRINTF_L4(PRINT_MASK_LISTS,
3989 ehcip->ehci_log_hdl, "ehci_wait_for_sof");
3990
3991 ASSERT(mutex_owned(&ehcip->ehci_int_mutex));
3992
3993 error = ehci_state_is_operational(ehcip);
3994
3995 if (error != USB_SUCCESS) {
3996
3997 return (error);
3998 }
3999
4000 /* Get the current usb frame number before waiting for two SOFs */
4001 before_frame_number = ehci_get_current_frame_number(ehcip);
4002
4003 mutex_exit(&ehcip->ehci_int_mutex);
4004
4005	/* Wait for a few milliseconds */
4006 delay(drv_usectohz(EHCI_SOF_TIMEWAIT));
4007
4008 mutex_enter(&ehcip->ehci_int_mutex);
4009
4010	/* Get the current usb frame number after waking up */
4011 after_frame_number = ehci_get_current_frame_number(ehcip);
4012
4013 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4014 "ehci_wait_for_sof: framenumber: before 0x%llx "
4015 "after 0x%llx",
4016 (unsigned long long)before_frame_number,
4017 (unsigned long long)after_frame_number);
4018
4019	/* Return failure if the usb frame number has not changed */
4020 if (after_frame_number <= before_frame_number) {
4021
4022 if ((ehci_do_soft_reset(ehcip)) != USB_SUCCESS) {
4023
4024 USB_DPRINTF_L0(PRINT_MASK_LISTS,
4025 ehcip->ehci_log_hdl, "No SOF interrupts");
4026
4027 /* Set host controller soft state to error */
4028 ehcip->ehci_hc_soft_state = EHCI_CTLR_ERROR_STATE;
4029
4030 return (USB_FAILURE);
4031 }
4032
4033 }
4034
4035 return (USB_SUCCESS);
4036 }
4037
4038 /*
4039	 * Toggle the async/periodic schedule based on the opened pipe count.
4040	 * During pipe cleanup (in the pipe reset case), the pipe's QH is
4041	 * temporarily disabled, but the TW on the pipe is not freed. In this
4042	 * case, we need to disable the async/periodic schedule for some
4043	 * incompatible hardware. Otherwise, the hardware will overwrite the
4044	 * software's configuration of the QH.
4045 */
4046 void
4047 ehci_toggle_scheduler_on_pipe(ehci_state_t *ehcip)
4048 {
4049 uint_t temp_reg, cmd_reg;
4050
4051 cmd_reg = Get_OpReg(ehci_command);
4052 temp_reg = cmd_reg;
4053
4054 /*
4055 * Enable/Disable asynchronous scheduler, and
4056	 * turn on/off the async list doorbell
4057 */
4058 if (ehcip->ehci_open_async_count) {
4059 if ((ehcip->ehci_async_req_count > 0) &&
4060 ((cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE) == 0)) {
4061 /*
4062 * For some reason this address might get nulled out by
4063 * the ehci chip. Set it here just in case it is null.
4064 */
4065 Set_OpReg(ehci_async_list_addr,
4066 ehci_qh_cpu_to_iommu(ehcip,
4067 ehcip->ehci_head_of_async_sched_list));
4068
4069 /*
4070 * For some reason this register might get nulled out by
4071	 * the Uli M1575 Southbridge. To work around the HW
4072	 * problem, check the value after the write and retry if the
4073 * last write fails.
4074 *
4075 * If the ASYNCLISTADDR remains "stuck" after
4076 * EHCI_MAX_RETRY retries, then the M1575 is broken
4077 * and is stuck in an inconsistent state and is about
4078 * to crash the machine with a trn_oor panic when it
4079 * does a DMA read from 0x0. It is better to panic
4080 * now rather than wait for the trn_oor crash; this
4081 * way Customer Service will have a clean signature
4082 * that indicts the M1575 chip rather than a
4083 * mysterious and hard-to-diagnose trn_oor panic.
4084 */
4085 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4086 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4087 (ehci_qh_cpu_to_iommu(ehcip,
4088 ehcip->ehci_head_of_async_sched_list) !=
4089 Get_OpReg(ehci_async_list_addr))) {
4090 int retry = 0;
4091
4092 Set_OpRegRetry(ehci_async_list_addr,
4093 ehci_qh_cpu_to_iommu(ehcip,
4094 ehcip->ehci_head_of_async_sched_list),
4095 retry);
4096 if (retry >= EHCI_MAX_RETRY)
4097 cmn_err(CE_PANIC,
4098 "ehci_toggle_scheduler_on_pipe: "
4099 "ASYNCLISTADDR write failed.");
4100
4101 USB_DPRINTF_L2(PRINT_MASK_ATTA,
4102 ehcip->ehci_log_hdl,
4103 "ehci_toggle_scheduler_on_pipe:"
4104 " ASYNCLISTADDR write failed, retry=%d",
4105 retry);
4106 }
4107
4108 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4109 }
4110 } else {
4111 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4112 }
4113
4114 if (ehcip->ehci_open_periodic_count) {
4115 if ((ehcip->ehci_periodic_req_count > 0) &&
4116 ((cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE) == 0)) {
4117 /*
4118	 * For some reason this address gets nulled out by
4119 * the ehci chip. Set it here just in case it is null.
4120 */
4121 Set_OpReg(ehci_periodic_list_base,
4122 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4123 0xFFFFF000));
4124 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4125 }
4126 } else {
4127 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4128 }
4129
4130 /* Just an optimization */
4131 if (temp_reg != cmd_reg) {
4132 Set_OpReg(ehci_command, cmd_reg);
4133 }
4134 }
4135
4136
4137 /*
4138 * ehci_toggle_scheduler:
4139 *
4140	 * Turn the schedules on or off based on the open pipe count.
4141 */
4142 void
4143 ehci_toggle_scheduler(ehci_state_t *ehcip)
4144 {
4145 uint_t temp_reg, cmd_reg;
4146
4147 /*
4148	 * As a performance optimization, the bits only need to change when
4149	 * the async or periodic request count transitions to 1 or to 0.
4150	 *
4151	 * The related bits are already set correctly if
4152	 * the async and periodic req counts are both > 1,
4153	 * OR the async req count is > 1 and there is no open periodic pipe,
4154	 * OR the periodic req count is > 1 and there is no open async pipe.
4155 */
4156 if (((ehcip->ehci_async_req_count > 1) &&
4157 (ehcip->ehci_periodic_req_count > 1)) ||
4158 ((ehcip->ehci_async_req_count > 1) &&
4159 (ehcip->ehci_open_periodic_count == 0)) ||
4160 ((ehcip->ehci_periodic_req_count > 1) &&
4161 (ehcip->ehci_open_async_count == 0))) {
4162 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4163 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4164 "async/periodic bits no need to change");
4165
4166 return;
4167 }
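/*
 * Editor's illustration: if ehci_async_req_count is 3 and there are no
 * open periodic pipes, the second clause above holds; the async enable
 * bit was already set when the count first reached 1, so the command
 * register is left untouched.
 */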
4168
4169 cmd_reg = Get_OpReg(ehci_command);
4170 temp_reg = cmd_reg;
4171
4172 /*
4173 * Enable/Disable asynchronous scheduler, and
4174	 * turn on/off the async list doorbell
4175 */
4176 if (ehcip->ehci_async_req_count > 1) {
4177	/* the async bit is already enabled */
4178 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4179 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4180 "async bit already enabled: cmd_reg=0x%x", cmd_reg);
4181 } else if (ehcip->ehci_async_req_count == 1) {
4182 if (!(cmd_reg & EHCI_CMD_ASYNC_SCHED_ENABLE)) {
4183 /*
4184 * For some reason this address might get nulled out by
4185 * the ehci chip. Set it here just in case it is null.
4186 * If it's not null, we should not reset the
4187 * ASYNCLISTADDR, because it's updated by hardware to
4188 * point to the next queue head to be executed.
4189 */
4190 if (!Get_OpReg(ehci_async_list_addr)) {
4191 Set_OpReg(ehci_async_list_addr,
4192 ehci_qh_cpu_to_iommu(ehcip,
4193 ehcip->ehci_head_of_async_sched_list));
4194 }
4195
4196 /*
4197 * For some reason this register might get nulled out by
4198	 * the Uli M1575 Southbridge. To work around the HW
4199	 * problem, check the value after the write and retry if the
4200 * last write fails.
4201 *
4202 * If the ASYNCLISTADDR remains "stuck" after
4203 * EHCI_MAX_RETRY retries, then the M1575 is broken
4204 * and is stuck in an inconsistent state and is about
4205 * to crash the machine with a trn_oor panic when it
4206 * does a DMA read from 0x0. It is better to panic
4207 * now rather than wait for the trn_oor crash; this
4208 * way Customer Service will have a clean signature
4209 * that indicts the M1575 chip rather than a
4210 * mysterious and hard-to-diagnose trn_oor panic.
4211 */
4212 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4213 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575) &&
4214 (ehci_qh_cpu_to_iommu(ehcip,
4215 ehcip->ehci_head_of_async_sched_list) !=
4216 Get_OpReg(ehci_async_list_addr))) {
4217 int retry = 0;
4218
4219 Set_OpRegRetry(ehci_async_list_addr,
4220 ehci_qh_cpu_to_iommu(ehcip,
4221 ehcip->ehci_head_of_async_sched_list),
4222 retry);
4223 if (retry >= EHCI_MAX_RETRY)
4224 cmn_err(CE_PANIC,
4225 "ehci_toggle_scheduler: "
4226 "ASYNCLISTADDR write failed.");
4227
4228 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4229 ehcip->ehci_log_hdl,
4230 "ehci_toggle_scheduler: ASYNCLISTADDR "
4231 "write failed, retry=%d", retry);
4232 }
4233 }
4234 cmd_reg |= EHCI_CMD_ASYNC_SCHED_ENABLE;
4235 } else {
4236 cmd_reg &= ~EHCI_CMD_ASYNC_SCHED_ENABLE;
4237 }
4238
4239 if (ehcip->ehci_periodic_req_count > 1) {
4240	/* the periodic bit is already enabled. */
4241 USB_DPRINTF_L4(PRINT_MASK_ATTA,
4242 ehcip->ehci_log_hdl, "ehci_toggle_scheduler:"
4243 "periodic bit already enabled: cmd_reg=0x%x", cmd_reg);
4244 } else if (ehcip->ehci_periodic_req_count == 1) {
4245 if (!(cmd_reg & EHCI_CMD_PERIODIC_SCHED_ENABLE)) {
4246 /*
4247	 * For some reason this address gets nulled out by
4248 * the ehci chip. Set it here just in case it is null.
4249 */
4250 Set_OpReg(ehci_periodic_list_base,
4251 (uint32_t)(ehcip->ehci_pflt_cookie.dmac_address &
4252 0xFFFFF000));
4253 }
4254 cmd_reg |= EHCI_CMD_PERIODIC_SCHED_ENABLE;
4255 } else {
4256 cmd_reg &= ~EHCI_CMD_PERIODIC_SCHED_ENABLE;
4257 }
4258
4259 /* Just an optimization */
4260 if (temp_reg != cmd_reg) {
4261 Set_OpReg(ehci_command, cmd_reg);
4262
4263 /* To make sure the command register is updated correctly */
4264 if ((ehcip->ehci_vendor_id == PCI_VENDOR_ULi_M1575) &&
4265 (ehcip->ehci_device_id == PCI_DEVICE_ULi_M1575)) {
4266 int retry = 0;
4267
4268 Set_OpRegRetry(ehci_command, cmd_reg, retry);
4269 USB_DPRINTF_L3(PRINT_MASK_ATTA,
4270 ehcip->ehci_log_hdl,
4271 "ehci_toggle_scheduler: CMD write failed, retry=%d",
4272 retry);
4273 }
4274
4275 }
4276 }
4277
4278 /*
4279 * ehci print functions
4280 */
4281
4282 /*
4283 * ehci_print_caps:
4284 */
4285 void
4286 ehci_print_caps(ehci_state_t *ehcip)
4287 {
4288 uint_t i;
4289
4290 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4291 "\n\tUSB 2.0 Host Controller Characteristics\n");
4292
4293 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4294 "Caps Length: 0x%x Version: 0x%x\n",
4295 Get_8Cap(ehci_caps_length), Get_16Cap(ehci_version));
4296
4297 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4298 "Structural Parameters\n");
4299 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4300 "Port indicators: %s", (Get_Cap(ehci_hcs_params) &
4301 EHCI_HCS_PORT_INDICATOR) ? "Yes" : "No");
4302 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4303 "No of Classic host controllers: 0x%x",
4304 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_COMP_CTRLS)
4305 >> EHCI_HCS_NUM_COMP_CTRL_SHIFT);
4306 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4307 "No of ports per Classic host controller: 0x%x",
4308 (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS_CC)
4309 >> EHCI_HCS_NUM_PORTS_CC_SHIFT);
4310 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4311 "Port routing rules: %s", (Get_Cap(ehci_hcs_params) &
4312 EHCI_HCS_PORT_ROUTING_RULES) ? "Yes" : "No");
4313 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4314 "Port power control: %s", (Get_Cap(ehci_hcs_params) &
4315 EHCI_HCS_PORT_POWER_CONTROL) ? "Yes" : "No");
4316 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4317 "No of root hub ports: 0x%x\n",
4318 Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS);
4319
4320 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4321 "Capability Parameters\n");
4322 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4323 "EHCI extended capability: %s", (Get_Cap(ehci_hcc_params) &
4324 EHCI_HCC_EECP) ? "Yes" : "No");
4325 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4326 "Isoch schedule threshold: 0x%x",
4327 Get_Cap(ehci_hcc_params) & EHCI_HCC_ISOCH_SCHED_THRESHOLD);
4328 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4329 "Async schedule park capability: %s", (Get_Cap(ehci_hcc_params) &
4330 EHCI_HCC_ASYNC_SCHED_PARK_CAP) ? "Yes" : "No");
4331 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4332 "Programmable frame list flag: %s", (Get_Cap(ehci_hcc_params) &
4333 EHCI_HCC_PROG_FRAME_LIST_FLAG) ? "256/512/1024" : "1024");
4334 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4335 "64bit addressing capability: %s\n", (Get_Cap(ehci_hcc_params) &
4336 EHCI_HCC_64BIT_ADDR_CAP) ? "Yes" : "No");
4337
4338 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4339 "Classic Port Route Description");
4340
4341 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4342 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4343 "\tPort Route 0x%x: 0x%x", i, Get_8Cap(ehci_port_route[i]));
4344 }
4345 }
4346
4347
4348 /*
4349 * ehci_print_regs:
4350 */
4351 void
4352 ehci_print_regs(ehci_state_t *ehcip)
4353 {
4354 uint_t i;
4355
4356 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4357 "\n\tEHCI%d Operational Registers\n",
4358 ddi_get_instance(ehcip->ehci_dip));
4359
4360 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4361 "Command: 0x%x Status: 0x%x",
4362 Get_OpReg(ehci_command), Get_OpReg(ehci_status));
4363 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4364 "Interrupt: 0x%x Frame Index: 0x%x",
4365 Get_OpReg(ehci_interrupt), Get_OpReg(ehci_frame_index));
4366 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4367 "Control Segment: 0x%x Periodic List Base: 0x%x",
4368 Get_OpReg(ehci_ctrl_segment), Get_OpReg(ehci_periodic_list_base));
4369 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4370 "Async List Addr: 0x%x Config Flag: 0x%x",
4371 Get_OpReg(ehci_async_list_addr), Get_OpReg(ehci_config_flag));
4372
4373 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4374 "Root Hub Port Status");
4375
4376 for (i = 0; i < (Get_Cap(ehci_hcs_params) & EHCI_HCS_NUM_PORTS); i++) {
4377 USB_DPRINTF_L3(PRINT_MASK_ATTA, ehcip->ehci_log_hdl,
4378 "\tPort Status 0x%x: 0x%x ", i,
4379 Get_OpReg(ehci_rh_port_status[i]));
4380 }
4381 }
4382
4383
4384 /*
4385 * ehci_print_qh:
4386 */
4387 void
4388 ehci_print_qh(
4389 ehci_state_t *ehcip,
4390 ehci_qh_t *qh)
4391 {
4392 uint_t i;
4393
4394 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4395 "ehci_print_qh: qh = 0x%p", (void *)qh);
4396
4397 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4398 "\tqh_link_ptr: 0x%x ", Get_QH(qh->qh_link_ptr));
4399 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4400 "\tqh_ctrl: 0x%x ", Get_QH(qh->qh_ctrl));
4401 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4402 "\tqh_split_ctrl: 0x%x ", Get_QH(qh->qh_split_ctrl));
4403 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4404 "\tqh_curr_qtd: 0x%x ", Get_QH(qh->qh_curr_qtd));
4405 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4406 "\tqh_next_qtd: 0x%x ", Get_QH(qh->qh_next_qtd));
4407 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4408 "\tqh_alt_next_qtd: 0x%x ", Get_QH(qh->qh_alt_next_qtd));
4409 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4410 "\tqh_status: 0x%x ", Get_QH(qh->qh_status));
4411
4412 for (i = 0; i < 5; i++) {
4413 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4414 "\tqh_buf[%d]: 0x%x ", i, Get_QH(qh->qh_buf[i]));
4415 }
4416
4417 for (i = 0; i < 5; i++) {
4418 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4419 "\tqh_buf_high[%d]: 0x%x ",
4420 i, Get_QH(qh->qh_buf_high[i]));
4421 }
4422
4423 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4424 "\tqh_dummy_qtd: 0x%x ", Get_QH(qh->qh_dummy_qtd));
4425 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4426 "\tqh_prev: 0x%x ", Get_QH(qh->qh_prev));
4427 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4428 "\tqh_state: 0x%x ", Get_QH(qh->qh_state));
4429 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4430 "\tqh_reclaim_next: 0x%x ", Get_QH(qh->qh_reclaim_next));
4431 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4432 "\tqh_reclaim_frame: 0x%x ", Get_QH(qh->qh_reclaim_frame));
4433 }
4434
4435
4436 /*
4437 * ehci_print_qtd:
4438 */
4439 void
4440 ehci_print_qtd(
4441 ehci_state_t *ehcip,
4442 ehci_qtd_t *qtd)
4443 {
4444 uint_t i;
4445
4446 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4447 "ehci_print_qtd: qtd = 0x%p", (void *)qtd);
4448
4449 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4450 "\tqtd_next_qtd: 0x%x ", Get_QTD(qtd->qtd_next_qtd));
4451 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4452 "\tqtd_alt_next_qtd: 0x%x ", Get_QTD(qtd->qtd_alt_next_qtd));
4453 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4454 "\tqtd_ctrl: 0x%x ", Get_QTD(qtd->qtd_ctrl));
4455
4456 for (i = 0; i < 5; i++) {
4457 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4458 "\tqtd_buf[%d]: 0x%x ", i, Get_QTD(qtd->qtd_buf[i]));
4459 }
4460
4461 for (i = 0; i < 5; i++) {
4462 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4463 "\tqtd_buf_high[%d]: 0x%x ",
4464 i, Get_QTD(qtd->qtd_buf_high[i]));
4465 }
4466
4467 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4468 "\tqtd_trans_wrapper: 0x%x ", Get_QTD(qtd->qtd_trans_wrapper));
4469 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4470 "\tqtd_tw_next_qtd: 0x%x ", Get_QTD(qtd->qtd_tw_next_qtd));
4471 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4472 "\tqtd_active_qtd_next: 0x%x ", Get_QTD(qtd->qtd_active_qtd_next));
4473 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4474 "\tqtd_active_qtd_prev: 0x%x ", Get_QTD(qtd->qtd_active_qtd_prev));
4475 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4476 "\tqtd_state: 0x%x ", Get_QTD(qtd->qtd_state));
4477 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4478 "\tqtd_ctrl_phase: 0x%x ", Get_QTD(qtd->qtd_ctrl_phase));
4479 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4480 "\tqtd_xfer_offs: 0x%x ", Get_QTD(qtd->qtd_xfer_offs));
4481 USB_DPRINTF_L3(PRINT_MASK_LISTS, ehcip->ehci_log_hdl,
4482 "\tqtd_xfer_len: 0x%x ", Get_QTD(qtd->qtd_xfer_len));
4483 }
4484
4485 /*
4486 * ehci kstat functions
4487 */
4488
4489 /*
4490 * ehci_create_stats:
4491 *
4492 * Allocate and initialize the ehci kstat structures
4493 */
4494 void
4495 ehci_create_stats(ehci_state_t *ehcip)
4496 {
4497 char kstatname[KSTAT_STRLEN];
4498 const char *dname = ddi_driver_name(ehcip->ehci_dip);
4499 char *usbtypes[USB_N_COUNT_KSTATS] =
4500 {"ctrl", "isoch", "bulk", "intr"};
4501 uint_t instance = ehcip->ehci_instance;
4502 ehci_intrs_stats_t *isp;
4503 int i;
4504
4505 if (EHCI_INTRS_STATS(ehcip) == NULL) {
4506 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,intrs",
4507 dname, instance);
4508 EHCI_INTRS_STATS(ehcip) = kstat_create("usba", instance,
4509 kstatname, "usb_interrupts", KSTAT_TYPE_NAMED,
4510 sizeof (ehci_intrs_stats_t) / sizeof (kstat_named_t),
4511 KSTAT_FLAG_PERSISTENT);
4512
4513 if (EHCI_INTRS_STATS(ehcip)) {
4514 isp = EHCI_INTRS_STATS_DATA(ehcip);
4515 kstat_named_init(&isp->ehci_sts_total,
4516 "Interrupts Total", KSTAT_DATA_UINT64);
4517 kstat_named_init(&isp->ehci_sts_not_claimed,
4518 "Not Claimed", KSTAT_DATA_UINT64);
4519 kstat_named_init(&isp->ehci_sts_async_sched_status,
4520 "Async schedule status", KSTAT_DATA_UINT64);
4521 kstat_named_init(&isp->ehci_sts_periodic_sched_status,
4522 "Periodic sched status", KSTAT_DATA_UINT64);
4523 kstat_named_init(&isp->ehci_sts_empty_async_schedule,
4524 "Empty async schedule", KSTAT_DATA_UINT64);
4525 kstat_named_init(&isp->ehci_sts_host_ctrl_halted,
4526 "Host controller Halted", KSTAT_DATA_UINT64);
4527 kstat_named_init(&isp->ehci_sts_async_advance_intr,
4528 "Intr on async advance", KSTAT_DATA_UINT64);
4529 kstat_named_init(&isp->ehci_sts_host_system_error_intr,
4530 "Host system error", KSTAT_DATA_UINT64);
4531 kstat_named_init(&isp->ehci_sts_frm_list_rollover_intr,
4532 "Frame list rollover", KSTAT_DATA_UINT64);
4533 kstat_named_init(&isp->ehci_sts_rh_port_change_intr,
4534 "Port change detect", KSTAT_DATA_UINT64);
4535 kstat_named_init(&isp->ehci_sts_usb_error_intr,
4536 "USB error interrupt", KSTAT_DATA_UINT64);
4537 kstat_named_init(&isp->ehci_sts_usb_intr,
4538 "USB interrupt", KSTAT_DATA_UINT64);
4539
4540 EHCI_INTRS_STATS(ehcip)->ks_private = ehcip;
4541 EHCI_INTRS_STATS(ehcip)->ks_update = nulldev;
4542 kstat_install(EHCI_INTRS_STATS(ehcip));
4543 }
4544 }
4545
4546 if (EHCI_TOTAL_STATS(ehcip) == NULL) {
4547 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,total",
4548 dname, instance);
4549 EHCI_TOTAL_STATS(ehcip) = kstat_create("usba", instance,
4550 kstatname, "usb_byte_count", KSTAT_TYPE_IO, 1,
4551 KSTAT_FLAG_PERSISTENT);
4552
4553 if (EHCI_TOTAL_STATS(ehcip)) {
4554 kstat_install(EHCI_TOTAL_STATS(ehcip));
4555 }
4556 }
4557
4558 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4559 if (ehcip->ehci_count_stats[i] == NULL) {
4560 (void) snprintf(kstatname, KSTAT_STRLEN, "%s%d,%s",
4561 dname, instance, usbtypes[i]);
4562 ehcip->ehci_count_stats[i] = kstat_create("usba",
4563 instance, kstatname, "usb_byte_count",
4564 KSTAT_TYPE_IO, 1, KSTAT_FLAG_PERSISTENT);
4565
4566 if (ehcip->ehci_count_stats[i]) {
4567 kstat_install(ehcip->ehci_count_stats[i]);
4568 }
4569 }
4570 }
4571 }
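/*
 * Editor's note: once installed, these counters can be read from userland
 * with the kstat(1M) utility, e.g. (instance 0 assumed):
 *
 *	# kstat -m usba -i 0 -n 'ehci0,intrs'
 *
 * The module name "usba" and the statistic names correspond to the
 * kstat_create() and kstat_named_init() calls above.
 */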
4572
4573
4574 /*
4575 * ehci_destroy_stats:
4576 *
4577 * Clean up ehci kstat structures
4578 */
4579 void
4580 ehci_destroy_stats(ehci_state_t *ehcip)
4581 {
4582 int i;
4583
4584 if (EHCI_INTRS_STATS(ehcip)) {
4585 kstat_delete(EHCI_INTRS_STATS(ehcip));
4586 EHCI_INTRS_STATS(ehcip) = NULL;
4587 }
4588
4589 if (EHCI_TOTAL_STATS(ehcip)) {
4590 kstat_delete(EHCI_TOTAL_STATS(ehcip));
4591 EHCI_TOTAL_STATS(ehcip) = NULL;
4592 }
4593
4594 for (i = 0; i < USB_N_COUNT_KSTATS; i++) {
4595 if (ehcip->ehci_count_stats[i]) {
4596 kstat_delete(ehcip->ehci_count_stats[i]);
4597 ehcip->ehci_count_stats[i] = NULL;
4598 }
4599 }
4600 }
4601
4602
4603 /*
4604 * ehci_do_intrs_stats:
4605 *
4606 * ehci status information
4607 */
4608 void
4609 ehci_do_intrs_stats(
4610 ehci_state_t *ehcip,
4611 int val)
4612 {
4613 if (EHCI_INTRS_STATS(ehcip)) {
4614 EHCI_INTRS_STATS_DATA(ehcip)->ehci_sts_total.value.ui64++;
4615 switch (val) {
4616 case EHCI_STS_ASYNC_SCHED_STATUS:
4617 EHCI_INTRS_STATS_DATA(ehcip)->
4618 ehci_sts_async_sched_status.value.ui64++;
4619 break;
4620 case EHCI_STS_PERIODIC_SCHED_STATUS:
4621 EHCI_INTRS_STATS_DATA(ehcip)->
4622 ehci_sts_periodic_sched_status.value.ui64++;
4623 break;
4624 case EHCI_STS_EMPTY_ASYNC_SCHEDULE:
4625 EHCI_INTRS_STATS_DATA(ehcip)->
4626 ehci_sts_empty_async_schedule.value.ui64++;
4627 break;
4628 case EHCI_STS_HOST_CTRL_HALTED:
4629 EHCI_INTRS_STATS_DATA(ehcip)->
4630 ehci_sts_host_ctrl_halted.value.ui64++;
4631 break;
4632 case EHCI_STS_ASYNC_ADVANCE_INTR:
4633 EHCI_INTRS_STATS_DATA(ehcip)->
4634 ehci_sts_async_advance_intr.value.ui64++;
4635 break;
4636 case EHCI_STS_HOST_SYSTEM_ERROR_INTR:
4637 EHCI_INTRS_STATS_DATA(ehcip)->
4638 ehci_sts_host_system_error_intr.value.ui64++;
4639 break;
4640 case EHCI_STS_FRM_LIST_ROLLOVER_INTR:
4641 EHCI_INTRS_STATS_DATA(ehcip)->
4642 ehci_sts_frm_list_rollover_intr.value.ui64++;
4643 break;
4644 case EHCI_STS_RH_PORT_CHANGE_INTR:
4645 EHCI_INTRS_STATS_DATA(ehcip)->
4646 ehci_sts_rh_port_change_intr.value.ui64++;
4647 break;
4648 case EHCI_STS_USB_ERROR_INTR:
4649 EHCI_INTRS_STATS_DATA(ehcip)->
4650 ehci_sts_usb_error_intr.value.ui64++;
4651 break;
4652 case EHCI_STS_USB_INTR:
4653 EHCI_INTRS_STATS_DATA(ehcip)->
4654 ehci_sts_usb_intr.value.ui64++;
4655 break;
4656 default:
4657 EHCI_INTRS_STATS_DATA(ehcip)->
4658 ehci_sts_not_claimed.value.ui64++;
4659 break;
4660 }
4661 }
4662 }
4663
4664
4665 /*
4666 * ehci_do_byte_stats:
4667 *
4668 * ehci data xfer information
4669 */
4670 void
4671 ehci_do_byte_stats(
4672 ehci_state_t *ehcip,
4673 size_t len,
4674 uint8_t attr,
4675 uint8_t addr)
4676 {
4677 uint8_t type = attr & USB_EP_ATTR_MASK;
4678 uint8_t dir = addr & USB_EP_DIR_MASK;
4679
4680 if (dir == USB_EP_DIR_IN) {
4681 EHCI_TOTAL_STATS_DATA(ehcip)->reads++;
4682 EHCI_TOTAL_STATS_DATA(ehcip)->nread += len;
4683 switch (type) {
4684 case USB_EP_ATTR_CONTROL:
4685 EHCI_CTRL_STATS(ehcip)->reads++;
4686 EHCI_CTRL_STATS(ehcip)->nread += len;
4687 break;
4688 case USB_EP_ATTR_BULK:
4689 EHCI_BULK_STATS(ehcip)->reads++;
4690 EHCI_BULK_STATS(ehcip)->nread += len;
4691 break;
4692 case USB_EP_ATTR_INTR:
4693 EHCI_INTR_STATS(ehcip)->reads++;
4694 EHCI_INTR_STATS(ehcip)->nread += len;
4695 break;
4696 case USB_EP_ATTR_ISOCH:
4697 EHCI_ISOC_STATS(ehcip)->reads++;
4698 EHCI_ISOC_STATS(ehcip)->nread += len;
4699 break;
4700 }
4701 } else if (dir == USB_EP_DIR_OUT) {
4702 EHCI_TOTAL_STATS_DATA(ehcip)->writes++;
4703 EHCI_TOTAL_STATS_DATA(ehcip)->nwritten += len;
4704 switch (type) {
4705 case USB_EP_ATTR_CONTROL:
4706 EHCI_CTRL_STATS(ehcip)->writes++;
4707 EHCI_CTRL_STATS(ehcip)->nwritten += len;
4708 break;
4709 case USB_EP_ATTR_BULK:
4710 EHCI_BULK_STATS(ehcip)->writes++;
4711 EHCI_BULK_STATS(ehcip)->nwritten += len;
4712 break;
4713 case USB_EP_ATTR_INTR:
4714 EHCI_INTR_STATS(ehcip)->writes++;
4715 EHCI_INTR_STATS(ehcip)->nwritten += len;
4716 break;
4717 case USB_EP_ATTR_ISOCH:
4718 EHCI_ISOC_STATS(ehcip)->writes++;
4719 EHCI_ISOC_STATS(ehcip)->nwritten += len;
4720 break;
4721 }
4722 }
4723 }
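/*
 * Editor's illustration: for a 512-byte bulk IN transfer (endpoint
 * address 0x81, so the USB_EP_DIR_IN bit is set), this routine bumps
 * reads and adds 512 to nread in both the total and the bulk kstats.
 */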