8368 remove warlock leftovers from usr/src/uts
--- old/usr/src/uts/common/io/1394/adapters/hci1394_q.c
+++ new/usr/src/uts/common/io/1394/adapters/hci1394_q.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License, Version 1.0 only
6 6 * (the "License"). You may not use this file except in compliance
7 7 * with the License.
8 8 *
9 9 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10 10 * or http://www.opensolaris.org/os/licensing.
11 11 * See the License for the specific language governing permissions
12 12 * and limitations under the License.
13 13 *
14 14 * When distributing Covered Code, include this CDDL HEADER in each
15 15 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16 16 * If applicable, add the following below this CDDL HEADER, with the
17 17 * fields enclosed by brackets "[]" replaced with your own identifying
18 18 * information: Portions Copyright [yyyy] [name of copyright owner]
19 19 *
20 20 * CDDL HEADER END
21 21 */
22 22 /*
23 23 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 -#pragma ident "%Z%%M% %I% %E% SMI"
28 -
29 27 /*
30 28 * hci1394_q.c
31 29 * This code decouples some of the OpenHCI async descriptor logic/structures
32 30 * from the async processing. The goal was to combine as much of the
33 31 * duplicate code as possible for the different types of async transfers
34 32 * without going too overboard.
35 33 *
36 34 * There are two parts to the Q, the descriptor buffer and the data buffer.
37 35 * For the most part, data to be transmitted and data which is received go
38 36 * in the data buffers. The information about where to get the data and
39 37 * where to put it resides in the descriptor buffers. There are exceptions to this.
40 38 */
41 39
42 40
43 41 #include <sys/types.h>
44 42 #include <sys/conf.h>
45 43 #include <sys/ddi.h>
46 44 #include <sys/modctl.h>
47 45 #include <sys/stat.h>
48 46 #include <sys/sunddi.h>
49 47 #include <sys/cmn_err.h>
50 48 #include <sys/kmem.h>
51 49 #include <sys/note.h>
52 50
53 51 #include <sys/1394/adapters/hci1394.h>
54 52
55 53
56 54 static int hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size,
57 55 uint32_t *io_addr);
58 56 static void hci1394_q_unreserve(hci1394_q_buf_t *qbuf);
59 57 static void hci1394_q_buf_setup(hci1394_q_buf_t *qbuf);
60 58 static void hci1394_q_reset(hci1394_q_handle_t q_handle);
61 59 static void hci1394_q_next_buf(hci1394_q_buf_t *qbuf);
62 60
63 61 static void hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle,
64 62 hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
65 63 uint_t hdrsize);
66 64 static void hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle,
67 65 hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
68 66 uint_t hdrsize);
69 67 static void hci1394_q_at_write_OL(hci1394_q_handle_t q_handle,
70 68 hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, uint32_t io_addr,
71 69 uint_t datasize);
72 70 static void hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
73 71 uint8_t *data, uint_t datasize);
74 72 static void hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf,
75 73 hci1394_q_cmd_t *cmd, h1394_mblk_t *mblk);
76 74
77 75 static void hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle,
78 76 hci1394_q_buf_t *qbuf, uint32_t io_addr, uint_t datasize);
79 77
80 -_NOTE(SCHEME_PROTECTS_DATA("unique", msgb))
81 -
82 78 /*
83 79 * hci1394_q_init()
84 80 * Initialize a Q. A Q consists of a descriptor buffer and a data buffer and
85 81 * can be either an AT or AR Q. hci1394_q_init() returns a handle which
86 82 * should be used for the rest of the hci1394_q_* calls.
87 83 */
88 84 int
89 85 hci1394_q_init(hci1394_drvinfo_t *drvinfo,
90 86 hci1394_ohci_handle_t ohci_handle, hci1394_q_info_t *qinfo,
91 87 hci1394_q_handle_t *q_handle)
92 88 {
93 89 hci1394_q_buf_t *desc;
94 90 hci1394_q_buf_t *data;
95 91 hci1394_buf_parms_t parms;
96 92 hci1394_q_t *q;
97 93 int status;
98 94 int index;
99 95
100 96
101 97 ASSERT(drvinfo != NULL);
102 98 ASSERT(qinfo != NULL);
103 99 ASSERT(q_handle != NULL);
104 100 TNF_PROBE_0_DEBUG(hci1394_q_init_enter, HCI1394_TNF_HAL_STACK, "");
105 101
106 102 /*
107 103 * allocate the memory to track this Q. Initialize the internal Q
108 104 * structure.
109 105 */
110 106 q = kmem_alloc(sizeof (hci1394_q_t), KM_SLEEP);
111 107 q->q_drvinfo = drvinfo;
112 108 q->q_info = *qinfo;
113 109 q->q_ohci = ohci_handle;
114 110 mutex_init(&q->q_mutex, NULL, MUTEX_DRIVER, drvinfo->di_iblock_cookie);
115 111 desc = &q->q_desc;
116 112 data = &q->q_data;
117 113
118 114 /*
119 115 * Allocate the Descriptor buffer.
120 116 *
121 117 * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
122 118 * after we have tested the multiple cookie code on x86.
123 119 */
124 120 parms.bp_length = qinfo->qi_desc_size;
125 121 parms.bp_max_cookies = 1;
126 122 parms.bp_alignment = 16;
127 123 status = hci1394_buf_alloc(drvinfo, &parms, &desc->qb_buf,
128 124 &desc->qb_buf_handle);
129 125 if (status != DDI_SUCCESS) {
130 126 mutex_destroy(&q->q_mutex);
131 127 kmem_free(q, sizeof (hci1394_q_t));
132 128 *q_handle = NULL;
133 129 TNF_PROBE_0(hci1394_q_init_bae_fail, HCI1394_TNF_HAL_ERROR, "");
134 130 TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK,
135 131 "");
136 132 return (DDI_FAILURE);
137 133 }
138 134
139 135 /* Copy the buffer cookies into our local cookie array */
140 136 desc->qb_cookie[0] = desc->qb_buf.bi_cookie;
141 137 for (index = 1; index < desc->qb_buf.bi_cookie_count; index++) {
142 138 ddi_dma_nextcookie(desc->qb_buf.bi_dma_handle,
143 139 &desc->qb_buf.bi_cookie);
144 140 desc->qb_cookie[index] = desc->qb_buf.bi_cookie;
145 141 }
146 142
147 143 /*
148 144 * Allocate the Data buffer.
149 145 *
150 146 * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
151 147 * after we have tested the multiple cookie code on x86.
152 148 */
153 149 parms.bp_length = qinfo->qi_data_size;
154 150 parms.bp_max_cookies = 1;
155 151 parms.bp_alignment = 16;
156 152 status = hci1394_buf_alloc(drvinfo, &parms, &data->qb_buf,
157 153 &data->qb_buf_handle);
158 154 if (status != DDI_SUCCESS) {
159 155 /* Free the allocated Descriptor buffer */
160 156 hci1394_buf_free(&desc->qb_buf_handle);
161 157
162 158 mutex_destroy(&q->q_mutex);
163 159 kmem_free(q, sizeof (hci1394_q_t));
164 160 *q_handle = NULL;
165 161 TNF_PROBE_0(hci1394_q_init_baa_fail, HCI1394_TNF_HAL_ERROR, "");
166 162 TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK,
167 163 "");
168 164 return (DDI_FAILURE);
169 165 }
170 166
171 167 /*
172 168 * We must have at least 2 ARQ data buffers. If we only have one, we
173 169 * will artificially create 2. We must have 2 so that we always have a
174 170 * descriptor with free data space to write AR data to. When one is
175 171 * empty, it will take us a bit to get a new descriptor back into the
176 172 * chain.
177 173 */
178 174 if ((qinfo->qi_mode == HCI1394_ARQ) &&
179 175 (data->qb_buf.bi_cookie_count == 1)) {
180 176 data->qb_buf.bi_cookie_count = 2;
181 177 data->qb_cookie[0] = data->qb_buf.bi_cookie;
182 178 data->qb_cookie[0].dmac_size /= 2;
183 179 data->qb_cookie[1] = data->qb_cookie[0];
184 180 data->qb_cookie[1].dmac_laddress =
185 181 data->qb_cookie[0].dmac_laddress +
186 182 data->qb_cookie[0].dmac_size;
187 183 data->qb_cookie[1].dmac_address =
188 184 data->qb_cookie[0].dmac_address +
189 185 data->qb_cookie[0].dmac_size;
190 186
191 187 /* We have more than 1 cookie or we are an AT Q */
192 188 } else {
193 189 /* Copy the buffer cookies into our local cookie array */
194 190 data->qb_cookie[0] = data->qb_buf.bi_cookie;
195 191 for (index = 1; index < data->qb_buf.bi_cookie_count; index++) {
196 192 ddi_dma_nextcookie(data->qb_buf.bi_dma_handle,
197 193 &data->qb_buf.bi_cookie);
198 194 data->qb_cookie[index] = data->qb_buf.bi_cookie;
199 195 }
200 196 }
201 197
202 198 /* The top and bottom of the Q are only set once */
203 199 desc->qb_ptrs.qp_top = desc->qb_buf.bi_kaddr;
204 200 desc->qb_ptrs.qp_bottom = desc->qb_buf.bi_kaddr +
205 201 desc->qb_buf.bi_real_length - 1;
206 202 data->qb_ptrs.qp_top = data->qb_buf.bi_kaddr;
207 203 data->qb_ptrs.qp_bottom = data->qb_buf.bi_kaddr +
208 204 data->qb_buf.bi_real_length - 1;
209 205
210 206 /*
211 207 * reset the Q pointers to their original settings. Set up IM
212 208 * descriptors if this is an AR Q.
213 209 */
214 210 hci1394_q_reset(q);
215 211
216 212 /* if this is an AT Q, create a queued list for the AT descriptors */
217 213 if (qinfo->qi_mode == HCI1394_ATQ) {
218 214 hci1394_tlist_init(drvinfo, NULL, &q->q_queued_list);
219 215 }
220 216
221 217 *q_handle = q;
222 218
223 219 TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK, "");
224 220
225 221 return (DDI_SUCCESS);
226 222 }
227 223
228 224
229 225 /*
230 226 * hci1394_q_fini()
231 227 * Cleanup after a successful hci1394_q_init(). Notice that a pointer to the
232 228 * handle is used for the parameter. fini() will set your handle to NULL
233 229 * before returning.
234 230 */
235 231 void
236 232 hci1394_q_fini(hci1394_q_handle_t *q_handle)
237 233 {
238 234 hci1394_q_t *q;
239 235
240 236 ASSERT(q_handle != NULL);
241 237 TNF_PROBE_0_DEBUG(hci1394_q_fini_enter, HCI1394_TNF_HAL_STACK, "");
242 238
243 239 q = *q_handle;
244 240 if (q->q_info.qi_mode == HCI1394_ATQ) {
245 241 hci1394_tlist_fini(&q->q_queued_list);
246 242 }
247 243 mutex_destroy(&q->q_mutex);
248 244 hci1394_buf_free(&q->q_desc.qb_buf_handle);
249 245 hci1394_buf_free(&q->q_data.qb_buf_handle);
250 246 kmem_free(q, sizeof (hci1394_q_t));
251 247 *q_handle = NULL;
252 248
253 249 TNF_PROBE_0_DEBUG(hci1394_q_fini_exit, HCI1394_TNF_HAL_STACK, "");
254 250 }
255 251
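Since hci1394_q_init() returns the handle used by every later hci1394_q_*() call, and hci1394_q_fini() clears the caller's handle through the pointer it is given, the pairing looks roughly like the sketch below (example_q_lifecycle() is a hypothetical wrapper; drvinfo, the OHCI handle, and the qinfo callbacks are assumed to come from the HAL's attach path):

    static int
    example_q_lifecycle(hci1394_drvinfo_t *drvinfo,
        hci1394_ohci_handle_t ohci, hci1394_q_info_t *qinfo)
    {
            hci1394_q_handle_t q;

            if (hci1394_q_init(drvinfo, ohci, qinfo, &q) != DDI_SUCCESS)
                    return (DDI_FAILURE);

            /* ... submit work with hci1394_q_at*() / hci1394_q_ar*() ... */

            hci1394_q_fini(&q);
            ASSERT(q == NULL);      /* fini() NULLs the caller's handle */
            return (DDI_SUCCESS);
    }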
256 252
257 253 /*
258 254 * hci1394_q_buf_setup()
259 255 * Initialization of buffer pointers which are present in both the descriptor
260 256 * buffer and data buffer (No reason to duplicate the code)
261 257 */
262 258 static void
263 259 hci1394_q_buf_setup(hci1394_q_buf_t *qbuf)
264 260 {
265 261 ASSERT(qbuf != NULL);
266 262 TNF_PROBE_0_DEBUG(hci1394_q_buf_setup_enter, HCI1394_TNF_HAL_STACK, "");
267 263
268 264 /* start with the first cookie */
269 265 qbuf->qb_ptrs.qp_current_buf = 0;
270 266 qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
271 267 qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
272 268 qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
273 269 qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
274 270 qbuf->qb_ptrs.qp_offset = 0;
275 271
276 272 /*
277 273 * The free_buf and free pointers will change every time an ACK (of some
278 274 * type) is processed. Free is the last byte in the last cookie.
279 275 */
280 276 qbuf->qb_ptrs.qp_free_buf = qbuf->qb_buf.bi_cookie_count - 1;
281 277 qbuf->qb_ptrs.qp_free = qbuf->qb_ptrs.qp_bottom;
282 278
283 279 /*
284 280 * Start with no space to write descriptors. We first need to call
285 281 * hci1394_q_reserve() before calling hci1394_q_at_write_O*().
286 282 */
287 283 qbuf->qb_ptrs.qp_resv_size = 0;
288 284
289 285 TNF_PROBE_0_DEBUG(hci1394_q_buf_setup_exit, HCI1394_TNF_HAL_STACK, "");
290 286 }
291 287
292 288
293 289 /*
294 290 * hci1394_q_reset()
295 291 * Resets the buffers to an initial state. This should be called during
296 292 * attach and resume.
297 293 */
298 294 static void
299 295 hci1394_q_reset(hci1394_q_handle_t q_handle)
300 296 {
301 297 hci1394_q_buf_t *desc;
302 298 hci1394_q_buf_t *data;
303 299 int index;
304 300
305 301 ASSERT(q_handle != NULL);
306 302 TNF_PROBE_0_DEBUG(hci1394_q_reset_enter, HCI1394_TNF_HAL_STACK, "");
307 303
308 304 mutex_enter(&q_handle->q_mutex);
309 305 desc = &q_handle->q_desc;
310 306 data = &q_handle->q_data;
311 307
312 308 hci1394_q_buf_setup(desc);
313 309 hci1394_q_buf_setup(data);
314 310
315 311 /* DMA starts off stopped, no previous descriptor to link from */
316 312 q_handle->q_dma_running = B_FALSE;
317 313 q_handle->q_block_cnt = 0;
318 314 q_handle->q_previous = NULL;
319 315
320 316 /* If this is an AR Q, setup IM's for the data buffers that we have */
321 317 if (q_handle->q_info.qi_mode == HCI1394_ARQ) {
322 318 /*
323 319 * This points to where to find the first IM descriptor. Since
324 320 * we just reset the pointers in hci1394_q_buf_setup(), the
325 321 * first IM we write below will be found at the top of the Q.
326 322 */
327 323 q_handle->q_head = desc->qb_ptrs.qp_top;
328 324
329 325 for (index = 0; index < data->qb_buf.bi_cookie_count; index++) {
330 326 hci1394_q_ar_write_IM(q_handle, desc,
331 327 data->qb_cookie[index].dmac_address,
332 328 data->qb_cookie[index].dmac_size);
333 329 }
334 330
335 331 /*
336 332 * The space left in the current IM is the size of the buffer.
337 333 * The current buffer is the first buffer added to the AR Q.
338 334 */
339 335 q_handle->q_space_left = data->qb_cookie[0].dmac_size;
340 336 }
341 337
342 338 mutex_exit(&q_handle->q_mutex);
343 339 TNF_PROBE_0_DEBUG(hci1394_q_reset_exit, HCI1394_TNF_HAL_STACK, "");
344 340 }
345 341
346 342
347 343 /*
348 344 * hci1394_q_resume()
349 345 * This is called during a resume (after a successful suspend). Currently
350 346 * we only call reset. Since this is not a time critical function, we will
351 347 * leave this as a separate function to increase readability.
352 348 */
353 349 void
354 350 hci1394_q_resume(hci1394_q_handle_t q_handle)
355 351 {
356 352 ASSERT(q_handle != NULL);
357 353 TNF_PROBE_0_DEBUG(hci1394_q_resume_enter, HCI1394_TNF_HAL_STACK, "");
358 354 hci1394_q_reset(q_handle);
359 355 TNF_PROBE_0_DEBUG(hci1394_q_resume_exit, HCI1394_TNF_HAL_STACK, "");
360 356 }
361 357
362 358
363 359 /*
364 360 * hci1394_q_stop()
365 361 * This call informs us that a DMA engine has been stopped. It does not
366 362 * perform the actual stop. We need to know this so that when we add a
367 363 * new descriptor, we do a start instead of a wake.
368 364 */
369 365 void
370 366 hci1394_q_stop(hci1394_q_handle_t q_handle)
371 367 {
372 368 ASSERT(q_handle != NULL);
373 369 TNF_PROBE_0_DEBUG(hci1394_q_stop_enter, HCI1394_TNF_HAL_STACK, "");
374 370 mutex_enter(&q_handle->q_mutex);
375 371 q_handle->q_dma_running = B_FALSE;
376 372 mutex_exit(&q_handle->q_mutex);
377 373 TNF_PROBE_0_DEBUG(hci1394_q_stop_exit, HCI1394_TNF_HAL_STACK, "");
378 374 }
379 375
380 376
381 377 /*
382 378 * hci1394_q_reserve()
383 379 * Reserve space in the AT descriptor or data buffer. This ensures that we
384 380 * can get a contiguous buffer. Descriptors have to be in a contiguous
385 381 * buffer. Data does not have to be in a contiguous buffer but we do this to
386 382 * reduce complexity. For systems with small page sizes (e.g. x86), this
387 383 * could result in inefficient use of the data buffers when sending large
388 384 * data blocks (this only applies to non-physical block write ATREQs and
389 385 * block read ATRESP). Since it looks like most protocols that use large data
390 386 * blocks (like SBP-2) use physical transfers to do this (due to their
391 387 * efficiency), this will probably not be a real world problem. If it turns
392 388 * out to be a problem, the options are to force a single cookie for the data
393 389 * buffer, allow multiple cookies and have a larger data space, or change the
394 390 * data code to use an OMI, OM, OL descriptor sequence (instead of OMI, OL).
395 391 */
396 392 static int
397 393 hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size, uint32_t *io_addr)
398 394 {
399 395 uint_t aligned_size;
400 396
401 397
402 398 ASSERT(qbuf != NULL);
403 399 TNF_PROBE_0_DEBUG(hci1394_q_reserve_enter, HCI1394_TNF_HAL_STACK, "");
404 400
405 401 /* Save backup of pointers in case we have to unreserve */
406 402 qbuf->qb_backup_ptrs = qbuf->qb_ptrs;
407 403
408 404 /*
409 405 * Make sure all allocations are quadlet aligned. The data doesn't have
410 406 * to be, so we will force it to be.
411 407 */
412 408 aligned_size = HCI1394_ALIGN_QUAD(size);
413 409
414 410 /*
415 411 * if the free pointer is in the current buffer and the free pointer
416 412 * is below the current pointer (i.e. has not wrapped around)
417 413 */
418 414 if ((qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) &&
419 415 (qbuf->qb_ptrs.qp_free >= qbuf->qb_ptrs.qp_current)) {
420 416 /*
421 417 * The free pointer is in this buffer below the current pointer.
422 418 * Check to see if we have enough free space left.
423 419 */
424 420 if ((qbuf->qb_ptrs.qp_current + aligned_size) <=
425 421 qbuf->qb_ptrs.qp_free) {
426 422 /* Set up our reserved size, return the IO address */
427 423 qbuf->qb_ptrs.qp_resv_size = aligned_size;
428 424 *io_addr = (uint32_t)(qbuf->qb_cookie[
429 425 qbuf->qb_ptrs.qp_current_buf].dmac_address +
430 426 qbuf->qb_ptrs.qp_offset);
431 427
432 428 /*
433 429 * The free pointer is in this buffer below the current pointer.
434 430 * We do not have enough free space for the alloc. Return
435 431 * failure.
436 432 */
437 433 } else {
438 434 qbuf->qb_ptrs.qp_resv_size = 0;
439 435 TNF_PROBE_0(hci1394_q_reserve_ns_fail,
440 436 HCI1394_TNF_HAL_ERROR, "");
441 437 TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit,
442 438 HCI1394_TNF_HAL_STACK, "");
443 439 return (DDI_FAILURE);
444 440 }
445 441
446 442 /*
447 443 * If there is not enough room to fit in the current buffer (not
448 444 * including wrap around), we will go to the next buffer and check
449 445 * there. If we only have one buffer (i.e. one cookie), we will end up
450 446 * staying at the current buffer and wrapping the address back to the
451 447 * top.
452 448 */
453 449 } else if ((qbuf->qb_ptrs.qp_current + aligned_size) >
454 450 qbuf->qb_ptrs.qp_end) {
455 451 /* Go to the next buffer (or the top of ours for one cookie) */
456 452 hci1394_q_next_buf(qbuf);
457 453
458 454 /* If the free pointer is in the new current buffer */
459 455 if (qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) {
460 456 /*
461 457 * The free pointer is in this buffer. If we do not have
462 458 * enough free space for the alloc, return failure.
463 459 */
464 460 if ((qbuf->qb_ptrs.qp_current + aligned_size) >
465 461 qbuf->qb_ptrs.qp_free) {
466 462 qbuf->qb_ptrs.qp_resv_size = 0;
467 463 TNF_PROBE_0(hci1394_q_reserve_ns_fail,
468 464 HCI1394_TNF_HAL_ERROR, "");
469 465 TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit,
470 466 HCI1394_TNF_HAL_STACK, "");
471 467 return (DDI_FAILURE);
472 468 /*
473 469 * The free pointer is in this buffer. We have enough
474 470 * free space left.
475 471 */
476 472 } else {
477 473 /*
478 474 * Set up our reserved size, return the IO
479 475 * address
480 476 */
481 477 qbuf->qb_ptrs.qp_resv_size = aligned_size;
482 478 *io_addr = (uint32_t)(qbuf->qb_cookie[
483 479 qbuf->qb_ptrs.qp_current_buf].dmac_address +
484 480 qbuf->qb_ptrs.qp_offset);
485 481 }
486 482
487 483 /*
488 484 * We switched buffers and the free pointer is still in another
489 485 * buffer. We have sufficient space in this buffer for the alloc
490 486 * after changing buffers.
491 487 */
492 488 } else {
493 489 /* Set up our reserved size, return the IO address */
494 490 qbuf->qb_ptrs.qp_resv_size = aligned_size;
495 491 *io_addr = (uint32_t)(qbuf->qb_cookie[
496 492 qbuf->qb_ptrs.qp_current_buf].dmac_address +
497 493 qbuf->qb_ptrs.qp_offset);
498 494 }
499 495 /*
500 496 * The free pointer is in another buffer. We have sufficient space in
501 497 * this buffer for the alloc.
502 498 */
503 499 } else {
504 500 /* Set up our reserved size, return the IO address */
505 501 qbuf->qb_ptrs.qp_resv_size = aligned_size;
506 502 *io_addr = (uint32_t)(qbuf->qb_cookie[
507 503 qbuf->qb_ptrs.qp_current_buf].dmac_address +
508 504 qbuf->qb_ptrs.qp_offset);
509 505 }
510 506
511 507 TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit, HCI1394_TNF_HAL_STACK, "");
512 508
513 509 return (DDI_SUCCESS);
514 510 }
515 511
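The reservation policy above reduces to a small amount of logic when there is a single cookie. The following is a minimal sketch (the ring_t type is hypothetical, it uses offsets instead of kernel addresses, and it advances the current pointer itself, which the real code leaves to the write routines); the driver's version also walks multiple DMA cookies and snapshots qb_backup_ptrs so hci1394_q_unreserve() can roll back:

    typedef struct ring {
            size_t  r_len;          /* total buffer length in bytes */
            size_t  r_current;      /* next offset to hand out */
            size_t  r_free;         /* offset of the last freed byte */
    } ring_t;

    static int
    ring_reserve(ring_t *r, size_t size, size_t *offset)
    {
            size_t aligned = (size + 3) & ~(size_t)3;   /* quadlet align */

            if (r->r_free >= r->r_current) {
                    /* free space ends at r_free; alloc must fit below it */
                    if (r->r_current + aligned > r->r_free)
                            return (-1);
            } else if (r->r_current + aligned > r->r_len) {
                    /* no room before the end of the buffer; wrap to top */
                    r->r_current = 0;
                    if (aligned > r->r_free)
                            return (-1);
            }
            /* else: it fits in place and r_free is behind us */
            *offset = r->r_current;
            r->r_current += aligned;
            return (0);
    }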
516 512 /*
517 513 * hci1394_q_unreserve()
518 514 * Set the buffer pointer to what they were before hci1394_reserve(). This
519 515 * will be called when we encounter errors during hci1394_q_at*().
520 516 */
521 517 static void
522 518 hci1394_q_unreserve(hci1394_q_buf_t *qbuf)
523 519 {
524 520 ASSERT(qbuf != NULL);
525 521 TNF_PROBE_0_DEBUG(hci1394_q_unreserve_enter, HCI1394_TNF_HAL_STACK, "");
526 522
527 523 /* Go back to pointer setting before the reserve */
528 524 qbuf->qb_ptrs = qbuf->qb_backup_ptrs;
529 525
530 526 TNF_PROBE_0_DEBUG(hci1394_q_unreserve_exit, HCI1394_TNF_HAL_STACK, "");
531 527 }
532 528
533 529
534 530 /*
535 531 * hci1394_q_next_buf()
536 532 * Set our current buffer to the next cookie. If we only have one cookie, we
537 533 * will go back to the top of our buffer.
538 534 */
539 535 void
540 536 hci1394_q_next_buf(hci1394_q_buf_t *qbuf)
541 537 {
542 538 ASSERT(qbuf != NULL);
543 539 TNF_PROBE_0_DEBUG(hci1394_q_next_buf_enter, HCI1394_TNF_HAL_STACK, "");
544 540
545 541 /*
546 542 * go to the next cookie. If we are >= the cookie count, go back to the
547 543 * first cookie.
548 544 */
549 545 qbuf->qb_ptrs.qp_current_buf++;
550 546 if (qbuf->qb_ptrs.qp_current_buf >= qbuf->qb_buf.bi_cookie_count) {
551 547 qbuf->qb_ptrs.qp_current_buf = 0;
552 548 }
553 549
554 550 /* adjust the begin, end, current, and offset pointers */
555 551 qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_end + 1;
556 552 if (qbuf->qb_ptrs.qp_begin > qbuf->qb_ptrs.qp_bottom) {
557 553 qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
558 554 }
559 555 qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
560 556 qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
561 557 qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
562 558 qbuf->qb_ptrs.qp_offset = 0;
563 559
564 560 TNF_PROBE_0_DEBUG(hci1394_q_next_buf_exit, HCI1394_TNF_HAL_STACK, "");
565 561 }
566 562
567 563
568 564 /*
569 565 * hci1394_q_at()
570 566 * Place an AT command that does NOT need the data buffer into the DMA chain.
571 567 * Some examples of this are quadlet read/write, PHY packets, ATREQ Block
572 568 * Read, and ATRESP block write. result is only valid on failure.
573 569 */
574 570 int
575 571 hci1394_q_at(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
576 572 hci1394_basic_pkt_t *hdr, uint_t hdrsize, int *result)
577 573 {
578 574 int status;
579 575 uint32_t ioaddr;
580 576
581 577
582 578 ASSERT(q_handle != NULL);
583 579 ASSERT(cmd != NULL);
584 580 ASSERT(hdr != NULL);
585 581 TNF_PROBE_0_DEBUG(hci1394_q_at_enter, HCI1394_TNF_HAL_STACK, "");
586 582
587 583 mutex_enter(&q_handle->q_mutex);
588 584
589 585 /*
590 586 * Check the HAL state and generation when the AT Q is locked. This
591 587 * will make sure that we get all the commands when we flush the Q's
592 588 * during a reset or shutdown.
593 589 */
594 590 if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
595 591 (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
596 592 cmd->qc_generation)) {
597 593 *result = H1394_STATUS_INVALID_BUSGEN;
598 594 mutex_exit(&q_handle->q_mutex);
599 595 TNF_PROBE_0(hci1394_q_at_st_fail, HCI1394_TNF_HAL_ERROR, "");
600 596 TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK,
601 597 "");
602 598 return (DDI_FAILURE);
603 599 }
604 600
605 601 /* save away the argument to pass up when this command completes */
606 602 cmd->qc_node.tln_addr = cmd;
607 603
608 604 /* we have not written any 16 byte blocks to the descriptor yet */
609 605 q_handle->q_block_cnt = 0;
610 606
611 607 /* Reserve space for an OLI in the descriptor buffer */
612 608 status = hci1394_q_reserve(&q_handle->q_desc,
613 609 sizeof (hci1394_desc_imm_t), &ioaddr);
614 610 if (status != DDI_SUCCESS) {
615 611 *result = H1394_STATUS_NOMORE_SPACE;
616 612 mutex_exit(&q_handle->q_mutex);
617 613 TNF_PROBE_0(hci1394_q_at_qre_fail, HCI1394_TNF_HAL_ERROR, "");
618 614 TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK,
619 615 "");
620 616 return (DDI_FAILURE);
621 617 }
622 618
623 619 /* write the OLI to the descriptor buffer */
624 620 hci1394_q_at_write_OLI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
625 621
626 622 /* Add the AT command to the queued list */
627 623 hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
628 624
629 625 mutex_exit(&q_handle->q_mutex);
630 626 TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK, "");
631 627
632 628 return (DDI_SUCCESS);
633 629 }
634 630
635 631
636 632 /*
637 633 * XXX - NOTE: POSSIBLE FUTURE OPTIMIZATION
638 634 * ATREQ Block reads and writes that go through software are not very
639 635 * efficient (one of the reasons to use physical space). A copy is forced
640 636 * on all block reads due to the design of OpenHCI. Writes do not have this
641 637 * same restriction. This design forces a copy for writes too (we always
642 638 * copy into a data buffer before sending). There are many reasons for this
643 639 * including complexity reduction. There is a data size threshold where a
644 640 * copy is more expensive than mapping the data buffer address (or, worst
645 641 * case, a big enough difference that it pays to do it). However, we move
646 642 * block data around in mblks which means that our data may be scattered
647 643 * over many buffers. This adds to the complexity of mapping and setting
648 644 * up the OpenHCI descriptors.
649 645 *
650 646 * If someone really needs a speedup on block write ATREQs, my recommendation
651 647 * would be to add an additional command type at the target interface for a
652 648 * fast block write. The target driver would pass a mapped io addr to use.
653 649 * A function like "hci1394_q_at_with_ioaddr()" could be created which would
654 650 * be almost an exact copy of hci1394_q_at_with_data() without the
655 651 * hci1394_q_reserve() and hci1394_q_at_rep_put8() for the data buffer.
656 652 */
657 653
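A sketch of the suggested fast path (hci1394_q_at_with_ioaddr() is hypothetical, not part of the driver): it mirrors hci1394_q_at_with_data() but takes an already-mapped IO address, so the data-buffer reserve and copy drop out.

    int
    hci1394_q_at_with_ioaddr(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
        hci1394_basic_pkt_t *hdr, uint_t hdrsize, uint32_t data_ioaddr,
        uint_t datasize, int *result)
    {
            uint32_t desc_ioaddr;
            int status;

            mutex_enter(&q_handle->q_mutex);

            /* same HAL state and bus generation check as the other paths */
            if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
                (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
                cmd->qc_generation)) {
                    *result = H1394_STATUS_INVALID_BUSGEN;
                    mutex_exit(&q_handle->q_mutex);
                    return (DDI_FAILURE);
            }

            cmd->qc_node.tln_addr = cmd;
            q_handle->q_block_cnt = 0;

            /* Reserve space for an OMI and OL in the descriptor buffer */
            status = hci1394_q_reserve(&q_handle->q_desc,
                (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
                &desc_ioaddr);
            if (status != DDI_SUCCESS) {
                    *result = H1394_STATUS_NOMORE_SPACE;
                    mutex_exit(&q_handle->q_mutex);
                    return (DDI_FAILURE);
            }

            /* no copy: the OL points straight at the caller's buffer */
            hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr,
                hdrsize);
            hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd,
                data_ioaddr, datasize);

            hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
            mutex_exit(&q_handle->q_mutex);

            return (DDI_SUCCESS);
    }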
658 654
659 655 /*
660 656 * hci1394_q_at_with_data()
661 657 * Place an AT command that does need the data buffer into the DMA chain.
662 658 * The data is passed as a pointer to a kernel virtual address. An example of
663 659 * this is a lock operation. result is only valid on failure.
664 660 */
665 661 int
666 662 hci1394_q_at_with_data(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
667 663 hci1394_basic_pkt_t *hdr, uint_t hdrsize, uint8_t *data, uint_t datasize,
668 664 int *result)
669 665 {
670 666 uint32_t desc_ioaddr;
671 667 uint32_t data_ioaddr;
672 668 int status;
673 669
674 670
675 671 ASSERT(q_handle != NULL);
676 672 ASSERT(cmd != NULL);
677 673 ASSERT(hdr != NULL);
678 674 ASSERT(data != NULL);
679 675 TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_enter, HCI1394_TNF_HAL_STACK,
680 676 "");
681 677
682 678 mutex_enter(&q_handle->q_mutex);
683 679
684 680 /*
685 681 * Check the HAL state and generation when the AT Q is locked. This
686 682 * will make sure that we get all the commands when we flush the Q's
687 683 * during a reset or shutdown.
688 684 */
689 685 if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
690 686 (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
691 687 cmd->qc_generation)) {
692 688 *result = H1394_STATUS_INVALID_BUSGEN;
693 689 mutex_exit(&q_handle->q_mutex);
694 690 TNF_PROBE_0_DEBUG(hci1394_q_at_wd_st_fail,
695 691 HCI1394_TNF_HAL_STACK, "");
696 692 return (DDI_FAILURE);
697 693 }
698 694
699 695 /* save away the argument to pass up when this command completes */
700 696 cmd->qc_node.tln_addr = cmd;
701 697
702 698 /* we have not written any 16 byte blocks to the descriptor yet */
703 699 q_handle->q_block_cnt = 0;
704 700
705 701 /* Reserve space for an OMI and OL in the descriptor buffer */
706 702 status = hci1394_q_reserve(&q_handle->q_desc,
707 703 (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
708 704 &desc_ioaddr);
709 705 if (status != DDI_SUCCESS) {
710 706 *result = H1394_STATUS_NOMORE_SPACE;
711 707 mutex_exit(&q_handle->q_mutex);
712 708 TNF_PROBE_0(hci1394_q_at_wd_qre_fail,
713 709 HCI1394_TNF_HAL_ERROR, "");
714 710 TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit,
715 711 HCI1394_TNF_HAL_STACK, "");
716 712 return (DDI_FAILURE);
717 713 }
718 714
719 715 /* allocate space for data in the data buffer */
720 716 status = hci1394_q_reserve(&q_handle->q_data, datasize, &data_ioaddr);
721 717 if (status != DDI_SUCCESS) {
722 718 *result = H1394_STATUS_NOMORE_SPACE;
723 719 hci1394_q_unreserve(&q_handle->q_desc);
724 720 mutex_exit(&q_handle->q_mutex);
725 721 TNF_PROBE_0(hci1394_q_at_wd_qra_fail,
726 722 HCI1394_TNF_HAL_ERROR, "");
727 723 TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit,
728 724 HCI1394_TNF_HAL_STACK, "");
729 725 return (DDI_FAILURE);
730 726 }
731 727
732 728 /* Copy data into data buffer */
733 729 hci1394_q_at_rep_put8(&q_handle->q_data, cmd, data, datasize);
734 730
735 731 /* write the OMI to the descriptor buffer */
736 732 hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
737 733
738 734 /* write the OL to the descriptor buffer */
739 735 hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
740 736 datasize);
741 737
742 738 /* Add the AT command to the queued list */
743 739 hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
744 740
745 741 mutex_exit(&q_handle->q_mutex);
746 742 TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit, HCI1394_TNF_HAL_STACK,
747 743 "");
748 744
749 745 return (DDI_SUCCESS);
750 746 }
751 747
752 748
753 749 /*
754 750 * hci1394_q_at_with_mblk()
755 751 * Place an AT command that does need the data buffer into the DMA chain.
756 752 * The data is passed in mblk_t(s). Examples of this are a block write
757 753 * ATREQ and a block read ATRESP. The services layer and the hal use a
758 754 * private structure (h1394_mblk_t) to keep track of how much of the mblk
759 755 * to send since we may have to break the transfer up into smaller blocks.
760 756 * (i.e. a 1MByte block write would go out in 2KByte chunks). result is only
761 757 * valid on failure.
762 758 */
763 759 int
764 760 hci1394_q_at_with_mblk(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
765 761 hci1394_basic_pkt_t *hdr, uint_t hdrsize, h1394_mblk_t *mblk, int *result)
766 762 {
767 763 uint32_t desc_ioaddr;
768 764 uint32_t data_ioaddr;
769 765 int status;
770 766
771 767
772 768 ASSERT(q_handle != NULL);
773 769 ASSERT(cmd != NULL);
774 770 ASSERT(hdr != NULL);
775 771 ASSERT(mblk != NULL);
776 772 TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_enter, HCI1394_TNF_HAL_STACK,
777 773 "");
778 774
779 775 mutex_enter(&q_handle->q_mutex);
780 776
781 777 /*
782 778 * Check the HAL state and generation when the AT Q is locked. This
783 779 * will make sure that we get all the commands when we flush the Q's
784 780 * during a reset or shutdown.
785 781 */
786 782 if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
787 783 (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
788 784 cmd->qc_generation)) {
789 785 *result = H1394_STATUS_INVALID_BUSGEN;
790 786 mutex_exit(&q_handle->q_mutex);
791 787 TNF_PROBE_0_DEBUG(hci1394_q_at_wm_st_fail,
792 788 HCI1394_TNF_HAL_STACK, "");
793 789 return (DDI_FAILURE);
794 790 }
795 791
796 792 /* save away the argument to pass up when this command completes */
797 793 cmd->qc_node.tln_addr = cmd;
798 794
799 795 /* we have not written any 16 byte blocks to the descriptor yet */
800 796 q_handle->q_block_cnt = 0;
801 797
802 798 /* Reserve space for an OMI and OL in the descriptor buffer */
803 799 status = hci1394_q_reserve(&q_handle->q_desc,
804 800 (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
805 801 &desc_ioaddr);
806 802 if (status != DDI_SUCCESS) {
807 803 *result = H1394_STATUS_NOMORE_SPACE;
808 804 mutex_exit(&q_handle->q_mutex);
809 805 TNF_PROBE_0(hci1394_q_at_wm_qre_fail,
810 806 HCI1394_TNF_HAL_ERROR, "");
811 807 TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit,
812 808 HCI1394_TNF_HAL_STACK, "");
813 809 return (DDI_FAILURE);
814 810 }
815 811
816 812 /* Reserve space for data in the data buffer */
817 813 status = hci1394_q_reserve(&q_handle->q_data, mblk->length,
818 814 &data_ioaddr);
819 815 if (status != DDI_SUCCESS) {
820 816 *result = H1394_STATUS_NOMORE_SPACE;
821 817 hci1394_q_unreserve(&q_handle->q_desc);
822 818 mutex_exit(&q_handle->q_mutex);
823 819 TNF_PROBE_0(hci1394_q_at_wm_qra_fail,
824 820 HCI1394_TNF_HAL_ERROR, "");
825 821 TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit,
826 822 HCI1394_TNF_HAL_STACK, "");
827 823 return (DDI_FAILURE);
828 824 }
829 825
830 826 /* Copy mblk data into data buffer */
831 827 hci1394_q_at_copy_from_mblk(&q_handle->q_data, cmd, mblk);
832 828
833 829 /* write the OMI to the descriptor buffer */
834 830 hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
835 831
836 832 /* write the OL to the descriptor buffer */
837 833 hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
838 834 mblk->length);
839 835
840 836 /* Add the AT command to the queued list */
841 837 hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
842 838
843 839 mutex_exit(&q_handle->q_mutex);
844 840 TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit, HCI1394_TNF_HAL_STACK,
845 841 "");
846 842
847 843 return (DDI_SUCCESS);
848 844 }
849 845
850 846
851 847 /*
852 848 * hci1394_q_at_next()
853 849 * Return the next completed AT command in cmd. If flush_q is true, we will
854 850 * return the command regardless of whether it finished. We will flush
855 851 * during bus reset processing, shutdown, and detach.
856 852 */
857 853 void
858 854 hci1394_q_at_next(hci1394_q_handle_t q_handle, boolean_t flush_q,
859 855 hci1394_q_cmd_t **cmd)
860 856 {
861 857 hci1394_q_buf_t *desc;
862 858 hci1394_q_buf_t *data;
863 859 hci1394_tlist_node_t *node;
864 860 uint32_t cmd_status;
865 861
866 862
867 863 ASSERT(q_handle != NULL);
868 864 ASSERT(cmd != NULL);
869 865 TNF_PROBE_0_DEBUG(hci1394_q_at_next_enter, HCI1394_TNF_HAL_STACK, "");
870 866
871 867 mutex_enter(&q_handle->q_mutex);
872 868
873 869 desc = &q_handle->q_desc;
874 870 data = &q_handle->q_data;
875 871
876 872 /* Sync descriptor buffer */
877 873 (void) ddi_dma_sync(desc->qb_buf.bi_dma_handle, 0,
878 874 desc->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
879 875
880 876 /* Look at the top cmd on the queued list (without removing it) */
881 877 hci1394_tlist_peek(q_handle->q_queued_list, &node);
882 878 if (node == NULL) {
883 879 /* There are no more commands left on the queued list */
884 880 *cmd = NULL;
885 881 mutex_exit(&q_handle->q_mutex);
886 882 TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit, HCI1394_TNF_HAL_STACK,
887 883 "");
888 884 return;
889 885 }
890 886
891 887 /*
892 888 * There is a command on the list, read its status and timestamp when
893 889 * it was sent
894 890 */
895 891 *cmd = (hci1394_q_cmd_t *)node->tln_addr;
896 892 cmd_status = ddi_get32(desc->qb_buf.bi_handle, (*cmd)->qc_status_addr);
897 893 (*cmd)->qc_timestamp = cmd_status & DESC_ST_TIMESTAMP_MASK;
898 894 cmd_status = HCI1394_DESC_EVT_GET(cmd_status);
899 895
900 896 /*
901 897 * If we are flushing the Q (e.g. due to a bus reset), we will return
902 898 * the command regardless of its completion status. If we are not
903 899 * flushing the Q and we do not have status on the command (e.g. status
904 900 * = 0), we are done with this Q for now.
905 901 */
906 902 if (flush_q == B_FALSE) {
907 903 if (cmd_status == 0) {
908 904 *cmd = NULL;
909 905 mutex_exit(&q_handle->q_mutex);
910 906 TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit,
911 907 HCI1394_TNF_HAL_STACK, "");
912 908 return;
913 909 }
914 910 }
915 911
916 912 /*
917 913 * The command completed; remove it from the queued list. There is no
918 914 * race condition in deleting the node from the list here. This is the
919 915 * only place the node will be deleted, so we do not need to check the
920 916 * return status.
921 917 */
922 918 (void) hci1394_tlist_delete(q_handle->q_queued_list, node);
923 919
924 920 /*
925 921 * Free the space used by the command in the descriptor and data
926 922 * buffers.
927 923 */
928 924 desc->qb_ptrs.qp_free_buf = (*cmd)->qc_descriptor_buf;
929 925 desc->qb_ptrs.qp_free = (*cmd)->qc_descriptor_end;
930 926 if ((*cmd)->qc_data_used == B_TRUE) {
931 927 data->qb_ptrs.qp_free_buf = (*cmd)->qc_data_buf;
932 928 data->qb_ptrs.qp_free = (*cmd)->qc_data_end;
933 929 }
934 930
935 931 /* return command status */
936 932 (*cmd)->qc_status = cmd_status;
937 933
938 934 mutex_exit(&q_handle->q_mutex);
939 935 TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit, HCI1394_TNF_HAL_STACK, "");
940 936 }
941 937
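Given those flush semantics, the normal completion path drains the Q until hci1394_q_at_next() hands back NULL; during bus-reset cleanup the same loop runs with flush_q set to B_TRUE. A sketch (the caller context is assumed, e.g. the AT completion side of the interrupt path):

    static void
    example_at_drain(hci1394_q_handle_t q, boolean_t flush)
    {
            hci1394_q_cmd_t *cmd;

            for (;;) {
                    hci1394_q_at_next(q, flush, &cmd);
                    if (cmd == NULL)
                            break;  /* nothing (more) has completed */
                    /*
                     * cmd->qc_status now holds the OpenHCI event code;
                     * hand the command back up to the services layer.
                     */
            }
    }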
942 938
943 939 /*
944 940 * hci1394_q_at_write_OMI()
945 941 * Write an OMI descriptor into the AT descriptor buffer passed in as qbuf.
946 942 * Buffer state information is stored in cmd. Use the hdr and hdr size for
947 943 * the additional information attached to an immediate descriptor.
948 944 */
949 945 void
950 946 hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
951 947 hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
952 948 {
953 949 hci1394_desc_imm_t *desc;
954 950 uint32_t data;
955 951
956 952
957 953 ASSERT(qbuf != NULL);
958 954 ASSERT(cmd != NULL);
959 955 ASSERT(hdr != NULL);
960 956 ASSERT(MUTEX_HELD(&q_handle->q_mutex));
961 957 TNF_PROBE_0_DEBUG(hci1394_q_at_write_OMI_enter, HCI1394_TNF_HAL_STACK,
962 958 "");
963 959
964 960 /* The only valid "header" sizes for an OMI are 8 bytes or 16 bytes */
965 961 ASSERT((hdrsize == 8) || (hdrsize == 16));
966 962
967 963 /* Make sure enough room for OMI */
968 964 ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));
969 965
970 966 /* Store the offset of the top of this descriptor block */
971 967 qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
972 968 qbuf->qb_ptrs.qp_begin);
973 969
974 970 /* Setup OpenHCI OMI Header */
975 971 desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
976 972 data = DESC_AT_OMI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
977 973 ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
978 974 ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
979 975 ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
980 976 ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);
981 977
982 978 /*
983 979 * Copy in 1394 header. Size is in bytes, convert it to a 32-bit word
984 980 * count.
985 981 */
986 982 ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
987 983 hdrsize >> 2, DDI_DEV_AUTOINCR);
988 984
989 985 /*
990 986 * We wrote two 16-byte blocks in the descriptor buffer; update the count
991 987 * accordingly. Update the reserved size and current pointer.
992 988 */
993 989 q_handle->q_block_cnt += 2;
994 990 qbuf->qb_ptrs.qp_resv_size -= sizeof (hci1394_desc_imm_t);
995 991 qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
996 992
997 993 TNF_PROBE_0_DEBUG(hci1394_q_at_write_OMI_exit, HCI1394_TNF_HAL_STACK,
998 994 "");
999 995 }
1000 996
1001 997
1002 998 /*
1003 999 * hci1394_q_at_write_OLI()
1004 1000 * Write an OLI descriptor into the AT descriptor buffer passed in as qbuf.
1005 1001 * Buffer state information is stored in cmd. Use the hdr and hdr size for
1006 1002 * the additional information attached to an immediate descriptor.
1007 1003 */
1008 1004 void
1009 1005 hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
1010 1006 hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
1011 1007 {
1012 1008 hci1394_desc_imm_t *desc;
1013 1009 uint32_t data;
1014 1010 uint32_t command_ptr;
1015 1011 uint32_t tcode;
1016 1012
1017 1013
1018 1014 ASSERT(qbuf != NULL);
1019 1015 ASSERT(cmd != NULL);
1020 1016 ASSERT(hdr != NULL);
1021 1017 ASSERT(MUTEX_HELD(&q_handle->q_mutex));
1022 1018 TNF_PROBE_0_DEBUG(hci1394_q_at_write_OLI_enter, HCI1394_TNF_HAL_STACK,
1023 1019 "");
1024 1020
1025 1021 /* The only valid "header" sizes for an OLI are 8, 12, 16 bytes */
1026 1022 ASSERT((hdrsize == 8) || (hdrsize == 12) || (hdrsize == 16));
1027 1023
1028 1024 /* make sure enough room for 1 OLI */
1029 1025 ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));
1030 1026
1031 1027 /* Store the offset of the top of this descriptor block */
1032 1028 qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
1033 1029 qbuf->qb_ptrs.qp_begin);
1034 1030
1035 1031 /* Setup OpenHCI OLI Header */
1036 1032 desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
1037 1033 data = DESC_AT_OLI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
1038 1034 ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
1039 1035 ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
1040 1036 ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
1041 1037 ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);
1042 1038
1043 1039 /* Setup 1394 Header */
1044 1040 tcode = (hdr->q1 & DESC_PKT_TCODE_MASK) >> DESC_PKT_TCODE_SHIFT;
1045 1041 if ((tcode == IEEE1394_TCODE_WRITE_QUADLET) ||
1046 1042 (tcode == IEEE1394_TCODE_READ_QUADLET_RESP)) {
1047 1043 /*
1048 1044 * if the tcode is a quadlet write or quadlet read response, move
1049 1045 * the last quadlet as 8-bit data. All data is treated as 8-bit
1050 1046 * data (even quadlet reads and writes). Therefore, target drivers
1051 1047 * MUST take that into consideration when accessing device registers.
1052 1048 */
1053 1049 ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1, 3,
1054 1050 DDI_DEV_AUTOINCR);
1055 1051 ddi_rep_put8(qbuf->qb_buf.bi_handle, (uint8_t *)&hdr->q4,
1056 1052 (uint8_t *)&desc->q4, 4, DDI_DEV_AUTOINCR);
1057 1053 } else {
1058 1054 ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
1059 1055 hdrsize >> 2, DDI_DEV_AUTOINCR);
1060 1056 }
1061 1057
1062 1058 /*
1063 1059 * We wrote two 16-byte blocks in the descriptor buffer; update the count
1064 1060 * accordingly.
1065 1061 */
1066 1062 q_handle->q_block_cnt += 2;
1067 1063
1068 1064 /*
1069 1065 * Sync the buffer in case the DMA engine is currently running. This must
1070 1066 * be done before writing the command pointer in the previous descriptor.
1071 1067 */
1072 1068 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1073 1069 qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1074 1070
1075 1071 /* save away the status address for quick access in at_next() */
1076 1072 cmd->qc_status_addr = &desc->status;
1077 1073
1078 1074 /*
1079 1075 * Set up the command pointer. This tells the HW where to get the
1080 1076 * descriptor we just set up. This includes the IO address along with
1081 1077 * a 4-bit count of 16-byte blocks.
1082 1078 */
1083 1079 command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
1084 1080 ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
1085 1081 DESC_Z_MASK));
1086 1082
1087 1083 /*
1088 1084 * if we previously set up a descriptor, link this new descriptor into
1089 1085 * the previous descriptor's "next" pointer.
1090 1086 */
1091 1087 if (q_handle->q_previous != NULL) {
1092 1088 ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
1093 1089 command_ptr);
1094 1090 /* Sync buffer again, this gets the command pointer */
1095 1091 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1096 1092 qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1097 1093 }
1098 1094
1099 1095 /*
1100 1096 * this is now the previous descriptor. Update the current pointer,
1101 1097 * clear the block count and reserved size since this is the end of
1102 1098 * this command.
1103 1099 */
1104 1100 q_handle->q_previous = (hci1394_desc_t *)desc;
1105 1101 qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
1106 1102 q_handle->q_block_cnt = 0;
1107 1103 qbuf->qb_ptrs.qp_resv_size = 0;
1108 1104
1109 1105 /* save away cleanup info when we are done with the command */
1110 1106 cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
1111 1107 cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;
1112 1108
1113 1109 /* If the DMA is not running, start it */
1114 1110 if (q_handle->q_dma_running == B_FALSE) {
1115 1111 q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
1116 1112 command_ptr);
1117 1113 q_handle->q_dma_running = B_TRUE;
1118 1114 /* the DMA is running, wake it up */
1119 1115 } else {
1120 1116 q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
1121 1117 }
1122 1118
1123 1119 TNF_PROBE_0_DEBUG(hci1394_q_at_write_OLI_exit, HCI1394_TNF_HAL_STACK,
1124 1120 "");
1125 1121 }
1126 1122
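As a worked example of the command pointer encoding (the addresses are hypothetical): an OLI occupies two 16-byte blocks, so q_block_cnt is 2, and because descriptors are 16-byte aligned the Z count fits in the low four bits of the address.

    uint32_t dmac_address = 0x2000;     /* cookie IO address */
    uint32_t qp_offset = 0x40;          /* offset of this descriptor block */
    uint32_t command_ptr = (dmac_address + qp_offset) | (2 & DESC_Z_MASK);
    /* command_ptr == 0x2042: block at 0x2040 with Z = 2 in bits 3:0 */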
1127 1123
1128 1124 /*
1129 1125 * hci1394_q_at_write_OL()
1130 1126 * Write an OL descriptor into the AT descriptor buffer passed in as qbuf.
1131 1127 * Buffer state information is stored in cmd. The IO address of the data
1132 1128 * buffer is passed in io_addr. Size is the size of the data to be
1133 1129 * transferred.
1134 1130 */
1135 1131 void
1136 1132 hci1394_q_at_write_OL(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
1137 1133 hci1394_q_cmd_t *cmd, uint32_t io_addr, uint_t size)
1138 1134 {
1139 1135 hci1394_desc_t *desc;
1140 1136 uint32_t data;
1141 1137 uint32_t command_ptr;
1142 1138
1143 1139
1144 1140 ASSERT(q_handle != NULL);
1145 1141 ASSERT(qbuf != NULL);
1146 1142 ASSERT(cmd != NULL);
1147 1143 ASSERT(MUTEX_HELD(&q_handle->q_mutex));
1148 1144 TNF_PROBE_0_DEBUG(hci1394_q_at_write_OL_enter, HCI1394_TNF_HAL_STACK,
1149 1145 "");
1150 1146
1151 1147 /* make sure enough room for OL */
1152 1148 ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_t));
1153 1149
1154 1150 /* Setup OpenHCI OL Header */
1155 1151 desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
1156 1152 data = DESC_AT_OL | (size & DESC_HDR_REQCOUNT_MASK);
1157 1153 ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
1158 1154 ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
1159 1155 ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
1160 1156 ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, 0);
1161 1157
1162 1158 /*
1163 1159 * We wrote one 16-byte block in the descriptor buffer; update the count
1164 1160 * accordingly.
1165 1161 */
1166 1162 q_handle->q_block_cnt++;
1167 1163
1168 1164 /*
1169 1165 * Sync the buffer in case the DMA engine is currently running. This must
1170 1166 * be done before writing the command pointer in the previous descriptor.
1171 1167 */
1172 1168 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1173 1169 qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1174 1170
1175 1171 /* save away the status address for quick access in at_next() */
1176 1172 cmd->qc_status_addr = &desc->status;
1177 1173
1178 1174 /*
1179 1175 * Set up the command pointer. This tells the HW where to get the
1180 1176 * descriptor we just set up. This includes the IO address along with
1181 1177 * a 4-bit count of 16-byte blocks.
1182 1178 */
1183 1179 command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
1184 1180 ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
1185 1181 DESC_Z_MASK));
1186 1182
1187 1183 /*
1188 1184 * if we previously set up a descriptor, link this new descriptor into
1189 1185 * the previous descriptor's "next" pointer.
1190 1186 */
1191 1187 if (q_handle->q_previous != NULL) {
1192 1188 ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
1193 1189 command_ptr);
1194 1190 /* Sync buffer again, this gets the command pointer */
1195 1191 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1196 1192 qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1197 1193 }
1198 1194
1199 1195 /*
1200 1196 * this is now the previous descriptor. Update the current pointer,
1201 1197 * clear the block count and reserved size since this is the end of
1202 1198 * this command.
1203 1199 */
1204 1200 q_handle->q_previous = desc;
1205 1201 qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);
1206 1202 q_handle->q_block_cnt = 0;
1207 1203 qbuf->qb_ptrs.qp_resv_size = 0;
1208 1204
1209 1205 /* save away cleanup info when we are done with the command */
1210 1206 cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
1211 1207 cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;
1212 1208
1213 1209 /* If the DMA is not running, start it */
1214 1210 if (q_handle->q_dma_running == B_FALSE) {
1215 1211 q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
1216 1212 command_ptr);
1217 1213 q_handle->q_dma_running = B_TRUE;
1218 1214 /* the DMA is running, wake it up */
1219 1215 } else {
1220 1216 q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
1221 1217 }
1222 1218
1223 1219 TNF_PROBE_0_DEBUG(hci1394_q_at_write_OL_exit, HCI1394_TNF_HAL_STACK,
1224 1220 "");
1225 1221 }
1226 1222
1227 1223
1228 1224 /*
1229 1225 * hci1394_q_at_rep_put8()
1230 1226 * Copy a byte stream from a kernel virtual address (data) to an IO-mapped
1231 1227 * data buffer (qbuf). Copy datasize bytes. State information for the
1232 1228 * data buffer is kept in cmd.
1233 1229 */
1234 1230 void
1235 1231 hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
1236 1232 uint8_t *data, uint_t datasize)
1237 1233 {
1238 1234 ASSERT(qbuf != NULL);
1239 1235 ASSERT(cmd != NULL);
1240 1236 ASSERT(data != NULL);
1241 1237 TNF_PROBE_0_DEBUG(hci1394_q_at_rep_put8_enter, HCI1394_TNF_HAL_STACK,
1242 1238 "");
1243 1239
1244 1240 /* Make sure enough room for data */
1245 1241 ASSERT(qbuf->qb_ptrs.qp_resv_size >= datasize);
1246 1242
1247 1243 /* Copy the data into the data buffer */
1248 1244 ddi_rep_put8(qbuf->qb_buf.bi_handle, data,
1249 1245 (uint8_t *)qbuf->qb_ptrs.qp_current, datasize, DDI_DEV_AUTOINCR);
1250 1246
1251 1247 /* Update the current pointer, offset, and reserved size */
1252 1248 qbuf->qb_ptrs.qp_current += datasize;
1253 1249 qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
1254 1250 qbuf->qb_ptrs.qp_begin);
1255 1251 qbuf->qb_ptrs.qp_resv_size -= datasize;
1256 1252
1257 1253 /* save away cleanup info when we are done with the command */
1258 1254 cmd->qc_data_used = B_TRUE;
1259 1255 cmd->qc_data_buf = qbuf->qb_ptrs.qp_current_buf;
1260 1256 cmd->qc_data_end = qbuf->qb_ptrs.qp_current - 1;
1261 1257
1262 1258 /* Sync data buffer */
1263 1259 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1264 1260 qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1265 1261
1266 1262 TNF_PROBE_0_DEBUG(hci1394_q_at_rep_put8_exit, HCI1394_TNF_HAL_STACK,
1267 1263 "");
1268 1264 }
1269 1265
1270 1266
1271 1267 /*
1272 1268 * hci1394_q_at_copy_from_mblk()
1273 1269 * Copy a byte stream from mblk(s) to an IO-mapped data buffer (qbuf).
1274 1270 * Copy mblk->length bytes. The services layer and the hal use a private
1275 1271 * structure (h1394_mblk_t) to keep track of how much of the mblk to send
1276 1272 * since we may have to break the transfer up into smaller blocks. (i.e. a
1277 1273 * 1MByte block write would go out in 2KByte chunks). State information for
1278 1274 * the data buffer is kept in cmd.
1279 1275 */
1280 1276 static void
1281 1277 hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
1282 1278 h1394_mblk_t *mblk)
1283 1279 {
1284 1280 uint_t bytes_left;
1285 1281 uint_t length;
1286 1282
1287 1283
1288 1284 ASSERT(qbuf != NULL);
1289 1285 ASSERT(cmd != NULL);
1290 1286 ASSERT(mblk != NULL);
1291 1287 TNF_PROBE_0_DEBUG(hci1394_q_at_copy_from_mblk_enter,
1292 1288 HCI1394_TNF_HAL_STACK, "");
1293 1289
1294 1290 /* We return these variables to the Services Layer when we are done */
1295 1291 mblk->next_offset = mblk->curr_offset;
1296 1292 mblk->next_mblk = mblk->curr_mblk;
1297 1293 bytes_left = mblk->length;
1298 1294
1299 1295 /* do while there are bytes left to copy */
1300 1296 do {
1301 1297 /*
1302 1298 * If the entire data portion of the current block transfer is
1303 1299 * contained within a single mblk.
1304 1300 */
1305 1301 if ((mblk->next_offset + bytes_left) <=
1306 1302 (mblk->next_mblk->b_wptr)) {
1307 1303 /* Copy the data into the data Q */
1308 1304 hci1394_q_at_rep_put8(qbuf, cmd,
1309 1305 (uint8_t *)mblk->next_offset, bytes_left);
1310 1306
1311 1307 /* increment the mblk offset */
1312 1308 mblk->next_offset += bytes_left;
1313 1309
1314 1310 /* we have no more bytes to put into the buffer */
1315 1311 bytes_left = 0;
1316 1312
1317 1313 /*
1318 1314 * If our offset is at the end of data in this mblk, go
1319 1315 * to the next mblk.
1320 1316 */
1321 1317 if (mblk->next_offset >= mblk->next_mblk->b_wptr) {
1322 1318 mblk->next_mblk = mblk->next_mblk->b_cont;
1323 1319 if (mblk->next_mblk != NULL) {
1324 1320 mblk->next_offset =
1325 1321 mblk->next_mblk->b_rptr;
1326 1322 }
1327 1323 }
1328 1324
1329 1325 /*
1330 1326 * The data portion of the current block transfer is spread
1331 1327 * across two or more mblks.
1332 1328 */
1333 1329 } else {
1334 1330 /*
1335 1331 * Figure out how much data is in this mblk.
1336 1332 */
1337 1333 length = mblk->next_mblk->b_wptr - mblk->next_offset;
1338 1334
1339 1335 /* Copy the data into the atreq data Q */
1340 1336 hci1394_q_at_rep_put8(qbuf, cmd,
1341 1337 (uint8_t *)mblk->next_offset, length);
1342 1338
1343 1339 /* update the bytes left count, go to the next mblk */
1344 1340 bytes_left = bytes_left - length;
1345 1341 mblk->next_mblk = mblk->next_mblk->b_cont;
1346 1342 ASSERT(mblk->next_mblk != NULL);
1347 1343 mblk->next_offset = mblk->next_mblk->b_rptr;
1348 1344 }
1349 1345 } while (bytes_left > 0);
1350 1346
1351 1347 TNF_PROBE_0_DEBUG(hci1394_q_at_copy_from_mblk_exit,
1352 1348 HCI1394_TNF_HAL_STACK, "");
1353 1349 }
1354 1350
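To make the curr/next bookkeeping concrete, a caller could chunk a large message as in the sketch below (example_send_in_chunks() is hypothetical; in the real driver the services layer drives the chunking and each chunk goes out as its own command):

    static void
    example_send_in_chunks(hci1394_q_handle_t q, hci1394_q_cmd_t *cmd,
        hci1394_basic_pkt_t *hdr, uint_t hdrsize, mblk_t *mp, uint_t total)
    {
            h1394_mblk_t hmblk;
            uint_t sent = 0;
            int result;

            hmblk.curr_mblk = mp;
            hmblk.curr_offset = mp->b_rptr;

            while (sent < total) {
                    /* e.g. 2KByte chunks, per the comment above */
                    hmblk.length = MIN(total - sent, 2048);
                    if (hci1394_q_at_with_mblk(q, cmd, hdr, hdrsize, &hmblk,
                        &result) != DDI_SUCCESS)
                            break;
                    /* copy_from_mblk left next_* at the resume point */
                    hmblk.curr_mblk = hmblk.next_mblk;
                    hmblk.curr_offset = hmblk.next_offset;
                    sent += hmblk.length;
            }
    }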
1355 1351
1356 1352 /*
1357 1353 * hci1394_q_ar_next()
1358 1354 * Return an address to the next received AR packet. If there are no more
1359 1355 * AR packets in the buffer, q_addr will be set to NULL.
1360 1356 */
1361 1357 void
1362 1358 hci1394_q_ar_next(hci1394_q_handle_t q_handle, uint32_t **q_addr)
1363 1359 {
1364 1360 hci1394_desc_t *desc;
1365 1361 hci1394_q_buf_t *descb;
1366 1362 hci1394_q_buf_t *datab;
1367 1363 uint32_t residual_count;
1368 1364
1369 1365
1370 1366 ASSERT(q_handle != NULL);
1371 1367 ASSERT(q_addr != NULL);
1372 1368 TNF_PROBE_0_DEBUG(hci1394_q_ar_next_enter, HCI1394_TNF_HAL_STACK, "");
1373 1369
1374 1370 descb = &q_handle->q_desc;
1375 1371 datab = &q_handle->q_data;
1376 1372
1377 1373 /* Sync Descriptor buffer */
1378 1374 (void) ddi_dma_sync(descb->qb_buf.bi_dma_handle, 0,
1379 1375 descb->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
1380 1376
1381 1377 /*
1382 1378 * Check the residual count in the current IM vs. q_space_left to see if
1383 1379 * we have received any more responses.
1384 1380 */
1385 1381 desc = (hci1394_desc_t *)q_handle->q_head;
1386 1382 residual_count = ddi_get32(descb->qb_buf.bi_handle, &desc->status);
1387 1383 residual_count &= DESC_ST_RESCOUNT_MASK;
1388 1384 if (residual_count >= q_handle->q_space_left) {
1389 1385 /* No new packets received */
1390 1386 *q_addr = NULL;
1391 1387 TNF_PROBE_0_DEBUG(hci1394_q_ar_next_exit,
1392 1388 HCI1394_TNF_HAL_STACK, "");
1393 1389 return;
1394 1390 }
1395 1391
1396 1392 /* Sync Data Q */
1397 1393 (void) ddi_dma_sync(datab->qb_buf.bi_dma_handle, 0,
1398 1394 datab->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
1399 1395
1400 1396 /*
1401 1397 * We have a new packet, return the address of the start of the
1402 1398 * packet.
1403 1399 */
1404 1400 *q_addr = (uint32_t *)datab->qb_ptrs.qp_current;
1405 1401
1406 1402 TNF_PROBE_0_DEBUG(hci1394_q_ar_next_exit, HCI1394_TNF_HAL_STACK, "");
1407 1403 }
1408 1404
1409 1405
1410 1406 /*
1411 1407 * hci1394_q_ar_free()
1412 1408 * Free the space used by the AR packet at the top of the data buffer. AR
1413 1409 * packets are processed in the order that they are received. This will
1414 1410 * free the oldest received packet which has not yet been freed. size is
1415 1411 * how much space the packet takes up.
1416 1412 */
1417 1413 void
1418 1414 hci1394_q_ar_free(hci1394_q_handle_t q_handle, uint_t size)
1419 1415 {
1420 1416 hci1394_q_buf_t *descb;
1421 1417 hci1394_q_buf_t *datab;
1422 1418
1423 1419
1424 1420 ASSERT(q_handle != NULL);
1425 1421 TNF_PROBE_0_DEBUG(hci1394_q_ar_free_enter, HCI1394_TNF_HAL_STACK, "");
1426 1422
1427 1423 descb = &q_handle->q_desc;
1428 1424 datab = &q_handle->q_data;
1429 1425
1430 1426 /*
1431 1427 * Packet is in multiple buffers. Theoretically a packet could be broken
1432 1428 * across more than two buffers for an ARRESP. Since the buffers should
1433 1429 * be in at least 4K increments, this will not happen, as the max packet
1434 1430 * size is 2KBytes.
1435 1431 */
1436 1432 if ((datab->qb_ptrs.qp_current + size) > datab->qb_ptrs.qp_end) {
1437 1433 /* Add IM descriptor for used buffer back into Q */
1438 1434 hci1394_q_ar_write_IM(q_handle, descb,
1439 1435 datab->qb_cookie[datab->qb_ptrs.qp_current_buf
1440 1436 ].dmac_address,
1441 1437 datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size);
1442 1438
1443 1439 /* Go to the next buffer */
1444 1440 hci1394_q_next_buf(datab);
1445 1441
1446 1442 /* Update the next buffer's pointers for the partial packet */
1447 1443 size -= q_handle->q_space_left;
1448 1444 datab->qb_ptrs.qp_current += size;
1449 1445 q_handle->q_space_left =
1450 1446 datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size -
1451 1447 size;
1452 1448
1453 1449 /* Change the head pointer to the next IM descriptor */
1454 1450 q_handle->q_head += sizeof (hci1394_desc_t);
1455 1451 if ((q_handle->q_head + sizeof (hci1394_desc_t)) >
1456 1452 (descb->qb_ptrs.qp_bottom + 1)) {
1457 1453 q_handle->q_head = descb->qb_ptrs.qp_top;
1458 1454 }
1459 1455
1460 1456 /* Packet is only in one buffer */
1461 1457 } else {
1462 1458 q_handle->q_space_left -= size;
1463 1459 datab->qb_ptrs.qp_current += size;
1464 1460 }
1465 1461
1466 1462 TNF_PROBE_0_DEBUG(hci1394_q_ar_free_exit, HCI1394_TNF_HAL_STACK, "");
1467 1463 }
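
The spanning branch above is a split of size at the buffer boundary: the bytes that fit exhaust the current buffer (which gets re-armed with a fresh IM descriptor), and the remainder is charged against the next buffer. A self-contained sketch of just that accounting, with illustrative names in place of the driver's structures:

#include <stdint.h>

/* Illustrative model of one data buffer in the circular AR data Q. */
typedef struct {
	uint32_t buf_size;	/* total size of the buffer */
	uint32_t space_left;	/* unused bytes remaining in it */
	uint32_t offset;	/* consumed bytes (qp_current analogue) */
} ar_buf_model_t;

/*
 * Free 'size' bytes of received packet.  Returns how many bytes spilled
 * into the next buffer (0 if the packet fit entirely in the current one).
 */
static uint32_t
ar_free_model(ar_buf_model_t *cur, ar_buf_model_t *next, uint32_t size)
{
	if (size <= cur->space_left) {		/* packet in one buffer */
		cur->space_left -= size;
		cur->offset += size;
		return (0);
	}

	/* Packet spans the boundary: exhaust 'cur', charge rest to 'next'. */
	size -= cur->space_left;		/* bytes landing in 'next' */
	cur->space_left = 0;
	next->offset = size;
	next->space_left = next->buf_size - size;

	return (size);
}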
1468 1464
1469 1465
1470 1466 /*
1471 1467 * hci1394_q_ar_get32()
1472 1468  *    Read a quadlet of data regardless of whether it is in the current buffer
1473 1469  *    or has wrapped to the top buffer.  If the address passed to this routine
1474 1470  *    is past the bottom of the data buffer, this routine will automatically
1475 1471  *    wrap back to the top of the Q and look at the correct offset from the
1476 1472  *    top.  Copy the data into the kernel virtual address provided.
1477 1473 */
1478 1474 uint32_t
1479 1475 hci1394_q_ar_get32(hci1394_q_handle_t q_handle, uint32_t *addr)
1480 1476 {
1481 1477 hci1394_q_buf_t *data;
1482 1478 uintptr_t new_addr;
1483 1479 uint32_t data32;
1484 1480
1485 1481
1486 1482 ASSERT(q_handle != NULL);
1487 1483 ASSERT(addr != NULL);
1488 1484 TNF_PROBE_0_DEBUG(hci1394_q_get32_enter, HCI1394_TNF_HAL_STACK, "");
1489 1485
1490 1486 data = &q_handle->q_data;
1491 1487
1492 1488 /*
1493 1489 	 * If the data has wrapped to the top of the buffer, adjust the address.
1494 1490 */
1495 1491 if ((uintptr_t)addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
1496 1492 new_addr = (uintptr_t)data->qb_ptrs.qp_top + ((uintptr_t)addr -
1497 1493 ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1));
1498 1494 data32 = ddi_get32(data->qb_buf.bi_handle,
1499 1495 (uint32_t *)new_addr);
1500 1496
1501 1497 /* data is before end of buffer */
1502 1498 } else {
1503 1499 data32 = ddi_get32(data->qb_buf.bi_handle, addr);
1504 1500 }
1505 1501
1506 1502 TNF_PROBE_0_DEBUG(hci1394_q_get32_exit, HCI1394_TNF_HAL_STACK, "");
1507 1503
1508 1504 return (data32);
1509 1505 }
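
The adjustment above is ordinary circular-buffer arithmetic: an address that runs past the bottom folds back to top + (addr - (bottom + 1)). A runnable user-space model, with a plain memcpy() standing in for ddi_get32() and illustrative names throughout:

#include <stdint.h>
#include <string.h>

/*
 * Read a 32-bit word from the circular buffer [top, bottom], where
 * 'addr' may point past 'bottom'; wrapped addresses fold to the top.
 */
static uint32_t
ring_get32(const uint8_t *top, const uint8_t *bottom, const uint8_t *addr)
{
	uint32_t val;

	if (addr > bottom)
		addr = top + (addr - (bottom + 1));

	memcpy(&val, addr, sizeof (val));	/* ddi_get32() stand-in */

	return (val);
}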
1510 1506
1511 1507
1512 1508 /*
1513 1509 * hci1394_q_ar_rep_get8()
1514 1510  *    Read a byte stream of data regardless of whether it is contiguous or has
1515 1511  *    partially or fully wrapped to the top buffer.  If the address passed to
1516 1512  *    this routine is past the bottom of the data buffer, or address + size is
1517 1513  *    past the bottom of the data buffer, this routine will automatically wrap
1518 1514  *    back to the top of the Q and look at the correct offset from the top.
1519 1515  *    Copy the data into the kernel virtual address provided.
1520 1516 */
1521 1517 void
1522 1518 hci1394_q_ar_rep_get8(hci1394_q_handle_t q_handle, uint8_t *dest,
1523 1519 uint8_t *q_addr, uint_t size)
1524 1520 {
1525 1521 hci1394_q_buf_t *data;
1526 1522 uintptr_t new_addr;
1527 1523 uint_t new_size;
1528 1524 uintptr_t new_dest;
1529 1525
1530 1526
1531 1527 ASSERT(q_handle != NULL);
1532 1528 ASSERT(dest != NULL);
1533 1529 ASSERT(q_addr != NULL);
1534 1530 TNF_PROBE_0_DEBUG(hci1394_q_ar_rep_get8_enter, HCI1394_TNF_HAL_STACK,
1535 1531 "");
1536 1532
1537 1533 data = &q_handle->q_data;
1538 1534
1539 1535 /*
1540 1536 * There are three cases:
1541 1537 * 1) All of the data has wrapped.
1542 1538 * 2) Some of the data has not wrapped and some has wrapped.
1543 1539 * 3) None of the data has wrapped.
1544 1540 */
1545 1541
1546 1542 /* All of the data has wrapped, just adjust the starting address */
1547 1543 if ((uintptr_t)q_addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
1548 1544 new_addr = (uintptr_t)data->qb_ptrs.qp_top +
1549 1545 ((uintptr_t)q_addr - ((uintptr_t)data->qb_ptrs.qp_bottom +
1550 1546 (uintptr_t)1));
1551 1547 ddi_rep_get8(data->qb_buf.bi_handle, dest, (uint8_t *)new_addr,
1552 1548 size, DDI_DEV_AUTOINCR);
1553 1549
1554 1550 /*
1555 1551 * Some of the data has wrapped. Copy the data that hasn't wrapped,
1556 1552 * adjust the address, then copy the rest.
1557 1553 */
1558 1554 } else if (((uintptr_t)q_addr + (uintptr_t)size) >
1559 1555 ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1)) {
1560 1556 /* Copy first half */
1561 1557 new_size = (uint_t)(((uintptr_t)data->qb_ptrs.qp_bottom +
1562 1558 (uintptr_t)1) - (uintptr_t)q_addr);
1563 1559 ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, new_size,
1564 1560 DDI_DEV_AUTOINCR);
1565 1561
1566 1562 /* copy second half */
1567 1563 new_dest = (uintptr_t)dest + (uintptr_t)new_size;
1568 1564 new_size = size - new_size;
1569 1565 new_addr = (uintptr_t)data->qb_ptrs.qp_top;
1570 1566 ddi_rep_get8(data->qb_buf.bi_handle, (uint8_t *)new_dest,
1571 1567 (uint8_t *)new_addr, new_size, DDI_DEV_AUTOINCR);
1572 1568
1573 1569 /* None of the data has wrapped */
1574 1570 } else {
1575 1571 ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, size,
1576 1572 DDI_DEV_AUTOINCR);
1577 1573 }
1578 1574
1579 1575 TNF_PROBE_0_DEBUG(hci1394_q_ar_rep_get8_exit, HCI1394_TNF_HAL_STACK,
1580 1576 "");
1581 1577 }
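
All three cases reduce to the same fold: translate the start address if it has already wrapped, otherwise copy up to the bottom and continue from the top. A user-space model with memcpy() in place of ddi_rep_get8() (names are illustrative, not the driver's):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/*
 * Copy 'size' bytes starting at 'src' out of the circular buffer
 * [top, bottom]; 'src' and/or 'src + size' may run past 'bottom'.
 */
static void
ring_rep_get8(uint8_t *dest, const uint8_t *top, const uint8_t *bottom,
    const uint8_t *src, size_t size)
{
	size_t first;

	if (src > bottom) {
		/* All of the data wrapped; just translate the address. */
		src = top + (src - (bottom + 1));
		memcpy(dest, src, size);
	} else if (src + size > bottom + 1) {
		/* Data straddles the bottom: copy each half separately. */
		first = (size_t)((bottom + 1) - src);
		memcpy(dest, src, first);
		memcpy(dest + first, top, size - first);
	} else {
		/* Nothing wrapped; one contiguous copy. */
		memcpy(dest, src, size);
	}
}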
1582 1578
1583 1579
1584 1580 /*
1585 1581 * hci1394_q_ar_copy_to_mblk()
1586 1582  *    Read a byte stream of data regardless of whether it is contiguous or has
1587 1583  *    partially or fully wrapped to the top buffer.  If the address passed to
1588 1584  *    this routine is past the bottom of the data buffer, or address + size is
1589 1585  *    past the bottom of the data buffer, this routine will automatically wrap
1590 1586  *    back to the top of the Q and look at the correct offset from the top.
1591 1587  *    Copy the data into the mblk provided.  The services layer and the HAL
1592 1588  *    use a private structure (h1394_mblk_t) to keep track of how much of the
1593 1589  *    mblk to receive into, since we may have to break the transfer up into
1594 1590  *    smaller blocks (e.g. a 1MByte block read would go out in 2KByte requests).
1595 1591 */
1596 1592 void
1597 1593 hci1394_q_ar_copy_to_mblk(hci1394_q_handle_t q_handle, uint8_t *addr,
1598 1594 h1394_mblk_t *mblk)
1599 1595 {
1600 1596 uint8_t *new_addr;
1601 1597 uint_t bytes_left;
1602 1598 uint_t length;
1603 1599
1604 1600
1605 1601 ASSERT(q_handle != NULL);
1606 1602 ASSERT(addr != NULL);
1607 1603 ASSERT(mblk != NULL);
1608 1604 TNF_PROBE_0_DEBUG(hci1394_q_copy_to_mblk_enter,
1609 1605 HCI1394_TNF_HAL_STACK, "");
1610 1606
1611 1607 /* We return these variables to the Services Layer when we are done */
1612 1608 mblk->next_offset = mblk->curr_offset;
1613 1609 mblk->next_mblk = mblk->curr_mblk;
1614 1610 bytes_left = mblk->length;
1615 1611
1616 1612 /* the address we copy from will change as we change mblks */
1617 1613 new_addr = addr;
1618 1614
1619 1615 /* do while there are bytes left to copy */
1620 1616 do {
1621 1617 /*
1622 1618 * If the entire data portion of the current block transfer is
1623 1619 * contained within a single mblk.
1624 1620 */
1625 1621 if ((mblk->next_offset + bytes_left) <=
1626 1622 (mblk->next_mblk->b_datap->db_lim)) {
1627 1623 /* Copy the data into the mblk */
1628 1624 hci1394_q_ar_rep_get8(q_handle,
1629 1625 (uint8_t *)mblk->next_offset, new_addr, bytes_left);
1630 1626
1631 1627 /* increment the offset */
1632 1628 mblk->next_offset += bytes_left;
1633 1629 mblk->next_mblk->b_wptr = mblk->next_offset;
1634 1630
1635 1631 /* we have no more bytes to put into the buffer */
1636 1632 bytes_left = 0;
1637 1633
1638 1634 /*
1639 1635 * If our offset is at the end of data in this mblk, go
1640 1636 * to the next mblk.
1641 1637 */
1642 1638 if (mblk->next_offset >=
1643 1639 mblk->next_mblk->b_datap->db_lim) {
1644 1640 mblk->next_mblk = mblk->next_mblk->b_cont;
1645 1641 if (mblk->next_mblk != NULL) {
1646 1642 mblk->next_offset =
1647 1643 mblk->next_mblk->b_wptr;
1648 1644 }
1649 1645 }
1650 1646
1651 1647 /*
1652 1648 * The data portion of the current block transfer is spread
1653 1649 * across two or more mblk's
1654 1650 */
1655 1651 } else {
1656 1652 /* Figure out how much data is in this mblk */
1657 1653 length = mblk->next_mblk->b_datap->db_lim -
1658 1654 mblk->next_offset;
1659 1655
1660 1656 /* Copy the data into the mblk */
1661 1657 hci1394_q_ar_rep_get8(q_handle,
1662 1658 (uint8_t *)mblk->next_offset, new_addr, length);
1663 1659 mblk->next_mblk->b_wptr =
1664 1660 mblk->next_mblk->b_datap->db_lim;
1665 1661
1666 1662 /*
1667 1663 * update the bytes left and address to copy from, go
1668 1664 * to the next mblk.
1669 1665 */
1670 1666 bytes_left = bytes_left - length;
1671 1667 new_addr = (uint8_t *)((uintptr_t)new_addr +
1672 1668 (uintptr_t)length);
1673 1669 mblk->next_mblk = mblk->next_mblk->b_cont;
1674 1670 ASSERT(mblk->next_mblk != NULL);
1675 1671 mblk->next_offset = mblk->next_mblk->b_wptr;
1676 1672 }
1677 1673 } while (bytes_left > 0);
1678 1674
1679 1675 TNF_PROBE_0_DEBUG(hci1394_q_copy_to_mblk_exit,
1680 1676 HCI1394_TNF_HAL_STACK, "");
1681 1677 }
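
Stripped of the STREAMS bookkeeping, the loop above is a scatter copy into a chain of fixed-capacity buffers. The sketch below models that with a plain linked list and a contiguous source (the real code reads the source out of the circular data buffer instead); every name here is an illustrative stand-in:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Minimal stand-in for an mblk chain node. */
typedef struct chain_buf {
	uint8_t *wptr;		/* next free byte (b_wptr analogue) */
	uint8_t *lim;		/* one past the end (db_lim analogue) */
	struct chain_buf *next;	/* next node (b_cont analogue) */
} chain_buf_t;

/*
 * Scatter 'len' source bytes across the chain; the chain is assumed to
 * have enough total room, as the driver asserts for its mblks.
 */
static void
chain_copy(chain_buf_t *node, const uint8_t *src, size_t len)
{
	size_t room;

	while (len > 0) {
		room = (size_t)(node->lim - node->wptr);
		if (room > len)
			room = len;
		memcpy(node->wptr, src, room);
		node->wptr += room;
		src += room;
		len -= room;
		if (len > 0)		/* current node full; advance */
			node = node->next;
	}
}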
1682 1678
1683 1679
1684 1680 /*
1685 1681 * hci1394_q_ar_write_IM()
1686 1682 * Write an IM descriptor into the AR descriptor buffer passed in as qbuf.
1687 1683 * The IO address of the data buffer is passed in io_addr. datasize is the
1688 1684  *    size of the data buffer to receive into.
1689 1685 */
1690 1686 void
1691 1687 hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
1692 1688 uint32_t io_addr, uint_t datasize)
1693 1689 {
1694 1690 hci1394_desc_t *desc;
1695 1691 uint32_t data;
1696 1692 uint32_t command_ptr;
1697 1693
1698 1694
1699 1695 ASSERT(q_handle != NULL);
1700 1696 ASSERT(qbuf != NULL);
1701 1697 TNF_PROBE_0_DEBUG(hci1394_q_ar_write_IM_enter, HCI1394_TNF_HAL_STACK,
1702 1698 "");
1703 1699
1704 1700 /* Make sure enough room for IM */
1705 1701 if ((qbuf->qb_ptrs.qp_current + sizeof (hci1394_desc_t)) >
1706 1702 (qbuf->qb_ptrs.qp_bottom + 1)) {
1707 1703 hci1394_q_next_buf(qbuf);
1708 1704 } else {
1709 1705 /* Store the offset of the top of this descriptor block */
1710 1706 qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
1711 1707 qbuf->qb_ptrs.qp_begin);
1712 1708 }
1713 1709
1714 1710 /* Setup OpenHCI IM Header */
1715 1711 desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
1716 1712 data = DESC_AR_IM | (datasize & DESC_HDR_REQCOUNT_MASK);
1717 1713 ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
1718 1714 ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
1719 1715 ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
1720 1716 ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, datasize &
1721 1717 DESC_ST_RESCOUNT_MASK);
1722 1718
1723 1719 /*
1724 1720 	 * Sync the buffer in case the DMA engine is currently running.  This must
1725 1721 	 * be done before writing the command pointer in the previous descriptor.
1726 1722 */
1727 1723 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1728 1724 qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1729 1725
1730 1726 /*
1731 1727 	 * Set up the command pointer.  This tells the HW where to get the
1732 1728 	 * descriptor we just set up.  This includes the IO address along with
1733 1729 	 * a 4-bit count of 16-byte blocks.  We only wrote one 16-byte block.
1734 1730 */
1735 1731 command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
1736 1732 ].dmac_address + qbuf->qb_ptrs.qp_offset) | 1);
1737 1733
1738 1734 /*
1739 1735 * if we previously setup a descriptor, add this new descriptor into
1740 1736 * the previous descriptor's "next" pointer.
1741 1737 */
1742 1738 if (q_handle->q_previous != NULL) {
1743 1739 ddi_put32(qbuf->qb_buf.bi_handle,
1744 1740 &q_handle->q_previous->branch, command_ptr);
1745 1741 /* Sync buffer again, this gets the command pointer */
1746 1742 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1747 1743 qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1748 1744 }
1749 1745
1750 1746 /* this is the new previous descriptor. Update the current pointer */
1751 1747 q_handle->q_previous = desc;
1752 1748 qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);
1753 1749
1754 1750 /* If the DMA is not running, start it */
1755 1751 if (q_handle->q_dma_running == B_FALSE) {
1756 1752 q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
1757 1753 command_ptr);
1758 1754 q_handle->q_dma_running = B_TRUE;
1759 1755 /* the DMA is running, wake it up */
1760 1756 } else {
1761 1757 q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
1762 1758 }
1763 1759
1764 1760 TNF_PROBE_0_DEBUG(hci1394_q_ar_write_IM_exit, HCI1394_TNF_HAL_STACK,
1765 1761 "");
1766 1762 }
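
The command pointer written above follows the OpenHCI CommandPtr layout: the 16-byte-aligned bus address of the descriptor block occupies the upper bits, and the low 4-bit Z field counts 16-byte blocks, which is why a single hci1394_desc_t yields the "| 1". A hedged sketch of that packing (the function name is illustrative):

#include <assert.h>
#include <stdint.h>

/*
 * Pack a CommandPtr value: 'desc_bus_addr' must be 16-byte aligned so
 * its low nibble is free to carry Z, the number of 16-byte blocks in
 * the descriptor block.
 */
static uint32_t
command_ptr(uint32_t desc_bus_addr, uint32_t z_blocks)
{
	assert((desc_bus_addr & 0xFu) == 0);	/* 16-byte alignment */
	assert(z_blocks <= 0xFu);		/* Z is 4 bits wide */

	return (desc_bus_addr | z_blocks);
}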
↓ open down ↓ |
1675 lines elided |
↑ open up ↑ |