1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License, Version 1.0 only
   6  * (the "License").  You may not use this file except in compliance
   7  * with the License.
   8  *
   9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  10  * or http://www.opensolaris.org/os/licensing.
  11  * See the License for the specific language governing permissions
  12  * and limitations under the License.
  13  *
  14  * When distributing Covered Code, include this CDDL HEADER in each
  15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  16  * If applicable, add the following below this CDDL HEADER, with the
  17  * fields enclosed by brackets "[]" replaced with your own identifying
  18  * information: Portions Copyright [yyyy] [name of copyright owner]
  19  *
  20  * CDDL HEADER END
  21  */
  22 /*
  23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27 #pragma ident   "%Z%%M% %I%     %E% SMI"
  28 
  29 /*
  30  * hci1394_q.c
  31  *    This code decouples some of the OpenHCI async descriptor logic/structures
  32  *    from the async processing.  The goal was to combine as much of the
  33  *    duplicate code as possible for the different type of async transfers
  34  *    without going too overboard.
  35  *
  36  *    There are two parts to the Q, the descriptor buffer and the data buffer.
  37  *    For the most part, data to be transmitted and data which is received go
  38  *    in the data buffers.  The information of where to get the data and put
  39  *    the data reside in the descriptor buffers. There are exceptions to this.
  40  */
  41 
  42 
  43 #include <sys/types.h>
  44 #include <sys/conf.h>
  45 #include <sys/ddi.h>
  46 #include <sys/modctl.h>
  47 #include <sys/stat.h>
  48 #include <sys/sunddi.h>
  49 #include <sys/cmn_err.h>
  50 #include <sys/kmem.h>
  51 #include <sys/note.h>
  52 
  53 #include <sys/1394/adapters/hci1394.h>
  54 
  55 
  56 static int hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size,
  57     uint32_t *io_addr);
  58 static void hci1394_q_unreserve(hci1394_q_buf_t *qbuf);
  59 static void hci1394_q_buf_setup(hci1394_q_buf_t *qbuf);
  60 static void hci1394_q_reset(hci1394_q_handle_t q_handle);
  61 static void hci1394_q_next_buf(hci1394_q_buf_t *qbuf);
  62 
  63 static void hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle,
  64     hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
  65     uint_t hdrsize);
  66 static void hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle,
  67     hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
  68     uint_t hdrsize);
  69 static void hci1394_q_at_write_OL(hci1394_q_handle_t q_handle,
  70     hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, uint32_t io_addr,
  71     uint_t datasize);
  72 static void hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
  73     uint8_t *data, uint_t datasize);
  74 static void hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf,
  75     hci1394_q_cmd_t *cmd, h1394_mblk_t *mblk);
  76 
  77 static void hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle,
  78     hci1394_q_buf_t *qbuf, uint32_t io_addr, uint_t datasize);
  79 
  80 _NOTE(SCHEME_PROTECTS_DATA("unique", msgb))
  81 
  82 /*
  83  * hci1394_q_init()
  84  *    Initialize a Q.  A Q consists of a descriptor buffer and a data buffer and
  85  *    can be either an AT or AR Q. hci1394_q_init() returns a handle which
  86  *    should be used for the reset of the hci1394_q_* calls.
  87  */
  88 int
  89 hci1394_q_init(hci1394_drvinfo_t *drvinfo,
  90     hci1394_ohci_handle_t ohci_handle, hci1394_q_info_t *qinfo,
  91     hci1394_q_handle_t *q_handle)
  92 {
  93         hci1394_q_buf_t *desc;
  94         hci1394_q_buf_t *data;
  95         hci1394_buf_parms_t parms;
  96         hci1394_q_t *q;
  97         int status;
  98         int index;
  99 
 100 
 101         ASSERT(drvinfo != NULL);
 102         ASSERT(qinfo != NULL);
 103         ASSERT(q_handle != NULL);
 104         TNF_PROBE_0_DEBUG(hci1394_q_init_enter, HCI1394_TNF_HAL_STACK, "");
 105 
 106         /*
 107          * allocate the memory to track this Q.  Initialize the internal Q
 108          * structure.
 109          */
 110         q = kmem_alloc(sizeof (hci1394_q_t), KM_SLEEP);
 111         q->q_drvinfo = drvinfo;
 112         q->q_info = *qinfo;
 113         q->q_ohci = ohci_handle;
 114         mutex_init(&q->q_mutex, NULL, MUTEX_DRIVER, drvinfo->di_iblock_cookie);
 115         desc = &q->q_desc;
 116         data = &q->q_data;
 117 
 118         /*
 119          * Allocate the Descriptor buffer.
 120          *
 121          * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
 122          * after we have tested the multiple cookie code on x86.
 123          */
 124         parms.bp_length = qinfo->qi_desc_size;
 125         parms.bp_max_cookies = 1;
 126         parms.bp_alignment = 16;
 127         status = hci1394_buf_alloc(drvinfo, &parms, &desc->qb_buf,
 128             &desc->qb_buf_handle);
 129         if (status != DDI_SUCCESS) {
 130                 mutex_destroy(&q->q_mutex);
 131                 kmem_free(q, sizeof (hci1394_q_t));
 132                 *q_handle = NULL;
 133                 TNF_PROBE_0(hci1394_q_init_bae_fail, HCI1394_TNF_HAL_ERROR, "");
 134                 TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK,
 135                     "");
 136                 return (DDI_FAILURE);
 137         }
 138 
 139         /* Copy in buffer cookies into our local cookie array */
 140         desc->qb_cookie[0] = desc->qb_buf.bi_cookie;
 141         for (index = 1; index < desc->qb_buf.bi_cookie_count; index++) {
 142                 ddi_dma_nextcookie(desc->qb_buf.bi_dma_handle,
 143                     &desc->qb_buf.bi_cookie);
 144                 desc->qb_cookie[index] = desc->qb_buf.bi_cookie;
 145         }
 146 
 147         /*
 148          * Allocate the Data buffer.
 149          *
 150          * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
 151          * after we have tested the multiple cookie code on x86.
 152          */
 153         parms.bp_length = qinfo->qi_data_size;
 154         parms.bp_max_cookies = 1;
 155         parms.bp_alignment = 16;
 156         status = hci1394_buf_alloc(drvinfo, &parms, &data->qb_buf,
 157             &data->qb_buf_handle);
 158         if (status != DDI_SUCCESS) {
 159                 /* Free the allocated Descriptor buffer */
 160                 hci1394_buf_free(&desc->qb_buf_handle);
 161 
 162                 mutex_destroy(&q->q_mutex);
 163                 kmem_free(q, sizeof (hci1394_q_t));
 164                 *q_handle = NULL;
 165                 TNF_PROBE_0(hci1394_q_init_baa_fail, HCI1394_TNF_HAL_ERROR, "");
 166                 TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK,
 167                     "");
 168                 return (DDI_FAILURE);
 169         }
 170 
 171         /*
 172          * We must have at least 2 ARQ data buffers, If we only have one, we
 173          * will artificially create 2. We must have 2 so that we always have a
 174          * descriptor with free data space to write AR data to. When one is
 175          * empty, it will take us a bit to get a new descriptor back into the
 176          * chain.
 177          */
 178         if ((qinfo->qi_mode == HCI1394_ARQ) &&
 179             (data->qb_buf.bi_cookie_count == 1)) {
 180                 data->qb_buf.bi_cookie_count = 2;
 181                 data->qb_cookie[0] = data->qb_buf.bi_cookie;
 182                 data->qb_cookie[0].dmac_size /= 2;
 183                 data->qb_cookie[1] = data->qb_cookie[0];
 184                 data->qb_cookie[1].dmac_laddress =
 185                     data->qb_cookie[0].dmac_laddress +
 186                     data->qb_cookie[0].dmac_size;
 187                 data->qb_cookie[1].dmac_address =
 188                     data->qb_cookie[0].dmac_address +
 189                     data->qb_cookie[0].dmac_size;
 190 
 191         /* We have more than 1 cookie or we are an AT Q */
 192         } else {
 193                 /* Copy in buffer cookies into our local cookie array */
 194                 data->qb_cookie[0] = data->qb_buf.bi_cookie;
 195                 for (index = 1; index < data->qb_buf.bi_cookie_count; index++) {
 196                         ddi_dma_nextcookie(data->qb_buf.bi_dma_handle,
 197                             &data->qb_buf.bi_cookie);
 198                         data->qb_cookie[index] = data->qb_buf.bi_cookie;
 199                 }
 200         }
 201 
 202         /* The top and bottom of the Q are only set once */
 203         desc->qb_ptrs.qp_top = desc->qb_buf.bi_kaddr;
 204         desc->qb_ptrs.qp_bottom = desc->qb_buf.bi_kaddr +
 205             desc->qb_buf.bi_real_length - 1;
 206         data->qb_ptrs.qp_top = data->qb_buf.bi_kaddr;
 207         data->qb_ptrs.qp_bottom = data->qb_buf.bi_kaddr +
 208             data->qb_buf.bi_real_length - 1;
 209 
 210         /*
 211          * reset the Q pointers to their original settings.  Setup IM
 212          * descriptors if this is an AR Q.
 213          */
 214         hci1394_q_reset(q);
 215 
 216         /* if this is an AT Q, create a queued list for the AT descriptors */
 217         if (qinfo->qi_mode == HCI1394_ATQ) {
 218                 hci1394_tlist_init(drvinfo, NULL, &q->q_queued_list);
 219         }
 220 
 221         *q_handle = q;
 222 
 223         TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK, "");
 224 
 225         return (DDI_SUCCESS);
 226 }
 227 
 228 
 229 /*
 230  * hci1394_q_fini()
 231  *    Cleanup after a successful hci1394_q_init(). Notice that a pointer to the
 232  *    handle is used for the parameter.  fini() will set your handle to NULL
 233  *    before returning.
 234  */
 235 void
 236 hci1394_q_fini(hci1394_q_handle_t *q_handle)
 237 {
 238         hci1394_q_t *q;
 239 
 240         ASSERT(q_handle != NULL);
 241         TNF_PROBE_0_DEBUG(hci1394_q_fini_enter, HCI1394_TNF_HAL_STACK, "");
 242 
 243         q = *q_handle;
 244         if (q->q_info.qi_mode == HCI1394_ATQ) {
 245                 hci1394_tlist_fini(&q->q_queued_list);
 246         }
 247         mutex_destroy(&q->q_mutex);
 248         hci1394_buf_free(&q->q_desc.qb_buf_handle);
 249         hci1394_buf_free(&q->q_data.qb_buf_handle);
 250         kmem_free(q, sizeof (hci1394_q_t));
 251         *q_handle = NULL;
 252 
 253         TNF_PROBE_0_DEBUG(hci1394_q_fini_exit, HCI1394_TNF_HAL_STACK, "");
 254 }
 255 
 256 
 257 /*
 258  * hci1394_q_buf_setup()
 259  *    Initialization of buffer pointers which are present in both the descriptor
 260  *    buffer and data buffer (No reason to duplicate the code)
 261  */
 262 static void
 263 hci1394_q_buf_setup(hci1394_q_buf_t *qbuf)
 264 {
 265         ASSERT(qbuf != NULL);
 266         TNF_PROBE_0_DEBUG(hci1394_q_buf_setup_enter, HCI1394_TNF_HAL_STACK, "");
 267 
 268         /* start with the first cookie */
 269         qbuf->qb_ptrs.qp_current_buf = 0;
 270         qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
 271         qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
 272             qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
 273         qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
 274         qbuf->qb_ptrs.qp_offset = 0;
 275 
 276         /*
 277          * The free_buf and free pointer will change everytime an ACK (of some
 278          * type) is processed.  Free is the last byte in the last cookie.
 279          */
 280         qbuf->qb_ptrs.qp_free_buf = qbuf->qb_buf.bi_cookie_count - 1;
 281         qbuf->qb_ptrs.qp_free = qbuf->qb_ptrs.qp_bottom;
 282 
 283         /*
 284          * Start with no space to write descriptors.  We first need to call
 285          * hci1394_q_reserve() before calling hci1394_q_at_write_O*().
 286          */
 287         qbuf->qb_ptrs.qp_resv_size = 0;
 288 
 289         TNF_PROBE_0_DEBUG(hci1394_q_buf_setup_exit, HCI1394_TNF_HAL_STACK, "");
 290 }
 291 
 292 
 293 /*
 294  * hci1394_q_reset()
 295  *    Resets the buffers to an initial state.  This should be called during
 296  *    attach and resume.
 297  */
 298 static void
 299 hci1394_q_reset(hci1394_q_handle_t q_handle)
 300 {
 301         hci1394_q_buf_t *desc;
 302         hci1394_q_buf_t *data;
 303         int index;
 304 
 305         ASSERT(q_handle != NULL);
 306         TNF_PROBE_0_DEBUG(hci1394_q_reset_enter, HCI1394_TNF_HAL_STACK, "");
 307 
 308         mutex_enter(&q_handle->q_mutex);
 309         desc = &q_handle->q_desc;
 310         data = &q_handle->q_data;
 311 
 312         hci1394_q_buf_setup(desc);
 313         hci1394_q_buf_setup(data);
 314 
 315         /* DMA starts off stopped, no previous descriptor to link from */
 316         q_handle->q_dma_running = B_FALSE;
 317         q_handle->q_block_cnt = 0;
 318         q_handle->q_previous = NULL;
 319 
 320         /* If this is an AR Q, setup IM's for the data buffers that we have */
 321         if (q_handle->q_info.qi_mode == HCI1394_ARQ) {
 322                 /*
 323                  * This points to where to find the first IM descriptor.  Since
 324                  * we just reset the pointers in hci1394_q_buf_setup(), the
 325                  * first IM we write below will be found at the top of the Q.
 326                  */
 327                 q_handle->q_head = desc->qb_ptrs.qp_top;
 328 
 329                 for (index = 0; index < data->qb_buf.bi_cookie_count; index++) {
 330                         hci1394_q_ar_write_IM(q_handle, desc,
 331                             data->qb_cookie[index].dmac_address,
 332                             data->qb_cookie[index].dmac_size);
 333                 }
 334 
 335                 /*
 336                  * The space left in the current IM is the size of the buffer.
 337                  * The current buffer is the first buffer added to the AR Q.
 338                  */
 339                 q_handle->q_space_left = data->qb_cookie[0].dmac_size;
 340         }
 341 
 342         mutex_exit(&q_handle->q_mutex);
 343         TNF_PROBE_0_DEBUG(hci1394_q_reset_exit, HCI1394_TNF_HAL_STACK, "");
 344 }
 345 
 346 
 347 /*
 348  * hci1394_q_resume()
 349  *    This is called during a resume (after a successful suspend). Currently
 350  *    we only call reset.  Since this is not a time critical function, we will
 351  *    leave this as a separate function to increase readability.
 352  */
 353 void
 354 hci1394_q_resume(hci1394_q_handle_t q_handle)
 355 {
 356         ASSERT(q_handle != NULL);
 357         TNF_PROBE_0_DEBUG(hci1394_q_resume_enter, HCI1394_TNF_HAL_STACK, "");
 358         hci1394_q_reset(q_handle);
 359         TNF_PROBE_0_DEBUG(hci1394_q_resume_exit, HCI1394_TNF_HAL_STACK, "");
 360 }
 361 
 362 
 363 /*
 364  * hci1394_q_stop()
 365  *    This call informs us that a DMA engine has been stopped.  It does not
 366  *    perform the actual stop. We need to know this so that when we add a
 367  *    new descriptor, we do a start instead of a wake.
 368  */
 369 void
 370 hci1394_q_stop(hci1394_q_handle_t q_handle)
 371 {
 372         ASSERT(q_handle != NULL);
 373         TNF_PROBE_0_DEBUG(hci1394_q_stop_enter, HCI1394_TNF_HAL_STACK, "");
 374         mutex_enter(&q_handle->q_mutex);
 375         q_handle->q_dma_running = B_FALSE;
 376         mutex_exit(&q_handle->q_mutex);
 377         TNF_PROBE_0_DEBUG(hci1394_q_stop_exit, HCI1394_TNF_HAL_STACK, "");
 378 }
 379 
 380 
 381 /*
 382  * hci1394_q_reserve()
 383  *    Reserve space in the AT descriptor or data buffer. This ensures that we
 384  *    can get a contiguous buffer. Descriptors have to be in a contiguous
 385  *    buffer. Data does not have to be in a contiguous buffer but we do this to
 386  *    reduce complexity. For systems with small page sizes (e.g. x86), this
 387  *    could result in inefficient use of the data buffers when sending large
 388  *    data blocks (this only applies to non-physical block write ATREQs and
 389  *    block read ATRESP). Since it looks like most protocols that use large data
 390  *    blocks (like SPB-2), use physical transfers to do this (due to their
 391  *    efficiency), this will probably not be a real world problem.  If it turns
 392  *    out to be a problem, the options are to force a single cookie for the data
 393  *    buffer, allow multiple cookies and have a larger data space, or change the
 394  *    data code to use a OMI, OM, OL descriptor sequence (instead of OMI, OL).
 395  */
 396 static int
 397 hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size, uint32_t *io_addr)
 398 {
 399         uint_t aligned_size;
 400 
 401 
 402         ASSERT(qbuf != NULL);
 403         TNF_PROBE_0_DEBUG(hci1394_q_reserve_enter, HCI1394_TNF_HAL_STACK, "");
 404 
 405         /* Save backup of pointers in case we have to unreserve */
 406         qbuf->qb_backup_ptrs = qbuf->qb_ptrs;
 407 
 408         /*
 409          * Make sure all alloc's are quadlet aligned. The data doesn't have to
 410          * be, so we will force it to be.
 411          */
 412         aligned_size = HCI1394_ALIGN_QUAD(size);
 413 
 414         /*
 415          * if the free pointer is in the current buffer and the free pointer
 416          * is below the current pointer (i.e. has not wrapped around)
 417          */
 418         if ((qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) &&
 419             (qbuf->qb_ptrs.qp_free >= qbuf->qb_ptrs.qp_current)) {
 420                 /*
 421                  * The free pointer is in this buffer below the current pointer.
 422                  * Check to see if we have enough free space left.
 423                  */
 424                 if ((qbuf->qb_ptrs.qp_current + aligned_size) <=
 425                     qbuf->qb_ptrs.qp_free) {
 426                         /* Setup up our reserved size, return the IO address */
 427                         qbuf->qb_ptrs.qp_resv_size = aligned_size;
 428                         *io_addr = (uint32_t)(qbuf->qb_cookie[
 429                             qbuf->qb_ptrs.qp_current_buf].dmac_address +
 430                             qbuf->qb_ptrs.qp_offset);
 431 
 432                 /*
 433                  * The free pointer is in this buffer below the current pointer.
 434                  * We do not have enough free space for the alloc. Return
 435                  * failure.
 436                  */
 437                 } else {
 438                         qbuf->qb_ptrs.qp_resv_size = 0;
 439                         TNF_PROBE_0(hci1394_q_reserve_ns_fail,
 440                             HCI1394_TNF_HAL_ERROR, "");
 441                         TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit,
 442                             HCI1394_TNF_HAL_STACK, "");
 443                         return (DDI_FAILURE);
 444                 }
 445 
 446         /*
 447          * If there is not enough room to fit in the current buffer (not
 448          * including wrap around), we will go to the next buffer and check
 449          * there. If we only have one buffer (i.e. one cookie), we will end up
 450          * staying at the current buffer and wrapping the address back to the
 451          * top.
 452          */
 453         } else if ((qbuf->qb_ptrs.qp_current + aligned_size) >
 454             qbuf->qb_ptrs.qp_end) {
 455                 /* Go to the next buffer (or the top of ours for one cookie) */
 456                 hci1394_q_next_buf(qbuf);
 457 
 458                 /* If the free pointer is in the new current buffer */
 459                 if (qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) {
 460                         /*
 461                          * The free pointer is in this buffer. If we do not have
 462                          * enough free space for the alloc. Return failure.
 463                          */
 464                         if ((qbuf->qb_ptrs.qp_current + aligned_size) >
 465                             qbuf->qb_ptrs.qp_free) {
 466                                 qbuf->qb_ptrs.qp_resv_size = 0;
 467                                 TNF_PROBE_0(hci1394_q_reserve_ns_fail,
 468                                     HCI1394_TNF_HAL_ERROR, "");
 469                                 TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit,
 470                                     HCI1394_TNF_HAL_STACK, "");
 471                                 return (DDI_FAILURE);
 472                         /*
 473                          * The free pointer is in this buffer. We have enough
 474                          * free space left.
 475                          */
 476                         } else {
 477                                 /*
 478                                  * Setup up our reserved size, return the IO
 479                                  * address
 480                                  */
 481                                 qbuf->qb_ptrs.qp_resv_size = aligned_size;
 482                                 *io_addr = (uint32_t)(qbuf->qb_cookie[
 483                                     qbuf->qb_ptrs.qp_current_buf].dmac_address +
 484                                     qbuf->qb_ptrs.qp_offset);
 485                         }
 486 
 487                 /*
 488                  * We switched buffers and the free pointer is still in another
 489                  * buffer. We have sufficient space in this buffer for the alloc
 490                  * after changing buffers.
 491                  */
 492                 } else {
 493                         /* Setup up our reserved size, return the IO address */
 494                         qbuf->qb_ptrs.qp_resv_size = aligned_size;
 495                         *io_addr = (uint32_t)(qbuf->qb_cookie[
 496                             qbuf->qb_ptrs.qp_current_buf].dmac_address +
 497                             qbuf->qb_ptrs.qp_offset);
 498                 }
 499         /*
 500          * The free pointer is in another buffer. We have sufficient space in
 501          * this buffer for the alloc.
 502          */
 503         } else {
 504                 /* Setup up our reserved size, return the IO address */
 505                 qbuf->qb_ptrs.qp_resv_size = aligned_size;
 506                 *io_addr = (uint32_t)(qbuf->qb_cookie[
 507                     qbuf->qb_ptrs.qp_current_buf].dmac_address +
 508                     qbuf->qb_ptrs.qp_offset);
 509         }
 510 
 511         TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit, HCI1394_TNF_HAL_STACK, "");
 512 
 513         return (DDI_SUCCESS);
 514 }
 515 
 516 /*
 517  * hci1394_q_unreserve()
 518  *    Set the buffer pointer to what they were before hci1394_reserve().  This
 519  *    will be called when we encounter errors during hci1394_q_at*().
 520  */
 521 static void
 522 hci1394_q_unreserve(hci1394_q_buf_t *qbuf)
 523 {
 524         ASSERT(qbuf != NULL);
 525         TNF_PROBE_0_DEBUG(hci1394_q_unreserve_enter, HCI1394_TNF_HAL_STACK, "");
 526 
 527         /* Go back to pointer setting before the reserve */
 528         qbuf->qb_ptrs = qbuf->qb_backup_ptrs;
 529 
 530         TNF_PROBE_0_DEBUG(hci1394_q_unreserve_exit, HCI1394_TNF_HAL_STACK, "");
 531 }
 532 
 533 
 534 /*
 535  * hci1394_q_next_buf()
 536  *    Set our current buffer to the next cookie.  If we only have one cookie, we
 537  *    will go back to the top of our buffer.
 538  */
 539 void
 540 hci1394_q_next_buf(hci1394_q_buf_t *qbuf)
 541 {
 542         ASSERT(qbuf != NULL);
 543         TNF_PROBE_0_DEBUG(hci1394_q_next_buf_enter, HCI1394_TNF_HAL_STACK, "");
 544 
 545         /*
 546          * go to the next cookie, if we are >= the cookie count, go back to the
 547          * first cookie.
 548          */
 549         qbuf->qb_ptrs.qp_current_buf++;
 550         if (qbuf->qb_ptrs.qp_current_buf >= qbuf->qb_buf.bi_cookie_count) {
 551                 qbuf->qb_ptrs.qp_current_buf = 0;
 552         }
 553 
 554         /* adjust the begin, end, current, and offset pointers */
 555         qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_end + 1;
 556         if (qbuf->qb_ptrs.qp_begin > qbuf->qb_ptrs.qp_bottom) {
 557                 qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
 558         }
 559         qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
 560             qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
 561         qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
 562         qbuf->qb_ptrs.qp_offset = 0;
 563 
 564         TNF_PROBE_0_DEBUG(hci1394_q_next_buf_exit, HCI1394_TNF_HAL_STACK, "");
 565 }
 566 
 567 
 568 /*
 569  * hci1394_q_at()
 570  *    Place an AT command that does NOT need the data buffer into the DMA chain.
 571  *    Some examples of this are quadlet read/write, PHY packets, ATREQ Block
 572  *    Read, and ATRESP block write. result is only valid on failure.
 573  */
 574 int
 575 hci1394_q_at(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
 576     hci1394_basic_pkt_t *hdr, uint_t hdrsize, int *result)
 577 {
 578         int status;
 579         uint32_t ioaddr;
 580 
 581 
 582         ASSERT(q_handle != NULL);
 583         ASSERT(cmd != NULL);
 584         ASSERT(hdr != NULL);
 585         TNF_PROBE_0_DEBUG(hci1394_q_at_enter, HCI1394_TNF_HAL_STACK, "");
 586 
 587         mutex_enter(&q_handle->q_mutex);
 588 
 589         /*
 590          * Check the HAL state and generation when the AT Q is locked.  This
 591          * will make sure that we get all the commands when we flush the Q's
 592          * during a reset or shutdown.
 593          */
 594         if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
 595             (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
 596             cmd->qc_generation)) {
 597                 *result = H1394_STATUS_INVALID_BUSGEN;
 598                 mutex_exit(&q_handle->q_mutex);
 599                 TNF_PROBE_0(hci1394_q_at_st_fail, HCI1394_TNF_HAL_ERROR, "");
 600                 TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK,
 601                     "");
 602                 return (DDI_FAILURE);
 603         }
 604 
 605         /* save away the argument to pass up when this command completes */
 606         cmd->qc_node.tln_addr = cmd;
 607 
 608         /* we have not written any 16 byte blocks to the descriptor yet */
 609         q_handle->q_block_cnt = 0;
 610 
 611         /* Reserve space for an OLI in the descriptor buffer */
 612         status = hci1394_q_reserve(&q_handle->q_desc,
 613             sizeof (hci1394_desc_imm_t), &ioaddr);
 614         if (status != DDI_SUCCESS) {
 615                 *result = H1394_STATUS_NOMORE_SPACE;
 616                 mutex_exit(&q_handle->q_mutex);
 617                 TNF_PROBE_0(hci1394_q_at_qre_fail, HCI1394_TNF_HAL_ERROR, "");
 618                 TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK,
 619                     "");
 620                 return (DDI_FAILURE);
 621         }
 622 
 623         /* write the OLI to the descriptor buffer */
 624         hci1394_q_at_write_OLI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
 625 
 626         /* Add the AT command to the queued list */
 627         hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
 628 
 629         mutex_exit(&q_handle->q_mutex);
 630         TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK, "");
 631 
 632         return (DDI_SUCCESS);
 633 }
 634 
 635 
 636 /*
 637  * XXX - NOTE: POSSIBLE FUTURE OPTIMIZATION
 638  *    ATREQ Block read and write's that go through software are not very
 639  *    efficient (one of the reasons to use physical space). A copy is forced
 640  *    on all block reads due to the design of OpenHCI. Writes do not have this
 641  *    same restriction.  This design forces a copy for writes too (we always
 642  *    copy into a data buffer before sending). There are many reasons for this
 643  *    including complexity reduction.  There is a data size threshold where a
 644  *    copy is more expensive than mapping the data buffer address (or worse
 645  *    case a big enough difference where it pays to do it). However, we move
 646  *    block data around in mblks which means that our data may be scattered
 647  *    over many buffers.  This adds to the complexity of mapping and setting
 648  *    up the OpenHCI descriptors.
 649  *
 650  *    If someone really needs a speedup on block write ATREQs, my recommendation
 651  *    would be to add an additional command type at the target interface for a
 652  *    fast block write.  The target driver would pass a mapped io addr to use.
 653  *    A function like "hci1394_q_at_with_ioaddr()" could be created which would
 654  *    be almost an exact copy of hci1394_q_at_with_data() without the
 655  *    hci1394_q_reserve() and hci1394_q_at_rep_put8() for the data buffer.
 656  */
 657 
 658 
 659 /*
 660  * hci1394_q_at_with_data()
 661  *    Place an AT command that does need the data buffer into the DMA chain.
 662  *    The data is passed as a pointer to a kernel virtual address. An example of
 663  *    this is the lock operations. result is only valid on failure.
 664  */
 665 int
 666 hci1394_q_at_with_data(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
 667     hci1394_basic_pkt_t *hdr, uint_t hdrsize, uint8_t *data, uint_t datasize,
 668     int *result)
 669 {
 670         uint32_t desc_ioaddr;
 671         uint32_t data_ioaddr;
 672         int status;
 673 
 674 
 675         ASSERT(q_handle != NULL);
 676         ASSERT(cmd != NULL);
 677         ASSERT(hdr != NULL);
 678         ASSERT(data != NULL);
 679         TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_enter, HCI1394_TNF_HAL_STACK,
 680             "");
 681 
 682         mutex_enter(&q_handle->q_mutex);
 683 
 684         /*
 685          * Check the HAL state and generation when the AT Q is locked.  This
 686          * will make sure that we get all the commands when we flush the Q's
 687          * during a reset or shutdown.
 688          */
 689         if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
 690             (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
 691             cmd->qc_generation)) {
 692                 *result = H1394_STATUS_INVALID_BUSGEN;
 693                 mutex_exit(&q_handle->q_mutex);
 694                 TNF_PROBE_0_DEBUG(hci1394_q_at_wd_st_fail,
 695                     HCI1394_TNF_HAL_STACK, "");
 696                 return (DDI_FAILURE);
 697         }
 698 
 699         /* save away the argument to pass up when this command completes */
 700         cmd->qc_node.tln_addr = cmd;
 701 
 702         /* we have not written any 16 byte blocks to the descriptor yet */
 703         q_handle->q_block_cnt = 0;
 704 
 705         /* Reserve space for an OMI and OL in the descriptor buffer */
 706         status = hci1394_q_reserve(&q_handle->q_desc,
 707             (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
 708             &desc_ioaddr);
 709         if (status != DDI_SUCCESS) {
 710                 *result = H1394_STATUS_NOMORE_SPACE;
 711                 mutex_exit(&q_handle->q_mutex);
 712                 TNF_PROBE_0(hci1394_q_at_wd_qre_fail,
 713                     HCI1394_TNF_HAL_ERROR, "");
 714                 TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit,
 715                     HCI1394_TNF_HAL_STACK, "");
 716                 return (DDI_FAILURE);
 717         }
 718 
 719         /* allocate space for data in the data buffer */
 720         status = hci1394_q_reserve(&q_handle->q_data, datasize, &data_ioaddr);
 721         if (status != DDI_SUCCESS) {
 722                 *result = H1394_STATUS_NOMORE_SPACE;
 723                 hci1394_q_unreserve(&q_handle->q_desc);
 724                 mutex_exit(&q_handle->q_mutex);
 725                 TNF_PROBE_0(hci1394_q_at_wd_qra_fail,
 726                     HCI1394_TNF_HAL_ERROR, "");
 727                 TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit,
 728                     HCI1394_TNF_HAL_STACK, "");
 729                 return (DDI_FAILURE);
 730         }
 731 
 732         /* Copy data into data buffer */
 733         hci1394_q_at_rep_put8(&q_handle->q_data, cmd, data, datasize);
 734 
 735         /* write the OMI to the descriptor buffer */
 736         hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
 737 
 738         /* write the OL to the descriptor buffer */
 739         hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
 740             datasize);
 741 
 742         /* Add the AT command to the queued list */
 743         hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
 744 
 745         mutex_exit(&q_handle->q_mutex);
 746         TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit, HCI1394_TNF_HAL_STACK,
 747             "");
 748 
 749         return (DDI_SUCCESS);
 750 }
 751 
 752 
 753 /*
 754  * hci1394_q_at_with_mblk()
 755  *    Place an AT command that does need the data buffer into the DMA chain.
 756  *    The data is passed in mblk_t(s). Examples of this are a block write
 757  *    ATREQ and a block read ATRESP. The services layer and the hal use a
 758  *    private structure (h1394_mblk_t) to keep track of how much of the mblk
 759  *    to send since we may have to break the transfer up into smaller blocks.
 760  *    (i.e. a 1MByte block write would go out in 2KByte chunks. result is only
 761  *    valid on failure.
 762  */
 763 int
 764 hci1394_q_at_with_mblk(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
 765     hci1394_basic_pkt_t *hdr, uint_t hdrsize, h1394_mblk_t *mblk, int *result)
 766 {
 767         uint32_t desc_ioaddr;
 768         uint32_t data_ioaddr;
 769         int status;
 770 
 771 
 772         ASSERT(q_handle != NULL);
 773         ASSERT(cmd != NULL);
 774         ASSERT(hdr != NULL);
 775         ASSERT(mblk != NULL);
 776         TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_enter, HCI1394_TNF_HAL_STACK,
 777             "");
 778 
 779         mutex_enter(&q_handle->q_mutex);
 780 
 781         /*
 782          * Check the HAL state and generation when the AT Q is locked.  This
 783          * will make sure that we get all the commands when we flush the Q's
 784          * during a reset or shutdown.
 785          */
 786         if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
 787             (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
 788             cmd->qc_generation)) {
 789                 *result = H1394_STATUS_INVALID_BUSGEN;
 790                 mutex_exit(&q_handle->q_mutex);
 791                 TNF_PROBE_0_DEBUG(hci1394_q_at_wm_st_fail,
 792                     HCI1394_TNF_HAL_STACK, "");
 793                 return (DDI_FAILURE);
 794         }
 795 
 796         /* save away the argument to pass up when this command completes */
 797         cmd->qc_node.tln_addr = cmd;
 798 
 799         /* we have not written any 16 byte blocks to the descriptor yet */
 800         q_handle->q_block_cnt = 0;
 801 
 802         /* Reserve space for an OMI and OL in the descriptor buffer */
 803         status = hci1394_q_reserve(&q_handle->q_desc,
 804             (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
 805             &desc_ioaddr);
 806         if (status != DDI_SUCCESS) {
 807                 *result = H1394_STATUS_NOMORE_SPACE;
 808                 mutex_exit(&q_handle->q_mutex);
 809                 TNF_PROBE_0(hci1394_q_at_wm_qre_fail,
 810                     HCI1394_TNF_HAL_ERROR, "");
 811                 TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit,
 812                     HCI1394_TNF_HAL_STACK, "");
 813                 return (DDI_FAILURE);
 814         }
 815 
 816         /* Reserve space for data in the data buffer */
 817         status = hci1394_q_reserve(&q_handle->q_data, mblk->length,
 818             &data_ioaddr);
 819         if (status != DDI_SUCCESS) {
 820                 *result = H1394_STATUS_NOMORE_SPACE;
 821                 hci1394_q_unreserve(&q_handle->q_desc);
 822                 mutex_exit(&q_handle->q_mutex);
 823                 TNF_PROBE_0(hci1394_q_at_wm_qra_fail,
 824                     HCI1394_TNF_HAL_ERROR, "");
 825                 TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit,
 826                     HCI1394_TNF_HAL_STACK, "");
 827                 return (DDI_FAILURE);
 828         }
 829 
 830         /* Copy mblk data into data buffer */
 831         hci1394_q_at_copy_from_mblk(&q_handle->q_data, cmd, mblk);
 832 
 833         /* write the OMI to the descriptor buffer */
 834         hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
 835 
 836         /* write the OL to the descriptor buffer */
 837         hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
 838             mblk->length);
 839 
 840         /* Add the AT command to the queued list */
 841         hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
 842 
 843         mutex_exit(&q_handle->q_mutex);
 844         TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit, HCI1394_TNF_HAL_STACK,
 845             "");
 846 
 847         return (DDI_SUCCESS);
 848 }
 849 
 850 
 851 /*
 852  * hci1394_q_at_next()
 853  *    Return the next completed AT command in cmd.  If flush_q is true, we will
 854  *    return the command regardless if it finished or not.  We will flush
 855  *    during bus reset processing, shutdown, and detach.
 856  */
 857 void
 858 hci1394_q_at_next(hci1394_q_handle_t q_handle, boolean_t flush_q,
 859     hci1394_q_cmd_t **cmd)
 860 {
 861         hci1394_q_buf_t *desc;
 862         hci1394_q_buf_t *data;
 863         hci1394_tlist_node_t *node;
 864         uint32_t cmd_status;
 865 
 866 
 867         ASSERT(q_handle != NULL);
 868         ASSERT(cmd != NULL);
 869         TNF_PROBE_0_DEBUG(hci1394_q_at_next_enter, HCI1394_TNF_HAL_STACK, "");
 870 
 871         mutex_enter(&q_handle->q_mutex);
 872 
 873         desc = &q_handle->q_desc;
 874         data = &q_handle->q_data;
 875 
 876         /* Sync descriptor buffer */
 877         (void) ddi_dma_sync(desc->qb_buf.bi_dma_handle, 0,
 878             desc->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
 879 
 880         /* Look at the top cmd on the queued list (without removing it) */
 881         hci1394_tlist_peek(q_handle->q_queued_list, &node);
 882         if (node == NULL) {
 883                 /* There are no more commands left on the queued list */
 884                 *cmd = NULL;
 885                 mutex_exit(&q_handle->q_mutex);
 886                 TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit, HCI1394_TNF_HAL_STACK,
 887                     "");
 888                 return;
 889         }
 890 
 891         /*
 892          * There is a command on the list, read its status and timestamp when
 893          * it was sent
 894          */
 895         *cmd = (hci1394_q_cmd_t *)node->tln_addr;
 896         cmd_status = ddi_get32(desc->qb_buf.bi_handle, (*cmd)->qc_status_addr);
 897         (*cmd)->qc_timestamp = cmd_status & DESC_ST_TIMESTAMP_MASK;
 898         cmd_status = HCI1394_DESC_EVT_GET(cmd_status);
 899 
 900         /*
 901          * If we are flushing the q (e.g. due to a bus reset), we will return
 902          * the command regardless of its completion status. If we are not
 903          * flushing the Q and we do not have status on the command (e.g. status
 904          * = 0), we are done with this Q for now.
 905          */
 906         if (flush_q == B_FALSE) {
 907                 if (cmd_status == 0) {
 908                         *cmd = NULL;
 909                         mutex_exit(&q_handle->q_mutex);
 910                         TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit,
 911                             HCI1394_TNF_HAL_STACK, "");
 912                         return;
 913                 }
 914         }
 915 
 916         /*
 917          * The command completed, remove it from the queued list. There is not
 918          * a race condition to delete the node in the list here.  This is the
 919          * only place the node will be deleted so we do not need to check the
 920          * return status.
 921          */
 922         (void) hci1394_tlist_delete(q_handle->q_queued_list, node);
 923 
 924         /*
 925          * Free the space used by the command in the descriptor and data
 926          * buffers.
 927          */
 928         desc->qb_ptrs.qp_free_buf = (*cmd)->qc_descriptor_buf;
 929         desc->qb_ptrs.qp_free = (*cmd)->qc_descriptor_end;
 930         if ((*cmd)->qc_data_used == B_TRUE) {
 931                 data->qb_ptrs.qp_free_buf = (*cmd)->qc_data_buf;
 932                 data->qb_ptrs.qp_free = (*cmd)->qc_data_end;
 933         }
 934 
 935         /* return command status */
 936         (*cmd)->qc_status = cmd_status;
 937 
 938         mutex_exit(&q_handle->q_mutex);
 939         TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit, HCI1394_TNF_HAL_STACK, "");
 940 }
 941 
 942 
 943 /*
 944  * hci1394_q_at_write_OMI()
 945  *    Write an OMI descriptor into the AT descriptor buffer passed in as qbuf.
 946  *    Buffer state information is stored in cmd.  Use the hdr and hdr size for
 947  *    the additional information attached to an immediate descriptor.
 948  */
 949 void
 950 hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
 951     hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
 952 {
 953         hci1394_desc_imm_t *desc;
 954         uint32_t data;
 955 
 956 
 957         ASSERT(qbuf != NULL);
 958         ASSERT(cmd != NULL);
 959         ASSERT(hdr != NULL);
 960         ASSERT(MUTEX_HELD(&q_handle->q_mutex));
 961         TNF_PROBE_0_DEBUG(hci1394_q_at_write_OMI_enter, HCI1394_TNF_HAL_STACK,
 962             "");
 963 
 964         /* The only valid "header" sizes for an OMI are 8 bytes or 16 bytes */
 965         ASSERT((hdrsize == 8) || (hdrsize == 16));
 966 
 967         /* Make sure enough room for OMI */
 968         ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));
 969 
 970         /* Store the offset of the top of this descriptor block */
 971         qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
 972             qbuf->qb_ptrs.qp_begin);
 973 
 974         /* Setup OpenHCI OMI Header */
 975         desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
 976         data = DESC_AT_OMI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
 977         ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
 978         ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
 979         ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
 980         ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);
 981 
 982         /*
 983          * Copy in 1394 header. Size is in bytes, convert it to a 32-bit word
 984          * count.
 985          */
 986         ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
 987             hdrsize >> 2, DDI_DEV_AUTOINCR);
 988 
 989         /*
 990          * We wrote 2 16 byte blocks in the descriptor buffer, update the count
 991          * accordingly.  Update the reserved size and current pointer.
 992          */
 993         q_handle->q_block_cnt += 2;
 994         qbuf->qb_ptrs.qp_resv_size -= sizeof (hci1394_desc_imm_t);
 995         qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
 996 
 997         TNF_PROBE_0_DEBUG(hci1394_q_at_write_OMI_exit, HCI1394_TNF_HAL_STACK,
 998             "");
 999 }
1000 
1001 
1002 /*
1003  * hci1394_q_at_write_OLI()
1004  *    Write an OLI descriptor into the AT descriptor buffer passed in as qbuf.
1005  *    Buffer state information is stored in cmd.  Use the hdr and hdr size for
1006  *    the additional information attached to an immediate descriptor.
1007  */
1008 void
1009 hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
1010     hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
1011 {
1012         hci1394_desc_imm_t *desc;
1013         uint32_t data;
1014         uint32_t command_ptr;
1015         uint32_t tcode;
1016 
1017 
1018         ASSERT(qbuf != NULL);
1019         ASSERT(cmd != NULL);
1020         ASSERT(hdr != NULL);
1021         ASSERT(MUTEX_HELD(&q_handle->q_mutex));
1022         TNF_PROBE_0_DEBUG(hci1394_q_at_write_OLI_enter, HCI1394_TNF_HAL_STACK,
1023             "");
1024 
1025         /* The only valid "header" sizes for an OLI are 8, 12, 16 bytes */
1026         ASSERT((hdrsize == 8) || (hdrsize == 12) || (hdrsize == 16));
1027 
1028         /* make sure enough room for 1 OLI */
1029         ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));
1030 
1031         /* Store the offset of the top of this descriptor block */
1032         qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
1033             qbuf->qb_ptrs.qp_begin);
1034 
1035         /* Setup OpenHCI OLI Header */
1036         desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
1037         data = DESC_AT_OLI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
1038         ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
1039         ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
1040         ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
1041         ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);
1042 
1043         /* Setup 1394 Header */
1044         tcode = (hdr->q1 & DESC_PKT_TCODE_MASK) >> DESC_PKT_TCODE_SHIFT;
1045         if ((tcode == IEEE1394_TCODE_WRITE_QUADLET) ||
1046             (tcode == IEEE1394_TCODE_READ_QUADLET_RESP)) {
1047                 /*
1048                  * if the tcode = a quadlet write, move the last quadlet as
1049                  * 8-bit data.  All data is treated as 8-bit data (even quadlet
1050                  * reads and writes). Therefore, target drivers MUST take that
1051                  * into consideration when accessing device registers.
1052                  */
1053                 ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1, 3,
1054                     DDI_DEV_AUTOINCR);
1055                 ddi_rep_put8(qbuf->qb_buf.bi_handle, (uint8_t *)&hdr->q4,
1056                     (uint8_t *)&desc->q4, 4, DDI_DEV_AUTOINCR);
1057         } else {
1058                 ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
1059                     hdrsize >> 2, DDI_DEV_AUTOINCR);
1060         }
1061 
1062         /*
1063          * We wrote 2 16 byte blocks in the descriptor buffer, update the count
1064          * accordingly.
1065          */
1066         q_handle->q_block_cnt += 2;
1067 
1068         /*
1069          * Sync buffer in case DMA engine currently running. This must be done
1070          * before writing the command pointer in the previous descriptor.
1071          */
1072         (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1073             qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1074 
1075         /* save away the status address for quick access in at_next() */
1076         cmd->qc_status_addr = &desc->status;
1077 
1078         /*
1079          * Setup the command pointer.  This tells the HW where to get the
1080          * descriptor we just setup.  This includes the IO address along with
1081          * a 4 bit 16 byte block count
1082          */
1083         command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
1084             ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
1085             DESC_Z_MASK));
1086 
1087         /*
1088          * if we previously setup a descriptor, add this new descriptor into
1089          * the previous descriptor's "next" pointer.
1090          */
1091         if (q_handle->q_previous != NULL) {
1092                 ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
1093                     command_ptr);
1094                 /* Sync buffer again, this gets the command pointer */
1095                 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1096                     qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1097         }
1098 
1099         /*
1100          * this is now the previous descriptor.  Update the current pointer,
1101          * clear the block count and reserved size since this is the end of
1102          * this command.
1103          */
1104         q_handle->q_previous = (hci1394_desc_t *)desc;
1105         qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
1106         q_handle->q_block_cnt = 0;
1107         qbuf->qb_ptrs.qp_resv_size = 0;
1108 
1109         /* save away cleanup info when we are done with the command */
1110         cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
1111         cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;
1112 
1113         /* If the DMA is not running, start it */
1114         if (q_handle->q_dma_running == B_FALSE) {
1115                 q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
1116                     command_ptr);
1117                 q_handle->q_dma_running = B_TRUE;
1118         /* the DMA is running, wake it up */
1119         } else {
1120                 q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
1121         }
1122 
1123         TNF_PROBE_0_DEBUG(hci1394_q_at_write_OLI_exit, HCI1394_TNF_HAL_STACK,
1124             "");
1125 }
1126 
1127 
1128 /*
1129  * hci1394_q_at_write_OL()
1130  *    Write an OL descriptor into the AT descriptor buffer passed in as qbuf.
1131  *    Buffer state information is stored in cmd.  The IO address of the data
1132  *    buffer is passed in io_addr.  Size is the size of the data to be
1133  *    transferred.
1134  */
1135 void
1136 hci1394_q_at_write_OL(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
1137     hci1394_q_cmd_t *cmd, uint32_t io_addr, uint_t size)
1138 {
1139         hci1394_desc_t *desc;
1140         uint32_t data;
1141         uint32_t command_ptr;
1142 
1143 
1144         ASSERT(q_handle != NULL);
1145         ASSERT(qbuf != NULL);
1146         ASSERT(cmd != NULL);
1147         ASSERT(MUTEX_HELD(&q_handle->q_mutex));
1148         TNF_PROBE_0_DEBUG(hci1394_q_at_write_OL_enter, HCI1394_TNF_HAL_STACK,
1149             "");
1150 
1151         /* make sure enough room for OL */
1152         ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_t));
1153 
1154         /* Setup OpenHCI OL Header */
1155         desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
1156         data = DESC_AT_OL | (size & DESC_HDR_REQCOUNT_MASK);
1157         ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
1158         ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
1159         ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
1160         ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, 0);
1161 
1162         /*
1163          * We wrote 1 16 byte block in the descriptor buffer, update the count
1164          * accordingly.
1165          */
1166         q_handle->q_block_cnt++;
1167 
1168         /*
1169          * Sync buffer in case DMA engine currently running. This must be done
1170          * before writing the command pointer in the previous descriptor.
1171          */
1172         (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1173             qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1174 
1175         /* save away the status address for quick access in at_next() */
1176         cmd->qc_status_addr = &desc->status;
1177 
1178         /*
1179          * Setup the command pointer.  This tells the HW where to get the
1180          * descriptor we just setup.  This includes the IO address along with
1181          * a 4 bit 16 byte block count
1182          */
1183         command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
1184             ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
1185             DESC_Z_MASK));
1186 
1187         /*
1188          * if we previously setup a descriptor, add this new descriptor into
1189          * the previous descriptor's "next" pointer.
1190          */
1191         if (q_handle->q_previous != NULL) {
1192                 ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
1193                     command_ptr);
1194                 /* Sync buffer again, this gets the command pointer */
1195                 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1196                     qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1197         }
1198 
1199         /*
1200          * this is now the previous descriptor.  Update the current pointer,
1201          * clear the block count and reserved size since this is the end of
1202          * this command.
1203          */
1204         q_handle->q_previous = desc;
1205         qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);
1206         q_handle->q_block_cnt = 0;
1207         qbuf->qb_ptrs.qp_resv_size = 0;
1208 
1209         /* save away cleanup info when we are done with the command */
1210         cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
1211         cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;
1212 
1213         /* If the DMA is not running, start it */
1214         if (q_handle->q_dma_running == B_FALSE) {
1215                 q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
1216                     command_ptr);
1217                 q_handle->q_dma_running = B_TRUE;
1218         /* the DMA is running, wake it up */
1219         } else {
1220                 q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
1221         }
1222 
1223         TNF_PROBE_0_DEBUG(hci1394_q_at_write_OL_exit, HCI1394_TNF_HAL_STACK,
1224             "");
1225 }
1226 
1227 
1228 /*
1229  * hci1394_q_at_rep_put8()
1230  *    Copy a byte stream from a kernel virtual address (data) to a IO mapped
1231  *    data buffer (qbuf).  Copy datasize bytes.  State information for the
1232  *    data buffer is kept in cmd.
1233  */
1234 void
1235 hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
1236     uint8_t *data, uint_t datasize)
1237 {
1238         ASSERT(qbuf != NULL);
1239         ASSERT(cmd != NULL);
1240         ASSERT(data != NULL);
1241         TNF_PROBE_0_DEBUG(hci1394_q_at_rep_put8_enter, HCI1394_TNF_HAL_STACK,
1242             "");
1243 
1244         /* Make sure enough room for data */
1245         ASSERT(qbuf->qb_ptrs.qp_resv_size >= datasize);
1246 
1247         /* Copy in data into the data buffer */
1248         ddi_rep_put8(qbuf->qb_buf.bi_handle, data,
1249             (uint8_t *)qbuf->qb_ptrs.qp_current, datasize, DDI_DEV_AUTOINCR);
1250 
1251         /* Update the current pointer, offset, and reserved size */
1252         qbuf->qb_ptrs.qp_current += datasize;
1253         qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
1254             qbuf->qb_ptrs.qp_begin);
1255         qbuf->qb_ptrs.qp_resv_size -= datasize;
1256 
1257         /* save away cleanup info when we are done with the command */
1258         cmd->qc_data_used = B_TRUE;
1259         cmd->qc_data_buf = qbuf->qb_ptrs.qp_current_buf;
1260         cmd->qc_data_end = qbuf->qb_ptrs.qp_current - 1;
1261 
1262         /* Sync data buffer */
1263         (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1264             qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1265 
1266         TNF_PROBE_0_DEBUG(hci1394_q_at_rep_put8_exit, HCI1394_TNF_HAL_STACK,
1267             "");
1268 }
1269 
1270 
1271 /*
1272  * hci1394_q_at_copy_from_mblk()
1273  *    Copy a byte stream from a mblk(s) to a IO mapped data buffer (qbuf).
1274  *    Copy mblk->length bytes. The services layer and the hal use a private
1275  *    structure (h1394_mblk_t) to keep track of how much of the mblk to send
1276  *    since we may have to break the transfer up into smaller blocks. (i.e. a
1277  *    1MByte block write would go out in 2KByte chunks. State information for
1278  *    the data buffer is kept in cmd.
1279  */
1280 static void
1281 hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
1282     h1394_mblk_t *mblk)
1283 {
1284         uint_t bytes_left;
1285         uint_t length;
1286 
1287 
1288         ASSERT(qbuf != NULL);
1289         ASSERT(cmd != NULL);
1290         ASSERT(mblk != NULL);
1291         TNF_PROBE_0_DEBUG(hci1394_q_at_copy_from_mblk_enter,
1292             HCI1394_TNF_HAL_STACK, "");
1293 
1294         /* We return these variables to the Services Layer when we are done */
1295         mblk->next_offset = mblk->curr_offset;
1296         mblk->next_mblk = mblk->curr_mblk;
1297         bytes_left = mblk->length;
1298 
1299         /* do while there are bytes left to copy */
1300         do {
1301                 /*
1302                  * If the entire data portion of the current block transfer is
1303                  * contained within a single mblk.
1304                  */
1305                 if ((mblk->next_offset + bytes_left) <=
1306                     (mblk->next_mblk->b_wptr)) {
1307                         /* Copy the data into the data Q */
1308                         hci1394_q_at_rep_put8(qbuf, cmd,
1309                             (uint8_t *)mblk->next_offset, bytes_left);
1310 
1311                         /* increment the mblk offset */
1312                         mblk->next_offset += bytes_left;
1313 
1314                         /* we have no more bytes to put into the buffer */
1315                         bytes_left = 0;
1316 
1317                         /*
1318                          * If our offset is at the end of data in this mblk, go
1319                          * to the next mblk.
1320                          */
1321                         if (mblk->next_offset >= mblk->next_mblk->b_wptr) {
1322                                 mblk->next_mblk = mblk->next_mblk->b_cont;
1323                                 if (mblk->next_mblk != NULL) {
1324                                         mblk->next_offset =
1325                                             mblk->next_mblk->b_rptr;
1326                                 }
1327                         }
1328 
1329                 /*
1330                  * The data portion of the current block transfer is spread
1331                  * across two or more mblk's
1332                  */
1333                 } else {
1334                         /*
1335                          * Figure out how much data is in this mblk.
1336                          */
1337                         length = mblk->next_mblk->b_wptr - mblk->next_offset;
1338 
1339                         /* Copy the data into the AT data Q */
1340                         hci1394_q_at_rep_put8(qbuf, cmd,
1341                             (uint8_t *)mblk->next_offset, length);
1342 
1343                         /* update the bytes left count, go to the next mblk */
1344                         bytes_left = bytes_left - length;
1345                         mblk->next_mblk = mblk->next_mblk->b_cont;
1346                         ASSERT(mblk->next_mblk != NULL);
1347                         mblk->next_offset = mblk->next_mblk->b_rptr;
1348                 }
1349         } while (bytes_left > 0);
1350 
1351         TNF_PROBE_0_DEBUG(hci1394_q_at_copy_from_mblk_exit,
1352             HCI1394_TNF_HAL_STACK, "");
1353 }
1354 
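/*
 * A minimal sketch (not part of the driver) of the h1394_mblk_t bookkeeping
 * described above, assuming the services layer has already set curr_mblk,
 * curr_offset, and length for one chunk of a larger block write (the
 * cmd_priv pointer is hypothetical):
 *
 *	h1394_mblk_t *mblk = &cmd_priv->mblk;
 *
 *	hci1394_q_at_copy_from_mblk(qbuf, cmd, mblk);
 *
 * On return, mblk->next_mblk and mblk->next_offset mark where the next chunk
 * should start, so the services layer can resume the transfer from there.
 */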
1355 
1356 /*
1357  * hci1394_q_ar_next()
1358  *    Return the address of the next received AR packet.  If there are no more
1359  *    AR packets in the buffer, q_addr will be set to NULL.
1360  */
1361 void
1362 hci1394_q_ar_next(hci1394_q_handle_t q_handle, uint32_t **q_addr)
1363 {
1364         hci1394_desc_t *desc;
1365         hci1394_q_buf_t *descb;
1366         hci1394_q_buf_t *datab;
1367         uint32_t residual_count;
1368 
1369 
1370         ASSERT(q_handle != NULL);
1371         ASSERT(q_addr != NULL);
1372         TNF_PROBE_0_DEBUG(hci1394_q_ar_next_enter, HCI1394_TNF_HAL_STACK, "");
1373 
1374         descb = &q_handle->q_desc;
1375         datab = &q_handle->q_data;
1376 
1377         /* Sync Descriptor buffer */
1378         (void) ddi_dma_sync(descb->qb_buf.bi_dma_handle, 0,
1379             descb->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
1380 
1381         /*
1382          * Check the residual count in the current IM descriptor against
1383          * q_space_left to see if we have received any more packets.
1384          */
1385         desc = (hci1394_desc_t *)q_handle->q_head;
1386         residual_count = ddi_get32(descb->qb_buf.bi_handle, &desc->status);
1387         residual_count &= DESC_ST_RESCOUNT_MASK;
1388         if (residual_count >= q_handle->q_space_left) {
1389                 /* No new packets received */
1390                 *q_addr = NULL;
1391                 TNF_PROBE_0_DEBUG(hci1394_q_ar_next_exit,
1392                     HCI1394_TNF_HAL_STACK, "");
1393                 return;
1394         }
1395 
1396         /* Sync Data Q */
1397         (void) ddi_dma_sync(datab->qb_buf.bi_dma_handle, 0,
1398             datab->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
1399 
1400         /*
1401          * We have a new packet, return the address of the start of the
1402          * packet.
1403          */
1404         *q_addr = (uint32_t *)datab->qb_ptrs.qp_current;
1405 
1406         TNF_PROBE_0_DEBUG(hci1394_q_ar_next_exit, HCI1394_TNF_HAL_STACK, "");
1407 }
1408 
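/*
 * An illustrative sketch (not part of the driver) of how an AR consumer
 * might poll this Q: fetch the next packet, read its first quadlet, and
 * later free the space the packet used.  The packet_size value is
 * hypothetical here and would normally be derived from the packet header.
 *
 *	uint32_t *pkt;
 *	uint32_t quadlet0;
 *
 *	hci1394_q_ar_next(q_handle, &pkt);
 *	if (pkt != NULL) {
 *		quadlet0 = hci1394_q_ar_get32(q_handle, &pkt[0]);
 *		...parse quadlet0 and copy out the rest of the packet...
 *		hci1394_q_ar_free(q_handle, packet_size);
 *	}
 */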
1409 
1410 /*
1411  * hci1394_q_ar_free()
1412  *    Free the space used by the AR packet at the top of the data buffer. AR
1413  *    packets are processed in the order that they are received.  This will
1414  *    free the oldest received packet which has not yet been freed.  size is
1415  *    how much space the packet takes up.
1416  */
1417 void
1418 hci1394_q_ar_free(hci1394_q_handle_t q_handle, uint_t size)
1419 {
1420         hci1394_q_buf_t *descb;
1421         hci1394_q_buf_t *datab;
1422 
1423 
1424         ASSERT(q_handle != NULL);
1425         TNF_PROBE_0_DEBUG(hci1394_q_ar_free_enter, HCI1394_TNF_HAL_STACK, "");
1426 
1427         descb = &q_handle->q_desc;
1428         datab = &q_handle->q_data;
1429 
1430         /*
1431          * The packet spans multiple buffers.  In theory a packet could be
1432          * broken across more than two buffers for an ARRESP, but since the
1433          * buffers are allocated in at least 4K increments and the max packet
1434          * size is 2KBytes, this will not happen.
1435          */
1436         if ((datab->qb_ptrs.qp_current + size) > datab->qb_ptrs.qp_end) {
1437                 /* Add IM descriptor for used buffer back into Q */
1438                 hci1394_q_ar_write_IM(q_handle, descb,
1439                     datab->qb_cookie[datab->qb_ptrs.qp_current_buf
1440                     ].dmac_address,
1441                     datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size);
1442 
1443                 /* Go to the next buffer */
1444                 hci1394_q_next_buf(datab);
1445 
1446                 /* Update the next buffer's pointers for the partial packet */
1447                 size -= q_handle->q_space_left;
1448                 datab->qb_ptrs.qp_current += size;
1449                 q_handle->q_space_left =
1450                     datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size -
1451                     size;
1452 
1453                 /* Change the head pointer to the next IM descriptor */
1454                 q_handle->q_head += sizeof (hci1394_desc_t);
1455                 if ((q_handle->q_head + sizeof (hci1394_desc_t)) >
1456                     (descb->qb_ptrs.qp_bottom + 1)) {
1457                         q_handle->q_head = descb->qb_ptrs.qp_top;
1458                 }
1459 
1460         /* Packet is only in one buffer */
1461         } else {
1462                 q_handle->q_space_left -= size;
1463                 datab->qb_ptrs.qp_current += size;
1464         }
1465 
1466         TNF_PROBE_0_DEBUG(hci1394_q_ar_free_exit, HCI1394_TNF_HAL_STACK, "");
1467 }
1468 
1469 
1470 /*
1471  * hci1394_q_ar_get32()
1472  *    Read a quadlet of data regardless of whether it is in the current buffer
1473  *    or has wrapped to the top buffer.  If the address passed to this routine
1474  *    is past the bottom of the data buffer, this routine will automatically
1475  *    wrap back to the top of the Q and read from the correct offset from the
1476  *    top.  Return the quadlet that was read.
1477  */
1478 uint32_t
1479 hci1394_q_ar_get32(hci1394_q_handle_t q_handle, uint32_t *addr)
1480 {
1481         hci1394_q_buf_t *data;
1482         uintptr_t new_addr;
1483         uint32_t data32;
1484 
1485 
1486         ASSERT(q_handle != NULL);
1487         ASSERT(addr != NULL);
1488         TNF_PROBE_0_DEBUG(hci1394_q_get32_enter, HCI1394_TNF_HAL_STACK, "");
1489 
1490         data = &q_handle->q_data;
1491 
1492         /*
1493          * if the data has wrapped to the top of the buffer, adjust the address.
1494          */
1495         if ((uintptr_t)addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
1496                 new_addr = (uintptr_t)data->qb_ptrs.qp_top + ((uintptr_t)addr -
1497                     ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1));
1498                 data32 = ddi_get32(data->qb_buf.bi_handle,
1499                     (uint32_t *)new_addr);
1500 
1501         /* data is before end of buffer */
1502         } else {
1503                 data32 = ddi_get32(data->qb_buf.bi_handle, addr);
1504         }
1505 
1506         TNF_PROBE_0_DEBUG(hci1394_q_get32_exit, HCI1394_TNF_HAL_STACK, "");
1507 
1508         return (data32);
1509 }
1510 
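/*
 * A worked example of the wrap adjustment above (hypothetical addresses):
 * with qp_top == 0x1000 and qp_bottom == 0x1fff, a request for the quadlet
 * at addr == 0x2004 lies past the bottom of the data buffer, so it is read
 * from
 *
 *	new_addr = 0x1000 + (0x2004 - (0x1fff + 1)) = 0x1004
 *
 * which is the same offset from the top of the Q.
 */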
1511 
1512 /*
1513  * hci1394_q_ar_rep_get8()
1514  *    Read a byte stream of data regardless of whether it is contiguous or has
1515  *    partially or fully wrapped to the top buffer.  If the address passed to
1516  *    this routine is past the bottom of the data buffer, or address + size is
1517  *    past the bottom of the data buffer, this routine will automatically wrap
1518  *    back to the top of the Q and read from the correct offset from the top.
1519  *    Copy the data into the kernel virtual address provided (dest).
1520  */
1521 void
1522 hci1394_q_ar_rep_get8(hci1394_q_handle_t q_handle, uint8_t *dest,
1523     uint8_t *q_addr, uint_t size)
1524 {
1525         hci1394_q_buf_t *data;
1526         uintptr_t new_addr;
1527         uint_t new_size;
1528         uintptr_t new_dest;
1529 
1530 
1531         ASSERT(q_handle != NULL);
1532         ASSERT(dest != NULL);
1533         ASSERT(q_addr != NULL);
1534         TNF_PROBE_0_DEBUG(hci1394_q_ar_rep_get8_enter, HCI1394_TNF_HAL_STACK,
1535             "");
1536 
1537         data = &q_handle->q_data;
1538 
1539         /*
1540          * There are three cases:
1541          *   1) All of the data has wrapped.
1542          *   2) Some of the data has not wrapped and some has wrapped.
1543          *   3) None of the data has wrapped.
1544          */
1545 
1546         /* All of the data has wrapped, just adjust the starting address */
1547         if ((uintptr_t)q_addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
1548                 new_addr = (uintptr_t)data->qb_ptrs.qp_top +
1549                     ((uintptr_t)q_addr - ((uintptr_t)data->qb_ptrs.qp_bottom +
1550                     (uintptr_t)1));
1551                 ddi_rep_get8(data->qb_buf.bi_handle, dest, (uint8_t *)new_addr,
1552                     size, DDI_DEV_AUTOINCR);
1553 
1554         /*
1555          * Some of the data has wrapped. Copy the data that hasn't wrapped,
1556          * adjust the address, then copy the rest.
1557          */
1558         } else if (((uintptr_t)q_addr + (uintptr_t)size) >
1559             ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1)) {
1560                 /* Copy first half */
1561                 new_size = (uint_t)(((uintptr_t)data->qb_ptrs.qp_bottom +
1562                     (uintptr_t)1) - (uintptr_t)q_addr);
1563                 ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, new_size,
1564                     DDI_DEV_AUTOINCR);
1565 
1566                 /* copy second half */
1567                 new_dest = (uintptr_t)dest + (uintptr_t)new_size;
1568                 new_size = size - new_size;
1569                 new_addr = (uintptr_t)data->qb_ptrs.qp_top;
1570                 ddi_rep_get8(data->qb_buf.bi_handle, (uint8_t *)new_dest,
1571                     (uint8_t *)new_addr, new_size, DDI_DEV_AUTOINCR);
1572 
1573         /* None of the data has wrapped */
1574         } else {
1575                 ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, size,
1576                     DDI_DEV_AUTOINCR);
1577         }
1578 
1579         TNF_PROBE_0_DEBUG(hci1394_q_ar_rep_get8_exit, HCI1394_TNF_HAL_STACK,
1580             "");
1581 }
1582 
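/*
 * A worked example of the partial-wrap case above (hypothetical numbers):
 * with qp_top == 0x1000, qp_bottom == 0x1fff, q_addr == 0x1ff8, and
 * size == 16, the first copy takes
 *
 *	new_size = (0x1fff + 1) - 0x1ff8 = 8 bytes
 *
 * from the end of the data buffer, and the second copy takes the remaining
 * 16 - 8 = 8 bytes starting at qp_top, with dest advanced by 8 bytes.
 */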
1583 
1584 /*
1585  * hci1394_q_ar_copy_to_mblk()
1586  *    Read a byte stream of data regardless of whether it is contiguous or has
1587  *    partially or fully wrapped to the top buffer.  If the address passed to
1588  *    this routine is past the bottom of the data buffer, or address + size is
1589  *    past the bottom of the data buffer, this routine will automatically wrap
1590  *    back to the top of the Q and read from the correct offset from the top.
1591  *    Copy the data into the mblk provided. The services layer and the hal use
1592  *    a private structure (h1394_mblk_t) to keep track of how much of the mblk
1593  *    to receive into since we may have to break the transfer up into smaller
1594  *    blocks (i.e. a 1MByte block read would go out in 2KByte requests).
1595  */
1596 void
1597 hci1394_q_ar_copy_to_mblk(hci1394_q_handle_t q_handle, uint8_t *addr,
1598     h1394_mblk_t *mblk)
1599 {
1600         uint8_t *new_addr;
1601         uint_t bytes_left;
1602         uint_t length;
1603 
1604 
1605         ASSERT(q_handle != NULL);
1606         ASSERT(addr != NULL);
1607         ASSERT(mblk != NULL);
1608         TNF_PROBE_0_DEBUG(hci1394_q_copy_to_mblk_enter,
1609             HCI1394_TNF_HAL_STACK, "");
1610 
1611         /* We return these variables to the Services Layer when we are done */
1612         mblk->next_offset = mblk->curr_offset;
1613         mblk->next_mblk = mblk->curr_mblk;
1614         bytes_left = mblk->length;
1615 
1616         /* the address we copy from will change as we change mblks */
1617         new_addr = addr;
1618 
1619         /* do while there are bytes left to copy */
1620         do {
1621                 /*
1622                  * If the entire data portion of the current block transfer is
1623                  * contained within a single mblk.
1624                  */
1625                 if ((mblk->next_offset + bytes_left) <=
1626                     (mblk->next_mblk->b_datap->db_lim)) {
1627                         /* Copy the data into the mblk */
1628                         hci1394_q_ar_rep_get8(q_handle,
1629                             (uint8_t *)mblk->next_offset, new_addr, bytes_left);
1630 
1631                         /* increment the offset */
1632                         mblk->next_offset += bytes_left;
1633                         mblk->next_mblk->b_wptr = mblk->next_offset;
1634 
1635                         /* we have no more bytes to put into the buffer */
1636                         bytes_left = 0;
1637 
1638                         /*
1639                          * If our offset is at the end of data in this mblk, go
1640                          * to the next mblk.
1641                          */
1642                         if (mblk->next_offset >=
1643                             mblk->next_mblk->b_datap->db_lim) {
1644                                 mblk->next_mblk = mblk->next_mblk->b_cont;
1645                                 if (mblk->next_mblk != NULL) {
1646                                         mblk->next_offset =
1647                                             mblk->next_mblk->b_wptr;
1648                                 }
1649                         }
1650 
1651                 /*
1652                  * The data portion of the current block transfer is spread
1653                  * across two or more mblk's
1654                  */
1655                 } else {
1656                         /* Figure out how much data is in this mblk */
1657                         length = mblk->next_mblk->b_datap->db_lim -
1658                             mblk->next_offset;
1659 
1660                         /* Copy the data into the mblk */
1661                         hci1394_q_ar_rep_get8(q_handle,
1662                             (uint8_t *)mblk->next_offset, new_addr, length);
1663                         mblk->next_mblk->b_wptr =
1664                             mblk->next_mblk->b_datap->db_lim;
1665 
1666                         /*
1667                          * update the bytes left and address to copy from, go
1668                          * to the next mblk.
1669                          */
1670                         bytes_left = bytes_left - length;
1671                         new_addr = (uint8_t *)((uintptr_t)new_addr +
1672                             (uintptr_t)length);
1673                         mblk->next_mblk = mblk->next_mblk->b_cont;
1674                         ASSERT(mblk->next_mblk != NULL);
1675                         mblk->next_offset = mblk->next_mblk->b_wptr;
1676                 }
1677         } while (bytes_left > 0);
1678 
1679         TNF_PROBE_0_DEBUG(hci1394_q_copy_to_mblk_exit,
1680             HCI1394_TNF_HAL_STACK, "");
1681 }
1682 
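/*
 * An illustrative sketch (not part of the driver) of how a received block
 * read response payload might be handed back through this routine, assuming
 * the services layer supplied an h1394_mblk_t with curr_mblk, curr_offset,
 * and length already set (payload and mblk are hypothetical locals):
 *
 *	uint8_t *payload;	start of the payload in the AR data buffer
 *	h1394_mblk_t *mblk;	bookkeeping structure from the services layer
 *
 *	hci1394_q_ar_copy_to_mblk(q_handle, payload, mblk);
 *
 * On return, b_wptr has been advanced in each mblk that was filled, and
 * mblk->next_mblk and mblk->next_offset tell the services layer where the
 * next chunk of the transfer should be received.
 */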
1683 
1684 /*
1685  * hci1394_q_ar_write_IM()
1686  *    Write an IM descriptor into the AR descriptor buffer passed in as qbuf.
1687  *    The IO address of the data buffer is passed in io_addr.  datasize is the
1688  *    size of the data buffer to receive into.
1689  */
1690 static void
1691 hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
1692     uint32_t io_addr, uint_t datasize)
1693 {
1694         hci1394_desc_t *desc;
1695         uint32_t data;
1696         uint32_t command_ptr;
1697 
1698 
1699         ASSERT(q_handle != NULL);
1700         ASSERT(qbuf != NULL);
1701         TNF_PROBE_0_DEBUG(hci1394_q_ar_write_IM_enter, HCI1394_TNF_HAL_STACK,
1702             "");
1703 
1704         /* Make sure enough room for IM */
1705         if ((qbuf->qb_ptrs.qp_current + sizeof (hci1394_desc_t)) >
1706             (qbuf->qb_ptrs.qp_bottom + 1)) {
1707                 hci1394_q_next_buf(qbuf);
1708         } else {
1709                 /* Store the offset of the top of this descriptor block */
1710                 qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
1711                     qbuf->qb_ptrs.qp_begin);
1712         }
1713 
1714         /* Setup OpenHCI IM Header */
1715         desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
1716         data = DESC_AR_IM | (datasize & DESC_HDR_REQCOUNT_MASK);
1717         ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
1718         ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
1719         ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
1720         ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, datasize &
1721             DESC_ST_RESCOUNT_MASK);
1722 
1723         /*
1724          * Sync buffer in case DMA engine currently running. This must be done
1725          * before writing the command pointer in the previous descriptor.
1726          */
1727         (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1728             qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1729 
1730         /*
1731          * Setup the command pointer.  This tells the HW where to get the
1732          * descriptor we just setup.  This includes the IO address along with
1733          * a 4-bit count of 16-byte blocks.  We only wrote one 16-byte block.
1734          */
1735         command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
1736             ].dmac_address + qbuf->qb_ptrs.qp_offset) | 1);
1737 
1738         /*
1739          * if we previously setup a descriptor, add this new descriptor into
1740          * the previous descriptor's "next" pointer.
1741          */
1742         if (q_handle->q_previous != NULL) {
1743                 ddi_put32(qbuf->qb_buf.bi_handle,
1744                     &q_handle->q_previous->branch, command_ptr);
1745                 /* Sync buffer again, this gets the command pointer */
1746                 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1747                     qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1748         }
1749 
1750         /* this is the new previous descriptor.  Update the current pointer */
1751         q_handle->q_previous = desc;
1752         qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);
1753 
1754         /* If the DMA is not running, start it */
1755         if (q_handle->q_dma_running == B_FALSE) {
1756                 q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
1757                     command_ptr);
1758                 q_handle->q_dma_running = B_TRUE;
1759         /* the DMA is running, wake it up */
1760         } else {
1761                 q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
1762         }
1763 
1764         TNF_PROBE_0_DEBUG(hci1394_q_ar_write_IM_exit, HCI1394_TNF_HAL_STACK,
1765             "");
1766 }
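
/*
 * A worked example of the command pointer encoding above (hypothetical
 * addresses): an IM descriptor written at offset 0x40 into a descriptor
 * buffer whose cookie maps to IO address 0x7f000000 yields
 *
 *	command_ptr = (0x7f000000 + 0x40) | 1 = 0x7f000041
 *
 * where the low four bits carry the 16-byte descriptor block count (one
 * block here, since an IM descriptor occupies a single 16-byte block).
 */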