1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License, Version 1.0 only
   6  * (the "License").  You may not use this file except in compliance
   7  * with the License.
   8  *
   9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
  10  * or http://www.opensolaris.org/os/licensing.
  11  * See the License for the specific language governing permissions
  12  * and limitations under the License.
  13  *
  14  * When distributing Covered Code, include this CDDL HEADER in each
  15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  16  * If applicable, add the following below this CDDL HEADER, with the
  17  * fields enclosed by brackets "[]" replaced with your own identifying
  18  * information: Portions Copyright [yyyy] [name of copyright owner]
  19  *
  20  * CDDL HEADER END
  21  */
  22 /*
  23  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
  24  * Use is subject to license terms.
  25  */
  26 
  27 /*
  28  * hci1394_q.c
  29  *    This code decouples some of the OpenHCI async descriptor logic/structures
  30  *    from the async processing.  The goal was to combine as much of the
  31  *    duplicate code as possible for the different types of async transfers
  32  *    without going overboard.
  33  *
  34  *    There are two parts to the Q, the descriptor buffer and the data buffer.
  35  *    For the most part, data to be transmitted and data which is received go
  36  *    in the data buffers.  The information about where to get the data and
  37  *    where to put it resides in the descriptor buffers.  There are exceptions.
  38  */
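
/*
 * For orientation, a typical descriptor entry (see hci1394_desc_t and
 * hci1394_desc_imm_t as used below) is, roughly:
 *
 *	hdr       - control bits (OMI/OLI/OL/IM) plus a request/byte count
 *	data_addr - IO (bus) address of the payload region in the data buffer
 *	branch    - IO address of the next descriptor block and its Z count
 *	status    - completion status/ACK and timestamp written back by the HW
 *
 * Immediate descriptors (OMI/OLI) are the main exception noted above: their
 * 1394 packet header quadlets live in the descriptor buffer itself.
 */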
  39 
  40 
  41 #include <sys/types.h>
  42 #include <sys/conf.h>
  43 #include <sys/ddi.h>
  44 #include <sys/modctl.h>
  45 #include <sys/stat.h>
  46 #include <sys/sunddi.h>
  47 #include <sys/cmn_err.h>
  48 #include <sys/kmem.h>
  49 #include <sys/note.h>
  50 
  51 #include <sys/1394/adapters/hci1394.h>
  52 
  53 
  54 static int hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size,
  55     uint32_t *io_addr);
  56 static void hci1394_q_unreserve(hci1394_q_buf_t *qbuf);
  57 static void hci1394_q_buf_setup(hci1394_q_buf_t *qbuf);
  58 static void hci1394_q_reset(hci1394_q_handle_t q_handle);
  59 static void hci1394_q_next_buf(hci1394_q_buf_t *qbuf);
  60 
  61 static void hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle,
  62     hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
  63     uint_t hdrsize);
  64 static void hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle,
  65     hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr,
  66     uint_t hdrsize);
  67 static void hci1394_q_at_write_OL(hci1394_q_handle_t q_handle,
  68     hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd, uint32_t io_addr,
  69     uint_t datasize);
  70 static void hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
  71     uint8_t *data, uint_t datasize);
  72 static void hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf,
  73     hci1394_q_cmd_t *cmd, h1394_mblk_t *mblk);
  74 
  75 static void hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle,
  76     hci1394_q_buf_t *qbuf, uint32_t io_addr, uint_t datasize);
  77 
  78 /*
  79  * hci1394_q_init()
  80  *    Initialize a Q.  A Q consists of a descriptor buffer and a data buffer and
  81  *    can be either an AT or AR Q. hci1394_q_init() returns a handle which
  82  *    should be used for the rest of the hci1394_q_* calls.
  83  */
  84 int
  85 hci1394_q_init(hci1394_drvinfo_t *drvinfo,
  86     hci1394_ohci_handle_t ohci_handle, hci1394_q_info_t *qinfo,
  87     hci1394_q_handle_t *q_handle)
  88 {
  89         hci1394_q_buf_t *desc;
  90         hci1394_q_buf_t *data;
  91         hci1394_buf_parms_t parms;
  92         hci1394_q_t *q;
  93         int status;
  94         int index;
  95 
  96 
  97         ASSERT(drvinfo != NULL);
  98         ASSERT(qinfo != NULL);
  99         ASSERT(q_handle != NULL);
 100         TNF_PROBE_0_DEBUG(hci1394_q_init_enter, HCI1394_TNF_HAL_STACK, "");
 101 
 102         /*
 103          * allocate the memory to track this Q.  Initialize the internal Q
 104          * structure.
 105          */
 106         q = kmem_alloc(sizeof (hci1394_q_t), KM_SLEEP);
 107         q->q_drvinfo = drvinfo;
 108         q->q_info = *qinfo;
 109         q->q_ohci = ohci_handle;
 110         mutex_init(&q->q_mutex, NULL, MUTEX_DRIVER, drvinfo->di_iblock_cookie);
 111         desc = &q->q_desc;
 112         data = &q->q_data;
 113 
 114         /*
 115          * Allocate the Descriptor buffer.
 116          *
 117          * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
 118          * after we have tested the multiple cookie code on x86.
 119          */
 120         parms.bp_length = qinfo->qi_desc_size;
 121         parms.bp_max_cookies = 1;
 122         parms.bp_alignment = 16;
 123         status = hci1394_buf_alloc(drvinfo, &parms, &desc->qb_buf,
 124             &desc->qb_buf_handle);
 125         if (status != DDI_SUCCESS) {
 126                 mutex_destroy(&q->q_mutex);
 127                 kmem_free(q, sizeof (hci1394_q_t));
 128                 *q_handle = NULL;
 129                 TNF_PROBE_0(hci1394_q_init_bae_fail, HCI1394_TNF_HAL_ERROR, "");
 130                 TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK,
 131                     "");
 132                 return (DDI_FAILURE);
 133         }
 134 
 135         /* Copy the buffer cookies into our local cookie array */
 136         desc->qb_cookie[0] = desc->qb_buf.bi_cookie;
 137         for (index = 1; index < desc->qb_buf.bi_cookie_count; index++) {
 138                 ddi_dma_nextcookie(desc->qb_buf.bi_dma_handle,
 139                     &desc->qb_buf.bi_cookie);
 140                 desc->qb_cookie[index] = desc->qb_buf.bi_cookie;
 141         }
 142 
 143         /*
 144          * Allocate the Data buffer.
 145          *
 146          * XXX - Only want 1 cookie for now. Change this to OHCI_MAX_COOKIE
 147          * after we have tested the multiple cookie code on x86.
 148          */
 149         parms.bp_length = qinfo->qi_data_size;
 150         parms.bp_max_cookies = 1;
 151         parms.bp_alignment = 16;
 152         status = hci1394_buf_alloc(drvinfo, &parms, &data->qb_buf,
 153             &data->qb_buf_handle);
 154         if (status != DDI_SUCCESS) {
 155                 /* Free the allocated Descriptor buffer */
 156                 hci1394_buf_free(&desc->qb_buf_handle);
 157 
 158                 mutex_destroy(&q->q_mutex);
 159                 kmem_free(q, sizeof (hci1394_q_t));
 160                 *q_handle = NULL;
 161                 TNF_PROBE_0(hci1394_q_init_baa_fail, HCI1394_TNF_HAL_ERROR, "");
 162                 TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK,
 163                     "");
 164                 return (DDI_FAILURE);
 165         }
 166 
 167         /*
 168          * We must have at least 2 ARQ data buffers.  If we only have one, we
 169          * will artificially create 2. We must have 2 so that we always have a
 170          * descriptor with free data space to write AR data to. When one is
 171          * empty, it will take us a bit to get a new descriptor back into the
 172          * chain.
 173          */
 174         if ((qinfo->qi_mode == HCI1394_ARQ) &&
 175             (data->qb_buf.bi_cookie_count == 1)) {
 176                 data->qb_buf.bi_cookie_count = 2;
 177                 data->qb_cookie[0] = data->qb_buf.bi_cookie;
 178                 data->qb_cookie[0].dmac_size /= 2;
 179                 data->qb_cookie[1] = data->qb_cookie[0];
 180                 data->qb_cookie[1].dmac_laddress =
 181                     data->qb_cookie[0].dmac_laddress +
 182                     data->qb_cookie[0].dmac_size;
 183                 data->qb_cookie[1].dmac_address =
 184                     data->qb_cookie[0].dmac_address +
 185                     data->qb_cookie[0].dmac_size;
 186 
 187         /* We have more than 1 cookie or we are an AT Q */
 188         } else {
 189                 /* Copy the buffer cookies into our local cookie array */
 190                 data->qb_cookie[0] = data->qb_buf.bi_cookie;
 191                 for (index = 1; index < data->qb_buf.bi_cookie_count; index++) {
 192                         ddi_dma_nextcookie(data->qb_buf.bi_dma_handle,
 193                             &data->qb_buf.bi_cookie);
 194                         data->qb_cookie[index] = data->qb_buf.bi_cookie;
 195                 }
 196         }
 197 
 198         /* The top and bottom of the Q are only set once */
 199         desc->qb_ptrs.qp_top = desc->qb_buf.bi_kaddr;
 200         desc->qb_ptrs.qp_bottom = desc->qb_buf.bi_kaddr +
 201             desc->qb_buf.bi_real_length - 1;
 202         data->qb_ptrs.qp_top = data->qb_buf.bi_kaddr;
 203         data->qb_ptrs.qp_bottom = data->qb_buf.bi_kaddr +
 204             data->qb_buf.bi_real_length - 1;
 205 
 206         /*
 207          * reset the Q pointers to their original settings.  Setup IM
 208          * descriptors if this is an AR Q.
 209          */
 210         hci1394_q_reset(q);
 211 
 212         /* if this is an AT Q, create a queued list for the AT descriptors */
 213         if (qinfo->qi_mode == HCI1394_ATQ) {
 214                 hci1394_tlist_init(drvinfo, NULL, &q->q_queued_list);
 215         }
 216 
 217         *q_handle = q;
 218 
 219         TNF_PROBE_0_DEBUG(hci1394_q_init_exit, HCI1394_TNF_HAL_STACK, "");
 220 
 221         return (DDI_SUCCESS);
 222 }
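
/*
 * A minimal init/fini usage sketch (hypothetical caller; the qi_* values
 * shown, such as ATREQ_DESC_SIZE and the atreq_* callbacks, are
 * illustrative only):
 *
 *	hci1394_q_handle_t q;
 *	hci1394_q_info_t qinfo;
 *
 *	qinfo.qi_mode = HCI1394_ATQ;
 *	qinfo.qi_desc_size = ATREQ_DESC_SIZE;
 *	qinfo.qi_data_size = ATREQ_DATA_SIZE;
 *	qinfo.qi_start = atreq_start;
 *	qinfo.qi_wake = atreq_wake;
 *	qinfo.qi_callback_arg = soft_state;
 *	if (hci1394_q_init(drvinfo, ohci_hdl, &qinfo, &q) != DDI_SUCCESS)
 *		return (DDI_FAILURE);
 *
 *	(... use the hci1394_q_at*() / hci1394_q_ar_*() calls on q ...)
 *
 *	hci1394_q_fini(&q);	(q is set to NULL on return)
 */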
 223 
 224 
 225 /*
 226  * hci1394_q_fini()
 227  *    Cleanup after a successful hci1394_q_init(). Notice that a pointer to the
 228  *    handle is used for the parameter.  fini() will set your handle to NULL
 229  *    before returning.
 230  */
 231 void
 232 hci1394_q_fini(hci1394_q_handle_t *q_handle)
 233 {
 234         hci1394_q_t *q;
 235 
 236         ASSERT(q_handle != NULL);
 237         TNF_PROBE_0_DEBUG(hci1394_q_fini_enter, HCI1394_TNF_HAL_STACK, "");
 238 
 239         q = *q_handle;
 240         if (q->q_info.qi_mode == HCI1394_ATQ) {
 241                 hci1394_tlist_fini(&q->q_queued_list);
 242         }
 243         mutex_destroy(&q->q_mutex);
 244         hci1394_buf_free(&q->q_desc.qb_buf_handle);
 245         hci1394_buf_free(&q->q_data.qb_buf_handle);
 246         kmem_free(q, sizeof (hci1394_q_t));
 247         *q_handle = NULL;
 248 
 249         TNF_PROBE_0_DEBUG(hci1394_q_fini_exit, HCI1394_TNF_HAL_STACK, "");
 250 }
 251 
 252 
 253 /*
 254  * hci1394_q_buf_setup()
 255  *    Initialization of buffer pointers which are present in both the descriptor
 256  *    buffer and data buffer (No reason to duplicate the code)
 257  */
 258 static void
 259 hci1394_q_buf_setup(hci1394_q_buf_t *qbuf)
 260 {
 261         ASSERT(qbuf != NULL);
 262         TNF_PROBE_0_DEBUG(hci1394_q_buf_setup_enter, HCI1394_TNF_HAL_STACK, "");
 263 
 264         /* start with the first cookie */
 265         qbuf->qb_ptrs.qp_current_buf = 0;
 266         qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
 267         qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
 268             qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
 269         qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
 270         qbuf->qb_ptrs.qp_offset = 0;
 271 
 272         /*
 273          * The free_buf and free pointers will change every time an ACK (of some
 274          * type) is processed.  Free is the last byte in the last cookie.
 275          */
 276         qbuf->qb_ptrs.qp_free_buf = qbuf->qb_buf.bi_cookie_count - 1;
 277         qbuf->qb_ptrs.qp_free = qbuf->qb_ptrs.qp_bottom;
 278 
 279         /*
 280          * Start with no space to write descriptors.  We first need to call
 281          * hci1394_q_reserve() before calling hci1394_q_at_write_O*().
 282          */
 283         qbuf->qb_ptrs.qp_resv_size = 0;
 284 
 285         TNF_PROBE_0_DEBUG(hci1394_q_buf_setup_exit, HCI1394_TNF_HAL_STACK, "");
 286 }
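
/*
 * The state established above, written out as the invariants it creates
 * (single-cookie case, illustrative only):
 *
 *	qp_begin     == qp_top
 *	qp_current   == qp_begin
 *	qp_end       == qp_begin + qb_cookie[0].dmac_size - 1
 *	qp_free      == qp_bottom          (last byte of the last cookie)
 *	qp_resv_size == 0                  (must reserve before writing)
 *
 * Commands consume space by advancing qp_current toward qp_free;
 * hci1394_q_at_next() later advances qp_free as commands complete, which is
 * what makes descriptor and data space reusable.
 */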
 287 
 288 
 289 /*
 290  * hci1394_q_reset()
 291  *    Resets the buffers to an initial state.  This should be called during
 292  *    attach and resume.
 293  */
 294 static void
 295 hci1394_q_reset(hci1394_q_handle_t q_handle)
 296 {
 297         hci1394_q_buf_t *desc;
 298         hci1394_q_buf_t *data;
 299         int index;
 300 
 301         ASSERT(q_handle != NULL);
 302         TNF_PROBE_0_DEBUG(hci1394_q_reset_enter, HCI1394_TNF_HAL_STACK, "");
 303 
 304         mutex_enter(&q_handle->q_mutex);
 305         desc = &q_handle->q_desc;
 306         data = &q_handle->q_data;
 307 
 308         hci1394_q_buf_setup(desc);
 309         hci1394_q_buf_setup(data);
 310 
 311         /* DMA starts off stopped, no previous descriptor to link from */
 312         q_handle->q_dma_running = B_FALSE;
 313         q_handle->q_block_cnt = 0;
 314         q_handle->q_previous = NULL;
 315 
 316         /* If this is an AR Q, setup IM's for the data buffers that we have */
 317         if (q_handle->q_info.qi_mode == HCI1394_ARQ) {
 318                 /*
 319                  * This points to where to find the first IM descriptor.  Since
 320                  * we just reset the pointers in hci1394_q_buf_setup(), the
 321                  * first IM we write below will be found at the top of the Q.
 322                  */
 323                 q_handle->q_head = desc->qb_ptrs.qp_top;
 324 
 325                 for (index = 0; index < data->qb_buf.bi_cookie_count; index++) {
 326                         hci1394_q_ar_write_IM(q_handle, desc,
 327                             data->qb_cookie[index].dmac_address,
 328                             data->qb_cookie[index].dmac_size);
 329                 }
 330 
 331                 /*
 332                  * The space left in the current IM is the size of the buffer.
 333                  * The current buffer is the first buffer added to the AR Q.
 334                  */
 335                 q_handle->q_space_left = data->qb_cookie[0].dmac_size;
 336         }
 337 
 338         mutex_exit(&q_handle->q_mutex);
 339         TNF_PROBE_0_DEBUG(hci1394_q_reset_exit, HCI1394_TNF_HAL_STACK, "");
 340 }
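
/*
 * After an AR reset, the descriptor buffer holds one IM per data-buffer
 * cookie (written by hci1394_q_ar_write_IM() in the loop above).  A
 * hypothetical two-cookie layout, for illustration:
 *
 *	IM #0: data address = qb_cookie[0].dmac_address, size = qb_cookie[0].dmac_size
 *	IM #1: data address = qb_cookie[1].dmac_address, size = qb_cookie[1].dmac_size
 *
 * q_head points at IM #0 and q_space_left starts as qb_cookie[0]'s size,
 * which is why an AR Q must have at least two data buffers (see
 * hci1394_q_init() above).
 */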
 341 
 342 
 343 /*
 344  * hci1394_q_resume()
 345  *    This is called during a resume (after a successful suspend). Currently
 346  *    we only call reset.  Since this is not a time critical function, we will
 347  *    leave this as a separate function to increase readability.
 348  */
 349 void
 350 hci1394_q_resume(hci1394_q_handle_t q_handle)
 351 {
 352         ASSERT(q_handle != NULL);
 353         TNF_PROBE_0_DEBUG(hci1394_q_resume_enter, HCI1394_TNF_HAL_STACK, "");
 354         hci1394_q_reset(q_handle);
 355         TNF_PROBE_0_DEBUG(hci1394_q_resume_exit, HCI1394_TNF_HAL_STACK, "");
 356 }
 357 
 358 
 359 /*
 360  * hci1394_q_stop()
 361  *    This call informs us that a DMA engine has been stopped.  It does not
 362  *    perform the actual stop. We need to know this so that when we add a
 363  *    new descriptor, we do a start instead of a wake.
 364  */
 365 void
 366 hci1394_q_stop(hci1394_q_handle_t q_handle)
 367 {
 368         ASSERT(q_handle != NULL);
 369         TNF_PROBE_0_DEBUG(hci1394_q_stop_enter, HCI1394_TNF_HAL_STACK, "");
 370         mutex_enter(&q_handle->q_mutex);
 371         q_handle->q_dma_running = B_FALSE;
 372         mutex_exit(&q_handle->q_mutex);
 373         TNF_PROBE_0_DEBUG(hci1394_q_stop_exit, HCI1394_TNF_HAL_STACK, "");
 374 }
 375 
 376 
 377 /*
 378  * hci1394_q_reserve()
 379  *    Reserve space in the AT descriptor or data buffer. This ensures that we
 380  *    can get a contiguous buffer. Descriptors have to be in a contiguous
 381  *    buffer. Data does not have to be in a contiguous buffer but we do this to
 382  *    reduce complexity. For systems with small page sizes (e.g. x86), this
 383  *    could result in inefficient use of the data buffers when sending large
 384  *    data blocks (this only applies to non-physical block write ATREQs and
 385  *    block read ATRESP). Since it looks like most protocols that use large data
 386  *    blocks (like SBP-2) use physical transfers to do this (due to their
 387  *    efficiency), this will probably not be a real world problem.  If it turns
 388  *    out to be a problem, the options are to force a single cookie for the data
 389  *    buffer, allow multiple cookies and have a larger data space, or change the
 390  *    data code to use an OMI, OM, OL descriptor sequence (instead of OMI, OL).
 391  */
 392 static int
 393 hci1394_q_reserve(hci1394_q_buf_t *qbuf, uint_t size, uint32_t *io_addr)
 394 {
 395         uint_t aligned_size;
 396 
 397 
 398         ASSERT(qbuf != NULL);
 399         TNF_PROBE_0_DEBUG(hci1394_q_reserve_enter, HCI1394_TNF_HAL_STACK, "");
 400 
 401         /* Save backup of pointers in case we have to unreserve */
 402         qbuf->qb_backup_ptrs = qbuf->qb_ptrs;
 403 
 404         /*
 405          * Make sure all allocations are quadlet aligned. The data doesn't
 406          * have to be, but we force it to be anyway to keep things simple.
 407          */
 408         aligned_size = HCI1394_ALIGN_QUAD(size);
 409 
 410         /*
 411          * if the free pointer is in the current buffer and the free pointer
 412          * is below the current pointer (i.e. has not wrapped around)
 413          */
 414         if ((qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) &&
 415             (qbuf->qb_ptrs.qp_free >= qbuf->qb_ptrs.qp_current)) {
 416                 /*
 417                  * The free pointer is in this buffer below the current pointer.
 418                  * Check to see if we have enough free space left.
 419                  */
 420                 if ((qbuf->qb_ptrs.qp_current + aligned_size) <=
 421                     qbuf->qb_ptrs.qp_free) {
 422                         /* Set up our reserved size, return the IO address */
 423                         qbuf->qb_ptrs.qp_resv_size = aligned_size;
 424                         *io_addr = (uint32_t)(qbuf->qb_cookie[
 425                             qbuf->qb_ptrs.qp_current_buf].dmac_address +
 426                             qbuf->qb_ptrs.qp_offset);
 427 
 428                 /*
 429                  * The free pointer is in this buffer below the current pointer.
 430                  * We do not have enough free space for the alloc. Return
 431                  * failure.
 432                  */
 433                 } else {
 434                         qbuf->qb_ptrs.qp_resv_size = 0;
 435                         TNF_PROBE_0(hci1394_q_reserve_ns_fail,
 436                             HCI1394_TNF_HAL_ERROR, "");
 437                         TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit,
 438                             HCI1394_TNF_HAL_STACK, "");
 439                         return (DDI_FAILURE);
 440                 }
 441 
 442         /*
 443          * If there is not enough room to fit in the current buffer (not
 444          * including wrap around), we will go to the next buffer and check
 445          * there. If we only have one buffer (i.e. one cookie), we will end up
 446          * staying at the current buffer and wrapping the address back to the
 447          * top.
 448          */
 449         } else if ((qbuf->qb_ptrs.qp_current + aligned_size) >
 450             qbuf->qb_ptrs.qp_end) {
 451                 /* Go to the next buffer (or the top of ours for one cookie) */
 452                 hci1394_q_next_buf(qbuf);
 453 
 454                 /* If the free pointer is in the new current buffer */
 455                 if (qbuf->qb_ptrs.qp_current_buf == qbuf->qb_ptrs.qp_free_buf) {
 456                         /*
 457                          * The free pointer is in this buffer. If we do not have
 458                          * enough free space for the alloc, return failure.
 459                          */
 460                         if ((qbuf->qb_ptrs.qp_current + aligned_size) >
 461                             qbuf->qb_ptrs.qp_free) {
 462                                 qbuf->qb_ptrs.qp_resv_size = 0;
 463                                 TNF_PROBE_0(hci1394_q_reserve_ns_fail,
 464                                     HCI1394_TNF_HAL_ERROR, "");
 465                                 TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit,
 466                                     HCI1394_TNF_HAL_STACK, "");
 467                                 return (DDI_FAILURE);
 468                         /*
 469                          * The free pointer is in this buffer. We have enough
 470                          * free space left.
 471                          */
 472                         } else {
 473                                 /*
 474                                  * Set up our reserved size, return the IO
 475                                  * address
 476                                  */
 477                                 qbuf->qb_ptrs.qp_resv_size = aligned_size;
 478                                 *io_addr = (uint32_t)(qbuf->qb_cookie[
 479                                     qbuf->qb_ptrs.qp_current_buf].dmac_address +
 480                                     qbuf->qb_ptrs.qp_offset);
 481                         }
 482 
 483                 /*
 484                  * We switched buffers and the free pointer is still in another
 485                  * buffer. We have sufficient space in this buffer for the alloc
 486                  * after changing buffers.
 487                  */
 488                 } else {
 489                         /* Set up our reserved size, return the IO address */
 490                         qbuf->qb_ptrs.qp_resv_size = aligned_size;
 491                         *io_addr = (uint32_t)(qbuf->qb_cookie[
 492                             qbuf->qb_ptrs.qp_current_buf].dmac_address +
 493                             qbuf->qb_ptrs.qp_offset);
 494                 }
 495         /*
 496          * The free pointer is in another buffer. We have sufficient space in
 497          * this buffer for the alloc.
 498          */
 499         } else {
 500                 /* Set up our reserved size, return the IO address */
 501                 qbuf->qb_ptrs.qp_resv_size = aligned_size;
 502                 *io_addr = (uint32_t)(qbuf->qb_cookie[
 503                     qbuf->qb_ptrs.qp_current_buf].dmac_address +
 504                     qbuf->qb_ptrs.qp_offset);
 505         }
 506 
 507         TNF_PROBE_0_DEBUG(hci1394_q_reserve_exit, HCI1394_TNF_HAL_STACK, "");
 508 
 509         return (DDI_SUCCESS);
 510 }
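
/*
 * The reservation logic above, reduced to a single-cookie ring buffer for
 * clarity.  This is a hypothetical sketch; ring_t and its offset fields are
 * illustrative only, and the real code additionally walks multiple cookies
 * and backs a failed command out via hci1394_q_unreserve():
 *
 *	static int
 *	ring_reserve(ring_t *r, uint_t size, uint_t *offset)
 *	{
 *		size = HCI1394_ALIGN_QUAD(size);
 *		if (r->current <= r->free) {
 *			if (r->current + size <= r->free) {
 *				*offset = r->current;
 *				return (DDI_SUCCESS);
 *			}
 *			return (DDI_FAILURE);
 *		}
 *		if (r->current + size > r->end) {
 *			r->current = r->top;	(wrap back to the top)
 *			if (r->current + size > r->free)
 *				return (DDI_FAILURE);
 *		}
 *		*offset = r->current;
 *		return (DDI_SUCCESS);
 *	}
 */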
 511 
 512 /*
 513  * hci1394_q_unreserve()
 514  *    Set the buffer pointers to what they were before hci1394_q_reserve().  This
 515  *    will be called when we encounter errors during hci1394_q_at*().
 516  */
 517 static void
 518 hci1394_q_unreserve(hci1394_q_buf_t *qbuf)
 519 {
 520         ASSERT(qbuf != NULL);
 521         TNF_PROBE_0_DEBUG(hci1394_q_unreserve_enter, HCI1394_TNF_HAL_STACK, "");
 522 
 523         /* Go back to pointer setting before the reserve */
 524         qbuf->qb_ptrs = qbuf->qb_backup_ptrs;
 525 
 526         TNF_PROBE_0_DEBUG(hci1394_q_unreserve_exit, HCI1394_TNF_HAL_STACK, "");
 527 }
 528 
 529 
 530 /*
 531  * hci1394_q_next_buf()
 532  *    Set our current buffer to the next cookie.  If we only have one cookie, we
 533  *    will go back to the top of our buffer.
 534  */
 535 void
 536 hci1394_q_next_buf(hci1394_q_buf_t *qbuf)
 537 {
 538         ASSERT(qbuf != NULL);
 539         TNF_PROBE_0_DEBUG(hci1394_q_next_buf_enter, HCI1394_TNF_HAL_STACK, "");
 540 
 541         /*
 542          * go to the next cookie, if we are >= the cookie count, go back to the
 543          * first cookie.
 544          */
 545         qbuf->qb_ptrs.qp_current_buf++;
 546         if (qbuf->qb_ptrs.qp_current_buf >= qbuf->qb_buf.bi_cookie_count) {
 547                 qbuf->qb_ptrs.qp_current_buf = 0;
 548         }
 549 
 550         /* adjust the begin, end, current, and offset pointers */
 551         qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_end + 1;
 552         if (qbuf->qb_ptrs.qp_begin > qbuf->qb_ptrs.qp_bottom) {
 553                 qbuf->qb_ptrs.qp_begin = qbuf->qb_ptrs.qp_top;
 554         }
 555         qbuf->qb_ptrs.qp_end = qbuf->qb_ptrs.qp_begin +
 556             qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf].dmac_size - 1;
 557         qbuf->qb_ptrs.qp_current = qbuf->qb_ptrs.qp_begin;
 558         qbuf->qb_ptrs.qp_offset = 0;
 559 
 560         TNF_PROBE_0_DEBUG(hci1394_q_next_buf_exit, HCI1394_TNF_HAL_STACK, "");
 561 }
 562 
 563 
 564 /*
 565  * hci1394_q_at()
 566  *    Place an AT command that does NOT need the data buffer into the DMA chain.
 567  *    Some examples of this are quadlet read/write, PHY packets, ATREQ Block
 568  *    Read, and ATRESP block write. result is only valid on failure.
 569  */
 570 int
 571 hci1394_q_at(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
 572     hci1394_basic_pkt_t *hdr, uint_t hdrsize, int *result)
 573 {
 574         int status;
 575         uint32_t ioaddr;
 576 
 577 
 578         ASSERT(q_handle != NULL);
 579         ASSERT(cmd != NULL);
 580         ASSERT(hdr != NULL);
 581         TNF_PROBE_0_DEBUG(hci1394_q_at_enter, HCI1394_TNF_HAL_STACK, "");
 582 
 583         mutex_enter(&q_handle->q_mutex);
 584 
 585         /*
 586          * Check the HAL state and generation when the AT Q is locked.  This
 587          * will make sure that we get all the commands when we flush the Q's
 588          * during a reset or shutdown.
 589          */
 590         if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
 591             (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
 592             cmd->qc_generation)) {
 593                 *result = H1394_STATUS_INVALID_BUSGEN;
 594                 mutex_exit(&q_handle->q_mutex);
 595                 TNF_PROBE_0(hci1394_q_at_st_fail, HCI1394_TNF_HAL_ERROR, "");
 596                 TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK,
 597                     "");
 598                 return (DDI_FAILURE);
 599         }
 600 
 601         /* save away the argument to pass up when this command completes */
 602         cmd->qc_node.tln_addr = cmd;
 603 
 604         /* we have not written any 16 byte blocks to the descriptor yet */
 605         q_handle->q_block_cnt = 0;
 606 
 607         /* Reserve space for an OLI in the descriptor buffer */
 608         status = hci1394_q_reserve(&q_handle->q_desc,
 609             sizeof (hci1394_desc_imm_t), &ioaddr);
 610         if (status != DDI_SUCCESS) {
 611                 *result = H1394_STATUS_NOMORE_SPACE;
 612                 mutex_exit(&q_handle->q_mutex);
 613                 TNF_PROBE_0(hci1394_q_at_qre_fail, HCI1394_TNF_HAL_ERROR, "");
 614                 TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK,
 615                     "");
 616                 return (DDI_FAILURE);
 617         }
 618 
 619         /* write the OLI to the descriptor buffer */
 620         hci1394_q_at_write_OLI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
 621 
 622         /* Add the AT command to the queued list */
 623         hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
 624 
 625         mutex_exit(&q_handle->q_mutex);
 626         TNF_PROBE_0_DEBUG(hci1394_q_at_exit, HCI1394_TNF_HAL_STACK, "");
 627 
 628         return (DDI_SUCCESS);
 629 }
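
/*
 * For a header-only command (e.g. a quadlet read ATREQ), the descriptor
 * block built above is a single OLI, i.e. two 16-byte blocks (Z = 2),
 * roughly:
 *
 *	OLI: hdr       = DESC_AT_OLI | hdrsize
 *	     data_addr = 0
 *	     branch    = 0 (patched when the next command is chained on)
 *	     status    = seeded with the command timestamp; the HW writes
 *	                 the ACK/EVT and completion timestamp back here
 *	     q1..q4    = the 1394 packet header passed in as hdr
 *
 * The command pointer handed to the hardware is the block's IO address with
 * Z = 2 in its low four bits (see hci1394_q_at_write_OLI() below).
 */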
 630 
 631 
 632 /*
 633  * XXX - NOTE: POSSIBLE FUTURE OPTIMIZATION
 634  *    ATREQ Block read and write's that go through software are not very
 635  *    efficient (one of the reasons to use physical space). A copy is forced
 636  *    on all block reads due to the design of OpenHCI. Writes do not have this
 637  *    same restriction.  This design forces a copy for writes too (we always
 638  *    copy into a data buffer before sending). There are many reasons for this
 639  *    including complexity reduction.  There is a data size threshold where a
 640  *    copy is more expensive than mapping the data buffer address (or, worse,
 641  *    a big enough difference that it pays to avoid the copy). However, we move
 642  *    block data around in mblks which means that our data may be scattered
 643  *    over many buffers.  This adds to the complexity of mapping and setting
 644  *    up the OpenHCI descriptors.
 645  *
 646  *    If someone really needs a speedup on block write ATREQs, my recommendation
 647  *    would be to add an additional command type at the target interface for a
 648  *    fast block write.  The target driver would pass a mapped io addr to use.
 649  *    A function like "hci1394_q_at_with_ioaddr()" could be created which would
 650  *    be almost an exact copy of hci1394_q_at_with_data() without the
 651  *    hci1394_q_reserve() and hci1394_q_at_rep_put8() for the data buffer.
 652  */
 653 
 654 
 655 /*
 656  * hci1394_q_at_with_data()
 657  *    Place an AT command that does need the data buffer into the DMA chain.
 658  *    The data is passed as a pointer to a kernel virtual address. An example of
 659  *    this is the lock operations. result is only valid on failure.
 660  */
 661 int
 662 hci1394_q_at_with_data(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
 663     hci1394_basic_pkt_t *hdr, uint_t hdrsize, uint8_t *data, uint_t datasize,
 664     int *result)
 665 {
 666         uint32_t desc_ioaddr;
 667         uint32_t data_ioaddr;
 668         int status;
 669 
 670 
 671         ASSERT(q_handle != NULL);
 672         ASSERT(cmd != NULL);
 673         ASSERT(hdr != NULL);
 674         ASSERT(data != NULL);
 675         TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_enter, HCI1394_TNF_HAL_STACK,
 676             "");
 677 
 678         mutex_enter(&q_handle->q_mutex);
 679 
 680         /*
 681          * Check the HAL state and generation when the AT Q is locked.  This
 682          * will make sure that we get all the commands when we flush the Q's
 683          * during a reset or shutdown.
 684          */
 685         if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
 686             (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
 687             cmd->qc_generation)) {
 688                 *result = H1394_STATUS_INVALID_BUSGEN;
 689                 mutex_exit(&q_handle->q_mutex);
 690                 TNF_PROBE_0_DEBUG(hci1394_q_at_wd_st_fail,
 691                     HCI1394_TNF_HAL_STACK, "");
 692                 return (DDI_FAILURE);
 693         }
 694 
 695         /* save away the argument to pass up when this command completes */
 696         cmd->qc_node.tln_addr = cmd;
 697 
 698         /* we have not written any 16 byte blocks to the descriptor yet */
 699         q_handle->q_block_cnt = 0;
 700 
 701         /* Reserve space for an OMI and OL in the descriptor buffer */
 702         status = hci1394_q_reserve(&q_handle->q_desc,
 703             (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
 704             &desc_ioaddr);
 705         if (status != DDI_SUCCESS) {
 706                 *result = H1394_STATUS_NOMORE_SPACE;
 707                 mutex_exit(&q_handle->q_mutex);
 708                 TNF_PROBE_0(hci1394_q_at_wd_qre_fail,
 709                     HCI1394_TNF_HAL_ERROR, "");
 710                 TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit,
 711                     HCI1394_TNF_HAL_STACK, "");
 712                 return (DDI_FAILURE);
 713         }
 714 
 715         /* allocate space for data in the data buffer */
 716         status = hci1394_q_reserve(&q_handle->q_data, datasize, &data_ioaddr);
 717         if (status != DDI_SUCCESS) {
 718                 *result = H1394_STATUS_NOMORE_SPACE;
 719                 hci1394_q_unreserve(&q_handle->q_desc);
 720                 mutex_exit(&q_handle->q_mutex);
 721                 TNF_PROBE_0(hci1394_q_at_wd_qra_fail,
 722                     HCI1394_TNF_HAL_ERROR, "");
 723                 TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit,
 724                     HCI1394_TNF_HAL_STACK, "");
 725                 return (DDI_FAILURE);
 726         }
 727 
 728         /* Copy data into data buffer */
 729         hci1394_q_at_rep_put8(&q_handle->q_data, cmd, data, datasize);
 730 
 731         /* write the OMI to the descriptor buffer */
 732         hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
 733 
 734         /* write the OL to the descriptor buffer */
 735         hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
 736             datasize);
 737 
 738         /* Add the AT command to the queued list */
 739         hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
 740 
 741         mutex_exit(&q_handle->q_mutex);
 742         TNF_PROBE_0_DEBUG(hci1394_q_at_with_data_exit, HCI1394_TNF_HAL_STACK,
 743             "");
 744 
 745         return (DDI_SUCCESS);
 746 }
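
/*
 * A command with a small payload (e.g. a lock request) ends up as an OMI
 * immediately followed by an OL, three 16-byte blocks total (Z = 3),
 * roughly:
 *
 *	OMI: hdr = DESC_AT_OMI | hdrsize,  q1..q4 = 1394 packet header
 *	OL:  hdr = DESC_AT_OL | datasize,  data_addr = data_ioaddr
 *
 * where data_ioaddr is the space reserved out of q_data above and the
 * payload bytes were copied there with hci1394_q_at_rep_put8().
 */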
 747 
 748 
 749 /*
 750  * hci1394_q_at_with_mblk()
 751  *    Place an AT command that does need the data buffer into the DMA chain.
 752  *    The data is passed in mblk_t(s). Examples of this are a block write
 753  *    ATREQ and a block read ATRESP. The services layer and the hal use a
 754  *    private structure (h1394_mblk_t) to keep track of how much of the mblk
 755  *    to send since we may have to break the transfer up into smaller blocks.
 756  *    (i.e. a 1MByte block write would go out in 2KByte chunks). result is only
 757  *    valid on failure.
 758  */
 759 int
 760 hci1394_q_at_with_mblk(hci1394_q_handle_t q_handle, hci1394_q_cmd_t *cmd,
 761     hci1394_basic_pkt_t *hdr, uint_t hdrsize, h1394_mblk_t *mblk, int *result)
 762 {
 763         uint32_t desc_ioaddr;
 764         uint32_t data_ioaddr;
 765         int status;
 766 
 767 
 768         ASSERT(q_handle != NULL);
 769         ASSERT(cmd != NULL);
 770         ASSERT(hdr != NULL);
 771         ASSERT(mblk != NULL);
 772         TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_enter, HCI1394_TNF_HAL_STACK,
 773             "");
 774 
 775         mutex_enter(&q_handle->q_mutex);
 776 
 777         /*
 778          * Check the HAL state and generation when the AT Q is locked.  This
 779          * will make sure that we get all the commands when we flush the Q's
 780          * during a reset or shutdown.
 781          */
 782         if ((hci1394_state(q_handle->q_drvinfo) != HCI1394_NORMAL) ||
 783             (hci1394_ohci_current_busgen(q_handle->q_ohci) !=
 784             cmd->qc_generation)) {
 785                 *result = H1394_STATUS_INVALID_BUSGEN;
 786                 mutex_exit(&q_handle->q_mutex);
 787                 TNF_PROBE_0_DEBUG(hci1394_q_at_wm_st_fail,
 788                     HCI1394_TNF_HAL_STACK, "");
 789                 return (DDI_FAILURE);
 790         }
 791 
 792         /* save away the argument to pass up when this command completes */
 793         cmd->qc_node.tln_addr = cmd;
 794 
 795         /* we have not written any 16 byte blocks to the descriptor yet */
 796         q_handle->q_block_cnt = 0;
 797 
 798         /* Reserve space for an OMI and OL in the descriptor buffer */
 799         status = hci1394_q_reserve(&q_handle->q_desc,
 800             (sizeof (hci1394_desc_imm_t) + sizeof (hci1394_desc_t)),
 801             &desc_ioaddr);
 802         if (status != DDI_SUCCESS) {
 803                 *result = H1394_STATUS_NOMORE_SPACE;
 804                 mutex_exit(&q_handle->q_mutex);
 805                 TNF_PROBE_0(hci1394_q_at_wm_qre_fail,
 806                     HCI1394_TNF_HAL_ERROR, "");
 807                 TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit,
 808                     HCI1394_TNF_HAL_STACK, "");
 809                 return (DDI_FAILURE);
 810         }
 811 
 812         /* Reserve space for data in the data buffer */
 813         status = hci1394_q_reserve(&q_handle->q_data, mblk->length,
 814             &data_ioaddr);
 815         if (status != DDI_SUCCESS) {
 816                 *result = H1394_STATUS_NOMORE_SPACE;
 817                 hci1394_q_unreserve(&q_handle->q_desc);
 818                 mutex_exit(&q_handle->q_mutex);
 819                 TNF_PROBE_0(hci1394_q_at_wm_qra_fail,
 820                     HCI1394_TNF_HAL_ERROR, "");
 821                 TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit,
 822                     HCI1394_TNF_HAL_STACK, "");
 823                 return (DDI_FAILURE);
 824         }
 825 
 826         /* Copy mblk data into data buffer */
 827         hci1394_q_at_copy_from_mblk(&q_handle->q_data, cmd, mblk);
 828 
 829         /* write the OMI to the descriptor buffer */
 830         hci1394_q_at_write_OMI(q_handle, &q_handle->q_desc, cmd, hdr, hdrsize);
 831 
 832         /* write the OL to the descriptor buffer */
 833         hci1394_q_at_write_OL(q_handle, &q_handle->q_desc, cmd, data_ioaddr,
 834             mblk->length);
 835 
 836         /* Add the AT command to the queued list */
 837         hci1394_tlist_add(q_handle->q_queued_list, &cmd->qc_node);
 838 
 839         mutex_exit(&q_handle->q_mutex);
 840         TNF_PROBE_0_DEBUG(hci1394_q_at_with_mblk_exit, HCI1394_TNF_HAL_STACK,
 841             "");
 842 
 843         return (DDI_SUCCESS);
 844 }
 845 
 846 
 847 /*
 848  * hci1394_q_at_next()
 849  *    Return the next completed AT command in cmd.  If flush_q is true, we will
 850  *    return the command regardless of whether it finished or not.  We will flush
 851  *    during bus reset processing, shutdown, and detach.
 852  */
 853 void
 854 hci1394_q_at_next(hci1394_q_handle_t q_handle, boolean_t flush_q,
 855     hci1394_q_cmd_t **cmd)
 856 {
 857         hci1394_q_buf_t *desc;
 858         hci1394_q_buf_t *data;
 859         hci1394_tlist_node_t *node;
 860         uint32_t cmd_status;
 861 
 862 
 863         ASSERT(q_handle != NULL);
 864         ASSERT(cmd != NULL);
 865         TNF_PROBE_0_DEBUG(hci1394_q_at_next_enter, HCI1394_TNF_HAL_STACK, "");
 866 
 867         mutex_enter(&q_handle->q_mutex);
 868 
 869         desc = &q_handle->q_desc;
 870         data = &q_handle->q_data;
 871 
 872         /* Sync descriptor buffer */
 873         (void) ddi_dma_sync(desc->qb_buf.bi_dma_handle, 0,
 874             desc->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
 875 
 876         /* Look at the top cmd on the queued list (without removing it) */
 877         hci1394_tlist_peek(q_handle->q_queued_list, &node);
 878         if (node == NULL) {
 879                 /* There are no more commands left on the queued list */
 880                 *cmd = NULL;
 881                 mutex_exit(&q_handle->q_mutex);
 882                 TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit, HCI1394_TNF_HAL_STACK,
 883                     "");
 884                 return;
 885         }
 886 
 887         /*
 888          * There is a command on the list; read its status and the timestamp
 889          * of when it was sent.
 890          */
 891         *cmd = (hci1394_q_cmd_t *)node->tln_addr;
 892         cmd_status = ddi_get32(desc->qb_buf.bi_handle, (*cmd)->qc_status_addr);
 893         (*cmd)->qc_timestamp = cmd_status & DESC_ST_TIMESTAMP_MASK;
 894         cmd_status = HCI1394_DESC_EVT_GET(cmd_status);
 895 
 896         /*
 897          * If we are flushing the Q (e.g. due to a bus reset), we will return
 898          * the command regardless of its completion status. If we are not
 899          * flushing the Q and we do not have status on the command (e.g. status
 900          * = 0), we are done with this Q for now.
 901          */
 902         if (flush_q == B_FALSE) {
 903                 if (cmd_status == 0) {
 904                         *cmd = NULL;
 905                         mutex_exit(&q_handle->q_mutex);
 906                         TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit,
 907                             HCI1394_TNF_HAL_STACK, "");
 908                         return;
 909                 }
 910         }
 911 
 912         /*
 913          * The command completed; remove it from the queued list.  There is no
 914          * race condition when deleting the node from the list here.  This is
 915          * the only place the node will be deleted, so we do not need to check
 916          * the return status.
 917          */
 918         (void) hci1394_tlist_delete(q_handle->q_queued_list, node);
 919 
 920         /*
 921          * Free the space used by the command in the descriptor and data
 922          * buffers.
 923          */
 924         desc->qb_ptrs.qp_free_buf = (*cmd)->qc_descriptor_buf;
 925         desc->qb_ptrs.qp_free = (*cmd)->qc_descriptor_end;
 926         if ((*cmd)->qc_data_used == B_TRUE) {
 927                 data->qb_ptrs.qp_free_buf = (*cmd)->qc_data_buf;
 928                 data->qb_ptrs.qp_free = (*cmd)->qc_data_end;
 929         }
 930 
 931         /* return command status */
 932         (*cmd)->qc_status = cmd_status;
 933 
 934         mutex_exit(&q_handle->q_mutex);
 935         TNF_PROBE_0_DEBUG(hci1394_q_at_next_exit, HCI1394_TNF_HAL_STACK, "");
 936 }
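
/*
 * A hypothetical completion loop, as an interrupt handler might use it
 * (process_at_complete() is illustrative only):
 *
 *	hci1394_q_cmd_t *cmd;
 *
 *	for (;;) {
 *		hci1394_q_at_next(q_handle, B_FALSE, &cmd);
 *		if (cmd == NULL)
 *			break;
 *		process_at_complete(cmd);	(looks at cmd->qc_status)
 *	}
 */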
 937 
 938 
 939 /*
 940  * hci1394_q_at_write_OMI()
 941  *    Write an OMI descriptor into the AT descriptor buffer passed in as qbuf.
 942  *    Buffer state information is stored in cmd.  Use the hdr and hdr size for
 943  *    the additional information attached to an immediate descriptor.
 944  */
 945 void
 946 hci1394_q_at_write_OMI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
 947     hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
 948 {
 949         hci1394_desc_imm_t *desc;
 950         uint32_t data;
 951 
 952 
 953         ASSERT(qbuf != NULL);
 954         ASSERT(cmd != NULL);
 955         ASSERT(hdr != NULL);
 956         ASSERT(MUTEX_HELD(&q_handle->q_mutex));
 957         TNF_PROBE_0_DEBUG(hci1394_q_at_write_OMI_enter, HCI1394_TNF_HAL_STACK,
 958             "");
 959 
 960         /* The only valid "header" sizes for an OMI are 8 bytes or 16 bytes */
 961         ASSERT((hdrsize == 8) || (hdrsize == 16));
 962 
 963         /* Make sure enough room for OMI */
 964         ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));
 965 
 966         /* Store the offset of the top of this descriptor block */
 967         qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
 968             qbuf->qb_ptrs.qp_begin);
 969 
 970         /* Setup OpenHCI OMI Header */
 971         desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
 972         data = DESC_AT_OMI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
 973         ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
 974         ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
 975         ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
 976         ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);
 977 
 978         /*
 979          * Copy in 1394 header. Size is in bytes, convert it to a 32-bit word
 980          * count.
 981          */
 982         ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
 983             hdrsize >> 2, DDI_DEV_AUTOINCR);
 984 
 985         /*
 986          * We wrote 2 16 byte blocks in the descriptor buffer, update the count
 987          * accordingly.  Update the reserved size and current pointer.
 988          */
 989         q_handle->q_block_cnt += 2;
 990         qbuf->qb_ptrs.qp_resv_size -= sizeof (hci1394_desc_imm_t);
 991         qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
 992 
 993         TNF_PROBE_0_DEBUG(hci1394_q_at_write_OMI_exit, HCI1394_TNF_HAL_STACK,
 994             "");
 995 }
 996 
 997 
 998 /*
 999  * hci1394_q_at_write_OLI()
1000  *    Write an OLI descriptor into the AT descriptor buffer passed in as qbuf.
1001  *    Buffer state information is stored in cmd.  Use the hdr and hdr size for
1002  *    the additional information attached to an immediate descriptor.
1003  */
1004 void
1005 hci1394_q_at_write_OLI(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
1006     hci1394_q_cmd_t *cmd, hci1394_basic_pkt_t *hdr, uint_t hdrsize)
1007 {
1008         hci1394_desc_imm_t *desc;
1009         uint32_t data;
1010         uint32_t command_ptr;
1011         uint32_t tcode;
1012 
1013 
1014         ASSERT(qbuf != NULL);
1015         ASSERT(cmd != NULL);
1016         ASSERT(hdr != NULL);
1017         ASSERT(MUTEX_HELD(&q_handle->q_mutex));
1018         TNF_PROBE_0_DEBUG(hci1394_q_at_write_OLI_enter, HCI1394_TNF_HAL_STACK,
1019             "");
1020 
1021         /* The only valid "header" sizes for an OLI are 8, 12, 16 bytes */
1022         ASSERT((hdrsize == 8) || (hdrsize == 12) || (hdrsize == 16));
1023 
1024         /* make sure enough room for 1 OLI */
1025         ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_imm_t));
1026 
1027         /* Store the offset of the top of this descriptor block */
1028         qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
1029             qbuf->qb_ptrs.qp_begin);
1030 
1031         /* Setup OpenHCI OLI Header */
1032         desc = (hci1394_desc_imm_t *)qbuf->qb_ptrs.qp_current;
1033         data = DESC_AT_OLI | (hdrsize & DESC_HDR_REQCOUNT_MASK);
1034         ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
1035         ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, 0);
1036         ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
1037         ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, cmd->qc_timestamp);
1038 
1039         /* Setup 1394 Header */
1040         tcode = (hdr->q1 & DESC_PKT_TCODE_MASK) >> DESC_PKT_TCODE_SHIFT;
1041         if ((tcode == IEEE1394_TCODE_WRITE_QUADLET) ||
1042             (tcode == IEEE1394_TCODE_READ_QUADLET_RESP)) {
1043                 /*
1044                  * If the tcode is a quadlet write or quadlet read response, move
1045                  * the last quadlet as 8-bit data.  All data is treated as 8-bit
1046                  * data (even quadlet reads and writes), so target drivers MUST
1047                  * take that into consideration when accessing device registers.
1048                  */
1049                 ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1, 3,
1050                     DDI_DEV_AUTOINCR);
1051                 ddi_rep_put8(qbuf->qb_buf.bi_handle, (uint8_t *)&hdr->q4,
1052                     (uint8_t *)&desc->q4, 4, DDI_DEV_AUTOINCR);
1053         } else {
1054                 ddi_rep_put32(qbuf->qb_buf.bi_handle, &hdr->q1, &desc->q1,
1055                     hdrsize >> 2, DDI_DEV_AUTOINCR);
1056         }
1057 
1058         /*
1059          * We wrote 2 16 byte blocks in the descriptor buffer, update the count
1060          * accordingly.
1061          */
1062         q_handle->q_block_cnt += 2;
1063 
1064         /*
1065          * Sync buffer in case DMA engine currently running. This must be done
1066          * before writing the command pointer in the previous descriptor.
1067          */
1068         (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1069             qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1070 
1071         /* save away the status address for quick access in at_next() */
1072         cmd->qc_status_addr = &desc->status;
1073 
1074         /*
1075          * Set up the command pointer.  This tells the HW where to get the
1076          * descriptor we just set up.  This includes the IO address along with
1077          * a 4-bit count of 16-byte blocks.
1078          */
1079         command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
1080             ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
1081             DESC_Z_MASK));
1082 
1083         /*
1084          * if we previously setup a descriptor, add this new descriptor into
1085          * the previous descriptor's "next" pointer.
1086          */
1087         if (q_handle->q_previous != NULL) {
1088                 ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
1089                     command_ptr);
1090                 /* Sync buffer again, this gets the command pointer */
1091                 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1092                     qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1093         }
1094 
1095         /*
1096          * this is now the previous descriptor.  Update the current pointer,
1097          * clear the block count and reserved size since this is the end of
1098          * this command.
1099          */
1100         q_handle->q_previous = (hci1394_desc_t *)desc;
1101         qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_imm_t);
1102         q_handle->q_block_cnt = 0;
1103         qbuf->qb_ptrs.qp_resv_size = 0;
1104 
1105         /* save away cleanup info when we are done with the command */
1106         cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
1107         cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;
1108 
1109         /* If the DMA is not running, start it */
1110         if (q_handle->q_dma_running == B_FALSE) {
1111                 q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
1112                     command_ptr);
1113                 q_handle->q_dma_running = B_TRUE;
1114         /* the DMA is running, wake it up */
1115         } else {
1116                 q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
1117         }
1118 
1119         TNF_PROBE_0_DEBUG(hci1394_q_at_write_OLI_exit, HCI1394_TNF_HAL_STACK,
1120             "");
1121 }
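
/*
 * The command pointer computed above packs the 16-byte-aligned IO address
 * of the descriptor block together with its Z value, conceptually:
 *
 *	command_ptr = block_io_addr | (q_block_cnt & DESC_Z_MASK);
 *
 * The same word is either written into the previous descriptor's branch
 * field (chaining the new block in) or passed to qi_start() to start the
 * DMA context when the engine is not already running.
 */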
1122 
1123 
1124 /*
1125  * hci1394_q_at_write_OL()
1126  *    Write an OL descriptor into the AT descriptor buffer passed in as qbuf.
1127  *    Buffer state information is stored in cmd.  The IO address of the data
1128  *    buffer is passed in io_addr.  Size is the size of the data to be
1129  *    transferred.
1130  */
1131 void
1132 hci1394_q_at_write_OL(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
1133     hci1394_q_cmd_t *cmd, uint32_t io_addr, uint_t size)
1134 {
1135         hci1394_desc_t *desc;
1136         uint32_t data;
1137         uint32_t command_ptr;
1138 
1139 
1140         ASSERT(q_handle != NULL);
1141         ASSERT(qbuf != NULL);
1142         ASSERT(cmd != NULL);
1143         ASSERT(MUTEX_HELD(&q_handle->q_mutex));
1144         TNF_PROBE_0_DEBUG(hci1394_q_at_write_OL_enter, HCI1394_TNF_HAL_STACK,
1145             "");
1146 
1147         /* make sure enough room for OL */
1148         ASSERT(qbuf->qb_ptrs.qp_resv_size >= sizeof (hci1394_desc_t));
1149 
1150         /* Setup OpenHCI OL Header */
1151         desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
1152         data = DESC_AT_OL | (size & DESC_HDR_REQCOUNT_MASK);
1153         ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
1154         ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
1155         ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
1156         ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, 0);
1157 
1158         /*
1159          * We wrote 1 16 byte block in the descriptor buffer, update the count
1160          * accordingly.
1161          */
1162         q_handle->q_block_cnt++;
1163 
1164         /*
1165          * Sync buffer in case DMA engine currently running. This must be done
1166          * before writing the command pointer in the previous descriptor.
1167          */
1168         (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1169             qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1170 
1171         /* save away the status address for quick access in at_next() */
1172         cmd->qc_status_addr = &desc->status;
1173 
1174         /*
1175          * Set up the command pointer.  This tells the HW where to get the
1176          * descriptor we just set up.  This includes the IO address along with
1177          * a 4-bit count of 16-byte blocks.
1178          */
1179         command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
1180             ].dmac_address + qbuf->qb_ptrs.qp_offset) | (q_handle->q_block_cnt &
1181             DESC_Z_MASK));
1182 
1183         /*
1184          * if we previously setup a descriptor, add this new descriptor into
1185          * the previous descriptor's "next" pointer.
1186          */
1187         if (q_handle->q_previous != NULL) {
1188                 ddi_put32(qbuf->qb_buf.bi_handle, &q_handle->q_previous->branch,
1189                     command_ptr);
1190                 /* Sync buffer again, this gets the command pointer */
1191                 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1192                     qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1193         }
1194 
1195         /*
1196          * this is now the previous descriptor.  Update the current pointer,
1197          * clear the block count and reserved size since this is the end of
1198          * this command.
1199          */
1200         q_handle->q_previous = desc;
1201         qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);
1202         q_handle->q_block_cnt = 0;
1203         qbuf->qb_ptrs.qp_resv_size = 0;
1204 
1205         /* save away cleanup info when we are done with the command */
1206         cmd->qc_descriptor_buf = qbuf->qb_ptrs.qp_current_buf;
1207         cmd->qc_descriptor_end = qbuf->qb_ptrs.qp_current - 1;
1208 
1209         /* If the DMA is not running, start it */
1210         if (q_handle->q_dma_running == B_FALSE) {
1211                 q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
1212                     command_ptr);
1213                 q_handle->q_dma_running = B_TRUE;
1214         /* the DMA is running, wake it up */
1215         } else {
1216                 q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
1217         }
1218 
1219         TNF_PROBE_0_DEBUG(hci1394_q_at_write_OL_exit, HCI1394_TNF_HAL_STACK,
1220             "");
1221 }
1222 
1223 
1224 /*
1225  * hci1394_q_at_rep_put8()
1226  *    Copy a byte stream from a kernel virtual address (data) to an IO mapped
1227  *    data buffer (qbuf).  Copy datasize bytes.  State information for the
1228  *    data buffer is kept in cmd.
1229  */
1230 void
1231 hci1394_q_at_rep_put8(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
1232     uint8_t *data, uint_t datasize)
1233 {
1234         ASSERT(qbuf != NULL);
1235         ASSERT(cmd != NULL);
1236         ASSERT(data != NULL);
1237         TNF_PROBE_0_DEBUG(hci1394_q_at_rep_put8_enter, HCI1394_TNF_HAL_STACK,
1238             "");
1239 
1240         /* Make sure enough room for data */
1241         ASSERT(qbuf->qb_ptrs.qp_resv_size >= datasize);
1242 
1243         /* Copy in data into the data buffer */
1244         ddi_rep_put8(qbuf->qb_buf.bi_handle, data,
1245             (uint8_t *)qbuf->qb_ptrs.qp_current, datasize, DDI_DEV_AUTOINCR);
1246 
1247         /* Update the current pointer, offset, and reserved size */
1248         qbuf->qb_ptrs.qp_current += datasize;
1249         qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
1250             qbuf->qb_ptrs.qp_begin);
1251         qbuf->qb_ptrs.qp_resv_size -= datasize;
1252 
1253         /* save away cleanup info when we are done with the command */
1254         cmd->qc_data_used = B_TRUE;
1255         cmd->qc_data_buf = qbuf->qb_ptrs.qp_current_buf;
1256         cmd->qc_data_end = qbuf->qb_ptrs.qp_current - 1;
1257 
1258         /* Sync data buffer */
1259         (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1260             qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1261 
1262         TNF_PROBE_0_DEBUG(hci1394_q_at_rep_put8_exit, HCI1394_TNF_HAL_STACK,
1263             "");
1264 }
1265 
1266 
1267 /*
1268  * hci1394_q_at_copy_from_mblk()
1269  *    Copy a byte stream from mblk(s) to an IO mapped data buffer (qbuf).
1270  *    Copy mblk->length bytes. The services layer and the hal use a private
1271  *    structure (h1394_mblk_t) to keep track of how much of the mblk to send
1272  *    since we may have to break the transfer up into smaller blocks. (i.e. a
1273  *    1MByte block write would go out in 2KByte chunks). State information for
1274  *    the data buffer is kept in cmd.
1275  */
1276 static void
1277 hci1394_q_at_copy_from_mblk(hci1394_q_buf_t *qbuf, hci1394_q_cmd_t *cmd,
1278     h1394_mblk_t *mblk)
1279 {
1280         uint_t bytes_left;
1281         uint_t length;
1282 
1283 
1284         ASSERT(qbuf != NULL);
1285         ASSERT(cmd != NULL);
1286         ASSERT(mblk != NULL);
1287         TNF_PROBE_0_DEBUG(hci1394_q_at_copy_from_mblk_enter,
1288             HCI1394_TNF_HAL_STACK, "");
1289 
1290         /* We return these variables to the Services Layer when we are done */
1291         mblk->next_offset = mblk->curr_offset;
1292         mblk->next_mblk = mblk->curr_mblk;
1293         bytes_left = mblk->length;
1294 
1295         /* do while there are bytes left to copy */
1296         do {
1297                 /*
1298                  * If the entire data portion of the current block transfer is
1299                  * contained within a single mblk.
1300                  */
1301                 if ((mblk->next_offset + bytes_left) <=
1302                     (mblk->next_mblk->b_wptr)) {
1303                         /* Copy the data into the data Q */
1304                         hci1394_q_at_rep_put8(qbuf, cmd,
1305                             (uint8_t *)mblk->next_offset, bytes_left);
1306 
1307                         /* increment the mblk offset */
1308                         mblk->next_offset += bytes_left;
1309 
1310                         /* we have no more bytes to put into the buffer */
1311                         bytes_left = 0;
1312 
1313                         /*
1314                          * If our offset is at the end of data in this mblk, go
1315                          * to the next mblk.
1316                          */
1317                         if (mblk->next_offset >= mblk->next_mblk->b_wptr) {
1318                                 mblk->next_mblk = mblk->next_mblk->b_cont;
1319                                 if (mblk->next_mblk != NULL) {
1320                                         mblk->next_offset =
1321                                             mblk->next_mblk->b_rptr;
1322                                 }
1323                         }
1324 
1325                 /*
1326                  * The data portion of the current block transfer is spread
1327                  * across two or more mblk's
1328                  */
1329                 } else {
1330                         /*
1331                          * Figure out how much data is in this mblk.
1332                          */
1333                         length = mblk->next_mblk->b_wptr - mblk->next_offset;
1334 
1335                         /* Copy the data into the atreq data Q */
1336                         hci1394_q_at_rep_put8(qbuf, cmd,
1337                             (uint8_t *)mblk->next_offset, length);
1338 
1339                         /* update the bytes left count, go to the next mblk */
1340                         bytes_left = bytes_left - length;
1341                         mblk->next_mblk = mblk->next_mblk->b_cont;
1342                         ASSERT(mblk->next_mblk != NULL);
1343                         mblk->next_offset = mblk->next_mblk->b_rptr;
1344                 }
1345         } while (bytes_left > 0);
1346 
1347         TNF_PROBE_0_DEBUG(hci1394_q_at_copy_from_mblk_exit,
1348             HCI1394_TNF_HAL_STACK, "");
1349 }
1350 
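     /*
      * Illustrative sketch (not part of the driver): since next_mblk and
      * next_offset are handed back to the Services Layer, a later chunk of
      * the same block write could presumably resume where this one left
      * off by advancing the "curr" fields before the next call, e.g.
      *
      *     mblk->curr_mblk = mblk->next_mblk;
      *     mblk->curr_offset = mblk->next_offset;
      *     mblk->length = next_chunk_size;         (hypothetical size)
      *     hci1394_q_at_copy_from_mblk(qbuf, cmd, mblk);
      */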
1351 
1352 /*
1353  * hci1394_q_ar_next()
1354  *    Return the address of the next received AR packet.  If there are no
1355  *    more AR packets in the buffer, q_addr will be set to NULL.
1356  */
1357 void
1358 hci1394_q_ar_next(hci1394_q_handle_t q_handle, uint32_t **q_addr)
1359 {
1360         hci1394_desc_t *desc;
1361         hci1394_q_buf_t *descb;
1362         hci1394_q_buf_t *datab;
1363         uint32_t residual_count;
1364 
1365 
1366         ASSERT(q_handle != NULL);
1367         ASSERT(q_addr != NULL);
1368         TNF_PROBE_0_DEBUG(hci1394_q_ar_next_enter, HCI1394_TNF_HAL_STACK, "");
1369 
1370         descb = &q_handle->q_desc;
1371         datab = &q_handle->q_data;
1372 
1373         /* Sync Descriptor buffer */
1374         (void) ddi_dma_sync(descb->qb_buf.bi_dma_handle, 0,
1375             descb->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
1376 
1377         /*
1378          * Check the residual count in the current IM descriptor vs
1379          * q_space_left to see if we have received any more packets.
1380          */
1381         desc = (hci1394_desc_t *)q_handle->q_head;
1382         residual_count = ddi_get32(descb->qb_buf.bi_handle, &desc->status);
1383         residual_count &= DESC_ST_RESCOUNT_MASK;
1384         if (residual_count >= q_handle->q_space_left) {
1385                 /* No new packets received */
1386                 *q_addr = NULL;
1387                 TNF_PROBE_0_DEBUG(hci1394_q_ar_next_exit,
1388                     HCI1394_TNF_HAL_STACK, "");
1389                 return;
1390         }
1391 
1392         /* Sync Data Q */
1393         (void) ddi_dma_sync(datab->qb_buf.bi_dma_handle, 0,
1394             datab->qb_buf.bi_length, DDI_DMA_SYNC_FORKERNEL);
1395 
1396         /*
1397          * We have a new packet, return the address of the start of the
1398          * packet.
1399          */
1400         *q_addr = (uint32_t *)datab->qb_ptrs.qp_current;
1401 
1402         TNF_PROBE_0_DEBUG(hci1394_q_ar_next_exit, HCI1394_TNF_HAL_STACK, "");
1403 }
1404 
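     /*
      * Illustrative sketch (not part of the driver): an AR consumer would
      * presumably alternate hci1394_q_ar_next() with hci1394_q_ar_free()
      * below, freeing each packet once it has been processed.  pkt_size is
      * hypothetical here; the real size comes from the packet header.
      *
      *     uint32_t *pkt;
      *
      *     hci1394_q_ar_next(q_handle, &pkt);
      *     while (pkt != NULL) {
      *             (process the packet, derive pkt_size from its header)
      *             hci1394_q_ar_free(q_handle, pkt_size);
      *             hci1394_q_ar_next(q_handle, &pkt);
      *     }
      */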
1405 
1406 /*
1407  * hci1394_q_ar_free()
1408  *    Free the space used by the AR packet at the top of the data buffer. AR
1409  *    packets are processed in the order that they are received.  This will
1410  *    free the oldest received packet which has not yet been freed.  size is
1411  *    how much space the packet takes up.
1412  */
1413 void
1414 hci1394_q_ar_free(hci1394_q_handle_t q_handle, uint_t size)
1415 {
1416         hci1394_q_buf_t *descb;
1417         hci1394_q_buf_t *datab;
1418 
1419 
1420         ASSERT(q_handle != NULL);
1421         TNF_PROBE_0_DEBUG(hci1394_q_ar_free_enter, HCI1394_TNF_HAL_STACK, "");
1422 
1423         descb = &q_handle->q_desc;
1424         datab = &q_handle->q_data;
1425 
1426         /*
1427          * Packet is in multiple buffers.  Theoretically a packet could be
1428          * split across more than two buffers for an ARRESP.  Since the
1429          * buffers should be in at least 4K increments and the max packet
1430          * size is 2KBytes, this will not happen.
1431          */
1432         if ((datab->qb_ptrs.qp_current + size) > datab->qb_ptrs.qp_end) {
1433                 /* Add IM descriptor for used buffer back into Q */
1434                 hci1394_q_ar_write_IM(q_handle, descb,
1435                     datab->qb_cookie[datab->qb_ptrs.qp_current_buf
1436                     ].dmac_address,
1437                     datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size);
1438 
1439                 /* Go to the next buffer */
1440                 hci1394_q_next_buf(datab);
1441 
1442                 /* Update the next buffer's pointers for the partial packet */
1443                 size -= q_handle->q_space_left;
1444                 datab->qb_ptrs.qp_current += size;
1445                 q_handle->q_space_left =
1446                     datab->qb_cookie[datab->qb_ptrs.qp_current_buf].dmac_size -
1447                     size;
1448 
1449                 /* Change the head pointer to the next IM descriptor */
1450                 q_handle->q_head += sizeof (hci1394_desc_t);
1451                 if ((q_handle->q_head + sizeof (hci1394_desc_t)) >
1452                     (descb->qb_ptrs.qp_bottom + 1)) {
1453                         q_handle->q_head = descb->qb_ptrs.qp_top;
1454                 }
1455 
1456         /* Packet is only in one buffer */
1457         } else {
1458                 q_handle->q_space_left -= size;
1459                 datab->qb_ptrs.qp_current += size;
1460         }
1461 
1462         TNF_PROBE_0_DEBUG(hci1394_q_ar_free_exit, HCI1394_TNF_HAL_STACK, "");
1463 }
1464 
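     /*
      * Worked example of the wrap case above (hypothetical numbers): if
      * size is 100 bytes but only q_space_left == 40 bytes remain in the
      * current data buffer, the used buffer is handed back to the HW via
      * an IM descriptor, the remaining 100 - 40 = 60 bytes are consumed
      * from the start of the next buffer, and the new q_space_left becomes
      * that buffer's dmac_size - 60.
      */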
1465 
1466 /*
1467  * hci1394_q_ar_get32()
1468  *    Read a quadlet of data regardless of whether it is in the current
1469  *    buffer or has wrapped to the top buffer.  If the address passed to
1470  *    this routine is past the bottom of the data buffer, this routine will
1471  *    automatically wrap back to the top of the Q and read from the correct
1472  *    offset from the top.  The quadlet read is returned to the caller.
1473  */
1474 uint32_t
1475 hci1394_q_ar_get32(hci1394_q_handle_t q_handle, uint32_t *addr)
1476 {
1477         hci1394_q_buf_t *data;
1478         uintptr_t new_addr;
1479         uint32_t data32;
1480 
1481 
1482         ASSERT(q_handle != NULL);
1483         ASSERT(addr != NULL);
1484         TNF_PROBE_0_DEBUG(hci1394_q_get32_enter, HCI1394_TNF_HAL_STACK, "");
1485 
1486         data = &q_handle->q_data;
1487 
1488         /*
1489          * if the data has wrapped to the top of the buffer, adjust the address.
1490          */
1491         if ((uintptr_t)addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
1492                 new_addr = (uintptr_t)data->qb_ptrs.qp_top + ((uintptr_t)addr -
1493                     ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1));
1494                 data32 = ddi_get32(data->qb_buf.bi_handle,
1495                     (uint32_t *)new_addr);
1496 
1497         /* data is before end of buffer */
1498         } else {
1499                 data32 = ddi_get32(data->qb_buf.bi_handle, addr);
1500         }
1501 
1502         TNF_PROBE_0_DEBUG(hci1394_q_get32_exit, HCI1394_TNF_HAL_STACK, "");
1503 
1504         return (data32);
1505 }
1506 
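     /*
      * Worked example of the wrap adjustment above (hypothetical
      * addresses): with qp_top == 0x1000, qp_bottom == 0x1FFF, and
      * addr == 0x2004, the quadlet is read from
      *
      *     new_addr = 0x1000 + (0x2004 - (0x1FFF + 1)) = 0x1004
      *
      * i.e. 4 bytes down from the top of the data buffer.
      */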
1507 
1508 /*
1509  * hci1394_q_ar_rep_get8()
1510  *    Read a byte stream of data regardless of whether it is contiguous or
1511  *    has partially or fully wrapped to the top buffer.  If the address
1512  *    passed to this routine is past the bottom of the data buffer, or if
1513  *    address + size runs past the bottom of the data buffer, this routine
1514  *    will automatically wrap back to the top of the Q and read from the
1515  *    correct offset.  Copy the data into the kernel virtual address (dest).
1516  */
1517 void
1518 hci1394_q_ar_rep_get8(hci1394_q_handle_t q_handle, uint8_t *dest,
1519     uint8_t *q_addr, uint_t size)
1520 {
1521         hci1394_q_buf_t *data;
1522         uintptr_t new_addr;
1523         uint_t new_size;
1524         uintptr_t new_dest;
1525 
1526 
1527         ASSERT(q_handle != NULL);
1528         ASSERT(dest != NULL);
1529         ASSERT(q_addr != NULL);
1530         TNF_PROBE_0_DEBUG(hci1394_q_ar_rep_get8_enter, HCI1394_TNF_HAL_STACK,
1531             "");
1532 
1533         data = &q_handle->q_data;
1534 
1535         /*
1536          * There are three cases:
1537          *   1) All of the data has wrapped.
1538          *   2) Some of the data has not wrapped and some has wrapped.
1539          *   3) None of the data has wrapped.
1540          */
1541 
1542         /* All of the data has wrapped, just adjust the starting address */
1543         if ((uintptr_t)q_addr > (uintptr_t)data->qb_ptrs.qp_bottom) {
1544                 new_addr = (uintptr_t)data->qb_ptrs.qp_top +
1545                     ((uintptr_t)q_addr - ((uintptr_t)data->qb_ptrs.qp_bottom +
1546                     (uintptr_t)1));
1547                 ddi_rep_get8(data->qb_buf.bi_handle, dest, (uint8_t *)new_addr,
1548                     size, DDI_DEV_AUTOINCR);
1549 
1550         /*
1551          * Some of the data has wrapped. Copy the data that hasn't wrapped,
1552          * adjust the address, then copy the rest.
1553          */
1554         } else if (((uintptr_t)q_addr + (uintptr_t)size) >
1555             ((uintptr_t)data->qb_ptrs.qp_bottom + (uintptr_t)1)) {
1556                 /* Copy first half */
1557                 new_size = (uint_t)(((uintptr_t)data->qb_ptrs.qp_bottom +
1558                     (uintptr_t)1) - (uintptr_t)q_addr);
1559                 ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, new_size,
1560                     DDI_DEV_AUTOINCR);
1561 
1562                 /* copy second half */
1563                 new_dest = (uintptr_t)dest + (uintptr_t)new_size;
1564                 new_size = size - new_size;
1565                 new_addr = (uintptr_t)data->qb_ptrs.qp_top;
1566                 ddi_rep_get8(data->qb_buf.bi_handle, (uint8_t *)new_dest,
1567                     (uint8_t *)new_addr, new_size, DDI_DEV_AUTOINCR);
1568 
1569         /* None of the data has wrapped */
1570         } else {
1571                 ddi_rep_get8(data->qb_buf.bi_handle, dest, q_addr, size,
1572                     DDI_DEV_AUTOINCR);
1573         }
1574 
1575         TNF_PROBE_0_DEBUG(hci1394_q_ar_rep_get8_exit, HCI1394_TNF_HAL_STACK,
1576             "");
1577 }
1578 
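     /*
      * Worked example of the partial-wrap case above (hypothetical
      * numbers): with qp_bottom == 0x1FFF, q_addr == 0x1FF8, and
      * size == 16, the first ddi_rep_get8() copies
      * (0x1FFF + 1) - 0x1FF8 = 8 bytes starting at q_addr and the second
      * copies the remaining 16 - 8 = 8 bytes starting at qp_top.
      */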
1579 
1580 /*
1581  * hci1394_q_ar_copy_to_mblk()
1582  *    Read a byte stream of data regardless of whether it is contiguous or
1583  *    has partially or fully wrapped to the top buffer.  If the address
1584  *    passed to this routine, or address + size, is past the bottom of the
1585  *    data buffer, this routine will automatically wrap back to the top of
1586  *    the Q and read from the correct offset.  Copy the data into the mblk
1587  *    provided.  The services layer and the HAL use a private structure
1588  *    (h1394_mblk_t) to keep track of how much of the mblk to receive into,
1589  *    since we may have to break the transfer up into smaller blocks (e.g.
1590  *    a 1MByte block read would go out in 2KByte requests).
1591  */
1592 void
1593 hci1394_q_ar_copy_to_mblk(hci1394_q_handle_t q_handle, uint8_t *addr,
1594     h1394_mblk_t *mblk)
1595 {
1596         uint8_t *new_addr;
1597         uint_t bytes_left;
1598         uint_t length;
1599 
1600 
1601         ASSERT(q_handle != NULL);
1602         ASSERT(addr != NULL);
1603         ASSERT(mblk != NULL);
1604         TNF_PROBE_0_DEBUG(hci1394_q_copy_to_mblk_enter,
1605             HCI1394_TNF_HAL_STACK, "");
1606 
1607         /* We return these variables to the Services Layer when we are done */
1608         mblk->next_offset = mblk->curr_offset;
1609         mblk->next_mblk = mblk->curr_mblk;
1610         bytes_left = mblk->length;
1611 
1612         /* the address we copy from will change as we change mblks */
1613         new_addr = addr;
1614 
1615         /* do while there are bytes left to copy */
1616         do {
1617                 /*
1618                  * If the entire data portion of the current block transfer is
1619                  * contained within a single mblk.
1620                  */
1621                 if ((mblk->next_offset + bytes_left) <=
1622                     (mblk->next_mblk->b_datap->db_lim)) {
1623                         /* Copy the data into the mblk */
1624                         hci1394_q_ar_rep_get8(q_handle,
1625                             (uint8_t *)mblk->next_offset, new_addr, bytes_left);
1626 
1627                         /* increment the offset */
1628                         mblk->next_offset += bytes_left;
1629                         mblk->next_mblk->b_wptr = mblk->next_offset;
1630 
1631                         /* we have no more bytes to put into the buffer */
1632                         bytes_left = 0;
1633 
1634                         /*
1635                          * If our offset is at the end of data in this mblk, go
1636                          * to the next mblk.
1637                          */
1638                         if (mblk->next_offset >=
1639                             mblk->next_mblk->b_datap->db_lim) {
1640                                 mblk->next_mblk = mblk->next_mblk->b_cont;
1641                                 if (mblk->next_mblk != NULL) {
1642                                         mblk->next_offset =
1643                                             mblk->next_mblk->b_wptr;
1644                                 }
1645                         }
1646 
1647                 /*
1648                  * The data portion of the current block transfer is spread
1649                  * across two or more mblk's
1650                  */
1651                 } else {
1652                         /* Figure out how much data is in this mblk */
1653                         length = mblk->next_mblk->b_datap->db_lim -
1654                             mblk->next_offset;
1655 
1656                         /* Copy the data into the mblk */
1657                         hci1394_q_ar_rep_get8(q_handle,
1658                             (uint8_t *)mblk->next_offset, new_addr, length);
1659                         mblk->next_mblk->b_wptr =
1660                             mblk->next_mblk->b_datap->db_lim;
1661 
1662                         /*
1663                          * update the bytes left and address to copy from, go
1664                          * to the next mblk.
1665                          */
1666                         bytes_left = bytes_left - length;
1667                         new_addr = (uint8_t *)((uintptr_t)new_addr +
1668                             (uintptr_t)length);
1669                         mblk->next_mblk = mblk->next_mblk->b_cont;
1670                         ASSERT(mblk->next_mblk != NULL);
1671                         mblk->next_offset = mblk->next_mblk->b_wptr;
1672                 }
1673         } while (bytes_left > 0);
1674 
1675         TNF_PROBE_0_DEBUG(hci1394_q_copy_to_mblk_exit,
1676             HCI1394_TNF_HAL_STACK, "");
1677 }
1678 
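     /*
      * Illustrative sketch (not part of the driver): this is the receive
      * analogue of hci1394_q_at_copy_from_mblk().  Each mblk's b_wptr is
      * advanced by the copy above, so the received bytes are immediately
      * visible in the mblk chain.  pkt_data_addr below is a hypothetical
      * pointer into the AR data buffer; a later chunk could presumably
      * resume via the returned "next" fields.
      *
      *     hci1394_q_ar_copy_to_mblk(q_handle, pkt_data_addr, mblk);
      *     mblk->curr_mblk = mblk->next_mblk;
      *     mblk->curr_offset = mblk->next_offset;
      */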
1679 
1680 /*
1681  * hci1394_q_ar_write_IM()
1682  *    Write an IM descriptor into the AR descriptor buffer passed in as qbuf.
1683  *    The IO address of the data buffer is passed in io_addr.  datasize is
1684  *    the size of the data buffer to receive into.
1685  */
1686 void
1687 hci1394_q_ar_write_IM(hci1394_q_handle_t q_handle, hci1394_q_buf_t *qbuf,
1688     uint32_t io_addr, uint_t datasize)
1689 {
1690         hci1394_desc_t *desc;
1691         uint32_t data;
1692         uint32_t command_ptr;
1693 
1694 
1695         ASSERT(q_handle != NULL);
1696         ASSERT(qbuf != NULL);
1697         TNF_PROBE_0_DEBUG(hci1394_q_ar_write_IM_enter, HCI1394_TNF_HAL_STACK,
1698             "");
1699 
1700         /* Make sure enough room for IM */
1701         if ((qbuf->qb_ptrs.qp_current + sizeof (hci1394_desc_t)) >
1702             (qbuf->qb_ptrs.qp_bottom + 1)) {
1703                 hci1394_q_next_buf(qbuf);
1704         } else {
1705                 /* Store the offset of the top of this descriptor block */
1706                 qbuf->qb_ptrs.qp_offset = (uint32_t)(qbuf->qb_ptrs.qp_current -
1707                     qbuf->qb_ptrs.qp_begin);
1708         }
1709 
1710         /* Setup OpenHCI IM Header */
1711         desc = (hci1394_desc_t *)qbuf->qb_ptrs.qp_current;
1712         data = DESC_AR_IM | (datasize & DESC_HDR_REQCOUNT_MASK);
1713         ddi_put32(qbuf->qb_buf.bi_handle, &desc->hdr, data);
1714         ddi_put32(qbuf->qb_buf.bi_handle, &desc->data_addr, io_addr);
1715         ddi_put32(qbuf->qb_buf.bi_handle, &desc->branch, 0);
1716         ddi_put32(qbuf->qb_buf.bi_handle, &desc->status, datasize &
1717             DESC_ST_RESCOUNT_MASK);
1718 
1719         /*
1720          * Sync buffer in case DMA engine currently running. This must be done
1721          * before writing the command pointer in the previous descriptor.
1722          */
1723         (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1724             qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1725 
1726         /*
1727          * Setup the command pointer.  This tells the HW where to get the
1728          * descriptor we just setup.  This includes the IO address along with
1729          * a 4 bit 16 byte block count.  We only wrote 1 16 byte block.
1730          */
1731         command_ptr = (uint32_t)((qbuf->qb_cookie[qbuf->qb_ptrs.qp_current_buf
1732             ].dmac_address + qbuf->qb_ptrs.qp_offset) | 1);
1733 
1734         /*
1735          * if we previously setup a descriptor, add this new descriptor into
1736          * the previous descriptor's "next" pointer.
1737          */
1738         if (q_handle->q_previous != NULL) {
1739                 ddi_put32(qbuf->qb_buf.bi_handle,
1740                     &q_handle->q_previous->branch, command_ptr);
1741                 /* Sync buffer again, this gets the command pointer */
1742                 (void) ddi_dma_sync(qbuf->qb_buf.bi_dma_handle, 0,
1743                     qbuf->qb_buf.bi_length, DDI_DMA_SYNC_FORDEV);
1744         }
1745 
1746         /* this is the new previous descriptor.  Update the current pointer */
1747         q_handle->q_previous = desc;
1748         qbuf->qb_ptrs.qp_current += sizeof (hci1394_desc_t);
1749 
1750         /* If the DMA is not running, start it */
1751         if (q_handle->q_dma_running == B_FALSE) {
1752                 q_handle->q_info.qi_start(q_handle->q_info.qi_callback_arg,
1753                     command_ptr);
1754                 q_handle->q_dma_running = B_TRUE;
1755         /* the DMA is running, wake it up */
1756         } else {
1757                 q_handle->q_info.qi_wake(q_handle->q_info.qi_callback_arg);
1758         }
1759 
1760         TNF_PROBE_0_DEBUG(hci1394_q_ar_write_IM_exit, HCI1394_TNF_HAL_STACK,
1761             "");
1762 }
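

     /*
      * Illustrative note on the command pointer written above (hypothetical
      * address): the low 4 bits hold the descriptor block size in 16-byte
      * units (Z) and the rest is the 16-byte aligned IO address of the
      * block.  Since a single IM descriptor was written here, Z == 1, so
      * for a descriptor block at IO address 0x7A300040 the value written
      * would be
      *
      *     0x7A300040 | 1 == 0x7A300041
      */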