/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#include <sys/scsi/scsi.h>
#include <sys/vtrace.h>


#define A_TO_TRAN(ap)   ((ap)->a_hba_tran)
#define P_TO_TRAN(pkt)  ((pkt)->pkt_address.a_hba_tran)
#define P_TO_ADDR(pkt)  (&((pkt)->pkt_address))

/*
 * Callback id
 */
uintptr_t scsi_callback_id = 0;

extern ddi_dma_attr_t scsi_alloc_attr;

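/*
 * scsi_alloc_consistent_buf()
 *
 * Allocate a buf(9S) header (unless the caller passes one in) and a
 * region of memory suitable for consistent DMA, for commands such as
 * REQUEST SENSE that need DMA-consistent data buffers.  On allocation
 * failure, a callback other than NULL_FUNC/SLEEP_FUNC is scheduled via
 * ddi_set_callback() and NULL is returned.
 *
 * Illustrative call from a target driver (variable names are
 * hypothetical):
 *
 *      bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
 *          SENSE_LENGTH, B_READ, SLEEP_FUNC, NULL);
 */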
struct buf *
scsi_alloc_consistent_buf(struct scsi_address *ap,
    struct buf *in_bp, size_t datalen, uint_t bflags,
    int (*callback)(caddr_t), caddr_t callback_arg)
{
        dev_info_t      *pdip;
        struct          buf *bp;
        int             kmflag;
        size_t          rlen;

        TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_ALLOC_CONSISTENT_BUF_START,
            "scsi_alloc_consistent_buf_start");

        if (!in_bp) {
                kmflag = (callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
                if ((bp = getrbuf(kmflag)) == NULL) {
                        goto no_resource;
                }
        } else {
                bp = in_bp;

                /* we are establishing a new buffer memory association */
                bp->b_flags &= ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW);
                bp->b_proc = NULL;
                bp->b_pages = NULL;
                bp->b_shadow = NULL;
        }

        /* limit bits that can be set by bflags argument */
        ASSERT(!(bflags & ~(B_READ | B_WRITE)));
        bflags &= (B_READ | B_WRITE);
        bp->b_un.b_addr = 0;

        if (datalen) {
                pdip = (A_TO_TRAN(ap))->tran_hba_dip;

                /*
                 * use i_ddi_mem_alloc() for now until we have an interface to
                 * allocate memory for DMA which doesn't require a DMA handle.
                 */
                while (i_ddi_mem_alloc(pdip, &scsi_alloc_attr, datalen,
                    ((callback == SLEEP_FUNC) ? 1 : 0), 0, NULL,
                    &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
                        if (callback == SLEEP_FUNC) {
                                delay(drv_usectohz(10000));
                        } else {
                                if (!in_bp)
                                        freerbuf(bp);
                                goto no_resource;
                        }
                }
                bp->b_flags |= bflags;
        }
        bp->b_bcount = datalen;
        bp->b_resid = 0;

        TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_ALLOC_CONSISTENT_BUF_END,
            "scsi_alloc_consistent_buf_end");
        return (bp);

no_resource:

        if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
                ddi_set_callback(callback, callback_arg,
                    &scsi_callback_id);
        }
        TRACE_0(TR_FAC_SCSI_RES,
            TR_SCSI_ALLOC_CONSISTENT_BUF_RETURN1_END,
            "scsi_alloc_consistent_buf_end (return1)");
        return (NULL);
}

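/*
 * scsi_free_consistent_buf()
 *
 * Free the DMA-consistent memory and the buf(9S) header obtained from
 * scsi_alloc_consistent_buf(), then run any pending resource callback.
 */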
void
scsi_free_consistent_buf(struct buf *bp)
{
        TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_FREE_CONSISTENT_BUF_START,
            "scsi_free_consistent_buf_start");
        if (!bp)
                return;
        if (bp->b_un.b_addr)
                i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
        freerbuf(bp);
        if (scsi_callback_id != 0) {
                ddi_run_callback(&scsi_callback_id);
        }
        TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_FREE_CONSISTENT_BUF_END,
            "scsi_free_consistent_buf_end");
}

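/*
 * scsi_dmafree_attr()
 *
 * Release the DMA resources held by a packet that was allocated through
 * the scsi_pkt kmem cache: unbind the DMA handle if it is still bound
 * and reset the cookie/window bookkeeping.
 */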
void
scsi_dmafree_attr(struct scsi_pkt *pktp)
{
        struct scsi_pkt_cache_wrapper *pktw =
            (struct scsi_pkt_cache_wrapper *)pktp;

        if (pktw->pcw_flags & PCW_BOUND) {
                if (ddi_dma_unbind_handle(pktp->pkt_handle) !=
                    DDI_SUCCESS)
                        cmn_err(CE_WARN, "scsi_dmafree_attr: "
                            "unbind handle failed");
                pktw->pcw_flags &= ~PCW_BOUND;
        }
        pktp->pkt_numcookies = 0;
        pktw->pcw_totalwin = 0;
}

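/*
 * scsi_pkt2bp()
 *
 * Return the buf(9S) associated with a cache-allocated packet.
 */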
struct buf *
scsi_pkt2bp(struct scsi_pkt *pkt)
{
        return (((struct scsi_pkt_cache_wrapper *)pkt)->pcw_bp);
}

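/*
 * scsi_dma_buf_bind_attr()
 *
 * Bind bp to the packet's preallocated DMA handle and initialize the
 * window/cookie bookkeeping used by scsi_dmaget_attr().  Returns 1 on
 * success; on failure it sets b_error as appropriate and returns 0.
 */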
int
scsi_dma_buf_bind_attr(struct scsi_pkt_cache_wrapper *pktw,
                        struct buf      *bp,
                        int              dma_flags,
                        int             (*callback)(),
                        caddr_t          arg)
{
        struct scsi_pkt *pktp = &(pktw->pcw_pkt);
        int      status;

        /*
         * First time, need to establish the handle.
         */

        ASSERT(pktp->pkt_numcookies == 0);
        ASSERT(pktw->pcw_totalwin == 0);

        status = ddi_dma_buf_bind_handle(pktp->pkt_handle, bp, dma_flags,
            callback, arg, &pktw->pcw_cookie,
            &pktp->pkt_numcookies);

        switch (status) {
        case DDI_DMA_MAPPED:
                pktw->pcw_totalwin = 1;
                break;

        case DDI_DMA_PARTIAL_MAP:
                /* enable first call to ddi_dma_getwin */
                if (ddi_dma_numwin(pktp->pkt_handle,
                    &pktw->pcw_totalwin) != DDI_SUCCESS) {
                        bp->b_error = 0;
                        return (0);
                }
                break;

        case DDI_DMA_NORESOURCES:
                bp->b_error = 0;
                return (0);

        case DDI_DMA_TOOBIG:
                bioerror(bp, EINVAL);
                return (0);

        case DDI_DMA_NOMAPPING:
        case DDI_DMA_INUSE:
        default:
                bioerror(bp, EFAULT);
                return (0);
        }

        /* initialize the loop controls for scsi_dmaget_attr() */
        pktw->pcw_curwin = 0;
        pktw->pcw_total_xfer = 0;
        pktp->pkt_dma_flags = dma_flags;
        return (1);
}

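/*
 * scsi_dmaget_attr()
 *
 * On platforms where DMA uses physical addresses, advance to the next
 * DMA window (when needed) and walk the cookie list for the current
 * window, accumulating pkt_dma_len and exporting the cookies through
 * pkt_cookies.  Returns 1 on success, 0 on failure.
 */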
#if defined(_DMA_USES_PHYSADDR)
int
scsi_dmaget_attr(struct scsi_pkt_cache_wrapper *pktw)
{
        struct scsi_pkt *pktp = &(pktw->pcw_pkt);

        int             status;
        int             num_segs = 0;
        ddi_dma_impl_t  *hp = (ddi_dma_impl_t *)pktp->pkt_handle;
        ddi_dma_cookie_t *cp;

        if (pktw->pcw_curwin != 0) {
                ddi_dma_cookie_t        cookie;

                /*
                 * start the next window, and get its first cookie
                 */
                status = ddi_dma_getwin(pktp->pkt_handle,
                    pktw->pcw_curwin, &pktp->pkt_dma_offset,
                    &pktp->pkt_dma_len, &cookie,
                    &pktp->pkt_numcookies);
                if (status != DDI_SUCCESS)
                        return (0);
        }

        /*
         * start the Scatter/Gather loop
         */
        cp = hp->dmai_cookie - 1;
        pktp->pkt_dma_len = 0;
        for (;;) {

                /* take care of the loop-bookkeeping */
                pktp->pkt_dma_len += cp->dmac_size;
                num_segs++;
                /*
                 * if this was the last cookie in the current window,
                 * set the loop controls to start the next window and
                 * exit so the HBA can do this partial transfer
                 */
                if (num_segs >= pktp->pkt_numcookies) {
                        pktw->pcw_curwin++;
                        break;
                }

                cp++;
        }
        pktw->pcw_total_xfer += pktp->pkt_dma_len;
        pktp->pkt_cookies = hp->dmai_cookie - 1;
        hp->dmai_cookie = cp;

        return (1);
}
#endif

void scsi_free_cache_pkt(struct scsi_address *, struct scsi_pkt *);

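/*
 * scsi_init_cache_pkt()
 *
 * tran_init_pkt(9E) implementation used by HBA drivers that allocate
 * their packets from the framework's scsi_pkt kmem cache.  A packet is
 * taken from the per-HBA cache (unless the caller passes one in),
 * oversize cdb/target-private/status areas are allocated as needed, the
 * HBA's tran_setup_pkt() routine is called, and DMA resources are bound
 * for bp.  On failure the allocations are unwound and NULL is returned.
 */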
struct scsi_pkt *
scsi_init_cache_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
    struct buf *bp, int cmdlen, int statuslen, int pplen,
    int flags, int (*callback)(caddr_t), caddr_t callback_arg)
{
        struct scsi_pkt_cache_wrapper *pktw;
        scsi_hba_tran_t *tranp = ap->a_hba_tran;
        int             (*func)(caddr_t);

        func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

        if (in_pktp == NULL) {
                int kf;

                if (callback == SLEEP_FUNC)
                        kf = KM_SLEEP;
                else
                        kf = KM_NOSLEEP;
                /*
                 * By using kmem_cache_alloc(), the layout of the
                 * scsi_pkt, scsi_pkt_cache_wrapper, hba private data,
                 * cdb, tgt driver private data, and status block is
                 * as below.
                 *
                 * This is a piece of contiguous memory starting from
                 * the first structure field scsi_pkt in the struct
                 * scsi_pkt_cache_wrapper, followed by the hba private
                 * data, pkt_cdbp, the tgt driver private data and
                 * pkt_scbp.
                 *
                 * |----------------------------|--------------------->
                 * |    struct scsi_pkt         |       struct
                 * |    ......                  |scsi_pkt_cache_wrapper
                 * |    pcw_flags               |
                 * |----------------------------|<---------------------
                 * |    hba private data        |tranp->tran_hba_len
                 * |----------------------------|
                 * |    pkt_cdbp                |DEFAULT_CDBLEN
                 * |----------------------------|
                 * |    tgt private data        |DEFAULT_PRIVLEN
                 * |----------------------------|
                 * |    pkt_scbp                |DEFAULT_SCBLEN
                 * |----------------------------|
                 *
                 * If the actual data length of the cdb, or the tgt
                 * driver private data, or the status block is bigger
                 * than the default data length, kmem_alloc() will be
                 * called to get extra space.
                 */
                pktw = kmem_cache_alloc(tranp->tran_pkt_cache_ptr,
                    kf);
                if (pktw == NULL)
                        goto fail1;

                pktw->pcw_flags = 0;
                in_pktp = &(pktw->pcw_pkt);
                in_pktp->pkt_address = *ap;

                /*
                 * target drivers should initialize pkt_comp and
                 * pkt_time, but sometimes they don't so initialize
                 * them here to be safe.
                 */
                in_pktp->pkt_flags = 0;
                in_pktp->pkt_time = 0;
                in_pktp->pkt_resid = 0;
                in_pktp->pkt_state = 0;
                in_pktp->pkt_statistics = 0;
                in_pktp->pkt_reason = 0;
                in_pktp->pkt_dma_offset = 0;
                in_pktp->pkt_dma_len = 0;
                in_pktp->pkt_dma_flags = 0;
                in_pktp->pkt_path_instance = 0;
                ASSERT(in_pktp->pkt_numcookies == 0);
                pktw->pcw_curwin = 0;
                pktw->pcw_totalwin = 0;
                pktw->pcw_total_xfer = 0;

                in_pktp->pkt_cdblen = cmdlen;
                if ((tranp->tran_hba_flags & SCSI_HBA_TRAN_CDB) &&
                    (cmdlen > DEFAULT_CDBLEN)) {
                        pktw->pcw_flags |= PCW_NEED_EXT_CDB;
                        in_pktp->pkt_cdbp = kmem_alloc(cmdlen, kf);
                        if (in_pktp->pkt_cdbp == NULL)
                                goto fail2;
                }
                in_pktp->pkt_tgtlen = pplen;
                if (pplen > DEFAULT_PRIVLEN) {
                        pktw->pcw_flags |= PCW_NEED_EXT_TGT;
                        in_pktp->pkt_private = kmem_alloc(pplen, kf);
                        if (in_pktp->pkt_private == NULL)
                                goto fail3;
                }
                in_pktp->pkt_scblen = statuslen;
                if ((tranp->tran_hba_flags & SCSI_HBA_TRAN_SCB) &&
                    (statuslen > DEFAULT_SCBLEN)) {
                        pktw->pcw_flags |= PCW_NEED_EXT_SCB;
                        in_pktp->pkt_scbp = kmem_alloc(statuslen, kf);
                        if (in_pktp->pkt_scbp == NULL)
                                goto fail4;
                }
                if ((*tranp->tran_setup_pkt) (in_pktp,
                    func, NULL) == -1) {
                                goto fail5;
                }
                if (cmdlen)
                        bzero((void *)in_pktp->pkt_cdbp, cmdlen);
                if (pplen)
                        bzero((void *)in_pktp->pkt_private, pplen);
                if (statuslen)
                        bzero((void *)in_pktp->pkt_scbp, statuslen);
        } else
                pktw = (struct scsi_pkt_cache_wrapper *)in_pktp;

        if (bp && bp->b_bcount) {

                int dma_flags = 0;

                /*
                 * we need to transfer data, so we alloc dma resources
                 * for this packet
                 */
                /*CONSTCOND*/
                ASSERT(SLEEP_FUNC == DDI_DMA_SLEEP);
                /*CONSTCOND*/
                ASSERT(NULL_FUNC == DDI_DMA_DONTWAIT);

#if defined(_DMA_USES_PHYSADDR)
                /*
                 * with an IOMMU we map everything, so we don't
                 * need to bother with this
                 */
                if (tranp->tran_dma_attr.dma_attr_granular !=
                    pktw->pcw_granular) {

                        ddi_dma_free_handle(&in_pktp->pkt_handle);
                        if (ddi_dma_alloc_handle(tranp->tran_hba_dip,
                            &tranp->tran_dma_attr,
                            func, NULL,
                            &in_pktp->pkt_handle) != DDI_SUCCESS) {

                                in_pktp->pkt_handle = NULL;
                                return (NULL);
                        }
                        pktw->pcw_granular =
                            tranp->tran_dma_attr.dma_attr_granular;
                }
#endif

                if (in_pktp->pkt_numcookies == 0) {
                        pktw->pcw_bp = bp;
                        /*
                         * set dma flags; the "read" case must be first
                         * since B_WRITE isn't always set for writes.
                         */
                        if (bp->b_flags & B_READ) {
                                dma_flags |= DDI_DMA_READ;
                        } else {
                                dma_flags |= DDI_DMA_WRITE;
                        }
                        if (flags & PKT_CONSISTENT)
                                dma_flags |= DDI_DMA_CONSISTENT;
                        if (flags & PKT_DMA_PARTIAL)
                                dma_flags |= DDI_DMA_PARTIAL;

#if defined(__sparc)
                        /*
                         * workaround for byte hole issue on psycho and
                         * schizo pre 2.1
                         */
                        if ((bp->b_flags & B_READ) && ((bp->b_flags &
                            (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
                            (((uintptr_t)bp->b_un.b_addr & 0x7) ||
                            ((uintptr_t)bp->b_bcount & 0x7))) {
                                dma_flags |= DDI_DMA_CONSISTENT;
                        }
#endif
                        if (!scsi_dma_buf_bind_attr(pktw, bp,
                            dma_flags, callback, callback_arg)) {
                                return (NULL);
                        } else {
                                pktw->pcw_flags |= PCW_BOUND;
                        }
                }

#if defined(_DMA_USES_PHYSADDR)
                if (!scsi_dmaget_attr(pktw)) {
                        scsi_dmafree_attr(in_pktp);
                        goto fail5;
                }
#else
                in_pktp->pkt_cookies = &pktw->pcw_cookie;
                in_pktp->pkt_dma_len = pktw->pcw_cookie.dmac_size;
                pktw->pcw_total_xfer += in_pktp->pkt_dma_len;
#endif
                ASSERT(in_pktp->pkt_numcookies <=
                    tranp->tran_dma_attr.dma_attr_sgllen);
                ASSERT(pktw->pcw_total_xfer <= bp->b_bcount);
                in_pktp->pkt_resid = bp->b_bcount -
                    pktw->pcw_total_xfer;

                ASSERT((in_pktp->pkt_resid % pktw->pcw_granular) ==
                    0);
        } else {
                /* !bp or no b_bcount */
                in_pktp->pkt_resid = 0;
        }
        return (in_pktp);

fail5:
        if (pktw->pcw_flags & PCW_NEED_EXT_SCB) {
                kmem_free(in_pktp->pkt_scbp, statuslen);
                in_pktp->pkt_scbp = (opaque_t)((char *)in_pktp +
                    tranp->tran_hba_len + DEFAULT_PRIVLEN +
                    sizeof (struct scsi_pkt_cache_wrapper));
                if ((A_TO_TRAN(ap))->tran_hba_flags & SCSI_HBA_TRAN_CDB)
                        in_pktp->pkt_scbp = (opaque_t)((in_pktp->pkt_scbp) +
                            DEFAULT_CDBLEN);
                in_pktp->pkt_scblen = 0;
        }
fail4:
        if (pktw->pcw_flags & PCW_NEED_EXT_TGT) {
                kmem_free(in_pktp->pkt_private, pplen);
                in_pktp->pkt_tgtlen = 0;
                in_pktp->pkt_private = NULL;
        }
fail3:
        if (pktw->pcw_flags & PCW_NEED_EXT_CDB) {
                kmem_free(in_pktp->pkt_cdbp, cmdlen);
                in_pktp->pkt_cdbp = (opaque_t)((char *)in_pktp +
                    tranp->tran_hba_len +
                    sizeof (struct scsi_pkt_cache_wrapper));
                in_pktp->pkt_cdblen = 0;
        }
        pktw->pcw_flags &=
            ~(PCW_NEED_EXT_CDB|PCW_NEED_EXT_TGT|PCW_NEED_EXT_SCB);
fail2:
        kmem_cache_free(tranp->tran_pkt_cache_ptr, pktw);
fail1:
        if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
                ddi_set_callback(callback, callback_arg,
                    &scsi_callback_id);
        }

        return (NULL);
}

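/*
 * scsi_free_cache_pkt()
 *
 * tran_destroy_pkt(9E) counterpart of scsi_init_cache_pkt(): call the
 * HBA's tran_teardown_pkt(), release any bound DMA resources and any
 * oversize cdb/target-private/status allocations, return the wrapper to
 * the kmem cache, and run any pending resource callback.
 */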
void
scsi_free_cache_pkt(struct scsi_address *ap, struct scsi_pkt *pktp)
{
        struct scsi_pkt_cache_wrapper *pktw;

        (*A_TO_TRAN(ap)->tran_teardown_pkt)(pktp);
        pktw = (struct scsi_pkt_cache_wrapper *)pktp;
        if (pktw->pcw_flags & PCW_BOUND)
                scsi_dmafree_attr(pktp);

        /*
         * if we allocated memory for anything that wouldn't fit, free
         * the memory and restore the pointers
         */
        if (pktw->pcw_flags & PCW_NEED_EXT_SCB) {
                kmem_free(pktp->pkt_scbp, pktp->pkt_scblen);
                pktp->pkt_scbp = (opaque_t)((char *)pktp +
                    (A_TO_TRAN(ap))->tran_hba_len +
                    DEFAULT_PRIVLEN + sizeof (struct scsi_pkt_cache_wrapper));
                if ((A_TO_TRAN(ap))->tran_hba_flags & SCSI_HBA_TRAN_CDB)
                        pktp->pkt_scbp = (opaque_t)((pktp->pkt_scbp) +
                            DEFAULT_CDBLEN);
                pktp->pkt_scblen = 0;
        }
        if (pktw->pcw_flags & PCW_NEED_EXT_TGT) {
                kmem_free(pktp->pkt_private, pktp->pkt_tgtlen);
                pktp->pkt_tgtlen = 0;
                pktp->pkt_private = NULL;
        }
        if (pktw->pcw_flags & PCW_NEED_EXT_CDB) {
                kmem_free(pktp->pkt_cdbp, pktp->pkt_cdblen);
                pktp->pkt_cdbp = (opaque_t)((char *)pktp +
                    (A_TO_TRAN(ap))->tran_hba_len +
                    sizeof (struct scsi_pkt_cache_wrapper));
                pktp->pkt_cdblen = 0;
        }
        pktw->pcw_flags &=
            ~(PCW_NEED_EXT_CDB|PCW_NEED_EXT_TGT|PCW_NEED_EXT_SCB);
        kmem_cache_free(A_TO_TRAN(ap)->tran_pkt_cache_ptr, pktw);

        if (scsi_callback_id != 0) {
                ddi_run_callback(&scsi_callback_id);
        }

}


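/*
 * scsi_init_pkt()
 *
 * Main allocation interface for target drivers: allocate and initialize
 * a scsi_pkt and/or its DMA resources by calling the HBA's
 * tran_init_pkt(9E) entry point.  If allocation fails and the caller
 * supplied a real callback function, it is registered with
 * ddi_set_callback() so the caller is notified when resources may be
 * available again.
 *
 * Illustrative call from a target driver's strategy path (variable
 * names are hypothetical):
 *
 *      pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP1,
 *          sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
 *          SLEEP_FUNC, NULL);
 */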
struct scsi_pkt *
scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
    struct buf *bp, int cmdlen, int statuslen, int pplen,
    int flags, int (*callback)(caddr_t), caddr_t callback_arg)
{
        struct scsi_pkt *pktp;
        scsi_hba_tran_t *tranp = ap->a_hba_tran;
        int             (*func)(caddr_t);

        TRACE_5(TR_FAC_SCSI_RES, TR_SCSI_INIT_PKT_START,
"scsi_init_pkt_start: addr %p in_pktp %p cmdlen %d statuslen %d pplen %d",
            ap, in_pktp, cmdlen, statuslen, pplen);

#if defined(__i386) || defined(__amd64)
        if (flags & PKT_CONSISTENT_OLD) {
                flags &= ~PKT_CONSISTENT_OLD;
                flags |= PKT_CONSISTENT;
        }
#endif

        func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

        pktp = (*tranp->tran_init_pkt) (ap, in_pktp, bp, cmdlen,
            statuslen, pplen, flags, func, NULL);
        if (pktp == NULL) {
                if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
                        ddi_set_callback(callback, callback_arg,
                            &scsi_callback_id);
                }
        }

        TRACE_1(TR_FAC_SCSI_RES, TR_SCSI_INIT_PKT_END,
            "scsi_init_pkt_end: pktp %p", pktp);
        return (pktp);
}

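/*
 * scsi_destroy_pkt()
 *
 * Free a packet and all of its associated resources through the HBA's
 * tran_destroy_pkt(9E) entry point, then run any pending resource
 * callback.
 */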
void
scsi_destroy_pkt(struct scsi_pkt *pkt)
{
        struct scsi_address     *ap = P_TO_ADDR(pkt);

        TRACE_1(TR_FAC_SCSI_RES, TR_SCSI_DESTROY_PKT_START,
            "scsi_destroy_pkt_start: pkt %p", pkt);

        (*A_TO_TRAN(ap)->tran_destroy_pkt)(ap, pkt);

        if (scsi_callback_id != 0) {
                ddi_run_callback(&scsi_callback_id);
        }

        TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_DESTROY_PKT_END,
            "scsi_destroy_pkt_end");
}


/*
 *      Generic Resource Allocation Routines
 */

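/*
 * scsi_resalloc(), scsi_pktalloc() and scsi_dmaget() below are older
 * allocation interfaces, retained for compatibility and implemented on
 * top of tran_init_pkt(9E); new target drivers should use
 * scsi_init_pkt() instead.
 */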
struct scsi_pkt *
scsi_resalloc(struct scsi_address *ap, int cmdlen, int statuslen,
    opaque_t dmatoken, int (*callback)())
{
        register struct scsi_pkt *pkt;
        register scsi_hba_tran_t *tranp = ap->a_hba_tran;
        register int                    (*func)(caddr_t);

        func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

        pkt = (*tranp->tran_init_pkt) (ap, NULL, (struct buf *)dmatoken,
            cmdlen, statuslen, 0, 0, func, NULL);
        if (pkt == NULL) {
                if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
                        ddi_set_callback(callback, NULL, &scsi_callback_id);
                }
        }

        return (pkt);
}

struct scsi_pkt *
scsi_pktalloc(struct scsi_address *ap, int cmdlen, int statuslen,
    int (*callback)())
{
        struct scsi_pkt         *pkt;
        struct scsi_hba_tran    *tran = ap->a_hba_tran;
        register int                    (*func)(caddr_t);

        func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

        pkt = (*tran->tran_init_pkt) (ap, NULL, NULL, cmdlen,
            statuslen, 0, 0, func, NULL);
        if (pkt == NULL) {
                if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
                        ddi_set_callback(callback, NULL, &scsi_callback_id);
                }
        }

        return (pkt);
}

struct scsi_pkt *
scsi_dmaget(struct scsi_pkt *pkt, opaque_t dmatoken, int (*callback)())
{
        struct scsi_pkt         *new_pkt;
        register int            (*func)(caddr_t);

        func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;

        new_pkt = (*P_TO_TRAN(pkt)->tran_init_pkt) (&pkt->pkt_address,
            pkt, (struct buf *)dmatoken,
            0, 0, 0, 0, func, NULL);
        ASSERT(new_pkt == pkt || new_pkt == NULL);
        if (new_pkt == NULL) {
                if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
                        ddi_set_callback(callback, NULL, &scsi_callback_id);
                }
        }

        return (new_pkt);
}


/*
 *      Generic Resource Deallocation Routines
 */

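/*
 * scsi_dmafree()
 *
 * Free only the DMA resources associated with a packet (the packet
 * itself remains valid) via the HBA's tran_dmafree(9E) entry point,
 * then run any pending resource callback.
 */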
void
scsi_dmafree(struct scsi_pkt *pkt)
{
        register struct scsi_address    *ap = P_TO_ADDR(pkt);

        (*A_TO_TRAN(ap)->tran_dmafree)(ap, pkt);

        if (scsi_callback_id != 0) {
                ddi_run_callback(&scsi_callback_id);
        }
}

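/*
 * scsi_cache_dmafree()
 *
 * tran_dmafree(9E) implementation for packets allocated from the
 * scsi_pkt kmem cache.
 */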
/*ARGSUSED*/
void
scsi_cache_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
{
        ASSERT(pkt->pkt_numcookies == 0 ||
            ((struct scsi_pkt_cache_wrapper *)pkt)->pcw_flags & PCW_BOUND);
        ASSERT(pkt->pkt_handle != NULL);
        scsi_dmafree_attr(pkt);

        if (scsi_callback_id != 0) {
                ddi_run_callback(&scsi_callback_id);
        }
}

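/*
 * scsi_sync_pkt()
 *
 * Synchronize the CPU's and the device's view of the packet's data via
 * the HBA's tran_sync_pkt(9E) entry point; a no-op unless data was
 * actually transferred.
 */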
void
scsi_sync_pkt(struct scsi_pkt *pkt)
{
        register struct scsi_address    *ap = P_TO_ADDR(pkt);

        if (pkt->pkt_state & STATE_XFERRED_DATA)
                (*A_TO_TRAN(ap)->tran_sync_pkt)(ap, pkt);
}

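/*
 * scsi_sync_cache_pkt()
 *
 * tran_sync_pkt(9E) implementation for packets allocated from the
 * scsi_pkt kmem cache: sync the packet's DMA handle for the device or
 * for the CPU depending on the transfer direction.
 */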
/*ARGSUSED*/
void
scsi_sync_cache_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
        if (pkt->pkt_handle &&
            (pkt->pkt_dma_flags & (DDI_DMA_WRITE | DDI_DMA_READ))) {
                (void) ddi_dma_sync(pkt->pkt_handle,
                    pkt->pkt_dma_offset, pkt->pkt_dma_len,
                    (pkt->pkt_dma_flags & DDI_DMA_WRITE) ?
                    DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
        }
}

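/*
 * scsi_resfree()
 *
 * Older deallocation interface: free the packet and its resources via
 * the HBA's tran_destroy_pkt(9E) entry point (equivalent to
 * scsi_destroy_pkt()), then run any pending resource callback.
 */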
void
scsi_resfree(struct scsi_pkt *pkt)
{
        register struct scsi_address    *ap = P_TO_ADDR(pkt);
        (*A_TO_TRAN(ap)->tran_destroy_pkt)(ap, pkt);

        if (scsi_callback_id != 0) {
                ddi_run_callback(&scsi_callback_id);
        }
}