4888 Undocument dma_req(9s)
4884 EOF scsi_hba_attach
4886 EOF ddi_dmae_getlim
4887 EOF ddi_iomin
4634 undocument scsi_hba_attach() and ddi_dma_lim(9s)
4630 clean stale references to ddi_iopb_alloc and ddi_iopb_free
--- old/usr/src/uts/common/io/scsi/impl/scsi_resource.c
+++ new/usr/src/uts/common/io/scsi/impl/scsi_resource.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 #include <sys/scsi/scsi.h>
28 28 #include <sys/vtrace.h>
29 29
30 30
31 31 #define A_TO_TRAN(ap) ((ap)->a_hba_tran)
32 32 #define P_TO_TRAN(pkt) ((pkt)->pkt_address.a_hba_tran)
33 33 #define P_TO_ADDR(pkt) (&((pkt)->pkt_address))
34 34
35 35 /*
36 36 * Callback id
37 37 */
38 38 uintptr_t scsi_callback_id = 0;
39 39
40 40 extern ddi_dma_attr_t scsi_alloc_attr;
41 41
42 42 struct buf *
43 43 scsi_alloc_consistent_buf(struct scsi_address *ap,
44 44 struct buf *in_bp, size_t datalen, uint_t bflags,
45 45 int (*callback)(caddr_t), caddr_t callback_arg)
46 46 {
47 47 dev_info_t *pdip;
48 48 struct buf *bp;
49 49 int kmflag;
50 50 size_t rlen;
51 51
52 52 TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_ALLOC_CONSISTENT_BUF_START,
53 53 "scsi_alloc_consistent_buf_start");
54 54
55 55 if (!in_bp) {
56 56 kmflag = (callback == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
57 57 if ((bp = getrbuf(kmflag)) == NULL) {
58 58 goto no_resource;
59 59 }
60 60 } else {
61 61 bp = in_bp;
62 62
63 63 /* we are establishing a new buffer memory association */
64 64 bp->b_flags &= ~(B_PAGEIO | B_PHYS | B_REMAPPED | B_SHADOW);
65 65 bp->b_proc = NULL;
66 66 bp->b_pages = NULL;
67 67 bp->b_shadow = NULL;
68 68 }
69 69
70 70 /* limit bits that can be set by bflags argument */
71 71 ASSERT(!(bflags & ~(B_READ | B_WRITE)));
72 72 bflags &= (B_READ | B_WRITE);
73 73 bp->b_un.b_addr = 0;
74 74
75 75 if (datalen) {
76 76 pdip = (A_TO_TRAN(ap))->tran_hba_dip;
77 77
78 78 /*
79 79 * use i_ddi_mem_alloc() for now until we have an interface to
80 80 * allocate memory for DMA which doesn't require a DMA handle.
81 - * ddi_iopb_alloc() is obsolete and we want more flexibility in
82 - * controlling the DMA address constraints.
83 81 */
84 82 while (i_ddi_mem_alloc(pdip, &scsi_alloc_attr, datalen,
85 83 ((callback == SLEEP_FUNC) ? 1 : 0), 0, NULL,
86 84 &bp->b_un.b_addr, &rlen, NULL) != DDI_SUCCESS) {
87 85 if (callback == SLEEP_FUNC) {
88 86 delay(drv_usectohz(10000));
89 87 } else {
90 88 if (!in_bp)
91 89 freerbuf(bp);
92 90 goto no_resource;
93 91 }
94 92 }
95 93 bp->b_flags |= bflags;
96 94 }
97 95 bp->b_bcount = datalen;
98 96 bp->b_resid = 0;
99 97
100 98 TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_ALLOC_CONSISTENT_BUF_END,
101 99 "scsi_alloc_consistent_buf_end");
102 100 return (bp);
103 101
104 102 no_resource:
105 103
106 104 if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
107 105 ddi_set_callback(callback, callback_arg,
108 106 &scsi_callback_id);
109 107 }
110 108 TRACE_0(TR_FAC_SCSI_RES,
111 109 TR_SCSI_ALLOC_CONSISTENT_BUF_RETURN1_END,
112 110 "scsi_alloc_consistent_buf_end (return1)");
113 111 return (NULL);
114 112 }
115 113
116 114 void
117 115 scsi_free_consistent_buf(struct buf *bp)
118 116 {
119 117 TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_FREE_CONSISTENT_BUF_START,
120 118 "scsi_free_consistent_buf_start");
121 119 if (!bp)
122 120 return;
123 121 if (bp->b_un.b_addr)
124 122 i_ddi_mem_free((caddr_t)bp->b_un.b_addr, NULL);
125 123 freerbuf(bp);
126 124 if (scsi_callback_id != 0) {
127 125 ddi_run_callback(&scsi_callback_id);
128 126 }
129 127 TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_FREE_CONSISTENT_BUF_END,
130 128 "scsi_free_consistent_buf_end");
131 129 }
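
For reference, a minimal sketch of how a target driver pairs these two interfaces, per scsi_alloc_consistent_buf(9F). The command-transport helper my_send_cmd() is hypothetical; SUN_INQSIZE and SCMD_INQUIRY come from the generic SCSI headers.

    #include <sys/scsi/scsi.h>

    /*
     * Illustrative only: allocate a DMA-consistent buffer for an
     * INQUIRY data-in transfer, run the command, free the buffer.
     */
    static int
    my_inquiry(struct scsi_device *devp)
    {
            struct buf *bp;
            int rval = DDI_FAILURE;

            /* in_bp == NULL, so a buf header is allocated via getrbuf() */
            bp = scsi_alloc_consistent_buf(&devp->sd_address, NULL,
                SUN_INQSIZE, B_READ, SLEEP_FUNC, NULL);
            if (bp == NULL)
                    return (rval);

            if (my_send_cmd(devp, SCMD_INQUIRY, bp) == 0)
                    rval = DDI_SUCCESS;

            /* frees the DMA memory and, since in_bp was NULL, bp itself */
            scsi_free_consistent_buf(bp);
            return (rval);
    }
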
132 130
133 131 void
134 132 scsi_dmafree_attr(struct scsi_pkt *pktp)
135 133 {
136 134 struct scsi_pkt_cache_wrapper *pktw =
137 135 (struct scsi_pkt_cache_wrapper *)pktp;
138 136
139 137 if (pktw->pcw_flags & PCW_BOUND) {
140 138 if (ddi_dma_unbind_handle(pktp->pkt_handle) !=
141 139 DDI_SUCCESS)
142 140 cmn_err(CE_WARN, "scsi_dmafree_attr: "
143 141 "unbind handle failed");
144 142 pktw->pcw_flags &= ~PCW_BOUND;
145 143 }
146 144 pktp->pkt_numcookies = 0;
147 145 pktw->pcw_totalwin = 0;
148 146 }
149 147
150 148 struct buf *
151 149 scsi_pkt2bp(struct scsi_pkt *pkt)
152 150 {
153 151 return (((struct scsi_pkt_cache_wrapper *)pkt)->pcw_bp);
154 152 }
155 153
156 154 int
157 155 scsi_dma_buf_bind_attr(struct scsi_pkt_cache_wrapper *pktw,
158 156 struct buf *bp,
159 157 int dma_flags,
160 158 int (*callback)(),
161 159 caddr_t arg)
162 160 {
163 161 struct scsi_pkt *pktp = &(pktw->pcw_pkt);
164 162 int status;
165 163
166 164 /*
167 165 * First time, need to establish the handle.
168 166 */
169 167
170 168 ASSERT(pktp->pkt_numcookies == 0);
171 169 ASSERT(pktw->pcw_totalwin == 0);
172 170
173 171 status = ddi_dma_buf_bind_handle(pktp->pkt_handle, bp, dma_flags,
174 172 callback, arg, &pktw->pcw_cookie,
175 173 &pktp->pkt_numcookies);
176 174
177 175 switch (status) {
178 176 case DDI_DMA_MAPPED:
179 177 pktw->pcw_totalwin = 1;
180 178 break;
181 179
182 180 case DDI_DMA_PARTIAL_MAP:
183 181 /* enable first call to ddi_dma_getwin */
184 182 if (ddi_dma_numwin(pktp->pkt_handle,
185 183 &pktw->pcw_totalwin) != DDI_SUCCESS) {
186 184 bp->b_error = 0;
187 185 return (0);
188 186 }
189 187 break;
190 188
191 189 case DDI_DMA_NORESOURCES:
192 190 bp->b_error = 0;
193 191 return (0);
194 192
195 193 case DDI_DMA_TOOBIG:
196 194 bioerror(bp, EINVAL);
197 195 return (0);
198 196
199 197 case DDI_DMA_NOMAPPING:
200 198 case DDI_DMA_INUSE:
201 199 default:
202 200 bioerror(bp, EFAULT);
203 201 return (0);
204 202 }
205 203
206 204 /* initialize the loop controls for scsi_dmaget_attr() */
207 205 pktw->pcw_curwin = 0;
208 206 pktw->pcw_total_xfer = 0;
209 207 pktp->pkt_dma_flags = dma_flags;
210 208 return (1);
211 209 }
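
The DDI_DMA_PARTIAL_MAP arm above sets up the standard DDI windowing protocol; scsi_dmaget_attr() below consumes it one window at a time (skipping ddi_dma_getwin() for window 0, whose first cookie was already returned by the bind). A generic sketch of the same protocol using only the public interfaces, with illustrative variable names:

    uint_t nwin, win, ccount;
    off_t offset;
    size_t len;
    ddi_dma_cookie_t cookie;

    if (ddi_dma_numwin(handle, &nwin) == DDI_SUCCESS) {
            for (win = 0; win < nwin; win++) {
                    if (ddi_dma_getwin(handle, win, &offset, &len,
                        &cookie, &ccount) != DDI_SUCCESS)
                            break;
                    /* program cookie (and its ccount - 1 successors) */
            }
    }
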
212 210
213 211 #if defined(_DMA_USES_PHYSADDR)
214 212 int
215 213 scsi_dmaget_attr(struct scsi_pkt_cache_wrapper *pktw)
216 214 {
217 215 struct scsi_pkt *pktp = &(pktw->pcw_pkt);
218 216
219 217 int status;
220 218 int num_segs = 0;
221 219 ddi_dma_impl_t *hp = (ddi_dma_impl_t *)pktp->pkt_handle;
222 220 ddi_dma_cookie_t *cp;
223 221
224 222 if (pktw->pcw_curwin != 0) {
225 223 ddi_dma_cookie_t cookie;
226 224
227 225 /*
228 226 * start the next window, and get its first cookie
229 227 */
230 228 status = ddi_dma_getwin(pktp->pkt_handle,
231 229 pktw->pcw_curwin, &pktp->pkt_dma_offset,
232 230 &pktp->pkt_dma_len, &cookie,
233 231 &pktp->pkt_numcookies);
234 232 if (status != DDI_SUCCESS)
235 233 return (0);
236 234 }
237 235
238 236 /*
239 237 * start the Scatter/Gather loop
240 238 */
241 239 cp = hp->dmai_cookie - 1;
242 240 pktp->pkt_dma_len = 0;
243 241 for (;;) {
244 242
245 243 /* take care of the loop-bookkeeping */
246 244 pktp->pkt_dma_len += cp->dmac_size;
247 245 num_segs++;
248 246 /*
249 247 * if this was the last cookie in the current window
 250 248 			 * if this was the last cookie in the current window,
 251 249 			 * set the loop controls to start the next window and
 252 250 			 * exit so the HBA can do this partial transfer
253 251 if (num_segs >= pktp->pkt_numcookies) {
254 252 pktw->pcw_curwin++;
255 253 break;
256 254 }
257 255
258 256 cp++;
259 257 }
260 258 pktw->pcw_total_xfer += pktp->pkt_dma_len;
261 259 pktp->pkt_cookies = hp->dmai_cookie - 1;
262 260 hp->dmai_cookie = cp;
263 261
264 262 return (1);
265 263 }
266 264 #endif
267 265
268 266 void scsi_free_cache_pkt(struct scsi_address *, struct scsi_pkt *);
269 267
270 268 struct scsi_pkt *
271 269 scsi_init_cache_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
272 270 struct buf *bp, int cmdlen, int statuslen, int pplen,
273 271 int flags, int (*callback)(caddr_t), caddr_t callback_arg)
274 272 {
275 273 struct scsi_pkt_cache_wrapper *pktw;
276 274 scsi_hba_tran_t *tranp = ap->a_hba_tran;
277 275 int (*func)(caddr_t);
278 276
279 277 func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;
280 278
281 279 if (in_pktp == NULL) {
282 280 int kf;
283 281
284 282 if (callback == SLEEP_FUNC)
285 283 kf = KM_SLEEP;
286 284 else
287 285 kf = KM_NOSLEEP;
288 286 /*
289 287 * By using kmem_cache_alloc(), the layout of the
290 288 * scsi_pkt, scsi_pkt_cache_wrapper, hba private data,
291 289 * cdb, tgt driver private data, and status block is
292 290 * as below.
293 291 *
294 292 * This is a piece of contiguous memory starting from
295 293 * the first structure field scsi_pkt in the struct
296 294 * scsi_pkt_cache_wrapper, followed by the hba private
297 295 * data, pkt_cdbp, the tgt driver private data and
298 296 * pkt_scbp.
299 297 *
300 298 * |----------------------------|--------------------->
301 299 * | struct scsi_pkt | struct
302 300 * | ...... |scsi_pkt_cache_wrapper
303 301 * | pcw_flags |
304 302 * |----------------------------|<---------------------
305 303 * | hba private data |tranp->tran_hba_len
306 304 * |----------------------------|
307 305 * | pkt_cdbp |DEFAULT_CDBLEN
308 306 * |----------------------------|
309 307 * | tgt private data |DEFAULT_PRIVLEN
310 308 * |----------------------------|
311 309 * | pkt_scbp |DEFAULT_SCBLEN
312 310 * |----------------------------|
313 311 *
314 312 * If the actual data length of the cdb, or the tgt
315 313 * driver private data, or the status block is bigger
316 314 * than the default data length, kmem_alloc() will be
317 315 * called to get extra space.
318 316 */
319 317 pktw = kmem_cache_alloc(tranp->tran_pkt_cache_ptr,
320 318 kf);
321 319 if (pktw == NULL)
322 320 goto fail1;
323 321
324 322 pktw->pcw_flags = 0;
325 323 in_pktp = &(pktw->pcw_pkt);
326 324 in_pktp->pkt_address = *ap;
327 325
328 326 /*
329 327 * target drivers should initialize pkt_comp and
 330 328 		 * pkt_time, but sometimes they don't, so initialize
331 329 * them here to be safe.
332 330 */
333 331 in_pktp->pkt_flags = 0;
334 332 in_pktp->pkt_time = 0;
335 333 in_pktp->pkt_resid = 0;
336 334 in_pktp->pkt_state = 0;
337 335 in_pktp->pkt_statistics = 0;
338 336 in_pktp->pkt_reason = 0;
339 337 in_pktp->pkt_dma_offset = 0;
340 338 in_pktp->pkt_dma_len = 0;
341 339 in_pktp->pkt_dma_flags = 0;
342 340 in_pktp->pkt_path_instance = 0;
343 341 ASSERT(in_pktp->pkt_numcookies == 0);
344 342 pktw->pcw_curwin = 0;
345 343 pktw->pcw_totalwin = 0;
346 344 pktw->pcw_total_xfer = 0;
347 345
348 346 in_pktp->pkt_cdblen = cmdlen;
349 347 if ((tranp->tran_hba_flags & SCSI_HBA_TRAN_CDB) &&
350 348 (cmdlen > DEFAULT_CDBLEN)) {
351 349 pktw->pcw_flags |= PCW_NEED_EXT_CDB;
352 350 in_pktp->pkt_cdbp = kmem_alloc(cmdlen, kf);
353 351 if (in_pktp->pkt_cdbp == NULL)
354 352 goto fail2;
355 353 }
356 354 in_pktp->pkt_tgtlen = pplen;
357 355 if (pplen > DEFAULT_PRIVLEN) {
358 356 pktw->pcw_flags |= PCW_NEED_EXT_TGT;
359 357 in_pktp->pkt_private = kmem_alloc(pplen, kf);
360 358 if (in_pktp->pkt_private == NULL)
361 359 goto fail3;
362 360 }
363 361 in_pktp->pkt_scblen = statuslen;
364 362 if ((tranp->tran_hba_flags & SCSI_HBA_TRAN_SCB) &&
365 363 (statuslen > DEFAULT_SCBLEN)) {
366 364 pktw->pcw_flags |= PCW_NEED_EXT_SCB;
367 365 in_pktp->pkt_scbp = kmem_alloc(statuslen, kf);
368 366 if (in_pktp->pkt_scbp == NULL)
369 367 goto fail4;
370 368 }
371 369 if ((*tranp->tran_setup_pkt) (in_pktp,
372 370 func, NULL) == -1) {
373 371 goto fail5;
374 372 }
375 373 if (cmdlen)
376 374 bzero((void *)in_pktp->pkt_cdbp, cmdlen);
377 375 if (pplen)
378 376 bzero((void *)in_pktp->pkt_private, pplen);
379 377 if (statuslen)
380 378 bzero((void *)in_pktp->pkt_scbp, statuslen);
381 379 } else
382 380 pktw = (struct scsi_pkt_cache_wrapper *)in_pktp;
383 381
384 382 if (bp && bp->b_bcount) {
385 383
386 384 int dma_flags = 0;
387 385
388 386 /*
389 387 * we need to transfer data, so we alloc dma resources
390 388 * for this packet
391 389 */
392 390 /*CONSTCOND*/
393 391 ASSERT(SLEEP_FUNC == DDI_DMA_SLEEP);
394 392 /*CONSTCOND*/
395 393 ASSERT(NULL_FUNC == DDI_DMA_DONTWAIT);
396 394
397 395 #if defined(_DMA_USES_PHYSADDR)
398 396 /*
399 397 * with an IOMMU we map everything, so we don't
400 398 * need to bother with this
401 399 */
402 400 if (tranp->tran_dma_attr.dma_attr_granular !=
403 401 pktw->pcw_granular) {
404 402
405 403 ddi_dma_free_handle(&in_pktp->pkt_handle);
406 404 if (ddi_dma_alloc_handle(tranp->tran_hba_dip,
407 405 &tranp->tran_dma_attr,
408 406 func, NULL,
409 407 &in_pktp->pkt_handle) != DDI_SUCCESS) {
410 408
411 409 in_pktp->pkt_handle = NULL;
412 410 return (NULL);
413 411 }
414 412 pktw->pcw_granular =
415 413 tranp->tran_dma_attr.dma_attr_granular;
416 414 }
417 415 #endif
418 416
419 417 if (in_pktp->pkt_numcookies == 0) {
420 418 pktw->pcw_bp = bp;
421 419 /*
422 420 * set dma flags; the "read" case must be first
 423 421 			 * since B_WRITE isn't always set for writes.
424 422 */
425 423 if (bp->b_flags & B_READ) {
426 424 dma_flags |= DDI_DMA_READ;
427 425 } else {
428 426 dma_flags |= DDI_DMA_WRITE;
429 427 }
430 428 if (flags & PKT_CONSISTENT)
431 429 dma_flags |= DDI_DMA_CONSISTENT;
432 430 if (flags & PKT_DMA_PARTIAL)
433 431 dma_flags |= DDI_DMA_PARTIAL;
434 432
435 433 #if defined(__sparc)
436 434 /*
437 435 * workaround for byte hole issue on psycho and
438 436 * schizo pre 2.1
439 437 */
440 438 if ((bp->b_flags & B_READ) && ((bp->b_flags &
441 439 (B_PAGEIO|B_REMAPPED)) != B_PAGEIO) &&
442 440 (((uintptr_t)bp->b_un.b_addr & 0x7) ||
443 441 ((uintptr_t)bp->b_bcount & 0x7))) {
444 442 dma_flags |= DDI_DMA_CONSISTENT;
445 443 }
446 444 #endif
447 445 if (!scsi_dma_buf_bind_attr(pktw, bp,
448 446 dma_flags, callback, callback_arg)) {
449 447 return (NULL);
450 448 } else {
451 449 pktw->pcw_flags |= PCW_BOUND;
452 450 }
453 451 }
454 452
455 453 #if defined(_DMA_USES_PHYSADDR)
456 454 if (!scsi_dmaget_attr(pktw)) {
457 455 scsi_dmafree_attr(in_pktp);
458 456 goto fail5;
459 457 }
460 458 #else
461 459 in_pktp->pkt_cookies = &pktw->pcw_cookie;
462 460 in_pktp->pkt_dma_len = pktw->pcw_cookie.dmac_size;
463 461 pktw->pcw_total_xfer += in_pktp->pkt_dma_len;
464 462 #endif
465 463 ASSERT(in_pktp->pkt_numcookies <=
466 464 tranp->tran_dma_attr.dma_attr_sgllen);
467 465 ASSERT(pktw->pcw_total_xfer <= bp->b_bcount);
468 466 in_pktp->pkt_resid = bp->b_bcount -
469 467 pktw->pcw_total_xfer;
470 468
471 469 ASSERT((in_pktp->pkt_resid % pktw->pcw_granular) ==
472 470 0);
473 471 } else {
474 472 /* !bp or no b_bcount */
475 473 in_pktp->pkt_resid = 0;
476 474 }
477 475 return (in_pktp);
478 476
479 477 fail5:
480 478 if (pktw->pcw_flags & PCW_NEED_EXT_SCB) {
481 479 kmem_free(in_pktp->pkt_scbp, statuslen);
482 480 in_pktp->pkt_scbp = (opaque_t)((char *)in_pktp +
483 481 tranp->tran_hba_len + DEFAULT_PRIVLEN +
484 482 sizeof (struct scsi_pkt_cache_wrapper));
485 483 if ((A_TO_TRAN(ap))->tran_hba_flags & SCSI_HBA_TRAN_CDB)
486 484 in_pktp->pkt_scbp = (opaque_t)((in_pktp->pkt_scbp) +
487 485 DEFAULT_CDBLEN);
488 486 in_pktp->pkt_scblen = 0;
489 487 }
490 488 fail4:
491 489 if (pktw->pcw_flags & PCW_NEED_EXT_TGT) {
492 490 kmem_free(in_pktp->pkt_private, pplen);
493 491 in_pktp->pkt_tgtlen = 0;
494 492 in_pktp->pkt_private = NULL;
495 493 }
496 494 fail3:
497 495 if (pktw->pcw_flags & PCW_NEED_EXT_CDB) {
498 496 kmem_free(in_pktp->pkt_cdbp, cmdlen);
499 497 in_pktp->pkt_cdbp = (opaque_t)((char *)in_pktp +
500 498 tranp->tran_hba_len +
501 499 sizeof (struct scsi_pkt_cache_wrapper));
502 500 in_pktp->pkt_cdblen = 0;
503 501 }
504 502 pktw->pcw_flags &=
505 503 ~(PCW_NEED_EXT_CDB|PCW_NEED_EXT_TGT|PCW_NEED_EXT_SCB);
506 504 fail2:
507 505 kmem_cache_free(tranp->tran_pkt_cache_ptr, pktw);
508 506 fail1:
509 507 if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
510 508 ddi_set_callback(callback, callback_arg,
511 509 &scsi_callback_id);
512 510 }
513 511
514 512 return (NULL);
515 513 }
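
scsi_init_cache_pkt() and scsi_free_cache_pkt() are installed as an HBA's tran_init_pkt(9E)/tran_destroy_pkt(9E) entry points when the driver opts into the kmem-cache packet allocator by supplying tran_setup_pkt and tran_teardown_pkt before calling scsi_hba_attach_setup(9F). A hedged sketch of that wiring; my_setup_pkt, my_teardown_pkt, my_cmd, and my_dma_attr are hypothetical driver names:

    static int
    my_hba_attach(dev_info_t *dip, scsi_hba_tran_t *tran)
    {
            /*
             * per-packet hooks; a non-NULL tran_setup_pkt is what
             * enables the cache framework in scsi_hba_attach_setup()
             */
            tran->tran_setup_pkt = my_setup_pkt;
            tran->tran_teardown_pkt = my_teardown_pkt;
            tran->tran_hba_len = sizeof (struct my_cmd);

            return (scsi_hba_attach_setup(dip, &my_dma_attr, tran,
                SCSI_HBA_TRAN_CDB | SCSI_HBA_TRAN_SCB));
    }
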
516 514
517 515 void
518 516 scsi_free_cache_pkt(struct scsi_address *ap, struct scsi_pkt *pktp)
519 517 {
520 518 struct scsi_pkt_cache_wrapper *pktw;
521 519
522 520 (*A_TO_TRAN(ap)->tran_teardown_pkt)(pktp);
523 521 pktw = (struct scsi_pkt_cache_wrapper *)pktp;
524 522 if (pktw->pcw_flags & PCW_BOUND)
525 523 scsi_dmafree_attr(pktp);
526 524
527 525 /*
528 526 * if we allocated memory for anything that wouldn't fit, free
529 527 * the memory and restore the pointers
530 528 */
531 529 if (pktw->pcw_flags & PCW_NEED_EXT_SCB) {
532 530 kmem_free(pktp->pkt_scbp, pktp->pkt_scblen);
533 531 pktp->pkt_scbp = (opaque_t)((char *)pktp +
534 532 (A_TO_TRAN(ap))->tran_hba_len +
535 533 DEFAULT_PRIVLEN + sizeof (struct scsi_pkt_cache_wrapper));
536 534 if ((A_TO_TRAN(ap))->tran_hba_flags & SCSI_HBA_TRAN_CDB)
537 535 pktp->pkt_scbp = (opaque_t)((pktp->pkt_scbp) +
538 536 DEFAULT_CDBLEN);
539 537 pktp->pkt_scblen = 0;
540 538 }
541 539 if (pktw->pcw_flags & PCW_NEED_EXT_TGT) {
542 540 kmem_free(pktp->pkt_private, pktp->pkt_tgtlen);
543 541 pktp->pkt_tgtlen = 0;
544 542 pktp->pkt_private = NULL;
545 543 }
546 544 if (pktw->pcw_flags & PCW_NEED_EXT_CDB) {
547 545 kmem_free(pktp->pkt_cdbp, pktp->pkt_cdblen);
548 546 pktp->pkt_cdbp = (opaque_t)((char *)pktp +
549 547 (A_TO_TRAN(ap))->tran_hba_len +
550 548 sizeof (struct scsi_pkt_cache_wrapper));
551 549 pktp->pkt_cdblen = 0;
552 550 }
553 551 pktw->pcw_flags &=
554 552 ~(PCW_NEED_EXT_CDB|PCW_NEED_EXT_TGT|PCW_NEED_EXT_SCB);
555 553 kmem_cache_free(A_TO_TRAN(ap)->tran_pkt_cache_ptr, pktw);
556 554
557 555 if (scsi_callback_id != 0) {
558 556 ddi_run_callback(&scsi_callback_id);
559 557 }
560 558
561 559 }
562 560
563 561
564 562 struct scsi_pkt *
565 563 scsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *in_pktp,
566 564 struct buf *bp, int cmdlen, int statuslen, int pplen,
567 565 int flags, int (*callback)(caddr_t), caddr_t callback_arg)
568 566 {
569 567 struct scsi_pkt *pktp;
570 568 scsi_hba_tran_t *tranp = ap->a_hba_tran;
571 569 int (*func)(caddr_t);
572 570
573 571 TRACE_5(TR_FAC_SCSI_RES, TR_SCSI_INIT_PKT_START,
574 572 "scsi_init_pkt_start: addr %p in_pktp %p cmdlen %d statuslen %d pplen %d",
575 573 ap, in_pktp, cmdlen, statuslen, pplen);
576 574
577 575 #if defined(__i386) || defined(__amd64)
578 576 if (flags & PKT_CONSISTENT_OLD) {
579 577 flags &= ~PKT_CONSISTENT_OLD;
580 578 flags |= PKT_CONSISTENT;
581 579 }
582 580 #endif
583 581
584 582 func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;
585 583
586 584 pktp = (*tranp->tran_init_pkt) (ap, in_pktp, bp, cmdlen,
587 585 statuslen, pplen, flags, func, NULL);
588 586 if (pktp == NULL) {
589 587 if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
590 588 ddi_set_callback(callback, callback_arg,
591 589 &scsi_callback_id);
592 590 }
593 591 }
594 592
595 593 TRACE_1(TR_FAC_SCSI_RES, TR_SCSI_INIT_PKT_END,
596 594 "scsi_init_pkt_end: pktp %p", pktp);
597 595 return (pktp);
598 596 }
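
On the target-driver side, a typical scsi_init_pkt(9F) call looks like the sketch below; devp and bp are assumed to be the driver's scsi_device and data buf:

    struct scsi_pkt *pkt;

    /*
     * 10-byte CDB, auto-request-sense sized status area, DMA bound to
     * bp with a consistent mapping; block until resources arrive.
     */
    pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP1,
        sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
        SLEEP_FUNC, NULL);
    if (pkt == NULL)
            return (ENOMEM);    /* still possible despite SLEEP_FUNC */
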
599 597
600 598 void
601 599 scsi_destroy_pkt(struct scsi_pkt *pkt)
602 600 {
603 601 struct scsi_address *ap = P_TO_ADDR(pkt);
604 602
605 603 TRACE_1(TR_FAC_SCSI_RES, TR_SCSI_DESTROY_PKT_START,
606 604 "scsi_destroy_pkt_start: pkt %p", pkt);
607 605
608 606 (*A_TO_TRAN(ap)->tran_destroy_pkt)(ap, pkt);
609 607
610 608 if (scsi_callback_id != 0) {
611 609 ddi_run_callback(&scsi_callback_id);
612 610 }
613 611
614 612 TRACE_0(TR_FAC_SCSI_RES, TR_SCSI_DESTROY_PKT_END,
615 613 "scsi_destroy_pkt_end");
616 614 }
617 615
618 616
619 617 /*
620 618 * Generic Resource Allocation Routines
621 619 */
622 620
623 621 struct scsi_pkt *
624 622 scsi_resalloc(struct scsi_address *ap, int cmdlen, int statuslen,
625 623 opaque_t dmatoken, int (*callback)())
626 624 {
627 625 register struct scsi_pkt *pkt;
628 626 register scsi_hba_tran_t *tranp = ap->a_hba_tran;
629 627 register int (*func)(caddr_t);
630 628
631 629 func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;
632 630
633 631 pkt = (*tranp->tran_init_pkt) (ap, NULL, (struct buf *)dmatoken,
634 632 cmdlen, statuslen, 0, 0, func, NULL);
635 633 if (pkt == NULL) {
636 634 if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
637 635 ddi_set_callback(callback, NULL, &scsi_callback_id);
638 636 }
639 637 }
640 638
641 639 return (pkt);
642 640 }
643 641
644 642 struct scsi_pkt *
645 643 scsi_pktalloc(struct scsi_address *ap, int cmdlen, int statuslen,
646 644 int (*callback)())
647 645 {
648 646 struct scsi_pkt *pkt;
649 647 struct scsi_hba_tran *tran = ap->a_hba_tran;
650 648 register int (*func)(caddr_t);
651 649
652 650 func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;
653 651
654 652 pkt = (*tran->tran_init_pkt) (ap, NULL, NULL, cmdlen,
655 653 statuslen, 0, 0, func, NULL);
656 654 if (pkt == NULL) {
657 655 if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
658 656 ddi_set_callback(callback, NULL, &scsi_callback_id);
659 657 }
660 658 }
661 659
662 660 return (pkt);
663 661 }
664 662
665 663 struct scsi_pkt *
666 664 scsi_dmaget(struct scsi_pkt *pkt, opaque_t dmatoken, int (*callback)())
667 665 {
668 666 struct scsi_pkt *new_pkt;
669 667 register int (*func)(caddr_t);
670 668
671 669 func = (callback == SLEEP_FUNC) ? SLEEP_FUNC : NULL_FUNC;
672 670
673 671 new_pkt = (*P_TO_TRAN(pkt)->tran_init_pkt) (&pkt->pkt_address,
674 672 pkt, (struct buf *)dmatoken,
675 673 0, 0, 0, 0, func, NULL);
676 674 ASSERT(new_pkt == pkt || new_pkt == NULL);
677 675 if (new_pkt == NULL) {
678 676 if (callback != NULL_FUNC && callback != SLEEP_FUNC) {
679 677 ddi_set_callback(callback, NULL, &scsi_callback_id);
680 678 }
681 679 }
682 680
683 681 return (new_pkt);
684 682 }
685 683
686 684
687 685 /*
688 686 * Generic Resource Deallocation Routines
689 687 */
690 688
691 689 void
692 690 scsi_dmafree(struct scsi_pkt *pkt)
693 691 {
694 692 register struct scsi_address *ap = P_TO_ADDR(pkt);
695 693
696 694 (*A_TO_TRAN(ap)->tran_dmafree)(ap, pkt);
697 695
698 696 if (scsi_callback_id != 0) {
699 697 ddi_run_callback(&scsi_callback_id);
700 698 }
701 699 }
702 700
703 701 /*ARGSUSED*/
704 702 void
705 703 scsi_cache_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
706 704 {
707 705 ASSERT(pkt->pkt_numcookies == 0 ||
708 706 ((struct scsi_pkt_cache_wrapper *)pkt)->pcw_flags & PCW_BOUND);
709 707 ASSERT(pkt->pkt_handle != NULL);
710 708 scsi_dmafree_attr(pkt);
711 709
712 710 if (scsi_callback_id != 0) {
713 711 ddi_run_callback(&scsi_callback_id);
714 712 }
715 713 }
716 714
717 715 void
718 716 scsi_sync_pkt(struct scsi_pkt *pkt)
719 717 {
720 718 register struct scsi_address *ap = P_TO_ADDR(pkt);
721 719
722 720 if (pkt->pkt_state & STATE_XFERRED_DATA)
723 721 (*A_TO_TRAN(ap)->tran_sync_pkt)(ap, pkt);
724 722 }
725 723
726 724 /*ARGSUSED*/
727 725 void
728 726 scsi_sync_cache_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
729 727 {
730 728 if (pkt->pkt_handle &&
731 729 (pkt->pkt_dma_flags & (DDI_DMA_WRITE | DDI_DMA_READ))) {
732 730 (void) ddi_dma_sync(pkt->pkt_handle,
733 731 pkt->pkt_dma_offset, pkt->pkt_dma_len,
734 732 (pkt->pkt_dma_flags & DDI_DMA_WRITE) ?
735 733 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
736 734 }
737 735 }
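
A sketch of when a target driver would call the sync entry point: after command completion and before the CPU inspects DMA'd-in data. The local inq is a hypothetical destination:

    struct scsi_inquiry inq;

    /*
     * make device-written data visible to the CPU; per scsi_sync_pkt()
     * above this is a no-op unless STATE_XFERRED_DATA is set
     */
    scsi_sync_pkt(pkt);
    bcopy(bp->b_un.b_addr, &inq, sizeof (inq));
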
738 736
739 737 void
740 738 scsi_resfree(struct scsi_pkt *pkt)
741 739 {
742 740 register struct scsi_address *ap = P_TO_ADDR(pkt);
743 741 (*A_TO_TRAN(ap)->tran_destroy_pkt)(ap, pkt);
744 742
745 743 if (scsi_callback_id != 0) {
746 744 ddi_run_callback(&scsi_callback_id);
747 745 }
748 746 }
(656 unchanged lines elided)