Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/intel/io/scsi/adapters/pvscsi/pvscsi.c
+++ new/usr/src/uts/intel/io/scsi/adapters/pvscsi/pvscsi.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright 2016 Nexenta Systems, Inc.
14 14 */
15 15
16 16 #include <sys/atomic.h>
17 17 #include <sys/cmn_err.h>
18 18 #include <sys/conf.h>
19 19 #include <sys/cpuvar.h>
20 20 #include <sys/ddi.h>
21 21 #include <sys/errno.h>
22 22 #include <sys/fs/dv_node.h>
23 23 #include <sys/kmem.h>
24 24 #include <sys/kmem_impl.h>
25 25 #include <sys/list.h>
26 26 #include <sys/modctl.h>
27 27 #include <sys/pci.h>
28 28 #include <sys/scsi/scsi.h>
29 29 #include <sys/sunddi.h>
30 30 #include <sys/sysmacros.h>
31 31 #include <sys/time.h>
32 32 #include <sys/types.h>
33 33
34 34 #include "pvscsi.h"
35 35 #include "pvscsi_var.h"
36 36
/*
 * Tunables (patchable at boot): whether to prefer MSI interrupts, and the
 * number of pages backing the request/completion rings and the message ring.
 */
int pvscsi_enable_msi = 1;
int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;

static int pvscsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);

/* Soft-state anchor for per-instance softc allocations. */
static void *pvscsi_sstate;

/* HBA DMA attributes */
static ddi_dma_attr_t pvscsi_hba_dma_attr = {
	.dma_attr_version =	DMA_ATTR_V0,
	.dma_attr_addr_lo =	0x0000000000000000ull,
	.dma_attr_addr_hi =	0xFFFFFFFFFFFFFFFFull,
	.dma_attr_count_max =	0x000000007FFFFFFFull,
	.dma_attr_align =	0x0000000000000001ull,
	.dma_attr_burstsizes =	0x7ff,
	.dma_attr_minxfer =	0x00000001u,
	.dma_attr_maxxfer =	0x00000000FFFFFFFFull,
	.dma_attr_seg =		0x00000000FFFFFFFFull,
	.dma_attr_sgllen =	1,
	.dma_attr_granular =	0x00000200u,
	.dma_attr_flags =	0
};

/* DMA attributes for req/comp rings */
static ddi_dma_attr_t pvscsi_ring_dma_attr = {
	.dma_attr_version =	DMA_ATTR_V0,
	.dma_attr_addr_lo =	0x0000000000000000ull,
	.dma_attr_addr_hi =	0xFFFFFFFFFFFFFFFFull,
	.dma_attr_count_max =	0x000000007FFFFFFFull,
	.dma_attr_align =	0x0000000000000001ull,
	.dma_attr_burstsizes =	0x7ff,
	.dma_attr_minxfer =	0x00000001u,
	.dma_attr_maxxfer =	0x00000000FFFFFFFFull,
	.dma_attr_seg =		0x00000000FFFFFFFFull,
	.dma_attr_sgllen =	1,
	.dma_attr_granular =	0x00000001u,
	.dma_attr_flags =	0
};

/* DMA attributes for buffer I/O (scatter/gather capable, 512-byte granular) */
static ddi_dma_attr_t pvscsi_io_dma_attr = {
	.dma_attr_version =	DMA_ATTR_V0,
	.dma_attr_addr_lo =	0x0000000000000000ull,
	.dma_attr_addr_hi =	0xFFFFFFFFFFFFFFFFull,
	.dma_attr_count_max =	0x000000007FFFFFFFull,
	.dma_attr_align =	0x0000000000000001ull,
	.dma_attr_burstsizes =	0x7ff,
	.dma_attr_minxfer =	0x00000001u,
	.dma_attr_maxxfer =	0x00000000FFFFFFFFull,
	.dma_attr_seg =		0x00000000FFFFFFFFull,
	.dma_attr_sgllen =	PVSCSI_MAX_SG_SIZE,
	.dma_attr_granular =	0x00000200u,
	.dma_attr_flags =	0
};

/* Access attributes for the MMIO register window (little-endian, strict). */
static ddi_device_acc_attr_t pvscsi_mmio_attr = {
	DDI_DEVICE_ATTR_V1,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC
};

/*
 * Access attributes for DMA-able memory.
 * NOTE(review): uses DDI_DEVICE_ATTR_V0 while pvscsi_mmio_attr uses V1 —
 * looks intentional (no access-error handling for DMA memory) but confirm.
 */
static ddi_device_acc_attr_t pvscsi_dma_attrs = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC,
	DDI_DEFAULT_ACC,
};
106 106
107 107 static void
108 108 pvscsi_add_to_queue(pvscsi_cmd_t *cmd)
109 109 {
110 110 pvscsi_softc_t *pvs = cmd->cmd_pvs;
111 111
112 112 ASSERT(pvs != NULL);
113 113 ASSERT(mutex_owned(&pvs->mutex));
114 114 ASSERT(!list_link_active(&(cmd)->cmd_queue_node));
115 115
116 116 list_insert_tail(&pvs->cmd_queue, cmd);
117 117 pvs->cmd_queue_len++;
118 118 }
119 119
120 120 static void
121 121 pvscsi_remove_from_queue(pvscsi_cmd_t *cmd)
122 122 {
123 123 pvscsi_softc_t *pvs = cmd->cmd_pvs;
124 124
125 125 ASSERT(pvs != NULL);
126 126 ASSERT(mutex_owned(&pvs->mutex));
127 127 ASSERT(list_link_active(&cmd->cmd_queue_node));
128 128 ASSERT(pvs->cmd_queue_len > 0);
129 129
130 130 if (list_link_active(&cmd->cmd_queue_node)) {
131 131 list_remove(&pvs->cmd_queue, cmd);
132 132 pvs->cmd_queue_len--;
133 133 }
134 134 }
135 135
136 136 static uint64_t
137 137 pvscsi_map_ctx(pvscsi_softc_t *pvs, pvscsi_cmd_ctx_t *io_ctx)
138 138 {
139 139 return (io_ctx - pvs->cmd_ctx + 1);
140 140 }
141 141
142 142 static pvscsi_cmd_ctx_t *
143 143 pvscsi_lookup_ctx(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd)
144 144 {
145 145 pvscsi_cmd_ctx_t *ctx, *end;
146 146
147 147 end = &pvs->cmd_ctx[pvs->req_depth];
148 148 for (ctx = pvs->cmd_ctx; ctx < end; ctx++) {
149 149 if (ctx->cmd == cmd)
150 150 return (ctx);
151 151 }
152 152
153 153 return (NULL);
154 154 }
155 155
156 156 static pvscsi_cmd_ctx_t *
157 157 pvscsi_resolve_ctx(pvscsi_softc_t *pvs, uint64_t ctx)
158 158 {
159 159 if (ctx > 0 && ctx <= pvs->req_depth)
160 160 return (&pvs->cmd_ctx[ctx - 1]);
161 161 else
162 162 return (NULL);
163 163 }
164 164
165 165 static boolean_t
166 166 pvscsi_acquire_ctx(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd)
167 167 {
168 168 pvscsi_cmd_ctx_t *ctx;
169 169
170 170 if (list_is_empty(&pvs->cmd_ctx_pool))
171 171 return (B_FALSE);
172 172
173 173 ctx = (pvscsi_cmd_ctx_t *)list_remove_head(&pvs->cmd_ctx_pool);
174 174 ASSERT(ctx != NULL);
175 175
176 176 ctx->cmd = cmd;
177 177 cmd->ctx = ctx;
178 178
179 179 return (B_TRUE);
180 180 }
181 181
182 182 static void
183 183 pvscsi_release_ctx(pvscsi_cmd_t *cmd)
184 184 {
185 185 pvscsi_softc_t *pvs = cmd->cmd_pvs;
186 186
187 187 ASSERT(mutex_owned(&pvs->mutex));
188 188
189 189 cmd->ctx->cmd = NULL;
190 190 list_insert_tail(&pvs->cmd_ctx_pool, cmd->ctx);
191 191 cmd->ctx = NULL;
192 192 }
193 193
194 194 static uint32_t
195 195 pvscsi_reg_read(pvscsi_softc_t *pvs, uint32_t offset)
196 196 {
197 197 uint32_t ret;
198 198
199 199 ASSERT((offset & (sizeof (uint32_t) - 1)) == 0);
200 200
201 201 ret = ddi_get32(pvs->mmio_handle,
202 202 (uint32_t *)(pvs->mmio_base + offset));
203 203
204 204 return (ret);
205 205 }
206 206
207 207 static void
208 208 pvscsi_reg_write(pvscsi_softc_t *pvs, uint32_t offset, uint32_t value)
209 209 {
210 210 ASSERT((offset & (sizeof (uint32_t) - 1)) == 0);
211 211
212 212 ddi_put32(pvs->mmio_handle, (uint32_t *)(pvs->mmio_base + offset),
213 213 value);
214 214 }
215 215
/*
 * Issue a device command: write the command code to the COMMAND register,
 * then stream the descriptor payload word-by-word into the COMMAND_DATA
 * register.  The command register must be written before the data words,
 * so the order of these two writes is significant.
 */
static void
pvscsi_write_cmd_desc(pvscsi_softc_t *pvs, uint32_t cmd, void *desc, size_t len)
{
	/* Payload is transferred as 32-bit words. */
	len /= sizeof (uint32_t);
	pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_COMMAND, cmd);
	/* NO_AUTOINCR: every word goes to the same doorbell register. */
	ddi_rep_put32(pvs->mmio_handle, (uint32_t *)desc,
	    (uint32_t *)(pvs->mmio_base + PVSCSI_REG_OFFSET_COMMAND_DATA),
	    len, DDI_DEV_NO_AUTOINCR);
}
225 225
226 226 static uint32_t
227 227 pvscsi_read_intr_status(pvscsi_softc_t *pvs)
228 228 {
229 229 return (pvscsi_reg_read(pvs, PVSCSI_REG_OFFSET_INTR_STATUS));
230 230 }
231 231
232 232 static void
233 233 pvscsi_write_intr_status(pvscsi_softc_t *pvs, uint32_t val)
234 234 {
235 235 pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_STATUS, val);
236 236 }
237 237
/*
 * Disable device interrupt delivery.  Mask requests nest: a counter tracks
 * outstanding maskers and the INTR_MASK register is only cleared on the
 * first 0 -> 1 transition.  Pair each call with pvscsi_unmask_intr().
 */
static void
pvscsi_mask_intr(pvscsi_softc_t *pvs)
{
	mutex_enter(&pvs->intr_mutex);

	VERIFY(pvs->intr_lock_counter >= 0);

	/* Only the outermost masker touches the hardware. */
	if (++pvs->intr_lock_counter == 1)
		pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_MASK, 0);

	mutex_exit(&pvs->intr_mutex);
}
250 250
/*
 * Re-enable device interrupt delivery; counterpart of pvscsi_mask_intr().
 * The completion and message interrupt sources are unmasked only when the
 * last nested masker releases (counter drops back to 0).
 */
static void
pvscsi_unmask_intr(pvscsi_softc_t *pvs)
{
	mutex_enter(&pvs->intr_mutex);

	VERIFY(pvs->intr_lock_counter > 0);

	if (--pvs->intr_lock_counter == 0) {
		pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_MASK,
		    PVSCSI_INTR_CMPL_MASK | PVSCSI_INTR_MSG_MASK);
	}

	mutex_exit(&pvs->intr_mutex);
}
265 265
266 266 static void
267 267 pvscsi_reset_hba(pvscsi_softc_t *pvs)
268 268 {
269 269 pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
270 270 }
271 271
272 272 static void
273 273 pvscsi_reset_bus(pvscsi_softc_t *pvs)
274 274 {
275 275 pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_RESET_BUS, NULL, 0);
276 276 }
277 277
278 278 static void
279 279 pvscsi_submit_nonrw_io(pvscsi_softc_t *pvs)
280 280 {
281 281 pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
282 282 }
283 283
284 284 static void
285 285 pvscsi_submit_rw_io(pvscsi_softc_t *pvs)
286 286 {
287 287 pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
288 288 }
289 289
290 290
/*
 * Send a polled INQUIRY to a target to see whether it exists.
 *
 * Returns 0 on success (and, if inq is non-NULL, copies the returned
 * inquiry data into *inq); -1 or a scsi_poll() error otherwise.
 */
static int
pvscsi_inquiry_target(pvscsi_softc_t *pvs, int target, struct scsi_inquiry *inq)
{
	int len = sizeof (struct scsi_inquiry);
	int ret = -1;
	struct buf *b;
	struct scsi_address ap;
	struct scsi_pkt *pkt;
	uint8_t cdb[CDB_GROUP0];

	/* Synthesize an address for <target, lun 0> on our tran. */
	ap.a_hba_tran = pvs->tran;
	ap.a_target = (ushort_t)target;
	ap.a_lun = (uchar_t)0;

	if ((b = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL, len,
	    B_READ, NULL_FUNC, NULL)) == NULL)
		return (-1);

	if ((pkt = scsi_init_pkt(&ap, (struct scsi_pkt *)NULL, b,
	    CDB_GROUP0, sizeof (struct scsi_arq_status), 0, 0,
	    NULL_FUNC, NULL)) == NULL)
		goto free_buf;

	/* Group-0 INQUIRY CDB; allocation length is big-endian in bytes 3-4. */
	cdb[0] = SCMD_INQUIRY;
	cdb[1] = 0;
	cdb[2] = 0;
	cdb[3] = (len & 0xff00) >> 8;
	cdb[4] = (len & 0x00ff);
	cdb[5] = 0;

	if (inq != NULL)
		bzero(inq, sizeof (*inq));
	bcopy(cdb, pkt->pkt_cdbp, CDB_GROUP0);
	bzero((struct scsi_inquiry *)b->b_un.b_addr, sizeof (*inq));

	/* Polled (blocking) transport; 0 means command completed OK. */
	if ((ret = scsi_poll(pkt)) == 0 && inq != NULL)
		bcopy(b->b_un.b_addr, inq, sizeof (*inq));

	scsi_destroy_pkt(pkt);

free_buf:
	scsi_free_consistent_buf(b);

	return (ret);
}
336 336
/*
 * Bring the devinfo tree in sync with reality for a single target:
 * probe the target with INQUIRY and then create, keep, or tear down its
 * child node accordingly.  On success with childp != NULL, *childp is set
 * to the target's dip.
 *
 * Returns NDI_SUCCESS when the node exists (or a vanished target was
 * cleanly removed), NDI_FAILURE otherwise.
 */
static int
pvscsi_config_one(dev_info_t *pdip, pvscsi_softc_t *pvs, int target,
    dev_info_t **childp)
{
	char **compatible = NULL;
	char *nodename = NULL;
	dev_info_t *dip;
	int inqrc;
	int ncompatible = 0;
	pvscsi_device_t *devnode;
	struct scsi_inquiry inq;

	/* Inquiry target */
	inqrc = pvscsi_inquiry_target(pvs, target, &inq);

	/* Find devnode */
	for (devnode = list_head(&pvs->devnodes); devnode != NULL;
	    devnode = list_next(&pvs->devnodes, devnode)) {
		if (devnode->target == target)
			break;
	}

	if (devnode != NULL) {
		if (inqrc != 0) {
			/* Target disappeared, drop devnode */
			if (i_ddi_devi_attached(devnode->pdip)) {
				char *devname;
				/* Get full devname */
				devname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
				(void) ddi_deviname(devnode->pdip, devname);
				/* Clean cache and name */
				(void) devfs_clean(devnode->parent, devname + 1,
				    DV_CLEAN_FORCE);
				kmem_free(devname, MAXPATHLEN);
			}

			(void) ndi_devi_offline(devnode->pdip, NDI_DEVI_REMOVE);

			list_remove(&pvs->devnodes, devnode);
			kmem_free(devnode, sizeof (*devnode));
		} else if (childp != NULL) {
			/* Target exists */
			*childp = devnode->pdip;
		}
		return (NDI_SUCCESS);
	} else if (inqrc != 0) {
		/* Target doesn't exist */
		return (NDI_FAILURE);
	}

	/* New target: derive node name and compatible list from INQUIRY. */
	scsi_hba_nodename_compatible_get(&inq, NULL, inq.inq_dtype, NULL,
	    &nodename, &compatible, &ncompatible);
	if (nodename == NULL)
		goto free_nodename;

	if (ndi_devi_alloc(pdip, nodename, DEVI_SID_NODEID,
	    &dip) != NDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to alloc device instance");
		goto free_nodename;
	}

	/* Standard SCSI child properties; any failure unwinds the node. */
	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "device-type", "scsi") != DDI_PROP_SUCCESS ||
	    ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "target", target) != DDI_PROP_SUCCESS ||
	    ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "lun", 0) != DDI_PROP_SUCCESS ||
	    ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "pm-capable", 1) != DDI_PROP_SUCCESS ||
	    ndi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
		dev_err(pvs->dip, CE_WARN,
		    "!failed to update props for target %d", target);
		goto free_devi;
	}

	if ((devnode = kmem_zalloc(sizeof (*devnode), KM_NOSLEEP)) == NULL)
		goto free_devi;

	if (ndi_devi_online(dip, NDI_ONLINE_ATTACH) != NDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to online target %d",
		    target);
		kmem_free(devnode, sizeof (*devnode));
		goto free_devi;
	}

	devnode->target = target;
	devnode->pdip = dip;
	devnode->parent = pdip;
	list_insert_tail(&pvs->devnodes, devnode);

	if (childp != NULL)
		*childp = dip;

	scsi_hba_nodename_compatible_free(nodename, compatible);

	return (NDI_SUCCESS);

free_devi:
	ndi_prop_remove_all(dip);
	(void) ndi_devi_free(dip);
free_nodename:
	scsi_hba_nodename_compatible_free(nodename, compatible);

	return (NDI_FAILURE);
}
443 443
444 444 static int
445 445 pvscsi_config_all(dev_info_t *pdip, pvscsi_softc_t *pvs)
446 446 {
447 447 int target;
448 448
449 449 for (target = 0; target < PVSCSI_MAXTGTS; target++)
450 450 (void) pvscsi_config_one(pdip, pvs, target, NULL);
451 451
452 452 return (NDI_SUCCESS);
453 453 }
454 454
/*
 * Drain the completion ring, building a singly-linked chain (next_cmd)
 * of the commands the device has finished.  The head's tail_cmd always
 * points at the last command in the chain so callers can splice chains.
 *
 * Caller must hold pvs->rx_mutex.  Returns the chain head, or NULL if
 * nothing completed.
 */
static pvscsi_cmd_t *
pvscsi_process_comp_ring(pvscsi_softc_t *pvs)
{
	pvscsi_cmd_t **pnext_cmd = NULL;
	pvscsi_cmd_t *cmd;
	pvscsi_cmd_t *head = NULL;
	struct PVSCSIRingsState *sdesc = RINGS_STATE(pvs);
	uint32_t cmp_ne = sdesc->cmpNumEntriesLog2;

	ASSERT(mutex_owned(&pvs->rx_mutex));

	/* Consume entries until we catch up with the producer index. */
	while (sdesc->cmpConsIdx != sdesc->cmpProdIdx) {
		pvscsi_cmd_ctx_t *ctx;
		struct PVSCSIRingCmpDesc *cdesc;

		/* Indices wrap; MASK() reduces them to a ring slot. */
		cdesc = CMP_RING(pvs) + (sdesc->cmpConsIdx & MASK(cmp_ne));
		/* Order: read descriptor contents after seeing prod index. */
		membar_consumer();

		ctx = pvscsi_resolve_ctx(pvs, cdesc->context);
		ASSERT(ctx != NULL);

		if ((cmd = ctx->cmd) != NULL) {
			cmd->next_cmd = NULL;

			/* Save command status for further processing */
			cmd->cmp_stat.host_status = cdesc->hostStatus;
			cmd->cmp_stat.scsi_status = cdesc->scsiStatus;
			cmd->cmp_stat.data_len = cdesc->dataLen;

			/* Mark this command as arrived from hardware */
			cmd->flags |= PVSCSI_FLAG_HW_STATUS;

			/* Keep tail_cmd on the head pointing at the newest. */
			if (head == NULL) {
				head = cmd;
				head->tail_cmd = cmd;
			} else {
				head->tail_cmd = cmd;
			}

			/* Link cmd onto the chain via the previous next_cmd. */
			if (pnext_cmd == NULL) {
				pnext_cmd = &cmd->next_cmd;
			} else {
				*pnext_cmd = cmd;
				pnext_cmd = &cmd->next_cmd;
			}
		}

		/* Publish our consumption before advancing the index. */
		membar_consumer();
		sdesc->cmpConsIdx++;
	}

	return (head);
}
508 508
/*
 * Pull one message off the device's message ring (device added/removed
 * notifications) and package it for the taskq handler.
 *
 * Caller must hold pvs->rx_mutex.  Returns NULL when the ring is empty,
 * allocation fails (KM_NOSLEEP), or the message type is unknown; the
 * consumer index is only advanced when a message is actually consumed.
 */
static pvscsi_msg_t *
pvscsi_process_msg_ring(pvscsi_softc_t *pvs)
{
	pvscsi_msg_t *msg;
	struct PVSCSIRingsState *sdesc = RINGS_STATE(pvs);
	struct PVSCSIRingMsgDesc *mdesc;
	struct PVSCSIMsgDescDevStatusChanged *desc;
	uint32_t msg_ne = sdesc->msgNumEntriesLog2;

	ASSERT(mutex_owned(&pvs->rx_mutex));

	if (sdesc->msgProdIdx == sdesc->msgConsIdx)
		return (NULL);

	/* Indices wrap; MASK() reduces them to a ring slot. */
	mdesc = MSG_RING(pvs) + (sdesc->msgConsIdx & MASK(msg_ne));
	/* Order: read descriptor contents after seeing prod index. */
	membar_consumer();

	switch (mdesc->type) {
	case PVSCSI_MSG_DEV_ADDED:
	case PVSCSI_MSG_DEV_REMOVED:
		desc = (struct PVSCSIMsgDescDevStatusChanged *)mdesc;
		msg = kmem_alloc(sizeof (pvscsi_msg_t), KM_NOSLEEP);
		if (msg == NULL)
			return (NULL);
		msg->msg_pvs = pvs;
		msg->type = mdesc->type;
		msg->target = desc->target;
		break;
	default:
		dev_err(pvs->dip, CE_WARN, "!unknown msg type: %d",
		    mdesc->type);
		return (NULL);
	}

	membar_consumer();
	sdesc->msgConsIdx++;

	return (msg);
}
548 548
549 549 static void
550 550 pvscsi_handle_msg(void *arg)
551 551 {
552 552 pvscsi_msg_t *msg = (pvscsi_msg_t *)arg;
553 553
554 554 (void) pvscsi_config_one(msg->msg_pvs->dip, msg->msg_pvs, msg->target,
555 555 NULL);
556 556
557 557 kmem_free(msg, sizeof (pvscsi_msg_t));
558 558 }
559 559
/*
 * Try to abort a single command in the HBA.
 *
 * Any commands found completed along the way (including possibly the
 * target command itself) are chained onto *pending for the caller to
 * complete once locks are dropped.  Caller must hold both rx_mutex and
 * tx_mutex.
 *
 * Returns CMD_CMPLT if the command turned out to be already complete,
 * CMD_ABORTED if it was successfully cancelled.
 */
static int
pvscsi_abort_cmd(pvscsi_cmd_t *cmd, pvscsi_cmd_t **pending)
{
	pvscsi_softc_t *pvs = cmd->cmd_pvs;
	pvscsi_cmd_t *c;
	pvscsi_cmd_t *done;
	struct PVSCSICmdDescAbortCmd acmd;

	dev_err(pvs->dip, CE_WARN, "!aborting command %p", (void *)cmd);

	ASSERT(mutex_owned(&pvs->rx_mutex));
	ASSERT(mutex_owned(&pvs->tx_mutex));

	/* Check if the cmd was already completed by the HBA */
	*pending = done = pvscsi_process_comp_ring(pvs);
	for (c = done; c != NULL; c = c->next_cmd) {
		if (c == cmd)
			return (CMD_CMPLT);
	}

	/* Check if cmd was really scheduled by the HBA */
	if (pvscsi_lookup_ctx(pvs, cmd) == NULL)
		return (CMD_CMPLT);

	/* Abort cmd in the HBA */
	bzero(&acmd, sizeof (acmd));
	acmd.target = cmd->cmd_target;
	acmd.context = pvscsi_map_ctx(pvs, cmd->ctx);
	pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_ABORT_CMD, &acmd, sizeof (acmd));

	/* Check if cmd was completed by the HBA before it could be aborted */
	if ((done = pvscsi_process_comp_ring(pvs)) != NULL) {
		/* Splice the new completions in front of *pending. */
		done->tail_cmd->next_cmd = *pending;
		*pending = done;
		for (c = done; c != NULL; c = c->next_cmd) {
			if (c == cmd)
				return (CMD_CMPLT);
		}
	}

	/* Release I/O ctx */
	mutex_enter(&pvs->mutex);
	if (cmd->ctx != NULL)
		pvscsi_release_ctx(cmd);
	/* Remove cmd from the queue */
	pvscsi_remove_from_queue(cmd);
	mutex_exit(&pvs->mutex);

	/* Insert cmd at the beginning of the list */
	cmd->next_cmd = *pending;
	*pending = cmd;

	dev_err(pvs->dip, CE_WARN, "!command %p aborted", (void *)cmd);

	return (CMD_ABORTED);
}
616 616
/*
 * Fill in the request descriptor's data-transfer fields from the command's
 * cached DMA cookies: either a single direct address, or a scatter/gather
 * list built in the command's per-context SG area.
 */
static void
pvscsi_map_buffers(pvscsi_cmd_t *cmd, struct PVSCSIRingReqDesc *rdesc)
{
	int i;

	ASSERT(cmd->ctx);
	ASSERT(cmd->cmd_dmaccount > 0 && cmd->cmd_dmaccount <=
	    PVSCSI_MAX_SG_SIZE);

	rdesc->dataLen = cmd->cmd_dma_count;
	rdesc->dataAddr = 0;

	/* Nothing to transfer. */
	if (cmd->cmd_dma_count == 0)
		return;

	if (cmd->cmd_dmaccount > 1) {
		/* Multiple cookies: build an SG list and point at it. */
		struct PVSCSISGElement *sgl = CMD_CTX_SGLIST_VA(cmd->ctx);

		for (i = 0; i < cmd->cmd_dmaccount; i++) {
			sgl[i].addr = cmd->cached_cookies[i].dmac_laddress;
			sgl[i].length = cmd->cached_cookies[i].dmac_size;
			sgl[i].flags = 0;
		}
		rdesc->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
		rdesc->dataAddr = (uint64_t)CMD_CTX_SGLIST_PA(cmd->ctx);
	} else {
		/* Single cookie: direct data address, no SG list. */
		rdesc->dataAddr = cmd->cached_cookies[0].dmac_laddress;
	}
}
646 646
647 647 static void
648 648 pvscsi_comp_cmd(pvscsi_cmd_t *cmd, uint8_t status)
649 649 {
650 650 struct scsi_pkt *pkt = CMD2PKT(cmd);
651 651
652 652 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
653 653 STATE_GOT_STATUS);
654 654 if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0)
655 655 pkt->pkt_state |= STATE_XFERRED_DATA;
656 656 pkt->pkt_reason = CMD_CMPLT;
657 657 pkt->pkt_resid = 0;
658 658 *(pkt->pkt_scbp) = status;
659 659 }
660 660
661 661 static void
662 662 pvscsi_set_status(pvscsi_cmd_t *cmd)
663 663 {
664 664 pvscsi_softc_t *pvs = cmd->cmd_pvs;
665 665 struct scsi_pkt *pkt = CMD2PKT(cmd);
666 666 uchar_t scsi_status = cmd->cmp_stat.scsi_status;
667 667 uint32_t host_status = cmd->cmp_stat.host_status;
668 668
669 669 if (scsi_status != STATUS_GOOD &&
670 670 (host_status == BTSTAT_SUCCESS ||
671 671 (host_status == BTSTAT_LINKED_COMMAND_COMPLETED) ||
672 672 (host_status == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG))) {
673 673 if (scsi_status == STATUS_CHECK) {
674 674 struct scsi_arq_status *astat = (void*)(pkt->pkt_scbp);
675 675 uint8_t *sensedata;
676 676 int arq_size;
677 677
678 678 *pkt->pkt_scbp = scsi_status;
679 679 pkt->pkt_state |= STATE_ARQ_DONE;
680 680
681 681 if ((cmd->flags & PVSCSI_FLAG_XARQ) != 0) {
682 682 arq_size = (cmd->cmd_rqslen >=
683 683 SENSE_BUFFER_SIZE) ? SENSE_BUFFER_SIZE :
684 684 cmd->cmd_rqslen;
685 685
686 686 astat->sts_rqpkt_resid = SENSE_BUFFER_SIZE -
687 687 arq_size;
688 688 sensedata = (uint8_t *)&astat->sts_sensedata;
689 689 bcopy(cmd->arqbuf->b_un.b_addr, sensedata,
690 690 arq_size);
691 691
692 692 pkt->pkt_state |= STATE_XARQ_DONE;
693 693 } else {
694 694 astat->sts_rqpkt_resid = 0;
695 695 }
696 696
697 697 astat->sts_rqpkt_statistics = 0;
698 698 astat->sts_rqpkt_reason = CMD_CMPLT;
699 699 (*(uint8_t *)&astat->sts_rqpkt_status) = STATUS_GOOD;
700 700 astat->sts_rqpkt_state = STATE_GOT_BUS |
701 701 STATE_GOT_TARGET | STATE_SENT_CMD |
702 702 STATE_XFERRED_DATA | STATE_GOT_STATUS;
703 703 }
704 704 pvscsi_comp_cmd(cmd, scsi_status);
705 705
706 706 return;
707 707 }
708 708
709 709 switch (host_status) {
710 710 case BTSTAT_SUCCESS:
711 711 case BTSTAT_LINKED_COMMAND_COMPLETED:
712 712 case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
713 713 pvscsi_comp_cmd(cmd, STATUS_GOOD);
714 714 break;
715 715 case BTSTAT_DATARUN:
716 716 pkt->pkt_reason = CMD_DATA_OVR;
717 717 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
718 718 STATE_SENT_CMD | STATE_GOT_STATUS |
719 719 STATE_XFERRED_DATA);
720 720 pkt->pkt_resid = 0;
721 721 break;
722 722 case BTSTAT_DATA_UNDERRUN:
723 723 pkt->pkt_reason = pkt->pkt_state |= (STATE_GOT_BUS |
724 724 STATE_GOT_TARGET | STATE_SENT_CMD | STATE_GOT_STATUS);
725 725 pkt->pkt_resid = cmd->dma_count - cmd->cmp_stat.data_len;
726 726 if (pkt->pkt_resid != cmd->dma_count)
727 727 pkt->pkt_state |= STATE_XFERRED_DATA;
728 728 break;
729 729 case BTSTAT_SELTIMEO:
730 730 pkt->pkt_reason = CMD_DEV_GONE;
731 731 pkt->pkt_state |= STATE_GOT_BUS;
732 732 break;
733 733 case BTSTAT_TAGREJECT:
734 734 pkt->pkt_reason = CMD_TAG_REJECT;
735 735 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
736 736 STATE_SENT_CMD | STATE_GOT_STATUS);
737 737 break;
738 738 case BTSTAT_BADMSG:
739 739 pkt->pkt_reason = CMD_BADMSG;
740 740 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
741 741 STATE_SENT_CMD | STATE_GOT_STATUS);
742 742 break;
743 743 case BTSTAT_SENTRST:
744 744 case BTSTAT_RECVRST:
745 745 case BTSTAT_BUSRESET:
746 746 pkt->pkt_reason = CMD_RESET;
747 747 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
748 748 STATE_SENT_CMD | STATE_GOT_STATUS);
749 749 break;
750 750 case BTSTAT_ABORTQUEUE:
751 751 pkt->pkt_reason = CMD_ABORTED;
752 752 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
753 753 STATE_SENT_CMD | STATE_GOT_STATUS);
754 754 break;
755 755 case BTSTAT_HAHARDWARE:
756 756 case BTSTAT_INVPHASE:
757 757 case BTSTAT_HATIMEOUT:
758 758 case BTSTAT_NORESPONSE:
759 759 case BTSTAT_DISCONNECT:
760 760 case BTSTAT_HASOFTWARE:
761 761 case BTSTAT_BUSFREE:
762 762 case BTSTAT_SENSFAILED:
763 763 pkt->pkt_reason = CMD_TRAN_ERR;
764 764 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
765 765 STATE_SENT_CMD | STATE_GOT_STATUS);
766 766 break;
767 767 default:
768 768 dev_err(pvs->dip, CE_WARN,
769 769 "!unknown host status code: %d", host_status);
770 770 pkt->pkt_reason = CMD_TRAN_ERR;
771 771 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
772 772 STATE_SENT_CMD | STATE_GOT_STATUS);
773 773 break;
774 774 }
775 775 }
776 776
/*
 * Walk a chain of finished commands (linked via next_cmd), finalize each
 * packet's reason/state/statistics, release its I/O context and queue slot,
 * and invoke the target driver's completion callback.
 */
static void
pvscsi_complete_chained(void *arg)
{
	pvscsi_cmd_t *cmd = (pvscsi_cmd_t *)arg;
	pvscsi_cmd_t *c;
	struct scsi_pkt *pkt;

	while (cmd != NULL) {
		pvscsi_softc_t *pvs = cmd->cmd_pvs;

		/* Detach cmd from the chain before handing it back. */
		c = cmd->next_cmd;
		cmd->next_cmd = NULL;

		pkt = CMD2PKT(cmd);
		/*
		 * NOTE(review): a NULL pkt returns and so drops the rest of
		 * the chain (c onward), not just this entry — confirm that
		 * a pkt-less cmd can only be the last chain element.
		 */
		if (pkt == NULL)
			return;

		/* Sync IOPB data back to the CPU for reads. */
		if ((cmd->flags & PVSCSI_FLAG_IO_IOPB) != 0 &&
		    (cmd->flags & PVSCSI_FLAG_IO_READ) != 0) {
			(void) ddi_dma_sync(cmd->cmd_dmahdl, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
		}

		mutex_enter(&pvs->mutex);
		/* Release I/O ctx */
		if (cmd->ctx != NULL)
			pvscsi_release_ctx(cmd);
		/* Remove command from queue */
		pvscsi_remove_from_queue(cmd);
		mutex_exit(&pvs->mutex);

		if ((cmd->flags & PVSCSI_FLAG_HW_STATUS) != 0) {
			/* Completed by hardware: translate its status. */
			pvscsi_set_status(cmd);
		} else {
			/* Completed locally: timeout, abort, or reset. */
			ASSERT((cmd->flags & PVSCSI_FLAGS_NON_HW_COMPLETION) !=
			    0);

			if ((cmd->flags & PVSCSI_FLAG_TIMED_OUT) != 0) {
				cmd->pkt->pkt_reason = CMD_TIMEOUT;
				cmd->pkt->pkt_statistics |=
				    (STAT_TIMEOUT | STAT_ABORTED);
			} else if ((cmd->flags & PVSCSI_FLAG_ABORTED) != 0) {
				cmd->pkt->pkt_reason = CMD_ABORTED;
				cmd->pkt->pkt_statistics |=
				    (STAT_TIMEOUT | STAT_ABORTED);
			} else if ((cmd->flags & PVSCSI_FLAGS_RESET) != 0) {
				cmd->pkt->pkt_reason = CMD_RESET;
				if ((cmd->flags & PVSCSI_FLAG_RESET_BUS) != 0) {
					cmd->pkt->pkt_statistics |=
					    STAT_BUS_RESET;
				} else {
					cmd->pkt->pkt_statistics |=
					    STAT_DEV_RESET;
				}
			}
		}

		cmd->flags |= PVSCSI_FLAG_DONE;
		cmd->flags &= ~PVSCSI_FLAG_TRANSPORT;

		/* Notify the target driver unless it polls (FLAG_NOINTR). */
		if ((pkt->pkt_flags & FLAG_NOINTR) == 0 &&
		    pkt->pkt_comp != NULL)
			(*pkt->pkt_comp)(pkt);

		cmd = c;
	}
}
844 844
845 845 static void
846 846 pvscsi_dev_reset(pvscsi_softc_t *pvs, int target)
847 847 {
848 848 struct PVSCSICmdDescResetDevice cmd = { 0 };
849 849
850 850 cmd.target = target;
851 851 pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_RESET_DEVICE, &cmd, sizeof (cmd));
852 852 }
853 853
/*
 * Busy-wait for a command to complete (FLAG_NOINTR / polled mode).
 *
 * Interrupts are masked while we spin on the interrupt-status register so
 * completions are not stolen by the ISR; any commands found done are
 * completed via pvscsi_complete_chained().  If no interrupt is seen within
 * pkt_time, the command is aborted in the HBA.  Always returns TRAN_ACCEPT.
 */
static int
pvscsi_poll_cmd(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd)
{
	boolean_t seen_intr;
	int cycles = (cmd->pkt->pkt_time * 1000000) / USECS_TO_WAIT;
	int i;
	pvscsi_cmd_t *dcmd;
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	/*
	 * Make sure we're not missing any commands completed
	 * concurrently before we have actually disabled interrupts.
	 */
	mutex_enter(&pvs->rx_mutex);
	dcmd = pvscsi_process_comp_ring(pvs);
	mutex_exit(&pvs->rx_mutex);

	pvscsi_complete_chained(dcmd);

	while ((cmd->flags & PVSCSI_FLAG_DONE) == 0) {
		seen_intr = B_FALSE;

		/* Disable interrupts from H/W */
		pvscsi_mask_intr(pvs);

		/* Wait for interrupt to arrive */
		for (i = 0; i < cycles; i++) {
			uint32_t status;

			mutex_enter(&pvs->rx_mutex);
			mutex_enter(&pvs->intr_mutex);
			status = pvscsi_read_intr_status(pvs);
			if ((status & PVSCSI_INTR_ALL_SUPPORTED) != 0) {
				/* Check completion ring */
				mutex_exit(&pvs->intr_mutex);
				dcmd = pvscsi_process_comp_ring(pvs);
				mutex_exit(&pvs->rx_mutex);
				seen_intr = B_TRUE;
				break;
			} else {
				mutex_exit(&pvs->intr_mutex);
				mutex_exit(&pvs->rx_mutex);
				drv_usecwait(USECS_TO_WAIT);
			}
		}

		/* Enable interrupts from H/W */
		pvscsi_unmask_intr(pvs);

		if (!seen_intr) {
			/* No interrupts seen from device during the timeout */
			mutex_enter(&pvs->tx_mutex);
			mutex_enter(&pvs->rx_mutex);
			if ((cmd->flags & PVSCSI_FLAGS_COMPLETION) != 0) {
				/* Command was cancelled asynchronously */
				dcmd = NULL;
			} else if ((pvscsi_abort_cmd(cmd,
			    &dcmd)) == CMD_ABORTED) {
				/* Command was cancelled in hardware */
				pkt->pkt_state |= (STAT_TIMEOUT | STAT_ABORTED);
				pkt->pkt_statistics |= (STAT_TIMEOUT |
				    STAT_ABORTED);
				pkt->pkt_reason = CMD_TIMEOUT;
			}
			mutex_exit(&pvs->rx_mutex);
			mutex_exit(&pvs->tx_mutex);

			/*
			 * Complete commands that might be on completion list.
			 * Target command can also be on the list in case it was
			 * completed before it could be actually cancelled.
			 */
			break;
		}

		pvscsi_complete_chained(dcmd);

		if (!seen_intr)
			break;
	}

	return (TRAN_ACCEPT);
}
937 937
/*
 * Abort all queued commands, or only those addressed to ap's target when
 * ap is non-NULL.  Aborted/completed commands are merged into a single
 * chain returned through *pending so the caller can complete them after
 * dropping locks; each aborted command gets marker_flag set.
 *
 * Caller must hold both rx_mutex and tx_mutex (pvs->mutex is taken and
 * dropped internally around queue manipulation).
 */
static void
pvscsi_abort_all(struct scsi_address *ap, pvscsi_softc_t *pvs,
    pvscsi_cmd_t **pending, int marker_flag)
{
	int qlen = pvs->cmd_queue_len;
	pvscsi_cmd_t *cmd, *pcmd, *phead = NULL;

	ASSERT(mutex_owned(&pvs->rx_mutex));
	ASSERT(mutex_owned(&pvs->tx_mutex));

	/*
	 * Try to abort all queued commands, merging commands waiting
	 * for completion into a single list to complete them at one
	 * time when mutex is released.
	 */
	while (qlen > 0) {
		mutex_enter(&pvs->mutex);
		cmd = list_remove_head(&pvs->cmd_queue);
		ASSERT(cmd != NULL);

		qlen--;

		if (ap == NULL || ap->a_target == cmd->cmd_target) {
			/* Snapshot queue length to detect side completions. */
			int c = --pvs->cmd_queue_len;

			mutex_exit(&pvs->mutex);

			if (pvscsi_abort_cmd(cmd, &pcmd) == CMD_ABORTED) {
				/*
				 * Assume command is completely cancelled now,
				 * so mark it as requested.
				 */
				cmd->flags |= marker_flag;
			}

			/* Account for commands the abort itself completed. */
			qlen -= (c - pvs->cmd_queue_len);

			/*
			 * Now merge current pending commands with
			 * previous ones.
			 */
			if (phead == NULL) {
				phead = pcmd;
			} else if (pcmd != NULL) {
				phead->tail_cmd->next_cmd = pcmd;
				phead->tail_cmd = pcmd->tail_cmd;
			}
		} else {
			/* Different target: requeue untouched. */
			list_insert_tail(&pvs->cmd_queue, cmd);
			mutex_exit(&pvs->mutex);
		}
	}

	*pending = phead;
}
993 993
994 994 static void
995 995 pvscsi_quiesce_notify(pvscsi_softc_t *pvs)
996 996 {
997 997 mutex_enter(&pvs->mutex);
998 998 if (pvs->cmd_queue_len == 0 &&
999 999 (pvs->flags & PVSCSI_HBA_QUIESCE_PENDING) != 0) {
1000 1000 pvs->flags &= ~PVSCSI_HBA_QUIESCE_PENDING;
1001 1001 cv_broadcast(&pvs->quiescevar);
1002 1002 }
1003 1003 mutex_exit(&pvs->mutex);
1004 1004 }
1005 1005
/*
 * Place a command on the device's request ring and kick the hardware.
 *
 * Returns TRAN_ACCEPT on success, or TRAN_BUSY when either no command
 * context or no request-ring slot is available.
 */
static int
pvscsi_transport_command(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd)
{
	struct PVSCSIRingReqDesc *rdesc;
	struct PVSCSIRingsState *sdesc = RINGS_STATE(pvs);
	struct scsi_pkt *pkt = CMD2PKT(cmd);
	uint32_t req_ne = sdesc->reqNumEntriesLog2;

	mutex_enter(&pvs->tx_mutex);
	mutex_enter(&pvs->mutex);
	if (!pvscsi_acquire_ctx(pvs, cmd)) {
		mutex_exit(&pvs->mutex);
		mutex_exit(&pvs->tx_mutex);
		dev_err(pvs->dip, CE_WARN, "!no free ctx available");
		return (TRAN_BUSY);
	}

	/*
	 * Indices are free-running; the ring is full when the producer
	 * is a whole ring-length ahead of the completion consumer.
	 */
	if ((sdesc->reqProdIdx - sdesc->cmpConsIdx) >= (1 << req_ne)) {
		pvscsi_release_ctx(cmd);
		mutex_exit(&pvs->mutex);
		mutex_exit(&pvs->tx_mutex);
		dev_err(pvs->dip, CE_WARN, "!no free I/O slots available");
		return (TRAN_BUSY);
	}
	mutex_exit(&pvs->mutex);

	cmd->flags |= PVSCSI_FLAG_TRANSPORT;

	/* Mask the free-running producer index into the ring. */
	rdesc = REQ_RING(pvs) + (sdesc->reqProdIdx & MASK(req_ne));

	bzero(&rdesc->lun, sizeof (rdesc->lun));

	rdesc->bus = 0;
	rdesc->target = cmd->cmd_target;

	/* Point the device at the ARQ buffer when auto request sense is on. */
	if ((cmd->flags & PVSCSI_FLAG_XARQ) != 0) {
		bzero((void*)cmd->arqbuf->b_un.b_addr, SENSE_BUFFER_SIZE);
		rdesc->senseLen = SENSE_BUFFER_SIZE;
		rdesc->senseAddr = cmd->arqc.dmac_laddress;
	} else {
		rdesc->senseLen = 0;
		rdesc->senseAddr = 0;
	}

	rdesc->vcpuHint = CPU->cpu_id;
	rdesc->cdbLen = cmd->cmdlen;
	bcopy(cmd->cmd_cdb, rdesc->cdb, cmd->cmdlen);

	/* Setup tag info */
	if ((cmd->flags & PVSCSI_FLAG_TAG) != 0)
		rdesc->tag = cmd->tag;
	else
		rdesc->tag = MSG_SIMPLE_QTAG;

	/* Setup I/O direction and map data buffers */
	if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0) {
		if ((cmd->flags & PVSCSI_FLAG_IO_READ) != 0)
			rdesc->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
		else
			rdesc->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
		pvscsi_map_buffers(cmd, rdesc);
	} else {
		rdesc->flags = 0;
	}

	rdesc->context = pvscsi_map_ctx(pvs, cmd->ctx);
	/* Descriptor must be globally visible before the index bump. */
	membar_producer();

	sdesc->reqProdIdx++;
	membar_producer();

	mutex_enter(&pvs->mutex);
	cmd->timeout_lbolt = ddi_get_lbolt() + SEC_TO_TICK(pkt->pkt_time);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD);
	pvscsi_add_to_queue(cmd);

	/* R/W commands are kicked via the R/W doorbell, all others via
	 * the generic one. */
	switch (cmd->pkt->pkt_cdbp[0]) {
	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:
		ASSERT((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0);
		pvscsi_submit_rw_io(pvs);
		break;
	default:
		pvscsi_submit_nonrw_io(pvs);
		break;
	}
	mutex_exit(&pvs->mutex);
	mutex_exit(&pvs->tx_mutex);

	return (TRAN_ACCEPT);
}
1103 1103
1104 1104 static int
1105 1105 pvscsi_reset_generic(pvscsi_softc_t *pvs, struct scsi_address *ap)
1106 1106 {
1107 1107 boolean_t bus_reset = (ap == NULL);
1108 1108 int flags;
1109 1109 pvscsi_cmd_t *done, *aborted;
1110 1110
1111 1111 flags = bus_reset ? PVSCSI_FLAG_RESET_BUS : PVSCSI_FLAG_RESET_DEV;
1112 1112
1113 1113 mutex_enter(&pvs->tx_mutex);
1114 1114 mutex_enter(&pvs->rx_mutex);
1115 1115 /* Try to process pending requests */
1116 1116 done = pvscsi_process_comp_ring(pvs);
1117 1117
1118 1118 /* Abort all pending requests */
1119 1119 pvscsi_abort_all(ap, pvs, &aborted, flags);
1120 1120
1121 1121 /* Reset at hardware level */
1122 1122 if (bus_reset) {
1123 1123 pvscsi_reset_bus(pvs);
1124 1124 /* Should never happen after bus reset */
1125 1125 ASSERT(pvscsi_process_comp_ring(pvs) == NULL);
1126 1126 } else {
1127 1127 pvscsi_dev_reset(pvs, ap->a_target);
1128 1128 }
1129 1129 mutex_exit(&pvs->rx_mutex);
1130 1130 mutex_exit(&pvs->tx_mutex);
1131 1131
1132 1132 pvscsi_complete_chained(done);
1133 1133 pvscsi_complete_chained(aborted);
1134 1134
1135 1135 return (1);
1136 1136 }
1137 1137
1138 1138 static void
1139 1139 pvscsi_cmd_ext_free(pvscsi_cmd_t *cmd)
1140 1140 {
1141 1141 struct scsi_pkt *pkt = CMD2PKT(cmd);
1142 1142
1143 1143 if ((cmd->flags & PVSCSI_FLAG_CDB_EXT) != 0) {
1144 1144 kmem_free(pkt->pkt_cdbp, cmd->cmdlen);
1145 1145 cmd->flags &= ~PVSCSI_FLAG_CDB_EXT;
1146 1146 }
1147 1147 if ((cmd->flags & PVSCSI_FLAG_SCB_EXT) != 0) {
1148 1148 kmem_free(pkt->pkt_scbp, cmd->statuslen);
1149 1149 cmd->flags &= ~PVSCSI_FLAG_SCB_EXT;
1150 1150 }
1151 1151 if ((cmd->flags & PVSCSI_FLAG_PRIV_EXT) != 0) {
1152 1152 kmem_free(pkt->pkt_private, cmd->tgtlen);
1153 1153 cmd->flags &= ~PVSCSI_FLAG_PRIV_EXT;
1154 1154 }
1155 1155 }
1156 1156
1157 1157 /* ARGSUSED pvs */
1158 1158 static int
1159 1159 pvscsi_cmd_ext_alloc(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd, int kf)
1160 1160 {
1161 1161 struct scsi_pkt *pkt = CMD2PKT(cmd);
1162 1162 void *buf;
1163 1163
1164 1164 if (cmd->cmdlen > sizeof (cmd->cmd_cdb)) {
1165 1165 if ((buf = kmem_zalloc(cmd->cmdlen, kf)) == NULL)
1166 1166 return (NULL);
1167 1167 pkt->pkt_cdbp = buf;
1168 1168 cmd->flags |= PVSCSI_FLAG_CDB_EXT;
1169 1169 }
1170 1170
1171 1171 if (cmd->statuslen > sizeof (cmd->cmd_scb)) {
1172 1172 if ((buf = kmem_zalloc(cmd->statuslen, kf)) == NULL)
1173 1173 goto out;
1174 1174 pkt->pkt_scbp = buf;
1175 1175 cmd->flags |= PVSCSI_FLAG_SCB_EXT;
1176 1176 cmd->cmd_rqslen = (cmd->statuslen - sizeof (cmd->cmd_scb));
1177 1177 }
1178 1178
1179 1179 if (cmd->tgtlen > sizeof (cmd->tgt_priv)) {
1180 1180 if ((buf = kmem_zalloc(cmd->tgtlen, kf)) == NULL)
1181 1181 goto out;
1182 1182 pkt->pkt_private = buf;
1183 1183 cmd->flags |= PVSCSI_FLAG_PRIV_EXT;
1184 1184 }
1185 1185
1186 1186 return (DDI_SUCCESS);
1187 1187
1188 1188 out:
1189 1189 pvscsi_cmd_ext_free(cmd);
1190 1190
1191 1191 return (NULL);
1192 1192 }
1193 1193
1194 1194 static int
1195 1195 pvscsi_setup_dma_buffer(pvscsi_softc_t *pvs, size_t length,
1196 1196 pvscsi_dma_buf_t *buf)
1197 1197 {
1198 1198 ddi_dma_cookie_t cookie;
1199 1199 uint_t ccount;
1200 1200
1201 1201 if ((ddi_dma_alloc_handle(pvs->dip, &pvscsi_ring_dma_attr,
1202 1202 DDI_DMA_SLEEP, NULL, &buf->dma_handle)) != DDI_SUCCESS) {
1203 1203 dev_err(pvs->dip, CE_WARN, "!failed to allocate DMA handle");
1204 1204 return (DDI_FAILURE);
1205 1205 }
1206 1206
1207 1207 if ((ddi_dma_mem_alloc(buf->dma_handle, length, &pvscsi_dma_attrs,
1208 1208 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &buf->addr,
1209 1209 &buf->real_length, &buf->acc_handle)) != DDI_SUCCESS) {
1210 1210 dev_err(pvs->dip, CE_WARN,
1211 1211 "!failed to allocate %ld bytes for DMA buffer", length);
1212 1212 ddi_dma_free_handle(&buf->dma_handle);
1213 1213 return (DDI_FAILURE);
1214 1214 }
1215 1215
1216 1216 if ((ddi_dma_addr_bind_handle(buf->dma_handle, NULL, buf->addr,
1217 1217 buf->real_length, DDI_DMA_CONSISTENT | DDI_DMA_RDWR, DDI_DMA_SLEEP,
1218 1218 NULL, &cookie, &ccount)) != DDI_SUCCESS) {
1219 1219 dev_err(pvs->dip, CE_WARN, "!failed to bind DMA buffer");
1220 1220 ddi_dma_free_handle(&buf->dma_handle);
1221 1221 ddi_dma_mem_free(&buf->acc_handle);
1222 1222 return (DDI_FAILURE);
1223 1223 }
1224 1224
1225 1225 /* TODO Support multipart SG regions */
1226 1226 ASSERT(ccount == 1);
1227 1227
1228 1228 buf->pa = cookie.dmac_laddress;
1229 1229
1230 1230 return (DDI_SUCCESS);
1231 1231 }
1232 1232
1233 1233 static void
1234 1234 pvscsi_free_dma_buffer(pvscsi_dma_buf_t *buf)
1235 1235 {
1236 1236 ddi_dma_free_handle(&buf->dma_handle);
1237 1237 ddi_dma_mem_free(&buf->acc_handle);
1238 1238 }
1239 1239
1240 1240 static int
1241 1241 pvscsi_setup_sg(pvscsi_softc_t *pvs)
1242 1242 {
1243 1243 int i;
1244 1244 pvscsi_cmd_ctx_t *ctx;
1245 1245 size_t size = pvs->req_depth * sizeof (pvscsi_cmd_ctx_t);
1246 1246
1247 1247 ctx = pvs->cmd_ctx = kmem_zalloc(size, KM_SLEEP);
1248 1248
1249 1249 for (i = 0; i < pvs->req_depth; ++i, ++ctx) {
1250 1250 list_insert_tail(&pvs->cmd_ctx_pool, ctx);
1251 1251 if (pvscsi_setup_dma_buffer(pvs, PAGE_SIZE,
1252 1252 &ctx->dma_buf) != DDI_SUCCESS)
1253 1253 goto cleanup;
1254 1254 }
1255 1255
1256 1256 return (DDI_SUCCESS);
1257 1257
1258 1258 cleanup:
1259 1259 for (; i >= 0; --i, --ctx) {
1260 1260 list_remove(&pvs->cmd_ctx_pool, ctx);
1261 1261 pvscsi_free_dma_buffer(&ctx->dma_buf);
1262 1262 }
1263 1263 kmem_free(pvs->cmd_ctx, size);
1264 1264
1265 1265 return (DDI_FAILURE);
1266 1266 }
1267 1267
1268 1268 static void
1269 1269 pvscsi_free_sg(pvscsi_softc_t *pvs)
1270 1270 {
1271 1271 int i;
1272 1272 pvscsi_cmd_ctx_t *ctx = pvs->cmd_ctx;
1273 1273
1274 1274 for (i = 0; i < pvs->req_depth; ++i, ++ctx) {
1275 1275 list_remove(&pvs->cmd_ctx_pool, ctx);
1276 1276 pvscsi_free_dma_buffer(&ctx->dma_buf);
1277 1277 }
1278 1278
1279 1279 kmem_free(pvs->cmd_ctx, pvs->req_pages << PAGE_SHIFT);
1280 1280 }
1281 1281
1282 1282 static int
1283 1283 pvscsi_allocate_rings(pvscsi_softc_t *pvs)
1284 1284 {
1285 1285 /* Allocate DMA buffer for rings state */
1286 1286 if (pvscsi_setup_dma_buffer(pvs, PAGE_SIZE,
1287 1287 &pvs->rings_state_buf) != DDI_SUCCESS)
1288 1288 return (DDI_FAILURE);
1289 1289
1290 1290 /* Allocate DMA buffer for request ring */
1291 1291 pvs->req_pages = MIN(pvscsi_ring_pages, PVSCSI_MAX_NUM_PAGES_REQ_RING);
1292 1292 pvs->req_depth = pvs->req_pages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
1293 1293 if (pvscsi_setup_dma_buffer(pvs, pvs->req_pages * PAGE_SIZE,
1294 1294 &pvs->req_ring_buf) != DDI_SUCCESS)
1295 1295 goto free_rings_state;
1296 1296
1297 1297 /* Allocate completion ring */
1298 1298 pvs->cmp_pages = MIN(pvscsi_ring_pages, PVSCSI_MAX_NUM_PAGES_CMP_RING);
1299 1299 if (pvscsi_setup_dma_buffer(pvs, pvs->cmp_pages * PAGE_SIZE,
1300 1300 &pvs->cmp_ring_buf) != DDI_SUCCESS)
1301 1301 goto free_req_buf;
1302 1302
1303 1303 /* Allocate message ring */
1304 1304 pvs->msg_pages = MIN(pvscsi_msg_ring_pages,
1305 1305 PVSCSI_MAX_NUM_PAGES_MSG_RING);
1306 1306 if (pvscsi_setup_dma_buffer(pvs, pvs->msg_pages * PAGE_SIZE,
1307 1307 &pvs->msg_ring_buf) != DDI_SUCCESS)
1308 1308 goto free_cmp_buf;
1309 1309
1310 1310 return (DDI_SUCCESS);
1311 1311
1312 1312 free_cmp_buf:
1313 1313 pvscsi_free_dma_buffer(&pvs->cmp_ring_buf);
1314 1314 free_req_buf:
1315 1315 pvscsi_free_dma_buffer(&pvs->req_ring_buf);
1316 1316 free_rings_state:
1317 1317 pvscsi_free_dma_buffer(&pvs->rings_state_buf);
1318 1318
1319 1319 return (DDI_FAILURE);
1320 1320 }
1321 1321
/*
 * Release all ring DMA buffers, in reverse order of their allocation
 * in pvscsi_allocate_rings().
 */
static void
pvscsi_free_rings(pvscsi_softc_t *pvs)
{
	pvscsi_free_dma_buffer(&pvs->msg_ring_buf);
	pvscsi_free_dma_buffer(&pvs->cmp_ring_buf);
	pvscsi_free_dma_buffer(&pvs->req_ring_buf);
	pvscsi_free_dma_buffer(&pvs->rings_state_buf);
}
1330 1330
1331 1331 static void
1332 1332 pvscsi_setup_rings(pvscsi_softc_t *pvs)
1333 1333 {
1334 1334 int i;
1335 1335 struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
1336 1336 struct PVSCSICmdDescSetupRings cmd = { 0 };
1337 1337 uint64_t base;
1338 1338
1339 1339 cmd.ringsStatePPN = pvs->rings_state_buf.pa >> PAGE_SHIFT;
1340 1340 cmd.reqRingNumPages = pvs->req_pages;
1341 1341 cmd.cmpRingNumPages = pvs->cmp_pages;
1342 1342
1343 1343 /* Setup request ring */
1344 1344 base = pvs->req_ring_buf.pa;
1345 1345 for (i = 0; i < pvs->req_pages; i++) {
1346 1346 cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
1347 1347 base += PAGE_SIZE;
1348 1348 }
1349 1349
1350 1350 /* Setup completion ring */
1351 1351 base = pvs->cmp_ring_buf.pa;
1352 1352 for (i = 0; i < pvs->cmp_pages; i++) {
1353 1353 cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
1354 1354 base += PAGE_SIZE;
1355 1355 }
1356 1356
1357 1357 bzero(RINGS_STATE(pvs), PAGE_SIZE);
1358 1358 bzero(REQ_RING(pvs), pvs->req_pages * PAGE_SIZE);
1359 1359 bzero(CMP_RING(pvs), pvs->cmp_pages * PAGE_SIZE);
1360 1360
1361 1361 /* Issue SETUP command */
1362 1362 pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_SETUP_RINGS, &cmd, sizeof (cmd));
1363 1363
1364 1364 /* Setup message ring */
1365 1365 cmd_msg.numPages = pvs->msg_pages;
1366 1366 base = pvs->msg_ring_buf.pa;
1367 1367
1368 1368 for (i = 0; i < pvs->msg_pages; i++) {
1369 1369 cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
1370 1370 base += PAGE_SIZE;
1371 1371 }
1372 1372 bzero(MSG_RING(pvs), pvs->msg_pages * PAGE_SIZE);
1373 1373
1374 1374 pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_SETUP_MSG_RING, &cmd_msg,
1375 1375 sizeof (cmd_msg));
1376 1376 }
1377 1377
/*
 * Locate and map the device's MMIO BAR.
 *
 * Walks the "reg" property, and for each memory-space (non-I/O) BAR of
 * exactly PVSCSI_MEM_SPACE_SIZE bytes, maps it into pvs->mmio_base /
 * pvs->mmio_handle.  Returns DDI_SUCCESS once a BAR is mapped,
 * DDI_FAILURE otherwise.
 */
static int
pvscsi_setup_io(pvscsi_softc_t *pvs)
{
	int offset, rcount, rn, type;
	int ret = DDI_FAILURE;
	off_t regsize;
	pci_regspec_t *regs;
	uint_t regs_length;

	if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pvs->dip,
	    DDI_PROP_DONTPASS, "reg", (int **)&regs,
	    &regs_length) != DDI_PROP_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to lookup 'reg' property");
		return (DDI_FAILURE);
	}

	rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t);

	/* Scan every possible BAR offset in PCI config space. */
	for (offset = PCI_CONF_BASE0; offset <= PCI_CONF_BASE5; offset += 4) {
		/* Find the "reg" entry describing this BAR, if any. */
		for (rn = 0; rn < rcount; ++rn) {
			if (PCI_REG_REG_G(regs[rn].pci_phys_hi) == offset) {
				type = regs[rn].pci_phys_hi & PCI_ADDR_MASK;
				break;
			}
		}

		/* No "reg" entry for this BAR; 'type' is only set when found. */
		if (rn >= rcount)
			continue;

		if (type != PCI_ADDR_IO) {
			if (ddi_dev_regsize(pvs->dip, rn,
			    &regsize) != DDI_SUCCESS) {
				dev_err(pvs->dip, CE_WARN,
				    "!failed to get size of reg %d", rn);
				goto out;
			}
			if (regsize == PVSCSI_MEM_SPACE_SIZE) {
				if (ddi_regs_map_setup(pvs->dip, rn,
				    &pvs->mmio_base, 0, 0,
				    &pvscsi_mmio_attr,
				    &pvs->mmio_handle) != DDI_SUCCESS) {
					dev_err(pvs->dip, CE_WARN,
					    "!failed to map MMIO BAR");
					goto out;
				}
				ret = DDI_SUCCESS;
				break;
			}
		}
	}

out:
	ddi_prop_free(regs);

	return (ret);
}
1434 1434
/* Unmap the MMIO BAR mapped by pvscsi_setup_io(). */
static void
pvscsi_free_io(pvscsi_softc_t *pvs)
{
	ddi_regs_map_free(&pvs->mmio_handle);
}
1440 1440
/*
 * Enable all allocated interrupt vectors — as a block when the
 * implementation supports it, otherwise one at a time — and then
 * unmask completion and message interrupts in the device.  Returns
 * the status of the enable operation.
 */
static int
pvscsi_enable_intrs(pvscsi_softc_t *pvs)
{
	int i, rc, intr_caps;

	if ((rc = ddi_intr_get_cap(pvs->intr_htable[0], &intr_caps)) !=
	    DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to get interrupt caps");
		return (DDI_FAILURE);
	}

	if ((intr_caps & DDI_INTR_FLAG_BLOCK) != 0) {
		if ((rc = ddi_intr_block_enable(pvs->intr_htable,
		    pvs->intr_cnt)) != DDI_SUCCESS) {
			dev_err(pvs->dip, CE_WARN,
			    "!failed to enable interrupt block");
		}
	} else {
		/*
		 * Enable vector by vector, rolling back already-enabled
		 * vectors on failure.  NOTE(review): 'rc' is only assigned
		 * inside this loop, so this relies on intr_cnt >= 1 (which
		 * pvscsi_register_isr() guarantees) — confirm if that
		 * invariant ever changes.
		 */
		for (i = 0; i < pvs->intr_cnt; i++) {
			if ((rc = ddi_intr_enable(pvs->intr_htable[i])) ==
			    DDI_SUCCESS)
				continue;
			dev_err(pvs->dip, CE_WARN,
			    "!failed to enable interrupt");
			while (--i >= 0)
				(void) ddi_intr_disable(pvs->intr_htable[i]);
			break;
		}
	}

	/* Unmask interrupts */
	if (rc == DDI_SUCCESS) {
		pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_MASK,
		    PVSCSI_INTR_CMPL_MASK | PVSCSI_INTR_MSG_MASK);
	}

	return (rc);
}
1479 1479
/* ARGSUSED arg2 */
/*
 * Interrupt service routine.  Claims and acknowledges the interrupt,
 * then drains the completion and message rings, dispatching completed
 * commands and messages to taskqs (falling back to inline processing
 * if dispatch fails).  When a poller is active (pvs->num_pollers > 0)
 * the interrupt is claimed but ring processing is left to the poller.
 */
static uint32_t
pvscsi_intr_handler(caddr_t arg1, caddr_t arg2)
{
	boolean_t handled;
	pvscsi_softc_t *pvs = (pvscsi_softc_t *)arg1;
	uint32_t status;

	mutex_enter(&pvs->intr_mutex);
	if (pvs->num_pollers > 0) {
		/* A polling thread will consume the rings instead. */
		mutex_exit(&pvs->intr_mutex);
		return (DDI_INTR_CLAIMED);
	}

	if (pvscsi_enable_msi) {
		/* MSI/MSI-X are never shared; always ours. */
		handled = B_TRUE;
	} else {
		/* Legacy interrupt: check and acknowledge the device. */
		status = pvscsi_read_intr_status(pvs);
		handled = (status & PVSCSI_INTR_ALL_SUPPORTED) != 0;
		if (handled)
			pvscsi_write_intr_status(pvs, status);
	}
	mutex_exit(&pvs->intr_mutex);

	if (handled) {
		boolean_t qnotify;
		pvscsi_cmd_t *pending;
		pvscsi_msg_t *msg;

		mutex_enter(&pvs->rx_mutex);
		pending = pvscsi_process_comp_ring(pvs);
		msg = pvscsi_process_msg_ring(pvs);
		mutex_exit(&pvs->rx_mutex);

		mutex_enter(&pvs->mutex);
		qnotify = HBA_QUIESCE_PENDING(pvs);
		mutex_exit(&pvs->mutex);

		/* Prefer taskq completion; complete inline on dispatch failure. */
		if (pending != NULL && ddi_taskq_dispatch(pvs->comp_tq,
		    pvscsi_complete_chained, pending,
		    DDI_NOSLEEP) == DDI_FAILURE)
			pvscsi_complete_chained(pending);

		if (msg != NULL && ddi_taskq_dispatch(pvs->msg_tq,
		    pvscsi_handle_msg, msg, DDI_NOSLEEP) == DDI_FAILURE) {
			dev_err(pvs->dip, CE_WARN,
			    "!failed to process msg type %d for target %d",
			    msg->type, msg->target);
			kmem_free(msg, sizeof (pvscsi_msg_t));
		}

		if (qnotify)
			pvscsi_quiesce_notify(pvs);
	}

	return (handled ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}
1537 1537
1538 1538 static int
1539 1539 pvscsi_register_isr(pvscsi_softc_t *pvs, int type)
1540 1540 {
1541 1541 int navail, nactual;
1542 1542 int i;
1543 1543
1544 1544 if (ddi_intr_get_navail(pvs->dip, type, &navail) != DDI_SUCCESS ||
1545 1545 navail == 0) {
1546 1546 dev_err(pvs->dip, CE_WARN,
1547 1547 "!failed to get number of available interrupts of type %d",
1548 1548 type);
1549 1549 return (DDI_FAILURE);
1550 1550 }
1551 1551 navail = MIN(navail, PVSCSI_MAX_INTRS);
1552 1552
1553 1553 pvs->intr_size = navail * sizeof (ddi_intr_handle_t);
1554 1554 if ((pvs->intr_htable = kmem_alloc(pvs->intr_size, KM_SLEEP)) == NULL) {
1555 1555 dev_err(pvs->dip, CE_WARN,
1556 1556 "!failed to allocate %d bytes for interrupt hashtable",
1557 1557 pvs->intr_size);
1558 1558 return (DDI_FAILURE);
1559 1559 }
1560 1560
1561 1561 if (ddi_intr_alloc(pvs->dip, pvs->intr_htable, type, 0, navail,
1562 1562 &nactual, DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS || nactual == 0) {
1563 1563 dev_err(pvs->dip, CE_WARN, "!failed to allocate %d interrupts",
1564 1564 navail);
1565 1565 goto free_htable;
1566 1566 }
1567 1567
1568 1568 pvs->intr_cnt = nactual;
1569 1569
1570 1570 if (ddi_intr_get_pri(pvs->intr_htable[0],
1571 1571 (uint_t *)&pvs->intr_pri) != DDI_SUCCESS) {
1572 1572 dev_err(pvs->dip, CE_WARN, "!failed to get interrupt priority");
1573 1573 goto free_intrs;
1574 1574 }
1575 1575
1576 1576 for (i = 0; i < nactual; i++) {
1577 1577 if (ddi_intr_add_handler(pvs->intr_htable[i],
1578 1578 pvscsi_intr_handler, (caddr_t)pvs, NULL) != DDI_SUCCESS) {
1579 1579 dev_err(pvs->dip, CE_WARN,
1580 1580 "!failed to add interrupt handler");
1581 1581 goto free_intrs;
1582 1582 }
1583 1583 }
1584 1584
1585 1585 return (DDI_SUCCESS);
1586 1586
1587 1587 free_intrs:
1588 1588 for (i = 0; i < nactual; i++)
1589 1589 (void) ddi_intr_free(pvs->intr_htable[i]);
1590 1590 free_htable:
1591 1591 kmem_free(pvs->intr_htable, pvs->intr_size);
1592 1592
1593 1593 return (DDI_FAILURE);
1594 1594 }
1595 1595
1596 1596 static void
1597 1597 pvscsi_free_intr_resources(pvscsi_softc_t *pvs)
1598 1598 {
1599 1599 int i;
1600 1600
1601 1601 for (i = 0; i < pvs->intr_cnt; i++) {
1602 1602 (void) ddi_intr_disable(pvs->intr_htable[i]);
1603 1603 (void) ddi_intr_remove_handler(pvs->intr_htable[i]);
1604 1604 (void) ddi_intr_free(pvs->intr_htable[i]);
1605 1605 }
1606 1606 kmem_free(pvs->intr_htable, pvs->intr_size);
1607 1607 }
1608 1608
/*
 * Choose an interrupt type and register handlers for it.  Preference
 * order is MSI-X, then MSI (both only when pvscsi_enable_msi is set),
 * then FIXED.  Note: only the first supported type is attempted — if
 * its registration fails there is no fallback to a lesser type.
 * Returns DDI_SUCCESS iff some type was registered.
 */
static int
pvscsi_setup_isr(pvscsi_softc_t *pvs)
{
	int intr_types;

	if (ddi_intr_get_supported_types(pvs->dip,
	    &intr_types) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN,
		    "!failed to get supported interrupt types");
		return (DDI_FAILURE);
	}

	if ((intr_types & DDI_INTR_TYPE_MSIX) != 0 && pvscsi_enable_msi) {
		if (pvscsi_register_isr(pvs,
		    DDI_INTR_TYPE_MSIX) == DDI_SUCCESS) {
			pvs->intr_type = DDI_INTR_TYPE_MSIX;
		} else {
			dev_err(pvs->dip, CE_WARN,
			    "!failed to install MSI-X interrupt handler");
		}
	} else if ((intr_types & DDI_INTR_TYPE_MSI) != 0 && pvscsi_enable_msi) {
		if (pvscsi_register_isr(pvs,
		    DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
			pvs->intr_type = DDI_INTR_TYPE_MSI;
		} else {
			dev_err(pvs->dip, CE_WARN,
			    "!failed to install MSI interrupt handler");
		}
	} else if ((intr_types & DDI_INTR_TYPE_FIXED) != 0) {
		if (pvscsi_register_isr(pvs,
		    DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
			pvs->intr_type = DDI_INTR_TYPE_FIXED;
		} else {
			dev_err(pvs->dip, CE_WARN,
			    "!failed to install FIXED interrupt handler");
		}
	}

	/* intr_type stays 0 (from zalloc'd softstate) when nothing worked. */
	return (pvs->intr_type == 0 ? DDI_FAILURE : DDI_SUCCESS);
}
1649 1649
/*
 * Watchdog thread: once per second, scan the command queue for timed
 * out commands, chain them up and abort each through pvscsi_abort().
 * Exits when PVSCSI_DRIVER_SHUTDOWN is set or when explicitly signalled
 * via wd_condvar, confirming termination on syncvar.
 */
static void
pvscsi_wd_thread(pvscsi_softc_t *pvs)
{
	clock_t now;
	pvscsi_cmd_t *expired, *c, *cn, **pnext;

	mutex_enter(&pvs->mutex);
	for (;;) {
		expired = NULL;
		pnext = NULL;
		now = ddi_get_lbolt();

		for (c = list_head(&pvs->cmd_queue); c != NULL; ) {
			cn = list_next(&pvs->cmd_queue, c);

			/*
			 * Commands with 'FLAG_NOINTR' are watched using their
			 * own timeouts, so we should not touch them.
			 */
			if ((c->pkt->pkt_flags & FLAG_NOINTR) == 0 &&
			    now > c->timeout_lbolt) {
				dev_err(pvs->dip, CE_WARN,
				    "!expired command: %p (%ld > %ld)",
				    (void *)c, now, c->timeout_lbolt);
				pvscsi_remove_from_queue(c);
				/*
				 * Link expired commands into a singly-linked
				 * chain via next_cmd.  NOTE(review): the last
				 * node's next_cmd is never explicitly NULLed
				 * here — presumably cleared elsewhere (e.g.
				 * on dequeue); verify before relying on it.
				 */
				if (expired == NULL)
					expired = c;
				if (pnext == NULL) {
					pnext = &c->next_cmd;
				} else {
					*pnext = c;
					pnext = &c->next_cmd;
				}
			}
			c = cn;
		}
		mutex_exit(&pvs->mutex);

		/* Now cancel all expired commands */
		if (expired != NULL) {
			struct scsi_address sa = {0};
			/* Build a fake SCSI address */
			sa.a_hba_tran = pvs->tran;
			while (expired != NULL) {
				c = expired->next_cmd;
				sa.a_target = expired->cmd_target;
				sa.a_lun = 0;
				(void) pvscsi_abort(&sa, CMD2PKT(expired));
				expired = c;
			}
		}

		mutex_enter(&pvs->mutex);
		if ((pvs->flags & PVSCSI_DRIVER_SHUTDOWN) != 0) {
			/* Finish job */
			break;
		}
		/* cv_reltimedwait() returns >0 when signalled, -1 on timeout. */
		if (cv_reltimedwait(&pvs->wd_condvar, &pvs->mutex,
		    SEC_TO_TICK(1), TR_CLOCK_TICK) > 0) {
			/* Explicitly woken up, finish job */
			break;
		}
	}

	/* Confirm thread termination */
	cv_signal(&pvs->syncvar);
	mutex_exit(&pvs->mutex);
}
1718 1718
/*
 * kmem cache constructor for pvscsi_cmd_t: pre-allocates the DMA handle
 * for data transfers plus the ARQ (auto request sense) buffer, with its
 * own DMA handle bound for device reads into the sense buffer.
 * Returns 0 on success, -1 on failure (kmem constructor convention).
 */
static int
pvscsi_ccache_constructor(void *buf, void *cdrarg, int kmflags)
{
	int (*callback)(caddr_t);
	uint_t cookiec;
	pvscsi_cmd_t *cmd = (pvscsi_cmd_t *)buf;
	pvscsi_softc_t *pvs = cdrarg;
	struct scsi_address ap;

	callback = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
	/* Minimal scsi_address for scsi_alloc_consistent_buf() below. */
	ap.a_hba_tran = pvs->tran;
	ap.a_target = 0;
	ap.a_lun = 0;

	/* Allocate a DMA handle for data transfers */
	if ((ddi_dma_alloc_handle(pvs->dip, &pvs->io_dma_attr, callback,
	    NULL, &cmd->cmd_dmahdl)) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to allocate DMA handle");
		return (-1);
	}

	/* Setup ARQ buffer */
	if ((cmd->arqbuf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
	    SENSE_BUFFER_SIZE, B_READ, callback, NULL)) == NULL) {
		dev_err(pvs->dip, CE_WARN, "!failed to allocate ARQ buffer");
		goto free_handle;
	}

	if (ddi_dma_alloc_handle(pvs->dip, &pvs->hba_dma_attr,
	    callback, NULL, &cmd->arqhdl) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN,
		    "!failed to allocate DMA handle for ARQ buffer");
		goto free_arqbuf;
	}

	/* Bind so the cookie (cmd->arqc) can be handed to the device. */
	if (ddi_dma_buf_bind_handle(cmd->arqhdl, cmd->arqbuf,
	    (DDI_DMA_READ | DDI_DMA_CONSISTENT), callback, NULL,
	    &cmd->arqc, &cookiec) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to bind ARQ buffer");
		goto free_arqhdl;
	}

	return (0);

free_arqhdl:
	ddi_dma_free_handle(&cmd->arqhdl);
free_arqbuf:
	scsi_free_consistent_buf(cmd->arqbuf);
free_handle:
	ddi_dma_free_handle(&cmd->cmd_dmahdl);

	return (-1);
}
1772 1772
1773 1773 /* ARGSUSED cdrarg */
1774 1774 static void
1775 1775 pvscsi_ccache_destructor(void *buf, void *cdrarg)
1776 1776 {
1777 1777 pvscsi_cmd_t *cmd = (pvscsi_cmd_t *)buf;
1778 1778
1779 1779 if (cmd->cmd_dmahdl != NULL) {
1780 1780 (void) ddi_dma_unbind_handle(cmd->cmd_dmahdl);
1781 1781 ddi_dma_free_handle(&cmd->cmd_dmahdl);
1782 1782 cmd->cmd_dmahdl = NULL;
1783 1783 }
1784 1784
1785 1785 if (cmd->arqhdl != NULL) {
1786 1786 (void) ddi_dma_unbind_handle(cmd->arqhdl);
1787 1787 ddi_dma_free_handle(&cmd->arqhdl);
1788 1788 cmd->arqhdl = NULL;
1789 1789 }
1790 1790
1791 1791 if (cmd->arqbuf != NULL) {
1792 1792 scsi_free_consistent_buf(cmd->arqbuf);
1793 1793 cmd->arqbuf = NULL;
1794 1794 }
1795 1795 }
1796 1796
1797 1797 /* tran_* entry points and setup */
1798 1798 /* ARGSUSED hba_dip tgt_dip hba_tran */
1799 1799 static int
1800 1800 pvscsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1801 1801 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1802 1802 {
1803 1803 pvscsi_softc_t *pvs = SDEV2PRIV(sd);
1804 1804
1805 1805 ASSERT(pvs != NULL);
1806 1806
1807 1807 if (sd->sd_address.a_target >= PVSCSI_MAXTGTS)
1808 1808 return (DDI_FAILURE);
1809 1809
1810 1810 return (DDI_SUCCESS);
1811 1811 }
1812 1812
/*
 * tran_start(9E): submit a SCSI packet to the device.  Packets with
 * FLAG_NOINTR are polled to completion here instead of relying on
 * interrupt-driven completion.  Returns a TRAN_* code.
 */
static int
pvscsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	boolean_t poll = ((pkt->pkt_flags & FLAG_NOINTR) != 0);
	int rc;
	pvscsi_cmd_t *cmd = PKT2CMD(pkt);
	pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;

	ASSERT(cmd->pkt == pkt);
	ASSERT(cmd->cmd_pvs == pvs);

	/*
	 * Reinitialize some fields because the packet may
	 * have been resubmitted.
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;

	/* Zero status byte */
	*(pkt->pkt_scbp) = 0;

	if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0) {
		ASSERT(cmd->cmd_dma_count != 0);
		pkt->pkt_resid = cmd->cmd_dma_count;

		/*
		 * Consistent packets need to be synced first
		 * (only for data going out).
		 */
		if ((cmd->flags & PVSCSI_FLAG_IO_IOPB) != 0) {
			(void) ddi_dma_sync(cmd->cmd_dmahdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
		}
	}

	cmd->cmd_target = ap->a_target;

	/* Refuse new work while quiesced, except polled packets. */
	mutex_enter(&pvs->mutex);
	if (HBA_IS_QUIESCED(pvs) && !poll) {
		mutex_exit(&pvs->mutex);
		return (TRAN_BUSY);
	}
	mutex_exit(&pvs->mutex);

	rc = pvscsi_transport_command(pvs, cmd);

	if (poll) {
		pvscsi_cmd_t *dcmd;
		boolean_t qnotify;

		/* Spin until this command completes. */
		if (rc == TRAN_ACCEPT)
			rc = pvscsi_poll_cmd(pvs, cmd);

		/* Drain anything else that completed while polling. */
		mutex_enter(&pvs->rx_mutex);
		dcmd = pvscsi_process_comp_ring(pvs);
		mutex_exit(&pvs->rx_mutex);

		mutex_enter(&pvs->mutex);
		qnotify = HBA_QUIESCE_PENDING(pvs);
		mutex_exit(&pvs->mutex);

		pvscsi_complete_chained(dcmd);

		if (qnotify)
			pvscsi_quiesce_notify(pvs);
	}

	return (rc);
}
1883 1883
1884 1884 static int
1885 1885 pvscsi_reset(struct scsi_address *ap, int level)
1886 1886 {
1887 1887 pvscsi_softc_t *pvs = AP2PRIV(ap);
1888 1888
1889 1889 switch (level) {
1890 1890 case RESET_ALL:
1891 1891 return (pvscsi_reset_generic(pvs, NULL));
1892 1892 case RESET_TARGET:
1893 1893 ASSERT(ap != NULL);
1894 1894 return (pvscsi_reset_generic(pvs, ap));
1895 1895 default:
1896 1896 return (0);
1897 1897 }
1898 1898 }
1899 1899
1900 1900 static int
1901 1901 pvscsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1902 1902 {
1903 1903 boolean_t qnotify = B_FALSE;
1904 1904 pvscsi_cmd_t *pending;
1905 1905 pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;
1906 1906
1907 1907 mutex_enter(&pvs->tx_mutex);
1908 1908 mutex_enter(&pvs->rx_mutex);
1909 1909 if (pkt != NULL) {
1910 1910 /* Abort single command */
1911 1911 pvscsi_cmd_t *cmd = PKT2CMD(pkt);
1912 1912
1913 1913 if (pvscsi_abort_cmd(cmd, &pending) == CMD_ABORTED) {
1914 1914 /* Assume command is completely cancelled now */
1915 1915 cmd->flags |= PVSCSI_FLAG_ABORTED;
1916 1916 }
1917 1917 } else {
1918 1918 /* Abort all commands on the bus */
1919 1919 pvscsi_abort_all(ap, pvs, &pending, PVSCSI_FLAG_ABORTED);
1920 1920 }
1921 1921 qnotify = HBA_QUIESCE_PENDING(pvs);
1922 1922 mutex_exit(&pvs->rx_mutex);
1923 1923 mutex_exit(&pvs->tx_mutex);
1924 1924
1925 1925 pvscsi_complete_chained(pending);
1926 1926
1927 1927 if (qnotify)
1928 1928 pvscsi_quiesce_notify(pvs);
1929 1929
1930 1930 return (1);
1931 1931 }
1932 1932
1933 1933 /* ARGSUSED tgtonly */
1934 1934 static int
1935 1935 pvscsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
1936 1936 {
1937 1937 pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;
1938 1938
1939 1939 if (cap == NULL)
1940 1940 return (-1);
1941 1941
1942 1942 switch (scsi_hba_lookup_capstr(cap)) {
1943 1943 case SCSI_CAP_ARQ:
1944 1944 return ((pvs->flags & PVSCSI_HBA_AUTO_REQUEST_SENSE) != 0);
1945 1945 case SCSI_CAP_UNTAGGED_QING:
1946 1946 return (1);
1947 1947 default:
1948 1948 return (-1);
1949 1949 }
1950 1950 }
1951 1951
1952 1952 /* ARGSUSED tgtonly */
1953 1953 static int
1954 1954 pvscsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
1955 1955 {
1956 1956 pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;
1957 1957
1958 1958 if (cap == NULL)
1959 1959 return (-1);
1960 1960
1961 1961 switch (scsi_hba_lookup_capstr(cap)) {
1962 1962 case SCSI_CAP_ARQ:
1963 1963 mutex_enter(&pvs->mutex);
1964 1964 if (value == 0)
1965 1965 pvs->flags &= ~PVSCSI_HBA_AUTO_REQUEST_SENSE;
1966 1966 else
1967 1967 pvs->flags |= PVSCSI_HBA_AUTO_REQUEST_SENSE;
1968 1968 mutex_exit(&pvs->mutex);
1969 1969 return (1);
1970 1970 default:
1971 1971 return (0);
1972 1972 }
1973 1973 }
1974 1974
1975 1975 static void
1976 1976 pvscsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1977 1977 {
1978 1978 pvscsi_cmd_t *cmd = PKT2CMD(pkt);
1979 1979 pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;
1980 1980
1981 1981 ASSERT(cmd->cmd_pvs == pvs);
1982 1982
1983 1983 if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0) {
1984 1984 cmd->flags &= ~PVSCSI_FLAG_DMA_VALID;
1985 1985 (void) ddi_dma_unbind_handle(cmd->cmd_dmahdl);
1986 1986 }
1987 1987
1988 1988 if (cmd->ctx != NULL) {
1989 1989 mutex_enter(&pvs->mutex);
1990 1990 pvscsi_release_ctx(cmd);
1991 1991 mutex_exit(&pvs->mutex);
1992 1992 }
1993 1993
1994 1994 if ((cmd->flags & PVSCSI_FLAGS_EXT) != 0)
1995 1995 pvscsi_cmd_ext_free(cmd);
1996 1996
1997 1997 kmem_cache_free(pvs->cmd_cache, cmd);
1998 1998 }
1999 1999
2000 2000 static struct scsi_pkt *
2001 2001 pvscsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, struct buf *bp,
2002 2002 int cmdlen, int statuslen, int tgtlen, int flags, int (*callback)(),
2003 2003 caddr_t arg)
2004 2004 {
2005 2005 boolean_t is_new;
2006 2006 int kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
2007 2007 int rc, i;
2008 2008 pvscsi_cmd_t *cmd;
2009 2009 pvscsi_softc_t *pvs;
2010 2010
2011 2011 pvs = ap->a_hba_tran->tran_hba_private;
2012 2012 ASSERT(pvs != NULL);
2013 2013
2014 2014 /* Allocate a new SCSI packet */
2015 2015 if (pkt == NULL) {
2016 2016 ddi_dma_handle_t saved_dmahdl, saved_arqhdl;
2017 2017 struct buf *saved_arqbuf;
2018 2018 ddi_dma_cookie_t saved_arqc;
2019 2019
2020 2020 is_new = B_TRUE;
2021 2021
2022 2022 if ((cmd = kmem_cache_alloc(pvs->cmd_cache, kf)) == NULL)
2023 2023 return (NULL);
2024 2024
2025 2025 saved_dmahdl = cmd->cmd_dmahdl;
2026 2026 saved_arqhdl = cmd->arqhdl;
2027 2027 saved_arqbuf = cmd->arqbuf;
2028 2028 saved_arqc = cmd->arqc;
2029 2029
2030 2030 bzero(cmd, sizeof (pvscsi_cmd_t) -
2031 2031 sizeof (cmd->cached_cookies));
2032 2032
2033 2033 cmd->cmd_pvs = pvs;
2034 2034 cmd->cmd_dmahdl = saved_dmahdl;
2035 2035 cmd->arqhdl = saved_arqhdl;
2036 2036 cmd->arqbuf = saved_arqbuf;
2037 2037 cmd->arqc = saved_arqc;
2038 2038
2039 2039 pkt = &cmd->cached_pkt;
2040 2040 pkt->pkt_ha_private = (opaque_t)cmd;
2041 2041 pkt->pkt_address = *ap;
2042 2042 pkt->pkt_scbp = (uint8_t *)&cmd->cmd_scb;
2043 2043 pkt->pkt_cdbp = (uint8_t *)&cmd->cmd_cdb;
2044 2044 pkt->pkt_private = (opaque_t)&cmd->tgt_priv;
2045 2045
2046 2046 cmd->tgtlen = tgtlen;
2047 2047 cmd->statuslen = statuslen;
2048 2048 cmd->cmdlen = cmdlen;
2049 2049 cmd->pkt = pkt;
2050 2050 cmd->ctx = NULL;
2051 2051
2052 2052 /* Allocate extended buffers */
2053 2053 if ((cmdlen > sizeof (cmd->cmd_cdb)) ||
2054 2054 (statuslen > sizeof (cmd->cmd_scb)) ||
2055 2055 (tgtlen > sizeof (cmd->tgt_priv))) {
2056 2056 if (pvscsi_cmd_ext_alloc(pvs, cmd, kf) != DDI_SUCCESS) {
2057 2057 dev_err(pvs->dip, CE_WARN,
2058 2058 "!extent allocation failed");
2059 2059 goto out;
2060 2060 }
2061 2061 }
2062 2062 } else {
2063 2063 is_new = B_FALSE;
2064 2064
2065 2065 cmd = PKT2CMD(pkt);
2066 2066 cmd->flags &= PVSCSI_FLAGS_PERSISTENT;
2067 2067 }
2068 2068
2069 2069 ASSERT((cmd->flags & PVSCSI_FLAG_TRANSPORT) == 0);
2070 2070
2071 2071 if ((flags & PKT_XARQ) != 0)
2072 2072 cmd->flags |= PVSCSI_FLAG_XARQ;
2073 2073
2074 2074 /* Handle partial DMA transfers */
2075 2075 if (cmd->cmd_nwin > 0) {
2076 2076 if (++cmd->cmd_winindex >= cmd->cmd_nwin)
2077 2077 return (NULL);
2078 2078 if (ddi_dma_getwin(cmd->cmd_dmahdl, cmd->cmd_winindex,
2079 2079 &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
2080 2080 &cmd->cmd_dmac, &cmd->cmd_dmaccount) == DDI_FAILURE)
2081 2081 return (NULL);
2082 2082 goto handle_dma_cookies;
2083 2083 }
2084 2084
2085 2085 /* Setup data buffer */
2086 2086 if (bp != NULL && bp->b_bcount > 0 &&
2087 2087 (cmd->flags & PVSCSI_FLAG_DMA_VALID) == 0) {
2088 2088 int dma_flags;
2089 2089
2090 2090 ASSERT(cmd->cmd_dmahdl != NULL);
2091 2091
2092 2092 if ((bp->b_flags & B_READ) != 0) {
2093 2093 cmd->flags |= PVSCSI_FLAG_IO_READ;
2094 2094 dma_flags = DDI_DMA_READ;
2095 2095 } else {
2096 2096 cmd->flags &= ~PVSCSI_FLAG_IO_READ;
2097 2097 dma_flags = DDI_DMA_WRITE;
2098 2098 }
2099 2099 if ((flags & PKT_CONSISTENT) != 0) {
2100 2100 cmd->flags |= PVSCSI_FLAG_IO_IOPB;
2101 2101 dma_flags |= DDI_DMA_CONSISTENT;
2102 2102 }
2103 2103 if ((flags & PKT_DMA_PARTIAL) != 0)
2104 2104 dma_flags |= DDI_DMA_PARTIAL;
2105 2105
2106 2106 rc = ddi_dma_buf_bind_handle(cmd->cmd_dmahdl, bp,
2107 2107 dma_flags, callback, arg, &cmd->cmd_dmac,
2108 2108 &cmd->cmd_dmaccount);
2109 2109 if (rc == DDI_DMA_PARTIAL_MAP) {
2110 2110 (void) ddi_dma_numwin(cmd->cmd_dmahdl,
2111 2111 &cmd->cmd_nwin);
2112 2112 cmd->cmd_winindex = 0;
2113 2113 (void) ddi_dma_getwin(cmd->cmd_dmahdl,
2114 2114 cmd->cmd_winindex, &cmd->cmd_dma_offset,
2115 2115 &cmd->cmd_dma_len, &cmd->cmd_dmac,
2116 2116 &cmd->cmd_dmaccount);
2117 2117 } else if (rc != 0 && rc != DDI_DMA_MAPPED) {
2118 2118 switch (rc) {
2119 2119 case DDI_DMA_NORESOURCES:
2120 2120 bioerror(bp, 0);
2121 2121 break;
2122 2122 case DDI_DMA_BADATTR:
2123 2123 case DDI_DMA_NOMAPPING:
2124 2124 bioerror(bp, EFAULT);
2125 2125 break;
2126 2126 case DDI_DMA_TOOBIG:
2127 2127 default:
2128 2128 bioerror(bp, EINVAL);
2129 2129 break;
2130 2130 }
2131 2131 cmd->flags &= ~PVSCSI_FLAG_DMA_VALID;
2132 2132 goto out;
2133 2133 }
2134 2134
2135 2135 handle_dma_cookies:
2136 2136 ASSERT(cmd->cmd_dmaccount > 0);
2137 2137 if (cmd->cmd_dmaccount > PVSCSI_MAX_SG_SIZE) {
2138 2138 dev_err(pvs->dip, CE_WARN,
2139 2139 "!invalid cookie count: %d (max %d)",
2140 2140 cmd->cmd_dmaccount, PVSCSI_MAX_SG_SIZE);
2141 2141 bioerror(bp, EINVAL);
2142 2142 goto out;
2143 2143 }
2144 2144
2145 2145 cmd->flags |= PVSCSI_FLAG_DMA_VALID;
2146 2146 cmd->cmd_dma_count = cmd->cmd_dmac.dmac_size;
2147 2147 cmd->cmd_total_dma_count += cmd->cmd_dmac.dmac_size;
2148 2148
2149 2149 cmd->cached_cookies[0] = cmd->cmd_dmac;
2150 2150
2151 2151 /*
2152 2152 * Calculate total amount of bytes for this I/O and
2153 2153 * store cookies for further processing.
2154 2154 */
2155 2155 for (i = 1; i < cmd->cmd_dmaccount; i++) {
2156 2156 ddi_dma_nextcookie(cmd->cmd_dmahdl, &cmd->cmd_dmac);
2157 2157 cmd->cached_cookies[i] = cmd->cmd_dmac;
2158 2158 cmd->cmd_dma_count += cmd->cmd_dmac.dmac_size;
2159 2159 cmd->cmd_total_dma_count += cmd->cmd_dmac.dmac_size;
2160 2160 }
2161 2161
2162 2162 pkt->pkt_resid = (bp->b_bcount - cmd->cmd_total_dma_count);
2163 2163 }
2164 2164
2165 2165 return (pkt);
2166 2166
2167 2167 out:
2168 2168 if (is_new)
2169 2169 pvscsi_destroy_pkt(ap, pkt);
2170 2170
2171 2171 return (NULL);
2172 2172 }
2173 2173
2174 2174 /* ARGSUSED ap */
2175 2175 static void
2176 2176 pvscsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2177 2177 {
2178 2178 pvscsi_cmd_t *cmd = PKT2CMD(pkt);
2179 2179
2180 2180 if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0) {
2181 2181 (void) ddi_dma_unbind_handle(cmd->cmd_dmahdl);
2182 2182 cmd->flags &= ~PVSCSI_FLAG_DMA_VALID;
2183 2183 }
2184 2184 }
2185 2185
2186 2186 /* ARGSUSED ap */
2187 2187 static void
2188 2188 pvscsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2189 2189 {
2190 2190 pvscsi_cmd_t *cmd = PKT2CMD(pkt);
2191 2191
2192 2192 if (cmd->cmd_dmahdl != NULL) {
2193 2193 (void) ddi_dma_sync(cmd->cmd_dmahdl, 0, 0,
2194 2194 (cmd->flags & PVSCSI_FLAG_IO_READ) ?
2195 2195 DDI_DMA_SYNC_FORCPU : DDI_DMA_SYNC_FORDEV);
2196 2196 }
2197 2197
2198 2198 }
2199 2199
/* ARGSUSED ap flag callback arg */
/*
 * tran_reset_notify(9E): reset notification callbacks are not
 * supported by this driver; every registration request is declined.
 */
static int
pvscsi_reset_notify(struct scsi_address *ap, int flag,
    void (*callback)(caddr_t), caddr_t arg)
{
	return (DDI_FAILURE);
}
2207 2207
/*
 * tran_quiesce(9E): stop activity on the HBA.  Marks the instance
 * quiesced, waits for the outstanding command queue to drain, then
 * suspends both taskqs so no further completions/messages are
 * delivered.  Returns 0 on success, -1 if the instance is not set up.
 */
static int
pvscsi_quiesce_hba(dev_info_t *dip)
{
	pvscsi_softc_t *pvs;
	scsi_hba_tran_t *tran;

	if ((tran = ddi_get_driver_private(dip)) == NULL ||
	    (pvs = TRAN2PRIV(tran)) == NULL)
		return (-1);

	mutex_enter(&pvs->mutex);
	if (!HBA_IS_QUIESCED(pvs))
		pvs->flags |= PVSCSI_HBA_QUIESCED;

	if (pvs->cmd_queue_len != 0) {
		/* Outstanding commands present, wait */
		/*
		 * QUIESCE_PENDING tells the completion path to signal
		 * quiescevar once the queue drains to zero.
		 */
		pvs->flags |= PVSCSI_HBA_QUIESCE_PENDING;
		cv_wait(&pvs->quiescevar, &pvs->mutex);
		ASSERT(pvs->cmd_queue_len == 0);
	}
	mutex_exit(&pvs->mutex);

	/* Suspend taskq delivery and complete all scheduled tasks */
	ddi_taskq_suspend(pvs->msg_tq);
	ddi_taskq_wait(pvs->msg_tq);
	ddi_taskq_suspend(pvs->comp_tq);
	ddi_taskq_wait(pvs->comp_tq);

	return (0);
}
2238 2238
2239 2239 static int
2240 2240 pvscsi_unquiesce_hba(dev_info_t *dip)
2241 2241 {
2242 2242 pvscsi_softc_t *pvs;
2243 2243 scsi_hba_tran_t *tran;
2244 2244
2245 2245 if ((tran = ddi_get_driver_private(dip)) == NULL ||
2246 2246 (pvs = TRAN2PRIV(tran)) == NULL)
2247 2247 return (-1);
2248 2248
2249 2249 mutex_enter(&pvs->mutex);
2250 2250 if (!HBA_IS_QUIESCED(pvs)) {
2251 2251 mutex_exit(&pvs->mutex);
2252 2252 return (0);
2253 2253 }
2254 2254 ASSERT(pvs->cmd_queue_len == 0);
2255 2255 pvs->flags &= ~PVSCSI_HBA_QUIESCED;
2256 2256 mutex_exit(&pvs->mutex);
2257 2257
2258 2258 /* Resume taskq delivery */
2259 2259 ddi_taskq_resume(pvs->msg_tq);
2260 2260 ddi_taskq_resume(pvs->comp_tq);
2261 2261
2262 2262 return (0);
2263 2263 }
2264 2264
2265 2265 static int
2266 2266 pvscsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
2267 2267 void *arg, dev_info_t **childp)
2268 2268 {
2269 2269 char *p;
2270 2270 int circ;
2271 2271 int ret = NDI_FAILURE;
2272 2272 long target = 0;
2273 2273 pvscsi_softc_t *pvs;
2274 2274 scsi_hba_tran_t *tran;
2275 2275
2276 2276 tran = ddi_get_driver_private(pdip);
2277 2277 pvs = tran->tran_hba_private;
2278 2278
2279 2279 ndi_devi_enter(pdip, &circ);
2280 2280 switch (op) {
2281 2281 case BUS_CONFIG_ONE:
2282 2282 if ((p = strrchr((char *)arg, '@')) != NULL &&
2283 2283 ddi_strtol(p + 1, NULL, 10, &target) == 0)
2284 2284 ret = pvscsi_config_one(pdip, pvs, (int)target, childp);
2285 2285 break;
2286 2286 case BUS_CONFIG_DRIVER:
2287 2287 case BUS_CONFIG_ALL:
2288 2288 ret = pvscsi_config_all(pdip, pvs);
2289 2289 break;
2290 2290 default:
2291 2291 break;
2292 2292 }
2293 2293
2294 2294 if (ret == NDI_SUCCESS)
2295 2295 ret = ndi_busop_bus_config(pdip, flags, op, arg, childp, 0);
2296 2296 ndi_devi_exit(pdip, circ);
2297 2297
2298 2298 return (ret);
2299 2299 }
2300 2300
2301 2301 static int
2302 2302 pvscsi_hba_setup(pvscsi_softc_t *pvs)
2303 2303 {
2304 2304 scsi_hba_tran_t *hba_tran;
2305 2305
2306 2306 hba_tran = pvs->tran = scsi_hba_tran_alloc(pvs->dip,
2307 2307 SCSI_HBA_CANSLEEP);
2308 2308 ASSERT(pvs->tran != NULL);
2309 2309
2310 2310 hba_tran->tran_hba_private = pvs;
2311 2311 hba_tran->tran_tgt_private = NULL;
2312 2312
2313 2313 hba_tran->tran_tgt_init = pvscsi_tgt_init;
2314 2314 hba_tran->tran_tgt_free = NULL;
2315 2315 hba_tran->tran_tgt_probe = scsi_hba_probe;
2316 2316
2317 2317 hba_tran->tran_start = pvscsi_start;
2318 2318 hba_tran->tran_reset = pvscsi_reset;
2319 2319 hba_tran->tran_abort = pvscsi_abort;
2320 2320 hba_tran->tran_getcap = pvscsi_getcap;
2321 2321 hba_tran->tran_setcap = pvscsi_setcap;
2322 2322 hba_tran->tran_init_pkt = pvscsi_init_pkt;
2323 2323 hba_tran->tran_destroy_pkt = pvscsi_destroy_pkt;
2324 2324
2325 2325 hba_tran->tran_dmafree = pvscsi_dmafree;
2326 2326 hba_tran->tran_sync_pkt = pvscsi_sync_pkt;
2327 2327 hba_tran->tran_reset_notify = pvscsi_reset_notify;
2328 2328
2329 2329 hba_tran->tran_quiesce = pvscsi_quiesce_hba;
2330 2330 hba_tran->tran_unquiesce = pvscsi_unquiesce_hba;
2331 2331 hba_tran->tran_bus_reset = NULL;
2332 2332
2333 2333 hba_tran->tran_add_eventcall = NULL;
2334 2334 hba_tran->tran_get_eventcookie = NULL;
2335 2335 hba_tran->tran_post_event = NULL;
2336 2336 hba_tran->tran_remove_eventcall = NULL;
2337 2337
2338 2338 hba_tran->tran_bus_config = pvscsi_bus_config;
2339 2339
2340 2340 hba_tran->tran_interconnect_type = INTERCONNECT_SAS;
2341 2341
2342 2342 if (scsi_hba_attach_setup(pvs->dip, &pvs->hba_dma_attr, hba_tran,
2343 2343 SCSI_HBA_TRAN_CDB | SCSI_HBA_TRAN_SCB | SCSI_HBA_TRAN_CLONE) !=
2344 2344 DDI_SUCCESS) {
2345 2345 dev_err(pvs->dip, CE_WARN, "!failed to attach HBA");
2346 2346 scsi_hba_tran_free(hba_tran);
2347 2347 pvs->tran = NULL;
2348 2348 return (-1);
2349 2349 }
2350 2350
2351 2351 return (0);
2352 2352 }
2353 2353
/*
 * devo_attach(9E): bring up one PVSCSI HBA instance — soft state,
 * locks, command cache, I/O region, rings, interrupts, S/G, HBA
 * framework registration, taskqs, and the watchdog thread.  On any
 * failure the goto chain unwinds the steps completed so far.
 *
 * NOTE(review): DDI_RESUME falls through to the full attach path —
 * confirm this is intended rather than a lightweight resume.
 */
static int
pvscsi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int instance;
	pvscsi_softc_t *pvs;
	char buf[32];

	ASSERT(scsi_hba_iport_unit_address(dip) == NULL);

	switch (cmd) {
	case DDI_ATTACH:
	case DDI_RESUME:
		break;
	default:
		return (DDI_FAILURE);
	}

	instance = ddi_get_instance(dip);

	/* Allocate softstate information */
	if (ddi_soft_state_zalloc(pvscsi_sstate, instance) != DDI_SUCCESS) {
		cmn_err(CE_WARN,
		    "!ddi_soft_state_zalloc() failed for instance %d",
		    instance);
		return (DDI_FAILURE);
	}

	if ((pvs = ddi_get_soft_state(pvscsi_sstate, instance)) == NULL) {
		cmn_err(CE_WARN, "!failed to get soft state for instance %d",
		    instance);
		goto fail;
	}

	/*
	 * Indicate that we are 'sizeof (scsi_*(9S))' clean, we use
	 * scsi_pkt_size() instead.
	 */
	scsi_size_clean(dip);

	/* Setup HBA instance */
	pvs->instance = instance;
	pvs->dip = dip;
	/* Per-instance copies of the file-scope DMA attribute templates. */
	pvs->hba_dma_attr = pvscsi_hba_dma_attr;
	pvs->ring_dma_attr = pvscsi_ring_dma_attr;
	pvs->io_dma_attr = pvscsi_io_dma_attr;
	mutex_init(&pvs->mutex, "pvscsi instance mutex", MUTEX_DRIVER, NULL);
	mutex_init(&pvs->intr_mutex, "pvscsi instance interrupt mutex",
	    MUTEX_DRIVER, NULL);
	mutex_init(&pvs->rx_mutex, "pvscsi rx ring mutex", MUTEX_DRIVER, NULL);
	mutex_init(&pvs->tx_mutex, "pvscsi tx ring mutex", MUTEX_DRIVER, NULL);
	list_create(&pvs->cmd_ctx_pool, sizeof (pvscsi_cmd_ctx_t),
	    offsetof(pvscsi_cmd_ctx_t, list));
	list_create(&pvs->devnodes, sizeof (pvscsi_device_t),
	    offsetof(pvscsi_device_t, list));
	list_create(&pvs->cmd_queue, sizeof (pvscsi_cmd_t),
	    offsetof(pvscsi_cmd_t, cmd_queue_node));
	cv_init(&pvs->syncvar, "pvscsi synchronization cv", CV_DRIVER, NULL);
	cv_init(&pvs->wd_condvar, "pvscsi watchdog cv", CV_DRIVER, NULL);
	cv_init(&pvs->quiescevar, "pvscsi quiesce cv", CV_DRIVER, NULL);

	/*
	 * NOTE(review): the failure paths below free the soft state but
	 * never mutex_destroy()/cv_destroy()/list_destroy() the objects
	 * initialized above — confirm and clean up if so.
	 */
	(void) sprintf(buf, "pvscsi%d_cache", instance);
	pvs->cmd_cache = kmem_cache_create(buf, sizeof (pvscsi_cmd_t), 0,
	    pvscsi_ccache_constructor, pvscsi_ccache_destructor, NULL,
	    (void *)pvs, NULL, 0);
	if (pvs->cmd_cache == NULL) {
		dev_err(pvs->dip, CE_WARN,
		    "!failed to create a cache for SCSI commands");
		goto fail;
	}

	if ((pvscsi_setup_io(pvs)) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to setup I/O region");
		goto free_cache;
	}

	pvscsi_reset_hba(pvs);

	if ((pvscsi_allocate_rings(pvs)) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to allocate DMA rings");
		goto free_io;
	}

	pvscsi_setup_rings(pvs);

	if (pvscsi_setup_isr(pvs) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to setup ISR");
		goto free_rings;
	}

	if (pvscsi_setup_sg(pvs) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to setup S/G");
		goto free_intr;
	}

	if (pvscsi_hba_setup(pvs) != 0) {
		dev_err(pvs->dip, CE_WARN, "!failed to setup HBA");
		goto free_sg;
	}

	/* One completion worker per CPU, capped at UINT16_MAX. */
	if ((pvs->comp_tq = ddi_taskq_create(pvs->dip, "comp_tq",
	    MIN(UINT16_MAX, ncpus), TASKQ_DEFAULTPRI, 0)) == NULL) {
		dev_err(pvs->dip, CE_WARN,
		    "!failed to create completion taskq");
		goto free_sg;
	}

	/* Single-threaded queue for device message processing. */
	if ((pvs->msg_tq = ddi_taskq_create(pvs->dip, "msg_tq",
	    1, TASKQ_DEFAULTPRI, 0)) == NULL) {
		dev_err(pvs->dip, CE_WARN,
		    "!failed to create message taskq");
		goto free_comp_tq;
	}

	if (pvscsi_enable_intrs(pvs) != DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to enable interrupts");
		goto free_msg_tq;
	}

	/* Launch watchdog thread */
	pvs->wd_thread = thread_create(NULL, 0, pvscsi_wd_thread, pvs, 0, &p0,
	    TS_RUN, minclsyspri);

	return (DDI_SUCCESS);

	/* Unwind in reverse order of setup. */
free_msg_tq:
	ddi_taskq_destroy(pvs->msg_tq);
free_comp_tq:
	ddi_taskq_destroy(pvs->comp_tq);
free_sg:
	pvscsi_free_sg(pvs);
free_intr:
	pvscsi_free_intr_resources(pvs);
free_rings:
	pvscsi_reset_hba(pvs);
	pvscsi_free_rings(pvs);
free_io:
	pvscsi_free_io(pvs);
free_cache:
	kmem_cache_destroy(pvs->cmd_cache);
fail:
	ddi_soft_state_free(pvscsi_sstate, instance);

	return (DDI_FAILURE);
}
2498 2498
2499 2499 static int
2500 2500 pvscsi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2501 2501 {
2502 2502 int instance;
2503 2503 pvscsi_softc_t *pvs;
2504 2504
2505 2505 switch (cmd) {
2506 2506 case DDI_DETACH:
2507 2507 break;
2508 2508 default:
2509 2509 return (DDI_FAILURE);
2510 2510 }
2511 2511
2512 2512 instance = ddi_get_instance(dip);
2513 2513 if ((pvs = ddi_get_soft_state(pvscsi_sstate, instance)) == NULL) {
2514 2514 cmn_err(CE_WARN, "!failed to get soft state for instance %d",
2515 2515 instance);
2516 2516 return (DDI_FAILURE);
2517 2517 }
2518 2518
2519 2519 pvscsi_reset_hba(pvs);
2520 2520 pvscsi_free_intr_resources(pvs);
2521 2521
2522 2522 /* Shutdown message taskq */
2523 2523 ddi_taskq_wait(pvs->msg_tq);
2524 2524 ddi_taskq_destroy(pvs->msg_tq);
2525 2525
2526 2526 /* Shutdown completion taskq */
2527 2527 ddi_taskq_wait(pvs->comp_tq);
2528 2528 ddi_taskq_destroy(pvs->comp_tq);
2529 2529
2530 2530 /* Shutdown watchdog thread */
2531 2531 mutex_enter(&pvs->mutex);
2532 2532 pvs->flags |= PVSCSI_DRIVER_SHUTDOWN;
2533 2533 cv_signal(&pvs->wd_condvar);
2534 2534 cv_wait(&pvs->syncvar, &pvs->mutex);
2535 2535 mutex_exit(&pvs->mutex);
2536 2536
2537 2537 pvscsi_free_sg(pvs);
2538 2538 pvscsi_free_rings(pvs);
2539 2539 pvscsi_free_io(pvs);
2540 2540
2541 2541 kmem_cache_destroy(pvs->cmd_cache);
2542 2542
2543 2543 mutex_destroy(&pvs->mutex);
2544 2544 mutex_destroy(&pvs->intr_mutex);
2545 2545 mutex_destroy(&pvs->rx_mutex);
2546 2546
2547 2547 cv_destroy(&pvs->syncvar);
2548 2548 cv_destroy(&pvs->wd_condvar);
2549 2549 cv_destroy(&pvs->quiescevar);
2550 2550
2551 2551 ddi_soft_state_free(pvscsi_sstate, instance);
2552 2552 ddi_prop_remove_all(dip);
2553 2553
2554 2554 return (DDI_SUCCESS);
2555 2555 }
2556 2556
2557 2557 static int
2558 2558 pvscsi_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
2559 2559 int *rval)
2560 2560 {
2561 2561 int ret;
2562 2562
2563 2563 if (ddi_get_soft_state(pvscsi_sstate, getminor(dev)) == NULL) {
2564 2564 cmn_err(CE_WARN, "!invalid device instance: %d", getminor(dev));
2565 2565 return (ENXIO);
2566 2566 }
2567 2567
2568 2568 /* Try to handle command in a common way */
2569 2569 if ((ret = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval)) != ENOTTY)
2570 2570 return (ret);
2571 2571
2572 2572 cmn_err(CE_WARN, "!unsupported IOCTL command: 0x%X", cmd);
2573 2573
2574 2574 return (ENXIO);
2575 2575 }
2576 2576
2577 2577 static int
2578 2578 pvscsi_quiesce(dev_info_t *devi)
2579 2579 {
2580 2580 scsi_hba_tran_t *tran;
2581 2581 pvscsi_softc_t *pvs;
2582 2582
2583 2583 if ((tran = ddi_get_driver_private(devi)) == NULL)
2584 2584 return (DDI_SUCCESS);
2585 2585
2586 2586 if ((pvs = tran->tran_hba_private) == NULL)
2587 2587 return (DDI_SUCCESS);
2588 2588
2589 2589 /* Mask all interrupts from device */
2590 2590 pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_MASK, 0);
2591 2591
2592 2592 /* Reset the HBA */
2593 2593 pvscsi_reset_hba(pvs);
2594 2594
2595 2595 return (DDI_SUCCESS);
2596 2596 }
2597 2597
2598 2598 /* module */
2599 2599
/*
 * Character device entry points.  Only open/close/ioctl are meaningful
 * for an HBA driver; all other entry points are stubbed with
 * nodev/nochpoll.
 */
static struct cb_ops pvscsi_cb_ops = {
	.cb_open =	scsi_hba_open,
	.cb_close =	scsi_hba_close,
	.cb_strategy =	nodev,
	.cb_print =	nodev,
	.cb_dump =	nodev,
	.cb_read =	nodev,
	.cb_write =	nodev,
	.cb_ioctl =	pvscsi_ioctl,
	.cb_devmap =	nodev,
	.cb_mmap =	nodev,
	.cb_segmap =	nodev,
	.cb_chpoll =	nochpoll,
	.cb_prop_op =	ddi_prop_op,
	.cb_str =	NULL,
	.cb_flag =	D_MP,	/* safe for multi-threaded access */
	.cb_rev =	CB_REV,
	.cb_aread =	nodev,
	.cb_awrite =	nodev
};
2620 2620
/*
 * Device operations vector: attach/detach drive instance lifecycle,
 * quiesce supports fast reboot; probe/identify are unused stubs.
 */
static struct dev_ops pvscsi_ops = {
	.devo_rev =	DEVO_REV,
	.devo_refcnt =	0,
	.devo_getinfo =	ddi_no_info,
	.devo_identify = nulldev,
	.devo_probe =	nulldev,
	.devo_attach =	pvscsi_attach,
	.devo_detach =	pvscsi_detach,
	.devo_reset =	nodev,
	.devo_cb_ops =	&pvscsi_cb_ops,
	.devo_bus_ops =	NULL,
	.devo_power =	NULL,
	.devo_quiesce =	pvscsi_quiesce
};
2635 2635
↓ open down ↓ |
2635 lines elided |
↑ open up ↑ |
2636 2636 #define PVSCSI_IDENT "VMware PVSCSI"
2637 2637
/* Loadable-module linkage for this driver. */
static struct modldrv modldrv = {
	&mod_driverops,		/* drv_modops: this is a device driver */
	PVSCSI_IDENT,		/* drv_linkinfo: module description */
	&pvscsi_ops,		/* drv_dev_ops */
};
2643 2643
2644 2644 static struct modlinkage modlinkage = {
2645 2645 MODREV_1,
2646 - &modldrv,
2647 - NULL
2646 + { &modldrv, NULL }
2648 2647 };
2649 2648
2650 2649 int
2651 2650 _init(void)
2652 2651 {
2653 2652 int ret;
2654 2653
2655 2654 if ((ret = ddi_soft_state_init(&pvscsi_sstate,
2656 2655 sizeof (struct pvscsi_softc), PVSCSI_INITIAL_SSTATE_ITEMS)) != 0) {
2657 2656 cmn_err(CE_WARN, "!ddi_soft_state_init() failed");
2658 2657 return (ret);
2659 2658 }
2660 2659
2661 2660 if ((ret = scsi_hba_init(&modlinkage)) != 0) {
2662 2661 cmn_err(CE_WARN, "!scsi_hba_init() failed");
2663 2662 ddi_soft_state_fini(&pvscsi_sstate);
2664 2663 return (ret);
2665 2664 }
2666 2665
2667 2666 if ((ret = mod_install(&modlinkage)) != 0) {
2668 2667 cmn_err(CE_WARN, "!mod_install() failed");
2669 2668 ddi_soft_state_fini(&pvscsi_sstate);
2670 2669 scsi_hba_fini(&modlinkage);
2671 2670 }
2672 2671
2673 2672 return (ret);
2674 2673 }
2675 2674
/* _info(9E): report module information via mod_info(9F). */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
2681 2680
2682 2681 int
2683 2682 _fini(void)
2684 2683 {
2685 2684 int ret;
2686 2685
2687 2686 if ((ret = mod_remove(&modlinkage)) == 0) {
2688 2687 ddi_soft_state_fini(&pvscsi_sstate);
2689 2688 scsi_hba_fini(&modlinkage);
2690 2689 }
2691 2690
2692 2691 return (ret);
2693 2692 }
↓ open down ↓ |
36 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX