Print this page
9702 HBA drivers don't need the redundant devfs_clean step
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/intel/io/scsi/adapters/pvscsi/pvscsi.c
+++ new/usr/src/uts/intel/io/scsi/adapters/pvscsi/pvscsi.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
↓ open down ↓ |
2 lines elided |
↑ open up ↑ |
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 - * Copyright 2016 Nexenta Systems, Inc.
13 + * Copyright 2018 Nexenta Systems, Inc.
14 14 */
15 15
16 16 #include <sys/atomic.h>
17 17 #include <sys/cmn_err.h>
18 18 #include <sys/conf.h>
19 19 #include <sys/cpuvar.h>
20 20 #include <sys/ddi.h>
21 21 #include <sys/errno.h>
22 -#include <sys/fs/dv_node.h>
23 22 #include <sys/kmem.h>
24 23 #include <sys/kmem_impl.h>
25 24 #include <sys/list.h>
26 25 #include <sys/modctl.h>
27 26 #include <sys/pci.h>
28 27 #include <sys/scsi/scsi.h>
29 28 #include <sys/sunddi.h>
30 29 #include <sys/sysmacros.h>
31 30 #include <sys/time.h>
32 31 #include <sys/types.h>
33 32
34 33 #include "pvscsi.h"
35 34 #include "pvscsi_var.h"
36 35
/*
 * Module-global tunables: MSI usage and the number of pages allocated
 * for the request/completion and message rings.
 */
int pvscsi_enable_msi = 1;
int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING;
int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING;

static int pvscsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);

/* Per-instance soft-state anchor (presumably ddi_soft_state; verify). */
static void *pvscsi_sstate;
44 43
45 44 /* HBA DMA attributes */
46 45 static ddi_dma_attr_t pvscsi_hba_dma_attr = {
47 46 .dma_attr_version = DMA_ATTR_V0,
48 47 .dma_attr_addr_lo = 0x0000000000000000ull,
49 48 .dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull,
50 49 .dma_attr_count_max = 0x000000007FFFFFFFull,
51 50 .dma_attr_align = 0x0000000000000001ull,
52 51 .dma_attr_burstsizes = 0x7ff,
53 52 .dma_attr_minxfer = 0x00000001u,
54 53 .dma_attr_maxxfer = 0x00000000FFFFFFFFull,
55 54 .dma_attr_seg = 0x00000000FFFFFFFFull,
56 55 .dma_attr_sgllen = 1,
57 56 .dma_attr_granular = 0x00000200u,
58 57 .dma_attr_flags = 0
59 58 };
60 59
61 60 /* DMA attributes for req/comp rings */
62 61 static ddi_dma_attr_t pvscsi_ring_dma_attr = {
63 62 .dma_attr_version = DMA_ATTR_V0,
64 63 .dma_attr_addr_lo = 0x0000000000000000ull,
65 64 .dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull,
66 65 .dma_attr_count_max = 0x000000007FFFFFFFull,
67 66 .dma_attr_align = 0x0000000000000001ull,
68 67 .dma_attr_burstsizes = 0x7ff,
69 68 .dma_attr_minxfer = 0x00000001u,
70 69 .dma_attr_maxxfer = 0x00000000FFFFFFFFull,
71 70 .dma_attr_seg = 0x00000000FFFFFFFFull,
72 71 .dma_attr_sgllen = 1,
73 72 .dma_attr_granular = 0x00000001u,
74 73 .dma_attr_flags = 0
75 74 };
76 75
77 76 /* DMA attributes for buffer I/O */
78 77 static ddi_dma_attr_t pvscsi_io_dma_attr = {
79 78 .dma_attr_version = DMA_ATTR_V0,
80 79 .dma_attr_addr_lo = 0x0000000000000000ull,
81 80 .dma_attr_addr_hi = 0xFFFFFFFFFFFFFFFFull,
82 81 .dma_attr_count_max = 0x000000007FFFFFFFull,
83 82 .dma_attr_align = 0x0000000000000001ull,
84 83 .dma_attr_burstsizes = 0x7ff,
85 84 .dma_attr_minxfer = 0x00000001u,
86 85 .dma_attr_maxxfer = 0x00000000FFFFFFFFull,
87 86 .dma_attr_seg = 0x00000000FFFFFFFFull,
88 87 .dma_attr_sgllen = PVSCSI_MAX_SG_SIZE,
89 88 .dma_attr_granular = 0x00000200u,
90 89 .dma_attr_flags = 0
91 90 };
92 91
93 92 static ddi_device_acc_attr_t pvscsi_mmio_attr = {
94 93 DDI_DEVICE_ATTR_V1,
95 94 DDI_STRUCTURE_LE_ACC,
96 95 DDI_STRICTORDER_ACC,
97 96 DDI_DEFAULT_ACC
98 97 };
99 98
100 99 static ddi_device_acc_attr_t pvscsi_dma_attrs = {
101 100 DDI_DEVICE_ATTR_V0,
102 101 DDI_STRUCTURE_LE_ACC,
103 102 DDI_STRICTORDER_ACC,
104 103 DDI_DEFAULT_ACC,
105 104 };
106 105
/*
 * Append cmd to its softc's queue of outstanding commands.
 * Caller must hold pvs->mutex; cmd must not already be on the queue.
 */
static void
pvscsi_add_to_queue(pvscsi_cmd_t *cmd)
{
	pvscsi_softc_t *pvs = cmd->cmd_pvs;

	ASSERT(pvs != NULL);
	ASSERT(mutex_owned(&pvs->mutex));
	ASSERT(!list_link_active(&(cmd)->cmd_queue_node));

	list_insert_tail(&pvs->cmd_queue, cmd);
	pvs->cmd_queue_len++;
}
119 118
/*
 * Remove cmd from its softc's queue of outstanding commands.
 * Caller must hold pvs->mutex.  The link-active check is asserted on
 * DEBUG builds and guarded at runtime on non-DEBUG builds, so a
 * not-queued command is a no-op there rather than list corruption.
 */
static void
pvscsi_remove_from_queue(pvscsi_cmd_t *cmd)
{
	pvscsi_softc_t *pvs = cmd->cmd_pvs;

	ASSERT(pvs != NULL);
	ASSERT(mutex_owned(&pvs->mutex));
	ASSERT(list_link_active(&cmd->cmd_queue_node));
	ASSERT(pvs->cmd_queue_len > 0);

	if (list_link_active(&cmd->cmd_queue_node)) {
		list_remove(&pvs->cmd_queue, cmd);
		pvs->cmd_queue_len--;
	}
}
135 134
136 135 static uint64_t
137 136 pvscsi_map_ctx(pvscsi_softc_t *pvs, pvscsi_cmd_ctx_t *io_ctx)
138 137 {
139 138 return (io_ctx - pvs->cmd_ctx + 1);
140 139 }
141 140
142 141 static pvscsi_cmd_ctx_t *
143 142 pvscsi_lookup_ctx(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd)
144 143 {
145 144 pvscsi_cmd_ctx_t *ctx, *end;
146 145
147 146 end = &pvs->cmd_ctx[pvs->req_depth];
148 147 for (ctx = pvs->cmd_ctx; ctx < end; ctx++) {
149 148 if (ctx->cmd == cmd)
150 149 return (ctx);
151 150 }
152 151
153 152 return (NULL);
154 153 }
155 154
156 155 static pvscsi_cmd_ctx_t *
157 156 pvscsi_resolve_ctx(pvscsi_softc_t *pvs, uint64_t ctx)
158 157 {
159 158 if (ctx > 0 && ctx <= pvs->req_depth)
160 159 return (&pvs->cmd_ctx[ctx - 1]);
161 160 else
162 161 return (NULL);
163 162 }
164 163
165 164 static boolean_t
166 165 pvscsi_acquire_ctx(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd)
167 166 {
168 167 pvscsi_cmd_ctx_t *ctx;
169 168
170 169 if (list_is_empty(&pvs->cmd_ctx_pool))
171 170 return (B_FALSE);
172 171
173 172 ctx = (pvscsi_cmd_ctx_t *)list_remove_head(&pvs->cmd_ctx_pool);
174 173 ASSERT(ctx != NULL);
175 174
176 175 ctx->cmd = cmd;
177 176 cmd->ctx = ctx;
178 177
179 178 return (B_TRUE);
180 179 }
181 180
182 181 static void
183 182 pvscsi_release_ctx(pvscsi_cmd_t *cmd)
184 183 {
185 184 pvscsi_softc_t *pvs = cmd->cmd_pvs;
186 185
187 186 ASSERT(mutex_owned(&pvs->mutex));
188 187
189 188 cmd->ctx->cmd = NULL;
190 189 list_insert_tail(&pvs->cmd_ctx_pool, cmd->ctx);
191 190 cmd->ctx = NULL;
192 191 }
193 192
194 193 static uint32_t
195 194 pvscsi_reg_read(pvscsi_softc_t *pvs, uint32_t offset)
196 195 {
197 196 uint32_t ret;
198 197
199 198 ASSERT((offset & (sizeof (uint32_t) - 1)) == 0);
200 199
201 200 ret = ddi_get32(pvs->mmio_handle,
202 201 (uint32_t *)(pvs->mmio_base + offset));
203 202
204 203 return (ret);
205 204 }
206 205
/*
 * Write a 32-bit device register at the given byte offset (must be
 * 32-bit aligned).
 */
static void
pvscsi_reg_write(pvscsi_softc_t *pvs, uint32_t offset, uint32_t value)
{
	ASSERT((offset & (sizeof (uint32_t) - 1)) == 0);

	ddi_put32(pvs->mmio_handle, (uint32_t *)(pvs->mmio_base + offset),
	    value);
}
215 214
/*
 * Issue a command to the adapter: write the command code to the COMMAND
 * register, then stream the descriptor into the COMMAND_DATA register
 * one 32-bit word at a time (the register does not auto-increment).
 * 'len' is in bytes and is assumed to be a multiple of 4.
 */
static void
pvscsi_write_cmd_desc(pvscsi_softc_t *pvs, uint32_t cmd, void *desc, size_t len)
{
	len /= sizeof (uint32_t);
	pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_COMMAND, cmd);
	ddi_rep_put32(pvs->mmio_handle, (uint32_t *)desc,
	    (uint32_t *)(pvs->mmio_base + PVSCSI_REG_OFFSET_COMMAND_DATA),
	    len, DDI_DEV_NO_AUTOINCR);
}
225 224
/* Read the interrupt status register. */
static uint32_t
pvscsi_read_intr_status(pvscsi_softc_t *pvs)
{
	return (pvscsi_reg_read(pvs, PVSCSI_REG_OFFSET_INTR_STATUS));
}
231 230
/* Write (acknowledge) bits in the interrupt status register. */
static void
pvscsi_write_intr_status(pvscsi_softc_t *pvs, uint32_t val)
{
	pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}
237 236
/*
 * Disable device interrupts.  Mask/unmask calls nest: the mask register
 * is only written on the 0 -> 1 transition of the lock counter.
 */
static void
pvscsi_mask_intr(pvscsi_softc_t *pvs)
{
	mutex_enter(&pvs->intr_mutex);

	VERIFY(pvs->intr_lock_counter >= 0);

	if (++pvs->intr_lock_counter == 1)
		pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_MASK, 0);

	mutex_exit(&pvs->intr_mutex);
}
250 249
/*
 * Re-enable device interrupts, balancing a prior pvscsi_mask_intr().
 * Completion and message interrupts are unmasked only when the nesting
 * counter drops back to zero.
 */
static void
pvscsi_unmask_intr(pvscsi_softc_t *pvs)
{
	mutex_enter(&pvs->intr_mutex);

	VERIFY(pvs->intr_lock_counter > 0);

	if (--pvs->intr_lock_counter == 0) {
		pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_MASK,
		    PVSCSI_INTR_CMPL_MASK | PVSCSI_INTR_MSG_MASK);
	}

	mutex_exit(&pvs->intr_mutex);
}
265 264
/* Issue an adapter-wide reset command to the device. */
static void
pvscsi_reset_hba(pvscsi_softc_t *pvs)
{
	pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
}
271 270
/* Issue a SCSI bus reset command to the device. */
static void
pvscsi_reset_bus(pvscsi_softc_t *pvs)
{
	pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_RESET_BUS, NULL, 0);
}
277 276
/* Kick the device to process queued non-read/write requests. */
static void
pvscsi_submit_nonrw_io(pvscsi_softc_t *pvs)
{
	pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
}
283 282
/* Kick the device to process queued read/write requests. */
static void
pvscsi_submit_rw_io(pvscsi_softc_t *pvs)
{
	pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
}
289 288
290 289
/*
 * Probe a target with a synchronous GROUP0 INQUIRY to LUN 0.
 * On success (return 0) the inquiry data is copied into *inq when inq
 * is non-NULL; on any failure a nonzero value is returned.
 */
static int
pvscsi_inquiry_target(pvscsi_softc_t *pvs, int target, struct scsi_inquiry *inq)
{
	int len = sizeof (struct scsi_inquiry);
	int ret = -1;
	struct buf *b;
	struct scsi_address ap;
	struct scsi_pkt *pkt;
	uint8_t cdb[CDB_GROUP0];

	/* Build a scsi_address for this target by hand. */
	ap.a_hba_tran = pvs->tran;
	ap.a_target = (ushort_t)target;
	ap.a_lun = (uchar_t)0;

	if ((b = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL, len,
	    B_READ, NULL_FUNC, NULL)) == NULL)
		return (-1);

	if ((pkt = scsi_init_pkt(&ap, (struct scsi_pkt *)NULL, b,
	    CDB_GROUP0, sizeof (struct scsi_arq_status), 0, 0,
	    NULL_FUNC, NULL)) == NULL)
		goto free_buf;

	/* INQUIRY CDB; allocation length split across bytes 3/4. */
	cdb[0] = SCMD_INQUIRY;
	cdb[1] = 0;
	cdb[2] = 0;
	cdb[3] = (len & 0xff00) >> 8;
	cdb[4] = (len & 0x00ff);
	cdb[5] = 0;

	if (inq != NULL)
		bzero(inq, sizeof (*inq));
	bcopy(cdb, pkt->pkt_cdbp, CDB_GROUP0);
	bzero((struct scsi_inquiry *)b->b_un.b_addr, sizeof (*inq));

	/* scsi_poll() runs the packet synchronously; 0 means success. */
	if ((ret = scsi_poll(pkt)) == 0 && inq != NULL)
		bcopy(b->b_un.b_addr, inq, sizeof (*inq));

	scsi_destroy_pkt(pkt);

free_buf:
	scsi_free_consistent_buf(b);

	return (ret);
}
336 335
/*
 * Configure or unconfigure the child devinfo node for one target.
 *
 * The target is probed with a synchronous INQUIRY; combined with
 * whether we already track a devnode for it, four cases result:
 *  - tracked + INQUIRY failed: target disappeared; offline the node
 *    (NDI_DEVFS_CLEAN has the framework clean the devfs cache, which
 *    this driver used to do by hand via devfs_clean()) and free it.
 *  - tracked + INQUIRY ok: already configured; hand back its dip.
 *  - untracked + INQUIRY failed: no such target -> NDI_FAILURE.
 *  - untracked + INQUIRY ok: allocate, property-init, and online a
 *    new child node and track it on pvs->devnodes.
 *
 * Caller must hold the parent busy (ndi_devi_enter), as asserted.
 * Returns NDI_SUCCESS or NDI_FAILURE; on success *childp (if non-NULL)
 * receives the child dip.
 */
static int
pvscsi_config_one(dev_info_t *pdip, pvscsi_softc_t *pvs, int target,
    dev_info_t **childp)
{
	char **compatible = NULL;
	char *nodename = NULL;
	dev_info_t *dip;
	int inqrc;
	int ncompatible = 0;
	pvscsi_device_t *devnode;
	struct scsi_inquiry inq;

	ASSERT(DEVI_BUSY_OWNED(pdip));

	/* Inquiry target */
	inqrc = pvscsi_inquiry_target(pvs, target, &inq);

	/* Find devnode */
	for (devnode = list_head(&pvs->devnodes); devnode != NULL;
	    devnode = list_next(&pvs->devnodes, devnode)) {
		if (devnode->target == target)
			break;
	}

	if (devnode != NULL) {
		if (inqrc != 0) {
			/* Target disappeared, drop devnode */
			(void) ndi_devi_offline(devnode->pdip,
			    NDI_DEVFS_CLEAN | NDI_DEVI_REMOVE);
			list_remove(&pvs->devnodes, devnode);
			kmem_free(devnode, sizeof (*devnode));
		} else if (childp != NULL) {
			/* Target exists */
			*childp = devnode->pdip;
		}
		return (NDI_SUCCESS);
	} else if (inqrc != 0) {
		/* Target doesn't exist */
		return (NDI_FAILURE);
	}

	/* Derive node name and compatible list from the inquiry data. */
	scsi_hba_nodename_compatible_get(&inq, NULL, inq.inq_dtype, NULL,
	    &nodename, &compatible, &ncompatible);
	if (nodename == NULL)
		goto free_nodename;

	if (ndi_devi_alloc(pdip, nodename, DEVI_SID_NODEID,
	    &dip) != NDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to alloc device instance");
		goto free_nodename;
	}

	if (ndi_prop_update_string(DDI_DEV_T_NONE, dip,
	    "device-type", "scsi") != DDI_PROP_SUCCESS ||
	    ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "target", target) != DDI_PROP_SUCCESS ||
	    ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "lun", 0) != DDI_PROP_SUCCESS ||
	    ndi_prop_update_int(DDI_DEV_T_NONE, dip,
	    "pm-capable", 1) != DDI_PROP_SUCCESS ||
	    ndi_prop_update_string_array(DDI_DEV_T_NONE, dip,
	    "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
		dev_err(pvs->dip, CE_WARN,
		    "!failed to update props for target %d", target);
		goto free_devi;
	}

	if ((devnode = kmem_zalloc(sizeof (*devnode), KM_NOSLEEP)) == NULL)
		goto free_devi;

	if (ndi_devi_online(dip, NDI_ONLINE_ATTACH) != NDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to online target %d",
		    target);
		kmem_free(devnode, sizeof (*devnode));
		goto free_devi;
	}

	devnode->target = target;
	devnode->pdip = dip;
	devnode->parent = pdip;
	list_insert_tail(&pvs->devnodes, devnode);

	if (childp != NULL)
		*childp = dip;

	scsi_hba_nodename_compatible_free(nodename, compatible);

	return (NDI_SUCCESS);

free_devi:
	ndi_prop_remove_all(dip);
	(void) ndi_devi_free(dip);
free_nodename:
	scsi_hba_nodename_compatible_free(nodename, compatible);

	return (NDI_FAILURE);
}
445 432
446 433 static int
447 434 pvscsi_config_all(dev_info_t *pdip, pvscsi_softc_t *pvs)
448 435 {
449 436 int target;
450 437
451 438 for (target = 0; target < PVSCSI_MAXTGTS; target++) {
452 439 /* ndi_devi_enter is done in pvscsi_bus_config */
453 440 (void) pvscsi_config_one(pdip, pvs, target, NULL);
454 441 }
455 442
456 443 return (NDI_SUCCESS);
457 444 }
458 445
/*
 * Drain the completion ring.  Every completed command is taken off the
 * ring, has its completion status saved in cmd->cmp_stat, is flagged
 * PVSCSI_FLAG_HW_STATUS, and is chained (via next_cmd) onto a list
 * whose head is returned; head->tail_cmd always points at the last
 * element so callers can splice lists cheaply.  The membar_consumer()
 * calls order our reads against the device's producer-side writes.
 * Caller must hold pvs->rx_mutex.
 */
static pvscsi_cmd_t *
pvscsi_process_comp_ring(pvscsi_softc_t *pvs)
{
	pvscsi_cmd_t **pnext_cmd = NULL;
	pvscsi_cmd_t *cmd;
	pvscsi_cmd_t *head = NULL;
	struct PVSCSIRingsState *sdesc = RINGS_STATE(pvs);
	uint32_t cmp_ne = sdesc->cmpNumEntriesLog2;

	ASSERT(mutex_owned(&pvs->rx_mutex));

	while (sdesc->cmpConsIdx != sdesc->cmpProdIdx) {
		pvscsi_cmd_ctx_t *ctx;
		struct PVSCSIRingCmpDesc *cdesc;

		/* Ring is a power of two; mask gives the slot index. */
		cdesc = CMP_RING(pvs) + (sdesc->cmpConsIdx & MASK(cmp_ne));
		membar_consumer();

		ctx = pvscsi_resolve_ctx(pvs, cdesc->context);
		ASSERT(ctx != NULL);

		if ((cmd = ctx->cmd) != NULL) {
			cmd->next_cmd = NULL;

			/* Save command status for further processing */
			cmd->cmp_stat.host_status = cdesc->hostStatus;
			cmd->cmp_stat.scsi_status = cdesc->scsiStatus;
			cmd->cmp_stat.data_len = cdesc->dataLen;

			/* Mark this command as arrived from hardware */
			cmd->flags |= PVSCSI_FLAG_HW_STATUS;

			if (head == NULL) {
				head = cmd;
				head->tail_cmd = cmd;
			} else {
				head->tail_cmd = cmd;
			}

			if (pnext_cmd == NULL) {
				pnext_cmd = &cmd->next_cmd;
			} else {
				*pnext_cmd = cmd;
				pnext_cmd = &cmd->next_cmd;
			}
		}

		membar_consumer();
		sdesc->cmpConsIdx++;
	}

	return (head);
}
512 499
513 500 static pvscsi_msg_t *
514 501 pvscsi_process_msg_ring(pvscsi_softc_t *pvs)
515 502 {
516 503 pvscsi_msg_t *msg;
517 504 struct PVSCSIRingsState *sdesc = RINGS_STATE(pvs);
518 505 struct PVSCSIRingMsgDesc *mdesc;
519 506 struct PVSCSIMsgDescDevStatusChanged *desc;
520 507 uint32_t msg_ne = sdesc->msgNumEntriesLog2;
521 508
522 509 ASSERT(mutex_owned(&pvs->rx_mutex));
523 510
524 511 if (sdesc->msgProdIdx == sdesc->msgConsIdx)
525 512 return (NULL);
526 513
527 514 mdesc = MSG_RING(pvs) + (sdesc->msgConsIdx & MASK(msg_ne));
528 515 membar_consumer();
529 516
530 517 switch (mdesc->type) {
531 518 case PVSCSI_MSG_DEV_ADDED:
532 519 case PVSCSI_MSG_DEV_REMOVED:
533 520 desc = (struct PVSCSIMsgDescDevStatusChanged *)mdesc;
534 521 msg = kmem_alloc(sizeof (pvscsi_msg_t), KM_NOSLEEP);
535 522 if (msg == NULL)
536 523 return (NULL);
537 524 msg->msg_pvs = pvs;
538 525 msg->type = mdesc->type;
539 526 msg->target = desc->target;
540 527 break;
541 528 default:
542 529 dev_err(pvs->dip, CE_WARN, "!unknown msg type: %d",
543 530 mdesc->type);
544 531 return (NULL);
545 532 }
546 533
547 534 membar_consumer();
548 535 sdesc->msgConsIdx++;
549 536
550 537 return (msg);
551 538 }
552 539
/*
 * Deferred (taskq-style) handler for a device status-change message:
 * reconfigure the affected target under the HBA node, then free the
 * message allocated by pvscsi_process_msg_ring().
 */
static void
pvscsi_handle_msg(void *arg)
{
	pvscsi_msg_t *msg = (pvscsi_msg_t *)arg;
	dev_info_t *dip = msg->msg_pvs->dip;
	int circ;

	/* Hold the parent busy across child (un)configuration. */
	ndi_devi_enter(dip, &circ);
	(void) pvscsi_config_one(dip, msg->msg_pvs, msg->target, NULL);
	ndi_devi_exit(dip, circ);

	kmem_free(msg, sizeof (pvscsi_msg_t));
}
566 553
/*
 * Try to abort one command on the adapter.  Both tx_mutex and rx_mutex
 * must be held.  Any commands found completed along the way are chained
 * onto *pending for the caller to finish once the locks are dropped.
 *
 * Returns CMD_CMPLT when the command turns out to be already completed
 * (or was never actually handed to the HBA), or CMD_ABORTED when it was
 * cancelled — in which case cmd itself is prepended to *pending.
 */
static int
pvscsi_abort_cmd(pvscsi_cmd_t *cmd, pvscsi_cmd_t **pending)
{
	pvscsi_softc_t *pvs = cmd->cmd_pvs;
	pvscsi_cmd_t *c;
	pvscsi_cmd_t *done;
	struct PVSCSICmdDescAbortCmd acmd;

	dev_err(pvs->dip, CE_WARN, "!aborting command %p", (void *)cmd);

	ASSERT(mutex_owned(&pvs->rx_mutex));
	ASSERT(mutex_owned(&pvs->tx_mutex));

	/* Check if the cmd was already completed by the HBA */
	*pending = done = pvscsi_process_comp_ring(pvs);
	for (c = done; c != NULL; c = c->next_cmd) {
		if (c == cmd)
			return (CMD_CMPLT);
	}

	/* Check if cmd was really scheduled by the HBA */
	if (pvscsi_lookup_ctx(pvs, cmd) == NULL)
		return (CMD_CMPLT);

	/* Abort cmd in the HBA */
	bzero(&acmd, sizeof (acmd));
	acmd.target = cmd->cmd_target;
	acmd.context = pvscsi_map_ctx(pvs, cmd->ctx);
	pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_ABORT_CMD, &acmd, sizeof (acmd));

	/* Check if cmd was completed by the HBA before it could be aborted */
	if ((done = pvscsi_process_comp_ring(pvs)) != NULL) {
		/* Splice the new completions ahead of the earlier ones. */
		done->tail_cmd->next_cmd = *pending;
		*pending = done;
		for (c = done; c != NULL; c = c->next_cmd) {
			if (c == cmd)
				return (CMD_CMPLT);
		}
	}

	/* Release I/O ctx */
	mutex_enter(&pvs->mutex);
	if (cmd->ctx != NULL)
		pvscsi_release_ctx(cmd);
	/* Remove cmd from the queue */
	pvscsi_remove_from_queue(cmd);
	mutex_exit(&pvs->mutex);

	/* Insert cmd at the beginning of the list */
	cmd->next_cmd = *pending;
	*pending = cmd;

	dev_err(pvs->dip, CE_WARN, "!command %p aborted", (void *)cmd);

	return (CMD_ABORTED);
}
623 610
/*
 * Fill in the request descriptor's data address/length from the
 * command's cached DMA cookies.  A multi-cookie transfer is described
 * through the per-context scatter/gather list (dataAddr then points at
 * the SG list's physical address); a single cookie is passed directly.
 */
static void
pvscsi_map_buffers(pvscsi_cmd_t *cmd, struct PVSCSIRingReqDesc *rdesc)
{
	int i;

	ASSERT(cmd->ctx);
	ASSERT(cmd->cmd_dmaccount > 0 && cmd->cmd_dmaccount <=
	    PVSCSI_MAX_SG_SIZE);

	rdesc->dataLen = cmd->cmd_dma_count;
	rdesc->dataAddr = 0;

	/* Zero-length transfer: nothing to map. */
	if (cmd->cmd_dma_count == 0)
		return;

	if (cmd->cmd_dmaccount > 1) {
		struct PVSCSISGElement *sgl = CMD_CTX_SGLIST_VA(cmd->ctx);

		for (i = 0; i < cmd->cmd_dmaccount; i++) {
			sgl[i].addr = cmd->cached_cookies[i].dmac_laddress;
			sgl[i].length = cmd->cached_cookies[i].dmac_size;
			sgl[i].flags = 0;
		}
		rdesc->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
		rdesc->dataAddr = (uint64_t)CMD_CTX_SGLIST_PA(cmd->ctx);
	} else {
		rdesc->dataAddr = cmd->cached_cookies[0].dmac_laddress;
	}
}
653 640
654 641 static void
655 642 pvscsi_comp_cmd(pvscsi_cmd_t *cmd, uint8_t status)
656 643 {
657 644 struct scsi_pkt *pkt = CMD2PKT(cmd);
658 645
659 646 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD |
660 647 STATE_GOT_STATUS);
661 648 if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0)
662 649 pkt->pkt_state |= STATE_XFERRED_DATA;
663 650 pkt->pkt_reason = CMD_CMPLT;
664 651 pkt->pkt_resid = 0;
665 652 *(pkt->pkt_scbp) = status;
666 653 }
667 654
668 655 static void
669 656 pvscsi_set_status(pvscsi_cmd_t *cmd)
670 657 {
671 658 pvscsi_softc_t *pvs = cmd->cmd_pvs;
672 659 struct scsi_pkt *pkt = CMD2PKT(cmd);
673 660 uchar_t scsi_status = cmd->cmp_stat.scsi_status;
674 661 uint32_t host_status = cmd->cmp_stat.host_status;
675 662
676 663 if (scsi_status != STATUS_GOOD &&
677 664 (host_status == BTSTAT_SUCCESS ||
678 665 (host_status == BTSTAT_LINKED_COMMAND_COMPLETED) ||
679 666 (host_status == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG))) {
680 667 if (scsi_status == STATUS_CHECK) {
681 668 struct scsi_arq_status *astat = (void*)(pkt->pkt_scbp);
682 669 uint8_t *sensedata;
683 670 int arq_size;
684 671
685 672 *pkt->pkt_scbp = scsi_status;
686 673 pkt->pkt_state |= STATE_ARQ_DONE;
687 674
688 675 if ((cmd->flags & PVSCSI_FLAG_XARQ) != 0) {
689 676 arq_size = (cmd->cmd_rqslen >=
690 677 SENSE_BUFFER_SIZE) ? SENSE_BUFFER_SIZE :
691 678 cmd->cmd_rqslen;
692 679
693 680 astat->sts_rqpkt_resid = SENSE_BUFFER_SIZE -
694 681 arq_size;
695 682 sensedata = (uint8_t *)&astat->sts_sensedata;
696 683 bcopy(cmd->arqbuf->b_un.b_addr, sensedata,
697 684 arq_size);
698 685
699 686 pkt->pkt_state |= STATE_XARQ_DONE;
700 687 } else {
701 688 astat->sts_rqpkt_resid = 0;
702 689 }
703 690
704 691 astat->sts_rqpkt_statistics = 0;
705 692 astat->sts_rqpkt_reason = CMD_CMPLT;
706 693 (*(uint8_t *)&astat->sts_rqpkt_status) = STATUS_GOOD;
707 694 astat->sts_rqpkt_state = STATE_GOT_BUS |
708 695 STATE_GOT_TARGET | STATE_SENT_CMD |
709 696 STATE_XFERRED_DATA | STATE_GOT_STATUS;
710 697 }
711 698 pvscsi_comp_cmd(cmd, scsi_status);
712 699
713 700 return;
714 701 }
715 702
716 703 switch (host_status) {
717 704 case BTSTAT_SUCCESS:
718 705 case BTSTAT_LINKED_COMMAND_COMPLETED:
719 706 case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
720 707 pvscsi_comp_cmd(cmd, STATUS_GOOD);
721 708 break;
722 709 case BTSTAT_DATARUN:
723 710 pkt->pkt_reason = CMD_DATA_OVR;
724 711 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
725 712 STATE_SENT_CMD | STATE_GOT_STATUS |
726 713 STATE_XFERRED_DATA);
727 714 pkt->pkt_resid = 0;
728 715 break;
729 716 case BTSTAT_DATA_UNDERRUN:
730 717 pkt->pkt_reason = pkt->pkt_state |= (STATE_GOT_BUS |
731 718 STATE_GOT_TARGET | STATE_SENT_CMD | STATE_GOT_STATUS);
732 719 pkt->pkt_resid = cmd->dma_count - cmd->cmp_stat.data_len;
733 720 if (pkt->pkt_resid != cmd->dma_count)
734 721 pkt->pkt_state |= STATE_XFERRED_DATA;
735 722 break;
736 723 case BTSTAT_SELTIMEO:
737 724 pkt->pkt_reason = CMD_DEV_GONE;
738 725 pkt->pkt_state |= STATE_GOT_BUS;
739 726 break;
740 727 case BTSTAT_TAGREJECT:
741 728 pkt->pkt_reason = CMD_TAG_REJECT;
742 729 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
743 730 STATE_SENT_CMD | STATE_GOT_STATUS);
744 731 break;
745 732 case BTSTAT_BADMSG:
746 733 pkt->pkt_reason = CMD_BADMSG;
747 734 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
748 735 STATE_SENT_CMD | STATE_GOT_STATUS);
749 736 break;
750 737 case BTSTAT_SENTRST:
751 738 case BTSTAT_RECVRST:
752 739 case BTSTAT_BUSRESET:
753 740 pkt->pkt_reason = CMD_RESET;
754 741 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
755 742 STATE_SENT_CMD | STATE_GOT_STATUS);
756 743 break;
757 744 case BTSTAT_ABORTQUEUE:
758 745 pkt->pkt_reason = CMD_ABORTED;
759 746 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
760 747 STATE_SENT_CMD | STATE_GOT_STATUS);
761 748 break;
762 749 case BTSTAT_HAHARDWARE:
763 750 case BTSTAT_INVPHASE:
764 751 case BTSTAT_HATIMEOUT:
765 752 case BTSTAT_NORESPONSE:
766 753 case BTSTAT_DISCONNECT:
767 754 case BTSTAT_HASOFTWARE:
768 755 case BTSTAT_BUSFREE:
769 756 case BTSTAT_SENSFAILED:
770 757 pkt->pkt_reason = CMD_TRAN_ERR;
771 758 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
772 759 STATE_SENT_CMD | STATE_GOT_STATUS);
773 760 break;
774 761 default:
775 762 dev_err(pvs->dip, CE_WARN,
776 763 "!unknown host status code: %d", host_status);
777 764 pkt->pkt_reason = CMD_TRAN_ERR;
778 765 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
779 766 STATE_SENT_CMD | STATE_GOT_STATUS);
780 767 break;
781 768 }
782 769 }
783 770
/*
 * Complete a chain of commands linked through next_cmd: sync DMA for
 * IOPB reads, release the I/O context, detach from the softc queue,
 * derive the final packet status (hardware status if the command came
 * back from the HBA, else the timeout/abort/reset flags set by the
 * driver), and invoke the target's completion callback unless the
 * packet was issued FLAG_NOINTR.
 *
 * NOTE(review): a NULL pkt returns and abandons the WHOLE remaining
 * chain, not just the current element — confirm pkt can never be NULL
 * mid-chain, otherwise completions are silently dropped here.
 */
static void
pvscsi_complete_chained(void *arg)
{
	pvscsi_cmd_t *cmd = (pvscsi_cmd_t *)arg;
	pvscsi_cmd_t *c;
	struct scsi_pkt *pkt;

	while (cmd != NULL) {
		pvscsi_softc_t *pvs = cmd->cmd_pvs;

		/* Detach from the chain before completing. */
		c = cmd->next_cmd;
		cmd->next_cmd = NULL;

		pkt = CMD2PKT(cmd);
		if (pkt == NULL)
			return;

		if ((cmd->flags & PVSCSI_FLAG_IO_IOPB) != 0 &&
		    (cmd->flags & PVSCSI_FLAG_IO_READ) != 0) {
			(void) ddi_dma_sync(cmd->cmd_dmahdl, 0, 0,
			    DDI_DMA_SYNC_FORCPU);
		}

		mutex_enter(&pvs->mutex);
		/* Release I/O ctx */
		if (cmd->ctx != NULL)
			pvscsi_release_ctx(cmd);
		/* Remove command from queue */
		pvscsi_remove_from_queue(cmd);
		mutex_exit(&pvs->mutex);

		if ((cmd->flags & PVSCSI_FLAG_HW_STATUS) != 0) {
			/* The HBA reported a status; translate it. */
			pvscsi_set_status(cmd);
		} else {
			/* Completed by the driver itself. */
			ASSERT((cmd->flags & PVSCSI_FLAGS_NON_HW_COMPLETION) !=
			    0);

			if ((cmd->flags & PVSCSI_FLAG_TIMED_OUT) != 0) {
				cmd->pkt->pkt_reason = CMD_TIMEOUT;
				cmd->pkt->pkt_statistics |=
				    (STAT_TIMEOUT | STAT_ABORTED);
			} else if ((cmd->flags & PVSCSI_FLAG_ABORTED) != 0) {
				cmd->pkt->pkt_reason = CMD_ABORTED;
				cmd->pkt->pkt_statistics |=
				    (STAT_TIMEOUT | STAT_ABORTED);
			} else if ((cmd->flags & PVSCSI_FLAGS_RESET) != 0) {
				cmd->pkt->pkt_reason = CMD_RESET;
				if ((cmd->flags & PVSCSI_FLAG_RESET_BUS) != 0) {
					cmd->pkt->pkt_statistics |=
					    STAT_BUS_RESET;
				} else {
					cmd->pkt->pkt_statistics |=
					    STAT_DEV_RESET;
				}
			}
		}

		cmd->flags |= PVSCSI_FLAG_DONE;
		cmd->flags &= ~PVSCSI_FLAG_TRANSPORT;

		if ((pkt->pkt_flags & FLAG_NOINTR) == 0 &&
		    pkt->pkt_comp != NULL)
			(*pkt->pkt_comp)(pkt);

		cmd = c;
	}
}
851 838
852 839 static void
853 840 pvscsi_dev_reset(pvscsi_softc_t *pvs, int target)
854 841 {
855 842 struct PVSCSICmdDescResetDevice cmd = { 0 };
856 843
857 844 cmd.target = target;
858 845 pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_RESET_DEVICE, &cmd, sizeof (cmd));
859 846 }
860 847
/*
 * Poll for completion of cmd (FLAG_NOINTR-style operation), using
 * cmd->pkt->pkt_time (seconds) as the overall timeout.  Runs with
 * device interrupts masked, spinning on the interrupt status register
 * and draining the completion ring directly.  If no completion is seen
 * within the timeout the command is aborted via pvscsi_abort_cmd().
 * Always returns TRAN_ACCEPT.
 */
static int
pvscsi_poll_cmd(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd)
{
	boolean_t seen_intr;
	/* Number of USECS_TO_WAIT polling intervals in pkt_time seconds. */
	int cycles = (cmd->pkt->pkt_time * 1000000) / USECS_TO_WAIT;
	int i;
	pvscsi_cmd_t *dcmd;
	struct scsi_pkt *pkt = CMD2PKT(cmd);

	/*
	 * Make sure we're not missing any commands completed
	 * concurrently before we have actually disabled interrupts.
	 */
	mutex_enter(&pvs->rx_mutex);
	dcmd = pvscsi_process_comp_ring(pvs);
	mutex_exit(&pvs->rx_mutex);

	pvscsi_complete_chained(dcmd);

	while ((cmd->flags & PVSCSI_FLAG_DONE) == 0) {
		seen_intr = B_FALSE;

		/* Disable interrupts from H/W */
		pvscsi_mask_intr(pvs);

		/* Wait for interrupt to arrive */
		for (i = 0; i < cycles; i++) {
			uint32_t status;

			mutex_enter(&pvs->rx_mutex);
			mutex_enter(&pvs->intr_mutex);
			status = pvscsi_read_intr_status(pvs);
			if ((status & PVSCSI_INTR_ALL_SUPPORTED) != 0) {
				/* Check completion ring */
				mutex_exit(&pvs->intr_mutex);
				dcmd = pvscsi_process_comp_ring(pvs);
				mutex_exit(&pvs->rx_mutex);
				seen_intr = B_TRUE;
				break;
			} else {
				mutex_exit(&pvs->intr_mutex);
				mutex_exit(&pvs->rx_mutex);
				drv_usecwait(USECS_TO_WAIT);
			}
		}

		/* Enable interrupts from H/W */
		pvscsi_unmask_intr(pvs);

		if (!seen_intr) {
			/* No interrupts seen from device during the timeout */
			mutex_enter(&pvs->tx_mutex);
			mutex_enter(&pvs->rx_mutex);
			if ((cmd->flags & PVSCSI_FLAGS_COMPLETION) != 0) {
				/* Command was cancelled asynchronously */
				dcmd = NULL;
			} else if ((pvscsi_abort_cmd(cmd,
			    &dcmd)) == CMD_ABORTED) {
				/* Command was cancelled in hardware */
				pkt->pkt_state |= (STAT_TIMEOUT | STAT_ABORTED);
				pkt->pkt_statistics |= (STAT_TIMEOUT |
				    STAT_ABORTED);
				pkt->pkt_reason = CMD_TIMEOUT;
			}
			mutex_exit(&pvs->rx_mutex);
			mutex_exit(&pvs->tx_mutex);

			/*
			 * Complete commands that might be on completion list.
			 * Target command can also be on the list in case it was
			 * completed before it could be actually cancelled.
			 */
			break;
		}

		pvscsi_complete_chained(dcmd);

		if (!seen_intr)
			break;
	}

	return (TRAN_ACCEPT);
}
944 931
/*
 * Abort every queued command addressed to ap's target (or all targets
 * when ap == NULL).  Commands found completed while cancelling are
 * merged into a single chain returned via *pending; each successfully
 * aborted command gets marker_flag set on it.  Called with tx_mutex
 * and rx_mutex held; pvs->mutex is taken/dropped around queue access.
 */
static void
pvscsi_abort_all(struct scsi_address *ap, pvscsi_softc_t *pvs,
    pvscsi_cmd_t **pending, int marker_flag)
{
	int qlen = pvs->cmd_queue_len;
	pvscsi_cmd_t *cmd, *pcmd, *phead = NULL;

	ASSERT(mutex_owned(&pvs->rx_mutex));
	ASSERT(mutex_owned(&pvs->tx_mutex));

	/*
	 * Try to abort all queued commands, merging commands waiting
	 * for completion into a single list to complete them at one
	 * time when mutex is released.
	 */
	while (qlen > 0) {
		mutex_enter(&pvs->mutex);
		cmd = list_remove_head(&pvs->cmd_queue);
		ASSERT(cmd != NULL);

		qlen--;

		if (ap == NULL || ap->a_target == cmd->cmd_target) {
			/* Snapshot queue length to detect side removals. */
			int c = --pvs->cmd_queue_len;

			mutex_exit(&pvs->mutex);

			if (pvscsi_abort_cmd(cmd, &pcmd) == CMD_ABORTED) {
				/*
				 * Assume command is completely cancelled now,
				 * so mark it as requested.
				 */
				cmd->flags |= marker_flag;
			}

			/* Account for commands the abort dequeued itself. */
			qlen -= (c - pvs->cmd_queue_len);

			/*
			 * Now merge current pending commands with
			 * previous ones.
			 */
			if (phead == NULL) {
				phead = pcmd;
			} else if (pcmd != NULL) {
				phead->tail_cmd->next_cmd = pcmd;
				phead->tail_cmd = pcmd->tail_cmd;
			}
		} else {
			/* Different target: put it back and move on. */
			list_insert_tail(&pvs->cmd_queue, cmd);
			mutex_exit(&pvs->mutex);
		}
	}

	*pending = phead;
}
1000 987
1001 988 static void
1002 989 pvscsi_quiesce_notify(pvscsi_softc_t *pvs)
1003 990 {
1004 991 mutex_enter(&pvs->mutex);
1005 992 if (pvs->cmd_queue_len == 0 &&
1006 993 (pvs->flags & PVSCSI_HBA_QUIESCE_PENDING) != 0) {
1007 994 pvs->flags &= ~PVSCSI_HBA_QUIESCE_PENDING;
1008 995 cv_broadcast(&pvs->quiescevar);
1009 996 }
1010 997 mutex_exit(&pvs->mutex);
1011 998 }
1012 999
/*
 * Hand one command to the device: acquire an I/O slot context, fill in
 * a request ring descriptor, advance the producer index and submit.
 * Returns TRAN_ACCEPT on success, or TRAN_BUSY when no context or ring
 * slot is available.
 */
static int
pvscsi_transport_command(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd)
{
	struct PVSCSIRingReqDesc *rdesc;
	struct PVSCSIRingsState *sdesc = RINGS_STATE(pvs);
	struct scsi_pkt *pkt = CMD2PKT(cmd);
	uint32_t req_ne = sdesc->reqNumEntriesLog2;

	mutex_enter(&pvs->tx_mutex);
	mutex_enter(&pvs->mutex);
	if (!pvscsi_acquire_ctx(pvs, cmd)) {
		mutex_exit(&pvs->mutex);
		mutex_exit(&pvs->tx_mutex);
		dev_err(pvs->dip, CE_WARN, "!no free ctx available");
		return (TRAN_BUSY);
	}

	/* Ring is full when the producer laps the consumer. */
	if ((sdesc->reqProdIdx - sdesc->cmpConsIdx) >= (1 << req_ne)) {
		pvscsi_release_ctx(cmd);
		mutex_exit(&pvs->mutex);
		mutex_exit(&pvs->tx_mutex);
		dev_err(pvs->dip, CE_WARN, "!no free I/O slots available");
		return (TRAN_BUSY);
	}
	mutex_exit(&pvs->mutex);

	cmd->flags |= PVSCSI_FLAG_TRANSPORT;

	/* Next free descriptor in the (power-of-two sized) request ring. */
	rdesc = REQ_RING(pvs) + (sdesc->reqProdIdx & MASK(req_ne));

	bzero(&rdesc->lun, sizeof (rdesc->lun));

	rdesc->bus = 0;
	rdesc->target = cmd->cmd_target;

	/* Point the descriptor at the pre-bound sense buffer for ARQ. */
	if ((cmd->flags & PVSCSI_FLAG_XARQ) != 0) {
		bzero((void*)cmd->arqbuf->b_un.b_addr, SENSE_BUFFER_SIZE);
		rdesc->senseLen = SENSE_BUFFER_SIZE;
		rdesc->senseAddr = cmd->arqc.dmac_laddress;
	} else {
		rdesc->senseLen = 0;
		rdesc->senseAddr = 0;
	}

	rdesc->vcpuHint = CPU->cpu_id;
	rdesc->cdbLen = cmd->cmdlen;
	bcopy(cmd->cmd_cdb, rdesc->cdb, cmd->cmdlen);

	/* Setup tag info */
	if ((cmd->flags & PVSCSI_FLAG_TAG) != 0)
		rdesc->tag = cmd->tag;
	else
		rdesc->tag = MSG_SIMPLE_QTAG;

	/* Setup I/O direction and map data buffers */
	if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0) {
		if ((cmd->flags & PVSCSI_FLAG_IO_READ) != 0)
			rdesc->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
		else
			rdesc->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
		pvscsi_map_buffers(cmd, rdesc);
	} else {
		rdesc->flags = 0;
	}

	rdesc->context = pvscsi_map_ctx(pvs, cmd->ctx);
	/* Descriptor contents must be visible before the index update. */
	membar_producer();

	sdesc->reqProdIdx++;
	membar_producer();

	mutex_enter(&pvs->mutex);
	cmd->timeout_lbolt = ddi_get_lbolt() + SEC_TO_TICK(pkt->pkt_time);
	pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD);
	pvscsi_add_to_queue(cmd);

	/* R/W CDBs take the R/W submission path, everything else the other. */
	switch (cmd->pkt->pkt_cdbp[0]) {
	case SCMD_READ:
	case SCMD_WRITE:
	case SCMD_READ_G1:
	case SCMD_WRITE_G1:
	case SCMD_READ_G4:
	case SCMD_WRITE_G4:
	case SCMD_READ_G5:
	case SCMD_WRITE_G5:
		ASSERT((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0);
		pvscsi_submit_rw_io(pvs);
		break;
	default:
		pvscsi_submit_nonrw_io(pvs);
		break;
	}
	mutex_exit(&pvs->mutex);
	mutex_exit(&pvs->tx_mutex);

	return (TRAN_ACCEPT);
}
1110 1097
1111 1098 static int
1112 1099 pvscsi_reset_generic(pvscsi_softc_t *pvs, struct scsi_address *ap)
1113 1100 {
1114 1101 boolean_t bus_reset = (ap == NULL);
1115 1102 int flags;
1116 1103 pvscsi_cmd_t *done, *aborted;
1117 1104
1118 1105 flags = bus_reset ? PVSCSI_FLAG_RESET_BUS : PVSCSI_FLAG_RESET_DEV;
1119 1106
1120 1107 mutex_enter(&pvs->tx_mutex);
1121 1108 mutex_enter(&pvs->rx_mutex);
1122 1109 /* Try to process pending requests */
1123 1110 done = pvscsi_process_comp_ring(pvs);
1124 1111
1125 1112 /* Abort all pending requests */
1126 1113 pvscsi_abort_all(ap, pvs, &aborted, flags);
1127 1114
1128 1115 /* Reset at hardware level */
1129 1116 if (bus_reset) {
1130 1117 pvscsi_reset_bus(pvs);
1131 1118 /* Should never happen after bus reset */
1132 1119 ASSERT(pvscsi_process_comp_ring(pvs) == NULL);
1133 1120 } else {
1134 1121 pvscsi_dev_reset(pvs, ap->a_target);
1135 1122 }
1136 1123 mutex_exit(&pvs->rx_mutex);
1137 1124 mutex_exit(&pvs->tx_mutex);
1138 1125
1139 1126 pvscsi_complete_chained(done);
1140 1127 pvscsi_complete_chained(aborted);
1141 1128
1142 1129 return (1);
1143 1130 }
1144 1131
1145 1132 static void
1146 1133 pvscsi_cmd_ext_free(pvscsi_cmd_t *cmd)
1147 1134 {
1148 1135 struct scsi_pkt *pkt = CMD2PKT(cmd);
1149 1136
1150 1137 if ((cmd->flags & PVSCSI_FLAG_CDB_EXT) != 0) {
1151 1138 kmem_free(pkt->pkt_cdbp, cmd->cmdlen);
1152 1139 cmd->flags &= ~PVSCSI_FLAG_CDB_EXT;
1153 1140 }
1154 1141 if ((cmd->flags & PVSCSI_FLAG_SCB_EXT) != 0) {
1155 1142 kmem_free(pkt->pkt_scbp, cmd->statuslen);
1156 1143 cmd->flags &= ~PVSCSI_FLAG_SCB_EXT;
1157 1144 }
1158 1145 if ((cmd->flags & PVSCSI_FLAG_PRIV_EXT) != 0) {
1159 1146 kmem_free(pkt->pkt_private, cmd->tgtlen);
1160 1147 cmd->flags &= ~PVSCSI_FLAG_PRIV_EXT;
1161 1148 }
1162 1149 }
1163 1150
1164 1151 /* ARGSUSED pvs */
1165 1152 static int
1166 1153 pvscsi_cmd_ext_alloc(pvscsi_softc_t *pvs, pvscsi_cmd_t *cmd, int kf)
1167 1154 {
1168 1155 struct scsi_pkt *pkt = CMD2PKT(cmd);
1169 1156 void *buf;
1170 1157
1171 1158 if (cmd->cmdlen > sizeof (cmd->cmd_cdb)) {
1172 1159 if ((buf = kmem_zalloc(cmd->cmdlen, kf)) == NULL)
1173 1160 return (DDI_FAILURE);
1174 1161 pkt->pkt_cdbp = buf;
1175 1162 cmd->flags |= PVSCSI_FLAG_CDB_EXT;
1176 1163 }
1177 1164
1178 1165 if (cmd->statuslen > sizeof (cmd->cmd_scb)) {
1179 1166 if ((buf = kmem_zalloc(cmd->statuslen, kf)) == NULL)
1180 1167 goto out;
1181 1168 pkt->pkt_scbp = buf;
1182 1169 cmd->flags |= PVSCSI_FLAG_SCB_EXT;
1183 1170 cmd->cmd_rqslen = (cmd->statuslen - sizeof (cmd->cmd_scb));
1184 1171 }
1185 1172
1186 1173 if (cmd->tgtlen > sizeof (cmd->tgt_priv)) {
1187 1174 if ((buf = kmem_zalloc(cmd->tgtlen, kf)) == NULL)
1188 1175 goto out;
1189 1176 pkt->pkt_private = buf;
1190 1177 cmd->flags |= PVSCSI_FLAG_PRIV_EXT;
1191 1178 }
1192 1179
1193 1180 return (DDI_SUCCESS);
1194 1181
1195 1182 out:
1196 1183 pvscsi_cmd_ext_free(cmd);
1197 1184
1198 1185 return (DDI_FAILURE);
1199 1186 }
1200 1187
1201 1188 static int
1202 1189 pvscsi_setup_dma_buffer(pvscsi_softc_t *pvs, size_t length,
1203 1190 pvscsi_dma_buf_t *buf)
1204 1191 {
1205 1192 ddi_dma_cookie_t cookie;
1206 1193 uint_t ccount;
1207 1194
1208 1195 if ((ddi_dma_alloc_handle(pvs->dip, &pvscsi_ring_dma_attr,
1209 1196 DDI_DMA_SLEEP, NULL, &buf->dma_handle)) != DDI_SUCCESS) {
1210 1197 dev_err(pvs->dip, CE_WARN, "!failed to allocate DMA handle");
1211 1198 return (DDI_FAILURE);
1212 1199 }
1213 1200
1214 1201 if ((ddi_dma_mem_alloc(buf->dma_handle, length, &pvscsi_dma_attrs,
1215 1202 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &buf->addr,
1216 1203 &buf->real_length, &buf->acc_handle)) != DDI_SUCCESS) {
1217 1204 dev_err(pvs->dip, CE_WARN,
1218 1205 "!failed to allocate %ld bytes for DMA buffer", length);
1219 1206 ddi_dma_free_handle(&buf->dma_handle);
1220 1207 return (DDI_FAILURE);
1221 1208 }
1222 1209
1223 1210 if ((ddi_dma_addr_bind_handle(buf->dma_handle, NULL, buf->addr,
1224 1211 buf->real_length, DDI_DMA_CONSISTENT | DDI_DMA_RDWR, DDI_DMA_SLEEP,
1225 1212 NULL, &cookie, &ccount)) != DDI_SUCCESS) {
1226 1213 dev_err(pvs->dip, CE_WARN, "!failed to bind DMA buffer");
1227 1214 ddi_dma_free_handle(&buf->dma_handle);
1228 1215 ddi_dma_mem_free(&buf->acc_handle);
1229 1216 return (DDI_FAILURE);
1230 1217 }
1231 1218
1232 1219 /* TODO Support multipart SG regions */
1233 1220 ASSERT(ccount == 1);
1234 1221
1235 1222 buf->pa = cookie.dmac_laddress;
1236 1223
1237 1224 return (DDI_SUCCESS);
1238 1225 }
1239 1226
1240 1227 static void
1241 1228 pvscsi_free_dma_buffer(pvscsi_dma_buf_t *buf)
1242 1229 {
1243 1230 ddi_dma_free_handle(&buf->dma_handle);
1244 1231 ddi_dma_mem_free(&buf->acc_handle);
1245 1232 }
1246 1233
1247 1234 static int
1248 1235 pvscsi_setup_sg(pvscsi_softc_t *pvs)
1249 1236 {
1250 1237 int i;
1251 1238 pvscsi_cmd_ctx_t *ctx;
1252 1239 size_t size = pvs->req_depth * sizeof (pvscsi_cmd_ctx_t);
1253 1240
1254 1241 ctx = pvs->cmd_ctx = kmem_zalloc(size, KM_SLEEP);
1255 1242
1256 1243 for (i = 0; i < pvs->req_depth; ++i, ++ctx) {
1257 1244 list_insert_tail(&pvs->cmd_ctx_pool, ctx);
1258 1245 if (pvscsi_setup_dma_buffer(pvs, PAGE_SIZE,
1259 1246 &ctx->dma_buf) != DDI_SUCCESS)
1260 1247 goto cleanup;
1261 1248 }
1262 1249
1263 1250 return (DDI_SUCCESS);
1264 1251
1265 1252 cleanup:
1266 1253 for (; i >= 0; --i, --ctx) {
1267 1254 list_remove(&pvs->cmd_ctx_pool, ctx);
1268 1255 pvscsi_free_dma_buffer(&ctx->dma_buf);
1269 1256 }
1270 1257 kmem_free(pvs->cmd_ctx, size);
1271 1258
1272 1259 return (DDI_FAILURE);
1273 1260 }
1274 1261
1275 1262 static void
1276 1263 pvscsi_free_sg(pvscsi_softc_t *pvs)
1277 1264 {
1278 1265 int i;
1279 1266 pvscsi_cmd_ctx_t *ctx = pvs->cmd_ctx;
1280 1267
1281 1268 for (i = 0; i < pvs->req_depth; ++i, ++ctx) {
1282 1269 list_remove(&pvs->cmd_ctx_pool, ctx);
1283 1270 pvscsi_free_dma_buffer(&ctx->dma_buf);
1284 1271 }
1285 1272
1286 1273 kmem_free(pvs->cmd_ctx, pvs->req_pages << PAGE_SHIFT);
1287 1274 }
1288 1275
1289 1276 static int
1290 1277 pvscsi_allocate_rings(pvscsi_softc_t *pvs)
1291 1278 {
1292 1279 /* Allocate DMA buffer for rings state */
1293 1280 if (pvscsi_setup_dma_buffer(pvs, PAGE_SIZE,
1294 1281 &pvs->rings_state_buf) != DDI_SUCCESS)
1295 1282 return (DDI_FAILURE);
1296 1283
1297 1284 /* Allocate DMA buffer for request ring */
1298 1285 pvs->req_pages = MIN(pvscsi_ring_pages, PVSCSI_MAX_NUM_PAGES_REQ_RING);
1299 1286 pvs->req_depth = pvs->req_pages * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
1300 1287 if (pvscsi_setup_dma_buffer(pvs, pvs->req_pages * PAGE_SIZE,
1301 1288 &pvs->req_ring_buf) != DDI_SUCCESS)
1302 1289 goto free_rings_state;
1303 1290
1304 1291 /* Allocate completion ring */
1305 1292 pvs->cmp_pages = MIN(pvscsi_ring_pages, PVSCSI_MAX_NUM_PAGES_CMP_RING);
1306 1293 if (pvscsi_setup_dma_buffer(pvs, pvs->cmp_pages * PAGE_SIZE,
1307 1294 &pvs->cmp_ring_buf) != DDI_SUCCESS)
1308 1295 goto free_req_buf;
1309 1296
1310 1297 /* Allocate message ring */
1311 1298 pvs->msg_pages = MIN(pvscsi_msg_ring_pages,
1312 1299 PVSCSI_MAX_NUM_PAGES_MSG_RING);
1313 1300 if (pvscsi_setup_dma_buffer(pvs, pvs->msg_pages * PAGE_SIZE,
1314 1301 &pvs->msg_ring_buf) != DDI_SUCCESS)
1315 1302 goto free_cmp_buf;
1316 1303
1317 1304 return (DDI_SUCCESS);
1318 1305
1319 1306 free_cmp_buf:
1320 1307 pvscsi_free_dma_buffer(&pvs->cmp_ring_buf);
1321 1308 free_req_buf:
1322 1309 pvscsi_free_dma_buffer(&pvs->req_ring_buf);
1323 1310 free_rings_state:
1324 1311 pvscsi_free_dma_buffer(&pvs->rings_state_buf);
1325 1312
1326 1313 return (DDI_FAILURE);
1327 1314 }
1328 1315
/*
 * Release the ring DMA buffers in reverse order of their allocation
 * in pvscsi_allocate_rings().
 */
static void
pvscsi_free_rings(pvscsi_softc_t *pvs)
{
	pvscsi_free_dma_buffer(&pvs->msg_ring_buf);
	pvscsi_free_dma_buffer(&pvs->cmp_ring_buf);
	pvscsi_free_dma_buffer(&pvs->req_ring_buf);
	pvscsi_free_dma_buffer(&pvs->rings_state_buf);
}
1337 1324
1338 1325 static void
1339 1326 pvscsi_setup_rings(pvscsi_softc_t *pvs)
1340 1327 {
1341 1328 int i;
1342 1329 struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };
1343 1330 struct PVSCSICmdDescSetupRings cmd = { 0 };
1344 1331 uint64_t base;
1345 1332
1346 1333 cmd.ringsStatePPN = pvs->rings_state_buf.pa >> PAGE_SHIFT;
1347 1334 cmd.reqRingNumPages = pvs->req_pages;
1348 1335 cmd.cmpRingNumPages = pvs->cmp_pages;
1349 1336
1350 1337 /* Setup request ring */
1351 1338 base = pvs->req_ring_buf.pa;
1352 1339 for (i = 0; i < pvs->req_pages; i++) {
1353 1340 cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
1354 1341 base += PAGE_SIZE;
1355 1342 }
1356 1343
1357 1344 /* Setup completion ring */
1358 1345 base = pvs->cmp_ring_buf.pa;
1359 1346 for (i = 0; i < pvs->cmp_pages; i++) {
1360 1347 cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
1361 1348 base += PAGE_SIZE;
1362 1349 }
1363 1350
1364 1351 bzero(RINGS_STATE(pvs), PAGE_SIZE);
1365 1352 bzero(REQ_RING(pvs), pvs->req_pages * PAGE_SIZE);
1366 1353 bzero(CMP_RING(pvs), pvs->cmp_pages * PAGE_SIZE);
1367 1354
1368 1355 /* Issue SETUP command */
1369 1356 pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_SETUP_RINGS, &cmd, sizeof (cmd));
1370 1357
1371 1358 /* Setup message ring */
1372 1359 cmd_msg.numPages = pvs->msg_pages;
1373 1360 base = pvs->msg_ring_buf.pa;
1374 1361
1375 1362 for (i = 0; i < pvs->msg_pages; i++) {
1376 1363 cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
1377 1364 base += PAGE_SIZE;
1378 1365 }
1379 1366 bzero(MSG_RING(pvs), pvs->msg_pages * PAGE_SIZE);
1380 1367
1381 1368 pvscsi_write_cmd_desc(pvs, PVSCSI_CMD_SETUP_MSG_RING, &cmd_msg,
1382 1369 sizeof (cmd_msg));
1383 1370 }
1384 1371
1385 1372 static int
1386 1373 pvscsi_setup_io(pvscsi_softc_t *pvs)
1387 1374 {
1388 1375 int offset, rcount, rn, type;
1389 1376 int ret = DDI_FAILURE;
1390 1377 off_t regsize;
1391 1378 pci_regspec_t *regs;
1392 1379 uint_t regs_length;
1393 1380
1394 1381 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, pvs->dip,
1395 1382 DDI_PROP_DONTPASS, "reg", (int **)®s,
1396 1383 ®s_length) != DDI_PROP_SUCCESS) {
1397 1384 dev_err(pvs->dip, CE_WARN, "!failed to lookup 'reg' property");
1398 1385 return (DDI_FAILURE);
1399 1386 }
1400 1387
1401 1388 rcount = regs_length * sizeof (int) / sizeof (pci_regspec_t);
1402 1389
1403 1390 for (offset = PCI_CONF_BASE0; offset <= PCI_CONF_BASE5; offset += 4) {
1404 1391 for (rn = 0; rn < rcount; ++rn) {
1405 1392 if (PCI_REG_REG_G(regs[rn].pci_phys_hi) == offset) {
1406 1393 type = regs[rn].pci_phys_hi & PCI_ADDR_MASK;
1407 1394 break;
1408 1395 }
1409 1396 }
1410 1397
1411 1398 if (rn >= rcount)
1412 1399 continue;
1413 1400
1414 1401 if (type != PCI_ADDR_IO) {
1415 1402 if (ddi_dev_regsize(pvs->dip, rn,
1416 1403 ®size) != DDI_SUCCESS) {
1417 1404 dev_err(pvs->dip, CE_WARN,
1418 1405 "!failed to get size of reg %d", rn);
1419 1406 goto out;
1420 1407 }
1421 1408 if (regsize == PVSCSI_MEM_SPACE_SIZE) {
1422 1409 if (ddi_regs_map_setup(pvs->dip, rn,
1423 1410 &pvs->mmio_base, 0, 0,
1424 1411 &pvscsi_mmio_attr,
1425 1412 &pvs->mmio_handle) != DDI_SUCCESS) {
1426 1413 dev_err(pvs->dip, CE_WARN,
1427 1414 "!failed to map MMIO BAR");
1428 1415 goto out;
1429 1416 }
1430 1417 ret = DDI_SUCCESS;
1431 1418 break;
1432 1419 }
1433 1420 }
1434 1421 }
1435 1422
1436 1423 out:
1437 1424 ddi_prop_free(regs);
1438 1425
1439 1426 return (ret);
1440 1427 }
1441 1428
/*
 * Unmap the MMIO register space mapped by pvscsi_setup_io().
 */
static void
pvscsi_free_io(pvscsi_softc_t *pvs)
{
	ddi_regs_map_free(&pvs->mmio_handle);
}
1447 1434
1448 1435 static int
1449 1436 pvscsi_enable_intrs(pvscsi_softc_t *pvs)
1450 1437 {
1451 1438 int i, rc, intr_caps;
1452 1439
1453 1440 if ((rc = ddi_intr_get_cap(pvs->intr_htable[0], &intr_caps)) !=
1454 1441 DDI_SUCCESS) {
1455 1442 dev_err(pvs->dip, CE_WARN, "!failed to get interrupt caps");
1456 1443 return (DDI_FAILURE);
1457 1444 }
1458 1445
1459 1446 if ((intr_caps & DDI_INTR_FLAG_BLOCK) != 0) {
1460 1447 if ((rc = ddi_intr_block_enable(pvs->intr_htable,
1461 1448 pvs->intr_cnt)) != DDI_SUCCESS) {
1462 1449 dev_err(pvs->dip, CE_WARN,
1463 1450 "!failed to enable interrupt block");
1464 1451 }
1465 1452 } else {
1466 1453 for (i = 0; i < pvs->intr_cnt; i++) {
1467 1454 if ((rc = ddi_intr_enable(pvs->intr_htable[i])) ==
1468 1455 DDI_SUCCESS)
1469 1456 continue;
1470 1457 dev_err(pvs->dip, CE_WARN,
1471 1458 "!failed to enable interrupt");
1472 1459 while (--i >= 0)
1473 1460 (void) ddi_intr_disable(pvs->intr_htable[i]);
1474 1461 break;
1475 1462 }
1476 1463 }
1477 1464
1478 1465 /* Unmask interrupts */
1479 1466 if (rc == DDI_SUCCESS) {
1480 1467 pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_MASK,
1481 1468 PVSCSI_INTR_CMPL_MASK | PVSCSI_INTR_MSG_MASK);
1482 1469 }
1483 1470
1484 1471 return (rc);
1485 1472 }
1486 1473
/*
 * Interrupt service routine.  Drains the completion and message rings
 * and dispatches the resulting work to taskqs, falling back to inline
 * processing when a dispatch fails.
 */
/* ARGSUSED arg2 */
static uint32_t
pvscsi_intr_handler(caddr_t arg1, caddr_t arg2)
{
	boolean_t handled;
	pvscsi_softc_t *pvs = (pvscsi_softc_t *)arg1;
	uint32_t status;

	mutex_enter(&pvs->intr_mutex);
	/* While polled commands are in flight, the poller consumes events. */
	if (pvs->num_pollers > 0) {
		mutex_exit(&pvs->intr_mutex);
		return (DDI_INTR_CLAIMED);
	}

	/* With MSI enabled the interrupt is unconditionally treated as ours. */
	if (pvscsi_enable_msi) {
		handled = B_TRUE;
	} else {
		status = pvscsi_read_intr_status(pvs);
		handled = (status & PVSCSI_INTR_ALL_SUPPORTED) != 0;
		if (handled)
			pvscsi_write_intr_status(pvs, status);
	}
	mutex_exit(&pvs->intr_mutex);

	if (handled) {
		boolean_t qnotify;
		pvscsi_cmd_t *pending;
		pvscsi_msg_t *msg;

		mutex_enter(&pvs->rx_mutex);
		pending = pvscsi_process_comp_ring(pvs);
		msg = pvscsi_process_msg_ring(pvs);
		mutex_exit(&pvs->rx_mutex);

		mutex_enter(&pvs->mutex);
		qnotify = HBA_QUIESCE_PENDING(pvs);
		mutex_exit(&pvs->mutex);

		/* Complete inline if the taskq cannot take the work. */
		if (pending != NULL && ddi_taskq_dispatch(pvs->comp_tq,
		    pvscsi_complete_chained, pending,
		    DDI_NOSLEEP) == DDI_FAILURE)
			pvscsi_complete_chained(pending);

		/* Messages are dropped (with a warning) if dispatch fails. */
		if (msg != NULL && ddi_taskq_dispatch(pvs->msg_tq,
		    pvscsi_handle_msg, msg, DDI_NOSLEEP) == DDI_FAILURE) {
			dev_err(pvs->dip, CE_WARN,
			    "!failed to process msg type %d for target %d",
			    msg->type, msg->target);
			kmem_free(msg, sizeof (pvscsi_msg_t));
		}

		if (qnotify)
			pvscsi_quiesce_notify(pvs);
	}

	return (handled ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
}
1544 1531
1545 1532 static int
1546 1533 pvscsi_register_isr(pvscsi_softc_t *pvs, int type)
1547 1534 {
1548 1535 int navail, nactual;
1549 1536 int i;
1550 1537
1551 1538 if (ddi_intr_get_navail(pvs->dip, type, &navail) != DDI_SUCCESS ||
1552 1539 navail == 0) {
1553 1540 dev_err(pvs->dip, CE_WARN,
1554 1541 "!failed to get number of available interrupts of type %d",
1555 1542 type);
1556 1543 return (DDI_FAILURE);
1557 1544 }
1558 1545 navail = MIN(navail, PVSCSI_MAX_INTRS);
1559 1546
1560 1547 pvs->intr_size = navail * sizeof (ddi_intr_handle_t);
1561 1548 if ((pvs->intr_htable = kmem_alloc(pvs->intr_size, KM_SLEEP)) == NULL) {
1562 1549 dev_err(pvs->dip, CE_WARN,
1563 1550 "!failed to allocate %d bytes for interrupt hashtable",
1564 1551 pvs->intr_size);
1565 1552 return (DDI_FAILURE);
1566 1553 }
1567 1554
1568 1555 if (ddi_intr_alloc(pvs->dip, pvs->intr_htable, type, 0, navail,
1569 1556 &nactual, DDI_INTR_ALLOC_NORMAL) != DDI_SUCCESS || nactual == 0) {
1570 1557 dev_err(pvs->dip, CE_WARN, "!failed to allocate %d interrupts",
1571 1558 navail);
1572 1559 goto free_htable;
1573 1560 }
1574 1561
1575 1562 pvs->intr_cnt = nactual;
1576 1563
1577 1564 if (ddi_intr_get_pri(pvs->intr_htable[0],
1578 1565 (uint_t *)&pvs->intr_pri) != DDI_SUCCESS) {
1579 1566 dev_err(pvs->dip, CE_WARN, "!failed to get interrupt priority");
1580 1567 goto free_intrs;
1581 1568 }
1582 1569
1583 1570 for (i = 0; i < nactual; i++) {
1584 1571 if (ddi_intr_add_handler(pvs->intr_htable[i],
1585 1572 pvscsi_intr_handler, (caddr_t)pvs, NULL) != DDI_SUCCESS) {
1586 1573 dev_err(pvs->dip, CE_WARN,
1587 1574 "!failed to add interrupt handler");
1588 1575 goto free_intrs;
1589 1576 }
1590 1577 }
1591 1578
1592 1579 return (DDI_SUCCESS);
1593 1580
1594 1581 free_intrs:
1595 1582 for (i = 0; i < nactual; i++)
1596 1583 (void) ddi_intr_free(pvs->intr_htable[i]);
1597 1584 free_htable:
1598 1585 kmem_free(pvs->intr_htable, pvs->intr_size);
1599 1586
1600 1587 return (DDI_FAILURE);
1601 1588 }
1602 1589
1603 1590 static void
1604 1591 pvscsi_free_intr_resources(pvscsi_softc_t *pvs)
1605 1592 {
1606 1593 int i;
1607 1594
1608 1595 for (i = 0; i < pvs->intr_cnt; i++) {
1609 1596 (void) ddi_intr_disable(pvs->intr_htable[i]);
1610 1597 (void) ddi_intr_remove_handler(pvs->intr_htable[i]);
1611 1598 (void) ddi_intr_free(pvs->intr_htable[i]);
1612 1599 }
1613 1600 kmem_free(pvs->intr_htable, pvs->intr_size);
1614 1601 }
1615 1602
1616 1603 static int
1617 1604 pvscsi_setup_isr(pvscsi_softc_t *pvs)
1618 1605 {
1619 1606 int intr_types;
1620 1607
1621 1608 if (ddi_intr_get_supported_types(pvs->dip,
1622 1609 &intr_types) != DDI_SUCCESS) {
1623 1610 dev_err(pvs->dip, CE_WARN,
1624 1611 "!failed to get supported interrupt types");
1625 1612 return (DDI_FAILURE);
1626 1613 }
1627 1614
1628 1615 if ((intr_types & DDI_INTR_TYPE_MSIX) != 0 && pvscsi_enable_msi) {
1629 1616 if (pvscsi_register_isr(pvs,
1630 1617 DDI_INTR_TYPE_MSIX) == DDI_SUCCESS) {
1631 1618 pvs->intr_type = DDI_INTR_TYPE_MSIX;
1632 1619 } else {
1633 1620 dev_err(pvs->dip, CE_WARN,
1634 1621 "!failed to install MSI-X interrupt handler");
1635 1622 }
1636 1623 } else if ((intr_types & DDI_INTR_TYPE_MSI) != 0 && pvscsi_enable_msi) {
1637 1624 if (pvscsi_register_isr(pvs,
1638 1625 DDI_INTR_TYPE_MSI) == DDI_SUCCESS) {
1639 1626 pvs->intr_type = DDI_INTR_TYPE_MSI;
1640 1627 } else {
1641 1628 dev_err(pvs->dip, CE_WARN,
1642 1629 "!failed to install MSI interrupt handler");
1643 1630 }
1644 1631 } else if ((intr_types & DDI_INTR_TYPE_FIXED) != 0) {
1645 1632 if (pvscsi_register_isr(pvs,
1646 1633 DDI_INTR_TYPE_FIXED) == DDI_SUCCESS) {
1647 1634 pvs->intr_type = DDI_INTR_TYPE_FIXED;
1648 1635 } else {
1649 1636 dev_err(pvs->dip, CE_WARN,
1650 1637 "!failed to install FIXED interrupt handler");
1651 1638 }
1652 1639 }
1653 1640
1654 1641 return (pvs->intr_type == 0 ? DDI_FAILURE : DDI_SUCCESS);
1655 1642 }
1656 1643
1657 1644 static void
1658 1645 pvscsi_wd_thread(pvscsi_softc_t *pvs)
1659 1646 {
1660 1647 clock_t now;
1661 1648 pvscsi_cmd_t *expired, *c, *cn, **pnext;
1662 1649
1663 1650 mutex_enter(&pvs->mutex);
1664 1651 for (;;) {
1665 1652 expired = NULL;
1666 1653 pnext = NULL;
1667 1654 now = ddi_get_lbolt();
1668 1655
1669 1656 for (c = list_head(&pvs->cmd_queue); c != NULL; ) {
1670 1657 cn = list_next(&pvs->cmd_queue, c);
1671 1658
1672 1659 /*
1673 1660 * Commands with 'FLAG_NOINTR' are watched using their
1674 1661 * own timeouts, so we should not touch them.
1675 1662 */
1676 1663 if ((c->pkt->pkt_flags & FLAG_NOINTR) == 0 &&
1677 1664 now > c->timeout_lbolt) {
1678 1665 dev_err(pvs->dip, CE_WARN,
1679 1666 "!expired command: %p (%ld > %ld)",
1680 1667 (void *)c, now, c->timeout_lbolt);
1681 1668 pvscsi_remove_from_queue(c);
1682 1669 if (expired == NULL)
1683 1670 expired = c;
1684 1671 if (pnext == NULL) {
1685 1672 pnext = &c->next_cmd;
1686 1673 } else {
1687 1674 *pnext = c;
1688 1675 pnext = &c->next_cmd;
1689 1676 }
1690 1677 }
1691 1678 c = cn;
1692 1679 }
1693 1680 mutex_exit(&pvs->mutex);
1694 1681
1695 1682 /* Now cancel all expired commands */
1696 1683 if (expired != NULL) {
1697 1684 struct scsi_address sa = {0};
1698 1685 /* Build a fake SCSI address */
1699 1686 sa.a_hba_tran = pvs->tran;
1700 1687 while (expired != NULL) {
1701 1688 c = expired->next_cmd;
1702 1689 sa.a_target = expired->cmd_target;
1703 1690 sa.a_lun = 0;
1704 1691 (void) pvscsi_abort(&sa, CMD2PKT(expired));
1705 1692 expired = c;
1706 1693 }
1707 1694 }
1708 1695
1709 1696 mutex_enter(&pvs->mutex);
1710 1697 if ((pvs->flags & PVSCSI_DRIVER_SHUTDOWN) != 0) {
1711 1698 /* Finish job */
1712 1699 break;
1713 1700 }
1714 1701 if (cv_reltimedwait(&pvs->wd_condvar, &pvs->mutex,
1715 1702 SEC_TO_TICK(1), TR_CLOCK_TICK) > 0) {
1716 1703 /* Explicitly woken up, finish job */
1717 1704 break;
1718 1705 }
1719 1706 }
1720 1707
1721 1708 /* Confirm thread termination */
1722 1709 cv_signal(&pvs->syncvar);
1723 1710 mutex_exit(&pvs->mutex);
1724 1711 }
1725 1712
1726 1713 static int
1727 1714 pvscsi_ccache_constructor(void *buf, void *cdrarg, int kmflags)
1728 1715 {
1729 1716 int (*callback)(caddr_t);
1730 1717 uint_t cookiec;
1731 1718 pvscsi_cmd_t *cmd = (pvscsi_cmd_t *)buf;
1732 1719 pvscsi_softc_t *pvs = cdrarg;
1733 1720 struct scsi_address ap;
1734 1721
1735 1722 callback = (kmflags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
1736 1723 ap.a_hba_tran = pvs->tran;
1737 1724 ap.a_target = 0;
1738 1725 ap.a_lun = 0;
1739 1726
1740 1727 /* Allocate a DMA handle for data transfers */
1741 1728 if ((ddi_dma_alloc_handle(pvs->dip, &pvs->io_dma_attr, callback,
1742 1729 NULL, &cmd->cmd_dmahdl)) != DDI_SUCCESS) {
1743 1730 dev_err(pvs->dip, CE_WARN, "!failed to allocate DMA handle");
1744 1731 return (-1);
1745 1732 }
1746 1733
1747 1734 /* Setup ARQ buffer */
1748 1735 if ((cmd->arqbuf = scsi_alloc_consistent_buf(&ap, (struct buf *)NULL,
1749 1736 SENSE_BUFFER_SIZE, B_READ, callback, NULL)) == NULL) {
1750 1737 dev_err(pvs->dip, CE_WARN, "!failed to allocate ARQ buffer");
1751 1738 goto free_handle;
1752 1739 }
1753 1740
1754 1741 if (ddi_dma_alloc_handle(pvs->dip, &pvs->hba_dma_attr,
1755 1742 callback, NULL, &cmd->arqhdl) != DDI_SUCCESS) {
1756 1743 dev_err(pvs->dip, CE_WARN,
1757 1744 "!failed to allocate DMA handle for ARQ buffer");
1758 1745 goto free_arqbuf;
1759 1746 }
1760 1747
1761 1748 if (ddi_dma_buf_bind_handle(cmd->arqhdl, cmd->arqbuf,
1762 1749 (DDI_DMA_READ | DDI_DMA_CONSISTENT), callback, NULL,
1763 1750 &cmd->arqc, &cookiec) != DDI_SUCCESS) {
1764 1751 dev_err(pvs->dip, CE_WARN, "!failed to bind ARQ buffer");
1765 1752 goto free_arqhdl;
1766 1753 }
1767 1754
1768 1755 return (0);
1769 1756
1770 1757 free_arqhdl:
1771 1758 ddi_dma_free_handle(&cmd->arqhdl);
1772 1759 free_arqbuf:
1773 1760 scsi_free_consistent_buf(cmd->arqbuf);
1774 1761 free_handle:
1775 1762 ddi_dma_free_handle(&cmd->cmd_dmahdl);
1776 1763
1777 1764 return (-1);
1778 1765 }
1779 1766
/*
 * kmem cache destructor: release the resources set up by
 * pvscsi_ccache_constructor().  The ARQ handle is unbound and freed
 * before the ARQ buffer itself is released.
 */
/* ARGSUSED cdrarg */
static void
pvscsi_ccache_destructor(void *buf, void *cdrarg)
{
	pvscsi_cmd_t *cmd = (pvscsi_cmd_t *)buf;

	if (cmd->cmd_dmahdl != NULL) {
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahdl);
		ddi_dma_free_handle(&cmd->cmd_dmahdl);
		cmd->cmd_dmahdl = NULL;
	}

	if (cmd->arqhdl != NULL) {
		(void) ddi_dma_unbind_handle(cmd->arqhdl);
		ddi_dma_free_handle(&cmd->arqhdl);
		cmd->arqhdl = NULL;
	}

	if (cmd->arqbuf != NULL) {
		scsi_free_consistent_buf(cmd->arqbuf);
		cmd->arqbuf = NULL;
	}
}
1803 1790
1804 1791 /* tran_* entry points and setup */
1805 1792 /* ARGSUSED hba_dip tgt_dip hba_tran */
1806 1793 static int
1807 1794 pvscsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1808 1795 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
1809 1796 {
1810 1797 pvscsi_softc_t *pvs = SDEV2PRIV(sd);
1811 1798
1812 1799 ASSERT(pvs != NULL);
1813 1800
1814 1801 if (sd->sd_address.a_target >= PVSCSI_MAXTGTS)
1815 1802 return (DDI_FAILURE);
1816 1803
1817 1804 return (DDI_SUCCESS);
1818 1805 }
1819 1806
/*
 * tran_start(9E): submit a packet to the device.  FLAG_NOINTR packets
 * are run to completion here by polling; everything else completes via
 * the interrupt path.
 */
static int
pvscsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	boolean_t poll = ((pkt->pkt_flags & FLAG_NOINTR) != 0);
	int rc;
	pvscsi_cmd_t *cmd = PKT2CMD(pkt);
	pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;

	ASSERT(cmd->pkt == pkt);
	ASSERT(cmd->cmd_pvs == pvs);

	/*
	 * Reinitialize some fields because the packet may
	 * have been resubmitted.
	 */
	pkt->pkt_reason = CMD_CMPLT;
	pkt->pkt_state = 0;
	pkt->pkt_statistics = 0;

	/* Zero status byte */
	*(pkt->pkt_scbp) = 0;

	if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0) {
		ASSERT(cmd->cmd_dma_count != 0);
		pkt->pkt_resid = cmd->cmd_dma_count;

		/*
		 * Consistent packets need to be synced first
		 * (only for data going out).
		 */
		if ((cmd->flags & PVSCSI_FLAG_IO_IOPB) != 0) {
			(void) ddi_dma_sync(cmd->cmd_dmahdl, 0, 0,
			    DDI_DMA_SYNC_FORDEV);
		}
	}

	cmd->cmd_target = ap->a_target;

	/* Refuse non-polled commands while the HBA is quiesced. */
	mutex_enter(&pvs->mutex);
	if (HBA_IS_QUIESCED(pvs) && !poll) {
		mutex_exit(&pvs->mutex);
		return (TRAN_BUSY);
	}
	mutex_exit(&pvs->mutex);

	rc = pvscsi_transport_command(pvs, cmd);

	if (poll) {
		pvscsi_cmd_t *dcmd;
		boolean_t qnotify;

		/* Spin for this command's completion. */
		if (rc == TRAN_ACCEPT)
			rc = pvscsi_poll_cmd(pvs, cmd);

		/* Drain anything else that completed meanwhile. */
		mutex_enter(&pvs->rx_mutex);
		dcmd = pvscsi_process_comp_ring(pvs);
		mutex_exit(&pvs->rx_mutex);

		mutex_enter(&pvs->mutex);
		qnotify = HBA_QUIESCE_PENDING(pvs);
		mutex_exit(&pvs->mutex);

		pvscsi_complete_chained(dcmd);

		if (qnotify)
			pvscsi_quiesce_notify(pvs);
	}

	return (rc);
}
1890 1877
1891 1878 static int
1892 1879 pvscsi_reset(struct scsi_address *ap, int level)
1893 1880 {
1894 1881 pvscsi_softc_t *pvs = AP2PRIV(ap);
1895 1882
1896 1883 switch (level) {
1897 1884 case RESET_ALL:
1898 1885 return (pvscsi_reset_generic(pvs, NULL));
1899 1886 case RESET_TARGET:
1900 1887 ASSERT(ap != NULL);
1901 1888 return (pvscsi_reset_generic(pvs, ap));
1902 1889 default:
1903 1890 return (0);
1904 1891 }
1905 1892 }
1906 1893
1907 1894 static int
1908 1895 pvscsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
1909 1896 {
1910 1897 boolean_t qnotify = B_FALSE;
1911 1898 pvscsi_cmd_t *pending;
1912 1899 pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;
1913 1900
1914 1901 mutex_enter(&pvs->tx_mutex);
1915 1902 mutex_enter(&pvs->rx_mutex);
1916 1903 if (pkt != NULL) {
1917 1904 /* Abort single command */
1918 1905 pvscsi_cmd_t *cmd = PKT2CMD(pkt);
1919 1906
1920 1907 if (pvscsi_abort_cmd(cmd, &pending) == CMD_ABORTED) {
1921 1908 /* Assume command is completely cancelled now */
1922 1909 cmd->flags |= PVSCSI_FLAG_ABORTED;
1923 1910 }
1924 1911 } else {
1925 1912 /* Abort all commands on the bus */
1926 1913 pvscsi_abort_all(ap, pvs, &pending, PVSCSI_FLAG_ABORTED);
1927 1914 }
1928 1915 qnotify = HBA_QUIESCE_PENDING(pvs);
1929 1916 mutex_exit(&pvs->rx_mutex);
1930 1917 mutex_exit(&pvs->tx_mutex);
1931 1918
1932 1919 pvscsi_complete_chained(pending);
1933 1920
1934 1921 if (qnotify)
1935 1922 pvscsi_quiesce_notify(pvs);
1936 1923
1937 1924 return (1);
1938 1925 }
1939 1926
1940 1927 /* ARGSUSED tgtonly */
1941 1928 static int
1942 1929 pvscsi_getcap(struct scsi_address *ap, char *cap, int tgtonly)
1943 1930 {
1944 1931 pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;
1945 1932
1946 1933 if (cap == NULL)
1947 1934 return (-1);
1948 1935
1949 1936 switch (scsi_hba_lookup_capstr(cap)) {
1950 1937 case SCSI_CAP_ARQ:
1951 1938 return ((pvs->flags & PVSCSI_HBA_AUTO_REQUEST_SENSE) != 0);
1952 1939 case SCSI_CAP_UNTAGGED_QING:
1953 1940 return (1);
1954 1941 default:
1955 1942 return (-1);
1956 1943 }
1957 1944 }
1958 1945
1959 1946 /* ARGSUSED tgtonly */
1960 1947 static int
1961 1948 pvscsi_setcap(struct scsi_address *ap, char *cap, int value, int tgtonly)
1962 1949 {
1963 1950 pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;
1964 1951
1965 1952 if (cap == NULL)
1966 1953 return (-1);
1967 1954
1968 1955 switch (scsi_hba_lookup_capstr(cap)) {
1969 1956 case SCSI_CAP_ARQ:
1970 1957 mutex_enter(&pvs->mutex);
1971 1958 if (value == 0)
1972 1959 pvs->flags &= ~PVSCSI_HBA_AUTO_REQUEST_SENSE;
1973 1960 else
1974 1961 pvs->flags |= PVSCSI_HBA_AUTO_REQUEST_SENSE;
1975 1962 mutex_exit(&pvs->mutex);
1976 1963 return (1);
1977 1964 default:
1978 1965 return (0);
1979 1966 }
1980 1967 }
1981 1968
/*
 * tran_destroy_pkt(9E): release everything attached to the command —
 * DMA binding, I/O slot context and any out-of-line areas — and return
 * it to the command cache.
 */
static void
pvscsi_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
{
	pvscsi_cmd_t *cmd = PKT2CMD(pkt);
	pvscsi_softc_t *pvs = ap->a_hba_tran->tran_hba_private;

	ASSERT(cmd->cmd_pvs == pvs);

	/* Drop the data DMA binding, if one is active. */
	if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0) {
		cmd->flags &= ~PVSCSI_FLAG_DMA_VALID;
		(void) ddi_dma_unbind_handle(cmd->cmd_dmahdl);
	}

	/* Return the I/O slot context, if one was acquired. */
	if (cmd->ctx != NULL) {
		mutex_enter(&pvs->mutex);
		pvscsi_release_ctx(cmd);
		mutex_exit(&pvs->mutex);
	}

	/* Free any out-of-line CDB/SCB/private areas. */
	if ((cmd->flags & PVSCSI_FLAGS_EXT) != 0)
		pvscsi_cmd_ext_free(cmd);

	kmem_cache_free(pvs->cmd_cache, cmd);
}
2006 1993
/*
 * tran_init_pkt(9E): allocate and/or initialize a SCSI packet.
 *
 * Called in up to three roles, possibly combined:
 *  - pkt == NULL: allocate a new command from the cache, wire up the
 *    embedded scsi_pkt and (if cmdlen/statuslen/tgtlen exceed the
 *    inline buffers) attach extended buffers;
 *  - bp != NULL: bind the data buffer for DMA and record the cookies;
 *  - cmd_nwin > 0: move to the next DMA window of a partial transfer.
 *
 * Returns the packet on success, NULL on failure (a packet allocated
 * here is destroyed again on the error path; a caller-supplied one is
 * left intact).  kf reflects callback: SLEEP_FUNC means KM_SLEEP.
 */
static struct scsi_pkt *
pvscsi_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt, struct buf *bp,
    int cmdlen, int statuslen, int tgtlen, int flags, int (*callback)(),
    caddr_t arg)
{
	boolean_t	is_new;
	int		kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
	int		rc, i;
	pvscsi_cmd_t	*cmd;
	pvscsi_softc_t	*pvs;

	pvs = ap->a_hba_tran->tran_hba_private;
	ASSERT(pvs != NULL);

	/* Allocate a new SCSI packet */
	if (pkt == NULL) {
		ddi_dma_handle_t saved_dmahdl, saved_arqhdl;
		struct buf	*saved_arqbuf;
		ddi_dma_cookie_t saved_arqc;

		is_new = B_TRUE;

		if ((cmd = kmem_cache_alloc(pvs->cmd_cache, kf)) == NULL)
			return (NULL);

		/*
		 * The cache constructor pre-allocates DMA handles and the
		 * ARQ buffer; save them across the bzero() below so they
		 * survive re-initialization of the command.
		 */
		saved_dmahdl = cmd->cmd_dmahdl;
		saved_arqhdl = cmd->arqhdl;
		saved_arqbuf = cmd->arqbuf;
		saved_arqc = cmd->arqc;

		/* cached_cookies is the trailing member; leave it as-is. */
		bzero(cmd, sizeof (pvscsi_cmd_t) -
		    sizeof (cmd->cached_cookies));

		cmd->cmd_pvs = pvs;
		cmd->cmd_dmahdl = saved_dmahdl;
		cmd->arqhdl = saved_arqhdl;
		cmd->arqbuf = saved_arqbuf;
		cmd->arqc = saved_arqc;

		/* Point the scsi_pkt at the buffers embedded in the cmd. */
		pkt = &cmd->cached_pkt;
		pkt->pkt_ha_private = (opaque_t)cmd;
		pkt->pkt_address = *ap;
		pkt->pkt_scbp = (uint8_t *)&cmd->cmd_scb;
		pkt->pkt_cdbp = (uint8_t *)&cmd->cmd_cdb;
		pkt->pkt_private = (opaque_t)&cmd->tgt_priv;

		cmd->tgtlen = tgtlen;
		cmd->statuslen = statuslen;
		cmd->cmdlen = cmdlen;
		cmd->pkt = pkt;
		cmd->ctx = NULL;

		/* Allocate extended buffers */
		if ((cmdlen > sizeof (cmd->cmd_cdb)) ||
		    (statuslen > sizeof (cmd->cmd_scb)) ||
		    (tgtlen > sizeof (cmd->tgt_priv))) {
			if (pvscsi_cmd_ext_alloc(pvs, cmd, kf) != DDI_SUCCESS) {
				dev_err(pvs->dip, CE_WARN,
				    "!extent allocation failed");
				goto out;
			}
		}
	} else {
		is_new = B_FALSE;

		/* Re-initialization: keep only the persistent flag bits. */
		cmd = PKT2CMD(pkt);
		cmd->flags &= PVSCSI_FLAGS_PERSISTENT;
	}

	ASSERT((cmd->flags & PVSCSI_FLAG_TRANSPORT) == 0);

	if ((flags & PKT_XARQ) != 0)
		cmd->flags |= PVSCSI_FLAG_XARQ;

	/* Handle partial DMA transfers */
	if (cmd->cmd_nwin > 0) {
		/* NULL return past the last window signals end of transfer. */
		if (++cmd->cmd_winindex >= cmd->cmd_nwin)
			return (NULL);
		if (ddi_dma_getwin(cmd->cmd_dmahdl, cmd->cmd_winindex,
		    &cmd->cmd_dma_offset, &cmd->cmd_dma_len,
		    &cmd->cmd_dmac, &cmd->cmd_dmaccount) == DDI_FAILURE)
			return (NULL);
		goto handle_dma_cookies;
	}

	/* Setup data buffer */
	if (bp != NULL && bp->b_bcount > 0 &&
	    (cmd->flags & PVSCSI_FLAG_DMA_VALID) == 0) {
		int dma_flags;

		ASSERT(cmd->cmd_dmahdl != NULL);

		/* Direction follows the buf: B_READ means device-to-host. */
		if ((bp->b_flags & B_READ) != 0) {
			cmd->flags |= PVSCSI_FLAG_IO_READ;
			dma_flags = DDI_DMA_READ;
		} else {
			cmd->flags &= ~PVSCSI_FLAG_IO_READ;
			dma_flags = DDI_DMA_WRITE;
		}
		if ((flags & PKT_CONSISTENT) != 0) {
			cmd->flags |= PVSCSI_FLAG_IO_IOPB;
			dma_flags |= DDI_DMA_CONSISTENT;
		}
		if ((flags & PKT_DMA_PARTIAL) != 0)
			dma_flags |= DDI_DMA_PARTIAL;

		rc = ddi_dma_buf_bind_handle(cmd->cmd_dmahdl, bp,
		    dma_flags, callback, arg, &cmd->cmd_dmac,
		    &cmd->cmd_dmaccount);
		if (rc == DDI_DMA_PARTIAL_MAP) {
			/* Partial mapping: start from the first window. */
			(void) ddi_dma_numwin(cmd->cmd_dmahdl,
			    &cmd->cmd_nwin);
			cmd->cmd_winindex = 0;
			(void) ddi_dma_getwin(cmd->cmd_dmahdl,
			    cmd->cmd_winindex, &cmd->cmd_dma_offset,
			    &cmd->cmd_dma_len, &cmd->cmd_dmac,
			    &cmd->cmd_dmaccount);
		} else if (rc != 0 && rc != DDI_DMA_MAPPED) {
			/* Map bind failure onto the buf's error code. */
			switch (rc) {
			case DDI_DMA_NORESOURCES:
				bioerror(bp, 0);
				break;
			case DDI_DMA_BADATTR:
			case DDI_DMA_NOMAPPING:
				bioerror(bp, EFAULT);
				break;
			case DDI_DMA_TOOBIG:
			default:
				bioerror(bp, EINVAL);
				break;
			}
			cmd->flags &= ~PVSCSI_FLAG_DMA_VALID;
			goto out;
		}

	handle_dma_cookies:
		/* Entered both in-line and from the next-window path above. */
		ASSERT(cmd->cmd_dmaccount > 0);
		if (cmd->cmd_dmaccount > PVSCSI_MAX_SG_SIZE) {
			dev_err(pvs->dip, CE_WARN,
			    "!invalid cookie count: %d (max %d)",
			    cmd->cmd_dmaccount, PVSCSI_MAX_SG_SIZE);
			bioerror(bp, EINVAL);
			goto out;
		}

		cmd->flags |= PVSCSI_FLAG_DMA_VALID;
		cmd->cmd_dma_count = cmd->cmd_dmac.dmac_size;
		cmd->cmd_total_dma_count += cmd->cmd_dmac.dmac_size;

		cmd->cached_cookies[0] = cmd->cmd_dmac;

		/*
		 * Calculate total amount of bytes for this I/O and
		 * store cookies for further processing.
		 */
		for (i = 1; i < cmd->cmd_dmaccount; i++) {
			ddi_dma_nextcookie(cmd->cmd_dmahdl, &cmd->cmd_dmac);
			cmd->cached_cookies[i] = cmd->cmd_dmac;
			cmd->cmd_dma_count += cmd->cmd_dmac.dmac_size;
			cmd->cmd_total_dma_count += cmd->cmd_dmac.dmac_size;
		}

		/* Bytes not covered by the windows mapped so far. */
		pkt->pkt_resid = (bp->b_bcount - cmd->cmd_total_dma_count);
	}

	return (pkt);

out:
	/* Only tear down a packet we allocated ourselves. */
	if (is_new)
		pvscsi_destroy_pkt(ap, pkt);

	return (NULL);
}
2180 2167
2181 2168 /* ARGSUSED ap */
2182 2169 static void
2183 2170 pvscsi_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
2184 2171 {
2185 2172 pvscsi_cmd_t *cmd = PKT2CMD(pkt);
2186 2173
2187 2174 if ((cmd->flags & PVSCSI_FLAG_DMA_VALID) != 0) {
2188 2175 (void) ddi_dma_unbind_handle(cmd->cmd_dmahdl);
2189 2176 cmd->flags &= ~PVSCSI_FLAG_DMA_VALID;
2190 2177 }
2191 2178 }
2192 2179
2193 2180 /* ARGSUSED ap */
2194 2181 static void
2195 2182 pvscsi_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
2196 2183 {
2197 2184 pvscsi_cmd_t *cmd = PKT2CMD(pkt);
2198 2185
2199 2186 if (cmd->cmd_dmahdl != NULL) {
2200 2187 (void) ddi_dma_sync(cmd->cmd_dmahdl, 0, 0,
2201 2188 (cmd->flags & PVSCSI_FLAG_IO_READ) ?
2202 2189 DDI_DMA_SYNC_FORCPU : DDI_DMA_SYNC_FORDEV);
2203 2190 }
2204 2191
2205 2192 }
2206 2193
2207 2194 /* ARGSUSED ap flag callback arg */
2208 2195 static int
2209 2196 pvscsi_reset_notify(struct scsi_address *ap, int flag,
2210 2197 void (*callback)(caddr_t), caddr_t arg)
2211 2198 {
2212 2199 return (DDI_FAILURE);
2213 2200 }
2214 2201
2215 2202 static int
2216 2203 pvscsi_quiesce_hba(dev_info_t *dip)
2217 2204 {
2218 2205 pvscsi_softc_t *pvs;
2219 2206 scsi_hba_tran_t *tran;
2220 2207
2221 2208 if ((tran = ddi_get_driver_private(dip)) == NULL ||
2222 2209 (pvs = TRAN2PRIV(tran)) == NULL)
2223 2210 return (-1);
2224 2211
2225 2212 mutex_enter(&pvs->mutex);
2226 2213 if (!HBA_IS_QUIESCED(pvs))
2227 2214 pvs->flags |= PVSCSI_HBA_QUIESCED;
2228 2215
2229 2216 if (pvs->cmd_queue_len != 0) {
2230 2217 /* Outstanding commands present, wait */
2231 2218 pvs->flags |= PVSCSI_HBA_QUIESCE_PENDING;
2232 2219 cv_wait(&pvs->quiescevar, &pvs->mutex);
2233 2220 ASSERT(pvs->cmd_queue_len == 0);
2234 2221 }
2235 2222 mutex_exit(&pvs->mutex);
2236 2223
2237 2224 /* Suspend taskq delivery and complete all scheduled tasks */
2238 2225 ddi_taskq_suspend(pvs->msg_tq);
2239 2226 ddi_taskq_wait(pvs->msg_tq);
2240 2227 ddi_taskq_suspend(pvs->comp_tq);
2241 2228 ddi_taskq_wait(pvs->comp_tq);
2242 2229
2243 2230 return (0);
2244 2231 }
2245 2232
2246 2233 static int
2247 2234 pvscsi_unquiesce_hba(dev_info_t *dip)
2248 2235 {
2249 2236 pvscsi_softc_t *pvs;
2250 2237 scsi_hba_tran_t *tran;
2251 2238
2252 2239 if ((tran = ddi_get_driver_private(dip)) == NULL ||
2253 2240 (pvs = TRAN2PRIV(tran)) == NULL)
2254 2241 return (-1);
2255 2242
2256 2243 mutex_enter(&pvs->mutex);
2257 2244 if (!HBA_IS_QUIESCED(pvs)) {
2258 2245 mutex_exit(&pvs->mutex);
2259 2246 return (0);
2260 2247 }
2261 2248 ASSERT(pvs->cmd_queue_len == 0);
2262 2249 pvs->flags &= ~PVSCSI_HBA_QUIESCED;
2263 2250 mutex_exit(&pvs->mutex);
2264 2251
2265 2252 /* Resume taskq delivery */
2266 2253 ddi_taskq_resume(pvs->msg_tq);
2267 2254 ddi_taskq_resume(pvs->comp_tq);
2268 2255
2269 2256 return (0);
2270 2257 }
2271 2258
2272 2259 static int
2273 2260 pvscsi_bus_config(dev_info_t *pdip, uint_t flags, ddi_bus_config_op_t op,
2274 2261 void *arg, dev_info_t **childp)
2275 2262 {
2276 2263 char *p;
2277 2264 int circ;
2278 2265 int ret = NDI_FAILURE;
2279 2266 long target = 0;
2280 2267 pvscsi_softc_t *pvs;
2281 2268 scsi_hba_tran_t *tran;
2282 2269
2283 2270 tran = ddi_get_driver_private(pdip);
2284 2271 pvs = tran->tran_hba_private;
2285 2272
2286 2273 ndi_devi_enter(pdip, &circ);
2287 2274 switch (op) {
2288 2275 case BUS_CONFIG_ONE:
2289 2276 if ((p = strrchr((char *)arg, '@')) != NULL &&
2290 2277 ddi_strtol(p + 1, NULL, 16, &target) == 0)
2291 2278 ret = pvscsi_config_one(pdip, pvs, (int)target, childp);
2292 2279 break;
2293 2280 case BUS_CONFIG_DRIVER:
2294 2281 case BUS_CONFIG_ALL:
2295 2282 ret = pvscsi_config_all(pdip, pvs);
2296 2283 break;
2297 2284 default:
2298 2285 break;
2299 2286 }
2300 2287
2301 2288 if (ret == NDI_SUCCESS)
2302 2289 ret = ndi_busop_bus_config(pdip, flags, op, arg, childp, 0);
2303 2290 ndi_devi_exit(pdip, circ);
2304 2291
2305 2292 return (ret);
2306 2293 }
2307 2294
/*
 * Allocate and register the SCSA transport structure for this HBA
 * instance: fills in all tran_* entry points and attaches to the SCSA
 * framework.  Returns 0 on success, -1 on failure (the tran structure
 * is freed and pvs->tran reset on the error path).
 */
static int
pvscsi_hba_setup(pvscsi_softc_t *pvs)
{
	scsi_hba_tran_t	*hba_tran;

	/* SCSI_HBA_CANSLEEP: allocation may block, so this cannot fail. */
	hba_tran = pvs->tran = scsi_hba_tran_alloc(pvs->dip,
	    SCSI_HBA_CANSLEEP);
	ASSERT(pvs->tran != NULL);

	hba_tran->tran_hba_private = pvs;
	hba_tran->tran_tgt_private = NULL;

	hba_tran->tran_tgt_init = pvscsi_tgt_init;
	hba_tran->tran_tgt_free = NULL;
	hba_tran->tran_tgt_probe = scsi_hba_probe;

	/* Command submission / control entry points. */
	hba_tran->tran_start = pvscsi_start;
	hba_tran->tran_reset = pvscsi_reset;
	hba_tran->tran_abort = pvscsi_abort;
	hba_tran->tran_getcap = pvscsi_getcap;
	hba_tran->tran_setcap = pvscsi_setcap;
	hba_tran->tran_init_pkt = pvscsi_init_pkt;
	hba_tran->tran_destroy_pkt = pvscsi_destroy_pkt;

	hba_tran->tran_dmafree = pvscsi_dmafree;
	hba_tran->tran_sync_pkt = pvscsi_sync_pkt;
	hba_tran->tran_reset_notify = pvscsi_reset_notify;

	hba_tran->tran_quiesce = pvscsi_quiesce_hba;
	hba_tran->tran_unquiesce = pvscsi_unquiesce_hba;
	hba_tran->tran_bus_reset = NULL;

	/* Event callbacks are not supported. */
	hba_tran->tran_add_eventcall = NULL;
	hba_tran->tran_get_eventcookie = NULL;
	hba_tran->tran_post_event = NULL;
	hba_tran->tran_remove_eventcall = NULL;

	hba_tran->tran_bus_config = pvscsi_bus_config;

	hba_tran->tran_interconnect_type = INTERCONNECT_SAS;

	if (scsi_hba_attach_setup(pvs->dip, &pvs->hba_dma_attr, hba_tran,
	    SCSI_HBA_TRAN_CDB | SCSI_HBA_TRAN_SCB | SCSI_HBA_TRAN_CLONE) !=
	    DDI_SUCCESS) {
		dev_err(pvs->dip, CE_WARN, "!failed to attach HBA");
		scsi_hba_tran_free(hba_tran);
		pvs->tran = NULL;
		return (-1);
	}

	return (0);
}
2360 2347
2361 2348 static int
2362 2349 pvscsi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
2363 2350 {
2364 2351 int instance;
2365 2352 pvscsi_softc_t *pvs;
2366 2353 char buf[32];
2367 2354
2368 2355 ASSERT(scsi_hba_iport_unit_address(dip) == NULL);
2369 2356
2370 2357 switch (cmd) {
2371 2358 case DDI_ATTACH:
2372 2359 case DDI_RESUME:
2373 2360 break;
2374 2361 default:
2375 2362 return (DDI_FAILURE);
2376 2363 }
2377 2364
2378 2365 instance = ddi_get_instance(dip);
2379 2366
2380 2367 /* Allocate softstate information */
2381 2368 if (ddi_soft_state_zalloc(pvscsi_sstate, instance) != DDI_SUCCESS) {
2382 2369 cmn_err(CE_WARN,
2383 2370 "!ddi_soft_state_zalloc() failed for instance %d",
2384 2371 instance);
2385 2372 return (DDI_FAILURE);
2386 2373 }
2387 2374
2388 2375 if ((pvs = ddi_get_soft_state(pvscsi_sstate, instance)) == NULL) {
2389 2376 cmn_err(CE_WARN, "!failed to get soft state for instance %d",
2390 2377 instance);
2391 2378 goto fail;
2392 2379 }
2393 2380
2394 2381 /*
2395 2382 * Indicate that we are 'sizeof (scsi_*(9S))' clean, we use
2396 2383 * scsi_pkt_size() instead.
2397 2384 */
2398 2385 scsi_size_clean(dip);
2399 2386
2400 2387 /* Setup HBA instance */
2401 2388 pvs->instance = instance;
2402 2389 pvs->dip = dip;
2403 2390 pvs->hba_dma_attr = pvscsi_hba_dma_attr;
2404 2391 pvs->ring_dma_attr = pvscsi_ring_dma_attr;
2405 2392 pvs->io_dma_attr = pvscsi_io_dma_attr;
2406 2393 mutex_init(&pvs->mutex, "pvscsi instance mutex", MUTEX_DRIVER, NULL);
2407 2394 mutex_init(&pvs->intr_mutex, "pvscsi instance interrupt mutex",
2408 2395 MUTEX_DRIVER, NULL);
2409 2396 mutex_init(&pvs->rx_mutex, "pvscsi rx ring mutex", MUTEX_DRIVER, NULL);
2410 2397 mutex_init(&pvs->tx_mutex, "pvscsi tx ring mutex", MUTEX_DRIVER, NULL);
2411 2398 list_create(&pvs->cmd_ctx_pool, sizeof (pvscsi_cmd_ctx_t),
2412 2399 offsetof(pvscsi_cmd_ctx_t, list));
2413 2400 list_create(&pvs->devnodes, sizeof (pvscsi_device_t),
2414 2401 offsetof(pvscsi_device_t, list));
2415 2402 list_create(&pvs->cmd_queue, sizeof (pvscsi_cmd_t),
2416 2403 offsetof(pvscsi_cmd_t, cmd_queue_node));
2417 2404 cv_init(&pvs->syncvar, "pvscsi synchronization cv", CV_DRIVER, NULL);
2418 2405 cv_init(&pvs->wd_condvar, "pvscsi watchdog cv", CV_DRIVER, NULL);
2419 2406 cv_init(&pvs->quiescevar, "pvscsi quiesce cv", CV_DRIVER, NULL);
2420 2407
2421 2408 (void) sprintf(buf, "pvscsi%d_cache", instance);
2422 2409 pvs->cmd_cache = kmem_cache_create(buf, sizeof (pvscsi_cmd_t), 0,
2423 2410 pvscsi_ccache_constructor, pvscsi_ccache_destructor, NULL,
2424 2411 (void *)pvs, NULL, 0);
2425 2412 if (pvs->cmd_cache == NULL) {
2426 2413 dev_err(pvs->dip, CE_WARN,
2427 2414 "!failed to create a cache for SCSI commands");
2428 2415 goto fail;
2429 2416 }
2430 2417
2431 2418 if ((pvscsi_setup_io(pvs)) != DDI_SUCCESS) {
2432 2419 dev_err(pvs->dip, CE_WARN, "!failed to setup I/O region");
2433 2420 goto free_cache;
2434 2421 }
2435 2422
2436 2423 pvscsi_reset_hba(pvs);
2437 2424
2438 2425 if ((pvscsi_allocate_rings(pvs)) != DDI_SUCCESS) {
2439 2426 dev_err(pvs->dip, CE_WARN, "!failed to allocate DMA rings");
2440 2427 goto free_io;
2441 2428 }
2442 2429
2443 2430 pvscsi_setup_rings(pvs);
2444 2431
2445 2432 if (pvscsi_setup_isr(pvs) != DDI_SUCCESS) {
2446 2433 dev_err(pvs->dip, CE_WARN, "!failed to setup ISR");
2447 2434 goto free_rings;
2448 2435 }
2449 2436
2450 2437 if (pvscsi_setup_sg(pvs) != DDI_SUCCESS) {
2451 2438 dev_err(pvs->dip, CE_WARN, "!failed to setup S/G");
2452 2439 goto free_intr;
2453 2440 }
2454 2441
2455 2442 if (pvscsi_hba_setup(pvs) != 0) {
2456 2443 dev_err(pvs->dip, CE_WARN, "!failed to setup HBA");
2457 2444 goto free_sg;
2458 2445 }
2459 2446
2460 2447 if ((pvs->comp_tq = ddi_taskq_create(pvs->dip, "comp_tq",
2461 2448 MIN(UINT16_MAX, ncpus), TASKQ_DEFAULTPRI, 0)) == NULL) {
2462 2449 dev_err(pvs->dip, CE_WARN,
2463 2450 "!failed to create completion taskq");
2464 2451 goto free_sg;
2465 2452 }
2466 2453
2467 2454 if ((pvs->msg_tq = ddi_taskq_create(pvs->dip, "msg_tq",
2468 2455 1, TASKQ_DEFAULTPRI, 0)) == NULL) {
2469 2456 dev_err(pvs->dip, CE_WARN,
2470 2457 "!failed to create message taskq");
2471 2458 goto free_comp_tq;
2472 2459 }
2473 2460
2474 2461 if (pvscsi_enable_intrs(pvs) != DDI_SUCCESS) {
2475 2462 dev_err(pvs->dip, CE_WARN, "!failed to enable interrupts");
2476 2463 goto free_msg_tq;
2477 2464 }
2478 2465
2479 2466 /* Launch watchdog thread */
2480 2467 pvs->wd_thread = thread_create(NULL, 0, pvscsi_wd_thread, pvs, 0, &p0,
2481 2468 TS_RUN, minclsyspri);
2482 2469
2483 2470 return (DDI_SUCCESS);
2484 2471
2485 2472 free_msg_tq:
2486 2473 ddi_taskq_destroy(pvs->msg_tq);
2487 2474 free_comp_tq:
2488 2475 ddi_taskq_destroy(pvs->comp_tq);
2489 2476 free_sg:
2490 2477 pvscsi_free_sg(pvs);
2491 2478 free_intr:
2492 2479 pvscsi_free_intr_resources(pvs);
2493 2480 free_rings:
2494 2481 pvscsi_reset_hba(pvs);
2495 2482 pvscsi_free_rings(pvs);
2496 2483 free_io:
2497 2484 pvscsi_free_io(pvs);
2498 2485 free_cache:
2499 2486 kmem_cache_destroy(pvs->cmd_cache);
2500 2487 fail:
2501 2488 ddi_soft_state_free(pvscsi_sstate, instance);
2502 2489
2503 2490 return (DDI_FAILURE);
2504 2491 }
2505 2492
2506 2493 static int
2507 2494 pvscsi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
2508 2495 {
2509 2496 int instance;
2510 2497 pvscsi_softc_t *pvs;
2511 2498
2512 2499 switch (cmd) {
2513 2500 case DDI_DETACH:
2514 2501 break;
2515 2502 default:
2516 2503 return (DDI_FAILURE);
2517 2504 }
2518 2505
2519 2506 instance = ddi_get_instance(dip);
2520 2507 if ((pvs = ddi_get_soft_state(pvscsi_sstate, instance)) == NULL) {
2521 2508 cmn_err(CE_WARN, "!failed to get soft state for instance %d",
2522 2509 instance);
2523 2510 return (DDI_FAILURE);
2524 2511 }
2525 2512
2526 2513 pvscsi_reset_hba(pvs);
2527 2514 pvscsi_free_intr_resources(pvs);
2528 2515
2529 2516 /* Shutdown message taskq */
2530 2517 ddi_taskq_wait(pvs->msg_tq);
2531 2518 ddi_taskq_destroy(pvs->msg_tq);
2532 2519
2533 2520 /* Shutdown completion taskq */
2534 2521 ddi_taskq_wait(pvs->comp_tq);
2535 2522 ddi_taskq_destroy(pvs->comp_tq);
2536 2523
2537 2524 /* Shutdown watchdog thread */
2538 2525 mutex_enter(&pvs->mutex);
2539 2526 pvs->flags |= PVSCSI_DRIVER_SHUTDOWN;
2540 2527 cv_signal(&pvs->wd_condvar);
2541 2528 cv_wait(&pvs->syncvar, &pvs->mutex);
2542 2529 mutex_exit(&pvs->mutex);
2543 2530
2544 2531 pvscsi_free_sg(pvs);
2545 2532 pvscsi_free_rings(pvs);
2546 2533 pvscsi_free_io(pvs);
2547 2534
2548 2535 kmem_cache_destroy(pvs->cmd_cache);
2549 2536
2550 2537 mutex_destroy(&pvs->mutex);
2551 2538 mutex_destroy(&pvs->intr_mutex);
2552 2539 mutex_destroy(&pvs->rx_mutex);
2553 2540
2554 2541 cv_destroy(&pvs->syncvar);
2555 2542 cv_destroy(&pvs->wd_condvar);
2556 2543 cv_destroy(&pvs->quiescevar);
2557 2544
2558 2545 ddi_soft_state_free(pvscsi_sstate, instance);
2559 2546 ddi_prop_remove_all(dip);
2560 2547
2561 2548 return (DDI_SUCCESS);
2562 2549 }
2563 2550
2564 2551 static int
2565 2552 pvscsi_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
2566 2553 int *rval)
2567 2554 {
2568 2555 int ret;
2569 2556
2570 2557 if (ddi_get_soft_state(pvscsi_sstate, getminor(dev)) == NULL) {
2571 2558 cmn_err(CE_WARN, "!invalid device instance: %d", getminor(dev));
2572 2559 return (ENXIO);
2573 2560 }
2574 2561
2575 2562 /* Try to handle command in a common way */
2576 2563 if ((ret = scsi_hba_ioctl(dev, cmd, data, mode, credp, rval)) != ENOTTY)
2577 2564 return (ret);
2578 2565
2579 2566 cmn_err(CE_WARN, "!unsupported IOCTL command: 0x%X", cmd);
2580 2567
2581 2568 return (ENXIO);
2582 2569 }
2583 2570
2584 2571 static int
2585 2572 pvscsi_quiesce(dev_info_t *devi)
2586 2573 {
2587 2574 scsi_hba_tran_t *tran;
2588 2575 pvscsi_softc_t *pvs;
2589 2576
2590 2577 if ((tran = ddi_get_driver_private(devi)) == NULL)
2591 2578 return (DDI_SUCCESS);
2592 2579
2593 2580 if ((pvs = tran->tran_hba_private) == NULL)
2594 2581 return (DDI_SUCCESS);
2595 2582
2596 2583 /* Mask all interrupts from device */
2597 2584 pvscsi_reg_write(pvs, PVSCSI_REG_OFFSET_INTR_MASK, 0);
2598 2585
2599 2586 /* Reset the HBA */
2600 2587 pvscsi_reset_hba(pvs);
2601 2588
2602 2589 return (DDI_SUCCESS);
2603 2590 }
2604 2591
2605 2592 /* module */
2606 2593
/*
 * Character device entry points.  Open/close are handled by the SCSA
 * framework; ioctl is the only driver-specific operation.
 */
static struct cb_ops pvscsi_cb_ops = {
	.cb_open =	scsi_hba_open,
	.cb_close =	scsi_hba_close,
	.cb_strategy =	nodev,
	.cb_print =	nodev,
	.cb_dump =	nodev,
	.cb_read =	nodev,
	.cb_write =	nodev,
	.cb_ioctl =	pvscsi_ioctl,
	.cb_devmap =	nodev,
	.cb_mmap =	nodev,
	.cb_segmap =	nodev,
	.cb_chpoll =	nochpoll,
	.cb_prop_op =	ddi_prop_op,
	.cb_str =	NULL,
	.cb_flag =	D_MP,		/* safe for multi-threaded access */
	.cb_rev =	CB_REV,
	.cb_aread =	nodev,
	.cb_awrite =	nodev
};
2627 2614
/* Driver operations vector: attach/detach plus fast-reboot quiesce. */
static struct dev_ops pvscsi_ops = {
	.devo_rev =	DEVO_REV,
	.devo_refcnt =	0,
	.devo_getinfo =	ddi_no_info,
	.devo_identify = nulldev,
	.devo_probe =	nulldev,
	.devo_attach =	pvscsi_attach,
	.devo_detach =	pvscsi_detach,
	.devo_reset =	nodev,
	.devo_cb_ops =	&pvscsi_cb_ops,
	.devo_bus_ops =	NULL,
	.devo_power =	NULL,
	.devo_quiesce =	pvscsi_quiesce
};
2642 2629
/* Human-readable module identification string. */
#define	PVSCSI_IDENT	"VMware PVSCSI"

/* Loadable-module linkage: this is a driver module. */
static struct modldrv modldrv = {
	&mod_driverops,
	PVSCSI_IDENT,
	&pvscsi_ops,
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
2656 2643
2657 2644 int
2658 2645 _init(void)
2659 2646 {
2660 2647 int ret;
2661 2648
2662 2649 if ((ret = ddi_soft_state_init(&pvscsi_sstate,
2663 2650 sizeof (struct pvscsi_softc), PVSCSI_INITIAL_SSTATE_ITEMS)) != 0) {
2664 2651 cmn_err(CE_WARN, "!ddi_soft_state_init() failed");
2665 2652 return (ret);
2666 2653 }
2667 2654
2668 2655 if ((ret = scsi_hba_init(&modlinkage)) != 0) {
2669 2656 cmn_err(CE_WARN, "!scsi_hba_init() failed");
2670 2657 ddi_soft_state_fini(&pvscsi_sstate);
2671 2658 return (ret);
2672 2659 }
2673 2660
2674 2661 if ((ret = mod_install(&modlinkage)) != 0) {
2675 2662 cmn_err(CE_WARN, "!mod_install() failed");
2676 2663 ddi_soft_state_fini(&pvscsi_sstate);
2677 2664 scsi_hba_fini(&modlinkage);
2678 2665 }
2679 2666
2680 2667 return (ret);
2681 2668 }
2682 2669
/* Module info entry point: report linkage details to modinfo(8). */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
2688 2675
2689 2676 int
2690 2677 _fini(void)
2691 2678 {
2692 2679 int ret;
2693 2680
2694 2681 if ((ret = mod_remove(&modlinkage)) == 0) {
2695 2682 ddi_soft_state_fini(&pvscsi_sstate);
2696 2683 scsi_hba_fini(&modlinkage);
2697 2684 }
2698 2685
2699 2686 return (ret);
2700 2687 }
↓ open down ↓ |
2314 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX