7127 remove -Wno-missing-braces from Makefile.uts
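
The webrev below shows usr/src/uts/intel/io/amr/amr.c after gcc's -Wmissing-braces warning is no longer suppressed; the only change is the modlinkage initializer, which gains inner braces. For context (an editor's sketch, not part of the webrev): the second member of struct modlinkage in <sys/modctl.h> is an array of pointers (ml_linkage[]), so the old flat initializer relied on brace elision, which is exactly what the warning flags. A minimal stand-alone illustration, using a simplified stand-in struct:

    #include <stddef.h>

    /* simplified stand-in for struct modlinkage (<sys/modctl.h>) */
    struct fake_modlinkage {
            int     ml_rev;                 /* MODREV_1 in the driver */
            void    *ml_linkage[4];         /* array member wants its own braces */
    };

    static char dummy_drv;                  /* stand-in for modldrv */

    /* old form: legal C, but the elided braces trip -Wmissing-braces */
    static struct fake_modlinkage before = {
            1, &dummy_drv, NULL
    };

    /* new form, matching this change: the array sub-object is fully braced */
    static struct fake_modlinkage after = {
            1, { &dummy_drv, NULL }
    };

Both forms initialize the same object; the braced one simply spells out the aggregate nesting, which is all the warning asks for.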
--- old/usr/src/uts/intel/io/amr/amr.c
+++ new/usr/src/uts/intel/io/amr/amr.c
1 1 /*
2 2 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
3 3 * Use is subject to license terms.
4 4 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
5 5 */
6 6 /*
7 7 * Copyright (c) 1999,2000 Michael Smith
8 8 * Copyright (c) 2000 BSDi
9 9 * All rights reserved.
10 10 *
11 11 * Redistribution and use in source and binary forms, with or without
12 12 * modification, are permitted provided that the following conditions
13 13 * are met:
14 14 * 1. Redistributions of source code must retain the above copyright
15 15 * notice, this list of conditions and the following disclaimer.
16 16 * 2. Redistributions in binary form must reproduce the above copyright
17 17 * notice, this list of conditions and the following disclaimer in the
18 18 * documentation and/or other materials provided with the distribution.
19 19 *
20 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
21 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
22 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
24 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
25 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
26 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
27 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
28 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
29 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 30 * SUCH DAMAGE.
31 31 */
32 32 /*
33 33 * Copyright (c) 2002 Eric Moore
34 34 * Copyright (c) 2002 LSI Logic Corporation
35 35 * All rights reserved.
36 36 *
37 37 * Redistribution and use in source and binary forms, with or without
38 38 * modification, are permitted provided that the following conditions
39 39 * are met:
40 40 * 1. Redistributions of source code must retain the above copyright
41 41 * notice, this list of conditions and the following disclaimer.
42 42 * 2. Redistributions in binary form must reproduce the above copyright
43 43 * notice, this list of conditions and the following disclaimer in the
44 44 * documentation and/or other materials provided with the distribution.
45 45 * 3. The party using or redistributing the source code and binary forms
46 46 * agrees to the disclaimer below and the terms and conditions set forth
47 47 * herein.
48 48 *
49 49 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
50 50 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
51 51 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
52 52 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
53 53 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
54 54 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
55 55 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
56 56 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
57 57 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
58 58 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
59 59 * SUCH DAMAGE.
60 60 */
61 61
62 62 #include <sys/int_types.h>
63 63 #include <sys/scsi/scsi.h>
64 64 #include <sys/dkbad.h>
65 65 #include <sys/dklabel.h>
66 66 #include <sys/dkio.h>
67 67 #include <sys/cdio.h>
68 68 #include <sys/mhd.h>
69 69 #include <sys/vtoc.h>
70 70 #include <sys/dktp/fdisk.h>
71 71 #include <sys/scsi/targets/sddef.h>
72 72 #include <sys/debug.h>
73 73 #include <sys/pci.h>
74 74 #include <sys/ksynch.h>
75 75 #include <sys/ddi.h>
76 76 #include <sys/sunddi.h>
77 77 #include <sys/modctl.h>
78 78 #include <sys/byteorder.h>
79 79
80 80 #include "amrreg.h"
81 81 #include "amrvar.h"
82 82
83 83 /* dynamic debug symbol */
84 84 int amr_debug_var = 0;
85 85
86 86 #define AMR_DELAY(cond, count, done_flag) { \
87 87 int local_counter = 0; \
88 88 done_flag = 1; \
89 89 while (!(cond)) { \
90 90 delay(drv_usectohz(100)); \
91 91 if ((local_counter) > count) { \
92 92 done_flag = 0; \
93 93 break; \
94 94 } \
95 95 (local_counter)++; \
96 96 } \
97 97 }
98 98
99 99 #define AMR_BUSYWAIT(cond, count, done_flag) { \
100 100 int local_counter = 0; \
101 101 done_flag = 1; \
102 102 while (!(cond)) { \
103 103 drv_usecwait(100); \
104 104 if ((local_counter) > count) { \
105 105 done_flag = 0; \
106 106 break; \
107 107 } \
108 108 (local_counter)++; \
109 109 } \
110 110 }
111 111
112 112 /*
113 113 * driver interfaces
114 114 */
115 115
116 116 static uint_t amr_intr(caddr_t arg);
117 117 static void amr_done(struct amr_softs *softs);
118 118
119 119 static int amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
120 120 void *arg, void **result);
121 121 static int amr_attach(dev_info_t *, ddi_attach_cmd_t);
122 122 static int amr_detach(dev_info_t *, ddi_detach_cmd_t);
123 123
124 124 static int amr_setup_mbox(struct amr_softs *softs);
125 125 static int amr_setup_sg(struct amr_softs *softs);
126 126
127 127 /*
128 128 * Command wrappers
129 129 */
130 130 static int amr_query_controller(struct amr_softs *softs);
131 131 static void *amr_enquiry(struct amr_softs *softs, size_t bufsize,
132 132 uint8_t cmd, uint8_t cmdsub, uint8_t cmdqual);
133 133 static int amr_flush(struct amr_softs *softs);
134 134
135 135 /*
136 136 * Command processing.
137 137 */
138 138 static void amr_rw_command(struct amr_softs *softs,
139 139 struct scsi_pkt *pkt, int lun);
140 140 static void amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp,
141 141 unsigned int capacity);
142 142 static void amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key);
143 143 static int amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size);
144 144 static void amr_enquiry_unmapcmd(struct amr_command *ac);
145 145 static int amr_mapcmd(struct amr_command *ac, int (*callback)(), caddr_t arg);
146 146 static void amr_unmapcmd(struct amr_command *ac);
147 147
148 148 /*
149 149 * Status monitoring
150 150 */
151 151 static void amr_periodic(void *data);
152 152
153 153 /*
154 154 * Interface-specific shims
155 155 */
156 156 static int amr_poll_command(struct amr_command *ac);
157 157 static void amr_start_waiting_queue(void *softp);
158 158 static void amr_call_pkt_comp(struct amr_command *head);
159 159
160 160 /*
161 161 * SCSI interface
162 162 */
163 163 static int amr_setup_tran(dev_info_t *dip, struct amr_softs *softp);
164 164
165 165 /*
166 166 * Function prototypes
167 167 *
168 168 * SCSA functions exported by means of the transport table
169 169 */
170 170 static int amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
171 171 scsi_hba_tran_t *tran, struct scsi_device *sd);
172 172 static int amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
173 173 static int amr_tran_reset(struct scsi_address *ap, int level);
174 174 static int amr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
175 175 static int amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
176 176 int whom);
177 177 static struct scsi_pkt *amr_tran_init_pkt(struct scsi_address *ap,
178 178 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
179 179 int tgtlen, int flags, int (*callback)(), caddr_t arg);
180 180 static void amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
181 181 static void amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
182 182 static void amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt);
183 183
184 184 static ddi_dma_attr_t buffer_dma_attr = {
185 185 DMA_ATTR_V0, /* version of this structure */
186 186 0, /* lowest usable address */
187 187 0xffffffffull, /* highest usable address */
188 188 0x00ffffffull, /* maximum DMAable byte count */
189 189 4, /* alignment */
190 190 1, /* burst sizes */
191 191 1, /* minimum transfer */
192 192 0xffffffffull, /* maximum transfer */
193 193 0xffffffffull, /* maximum segment length */
194 194 AMR_NSEG, /* maximum number of segments */
195 195 AMR_BLKSIZE, /* granularity */
196 196 0, /* flags (reserved) */
197 197 };
198 198
199 199 static ddi_dma_attr_t addr_dma_attr = {
200 200 DMA_ATTR_V0, /* version of this structure */
201 201 0, /* lowest usable address */
202 202 0xffffffffull, /* highest usable address */
203 203 0x7fffffff, /* maximum DMAable byte count */
204 204 4, /* alignment */
205 205 1, /* burst sizes */
206 206 1, /* minimum transfer */
207 207 0xffffffffull, /* maximum transfer */
208 208 0xffffffffull, /* maximum segment length */
209 209 1, /* maximum number of segments */
210 210 1, /* granularity */
211 211 0, /* flags (reserved) */
212 212 };
213 213
214 214
215 215 static struct dev_ops amr_ops = {
216 216 DEVO_REV, /* devo_rev, */
217 217 0, /* refcnt */
218 218 amr_info, /* info */
219 219 nulldev, /* identify */
220 220 nulldev, /* probe */
221 221 amr_attach, /* attach */
222 222 amr_detach, /* detach */
223 223 nodev, /* reset */
224 224 NULL, /* driver operations */
225 225 (struct bus_ops *)0, /* bus operations */
226 226 0, /* power */
227 227 ddi_quiesce_not_supported, /* devo_quiesce */
228 228 };
229 229
230 230
231 231 extern struct mod_ops mod_driverops;
232 232 static struct modldrv modldrv = {
233 233 &mod_driverops, /* Type of module. driver here */
234 234 "AMR Driver", /* Name of the module. */
235 235 &amr_ops, /* Driver ops vector */
236 236 };
237 237
238 238 static struct modlinkage modlinkage = {
239 239 MODREV_1,
240 - &modldrv,
241 - NULL
240 + { &modldrv, NULL }
242 241 };
243 242
244 243 /* DMA access attributes */
245 244 static ddi_device_acc_attr_t accattr = {
246 245 DDI_DEVICE_ATTR_V0,
247 246 DDI_NEVERSWAP_ACC,
248 247 DDI_STRICTORDER_ACC
249 248 };
250 249
251 250 static struct amr_softs *amr_softstatep;
252 251
253 252
254 253 int
255 254 _init(void)
256 255 {
257 256 int error;
258 257
259 258 error = ddi_soft_state_init((void *)&amr_softstatep,
260 259 sizeof (struct amr_softs), 0);
261 260
262 261 if (error != 0)
263 262 goto error_out;
264 263
265 264 if ((error = scsi_hba_init(&modlinkage)) != 0) {
266 265 ddi_soft_state_fini((void*)&amr_softstatep);
267 266 goto error_out;
268 267 }
269 268
270 269 error = mod_install(&modlinkage);
271 270 if (error != 0) {
272 271 scsi_hba_fini(&modlinkage);
273 272 ddi_soft_state_fini((void*)&amr_softstatep);
274 273 goto error_out;
275 274 }
276 275
277 276 return (error);
278 277
279 278 error_out:
280 279 cmn_err(CE_NOTE, "_init failed");
281 280 return (error);
282 281 }
283 282
284 283 int
285 284 _info(struct modinfo *modinfop)
286 285 {
287 286 return (mod_info(&modlinkage, modinfop));
288 287 }
289 288
290 289 int
291 290 _fini(void)
292 291 {
293 292 int error;
294 293
295 294 if ((error = mod_remove(&modlinkage)) != 0) {
296 295 return (error);
297 296 }
298 297
299 298 scsi_hba_fini(&modlinkage);
300 299
301 300 ddi_soft_state_fini((void*)&amr_softstatep);
302 301 return (error);
303 302 }
304 303
305 304
306 305 static int
307 306 amr_attach(dev_info_t *dev, ddi_attach_cmd_t cmd)
308 307 {
309 308 struct amr_softs *softs;
310 309 int error;
311 310 uint32_t command, i;
312 311 int instance;
313 312 caddr_t cfgaddr;
314 313
315 314 instance = ddi_get_instance(dev);
316 315
317 316 switch (cmd) {
318 317 case DDI_ATTACH:
319 318 break;
320 319
321 320 case DDI_RESUME:
322 321 return (DDI_FAILURE);
323 322
324 323 default:
325 324 return (DDI_FAILURE);
326 325 }
327 326
328 327 /*
329 328 * Initialize softs.
330 329 */
331 330 if (ddi_soft_state_zalloc(amr_softstatep, instance) != DDI_SUCCESS)
332 331 return (DDI_FAILURE);
333 332 softs = ddi_get_soft_state(amr_softstatep, instance);
334 333 softs->state |= AMR_STATE_SOFT_STATE_SETUP;
335 334
336 335 softs->dev_info_p = dev;
337 336
338 337 AMRDB_PRINT((CE_NOTE, "softs: %p; busy_slot addr: %p",
339 338 (void *)softs, (void *)&(softs->amr_busyslots)));
340 339
341 340 if (pci_config_setup(dev, &(softs->pciconfig_handle))
342 341 != DDI_SUCCESS) {
343 342 goto error_out;
344 343 }
345 344 softs->state |= AMR_STATE_PCI_CONFIG_SETUP;
346 345
347 346 error = ddi_regs_map_setup(dev, 1, &cfgaddr, 0, 0,
348 347 &accattr, &(softs->regsmap_handle));
349 348 if (error != DDI_SUCCESS) {
350 349 goto error_out;
351 350 }
352 351 softs->state |= AMR_STATE_PCI_MEM_MAPPED;
353 352
354 353 /*
355 354 * Determine board type.
356 355 */
357 356 command = pci_config_get16(softs->pciconfig_handle, PCI_CONF_COMM);
358 357
359 358 /*
360 359 * Make sure we are going to be able to talk to this board.
361 360 */
362 361 if ((command & PCI_COMM_MAE) == 0) {
363 362 AMRDB_PRINT((CE_NOTE, "memory window not available"));
364 363 goto error_out;
365 364 }
366 365
367 366 /* force the busmaster enable bit on */
368 367 if (!(command & PCI_COMM_ME)) {
369 368 command |= PCI_COMM_ME;
370 369 pci_config_put16(softs->pciconfig_handle,
371 370 PCI_CONF_COMM, command);
372 371 command = pci_config_get16(softs->pciconfig_handle,
373 372 PCI_CONF_COMM);
374 373 if (!(command & PCI_COMM_ME))
375 374 goto error_out;
376 375 }
377 376
378 377 /*
379 378 * Allocate and connect our interrupt.
380 379 */
381 380 if (ddi_intr_hilevel(dev, 0) != 0) {
382 381 AMRDB_PRINT((CE_NOTE,
383 382 "High level interrupt is not supported!"));
384 383 goto error_out;
385 384 }
386 385
387 386 if (ddi_get_iblock_cookie(dev, 0, &softs->iblock_cookiep)
388 387 != DDI_SUCCESS) {
389 388 goto error_out;
390 389 }
391 390
 392 391 	mutex_init(&softs->cmd_mutex, NULL, MUTEX_DRIVER,
 393 392 	    softs->iblock_cookiep); /* used in interrupt context */
 394 393 	mutex_init(&softs->queue_mutex, NULL, MUTEX_DRIVER,
 395 394 	    softs->iblock_cookiep); /* used in interrupt context */
 396 395 	mutex_init(&softs->periodic_mutex, NULL, MUTEX_DRIVER,
 397 396 	    softs->iblock_cookiep); /* used in interrupt context */
 398 397 	/* synchronize waits for the busy slots via this cv */
399 398 cv_init(&softs->cmd_cv, NULL, CV_DRIVER, NULL);
400 399 softs->state |= AMR_STATE_KMUTEX_INITED;
401 400
402 401 /*
403 402 * Do bus-independent initialisation, bring controller online.
404 403 */
405 404 if (amr_setup_mbox(softs) != DDI_SUCCESS)
406 405 goto error_out;
407 406 softs->state |= AMR_STATE_MAILBOX_SETUP;
408 407
409 408 if (amr_setup_sg(softs) != DDI_SUCCESS)
410 409 goto error_out;
411 410
412 411 softs->state |= AMR_STATE_SG_TABLES_SETUP;
413 412
414 413 if (amr_query_controller(softs) != DDI_SUCCESS)
415 414 goto error_out;
416 415
417 416 /*
 418 417 	 * A taskq is created for dispatching the waiting queue processing
 419 418 	 * threads. The number of threads equals the number of logical
 420 419 	 * drives, or 1 if no logical drive is configured for this
 421 420 	 * instance.
422 421 */
423 422 if ((softs->amr_taskq = ddi_taskq_create(dev, "amr_taskq",
424 423 MAX(softs->amr_nlogdrives, 1), TASKQ_DEFAULTPRI, 0)) == NULL) {
425 424 goto error_out;
426 425 }
427 426 softs->state |= AMR_STATE_TASKQ_SETUP;
428 427
429 428 if (ddi_add_intr(dev, 0, &softs->iblock_cookiep, NULL,
430 429 amr_intr, (caddr_t)softs) != DDI_SUCCESS) {
431 430 goto error_out;
432 431 }
433 432 softs->state |= AMR_STATE_INTR_SETUP;
434 433
435 434 /* set up the tran interface */
436 435 if (amr_setup_tran(softs->dev_info_p, softs) != DDI_SUCCESS) {
437 436 AMRDB_PRINT((CE_NOTE, "setup tran failed"));
438 437 goto error_out;
439 438 }
440 439 softs->state |= AMR_STATE_TRAN_SETUP;
441 440
442 441 /* schedule a thread for periodic check */
443 442 mutex_enter(&softs->periodic_mutex);
444 443 softs->timeout_t = timeout(amr_periodic, (void *)softs,
445 444 drv_usectohz(500000*AMR_PERIODIC_TIMEOUT));
446 445 softs->state |= AMR_STATE_TIMEOUT_ENABLED;
447 446 mutex_exit(&softs->periodic_mutex);
448 447
449 448 /* print firmware information in verbose mode */
450 449 cmn_err(CE_CONT, "?MegaRaid %s %s attached.",
451 450 softs->amr_product_info.pi_product_name,
452 451 softs->amr_product_info.pi_firmware_ver);
453 452
454 453 /* clear any interrupts */
455 454 AMR_QCLEAR_INTR(softs);
456 455 return (DDI_SUCCESS);
457 456
458 457 error_out:
459 458 if (softs->state & AMR_STATE_INTR_SETUP) {
460 459 ddi_remove_intr(dev, 0, softs->iblock_cookiep);
461 460 }
462 461 if (softs->state & AMR_STATE_TASKQ_SETUP) {
463 462 ddi_taskq_destroy(softs->amr_taskq);
464 463 }
465 464 if (softs->state & AMR_STATE_SG_TABLES_SETUP) {
466 465 for (i = 0; i < softs->sg_max_count; i++) {
467 466 (void) ddi_dma_unbind_handle(
468 467 softs->sg_items[i].sg_handle);
469 468 (void) ddi_dma_mem_free(
470 469 &((softs->sg_items[i]).sg_acc_handle));
471 470 (void) ddi_dma_free_handle(
472 471 &(softs->sg_items[i].sg_handle));
473 472 }
474 473 }
475 474 if (softs->state & AMR_STATE_MAILBOX_SETUP) {
476 475 (void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
477 476 (void) ddi_dma_mem_free(&softs->mbox_acc_handle);
478 477 (void) ddi_dma_free_handle(&softs->mbox_dma_handle);
479 478 }
480 479 if (softs->state & AMR_STATE_KMUTEX_INITED) {
481 480 mutex_destroy(&softs->queue_mutex);
482 481 mutex_destroy(&softs->cmd_mutex);
483 482 mutex_destroy(&softs->periodic_mutex);
484 483 cv_destroy(&softs->cmd_cv);
485 484 }
486 485 if (softs->state & AMR_STATE_PCI_MEM_MAPPED)
487 486 ddi_regs_map_free(&softs->regsmap_handle);
488 487 if (softs->state & AMR_STATE_PCI_CONFIG_SETUP)
489 488 pci_config_teardown(&softs->pciconfig_handle);
490 489 if (softs->state & AMR_STATE_SOFT_STATE_SETUP)
491 490 ddi_soft_state_free(amr_softstatep, instance);
492 491 return (DDI_FAILURE);
493 492 }
494 493
495 494 /*
496 495 * Bring the controller down to a dormant state and detach all child devices.
 497 496  * This function is called during detach and system shutdown.
498 497 *
499 498 * Note that we can assume that the bufq on the controller is empty, as we won't
500 499 * allow shutdown if any device is open.
501 500 */
502 501 /*ARGSUSED*/
503 502 static int amr_detach(dev_info_t *dev, ddi_detach_cmd_t cmd)
504 503 {
505 504 struct amr_softs *softs;
506 505 int instance;
507 506 uint32_t i, done_flag;
508 507
509 508 instance = ddi_get_instance(dev);
510 509 softs = ddi_get_soft_state(amr_softstatep, instance);
511 510
 512 511 	/* flush the controller */
513 512 if (amr_flush(softs) != 0) {
514 513 AMRDB_PRINT((CE_NOTE, "device shutdown failed"));
515 514 return (EIO);
516 515 }
517 516
518 517 /* release the amr timer */
519 518 mutex_enter(&softs->periodic_mutex);
520 519 softs->state &= ~AMR_STATE_TIMEOUT_ENABLED;
521 520 if (softs->timeout_t) {
522 521 (void) untimeout(softs->timeout_t);
523 522 softs->timeout_t = 0;
524 523 }
525 524 mutex_exit(&softs->periodic_mutex);
526 525
527 526 for (i = 0; i < softs->sg_max_count; i++) {
528 527 (void) ddi_dma_unbind_handle(
529 528 softs->sg_items[i].sg_handle);
530 529 (void) ddi_dma_mem_free(
531 530 &((softs->sg_items[i]).sg_acc_handle));
532 531 (void) ddi_dma_free_handle(
533 532 &(softs->sg_items[i].sg_handle));
534 533 }
535 534
536 535 (void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
537 536 (void) ddi_dma_mem_free(&softs->mbox_acc_handle);
538 537 (void) ddi_dma_free_handle(&softs->mbox_dma_handle);
539 538
540 539 /* disconnect the interrupt handler */
541 540 ddi_remove_intr(softs->dev_info_p, 0, softs->iblock_cookiep);
542 541
 543 542 	/* wait for the completion of any in-progress interrupts */
544 543 AMR_DELAY((softs->amr_interrupts_counter == 0), 1000, done_flag);
545 544 if (!done_flag) {
546 545 cmn_err(CE_WARN, "Suspicious interrupts in-progress.");
547 546 }
548 547
549 548 ddi_taskq_destroy(softs->amr_taskq);
550 549
551 550 (void) scsi_hba_detach(dev);
552 551 scsi_hba_tran_free(softs->hba_tran);
553 552 ddi_regs_map_free(&softs->regsmap_handle);
554 553 pci_config_teardown(&softs->pciconfig_handle);
555 554
556 555 mutex_destroy(&softs->queue_mutex);
557 556 mutex_destroy(&softs->cmd_mutex);
558 557 mutex_destroy(&softs->periodic_mutex);
559 558 cv_destroy(&softs->cmd_cv);
560 559
561 560 /* print firmware information in verbose mode */
562 561 cmn_err(CE_NOTE, "?MegaRaid %s %s detached.",
563 562 softs->amr_product_info.pi_product_name,
564 563 softs->amr_product_info.pi_firmware_ver);
565 564
566 565 ddi_soft_state_free(amr_softstatep, instance);
567 566
568 567 return (DDI_SUCCESS);
569 568 }
570 569
571 570
572 571 /*ARGSUSED*/
573 572 static int amr_info(dev_info_t *dip, ddi_info_cmd_t infocmd,
574 573 void *arg, void **result)
575 574 {
576 575 struct amr_softs *softs;
577 576 int instance;
578 577
579 578 instance = ddi_get_instance(dip);
580 579
581 580 switch (infocmd) {
582 581 case DDI_INFO_DEVT2DEVINFO:
583 582 softs = ddi_get_soft_state(amr_softstatep, instance);
584 583 if (softs != NULL) {
585 584 *result = softs->dev_info_p;
586 585 return (DDI_SUCCESS);
587 586 } else {
588 587 *result = NULL;
589 588 return (DDI_FAILURE);
590 589 }
591 590 case DDI_INFO_DEVT2INSTANCE:
592 591 *(int *)result = instance;
593 592 break;
594 593 default:
595 594 break;
596 595 }
597 596 return (DDI_SUCCESS);
598 597 }
599 598
600 599 /*
601 600 * Take an interrupt, or be poked by other code to look for interrupt-worthy
602 601 * status.
603 602 */
604 603 static uint_t
605 604 amr_intr(caddr_t arg)
606 605 {
607 606 struct amr_softs *softs = (struct amr_softs *)arg;
608 607
609 608 softs->amr_interrupts_counter++;
610 609
611 610 if (AMR_QGET_ODB(softs) != AMR_QODB_READY) {
612 611 softs->amr_interrupts_counter--;
613 612 return (DDI_INTR_UNCLAIMED);
614 613 }
615 614
616 615 /* collect finished commands, queue anything waiting */
617 616 amr_done(softs);
618 617
619 618 softs->amr_interrupts_counter--;
620 619
621 620 return (DDI_INTR_CLAIMED);
622 621
623 622 }
624 623
625 624 /*
626 625 * Setup the amr mailbox
627 626 */
628 627 static int
629 628 amr_setup_mbox(struct amr_softs *softs)
630 629 {
631 630 uint32_t move;
632 631 size_t mbox_len;
633 632
634 633 if (ddi_dma_alloc_handle(
635 634 softs->dev_info_p,
636 635 &addr_dma_attr,
637 636 DDI_DMA_SLEEP,
638 637 NULL,
639 638 &softs->mbox_dma_handle) != DDI_SUCCESS) {
640 639 AMRDB_PRINT((CE_NOTE, "Cannot alloc dma handle for mailbox"));
641 640 goto error_out;
642 641 }
643 642
644 643 if (ddi_dma_mem_alloc(
645 644 softs->mbox_dma_handle,
646 645 sizeof (struct amr_mailbox) + 16,
647 646 &accattr,
648 647 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
649 648 DDI_DMA_SLEEP,
650 649 NULL,
651 650 (caddr_t *)(&softs->mbox),
652 651 &mbox_len,
653 652 &softs->mbox_acc_handle) !=
654 653 DDI_SUCCESS) {
655 654
656 655 AMRDB_PRINT((CE_WARN, "Cannot alloc dma memory for mailbox"));
657 656 goto error_out;
658 657 }
659 658
660 659 if (ddi_dma_addr_bind_handle(
661 660 softs->mbox_dma_handle,
662 661 NULL,
663 662 (caddr_t)softs->mbox,
664 663 mbox_len,
665 664 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
666 665 DDI_DMA_SLEEP,
667 666 NULL,
668 667 &softs->mbox_dma_cookie,
669 668 &softs->mbox_dma_cookien) != DDI_DMA_MAPPED) {
670 669
671 670 AMRDB_PRINT((CE_NOTE, "Cannot bind dma memory for mailbox"));
672 671 goto error_out;
673 672 }
674 673
675 674 if (softs->mbox_dma_cookien != 1)
676 675 goto error_out;
677 676
 678 677 	/* The physical address of the mailbox must be 16-byte aligned */
679 678 move = 16 - (((uint32_t)softs->mbox_dma_cookie.dmac_address)&0xf);
680 679 softs->mbox_phyaddr =
681 680 (softs->mbox_dma_cookie.dmac_address + move);
682 681
683 682 softs->mailbox =
684 683 (struct amr_mailbox *)(((uintptr_t)softs->mbox) + move);
685 684
 686 685 	AMRDB_PRINT((CE_NOTE, "phyaddr=%x, mailbox=%p, softs->mbox=%p, move=%x",
687 686 softs->mbox_phyaddr, (void *)softs->mailbox,
688 687 softs->mbox, move));
689 688
690 689 return (DDI_SUCCESS);
691 690
692 691 error_out:
693 692 if (softs->mbox_dma_cookien)
694 693 (void) ddi_dma_unbind_handle(softs->mbox_dma_handle);
695 694 if (softs->mbox_acc_handle) {
696 695 (void) ddi_dma_mem_free(&(softs->mbox_acc_handle));
697 696 softs->mbox_acc_handle = NULL;
698 697 }
699 698 if (softs->mbox_dma_handle) {
700 699 (void) ddi_dma_free_handle(&softs->mbox_dma_handle);
701 700 softs->mbox_dma_handle = NULL;
702 701 }
703 702
704 703 return (DDI_FAILURE);
705 704 }
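
Editor's aside (not part of the webrev): the move/mbox_phyaddr computation in amr_setup_mbox() above is the usual by-hand alignment trick for when the DMA attributes cannot express a constraint: allocate sizeof (struct amr_mailbox) + 16, then round the bound address up to the next 16-byte boundary. A stand-alone sketch of the arithmetic, with a hypothetical helper name:

    #include <stdint.h>

    /*
     * Round an address up to the next 16-byte boundary, mirroring the
     * driver's "move" computation. An already-aligned address still
     * moves by a full 16 bytes, which is why the mailbox allocation
     * over-allocates by 16 rather than 15.
     */
    static inline uintptr_t
    align16(uintptr_t addr)
    {
            uintptr_t move = 16 - (addr & 0xf);

            return (addr + move);
    }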
706 705
707 706 /*
708 707 * Perform a periodic check of the controller status
709 708 */
710 709 static void
711 710 amr_periodic(void *data)
712 711 {
713 712 uint32_t i;
714 713 struct amr_softs *softs = (struct amr_softs *)data;
715 714 struct scsi_pkt *pkt;
716 715 register struct amr_command *ac;
717 716
718 717 for (i = 0; i < softs->sg_max_count; i++) {
719 718 if (softs->busycmd[i] == NULL)
720 719 continue;
721 720
722 721 mutex_enter(&softs->cmd_mutex);
723 722
724 723 if (softs->busycmd[i] == NULL) {
725 724 mutex_exit(&softs->cmd_mutex);
726 725 continue;
727 726 }
728 727
729 728 pkt = softs->busycmd[i]->pkt;
730 729
731 730 if ((pkt->pkt_time != 0) &&
732 731 (ddi_get_time() -
733 732 softs->busycmd[i]->ac_timestamp >
734 733 pkt->pkt_time)) {
735 734
736 735 cmn_err(CE_WARN,
 737 736 			    "!timed out packet detected, "
 738 737 			    "sc = %p, pkt = %p, index = %d, ac = %p",
739 738 (void *)softs,
740 739 (void *)pkt,
741 740 i,
742 741 (void *)softs->busycmd[i]);
743 742
744 743 ac = softs->busycmd[i];
745 744 ac->ac_next = NULL;
746 745
747 746 /* pull command from the busy index */
748 747 softs->busycmd[i] = NULL;
749 748 if (softs->amr_busyslots > 0)
750 749 softs->amr_busyslots--;
751 750 if (softs->amr_busyslots == 0)
752 751 cv_broadcast(&softs->cmd_cv);
753 752
754 753 mutex_exit(&softs->cmd_mutex);
755 754
756 755 pkt = ac->pkt;
757 756 *pkt->pkt_scbp = 0;
758 757 pkt->pkt_statistics |= STAT_TIMEOUT;
759 758 pkt->pkt_reason = CMD_TIMEOUT;
760 759 if (!(pkt->pkt_flags & FLAG_NOINTR)) {
761 760 /* call pkt callback */
762 761 scsi_hba_pkt_comp(pkt);
763 762 }
764 763
765 764 } else {
766 765 mutex_exit(&softs->cmd_mutex);
767 766 }
768 767 }
769 768
770 769 /* restart the amr timer */
771 770 mutex_enter(&softs->periodic_mutex);
772 771 if (softs->state & AMR_STATE_TIMEOUT_ENABLED)
773 772 softs->timeout_t = timeout(amr_periodic, (void *)softs,
774 773 drv_usectohz(500000*AMR_PERIODIC_TIMEOUT));
775 774 mutex_exit(&softs->periodic_mutex);
776 775 }
777 776
778 777 /*
779 778 * Interrogate the controller for the operational parameters we require.
780 779 */
781 780 static int
782 781 amr_query_controller(struct amr_softs *softs)
783 782 {
784 783 struct amr_enquiry3 *aex;
785 784 struct amr_prodinfo *ap;
786 785 struct amr_enquiry *ae;
787 786 uint32_t ldrv;
788 787 int instance;
789 788
790 789 /*
791 790 * If we haven't found the real limit yet, let us have a couple of
792 791 * commands in order to be able to probe.
793 792 */
794 793 if (softs->maxio == 0)
795 794 softs->maxio = 2;
796 795
797 796 instance = ddi_get_instance(softs->dev_info_p);
798 797
799 798 /*
800 799 * Try to issue an ENQUIRY3 command
801 800 */
802 801 if ((aex = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE, AMR_CMD_CONFIG,
803 802 AMR_CONFIG_ENQ3, AMR_CONFIG_ENQ3_SOLICITED_FULL)) != NULL) {
804 803
805 804 AMRDB_PRINT((CE_NOTE, "First enquiry"));
806 805
807 806 for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
808 807 softs->logic_drive[ldrv].al_size =
809 808 aex->ae_drivesize[ldrv];
810 809 softs->logic_drive[ldrv].al_state =
811 810 aex->ae_drivestate[ldrv];
812 811 softs->logic_drive[ldrv].al_properties =
813 812 aex->ae_driveprop[ldrv];
814 813 AMRDB_PRINT((CE_NOTE,
815 814 " drive %d: size: %d state %x properties %x\n",
816 815 ldrv,
817 816 softs->logic_drive[ldrv].al_size,
818 817 softs->logic_drive[ldrv].al_state,
819 818 softs->logic_drive[ldrv].al_properties));
820 819
821 820 if (softs->logic_drive[ldrv].al_state ==
822 821 AMR_LDRV_OFFLINE)
823 822 cmn_err(CE_NOTE,
824 823 "!instance %d log-drive %d is offline",
825 824 instance, ldrv);
826 825 else
827 826 softs->amr_nlogdrives++;
828 827 }
829 828 kmem_free(aex, AMR_ENQ_BUFFER_SIZE);
830 829
831 830 if ((ap = amr_enquiry(softs, AMR_ENQ_BUFFER_SIZE,
832 831 AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0)) == NULL) {
833 832 AMRDB_PRINT((CE_NOTE,
834 833 "Cannot obtain product data from controller"));
835 834 return (EIO);
836 835 }
837 836
838 837 softs->maxdrives = AMR_40LD_MAXDRIVES;
839 838 softs->maxchan = ap->ap_nschan;
840 839 softs->maxio = ap->ap_maxio;
841 840
842 841 bcopy(ap->ap_firmware, softs->amr_product_info.pi_firmware_ver,
843 842 AMR_FIRMWARE_VER_SIZE);
844 843 softs->amr_product_info.
845 844 pi_firmware_ver[AMR_FIRMWARE_VER_SIZE] = 0;
846 845
847 846 bcopy(ap->ap_product, softs->amr_product_info.pi_product_name,
848 847 AMR_PRODUCT_INFO_SIZE);
849 848 softs->amr_product_info.
850 849 pi_product_name[AMR_PRODUCT_INFO_SIZE] = 0;
851 850
852 851 kmem_free(ap, AMR_ENQ_BUFFER_SIZE);
853 852 AMRDB_PRINT((CE_NOTE, "maxio=%d", softs->maxio));
854 853 } else {
855 854
 856 855 		AMRDB_PRINT((CE_NOTE, "First enquiry failed, "
 857 856 		    "so try another way"));
858 857
859 858 /* failed, try the 8LD ENQUIRY commands */
860 859 if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
861 860 AMR_ENQ_BUFFER_SIZE, AMR_CMD_EXT_ENQUIRY2, 0, 0))
862 861 == NULL) {
863 862
864 863 if ((ae = (struct amr_enquiry *)amr_enquiry(softs,
865 864 AMR_ENQ_BUFFER_SIZE, AMR_CMD_ENQUIRY, 0, 0))
866 865 == NULL) {
867 866 AMRDB_PRINT((CE_NOTE,
868 867 "Cannot obtain configuration data"));
869 868 return (EIO);
870 869 }
871 870 ae->ae_signature = 0;
872 871 }
873 872
874 873 /*
875 874 * Fetch current state of logical drives.
876 875 */
877 876 for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
878 877 softs->logic_drive[ldrv].al_size =
879 878 ae->ae_ldrv.al_size[ldrv];
880 879 softs->logic_drive[ldrv].al_state =
881 880 ae->ae_ldrv.al_state[ldrv];
882 881 softs->logic_drive[ldrv].al_properties =
883 882 ae->ae_ldrv.al_properties[ldrv];
884 883 AMRDB_PRINT((CE_NOTE,
885 884 " ********* drive %d: %d state %x properties %x",
886 885 ldrv,
887 886 softs->logic_drive[ldrv].al_size,
888 887 softs->logic_drive[ldrv].al_state,
889 888 softs->logic_drive[ldrv].al_properties));
890 889
891 890 if (softs->logic_drive[ldrv].al_state ==
892 891 AMR_LDRV_OFFLINE)
893 892 cmn_err(CE_NOTE,
894 893 "!instance %d log-drive %d is offline",
895 894 instance, ldrv);
896 895 else
897 896 softs->amr_nlogdrives++;
898 897 }
899 898
900 899 softs->maxdrives = AMR_8LD_MAXDRIVES;
901 900 softs->maxchan = ae->ae_adapter.aa_channels;
902 901 softs->maxio = ae->ae_adapter.aa_maxio;
903 902 kmem_free(ae, AMR_ENQ_BUFFER_SIZE);
904 903 }
905 904
906 905 /*
907 906 * Mark remaining drives as unused.
908 907 */
909 908 for (; ldrv < AMR_MAXLD; ldrv++)
910 909 softs->logic_drive[ldrv].al_state = AMR_LDRV_OFFLINE;
911 910
912 911 /*
913 912 * Cap the maximum number of outstanding I/Os. AMI's driver
914 913 * doesn't trust the controller's reported value, and lockups have
915 914 * been seen when we do.
916 915 */
917 916 softs->maxio = MIN(softs->maxio, AMR_LIMITCMD);
918 917
919 918 return (DDI_SUCCESS);
920 919 }
921 920
922 921 /*
923 922 * Run a generic enquiry-style command.
924 923 */
925 924 static void *
926 925 amr_enquiry(struct amr_softs *softs, size_t bufsize, uint8_t cmd,
927 926 uint8_t cmdsub, uint8_t cmdqual)
928 927 {
929 928 struct amr_command ac;
930 929 void *result;
931 930
932 931 result = NULL;
933 932
934 933 bzero(&ac, sizeof (struct amr_command));
935 934 ac.ac_softs = softs;
936 935
937 936 /* set command flags */
938 937 ac.ac_flags |= AMR_CMD_DATAOUT;
939 938
940 939 /* build the command proper */
941 940 ac.mailbox.mb_command = cmd;
942 941 ac.mailbox.mb_cmdsub = cmdsub;
943 942 ac.mailbox.mb_cmdqual = cmdqual;
944 943
945 944 if (amr_enquiry_mapcmd(&ac, bufsize) != DDI_SUCCESS)
946 945 return (NULL);
947 946
948 947 if (amr_poll_command(&ac) || ac.ac_status != 0) {
 949 948 		AMRDB_PRINT((CE_NOTE, "cannot poll command, goto out"));
950 949 amr_enquiry_unmapcmd(&ac);
951 950 return (NULL);
952 951 }
953 952
954 953 /* allocate the response structure */
955 954 result = kmem_zalloc(bufsize, KM_SLEEP);
956 955
957 956 bcopy(ac.ac_data, result, bufsize);
958 957
959 958 amr_enquiry_unmapcmd(&ac);
960 959 return (result);
961 960 }
962 961
963 962 /*
964 963 * Flush the controller's internal cache, return status.
965 964 */
966 965 static int
967 966 amr_flush(struct amr_softs *softs)
968 967 {
969 968 struct amr_command ac;
970 969 int error = 0;
971 970
972 971 bzero(&ac, sizeof (struct amr_command));
973 972 ac.ac_softs = softs;
974 973
975 974 ac.ac_flags |= AMR_CMD_DATAOUT;
976 975
977 976 /* build the command proper */
978 977 ac.mailbox.mb_command = AMR_CMD_FLUSH;
979 978
980 979 /* have to poll, as the system may be going down or otherwise damaged */
981 980 if (error = amr_poll_command(&ac)) {
 982 981 		AMRDB_PRINT((CE_NOTE, "cannot poll this cmd"));
983 982 return (error);
984 983 }
985 984
986 985 return (error);
987 986 }
988 987
989 988 /*
990 989 * Take a command, submit it to the controller and wait for it to return.
991 990 * Returns nonzero on error. Can be safely called with interrupts enabled.
992 991 */
993 992 static int
994 993 amr_poll_command(struct amr_command *ac)
995 994 {
996 995 struct amr_softs *softs = ac->ac_softs;
997 996 volatile uint32_t done_flag;
998 997
999 998 AMRDB_PRINT((CE_NOTE, "Amr_Poll bcopy(%p, %p, %d)",
1000 999 (void *)&ac->mailbox,
1001 1000 (void *)softs->mailbox,
1002 1001 (uint32_t)AMR_MBOX_CMDSIZE));
1003 1002
1004 1003 mutex_enter(&softs->cmd_mutex);
1005 1004
1006 1005 while (softs->amr_busyslots != 0)
1007 1006 cv_wait(&softs->cmd_cv, &softs->cmd_mutex);
1008 1007
1009 1008 /*
1010 1009 * For read/write commands, the scatter/gather table should be
 1011 1010 	 * filled, and the last entry in the table will be used.
1012 1011 */
1013 1012 if ((ac->mailbox.mb_command == AMR_CMD_LREAD) ||
1014 1013 (ac->mailbox.mb_command == AMR_CMD_LWRITE)) {
1015 1014 bcopy(ac->sgtable,
1016 1015 softs->sg_items[softs->sg_max_count - 1].sg_table,
1017 1016 sizeof (struct amr_sgentry) * AMR_NSEG);
1018 1017
1019 1018 (void) ddi_dma_sync(
1020 1019 softs->sg_items[softs->sg_max_count - 1].sg_handle,
1021 1020 0, 0, DDI_DMA_SYNC_FORDEV);
1022 1021
1023 1022 ac->mailbox.mb_physaddr =
1024 1023 softs->sg_items[softs->sg_max_count - 1].sg_phyaddr;
1025 1024 }
1026 1025
1027 1026 bcopy(&ac->mailbox, (void *)softs->mailbox, AMR_MBOX_CMDSIZE);
1028 1027
1029 1028 /* sync the dma memory */
1030 1029 (void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);
1031 1030
1032 1031 /* clear the poll/ack fields in the mailbox */
1033 1032 softs->mailbox->mb_ident = AMR_POLL_COMMAND_ID;
1034 1033 softs->mailbox->mb_nstatus = AMR_POLL_DEFAULT_NSTATUS;
1035 1034 softs->mailbox->mb_status = AMR_POLL_DEFAULT_STATUS;
1036 1035 softs->mailbox->mb_poll = 0;
1037 1036 softs->mailbox->mb_ack = 0;
1038 1037 softs->mailbox->mb_busy = 1;
1039 1038
1040 1039 AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_SUBMIT);
1041 1040
1042 1041 /* sync the dma memory */
1043 1042 (void) ddi_dma_sync(softs->mbox_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1044 1043
1045 1044 AMR_DELAY((softs->mailbox->mb_nstatus != AMR_POLL_DEFAULT_NSTATUS),
1046 1045 1000, done_flag);
1047 1046 if (!done_flag) {
1048 1047 mutex_exit(&softs->cmd_mutex);
1049 1048 return (1);
1050 1049 }
1051 1050
1052 1051 ac->ac_status = softs->mailbox->mb_status;
1053 1052
1054 1053 AMR_DELAY((softs->mailbox->mb_poll == AMR_POLL_ACK), 1000, done_flag);
1055 1054 if (!done_flag) {
1056 1055 mutex_exit(&softs->cmd_mutex);
1057 1056 return (1);
1058 1057 }
1059 1058
1060 1059 softs->mailbox->mb_poll = 0;
1061 1060 softs->mailbox->mb_ack = AMR_POLL_ACK;
1062 1061
1063 1062 /* acknowledge that we have the commands */
1064 1063 AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);
1065 1064
1066 1065 AMR_DELAY(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK), 1000, done_flag);
1067 1066 if (!done_flag) {
1068 1067 mutex_exit(&softs->cmd_mutex);
1069 1068 return (1);
1070 1069 }
1071 1070
1072 1071 mutex_exit(&softs->cmd_mutex);
1073 1072 return (ac->ac_status != AMR_STATUS_SUCCESS);
1074 1073 }
1075 1074
1076 1075 /*
1077 1076 * setup the scatter/gather table
1078 1077 */
1079 1078 static int
1080 1079 amr_setup_sg(struct amr_softs *softs)
1081 1080 {
1082 1081 uint32_t i;
1083 1082 size_t len;
1084 1083 ddi_dma_cookie_t cookie;
1085 1084 uint_t cookien;
1086 1085
1087 1086 softs->sg_max_count = 0;
1088 1087
1089 1088 for (i = 0; i < AMR_MAXCMD; i++) {
1090 1089
1091 1090 /* reset the cookien */
1092 1091 cookien = 0;
1093 1092
1094 1093 (softs->sg_items[i]).sg_handle = NULL;
1095 1094 if (ddi_dma_alloc_handle(
1096 1095 softs->dev_info_p,
1097 1096 &addr_dma_attr,
1098 1097 DDI_DMA_SLEEP,
1099 1098 NULL,
1100 1099 &((softs->sg_items[i]).sg_handle)) != DDI_SUCCESS) {
1101 1100
1102 1101 AMRDB_PRINT((CE_WARN,
1103 1102 "Cannot alloc dma handle for s/g table"));
1104 1103 goto error_out;
1105 1104 }
1106 1105
1107 1106 if (ddi_dma_mem_alloc((softs->sg_items[i]).sg_handle,
1108 1107 sizeof (struct amr_sgentry) * AMR_NSEG,
1109 1108 &accattr,
1110 1109 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1111 1110 DDI_DMA_SLEEP, NULL,
1112 1111 (caddr_t *)(&(softs->sg_items[i]).sg_table),
1113 1112 &len,
1114 1113 &(softs->sg_items[i]).sg_acc_handle)
1115 1114 != DDI_SUCCESS) {
1116 1115
1117 1116 AMRDB_PRINT((CE_WARN,
1118 1117 "Cannot allocate DMA memory"));
1119 1118 goto error_out;
1120 1119 }
1121 1120
1122 1121 if (ddi_dma_addr_bind_handle(
1123 1122 (softs->sg_items[i]).sg_handle,
1124 1123 NULL,
1125 1124 (caddr_t)((softs->sg_items[i]).sg_table),
1126 1125 len,
1127 1126 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
1128 1127 DDI_DMA_SLEEP,
1129 1128 NULL,
1130 1129 &cookie,
1131 1130 &cookien) != DDI_DMA_MAPPED) {
1132 1131
1133 1132 AMRDB_PRINT((CE_WARN,
1134 1133 "Cannot bind communication area for s/g table"));
1135 1134 goto error_out;
1136 1135 }
1137 1136
1138 1137 if (cookien != 1)
1139 1138 goto error_out;
1140 1139
1141 1140 softs->sg_items[i].sg_phyaddr = cookie.dmac_address;
1142 1141 softs->sg_max_count++;
1143 1142 }
1144 1143
1145 1144 return (DDI_SUCCESS);
1146 1145
1147 1146 error_out:
1148 1147 /*
1149 1148 * Couldn't allocate/initialize all of the sg table entries.
1150 1149 * Clean up the partially-initialized entry before returning.
1151 1150 */
1152 1151 if (cookien) {
1153 1152 (void) ddi_dma_unbind_handle((softs->sg_items[i]).sg_handle);
1154 1153 }
1155 1154 if ((softs->sg_items[i]).sg_acc_handle) {
1156 1155 (void) ddi_dma_mem_free(&((softs->sg_items[i]).sg_acc_handle));
1157 1156 (softs->sg_items[i]).sg_acc_handle = NULL;
1158 1157 }
1159 1158 if ((softs->sg_items[i]).sg_handle) {
1160 1159 (void) ddi_dma_free_handle(&((softs->sg_items[i]).sg_handle));
1161 1160 (softs->sg_items[i]).sg_handle = NULL;
1162 1161 }
1163 1162
1164 1163 /*
1165 1164 * At least two sg table entries are needed. One is for regular data
1166 1165 * I/O commands, the other is for poll I/O commands.
1167 1166 */
1168 1167 return (softs->sg_max_count > 1 ? DDI_SUCCESS : DDI_FAILURE);
1169 1168 }
1170 1169
1171 1170 /*
1172 1171 * Map/unmap (ac)'s data in the controller's addressable space as required.
1173 1172 *
1174 1173 * These functions may be safely called multiple times on a given command.
1175 1174 */
1176 1175 static void
1177 1176 amr_setup_dmamap(struct amr_command *ac, ddi_dma_cookie_t *buffer_dma_cookiep,
1178 1177 int nsegments)
1179 1178 {
1180 1179 struct amr_sgentry *sg;
1181 1180 uint32_t i, size;
1182 1181
1183 1182 sg = ac->sgtable;
1184 1183
1185 1184 size = 0;
1186 1185
1187 1186 ac->mailbox.mb_nsgelem = (uint8_t)nsegments;
1188 1187 for (i = 0; i < nsegments; i++, sg++) {
1189 1188 sg->sg_addr = buffer_dma_cookiep->dmac_address;
1190 1189 sg->sg_count = buffer_dma_cookiep->dmac_size;
1191 1190 size += sg->sg_count;
1192 1191
1193 1192 /*
 1194 1193 		 * There is no next cookie once the end of the
 1195 1194 		 * current window is reached; otherwise, fetch
 1196 1195 		 * the next cookie.
1197 1196 */
1198 1197 if ((ac->current_cookie + i + 1) != ac->num_of_cookie)
1199 1198 ddi_dma_nextcookie(ac->buffer_dma_handle,
1200 1199 buffer_dma_cookiep);
1201 1200 }
1202 1201
1203 1202 ac->transfer_size = size;
1204 1203 ac->data_transfered += size;
1205 1204 }
1206 1205
1207 1206
1208 1207 /*
1209 1208 * map the amr command for enquiry, allocate the DMA resource
1210 1209 */
1211 1210 static int
1212 1211 amr_enquiry_mapcmd(struct amr_command *ac, uint32_t data_size)
1213 1212 {
1214 1213 struct amr_softs *softs = ac->ac_softs;
1215 1214 size_t len;
1216 1215 uint_t dma_flags;
1217 1216
1218 1217 AMRDB_PRINT((CE_NOTE, "Amr_enquiry_mapcmd called, ac=%p, flags=%x",
1219 1218 (void *)ac, ac->ac_flags));
1220 1219
1221 1220 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1222 1221 dma_flags = DDI_DMA_READ;
1223 1222 } else {
1224 1223 dma_flags = DDI_DMA_WRITE;
1225 1224 }
1226 1225
1227 1226 dma_flags |= DDI_DMA_CONSISTENT;
1228 1227
1229 1228 /* process the DMA by address bind mode */
1230 1229 if (ddi_dma_alloc_handle(softs->dev_info_p,
1231 1230 &addr_dma_attr, DDI_DMA_SLEEP, NULL,
1232 1231 &ac->buffer_dma_handle) !=
1233 1232 DDI_SUCCESS) {
1234 1233
1235 1234 AMRDB_PRINT((CE_WARN,
1236 1235 "Cannot allocate addr DMA tag"));
1237 1236 goto error_out;
1238 1237 }
1239 1238
1240 1239 if (ddi_dma_mem_alloc(ac->buffer_dma_handle,
1241 1240 data_size,
1242 1241 &accattr,
1243 1242 dma_flags,
1244 1243 DDI_DMA_SLEEP,
1245 1244 NULL,
1246 1245 (caddr_t *)&ac->ac_data,
1247 1246 &len,
1248 1247 &ac->buffer_acc_handle) !=
1249 1248 DDI_SUCCESS) {
1250 1249
1251 1250 AMRDB_PRINT((CE_WARN,
1252 1251 "Cannot allocate DMA memory"));
1253 1252 goto error_out;
1254 1253 }
1255 1254
1256 1255 if ((ddi_dma_addr_bind_handle(
1257 1256 ac->buffer_dma_handle,
1258 1257 NULL, ac->ac_data, len, dma_flags,
1259 1258 DDI_DMA_SLEEP, NULL, &ac->buffer_dma_cookie,
1260 1259 &ac->num_of_cookie)) != DDI_DMA_MAPPED) {
1261 1260
1262 1261 AMRDB_PRINT((CE_WARN,
1263 1262 "Cannot bind addr for dma"));
1264 1263 goto error_out;
1265 1264 }
1266 1265
1267 1266 ac->ac_dataphys = (&ac->buffer_dma_cookie)->dmac_address;
1268 1267
1269 1268 ((struct amr_mailbox *)&(ac->mailbox))->mb_param = 0;
1270 1269 ac->mailbox.mb_nsgelem = 0;
1271 1270 ac->mailbox.mb_physaddr = ac->ac_dataphys;
1272 1271
1273 1272 ac->ac_flags |= AMR_CMD_MAPPED;
1274 1273
1275 1274 return (DDI_SUCCESS);
1276 1275
1277 1276 error_out:
1278 1277 if (ac->num_of_cookie)
1279 1278 (void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
1280 1279 if (ac->buffer_acc_handle) {
1281 1280 ddi_dma_mem_free(&ac->buffer_acc_handle);
1282 1281 ac->buffer_acc_handle = NULL;
1283 1282 }
1284 1283 if (ac->buffer_dma_handle) {
1285 1284 (void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1286 1285 ac->buffer_dma_handle = NULL;
1287 1286 }
1288 1287
1289 1288 return (DDI_FAILURE);
1290 1289 }
1291 1290
1292 1291 /*
1293 1292 * unmap the amr command for enquiry, free the DMA resource
1294 1293 */
1295 1294 static void
1296 1295 amr_enquiry_unmapcmd(struct amr_command *ac)
1297 1296 {
1298 1297 AMRDB_PRINT((CE_NOTE, "Amr_enquiry_unmapcmd called, ac=%p",
1299 1298 (void *)ac));
1300 1299
1301 1300 /* if the command involved data at all and was mapped */
1302 1301 if ((ac->ac_flags & AMR_CMD_MAPPED) && ac->ac_data) {
1303 1302 if (ac->buffer_dma_handle)
1304 1303 (void) ddi_dma_unbind_handle(
1305 1304 ac->buffer_dma_handle);
1306 1305 if (ac->buffer_acc_handle) {
1307 1306 ddi_dma_mem_free(&ac->buffer_acc_handle);
1308 1307 ac->buffer_acc_handle = NULL;
1309 1308 }
1310 1309 if (ac->buffer_dma_handle) {
1311 1310 (void) ddi_dma_free_handle(
1312 1311 &ac->buffer_dma_handle);
1313 1312 ac->buffer_dma_handle = NULL;
1314 1313 }
1315 1314 }
1316 1315
1317 1316 ac->ac_flags &= ~AMR_CMD_MAPPED;
1318 1317 }
1319 1318
1320 1319 /*
1321 1320 * map the amr command, allocate the DMA resource
1322 1321 */
1323 1322 static int
1324 1323 amr_mapcmd(struct amr_command *ac, int (*callback)(), caddr_t arg)
1325 1324 {
1326 1325 uint_t dma_flags;
1327 1326 off_t off;
1328 1327 size_t len;
1329 1328 int error;
1330 1329 int (*cb)(caddr_t);
1331 1330
1332 1331 AMRDB_PRINT((CE_NOTE, "Amr_mapcmd called, ac=%p, flags=%x",
1333 1332 (void *)ac, ac->ac_flags));
1334 1333
1335 1334 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1336 1335 dma_flags = DDI_DMA_READ;
1337 1336 } else {
1338 1337 dma_flags = DDI_DMA_WRITE;
1339 1338 }
1340 1339
1341 1340 if (ac->ac_flags & AMR_CMD_PKT_CONSISTENT) {
1342 1341 dma_flags |= DDI_DMA_CONSISTENT;
1343 1342 }
1344 1343 if (ac->ac_flags & AMR_CMD_PKT_DMA_PARTIAL) {
1345 1344 dma_flags |= DDI_DMA_PARTIAL;
1346 1345 }
1347 1346
1348 1347 if ((!(ac->ac_flags & AMR_CMD_MAPPED)) && (ac->ac_buf == NULL)) {
1349 1348 ac->ac_flags |= AMR_CMD_MAPPED;
1350 1349 return (DDI_SUCCESS);
1351 1350 }
1352 1351
1353 1352 cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
1354 1353
1355 1354 /* if the command involves data at all, and hasn't been mapped */
1356 1355 if (!(ac->ac_flags & AMR_CMD_MAPPED)) {
1357 1356 /* process the DMA by buffer bind mode */
1358 1357 error = ddi_dma_buf_bind_handle(ac->buffer_dma_handle,
1359 1358 ac->ac_buf,
1360 1359 dma_flags,
1361 1360 cb,
1362 1361 arg,
1363 1362 &ac->buffer_dma_cookie,
1364 1363 &ac->num_of_cookie);
1365 1364 switch (error) {
1366 1365 case DDI_DMA_PARTIAL_MAP:
1367 1366 if (ddi_dma_numwin(ac->buffer_dma_handle,
1368 1367 &ac->num_of_win) == DDI_FAILURE) {
1369 1368
1370 1369 AMRDB_PRINT((CE_WARN,
1371 1370 "Cannot get dma num win"));
1372 1371 (void) ddi_dma_unbind_handle(
1373 1372 ac->buffer_dma_handle);
1374 1373 (void) ddi_dma_free_handle(
1375 1374 &ac->buffer_dma_handle);
1376 1375 ac->buffer_dma_handle = NULL;
1377 1376 return (DDI_FAILURE);
1378 1377 }
1379 1378 ac->current_win = 0;
1380 1379 break;
1381 1380
1382 1381 case DDI_DMA_MAPPED:
1383 1382 ac->num_of_win = 1;
1384 1383 ac->current_win = 0;
1385 1384 break;
1386 1385
1387 1386 default:
1388 1387 AMRDB_PRINT((CE_WARN,
1389 1388 "Cannot bind buf for dma"));
1390 1389
1391 1390 (void) ddi_dma_free_handle(
1392 1391 &ac->buffer_dma_handle);
1393 1392 ac->buffer_dma_handle = NULL;
1394 1393 return (DDI_FAILURE);
1395 1394 }
1396 1395
1397 1396 ac->current_cookie = 0;
1398 1397
1399 1398 ac->ac_flags |= AMR_CMD_MAPPED;
1400 1399 } else if (ac->current_cookie == AMR_LAST_COOKIE_TAG) {
1401 1400 /* get the next window */
1402 1401 ac->current_win++;
1403 1402 (void) ddi_dma_getwin(ac->buffer_dma_handle,
1404 1403 ac->current_win, &off, &len,
1405 1404 &ac->buffer_dma_cookie,
1406 1405 &ac->num_of_cookie);
1407 1406 ac->current_cookie = 0;
1408 1407 }
1409 1408
1410 1409 if ((ac->num_of_cookie - ac->current_cookie) > AMR_NSEG) {
1411 1410 amr_setup_dmamap(ac, &ac->buffer_dma_cookie, AMR_NSEG);
1412 1411 ac->current_cookie += AMR_NSEG;
1413 1412 } else {
1414 1413 amr_setup_dmamap(ac, &ac->buffer_dma_cookie,
1415 1414 ac->num_of_cookie - ac->current_cookie);
1416 1415 ac->current_cookie = AMR_LAST_COOKIE_TAG;
1417 1416 }
1418 1417
1419 1418 return (DDI_SUCCESS);
1420 1419 }
1421 1420
1422 1421 /*
1423 1422 * unmap the amr command, free the DMA resource
1424 1423 */
1425 1424 static void
1426 1425 amr_unmapcmd(struct amr_command *ac)
1427 1426 {
1428 1427 AMRDB_PRINT((CE_NOTE, "Amr_unmapcmd called, ac=%p",
1429 1428 (void *)ac));
1430 1429
1431 1430 /* if the command involved data at all and was mapped */
1432 1431 if ((ac->ac_flags & AMR_CMD_MAPPED) &&
1433 1432 ac->ac_buf && ac->buffer_dma_handle)
1434 1433 (void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
1435 1434
1436 1435 ac->ac_flags &= ~AMR_CMD_MAPPED;
1437 1436 }
1438 1437
1439 1438 static int
1440 1439 amr_setup_tran(dev_info_t *dip, struct amr_softs *softp)
1441 1440 {
1442 1441 softp->hba_tran = scsi_hba_tran_alloc(dip, SCSI_HBA_CANSLEEP);
1443 1442
1444 1443 /*
1445 1444 * hba_private always points to the amr_softs struct
1446 1445 */
1447 1446 softp->hba_tran->tran_hba_private = softp;
1448 1447 softp->hba_tran->tran_tgt_init = amr_tran_tgt_init;
1449 1448 softp->hba_tran->tran_tgt_probe = scsi_hba_probe;
1450 1449 softp->hba_tran->tran_start = amr_tran_start;
1451 1450 softp->hba_tran->tran_reset = amr_tran_reset;
1452 1451 softp->hba_tran->tran_getcap = amr_tran_getcap;
1453 1452 softp->hba_tran->tran_setcap = amr_tran_setcap;
1454 1453 softp->hba_tran->tran_init_pkt = amr_tran_init_pkt;
1455 1454 softp->hba_tran->tran_destroy_pkt = amr_tran_destroy_pkt;
1456 1455 softp->hba_tran->tran_dmafree = amr_tran_dmafree;
1457 1456 softp->hba_tran->tran_sync_pkt = amr_tran_sync_pkt;
1458 1457 softp->hba_tran->tran_abort = NULL;
1459 1458 softp->hba_tran->tran_tgt_free = NULL;
1460 1459 softp->hba_tran->tran_quiesce = NULL;
1461 1460 softp->hba_tran->tran_unquiesce = NULL;
1462 1461 softp->hba_tran->tran_sd = NULL;
1463 1462
1464 1463 if (scsi_hba_attach_setup(dip, &buffer_dma_attr, softp->hba_tran,
1465 1464 SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
1466 1465 scsi_hba_tran_free(softp->hba_tran);
1467 1466 softp->hba_tran = NULL;
1468 1467 return (DDI_FAILURE);
1469 1468 } else {
1470 1469 return (DDI_SUCCESS);
1471 1470 }
1472 1471 }
1473 1472
1474 1473 /*ARGSUSED*/
1475 1474 static int
1476 1475 amr_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
1477 1476 scsi_hba_tran_t *tran, struct scsi_device *sd)
1478 1477 {
1479 1478 struct amr_softs *softs;
1480 1479 ushort_t target = sd->sd_address.a_target;
1481 1480 uchar_t lun = sd->sd_address.a_lun;
1482 1481
1483 1482 softs = (struct amr_softs *)
1484 1483 (sd->sd_address.a_hba_tran->tran_hba_private);
1485 1484
1486 1485 if ((lun == 0) && (target < AMR_MAXLD))
1487 1486 if (softs->logic_drive[target].al_state != AMR_LDRV_OFFLINE)
1488 1487 return (DDI_SUCCESS);
1489 1488
1490 1489 return (DDI_FAILURE);
1491 1490 }
1492 1491
1493 1492 static int
1494 1493 amr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1495 1494 {
1496 1495 struct amr_softs *softs;
1497 1496 struct buf *bp = NULL;
1498 1497 union scsi_cdb *cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
1499 1498 int ret;
1500 1499 uint32_t capacity;
1501 1500 struct amr_command *ac;
1502 1501
1503 1502 AMRDB_PRINT((CE_NOTE, "amr_tran_start, cmd=%X,target=%d,lun=%d",
1504 1503 cdbp->scc_cmd, ap->a_target, ap->a_lun));
1505 1504
1506 1505 softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
1507 1506 if ((ap->a_lun != 0) || (ap->a_target >= AMR_MAXLD) ||
1508 1507 (softs->logic_drive[ap->a_target].al_state ==
1509 1508 AMR_LDRV_OFFLINE)) {
1510 1509 cmn_err(CE_WARN, "target or lun is not correct!");
1511 1510 ret = TRAN_BADPKT;
1512 1511 return (ret);
1513 1512 }
1514 1513
1515 1514 ac = (struct amr_command *)pkt->pkt_ha_private;
1516 1515 bp = ac->ac_buf;
1517 1516
1518 1517 AMRDB_PRINT((CE_NOTE, "scsi cmd accepted, cmd=%X", cdbp->scc_cmd));
1519 1518
1520 1519 switch (cdbp->scc_cmd) {
1521 1520 case SCMD_READ: /* read */
1522 1521 case SCMD_READ_G1: /* read g1 */
1523 1522 case SCMD_READ_BUFFER: /* read buffer */
1524 1523 case SCMD_WRITE: /* write */
1525 1524 case SCMD_WRITE_G1: /* write g1 */
1526 1525 case SCMD_WRITE_BUFFER: /* write buffer */
1527 1526 amr_rw_command(softs, pkt, ap->a_target);
1528 1527
1529 1528 if (pkt->pkt_flags & FLAG_NOINTR) {
1530 1529 (void) amr_poll_command(ac);
1531 1530 pkt->pkt_state |= (STATE_GOT_BUS
1532 1531 | STATE_GOT_TARGET
1533 1532 | STATE_SENT_CMD
1534 1533 | STATE_XFERRED_DATA);
1535 1534 *pkt->pkt_scbp = 0;
1536 1535 pkt->pkt_statistics |= STAT_SYNC;
1537 1536 pkt->pkt_reason = CMD_CMPLT;
1538 1537 } else {
1539 1538 mutex_enter(&softs->queue_mutex);
1540 1539 if (softs->waiting_q_head == NULL) {
1541 1540 ac->ac_prev = NULL;
1542 1541 ac->ac_next = NULL;
1543 1542 softs->waiting_q_head = ac;
1544 1543 softs->waiting_q_tail = ac;
1545 1544 } else {
1546 1545 ac->ac_next = NULL;
1547 1546 ac->ac_prev = softs->waiting_q_tail;
1548 1547 softs->waiting_q_tail->ac_next = ac;
1549 1548 softs->waiting_q_tail = ac;
1550 1549 }
1551 1550 mutex_exit(&softs->queue_mutex);
1552 1551 amr_start_waiting_queue((void *)softs);
1553 1552 }
1554 1553 ret = TRAN_ACCEPT;
1555 1554 break;
1556 1555
1557 1556 case SCMD_INQUIRY: /* inquiry */
1558 1557 if (bp && bp->b_un.b_addr && bp->b_bcount) {
1559 1558 struct scsi_inquiry inqp;
1560 1559 uint8_t *sinq_p = (uint8_t *)&inqp;
1561 1560
1562 1561 bzero(&inqp, sizeof (struct scsi_inquiry));
1563 1562
1564 1563 if (((char *)cdbp)[1] || ((char *)cdbp)[2]) {
1565 1564 /*
 1566 1565 				 * The EVPD and page code are
 1567 1566 				 * not supported
1568 1567 */
1569 1568 sinq_p[1] = 0xFF;
1570 1569 sinq_p[2] = 0x0;
1571 1570 } else {
1572 1571 inqp.inq_len = AMR_INQ_ADDITIONAL_LEN;
1573 1572 inqp.inq_ansi = AMR_INQ_ANSI_VER;
1574 1573 inqp.inq_rdf = AMR_INQ_RESP_DATA_FORMAT;
1575 1574 /* Enable Tag Queue */
1576 1575 inqp.inq_cmdque = 1;
1577 1576 bcopy("MegaRaid", inqp.inq_vid,
1578 1577 sizeof (inqp.inq_vid));
1579 1578 bcopy(softs->amr_product_info.pi_product_name,
1580 1579 inqp.inq_pid,
1581 1580 AMR_PRODUCT_INFO_SIZE);
1582 1581 bcopy(softs->amr_product_info.pi_firmware_ver,
1583 1582 inqp.inq_revision,
1584 1583 AMR_FIRMWARE_VER_SIZE);
1585 1584 }
1586 1585
1587 1586 amr_unmapcmd(ac);
1588 1587
1589 1588 if (bp->b_flags & (B_PHYS | B_PAGEIO))
1590 1589 bp_mapin(bp);
1591 1590 bcopy(&inqp, bp->b_un.b_addr,
1592 1591 sizeof (struct scsi_inquiry));
1593 1592
1594 1593 pkt->pkt_state |= STATE_XFERRED_DATA;
1595 1594 }
1596 1595 pkt->pkt_reason = CMD_CMPLT;
1597 1596 pkt->pkt_state |= (STATE_GOT_BUS
1598 1597 | STATE_GOT_TARGET
1599 1598 | STATE_SENT_CMD);
1600 1599 *pkt->pkt_scbp = 0;
1601 1600 ret = TRAN_ACCEPT;
1602 1601 if (!(pkt->pkt_flags & FLAG_NOINTR))
1603 1602 scsi_hba_pkt_comp(pkt);
1604 1603 break;
1605 1604
1606 1605 case SCMD_READ_CAPACITY: /* read capacity */
1607 1606 if (bp && bp->b_un.b_addr && bp->b_bcount) {
1608 1607 struct scsi_capacity cp;
1609 1608
1610 1609 capacity = softs->logic_drive[ap->a_target].al_size - 1;
1611 1610 cp.capacity = BE_32(capacity);
1612 1611 cp.lbasize = BE_32(512);
1613 1612
1614 1613 amr_unmapcmd(ac);
1615 1614
1616 1615 if (bp->b_flags & (B_PHYS | B_PAGEIO))
1617 1616 bp_mapin(bp);
1618 1617 bcopy(&cp, bp->b_un.b_addr, 8);
1619 1618 }
1620 1619 pkt->pkt_reason = CMD_CMPLT;
1621 1620 pkt->pkt_state |= (STATE_GOT_BUS
1622 1621 | STATE_GOT_TARGET
1623 1622 | STATE_SENT_CMD
1624 1623 | STATE_XFERRED_DATA);
1625 1624 *pkt->pkt_scbp = 0;
1626 1625 ret = TRAN_ACCEPT;
1627 1626 if (!(pkt->pkt_flags & FLAG_NOINTR))
1628 1627 scsi_hba_pkt_comp(pkt);
1629 1628 break;
1630 1629
1631 1630 case SCMD_MODE_SENSE: /* mode sense */
1632 1631 case SCMD_MODE_SENSE_G1: /* mode sense g1 */
1633 1632 amr_unmapcmd(ac);
1634 1633
1635 1634 capacity = softs->logic_drive[ap->a_target].al_size - 1;
1636 1635 amr_mode_sense(cdbp, bp, capacity);
1637 1636
1638 1637 pkt->pkt_reason = CMD_CMPLT;
1639 1638 pkt->pkt_state |= (STATE_GOT_BUS
1640 1639 | STATE_GOT_TARGET
1641 1640 | STATE_SENT_CMD
1642 1641 | STATE_XFERRED_DATA);
1643 1642 *pkt->pkt_scbp = 0;
1644 1643 ret = TRAN_ACCEPT;
1645 1644 if (!(pkt->pkt_flags & FLAG_NOINTR))
1646 1645 scsi_hba_pkt_comp(pkt);
1647 1646 break;
1648 1647
1649 1648 case SCMD_TEST_UNIT_READY: /* test unit ready */
1650 1649 case SCMD_REQUEST_SENSE: /* request sense */
1651 1650 case SCMD_FORMAT: /* format */
1652 1651 case SCMD_START_STOP: /* start stop */
1653 1652 case SCMD_SYNCHRONIZE_CACHE: /* synchronize cache */
1654 1653 if (bp && bp->b_un.b_addr && bp->b_bcount) {
1655 1654 amr_unmapcmd(ac);
1656 1655
1657 1656 if (bp->b_flags & (B_PHYS | B_PAGEIO))
1658 1657 bp_mapin(bp);
1659 1658 bzero(bp->b_un.b_addr, bp->b_bcount);
1660 1659
1661 1660 pkt->pkt_state |= STATE_XFERRED_DATA;
1662 1661 }
1663 1662 pkt->pkt_reason = CMD_CMPLT;
1664 1663 pkt->pkt_state |= (STATE_GOT_BUS
1665 1664 | STATE_GOT_TARGET
1666 1665 | STATE_SENT_CMD);
1667 1666 ret = TRAN_ACCEPT;
1668 1667 *pkt->pkt_scbp = 0;
1669 1668 if (!(pkt->pkt_flags & FLAG_NOINTR))
1670 1669 scsi_hba_pkt_comp(pkt);
1671 1670 break;
1672 1671
1673 1672 default: /* any other commands */
1674 1673 amr_unmapcmd(ac);
1675 1674 pkt->pkt_reason = CMD_INCOMPLETE;
1676 1675 pkt->pkt_state = (STATE_GOT_BUS
1677 1676 | STATE_GOT_TARGET
1678 1677 | STATE_SENT_CMD
1679 1678 | STATE_GOT_STATUS
1680 1679 | STATE_ARQ_DONE);
1681 1680 ret = TRAN_ACCEPT;
1682 1681 *pkt->pkt_scbp = 0;
1683 1682 amr_set_arq_data(pkt, KEY_ILLEGAL_REQUEST);
1684 1683 if (!(pkt->pkt_flags & FLAG_NOINTR))
1685 1684 scsi_hba_pkt_comp(pkt);
1686 1685 break;
1687 1686 }
1688 1687
1689 1688 return (ret);
1690 1689 }
1691 1690
1692 1691 /*
 693 692  * tran_reset() resets the bus/target/adapter to support fault recovery,
 694 693  * according to the "level" argument of the interface. However, we have
 695 694  * confirmation from LSI that these HBA cards do not support any commands
 696 695  * to reset the bus/target/adapter/channel.
 697 696  *
 698 697  * If tran_reset() returns FAILURE to sd, the system will not continue
 699 698  * to dump core. A core dump, however, is a crucial method for analyzing
 700 699  * problems at panic time. We therefore adopt a workaround: return a fake
 701 700  * SUCCESS to sd during panic, which forces the system to continue dumping
 702 701  * core, though the dump may be flawed in some situations because some
 703 702  * on-the-fly commands will continue DMAing data into memory.
 704 703  * In addition, this workaround may not be performed successfully if the
 705 704  * panic was caused by the HBA itself. So the workaround is not a good
 706 705  * example for an implementation of tran_reset(); the most reasonable
 707 706  * approach would be to send a reset command to the adapter.
1708 1707 */
1709 1708 /*ARGSUSED*/
1710 1709 static int
1711 1710 amr_tran_reset(struct scsi_address *ap, int level)
1712 1711 {
1713 1712 struct amr_softs *softs;
1714 1713 volatile uint32_t done_flag;
1715 1714
1716 1715 if (ddi_in_panic()) {
1717 1716 softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
1718 1717
1719 1718 /* Acknowledge the card while any commands are still outstanding */
1720 1719 while (softs->amr_busyslots > 0) {
1721 1720 AMR_DELAY((softs->mailbox->mb_busy == 0),
1722 1721 AMR_RETRYCOUNT, done_flag);
1723 1722 if (!done_flag) {
1724 1723 /*
1725 1724 * command not completed; report the
1726 1725 * problem and fail the reset
1727 1726 */
1728 1727 cmn_err(CE_WARN,
1729 1728 "AMR command is not completed");
1730 1729 return (0);
1731 1730 }
1732 1731
1733 1732 AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);
1734 1733
1735 1734 /* wait for the acknowledge from hardware */
1736 1735 AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
1737 1736 AMR_RETRYCOUNT, done_flag);
1738 1737 if (!done_flag) {
1739 1738 /*
1740 1739 * no acknowledge from the hardware; give
1741 1740 * up and fail the reset
1742 1741 */
1743 1742 cmn_err(CE_WARN, "No answer from the hardware");
1744 1743 
1746 1745 return (0);
1747 1746 }
1748 1747
1749 1748 softs->amr_busyslots -= softs->mailbox->mb_nstatus;
1750 1749 }
1751 1750
1752 1751 /* flush the controller */
1753 1752 (void) amr_flush(softs);
1754 1753
1755 1754 /*
1756 1755 * While the system is in panic, tran_reset() returns a
1757 1756 * fake SUCCESS to sd, so that the system continues dumping
1758 1757 * core via polled commands. This is a workaround for
1759 1758 * dumping core at panic time.
1760 1759 *
1761 1760 * Note: some in-flight commands may still be DMAing data
1762 1761 *       into memory while the core is dumped, which can
1763 1762 *       leave flaws in the dump file, so a cmn_err() is
1764 1763 *       printed to warn users. In most cases, however,
1765 1764 *       the core file will be fine.
1766 1765 */
1767 1766 cmn_err(CE_WARN, "This system contains a SCSI HBA card/driver "
1768 1767 "that doesn't support software reset. This "
1769 1768 "means that memory being used by the HBA for "
1770 1769 "DMA based reads could have been updated after "
1771 1770 "we panic'd.");
1772 1771 return (1);
1773 1772 } else {
1774 1773 /* return failure to sd */
1775 1774 return (0);
1776 1775 }
1777 1776 }
1778 1777
1779 1778 /*ARGSUSED*/
1780 1779 static int
1781 1780 amr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
1782 1781 {
1783 1782 struct amr_softs *softs;
1784 1783
1785 1784 /*
1786 1785 * We don't allow inquiring about capabilities for other targets
1787 1786 */
1788 1787 if (cap == NULL || whom == 0)
1789 1788 return (-1);
1790 1789
1791 1790 softs = ((struct amr_softs *)(ap->a_hba_tran)->tran_hba_private);
1792 1791
1793 1792 switch (scsi_hba_lookup_capstr(cap)) {
1794 1793 case SCSI_CAP_ARQ:
1795 1794 return (1);
1796 1795 case SCSI_CAP_GEOMETRY:
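/*
 * SCSA encodes the geometry capability as (heads << 16) |
 * sectors-per-track; AMR_DEFAULT_CYLINDERS evidently serves as
 * the sectors-per-track value here, as it does in
 * amr_mode_sense().
 */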
1797 1796 return ((AMR_DEFAULT_HEADS << 16) | AMR_DEFAULT_CYLINDERS);
1798 1797 case SCSI_CAP_SECTOR_SIZE:
1799 1798 return (AMR_DEFAULT_SECTORS);
1800 1799 case SCSI_CAP_TOTAL_SECTORS:
1801 1800 /* number of sectors */
1802 1801 return (softs->logic_drive[ap->a_target].al_size);
1803 1802 case SCSI_CAP_UNTAGGED_QING:
1804 1803 case SCSI_CAP_TAGGED_QING:
1805 1804 return (1);
1806 1805 default:
1807 1806 return (-1);
1808 1807 }
1809 1808 }
1810 1809
1811 1810 /*ARGSUSED*/
1812 1811 static int
1813 1812 amr_tran_setcap(struct scsi_address *ap, char *cap, int value,
1814 1813 int whom)
1815 1814 {
1816 1815 /*
1817 1816 * We don't allow setting capabilities for other targets
1818 1817 */
1819 1818 if (cap == NULL || whom == 0) {
1820 1819 AMRDB_PRINT((CE_NOTE,
1821 1820 "Set Cap not supported, string = %s, whom=%d",
1822 1821 cap, whom));
1823 1822 return (-1);
1824 1823 }
1825 1824
1826 1825 switch (scsi_hba_lookup_capstr(cap)) {
1827 1826 case SCSI_CAP_ARQ:
1828 1827 return (1);
1829 1828 case SCSI_CAP_TOTAL_SECTORS:
1830 1829 return (1);
1831 1830 case SCSI_CAP_SECTOR_SIZE:
1832 1831 return (1);
1833 1832 case SCSI_CAP_UNTAGGED_QING:
1834 1833 case SCSI_CAP_TAGGED_QING:
1835 1834 return ((value == 1) ? 1 : 0);
1836 1835 default:
1837 1836 return (0);
1838 1837 }
1839 1838 }
1840 1839
1841 1840 static struct scsi_pkt *
1842 1841 amr_tran_init_pkt(struct scsi_address *ap,
1843 1842 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
1844 1843 int tgtlen, int flags, int (*callback)(), caddr_t arg)
1845 1844 {
1846 1845 struct amr_softs *softs;
1847 1846 struct amr_command *ac;
1848 1847 uint32_t slen;
1849 1848
1850 1849 softs = (struct amr_softs *)(ap->a_hba_tran->tran_hba_private);
1851 1850
1852 1851 if ((ap->a_lun != 0)||(ap->a_target >= AMR_MAXLD)||
1853 1852 (softs->logic_drive[ap->a_target].al_state ==
1854 1853 AMR_LDRV_OFFLINE)) {
1855 1854 return (NULL);
1856 1855 }
1857 1856
1858 1857 if (pkt == NULL) {
1859 1858 /* force auto request sense */
1860 1859 slen = MAX(statuslen, sizeof (struct scsi_arq_status));
1861 1860
1862 1861 pkt = scsi_hba_pkt_alloc(softs->dev_info_p, ap, cmdlen,
1863 1862 slen, tgtlen, sizeof (struct amr_command),
1864 1863 callback, arg);
1865 1864 if (pkt == NULL) {
1866 1865 AMRDB_PRINT((CE_WARN, "scsi_hba_pkt_alloc failed"));
1867 1866 return (NULL);
1868 1867 }
1869 1868 pkt->pkt_address = *ap;
1870 1869 pkt->pkt_comp = (void (*)())NULL;
1871 1870 pkt->pkt_time = 0;
1872 1871 pkt->pkt_resid = 0;
1873 1872 pkt->pkt_statistics = 0;
1874 1873 pkt->pkt_reason = 0;
1875 1874
1876 1875 ac = (struct amr_command *)pkt->pkt_ha_private;
1877 1876 ac->ac_buf = bp;
1878 1877 ac->cmdlen = cmdlen;
1879 1878 ac->ac_softs = softs;
1880 1879 ac->pkt = pkt;
1881 1880 ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
1882 1881 ac->ac_flags &= ~AMR_CMD_BUSY;
1883 1882
1884 1883 if ((bp == NULL) || (bp->b_bcount == 0)) {
1885 1884 return (pkt);
1886 1885 }
1887 1886
1888 1887 if (ddi_dma_alloc_handle(softs->dev_info_p, &buffer_dma_attr,
1889 1888 DDI_DMA_SLEEP, NULL,
1890 1889 &ac->buffer_dma_handle) != DDI_SUCCESS) {
1891 1890
1892 1891 AMRDB_PRINT((CE_WARN,
1893 1892 "Cannot allocate buffer DMA tag"));
1894 1893 scsi_hba_pkt_free(ap, pkt);
1895 1894 return (NULL);
1896 1895
1897 1896 }
1898 1897
1899 1898 } else {
1900 1899 if ((bp == NULL) || (bp->b_bcount == 0)) {
1901 1900 return (pkt);
1902 1901 }
1903 1902 ac = (struct amr_command *)pkt->pkt_ha_private;
1904 1903 }
1905 1904
1906 1905 ASSERT(ac != NULL);
1907 1906
1908 1907 if (bp->b_flags & B_READ) {
1909 1908 ac->ac_flags |= AMR_CMD_DATAOUT;
1910 1909 } else {
1911 1910 ac->ac_flags |= AMR_CMD_DATAIN;
1912 1911 }
1913 1912
1914 1913 if (flags & PKT_CONSISTENT) {
1915 1914 ac->ac_flags |= AMR_CMD_PKT_CONSISTENT;
1916 1915 }
1917 1916
1918 1917 if (flags & PKT_DMA_PARTIAL) {
1919 1918 ac->ac_flags |= AMR_CMD_PKT_DMA_PARTIAL;
1920 1919 }
1921 1920
1922 1921 if (amr_mapcmd(ac, callback, arg) != DDI_SUCCESS) {
1923 1922 scsi_hba_pkt_free(ap, pkt);
1924 1923 return (NULL);
1925 1924 }
1926 1925
1927 1926 pkt->pkt_resid = bp->b_bcount - ac->data_transfered;
1928 1927
1929 1928 AMRDB_PRINT((CE_NOTE,
1930 1929 "init pkt, pkt_resid=%d, b_bcount=%d, data_transfered=%d",
1931 1930 (uint32_t)pkt->pkt_resid, (uint32_t)bp->b_bcount,
1932 1931 ac->data_transfered));
1933 1932
1934 1933 ASSERT(pkt->pkt_resid >= 0);
1935 1934
1936 1935 return (pkt);
1937 1936 }
1938 1937
1939 1938 static void
1940 1939 amr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1941 1940 {
1942 1941 struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1943 1942
1944 1943 amr_unmapcmd(ac);
1945 1944
1946 1945 if (ac->buffer_dma_handle) {
1947 1946 (void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1948 1947 ac->buffer_dma_handle = NULL;
1949 1948 }
1950 1949
1951 1950 scsi_hba_pkt_free(ap, pkt);
1952 1951 AMRDB_PRINT((CE_NOTE, "Destroy pkt called"));
1953 1952 }
1954 1953
1955 1954 /*ARGSUSED*/
1956 1955 static void
1957 1956 amr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1958 1957 {
1959 1958 struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1960 1959
1961 1960 if (ac->buffer_dma_handle) {
1962 1961 (void) ddi_dma_sync(ac->buffer_dma_handle, 0, 0,
1963 1962 (ac->ac_flags & AMR_CMD_DATAIN) ?
1964 1963 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU);
1965 1964 }
1966 1965 }
1967 1966
1968 1967 /*ARGSUSED*/
1969 1968 static void
1970 1969 amr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1971 1970 {
1972 1971 struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1973 1972
1974 1973 if (ac->ac_flags & AMR_CMD_MAPPED) {
1975 1974 (void) ddi_dma_unbind_handle(ac->buffer_dma_handle);
1976 1975 (void) ddi_dma_free_handle(&ac->buffer_dma_handle);
1977 1976 ac->buffer_dma_handle = NULL;
1978 1977 ac->ac_flags &= ~AMR_CMD_MAPPED;
1979 1978 }
1980 1979
1981 1980 }
1982 1981
1983 1982 /*ARGSUSED*/
1984 1983 static void
1985 1984 amr_rw_command(struct amr_softs *softs, struct scsi_pkt *pkt, int target)
1986 1985 {
1987 1986 struct amr_command *ac = (struct amr_command *)pkt->pkt_ha_private;
1988 1987 union scsi_cdb *cdbp = (union scsi_cdb *)pkt->pkt_cdbp;
1989 1988 uint8_t cmd;
1990 1989
1991 1990 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1992 1991 cmd = AMR_CMD_LREAD;
1993 1992 } else {
1994 1993 cmd = AMR_CMD_LWRITE;
1995 1994 }
1996 1995
1997 1996 ac->mailbox.mb_command = cmd;
1998 1997 ac->mailbox.mb_blkcount =
1999 1998 (ac->transfer_size + AMR_BLKSIZE - 1)/AMR_BLKSIZE;
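/*
 * 10-byte (group 1) CDBs carry a 32-bit LBA; 6-byte (group 0)
 * CDBs carry only a 21-bit LBA, hence the split below.
 */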
2000 1999 ac->mailbox.mb_lba = (ac->cmdlen == 10) ?
2001 2000 GETG1ADDR(cdbp) : GETG0ADDR(cdbp);
2002 2001 ac->mailbox.mb_drive = (uint8_t)target;
2003 2002 }
2004 2003
2005 2004 static void
2006 2005 amr_mode_sense(union scsi_cdb *cdbp, struct buf *bp, unsigned int capacity)
2007 2006 {
2008 2007 uchar_t pagecode;
2009 2008 struct mode_format *page3p;
2010 2009 struct mode_geometry *page4p;
2011 2010 struct mode_header *headerp;
2012 2011 uint32_t ncyl;
2013 2012
2014 2013 if (!(bp && bp->b_un.b_addr && bp->b_bcount))
2015 2014 return;
2016 2015
2017 2016 if (bp->b_flags & (B_PHYS | B_PAGEIO))
2018 2017 bp_mapin(bp);
2019 2018
2020 2019 pagecode = cdbp->cdb_un.sg.scsi[0];
2021 2020 switch (pagecode) {
2022 2021 case SD_MODE_SENSE_PAGE3_CODE:
2023 2022 headerp = (struct mode_header *)(bp->b_un.b_addr);
2024 2023 headerp->bdesc_length = MODE_BLK_DESC_LENGTH;
2025 2024
2026 2025 page3p = (struct mode_format *)((caddr_t)headerp +
2027 2026 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
2028 2027 page3p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE3_CODE);
2029 2028 page3p->mode_page.length = BE_8(sizeof (struct mode_format));
2030 2029 page3p->data_bytes_sect = BE_16(AMR_DEFAULT_SECTORS);
2031 2030 page3p->sect_track = BE_16(AMR_DEFAULT_CYLINDERS);
2032 2031
2033 2032 return;
2034 2033
2035 2034 case SD_MODE_SENSE_PAGE4_CODE:
2036 2035 headerp = (struct mode_header *)(bp->b_un.b_addr);
2037 2036 headerp->bdesc_length = MODE_BLK_DESC_LENGTH;
2038 2037
2039 2038 page4p = (struct mode_geometry *)((caddr_t)headerp +
2040 2039 MODE_HEADER_LENGTH + MODE_BLK_DESC_LENGTH);
2041 2040 page4p->mode_page.code = BE_8(SD_MODE_SENSE_PAGE4_CODE);
2042 2041 page4p->mode_page.length = BE_8(sizeof (struct mode_geometry));
2043 2042 page4p->heads = BE_8(AMR_DEFAULT_HEADS);
2044 2043 page4p->rpm = BE_16(AMR_DEFAULT_ROTATIONS);
2045 2044
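/*
 * Derive a fake cylinder count from the capacity, assuming
 * AMR_DEFAULT_HEADS heads and AMR_DEFAULT_CYLINDERS sectors
 * per track (the same constant used for sect_track in the
 * page 3 case above).
 */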
2046 2045 ncyl = capacity / (AMR_DEFAULT_HEADS*AMR_DEFAULT_CYLINDERS);
2047 2046 page4p->cyl_lb = BE_8(ncyl & 0xff);
2048 2047 page4p->cyl_mb = BE_8((ncyl >> 8) & 0xff);
2049 2048 page4p->cyl_ub = BE_8((ncyl >> 16) & 0xff);
2050 2049
2051 2050 return;
2052 2051 default:
2053 2052 bzero(bp->b_un.b_addr, bp->b_bcount);
2054 2053 return;
2055 2054 }
2056 2055 }
2057 2056
2058 2057 static void
2059 2058 amr_set_arq_data(struct scsi_pkt *pkt, uchar_t key)
2060 2059 {
2061 2060 struct scsi_arq_status *arqstat;
2062 2061
2063 2062 arqstat = (struct scsi_arq_status *)(pkt->pkt_scbp);
2064 2063 arqstat->sts_status.sts_chk = 1; /* CHECK CONDITION */
2065 2064 arqstat->sts_rqpkt_reason = CMD_CMPLT;
2066 2065 arqstat->sts_rqpkt_resid = 0;
2067 2066 arqstat->sts_rqpkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
2068 2067 STATE_SENT_CMD | STATE_XFERRED_DATA;
2069 2068 arqstat->sts_rqpkt_statistics = 0;
2070 2069 arqstat->sts_sensedata.es_valid = 1;
2071 2070 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
2072 2071 arqstat->sts_sensedata.es_key = key;
2073 2072 }
2074 2073
2075 2074 static void
2076 2075 amr_start_waiting_queue(void *softp)
2077 2076 {
2078 2077 uint32_t slot;
2079 2078 struct amr_command *ac;
2080 2079 volatile uint32_t done_flag;
2081 2080 struct amr_softs *softs = (struct amr_softs *)softp;
2082 2081
2083 2082 /* only one command allowed at the same time */
2084 2083 mutex_enter(&softs->queue_mutex);
2085 2084 mutex_enter(&softs->cmd_mutex);
2086 2085
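/*
 * Drain the waiting queue: for each command, claim a free slot,
 * copy its scatter/gather table and mailbox into the shared DMA
 * area, then ring the inbound doorbell with AMR_QIDB_SUBMIT.
 */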
2087 2086 while ((ac = softs->waiting_q_head) != NULL) {
2088 2087 /*
2089 2088 * Find an available slot; the last slot is
2090 2089 * reserved for the polled I/O command.
2091 2090 */
2092 2091 for (slot = 0; slot < (softs->sg_max_count - 1); slot++) {
2093 2092 if (softs->busycmd[slot] == NULL) {
2094 2093 if (AMR_QGET_IDB(softs) & AMR_QIDB_SUBMIT) {
2095 2094 /*
2096 2095 * a command is still pending in the
2097 2096 * mailbox; only one is allowed at a
2098 2097 * time, so bail out
2099 2098 */
2099 2098 mutex_exit(&softs->cmd_mutex);
2100 2099 mutex_exit(&softs->queue_mutex);
2101 2100 return;
2102 2101 }
2103 2102
2104 2103 ac->ac_timestamp = ddi_get_time();
2105 2104
2106 2105 if (!(ac->ac_flags & AMR_CMD_GOT_SLOT)) {
2107 2106
2108 2107 softs->busycmd[slot] = ac;
2109 2108 ac->ac_slot = slot;
2110 2109 softs->amr_busyslots++;
2111 2110
2112 2111 bcopy(ac->sgtable,
2113 2112 softs->sg_items[slot].sg_table,
2114 2113 sizeof (struct amr_sgentry) *
2115 2114 AMR_NSEG);
2116 2115
2117 2116 (void) ddi_dma_sync(
2118 2117 softs->sg_items[slot].sg_handle,
2119 2118 0, 0, DDI_DMA_SYNC_FORDEV);
2120 2119
2121 2120 ac->mailbox.mb_physaddr =
2122 2121 softs->sg_items[slot].sg_phyaddr;
2123 2122 }
2124 2123
2125 2124 /* take the cmd from the queue */
2126 2125 softs->waiting_q_head = ac->ac_next;
2127 2126
2128 2127 ac->mailbox.mb_ident = ac->ac_slot + 1;
2129 2128 ac->mailbox.mb_busy = 1;
2130 2129 ac->ac_next = NULL;
2131 2130 ac->ac_prev = NULL;
2132 2131 ac->ac_flags |= AMR_CMD_GOT_SLOT;
2133 2132
2134 2133 /* clear the poll/ack fields in the mailbox */
2135 2134 softs->mailbox->mb_poll = 0;
2136 2135 softs->mailbox->mb_ack = 0;
2137 2136
2138 2137 AMR_DELAY((softs->mailbox->mb_busy == 0),
2139 2138 AMR_RETRYCOUNT, done_flag);
2140 2139 if (!done_flag) {
2141 2140 /*
2142 2141 * command not completed; report the
2143 2142 * problem and continue with the next ac
2144 2143 */
2145 2144 cmn_err(CE_WARN,
2146 2145 "AMR command is not completed");
2147 2146 break;
2148 2147 }
2149 2148
2150 2149 bcopy(&ac->mailbox, (void *)softs->mailbox,
2151 2150 AMR_MBOX_CMDSIZE);
2152 2151 ac->ac_flags |= AMR_CMD_BUSY;
2153 2152
2154 2153 (void) ddi_dma_sync(softs->mbox_dma_handle,
2155 2154 0, 0, DDI_DMA_SYNC_FORDEV);
2156 2155
2157 2156 AMR_QPUT_IDB(softs,
2158 2157 softs->mbox_phyaddr | AMR_QIDB_SUBMIT);
2159 2158
2160 2159 /*
2161 2160 * the current ac has been submitted, so
2162 2161 * leave the for-loop and get the next ac
2163 2162 */
2164 2163 break;
2165 2164 }
2166 2165 }
2167 2166
2168 2167 /* no slot, finish our task */
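/*
 * (the scan above runs only to sg_max_count - 1, so this test
 * assumes maxio == sg_max_count - 1)
 */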
2169 2168 if (slot == softs->maxio)
2170 2169 break;
2171 2170 }
2172 2171
2173 2172 /* only one command allowed at the same time */
2174 2173 mutex_exit(&softs->cmd_mutex);
2175 2174 mutex_exit(&softs->queue_mutex);
2176 2175 }
2177 2176
2178 2177 static void
2179 2178 amr_done(struct amr_softs *softs)
2180 2179 {
2181 2180
2182 2181 uint32_t i, idx;
2183 2182 volatile uint32_t done_flag;
2184 2183 struct amr_mailbox *mbox, mbsave;
2185 2184 struct amr_command *ac, *head, *tail;
2186 2185
2187 2186 head = tail = NULL;
2188 2187
2189 2188 AMR_QPUT_ODB(softs, AMR_QODB_READY);
2190 2189
2191 2190 /* acknowledge interrupt */
2192 2191 (void) AMR_QGET_ODB(softs);
2193 2192
2194 2193 mutex_enter(&softs->cmd_mutex);
2195 2194
2196 2195 if (softs->mailbox->mb_nstatus != 0) {
2197 2196 (void) ddi_dma_sync(softs->mbox_dma_handle,
2198 2197 0, 0, DDI_DMA_SYNC_FORCPU);
2199 2198
2200 2199 /* save mailbox, which contains a list of completed commands */
2201 2200 bcopy((void *)(uintptr_t)(volatile void *)softs->mailbox,
2202 2201 &mbsave, sizeof (mbsave));
2203 2202
2204 2203 mbox = &mbsave;
2205 2204
2206 2205 AMR_QPUT_IDB(softs, softs->mbox_phyaddr | AMR_QIDB_ACK);
2207 2206
2208 2207 /* wait for the acknowledge from hardware */
2209 2208 AMR_BUSYWAIT(!(AMR_QGET_IDB(softs) & AMR_QIDB_ACK),
2210 2209 AMR_RETRYCOUNT, done_flag);
2211 2210 if (!done_flag) {
2212 2211 /*
2213 2212 * command is not completed, return from the current
2214 2213 * interrupt and wait for the next one
2215 2214 */
2216 2215 cmn_err(CE_WARN, "No answer from the hardware");
2217 2216
2218 2217 mutex_exit(&softs->cmd_mutex);
2219 2218 return;
2220 2219 }
2221 2220
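/*
 * mb_completed[] holds 1-based slot idents (mb_ident was set to
 * ac_slot + 1 at submit time), hence the -1 below.
 */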
2222 2221 for (i = 0; i < mbox->mb_nstatus; i++) {
2223 2222 idx = mbox->mb_completed[i] - 1;
2224 2223 ac = softs->busycmd[idx];
2225 2224
2226 2225 if (ac != NULL) {
2227 2226 /* pull the command from the busy index */
2228 2227 softs->busycmd[idx] = NULL;
2229 2228 if (softs->amr_busyslots > 0)
2230 2229 softs->amr_busyslots--;
2231 2230 if (softs->amr_busyslots == 0)
2232 2231 cv_broadcast(&softs->cmd_cv);
2233 2232
2234 2233 ac->ac_flags &= ~AMR_CMD_BUSY;
2235 2234 ac->ac_flags &= ~AMR_CMD_GOT_SLOT;
2236 2235 ac->ac_status = mbox->mb_status;
2237 2236
2238 2237 /* enqueue here */
2239 2238 if (head) {
2240 2239 tail->ac_next = ac;
2241 2240 tail = ac;
2242 2241 tail->ac_next = NULL;
2243 2242 } else {
2244 2243 tail = head = ac;
2245 2244 ac->ac_next = NULL;
2246 2245 }
2247 2246 } else {
2248 2247 AMRDB_PRINT((CE_WARN,
2249 2248 "ac in mailbox is NULL!"));
2250 2249 }
2251 2250 }
2252 2251 } else {
2253 2252 AMRDB_PRINT((CE_WARN, "mailbox is not ready for copy out!"));
2254 2253 }
2255 2254
2256 2255 mutex_exit(&softs->cmd_mutex);
2257 2256
2258 2257 if (head != NULL) {
2259 2258 amr_call_pkt_comp(head);
2260 2259 }
2261 2260
2262 2261 /* dispatch a thread to process the pending I/O if there is any */
2263 2262 if ((ddi_taskq_dispatch(softs->amr_taskq, amr_start_waiting_queue,
2264 2263 (void *)softs, DDI_NOSLEEP)) != DDI_SUCCESS) {
2265 2264 cmn_err(CE_WARN, "No memory available to dispatch taskq");
2266 2265 }
2267 2266 }
2268 2267
2269 2268 static void
2270 2269 amr_call_pkt_comp(register struct amr_command *head)
2271 2270 {
2272 2271 register struct scsi_pkt *pkt;
2273 2272 register struct amr_command *ac, *localhead;
2274 2273
2275 2274 localhead = head;
2276 2275
2277 2276 while (localhead) {
2278 2277 ac = localhead;
2279 2278 localhead = ac->ac_next;
2280 2279 ac->ac_next = NULL;
2281 2280
2282 2281 pkt = ac->pkt;
2283 2282 *pkt->pkt_scbp = 0;
2284 2283
2285 2284 if (ac->ac_status == AMR_STATUS_SUCCESS) {
2286 2285 pkt->pkt_state |= (STATE_GOT_BUS
2287 2286 | STATE_GOT_TARGET
2288 2287 | STATE_SENT_CMD
2289 2288 | STATE_XFERRED_DATA);
2290 2289 pkt->pkt_reason = CMD_CMPLT;
2291 2290 } else {
2292 2291 pkt->pkt_state |= STATE_GOT_BUS
2293 2292 | STATE_ARQ_DONE;
2294 2293 pkt->pkt_reason = CMD_INCOMPLETE;
2295 2294 amr_set_arq_data(pkt, KEY_HARDWARE_ERROR);
2296 2295 }
2297 2296 if (!(pkt->pkt_flags & FLAG_NOINTR)) {
2298 2297 scsi_hba_pkt_comp(pkt);
2299 2298 }
2300 2299 }
2301 2300 }
2050 lines elided