1 /*
2 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
3 */
4
5 /*
6 * Copyright 2005-08 Adaptec, Inc.
7 * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
8 * Copyright (c) 2000 Michael Smith
9 * Copyright (c) 2001 Scott Long
10 * Copyright (c) 2000 BSDi
11 * All rights reserved.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34 #include <sys/modctl.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/ddi.h>
38 #include <sys/devops.h>
39 #include <sys/pci.h>
40 #include <sys/types.h>
41 #include <sys/ddidmareq.h>
42 #include <sys/scsi/scsi.h>
43 #include <sys/ksynch.h>
44 #include <sys/sunddi.h>
45 #include <sys/byteorder.h>
46 #include "aac_regs.h"
47 #include "aac.h"
48
49 /*
50 * FMA header files
51 */
52 #include <sys/ddifm.h>
53 #include <sys/fm/protocol.h>
54 #include <sys/fm/util.h>
55 #include <sys/fm/io/ddi.h>
56
57 /*
 * For minor nodes created by the SCSA framework, minor numbers are
 * formed by left-shifting the instance by INST_MINOR_SHIFT and ORing
 * in a number less than 64.
 *
 * To support cfgadm, we need to conform to the SCSA framework by
 * creating devctl/scsi and driver-specific minor nodes in the SCSA
 * format, and calling the scsi_hba_xxx() functions accordingly.
65 */
66
67 #define AAC_MINOR 32
68 #define INST2AAC(x) (((x) << INST_MINOR_SHIFT) | AAC_MINOR)
69 #define AAC_SCSA_MINOR(x) ((x) & TRAN_MINOR_MASK)
70 #define AAC_IS_SCSA_NODE(x) ((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)
71
72 #define SD2TRAN(sd) ((sd)->sd_address.a_hba_tran)
73 #define AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
74 #define AAC_DIP2TRAN(dip) ((scsi_hba_tran_t *)ddi_get_driver_private(dip))
75 #define AAC_DIP2SOFTS(dip) (AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
76 #define SD2AAC(sd) (AAC_TRAN2SOFTS(SD2TRAN(sd)))
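/*
 * Targets below AAC_MAX_LD address logical containers; higher target
 * numbers address pass-through physical (non-dasd) devices, which
 * AAC_PD() maps to an index into nondasds[].
 */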
77 #define AAC_PD(t) ((t) - AAC_MAX_LD)
78 #define AAC_DEV(softs, t) (((t) < AAC_MAX_LD) ? \
79 &(softs)->containers[(t)].dev : \
80 ((t) < AAC_MAX_DEV(softs)) ? \
81 &(softs)->nondasds[AAC_PD(t)].dev : NULL)
82 #define AAC_DEVCFG_BEGIN(softs, tgt) \
83 aac_devcfg((softs), (tgt), 1)
84 #define AAC_DEVCFG_END(softs, tgt) \
85 aac_devcfg((softs), (tgt), 0)
86 #define PKT2AC(pkt) ((struct aac_cmd *)(pkt)->pkt_ha_private)
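/*
 * Busy-wait up to 'timeout' milliseconds for 'cond' to become true,
 * polling every 100us.  On return 'timeout' holds the remaining time in
 * milliseconds, or 0 if the wait timed out.  A sketch of typical use
 * (fw_is_ready() is illustrative only, not a real helper):
 *
 *	int timeout = 1000;
 *	AAC_BUSYWAIT(fw_is_ready(softs), timeout);
 *	if (timeout == 0)
 *		return (AACERR);
 */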
87 #define AAC_BUSYWAIT(cond, timeout /* in millisecond */) { \
88 if (!(cond)) { \
89 int count = (timeout) * 10; \
90 while (count) { \
91 drv_usecwait(100); \
92 if (cond) \
93 break; \
94 count--; \
95 } \
96 (timeout) = (count + 9) / 10; \
97 } \
98 }
99
100 #define AAC_SENSE_DATA_DESCR_LEN \
101 (sizeof (struct scsi_descr_sense_hdr) + \
102 sizeof (struct scsi_information_sense_descr))
103 #define AAC_ARQ64_LENGTH \
104 (sizeof (struct scsi_arq_status) + \
105 AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)
106
107 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */
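/*
 * Extract the LBA from a CDB: group 0 (6-byte) CDBs carry a 21-bit LBA,
 * group 1 (10-byte) CDBs a 32-bit LBA, and group 4 (16-byte) CDBs a
 * 64-bit LBA split between GETG4ADDR (upper 32 bits) and GETG4ADDRTL
 * (lower 32 bits).
 */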
108 #define AAC_GETGXADDR(cmdlen, cdbp) \
109 ((cmdlen == 6) ? GETG0ADDR(cdbp) : \
110 (cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
111 ((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))
112
113 #define AAC_CDB_INQUIRY_CMDDT 0x02
114 #define AAC_CDB_INQUIRY_EVPD 0x01
115 #define AAC_VPD_PAGE_CODE 1
116 #define AAC_VPD_PAGE_LENGTH 3
117 #define AAC_VPD_PAGE_DATA 4
118 #define AAC_VPD_ID_CODESET 0
119 #define AAC_VPD_ID_TYPE 1
120 #define AAC_VPD_ID_LENGTH 3
121 #define AAC_VPD_ID_DATA 4
122
123 #define AAC_SCSI_RPTLUNS_HEAD_SIZE 0x08
124 #define AAC_SCSI_RPTLUNS_ADDR_SIZE 0x08
125 #define AAC_SCSI_RPTLUNS_ADDR_MASK 0xC0
126 /* 00b - peripheral device addressing method */
127 #define AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL 0x00
128 /* 01b - flat space addressing method */
129 #define AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE 0x40
130 /* 10b - logical unit addressing method */
131 #define AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT 0x80
132
/* Return the size of a FIB whose data part has type data_type */
134 #define AAC_FIB_SIZEOF(data_type) \
135 (sizeof (struct aac_fib_header) + sizeof (data_type))
136 /* Return the container size defined in mir */
137 #define AAC_MIR_SIZE(softs, acc, mir) \
138 (((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
139 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
140 ((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
141 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))
142
143 /* The last entry of aac_cards[] is for unknown cards */
144 #define AAC_UNKNOWN_CARD \
145 (sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
146 #define CARD_IS_UNKNOWN(i) (i == AAC_UNKNOWN_CARD)
147 #define BUF_IS_READ(bp) ((bp)->b_flags & B_READ)
148 #define AAC_IS_Q_EMPTY(q) ((q)->q_head == NULL)
149 #define AAC_CMDQ(acp) (!((acp)->flags & AAC_CMD_SYNC))
150
151 #define PCI_MEM_GET32(softs, off) \
152 ddi_get32((softs)->pci_mem_handle, \
153 (void *)((softs)->pci_mem_base_vaddr + (off)))
154 #define PCI_MEM_PUT32(softs, off, val) \
155 ddi_put32((softs)->pci_mem_handle, \
156 (void *)((softs)->pci_mem_base_vaddr + (off)), \
157 (uint32_t)(val))
158 #define PCI_MEM_GET16(softs, off) \
159 ddi_get16((softs)->pci_mem_handle, \
160 (void *)((softs)->pci_mem_base_vaddr + (off)))
161 #define PCI_MEM_PUT16(softs, off, val) \
162 ddi_put16((softs)->pci_mem_handle, \
163 (void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
/* Copy count bytes of host data at valp to device mem[off] */
165 #define PCI_MEM_REP_PUT8(softs, off, valp, count) \
166 ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
167 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
168 count, DDI_DEV_AUTOINCR)
/* Copy count bytes of device data at mem[off] to host addr valp */
170 #define PCI_MEM_REP_GET8(softs, off, valp, count) \
171 ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
172 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
173 count, DDI_DEV_AUTOINCR)
174 #define AAC_GET_FIELD8(acc, d, s, field) \
175 (d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
176 #define AAC_GET_FIELD32(acc, d, s, field) \
177 (d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
178 #define AAC_GET_FIELD64(acc, d, s, field) \
179 (d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
180 #define AAC_REP_GET_FIELD8(acc, d, s, field, r) \
181 ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
182 (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
183 #define AAC_REP_GET_FIELD32(acc, d, s, field, r) \
184 ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
185 (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
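
/*
 * Interrupt enable/disable is done through the adapter's outbound
 * interrupt mask register (OIMR): a set bit masks the corresponding
 * doorbell interrupt.  Enabling writes the complement of the interrupt
 * bits we want to receive; disabling masks everything (~0).
 */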
186
187 #define AAC_ENABLE_INTR(softs) { \
188 if (softs->flags & AAC_FLAGS_NEW_COMM) \
189 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
190 else \
191 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
192 softs->state |= AAC_STATE_INTR; \
193 }
194
195 #define AAC_DISABLE_INTR(softs) { \
196 PCI_MEM_PUT32(softs, AAC_OIMR, ~0); \
197 softs->state &= ~AAC_STATE_INTR; \
198 }
199 #define AAC_STATUS_CLR(softs, mask) PCI_MEM_PUT32(softs, AAC_ODBR, mask)
200 #define AAC_STATUS_GET(softs) PCI_MEM_GET32(softs, AAC_ODBR)
201 #define AAC_NOTIFY(softs, val) PCI_MEM_PUT32(softs, AAC_IDBR, val)
202 #define AAC_OUTB_GET(softs) PCI_MEM_GET32(softs, AAC_OQUE)
203 #define AAC_OUTB_SET(softs, val) PCI_MEM_PUT32(softs, AAC_OQUE, val)
204 #define AAC_FWSTATUS_GET(softs) \
205 ((softs)->aac_if.aif_get_fwstatus(softs))
206 #define AAC_MAILBOX_GET(softs, mb) \
207 ((softs)->aac_if.aif_get_mailbox((softs), (mb)))
208 #define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
209 ((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
210 (arg0), (arg1), (arg2), (arg3)))
211
212 #define AAC_MGT_SLOT_NUM 2
213 #define AAC_THROTTLE_DRAIN -1
214
215 #define AAC_QUIESCE_TICK 1 /* 1 second */
216 #define AAC_QUIESCE_TIMEOUT 180 /* 180 seconds */
217 #define AAC_DEFAULT_TICK 10 /* 10 seconds */
218 #define AAC_SYNC_TICK (30*60) /* 30 minutes */
219
220 /* Poll time for aac_do_poll_io() */
221 #define AAC_POLL_TIME 60 /* 60 seconds */
222
223 /* IOP reset */
#define	AAC_IOP_RESET_SUCCEED		0	/* IOP reset succeeded */
225 #define AAC_IOP_RESET_FAILED -1 /* IOP reset failed */
226 #define AAC_IOP_RESET_ABNORMAL -2 /* Reset operation abnormal */
227
228 /*
229 * Hardware access functions
230 */
231 static int aac_rx_get_fwstatus(struct aac_softstate *);
232 static int aac_rx_get_mailbox(struct aac_softstate *, int);
233 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
234 uint32_t, uint32_t, uint32_t);
235 static int aac_rkt_get_fwstatus(struct aac_softstate *);
236 static int aac_rkt_get_mailbox(struct aac_softstate *, int);
237 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
238 uint32_t, uint32_t, uint32_t);
239
240 /*
241 * SCSA function prototypes
242 */
243 static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
244 static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
245 static int aac_reset(dev_info_t *, ddi_reset_cmd_t);
246 static int aac_quiesce(dev_info_t *);
247 static int aac_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
248
249 /*
250 * Interrupt handler functions
251 */
252 static int aac_query_intrs(struct aac_softstate *, int);
253 static int aac_add_intrs(struct aac_softstate *);
254 static void aac_remove_intrs(struct aac_softstate *);
255 static int aac_enable_intrs(struct aac_softstate *);
256 static int aac_disable_intrs(struct aac_softstate *);
257 static uint_t aac_intr_old(caddr_t);
258 static uint_t aac_intr_new(caddr_t);
259 static uint_t aac_softintr(caddr_t);
260
261 /*
262 * Internal functions in attach
263 */
264 static int aac_check_card_type(struct aac_softstate *);
265 static int aac_check_firmware(struct aac_softstate *);
266 static int aac_common_attach(struct aac_softstate *);
267 static void aac_common_detach(struct aac_softstate *);
268 static int aac_probe_containers(struct aac_softstate *);
269 static int aac_alloc_comm_space(struct aac_softstate *);
270 static int aac_setup_comm_space(struct aac_softstate *);
271 static void aac_free_comm_space(struct aac_softstate *);
272 static int aac_hba_setup(struct aac_softstate *);
273
274 /*
275 * Sync FIB operation functions
276 */
277 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
278 uint32_t, uint32_t, uint32_t, uint32_t *);
279 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);
280
281 /*
282 * Command queue operation functions
283 */
284 static void aac_cmd_initq(struct aac_cmd_queue *);
285 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
286 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
287 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);
288
289 /*
290 * FIB queue operation functions
291 */
292 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
293 static int aac_fib_dequeue(struct aac_softstate *, int, int *);
294
295 /*
296 * Slot operation functions
297 */
298 static int aac_create_slots(struct aac_softstate *);
299 static void aac_destroy_slots(struct aac_softstate *);
300 static void aac_alloc_fibs(struct aac_softstate *);
301 static void aac_destroy_fibs(struct aac_softstate *);
302 static struct aac_slot *aac_get_slot(struct aac_softstate *);
303 static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
304 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
305 static void aac_free_fib(struct aac_slot *);
306
307 /*
308 * Internal functions
309 */
310 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_cmd *,
311 uint16_t);
312 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
313 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
314 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
315 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
316 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
317 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
318 static void aac_cmd_fib_startstop(struct aac_softstate *, struct aac_cmd *);
319 static void aac_start_waiting_io(struct aac_softstate *);
320 static void aac_drain_comp_q(struct aac_softstate *);
321 int aac_do_io(struct aac_softstate *, struct aac_cmd *);
322 static int aac_sync_fib_slot_bind(struct aac_softstate *, struct aac_cmd *);
323 static void aac_sync_fib_slot_release(struct aac_softstate *, struct aac_cmd *);
324 static void aac_start_io(struct aac_softstate *, struct aac_cmd *);
325 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
326 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
327 static int aac_send_command(struct aac_softstate *, struct aac_slot *);
328 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *);
329 static int aac_dma_sync_ac(struct aac_cmd *);
330 static int aac_shutdown(struct aac_softstate *);
331 static int aac_reset_adapter(struct aac_softstate *);
332 static int aac_do_quiesce(struct aac_softstate *softs);
333 static int aac_do_unquiesce(struct aac_softstate *softs);
334 static void aac_unhold_bus(struct aac_softstate *, int);
335 static void aac_set_throttle(struct aac_softstate *, struct aac_device *,
336 int, int);
337
338 /*
339 * Adapter Initiated FIB handling function
340 */
341 static void aac_save_aif(struct aac_softstate *, ddi_acc_handle_t,
342 struct aac_fib *, int);
343 static int aac_handle_aif(struct aac_softstate *, struct aac_aif_command *);
344
345 /*
346 * Event handling related functions
347 */
348 static void aac_timer(void *);
349 static void aac_event_thread(struct aac_softstate *);
350 static void aac_event_disp(struct aac_softstate *, int);
351
352 /*
353 * IOCTL interface related functions
354 */
355 static int aac_open(dev_t *, int, int, cred_t *);
356 static int aac_close(dev_t, int, int, cred_t *);
357 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
358 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);
359
360 /*
361 * FMA Prototypes
362 */
363 static void aac_fm_init(struct aac_softstate *);
364 static void aac_fm_fini(struct aac_softstate *);
365 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
366 int aac_check_acc_handle(ddi_acc_handle_t);
367 int aac_check_dma_handle(ddi_dma_handle_t);
368 void aac_fm_ereport(struct aac_softstate *, char *);
369
370 /*
371 * Auto enumeration functions
372 */
373 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t);
374 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
375 void *, dev_info_t **);
376 static int aac_handle_dr(struct aac_softstate *, int, int, int);
377
378 extern pri_t minclsyspri;
379
380 #ifdef DEBUG
381 /*
382 * UART debug output support
383 */
384
385 #define AAC_PRINT_BUFFER_SIZE 512
386 #define AAC_PRINT_TIMEOUT 250 /* 1/4 sec. = 250 msec. */
387
388 #define AAC_FW_DBG_STRLEN_OFFSET 0x00
389 #define AAC_FW_DBG_FLAGS_OFFSET 0x04
390 #define AAC_FW_DBG_BLED_OFFSET 0x08
391
392 static int aac_get_fw_debug_buffer(struct aac_softstate *);
393 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
394 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);
395
396 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
397 static char aac_fmt[] = " %s";
398 static char aac_fmt_header[] = " %s.%d: %s";
399 static kmutex_t aac_prt_mutex;
400
401 /*
 * Debug flags to be put into the softstate debug flag fields
403 * when initialized
404 */
405 uint32_t aac_debug_flags =
406 /* AACDB_FLAGS_KERNEL_PRINT | */
407 /* AACDB_FLAGS_FW_PRINT | */
408 /* AACDB_FLAGS_MISC | */
409 /* AACDB_FLAGS_FUNC1 | */
410 /* AACDB_FLAGS_FUNC2 | */
411 /* AACDB_FLAGS_SCMD | */
412 /* AACDB_FLAGS_AIF | */
413 /* AACDB_FLAGS_FIB | */
414 /* AACDB_FLAGS_IOCTL | */
415 0;
416 uint32_t aac_debug_fib_flags =
417 /* AACDB_FLAGS_FIB_RW | */
418 /* AACDB_FLAGS_FIB_IOCTL | */
419 /* AACDB_FLAGS_FIB_SRB | */
420 /* AACDB_FLAGS_FIB_SYNC | */
421 /* AACDB_FLAGS_FIB_HEADER | */
422 /* AACDB_FLAGS_FIB_TIMEOUT | */
423 0;
424
425 #endif /* DEBUG */
426
427 static struct cb_ops aac_cb_ops = {
428 aac_open, /* open */
429 aac_close, /* close */
430 nodev, /* strategy */
431 nodev, /* print */
432 nodev, /* dump */
433 nodev, /* read */
434 nodev, /* write */
435 aac_ioctl, /* ioctl */
436 nodev, /* devmap */
437 nodev, /* mmap */
438 nodev, /* segmap */
439 nochpoll, /* poll */
440 ddi_prop_op, /* cb_prop_op */
441 NULL, /* streamtab */
442 D_64BIT | D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
443 CB_REV, /* cb_rev */
444 nodev, /* async I/O read entry point */
445 nodev /* async I/O write entry point */
446 };
447
448 static struct dev_ops aac_dev_ops = {
449 DEVO_REV,
450 0,
451 aac_getinfo,
452 nulldev,
453 nulldev,
454 aac_attach,
455 aac_detach,
456 aac_reset,
457 &aac_cb_ops,
458 NULL,
459 NULL,
460 aac_quiesce,
461 };
462
463 static struct modldrv aac_modldrv = {
464 &mod_driverops,
465 "AAC Driver " AAC_DRIVER_VERSION,
466 &aac_dev_ops,
467 };
468
469 static struct modlinkage aac_modlinkage = {
470 MODREV_1,
471 { &aac_modldrv, NULL }
472 };
473
474 static struct aac_softstate *aac_softstatep;
475
476 /*
477 * Supported card list
 * ordered by vendor id, subvendor id, subdevice id, and device id
479 */
480 static struct aac_card_type aac_cards[] = {
481 {0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
482 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
483 "Dell", "PERC 3/Di"},
484 {0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
485 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
486 "Dell", "PERC 3/Di"},
487 {0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
488 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
489 "Dell", "PERC 3/Si"},
490 {0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
491 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
492 "Dell", "PERC 3/Di"},
493 {0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
494 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
495 "Dell", "PERC 3/Si"},
496 {0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
497 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
498 "Dell", "PERC 3/Di"},
499 {0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
500 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
501 "Dell", "PERC 3/Di"},
502 {0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
503 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
504 "Dell", "PERC 3/Di"},
505 {0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
506 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
507 "Dell", "PERC 3/Di"},
508 {0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
509 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
510 "Dell", "PERC 3/Di"},
511 {0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
512 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
513 "Dell", "PERC 320/DC"},
514 {0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
515 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},
516
517 {0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
518 0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
519 {0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
520 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
521 {0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
522 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},
523
524 {0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
525 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
526 {0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
527 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
528
529 {0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
530 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
531 "Adaptec", "2200S"},
532 {0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
533 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
534 "Adaptec", "2120S"},
535 {0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
536 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
537 "Adaptec", "2200S"},
538 {0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
539 0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
540 {0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
541 0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
542 {0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
543 0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
544 {0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
545 0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
546 {0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
547 0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
548 {0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
549 0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
550 {0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
551 0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
552 {0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
553 0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
554 {0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
555 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
556 {0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
557 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
558 {0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
559 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
560 {0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
561 0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
562 {0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
563 0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
564 {0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
565 0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
566 {0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
567 0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
568 {0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
569 0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
570 {0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
571 0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
572 {0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
573 0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
574 {0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
575 0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
576 {0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
577 0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
578 {0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
579 0, AAC_TYPE_SATA, "ICP", "9024RO"},
580 {0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
581 0, AAC_TYPE_SATA, "ICP", "9014RO"},
582 {0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
583 0, AAC_TYPE_SATA, "ICP", "9047MA"},
584 {0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
585 0, AAC_TYPE_SATA, "ICP", "9087MA"},
586 {0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
587 0, AAC_TYPE_SAS, "ICP", "9085LI"},
588 {0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX,
589 0, AAC_TYPE_SAS, "ICP", "5085BR"},
590 {0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
591 0, AAC_TYPE_SATA, "ICP", "9067MA"},
592 {0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
593 0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
594 {0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
595 0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
596 {0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
597 0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
598 {0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
599 0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
600 {0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
601 0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
602 {0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
603 0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},
604
605 {0, 0, 0, 0, AAC_HWIF_UNKNOWN,
606 0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
607 };
608
609 /*
610 * Hardware access functions for i960 based cards
611 */
612 static struct aac_interface aac_rx_interface = {
613 aac_rx_get_fwstatus,
614 aac_rx_get_mailbox,
615 aac_rx_set_mailbox
616 };
617
618 /*
619 * Hardware access functions for Rocket based cards
620 */
621 static struct aac_interface aac_rkt_interface = {
622 aac_rkt_get_fwstatus,
623 aac_rkt_get_mailbox,
624 aac_rkt_set_mailbox
625 };
626
627 ddi_device_acc_attr_t aac_acc_attr = {
628 DDI_DEVICE_ATTR_V1,
629 DDI_STRUCTURE_LE_ACC,
630 DDI_STRICTORDER_ACC,
631 DDI_DEFAULT_ACC
632 };
633
634 static struct {
635 int size;
636 int notify;
637 } aac_qinfo[] = {
638 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
639 {AAC_HOST_HIGH_CMD_ENTRIES, 0},
640 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
641 {AAC_ADAP_HIGH_CMD_ENTRIES, 0},
642 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
643 {AAC_HOST_HIGH_RESP_ENTRIES, 0},
644 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
645 {AAC_ADAP_HIGH_RESP_ENTRIES, 0}
646 };
647
648 /*
649 * Default aac dma attributes
650 */
651 static ddi_dma_attr_t aac_dma_attr = {
652 DMA_ATTR_V0,
653 0, /* lowest usable address */
654 0xffffffffull, /* high DMA address range */
655 0xffffffffull, /* DMA counter register */
656 AAC_DMA_ALIGN, /* DMA address alignment */
657 1, /* DMA burstsizes */
658 1, /* min effective DMA size */
659 0xffffffffull, /* max DMA xfer size */
660 0xffffffffull, /* segment boundary */
661 1, /* s/g list length */
662 AAC_BLK_SIZE, /* granularity of device */
663 0 /* DMA transfer flags */
664 };
665
666 static int aac_tick = AAC_DEFAULT_TICK; /* tick for the internal timer */
667 static uint32_t aac_timebase = 0; /* internal timer in seconds */
668
669 /*
670 * Warlock directives
671 *
672 * Different variables with the same types have to be protected by the
673 * same mutex; otherwise, warlock will complain with "variables don't
674 * seem to be protected consistently". For example,
675 * aac_softstate::{q_wait, q_comp} are type of aac_cmd_queue, and protected
676 * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to
 * declare them as protected explicitly at aac_cmd_dequeue().
678 */
679 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \
680 scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \
681 mode_format mode_geometry mode_header aac_cmd))
682 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \
683 aac_sge))
684 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \
685 aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \
686 aac_sg_table aac_srb))
687 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry))
688 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
689 _NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf))
690
691 int
692 _init(void)
693 {
694 int rval = 0;
695
696 #ifdef DEBUG
697 mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
698 #endif
699 DBCALLED(NULL, 1);
700
701 if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
702 sizeof (struct aac_softstate), 0)) != 0)
703 goto error;
704
705 if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
706 ddi_soft_state_fini((void *)&aac_softstatep);
707 goto error;
708 }
709
710 if ((rval = mod_install(&aac_modlinkage)) != 0) {
711 ddi_soft_state_fini((void *)&aac_softstatep);
712 scsi_hba_fini(&aac_modlinkage);
713 goto error;
714 }
715 return (rval);
716
717 error:
718 AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
719 #ifdef DEBUG
720 mutex_destroy(&aac_prt_mutex);
721 #endif
722 return (rval);
723 }
724
725 int
726 _info(struct modinfo *modinfop)
727 {
728 DBCALLED(NULL, 1);
729 return (mod_info(&aac_modlinkage, modinfop));
730 }
731
732 /*
 * An HBA driver cannot be unloaded unless you reboot,
 * so this function is of little practical use.
735 */
736 int
737 _fini(void)
738 {
739 int rval;
740
741 DBCALLED(NULL, 1);
742
743 if ((rval = mod_remove(&aac_modlinkage)) != 0)
744 goto error;
745
746 scsi_hba_fini(&aac_modlinkage);
747 ddi_soft_state_fini((void *)&aac_softstatep);
748 #ifdef DEBUG
749 mutex_destroy(&aac_prt_mutex);
750 #endif
751 return (0);
752
753 error:
754 AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
755 return (rval);
756 }
757
758 static int
759 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
760 {
761 int instance, i;
762 struct aac_softstate *softs = NULL;
763 int attach_state = 0;
764 char *data;
765
766 DBCALLED(NULL, 1);
767
768 switch (cmd) {
769 case DDI_ATTACH:
770 break;
771 case DDI_RESUME:
772 return (DDI_FAILURE);
773 default:
774 return (DDI_FAILURE);
775 }
776
777 instance = ddi_get_instance(dip);
778
779 /* Get soft state */
780 if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
781 AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
782 goto error;
783 }
784 softs = ddi_get_soft_state(aac_softstatep, instance);
785 attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;
786
787 softs->instance = instance;
788 softs->devinfo_p = dip;
789 softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
790 softs->addr_dma_attr.dma_attr_granular = 1;
791 softs->acc_attr = aac_acc_attr;
792 softs->reg_attr = aac_acc_attr;
793 softs->card = AAC_UNKNOWN_CARD;
794 #ifdef DEBUG
795 softs->debug_flags = aac_debug_flags;
796 softs->debug_fib_flags = aac_debug_fib_flags;
797 #endif
798
799 /* Initialize FMA */
800 aac_fm_init(softs);
801
802 /* Check the card type */
803 if (aac_check_card_type(softs) == AACERR) {
804 AACDB_PRINT(softs, CE_WARN, "Card not supported");
805 goto error;
806 }
807 /* We have found the right card and everything is OK */
808 attach_state |= AAC_ATTACH_CARD_DETECTED;
809
810 /* Map PCI mem space */
811 if (ddi_regs_map_setup(dip, 1,
812 (caddr_t *)&softs->pci_mem_base_vaddr, 0,
813 softs->map_size_min, &softs->reg_attr,
814 &softs->pci_mem_handle) != DDI_SUCCESS)
815 goto error;
816
817 softs->map_size = softs->map_size_min;
818 attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;
819
820 AAC_DISABLE_INTR(softs);
821
822 /* Init mutexes and condvars */
823 mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
824 DDI_INTR_PRI(softs->intr_pri));
825 mutex_init(&softs->q_comp_mutex, NULL, MUTEX_DRIVER,
826 DDI_INTR_PRI(softs->intr_pri));
827 mutex_init(&softs->time_mutex, NULL, MUTEX_DRIVER,
828 DDI_INTR_PRI(softs->intr_pri));
829 mutex_init(&softs->ev_lock, NULL, MUTEX_DRIVER,
830 DDI_INTR_PRI(softs->intr_pri));
831 mutex_init(&softs->aifq_mutex, NULL,
832 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
833 cv_init(&softs->event, NULL, CV_DRIVER, NULL);
834 cv_init(&softs->sync_fib_cv, NULL, CV_DRIVER, NULL);
835 cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
836 cv_init(&softs->event_wait_cv, NULL, CV_DRIVER, NULL);
837 cv_init(&softs->event_disp_cv, NULL, CV_DRIVER, NULL);
838 cv_init(&softs->aifq_cv, NULL, CV_DRIVER, NULL);
839 attach_state |= AAC_ATTACH_KMUTEX_INITED;
840
841 /* Init the cmd queues */
842 for (i = 0; i < AAC_CMDQ_NUM; i++)
843 aac_cmd_initq(&softs->q_wait[i]);
844 aac_cmd_initq(&softs->q_busy);
845 aac_cmd_initq(&softs->q_comp);
846
847 /* Check for legacy device naming support */
848 softs->legacy = 1; /* default to use legacy name */
849 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
850 "legacy-name-enable", &data) == DDI_SUCCESS)) {
851 if (strcmp(data, "no") == 0) {
852 AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled");
853 softs->legacy = 0;
854 }
855 ddi_prop_free(data);
856 }
857
858 /*
859 * Everything has been set up till now,
860 * we will do some common attach.
861 */
862 mutex_enter(&softs->io_lock);
863 if (aac_common_attach(softs) == AACERR) {
864 mutex_exit(&softs->io_lock);
865 goto error;
866 }
867 mutex_exit(&softs->io_lock);
868 attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;
869
870 /* Check for buf breakup support */
871 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
872 "breakup-enable", &data) == DDI_SUCCESS)) {
873 if (strcmp(data, "yes") == 0) {
874 AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled");
875 softs->flags |= AAC_FLAGS_BRKUP;
876 }
877 ddi_prop_free(data);
878 }
879 softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer;
880 if (softs->flags & AAC_FLAGS_BRKUP) {
881 softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
882 DDI_PROP_DONTPASS, "dma-max", softs->dma_max);
883 }
884
885 if (aac_hba_setup(softs) != AACOK)
886 goto error;
887 attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;
888
889 /* Create devctl/scsi nodes for cfgadm */
890 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
891 INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
892 AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
893 goto error;
894 }
895 attach_state |= AAC_ATTACH_CREATE_DEVCTL;
896
897 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
898 DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
899 AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
900 goto error;
901 }
902 attach_state |= AAC_ATTACH_CREATE_SCSI;
903
904 /* Create aac node for app. to issue ioctls */
905 if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
906 DDI_PSEUDO, 0) != DDI_SUCCESS) {
907 AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
908 goto error;
909 }
910
911 /* Common attach is OK, so we are attached! */
912 softs->state |= AAC_STATE_RUN;
913
914 /* Create event thread */
915 softs->fibctx_p = &softs->aifctx;
916 if ((softs->event_thread = thread_create(NULL, 0, aac_event_thread,
917 softs, 0, &p0, TS_RUN, minclsyspri)) == NULL) {
918 AACDB_PRINT(softs, CE_WARN, "aif thread create failed");
919 softs->state &= ~AAC_STATE_RUN;
920 goto error;
921 }
922
923 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
924
	/* Set up a timer for command timeout handling */
926 softs->timeout_id = timeout(aac_timer, (void *)softs,
927 (aac_tick * drv_usectohz(1000000)));
928
	/* Attach succeeded; announce the device */
930 ddi_report_dev(dip);
931 AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
932 return (DDI_SUCCESS);
933
934 error:
935 if (attach_state & AAC_ATTACH_CREATE_SCSI)
936 ddi_remove_minor_node(dip, "scsi");
937 if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
938 ddi_remove_minor_node(dip, "devctl");
939 if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
940 aac_common_detach(softs);
941 if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
942 (void) scsi_hba_detach(dip);
943 scsi_hba_tran_free(AAC_DIP2TRAN(dip));
944 }
945 if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
946 mutex_destroy(&softs->io_lock);
947 mutex_destroy(&softs->q_comp_mutex);
948 mutex_destroy(&softs->time_mutex);
949 mutex_destroy(&softs->ev_lock);
950 mutex_destroy(&softs->aifq_mutex);
951 cv_destroy(&softs->event);
952 cv_destroy(&softs->sync_fib_cv);
953 cv_destroy(&softs->drain_cv);
954 cv_destroy(&softs->event_wait_cv);
955 cv_destroy(&softs->event_disp_cv);
956 cv_destroy(&softs->aifq_cv);
957 }
958 if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
959 ddi_regs_map_free(&softs->pci_mem_handle);
960 aac_fm_fini(softs);
961 if (attach_state & AAC_ATTACH_CARD_DETECTED)
962 softs->card = AACERR;
963 if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
964 ddi_soft_state_free(aac_softstatep, instance);
965 return (DDI_FAILURE);
966 }
967
968 static int
969 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
970 {
971 scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
972 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
973
974 DBCALLED(softs, 1);
975
976 switch (cmd) {
977 case DDI_DETACH:
978 break;
979 case DDI_SUSPEND:
980 return (DDI_FAILURE);
981 default:
982 return (DDI_FAILURE);
983 }
984
985 mutex_enter(&softs->io_lock);
986 AAC_DISABLE_INTR(softs);
987 softs->state = AAC_STATE_STOPPED;
988
989 ddi_remove_minor_node(dip, "aac");
990 ddi_remove_minor_node(dip, "scsi");
991 ddi_remove_minor_node(dip, "devctl");
992 mutex_exit(&softs->io_lock);
993
994 aac_common_detach(softs);
995
996 mutex_enter(&softs->io_lock);
997 (void) scsi_hba_detach(dip);
998 scsi_hba_tran_free(tran);
999 mutex_exit(&softs->io_lock);
1000
1001 /* Stop timer */
1002 mutex_enter(&softs->time_mutex);
1003 if (softs->timeout_id) {
1004 timeout_id_t tid = softs->timeout_id;
1005 softs->timeout_id = 0;
1006
1007 mutex_exit(&softs->time_mutex);
1008 (void) untimeout(tid);
1009 mutex_enter(&softs->time_mutex);
1010 }
1011 mutex_exit(&softs->time_mutex);
1012
1013 /* Destroy event thread */
1014 mutex_enter(&softs->ev_lock);
1015 cv_signal(&softs->event_disp_cv);
1016 cv_wait(&softs->event_wait_cv, &softs->ev_lock);
1017 mutex_exit(&softs->ev_lock);
1018
1019 cv_destroy(&softs->aifq_cv);
1020 cv_destroy(&softs->event_disp_cv);
1021 cv_destroy(&softs->event_wait_cv);
1022 cv_destroy(&softs->drain_cv);
1023 cv_destroy(&softs->sync_fib_cv);
1024 cv_destroy(&softs->event);
1025 mutex_destroy(&softs->aifq_mutex);
1026 mutex_destroy(&softs->ev_lock);
1027 mutex_destroy(&softs->time_mutex);
1028 mutex_destroy(&softs->q_comp_mutex);
1029 mutex_destroy(&softs->io_lock);
1030
1031 ddi_regs_map_free(&softs->pci_mem_handle);
1032 aac_fm_fini(softs);
1033 softs->hwif = AAC_HWIF_UNKNOWN;
1034 softs->card = AAC_UNKNOWN_CARD;
1035 ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));
1036
1037 return (DDI_SUCCESS);
1038 }
1039
1040 /*ARGSUSED*/
1041 static int
1042 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1043 {
1044 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1045
1046 DBCALLED(softs, 1);
1047
1048 mutex_enter(&softs->io_lock);
1049 AAC_DISABLE_INTR(softs);
1050 (void) aac_shutdown(softs);
1051 mutex_exit(&softs->io_lock);
1052
1053 return (DDI_SUCCESS);
1054 }
1055
1056 /*
1057 * quiesce(9E) entry point.
1058 *
1059 * This function is called when the system is single-threaded at high
1060 * PIL with preemption disabled. Therefore, this function must not be
1061 * blocked.
1062 *
1063 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1064 * DDI_FAILURE indicates an error condition and should almost never happen.
1065 */
1066 static int
1067 aac_quiesce(dev_info_t *dip)
1068 {
1069 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1070
1071 if (softs == NULL)
1072 return (DDI_FAILURE);
1073
1074 _NOTE(ASSUMING_PROTECTED(softs->state))
1075 AAC_DISABLE_INTR(softs);
1076
1077 return (DDI_SUCCESS);
1078 }
1079
1080 /* ARGSUSED */
1081 static int
1082 aac_getinfo(dev_info_t *self, ddi_info_cmd_t infocmd, void *arg,
1083 void **result)
1084 {
1085 int error = DDI_SUCCESS;
1086
1087 switch (infocmd) {
1088 case DDI_INFO_DEVT2INSTANCE:
1089 *result = (void *)(intptr_t)(MINOR2INST(getminor((dev_t)arg)));
1090 break;
1091 default:
1092 error = DDI_FAILURE;
1093 }
1094 return (error);
1095 }
1096
1097 /*
1098 * Bring the controller down to a dormant state and detach all child devices.
1099 * This function is called before detach or system shutdown.
1100 * Note: we can assume that the q_wait on the controller is empty, as we
1101 * won't allow shutdown if any device is open.
1102 */
1103 static int
1104 aac_shutdown(struct aac_softstate *softs)
1105 {
1106 ddi_acc_handle_t acc;
1107 struct aac_close_command *cc;
1108 int rval;
1109
1110 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
1111 acc = softs->sync_ac.slotp->fib_acc_handle;
1112
1113 cc = (struct aac_close_command *)&softs->sync_ac.slotp->fibp->data[0];
1114
1115 ddi_put32(acc, &cc->Command, VM_CloseAll);
1116 ddi_put32(acc, &cc->ContainerId, 0xfffffffful);
1117
1118 /* Flush all caches, set FW to write through mode */
1119 rval = aac_sync_fib(softs, ContainerCommand,
1120 AAC_FIB_SIZEOF(struct aac_close_command));
1121 aac_sync_fib_slot_release(softs, &softs->sync_ac);
1122
1123 AACDB_PRINT(softs, CE_NOTE,
1124 "shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
1125 return (rval);
1126 }
1127
1128 static uint_t
1129 aac_softintr(caddr_t arg)
1130 {
1131 struct aac_softstate *softs = (void *)arg;
1132
1133 if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
1134 aac_drain_comp_q(softs);
1135 }
1136 return (DDI_INTR_CLAIMED);
1137 }
1138
1139 /*
 * Set up auto sense data for pkt
1141 */
1142 static void
1143 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
1144 uchar_t add_code, uchar_t qual_code, uint64_t info)
1145 {
1146 struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp);
1147
1148 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
1149 pkt->pkt_state |= STATE_ARQ_DONE;
1150
1151 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1152 arqstat->sts_rqpkt_reason = CMD_CMPLT;
1153 arqstat->sts_rqpkt_resid = 0;
1154 arqstat->sts_rqpkt_state =
1155 STATE_GOT_BUS |
1156 STATE_GOT_TARGET |
1157 STATE_SENT_CMD |
1158 STATE_XFERRED_DATA;
1159 arqstat->sts_rqpkt_statistics = 0;
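
	/*
	 * Fixed-format sense data can carry at most a 32-bit information
	 * field (LBA), so larger LBAs are reported with descriptor-format
	 * sense data and an information sense-data descriptor.
	 */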
1160
1161 if (info <= 0xfffffffful) {
1162 arqstat->sts_sensedata.es_valid = 1;
1163 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
1164 arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
1165 arqstat->sts_sensedata.es_key = key;
1166 arqstat->sts_sensedata.es_add_code = add_code;
1167 arqstat->sts_sensedata.es_qual_code = qual_code;
1168
1169 arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
1170 arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
1171 arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF;
1172 arqstat->sts_sensedata.es_info_4 = info & 0xFF;
1173 } else { /* 64-bit LBA */
1174 struct scsi_descr_sense_hdr *dsp;
1175 struct scsi_information_sense_descr *isd;
1176
1177 dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
1178 dsp->ds_class = CLASS_EXTENDED_SENSE;
1179 dsp->ds_code = CODE_FMT_DESCR_CURRENT;
1180 dsp->ds_key = key;
1181 dsp->ds_add_code = add_code;
1182 dsp->ds_qual_code = qual_code;
1183 dsp->ds_addl_sense_length =
1184 sizeof (struct scsi_information_sense_descr);
1185
1186 isd = (struct scsi_information_sense_descr *)(dsp+1);
1187 isd->isd_descr_type = DESCR_INFORMATION;
1188 isd->isd_valid = 1;
1189 isd->isd_information[0] = (info >> 56) & 0xFF;
1190 isd->isd_information[1] = (info >> 48) & 0xFF;
1191 isd->isd_information[2] = (info >> 40) & 0xFF;
1192 isd->isd_information[3] = (info >> 32) & 0xFF;
1193 isd->isd_information[4] = (info >> 24) & 0xFF;
1194 isd->isd_information[5] = (info >> 16) & 0xFF;
1195 isd->isd_information[6] = (info >> 8) & 0xFF;
1196 isd->isd_information[7] = (info) & 0xFF;
1197 }
1198 }
1199
1200 /*
 * Set up auto sense data for HARDWARE ERROR
1202 */
1203 static void
1204 aac_set_arq_data_hwerr(struct aac_cmd *acp)
1205 {
1206 union scsi_cdb *cdbp;
1207 uint64_t err_blkno;
1208
1209 cdbp = (void *)acp->pkt->pkt_cdbp;
1210 err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
1211 aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
1212 }
1213
1214 /*
1215 * Send a command to the adapter in New Comm. interface
1216 */
1217 static int
1218 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
1219 {
1220 uint32_t index, device;
1221
1222 index = PCI_MEM_GET32(softs, AAC_IQUE);
1223 if (index == 0xffffffffUL) {
1224 index = PCI_MEM_GET32(softs, AAC_IQUE);
1225 if (index == 0xffffffffUL)
1226 return (AACERR);
1227 }
1228
1229 device = index;
1230 PCI_MEM_PUT32(softs, device,
1231 (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
1232 device += 4;
1233 PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
1234 device += 4;
1235 PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
1236 PCI_MEM_PUT32(softs, AAC_IQUE, index);
1237 return (AACOK);
1238 }
1239
1240 static void
1241 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
1242 {
1243 struct aac_device *dvp = acp->dvp;
1244 int q = AAC_CMDQ(acp);
1245
1246 if (acp->slotp) { /* outstanding cmd */
1247 if (!(acp->flags & AAC_CMD_IN_SYNC_SLOT)) {
1248 aac_release_slot(softs, acp->slotp);
1249 acp->slotp = NULL;
1250 }
1251 if (dvp) {
1252 dvp->ncmds[q]--;
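			/*
			 * If the device was draining and this was its
			 * last outstanding async command, restore its
			 * throttle so new I/O can be issued again.
			 */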
1253 if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
1254 dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
1255 aac_set_throttle(softs, dvp, q,
1256 softs->total_slots);
1257 /*
1258 * Setup auto sense data for UNIT ATTENTION
1259 * Each lun should generate a unit attention
1260 * condition when reset.
1261 * Phys. drives are treated as logical ones
1262 * during error recovery.
1263 */
1264 if (dvp->type == AAC_DEV_LD) {
1265 struct aac_container *ctp =
1266 (struct aac_container *)dvp;
1267 if (ctp->reset == 0)
1268 goto noreset;
1269
1270 AACDB_PRINT(softs, CE_NOTE,
1271 "Unit attention: reset");
1272 ctp->reset = 0;
1273 aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION,
1274 0x29, 0x02, 0);
1275 }
1276 }
1277 noreset:
1278 softs->bus_ncmds[q]--;
1279 aac_cmd_delete(&softs->q_busy, acp);
1280 } else { /* cmd in waiting queue */
1281 aac_cmd_delete(&softs->q_wait[q], acp);
1282 }
1283
1284 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
1285 mutex_enter(&softs->q_comp_mutex);
1286 aac_cmd_enqueue(&softs->q_comp, acp);
1287 mutex_exit(&softs->q_comp_mutex);
1288 } else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
1289 cv_broadcast(&softs->event);
1290 }
1291 }
1292
1293 static void
1294 aac_handle_io(struct aac_softstate *softs, int index)
1295 {
1296 struct aac_slot *slotp;
1297 struct aac_cmd *acp;
1298 uint32_t fast;
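
	/*
	 * The two low bits of the value reported by the firmware are flag
	 * bits (fast response / AIF); the remaining bits form the slot
	 * index, hence the shift below.
	 */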
1299
1300 fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
1301 index >>= 2;
1302
1303 /* Make sure firmware reported index is valid */
1304 ASSERT(index >= 0 && index < softs->total_slots);
1305 slotp = &softs->io_slot[index];
1306 ASSERT(slotp->index == index);
1307 acp = slotp->acp;
1308
1309 if (acp == NULL || acp->slotp != slotp) {
1310 cmn_err(CE_WARN,
1311 "Firmware error: invalid slot index received from FW");
1312 return;
1313 }
1314
1315 acp->flags |= AAC_CMD_CMPLT;
1316 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1317
1318 if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
1319 /*
1320 * For fast response IO, the firmware do not return any FIB
1321 * data, so we need to fill in the FIB status and state so that
1322 * FIB users can handle it correctly.
1323 */
1324 if (fast) {
1325 uint32_t state;
1326
1327 state = ddi_get32(slotp->fib_acc_handle,
1328 &slotp->fibp->Header.XferState);
1329 /*
1330 * Update state for CPU not for device, no DMA sync
1331 * needed
1332 */
1333 ddi_put32(slotp->fib_acc_handle,
1334 &slotp->fibp->Header.XferState,
1335 state | AAC_FIBSTATE_DONEADAP);
1336 ddi_put32(slotp->fib_acc_handle,
1337 (void *)&slotp->fibp->data[0], ST_OK);
1338 }
1339
1340 /* Handle completed ac */
1341 acp->ac_comp(softs, acp);
1342 } else {
1343 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1344 acp->flags |= AAC_CMD_ERR;
1345 if (acp->pkt) {
1346 acp->pkt->pkt_reason = CMD_TRAN_ERR;
1347 acp->pkt->pkt_statistics = 0;
1348 }
1349 }
1350 aac_end_io(softs, acp);
1351 }
1352
1353 /*
1354 * Interrupt handler for New Comm. interface
1355 * New Comm. interface use a different mechanism for interrupt. No explict
1356 * message queues, and driver need only accesses the mapped PCI mem space to
1357 * find the completed FIB or AIF.
1358 */
1359 static int
1360 aac_process_intr_new(struct aac_softstate *softs)
1361 {
1362 uint32_t index;
1363
1364 index = AAC_OUTB_GET(softs);
1365 if (index == 0xfffffffful)
1366 index = AAC_OUTB_GET(softs);
1367 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1368 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1369 return (0);
1370 }
1371 if (index != 0xfffffffful) {
1372 do {
1373 if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
1374 aac_handle_io(softs, index);
1375 } else if (index != 0xfffffffeul) {
1376 struct aac_fib *fibp; /* FIB in AIF queue */
1377 uint16_t fib_size;
1378
1379 /*
1380 * 0xfffffffe means that the controller wants
1381 * more work, ignore it for now. Otherwise,
1382 * AIF received.
1383 */
1384 index &= ~2;
1385
1386 fibp = (struct aac_fib *)(softs-> \
1387 pci_mem_base_vaddr + index);
1388 fib_size = PCI_MEM_GET16(softs, index + \
1389 offsetof(struct aac_fib, Header.Size));
1390
1391 aac_save_aif(softs, softs->pci_mem_handle,
1392 fibp, fib_size);
1393
1394 /*
1395 * AIF memory is owned by the adapter, so let it
1396 * know that we are done with it.
1397 */
1398 AAC_OUTB_SET(softs, index);
1399 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1400 }
1401
1402 index = AAC_OUTB_GET(softs);
1403 } while (index != 0xfffffffful);
1404
1405 /*
1406 * Process waiting cmds before start new ones to
1407 * ensure first IOs are serviced first.
1408 */
1409 aac_start_waiting_io(softs);
1410 return (AAC_DB_COMMAND_READY);
1411 } else {
1412 return (0);
1413 }
1414 }
1415
1416 static uint_t
1417 aac_intr_new(caddr_t arg)
1418 {
1419 struct aac_softstate *softs = (void *)arg;
1420 uint_t rval;
1421
1422 mutex_enter(&softs->io_lock);
1423 if (aac_process_intr_new(softs))
1424 rval = DDI_INTR_CLAIMED;
1425 else
1426 rval = DDI_INTR_UNCLAIMED;
1427 mutex_exit(&softs->io_lock);
1428
1429 aac_drain_comp_q(softs);
1430 return (rval);
1431 }
1432
1433 /*
1434 * Interrupt handler for old interface
1435 * Explicit message queues are used to send FIB to and get completed FIB from
1436 * the adapter. Driver and adapter maitain the queues in the producer/consumer
1437 * manner. The driver has to query the queues to find the completed FIB.
1438 */
1439 static int
1440 aac_process_intr_old(struct aac_softstate *softs)
1441 {
1442 uint16_t status;
1443
1444 status = AAC_STATUS_GET(softs);
1445 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1446 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1447 return (DDI_INTR_UNCLAIMED);
1448 }
1449 if (status & AAC_DB_RESPONSE_READY) {
1450 int slot_idx;
1451
1452 /* ACK the intr */
1453 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1454 (void) AAC_STATUS_GET(softs);
1455 while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
1456 &slot_idx) == AACOK)
1457 aac_handle_io(softs, slot_idx);
1458
1459 /*
1460 * Process waiting cmds before start new ones to
1461 * ensure first IOs are serviced first.
1462 */
1463 aac_start_waiting_io(softs);
1464 return (AAC_DB_RESPONSE_READY);
1465 } else if (status & AAC_DB_COMMAND_READY) {
1466 int aif_idx;
1467
1468 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
1469 (void) AAC_STATUS_GET(softs);
1470 if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
1471 AACOK) {
1472 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
1473 struct aac_fib *fibp; /* FIB in communication space */
1474 uint16_t fib_size;
1475 uint32_t fib_xfer_state;
1476 uint32_t addr, size;
1477
1478 ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));
1479
1480 #define AAC_SYNC_AIF(softs, aif_idx, type) \
1481 { (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
1482 offsetof(struct aac_comm_space, \
1483 adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
1484 (type)); }
1485
1486 /* Copy AIF from adapter to the empty AIF slot */
1487 AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
1488 fibp = &softs->comm_space->adapter_fibs[aif_idx];
1489 fib_size = ddi_get16(acc, &fibp->Header.Size);
1490
1491 aac_save_aif(softs, acc, fibp, fib_size);
1492
1493 /* Complete AIF back to adapter with good status */
1494 fib_xfer_state = LE_32(fibp->Header.XferState);
1495 if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
1496 ddi_put32(acc, &fibp->Header.XferState,
1497 fib_xfer_state | AAC_FIBSTATE_DONEHOST);
1498 ddi_put32(acc, (void *)&fibp->data[0], ST_OK);
1499 if (fib_size > AAC_FIB_SIZE)
1500 ddi_put16(acc, &fibp->Header.Size,
1501 AAC_FIB_SIZE);
1502 AAC_SYNC_AIF(softs, aif_idx,
1503 DDI_DMA_SYNC_FORDEV);
1504 }
1505
1506 /* Put the AIF response on the response queue */
1507 addr = ddi_get32(acc,
1508 &softs->comm_space->adapter_fibs[aif_idx]. \
1509 Header.SenderFibAddress);
1510 size = (uint32_t)ddi_get16(acc,
1511 &softs->comm_space->adapter_fibs[aif_idx]. \
1512 Header.Size);
1513 ddi_put32(acc,
1514 &softs->comm_space->adapter_fibs[aif_idx]. \
1515 Header.ReceiverFibAddress, addr);
1516 if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
1517 addr, size) == AACERR)
1518 cmn_err(CE_NOTE, "!AIF ack failed");
1519 }
1520 return (AAC_DB_COMMAND_READY);
1521 } else if (status & AAC_DB_PRINTF_READY) {
1522 /* ACK the intr */
1523 AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
1524 (void) AAC_STATUS_GET(softs);
1525 (void) ddi_dma_sync(softs->comm_space_dma_handle,
1526 offsetof(struct aac_comm_space, adapter_print_buf),
1527 AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
1528 if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
1529 DDI_SUCCESS)
1530 cmn_err(CE_NOTE, "MSG From Adapter: %s",
1531 softs->comm_space->adapter_print_buf);
1532 else
1533 ddi_fm_service_impact(softs->devinfo_p,
1534 DDI_SERVICE_UNAFFECTED);
1535 AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
1536 return (AAC_DB_PRINTF_READY);
1537 } else if (status & AAC_DB_COMMAND_NOT_FULL) {
1538 /*
1539 * Without these two condition statements, the OS could hang
1540 * after a while, especially if there are a lot of AIF's to
1541 * handle, for instance if a drive is pulled from an array
1542 * under heavy load.
1543 */
1544 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1545 return (AAC_DB_COMMAND_NOT_FULL);
1546 } else if (status & AAC_DB_RESPONSE_NOT_FULL) {
1547 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1548 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL);
1549 return (AAC_DB_RESPONSE_NOT_FULL);
1550 } else {
1551 return (0);
1552 }
1553 }
1554
1555 static uint_t
1556 aac_intr_old(caddr_t arg)
1557 {
1558 struct aac_softstate *softs = (void *)arg;
1559 int rval;
1560
1561 mutex_enter(&softs->io_lock);
1562 if (aac_process_intr_old(softs))
1563 rval = DDI_INTR_CLAIMED;
1564 else
1565 rval = DDI_INTR_UNCLAIMED;
1566 mutex_exit(&softs->io_lock);
1567
1568 aac_drain_comp_q(softs);
1569 return (rval);
1570 }
1571
1572 /*
1573 * Query FIXED or MSI interrupts
1574 */
1575 static int
1576 aac_query_intrs(struct aac_softstate *softs, int intr_type)
1577 {
1578 dev_info_t *dip = softs->devinfo_p;
1579 int avail, actual, count;
1580 int i, flag, ret;
1581
1582 AACDB_PRINT(softs, CE_NOTE,
1583 "aac_query_intrs:interrupt type 0x%x", intr_type);
1584
1585 /* Get number of interrupts */
1586 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
1587 if ((ret != DDI_SUCCESS) || (count == 0)) {
1588 AACDB_PRINT(softs, CE_WARN,
1589 "ddi_intr_get_nintrs() failed, ret %d count %d",
1590 ret, count);
1591 return (DDI_FAILURE);
1592 }
1593
1594 /* Get number of available interrupts */
1595 ret = ddi_intr_get_navail(dip, intr_type, &avail);
1596 if ((ret != DDI_SUCCESS) || (avail == 0)) {
1597 AACDB_PRINT(softs, CE_WARN,
1598 "ddi_intr_get_navail() failed, ret %d avail %d",
1599 ret, avail);
1600 return (DDI_FAILURE);
1601 }
1602
	AACDB_PRINT(softs, CE_NOTE,
	    "ddi_intr_get_nintrs() returned %d, "
	    "ddi_intr_get_navail() returned %d", count, avail);
1606
1607 /* Allocate an array of interrupt handles */
1608 softs->intr_size = count * sizeof (ddi_intr_handle_t);
1609 softs->htable = kmem_alloc(softs->intr_size, KM_SLEEP);
1610
1611 if (intr_type == DDI_INTR_TYPE_MSI) {
		count = 1; /* only one vector needed for now */
1613 flag = DDI_INTR_ALLOC_STRICT;
1614 } else { /* must be DDI_INTR_TYPE_FIXED */
1615 flag = DDI_INTR_ALLOC_NORMAL;
1616 }
1617
1618 /* Call ddi_intr_alloc() */
1619 ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0,
1620 count, &actual, flag);
1621
1622 if ((ret != DDI_SUCCESS) || (actual == 0)) {
1623 AACDB_PRINT(softs, CE_WARN,
1624 "ddi_intr_alloc() failed, ret = %d", ret);
1625 actual = 0;
1626 goto error;
1627 }
1628
1629 if (actual < count) {
1630 AACDB_PRINT(softs, CE_NOTE,
1631 "Requested: %d, Received: %d", count, actual);
1632 goto error;
1633 }
1634
1635 softs->intr_cnt = actual;
1636
1637 /* Get priority for first msi, assume remaining are all the same */
1638 if ((ret = ddi_intr_get_pri(softs->htable[0],
1639 &softs->intr_pri)) != DDI_SUCCESS) {
1640 AACDB_PRINT(softs, CE_WARN,
1641 "ddi_intr_get_pri() failed, ret = %d", ret);
1642 goto error;
1643 }
1644
1645 /* Test for high level mutex */
1646 if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) {
1647 AACDB_PRINT(softs, CE_WARN,
1648 "aac_query_intrs: Hi level interrupt not supported");
1649 goto error;
1650 }
1651
1652 return (DDI_SUCCESS);
1653
1654 error:
1655 /* Free already allocated intr */
1656 for (i = 0; i < actual; i++)
1657 (void) ddi_intr_free(softs->htable[i]);
1658
1659 kmem_free(softs->htable, softs->intr_size);
1660 return (DDI_FAILURE);
1661 }
1662
1663
1664 /*
 * Register FIXED or MSI interrupt handlers
1666 */
1667 static int
1668 aac_add_intrs(struct aac_softstate *softs)
1669 {
1670 int i, ret;
1671 int actual;
1672 ddi_intr_handler_t *aac_intr;
1673
1674 actual = softs->intr_cnt;
1675 aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ?
1676 aac_intr_new : aac_intr_old);
1677
1678 /* Call ddi_intr_add_handler() */
1679 for (i = 0; i < actual; i++) {
1680 if ((ret = ddi_intr_add_handler(softs->htable[i],
1681 aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) {
1682 cmn_err(CE_WARN,
1683 "ddi_intr_add_handler() failed ret = %d", ret);
1684
1685 /* Free already allocated intr */
1686 for (i = 0; i < actual; i++)
1687 (void) ddi_intr_free(softs->htable[i]);
1688
1689 kmem_free(softs->htable, softs->intr_size);
1690 return (DDI_FAILURE);
1691 }
1692 }
1693
1694 if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap))
1695 != DDI_SUCCESS) {
1696 cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret);
1697
1698 /* Free already allocated intr */
1699 for (i = 0; i < actual; i++)
1700 (void) ddi_intr_free(softs->htable[i]);
1701
1702 kmem_free(softs->htable, softs->intr_size);
1703 return (DDI_FAILURE);
1704 }
1705
1706 return (DDI_SUCCESS);
1707 }
1708
1709 /*
1710 * Unregister FIXED or MSI interrupts
1711 */
1712 static void
1713 aac_remove_intrs(struct aac_softstate *softs)
1714 {
1715 int i;
1716
1717 /* Disable all interrupts */
1718 (void) aac_disable_intrs(softs);
1719 /* Call ddi_intr_remove_handler() */
1720 for (i = 0; i < softs->intr_cnt; i++) {
1721 (void) ddi_intr_remove_handler(softs->htable[i]);
1722 (void) ddi_intr_free(softs->htable[i]);
1723 }
1724
1725 kmem_free(softs->htable, softs->intr_size);
1726 }
1727
1728 static int
1729 aac_enable_intrs(struct aac_softstate *softs)
1730 {
1731 int rval = AACOK;
1732
1733 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1734 /* for MSI block enable */
1735 if (ddi_intr_block_enable(softs->htable, softs->intr_cnt) !=
1736 DDI_SUCCESS)
1737 rval = AACERR;
1738 } else {
1739 int i;
1740
1741 /* Call ddi_intr_enable() for legacy/MSI non block enable */
1742 for (i = 0; i < softs->intr_cnt; i++) {
1743 if (ddi_intr_enable(softs->htable[i]) != DDI_SUCCESS)
1744 rval = AACERR;
1745 }
1746 }
1747 return (rval);
1748 }
1749
1750 static int
1751 aac_disable_intrs(struct aac_softstate *softs)
1752 {
1753 int rval = AACOK;
1754
1755 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1756 /* Call ddi_intr_block_disable() */
1757 if (ddi_intr_block_disable(softs->htable, softs->intr_cnt) !=
1758 DDI_SUCCESS)
1759 rval = AACERR;
1760 } else {
1761 int i;
1762
1763 for (i = 0; i < softs->intr_cnt; i++) {
1764 if (ddi_intr_disable(softs->htable[i]) != DDI_SUCCESS)
1765 rval = AACERR;
1766 }
1767 }
1768 return (rval);
1769 }
1770
1771 /*
1772 * Set pkt_reason and OR in pkt_statistics flag
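* Only the first non-CMD_CMPLT reason is recorded, so the earliest error wins.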
1773 */
1774 static void
1775 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp,
1776 uchar_t reason, uint_t stat)
1777 {
1778 #ifndef __lock_lint
1779 _NOTE(ARGUNUSED(softs))
1780 #endif
1781 if (acp->pkt->pkt_reason == CMD_CMPLT)
1782 acp->pkt->pkt_reason = reason;
1783 acp->pkt->pkt_statistics |= stat;
1784 }
1785
1786 /*
1787 * Handle a finished pkt of soft SCMD
1788 */
1789 static void
1790 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp)
1791 {
1792 ASSERT(acp->pkt);
1793
1794 acp->flags |= AAC_CMD_CMPLT;
1795
1796 acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \
1797 STATE_SENT_CMD | STATE_GOT_STATUS;
1798 if (acp->pkt->pkt_state & STATE_XFERRED_DATA)
1799 acp->pkt->pkt_resid = 0;
1800
1801 /* AAC_CMD_NO_INTR means no complete callback */
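/* hand the command to the soft interrupt handler for final completion processing */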
1802 if (!(acp->flags & AAC_CMD_NO_INTR)) {
1803 mutex_enter(&softs->q_comp_mutex);
1804 aac_cmd_enqueue(&softs->q_comp, acp);
1805 mutex_exit(&softs->q_comp_mutex);
1806 ddi_trigger_softintr(softs->softint_id);
1807 }
1808 }
1809
1810 /*
1811 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
1812 */
1813
1814 /*
1815 * Handle completed logical device IO command
1816 */
1817 /*ARGSUSED*/
1818 static void
1819 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1820 {
1821 struct aac_slot *slotp = acp->slotp;
1822 struct aac_blockread_response *resp;
1823 uint32_t status;
1824
1825 ASSERT(!(acp->flags & AAC_CMD_SYNC));
1826 ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1827
1828 acp->pkt->pkt_state |= STATE_GOT_STATUS;
1829
1830 /*
1831 * block_read/write has a similar response header, use blockread
1832 * response for both.
1833 */
1834 resp = (struct aac_blockread_response *)&slotp->fibp->data[0];
1835 status = ddi_get32(slotp->fib_acc_handle, &resp->Status);
1836 if (status == ST_OK) {
1837 acp->pkt->pkt_resid = 0;
1838 acp->pkt->pkt_state |= STATE_XFERRED_DATA;
1839 } else {
1840 aac_set_arq_data_hwerr(acp);
1841 }
1842 }
1843
1844 /*
1845 * Handle completed phys. device IO command
1846 */
1847 static void
1848 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1849 {
1850 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
1851 struct aac_fib *fibp = acp->slotp->fibp;
1852 struct scsi_pkt *pkt = acp->pkt;
1853 struct aac_srb_reply *resp;
1854 uint32_t resp_status;
1855
1856 ASSERT(!(acp->flags & AAC_CMD_SYNC));
1857 ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1858
1859 resp = (struct aac_srb_reply *)&fibp->data[0];
1860 resp_status = ddi_get32(acc, &resp->status);
1861
1862 /* First check FIB status */
1863 if (resp_status == ST_OK) {
1864 uint32_t scsi_status;
1865 uint32_t srb_status;
1866 uint32_t data_xfer_length;
1867
1868 scsi_status = ddi_get32(acc, &resp->scsi_status);
1869 srb_status = ddi_get32(acc, &resp->srb_status);
1870 data_xfer_length = ddi_get32(acc, &resp->data_xfer_length);
1871
1872 *pkt->pkt_scbp = (uint8_t)scsi_status;
1873 pkt->pkt_state |= STATE_GOT_STATUS;
1874 if (scsi_status == STATUS_GOOD) {
1875 uchar_t cmd = ((union scsi_cdb *)(void *)
1876 (pkt->pkt_cdbp))->scc_cmd;
1877
1878 /* Next check SRB status */
1879 switch (srb_status & 0x3f) {
1880 case SRB_STATUS_DATA_OVERRUN:
1881 AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \
1882 "scmd=%d, xfer=%d, buflen=%d",
1883 (uint32_t)cmd, data_xfer_length,
1884 acp->bcount);
1885
1886 switch (cmd) {
1887 case SCMD_READ:
1888 case SCMD_WRITE:
1889 case SCMD_READ_G1:
1890 case SCMD_WRITE_G1:
1891 case SCMD_READ_G4:
1892 case SCMD_WRITE_G4:
1893 case SCMD_READ_G5:
1894 case SCMD_WRITE_G5:
1895 aac_set_pkt_reason(softs, acp,
1896 CMD_DATA_OVR, 0);
1897 break;
1898 }
1899 /*FALLTHRU*/
1900 case SRB_STATUS_ERROR_RECOVERY:
1901 case SRB_STATUS_PENDING:
1902 case SRB_STATUS_SUCCESS:
1903 /*
1904 * pkt_resid should only be calculated if the
1905 * status is ERROR_RECOVERY/PENDING/SUCCESS/
1906 * OVERRUN/UNDERRUN
1907 */
1908 if (data_xfer_length) {
1909 pkt->pkt_state |= STATE_XFERRED_DATA;
1910 pkt->pkt_resid = acp->bcount - \
1911 data_xfer_length;
1912 ASSERT(pkt->pkt_resid >= 0);
1913 }
1914 break;
1915 case SRB_STATUS_ABORTED:
1916 AACDB_PRINT(softs, CE_NOTE,
1917 "SRB_STATUS_ABORTED, xfer=%d, resid=%d",
1918 data_xfer_length, pkt->pkt_resid);
1919 aac_set_pkt_reason(softs, acp, CMD_ABORTED,
1920 STAT_ABORTED);
1921 break;
1922 case SRB_STATUS_ABORT_FAILED:
1923 AACDB_PRINT(softs, CE_NOTE,
1924 "SRB_STATUS_ABORT_FAILED, xfer=%d, " \
1925 "resid=%d", data_xfer_length,
1926 pkt->pkt_resid);
1927 aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL,
1928 0);
1929 break;
1930 case SRB_STATUS_PARITY_ERROR:
1931 AACDB_PRINT(softs, CE_NOTE,
1932 "SRB_STATUS_PARITY_ERROR, xfer=%d, " \
1933 "resid=%d", data_xfer_length,
1934 pkt->pkt_resid);
1935 aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0);
1936 break;
1937 case SRB_STATUS_NO_DEVICE:
1938 case SRB_STATUS_INVALID_PATH_ID:
1939 case SRB_STATUS_INVALID_TARGET_ID:
1940 case SRB_STATUS_INVALID_LUN:
1941 case SRB_STATUS_SELECTION_TIMEOUT:
1942 #ifdef DEBUG
1943 if (AAC_DEV_IS_VALID(acp->dvp)) {
1944 AACDB_PRINT(softs, CE_NOTE,
1945 "SRB_STATUS_NO_DEVICE(%d), " \
1946 "xfer=%d, resid=%d ",
1947 srb_status & 0x3f,
1948 data_xfer_length, pkt->pkt_resid);
1949 }
1950 #endif
1951 aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0);
1952 break;
1953 case SRB_STATUS_COMMAND_TIMEOUT:
1954 case SRB_STATUS_TIMEOUT:
1955 AACDB_PRINT(softs, CE_NOTE,
1956 "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \
1957 "resid=%d", data_xfer_length,
1958 pkt->pkt_resid);
1959 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
1960 STAT_TIMEOUT);
1961 break;
1962 case SRB_STATUS_BUS_RESET:
1963 AACDB_PRINT(softs, CE_NOTE,
1964 "SRB_STATUS_BUS_RESET, xfer=%d, " \
1965 "resid=%d", data_xfer_length,
1966 pkt->pkt_resid);
1967 aac_set_pkt_reason(softs, acp, CMD_RESET,
1968 STAT_BUS_RESET);
1969 break;
1970 default:
1971 AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \
1972 "xfer=%d, resid=%d", srb_status & 0x3f,
1973 data_xfer_length, pkt->pkt_resid);
1974 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1975 break;
1976 }
1977 } else if (scsi_status == STATUS_CHECK) {
1978 /* CHECK CONDITION */
1979 struct scsi_arq_status *arqstat =
1980 (void *)(pkt->pkt_scbp);
1981 uint32_t sense_data_size;
1982
1983 pkt->pkt_state |= STATE_ARQ_DONE;
1984
1985 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1986 arqstat->sts_rqpkt_reason = CMD_CMPLT;
1987 arqstat->sts_rqpkt_resid = 0;
1988 arqstat->sts_rqpkt_state =
1989 STATE_GOT_BUS |
1990 STATE_GOT_TARGET |
1991 STATE_SENT_CMD |
1992 STATE_XFERRED_DATA;
1993 arqstat->sts_rqpkt_statistics = 0;
1994
1995 sense_data_size = ddi_get32(acc,
1996 &resp->sense_data_size);
1997 ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE);
1998 AACDB_PRINT(softs, CE_NOTE,
1999 "CHECK CONDITION: sense len=%d, xfer len=%d",
2000 sense_data_size, data_xfer_length);
2001
2002 if (sense_data_size > SENSE_LENGTH)
2003 sense_data_size = SENSE_LENGTH;
2004 ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata,
2005 (uint8_t *)resp->sense_data, sense_data_size,
2006 DDI_DEV_AUTOINCR);
2007 } else {
2008 AACDB_PRINT(softs, CE_WARN, "invalid scsi status: " \
2009 "scsi_status=%d, srb_status=%d",
2010 scsi_status, srb_status);
2011 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
2012 }
2013 } else {
2014 AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d",
2015 resp_status);
2016 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
2017 }
2018 }
2019
2020 /*
2021 * Handle completed IOCTL command
2022 */
2023 /*ARGSUSED*/
2024 void
2025 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2026 {
2027 struct aac_slot *slotp = acp->slotp;
2028
2029 /*
2030 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
2031 * may wait on softs->event, so use cv_broadcast() instead
2032 * of cv_signal().
2033 */
2034 ASSERT(acp->flags & AAC_CMD_SYNC);
2035 ASSERT(acp->flags & AAC_CMD_NO_CB);
2036
2037 /* Get the size of the response FIB from its FIB.Header.Size field */
2038 acp->fib_size = ddi_get16(slotp->fib_acc_handle,
2039 &slotp->fibp->Header.Size);
2040
2041 ASSERT(acp->fib_size <= softs->aac_max_fib_size);
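/* Copy the response FIB out of the slot into the caller's FIB buffer */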
2042 ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp,
2043 (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR);
2044 }
2045
2046 /*
2047 * Handle completed sync fib command
2048 */
2049 /*ARGSUSED*/
2050 void
2051 aac_sync_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2052 {
2053 }
2054
2055 /*
2056 * Handle completed Flush command
2057 */
2058 /*ARGSUSED*/
2059 static void
2060 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2061 {
2062 struct aac_slot *slotp = acp->slotp;
2063 ddi_acc_handle_t acc = slotp->fib_acc_handle;
2064 struct aac_synchronize_reply *resp;
2065 uint32_t status;
2066
2067 ASSERT(!(acp->flags & AAC_CMD_SYNC));
2068
2069 acp->pkt->pkt_state |= STATE_GOT_STATUS;
2070
2071 resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0];
2072 status = ddi_get32(acc, &resp->Status);
2073 if (status != CT_OK)
2074 aac_set_arq_data_hwerr(acp);
2075 }
2076
2077 /*ARGSUSED*/
2078 static void
2079 aac_startstop_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2080 {
2081 struct aac_slot *slotp = acp->slotp;
2082 ddi_acc_handle_t acc = slotp->fib_acc_handle;
2083 struct aac_Container_resp *resp;
2084 uint32_t status;
2085
2086 ASSERT(!(acp->flags & AAC_CMD_SYNC));
2087
2088 acp->pkt->pkt_state |= STATE_GOT_STATUS;
2089
2090 resp = (struct aac_Container_resp *)&slotp->fibp->data[0];
2091 status = ddi_get32(acc, &resp->Status);
2092 if (status != 0) {
2093 AACDB_PRINT(softs, CE_WARN, "Cannot start/stop a unit");
2094 aac_set_arq_data_hwerr(acp);
2095 }
2096 }
2097
2098 /*
2099 * Access PCI space to see if the driver can support the card
2100 */
2101 static int
2102 aac_check_card_type(struct aac_softstate *softs)
2103 {
2104 ddi_acc_handle_t pci_config_handle;
2105 int card_index;
2106 uint32_t pci_cmd;
2107
2108 /* Map pci configuration space */
2109 if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) !=
2110 DDI_SUCCESS) {
2111 AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space");
2112 return (AACERR);
2113 }
2114
2115 softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID);
2116 softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID);
2117 softs->subvendid = pci_config_get16(pci_config_handle,
2118 PCI_CONF_SUBVENID);
2119 softs->subsysid = pci_config_get16(pci_config_handle,
2120 PCI_CONF_SUBSYSID);
2121
2122 card_index = 0;
2123 while (!CARD_IS_UNKNOWN(card_index)) {
2124 if ((aac_cards[card_index].vendor == softs->vendid) &&
2125 (aac_cards[card_index].device == softs->devid) &&
2126 (aac_cards[card_index].subvendor == softs->subvendid) &&
2127 (aac_cards[card_index].subsys == softs->subsysid)) {
2128 break;
2129 }
2130 card_index++;
2131 }
2132
2133 softs->card = card_index;
2134 softs->hwif = aac_cards[card_index].hwif;
2135
2136 /*
2137 * Unknown aac card:
2138 * do a generic match based on the VendorID and DeviceID to
2139 * support new cards in the aac family.
2140 */
2141 if (CARD_IS_UNKNOWN(card_index)) {
2142 if (softs->vendid != 0x9005) {
2143 AACDB_PRINT(softs, CE_WARN,
2144 "Unknown vendor 0x%x", softs->vendid);
2145 goto error;
2146 }
2147 switch (softs->devid) {
2148 case 0x285:
2149 softs->hwif = AAC_HWIF_I960RX;
2150 break;
2151 case 0x286:
2152 softs->hwif = AAC_HWIF_RKT;
2153 break;
2154 default:
2155 AACDB_PRINT(softs, CE_WARN,
2156 "Unknown device \"pci9005,%x\"", softs->devid);
2157 goto error;
2158 }
2159 }
2160
2161 /* Set hardware dependent interface */
2162 switch (softs->hwif) {
2163 case AAC_HWIF_I960RX:
2164 softs->aac_if = aac_rx_interface;
2165 softs->map_size_min = AAC_MAP_SIZE_MIN_RX;
2166 break;
2167 case AAC_HWIF_RKT:
2168 softs->aac_if = aac_rkt_interface;
2169 softs->map_size_min = AAC_MAP_SIZE_MIN_RKT;
2170 break;
2171 default:
2172 AACDB_PRINT(softs, CE_WARN,
2173 "Unknown hardware interface %d", softs->hwif);
2174 goto error;
2175 }
2176
2177 /* Set card names */
2178 (void) strncpy(softs->vendor_name, aac_cards[card_index].vid,
2179 AAC_VENDOR_LEN);
2180 (void) strncpy(softs->product_name, aac_cards[card_index].desc,
2181 AAC_PRODUCT_LEN);
2182
2183 /* Set up quirks */
2184 softs->flags = aac_cards[card_index].quirks;
2185
2186 /* Force the busmaster enable bit on */
2187 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2188 if ((pci_cmd & PCI_COMM_ME) == 0) {
2189 pci_cmd |= PCI_COMM_ME;
2190 pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd);
2191 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2192 if ((pci_cmd & PCI_COMM_ME) == 0) {
2193 cmn_err(CE_CONT, "?Cannot enable busmaster bit");
2194 goto error;
2195 }
2196 }
2197
2198 /* Set memory base to map */
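/* The low bits of a PCI memory BAR hold type/prefetch flags, so mask them off */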
2199 softs->pci_mem_base_paddr = 0xfffffff0UL & \
2200 pci_config_get32(pci_config_handle, PCI_CONF_BASE0);
2201
2202 pci_config_teardown(&pci_config_handle);
2203
2204 return (AACOK); /* card type detected */
2205 error:
2206 pci_config_teardown(&pci_config_handle);
2207 return (AACERR); /* no matched card found */
2208 }
2209
2210 /*
2211 * Do the usual interrupt handler setup stuff.
2212 */
2213 static int
2214 aac_register_intrs(struct aac_softstate *softs)
2215 {
2216 dev_info_t *dip;
2217 int intr_types;
2218
2219 ASSERT(softs->devinfo_p);
2220 dip = softs->devinfo_p;
2221
2222 /* Get the type of device interrupts */
2223 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
2224 AACDB_PRINT(softs, CE_WARN,
2225 "ddi_intr_get_supported_types() failed");
2226 return (AACERR);
2227 }
2228 AACDB_PRINT(softs, CE_NOTE,
2229 "ddi_intr_get_supported_types() ret: 0x%x", intr_types);
2230
2231 /* Query interrupt, and alloc/init all needed struct */
2232 if (intr_types & DDI_INTR_TYPE_MSI) {
2233 if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI)
2234 != DDI_SUCCESS) {
2235 AACDB_PRINT(softs, CE_WARN,
2236 "MSI interrupt query failed");
2237 return (AACERR);
2238 }
2239 softs->intr_type = DDI_INTR_TYPE_MSI;
2240 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
2241 if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED)
2242 != DDI_SUCCESS) {
2243 AACDB_PRINT(softs, CE_WARN,
2244 "FIXED interrupt query failed");
2245 return (AACERR);
2246 }
2247 softs->intr_type = DDI_INTR_TYPE_FIXED;
2248 } else {
2249 AACDB_PRINT(softs, CE_WARN,
2250 "Device cannot suppport both FIXED and MSI interrupts");
2251 return (AACERR);
2252 }
2253
2254 /* Connect interrupt handlers */
2255 if (aac_add_intrs(softs) != DDI_SUCCESS) {
2256 AACDB_PRINT(softs, CE_WARN,
2257 "Interrupt registration failed, intr type: %s",
2258 softs->intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED");
2259 return (AACERR);
2260 }
2261 (void) aac_enable_intrs(softs);
2262
2263 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id,
2264 NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) {
2265 AACDB_PRINT(softs, CE_WARN,
2266 "Can not setup soft interrupt handler!");
2267 aac_remove_intrs(softs);
2268 return (AACERR);
2269 }
2270
2271 return (AACOK);
2272 }
2273
2274 static void
2275 aac_unregister_intrs(struct aac_softstate *softs)
2276 {
2277 aac_remove_intrs(softs);
2278 ddi_remove_softintr(softs->softint_id);
2279 }
2280
2281 /*
2282 * Check the firmware to determine the features to support and the FIB
2283 * parameters to use.
2284 */
2285 static int
2286 aac_check_firmware(struct aac_softstate *softs)
2287 {
2288 uint32_t options;
2289 uint32_t atu_size;
2290 ddi_acc_handle_t pci_handle;
2291 uint8_t *data;
2292 uint32_t max_fibs;
2293 uint32_t max_fib_size;
2294 uint32_t sg_tablesize;
2295 uint32_t max_sectors;
2296 uint32_t status;
2297
2298 /* Get supported options */
2299 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0,
2300 &status)) != AACOK) {
2301 if (status != SRB_STATUS_INVALID_REQUEST) {
2302 cmn_err(CE_CONT,
2303 "?Fatal error: request adapter info error");
2304 return (AACERR);
2305 }
2306 options = 0;
2307 atu_size = 0;
2308 } else {
2309 options = AAC_MAILBOX_GET(softs, 1);
2310 atu_size = AAC_MAILBOX_GET(softs, 2);
2311 }
2312
2313 if (softs->state & AAC_STATE_RESET) {
2314 if ((softs->support_opt == options) &&
2315 (softs->atu_size == atu_size))
2316 return (AACOK);
2317
2318 cmn_err(CE_WARN,
2319 "?Fatal error: firmware changed, system needs reboot");
2320 return (AACERR);
2321 }
2322
2323 /*
2324 * The following critical settings are initialized only once during
2325 * driver attachment.
2326 */
2327 softs->support_opt = options;
2328 softs->atu_size = atu_size;
2329
2330 /* Process supported options */
2331 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
2332 (softs->flags & AAC_FLAGS_NO4GB) == 0) {
2333 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window");
2334 softs->flags |= AAC_FLAGS_4GB_WINDOW;
2335 } else {
2336 /*
2337 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space
2338 * only. IO is handled by the DMA engine which does not suffer
2339 * from the ATU window programming workarounds necessary for
2340 * CPU copy operations.
2341 */
2342 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull;
2343 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull;
2344 }
2345
2346 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) {
2347 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address");
2348 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
2349 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull;
2350 softs->flags |= AAC_FLAGS_SG_64BIT;
2351 }
2352
2353 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) {
2354 softs->flags |= AAC_FLAGS_ARRAY_64BIT;
2355 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size");
2356 }
2357
2358 if (options & AAC_SUPPORTED_NONDASD) {
2359 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0,
2360 "nondasd-enable", (char **)&data) == DDI_SUCCESS)) {
2361 if (strcmp((char *)data, "yes") == 0) {
2362 AACDB_PRINT(softs, CE_NOTE,
2363 "!Enable Non-DASD access");
2364 softs->flags |= AAC_FLAGS_NONDASD;
2365 }
2366 ddi_prop_free(data);
2367 }
2368 }
2369
2370 /* Read preferred settings */
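/* GETCOMMPREF packs max FIB size, max transfer size, s/g count and FIB count into mailboxes 1-3 */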
2371 max_fib_size = 0;
2372 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF,
2373 0, 0, 0, 0, NULL)) == AACOK) {
2374 options = AAC_MAILBOX_GET(softs, 1);
2375 max_fib_size = (options & 0xffff);
2376 max_sectors = (options >> 16) << 1;
2377 options = AAC_MAILBOX_GET(softs, 2);
2378 sg_tablesize = (options >> 16);
2379 options = AAC_MAILBOX_GET(softs, 3);
2380 max_fibs = (options & 0xffff);
2381 }
2382
2383 /* Enable new comm. and rawio at the same time */
2384 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) &&
2385 (max_fib_size != 0)) {
2386 /* read out and save PCI MBR */
2387 if ((atu_size > softs->map_size) &&
2388 (ddi_regs_map_setup(softs->devinfo_p, 1,
2389 (caddr_t *)&data, 0, atu_size, &softs->reg_attr,
2390 &pci_handle) == DDI_SUCCESS)) {
2391 ddi_regs_map_free(&softs->pci_mem_handle);
2392 softs->pci_mem_handle = pci_handle;
2393 softs->pci_mem_base_vaddr = data;
2394 softs->map_size = atu_size;
2395 }
2396 if (atu_size == softs->map_size) {
2397 softs->flags |= AAC_FLAGS_NEW_COMM;
2398 AACDB_PRINT(softs, CE_NOTE,
2399 "!Enable New Comm. interface");
2400 }
2401 }
2402
2403 /* Set FIB parameters */
2404 if (softs->flags & AAC_FLAGS_NEW_COMM) {
2405 softs->aac_max_fibs = max_fibs;
2406 softs->aac_max_fib_size = max_fib_size;
2407 softs->aac_max_sectors = max_sectors;
2408 softs->aac_sg_tablesize = sg_tablesize;
2409
2410 softs->flags |= AAC_FLAGS_RAW_IO;
2411 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO");
2412 } else {
2413 softs->aac_max_fibs =
2414 (softs->flags & AAC_FLAGS_256FIBS) ? 256 : 512;
2415 softs->aac_max_fib_size = AAC_FIB_SIZE;
2416 softs->aac_max_sectors = 128; /* 64K */
2417 if (softs->flags & AAC_FLAGS_17SG)
2418 softs->aac_sg_tablesize = 17;
2419 else if (softs->flags & AAC_FLAGS_34SG)
2420 softs->aac_sg_tablesize = 34;
2421 else if (softs->flags & AAC_FLAGS_SG_64BIT)
2422 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2423 sizeof (struct aac_blockwrite64) +
2424 sizeof (struct aac_sg_entry64)) /
2425 sizeof (struct aac_sg_entry64);
2426 else
2427 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2428 sizeof (struct aac_blockwrite) +
2429 sizeof (struct aac_sg_entry)) /
2430 sizeof (struct aac_sg_entry);
2431 }
2432
2433 if ((softs->flags & AAC_FLAGS_RAW_IO) &&
2434 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) {
2435 softs->flags |= AAC_FLAGS_LBA_64BIT;
2436 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array");
2437 }
2438 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize;
2439 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9;
2440 /*
2441 * 64K maximum segment size in scatter gather list is controlled by
2442 * the NEW_COMM bit in the adapter information. If not set, the card
2443 * can only accept a maximum of 64K. It is not recommended to permit
2444 * more than 128KB of total transfer size to the adapters because
2445 * performance is negatively impacted.
2446 *
2447 * For new comm, segment size equals max xfer size. For old comm,
2448 * we use 64K for both.
2449 */
2450 softs->buf_dma_attr.dma_attr_count_max =
2451 softs->buf_dma_attr.dma_attr_maxxfer - 1;
2452
2453 /* Setup FIB operations */
2454 if (softs->flags & AAC_FLAGS_RAW_IO)
2455 softs->aac_cmd_fib = aac_cmd_fib_rawio;
2456 else if (softs->flags & AAC_FLAGS_SG_64BIT)
2457 softs->aac_cmd_fib = aac_cmd_fib_brw64;
2458 else
2459 softs->aac_cmd_fib = aac_cmd_fib_brw;
2460 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? \
2461 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32;
2462
2463 /* 64-bit LBA needs descriptor format sense data */
2464 softs->slen = sizeof (struct scsi_arq_status);
2465 if ((softs->flags & AAC_FLAGS_LBA_64BIT) &&
2466 softs->slen < AAC_ARQ64_LENGTH)
2467 softs->slen = AAC_ARQ64_LENGTH;
2468
2469 AACDB_PRINT(softs, CE_NOTE,
2470 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d",
2471 softs->aac_max_fibs, softs->aac_max_fib_size,
2472 softs->aac_max_sectors, softs->aac_sg_tablesize);
2473
2474 return (AACOK);
2475 }
2476
2477 static void
2478 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0,
2479 struct FsaRev *fsarev1)
2480 {
2481 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
2482
2483 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash);
2484 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type);
2485 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor);
2486 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major);
2487 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber);
2488 }
2489
2490 /*
2491 * The following function comes from Adaptec:
2492 *
2493 * Query adapter information and supplement adapter information
2494 */
2495 static int
2496 aac_get_adapter_info(struct aac_softstate *softs,
2497 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr)
2498 {
2499 struct aac_cmd *acp = &softs->sync_ac;
2500 ddi_acc_handle_t acc;
2501 struct aac_fib *fibp;
2502 struct aac_adapter_info *ainfp;
2503 struct aac_supplement_adapter_info *sinfp;
2504 int rval;
2505
2506 (void) aac_sync_fib_slot_bind(softs, acp);
2507 acc = acp->slotp->fib_acc_handle;
2508 fibp = acp->slotp->fibp;
2509
2510 ddi_put8(acc, &fibp->data[0], 0);
2511 if (aac_sync_fib(softs, RequestAdapterInfo,
2512 AAC_FIB_SIZEOF(struct aac_adapter_info)) != AACOK) {
2513 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed");
2514 rval = AACERR;
2515 goto finish;
2516 }
2517 ainfp = (struct aac_adapter_info *)fibp->data;
2518 if (ainfr) {
2519 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2520 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase);
2521 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture);
2522 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant);
2523 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed);
2524 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem);
2525 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem);
2526 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem);
2527 aac_fsa_rev(softs, &ainfp->KernelRevision,
2528 &ainfr->KernelRevision);
2529 aac_fsa_rev(softs, &ainfp->MonitorRevision,
2530 &ainfr->MonitorRevision);
2531 aac_fsa_rev(softs, &ainfp->HardwareRevision,
2532 &ainfr->HardwareRevision);
2533 aac_fsa_rev(softs, &ainfp->BIOSRevision,
2534 &ainfr->BIOSRevision);
2535 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled);
2536 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask);
2537 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber);
2538 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform);
2539 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2540 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant);
2541 }
2542 if (sinfr) {
2543 if (!(softs->support_opt &
2544 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) {
2545 AACDB_PRINT(softs, CE_WARN,
2546 "SupplementAdapterInfo not supported");
2547 rval = AACERR;
2548 goto finish;
2549 }
2550 ddi_put8(acc, &fibp->data[0], 0);
2551 if (aac_sync_fib(softs, RequestSupplementAdapterInfo,
2552 AAC_FIB_SIZEOF(struct aac_supplement_adapter_info))
2553 != AACOK) {
2554 AACDB_PRINT(softs, CE_WARN,
2555 "RequestSupplementAdapterInfo failed");
2556 rval = AACERR;
2557 goto finish;
2558 }
2559 sinfp = (struct aac_supplement_adapter_info *)fibp->data;
2560 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1);
2561 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2);
2562 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize);
2563 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId);
2564 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts);
2565 AAC_GET_FIELD32(acc, sinfr, sinfp, Version);
2566 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits);
2567 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber);
2568 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3);
2569 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12);
2570 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts);
2571 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo,
2572 sizeof (struct vpd_info));
2573 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision,
2574 &sinfr->FlashFirmwareRevision);
2575 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions);
2576 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision,
2577 &sinfr->FlashFirmwareBootRevision);
2578 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo,
2579 MFG_PCBA_SERIAL_NUMBER_WIDTH);
2580 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0],
2581 MFG_WWN_WIDTH);
2582 AAC_GET_FIELD32(acc, sinfr, sinfp, SupportedOptions2);
2583 AAC_GET_FIELD32(acc, sinfr, sinfp, ExpansionFlag);
2584 if (sinfr->ExpansionFlag == 1) {
2585 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits3);
2586 AAC_GET_FIELD32(acc, sinfr, sinfp,
2587 SupportedPerformanceMode);
2588 AAC_REP_GET_FIELD32(acc, sinfr, sinfp,
2589 ReservedGrowth[0], 80);
2590 }
2591 }
2592 rval = AACOK;
2593 finish:
2594 aac_sync_fib_slot_release(softs, acp);
2595 return (rval);
2596 }
2597
2598 static int
2599 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max,
2600 uint32_t *tgt_max)
2601 {
2602 struct aac_cmd *acp = &softs->sync_ac;
2603 ddi_acc_handle_t acc;
2604 struct aac_fib *fibp;
2605 struct aac_ctcfg *c_cmd;
2606 struct aac_ctcfg_resp *c_resp;
2607 uint32_t scsi_method_id;
2608 struct aac_bus_info *cmd;
2609 struct aac_bus_info_response *resp;
2610 int rval;
2611
2612 (void) aac_sync_fib_slot_bind(softs, acp);
2613 acc = acp->slotp->fib_acc_handle;
2614 fibp = acp->slotp->fibp;
2615
2616 /* Detect MethodId */
2617 c_cmd = (struct aac_ctcfg *)&fibp->data[0];
2618 ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig);
2619 ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD);
2620 ddi_put32(acc, &c_cmd->param, 0);
2621 rval = aac_sync_fib(softs, ContainerCommand,
2622 AAC_FIB_SIZEOF(struct aac_ctcfg));
2623 c_resp = (struct aac_ctcfg_resp *)&fibp->data[0];
2624 if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) {
2625 AACDB_PRINT(softs, CE_WARN,
2626 "VM_ContainerConfig command fail");
2627 rval = AACERR;
2628 goto finish;
2629 }
2630 scsi_method_id = ddi_get32(acc, &c_resp->param);
2631
2632 /* Detect phys. bus count and max. target id first */
2633 cmd = (struct aac_bus_info *)&fibp->data[0];
2634 ddi_put32(acc, &cmd->Command, VM_Ioctl);
2635 ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */
2636 ddi_put32(acc, &cmd->MethodId, scsi_method_id);
2637 ddi_put32(acc, &cmd->ObjectId, 0);
2638 ddi_put32(acc, &cmd->CtlCmd, GetBusInfo);
2639 /*
2640 * For VM_Ioctl, the firmware uses the Header.Size filled in by the
2641 * driver as the size to be returned. Therefore the driver has to use
2642 * sizeof (struct aac_bus_info_response) because it is greater than
2643 * sizeof (struct aac_bus_info).
2644 */
2645 rval = aac_sync_fib(softs, ContainerCommand,
2646 AAC_FIB_SIZEOF(struct aac_bus_info_response));
2647 resp = (struct aac_bus_info_response *)cmd;
2648
2649 /* Scan all coordinates with INQUIRY */
2650 if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) {
2651 AACDB_PRINT(softs, CE_WARN, "GetBusInfo command fail");
2652 rval = AACERR;
2653 goto finish;
2654 }
2655 *bus_max = ddi_get32(acc, &resp->BusCount);
2656 *tgt_max = ddi_get32(acc, &resp->TargetsPerBus);
2657
2658 finish:
2659 aac_sync_fib_slot_release(softs, acp);
2660 return (rval);
2661 }
2662
2663 /*
2664 * The following function comes from Adaptec:
2665 *
2666 * Routine to be called during initialization of communications with
2667 * the adapter to handle possible adapter configuration issues. When
2668 * the adapter first boots up, it examines attached drives, etc, and
2669 * potentially comes up with a new or revised configuration (relative to
2670 * what's stored in its NVRAM). Additionally it may discover problems
2671 * that make the current physical configuration unworkable (currently
2672 * applicable only to cluster configuration issues).
2673 *
2674 * If there are no configuration issues or the issues are considered
2675 * trivial by the adapter, it will set its configuration status to
2676 * "FSACT_CONTINUE" and execute the "commit configuration" action
2677 * automatically on its own.
2678 *
2679 * However, if there are non-trivial issues, the adapter will set its
2680 * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT"
2681 * and wait for some agent on the host to issue the "\ContainerCommand
2682 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the
2683 * adapter to commit the new/updated configuration and enable
2684 * un-inhibited operation. The host agent should first issue the
2685 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB
2686 * command to obtain information about config issues detected by
2687 * the adapter.
2688 *
2689 * Normally the adapter's PC BIOS will execute on the host following
2690 * adapter poweron and reset and will be responsible for querying the
2691 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG
2692 * command if appropriate.
2693 *
2694 * However, with the introduction of IOP reset support, the adapter may
2695 * boot up without the benefit of the adapter's PC BIOS host agent.
2696 * This routine is intended to take care of these issues in situations
2697 * where BIOS doesn't execute following adapter poweron or reset. The
2698 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so
2699 * there is no harm in doing this when it's already been done.
2700 */
2701 static int
2702 aac_handle_adapter_config_issues(struct aac_softstate *softs)
2703 {
2704 struct aac_cmd *acp = &softs->sync_ac;
2705 ddi_acc_handle_t acc;
2706 struct aac_fib *fibp;
2707 struct aac_Container *cmd;
2708 struct aac_Container_resp *resp;
2709 struct aac_cf_status_header *cfg_sts_hdr;
2710 uint32_t resp_status;
2711 uint32_t ct_status;
2712 uint32_t cfg_stat_action;
2713 int rval;
2714
2715 (void) aac_sync_fib_slot_bind(softs, acp);
2716 acc = acp->slotp->fib_acc_handle;
2717 fibp = acp->slotp->fibp;
2718
2719 /* Get adapter config status */
2720 cmd = (struct aac_Container *)&fibp->data[0];
2721
2722 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2723 ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2724 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS);
2725 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE],
2726 sizeof (struct aac_cf_status_header));
2727 rval = aac_sync_fib(softs, ContainerCommand,
2728 AAC_FIB_SIZEOF(struct aac_Container));
2729 resp = (struct aac_Container_resp *)cmd;
2730 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data;
2731
2732 resp_status = ddi_get32(acc, &resp->Status);
2733 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2734 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) {
2735 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action);
2736
2737 /* Commit configuration if it's reasonable to do so. */
2738 if (cfg_stat_action <= CFACT_PAUSE) {
2739 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2740 ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2741 ddi_put32(acc, &cmd->CTCommand.command,
2742 CT_COMMIT_CONFIG);
2743 rval = aac_sync_fib(softs, ContainerCommand,
2744 AAC_FIB_SIZEOF(struct aac_Container));
2745
2746 resp_status = ddi_get32(acc, &resp->Status);
2747 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2748 if ((rval == AACOK) && (resp_status == 0) &&
2749 (ct_status == CT_OK))
2750 /* Successful completion */
2751 rval = AACMPE_OK;
2752 else
2753 /* Auto-commit aborted due to error(s). */
2754 rval = AACMPE_COMMIT_CONFIG;
2755 } else {
2756 /*
2757 * Auto-commit aborted due to adapter indicating
2758 * configuration issue(s) too dangerous to auto-commit.
2759 */
2760 rval = AACMPE_CONFIG_STATUS;
2761 }
2762 } else {
2763 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted");
2764 rval = AACMPE_CONFIG_STATUS;
2765 }
2766
2767 aac_sync_fib_slot_release(softs, acp);
2768 return (rval);
2769 }
2770
2771 /*
2772 * Hardware initialization and resource allocation
2773 */
2774 static int
2775 aac_common_attach(struct aac_softstate *softs)
2776 {
2777 uint32_t status;
2778 int i;
2779 struct aac_supplement_adapter_info sinf;
2780
2781 DBCALLED(softs, 1);
2782
2783 /*
2784 * Do a little check here to make sure there aren't any outstanding
2785 * FIBs in the message queue. At this point there should not be and
2786 * if there are they are probably left over from another instance of
2787 * the driver like when the system crashes and the crash dump driver
2788 * gets loaded.
2789 */
2790 while (AAC_OUTB_GET(softs) != 0xfffffffful)
2791 ;
2792
2793 /*
2794 * Wait for the card to complete booting up before doing anything that
2795 * attempts to communicate with it.
2796 */
2797 status = AAC_FWSTATUS_GET(softs);
2798 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC)
2799 goto error;
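/* AAC_BUSYWAIT() leaves i at zero if the firmware never reports up within the timeout */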
2800 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */
2801 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i);
2802 if (i == 0) {
2803 cmn_err(CE_CONT, "?Fatal error: controller not ready");
2804 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2805 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2806 goto error;
2807 }
2808
2809 /* Read and set card supported options and settings */
2810 if (aac_check_firmware(softs) == AACERR) {
2811 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2812 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2813 goto error;
2814 }
2815
2816 /* Add interrupt handlers */
2817 if (aac_register_intrs(softs) == AACERR) {
2818 cmn_err(CE_CONT,
2819 "?Fatal error: interrupts register failed");
2820 goto error;
2821 }
2822
2823 /* Setup communication space with the card */
2824 if (softs->comm_space_dma_handle == NULL) {
2825 if (aac_alloc_comm_space(softs) != AACOK)
2826 goto error;
2827 }
2828 if (aac_setup_comm_space(softs) != AACOK) {
2829 cmn_err(CE_CONT, "?Setup communication space failed");
2830 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2831 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2832 goto error;
2833 }
2834
2835 #ifdef DEBUG
2836 if (aac_get_fw_debug_buffer(softs) != AACOK)
2837 cmn_err(CE_CONT, "?firmware UART trace not supported");
2838 #endif
2839
2840 /* Allocate slots */
2841 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) {
2842 cmn_err(CE_CONT, "?Fatal error: slots allocate failed");
2843 goto error;
2844 }
2845 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots);
2846
2847 /* Allocate FIBs */
2848 if (softs->total_fibs < softs->total_slots) {
2849 aac_alloc_fibs(softs);
2850 if (softs->total_fibs == 0)
2851 goto error;
2852 AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated",
2853 softs->total_fibs);
2854 }
2855
2856 AAC_STATUS_CLR(softs, ~0); /* Clear out all interrupts */
2857 AAC_ENABLE_INTR(softs); /* Enable the interrupts we can handle */
2858
2859 if (aac_get_adapter_info(softs, NULL, &sinf) == AACOK) {
2860 softs->feature_bits = sinf.FeatureBits;
2861 softs->support_opt2 = sinf.SupportedOptions2;
2862
2863 /* Get adapter names */
2864 if (CARD_IS_UNKNOWN(softs->card)) {
2865 char *p, *p0, *p1;
2866
2867 /*
2868 * Now find the controller name in supp_adapter_info->
2869 * AdapterTypeText. Use the first word as the vendor
2870 * and the other words as the product name.
2871 */
2872 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = "
2873 "\"%s\"", sinf.AdapterTypeText);
2874 p = sinf.AdapterTypeText;
2875 p0 = p1 = NULL;
2876 /* Skip leading spaces */
2877 while (*p && (*p == ' ' || *p == '\t'))
2878 p++;
2879 p0 = p;
2880 while (*p && (*p != ' ' && *p != '\t'))
2881 p++;
2882 /* Remove middle spaces */
2883 while (*p && (*p == ' ' || *p == '\t'))
2884 *p++ = 0;
2885 p1 = p;
2886 /* Remove trailing spaces */
2887 p = p1 + strlen(p1) - 1;
2888 while (p > p1 && (*p == ' ' || *p == '\t'))
2889 *p-- = 0;
2890 if (*p0 && *p1) {
2891 (void) strncpy(softs->vendor_name, p0,
2892 AAC_VENDOR_LEN);
2893 (void) strncpy(softs->product_name, p1,
2894 AAC_PRODUCT_LEN);
2895 } else {
2896 cmn_err(CE_WARN,
2897 "?adapter name mis-formatted\n");
2898 if (*p0)
2899 (void) strncpy(softs->product_name,
2900 p0, AAC_PRODUCT_LEN);
2901 }
2902 }
2903 } else {
2904 cmn_err(CE_CONT, "?Query adapter information failed");
2905 }
2906
2907
2908 cmn_err(CE_NOTE,
2909 "!aac driver %d.%02d.%02d-%d, found card: " \
2910 "%s %s(pci0x%x.%x.%x.%x) at 0x%x",
2911 AAC_DRIVER_MAJOR_VERSION,
2912 AAC_DRIVER_MINOR_VERSION,
2913 AAC_DRIVER_BUGFIX_LEVEL,
2914 AAC_DRIVER_BUILD,
2915 softs->vendor_name, softs->product_name,
2916 softs->vendid, softs->devid, softs->subvendid, softs->subsysid,
2917 softs->pci_mem_base_paddr);
2918
2919 /* Perform acceptance of adapter-detected config changes if possible */
2920 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) {
2921 cmn_err(CE_CONT, "?Handle adapter config issues failed");
2922 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2923 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2924 goto error;
2925 }
2926
2927 /* Setup containers (logical devices) */
2928 if (aac_probe_containers(softs) != AACOK) {
2929 cmn_err(CE_CONT, "?Fatal error: get container info error");
2930 goto error;
2931 }
2932
2933 /* Check for JBOD support. Disabled by default. */
2934 char *data;
2935 if (softs->feature_bits & AAC_FEATURE_SUPPORTED_JBOD) {
2936 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p,
2937 0, "jbod-enable", &data) == DDI_SUCCESS)) {
2938 if (strcmp(data, "yes") == 0) {
2939 AACDB_PRINT(softs, CE_NOTE,
2940 "Enable JBOD access");
2941 softs->flags |= AAC_FLAGS_JBOD;
2942 }
2943 ddi_prop_free(data);
2944 }
2945 }
2946
2947 /* Setup phys. devices */
2948 if (softs->flags & (AAC_FLAGS_NONDASD | AAC_FLAGS_JBOD)) {
2949 uint32_t bus_max, tgt_max;
2950 uint32_t bus, tgt;
2951 int index;
2952
2953 if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) {
2954 cmn_err(CE_CONT, "?Fatal error: get bus info error");
2955 goto error;
2956 }
2957 AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d",
2958 bus_max, tgt_max);
2959 if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) {
2960 if (softs->state & AAC_STATE_RESET) {
2961 cmn_err(CE_WARN,
2962 "?Fatal error: bus map changed");
2963 goto error;
2964 }
2965 softs->bus_max = bus_max;
2966 softs->tgt_max = tgt_max;
2967 if (softs->nondasds) {
2968 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2969 sizeof (struct aac_nondasd));
2970 }
2971 softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \
2972 sizeof (struct aac_nondasd), KM_SLEEP);
2973
2974 index = 0;
2975 for (bus = 0; bus < softs->bus_max; bus++) {
2976 for (tgt = 0; tgt < softs->tgt_max; tgt++) {
2977 struct aac_nondasd *dvp =
2978 &softs->nondasds[index++];
2979 dvp->dev.type = AAC_DEV_PD;
2980 dvp->bus = bus;
2981 dvp->tid = tgt;
2982 }
2983 }
2984 }
2985 }
2986
2987 /* Check dma & acc handles allocated in attach */
2988 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) {
2989 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2990 goto error;
2991 }
2992
2993 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
2994 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2995 goto error;
2996 }
2997
2998 for (i = 0; i < softs->total_slots; i++) {
2999 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) !=
3000 DDI_SUCCESS) {
3001 ddi_fm_service_impact(softs->devinfo_p,
3002 DDI_SERVICE_LOST);
3003 goto error;
3004 }
3005 }
3006
3007 return (AACOK);
3008 error:
3009 if (softs->state & AAC_STATE_RESET)
3010 return (AACERR);
3011 if (softs->nondasds) {
3012 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
3013 sizeof (struct aac_nondasd));
3014 softs->nondasds = NULL;
3015 }
3016 if (softs->total_fibs > 0)
3017 aac_destroy_fibs(softs);
3018 if (softs->total_slots > 0)
3019 aac_destroy_slots(softs);
3020 if (softs->comm_space_dma_handle)
3021 aac_free_comm_space(softs);
3022 return (AACERR);
3023 }
3024
3025 /*
3026 * Hardware shutdown and resource release
3027 */
3028 static void
3029 aac_common_detach(struct aac_softstate *softs)
3030 {
3031 DBCALLED(softs, 1);
3032
3033 aac_unregister_intrs(softs);
3034
3035 mutex_enter(&softs->io_lock);
3036 (void) aac_shutdown(softs);
3037
3038 if (softs->nondasds) {
3039 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
3040 sizeof (struct aac_nondasd));
3041 softs->nondasds = NULL;
3042 }
3043 aac_destroy_fibs(softs);
3044 aac_destroy_slots(softs);
3045 aac_free_comm_space(softs);
3046 mutex_exit(&softs->io_lock);
3047 }
3048
3049 /*
3050 * Send a synchronous command to the controller and wait for a result.
3051 * Indicate if the controller completed the command with an error status.
3052 */
3053 int
3054 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd,
3055 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3,
3056 uint32_t *statusp)
3057 {
3058 int timeout;
3059 uint32_t status;
3060
3061 if (statusp != NULL)
3062 *statusp = SRB_STATUS_SUCCESS;
3063
3064 /* Fill in mailbox */
3065 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3);
3066
3067 /* Ensure the sync command doorbell flag is cleared */
3068 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
3069
3070 /* Then set it to signal the adapter */
3071 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND);
3072
3073 /* Spin waiting for the command to complete */
3074 timeout = AAC_IMMEDIATE_TIMEOUT * 1000;
3075 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout);
3076 if (!timeout) {
3077 AACDB_PRINT(softs, CE_WARN,
3078 "Sync command timed out after %d seconds (0x%x)!",
3079 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs));
3080 return (AACERR);
3081 }
3082
3083 /* Clear the completion flag */
3084 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
3085
3086 /* Get the command status */
3087 status = AAC_MAILBOX_GET(softs, 0);
3088 if (statusp != NULL)
3089 *statusp = status;
3090 if (status != SRB_STATUS_SUCCESS) {
3091 AACDB_PRINT(softs, CE_WARN,
3092 "Sync command fail: status = 0x%x", status);
3093 return (AACERR);
3094 }
3095
3096 return (AACOK);
3097 }
3098
3099 /*
3100 * Send a synchronous FIB to the adapter and wait for its completion
3101 */
3102 static int
3103 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize)
3104 {
3105 struct aac_cmd *acp = &softs->sync_ac;
3106
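/*
 * If interrupts are enabled the FIB is completed by the interrupt
 * handler and we sleep for it; otherwise we poll for completion.
 */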
3107 acp->flags = AAC_CMD_SYNC | AAC_CMD_IN_SYNC_SLOT;
3108 if (softs->state & AAC_STATE_INTR)
3109 acp->flags |= AAC_CMD_NO_CB;
3110 else
3111 acp->flags |= AAC_CMD_NO_INTR;
3112
3113 acp->ac_comp = aac_sync_complete;
3114 acp->timeout = AAC_SYNC_TIMEOUT;
3115 acp->fib_size = fibsize;
3116
3117 /*
3118 * Only need to setup sync fib header, caller should have init
3119 * fib data
3120 */
3121 aac_cmd_fib_header(softs, acp, cmd);
3122
3123 (void) ddi_dma_sync(acp->slotp->fib_dma_handle, 0, fibsize,
3124 DDI_DMA_SYNC_FORDEV);
3125
3126 aac_start_io(softs, acp);
3127
3128 if (softs->state & AAC_STATE_INTR)
3129 return (aac_do_sync_io(softs, acp));
3130 else
3131 return (aac_do_poll_io(softs, acp));
3132 }
3133
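/*
 * An empty queue has q_tail pointing at q_head cast to a fake aac_cmd, so
 * aac_cmd_enqueue() can always link a new command through q_tail->next
 * without a special empty-queue case (this relies on the "next" link being
 * the first member of struct aac_cmd).
 */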
3134 static void
3135 aac_cmd_initq(struct aac_cmd_queue *q)
3136 {
3137 q->q_head = NULL;
3138 q->q_tail = (struct aac_cmd *)&q->q_head;
3139 }
3140
3141 /*
3142 * Remove a cmd from the head of q
3143 */
3144 static struct aac_cmd *
3145 aac_cmd_dequeue(struct aac_cmd_queue *q)
3146 {
3147 struct aac_cmd *acp;
3148
3149 _NOTE(ASSUMING_PROTECTED(*q))
3150
3151 if ((acp = q->q_head) != NULL) {
3152 if ((q->q_head = acp->next) != NULL)
3153 acp->next = NULL;
3154 else
3155 q->q_tail = (struct aac_cmd *)&q->q_head;
3156 acp->prev = NULL;
3157 }
3158 return (acp);
3159 }
3160
3161 /*
3162 * Add a cmd to the tail of q
3163 */
3164 static void
3165 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp)
3166 {
3167 ASSERT(acp->next == NULL);
3168 acp->prev = q->q_tail;
3169 q->q_tail->next = acp;
3170 q->q_tail = acp;
3171 }
3172
3173 /*
3174 * Remove the cmd ac from q
3175 */
3176 static void
3177 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp)
3178 {
3179 if (acp->prev) {
3180 if ((acp->prev->next = acp->next) != NULL) {
3181 acp->next->prev = acp->prev;
3182 acp->next = NULL;
3183 } else {
3184 q->q_tail = acp->prev;
3185 }
3186 acp->prev = NULL;
3187 }
3188 /* ac is not in the queue */
3189 }
3190
3191 /*
3192 * Atomically insert an entry into the nominated queue, returns 0 on success or
3193 * AACERR if the queue is full.
3194 *
3195 * Note: it would be more efficient to defer notifying the controller in
3196 * the case where we may be inserting several entries in rapid succession,
3197 * but implementing this usefully may be difficult (it would involve a
3198 * separate queue/notify interface).
3199 */
3200 static int
3201 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr,
3202 uint32_t fib_size)
3203 {
3204 ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3205 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3206 uint32_t pi, ci;
3207
3208 DBCALLED(softs, 2);
3209
3210 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q);
3211
3212 /* Get the producer/consumer indices */
3213 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3214 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3215 DDI_DMA_SYNC_FORCPU);
3216 if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3217 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3218 return (AACERR);
3219 }
3220
3221 pi = ddi_get32(acc,
3222 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3223 ci = ddi_get32(acc,
3224 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3225
3226 /*
3227 * Wrap the queue first before we check the queue to see
3228 * if it is full
3229 */
3230 if (pi >= aac_qinfo[queue].size)
3231 pi = 0;
3232
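/* One entry is always left unused so that a full ring can be told from an empty one */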
3233 /* XXX queue full */
3234 if ((pi + 1) == ci)
3235 return (AACERR);
3236
3237 /* Fill in queue entry */
3238 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size);
3239 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr);
3240 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3241 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3242 DDI_DMA_SYNC_FORDEV);
3243
3244 /* Update producer index */
3245 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX],
3246 pi + 1);
3247 (void) ddi_dma_sync(dma,
3248 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \
3249 (uintptr_t)softs->comm_space, sizeof (uint32_t),
3250 DDI_DMA_SYNC_FORDEV);
3251
3252 if (aac_qinfo[queue].notify != 0)
3253 AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3254 return (AACOK);
3255 }
3256
3257 /*
3258 * Atomically remove one entry from the nominated queue, returns 0 on
3259 * success or AACERR if the queue is empty.
3260 */
3261 static int
3262 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp)
3263 {
3264 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3265 ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3266 uint32_t pi, ci;
3267 int unfull = 0;
3268
3269 DBCALLED(softs, 2);
3270
3271 ASSERT(idxp);
3272
3273 /* Get the producer/consumer indices */
3274 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3275 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3276 DDI_DMA_SYNC_FORCPU);
3277 pi = ddi_get32(acc,
3278 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3279 ci = ddi_get32(acc,
3280 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3281
3282 /* Check for queue empty */
3283 if (ci == pi)
3284 return (AACERR);
3285
3286 if (pi >= aac_qinfo[queue].size)
3287 pi = 0;
3288
3289 /* Check for queue full */
3290 if (ci == pi + 1)
3291 unfull = 1;
3292
3293 /*
3294 * The controller does not wrap the queue,
3295 * so we have to do it ourselves
3296 */
3297 if (ci >= aac_qinfo[queue].size)
3298 ci = 0;
3299
3300 /* Fetch the entry */
3301 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3302 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3303 DDI_DMA_SYNC_FORCPU);
3304 if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3305 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3306 return (AACERR);
3307 }
3308
3309 switch (queue) {
3310 case AAC_HOST_NORM_RESP_Q:
3311 case AAC_HOST_HIGH_RESP_Q:
3312 *idxp = ddi_get32(acc,
3313 &(softs->qentries[queue] + ci)->aq_fib_addr);
3314 break;
3315
3316 case AAC_HOST_NORM_CMD_Q:
3317 case AAC_HOST_HIGH_CMD_Q:
3318 *idxp = ddi_get32(acc,
3319 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE;
3320 break;
3321
3322 default:
3323 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()");
3324 return (AACERR);
3325 }
3326
3327 /* Update consumer index */
3328 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX],
3329 ci + 1);
3330 (void) ddi_dma_sync(dma,
3331 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \
3332 (uintptr_t)softs->comm_space, sizeof (uint32_t),
3333 DDI_DMA_SYNC_FORDEV);
3334
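/* If the queue had been full, tell the adapter that an entry has been freed */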
3335 if (unfull && aac_qinfo[queue].notify != 0)
3336 AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3337 return (AACOK);
3338 }
3339
3340 static struct aac_mntinforesp *
3341 aac_get_mntinfo(struct aac_softstate *softs, int cid)
3342 {
3343 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3344 struct aac_fib *fibp = softs->sync_ac.slotp->fibp;
3345 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0];
3346 struct aac_mntinforesp *mir;
3347
3348 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */
3349 (softs->flags & AAC_FLAGS_LBA_64BIT) ?
3350 VM_NameServe64 : VM_NameServe);
3351 ddi_put32(acc, &mi->MntType, FT_FILESYS);
3352 ddi_put32(acc, &mi->MntCount, cid);
3353
3354 if (aac_sync_fib(softs, ContainerCommand,
3355 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) {
3356 AACDB_PRINT(softs, CE_WARN, "Error probing container %d", cid);
3357 return (NULL);
3358 }
3359
3360 mir = (struct aac_mntinforesp *)&fibp->data[0];
3361 if (ddi_get32(acc, &mir->Status) == ST_OK)
3362 return (mir);
3363 return (NULL);
3364 }
3365
3366 static int
3367 aac_get_container_count(struct aac_softstate *softs, int *count)
3368 {
3369 ddi_acc_handle_t acc;
3370 struct aac_mntinforesp *mir;
3371 int rval;
3372
3373 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
3374 acc = softs->sync_ac.slotp->fib_acc_handle;
3375
3376 if ((mir = aac_get_mntinfo(softs, 0)) == NULL) {
3377 rval = AACERR;
3378 goto finish;
3379 }
3380 *count = ddi_get32(acc, &mir->MntRespCount);
3381 if (*count > AAC_MAX_LD) {
3382 AACDB_PRINT(softs, CE_CONT,
3383 "container count(%d) > AAC_MAX_LD", *count);
3384 rval = AACERR;
3385 goto finish;
3386 }
3387 rval = AACOK;
3388
3389 finish:
3390 aac_sync_fib_slot_release(softs, &softs->sync_ac);
3391 return (rval);
3392 }
3393
3394 static int
3395 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid)
3396 {
3397 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3398 struct aac_Container *ct = (struct aac_Container *) \
3399 &softs->sync_ac.slotp->fibp->data[0];
3400
3401 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE);
3402 ddi_put32(acc, &ct->Command, VM_ContainerConfig);
3403 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID);
3404 ddi_put32(acc, &ct->CTCommand.param[0], cid);
3405
3406 if (aac_sync_fib(softs, ContainerCommand,
3407 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR)
3408 return (AACERR);
3409 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK)
3410 return (AACERR);
3411
3412 *uid = ddi_get32(acc, &ct->CTCommand.param[1]);
3413 return (AACOK);
3414 }
3415
3416 /*
3417 * Request information of the container cid
3418 */
3419 static struct aac_mntinforesp *
3420 aac_get_container_info(struct aac_softstate *softs, int cid)
3421 {
3422 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3423 struct aac_mntinforesp *mir;
3424 int rval_uid;
3425 uint32_t uid;
3426
3427 /* Get container UID first so that it will not overwrite mntinfo */
3428 rval_uid = aac_get_container_uid(softs, cid, &uid);
3429
3430 /* Get container basic info */
3431 if ((mir = aac_get_mntinfo(softs, cid)) == NULL) {
3432 AACDB_PRINT(softs, CE_CONT,
3433 "query container %d info failed", cid);
3434 return (NULL);
3435 }
3436 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE)
3437 return (mir);
3438 if (rval_uid != AACOK) {
3439 AACDB_PRINT(softs, CE_CONT,
3440 "query container %d uid failed", cid);
3441 return (NULL);
3442 }
3443
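/* Stash the UID in the response's Status field so the caller can retrieve it */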
3444 ddi_put32(acc, &mir->Status, uid);
3445 return (mir);
3446 }
3447
3448 static enum aac_cfg_event
3449 aac_probe_container(struct aac_softstate *softs, uint32_t cid)
3450 {
3451 enum aac_cfg_event event = AAC_CFG_NULL_NOEXIST;
3452 struct aac_container *dvp = &softs->containers[cid];
3453 struct aac_mntinforesp *mir;
3454 ddi_acc_handle_t acc;
3455
3456 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
3457 acc = softs->sync_ac.slotp->fib_acc_handle;
3458
3459 /* Get container basic info */
3460 if ((mir = aac_get_container_info(softs, cid)) == NULL) {
3461 /* AAC_CFG_NULL_NOEXIST */
3462 goto finish;
3463 }
3464
3465 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) {
3466 if (AAC_DEV_IS_VALID(&dvp->dev)) {
3467 AACDB_PRINT(softs, CE_NOTE,
3468 ">>> Container %d deleted", cid);
3469 dvp->dev.flags &= ~AAC_DFLAG_VALID;
3470 event = AAC_CFG_DELETE;
3471 }
3472 /* AAC_CFG_NULL_NOEXIST */
3473 } else {
3474 uint64_t size;
3475 uint32_t uid;
3476
3477 event = AAC_CFG_NULL_EXIST;
3478
3479 size = AAC_MIR_SIZE(softs, acc, mir);
3480 uid = ddi_get32(acc, &mir->Status);
3481 if (AAC_DEV_IS_VALID(&dvp->dev)) {
3482 if (dvp->uid != uid) {
3483 AACDB_PRINT(softs, CE_WARN,
3484 ">>> Container %u uid changed to %d",
3485 cid, uid);
3486 dvp->uid = uid;
3487 event = AAC_CFG_CHANGE;
3488 }
3489 if (dvp->size != size) {
3490 AACDB_PRINT(softs, CE_NOTE,
3491 ">>> Container %u size changed to %"PRIu64,
3492 cid, size);
3493 dvp->size = size;
3494 event = AAC_CFG_CHANGE;
3495 }
3496 } else { /* Init new container */
3497 AACDB_PRINT(softs, CE_NOTE,
3498 ">>> Container %d added: " \
3499 "size=0x%x.%08x, type=%d, name=%s",
3500 cid,
3501 ddi_get32(acc, &mir->MntObj.CapacityHigh),
3502 ddi_get32(acc, &mir->MntObj.Capacity),
3503 ddi_get32(acc, &mir->MntObj.VolType),
3504 mir->MntObj.FileSystemName);
3505 dvp->dev.flags |= AAC_DFLAG_VALID;
3506 dvp->dev.type = AAC_DEV_LD;
3507
3508 dvp->cid = cid;
3509 dvp->uid = uid;
3510 dvp->size = size;
3511 dvp->locked = 0;
3512 dvp->deleted = 0;
3513
3514 event = AAC_CFG_ADD;
3515 }
3516 }
3517
3518 finish:
3519 aac_sync_fib_slot_release(softs, &softs->sync_ac);
3520 return (event);
3521 }
3522
3523 /*
3524 * Do a rescan of all the possible containers and update the container list
3525 * with newly online/offline containers, and prepare for autoconfiguration.
3526 */
3527 static int
3528 aac_probe_containers(struct aac_softstate *softs)
3529 {
3530 int i, count, total;
3531
3532 /* Loop over possible containers */
3533 count = softs->container_count;
3534 if (aac_get_container_count(softs, &count) == AACERR)
3535 return (AACERR);
3536
3537 for (i = total = 0; i < count; i++) {
3538 enum aac_cfg_event event = aac_probe_container(softs, i);
3539 if ((event != AAC_CFG_NULL_NOEXIST) &&
3540 (event != AAC_CFG_NULL_EXIST)) {
3541 (void) aac_handle_dr(softs, i, -1, event);
3542 total++;
3543 }
3544 }
3545
3546 if (count < softs->container_count) {
3547 struct aac_container *dvp;
3548
3549 for (dvp = &softs->containers[count];
3550 dvp < &softs->containers[softs->container_count]; dvp++) {
3551 if (!AAC_DEV_IS_VALID(&dvp->dev))
3552 continue;
3553 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted",
3554 dvp->cid);
3555 dvp->dev.flags &= ~AAC_DFLAG_VALID;
3556 (void) aac_handle_dr(softs, dvp->cid, -1,
3557 AAC_CFG_DELETE);
3558 }
3559 }
3560
3561 softs->container_count = count;
3562 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total);
3563 return (AACOK);
3564 }
3565
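/*
* Record the addition or deletion of a JBOD (physical) device at target
* tgt and kick off dynamic reconfiguration for it.
*/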
3566 static int
3567 aac_probe_jbod(struct aac_softstate *softs, int tgt, int event)
3568 {
struct aac_device *dvp;

ASSERT(AAC_MAX_LD <= tgt);
ASSERT(tgt < AAC_MAX_DEV(softs));
dvp = AAC_DEV(softs, tgt);
3573
3574 switch (event) {
3575 case AAC_CFG_ADD:
3576 AACDB_PRINT(softs, CE_NOTE,
3577 ">>> Jbod %d added", tgt - AAC_MAX_LD);
3578 dvp->flags |= AAC_DFLAG_VALID;
3579 dvp->type = AAC_DEV_PD;
3580 break;
3581 case AAC_CFG_DELETE:
3582 AACDB_PRINT(softs, CE_NOTE,
3583 ">>> Jbod %d deleted", tgt - AAC_MAX_LD);
3584 dvp->flags &= ~AAC_DFLAG_VALID;
3585 break;
3586 default:
3587 return (AACERR);
3588 }
3589 (void) aac_handle_dr(softs, tgt, 0, event);
3590 return (AACOK);
3591 }
3592
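/*
* Allocate and DMA-bind the adapter communication space, which holds the
* init struct, adapter FIBs, printf buffer and queue table.
*/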
3593 static int
3594 aac_alloc_comm_space(struct aac_softstate *softs)
3595 {
3596 size_t rlen;
3597 ddi_dma_cookie_t cookie;
3598 uint_t cookien;
3599
3600 /* Allocate DMA for comm. space */
3601 if (ddi_dma_alloc_handle(
3602 softs->devinfo_p,
3603 &softs->addr_dma_attr,
3604 DDI_DMA_SLEEP,
3605 NULL,
3606 &softs->comm_space_dma_handle) != DDI_SUCCESS) {
3607 AACDB_PRINT(softs, CE_WARN,
3608 "Cannot alloc dma handle for communication area");
3609 goto error;
3610 }
3611 if (ddi_dma_mem_alloc(
3612 softs->comm_space_dma_handle,
3613 sizeof (struct aac_comm_space),
3614 &softs->acc_attr,
3615 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3616 DDI_DMA_SLEEP,
3617 NULL,
3618 (caddr_t *)&softs->comm_space,
3619 &rlen,
3620 &softs->comm_space_acc_handle) != DDI_SUCCESS) {
3621 AACDB_PRINT(softs, CE_WARN,
3622 "Cannot alloc mem for communication area");
3623 goto error;
3624 }
3625 if (ddi_dma_addr_bind_handle(
3626 softs->comm_space_dma_handle,
3627 NULL,
3628 (caddr_t)softs->comm_space,
3629 sizeof (struct aac_comm_space),
3630 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3631 DDI_DMA_SLEEP,
3632 NULL,
3633 &cookie,
3634 &cookien) != DDI_DMA_MAPPED) {
3635 AACDB_PRINT(softs, CE_WARN,
3636 "DMA bind failed for communication area");
3637 goto error;
3638 }
3639 softs->comm_space_phyaddr = cookie.dmac_address;
3640
3641 return (AACOK);
3642 error:
3643 if (softs->comm_space_acc_handle) {
3644 ddi_dma_mem_free(&softs->comm_space_acc_handle);
3645 softs->comm_space_acc_handle = NULL;
3646 }
3647 if (softs->comm_space_dma_handle) {
3648 ddi_dma_free_handle(&softs->comm_space_dma_handle);
3649 softs->comm_space_dma_handle = NULL;
3650 }
3651 return (AACERR);
3652 }
3653
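/*
* Unbind and free the communication space DMA resources.
*/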
3654 static void
3655 aac_free_comm_space(struct aac_softstate *softs)
3656 {
3657
3658 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle);
3659 ddi_dma_mem_free(&softs->comm_space_acc_handle);
3660 softs->comm_space_acc_handle = NULL;
3661 ddi_dma_free_handle(&softs->comm_space_dma_handle);
3662 softs->comm_space_dma_handle = NULL;
softs->comm_space_phyaddr = 0;
3664 }
3665
3666 /*
3667 * Initialize the data structures that are required for the communication
3668 * interface to operate
3669 */
3670 static int
3671 aac_setup_comm_space(struct aac_softstate *softs)
3672 {
3673 ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3674 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3675 uint32_t comm_space_phyaddr;
3676 struct aac_adapter_init *initp;
3677 int qoffset;
3678
3679 comm_space_phyaddr = softs->comm_space_phyaddr;
3680
3681 /* Setup adapter init struct */
3682 initp = &softs->comm_space->init_data;
3683 bzero(initp, sizeof (struct aac_adapter_init));
3684
3685 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION);
3686 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time());
3687
3688 /* Setup new/old comm. specific data */
3689 if (softs->flags & AAC_FLAGS_RAW_IO) {
3690 uint32_t init_flags = 0;
3691
3692 if (softs->flags & AAC_FLAGS_NEW_COMM)
3693 init_flags |= AAC_INIT_FLAGS_NEW_COMM_SUPPORTED;
3694 /* AAC_SUPPORTED_POWER_MANAGEMENT */
3695 init_flags |= AAC_INIT_FLAGS_DRIVER_SUPPORTS_PM;
3696 init_flags |= AAC_INIT_FLAGS_DRIVER_USES_UTC_TIME;
3697
3698 ddi_put32(acc, &initp->InitStructRevision,
3699 AAC_INIT_STRUCT_REVISION_4);
3700 ddi_put32(acc, &initp->InitFlags, init_flags);
3701 /* Setup the preferred settings */
3702 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs);
3703 ddi_put32(acc, &initp->MaxIoSize,
3704 (softs->aac_max_sectors << 9));
3705 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size);
3706 } else {
3707 /*
3708 * Tells the adapter about the physical location of various
3709 * important shared data structures
3710 */
3711 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress,
3712 comm_space_phyaddr + \
3713 offsetof(struct aac_comm_space, adapter_fibs));
3714 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0);
3715 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE);
3716 ddi_put32(acc, &initp->AdapterFibsSize,
3717 AAC_ADAPTER_FIBS * AAC_FIB_SIZE);
3718 ddi_put32(acc, &initp->PrintfBufferAddress,
3719 comm_space_phyaddr + \
3720 offsetof(struct aac_comm_space, adapter_print_buf));
3721 ddi_put32(acc, &initp->PrintfBufferSize,
3722 AAC_ADAPTER_PRINT_BUFSIZE);
3723 ddi_put32(acc, &initp->MiniPortRevision,
3724 AAC_INIT_STRUCT_MINIPORT_REVISION);
3725 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN);
3726
3727 qoffset = (comm_space_phyaddr + \
3728 offsetof(struct aac_comm_space, qtable)) % \
3729 AAC_QUEUE_ALIGN;
3730 if (qoffset)
3731 qoffset = AAC_QUEUE_ALIGN - qoffset;
3732 softs->qtablep = (struct aac_queue_table *) \
3733 ((char *)&softs->comm_space->qtable + qoffset);
3734 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \
3735 offsetof(struct aac_comm_space, qtable) + qoffset);
3736
3737 /* Init queue table */
3738 ddi_put32(acc, &softs->qtablep-> \
3739 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3740 AAC_HOST_NORM_CMD_ENTRIES);
3741 ddi_put32(acc, &softs->qtablep-> \
3742 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3743 AAC_HOST_NORM_CMD_ENTRIES);
3744 ddi_put32(acc, &softs->qtablep-> \
3745 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3746 AAC_HOST_HIGH_CMD_ENTRIES);
3747 ddi_put32(acc, &softs->qtablep-> \
3748 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3749 AAC_HOST_HIGH_CMD_ENTRIES);
3750 ddi_put32(acc, &softs->qtablep-> \
3751 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3752 AAC_ADAP_NORM_CMD_ENTRIES);
3753 ddi_put32(acc, &softs->qtablep-> \
3754 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3755 AAC_ADAP_NORM_CMD_ENTRIES);
3756 ddi_put32(acc, &softs->qtablep-> \
3757 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3758 AAC_ADAP_HIGH_CMD_ENTRIES);
3759 ddi_put32(acc, &softs->qtablep-> \
3760 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3761 AAC_ADAP_HIGH_CMD_ENTRIES);
3762 ddi_put32(acc, &softs->qtablep-> \
3763 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3764 AAC_HOST_NORM_RESP_ENTRIES);
3765 ddi_put32(acc, &softs->qtablep-> \
3766 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3767 AAC_HOST_NORM_RESP_ENTRIES);
3768 ddi_put32(acc, &softs->qtablep-> \
3769 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3770 AAC_HOST_HIGH_RESP_ENTRIES);
3771 ddi_put32(acc, &softs->qtablep-> \
3772 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3773 AAC_HOST_HIGH_RESP_ENTRIES);
3774 ddi_put32(acc, &softs->qtablep-> \
3775 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3776 AAC_ADAP_NORM_RESP_ENTRIES);
3777 ddi_put32(acc, &softs->qtablep-> \
3778 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3779 AAC_ADAP_NORM_RESP_ENTRIES);
3780 ddi_put32(acc, &softs->qtablep-> \
3781 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3782 AAC_ADAP_HIGH_RESP_ENTRIES);
3783 ddi_put32(acc, &softs->qtablep-> \
3784 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3785 AAC_ADAP_HIGH_RESP_ENTRIES);
3786
3787 /* Init queue entries */
3788 softs->qentries[AAC_HOST_NORM_CMD_Q] =
3789 &softs->qtablep->qt_HostNormCmdQueue[0];
3790 softs->qentries[AAC_HOST_HIGH_CMD_Q] =
3791 &softs->qtablep->qt_HostHighCmdQueue[0];
3792 softs->qentries[AAC_ADAP_NORM_CMD_Q] =
3793 &softs->qtablep->qt_AdapNormCmdQueue[0];
3794 softs->qentries[AAC_ADAP_HIGH_CMD_Q] =
3795 &softs->qtablep->qt_AdapHighCmdQueue[0];
3796 softs->qentries[AAC_HOST_NORM_RESP_Q] =
3797 &softs->qtablep->qt_HostNormRespQueue[0];
3798 softs->qentries[AAC_HOST_HIGH_RESP_Q] =
3799 &softs->qtablep->qt_HostHighRespQueue[0];
3800 softs->qentries[AAC_ADAP_NORM_RESP_Q] =
3801 &softs->qtablep->qt_AdapNormRespQueue[0];
3802 softs->qentries[AAC_ADAP_HIGH_RESP_Q] =
3803 &softs->qtablep->qt_AdapHighRespQueue[0];
3804 }
3805 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV);
3806
3807 /* Send init structure to the card */
3808 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT,
3809 comm_space_phyaddr + \
3810 offsetof(struct aac_comm_space, init_data),
3811 0, 0, 0, NULL) == AACERR) {
3812 AACDB_PRINT(softs, CE_WARN,
3813 "Cannot send init structure to adapter");
3814 return (AACERR);
3815 }
3816
3817 return (AACOK);
3818 }
3819
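/*
* Fill in the space-padded INQUIRY vendor id and return a pointer just
* past the field.
*/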
3820 static uchar_t *
3821 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf)
3822 {
3823 (void) memset(buf, ' ', AAC_VENDOR_LEN);
3824 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name));
3825 return (buf + AAC_VENDOR_LEN);
3826 }
3827
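/*
* Fill in the space-padded INQUIRY product id and return a pointer just
* past the field.
*/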
3828 static uchar_t *
3829 aac_product_id(struct aac_softstate *softs, uchar_t *buf)
3830 {
3831 (void) memset(buf, ' ', AAC_PRODUCT_LEN);
3832 bcopy(softs->product_name, buf, strlen(softs->product_name));
3833 return (buf + AAC_PRODUCT_LEN);
3834 }
3835
3836 /*
3837 * Construct unit serial number from container uid
3838 */
3839 static uchar_t *
3840 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf)
3841 {
3842 int i, d;
3843 uint32_t uid;
3844
3845 ASSERT(tgt >= 0 && tgt < AAC_MAX_LD);
3846
3847 uid = softs->containers[tgt].uid;
3848 for (i = 7; i >= 0; i--) {
3849 d = uid & 0xf;
3850 buf[i] = d > 9 ? 'A' + (d - 0xa) : '0' + d;
3851 uid >>= 4;
3852 }
3853 return (buf + 8);
3854 }
3855
3856 /*
3857 * SPC-3 7.5 INQUIRY command implementation
3858 */
3859 static void
3860 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt,
3861 union scsi_cdb *cdbp, struct buf *bp)
3862 {
3863 int tgt = pkt->pkt_address.a_target;
3864 char *b_addr = NULL;
3865 uchar_t page = cdbp->cdb_opaque[2];
3866
3867 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) {
3868 /* Command Support Data is not supported */
3869 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0);
3870 return;
3871 }
3872
3873 if (bp && bp->b_un.b_addr && bp->b_bcount) {
3874 if (bp->b_flags & (B_PHYS | B_PAGEIO))
3875 bp_mapin(bp);
3876 b_addr = bp->b_un.b_addr;
3877 }
3878
3879 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) {
3880 uchar_t *vpdp = (uchar_t *)b_addr;
3881 uchar_t *idp, *sp;
3882
3883 /* SPC-3 8.4 Vital product data parameters */
3884 switch (page) {
3885 case 0x00:
3886 /* Supported VPD pages */
3887 if (vpdp == NULL ||
3888 bp->b_bcount < (AAC_VPD_PAGE_DATA + 3))
3889 return;
3890 bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3891 vpdp[AAC_VPD_PAGE_CODE] = 0x00;
3892 vpdp[AAC_VPD_PAGE_LENGTH] = 3;
3893
3894 vpdp[AAC_VPD_PAGE_DATA] = 0x00;
3895 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80;
3896 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83;
3897
3898 pkt->pkt_state |= STATE_XFERRED_DATA;
3899 break;
3900
3901 case 0x80:
3902 /* Unit serial number page */
3903 if (vpdp == NULL ||
3904 bp->b_bcount < (AAC_VPD_PAGE_DATA + 8))
3905 return;
3906 bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3907 vpdp[AAC_VPD_PAGE_CODE] = 0x80;
3908 vpdp[AAC_VPD_PAGE_LENGTH] = 8;
3909
3910 sp = &vpdp[AAC_VPD_PAGE_DATA];
3911 (void) aac_lun_serialno(softs, tgt, sp);
3912
3913 pkt->pkt_state |= STATE_XFERRED_DATA;
3914 break;
3915
3916 case 0x83:
3917 /* Device identification page */
3918 if (vpdp == NULL ||
3919 bp->b_bcount < (AAC_VPD_PAGE_DATA + 32))
3920 return;
3921 bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3922 vpdp[AAC_VPD_PAGE_CODE] = 0x83;
3923
3924 idp = &vpdp[AAC_VPD_PAGE_DATA];
3925 bzero(idp, AAC_VPD_ID_LENGTH);
3926 idp[AAC_VPD_ID_CODESET] = 0x02;
3927 idp[AAC_VPD_ID_TYPE] = 0x01;
3928
3929 /*
3930 * SPC-3 Table 111 - Identifier type
* One recommended method of constructing the remainder
* of the identifier field is to concatenate the product
3933 * identification field from the standard INQUIRY data
3934 * field and the product serial number field from the
3935 * unit serial number page.
3936 */
3937 sp = &idp[AAC_VPD_ID_DATA];
3938 sp = aac_vendor_id(softs, sp);
3939 sp = aac_product_id(softs, sp);
3940 sp = aac_lun_serialno(softs, tgt, sp);
3941 idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \
3942 (uintptr_t)&idp[AAC_VPD_ID_DATA];
3943
3944 vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \
3945 (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA];
3946 pkt->pkt_state |= STATE_XFERRED_DATA;
3947 break;
3948
3949 default:
3950 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3951 0x24, 0x00, 0);
3952 break;
3953 }
3954 } else {
3955 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr;
3956 size_t len = sizeof (struct scsi_inquiry);
3957
3958 if (page != 0) {
3959 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3960 0x24, 0x00, 0);
3961 return;
3962 }
3963 if (inqp == NULL || bp->b_bcount < len)
3964 return;
3965
3966 bzero(inqp, len);
3967 inqp->inq_len = AAC_ADDITIONAL_LEN;
3968 inqp->inq_ansi = AAC_ANSI_VER;
3969 inqp->inq_rdf = AAC_RESP_DATA_FORMAT;
3970 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid);
3971 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid);
3972 bcopy("V1.0", inqp->inq_revision, 4);
3973 inqp->inq_cmdque = 1; /* enable tagged-queuing */
3974 /*
3975 * For "sd-max-xfer-size" property which may impact performance
3976 * when IO threads increase.
3977 */
3978 inqp->inq_wbus32 = 1;
3979
3980 pkt->pkt_state |= STATE_XFERRED_DATA;
3981 }
3982 }
3983
3984 /*
3985 * SPC-3 7.10 MODE SENSE command implementation
3986 */
3987 static void
3988 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt,
3989 union scsi_cdb *cdbp, struct buf *bp, int capacity)
3990 {
3991 uchar_t pagecode;
3992 struct mode_header *headerp;
3993 struct mode_header_g1 *g1_headerp;
3994 unsigned int ncyl;
3995 caddr_t sense_data;
3996 caddr_t next_page;
3997 size_t sdata_size;
3998 size_t pages_size;
3999 int unsupport_page = 0;
4000
4001 ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE ||
4002 cdbp->scc_cmd == SCMD_MODE_SENSE_G1);
4003
4004 if (!(bp && bp->b_un.b_addr && bp->b_bcount))
4005 return;
4006
4007 if (bp->b_flags & (B_PHYS | B_PAGEIO))
4008 bp_mapin(bp);
4009 pkt->pkt_state |= STATE_XFERRED_DATA;
4010 pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F;
4011
4012 /* calculate the size of needed buffer */
4013 if (cdbp->scc_cmd == SCMD_MODE_SENSE)
4014 sdata_size = MODE_HEADER_LENGTH;
4015 else /* must be SCMD_MODE_SENSE_G1 */
4016 sdata_size = MODE_HEADER_LENGTH_G1;
4017
4018 pages_size = 0;
4019 switch (pagecode) {
4020 case SD_MODE_SENSE_PAGE3_CODE:
4021 pages_size += sizeof (struct mode_format);
4022 break;
4023
4024 case SD_MODE_SENSE_PAGE4_CODE:
4025 pages_size += sizeof (struct mode_geometry);
4026 break;
4027
4028 case MODEPAGE_CTRL_MODE:
4029 if (softs->flags & AAC_FLAGS_LBA_64BIT) {
4030 pages_size += sizeof (struct mode_control_scsi3);
4031 } else {
4032 unsupport_page = 1;
4033 }
4034 break;
4035
4036 case MODEPAGE_ALLPAGES:
4037 if (softs->flags & AAC_FLAGS_LBA_64BIT) {
4038 pages_size += sizeof (struct mode_format) +
4039 sizeof (struct mode_geometry) +
4040 sizeof (struct mode_control_scsi3);
4041 } else {
4042 pages_size += sizeof (struct mode_format) +
4043 sizeof (struct mode_geometry);
4044 }
4045 break;
4046
4047 default:
4048 /* unsupported pages */
4049 unsupport_page = 1;
4050 }
4051
/* allocate a buffer to build the sense data to be returned */
4053 sdata_size += pages_size;
4054 sense_data = kmem_zalloc(sdata_size, KM_SLEEP);
4055
4056 if (cdbp->scc_cmd == SCMD_MODE_SENSE) {
4057 headerp = (struct mode_header *)sense_data;
4058 headerp->length = MODE_HEADER_LENGTH + pages_size -
4059 sizeof (headerp->length);
4060 headerp->bdesc_length = 0;
4061 next_page = sense_data + sizeof (struct mode_header);
4062 } else {
4063 g1_headerp = (void *)sense_data;
4064 g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size -
4065 sizeof (g1_headerp->length));
4066 g1_headerp->bdesc_length = 0;
4067 next_page = sense_data + sizeof (struct mode_header_g1);
4068 }
4069
4070 if (unsupport_page)
4071 goto finish;
4072
4073 if (pagecode == SD_MODE_SENSE_PAGE3_CODE ||
4074 pagecode == MODEPAGE_ALLPAGES) {
4075 /* SBC-3 7.1.3.3 Format device page */
4076 struct mode_format *page3p;
4077
4078 page3p = (void *)next_page;
4079 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE;
4080 page3p->mode_page.length = sizeof (struct mode_format);
4081 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE);
4082 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK);
4083
4084 next_page += sizeof (struct mode_format);
4085 }
4086
4087 if (pagecode == SD_MODE_SENSE_PAGE4_CODE ||
4088 pagecode == MODEPAGE_ALLPAGES) {
4089 /* SBC-3 7.1.3.8 Rigid disk device geometry page */
4090 struct mode_geometry *page4p;
4091
4092 page4p = (void *)next_page;
4093 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE;
4094 page4p->mode_page.length = sizeof (struct mode_geometry);
4095 page4p->heads = AAC_NUMBER_OF_HEADS;
4096 page4p->rpm = BE_16(AAC_ROTATION_SPEED);
4097 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK);
4098 page4p->cyl_lb = ncyl & 0xff;
4099 page4p->cyl_mb = (ncyl >> 8) & 0xff;
4100 page4p->cyl_ub = (ncyl >> 16) & 0xff;
4101
4102 next_page += sizeof (struct mode_geometry);
4103 }
4104
4105 if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) &&
4106 softs->flags & AAC_FLAGS_LBA_64BIT) {
/* 64-bit LBA needs larger sense data */
4108 struct mode_control_scsi3 *mctl;
4109
4110 mctl = (void *)next_page;
4111 mctl->mode_page.code = MODEPAGE_CTRL_MODE;
4112 mctl->mode_page.length =
4113 sizeof (struct mode_control_scsi3) -
4114 sizeof (struct mode_page);
4115 mctl->d_sense = 1;
4116 }
4117
4118 finish:
4119 /* copyout the valid data. */
4120 bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount));
4121 kmem_free(sense_data, sdata_size);
4122 }
4123
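/*
* Construct the "<target>,<lun>" node name from the "target" and "lun"
* properties of the child dev_info node.
*/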
4124 static int
4125 aac_name_node(dev_info_t *dip, char *name, int len)
4126 {
4127 int tgt, lun;
4128
4129 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4130 DDI_PROP_DONTPASS, "target", -1);
4131 if (tgt == -1)
4132 return (DDI_FAILURE);
4133 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4134 DDI_PROP_DONTPASS, "lun", -1);
4135 if (lun == -1)
4136 return (DDI_FAILURE);
4137
4138 (void) snprintf(name, len, "%x,%x", tgt, lun);
4139 return (DDI_SUCCESS);
4140 }
4141
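/*
* tran_tgt_init(9E) - target/lun instance initialization
*/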
4142 /*ARGSUSED*/
4143 static int
4144 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
4145 scsi_hba_tran_t *tran, struct scsi_device *sd)
4146 {
4147 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
4148 #if defined(DEBUG) || defined(__lock_lint)
4149 int ctl = ddi_get_instance(softs->devinfo_p);
4150 #endif
4151 uint16_t tgt = sd->sd_address.a_target;
4152 uint8_t lun = sd->sd_address.a_lun;
4153 struct aac_device *dvp;
4154
4155 DBCALLED(softs, 2);
4156
4157 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
4158 /*
* If no persistent node exists, we don't allow a .conf node
* to be created.
4161 */
4162 if (aac_find_child(softs, tgt, lun) != NULL) {
4163 if (ndi_merge_node(tgt_dip, aac_name_node) !=
4164 DDI_SUCCESS)
4165 /* Create this .conf node */
4166 return (DDI_SUCCESS);
4167 }
4168 return (DDI_FAILURE);
4169 }
4170
4171 /*
4172 * Only support container/phys. device that has been
4173 * detected and valid
4174 */
4175 mutex_enter(&softs->io_lock);
4176 if (tgt >= AAC_MAX_DEV(softs)) {
4177 AACDB_PRINT_TRAN(softs,
4178 "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun);
4179 mutex_exit(&softs->io_lock);
4180 return (DDI_FAILURE);
4181 }
4182
4183 if (tgt < AAC_MAX_LD) {
4184 dvp = (struct aac_device *)&softs->containers[tgt];
4185 if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) {
4186 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d",
4187 ctl, tgt, lun);
4188 mutex_exit(&softs->io_lock);
4189 return (DDI_FAILURE);
4190 }
4191 /*
4192 * Save the tgt_dip for the given target if one doesn't exist
* already. Dips for non-existent tgts will be cleared in
* tgt_free.
4195 */
4196 if (softs->containers[tgt].dev.dip == NULL &&
4197 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0)
4198 softs->containers[tgt].dev.dip = tgt_dip;
4199 } else {
4200 dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)];
4201 /*
4202 * Save the tgt_dip for the given target if one doesn't exist
* already. Dips for non-existent tgts will be cleared in
* tgt_free.
4205 */
4206
4207 if (softs->nondasds[AAC_PD(tgt)].dev.dip == NULL &&
4208 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0)
4209 softs->nondasds[AAC_PD(tgt)].dev.dip = tgt_dip;
4210 }
4211
4212 if (softs->flags & AAC_FLAGS_BRKUP) {
4213 if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
4214 "buf_break", 1) != DDI_PROP_SUCCESS) {
4215 cmn_err(CE_CONT, "unable to create "
4216 "property for t%dL%d (buf_break)", tgt, lun);
4217 }
4218 }
4219
4220 AACDB_PRINT(softs, CE_NOTE,
4221 "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun,
4222 (dvp->type == AAC_DEV_PD) ? "pd" : "ld");
4223 mutex_exit(&softs->io_lock);
4224 return (DDI_SUCCESS);
4225 }
4226
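/*
* tran_tgt_free(9E) - target/lun instance teardown
*/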
4227 static void
4228 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
4229 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
4230 {
4231 #ifndef __lock_lint
4232 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran))
4233 #endif
4234
4235 struct aac_softstate *softs = SD2AAC(sd);
4236 int tgt = sd->sd_address.a_target;
4237
4238 mutex_enter(&softs->io_lock);
4239 if (tgt < AAC_MAX_LD) {
4240 if (softs->containers[tgt].dev.dip == tgt_dip)
4241 softs->containers[tgt].dev.dip = NULL;
4242 } else {
4243 if (softs->nondasds[AAC_PD(tgt)].dev.dip == tgt_dip)
4244 softs->nondasds[AAC_PD(tgt)].dev.dip = NULL;
4245 softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID;
4246 }
4247 mutex_exit(&softs->io_lock);
4248 }
4249
4250 /*
4251 * Check if the firmware is Up And Running. If it is in the Kernel Panic
4252 * state, (BlinkLED code + 1) is returned.
4253 * 0 -- firmware up and running
4254 * -1 -- firmware dead
4255 * >0 -- firmware kernel panic
4256 */
4257 static int
4258 aac_check_adapter_health(struct aac_softstate *softs)
4259 {
4260 int rval;
4261
4262 rval = PCI_MEM_GET32(softs, AAC_OMR0);
4263
4264 if (rval & AAC_KERNEL_UP_AND_RUNNING) {
4265 rval = 0;
4266 } else if (rval & AAC_KERNEL_PANIC) {
4267 cmn_err(CE_WARN, "firmware panic");
4268 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */
4269 } else {
4270 cmn_err(CE_WARN, "firmware dead");
4271 rval = -1;
4272 }
4273 return (rval);
4274 }
4275
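/*
* Complete a single command with the given abort reason (CMD_TIMEOUT,
* CMD_RESET or CMD_ABORTED).
*/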
4276 static void
4277 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp,
4278 uchar_t reason)
4279 {
4280 acp->flags |= AAC_CMD_ABORT;
4281
4282 if (acp->pkt) {
4283 if (acp->slotp) { /* outstanding cmd */
4284 acp->pkt->pkt_state |= STATE_GOT_STATUS;
4285 }
4286
4287 switch (reason) {
4288 case CMD_TIMEOUT:
4289 AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p",
4290 acp);
4291 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
4292 STAT_TIMEOUT | STAT_BUS_RESET);
4293 break;
4294 case CMD_RESET:
/* aac supports only RESET_ALL */
4296 AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp);
4297 aac_set_pkt_reason(softs, acp, CMD_RESET,
4298 STAT_BUS_RESET);
4299 break;
4300 case CMD_ABORTED:
4301 AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p",
4302 acp);
4303 aac_set_pkt_reason(softs, acp, CMD_ABORTED,
4304 STAT_ABORTED);
4305 break;
4306 }
4307 }
4308 aac_end_io(softs, acp);
4309 }
4310
4311 /*
4312 * Abort all the pending commands of type iocmd or just the command pkt
4313 * corresponding to pkt
4314 */
4315 static void
4316 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt,
4317 int reason)
4318 {
4319 struct aac_cmd *ac_arg, *acp;
4320 int i;
4321
4322 if (pkt == NULL) {
4323 ac_arg = NULL;
4324 } else {
4325 ac_arg = PKT2AC(pkt);
4326 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ?
4327 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC;
4328 }
4329
4330 /*
4331 * a) outstanding commands on the controller
* Note: outstanding commands should be aborted only after
* an IOP reset has been done.
4334 */
4335 if (iocmd & AAC_IOCMD_OUTSTANDING) {
4336 struct aac_cmd *acp;
4337
4338 for (i = 0; i < AAC_MAX_LD; i++) {
4339 if (AAC_DEV_IS_VALID(&softs->containers[i].dev))
4340 softs->containers[i].reset = 1;
4341 }
4342 while ((acp = softs->q_busy.q_head) != NULL)
4343 aac_abort_iocmd(softs, acp, reason);
4344 }
4345
4346 /* b) commands in the waiting queues */
4347 for (i = 0; i < AAC_CMDQ_NUM; i++) {
4348 if (iocmd & (1 << i)) {
4349 if (ac_arg) {
4350 aac_abort_iocmd(softs, ac_arg, reason);
4351 } else {
4352 while ((acp = softs->q_wait[i].q_head) != NULL)
4353 aac_abort_iocmd(softs, acp, reason);
4354 }
4355 }
4356 }
4357 }
4358
4359 /*
4360 * The draining thread is shared among quiesce threads. It terminates
4361 * when the adapter is quiesced or stopped by aac_stop_drain().
4362 */
4363 static void
4364 aac_check_drain(void *arg)
4365 {
4366 struct aac_softstate *softs = arg;
4367
4368 mutex_enter(&softs->io_lock);
4369 if (softs->ndrains) {
4370 softs->drain_timeid = 0;
4371 /*
* If both the ASYNC and SYNC bus throttles are held,
* wake up waiting threads only when both are drained out.
4374 */
4375 if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 ||
4376 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) &&
4377 (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 ||
4378 softs->bus_ncmds[AAC_CMDQ_SYNC] == 0))
4379 cv_broadcast(&softs->drain_cv);
4380 else
4381 softs->drain_timeid = timeout(aac_check_drain, softs,
4382 AAC_QUIESCE_TICK * drv_usectohz(1000000));
4383 }
4384 mutex_exit(&softs->io_lock);
4385 }
4386
4387 /*
* If the outstanding cmds are not already being drained, start
* draining them. Otherwise, only update ndrains.
4390 */
4391 static void
4392 aac_start_drain(struct aac_softstate *softs)
4393 {
4394 if (softs->ndrains == 0) {
4395 ASSERT(softs->drain_timeid == 0);
4396 softs->drain_timeid = timeout(aac_check_drain, softs,
4397 AAC_QUIESCE_TICK * drv_usectohz(1000000));
4398 }
4399 softs->ndrains++;
4400 }
4401
4402 /*
4403 * Stop the draining thread when no other threads use it any longer.
4404 * Side effect: io_lock may be released in the middle.
4405 */
4406 static void
4407 aac_stop_drain(struct aac_softstate *softs)
4408 {
4409 softs->ndrains--;
4410 if (softs->ndrains == 0) {
4411 if (softs->drain_timeid != 0) {
4412 timeout_id_t tid = softs->drain_timeid;
4413
4414 softs->drain_timeid = 0;
4415 mutex_exit(&softs->io_lock);
4416 (void) untimeout(tid);
4417 mutex_enter(&softs->io_lock);
4418 }
4419 }
4420 }
4421
4422 /*
4423 * The following function comes from Adaptec:
4424 *
* Once an IOP reset is done, the driver basically has to re-initialize the
* card as if coming up from a cold boot, and it is responsible for any IO
* that was outstanding to the adapter at the time of the IOP RESET. Prepare
* for the IOP RESET by making the init code modular with the ability to
* call it from multiple places.
4430 */
4431 static int
4432 aac_reset_adapter(struct aac_softstate *softs)
4433 {
4434 int health;
4435 uint32_t status;
4436 int rval = AAC_IOP_RESET_FAILED;
4437
4438 DBCALLED(softs, 1);
4439
4440 ASSERT(softs->state & AAC_STATE_RESET);
4441
4442 ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0);
4443 /* Disable interrupt */
4444 AAC_DISABLE_INTR(softs);
4445
4446 health = aac_check_adapter_health(softs);
4447 if (health == -1) {
4448 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
4449 goto finish;
4450 }
4451 if (health == 0) /* flush drives if possible */
4452 (void) aac_shutdown(softs);
4453
4454 /* Execute IOP reset */
4455 if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0,
4456 &status)) != AACOK) {
4457 ddi_acc_handle_t acc;
4458 struct aac_fib *fibp;
4459 struct aac_pause_command *pc;
4460
4461 if ((status & 0xf) == 0xf) {
4462 uint32_t wait_count;
4463
4464 /*
4465 * Sunrise Lake has dual cores and we must drag the
4466 * other core with us to reset simultaneously. There
4467 * are 2 bits in the Inbound Reset Control and Status
4468 * Register (offset 0x38) of the Sunrise Lake to reset
4469 * the chip without clearing out the PCI configuration
4470 * info (COMMAND & BARS).
4471 */
4472 PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST);
4473
4474 /*
* We need to wait for 5 seconds before accessing the MU again:
* 5 * 10000 iterations * 100us = 5,000,000us = 5s
4477 */
4478 wait_count = 5 * 10000;
4479 while (wait_count) {
4480 drv_usecwait(100); /* delay 100 microseconds */
4481 wait_count--;
4482 }
4483 } else {
4484 if (status == SRB_STATUS_INVALID_REQUEST)
4485 cmn_err(CE_WARN, "!IOP_RESET not supported");
4486 else /* probably timeout */
4487 cmn_err(CE_WARN, "!IOP_RESET failed");
4488
4489 /* Unwind aac_shutdown() */
4490 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
4491 acc = softs->sync_ac.slotp->fib_acc_handle;
4492
4493 fibp = softs->sync_ac.slotp->fibp;
4494 pc = (struct aac_pause_command *)&fibp->data[0];
4495
4496 bzero(pc, sizeof (*pc));
4497 ddi_put32(acc, &pc->Command, VM_ContainerConfig);
4498 ddi_put32(acc, &pc->Type, CT_PAUSE_IO);
4499 ddi_put32(acc, &pc->Timeout, 1);
4500 ddi_put32(acc, &pc->Min, 1);
4501 ddi_put32(acc, &pc->NoRescan, 1);
4502
4503 (void) aac_sync_fib(softs, ContainerCommand,
4504 AAC_FIB_SIZEOF(struct aac_pause_command));
4505 aac_sync_fib_slot_release(softs, &softs->sync_ac);
4506
4507 if (aac_check_adapter_health(softs) != 0)
4508 ddi_fm_service_impact(softs->devinfo_p,
4509 DDI_SERVICE_LOST);
4510 else
4511 /*
* IOP reset not supported or IOP not reset
4513 */
4514 rval = AAC_IOP_RESET_ABNORMAL;
4515 goto finish;
4516 }
4517 }
4518
4519 /*
4520 * Re-read and renegotiate the FIB parameters, as one of the actions
4521 * that can result from an IOP reset is the running of a new firmware
4522 * image.
4523 */
4524 if (aac_common_attach(softs) != AACOK)
4525 goto finish;
4526
4527 rval = AAC_IOP_RESET_SUCCEED;
4528
4529 finish:
4530 AAC_ENABLE_INTR(softs);
4531 return (rval);
4532 }
4533
4534 static void
4535 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q,
4536 int throttle)
4537 {
4538 /*
4539 * If the bus is draining/quiesced, no changes to the throttles
4540 * are allowed. All throttles should have been set to 0.
4541 */
4542 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains)
4543 return;
4544 dvp->throttle[q] = throttle;
4545 }
4546
4547 static void
4548 aac_hold_bus(struct aac_softstate *softs, int iocmds)
4549 {
4550 int i, q;
4551
4552 /* Hold bus by holding every device on the bus */
4553 for (q = 0; q < AAC_CMDQ_NUM; q++) {
4554 if (iocmds & (1 << q)) {
4555 softs->bus_throttle[q] = 0;
4556 for (i = 0; i < AAC_MAX_LD; i++)
4557 aac_set_throttle(softs,
4558 &softs->containers[i].dev, q, 0);
4559 for (i = 0; i < AAC_MAX_PD(softs); i++)
4560 aac_set_throttle(softs,
4561 &softs->nondasds[i].dev, q, 0);
4562 }
4563 }
4564 }
4565
4566 static void
4567 aac_unhold_bus(struct aac_softstate *softs, int iocmds)
4568 {
4569 int i, q, max_throttle;
4570
4571 for (q = 0; q < AAC_CMDQ_NUM; q++) {
4572 if (iocmds & (1 << q)) {
4573 /*
* Do not unhold the AAC_IOCMD_ASYNC bus if it has been
* quiesced or is being drained by quiesce threads.
4577 */
4578 if (q == AAC_CMDQ_ASYNC && ((softs->state &
4579 AAC_STATE_QUIESCED) || softs->ndrains))
4580 continue;
4581 if (q == AAC_CMDQ_ASYNC)
4582 max_throttle = softs->total_slots -
4583 AAC_MGT_SLOT_NUM;
4584 else
4585 max_throttle = softs->total_slots - 1;
4586 softs->bus_throttle[q] = max_throttle;
4587 for (i = 0; i < AAC_MAX_LD; i++)
4588 aac_set_throttle(softs,
4589 &softs->containers[i].dev,
4590 q, max_throttle);
4591 for (i = 0; i < AAC_MAX_PD(softs); i++)
4592 aac_set_throttle(softs, &softs->nondasds[i].dev,
4593 q, max_throttle);
4594 }
4595 }
4596 }
4597
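/*
* Drain outstanding io and reset the IOP if needed. Returns
* AAC_IOP_RESET_SUCCEED, AAC_IOP_RESET_ABNORMAL or AAC_IOP_RESET_FAILED.
*/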
4598 static int
4599 aac_do_reset(struct aac_softstate *softs)
4600 {
4601 int health;
4602 int rval;
4603
4604 softs->state |= AAC_STATE_RESET;
4605 health = aac_check_adapter_health(softs);
4606
4607 /*
* Hold off new io commands and wait for all outstanding io
* commands to complete.
4610 */
4611 if (health == 0) {
4612 int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC];
4613 int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC];
4614
4615 if (sync_cmds == 0 && async_cmds == 0) {
4616 rval = AAC_IOP_RESET_SUCCEED;
4617 goto finish;
4618 }
4619 /*
4620 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds
4621 * to complete the outstanding io commands
4622 */
4623 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10;
4624 int (*intr_handler)(struct aac_softstate *);
4625
4626 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4627 /*
* Poll the adapter ourselves in case interrupts are disabled,
* and to avoid releasing the io_lock.
4630 */
4631 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
4632 aac_process_intr_new : aac_process_intr_old;
4633 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] ||
4634 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) {
4635 drv_usecwait(100);
4636 (void) intr_handler(softs);
4637 timeout--;
4638 }
4639 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4640
4641 if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 &&
4642 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) {
4643 /* Cmds drained out */
4644 rval = AAC_IOP_RESET_SUCCEED;
4645 goto finish;
4646 } else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds ||
4647 softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) {
4648 /* Cmds not drained out, adapter overloaded */
4649 rval = AAC_IOP_RESET_ABNORMAL;
4650 goto finish;
4651 }
4652 }
4653
4654 /*
* If even the longer wait could not drain the outstanding io
* commands, do an IOP reset.
4657 */
4658 if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED)
4659 softs->state |= AAC_STATE_DEAD;
4660
4661 finish:
4662 softs->state &= ~AAC_STATE_RESET;
4663 return (rval);
4664 }
4665
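/*
* tran_reset(9E) - reset the bus; only RESET_ALL is supported
*/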
4666 static int
4667 aac_tran_reset(struct scsi_address *ap, int level)
4668 {
4669 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4670 int rval;
4671
4672 DBCALLED(softs, 1);
4673
4674 if (level != RESET_ALL) {
4675 cmn_err(CE_NOTE, "!reset target/lun not supported");
4676 return (0);
4677 }
4678
4679 mutex_enter(&softs->io_lock);
4680 switch (rval = aac_do_reset(softs)) {
4681 case AAC_IOP_RESET_SUCCEED:
4682 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC,
4683 NULL, CMD_RESET);
4684 aac_start_waiting_io(softs);
4685 break;
4686 case AAC_IOP_RESET_FAILED:
4687 /* Abort IOCTL cmds when adapter is dead */
4688 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET);
4689 break;
4690 case AAC_IOP_RESET_ABNORMAL:
4691 aac_start_waiting_io(softs);
4692 }
4693 mutex_exit(&softs->io_lock);
4694
4695 aac_drain_comp_q(softs);
4696 return (rval == 0);
4697 }
4698
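/*
* tran_abort(9E) - abort the command associated with pkt
*/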
4699 static int
4700 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4701 {
4702 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4703
4704 DBCALLED(softs, 1);
4705
4706 mutex_enter(&softs->io_lock);
4707 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED);
4708 mutex_exit(&softs->io_lock);
4709
4710 aac_drain_comp_q(softs);
4711 return (1);
4712 }
4713
4714 void
4715 aac_free_dmamap(struct aac_cmd *acp)
4716 {
4717 /* Free dma mapping */
4718 if (acp->flags & AAC_CMD_DMA_VALID) {
4719 ASSERT(acp->buf_dma_handle);
4720 (void) ddi_dma_unbind_handle(acp->buf_dma_handle);
4721 acp->flags &= ~AAC_CMD_DMA_VALID;
4722 }
4723
4724 if (acp->abp != NULL) { /* free non-aligned buf DMA */
4725 ASSERT(acp->buf_dma_handle);
4726 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp)
4727 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr,
4728 (uint8_t *)acp->abp, acp->bp->b_bcount,
4729 DDI_DEV_AUTOINCR);
4730 ddi_dma_mem_free(&acp->abh);
4731 acp->abp = NULL;
4732 }
4733
4734 if (acp->buf_dma_handle) {
4735 ddi_dma_free_handle(&acp->buf_dma_handle);
4736 acp->buf_dma_handle = NULL;
4737 }
4738 }
4739
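/*
* Complete an unsupported SCSI command with ILLEGAL REQUEST sense data.
*/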
4740 static void
4741 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
4742 {
4743 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported",
4744 ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd);
4745 aac_free_dmamap(acp);
4746 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0);
4747 aac_soft_callback(softs, acp);
4748 }
4749
4750 /*
4751 * Handle command to logical device
4752 */
4753 static int
4754 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp)
4755 {
4756 struct aac_container *dvp;
4757 struct scsi_pkt *pkt;
4758 union scsi_cdb *cdbp;
4759 struct buf *bp;
4760 int rval;
4761
4762 dvp = (struct aac_container *)acp->dvp;
4763 pkt = acp->pkt;
4764 cdbp = (void *)pkt->pkt_cdbp;
4765 bp = acp->bp;
4766
4767 switch (cdbp->scc_cmd) {
4768 case SCMD_INQUIRY: /* inquiry */
4769 aac_free_dmamap(acp);
4770 aac_inquiry(softs, pkt, cdbp, bp);
4771 aac_soft_callback(softs, acp);
4772 rval = TRAN_ACCEPT;
4773 break;
4774
4775 case SCMD_READ_CAPACITY: /* read capacity */
4776 if (bp && bp->b_un.b_addr && bp->b_bcount) {
4777 struct scsi_capacity cap;
4778 uint64_t last_lba;
4779
4780 /* check 64-bit LBA */
4781 last_lba = dvp->size - 1;
4782 if (last_lba > 0xffffffffull) {
4783 cap.capacity = 0xfffffffful;
4784 } else {
4785 cap.capacity = BE_32(last_lba);
4786 }
4787 cap.lbasize = BE_32(AAC_SECTOR_SIZE);
4788
4789 aac_free_dmamap(acp);
4790 if (bp->b_flags & (B_PHYS|B_PAGEIO))
4791 bp_mapin(bp);
4792 bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8));
4793 pkt->pkt_state |= STATE_XFERRED_DATA;
4794 }
4795 aac_soft_callback(softs, acp);
4796 rval = TRAN_ACCEPT;
4797 break;
4798
4799 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */
4800 /* Check if containers need 64-bit LBA support */
4801 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) {
4802 if (bp && bp->b_un.b_addr && bp->b_bcount) {
4803 struct scsi_capacity_16 cap16;
4804 int cap_len = sizeof (struct scsi_capacity_16);
4805
4806 bzero(&cap16, cap_len);
4807 cap16.sc_capacity = BE_64(dvp->size - 1);
4808 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE);
4809
4810 aac_free_dmamap(acp);
4811 if (bp->b_flags & (B_PHYS | B_PAGEIO))
4812 bp_mapin(bp);
4813 bcopy(&cap16, bp->b_un.b_addr,
4814 min(bp->b_bcount, cap_len));
4815 pkt->pkt_state |= STATE_XFERRED_DATA;
4816 }
4817 aac_soft_callback(softs, acp);
4818 } else {
4819 aac_unknown_scmd(softs, acp);
4820 }
4821 rval = TRAN_ACCEPT;
4822 break;
4823
4824 case SCMD_READ_G4: /* read_16 */
4825 case SCMD_WRITE_G4: /* write_16 */
4826 if (softs->flags & AAC_FLAGS_RAW_IO) {
4827 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */
4828 acp->blkno = ((uint64_t) \
4829 GETG4ADDR(cdbp) << 32) | \
4830 (uint32_t)GETG4ADDRTL(cdbp);
4831 goto do_io;
4832 }
4833 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported");
4834 aac_unknown_scmd(softs, acp);
4835 rval = TRAN_ACCEPT;
4836 break;
4837
4838 case SCMD_READ: /* read_6 */
4839 case SCMD_WRITE: /* write_6 */
4840 acp->blkno = GETG0ADDR(cdbp);
4841 goto do_io;
4842
4843 case SCMD_READ_G5: /* read_12 */
4844 case SCMD_WRITE_G5: /* write_12 */
4845 acp->blkno = GETG5ADDR(cdbp);
4846 goto do_io;
4847
4848 case SCMD_READ_G1: /* read_10 */
4849 case SCMD_WRITE_G1: /* write_10 */
4850 acp->blkno = (uint32_t)GETG1ADDR(cdbp);
4851 do_io:
4852 if (acp->flags & AAC_CMD_DMA_VALID) {
4853 uint64_t cnt_size = dvp->size;
4854
4855 /*
4856 * If LBA > array size AND rawio, the
4857 * adapter may hang. So check it before
4858 * sending.
4859 * NOTE: (blkno + blkcnt) may overflow
4860 */
4861 if ((acp->blkno < cnt_size) &&
4862 ((acp->blkno + acp->bcount /
4863 AAC_BLK_SIZE) <= cnt_size)) {
4864 rval = aac_do_io(softs, acp);
4865 } else {
4866 /*
* Request exceeds the capacity of the disk;
* set the error block number to the last
* LBA + 1.
4870 */
4871 aac_set_arq_data(pkt,
4872 KEY_ILLEGAL_REQUEST, 0x21,
4873 0x00, cnt_size);
4874 aac_soft_callback(softs, acp);
4875 rval = TRAN_ACCEPT;
4876 }
4877 } else if (acp->bcount == 0) {
4878 /* For 0 length IO, just return ok */
4879 aac_soft_callback(softs, acp);
4880 rval = TRAN_ACCEPT;
4881 } else {
4882 rval = TRAN_BADPKT;
4883 }
4884 break;
4885
4886 case SCMD_MODE_SENSE: /* mode_sense_6 */
4887 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */
4888 int capacity;
4889
4890 aac_free_dmamap(acp);
4891 if (dvp->size > 0xffffffffull)
4892 capacity = 0xfffffffful; /* 64-bit LBA */
4893 else
4894 capacity = dvp->size;
4895 aac_mode_sense(softs, pkt, cdbp, bp, capacity);
4896 aac_soft_callback(softs, acp);
4897 rval = TRAN_ACCEPT;
4898 break;
4899 }
4900
4901 case SCMD_START_STOP:
4902 if (softs->support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
4903 acp->aac_cmd_fib = aac_cmd_fib_startstop;
4904 acp->ac_comp = aac_startstop_complete;
4905 rval = aac_do_io(softs, acp);
4906 break;
4907 }
4908 /* FALLTHRU */
4909 case SCMD_TEST_UNIT_READY:
4910 case SCMD_REQUEST_SENSE:
4911 case SCMD_FORMAT:
4912 aac_free_dmamap(acp);
4913 if (bp && bp->b_un.b_addr && bp->b_bcount) {
4914 if (acp->flags & AAC_CMD_BUF_READ) {
4915 if (bp->b_flags & (B_PHYS|B_PAGEIO))
4916 bp_mapin(bp);
4917 bzero(bp->b_un.b_addr, bp->b_bcount);
4918 }
4919 pkt->pkt_state |= STATE_XFERRED_DATA;
4920 }
4921 aac_soft_callback(softs, acp);
4922 rval = TRAN_ACCEPT;
4923 break;
4924
4925 case SCMD_SYNCHRONIZE_CACHE:
4926 acp->flags |= AAC_CMD_NTAG;
4927 acp->aac_cmd_fib = aac_cmd_fib_sync;
4928 acp->ac_comp = aac_synccache_complete;
4929 rval = aac_do_io(softs, acp);
4930 break;
4931
4932 case SCMD_DOORLOCK:
4933 aac_free_dmamap(acp);
4934 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0;
4935 aac_soft_callback(softs, acp);
4936 rval = TRAN_ACCEPT;
4937 break;
4938
4939 default: /* unknown command */
4940 aac_unknown_scmd(softs, acp);
4941 rval = TRAN_ACCEPT;
4942 break;
4943 }
4944
4945 return (rval);
4946 }
4947
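/*
* tran_start(9E) - transport a SCSI command to the addressed target/lun
*/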
4948 static int
4949 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
4950 {
4951 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4952 struct aac_cmd *acp = PKT2AC(pkt);
4953 struct aac_device *dvp = acp->dvp;
4954 int rval;
4955
4956 DBCALLED(softs, 2);
4957
4958 /*
4959 * Reinitialize some fields of ac and pkt; the packet may
4960 * have been resubmitted
4961 */
4962 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \
4963 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID;
4964 acp->timeout = acp->pkt->pkt_time;
4965 if (pkt->pkt_flags & FLAG_NOINTR)
4966 acp->flags |= AAC_CMD_NO_INTR;
4967 #ifdef DEBUG
4968 acp->fib_flags = AACDB_FLAGS_FIB_SCMD;
4969 #endif
4970 pkt->pkt_reason = CMD_CMPLT;
4971 pkt->pkt_state = 0;
4972 pkt->pkt_statistics = 0;
4973 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
4974
4975 if (acp->flags & AAC_CMD_DMA_VALID) {
4976 pkt->pkt_resid = acp->bcount;
4977 /* Consistent packets need to be sync'ed first */
4978 if ((acp->flags & AAC_CMD_CONSISTENT) &&
4979 (acp->flags & AAC_CMD_BUF_WRITE))
4980 if (aac_dma_sync_ac(acp) != AACOK) {
4981 ddi_fm_service_impact(softs->devinfo_p,
4982 DDI_SERVICE_UNAFFECTED);
4983 return (TRAN_BADPKT);
4984 }
4985 } else {
4986 pkt->pkt_resid = 0;
4987 }
4988
4989 mutex_enter(&softs->io_lock);
4990 AACDB_PRINT_SCMD(softs, acp);
4991 if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) &&
4992 !(softs->state & AAC_STATE_DEAD)) {
4993 if (dvp->type == AAC_DEV_LD) {
4994 if (ap->a_lun == 0)
4995 rval = aac_tran_start_ld(softs, acp);
4996 else
4997 goto error;
4998 } else {
4999 rval = aac_do_io(softs, acp);
5000 }
5001 } else {
5002 error:
5003 #ifdef DEBUG
5004 if (!(softs->state & AAC_STATE_DEAD)) {
5005 AACDB_PRINT_TRAN(softs,
5006 "Cannot send cmd to target t%dL%d: %s",
5007 ap->a_target, ap->a_lun,
5008 "target invalid");
5009 } else {
5010 AACDB_PRINT(softs, CE_WARN,
5011 "Cannot send cmd to target t%dL%d: %s",
5012 ap->a_target, ap->a_lun,
5013 "adapter dead");
5014 }
5015 #endif
5016 rval = TRAN_FATAL_ERROR;
5017 }
5018 mutex_exit(&softs->io_lock);
5019 return (rval);
5020 }
5021
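/*
* tran_getcap(9E) - get a SCSI transport capability
*/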
5022 static int
5023 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom)
5024 {
5025 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5026 struct aac_device *dvp;
5027 int rval;
5028
5029 DBCALLED(softs, 2);
5030
5031 /* We don't allow inquiring about capabilities for other targets */
5032 if (cap == NULL || whom == 0) {
5033 AACDB_PRINT(softs, CE_WARN,
5034 "GetCap> %s not supported: whom=%d", cap, whom);
5035 return (-1);
5036 }
5037
5038 mutex_enter(&softs->io_lock);
5039 dvp = AAC_DEV(softs, ap->a_target);
5040 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
5041 mutex_exit(&softs->io_lock);
5042 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap",
5043 ap->a_target, ap->a_lun);
5044 return (-1);
5045 }
5046
5047 switch (scsi_hba_lookup_capstr(cap)) {
5048 case SCSI_CAP_ARQ: /* auto request sense */
5049 rval = 1;
5050 break;
5051 case SCSI_CAP_UNTAGGED_QING:
5052 case SCSI_CAP_TAGGED_QING:
5053 rval = 1;
5054 break;
5055 case SCSI_CAP_DMA_MAX:
5056 rval = softs->dma_max;
5057 break;
5058 default:
5059 rval = -1;
5060 break;
5061 }
5062 mutex_exit(&softs->io_lock);
5063
5064 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d",
5065 cap, ap->a_target, ap->a_lun, rval);
5066 return (rval);
5067 }
5068
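/*
* tran_setcap(9E) - set a SCSI transport capability
*/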
5069 /*ARGSUSED*/
5070 static int
5071 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
5072 {
5073 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5074 struct aac_device *dvp;
5075 int rval;
5076
5077 DBCALLED(softs, 2);
5078
/* We don't allow setting capabilities for other targets */
5080 if (cap == NULL || whom == 0) {
5081 AACDB_PRINT(softs, CE_WARN,
5082 "SetCap> %s not supported: whom=%d", cap, whom);
5083 return (-1);
5084 }
5085
5086 mutex_enter(&softs->io_lock);
5087 dvp = AAC_DEV(softs, ap->a_target);
5088 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
5089 mutex_exit(&softs->io_lock);
5090 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap",
5091 ap->a_target, ap->a_lun);
5092 return (-1);
5093 }
5094
5095 switch (scsi_hba_lookup_capstr(cap)) {
5096 case SCSI_CAP_ARQ:
5097 /* Force auto request sense */
5098 rval = (value == 1) ? 1 : 0;
5099 break;
5100 case SCSI_CAP_UNTAGGED_QING:
5101 case SCSI_CAP_TAGGED_QING:
5102 rval = (value == 1) ? 1 : 0;
5103 break;
5104 default:
5105 rval = -1;
5106 break;
5107 }
5108 mutex_exit(&softs->io_lock);
5109
5110 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d",
5111 cap, ap->a_target, ap->a_lun, value, rval);
5112 return (rval);
5113 }
5114
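/*
* tran_destroy_pkt(9E) - free the scsi_pkt and any DMA resources bound
* to it
*/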
5115 static void
5116 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5117 {
5118 struct aac_cmd *acp = PKT2AC(pkt);
5119
5120 DBCALLED(NULL, 2);
5121
5122 if (acp->sgt) {
5123 kmem_free(acp->sgt, sizeof (struct aac_sge) * \
5124 acp->left_cookien);
5125 }
5126 aac_free_dmamap(acp);
5127 ASSERT(acp->slotp == NULL);
5128 scsi_hba_pkt_free(ap, pkt);
5129 }
5130
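/*
* Allocate DMA resources for a command, or move to its next DMA window,
* and build the scatter/gather list.
*/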
5131 int
5132 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp,
5133 struct buf *bp, int flags, int (*cb)(), caddr_t arg)
5134 {
5135 int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
5136 uint_t oldcookiec;
5137 int bioerr;
5138 int rval;
5139
5140 oldcookiec = acp->left_cookien;
5141
5142 /* Move window to build s/g map */
5143 if (acp->total_nwin > 0) {
5144 if (++acp->cur_win < acp->total_nwin) {
5145 off_t off;
5146 size_t len;
5147
5148 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win,
5149 &off, &len, &acp->cookie, &acp->left_cookien);
5150 if (rval == DDI_SUCCESS)
5151 goto get_dma_cookies;
5152 AACDB_PRINT(softs, CE_WARN,
5153 "ddi_dma_getwin() fail %d", rval);
5154 return (AACERR);
5155 }
5156 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer");
5157 return (AACERR);
5158 }
5159
5160 /* We need to transfer data, so we alloc DMA resources for this pkt */
5161 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) {
5162 uint_t dma_flags = 0;
5163 struct aac_sge *sge;
5164
5165 /*
* We will still use this pointer to fake some
* information in tran_start
5168 */
5169 acp->bp = bp;
5170
5171 /* Set dma flags */
5172 if (BUF_IS_READ(bp)) {
5173 dma_flags |= DDI_DMA_READ;
5174 acp->flags |= AAC_CMD_BUF_READ;
5175 } else {
5176 dma_flags |= DDI_DMA_WRITE;
5177 acp->flags |= AAC_CMD_BUF_WRITE;
5178 }
5179 if (flags & PKT_CONSISTENT)
5180 dma_flags |= DDI_DMA_CONSISTENT;
5181 if (flags & PKT_DMA_PARTIAL)
5182 dma_flags |= DDI_DMA_PARTIAL;
5183
5184 /* Alloc buf dma handle */
5185 if (!acp->buf_dma_handle) {
5186 rval = ddi_dma_alloc_handle(softs->devinfo_p,
5187 &softs->buf_dma_attr, cb, arg,
5188 &acp->buf_dma_handle);
5189 if (rval != DDI_SUCCESS) {
5190 AACDB_PRINT(softs, CE_WARN,
5191 "Can't allocate DMA handle, errno=%d",
5192 rval);
5193 goto error_out;
5194 }
5195 }
5196
5197 /* Bind buf */
5198 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) {
5199 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle,
5200 bp, dma_flags, cb, arg, &acp->cookie,
5201 &acp->left_cookien);
5202 } else {
5203 size_t bufsz;
5204
5205 AACDB_PRINT_TRAN(softs,
5206 "non-aligned buffer: addr=0x%p, cnt=%lu",
5207 (void *)bp->b_un.b_addr, bp->b_bcount);
5208 if (bp->b_flags & (B_PAGEIO|B_PHYS))
5209 bp_mapin(bp);
5210
5211 rval = ddi_dma_mem_alloc(acp->buf_dma_handle,
5212 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN),
5213 &softs->acc_attr, DDI_DMA_STREAMING,
5214 cb, arg, &acp->abp, &bufsz, &acp->abh);
5215
5216 if (rval != DDI_SUCCESS) {
5217 AACDB_PRINT(softs, CE_NOTE,
5218 "Cannot alloc DMA to non-aligned buf");
5219 bioerr = 0;
5220 goto error_out;
5221 }
5222
5223 if (acp->flags & AAC_CMD_BUF_WRITE)
5224 ddi_rep_put8(acp->abh,
5225 (uint8_t *)bp->b_un.b_addr,
5226 (uint8_t *)acp->abp, bp->b_bcount,
5227 DDI_DEV_AUTOINCR);
5228
5229 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle,
5230 NULL, acp->abp, bufsz, dma_flags, cb, arg,
5231 &acp->cookie, &acp->left_cookien);
5232 }
5233
5234 switch (rval) {
5235 case DDI_DMA_PARTIAL_MAP:
5236 if (ddi_dma_numwin(acp->buf_dma_handle,
5237 &acp->total_nwin) == DDI_FAILURE) {
5238 AACDB_PRINT(softs, CE_WARN,
5239 "Cannot get number of DMA windows");
5240 bioerr = 0;
5241 goto error_out;
5242 }
5243 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
5244 acp->left_cookien);
5245 acp->cur_win = 0;
5246 break;
5247
5248 case DDI_DMA_MAPPED:
5249 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
5250 acp->left_cookien);
5251 acp->cur_win = 0;
5252 acp->total_nwin = 1;
5253 break;
5254
5255 case DDI_DMA_NORESOURCES:
5256 bioerr = 0;
5257 AACDB_PRINT(softs, CE_WARN,
5258 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES");
5259 goto error_out;
5260 case DDI_DMA_BADATTR:
5261 case DDI_DMA_NOMAPPING:
5262 bioerr = EFAULT;
5263 AACDB_PRINT(softs, CE_WARN,
5264 "Cannot bind buf for DMA: DDI_DMA_NOMAPPING");
5265 goto error_out;
5266 case DDI_DMA_TOOBIG:
5267 bioerr = EINVAL;
5268 AACDB_PRINT(softs, CE_WARN,
5269 "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)",
5270 bp->b_bcount);
5271 goto error_out;
5272 default:
5273 bioerr = EINVAL;
5274 AACDB_PRINT(softs, CE_WARN,
5275 "Cannot bind buf for DMA: %d", rval);
5276 goto error_out;
5277 }
5278 acp->flags |= AAC_CMD_DMA_VALID;
5279
5280 get_dma_cookies:
5281 ASSERT(acp->left_cookien > 0);
5282 if (acp->left_cookien > softs->aac_sg_tablesize) {
5283 AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d",
5284 acp->left_cookien);
5285 bioerr = EINVAL;
5286 goto error_out;
5287 }
5288 if (oldcookiec != acp->left_cookien && acp->sgt != NULL) {
5289 kmem_free(acp->sgt, sizeof (struct aac_sge) * \
5290 oldcookiec);
5291 acp->sgt = NULL;
5292 }
5293 if (acp->sgt == NULL) {
5294 acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \
5295 acp->left_cookien, kf);
5296 if (acp->sgt == NULL) {
5297 AACDB_PRINT(softs, CE_WARN,
5298 "sgt kmem_alloc fail");
5299 bioerr = ENOMEM;
5300 goto error_out;
5301 }
5302 }
5303
5304 sge = &acp->sgt[0];
5305 sge->bcount = acp->cookie.dmac_size;
5306 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5307 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5308 acp->bcount = acp->cookie.dmac_size;
5309 for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) {
5310 ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie);
5311 sge->bcount = acp->cookie.dmac_size;
5312 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5313 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5314 acp->bcount += acp->cookie.dmac_size;
5315 }
5316
5317 /*
* Note: The old DMA engine does not correctly handle the
* dma_attr_maxxfer attribute, so we have to enforce it
* ourselves.
5321 */
5322 if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) {
5323 AACDB_PRINT(softs, CE_NOTE,
5324 "large xfer size received %d\n", acp->bcount);
5325 bioerr = EINVAL;
5326 goto error_out;
5327 }
5328
5329 acp->total_xfer += acp->bcount;
5330
5331 if (acp->pkt) {
5332 /* Return remaining byte count */
5333 if (acp->total_xfer <= bp->b_bcount) {
5334 acp->pkt->pkt_resid = bp->b_bcount - \
5335 acp->total_xfer;
5336 } else {
5337 /*
5338 * Allocated DMA size is greater than the buf
5339 * size of bp. This is caused by devices like
* tape. We have extra bytes allocated, but
5341 * the packet residual has to stay correct.
5342 */
5343 acp->pkt->pkt_resid = 0;
5344 }
5345 AACDB_PRINT_TRAN(softs,
5346 "bp=0x%p, xfered=%d/%d, resid=%d",
5347 (void *)bp->b_un.b_addr, (int)acp->total_xfer,
5348 (int)bp->b_bcount, (int)acp->pkt->pkt_resid);
5349 }
5350 }
5351 return (AACOK);
5352
5353 error_out:
5354 bioerror(bp, bioerr);
5355 return (AACERR);
5356 }
5357
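/*
* tran_init_pkt(9E) - allocate and initialize a scsi_pkt and its DMA
* resources
*/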
5358 static struct scsi_pkt *
5359 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
5360 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
5361 int (*callback)(), caddr_t arg)
5362 {
5363 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5364 struct aac_cmd *acp, *new_acp;
5365
5366 DBCALLED(softs, 2);
5367
5368 /* Allocate pkt */
5369 if (pkt == NULL) {
5370 int slen;
5371
5372 /* Force auto request sense */
5373 slen = (statuslen > softs->slen) ? statuslen : softs->slen;
5374 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen,
5375 slen, tgtlen, sizeof (struct aac_cmd), callback, arg);
5376 if (pkt == NULL) {
5377 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed");
5378 return (NULL);
5379 }
5380 acp = new_acp = PKT2AC(pkt);
5381 acp->pkt = pkt;
5382 acp->cmdlen = cmdlen;
5383
5384 if (ap->a_target < AAC_MAX_LD) {
5385 acp->dvp = &softs->containers[ap->a_target].dev;
5386 acp->aac_cmd_fib = softs->aac_cmd_fib;
5387 acp->ac_comp = aac_ld_complete;
5388 } else {
5389 _NOTE(ASSUMING_PROTECTED(softs->nondasds))
5390
5391 acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev;
5392 acp->aac_cmd_fib = softs->aac_cmd_fib_scsi;
5393 acp->ac_comp = aac_pd_complete;
5394 }
5395 } else {
5396 acp = PKT2AC(pkt);
5397 new_acp = NULL;
5398 }
5399
5400 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK)
5401 return (pkt);
5402
5403 if (new_acp)
5404 aac_tran_destroy_pkt(ap, pkt);
5405 return (NULL);
5406 }
5407
5408 /*
5409 * tran_sync_pkt(9E) - explicit DMA synchronization
5410 */
5411 /*ARGSUSED*/
5412 static void
5413 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5414 {
5415 struct aac_cmd *acp = PKT2AC(pkt);
5416
5417 DBCALLED(NULL, 2);
5418
5419 if (aac_dma_sync_ac(acp) != AACOK)
5420 ddi_fm_service_impact(
5421 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p,
5422 DDI_SERVICE_UNAFFECTED);
5423 }
5424
5425 /*
5426 * tran_dmafree(9E) - deallocate DMA resources allocated for command
5427 */
5428 /*ARGSUSED*/
5429 static void
5430 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
5431 {
5432 struct aac_cmd *acp = PKT2AC(pkt);
5433
5434 DBCALLED(NULL, 2);
5435
5436 aac_free_dmamap(acp);
5437 }
5438
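/*
 * Hold the bus for async commands and drain the outstanding async I/O
 * before marking the controller quiesced.
 */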
5439 static int
5440 aac_do_quiesce(struct aac_softstate *softs)
5441 {
5442 aac_hold_bus(softs, AAC_IOCMD_ASYNC);
5443 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) {
5444 aac_start_drain(softs);
5445 do {
5446 if (cv_wait_sig(&softs->drain_cv,
5447 &softs->io_lock) == 0) {
5448 /* Quiesce has been interrupted */
5449 aac_stop_drain(softs);
5450 aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5451 aac_start_waiting_io(softs);
5452 return (AACERR);
5453 }
5454 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]);
5455 aac_stop_drain(softs);
5456 }
5457
5458 softs->state |= AAC_STATE_QUIESCED;
5459 return (AACOK);
5460 }
5461
5462 static int
5463 aac_tran_quiesce(dev_info_t *dip)
5464 {
5465 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5466 int rval;
5467
5468 DBCALLED(softs, 1);
5469
5470 mutex_enter(&softs->io_lock);
5471 if (aac_do_quiesce(softs) == AACOK)
5472 rval = 0;
5473 else
5474 rval = 1;
5475 mutex_exit(&softs->io_lock);
5476 return (rval);
5477 }
5478
5479 static int
5480 aac_do_unquiesce(struct aac_softstate *softs)
5481 {
5482 softs->state &= ~AAC_STATE_QUIESCED;
5483 aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5484
5485 aac_start_waiting_io(softs);
5486 return (AACOK);
5487 }
5488
5489 static int
5490 aac_tran_unquiesce(dev_info_t *dip)
5491 {
5492 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5493 int rval;
5494
5495 DBCALLED(softs, 1);
5496
5497 mutex_enter(&softs->io_lock);
5498 if (aac_do_unquiesce(softs) == AACOK)
5499 rval = 0;
5500 else
5501 rval = 1;
5502 mutex_exit(&softs->io_lock);
5503 return (rval);
5504 }
5505
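/*
 * Allocate the SCSA transport structure, fill in the tran_*(9E) entry
 * points and register with the SCSA framework.
 */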
5506 static int
5507 aac_hba_setup(struct aac_softstate *softs)
5508 {
5509 scsi_hba_tran_t *hba_tran;
5510 int rval;
5511
5512 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP);
5513 if (hba_tran == NULL)
5514 return (AACERR);
5515 hba_tran->tran_hba_private = softs;
5516 hba_tran->tran_tgt_init = aac_tran_tgt_init;
5517 hba_tran->tran_tgt_free = aac_tran_tgt_free;
5518 hba_tran->tran_tgt_probe = scsi_hba_probe;
5519 hba_tran->tran_start = aac_tran_start;
5520 hba_tran->tran_getcap = aac_tran_getcap;
5521 hba_tran->tran_setcap = aac_tran_setcap;
5522 hba_tran->tran_init_pkt = aac_tran_init_pkt;
5523 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt;
5524 hba_tran->tran_reset = aac_tran_reset;
5525 hba_tran->tran_abort = aac_tran_abort;
5526 hba_tran->tran_sync_pkt = aac_tran_sync_pkt;
5527 hba_tran->tran_dmafree = aac_tran_dmafree;
5528 hba_tran->tran_quiesce = aac_tran_quiesce;
5529 hba_tran->tran_unquiesce = aac_tran_unquiesce;
5530 hba_tran->tran_bus_config = aac_tran_bus_config;
5531 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr,
5532 hba_tran, 0);
5533 if (rval != DDI_SUCCESS) {
5534 scsi_hba_tran_free(hba_tran);
5535 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed");
5536 return (AACERR);
5537 }
5538
5539 softs->hba_tran = hba_tran;
5540 return (AACOK);
5541 }
5542
5543 /*
5544 * FIB setup operations
5545 */
5546
5547 /*
5548 * Init FIB header
5549 */
5550 static void
5551 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_cmd *acp,
5552 uint16_t cmd)
5553 {
5554 struct aac_slot *slotp = acp->slotp;
5555 ddi_acc_handle_t acc = slotp->fib_acc_handle;
5556 struct aac_fib *fibp = slotp->fibp;
5557 uint32_t xfer_state;
5558
5559 xfer_state =
5560 AAC_FIBSTATE_HOSTOWNED |
5561 AAC_FIBSTATE_INITIALISED |
5562 AAC_FIBSTATE_EMPTY |
5563 AAC_FIBSTATE_FAST_RESPONSE | /* enable fast io */
5564 AAC_FIBSTATE_FROMHOST |
5565 AAC_FIBSTATE_REXPECTED |
5566 AAC_FIBSTATE_NORM;
5567
5568 if (!(acp->flags & AAC_CMD_SYNC))
5569 xfer_state |= AAC_FIBSTATE_ASYNC;
5570
5571 ddi_put32(acc, &fibp->Header.XferState, xfer_state);
5572 ddi_put16(acc, &fibp->Header.Command, cmd);
5573 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB);
5574 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */
5575 ddi_put16(acc, &fibp->Header.Size, acp->fib_size);
5576 ddi_put16(acc, &fibp->Header.SenderSize, softs->aac_max_fib_size);
5577 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2));
5578 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5579 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */
5580 }
5581
5582 /*
5583 * Init FIB for raw IO command
5584 */
5585 static void
5586 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp)
5587 {
5588 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5589 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0];
5590 struct aac_sg_entryraw *sgp;
5591 struct aac_sge *sge;
5592
5593 /* Calculate FIB size */
5594 acp->fib_size = sizeof (struct aac_fib_header) + \
5595 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \
5596 sizeof (struct aac_sg_entryraw);
5597
5598 aac_cmd_fib_header(softs, acp, RawIo);
5599
5600 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0);
5601 ddi_put16(acc, &io->BpTotal, 0);
5602 ddi_put16(acc, &io->BpComplete, 0);
5603
5604 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno));
5605 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno));
5606 ddi_put16(acc, &io->ContainerId,
5607 ((struct aac_container *)acp->dvp)->cid);
5608
5609 /* Fill SG table */
5610 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien);
5611 ddi_put32(acc, &io->ByteCount, acp->bcount);
5612
5613 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0];
5614 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5615 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5616 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5617 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5618 sgp->Next = 0;
5619 sgp->Prev = 0;
5620 sgp->Flags = 0;
5621 }
5622 }
5623
5624 /* Init FIB for 64-bit block IO command */
5625 static void
5626 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp)
5627 {
5628 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5629 struct aac_blockread64 *br = (struct aac_blockread64 *) \
5630 &acp->slotp->fibp->data[0];
5631 struct aac_sg_entry64 *sgp;
5632 struct aac_sge *sge;
5633
5634 acp->fib_size = sizeof (struct aac_fib_header) + \
5635 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \
5636 sizeof (struct aac_sg_entry64);
5637
5638 aac_cmd_fib_header(softs, acp, ContainerCommand64);
5639
5640 /*
5641 * The definitions for aac_blockread64 and aac_blockwrite64
5642 * are the same.
5643 */
5644 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5645 ddi_put16(acc, &br->ContainerId,
5646 ((struct aac_container *)acp->dvp)->cid);
5647 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ?
5648 VM_CtHostRead64 : VM_CtHostWrite64);
5649 ddi_put16(acc, &br->Pad, 0);
5650 ddi_put16(acc, &br->Flags, 0);
5651
5652 /* Fill SG table */
5653 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien);
5654 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE);
5655
5656 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0];
5657 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5658 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5659 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5660 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5661 }
5662 }
5663
5664 /* Init FIB for block IO command */
5665 static void
5666 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp)
5667 {
5668 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5669 struct aac_blockread *br = (struct aac_blockread *) \
5670 &acp->slotp->fibp->data[0];
5671 struct aac_sg_entry *sgp;
5672 struct aac_sge *sge = &acp->sgt[0];
5673
5674 if (acp->flags & AAC_CMD_BUF_READ) {
5675 acp->fib_size = sizeof (struct aac_fib_header) + \
5676 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \
5677 sizeof (struct aac_sg_entry);
5678
5679 ddi_put32(acc, &br->Command, VM_CtBlockRead);
5680 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien);
5681 sgp = &br->SgMap.SgEntry[0];
5682 } else {
5683 struct aac_blockwrite *bw = (struct aac_blockwrite *)br;
5684
5685 acp->fib_size = sizeof (struct aac_fib_header) + \
5686 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \
5687 sizeof (struct aac_sg_entry);
5688
5689 ddi_put32(acc, &bw->Command, VM_CtBlockWrite);
5690 ddi_put32(acc, &bw->Stable, CUNSTABLE);
5691 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien);
5692 sgp = &bw->SgMap.SgEntry[0];
5693 }
5694 aac_cmd_fib_header(softs, acp, ContainerCommand);
5695
5696 /*
5697 * aac_blockread and aac_blockwrite share the same leading fields,
5698 * so br is used for bw here as well.
5699 */
5700 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5701 ddi_put32(acc, &br->ContainerId,
5702 ((struct aac_container *)acp->dvp)->cid);
5703 ddi_put32(acc, &br->ByteCount, acp->bcount);
5704
5705 /* Fill SG table */
5706 for (sge = &acp->sgt[0];
5707 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5708 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5709 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5710 }
5711 }
5712
5713 /*ARGSUSED*/
5714 void
5715 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp)
5716 {
5717 struct aac_slot *slotp = acp->slotp;
5718 struct aac_fib *fibp = slotp->fibp;
5719 ddi_acc_handle_t acc = slotp->fib_acc_handle;
5720
5721 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp,
5722 acp->fib_size, /* only copy data of needed length */
5723 DDI_DEV_AUTOINCR);
5724 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5725 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2);
5726 }
5727
5728 static void
5729 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp)
5730 {
5731 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5732 struct aac_synchronize_command *sync =
5733 (struct aac_synchronize_command *)&acp->slotp->fibp->data[0];
5734
5735 acp->fib_size = AAC_FIB_SIZEOF(struct aac_synchronize_command);
5736
5737 aac_cmd_fib_header(softs, acp, ContainerCommand);
5738 ddi_put32(acc, &sync->Command, VM_ContainerConfig);
5739 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE);
5740 ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid);
5741 ddi_put32(acc, &sync->Count,
5742 sizeof (((struct aac_synchronize_reply *)0)->Data));
5743 }
5744
5745 /*
5746 * Start/Stop unit (Power Management)
5747 */
5748 static void
5749 aac_cmd_fib_startstop(struct aac_softstate *softs, struct aac_cmd *acp)
5750 {
5751 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5752 struct aac_Container *cmd =
5753 (struct aac_Container *)&acp->slotp->fibp->data[0];
5754 union scsi_cdb *cdbp = (void *)acp->pkt->pkt_cdbp;
5755
5756 acp->fib_size = AAC_FIB_SIZEOF(struct aac_Container);
5757
5758 aac_cmd_fib_header(softs, acp, ContainerCommand);
5759 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
5760 ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
5761 ddi_put32(acc, &cmd->CTCommand.command, CT_PM_DRIVER_SUPPORT);
5762 ddi_put32(acc, &cmd->CTCommand.param[0], cdbp->cdb_opaque[4] & 1 ? \
5763 AAC_PM_DRIVERSUP_START_UNIT : AAC_PM_DRIVERSUP_STOP_UNIT);
5764 ddi_put32(acc, &cmd->CTCommand.param[1],
5765 ((struct aac_container *)acp->dvp)->cid);
5766 ddi_put32(acc, &cmd->CTCommand.param[2], cdbp->cdb_opaque[1] & 1);
5767 }
5768
5769 /*
5770 * Init FIB for pass-through SCMD
5771 */
5772 static void
5773 aac_cmd_fib_srb(struct aac_cmd *acp)
5774 {
5775 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5776 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5777 uint8_t *cdb;
5778
5779 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi);
5780 ddi_put32(acc, &srb->retry_limit, 0);
5781 ddi_put32(acc, &srb->cdb_size, acp->cmdlen);
5782 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */
5783 if (acp->fibp == NULL) {
5784 if (acp->flags & AAC_CMD_BUF_READ)
5785 ddi_put32(acc, &srb->flags, SRB_DataIn);
5786 else if (acp->flags & AAC_CMD_BUF_WRITE)
5787 ddi_put32(acc, &srb->flags, SRB_DataOut);
5788 ddi_put32(acc, &srb->channel,
5789 ((struct aac_nondasd *)acp->dvp)->bus);
5790 ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid);
5791 ddi_put32(acc, &srb->lun, 0);
5792 cdb = acp->pkt->pkt_cdbp;
5793 } else {
5794 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0];
5795
5796 ddi_put32(acc, &srb->flags, srb0->flags);
5797 ddi_put32(acc, &srb->channel, srb0->channel);
5798 ddi_put32(acc, &srb->id, srb0->id);
5799 ddi_put32(acc, &srb->lun, srb0->lun);
5800 cdb = srb0->cdb;
5801 }
5802 ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR);
5803 }
5804
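/*
 * Init FIB for pass-through SCMD with 32-bit SG entries
 */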
5805 static void
5806 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp)
5807 {
5808 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5809 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5810 struct aac_sg_entry *sgp;
5811 struct aac_sge *sge;
5812
5813 acp->fib_size = sizeof (struct aac_fib_header) + \
5814 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5815 acp->left_cookien * sizeof (struct aac_sg_entry);
5816
5817 /* Fill FIB and SRB headers, and copy cdb */
5818 aac_cmd_fib_header(softs, acp, ScsiPortCommand);
5819 aac_cmd_fib_srb(acp);
5820
5821 /* Fill SG table */
5822 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5823 ddi_put32(acc, &srb->count, acp->bcount);
5824
5825 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0];
5826 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5827 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5828 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5829 }
5830 }
5831
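/*
 * Init FIB for pass-through SCMD with 64-bit SG entries
 */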
5832 static void
5833 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp)
5834 {
5835 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5836 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5837 struct aac_sg_entry64 *sgp;
5838 struct aac_sge *sge;
5839
5840 acp->fib_size = sizeof (struct aac_fib_header) + \
5841 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5842 acp->left_cookien * sizeof (struct aac_sg_entry64);
5843
5844 /* Fill FIB and SRB headers, and copy cdb */
5845 aac_cmd_fib_header(softs, acp, ScsiPortCommandU64);
5846 aac_cmd_fib_srb(acp);
5847
5848 /* Fill SG table */
5849 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5850 ddi_put32(acc, &srb->count, acp->bcount);
5851
5852 for (sge = &acp->sgt[0],
5853 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0];
5854 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5855 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5856 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5857 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5858 }
5859 }
5860
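/*
 * Bind the command to a free slot, build its FIB and sync the FIB
 * for the device.
 */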
5861 static int
5862 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5863 {
5864 struct aac_slot *slotp;
5865
5866 if (slotp = aac_get_slot(softs)) {
5867 acp->slotp = slotp;
5868 slotp->acp = acp;
5869 acp->aac_cmd_fib(softs, acp);
5870 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0,
5871 DDI_DMA_SYNC_FORDEV);
5872 return (AACOK);
5873 }
5874 return (AACERR);
5875 }
5876
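/*
 * Bind a command to a slot if both the bus and the target are below
 * their throttle limits, otherwise leave it for the caller to queue.
 */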
5877 static int
5878 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp)
5879 {
5880 struct aac_device *dvp = acp->dvp;
5881 int q = AAC_CMDQ(acp);
5882
5883 if (softs->bus_ncmds[q] < softs->bus_throttle[q]) {
5884 if (dvp) {
5885 if (dvp->ncmds[q] < dvp->throttle[q]) {
5886 if (!(acp->flags & AAC_CMD_NTAG) ||
5887 dvp->ncmds[q] == 0) {
5888 return (aac_cmd_slot_bind(softs, acp));
5889 }
5890 ASSERT(q == AAC_CMDQ_ASYNC);
5891 aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC,
5892 AAC_THROTTLE_DRAIN);
5893 }
5894 } else {
5895 return (aac_cmd_slot_bind(softs, acp));
5896 }
5897 }
5898 return (AACERR);
5899 }
5900
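/*
 * Bind the single sync FIB command to a slot, waiting until the
 * previous sync FIB user has released it.
 */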
5901 static int
5902 aac_sync_fib_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5903 {
5904 struct aac_slot *slotp;
5905
5906 while (softs->sync_ac.slotp)
5907 cv_wait(&softs->sync_fib_cv, &softs->io_lock);
5908
5909 if (slotp = aac_get_slot(softs)) {
5910 ASSERT(acp->slotp == NULL);
5911
5912 acp->slotp = slotp;
5913 slotp->acp = acp;
5914 return (AACOK);
5915 }
5916 return (AACERR);
5917 }
5918
5919 static void
5920 aac_sync_fib_slot_release(struct aac_softstate *softs, struct aac_cmd *acp)
5921 {
5922 ASSERT(acp->slotp);
5923
5924 aac_release_slot(softs, acp->slotp);
5925 acp->slotp->acp = NULL;
5926 acp->slotp = NULL;
5927
5928 cv_signal(&softs->sync_fib_cv);
5929 }
5930
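/*
 * Send a bound command to the adapter and put it on the busy queue
 */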
5931 static void
5932 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp)
5933 {
5934 struct aac_slot *slotp = acp->slotp;
5935 int q = AAC_CMDQ(acp);
5936 int rval;
5937
5938 /* Set ac and pkt */
5939 if (acp->pkt) { /* ac from ioctl has no pkt */
5940 acp->pkt->pkt_state |=
5941 STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
5942 }
5943 if (acp->timeout) /* 0 indicates no timeout */
5944 acp->timeout += aac_timebase + aac_tick;
5945
5946 if (acp->dvp)
5947 acp->dvp->ncmds[q]++;
5948 softs->bus_ncmds[q]++;
5949 aac_cmd_enqueue(&softs->q_busy, acp);
5950
5951 AACDB_PRINT_FIB(softs, slotp);
5952
5953 if (softs->flags & AAC_FLAGS_NEW_COMM) {
5954 rval = aac_send_command(softs, slotp);
5955 } else {
5956 /*
5957 * If the FIB cannot be enqueued, the adapter is in an
5958 * abnormal state and will not interrupt us.
5959 */
5960 rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q,
5961 slotp->fib_phyaddr, acp->fib_size);
5962 }
5963
5964 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS)
5965 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
5966
5967 /*
5968 * NOTE: Commands are sent only when slots are available, so we
5969 * should never reach here.
5970 */
5971 if (rval != AACOK) {
5972 AACDB_PRINT(softs, CE_NOTE, "SCMD send failed");
5973 if (acp->pkt) {
5974 acp->pkt->pkt_state &= ~STATE_SENT_CMD;
5975 aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0);
5976 }
5977 aac_end_io(softs, acp);
5978 if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB)))
5979 ddi_trigger_softintr(softs->softint_id);
5980 }
5981 }
5982
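/*
 * Start queued commands on a wait queue while free slots remain
 */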
5983 static void
5984 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q)
5985 {
5986 struct aac_cmd *acp, *next_acp;
5987
5988 /* Serve as many waiting io's as possible */
5989 for (acp = q->q_head; acp; acp = next_acp) {
5990 next_acp = acp->next;
5991 if (aac_bind_io(softs, acp) == AACOK) {
5992 aac_cmd_delete(q, acp);
5993 aac_start_io(softs, acp);
5994 }
5995 if (softs->free_io_slot_head == NULL)
5996 break;
5997 }
5998 }
5999
6000 static void
6001 aac_start_waiting_io(struct aac_softstate *softs)
6002 {
6003 /*
6004 * Sync FIB I/O is served before async FIB I/O so that I/O requests
6005 * sent by interactive userland commands get responses as soon as possible.
6006 */
6007 if (softs->q_wait[AAC_CMDQ_SYNC].q_head)
6008 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]);
6009 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head)
6010 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]);
6011 }
6012
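/*
 * Complete all packets on the completion queue, checking the FMA
 * handles before calling back into the target driver.
 */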
6013 static void
6014 aac_drain_comp_q(struct aac_softstate *softs)
6015 {
6016 struct aac_cmd *acp;
6017 struct scsi_pkt *pkt;
6018
6019 /*CONSTCOND*/
6020 while (1) {
6021 mutex_enter(&softs->q_comp_mutex);
6022 acp = aac_cmd_dequeue(&softs->q_comp);
6023 mutex_exit(&softs->q_comp_mutex);
6024 if (acp != NULL) {
6025 ASSERT(acp->pkt != NULL);
6026 pkt = acp->pkt;
6027
6028 if (pkt->pkt_reason == CMD_CMPLT) {
6029 /*
6030 * Consistent packets need to be sync'ed first
6031 */
6032 if ((acp->flags & AAC_CMD_CONSISTENT) &&
6033 (acp->flags & AAC_CMD_BUF_READ)) {
6034 if (aac_dma_sync_ac(acp) != AACOK) {
6035 ddi_fm_service_impact(
6036 softs->devinfo_p,
6037 DDI_SERVICE_UNAFFECTED);
6038 pkt->pkt_reason = CMD_TRAN_ERR;
6039 pkt->pkt_statistics = 0;
6040 }
6041 }
6042 if ((aac_check_acc_handle(softs-> \
6043 comm_space_acc_handle) != DDI_SUCCESS) ||
6044 (aac_check_acc_handle(softs-> \
6045 pci_mem_handle) != DDI_SUCCESS)) {
6046 ddi_fm_service_impact(softs->devinfo_p,
6047 DDI_SERVICE_UNAFFECTED);
6048 ddi_fm_acc_err_clear(softs-> \
6049 pci_mem_handle, DDI_FME_VER0);
6050 pkt->pkt_reason = CMD_TRAN_ERR;
6051 pkt->pkt_statistics = 0;
6052 }
6053 if (aac_check_dma_handle(softs-> \
6054 comm_space_dma_handle) != DDI_SUCCESS) {
6055 ddi_fm_service_impact(softs->devinfo_p,
6056 DDI_SERVICE_UNAFFECTED);
6057 pkt->pkt_reason = CMD_TRAN_ERR;
6058 pkt->pkt_statistics = 0;
6059 }
6060 }
6061 scsi_hba_pkt_comp(pkt);
6062 } else {
6063 break;
6064 }
6065 }
6066 }
6067
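/*
 * Allocate and bind DMA memory for one slot's FIB
 */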
6068 static int
6069 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp)
6070 {
6071 size_t rlen;
6072 ddi_dma_cookie_t cookie;
6073 uint_t cookien;
6074
6075 /* Allocate FIB dma resource */
6076 if (ddi_dma_alloc_handle(
6077 softs->devinfo_p,
6078 &softs->addr_dma_attr,
6079 DDI_DMA_SLEEP,
6080 NULL,
6081 &slotp->fib_dma_handle) != DDI_SUCCESS) {
6082 AACDB_PRINT(softs, CE_WARN,
6083 "Cannot alloc dma handle for slot fib area");
6084 goto error;
6085 }
6086 if (ddi_dma_mem_alloc(
6087 slotp->fib_dma_handle,
6088 softs->aac_max_fib_size,
6089 &softs->acc_attr,
6090 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
6091 DDI_DMA_SLEEP,
6092 NULL,
6093 (caddr_t *)&slotp->fibp,
6094 &rlen,
6095 &slotp->fib_acc_handle) != DDI_SUCCESS) {
6096 AACDB_PRINT(softs, CE_WARN,
6097 "Cannot alloc mem for slot fib area");
6098 goto error;
6099 }
6100 if (ddi_dma_addr_bind_handle(
6101 slotp->fib_dma_handle,
6102 NULL,
6103 (caddr_t)slotp->fibp,
6104 softs->aac_max_fib_size,
6105 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
6106 DDI_DMA_SLEEP,
6107 NULL,
6108 &cookie,
6109 &cookien) != DDI_DMA_MAPPED) {
6110 AACDB_PRINT(softs, CE_WARN,
6111 "dma bind failed for slot fib area");
6112 goto error;
6113 }
6114
6115 /* Check dma handles allocated in fib attach */
6116 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) {
6117 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
6118 goto error;
6119 }
6120
6121 /* Check acc handles allocated in fib attach */
6122 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) {
6123 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
6124 goto error;
6125 }
6126
6127 slotp->fib_phyaddr = cookie.dmac_laddress;
6128 return (AACOK);
6129
6130 error:
6131 if (slotp->fib_acc_handle) {
6132 ddi_dma_mem_free(&slotp->fib_acc_handle);
6133 slotp->fib_acc_handle = NULL;
6134 }
6135 if (slotp->fib_dma_handle) {
6136 ddi_dma_free_handle(&slotp->fib_dma_handle);
6137 slotp->fib_dma_handle = NULL;
6138 }
6139 return (AACERR);
6140 }
6141
6142 static void
6143 aac_free_fib(struct aac_slot *slotp)
6144 {
6145 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle);
6146 ddi_dma_mem_free(&slotp->fib_acc_handle);
6147 slotp->fib_acc_handle = NULL;
6148 ddi_dma_free_handle(&slotp->fib_dma_handle);
6149 slotp->fib_dma_handle = NULL;
6150 slotp->fib_phyaddr = 0;
6151 }
6152
6153 static void
6154 aac_alloc_fibs(struct aac_softstate *softs)
6155 {
6156 int i;
6157 struct aac_slot *slotp;
6158
6159 for (i = 0; i < softs->total_slots &&
6160 softs->total_fibs < softs->total_slots; i++) {
6161 slotp = &(softs->io_slot[i]);
6162 if (slotp->fib_phyaddr)
6163 continue;
6164 if (aac_alloc_fib(softs, slotp) != AACOK)
6165 break;
6166
6167 /* Insert the slot to the free slot list */
6168 aac_release_slot(softs, slotp);
6169 softs->total_fibs++;
6170 }
6171 }
6172
6173 static void
6174 aac_destroy_fibs(struct aac_softstate *softs)
6175 {
6176 struct aac_slot *slotp;
6177
6178 while ((slotp = softs->free_io_slot_head) != NULL) {
6179 ASSERT(slotp->fib_phyaddr);
6180 softs->free_io_slot_head = slotp->next;
6181 aac_free_fib(slotp);
6182 ASSERT(slotp->index == (slotp - softs->io_slot));
6183 softs->total_fibs--;
6184 }
6185 ASSERT(softs->total_fibs == 0);
6186 }
6187
6188 static int
6189 aac_create_slots(struct aac_softstate *softs)
6190 {
6191 int i;
6192
6193 softs->total_slots = softs->aac_max_fibs;
6194 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \
6195 softs->total_slots, KM_SLEEP);
6196 if (softs->io_slot == NULL) {
6197 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot");
6198 return (AACERR);
6199 }
6200 for (i = 0; i < softs->total_slots; i++)
6201 softs->io_slot[i].index = i;
6202 softs->free_io_slot_head = NULL;
6203 softs->total_fibs = 0;
6204 return (AACOK);
6205 }
6206
6207 static void
6208 aac_destroy_slots(struct aac_softstate *softs)
6209 {
6210 ASSERT(softs->free_io_slot_head == NULL);
6211
6212 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \
6213 softs->total_slots);
6214 softs->io_slot = NULL;
6215 softs->total_slots = 0;
6216 }
6217
6218 struct aac_slot *
6219 aac_get_slot(struct aac_softstate *softs)
6220 {
6221 struct aac_slot *slotp;
6222
6223 if ((slotp = softs->free_io_slot_head) != NULL) {
6224 softs->free_io_slot_head = slotp->next;
6225 slotp->next = NULL;
6226 }
6227 return (slotp);
6228 }
6229
6230 static void
6231 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp)
6232 {
6233 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots));
6234 ASSERT(slotp == &softs->io_slot[slotp->index]);
6235
6236 slotp->acp = NULL;
6237 slotp->next = softs->free_io_slot_head;
6238 softs->free_io_slot_head = slotp;
6239 }
6240
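/*
 * Dispatch a command: start it if a slot is available, otherwise put it
 * on the wait queue; for polled and sync commands wait for completion here.
 */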
6241 int
6242 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp)
6243 {
6244 if (aac_bind_io(softs, acp) == AACOK)
6245 aac_start_io(softs, acp);
6246 else
6247 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp);
6248
6249 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR)))
6250 return (TRAN_ACCEPT);
6251 /*
6252 * Because the sync FIB is always 512 bytes and reserved for critical
6253 * functions, async FIBs are used for polled I/O.
6254 */
6255 if (acp->flags & AAC_CMD_NO_INTR) {
6256 if (aac_do_poll_io(softs, acp) == AACOK)
6257 return (TRAN_ACCEPT);
6258 } else {
6259 if (aac_do_sync_io(softs, acp) == AACOK)
6260 return (TRAN_ACCEPT);
6261 }
6262 return (TRAN_BADPKT);
6263 }
6264
6265 static int
6266 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp)
6267 {
6268 int (*intr_handler)(struct aac_softstate *);
6269
6270 /*
6271 * Interrupts are disabled, so we have to poll the adapter ourselves.
6272 */
6273 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
6274 aac_process_intr_new : aac_process_intr_old;
6275 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) {
6276 int i = AAC_POLL_TIME * 1000;
6277
6278 AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i);
6279 if (i == 0)
6280 aac_cmd_timeout(softs, acp);
6281 }
6282
6283 ddi_trigger_softintr(softs->softint_id);
6284
6285 if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR))
6286 return (AACOK);
6287 return (AACERR);
6288 }
6289
6290 static int
6291 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp)
6292 {
6293 ASSERT(softs && acp);
6294
6295 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT)))
6296 cv_wait(&softs->event, &softs->io_lock);
6297
6298 if (acp->flags & AAC_CMD_CMPLT)
6299 return (AACOK);
6300 return (AACERR);
6301 }
6302
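/*
 * Sync the command's data buffer for the device or the CPU, copying
 * through the intermediate buffer (abp) if one is in use.
 */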
6303 static int
6304 aac_dma_sync_ac(struct aac_cmd *acp)
6305 {
6306 if (acp->buf_dma_handle) {
6307 if (acp->flags & AAC_CMD_BUF_WRITE) {
6308 if (acp->abp != NULL)
6309 ddi_rep_put8(acp->abh,
6310 (uint8_t *)acp->bp->b_un.b_addr,
6311 (uint8_t *)acp->abp, acp->bp->b_bcount,
6312 DDI_DEV_AUTOINCR);
6313 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6314 DDI_DMA_SYNC_FORDEV);
6315 } else {
6316 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6317 DDI_DMA_SYNC_FORCPU);
6318 if (aac_check_dma_handle(acp->buf_dma_handle) !=
6319 DDI_SUCCESS)
6320 return (AACERR);
6321 if (acp->abp != NULL)
6322 ddi_rep_get8(acp->abh,
6323 (uint8_t *)acp->bp->b_un.b_addr,
6324 (uint8_t *)acp->abp, acp->bp->b_bcount,
6325 DDI_DEV_AUTOINCR);
6326 }
6327 }
6328 return (AACOK);
6329 }
6330
6331 /*
6332 * Copy AIF from adapter to the empty AIF slot and inform AIF threads
6333 */
6334 static void
6335 aac_save_aif(struct aac_softstate *softs, ddi_acc_handle_t acc,
6336 struct aac_fib *fibp0, int fib_size0)
6337 {
6338 struct aac_fib *fibp; /* FIB in AIF queue */
6339 int fib_size;
6340 uint16_t fib_command;
6341 int current, next;
6342
6343 /* Ignore non AIF messages */
6344 fib_command = ddi_get16(acc, &fibp0->Header.Command);
6345 if (fib_command != AifRequest) {
6346 cmn_err(CE_WARN, "!Unknown command from controller");
6347 return;
6348 }
6349
6350 mutex_enter(&softs->aifq_mutex);
6351
6352 /* Save AIF */
6353 fibp = &softs->aifq[softs->aifq_idx].d;
6354 fib_size = (fib_size0 > AAC_FIB_SIZE) ? AAC_FIB_SIZE : fib_size0;
6355 ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0, fib_size,
6356 DDI_DEV_AUTOINCR);
6357
6358 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
6359 ddi_fm_service_impact(softs->devinfo_p,
6360 DDI_SERVICE_UNAFFECTED);
6361 mutex_exit(&softs->aifq_mutex);
6362 return;
6363 }
6364
6365 AACDB_PRINT_AIF(softs, (struct aac_aif_command *)&fibp->data[0]);
6366
6367 /* Modify AIF contexts */
6368 current = softs->aifq_idx;
6369 next = (current + 1) % AAC_AIFQ_LENGTH;
6370 if (next == 0) {
6371 struct aac_fib_context *ctx_p;
6372
6373 softs->aifq_wrap = 1;
6374 for (ctx_p = softs->fibctx_p; ctx_p; ctx_p = ctx_p->next) {
6375 if (next == ctx_p->ctx_idx) {
6376 ctx_p->ctx_flags |= AAC_CTXFLAG_FILLED;
6377 } else if (current == ctx_p->ctx_idx &&
6378 (ctx_p->ctx_flags & AAC_CTXFLAG_FILLED)) {
6379 ctx_p->ctx_idx = next;
6380 ctx_p->ctx_overrun++;
6381 }
6382 }
6383 }
6384 softs->aifq_idx = next;
6385
6386 /* Wakeup AIF threads */
6387 cv_broadcast(&softs->aifq_cv);
6388 mutex_exit(&softs->aifq_mutex);
6389
6390 /* Wakeup event thread to handle aif */
6391 aac_event_disp(softs, AAC_EVENT_AIF);
6392 }
6393
6394 static int
6395 aac_return_aif_common(struct aac_softstate *softs, struct aac_fib_context *ctx,
6396 struct aac_fib **fibpp)
6397 {
6398 int current;
6399
6400 current = ctx->ctx_idx;
6401 if (current == softs->aifq_idx &&
6402 !(ctx->ctx_flags & AAC_CTXFLAG_FILLED))
6403 return (EAGAIN); /* Empty */
6404
6405 *fibpp = &softs->aifq[current].d;
6406
6407 ctx->ctx_flags &= ~AAC_CTXFLAG_FILLED;
6408 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
6409 return (0);
6410 }
6411
6412 int
6413 aac_return_aif(struct aac_softstate *softs, struct aac_fib_context *ctx,
6414 struct aac_fib **fibpp)
6415 {
6416 int rval;
6417
6418 mutex_enter(&softs->aifq_mutex);
6419 rval = aac_return_aif_common(softs, ctx, fibpp);
6420 mutex_exit(&softs->aifq_mutex);
6421 return (rval);
6422 }
6423
6424 int
6425 aac_return_aif_wait(struct aac_softstate *softs, struct aac_fib_context *ctx,
6426 struct aac_fib **fibpp)
6427 {
6428 int rval;
6429
6430 mutex_enter(&softs->aifq_mutex);
6431 rval = aac_return_aif_common(softs, ctx, fibpp);
6432 if (rval == EAGAIN) {
6433 AACDB_PRINT(softs, CE_NOTE, "Waiting for AIF");
6434 rval = cv_wait_sig(&softs->aifq_cv, &softs->aifq_mutex);
6435 }
6436 mutex_exit(&softs->aifq_mutex);
6437 return ((rval > 0) ? 0 : EINTR);
6438 }
6439
6440 /*
6441 * The following function comes from Adaptec:
6442 *
6443 * When the driver sees a particular event indicating that containers have
6444 * changed, it rescans the containers. However, a change may not be complete
6445 * until some other event is received. For example, creating or deleting an
6446 * array can incur as many as six AifEnConfigChange events, which would
6447 * generate six container rescans. To reduce rescans, the driver sets a flag
6448 * to wait for another particular event; when that event comes in, it rescans.
6449 */
6450 static int
6451 aac_handle_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
6452 {
6453 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
6454 int en_type;
6455 int devcfg_needed;
6456 int cid;
6457 uint32_t bus_id, tgt_id;
6458 enum aac_cfg_event event = AAC_CFG_NULL_EXIST;
6459
6460 devcfg_needed = 0;
6461 en_type = LE_32((uint32_t)aif->data.EN.type);
6462
6463 switch (LE_32((uint32_t)aif->command)) {
6464 case AifCmdDriverNotify: {
6465 cid = LE_32(aif->data.EN.data.ECC.container[0]);
6466
6467 switch (en_type) {
6468 case AifDenMorphComplete:
6469 case AifDenVolumeExtendComplete:
6470 if (AAC_DEV_IS_VALID(&softs->containers[cid].dev))
6471 softs->devcfg_wait_on = AifEnConfigChange;
6472 break;
6473 }
6474 if (softs->devcfg_wait_on == en_type)
6475 devcfg_needed = 1;
6476 break;
6477 }
6478
6479 case AifCmdEventNotify:
6480 cid = LE_32(aif->data.EN.data.ECC.container[0]);
6481 switch (en_type) {
6482 case AifEnAddContainer:
6483 case AifEnDeleteContainer:
6484 softs->devcfg_wait_on = AifEnConfigChange;
6485 break;
6486 case AifEnContainerChange:
6487 if (!softs->devcfg_wait_on)
6488 softs->devcfg_wait_on = AifEnConfigChange;
6489 break;
6490 case AifEnContainerEvent:
6491 if (ddi_get32(acc, &aif-> \
6492 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE)
6493 devcfg_needed = 1;
6494 break;
6495 case AifEnAddJBOD:
6496 if (!(softs->flags & AAC_FLAGS_JBOD))
6497 return (AACERR);
6498 event = AAC_CFG_ADD;
6499 bus_id = (cid >> 24) & 0xf;
6500 tgt_id = cid & 0xffff;
6501 break;
6502 case AifEnDeleteJBOD:
6503 if (!(softs->flags & AAC_FLAGS_JBOD))
6504 return (AACERR);
6505 event = AAC_CFG_DELETE;
6506 bus_id = (cid >> 24) & 0xf;
6507 tgt_id = cid & 0xffff;
6508 break;
6509 }
6510 if (softs->devcfg_wait_on == en_type)
6511 devcfg_needed = 1;
6512 break;
6513
6514 case AifCmdJobProgress:
6515 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) {
6516 int pr_status;
6517 uint32_t pr_ftick, pr_ctick;
6518
6519 pr_status = LE_32((uint32_t)aif->data.PR[0].status);
6520 pr_ctick = LE_32(aif->data.PR[0].currentTick);
6521 pr_ftick = LE_32(aif->data.PR[0].finalTick);
6522
6523 if ((pr_ctick == pr_ftick) ||
6524 (pr_status == AifJobStsSuccess))
6525 softs->devcfg_wait_on = AifEnContainerChange;
6526 else if ((pr_ctick == 0) &&
6527 (pr_status == AifJobStsRunning))
6528 softs->devcfg_wait_on = AifEnContainerChange;
6529 }
6530 break;
6531 }
6532
6533 if (devcfg_needed) {
6534 softs->devcfg_wait_on = 0;
6535 (void) aac_probe_containers(softs);
6536 }
6537
6538 if (event != AAC_CFG_NULL_EXIST) {
6539 ASSERT(en_type == AifEnAddJBOD || en_type == AifEnDeleteJBOD);
6540 (void) aac_probe_jbod(softs,
6541 AAC_P2VTGT(softs, bus_id, tgt_id), event);
6542 }
6543 return (AACOK);
6544 }
6545
6546
6547 /*
6548 * Check and handle AIF events
6549 */
6550 static void
6551 aac_aif_event(struct aac_softstate *softs)
6552 {
6553 struct aac_fib *fibp;
6554
6555 /*CONSTCOND*/
6556 while (1) {
6557 if (aac_return_aif(softs, &softs->aifctx, &fibp) != 0)
6558 break; /* No more AIFs to handle, end loop */
6559
6560 /* AIF overrun; array create/delete events may have been missed. */
6561 if (softs->aifctx.ctx_overrun) {
6562 softs->aifctx.ctx_overrun = 0;
6563 }
6564
6565 /* AIF received, handle it */
6566 struct aac_aif_command *aifp =
6567 (struct aac_aif_command *)&fibp->data[0];
6568 uint32_t aif_command = LE_32((uint32_t)aifp->command);
6569
6570 if (aif_command == AifCmdDriverNotify ||
6571 aif_command == AifCmdEventNotify ||
6572 aif_command == AifCmdJobProgress)
6573 (void) aac_handle_aif(softs, aifp);
6574 }
6575 }
6576
6577 /*
6578 * Timeout recovery
6579 */
6580 /*ARGSUSED*/
6581 static void
6582 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp)
6583 {
6584 #ifdef DEBUG
6585 acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT;
6586 AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp);
6587 AACDB_PRINT_FIB(softs, acp->slotp);
6588 #endif
6589
6590 /*
6591 * Besides firmware in an unhealthy state, an overloaded
6592 * adapter may also incur pkt timeouts.
6593 * There is a chance that an adapter with a slower IOP takes
6594 * longer than 60 seconds to process commands, for example
6595 * when the adapter is doing a build on a RAID-5 while still
6596 * servicing I/O, so longer completion times should be
6597 * tolerated.
6598 */
6599 switch (aac_do_reset(softs)) {
6600 case AAC_IOP_RESET_SUCCEED:
6601 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET);
6602 aac_start_waiting_io(softs);
6603 break;
6604 case AAC_IOP_RESET_FAILED:
6605 /* Abort all waiting cmds when adapter is dead */
6606 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT);
6607 break;
6608 case AAC_IOP_RESET_ABNORMAL:
6609 aac_start_waiting_io(softs);
6610 }
6611 }
6612
6613 /*
6614 * The following function comes from Adaptec:
6615 *
6616 * A time sync command is sent to synchronize time with the firmware every
6617 * 30 minutes (required for correct AIF timestamps etc.).
6618 */
6619 static void
6620 aac_sync_tick(struct aac_softstate *softs)
6621 {
6622 ddi_acc_handle_t acc;
6623 int rval;
6624
6625 mutex_enter(&softs->time_mutex);
6626 ASSERT(softs->time_sync <= softs->timebase);
6627 softs->time_sync = 0;
6628 mutex_exit(&softs->time_mutex);
6629
6630 /* Time sync. with firmware every AAC_SYNC_TICK */
6631 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
6632 acc = softs->sync_ac.slotp->fib_acc_handle;
6633
6634 ddi_put32(acc, (void *)&softs->sync_ac.slotp->fibp->data[0],
6635 ddi_get_time());
6636 rval = aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t));
6637 aac_sync_fib_slot_release(softs, &softs->sync_ac);
6638
6639 mutex_enter(&softs->time_mutex);
6640 softs->time_sync = softs->timebase;
6641 if (rval != AACOK)
6642 /* retry shortly */
6643 softs->time_sync += aac_tick << 1;
6644 else
6645 softs->time_sync += AAC_SYNC_TICK;
6646 mutex_exit(&softs->time_mutex);
6647 }
6648
6649 /*
6650 * Timeout checking and handling
6651 */
6652 static void
6653 aac_daemon(struct aac_softstate *softs)
6654 {
6655 int time_out; /* set if timeout happened */
6656 int time_adjust;
6657 uint32_t softs_timebase;
6658
6659 mutex_enter(&softs->time_mutex);
6660 ASSERT(softs->time_out <= softs->timebase);
6661 softs->time_out = 0;
6662 softs_timebase = softs->timebase;
6663 mutex_exit(&softs->time_mutex);
6664
6665 /* Check slots for timeout pkts */
6666 time_adjust = 0;
6667 do {
6668 struct aac_cmd *acp;
6669
6670 time_out = 0;
6671 for (acp = softs->q_busy.q_head; acp; acp = acp->next) {
6672 if (acp->timeout == 0)
6673 continue;
6674
6675 /*
6676 * If timeout happened, update outstanding cmds
6677 * to be checked later again.
6678 */
6679 if (time_adjust) {
6680 acp->timeout += time_adjust;
6681 continue;
6682 }
6683
6684 if (acp->timeout <= softs_timebase) {
6685 aac_cmd_timeout(softs, acp);
6686 time_out = 1;
6687 time_adjust = aac_tick * drv_usectohz(1000000);
6688 break; /* timeout happened */
6689 } else {
6690 break; /* no timeout */
6691 }
6692 }
6693 } while (time_out);
6694
6695 mutex_enter(&softs->time_mutex);
6696 softs->time_out = softs->timebase + aac_tick;
6697 mutex_exit(&softs->time_mutex);
6698 }
6699
6700 /*
6701 * The event thread handles various tasks serially for the other parts of
6702 * the driver, so that they can run fast.
6703 */
6704 static void
6705 aac_event_thread(struct aac_softstate *softs)
6706 {
6707 int run = 1;
6708
6709 DBCALLED(softs, 1);
6710
6711 mutex_enter(&softs->ev_lock);
6712 while (run) {
6713 int events;
6714
6715 if ((events = softs->events) == 0) {
6716 cv_wait(&softs->event_disp_cv, &softs->ev_lock);
6717 events = softs->events;
6718 }
6719 softs->events = 0;
6720 mutex_exit(&softs->ev_lock);
6721
6722 mutex_enter(&softs->io_lock);
6723 if ((softs->state & AAC_STATE_RUN) &&
6724 (softs->state & AAC_STATE_DEAD) == 0) {
6725 if (events & AAC_EVENT_TIMEOUT)
6726 aac_daemon(softs);
6727 if (events & AAC_EVENT_SYNCTICK)
6728 aac_sync_tick(softs);
6729 if (events & AAC_EVENT_AIF)
6730 aac_aif_event(softs);
6731 } else {
6732 run = 0;
6733 }
6734 mutex_exit(&softs->io_lock);
6735
6736 mutex_enter(&softs->ev_lock);
6737 }
6738
6739 cv_signal(&softs->event_wait_cv);
6740 mutex_exit(&softs->ev_lock);
6741 }
6742
6743 /*
6744 * Internal timer. It is only responsible for time counting and reporting
6745 * time related events. Event handling is done by aac_event_thread(), so
6746 * that the timer itself can be as precise as possible.
6747 */
6748 static void
6749 aac_timer(void *arg)
6750 {
6751 struct aac_softstate *softs = arg;
6752 int events = 0;
6753
6754 mutex_enter(&softs->time_mutex);
6755
6756 /* If timer is being stopped, exit */
6757 if (softs->timeout_id) {
6758 softs->timeout_id = timeout(aac_timer, (void *)softs,
6759 (aac_tick * drv_usectohz(1000000)));
6760 } else {
6761 mutex_exit(&softs->time_mutex);
6762 return;
6763 }
6764
6765 /* Time counting */
6766 softs->timebase += aac_tick;
6767
6768 /* Check time related events */
6769 if (softs->time_out && softs->time_out <= softs->timebase)
6770 events |= AAC_EVENT_TIMEOUT;
6771 if (softs->time_sync && softs->time_sync <= softs->timebase)
6772 events |= AAC_EVENT_SYNCTICK;
6773
6774 mutex_exit(&softs->time_mutex);
6775
6776 if (events)
6777 aac_event_disp(softs, events);
6778 }
6779
6780 /*
6781 * Dispatch events to daemon thread for handling
6782 */
6783 static void
6784 aac_event_disp(struct aac_softstate *softs, int events)
6785 {
6786 mutex_enter(&softs->ev_lock);
6787 softs->events |= events;
6788 cv_broadcast(&softs->event_disp_cv);
6789 mutex_exit(&softs->ev_lock);
6790 }
6791
6792 /*
6793 * Architecture dependent functions
6794 */
6795 static int
6796 aac_rx_get_fwstatus(struct aac_softstate *softs)
6797 {
6798 return (PCI_MEM_GET32(softs, AAC_OMR0));
6799 }
6800
6801 static int
6802 aac_rx_get_mailbox(struct aac_softstate *softs, int mb)
6803 {
6804 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4));
6805 }
6806
6807 static void
6808 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6809 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6810 {
6811 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd);
6812 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0);
6813 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1);
6814 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2);
6815 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3);
6816 }
6817
6818 static int
6819 aac_rkt_get_fwstatus(struct aac_softstate *softs)
6820 {
6821 return (PCI_MEM_GET32(softs, AAC_OMR0));
6822 }
6823
6824 static int
6825 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb)
6826 {
6827 return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb * 4));
6828 }
6829
6830 static void
6831 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6832 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6833 {
6834 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd);
6835 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0);
6836 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1);
6837 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2);
6838 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3);
6839 }
6840
6841 /*
6842 * cb_ops functions
6843 */
6844 static int
6845 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred)
6846 {
6847 struct aac_softstate *softs;
6848 int minor0, minor;
6849 int instance;
6850
6851 DBCALLED(NULL, 2);
6852
6853 if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6854 return (EINVAL);
6855
6856 minor0 = getminor(*devp);
6857 minor = AAC_SCSA_MINOR(minor0);
6858
6859 if (AAC_IS_SCSA_NODE(minor))
6860 return (scsi_hba_open(devp, flag, otyp, cred));
6861
6862 instance = MINOR2INST(minor0);
6863 if (instance >= AAC_MAX_ADAPTERS)
6864 return (ENXIO);
6865
6866 softs = ddi_get_soft_state(aac_softstatep, instance);
6867 if (softs == NULL)
6868 return (ENXIO);
6869
6870 return (0);
6871 }
6872
6873 /*ARGSUSED*/
6874 static int
6875 aac_close(dev_t dev, int flag, int otyp, cred_t *cred)
6876 {
6877 int minor0, minor;
6878 int instance;
6879
6880 DBCALLED(NULL, 2);
6881
6882 if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6883 return (EINVAL);
6884
6885 minor0 = getminor(dev);
6886 minor = AAC_SCSA_MINOR(minor0);
6887
6888 if (AAC_IS_SCSA_NODE(minor))
6889 return (scsi_hba_close(dev, flag, otyp, cred));
6890
6891 instance = MINOR2INST(minor0);
6892 if (instance >= AAC_MAX_ADAPTERS)
6893 return (ENXIO);
6894
6895 return (0);
6896 }
6897
6898 static int
6899 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p,
6900 int *rval_p)
6901 {
6902 struct aac_softstate *softs;
6903 int minor0, minor;
6904 int instance;
6905
6906 DBCALLED(NULL, 2);
6907
6908 if (drv_priv(cred_p) != 0)
6909 return (EPERM);
6910
6911 minor0 = getminor(dev);
6912 minor = AAC_SCSA_MINOR(minor0);
6913
6914 if (AAC_IS_SCSA_NODE(minor))
6915 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p));
6916
6917 instance = MINOR2INST(minor0);
6918 if (instance < AAC_MAX_ADAPTERS) {
6919 softs = ddi_get_soft_state(aac_softstatep, instance);
6920 return (aac_do_ioctl(softs, dev, cmd, arg, flag));
6921 }
6922 return (ENXIO);
6923 }
6924
6925 /*
6926 * The IO fault service error handling callback function
6927 */
6928 /*ARGSUSED*/
6929 static int
6930 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6931 {
6932 /*
6933 * As the driver can always deal with an error in any DMA or
6934 * access handle, we can just return the fme_status value.
6935 */
6936 pci_ereport_post(dip, err, NULL);
6937 return (err->fme_status);
6938 }
6939
6940 /*
6941 * aac_fm_init - initialize fma capabilities and register with IO
6942 * fault services.
6943 */
6944 static void
6945 aac_fm_init(struct aac_softstate *softs)
6946 {
6947 /*
6948 * Need to change iblock to priority for new MSI intr
6949 */
6950 ddi_iblock_cookie_t fm_ibc;
6951
6952 softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p,
6953 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
6954 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
6955 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
6956
6957 /* Only register with IO Fault Services if we have some capability */
6958 if (softs->fm_capabilities) {
6959 /* Adjust access and dma attributes for FMA */
6960 softs->reg_attr.devacc_attr_access = DDI_FLAGERR_ACC;
6961 softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6962 softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6963
6964 /*
6965 * Register capabilities with IO Fault Services.
6966 * fm_capabilities will be updated to indicate
6967 * capabilities actually supported (not requested.)
6968 */
6969 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc);
6970
6971 /*
6972 * Initialize pci ereport capabilities if ereport
6973 * capable (should always be.)
6974 */
6975 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6976 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6977 pci_ereport_setup(softs->devinfo_p);
6978 }
6979
6980 /*
6981 * Register error callback if error callback capable.
6982 */
6983 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6984 ddi_fm_handler_register(softs->devinfo_p,
6985 aac_fm_error_cb, (void *) softs);
6986 }
6987 }
6988 }
6989
6990 /*
6991 * aac_fm_fini - Releases fma capabilities and un-registers with IO
6992 * fault services.
6993 */
6994 static void
6995 aac_fm_fini(struct aac_softstate *softs)
6996 {
6997 /* Only unregister FMA capabilities if registered */
6998 if (softs->fm_capabilities) {
6999 /*
7000 * Un-register error callback if error callback capable.
7001 */
7002 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
7003 ddi_fm_handler_unregister(softs->devinfo_p);
7004 }
7005
7006 /*
7007 * Release any resources allocated by pci_ereport_setup()
7008 */
7009 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
7010 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
7011 pci_ereport_teardown(softs->devinfo_p);
7012 }
7013
7014 /* Unregister from IO Fault Services */
7015 ddi_fm_fini(softs->devinfo_p);
7016
7017 /* Adjust access and dma attributes for FMA */
7018 softs->reg_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7019 softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
7020 softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
7021 }
7022 }
7023
7024 int
7025 aac_check_acc_handle(ddi_acc_handle_t handle)
7026 {
7027 ddi_fm_error_t de;
7028
7029 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7030 return (de.fme_status);
7031 }
7032
7033 int
7034 aac_check_dma_handle(ddi_dma_handle_t handle)
7035 {
7036 ddi_fm_error_t de;
7037
7038 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7039 return (de.fme_status);
7040 }
7041
7042 void
7043 aac_fm_ereport(struct aac_softstate *softs, char *detail)
7044 {
7045 uint64_t ena;
7046 char buf[FM_MAX_CLASS];
7047
7048 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7049 ena = fm_ena_generate(0, FM_ENA_FMT1);
7050 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) {
7051 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP,
7052 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7053 }
7054 }
7055
7056 /*
7057 * Autoconfiguration support
7058 */
7059 static int
7060 aac_parse_devname(char *devnm, int *tgt, int *lun)
7061 {
7062 char devbuf[SCSI_MAXNAMELEN];
7063 char *addr;
7064 char *p, *tp, *lp;
7065 long num;
7066
7067 /* Parse dev name and address */
7068 (void) strcpy(devbuf, devnm);
7069 addr = "";
7070 for (p = devbuf; *p != '\0'; p++) {
7071 if (*p == '@') {
7072 addr = p + 1;
7073 *p = '\0';
7074 } else if (*p == ':') {
7075 *p = '\0';
7076 break;
7077 }
7078 }
7079
7080 /* Parse target and lun */
7081 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7082 if (*p == ',') {
7083 lp = p + 1;
7084 *p = '\0';
7085 break;
7086 }
7087 }
7088 if (tgt && tp) {
7089 if (ddi_strtol(tp, NULL, 0x10, &num))
7090 return (AACERR);
7091 *tgt = (int)num;
7092 }
7093 if (lun && lp) {
7094 if (ddi_strtol(lp, NULL, 0x10, &num))
7095 return (AACERR);
7096 *lun = (int)num;
7097 }
7098 return (AACOK);
7099 }
7100
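/*
 * Look up the dev_info node of a configured target/lun, if any
 */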
7101 static dev_info_t *
7102 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun)
7103 {
7104 dev_info_t *child = NULL;
7105 char addr[SCSI_MAXNAMELEN];
7106 char tmp[MAXNAMELEN];
7107
7108 if (tgt < AAC_MAX_LD) {
7109 if (lun == 0) {
7110 struct aac_device *dvp = &softs->containers[tgt].dev;
7111
7112 child = dvp->dip;
7113 }
7114 } else {
7115 (void) sprintf(addr, "%x,%x", tgt, lun);
7116 for (child = ddi_get_child(softs->devinfo_p);
7117 child; child = ddi_get_next_sibling(child)) {
7118 /* We don't care about non-persistent nodes */
7119 if (ndi_dev_is_persistent_node(child) == 0)
7120 continue;
7121
7122 if (aac_name_node(child, tmp, MAXNAMELEN) !=
7123 DDI_SUCCESS)
7124 continue;
7125 if (strcmp(addr, tmp) == 0)
7126 break;
7127 }
7128 }
7129 return (child);
7130 }
7131
7132 static int
7133 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd,
7134 dev_info_t **dipp)
7135 {
7136 char *nodename = NULL;
7137 char **compatible = NULL;
7138 int ncompatible = 0;
7139 char *childname;
7140 dev_info_t *ldip = NULL;
7141 int tgt = sd->sd_address.a_target;
7142 int lun = sd->sd_address.a_lun;
7143 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7144 int rval;
7145
7146 DBCALLED(softs, 2);
7147
7148 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7149 NULL, &nodename, &compatible, &ncompatible);
7150 if (nodename == NULL) {
7151 AACDB_PRINT(softs, CE_WARN,
7152 "found no comptible driver for t%dL%d", tgt, lun);
7153 rval = NDI_FAILURE;
7154 goto finish;
7155 }
7156 childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename;
7157
7158 /* Create dev node */
7159 rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID,
7160 &ldip);
7161 if (rval == NDI_SUCCESS) {
7162 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt)
7163 != DDI_PROP_SUCCESS) {
7164 AACDB_PRINT(softs, CE_WARN, "unable to create "
7165 "property for t%dL%d (target)", tgt, lun);
7166 rval = NDI_FAILURE;
7167 goto finish;
7168 }
7169 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun)
7170 != DDI_PROP_SUCCESS) {
7171 AACDB_PRINT(softs, CE_WARN, "unable to create "
7172 "property for t%dL%d (lun)", tgt, lun);
7173 rval = NDI_FAILURE;
7174 goto finish;
7175 }
7176 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7177 "compatible", compatible, ncompatible)
7178 != DDI_PROP_SUCCESS) {
7179 AACDB_PRINT(softs, CE_WARN, "unable to create "
7180 "property for t%dL%d (compatible)", tgt, lun);
7181 rval = NDI_FAILURE;
7182 goto finish;
7183 }
7184
7185 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7186 if (rval != NDI_SUCCESS) {
7187 AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d",
7188 tgt, lun);
7189 ndi_prop_remove_all(ldip);
7190 (void) ndi_devi_free(ldip);
7191 }
7192 }
7193 finish:
7194 if (dipp)
7195 *dipp = ldip;
7196
7197 scsi_hba_nodename_compatible_free(nodename, compatible);
7198 return (rval);
7199 }
7200
7201 /*ARGSUSED*/
7202 static int
7203 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd)
7204 {
7205 int tgt = sd->sd_address.a_target;
7206 int lun = sd->sd_address.a_lun;
7207
7208 DBCALLED(softs, 2);
7209
7210 if (tgt < AAC_MAX_LD) {
7211 enum aac_cfg_event event;
7212
7213 if (lun == 0) {
7214 mutex_enter(&softs->io_lock);
7215 event = aac_probe_container(softs, tgt);
7216 mutex_exit(&softs->io_lock);
7217 if ((event != AAC_CFG_NULL_NOEXIST) &&
7218 (event != AAC_CFG_DELETE)) {
7219 if (scsi_hba_probe(sd, NULL) ==
7220 SCSIPROBE_EXISTS)
7221 return (NDI_SUCCESS);
7222 }
7223 }
7224 return (NDI_FAILURE);
7225 } else {
7226 int dtype;
7227 int qual; /* device qualifier */
7228
7229 if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS)
7230 return (NDI_FAILURE);
7231
7232 dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7233 qual = dtype >> 5;
7234
7235 AACDB_PRINT(softs, CE_NOTE,
7236 "Phys. device found: tgt %d dtype %d: %s",
7237 tgt, dtype, sd->sd_inq->inq_vid);
7238
7239 /* Only non-DASD and JBOD mode DASD are allowed exposed */
7240 if (dtype == DTYPE_RODIRECT /* CDROM */ ||
7241 dtype == DTYPE_SEQUENTIAL /* TAPE */ ||
7242 dtype == DTYPE_ESI /* SES */) {
7243 if (!(softs->flags & AAC_FLAGS_NONDASD))
7244 return (NDI_FAILURE);
7245 AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt);
7246
7247 } else if (dtype == DTYPE_DIRECT) {
7248 if (!(softs->flags & AAC_FLAGS_JBOD) || qual != 0)
7249 return (NDI_FAILURE);
7250 AACDB_PRINT(softs, CE_NOTE, "JBOD DASD %d found", tgt);
7251 }
7252
7253 mutex_enter(&softs->io_lock);
7254 softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID;
7255 mutex_exit(&softs->io_lock);
7256 return (NDI_SUCCESS);
7257 }
7258 }
7259
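/*
 * Probe a single target/lun and, if present, configure its child node
 */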
7260 static int
7261 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun,
7262 dev_info_t **ldip)
7263 {
7264 struct scsi_device sd;
7265 dev_info_t *child;
7266 int rval;
7267
7268 DBCALLED(softs, 2);
7269
7270 if ((child = aac_find_child(softs, tgt, lun)) != NULL) {
7271 if (ldip)
7272 *ldip = child;
7273 return (NDI_SUCCESS);
7274 }
7275
7276 bzero(&sd, sizeof (struct scsi_device));
7277 sd.sd_address.a_hba_tran = softs->hba_tran;
7278 sd.sd_address.a_target = (uint16_t)tgt;
7279 sd.sd_address.a_lun = (uint8_t)lun;
7280 if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS)
7281 rval = aac_config_child(softs, &sd, ldip);
7282 /* scsi_unprobe is a no-op now; free the buffer manually */
7283 if (sd.sd_inq) {
7284 kmem_free(sd.sd_inq, SUN_INQSIZE);
7285 sd.sd_inq = (struct scsi_inquiry *)NULL;
7286 }
7287 return (rval);
7288 }
7289
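/*
 * Enumerate the LUNs of a target with SCMD_REPORT_LUNS and configure
 * each of them; returns the number of LUNs configured.
 */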
7290 static int
7291 aac_config_tgt(struct aac_softstate *softs, int tgt)
7292 {
7293 struct scsi_address ap;
7294 struct buf *bp = NULL;
7295 int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE;
7296 int list_len = 0;
7297 int lun_total = 0;
7298 dev_info_t *ldip;
7299 int i;
7300
7301 ap.a_hba_tran = softs->hba_tran;
7302 ap.a_target = (uint16_t)tgt;
7303 ap.a_lun = 0;
7304
7305 for (i = 0; i < 2; i++) {
7306 struct scsi_pkt *pkt;
7307 uchar_t *cdb;
7308 uchar_t *p;
7309 uint32_t data;
7310
7311 if (bp == NULL) {
7312 if ((bp = scsi_alloc_consistent_buf(&ap, NULL,
7313 buf_len, B_READ, NULL_FUNC, NULL)) == NULL)
7314 return (AACERR);
7315 }
7316 if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5,
7317 sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
7318 NULL, NULL)) == NULL) {
7319 scsi_free_consistent_buf(bp);
7320 return (AACERR);
7321 }
7322 cdb = pkt->pkt_cdbp;
7323 bzero(cdb, CDB_GROUP5);
7324 cdb[0] = SCMD_REPORT_LUNS;
7325
7326 /* Set the allocation length in the CDB (SCSI CDB fields are big-endian) */
7327 data = buf_len;
7328 for (p = &cdb[9]; p > &cdb[5]; p--) {
7329 *p = data & 0xff;
7330 data >>= 8;
7331 }
7332
7333 if (scsi_poll(pkt) < 0 ||
7334 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) {
7335 scsi_destroy_pkt(pkt);
7336 break;
7337 }
7338
7339 /* Decode the big-endian LUN list length (data is zero here after the CDB loop) */
7340 for (p = (uchar_t *)bp->b_un.b_addr;
7341 p < (uchar_t *)bp->b_un.b_addr + 4; p++) {
7342 data <<= 8;
7343 data |= *p;
7344 }
7345 list_len = data;
7346 if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) {
7347 scsi_free_consistent_buf(bp);
7348 bp = NULL;
7349 buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE;
7350 }
7351 scsi_destroy_pkt(pkt);
7352 }
7353 if (i >= 2) { /* SCMD_REPORT_LUNS succeeded; walk the returned LUN list */
7354 uint8_t *buf = (uint8_t *)(bp->b_un.b_addr +
7355 AAC_SCSI_RPTLUNS_HEAD_SIZE);
7356
7357 for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) {
7358 uint16_t lun;
7359
7360 /* Determine report luns addressing type */
7361 switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) {
7362 /*
7363 * Vendors in the field have been found to be
7364 * concatenating bus/target/lun to equal the
7365 * complete lun value instead of switching to
7366 * flat space addressing
7367 */
7368 case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL:
7369 case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT:
7370 case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE:
7371 lun = ((buf[0] & 0x3f) << 8) | buf[1];
7372 if (lun > UINT8_MAX) {
7373 AACDB_PRINT(softs, CE_WARN,
7374 "abnormal lun number: %d", lun);
7375 break;
7376 }
7377 if (aac_config_lun(softs, tgt, lun, &ldip) ==
7378 NDI_SUCCESS)
7379 lun_total++;
7380 break;
7381 }
7382
7383 buf += AAC_SCSI_RPTLUNS_ADDR_SIZE;
7384 }
7385 } else {
7386 /* The target may not support SCMD_REPORT_LUNS */
7387 if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS)
7388 lun_total++;
7389 }
7390 scsi_free_consistent_buf(bp);
7391 return (lun_total);
7392 }
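
/*
 * Illustration only, not part of the driver (kept under #if 0): how a single
 * 8-byte SCMD_REPORT_LUNS address entry is turned into a LUN number in
 * aac_config_tgt() above for the peripheral, flat-space and logical-unit
 * addressing formats.  The entry is big-endian; the LUN is taken from the
 * low 6 bits of byte 0 and all of byte 1.  The helper name is the sketch's
 * own.
 */
#if 0
static uint16_t
aac_rptluns_entry_to_lun_sketch(const uint8_t *entry)
{
	/* byte 0 bits 5-0 are the LUN high bits, byte 1 the low bits */
	return ((uint16_t)(((entry[0] & 0x3f) << 8) | entry[1]));
}
#endif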
7393
7394 static void
7395 aac_devcfg(struct aac_softstate *softs, int tgt, int en)
7396 {
7397 struct aac_device *dvp;
7398
7399 mutex_enter(&softs->io_lock);
7400 dvp = AAC_DEV(softs, tgt);
7401 if (en)
7402 dvp->flags |= AAC_DFLAG_CONFIGURING;
7403 else
7404 dvp->flags &= ~AAC_DFLAG_CONFIGURING;
7405 mutex_exit(&softs->io_lock);
7406 }
7407
7408 static int
7409 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op,
7410 void *arg, dev_info_t **childp)
7411 {
7412 struct aac_softstate *softs;
7413 int circ = 0;
7414 int rval;
7415
7416 if ((softs = ddi_get_soft_state(aac_softstatep,
7417 ddi_get_instance(parent))) == NULL)
7418 return (NDI_FAILURE);
7419
7420 /* Bus config requests are rejected while the bus is quiesced */
7421 mutex_enter(&softs->io_lock);
7422 if (softs->state & AAC_STATE_QUIESCED) {
7423 AACDB_PRINT(softs, CE_NOTE,
7424 "bus_config abroted because bus is quiesced");
7425 mutex_exit(&softs->io_lock);
7426 return (NDI_FAILURE);
7427 }
7428 mutex_exit(&softs->io_lock);
7429
7430 DBCALLED(softs, 1);
7431
7432 /* Hold the nexus across the bus_config */
7433 ndi_devi_enter(parent, &circ);
7434 switch (op) {
7435 case BUS_CONFIG_ONE: {
7436 int tgt, lun;
7437
7438 if (aac_parse_devname(arg, &tgt, &lun) != AACOK) {
7439 rval = NDI_FAILURE;
7440 break;
7441 }
7442 if (tgt >= AAC_MAX_LD) {
7443 if (tgt >= AAC_MAX_DEV(softs)) {
7444 rval = NDI_FAILURE;
7445 break;
7446 }
7447 }
7448
7449 AAC_DEVCFG_BEGIN(softs, tgt);
7450 rval = aac_config_lun(softs, tgt, lun, childp);
7451 AAC_DEVCFG_END(softs, tgt);
7452 break;
7453 }
7454
7455 case BUS_CONFIG_DRIVER:
7456 case BUS_CONFIG_ALL: {
7457 uint32_t bus, tgt;
7458 int index, total;
7459
7460 for (tgt = 0; tgt < AAC_MAX_LD; tgt++) {
7461 AAC_DEVCFG_BEGIN(softs, tgt);
7462 (void) aac_config_lun(softs, tgt, 0, NULL);
7463 AAC_DEVCFG_END(softs, tgt);
7464 }
7465
7466 /* Config the non-DASD devices connected to the card */
7467 total = 0;
7468 index = AAC_MAX_LD;
7469 for (bus = 0; bus < softs->bus_max; bus++) {
7470 AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus);
7471 for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) {
7472 AAC_DEVCFG_BEGIN(softs, index);
7473 if (aac_config_tgt(softs, index))
7474 total++;
7475 AAC_DEVCFG_END(softs, index);
7476 }
7477 }
7478 AACDB_PRINT(softs, CE_CONT,
7479 "?Total %d phys. device(s) found", total);
7480 rval = NDI_SUCCESS;
7481 break;
7482 }
7483 }
7484
7485 if (rval == NDI_SUCCESS)
7486 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7487 ndi_devi_exit(parent, circ);
7488 return (rval);
7489 }
7490
7491 /*ARGSUSED*/
7492 static int
7493 aac_handle_dr(struct aac_softstate *softs, int tgt, int lun, int event)
7494 {
7495 struct aac_device *dvp;
7496 dev_info_t *dip;
7497 int valid;
7498 int circ1 = 0;
7499
7500 DBCALLED(softs, 1);
7501
7502 /* Snapshot the device state while the caller still holds io_lock */
7503 dvp = AAC_DEV(softs, tgt);
7504 valid = AAC_DEV_IS_VALID(dvp);
7505 dip = dvp->dip;
7506 if (!(softs->state & AAC_STATE_RUN))
7507 return (AACERR);
7508 mutex_exit(&softs->io_lock);
7509
7510 switch (event) {
7511 case AAC_CFG_ADD:
7512 case AAC_CFG_DELETE:
7513 /* Device onlined */
7514 if (dip == NULL && valid) {
7515 ndi_devi_enter(softs->devinfo_p, &circ1);
7516 (void) aac_config_lun(softs, tgt, 0, NULL);
7517 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined",
7518 softs->instance, tgt, lun);
7519 ndi_devi_exit(softs->devinfo_p, circ1);
7520 }
7521 /* Device offlined */
7522 if (dip && !valid) {
7523 mutex_enter(&softs->io_lock);
7524 (void) aac_do_reset(softs);
7525 mutex_exit(&softs->io_lock);
7526
7527 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7528 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined",
7529 softs->instance, tgt, lun);
7530 }
7531 break;
7532 }
7533
7534 mutex_enter(&softs->io_lock);
7535 return (AACOK);
7536 }
7537
7538 #ifdef DEBUG
7539
7540 /* -------------------------debug aid functions-------------------------- */
7541
7542 #define AAC_FIB_CMD_KEY_STRINGS \
7543 {TestCommandResponse, "TestCommandResponse"}, \
7544 {TestAdapterCommand, "TestAdapterCommand"}, \
7545 {LastTestCommand, "LastTestCommand"}, \
7546 {ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue"}, \
7547 {ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue"}, \
7548 {ReinitHostHighRespQueue, "ReinitHostHighRespQueue"}, \
7549 {ReinitHostNormRespQueue, "ReinitHostNormRespQueue"}, \
7550 {ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue"}, \
7551 {ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue"}, \
7552 {ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue"}, \
7553 {ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue"}, \
7554 {InterfaceShutdown, "InterfaceShutdown"}, \
7555 {DmaCommandFib, "DmaCommandFib"}, \
7556 {StartProfile, "StartProfile"}, \
7557 {TermProfile, "TermProfile"}, \
7558 {SpeedTest, "SpeedTest"}, \
7559 {TakeABreakPt, "TakeABreakPt"}, \
7560 {RequestPerfData, "RequestPerfData"}, \
7561 {SetInterruptDefTimer, "SetInterruptDefTimer"}, \
7562 {SetInterruptDefCount, "SetInterruptDefCount"}, \
7563 {GetInterruptDefStatus, "GetInterruptDefStatus"}, \
7564 {LastCommCommand, "LastCommCommand"}, \
7565 {NuFileSystem, "NuFileSystem"}, \
7566 {UFS, "UFS"}, \
7567 {HostFileSystem, "HostFileSystem"}, \
7568 {LastFileSystemCommand, "LastFileSystemCommand"}, \
7569 {ContainerCommand, "ContainerCommand"}, \
7570 {ContainerCommand64, "ContainerCommand64"}, \
7571 {ClusterCommand, "ClusterCommand"}, \
7572 {ScsiPortCommand, "ScsiPortCommand"}, \
7573 {ScsiPortCommandU64, "ScsiPortCommandU64"}, \
7574 {AifRequest, "AifRequest"}, \
7575 {CheckRevision, "CheckRevision"}, \
7576 {FsaHostShutdown, "FsaHostShutdown"}, \
7577 {RequestAdapterInfo, "RequestAdapterInfo"}, \
7578 {IsAdapterPaused, "IsAdapterPaused"}, \
7579 {SendHostTime, "SendHostTime"}, \
7580 {LastMiscCommand, "LastMiscCommand"}
7581
7582 #define AAC_CTVM_SUBCMD_KEY_STRINGS \
7583 {VM_Null, "VM_Null"}, \
7584 {VM_NameServe, "VM_NameServe"}, \
7585 {VM_ContainerConfig, "VM_ContainerConfig"}, \
7586 {VM_Ioctl, "VM_Ioctl"}, \
7587 {VM_FilesystemIoctl, "VM_FilesystemIoctl"}, \
7588 {VM_CloseAll, "VM_CloseAll"}, \
7589 {VM_CtBlockRead, "VM_CtBlockRead"}, \
7590 {VM_CtBlockWrite, "VM_CtBlockWrite"}, \
7591 {VM_SliceBlockRead, "VM_SliceBlockRead"}, \
7592 {VM_SliceBlockWrite, "VM_SliceBlockWrite"}, \
7593 {VM_DriveBlockRead, "VM_DriveBlockRead"}, \
7594 {VM_DriveBlockWrite, "VM_DriveBlockWrite"}, \
7595 {VM_EnclosureMgt, "VM_EnclosureMgt"}, \
7596 {VM_Unused, "VM_Unused"}, \
7597 {VM_CtBlockVerify, "VM_CtBlockVerify"}, \
7598 {VM_CtPerf, "VM_CtPerf"}, \
7599 {VM_CtBlockRead64, "VM_CtBlockRead64"}, \
7600 {VM_CtBlockWrite64, "VM_CtBlockWrite64"}, \
7601 {VM_CtBlockVerify64, "VM_CtBlockVerify64"}, \
7602 {VM_CtHostRead64, "VM_CtHostRead64"}, \
7603 {VM_CtHostWrite64, "VM_CtHostWrite64"}, \
7604 {VM_NameServe64, "VM_NameServe64"}
7605
7606 #define AAC_CT_SUBCMD_KEY_STRINGS \
7607 {CT_Null, "CT_Null"}, \
7608 {CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT"}, \
7609 {CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT"}, \
7610 {CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO"}, \
7611 {CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT"}, \
7612 {CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD"}, \
7613 {CT_WRITE_MBR, "CT_WRITE_MBR"}, \
7614 {CT_WRITE_PARTITION, "CT_WRITE_PARTITION"}, \
7615 {CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION"}, \
7616 {CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER"}, \
7617 {CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY"}, \
7618 {CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE"}, \
7619 {CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE"}, \
7620 {CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER"}, \
7621 {CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY"}, \
7622 {CT_READ_MBR, "CT_READ_MBR"}, \
7623 {CT_READ_PARTITION, "CT_READ_PARTITION"}, \
7624 {CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER"}, \
7625 {CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER"}, \
7626 {CT_SLICE_SIZE, "CT_SLICE_SIZE"}, \
7627 {CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS"}, \
7628 {CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER"}, \
7629 {CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE"}, \
7630 {CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE"}, \
7631 {CT_UNMIRROR, "CT_UNMIRROR"}, \
7632 {CT_MIRROR_DELAY, "CT_MIRROR_DELAY"}, \
7633 {CT_GEN_MIRROR, "CT_GEN_MIRROR"}, \
7634 {CT_GEN_MIRROR2, "CT_GEN_MIRROR2"}, \
7635 {CT_TEST_CONTAINER, "CT_TEST_CONTAINER"}, \
7636 {CT_MOVE2, "CT_MOVE2"}, \
7637 {CT_SPLIT, "CT_SPLIT"}, \
7638 {CT_SPLIT2, "CT_SPLIT2"}, \
7639 {CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN"}, \
7640 {CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2"}, \
7641 {CT_RECONFIG, "CT_RECONFIG"}, \
7642 {CT_BREAK2, "CT_BREAK2"}, \
7643 {CT_BREAK, "CT_BREAK"}, \
7644 {CT_MERGE2, "CT_MERGE2"}, \
7645 {CT_MERGE, "CT_MERGE"}, \
7646 {CT_FORCE_ERROR, "CT_FORCE_ERROR"}, \
7647 {CT_CLEAR_ERROR, "CT_CLEAR_ERROR"}, \
7648 {CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER"}, \
7649 {CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER"}, \
7650 {CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA"}, \
7651 {CT_VOLUME_ADD, "CT_VOLUME_ADD"}, \
7652 {CT_VOLUME_ADD2, "CT_VOLUME_ADD2"}, \
7653 {CT_MIRROR_STATUS, "CT_MIRROR_STATUS"}, \
7654 {CT_COPY_STATUS, "CT_COPY_STATUS"}, \
7655 {CT_COPY, "CT_COPY"}, \
7656 {CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER"}, \
7657 {CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER"}, \
7658 {CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY"}, \
7659 {CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE"}, \
7660 {CT_CLEAN_DEAD, "CT_CLEAN_DEAD"}, \
7661 {CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND"}, \
7662 {CT_SET, "CT_SET"}, \
7663 {CT_GET, "CT_GET"}, \
7664 {CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY"}, \
7665 {CT_GET_DELAY, "CT_GET_DELAY"}, \
7666 {CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE"}, \
7667 {CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS"}, \
7668 {CT_SCRUB, "CT_SCRUB"}, \
7669 {CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS"}, \
7670 {CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO"}, \
7671 {CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD"}, \
7672 {CT_PAUSE_IO, "CT_PAUSE_IO"}, \
7673 {CT_RELEASE_IO, "CT_RELEASE_IO"}, \
7674 {CT_SCRUB2, "CT_SCRUB2"}, \
7675 {CT_MCHECK, "CT_MCHECK"}, \
7676 {CT_CORRUPT, "CT_CORRUPT"}, \
7677 {CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT"}, \
7678 {CT_PROMOTE, "CT_PROMOTE"}, \
7679 {CT_SET_DEAD, "CT_SET_DEAD"}, \
7680 {CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS"}, \
7681 {CT_GET_NV_PARAM, "CT_GET_NV_PARAM"}, \
7682 {CT_GET_PARAM, "CT_GET_PARAM"}, \
7683 {CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE"}, \
7684 {CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE"}, \
7685 {CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE"}, \
7686 {CT_SET_NV_PARAM, "CT_SET_NV_PARAM"}, \
7687 {CT_ABORT_SCRUB, "CT_ABORT_SCRUB"}, \
7688 {CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR"}, \
7689 {CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER"}, \
7690 {CT_CONTINUE_DATA, "CT_CONTINUE_DATA"}, \
7691 {CT_STOP_DATA, "CT_STOP_DATA"}, \
7692 {CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE"}, \
7693 {CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS"}, \
7694 {CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS"}, \
7695 {CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO"}, \
7696 {CT_GET_TIME, "CT_GET_TIME"}, \
7697 {CT_READ_DATA, "CT_READ_DATA"}, \
7698 {CT_CTR, "CT_CTR"}, \
7699 {CT_CTL, "CT_CTL"}, \
7700 {CT_DRAINIO, "CT_DRAINIO"}, \
7701 {CT_RELEASEIO, "CT_RELEASEIO"}, \
7702 {CT_GET_NVRAM, "CT_GET_NVRAM"}, \
7703 {CT_GET_MEMORY, "CT_GET_MEMORY"}, \
7704 {CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG"}, \
7705 {CT_ADD_LEVEL, "CT_ADD_LEVEL"}, \
7706 {CT_NV_ZERO, "CT_NV_ZERO"}, \
7707 {CT_READ_SIGNATURE, "CT_READ_SIGNATURE"}, \
7708 {CT_THROTTLE_ON, "CT_THROTTLE_ON"}, \
7709 {CT_THROTTLE_OFF, "CT_THROTTLE_OFF"}, \
7710 {CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS"}, \
7711 {CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT"}, \
7712 {CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT"}, \
7713 {CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS"}, \
7714 {CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS"}, \
7715 {CT_MONITOR, "CT_MONITOR"}, \
7716 {CT_GEN_MORPH, "CT_GEN_MORPH"}, \
7717 {CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO"}, \
7718 {CT_CACHE_SET, "CT_CACHE_SET"}, \
7719 {CT_CACHE_STAT, "CT_CACHE_STAT"}, \
7720 {CT_TRACE_START, "CT_TRACE_START"}, \
7721 {CT_TRACE_STOP, "CT_TRACE_STOP"}, \
7722 {CT_TRACE_ENABLE, "CT_TRACE_ENABLE"}, \
7723 {CT_TRACE_DISABLE, "CT_TRACE_DISABLE"}, \
7724 {CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP"}, \
7725 {CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER"}, \
7726 {CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER"}, \
7727 {CT_ENABLE_RAID5, "CT_ENABLE_RAID5"}, \
7728 {CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG"}, \
7729 {CT_GET_MEM_STATS, "CT_GET_MEM_STATS"}, \
7730 {CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE"}, \
7731 {CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD"}, \
7732 {CT_STOP_DUMPS, "CT_STOP_DUMPS"}, \
7733 {CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK"}, \
7734 {CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS"}, \
7735 {CT_MOVE_PARTITION, "CT_MOVE_PARTITION"}, \
7736 {CT_FLUSH_CACHE, "CT_FLUSH_CACHE"}, \
7737 {CT_READ_NAME, "CT_READ_NAME"}, \
7738 {CT_WRITE_NAME, "CT_WRITE_NAME"}, \
7739 {CT_TOSS_CACHE, "CT_TOSS_CACHE"}, \
7740 {CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO"}, \
7741 {CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE"}, \
7742 {CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE"}, \
7743 {CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS"}, \
7744 {CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK"}, \
7745 {CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG"}, \
7746 {CT_CACHE_FAVOR, "CT_CACHE_FAVOR"}, \
7747 {CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR"}, \
7748 {CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX"}, \
7749 {CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX"}, \
7750 {CT_FLUSH, "CT_FLUSH"}, \
7751 {CT_REBUILD, "CT_REBUILD"}, \
7752 {CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER"}, \
7753 {CT_RESTART, "CT_RESTART"}, \
7754 {CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS"}, \
7755 {CT_TRACE_FLAG, "CT_TRACE_FLAG"}, \
7756 {CT_RESTART_MORPH, "CT_RESTART_MORPH"}, \
7757 {CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO"}, \
7758 {CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM"}, \
7759 {CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG"}, \
7760 {CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS"}, \
7761 {CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT"}, \
7762 {CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE"}, \
7763 {CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK"}, \
7764 {CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS"}, \
7765 {CT_CRAZY_CACHE, "CT_CRAZY_CACHE"}, \
7766 {CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT"}, \
7767 {CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG"}, \
7768 {CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT"}, \
7769 {CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID"}, \
7770 {CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID"}, \
7771 {CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID"}, \
7772 {CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID"}, \
7773 {CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID"}, \
7774 {CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID"}, \
7775 {CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION"}, \
7776 {CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION"}, \
7777 {CT_STRIPE_ADD2, "CT_STRIPE_ADD2"}, \
7778 {CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET"}, \
7779 {CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET"}, \
7780 {CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER"}, \
7781 {CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD"}, \
7782 {CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION"}, \
7783 {CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT"}, \
7784 {CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT"}, \
7785 {CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO"}, \
7786 {CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER"}, \
7787 {CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO"}, \
7788 {CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID"}, \
7789 {CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK"}, \
7790 {CT_IS_CONTAINER_MEATADATA_STANDARD, \
7791 "CT_IS_CONTAINER_MEATADATA_STANDARD"}, \
7792 {CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD"}, \
7793 {CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT"}, \
7794 {CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS"}, \
7795 {CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO"}, \
7796 {CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY"}, \
7797 {CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE"}, \
7798 {CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE"}, \
7799 {CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE"}, \
7800 {CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF"}, \
7801 {CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID"}, \
7802 {CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS"}, \
7803 {CT_GET_PPI_DATA, "CT_GET_PPI_DATA"}, \
7804 {CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES"}, \
7805 {CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE"}, \
7806 {CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2"}, \
7807 {CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2"}, \
7808 {CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2"}, \
7809 {CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER"}, \
7810 {CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE"}
7811
7812 #define AAC_CL_SUBCMD_KEY_STRINGS \
7813 {CL_NULL, "CL_NULL"}, \
7814 {DS_INIT, "DS_INIT"}, \
7815 {DS_RESCAN, "DS_RESCAN"}, \
7816 {DS_CREATE, "DS_CREATE"}, \
7817 {DS_DELETE, "DS_DELETE"}, \
7818 {DS_ADD_DISK, "DS_ADD_DISK"}, \
7819 {DS_REMOVE_DISK, "DS_REMOVE_DISK"}, \
7820 {DS_MOVE_DISK, "DS_MOVE_DISK"}, \
7821 {DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP"}, \
7822 {DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP"}, \
7823 {DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP"}, \
7824 {DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM"}, \
7825 {DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM"}, \
7826 {DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM"}, \
7827 {DS_GET_DISK_SETS, "DS_GET_DISK_SETS"}, \
7828 {DS_GET_DRIVES, "DS_GET_DRIVES"}, \
7829 {DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM"}, \
7830 {DS_ONLINE, "DS_ONLINE"}, \
7831 {DS_OFFLINE, "DS_OFFLINE"}, \
7832 {DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS"}, \
7833 {DS_FSAPRINT, "DS_FSAPRINT"}, \
7834 {CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS"}, \
7835 {CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS"}, \
7836 {CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG"}, \
7837 {CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER"}, \
7838 {CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER"}, \
7839 {CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER"}, \
7840 {CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER"}, \
7841 {CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE"}, \
7842 {CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE"}, \
7843 {CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE"}, \
7844 {CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE"}, \
7845 {CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE"}, \
7846 {CC_GET_BUSINFO, "CC_GET_BUSINFO"}, \
7847 {CC_GET_PORTINFO, "CC_GET_PORTINFO"}, \
7848 {CC_GET_NAMEINFO, "CC_GET_NAMEINFO"}, \
7849 {CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO"}, \
7850 {CQ_QUORUM_OP, "CQ_QUORUM_OP"}
7851
7852 #define AAC_AIF_SUBCMD_KEY_STRINGS \
7853 {AifCmdEventNotify, "AifCmdEventNotify"}, \
7854 {AifCmdJobProgress, "AifCmdJobProgress"}, \
7855 {AifCmdAPIReport, "AifCmdAPIReport"}, \
7856 {AifCmdDriverNotify, "AifCmdDriverNotify"}, \
7857 {AifReqJobList, "AifReqJobList"}, \
7858 {AifReqJobsForCtr, "AifReqJobsForCtr"}, \
7859 {AifReqJobsForScsi, "AifReqJobsForScsi"}, \
7860 {AifReqJobReport, "AifReqJobReport"}, \
7861 {AifReqTerminateJob, "AifReqTerminateJob"}, \
7862 {AifReqSuspendJob, "AifReqSuspendJob"}, \
7863 {AifReqResumeJob, "AifReqResumeJob"}, \
7864 {AifReqSendAPIReport, "AifReqSendAPIReport"}, \
7865 {AifReqAPIJobStart, "AifReqAPIJobStart"}, \
7866 {AifReqAPIJobUpdate, "AifReqAPIJobUpdate"}, \
7867 {AifReqAPIJobFinish, "AifReqAPIJobFinish"}
7868
7869 #define AAC_IOCTL_SUBCMD_KEY_STRINGS \
7870 {Reserved_IOCTL, "Reserved_IOCTL"}, \
7871 {GetDeviceHandle, "GetDeviceHandle"}, \
7872 {BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle"}, \
7873 {DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun"}, \
7874 {RescanBus, "RescanBus"}, \
7875 {GetDeviceProbeInfo, "GetDeviceProbeInfo"}, \
7876 {GetDeviceCapacity, "GetDeviceCapacity"}, \
7877 {GetContainerProbeInfo, "GetContainerProbeInfo"}, \
7878 {GetRequestedMemorySize, "GetRequestedMemorySize"}, \
7879 {GetBusInfo, "GetBusInfo"}, \
7880 {GetVendorSpecific, "GetVendorSpecific"}, \
7881 {EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo"}, \
7882 {EnhancedGetBusInfo, "EnhancedGetBusInfo"}, \
7883 {SetupExtendedCounters, "SetupExtendedCounters"}, \
7884 {GetPerformanceCounters, "GetPerformanceCounters"}, \
7885 {ResetPerformanceCounters, "ResetPerformanceCounters"}, \
7886 {ReadModePage, "ReadModePage"}, \
7887 {WriteModePage, "WriteModePage"}, \
7888 {ReadDriveParameter, "ReadDriveParameter"}, \
7889 {WriteDriveParameter, "WriteDriveParameter"}, \
7890 {ResetAdapter, "ResetAdapter"}, \
7891 {ResetBus, "ResetBus"}, \
7892 {ResetBusDevice, "ResetBusDevice"}, \
7893 {ExecuteSrb, "ExecuteSrb"}, \
7894 {Create_IO_Task, "Create_IO_Task"}, \
7895 {Delete_IO_Task, "Delete_IO_Task"}, \
7896 {Get_IO_Task_Info, "Get_IO_Task_Info"}, \
7897 {Check_Task_Progress, "Check_Task_Progress"}, \
7898 {InjectError, "InjectError"}, \
7899 {GetDeviceDefectCounts, "GetDeviceDefectCounts"}, \
7900 {GetDeviceDefectInfo, "GetDeviceDefectInfo"}, \
7901 {GetDeviceStatus, "GetDeviceStatus"}, \
7902 {ClearDeviceStatus, "ClearDeviceStatus"}, \
7903 {DiskSpinControl, "DiskSpinControl"}, \
7904 {DiskSmartControl, "DiskSmartControl"}, \
7905 {WriteSame, "WriteSame"}, \
7906 {ReadWriteLong, "ReadWriteLong"}, \
7907 {FormatUnit, "FormatUnit"}, \
7908 {TargetDeviceControl, "TargetDeviceControl"}, \
7909 {TargetChannelControl, "TargetChannelControl"}, \
7910 {FlashNewCode, "FlashNewCode"}, \
7911 {DiskCheck, "DiskCheck"}, \
7912 {RequestSense, "RequestSense"}, \
7913 {DiskPERControl, "DiskPERControl"}, \
7914 {Read10, "Read10"}, \
7915 {Write10, "Write10"}
7916
7917 #define AAC_AIFEN_KEY_STRINGS \
7918 {AifEnGeneric, "Generic"}, \
7919 {AifEnTaskComplete, "TaskComplete"}, \
7920 {AifEnConfigChange, "Config change"}, \
7921 {AifEnContainerChange, "Container change"}, \
7922 {AifEnDeviceFailure, "device failed"}, \
7923 {AifEnMirrorFailover, "Mirror failover"}, \
7924 {AifEnContainerEvent, "container event"}, \
7925 {AifEnFileSystemChange, "File system changed"}, \
7926 {AifEnConfigPause, "Container pause event"}, \
7927 {AifEnConfigResume, "Container resume event"}, \
7928 {AifEnFailoverChange, "Failover space assignment changed"}, \
7929 {AifEnRAID5RebuildDone, "RAID5 rebuild finished"}, \
7930 {AifEnEnclosureManagement, "Enclosure management event"}, \
7931 {AifEnBatteryEvent, "battery event"}, \
7932 {AifEnAddContainer, "Add container"}, \
7933 {AifEnDeleteContainer, "Delete container"}, \
7934 {AifEnSMARTEvent, "SMART Event"}, \
7935 {AifEnBatteryNeedsRecond, "battery needs reconditioning"}, \
7936 {AifEnClusterEvent, "cluster event"}, \
7937 {AifEnDiskSetEvent, "disk set event occurred"}, \
7938 {AifDenMorphComplete, "morph operation completed"}, \
7939 {AifDenVolumeExtendComplete, "VolumeExtendComplete"}
7940
7941 struct aac_key_strings {
7942 int key;
7943 char *message;
7944 };
7945
7946 extern struct scsi_key_strings scsi_cmds[];
7947
7948 static struct aac_key_strings aac_fib_cmds[] = {
7949 AAC_FIB_CMD_KEY_STRINGS,
7950 { -1, NULL }
7951 };
7952
7953 static struct aac_key_strings aac_ctvm_subcmds[] = {
7954 AAC_CTVM_SUBCMD_KEY_STRINGS,
7955 { -1, NULL }
7956 };
7957
7958 static struct aac_key_strings aac_ct_subcmds[] = {
7959 AAC_CT_SUBCMD_KEY_STRINGS,
7960 { -1, NULL }
7961 };
7962
7963 static struct aac_key_strings aac_cl_subcmds[] = {
7964 AAC_CL_SUBCMD_KEY_STRINGS,
7965 { -1, NULL }
7966 };
7967
7968 static struct aac_key_strings aac_aif_subcmds[] = {
7969 AAC_AIF_SUBCMD_KEY_STRINGS,
7970 { -1, NULL }
7971 };
7972
7973 static struct aac_key_strings aac_ioctl_subcmds[] = {
7974 AAC_IOCTL_SUBCMD_KEY_STRINGS,
7975 { -1, NULL }
7976 };
7977
7978 static struct aac_key_strings aac_aifens[] = {
7979 AAC_AIFEN_KEY_STRINGS,
7980 { -1, NULL }
7981 };
7982
7983 /*
7984 * The following function comes from Adaptec:
7985 *
7986 * Get the firmware print buffer parameters from the firmware,
7987 * if the command was successful map in the address.
7988 */
7989 static int
7990 aac_get_fw_debug_buffer(struct aac_softstate *softs)
7991 {
7992 if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP,
7993 0, 0, 0, 0, NULL) == AACOK) {
7994 uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1);
7995 uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2);
7996 uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3);
7997 uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4);
7998
7999 if (mondrv_buf_size) {
8000 uint32_t offset = mondrv_buf_paddrl - \
8001 softs->pci_mem_base_paddr;
8002
8003 /*
8004 * See if the address is already mapped in, and
8005 * if so set it up from the base address
8006 */
8007 if ((mondrv_buf_paddrh == 0) &&
8008 (offset + mondrv_buf_size < softs->map_size)) {
8009 mutex_enter(&aac_prt_mutex);
8010 softs->debug_buf_offset = offset;
8011 softs->debug_header_size = mondrv_hdr_size;
8012 softs->debug_buf_size = mondrv_buf_size;
8013 softs->debug_fw_flags = 0;
8014 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
8015 mutex_exit(&aac_prt_mutex);
8016
8017 return (AACOK);
8018 }
8019 }
8020 }
8021 return (AACERR);
8022 }
8023
8024 int
8025 aac_dbflag_on(struct aac_softstate *softs, int flag)
8026 {
8027 int debug_flags = softs ? softs->debug_flags : aac_debug_flags;
8028
8029 return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \
8030 AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag));
8031 }
8032
8033 static void
8034 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader)
8035 {
8036 if (noheader) {
8037 if (sl) {
8038 aac_fmt[0] = sl;
8039 cmn_err(lev, aac_fmt, aac_prt_buf);
8040 } else {
8041 cmn_err(lev, &aac_fmt[1], aac_prt_buf);
8042 }
8043 } else {
8044 if (sl) {
8045 aac_fmt_header[0] = sl;
8046 cmn_err(lev, aac_fmt_header,
8047 softs->vendor_name, softs->instance,
8048 aac_prt_buf);
8049 } else {
8050 cmn_err(lev, &aac_fmt_header[1],
8051 softs->vendor_name, softs->instance,
8052 aac_prt_buf);
8053 }
8054 }
8055 }
8056
8057 /*
8058 * The following function comes from Adaptec:
8059 *
8060 * Format and print out the data passed in to UART or console
8061 * as specified by debug flags.
8062 */
8063 void
8064 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...)
8065 {
8066 va_list args;
8067 char sl; /* system log character */
8068
8069 mutex_enter(&aac_prt_mutex);
8070 /* Set up parameters and call sprintf function to format the data */
8071 if (strchr("^!?", fmt[0]) == NULL) {
8072 sl = 0;
8073 } else {
8074 sl = fmt[0];
8075 fmt++;
8076 }
8077 va_start(args, fmt);
8078 (void) vsprintf(aac_prt_buf, fmt, args);
8079 va_end(args);
8080
8081 /* Make sure the softs structure has been passed in for this section */
8082 if (softs) {
8083 /* If we are set up for a Firmware print */
8084 if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) &&
8085 (softs->debug_buf_size)) {
8086 uint32_t count, i;
8087
8088 /* Make sure the string size is within boundaries */
8089 count = strlen(aac_prt_buf);
8090 if (count > softs->debug_buf_size)
8091 count = (uint16_t)softs->debug_buf_size;
8092
8093 /*
8094 * Wait for no more than AAC_PRINT_TIMEOUT for the
8095 * previous message length to clear (the handshake).
8096 */
8097 for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
8098 if (!PCI_MEM_GET32(softs,
8099 softs->debug_buf_offset + \
8100 AAC_FW_DBG_STRLEN_OFFSET))
8101 break;
8102
8103 drv_usecwait(1000);
8104 }
8105
8106 /*
8107 * If the length is clear, copy over the message, the
8108 * flags, and the length. Make sure the length is the
8109 * last because that is the signal for the Firmware to
8110 * pick it up.
8111 */
8112 if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \
8113 AAC_FW_DBG_STRLEN_OFFSET)) {
8114 PCI_MEM_REP_PUT8(softs,
8115 softs->debug_buf_offset + \
8116 softs->debug_header_size,
8117 aac_prt_buf, count);
8118 PCI_MEM_PUT32(softs,
8119 softs->debug_buf_offset + \
8120 AAC_FW_DBG_FLAGS_OFFSET,
8121 softs->debug_fw_flags);
8122 PCI_MEM_PUT32(softs,
8123 softs->debug_buf_offset + \
8124 AAC_FW_DBG_STRLEN_OFFSET, count);
8125 } else {
8126 cmn_err(CE_WARN, "UART output failed");
8127 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
8128 }
8129 }
8130
8131 /*
8132 * If the Kernel Debug Print flag is set, send it off
8133 * to the Kernel Debugger
8134 */
8135 if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT)
8136 aac_cmn_err(softs, lev, sl,
8137 (softs->debug_flags & AACDB_FLAGS_NO_HEADERS));
8138 } else {
8139 /* Driver not initialized yet, no firmware or header output */
8140 if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT)
8141 aac_cmn_err(softs, lev, sl, 1);
8142 }
8143 mutex_exit(&aac_prt_mutex);
8144 }
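
/*
 * Illustration only, not part of the driver (kept under #if 0): the firmware
 * print handshake performed by aac_printf() above, reduced to its three
 * steps.  The string-length word doubles as the ready flag: it must read
 * zero before a new message may be written, and it is written last so the
 * firmware only ever sees a complete message.  The real code bounds the
 * wait with AAC_PRINT_TIMEOUT; this sketch does not.  The function name is
 * the sketch's own, and count is assumed to be the formatted length already
 * clamped to debug_buf_size.
 */
#if 0
static void
aac_fw_print_handshake_sketch(struct aac_softstate *softs, uint32_t count)
{
	/* 1. wait for the firmware to consume the previous message */
	while (PCI_MEM_GET32(softs,
	    softs->debug_buf_offset + AAC_FW_DBG_STRLEN_OFFSET) != 0)
		drv_usecwait(1000);

	/* 2. copy the formatted message past the buffer header */
	PCI_MEM_REP_PUT8(softs,
	    softs->debug_buf_offset + softs->debug_header_size,
	    aac_prt_buf, count);

	/* 3. write the flags, then the length last as the "go" signal */
	PCI_MEM_PUT32(softs,
	    softs->debug_buf_offset + AAC_FW_DBG_FLAGS_OFFSET,
	    softs->debug_fw_flags);
	PCI_MEM_PUT32(softs,
	    softs->debug_buf_offset + AAC_FW_DBG_STRLEN_OFFSET, count);
}
#endif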
8145
8146 /*
8147 * Translate command number to description string
8148 */
8149 static char *
8150 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist)
8151 {
8152 int i;
8153
8154 for (i = 0; cmdlist[i].key != -1; i++) {
8155 if (cmd == cmdlist[i].key)
8156 return (cmdlist[i].message);
8157 }
8158 return (NULL);
8159 }
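
/*
 * Illustration only, not part of the driver (kept under #if 0): typical use
 * of aac_cmd_name() with one of the key-string tables defined above,
 * mirroring what the FIB and SCSI command dump routines below do.  The
 * function name is the sketch's own.
 */
#if 0
static void
aac_cmd_name_sketch(struct aac_softstate *softs, uint32_t fib_cmd)
{
	char *name = aac_cmd_name(fib_cmd, aac_fib_cmds);

	if (name != NULL)
		aac_printf(softs, CE_NOTE, "FIB> %s", name);
	else
		aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x)", fib_cmd);
}
#endif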
8160
8161 static void
8162 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
8163 {
8164 struct scsi_pkt *pkt = acp->pkt;
8165 struct scsi_address *ap = &pkt->pkt_address;
8166 int is_pd = 0;
8167 int ctl = ddi_get_instance(softs->devinfo_p);
8168 int tgt = ap->a_target;
8169 int lun = ap->a_lun;
8170 union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp;
8171 uchar_t cmd = cdbp->scc_cmd;
8172 char *desc;
8173
8174 if (tgt >= AAC_MAX_LD) {
8175 is_pd = 1;
8176 ctl = ((struct aac_nondasd *)acp->dvp)->bus;
8177 tgt = ((struct aac_nondasd *)acp->dvp)->tid;
8178 lun = 0;
8179 }
8180
8181 if ((desc = aac_cmd_name(cmd,
8182 (struct aac_key_strings *)scsi_cmds)) == NULL) {
8183 aac_printf(softs, CE_NOTE,
8184 "SCMD> Unknown(0x%2x) --> c%dt%dL%d %s",
8185 cmd, ctl, tgt, lun, is_pd ? "(pd)" : "");
8186 return;
8187 }
8188
8189 switch (cmd) {
8190 case SCMD_READ:
8191 case SCMD_WRITE:
8192 aac_printf(softs, CE_NOTE,
8193 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
8194 desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp),
8195 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8196 ctl, tgt, lun, is_pd ? "(pd)" : "");
8197 break;
8198 case SCMD_READ_G1:
8199 case SCMD_WRITE_G1:
8200 aac_printf(softs, CE_NOTE,
8201 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
8202 desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp),
8203 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8204 ctl, tgt, lun, is_pd ? "(pd)" : "");
8205 break;
8206 case SCMD_READ_G4:
8207 case SCMD_WRITE_G4:
8208 aac_printf(softs, CE_NOTE,
8209 "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s",
8210 desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp),
8211 GETG4COUNT(cdbp),
8212 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8213 ctl, tgt, lun, is_pd ? "(pd)" : "");
8214 break;
8215 case SCMD_READ_G5:
8216 case SCMD_WRITE_G5:
8217 aac_printf(softs, CE_NOTE,
8218 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
8219 desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp),
8220 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8221 ctl, tgt, lun, is_pd ? "(pd)" : "");
8222 break;
8223 default:
8224 aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s",
8225 desc, ctl, tgt, lun, is_pd ? "(pd)" : "");
8226 }
8227 }
8228
8229 void
8230 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp)
8231 {
8232 struct aac_cmd *acp = slotp->acp;
8233 struct aac_fib *fibp = slotp->fibp;
8234 ddi_acc_handle_t acc = slotp->fib_acc_handle;
8235 uint16_t fib_size;
8236 uint32_t fib_cmd, sub_cmd;
8237 char *cmdstr, *subcmdstr;
8238 char *caller;
8239 int i;
8240
8241 if (acp) {
8242 if (!(softs->debug_fib_flags & acp->fib_flags))
8243 return;
8244 if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD)
8245 caller = "SCMD";
8246 else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL)
8247 caller = "IOCTL";
8248 else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB)
8249 caller = "SRB";
8250 else
8251 return;
8252 } else {
8253 if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC))
8254 return;
8255 caller = "SYNC";
8256 }
8257
8258 fib_cmd = ddi_get16(acc, &fibp->Header.Command);
8259 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds);
8260 sub_cmd = (uint32_t)-1;
8261 subcmdstr = NULL;
8262
8263 /* Print FIB header */
8264 if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) {
8265 aac_printf(softs, CE_NOTE, "FIB> from %s", caller);
8266 aac_printf(softs, CE_NOTE, " XferState %d",
8267 ddi_get32(acc, &fibp->Header.XferState));
8268 aac_printf(softs, CE_NOTE, " Command %d",
8269 ddi_get16(acc, &fibp->Header.Command));
8270 aac_printf(softs, CE_NOTE, " StructType %d",
8271 ddi_get8(acc, &fibp->Header.StructType));
8272 aac_printf(softs, CE_NOTE, " Flags 0x%x",
8273 ddi_get8(acc, &fibp->Header.Flags));
8274 aac_printf(softs, CE_NOTE, " Size %d",
8275 ddi_get16(acc, &fibp->Header.Size));
8276 aac_printf(softs, CE_NOTE, " SenderSize %d",
8277 ddi_get16(acc, &fibp->Header.SenderSize));
8278 aac_printf(softs, CE_NOTE, " SenderAddr 0x%x",
8279 ddi_get32(acc, &fibp->Header.SenderFibAddress));
8280 aac_printf(softs, CE_NOTE, " RcvrAddr 0x%x",
8281 ddi_get32(acc, &fibp->Header.ReceiverFibAddress));
8282 aac_printf(softs, CE_NOTE, " SenderData 0x%x",
8283 ddi_get32(acc, &fibp->Header.SenderData));
8284 }
8285
8286 /* Print FIB data */
8287 switch (fib_cmd) {
8288 case ContainerCommand:
8289 sub_cmd = ddi_get32(acc,
8290 (void *)&(((uint32_t *)(void *)&fibp->data[0])[0]));
8291 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds);
8292 if (subcmdstr == NULL)
8293 break;
8294
8295 switch (sub_cmd) {
8296 case VM_ContainerConfig: {
8297 struct aac_Container *pContainer =
8298 (struct aac_Container *)fibp->data;
8299
8300 fib_cmd = sub_cmd;
8301 cmdstr = subcmdstr;
8302 sub_cmd = (uint32_t)-1;
8303 subcmdstr = NULL;
8304
8305 sub_cmd = ddi_get32(acc,
8306 &pContainer->CTCommand.command);
8307 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds);
8308 if (subcmdstr == NULL)
8309 break;
8310 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)",
8311 subcmdstr,
8312 ddi_get32(acc, &pContainer->CTCommand.param[0]),
8313 ddi_get32(acc, &pContainer->CTCommand.param[1]),
8314 ddi_get32(acc, &pContainer->CTCommand.param[2]));
8315 return;
8316 }
8317
8318 case VM_Ioctl:
8319 fib_cmd = sub_cmd;
8320 cmdstr = subcmdstr;
8321 sub_cmd = (uint32_t)-1;
8322 subcmdstr = NULL;
8323
8324 sub_cmd = ddi_get32(acc,
8325 (void *)&(((uint32_t *)(void *)&fibp->data[0])[4]));
8326 subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds);
8327 break;
8328
8329 case VM_CtBlockRead:
8330 case VM_CtBlockWrite: {
8331 struct aac_blockread *br =
8332 (struct aac_blockread *)fibp->data;
8333 struct aac_sg_table *sg = &br->SgMap;
8334 uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8335
8336 aac_printf(softs, CE_NOTE,
8337 "FIB> %s Container %d 0x%x/%d", subcmdstr,
8338 ddi_get32(acc, &br->ContainerId),
8339 ddi_get32(acc, &br->BlockNumber),
8340 ddi_get32(acc, &br->ByteCount));
8341 for (i = 0; i < sgcount; i++)
8342 aac_printf(softs, CE_NOTE,
8343 " %d: 0x%08x/%d", i,
8344 ddi_get32(acc, &sg->SgEntry[i].SgAddress),
8345 ddi_get32(acc, &sg->SgEntry[i]. \
8346 SgByteCount));
8347 return;
8348 }
8349 }
8350 break;
8351
8352 case ContainerCommand64: {
8353 struct aac_blockread64 *br =
8354 (struct aac_blockread64 *)fibp->data;
8355 struct aac_sg_table64 *sg = &br->SgMap64;
8356 uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8357 uint64_t sgaddr;
8358
8359 sub_cmd = br->Command;
8360 subcmdstr = NULL;
8361 if (sub_cmd == VM_CtHostRead64)
8362 subcmdstr = "VM_CtHostRead64";
8363 else if (sub_cmd == VM_CtHostWrite64)
8364 subcmdstr = "VM_CtHostWrite64";
8365 else
8366 break;
8367
8368 aac_printf(softs, CE_NOTE,
8369 "FIB> %s Container %d 0x%x/%d", subcmdstr,
8370 ddi_get16(acc, &br->ContainerId),
8371 ddi_get32(acc, &br->BlockNumber),
8372 ddi_get16(acc, &br->SectorCount));
8373 for (i = 0; i < sgcount; i++) {
8374 sgaddr = ddi_get64(acc,
8375 &sg->SgEntry64[i].SgAddress);
8376 aac_printf(softs, CE_NOTE,
8377 " %d: 0x%08x.%08x/%d", i,
8378 AAC_MS32(sgaddr), AAC_LS32(sgaddr),
8379 ddi_get32(acc, &sg->SgEntry64[i]. \
8380 SgByteCount));
8381 }
8382 return;
8383 }
8384
8385 case RawIo: {
8386 struct aac_raw_io *io = (struct aac_raw_io *)fibp->data;
8387 struct aac_sg_tableraw *sg = &io->SgMapRaw;
8388 uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8389 uint64_t sgaddr;
8390
8391 aac_printf(softs, CE_NOTE,
8392 "FIB> RawIo Container %d 0x%llx/%d 0x%x",
8393 ddi_get16(acc, &io->ContainerId),
8394 ddi_get64(acc, &io->BlockNumber),
8395 ddi_get32(acc, &io->ByteCount),
8396 ddi_get16(acc, &io->Flags));
8397 for (i = 0; i < sgcount; i++) {
8398 sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress);
8399 aac_printf(softs, CE_NOTE, " %d: 0x%08x.%08x/%d", i,
8400 AAC_MS32(sgaddr), AAC_LS32(sgaddr),
8401 ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount));
8402 }
8403 return;
8404 }
8405
8406 case ClusterCommand:
8407 sub_cmd = ddi_get32(acc,
8408 (void *)&(((uint32_t *)(void *)fibp->data)[0]));
8409 subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds);
8410 break;
8411
8412 case AifRequest:
8413 sub_cmd = ddi_get32(acc,
8414 (void *)&(((uint32_t *)(void *)fibp->data)[0]));
8415 subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds);
8416 break;
8417
8418 default:
8419 break;
8420 }
8421
8422 fib_size = ddi_get16(acc, &(fibp->Header.Size));
8423 if (subcmdstr)
8424 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
8425 subcmdstr, fib_size);
8426 else if (cmdstr && sub_cmd == (uint32_t)-1)
8427 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
8428 cmdstr, fib_size);
8429 else if (cmdstr)
8430 aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d",
8431 cmdstr, sub_cmd, fib_size);
8432 else
8433 aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d",
8434 fib_cmd, fib_size);
8435 }
8436
8437 static void
8438 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
8439 {
8440 int aif_command;
8441 uint32_t aif_seqnumber;
8442 int aif_en_type;
8443 char *str;
8444
8445 aif_command = LE_32(aif->command);
8446 aif_seqnumber = LE_32(aif->seqNumber);
8447 aif_en_type = LE_32(aif->data.EN.type);
8448
8449 switch (aif_command) {
8450 case AifCmdEventNotify:
8451 str = aac_cmd_name(aif_en_type, aac_aifens);
8452 if (str)
8453 aac_printf(softs, CE_NOTE, "AIF! %s", str);
8454 else
8455 aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)",
8456 aif_en_type);
8457 break;
8458
8459 case AifCmdJobProgress:
8460 switch (LE_32(aif->data.PR[0].status)) {
8461 case AifJobStsSuccess:
8462 str = "success"; break;
8463 case AifJobStsFinished:
8464 str = "finished"; break;
8465 case AifJobStsAborted:
8466 str = "aborted"; break;
8467 case AifJobStsFailed:
8468 str = "failed"; break;
8469 case AifJobStsSuspended:
8470 str = "suspended"; break;
8471 case AifJobStsRunning:
8472 str = "running"; break;
8473 default:
8474 str = "unknown"; break;
8475 }
8476 aac_printf(softs, CE_NOTE,
8477 "AIF! JobProgress (%d) - %s (%d, %d)",
8478 aif_seqnumber, str,
8479 LE_32(aif->data.PR[0].currentTick),
8480 LE_32(aif->data.PR[0].finalTick));
8481 break;
8482
8483 case AifCmdAPIReport:
8484 aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)",
8485 aif_seqnumber);
8486 break;
8487
8488 case AifCmdDriverNotify:
8489 aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)",
8490 aif_seqnumber);
8491 break;
8492
8493 default:
8494 aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)",
8495 aif_command, aif_seqnumber);
8496 break;
8497 }
8498 }
8499
8500 #endif /* DEBUG */