1 /*
2 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
3 */
4
5 /*
6 * Copyright 2005-08 Adaptec, Inc.
7 * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
8 * Copyright (c) 2000 Michael Smith
9 * Copyright (c) 2001 Scott Long
10 * Copyright (c) 2000 BSDi
11 * All rights reserved.
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * SUCH DAMAGE.
33 */
34 #include <sys/modctl.h>
35 #include <sys/conf.h>
36 #include <sys/cmn_err.h>
37 #include <sys/ddi.h>
38 #include <sys/devops.h>
39 #include <sys/pci.h>
40 #include <sys/types.h>
41 #include <sys/ddidmareq.h>
42 #include <sys/scsi/scsi.h>
43 #include <sys/ksynch.h>
44 #include <sys/sunddi.h>
45 #include <sys/byteorder.h>
46 #include "aac_regs.h"
47 #include "aac.h"
48
49 /*
50 * FMA header files
51 */
52 #include <sys/ddifm.h>
53 #include <sys/fm/protocol.h>
54 #include <sys/fm/util.h>
55 #include <sys/fm/io/ddi.h>
56
/*
 * For minor nodes created by the SCSA framework, minor numbers are
 * formed by left-shifting the instance number by INST_MINOR_SHIFT and
 * ORing in a number less than 64.
 *
 * To support cfgadm, we need to conform to the SCSA framework by creating
 * devctl/scsi and driver-specific minor nodes in the SCSA format, and by
 * calling the scsi_hba_xxx() functions accordingly.
 */
66
67 #define AAC_MINOR 32
68 #define INST2AAC(x) (((x) << INST_MINOR_SHIFT) | AAC_MINOR)
69 #define AAC_SCSA_MINOR(x) ((x) & TRAN_MINOR_MASK)
70 #define AAC_IS_SCSA_NODE(x) ((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)
71
72 #define SD2TRAN(sd) ((sd)->sd_address.a_hba_tran)
73 #define AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
74 #define AAC_DIP2TRAN(dip) ((scsi_hba_tran_t *)ddi_get_driver_private(dip))
75 #define AAC_DIP2SOFTS(dip) (AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
76 #define SD2AAC(sd) (AAC_TRAN2SOFTS(SD2TRAN(sd)))
77 #define AAC_PD(t) ((t) - AAC_MAX_LD)
78 #define AAC_DEV(softs, t) (((t) < AAC_MAX_LD) ? \
79 &(softs)->containers[(t)].dev : \
80 ((t) < AAC_MAX_DEV(softs)) ? \
81 &(softs)->nondasds[AAC_PD(t)].dev : NULL)
82 #define AAC_DEVCFG_BEGIN(softs, tgt) \
83 aac_devcfg((softs), (tgt), 1)
84 #define AAC_DEVCFG_END(softs, tgt) \
85 aac_devcfg((softs), (tgt), 0)
86 #define PKT2AC(pkt) ((struct aac_cmd *)(pkt)->pkt_ha_private)
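/*
 * Busy-wait until cond becomes true, polling every 100us for at most
 * timeout milliseconds. On return, timeout is updated to roughly the
 * remaining time in milliseconds (0 if the wait expired).
 */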
#define AAC_BUSYWAIT(cond, timeout /* in milliseconds */) { \
88 if (!(cond)) { \
89 int count = (timeout) * 10; \
90 while (count) { \
91 drv_usecwait(100); \
92 if (cond) \
93 break; \
94 count--; \
95 } \
96 (timeout) = (count + 9) / 10; \
97 } \
98 }
99
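/*
 * Length of descriptor-format sense data (header plus one information
 * descriptor), used when the error LBA does not fit into 32 bits, and the
 * corresponding ARQ status length.
 */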
100 #define AAC_SENSE_DATA_DESCR_LEN \
101 (sizeof (struct scsi_descr_sense_hdr) + \
102 sizeof (struct scsi_information_sense_descr))
103 #define AAC_ARQ64_LENGTH \
104 (sizeof (struct scsi_arq_status) + \
105 AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)
106
/* Extract the LBA from a G0/G1/G4 CDB; NOTE: GETG4ADDRTL(cdbp) is int32_t */
108 #define AAC_GETGXADDR(cmdlen, cdbp) \
109 ((cmdlen == 6) ? GETG0ADDR(cdbp) : \
110 (cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
111 ((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))
112
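/* INQUIRY CDB flag bits and byte offsets used to build VPD page responses */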
113 #define AAC_CDB_INQUIRY_CMDDT 0x02
114 #define AAC_CDB_INQUIRY_EVPD 0x01
115 #define AAC_VPD_PAGE_CODE 1
116 #define AAC_VPD_PAGE_LENGTH 3
117 #define AAC_VPD_PAGE_DATA 4
118 #define AAC_VPD_ID_CODESET 0
119 #define AAC_VPD_ID_TYPE 1
120 #define AAC_VPD_ID_LENGTH 3
121 #define AAC_VPD_ID_DATA 4
122
123 #define AAC_SCSI_RPTLUNS_HEAD_SIZE 0x08
124 #define AAC_SCSI_RPTLUNS_ADDR_SIZE 0x08
125 #define AAC_SCSI_RPTLUNS_ADDR_MASK 0xC0
126 /* 00b - peripheral device addressing method */
127 #define AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL 0x00
128 /* 01b - flat space addressing method */
129 #define AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE 0x40
130 /* 10b - logical unit addressing method */
131 #define AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT 0x80
132
/* Return the size of a FIB whose data part is of type data_type */
134 #define AAC_FIB_SIZEOF(data_type) \
135 (sizeof (struct aac_fib_header) + sizeof (data_type))
136 /* Return the container size defined in mir */
137 #define AAC_MIR_SIZE(softs, acc, mir) \
138 (((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
139 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
140 ((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
141 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))
142
143 /* The last entry of aac_cards[] is for unknown cards */
144 #define AAC_UNKNOWN_CARD \
145 (sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
146 #define CARD_IS_UNKNOWN(i) (i == AAC_UNKNOWN_CARD)
147 #define BUF_IS_READ(bp) ((bp)->b_flags & B_READ)
148 #define AAC_IS_Q_EMPTY(q) ((q)->q_head == NULL)
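/* Map a command to its queue index: sync if AAC_CMD_SYNC is set, else async */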
149 #define AAC_CMDQ(acp) (!((acp)->flags & AAC_CMD_SYNC))
150
151 #define PCI_MEM_GET32(softs, off) \
152 ddi_get32((softs)->pci_mem_handle, \
153 (void *)((softs)->pci_mem_base_vaddr + (off)))
154 #define PCI_MEM_PUT32(softs, off, val) \
155 ddi_put32((softs)->pci_mem_handle, \
156 (void *)((softs)->pci_mem_base_vaddr + (off)), \
157 (uint32_t)(val))
158 #define PCI_MEM_GET16(softs, off) \
159 ddi_get16((softs)->pci_mem_handle, \
160 (void *)((softs)->pci_mem_base_vaddr + (off)))
161 #define PCI_MEM_PUT16(softs, off, val) \
162 ddi_put16((softs)->pci_mem_handle, \
163 (void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
164 /* Write host data at valp to device mem[off] repeatedly count times */
165 #define PCI_MEM_REP_PUT8(softs, off, valp, count) \
166 ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
167 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
168 count, DDI_DEV_AUTOINCR)
169 /* Read device data at mem[off] to host addr valp repeatedly count times */
170 #define PCI_MEM_REP_GET8(softs, off, valp, count) \
171 ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
172 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
173 count, DDI_DEV_AUTOINCR)
174 #define AAC_GET_FIELD8(acc, d, s, field) \
175 (d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
176 #define AAC_GET_FIELD32(acc, d, s, field) \
177 (d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
178 #define AAC_GET_FIELD64(acc, d, s, field) \
179 (d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
180 #define AAC_REP_GET_FIELD8(acc, d, s, field, r) \
181 ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
182 (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
183 #define AAC_REP_GET_FIELD32(acc, d, s, field, r) \
184 ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
185 (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
186
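/*
 * The outbound interrupt mask register (OIMR) masks the interrupts whose
 * bits are set to 1, so interrupts are enabled by writing the complement
 * of the doorbell bits of interest and disabled by writing ~0.
 */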
187 #define AAC_ENABLE_INTR(softs) { \
188 if (softs->flags & AAC_FLAGS_NEW_COMM) \
189 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
190 else \
191 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
192 softs->state |= AAC_STATE_INTR; \
193 }
194
195 #define AAC_DISABLE_INTR(softs) { \
196 PCI_MEM_PUT32(softs, AAC_OIMR, ~0); \
197 softs->state &= ~AAC_STATE_INTR; \
198 }
199 #define AAC_STATUS_CLR(softs, mask) PCI_MEM_PUT32(softs, AAC_ODBR, mask)
200 #define AAC_STATUS_GET(softs) PCI_MEM_GET32(softs, AAC_ODBR)
201 #define AAC_NOTIFY(softs, val) PCI_MEM_PUT32(softs, AAC_IDBR, val)
202 #define AAC_OUTB_GET(softs) PCI_MEM_GET32(softs, AAC_OQUE)
203 #define AAC_OUTB_SET(softs, val) PCI_MEM_PUT32(softs, AAC_OQUE, val)
204 #define AAC_FWSTATUS_GET(softs) \
205 ((softs)->aac_if.aif_get_fwstatus(softs))
206 #define AAC_MAILBOX_GET(softs, mb) \
207 ((softs)->aac_if.aif_get_mailbox((softs), (mb)))
208 #define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
209 ((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
210 (arg0), (arg1), (arg2), (arg3)))
211
212 #define AAC_MGT_SLOT_NUM 2
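/* Throttle value that marks a device whose outstanding I/O is being drained */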
213 #define AAC_THROTTLE_DRAIN -1
214
215 #define AAC_QUIESCE_TICK 1 /* 1 second */
216 #define AAC_QUIESCE_TIMEOUT 180 /* 180 seconds */
217 #define AAC_DEFAULT_TICK 10 /* 10 seconds */
218 #define AAC_SYNC_TICK (30*60) /* 30 minutes */
219
220 /* Poll time for aac_do_poll_io() */
221 #define AAC_POLL_TIME 60 /* 60 seconds */
222
223 /* IOP reset */
#define AAC_IOP_RESET_SUCCEED		0	/* IOP reset succeeded */
#define AAC_IOP_RESET_FAILED		-1	/* IOP reset failed */
#define AAC_IOP_RESET_ABNORMAL		-2	/* Reset operation abnormal */
227
228 /*
229 * Hardware access functions
230 */
231 static int aac_rx_get_fwstatus(struct aac_softstate *);
232 static int aac_rx_get_mailbox(struct aac_softstate *, int);
233 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
234 uint32_t, uint32_t, uint32_t);
235 static int aac_rkt_get_fwstatus(struct aac_softstate *);
236 static int aac_rkt_get_mailbox(struct aac_softstate *, int);
237 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
238 uint32_t, uint32_t, uint32_t);
239
240 /*
241 * SCSA function prototypes
242 */
243 static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
244 static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
245 static int aac_reset(dev_info_t *, ddi_reset_cmd_t);
246 static int aac_quiesce(dev_info_t *);
247 static int aac_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
248
249 /*
250 * Interrupt handler functions
251 */
252 static int aac_query_intrs(struct aac_softstate *, int);
253 static int aac_add_intrs(struct aac_softstate *);
254 static void aac_remove_intrs(struct aac_softstate *);
255 static int aac_enable_intrs(struct aac_softstate *);
256 static int aac_disable_intrs(struct aac_softstate *);
257 static uint_t aac_intr_old(caddr_t);
258 static uint_t aac_intr_new(caddr_t);
259 static uint_t aac_softintr(caddr_t);
260
261 /*
262 * Internal functions in attach
263 */
264 static int aac_check_card_type(struct aac_softstate *);
265 static int aac_check_firmware(struct aac_softstate *);
266 static int aac_common_attach(struct aac_softstate *);
267 static void aac_common_detach(struct aac_softstate *);
268 static int aac_probe_containers(struct aac_softstate *);
269 static int aac_alloc_comm_space(struct aac_softstate *);
270 static int aac_setup_comm_space(struct aac_softstate *);
271 static void aac_free_comm_space(struct aac_softstate *);
272 static int aac_hba_setup(struct aac_softstate *);
273
274 /*
275 * Sync FIB operation functions
276 */
277 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
278 uint32_t, uint32_t, uint32_t, uint32_t *);
279 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);
280
281 /*
282 * Command queue operation functions
283 */
284 static void aac_cmd_initq(struct aac_cmd_queue *);
285 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
286 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
287 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);
288
289 /*
290 * FIB queue operation functions
291 */
292 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
293 static int aac_fib_dequeue(struct aac_softstate *, int, int *);
294
295 /*
296 * Slot operation functions
297 */
298 static int aac_create_slots(struct aac_softstate *);
299 static void aac_destroy_slots(struct aac_softstate *);
300 static void aac_alloc_fibs(struct aac_softstate *);
301 static void aac_destroy_fibs(struct aac_softstate *);
302 static struct aac_slot *aac_get_slot(struct aac_softstate *);
303 static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
304 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
305 static void aac_free_fib(struct aac_slot *);
306
307 /*
308 * Internal functions
309 */
310 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_cmd *,
311 uint16_t);
312 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
313 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
314 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
315 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
316 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
317 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
318 static void aac_cmd_fib_startstop(struct aac_softstate *, struct aac_cmd *);
319 static void aac_start_waiting_io(struct aac_softstate *);
320 static void aac_drain_comp_q(struct aac_softstate *);
321 int aac_do_io(struct aac_softstate *, struct aac_cmd *);
322 static int aac_sync_fib_slot_bind(struct aac_softstate *, struct aac_cmd *);
323 static void aac_sync_fib_slot_release(struct aac_softstate *, struct aac_cmd *);
324 static void aac_start_io(struct aac_softstate *, struct aac_cmd *);
325 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
326 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
327 static int aac_send_command(struct aac_softstate *, struct aac_slot *);
328 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *);
329 static int aac_dma_sync_ac(struct aac_cmd *);
330 static int aac_shutdown(struct aac_softstate *);
331 static int aac_reset_adapter(struct aac_softstate *);
332 static int aac_do_quiesce(struct aac_softstate *softs);
333 static int aac_do_unquiesce(struct aac_softstate *softs);
334 static void aac_unhold_bus(struct aac_softstate *, int);
335 static void aac_set_throttle(struct aac_softstate *, struct aac_device *,
336 int, int);
337
/*
 * Adapter Initiated FIB (AIF) handling functions
 */
341 static void aac_save_aif(struct aac_softstate *, ddi_acc_handle_t,
342 struct aac_fib *, int);
343 static int aac_handle_aif(struct aac_softstate *, struct aac_aif_command *);
344
345 /*
346 * Event handling related functions
347 */
348 static void aac_timer(void *);
349 static void aac_event_thread(struct aac_softstate *);
350 static void aac_event_disp(struct aac_softstate *, int);
351
352 /*
353 * IOCTL interface related functions
354 */
355 static int aac_open(dev_t *, int, int, cred_t *);
356 static int aac_close(dev_t, int, int, cred_t *);
357 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
358 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);
359
360 /*
361 * FMA Prototypes
362 */
363 static void aac_fm_init(struct aac_softstate *);
364 static void aac_fm_fini(struct aac_softstate *);
365 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
366 int aac_check_acc_handle(ddi_acc_handle_t);
367 int aac_check_dma_handle(ddi_dma_handle_t);
368 void aac_fm_ereport(struct aac_softstate *, char *);
369
370 /*
371 * Auto enumeration functions
372 */
373 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t);
374 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
375 void *, dev_info_t **);
376 static int aac_handle_dr(struct aac_softstate *, int, int, int);
377
378 extern pri_t minclsyspri;
379
380 #ifdef DEBUG
381 /*
382 * UART debug output support
383 */
384
385 #define AAC_PRINT_BUFFER_SIZE 512
386 #define AAC_PRINT_TIMEOUT 250 /* 1/4 sec. = 250 msec. */
387
388 #define AAC_FW_DBG_STRLEN_OFFSET 0x00
389 #define AAC_FW_DBG_FLAGS_OFFSET 0x04
390 #define AAC_FW_DBG_BLED_OFFSET 0x08
391
392 static int aac_get_fw_debug_buffer(struct aac_softstate *);
393 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
394 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);
395
396 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
397 static char aac_fmt[] = " %s";
398 static char aac_fmt_header[] = " %s.%d: %s";
399 static kmutex_t aac_prt_mutex;
400
401 /*
402 * Debug flags to be put into the softstate flags field
403 * when initialized
404 */
405 uint32_t aac_debug_flags =
406 /* AACDB_FLAGS_KERNEL_PRINT | */
407 /* AACDB_FLAGS_FW_PRINT | */
408 /* AACDB_FLAGS_MISC | */
409 /* AACDB_FLAGS_FUNC1 | */
410 /* AACDB_FLAGS_FUNC2 | */
411 /* AACDB_FLAGS_SCMD | */
412 /* AACDB_FLAGS_AIF | */
413 /* AACDB_FLAGS_FIB | */
414 /* AACDB_FLAGS_IOCTL | */
415 0;
416 uint32_t aac_debug_fib_flags =
417 /* AACDB_FLAGS_FIB_RW | */
418 /* AACDB_FLAGS_FIB_IOCTL | */
419 /* AACDB_FLAGS_FIB_SRB | */
420 /* AACDB_FLAGS_FIB_SYNC | */
421 /* AACDB_FLAGS_FIB_HEADER | */
422 /* AACDB_FLAGS_FIB_TIMEOUT | */
423 0;
424
425 #endif /* DEBUG */
426
427 static struct cb_ops aac_cb_ops = {
428 aac_open, /* open */
429 aac_close, /* close */
430 nodev, /* strategy */
431 nodev, /* print */
432 nodev, /* dump */
433 nodev, /* read */
434 nodev, /* write */
435 aac_ioctl, /* ioctl */
436 nodev, /* devmap */
437 nodev, /* mmap */
438 nodev, /* segmap */
439 nochpoll, /* poll */
440 ddi_prop_op, /* cb_prop_op */
441 NULL, /* streamtab */
442 D_64BIT | D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
443 CB_REV, /* cb_rev */
444 nodev, /* async I/O read entry point */
445 nodev /* async I/O write entry point */
446 };
447
static struct dev_ops aac_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* devo_refcnt */
	aac_getinfo,		/* devo_getinfo */
	nulldev,		/* devo_identify */
	nulldev,		/* devo_probe */
	aac_attach,		/* devo_attach */
	aac_detach,		/* devo_detach */
	aac_reset,		/* devo_reset */
	&aac_cb_ops,		/* devo_cb_ops */
	NULL,			/* devo_bus_ops */
	NULL,			/* devo_power */
	aac_quiesce,		/* devo_quiesce */
};
462
463 static struct modldrv aac_modldrv = {
464 &mod_driverops,
465 "AAC Driver " AAC_DRIVER_VERSION,
466 &aac_dev_ops,
467 };
468
469 static struct modlinkage aac_modlinkage = {
470 MODREV_1,
471 &aac_modldrv,
472 NULL
473 };
474
475 static struct aac_softstate *aac_softstatep;
476
/*
 * Supported card list,
 * ordered by vendor id, subvendor id, subdevice id and device id
 */
481 static struct aac_card_type aac_cards[] = {
482 {0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
483 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
484 "Dell", "PERC 3/Di"},
485 {0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
486 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
487 "Dell", "PERC 3/Di"},
488 {0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
489 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
490 "Dell", "PERC 3/Si"},
491 {0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
492 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
493 "Dell", "PERC 3/Di"},
494 {0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
495 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
496 "Dell", "PERC 3/Si"},
497 {0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
498 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
499 "Dell", "PERC 3/Di"},
500 {0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
501 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
502 "Dell", "PERC 3/Di"},
503 {0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
504 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
505 "Dell", "PERC 3/Di"},
506 {0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
507 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
508 "Dell", "PERC 3/Di"},
509 {0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
510 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
511 "Dell", "PERC 3/Di"},
512 {0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
513 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
514 "Dell", "PERC 320/DC"},
515 {0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
516 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},
517
518 {0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
519 0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
520 {0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
521 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
522 {0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
523 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},
524
525 {0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
526 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
527 {0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
528 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
529
530 {0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
531 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
532 "Adaptec", "2200S"},
533 {0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
534 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
535 "Adaptec", "2120S"},
536 {0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
537 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
538 "Adaptec", "2200S"},
539 {0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
540 0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
541 {0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
542 0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
543 {0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
544 0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
545 {0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
546 0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
547 {0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
548 0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
549 {0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
550 0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
551 {0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
552 0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
553 {0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
554 0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
555 {0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
556 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
557 {0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
558 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
559 {0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
560 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
561 {0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
562 0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
563 {0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
564 0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
565 {0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
566 0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
567 {0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
568 0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
569 {0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
570 0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
571 {0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
572 0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
573 {0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
574 0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
575 {0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
576 0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
577 {0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
578 0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
579 {0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
580 0, AAC_TYPE_SATA, "ICP", "9024RO"},
581 {0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
582 0, AAC_TYPE_SATA, "ICP", "9014RO"},
583 {0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
584 0, AAC_TYPE_SATA, "ICP", "9047MA"},
585 {0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
586 0, AAC_TYPE_SATA, "ICP", "9087MA"},
587 {0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
588 0, AAC_TYPE_SAS, "ICP", "9085LI"},
589 {0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX,
590 0, AAC_TYPE_SAS, "ICP", "5085BR"},
591 {0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
592 0, AAC_TYPE_SATA, "ICP", "9067MA"},
593 {0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
594 0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
595 {0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
596 0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
597 {0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
598 0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
599 {0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
600 0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
601 {0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
602 0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
603 {0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
604 0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},
605
606 {0, 0, 0, 0, AAC_HWIF_UNKNOWN,
607 0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
608 };
609
610 /*
611 * Hardware access functions for i960 based cards
612 */
613 static struct aac_interface aac_rx_interface = {
614 aac_rx_get_fwstatus,
615 aac_rx_get_mailbox,
616 aac_rx_set_mailbox
617 };
618
619 /*
620 * Hardware access functions for Rocket based cards
621 */
622 static struct aac_interface aac_rkt_interface = {
623 aac_rkt_get_fwstatus,
624 aac_rkt_get_mailbox,
625 aac_rkt_set_mailbox
626 };
627
628 ddi_device_acc_attr_t aac_acc_attr = {
629 DDI_DEVICE_ATTR_V1,
630 DDI_STRUCTURE_LE_ACC,
631 DDI_STRICTORDER_ACC,
632 DDI_DEFAULT_ACC
633 };
634
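/*
 * Sizes of the host/adapter command and response queues, paired with the
 * doorbell bit used to signal activity on each queue (0 = no notification)
 */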
635 static struct {
636 int size;
637 int notify;
638 } aac_qinfo[] = {
639 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
640 {AAC_HOST_HIGH_CMD_ENTRIES, 0},
641 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
642 {AAC_ADAP_HIGH_CMD_ENTRIES, 0},
643 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
644 {AAC_HOST_HIGH_RESP_ENTRIES, 0},
645 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
646 {AAC_ADAP_HIGH_RESP_ENTRIES, 0}
647 };
648
649 /*
650 * Default aac dma attributes
651 */
652 static ddi_dma_attr_t aac_dma_attr = {
653 DMA_ATTR_V0,
654 0, /* lowest usable address */
655 0xffffffffull, /* high DMA address range */
656 0xffffffffull, /* DMA counter register */
657 AAC_DMA_ALIGN, /* DMA address alignment */
658 1, /* DMA burstsizes */
659 1, /* min effective DMA size */
660 0xffffffffull, /* max DMA xfer size */
661 0xffffffffull, /* segment boundary */
662 1, /* s/g list length */
663 AAC_BLK_SIZE, /* granularity of device */
664 0 /* DMA transfer flags */
665 };
666
667 static int aac_tick = AAC_DEFAULT_TICK; /* tick for the internal timer */
668 static uint32_t aac_timebase = 0; /* internal timer in seconds */
669
670 int
671 _init(void)
672 {
673 int rval = 0;
674
675 #ifdef DEBUG
676 mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
677 #endif
678 DBCALLED(NULL, 1);
679
680 if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
681 sizeof (struct aac_softstate), 0)) != 0)
682 goto error;
683
684 if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
685 ddi_soft_state_fini((void *)&aac_softstatep);
686 goto error;
687 }
688
689 if ((rval = mod_install(&aac_modlinkage)) != 0) {
690 ddi_soft_state_fini((void *)&aac_softstatep);
691 scsi_hba_fini(&aac_modlinkage);
692 goto error;
693 }
694 return (rval);
695
696 error:
697 AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
698 #ifdef DEBUG
699 mutex_destroy(&aac_prt_mutex);
700 #endif
701 return (rval);
702 }
703
704 int
705 _info(struct modinfo *modinfop)
706 {
707 DBCALLED(NULL, 1);
708 return (mod_info(&aac_modlinkage, modinfop));
709 }
710
/*
 * An HBA driver cannot be unloaded without a reboot,
 * so this function is effectively never used.
 */
715 int
716 _fini(void)
717 {
718 int rval;
719
720 DBCALLED(NULL, 1);
721
722 if ((rval = mod_remove(&aac_modlinkage)) != 0)
723 goto error;
724
725 scsi_hba_fini(&aac_modlinkage);
726 ddi_soft_state_fini((void *)&aac_softstatep);
727 #ifdef DEBUG
728 mutex_destroy(&aac_prt_mutex);
729 #endif
730 return (0);
731
732 error:
733 AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
734 return (rval);
735 }
736
737 static int
738 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
739 {
740 int instance, i;
741 struct aac_softstate *softs = NULL;
742 int attach_state = 0;
743 char *data;
744
745 DBCALLED(NULL, 1);
746
747 switch (cmd) {
748 case DDI_ATTACH:
749 break;
750 case DDI_RESUME:
751 return (DDI_FAILURE);
752 default:
753 return (DDI_FAILURE);
754 }
755
756 instance = ddi_get_instance(dip);
757
758 /* Get soft state */
759 if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
760 AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
761 goto error;
762 }
763 softs = ddi_get_soft_state(aac_softstatep, instance);
764 attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;
765
766 softs->instance = instance;
767 softs->devinfo_p = dip;
768 softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
769 softs->addr_dma_attr.dma_attr_granular = 1;
770 softs->acc_attr = aac_acc_attr;
771 softs->reg_attr = aac_acc_attr;
772 softs->card = AAC_UNKNOWN_CARD;
773 #ifdef DEBUG
774 softs->debug_flags = aac_debug_flags;
775 softs->debug_fib_flags = aac_debug_fib_flags;
776 #endif
777
778 /* Initialize FMA */
779 aac_fm_init(softs);
780
781 /* Check the card type */
782 if (aac_check_card_type(softs) == AACERR) {
783 AACDB_PRINT(softs, CE_WARN, "Card not supported");
784 goto error;
785 }
786 /* We have found the right card and everything is OK */
787 attach_state |= AAC_ATTACH_CARD_DETECTED;
788
789 /* Map PCI mem space */
790 if (ddi_regs_map_setup(dip, 1,
791 (caddr_t *)&softs->pci_mem_base_vaddr, 0,
792 softs->map_size_min, &softs->reg_attr,
793 &softs->pci_mem_handle) != DDI_SUCCESS)
794 goto error;
795
796 softs->map_size = softs->map_size_min;
797 attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;
798
799 AAC_DISABLE_INTR(softs);
800
801 /* Init mutexes and condvars */
802 mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
803 DDI_INTR_PRI(softs->intr_pri));
804 mutex_init(&softs->q_comp_mutex, NULL, MUTEX_DRIVER,
805 DDI_INTR_PRI(softs->intr_pri));
806 mutex_init(&softs->time_mutex, NULL, MUTEX_DRIVER,
807 DDI_INTR_PRI(softs->intr_pri));
808 mutex_init(&softs->ev_lock, NULL, MUTEX_DRIVER,
809 DDI_INTR_PRI(softs->intr_pri));
810 mutex_init(&softs->aifq_mutex, NULL,
811 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
812 cv_init(&softs->event, NULL, CV_DRIVER, NULL);
813 cv_init(&softs->sync_fib_cv, NULL, CV_DRIVER, NULL);
814 cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
815 cv_init(&softs->event_wait_cv, NULL, CV_DRIVER, NULL);
816 cv_init(&softs->event_disp_cv, NULL, CV_DRIVER, NULL);
817 cv_init(&softs->aifq_cv, NULL, CV_DRIVER, NULL);
818 attach_state |= AAC_ATTACH_KMUTEX_INITED;
819
820 /* Init the cmd queues */
821 for (i = 0; i < AAC_CMDQ_NUM; i++)
822 aac_cmd_initq(&softs->q_wait[i]);
823 aac_cmd_initq(&softs->q_busy);
824 aac_cmd_initq(&softs->q_comp);
825
826 /* Check for legacy device naming support */
827 softs->legacy = 1; /* default to use legacy name */
828 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
829 "legacy-name-enable", &data) == DDI_SUCCESS)) {
830 if (strcmp(data, "no") == 0) {
831 AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled");
832 softs->legacy = 0;
833 }
834 ddi_prop_free(data);
835 }
836
	/*
	 * Everything needed has been set up by now,
	 * so proceed with the common attach work.
	 */
841 mutex_enter(&softs->io_lock);
842 if (aac_common_attach(softs) == AACERR) {
843 mutex_exit(&softs->io_lock);
844 goto error;
845 }
846 mutex_exit(&softs->io_lock);
847 attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;
848
849 /* Check for buf breakup support */
850 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
851 "breakup-enable", &data) == DDI_SUCCESS)) {
852 if (strcmp(data, "yes") == 0) {
853 AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled");
854 softs->flags |= AAC_FLAGS_BRKUP;
855 }
856 ddi_prop_free(data);
857 }
858 softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer;
859 if (softs->flags & AAC_FLAGS_BRKUP) {
860 softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
861 DDI_PROP_DONTPASS, "dma-max", softs->dma_max);
862 }
863
864 if (aac_hba_setup(softs) != AACOK)
865 goto error;
866 attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;
867
868 /* Create devctl/scsi nodes for cfgadm */
869 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
870 INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
871 AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
872 goto error;
873 }
874 attach_state |= AAC_ATTACH_CREATE_DEVCTL;
875
876 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
877 DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
878 AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
879 goto error;
880 }
881 attach_state |= AAC_ATTACH_CREATE_SCSI;
882
883 /* Create aac node for app. to issue ioctls */
884 if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
885 DDI_PSEUDO, 0) != DDI_SUCCESS) {
886 AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
887 goto error;
888 }
889
890 /* Common attach is OK, so we are attached! */
891 softs->state |= AAC_STATE_RUN;
892
893 /* Create event thread */
894 softs->fibctx_p = &softs->aifctx;
895 if ((softs->event_thread = thread_create(NULL, 0, aac_event_thread,
896 softs, 0, &p0, TS_RUN, minclsyspri)) == NULL) {
897 AACDB_PRINT(softs, CE_WARN, "aif thread create failed");
898 softs->state &= ~AAC_STATE_RUN;
899 goto error;
900 }
901
902 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
903
	/* Set up the periodic timer used for command timeout handling */
905 softs->timeout_id = timeout(aac_timer, (void *)softs,
906 (aac_tick * drv_usectohz(1000000)));
907
	/* Attach is complete; report this instance to the framework */
909 ddi_report_dev(dip);
910 AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
911 return (DDI_SUCCESS);
912
913 error:
914 if (attach_state & AAC_ATTACH_CREATE_SCSI)
915 ddi_remove_minor_node(dip, "scsi");
916 if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
917 ddi_remove_minor_node(dip, "devctl");
918 if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
919 aac_common_detach(softs);
920 if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
921 (void) scsi_hba_detach(dip);
922 scsi_hba_tran_free(AAC_DIP2TRAN(dip));
923 }
924 if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
925 mutex_destroy(&softs->io_lock);
926 mutex_destroy(&softs->q_comp_mutex);
927 mutex_destroy(&softs->time_mutex);
928 mutex_destroy(&softs->ev_lock);
929 mutex_destroy(&softs->aifq_mutex);
930 cv_destroy(&softs->event);
931 cv_destroy(&softs->sync_fib_cv);
932 cv_destroy(&softs->drain_cv);
933 cv_destroy(&softs->event_wait_cv);
934 cv_destroy(&softs->event_disp_cv);
935 cv_destroy(&softs->aifq_cv);
936 }
937 if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
938 ddi_regs_map_free(&softs->pci_mem_handle);
939 aac_fm_fini(softs);
940 if (attach_state & AAC_ATTACH_CARD_DETECTED)
941 softs->card = AACERR;
942 if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
943 ddi_soft_state_free(aac_softstatep, instance);
944 return (DDI_FAILURE);
945 }
946
947 static int
948 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
949 {
950 scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
951 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
952
953 DBCALLED(softs, 1);
954
955 switch (cmd) {
956 case DDI_DETACH:
957 break;
958 case DDI_SUSPEND:
959 return (DDI_FAILURE);
960 default:
961 return (DDI_FAILURE);
962 }
963
964 mutex_enter(&softs->io_lock);
965 AAC_DISABLE_INTR(softs);
966 softs->state = AAC_STATE_STOPPED;
967
968 ddi_remove_minor_node(dip, "aac");
969 ddi_remove_minor_node(dip, "scsi");
970 ddi_remove_minor_node(dip, "devctl");
971 mutex_exit(&softs->io_lock);
972
973 aac_common_detach(softs);
974
975 mutex_enter(&softs->io_lock);
976 (void) scsi_hba_detach(dip);
977 scsi_hba_tran_free(tran);
978 mutex_exit(&softs->io_lock);
979
980 /* Stop timer */
981 mutex_enter(&softs->time_mutex);
982 if (softs->timeout_id) {
983 timeout_id_t tid = softs->timeout_id;
984 softs->timeout_id = 0;
985
986 mutex_exit(&softs->time_mutex);
987 (void) untimeout(tid);
988 mutex_enter(&softs->time_mutex);
989 }
990 mutex_exit(&softs->time_mutex);
991
992 /* Destroy event thread */
993 mutex_enter(&softs->ev_lock);
994 cv_signal(&softs->event_disp_cv);
995 cv_wait(&softs->event_wait_cv, &softs->ev_lock);
996 mutex_exit(&softs->ev_lock);
997
998 cv_destroy(&softs->aifq_cv);
999 cv_destroy(&softs->event_disp_cv);
1000 cv_destroy(&softs->event_wait_cv);
1001 cv_destroy(&softs->drain_cv);
1002 cv_destroy(&softs->sync_fib_cv);
1003 cv_destroy(&softs->event);
1004 mutex_destroy(&softs->aifq_mutex);
1005 mutex_destroy(&softs->ev_lock);
1006 mutex_destroy(&softs->time_mutex);
1007 mutex_destroy(&softs->q_comp_mutex);
1008 mutex_destroy(&softs->io_lock);
1009
1010 ddi_regs_map_free(&softs->pci_mem_handle);
1011 aac_fm_fini(softs);
1012 softs->hwif = AAC_HWIF_UNKNOWN;
1013 softs->card = AAC_UNKNOWN_CARD;
1014 ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));
1015
1016 return (DDI_SUCCESS);
1017 }
1018
1019 /*ARGSUSED*/
1020 static int
1021 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1022 {
1023 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1024
1025 DBCALLED(softs, 1);
1026
1027 mutex_enter(&softs->io_lock);
1028 AAC_DISABLE_INTR(softs);
1029 (void) aac_shutdown(softs);
1030 mutex_exit(&softs->io_lock);
1031
1032 return (DDI_SUCCESS);
1033 }
1034
1035 /*
1036 * quiesce(9E) entry point.
1037 *
1038 * This function is called when the system is single-threaded at high
 * PIL with preemption disabled. Therefore, this function must not
 * block.
1041 *
1042 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1043 * DDI_FAILURE indicates an error condition and should almost never happen.
1044 */
1045 static int
1046 aac_quiesce(dev_info_t *dip)
1047 {
1048 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1049
1050 if (softs == NULL)
1051 return (DDI_FAILURE);
1052
1053 AAC_DISABLE_INTR(softs);
1054
1055 return (DDI_SUCCESS);
1056 }
1057
1058 /* ARGSUSED */
1059 static int
1060 aac_getinfo(dev_info_t *self, ddi_info_cmd_t infocmd, void *arg,
1061 void **result)
1062 {
1063 int error = DDI_SUCCESS;
1064
1065 switch (infocmd) {
1066 case DDI_INFO_DEVT2INSTANCE:
1067 *result = (void *)(intptr_t)(MINOR2INST(getminor((dev_t)arg)));
1068 break;
1069 default:
1070 error = DDI_FAILURE;
1071 }
1072 return (error);
1073 }
1074
1075 /*
1076 * Bring the controller down to a dormant state and detach all child devices.
1077 * This function is called before detach or system shutdown.
1078 * Note: we can assume that the q_wait on the controller is empty, as we
1079 * won't allow shutdown if any device is open.
1080 */
1081 static int
1082 aac_shutdown(struct aac_softstate *softs)
1083 {
1084 ddi_acc_handle_t acc;
1085 struct aac_close_command *cc;
1086 int rval;
1087
1088 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
1089 acc = softs->sync_ac.slotp->fib_acc_handle;
1090
1091 cc = (struct aac_close_command *)&softs->sync_ac.slotp->fibp->data[0];
1092
1093 ddi_put32(acc, &cc->Command, VM_CloseAll);
1094 ddi_put32(acc, &cc->ContainerId, 0xfffffffful);
1095
1096 /* Flush all caches, set FW to write through mode */
1097 rval = aac_sync_fib(softs, ContainerCommand,
1098 AAC_FIB_SIZEOF(struct aac_close_command));
1099 aac_sync_fib_slot_release(softs, &softs->sync_ac);
1100
1101 AACDB_PRINT(softs, CE_NOTE,
1102 "shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
1103 return (rval);
1104 }
1105
1106 static uint_t
1107 aac_softintr(caddr_t arg)
1108 {
1109 struct aac_softstate *softs = (void *)arg;
1110
1111 if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
1112 aac_drain_comp_q(softs);
1113 }
1114 return (DDI_INTR_CLAIMED);
1115 }
1116
1117 /*
1118 * Setup auto sense data for pkt
1119 */
1120 static void
1121 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
1122 uchar_t add_code, uchar_t qual_code, uint64_t info)
1123 {
1124 struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp);
1125
1126 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
1127 pkt->pkt_state |= STATE_ARQ_DONE;
1128
1129 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1130 arqstat->sts_rqpkt_reason = CMD_CMPLT;
1131 arqstat->sts_rqpkt_resid = 0;
1132 arqstat->sts_rqpkt_state =
1133 STATE_GOT_BUS |
1134 STATE_GOT_TARGET |
1135 STATE_SENT_CMD |
1136 STATE_XFERRED_DATA;
1137 arqstat->sts_rqpkt_statistics = 0;
1138
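	/*
	 * Use fixed-format sense when the information field fits into 32
	 * bits; otherwise use descriptor-format sense to carry the 64-bit
	 * value.
	 */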
1139 if (info <= 0xfffffffful) {
1140 arqstat->sts_sensedata.es_valid = 1;
1141 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
1142 arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
1143 arqstat->sts_sensedata.es_key = key;
1144 arqstat->sts_sensedata.es_add_code = add_code;
1145 arqstat->sts_sensedata.es_qual_code = qual_code;
1146
1147 arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
1148 arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
1149 arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF;
1150 arqstat->sts_sensedata.es_info_4 = info & 0xFF;
1151 } else { /* 64-bit LBA */
1152 struct scsi_descr_sense_hdr *dsp;
1153 struct scsi_information_sense_descr *isd;
1154
1155 dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
1156 dsp->ds_class = CLASS_EXTENDED_SENSE;
1157 dsp->ds_code = CODE_FMT_DESCR_CURRENT;
1158 dsp->ds_key = key;
1159 dsp->ds_add_code = add_code;
1160 dsp->ds_qual_code = qual_code;
1161 dsp->ds_addl_sense_length =
1162 sizeof (struct scsi_information_sense_descr);
1163
1164 isd = (struct scsi_information_sense_descr *)(dsp+1);
1165 isd->isd_descr_type = DESCR_INFORMATION;
1166 isd->isd_valid = 1;
1167 isd->isd_information[0] = (info >> 56) & 0xFF;
1168 isd->isd_information[1] = (info >> 48) & 0xFF;
1169 isd->isd_information[2] = (info >> 40) & 0xFF;
1170 isd->isd_information[3] = (info >> 32) & 0xFF;
1171 isd->isd_information[4] = (info >> 24) & 0xFF;
1172 isd->isd_information[5] = (info >> 16) & 0xFF;
1173 isd->isd_information[6] = (info >> 8) & 0xFF;
1174 isd->isd_information[7] = (info) & 0xFF;
1175 }
1176 }
1177
1178 /*
1179 * Setup auto sense data for HARDWARE ERROR
1180 */
1181 static void
1182 aac_set_arq_data_hwerr(struct aac_cmd *acp)
1183 {
1184 union scsi_cdb *cdbp;
1185 uint64_t err_blkno;
1186
1187 cdbp = (void *)acp->pkt->pkt_cdbp;
1188 err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
1189 aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
1190 }
1191
1192 /*
 * Send a command to the adapter via the New Comm. interface
1194 */
1195 static int
1196 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
1197 {
1198 uint32_t index, device;
1199
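	/*
	 * Reading AAC_IQUE returns the offset of a free inbound queue entry;
	 * 0xffffffff means none is available, so retry once before giving up.
	 */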
1200 index = PCI_MEM_GET32(softs, AAC_IQUE);
1201 if (index == 0xffffffffUL) {
1202 index = PCI_MEM_GET32(softs, AAC_IQUE);
1203 if (index == 0xffffffffUL)
1204 return (AACERR);
1205 }
1206
1207 device = index;
1208 PCI_MEM_PUT32(softs, device,
1209 (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
1210 device += 4;
1211 PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
1212 device += 4;
1213 PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
1214 PCI_MEM_PUT32(softs, AAC_IQUE, index);
1215 return (AACOK);
1216 }
1217
1218 static void
1219 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
1220 {
1221 struct aac_device *dvp = acp->dvp;
1222 int q = AAC_CMDQ(acp);
1223
1224 if (acp->slotp) { /* outstanding cmd */
1225 if (!(acp->flags & AAC_CMD_IN_SYNC_SLOT)) {
1226 aac_release_slot(softs, acp->slotp);
1227 acp->slotp = NULL;
1228 }
1229 if (dvp) {
1230 dvp->ncmds[q]--;
1231 if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
1232 dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
1233 aac_set_throttle(softs, dvp, q,
1234 softs->total_slots);
1235 /*
1236 * Setup auto sense data for UNIT ATTENTION
1237 * Each lun should generate a unit attention
1238 * condition when reset.
1239 * Phys. drives are treated as logical ones
1240 * during error recovery.
1241 */
1242 if (dvp->type == AAC_DEV_LD) {
1243 struct aac_container *ctp =
1244 (struct aac_container *)dvp;
1245 if (ctp->reset == 0)
1246 goto noreset;
1247
1248 AACDB_PRINT(softs, CE_NOTE,
1249 "Unit attention: reset");
1250 ctp->reset = 0;
1251 aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION,
1252 0x29, 0x02, 0);
1253 }
1254 }
1255 noreset:
1256 softs->bus_ncmds[q]--;
1257 aac_cmd_delete(&softs->q_busy, acp);
1258 } else { /* cmd in waiting queue */
1259 aac_cmd_delete(&softs->q_wait[q], acp);
1260 }
1261
1262 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
1263 mutex_enter(&softs->q_comp_mutex);
1264 aac_cmd_enqueue(&softs->q_comp, acp);
1265 mutex_exit(&softs->q_comp_mutex);
1266 } else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
1267 cv_broadcast(&softs->event);
1268 }
1269 }
1270
1271 static void
1272 aac_handle_io(struct aac_softstate *softs, int index)
1273 {
1274 struct aac_slot *slotp;
1275 struct aac_cmd *acp;
1276 uint32_t fast;
1277
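	/*
	 * The low two bits of the value from the outbound queue are flag
	 * bits (AIF and fast response); the remaining bits hold the I/O
	 * slot index shifted left by two.
	 */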
1278 fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
1279 index >>= 2;
1280
1281 /* Make sure firmware reported index is valid */
1282 ASSERT(index >= 0 && index < softs->total_slots);
1283 slotp = &softs->io_slot[index];
1284 ASSERT(slotp->index == index);
1285 acp = slotp->acp;
1286
1287 if (acp == NULL || acp->slotp != slotp) {
1288 cmn_err(CE_WARN,
1289 "Firmware error: invalid slot index received from FW");
1290 return;
1291 }
1292
1293 acp->flags |= AAC_CMD_CMPLT;
1294 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1295
1296 if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
1297 /*
		 * For fast response IO, the firmware does not return any FIB
1299 * data, so we need to fill in the FIB status and state so that
1300 * FIB users can handle it correctly.
1301 */
1302 if (fast) {
1303 uint32_t state;
1304
1305 state = ddi_get32(slotp->fib_acc_handle,
1306 &slotp->fibp->Header.XferState);
1307 /*
1308 * Update state for CPU not for device, no DMA sync
1309 * needed
1310 */
1311 ddi_put32(slotp->fib_acc_handle,
1312 &slotp->fibp->Header.XferState,
1313 state | AAC_FIBSTATE_DONEADAP);
1314 ddi_put32(slotp->fib_acc_handle,
1315 (void *)&slotp->fibp->data[0], ST_OK);
1316 }
1317
1318 /* Handle completed ac */
1319 acp->ac_comp(softs, acp);
1320 } else {
1321 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1322 acp->flags |= AAC_CMD_ERR;
1323 if (acp->pkt) {
1324 acp->pkt->pkt_reason = CMD_TRAN_ERR;
1325 acp->pkt->pkt_statistics = 0;
1326 }
1327 }
1328 aac_end_io(softs, acp);
1329 }
1330
/*
 * Interrupt handler for the New Comm. interface
 * The New Comm. interface uses a different interrupt mechanism: there are no
 * explicit message queues, and the driver need only access the mapped PCI
 * memory space to find completed FIBs or AIFs.
 */
1337 static int
1338 aac_process_intr_new(struct aac_softstate *softs)
1339 {
1340 uint32_t index;
1341
1342 index = AAC_OUTB_GET(softs);
1343 if (index == 0xfffffffful)
1344 index = AAC_OUTB_GET(softs);
1345 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1346 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1347 return (0);
1348 }
1349 if (index != 0xfffffffful) {
1350 do {
1351 if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
1352 aac_handle_io(softs, index);
1353 } else if (index != 0xfffffffeul) {
1354 struct aac_fib *fibp; /* FIB in AIF queue */
1355 uint16_t fib_size;
1356
1357 /*
1358 * 0xfffffffe means that the controller wants
1359 * more work, ignore it for now. Otherwise,
1360 * AIF received.
1361 */
1362 index &= ~2;
1363
1364 fibp = (struct aac_fib *)(softs-> \
1365 pci_mem_base_vaddr + index);
1366 fib_size = PCI_MEM_GET16(softs, index + \
1367 offsetof(struct aac_fib, Header.Size));
1368
1369 aac_save_aif(softs, softs->pci_mem_handle,
1370 fibp, fib_size);
1371
1372 /*
1373 * AIF memory is owned by the adapter, so let it
1374 * know that we are done with it.
1375 */
1376 AAC_OUTB_SET(softs, index);
1377 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1378 }
1379
1380 index = AAC_OUTB_GET(softs);
1381 } while (index != 0xfffffffful);
1382
1383 /*
		 * Process waiting cmds before starting new ones to
		 * ensure earlier IOs are serviced first.
1386 */
1387 aac_start_waiting_io(softs);
1388 return (AAC_DB_COMMAND_READY);
1389 } else {
1390 return (0);
1391 }
1392 }
1393
1394 static uint_t
1395 aac_intr_new(caddr_t arg)
1396 {
1397 struct aac_softstate *softs = (void *)arg;
1398 uint_t rval;
1399
1400 mutex_enter(&softs->io_lock);
1401 if (aac_process_intr_new(softs))
1402 rval = DDI_INTR_CLAIMED;
1403 else
1404 rval = DDI_INTR_UNCLAIMED;
1405 mutex_exit(&softs->io_lock);
1406
1407 aac_drain_comp_q(softs);
1408 return (rval);
1409 }
1410
/*
 * Interrupt handler for the old interface
 * Explicit message queues are used to send FIBs to and get completed FIBs
 * from the adapter. The driver and the adapter maintain the queues in a
 * producer/consumer manner. The driver has to query the queues to find
 * completed FIBs.
 */
1417 static int
1418 aac_process_intr_old(struct aac_softstate *softs)
1419 {
1420 uint16_t status;
1421
1422 status = AAC_STATUS_GET(softs);
1423 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1424 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1425 return (DDI_INTR_UNCLAIMED);
1426 }
1427 if (status & AAC_DB_RESPONSE_READY) {
1428 int slot_idx;
1429
1430 /* ACK the intr */
1431 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1432 (void) AAC_STATUS_GET(softs);
1433 while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
1434 &slot_idx) == AACOK)
1435 aac_handle_io(softs, slot_idx);
1436
1437 /*
		 * Process waiting cmds before starting new ones to
		 * ensure earlier IOs are serviced first.
1440 */
1441 aac_start_waiting_io(softs);
1442 return (AAC_DB_RESPONSE_READY);
1443 } else if (status & AAC_DB_COMMAND_READY) {
1444 int aif_idx;
1445
1446 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
1447 (void) AAC_STATUS_GET(softs);
1448 if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
1449 AACOK) {
1450 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
1451 struct aac_fib *fibp; /* FIB in communication space */
1452 uint16_t fib_size;
1453 uint32_t fib_xfer_state;
1454 uint32_t addr, size;
1455
1456 ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));
1457
1458 #define AAC_SYNC_AIF(softs, aif_idx, type) \
1459 { (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
1460 offsetof(struct aac_comm_space, \
1461 adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
1462 (type)); }
1463
1464 /* Copy AIF from adapter to the empty AIF slot */
1465 AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
1466 fibp = &softs->comm_space->adapter_fibs[aif_idx];
1467 fib_size = ddi_get16(acc, &fibp->Header.Size);
1468
1469 aac_save_aif(softs, acc, fibp, fib_size);
1470
1471 /* Complete AIF back to adapter with good status */
1472 fib_xfer_state = LE_32(fibp->Header.XferState);
1473 if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
1474 ddi_put32(acc, &fibp->Header.XferState,
1475 fib_xfer_state | AAC_FIBSTATE_DONEHOST);
1476 ddi_put32(acc, (void *)&fibp->data[0], ST_OK);
1477 if (fib_size > AAC_FIB_SIZE)
1478 ddi_put16(acc, &fibp->Header.Size,
1479 AAC_FIB_SIZE);
1480 AAC_SYNC_AIF(softs, aif_idx,
1481 DDI_DMA_SYNC_FORDEV);
1482 }
1483
1484 /* Put the AIF response on the response queue */
1485 addr = ddi_get32(acc,
1486 &softs->comm_space->adapter_fibs[aif_idx]. \
1487 Header.SenderFibAddress);
1488 size = (uint32_t)ddi_get16(acc,
1489 &softs->comm_space->adapter_fibs[aif_idx]. \
1490 Header.Size);
1491 ddi_put32(acc,
1492 &softs->comm_space->adapter_fibs[aif_idx]. \
1493 Header.ReceiverFibAddress, addr);
1494 if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
1495 addr, size) == AACERR)
1496 cmn_err(CE_NOTE, "!AIF ack failed");
1497 }
1498 return (AAC_DB_COMMAND_READY);
1499 } else if (status & AAC_DB_PRINTF_READY) {
1500 /* ACK the intr */
1501 AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
1502 (void) AAC_STATUS_GET(softs);
1503 (void) ddi_dma_sync(softs->comm_space_dma_handle,
1504 offsetof(struct aac_comm_space, adapter_print_buf),
1505 AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
1506 if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
1507 DDI_SUCCESS)
1508 cmn_err(CE_NOTE, "MSG From Adapter: %s",
1509 softs->comm_space->adapter_print_buf);
1510 else
1511 ddi_fm_service_impact(softs->devinfo_p,
1512 DDI_SERVICE_UNAFFECTED);
1513 AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
1514 return (AAC_DB_PRINTF_READY);
1515 } else if (status & AAC_DB_COMMAND_NOT_FULL) {
1516 /*
1517 * Without these two condition statements, the OS could hang
		 * after a while, especially if there are a lot of AIFs to
1519 * handle, for instance if a drive is pulled from an array
1520 * under heavy load.
1521 */
1522 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1523 return (AAC_DB_COMMAND_NOT_FULL);
1524 } else if (status & AAC_DB_RESPONSE_NOT_FULL) {
1525 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1526 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL);
1527 return (AAC_DB_RESPONSE_NOT_FULL);
1528 } else {
1529 return (0);
1530 }
1531 }
1532
1533 static uint_t
1534 aac_intr_old(caddr_t arg)
1535 {
1536 struct aac_softstate *softs = (void *)arg;
	uint_t rval;
1538
1539 mutex_enter(&softs->io_lock);
1540 if (aac_process_intr_old(softs))
1541 rval = DDI_INTR_CLAIMED;
1542 else
1543 rval = DDI_INTR_UNCLAIMED;
1544 mutex_exit(&softs->io_lock);
1545
1546 aac_drain_comp_q(softs);
1547 return (rval);
1548 }
1549
1550 /*
1551 * Query FIXED or MSI interrupts
1552 */
1553 static int
1554 aac_query_intrs(struct aac_softstate *softs, int intr_type)
1555 {
1556 dev_info_t *dip = softs->devinfo_p;
1557 int avail, actual, count;
1558 int i, flag, ret;
1559
1560 AACDB_PRINT(softs, CE_NOTE,
	    "aac_query_intrs: interrupt type 0x%x", intr_type);
1562
1563 /* Get number of interrupts */
1564 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
1565 if ((ret != DDI_SUCCESS) || (count == 0)) {
1566 AACDB_PRINT(softs, CE_WARN,
1567 "ddi_intr_get_nintrs() failed, ret %d count %d",
1568 ret, count);
1569 return (DDI_FAILURE);
1570 }
1571
1572 /* Get number of available interrupts */
1573 ret = ddi_intr_get_navail(dip, intr_type, &avail);
1574 if ((ret != DDI_SUCCESS) || (avail == 0)) {
1575 AACDB_PRINT(softs, CE_WARN,
1576 "ddi_intr_get_navail() failed, ret %d avail %d",
1577 ret, avail);
1578 return (DDI_FAILURE);
1579 }
1580
1581 AACDB_PRINT(softs, CE_NOTE,
	    "ddi_intr_get_nintrs() returned %d, ddi_intr_get_navail() "
	    "returned %d", count, avail);
1584
1585 /* Allocate an array of interrupt handles */
1586 softs->intr_size = count * sizeof (ddi_intr_handle_t);
1587 softs->htable = kmem_alloc(softs->intr_size, KM_SLEEP);
1588
1589 if (intr_type == DDI_INTR_TYPE_MSI) {
		count = 1;	/* only one vector is needed for now */
1591 flag = DDI_INTR_ALLOC_STRICT;
1592 } else { /* must be DDI_INTR_TYPE_FIXED */
1593 flag = DDI_INTR_ALLOC_NORMAL;
1594 }
1595
1596 /* Call ddi_intr_alloc() */
1597 ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0,
1598 count, &actual, flag);
1599
1600 if ((ret != DDI_SUCCESS) || (actual == 0)) {
1601 AACDB_PRINT(softs, CE_WARN,
1602 "ddi_intr_alloc() failed, ret = %d", ret);
1603 actual = 0;
1604 goto error;
1605 }
1606
1607 if (actual < count) {
1608 AACDB_PRINT(softs, CE_NOTE,
1609 "Requested: %d, Received: %d", count, actual);
1610 goto error;
1611 }
1612
1613 softs->intr_cnt = actual;
1614
1615 /* Get priority for first msi, assume remaining are all the same */
1616 if ((ret = ddi_intr_get_pri(softs->htable[0],
1617 &softs->intr_pri)) != DDI_SUCCESS) {
1618 AACDB_PRINT(softs, CE_WARN,
1619 "ddi_intr_get_pri() failed, ret = %d", ret);
1620 goto error;
1621 }
1622
1623 /* Test for high level mutex */
1624 if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) {
1625 AACDB_PRINT(softs, CE_WARN,
1626 "aac_query_intrs: Hi level interrupt not supported");
1627 goto error;
1628 }
1629
1630 return (DDI_SUCCESS);
1631
1632 error:
1633 /* Free already allocated intr */
1634 for (i = 0; i < actual; i++)
1635 (void) ddi_intr_free(softs->htable[i]);
1636
1637 kmem_free(softs->htable, softs->intr_size);
1638 return (DDI_FAILURE);
1639 }
1640
1641
1642 /*
 * Register handlers for FIXED or MSI interrupts
1644 */
1645 static int
1646 aac_add_intrs(struct aac_softstate *softs)
1647 {
1648 int i, ret;
1649 int actual;
1650 ddi_intr_handler_t *aac_intr;
1651
1652 actual = softs->intr_cnt;
1653 aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ?
1654 aac_intr_new : aac_intr_old);
1655
1656 /* Call ddi_intr_add_handler() */
1657 for (i = 0; i < actual; i++) {
1658 if ((ret = ddi_intr_add_handler(softs->htable[i],
1659 aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) {
1660 cmn_err(CE_WARN,
1661 "ddi_intr_add_handler() failed ret = %d", ret);
1662
1663 /* Free already allocated intr */
1664 for (i = 0; i < actual; i++)
1665 (void) ddi_intr_free(softs->htable[i]);
1666
1667 kmem_free(softs->htable, softs->intr_size);
1668 return (DDI_FAILURE);
1669 }
1670 }
1671
1672 if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap))
1673 != DDI_SUCCESS) {
1674 cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret);
1675
1676 /* Free already allocated intr */
1677 for (i = 0; i < actual; i++)
1678 (void) ddi_intr_free(softs->htable[i]);
1679
1680 kmem_free(softs->htable, softs->intr_size);
1681 return (DDI_FAILURE);
1682 }
1683
1684 return (DDI_SUCCESS);
1685 }
1686
1687 /*
1688 * Unregister FIXED or MSI interrupts
1689 */
1690 static void
1691 aac_remove_intrs(struct aac_softstate *softs)
1692 {
1693 int i;
1694
1695 /* Disable all interrupts */
1696 (void) aac_disable_intrs(softs);
1697 /* Call ddi_intr_remove_handler() */
1698 for (i = 0; i < softs->intr_cnt; i++) {
1699 (void) ddi_intr_remove_handler(softs->htable[i]);
1700 (void) ddi_intr_free(softs->htable[i]);
1701 }
1702
1703 kmem_free(softs->htable, softs->intr_size);
1704 }
1705
1706 static int
1707 aac_enable_intrs(struct aac_softstate *softs)
1708 {
1709 int rval = AACOK;
1710
1711 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1712 /* for MSI block enable */
1713 if (ddi_intr_block_enable(softs->htable, softs->intr_cnt) !=
1714 DDI_SUCCESS)
1715 rval = AACERR;
1716 } else {
1717 int i;
1718
1719 /* Call ddi_intr_enable() for legacy/MSI non block enable */
1720 for (i = 0; i < softs->intr_cnt; i++) {
1721 if (ddi_intr_enable(softs->htable[i]) != DDI_SUCCESS)
1722 rval = AACERR;
1723 }
1724 }
1725 return (rval);
1726 }
1727
1728 static int
1729 aac_disable_intrs(struct aac_softstate *softs)
1730 {
1731 int rval = AACOK;
1732
1733 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1734 /* Call ddi_intr_block_disable() */
1735 if (ddi_intr_block_disable(softs->htable, softs->intr_cnt) !=
1736 DDI_SUCCESS)
1737 rval = AACERR;
1738 } else {
1739 int i;
1740
1741 for (i = 0; i < softs->intr_cnt; i++) {
1742 if (ddi_intr_disable(softs->htable[i]) != DDI_SUCCESS)
1743 rval = AACERR;
1744 }
1745 }
1746 return (rval);
1747 }
1748
1749 /*
1750 * Set pkt_reason and OR in pkt_statistics flag
1751 */
1752 static void
1753 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp,
1754 uchar_t reason, uint_t stat)
1755 {
1756 _NOTE(ARGUNUSED(softs))
1757 if (acp->pkt->pkt_reason == CMD_CMPLT)
1758 acp->pkt->pkt_reason = reason;
1759 acp->pkt->pkt_statistics |= stat;
1760 }
1761
1762 /*
1763 * Handle a finished pkt of soft SCMD
1764 */
1765 static void
1766 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp)
1767 {
1768 ASSERT(acp->pkt);
1769
1770 acp->flags |= AAC_CMD_CMPLT;
1771
1772 acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \
1773 STATE_SENT_CMD | STATE_GOT_STATUS;
1774 if (acp->pkt->pkt_state & STATE_XFERRED_DATA)
1775 acp->pkt->pkt_resid = 0;
1776
1777 /* AAC_CMD_NO_INTR means no complete callback */
1778 if (!(acp->flags & AAC_CMD_NO_INTR)) {
1779 mutex_enter(&softs->q_comp_mutex);
1780 aac_cmd_enqueue(&softs->q_comp, acp);
1781 mutex_exit(&softs->q_comp_mutex);
1782 ddi_trigger_softintr(softs->softint_id);
1783 }
1784 }
1785
1786 /*
1787 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
1788 */
1789
1790 /*
1791 * Handle completed logical device IO command
1792 */
1793 /*ARGSUSED*/
1794 static void
1795 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1796 {
1797 struct aac_slot *slotp = acp->slotp;
1798 struct aac_blockread_response *resp;
1799 uint32_t status;
1800
1801 ASSERT(!(acp->flags & AAC_CMD_SYNC));
1802 ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1803
1804 acp->pkt->pkt_state |= STATE_GOT_STATUS;
1805
	/*
	 * block_read and block_write have similar response headers, so the
	 * blockread response is used to read the status for both.
	 */
1810 resp = (struct aac_blockread_response *)&slotp->fibp->data[0];
1811 status = ddi_get32(slotp->fib_acc_handle, &resp->Status);
1812 if (status == ST_OK) {
1813 acp->pkt->pkt_resid = 0;
1814 acp->pkt->pkt_state |= STATE_XFERRED_DATA;
1815 } else {
1816 aac_set_arq_data_hwerr(acp);
1817 }
1818 }
1819
1820 /*
1821 * Handle completed phys. device IO command
1822 */
1823 static void
1824 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1825 {
1826 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
1827 struct aac_fib *fibp = acp->slotp->fibp;
1828 struct scsi_pkt *pkt = acp->pkt;
1829 struct aac_srb_reply *resp;
1830 uint32_t resp_status;
1831
1832 ASSERT(!(acp->flags & AAC_CMD_SYNC));
1833 ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1834
1835 resp = (struct aac_srb_reply *)&fibp->data[0];
1836 resp_status = ddi_get32(acc, &resp->status);
1837
1838 /* First check FIB status */
1839 if (resp_status == ST_OK) {
1840 uint32_t scsi_status;
1841 uint32_t srb_status;
1842 uint32_t data_xfer_length;
1843
1844 scsi_status = ddi_get32(acc, &resp->scsi_status);
1845 srb_status = ddi_get32(acc, &resp->srb_status);
1846 data_xfer_length = ddi_get32(acc, &resp->data_xfer_length);
1847
1848 *pkt->pkt_scbp = (uint8_t)scsi_status;
1849 pkt->pkt_state |= STATE_GOT_STATUS;
1850 if (scsi_status == STATUS_GOOD) {
1851 uchar_t cmd = ((union scsi_cdb *)(void *)
1852 (pkt->pkt_cdbp))->scc_cmd;
1853
1854 /* Next check SRB status */
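			/* (low 6 bits: status code, upper bits: flags) */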
1855 switch (srb_status & 0x3f) {
1856 case SRB_STATUS_DATA_OVERRUN:
1857 AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \
1858 "scmd=%d, xfer=%d, buflen=%d",
1859 (uint32_t)cmd, data_xfer_length,
1860 acp->bcount);
1861
1862 switch (cmd) {
1863 case SCMD_READ:
1864 case SCMD_WRITE:
1865 case SCMD_READ_G1:
1866 case SCMD_WRITE_G1:
1867 case SCMD_READ_G4:
1868 case SCMD_WRITE_G4:
1869 case SCMD_READ_G5:
1870 case SCMD_WRITE_G5:
1871 aac_set_pkt_reason(softs, acp,
1872 CMD_DATA_OVR, 0);
1873 break;
1874 }
1875 /*FALLTHRU*/
1876 case SRB_STATUS_ERROR_RECOVERY:
1877 case SRB_STATUS_PENDING:
1878 case SRB_STATUS_SUCCESS:
1879 /*
1880 * pkt_resid should only be calculated if the
1881 * status is ERROR_RECOVERY/PENDING/SUCCESS/
1882 * OVERRUN/UNDERRUN
1883 */
1884 if (data_xfer_length) {
1885 pkt->pkt_state |= STATE_XFERRED_DATA;
1886 pkt->pkt_resid = acp->bcount - \
1887 data_xfer_length;
1888 ASSERT(pkt->pkt_resid >= 0);
1889 }
1890 break;
1891 case SRB_STATUS_ABORTED:
1892 AACDB_PRINT(softs, CE_NOTE,
1893 "SRB_STATUS_ABORTED, xfer=%d, resid=%d",
1894 data_xfer_length, pkt->pkt_resid);
1895 aac_set_pkt_reason(softs, acp, CMD_ABORTED,
1896 STAT_ABORTED);
1897 break;
1898 case SRB_STATUS_ABORT_FAILED:
1899 AACDB_PRINT(softs, CE_NOTE,
1900 "SRB_STATUS_ABORT_FAILED, xfer=%d, " \
1901 "resid=%d", data_xfer_length,
1902 pkt->pkt_resid);
1903 aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL,
1904 0);
1905 break;
1906 case SRB_STATUS_PARITY_ERROR:
1907 AACDB_PRINT(softs, CE_NOTE,
1908 "SRB_STATUS_PARITY_ERROR, xfer=%d, " \
1909 "resid=%d", data_xfer_length,
1910 pkt->pkt_resid);
1911 aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0);
1912 break;
1913 case SRB_STATUS_NO_DEVICE:
1914 case SRB_STATUS_INVALID_PATH_ID:
1915 case SRB_STATUS_INVALID_TARGET_ID:
1916 case SRB_STATUS_INVALID_LUN:
1917 case SRB_STATUS_SELECTION_TIMEOUT:
1918 #ifdef DEBUG
1919 if (AAC_DEV_IS_VALID(acp->dvp)) {
1920 AACDB_PRINT(softs, CE_NOTE,
1921 "SRB_STATUS_NO_DEVICE(%d), " \
1922 "xfer=%d, resid=%d ",
1923 srb_status & 0x3f,
1924 data_xfer_length, pkt->pkt_resid);
1925 }
1926 #endif
1927 aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0);
1928 break;
1929 case SRB_STATUS_COMMAND_TIMEOUT:
1930 case SRB_STATUS_TIMEOUT:
1931 AACDB_PRINT(softs, CE_NOTE,
1932 "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \
1933 "resid=%d", data_xfer_length,
1934 pkt->pkt_resid);
1935 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
1936 STAT_TIMEOUT);
1937 break;
1938 case SRB_STATUS_BUS_RESET:
1939 AACDB_PRINT(softs, CE_NOTE,
1940 "SRB_STATUS_BUS_RESET, xfer=%d, " \
1941 "resid=%d", data_xfer_length,
1942 pkt->pkt_resid);
1943 aac_set_pkt_reason(softs, acp, CMD_RESET,
1944 STAT_BUS_RESET);
1945 break;
1946 default:
1947 AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \
1948 "xfer=%d, resid=%d", srb_status & 0x3f,
1949 data_xfer_length, pkt->pkt_resid);
1950 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1951 break;
1952 }
1953 } else if (scsi_status == STATUS_CHECK) {
1954 /* CHECK CONDITION */
1955 struct scsi_arq_status *arqstat =
1956 (void *)(pkt->pkt_scbp);
1957 uint32_t sense_data_size;
1958
1959 pkt->pkt_state |= STATE_ARQ_DONE;
1960
1961 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1962 arqstat->sts_rqpkt_reason = CMD_CMPLT;
1963 arqstat->sts_rqpkt_resid = 0;
1964 arqstat->sts_rqpkt_state =
1965 STATE_GOT_BUS |
1966 STATE_GOT_TARGET |
1967 STATE_SENT_CMD |
1968 STATE_XFERRED_DATA;
1969 arqstat->sts_rqpkt_statistics = 0;
1970
1971 sense_data_size = ddi_get32(acc,
1972 &resp->sense_data_size);
1973 ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE);
1974 AACDB_PRINT(softs, CE_NOTE,
1975 "CHECK CONDITION: sense len=%d, xfer len=%d",
1976 sense_data_size, data_xfer_length);
1977
1978 if (sense_data_size > SENSE_LENGTH)
1979 sense_data_size = SENSE_LENGTH;
1980 ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata,
1981 (uint8_t *)resp->sense_data, sense_data_size,
1982 DDI_DEV_AUTOINCR);
1983 } else {
			AACDB_PRINT(softs, CE_WARN, "invalid scsi status: " \
1985 "scsi_status=%d, srb_status=%d",
1986 scsi_status, srb_status);
1987 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1988 }
1989 } else {
1990 AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d",
1991 resp_status);
1992 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1993 }
1994 }
1995
1996 /*
1997 * Handle completed IOCTL command
1998 */
1999 /*ARGSUSED*/
2000 void
2001 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2002 {
2003 struct aac_slot *slotp = acp->slotp;
2004
2005 /*
2006 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
2007 * may wait on softs->event, so use cv_broadcast() instead
2008 * of cv_signal().
2009 */
2010 ASSERT(acp->flags & AAC_CMD_SYNC);
2011 ASSERT(acp->flags & AAC_CMD_NO_CB);
2012
2013 /* Get the size of the response FIB from its FIB.Header.Size field */
2014 acp->fib_size = ddi_get16(slotp->fib_acc_handle,
2015 &slotp->fibp->Header.Size);
2016
2017 ASSERT(acp->fib_size <= softs->aac_max_fib_size);
2018 ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp,
2019 (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR);
2020 }
2021
2022 /*
2023 * Handle completed sync fib command
2024 */
2025 /*ARGSUSED*/
2026 void
2027 aac_sync_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2028 {
2029 }
2030
2031 /*
2032 * Handle completed Flush command
2033 */
2034 /*ARGSUSED*/
2035 static void
2036 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2037 {
2038 struct aac_slot *slotp = acp->slotp;
2039 ddi_acc_handle_t acc = slotp->fib_acc_handle;
2040 struct aac_synchronize_reply *resp;
2041 uint32_t status;
2042
2043 ASSERT(!(acp->flags & AAC_CMD_SYNC));
2044
2045 acp->pkt->pkt_state |= STATE_GOT_STATUS;
2046
2047 resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0];
2048 status = ddi_get32(acc, &resp->Status);
2049 if (status != CT_OK)
2050 aac_set_arq_data_hwerr(acp);
2051 }
2052
2053 /*ARGSUSED*/
2054 static void
2055 aac_startstop_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2056 {
2057 struct aac_slot *slotp = acp->slotp;
2058 ddi_acc_handle_t acc = slotp->fib_acc_handle;
2059 struct aac_Container_resp *resp;
2060 uint32_t status;
2061
2062 ASSERT(!(acp->flags & AAC_CMD_SYNC));
2063
2064 acp->pkt->pkt_state |= STATE_GOT_STATUS;
2065
2066 resp = (struct aac_Container_resp *)&slotp->fibp->data[0];
2067 status = ddi_get32(acc, &resp->Status);
2068 if (status != 0) {
2069 AACDB_PRINT(softs, CE_WARN, "Cannot start/stop a unit");
2070 aac_set_arq_data_hwerr(acp);
2071 }
2072 }
2073
2074 /*
2075 * Access PCI space to see if the driver can support the card
2076 */
2077 static int
2078 aac_check_card_type(struct aac_softstate *softs)
2079 {
2080 ddi_acc_handle_t pci_config_handle;
2081 int card_index;
2082 uint32_t pci_cmd;
2083
2084 /* Map pci configuration space */
2085 if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) !=
2086 DDI_SUCCESS) {
2087 AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space");
2088 return (AACERR);
2089 }
2090
2091 softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID);
2092 softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID);
2093 softs->subvendid = pci_config_get16(pci_config_handle,
2094 PCI_CONF_SUBVENID);
2095 softs->subsysid = pci_config_get16(pci_config_handle,
2096 PCI_CONF_SUBSYSID);
2097
2098 card_index = 0;
2099 while (!CARD_IS_UNKNOWN(card_index)) {
2100 if ((aac_cards[card_index].vendor == softs->vendid) &&
2101 (aac_cards[card_index].device == softs->devid) &&
2102 (aac_cards[card_index].subvendor == softs->subvendid) &&
2103 (aac_cards[card_index].subsys == softs->subsysid)) {
2104 break;
2105 }
2106 card_index++;
2107 }
2108
2109 softs->card = card_index;
2110 softs->hwif = aac_cards[card_index].hwif;
2111
2112 /*
2113 * Unknown aac card
2114 * do a generic match based on the VendorID and DeviceID to
2115 * support the new cards in the aac family
2116 */
2117 if (CARD_IS_UNKNOWN(card_index)) {
2118 if (softs->vendid != 0x9005) {
2119 AACDB_PRINT(softs, CE_WARN,
2120 "Unknown vendor 0x%x", softs->vendid);
2121 goto error;
2122 }
2123 switch (softs->devid) {
2124 case 0x285:
2125 softs->hwif = AAC_HWIF_I960RX;
2126 break;
2127 case 0x286:
2128 softs->hwif = AAC_HWIF_RKT;
2129 break;
2130 default:
2131 AACDB_PRINT(softs, CE_WARN,
2132 "Unknown device \"pci9005,%x\"", softs->devid);
2133 goto error;
2134 }
2135 }
2136
2137 /* Set hardware dependent interface */
2138 switch (softs->hwif) {
2139 case AAC_HWIF_I960RX:
2140 softs->aac_if = aac_rx_interface;
2141 softs->map_size_min = AAC_MAP_SIZE_MIN_RX;
2142 break;
2143 case AAC_HWIF_RKT:
2144 softs->aac_if = aac_rkt_interface;
2145 softs->map_size_min = AAC_MAP_SIZE_MIN_RKT;
2146 break;
2147 default:
2148 AACDB_PRINT(softs, CE_WARN,
2149 "Unknown hardware interface %d", softs->hwif);
2150 goto error;
2151 }
2152
2153 /* Set card names */
	(void) strncpy(softs->vendor_name, aac_cards[card_index].vid,
	    AAC_VENDOR_LEN);
	(void) strncpy(softs->product_name, aac_cards[card_index].desc,
	    AAC_PRODUCT_LEN);
2158
2159 /* Set up quirks */
2160 softs->flags = aac_cards[card_index].quirks;
2161
2162 /* Force the busmaster enable bit on */
2163 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2164 if ((pci_cmd & PCI_COMM_ME) == 0) {
2165 pci_cmd |= PCI_COMM_ME;
2166 pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd);
2167 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2168 if ((pci_cmd & PCI_COMM_ME) == 0) {
2169 cmn_err(CE_CONT, "?Cannot enable busmaster bit");
2170 goto error;
2171 }
2172 }
2173
2174 /* Set memory base to map */
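	/* (mask off the BAR's low flag bits to get the base address) */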
2175 softs->pci_mem_base_paddr = 0xfffffff0UL & \
2176 pci_config_get32(pci_config_handle, PCI_CONF_BASE0);
2177
2178 pci_config_teardown(&pci_config_handle);
2179
2180 return (AACOK); /* card type detected */
2181 error:
2182 pci_config_teardown(&pci_config_handle);
2183 return (AACERR); /* no matched card found */
2184 }
2185
2186 /*
2187 * Do the usual interrupt handler setup stuff.
2188 */
2189 static int
2190 aac_register_intrs(struct aac_softstate *softs)
2191 {
2192 dev_info_t *dip;
2193 int intr_types;
2194
2195 ASSERT(softs->devinfo_p);
2196 dip = softs->devinfo_p;
2197
	/* Get the type of device interrupts */
2199 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
2200 AACDB_PRINT(softs, CE_WARN,
2201 "ddi_intr_get_supported_types() failed");
2202 return (AACERR);
2203 }
2204 AACDB_PRINT(softs, CE_NOTE,
2205 "ddi_intr_get_supported_types() ret: 0x%x", intr_types);
2206
2207 /* Query interrupt, and alloc/init all needed struct */
2208 if (intr_types & DDI_INTR_TYPE_MSI) {
2209 if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI)
2210 != DDI_SUCCESS) {
2211 AACDB_PRINT(softs, CE_WARN,
2212 "MSI interrupt query failed");
2213 return (AACERR);
2214 }
2215 softs->intr_type = DDI_INTR_TYPE_MSI;
2216 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
2217 if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED)
2218 != DDI_SUCCESS) {
2219 AACDB_PRINT(softs, CE_WARN,
2220 "FIXED interrupt query failed");
2221 return (AACERR);
2222 }
2223 softs->intr_type = DDI_INTR_TYPE_FIXED;
2224 } else {
2225 AACDB_PRINT(softs, CE_WARN,
		    "Device supports neither FIXED nor MSI interrupts");
2227 return (AACERR);
2228 }
2229
2230 /* Connect interrupt handlers */
2231 if (aac_add_intrs(softs) != DDI_SUCCESS) {
2232 AACDB_PRINT(softs, CE_WARN,
2233 "Interrupt registration failed, intr type: %s",
2234 softs->intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED");
2235 return (AACERR);
2236 }
2237 (void) aac_enable_intrs(softs);
2238
2239 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id,
2240 NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) {
2241 AACDB_PRINT(softs, CE_WARN,
		    "Cannot set up soft interrupt handler!");
2243 aac_remove_intrs(softs);
2244 return (AACERR);
2245 }
2246
2247 return (AACOK);
2248 }
2249
2250 static void
2251 aac_unregister_intrs(struct aac_softstate *softs)
2252 {
2253 aac_remove_intrs(softs);
2254 ddi_remove_softintr(softs->softint_id);
2255 }
2256
2257 /*
2258 * Check the firmware to determine the features to support and the FIB
2259 * parameters to use.
2260 */
2261 static int
2262 aac_check_firmware(struct aac_softstate *softs)
2263 {
2264 uint32_t options;
2265 uint32_t atu_size;
2266 ddi_acc_handle_t pci_handle;
2267 uint8_t *data;
2268 uint32_t max_fibs;
2269 uint32_t max_fib_size;
2270 uint32_t sg_tablesize;
2271 uint32_t max_sectors;
2272 uint32_t status;
2273
2274 /* Get supported options */
2275 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0,
2276 &status)) != AACOK) {
2277 if (status != SRB_STATUS_INVALID_REQUEST) {
2278 cmn_err(CE_CONT,
2279 "?Fatal error: request adapter info error");
2280 return (AACERR);
2281 }
2282 options = 0;
2283 atu_size = 0;
2284 } else {
2285 options = AAC_MAILBOX_GET(softs, 1);
2286 atu_size = AAC_MAILBOX_GET(softs, 2);
2287 }
2288
2289 if (softs->state & AAC_STATE_RESET) {
2290 if ((softs->support_opt == options) &&
2291 (softs->atu_size == atu_size))
2292 return (AACOK);
2293
2294 cmn_err(CE_WARN,
2295 "?Fatal error: firmware changed, system needs reboot");
2296 return (AACERR);
2297 }
2298
2299 /*
2300 * The following critical settings are initialized only once during
2301 * driver attachment.
2302 */
2303 softs->support_opt = options;
2304 softs->atu_size = atu_size;
2305
2306 /* Process supported options */
2307 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
2308 (softs->flags & AAC_FLAGS_NO4GB) == 0) {
2309 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window");
2310 softs->flags |= AAC_FLAGS_4GB_WINDOW;
2311 } else {
2312 /*
2313 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space
2314 * only. IO is handled by the DMA engine which does not suffer
2315 * from the ATU window programming workarounds necessary for
2316 * CPU copy operations.
2317 */
2318 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull;
2319 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull;
2320 }
2321
2322 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) {
2323 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address");
2324 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
2325 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull;
2326 softs->flags |= AAC_FLAGS_SG_64BIT;
2327 }
2328
2329 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) {
2330 softs->flags |= AAC_FLAGS_ARRAY_64BIT;
2331 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size");
2332 }
2333
2334 if (options & AAC_SUPPORTED_NONDASD) {
2335 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0,
2336 "nondasd-enable", (char **)&data) == DDI_SUCCESS)) {
2337 if (strcmp((char *)data, "yes") == 0) {
2338 AACDB_PRINT(softs, CE_NOTE,
2339 "!Enable Non-DASD access");
2340 softs->flags |= AAC_FLAGS_NONDASD;
2341 }
2342 ddi_prop_free(data);
2343 }
2344 }
2345
2346 /* Read preferred settings */
2347 max_fib_size = 0;
2348 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF,
2349 0, 0, 0, 0, NULL)) == AACOK) {
2350 options = AAC_MAILBOX_GET(softs, 1);
2351 max_fib_size = (options & 0xffff);
2352 max_sectors = (options >> 16) << 1;
2353 options = AAC_MAILBOX_GET(softs, 2);
2354 sg_tablesize = (options >> 16);
2355 options = AAC_MAILBOX_GET(softs, 3);
2356 max_fibs = (options & 0xffff);
2357 }
2358
2359 /* Enable new comm. and rawio at the same time */
2360 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) &&
2361 (max_fib_size != 0)) {
		/* Remap registers using the full ATU window if it is larger */
2363 if ((atu_size > softs->map_size) &&
2364 (ddi_regs_map_setup(softs->devinfo_p, 1,
2365 (caddr_t *)&data, 0, atu_size, &softs->reg_attr,
2366 &pci_handle) == DDI_SUCCESS)) {
2367 ddi_regs_map_free(&softs->pci_mem_handle);
2368 softs->pci_mem_handle = pci_handle;
2369 softs->pci_mem_base_vaddr = data;
2370 softs->map_size = atu_size;
2371 }
2372 if (atu_size == softs->map_size) {
2373 softs->flags |= AAC_FLAGS_NEW_COMM;
2374 AACDB_PRINT(softs, CE_NOTE,
2375 "!Enable New Comm. interface");
2376 }
2377 }
2378
2379 /* Set FIB parameters */
2380 if (softs->flags & AAC_FLAGS_NEW_COMM) {
2381 softs->aac_max_fibs = max_fibs;
2382 softs->aac_max_fib_size = max_fib_size;
2383 softs->aac_max_sectors = max_sectors;
2384 softs->aac_sg_tablesize = sg_tablesize;
2385
2386 softs->flags |= AAC_FLAGS_RAW_IO;
2387 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO");
2388 } else {
2389 softs->aac_max_fibs =
2390 (softs->flags & AAC_FLAGS_256FIBS) ? 256 : 512;
2391 softs->aac_max_fib_size = AAC_FIB_SIZE;
2392 softs->aac_max_sectors = 128; /* 64K */
2393 if (softs->flags & AAC_FLAGS_17SG)
2394 softs->aac_sg_tablesize = 17;
2395 else if (softs->flags & AAC_FLAGS_34SG)
2396 softs->aac_sg_tablesize = 34;
2397 else if (softs->flags & AAC_FLAGS_SG_64BIT)
2398 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2399 sizeof (struct aac_blockwrite64) +
2400 sizeof (struct aac_sg_entry64)) /
2401 sizeof (struct aac_sg_entry64);
2402 else
2403 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2404 sizeof (struct aac_blockwrite) +
2405 sizeof (struct aac_sg_entry)) /
2406 sizeof (struct aac_sg_entry);
2407 }
2408
2409 if ((softs->flags & AAC_FLAGS_RAW_IO) &&
2410 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) {
2411 softs->flags |= AAC_FLAGS_LBA_64BIT;
2412 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array");
2413 }
2414 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize;
2415 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9;
2416 /*
2417 * 64K maximum segment size in scatter gather list is controlled by
2418 * the NEW_COMM bit in the adapter information. If not set, the card
2419 * can only accept a maximum of 64K. It is not recommended to permit
2420 * more than 128KB of total transfer size to the adapters because
2421 * performance is negatively impacted.
2422 *
2423 * For new comm, segment size equals max xfer size. For old comm,
2424 * we use 64K for both.
2425 */
2426 softs->buf_dma_attr.dma_attr_count_max =
2427 softs->buf_dma_attr.dma_attr_maxxfer - 1;
2428
2429 /* Setup FIB operations */
2430 if (softs->flags & AAC_FLAGS_RAW_IO)
2431 softs->aac_cmd_fib = aac_cmd_fib_rawio;
2432 else if (softs->flags & AAC_FLAGS_SG_64BIT)
2433 softs->aac_cmd_fib = aac_cmd_fib_brw64;
2434 else
2435 softs->aac_cmd_fib = aac_cmd_fib_brw;
2436 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? \
2437 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32;
2438
2439 /* 64-bit LBA needs descriptor format sense data */
2440 softs->slen = sizeof (struct scsi_arq_status);
2441 if ((softs->flags & AAC_FLAGS_LBA_64BIT) &&
2442 softs->slen < AAC_ARQ64_LENGTH)
2443 softs->slen = AAC_ARQ64_LENGTH;
2444
2445 AACDB_PRINT(softs, CE_NOTE,
2446 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d",
2447 softs->aac_max_fibs, softs->aac_max_fib_size,
2448 softs->aac_max_sectors, softs->aac_sg_tablesize);
2449
2450 return (AACOK);
2451 }
2452
2453 static void
2454 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0,
2455 struct FsaRev *fsarev1)
2456 {
2457 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
2458
2459 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash);
2460 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type);
2461 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor);
2462 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major);
2463 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber);
2464 }
2465
2466 /*
2467 * The following function comes from Adaptec:
2468 *
2469 * Query adapter information and supplement adapter information
2470 */
2471 static int
2472 aac_get_adapter_info(struct aac_softstate *softs,
2473 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr)
2474 {
2475 struct aac_cmd *acp = &softs->sync_ac;
2476 ddi_acc_handle_t acc;
2477 struct aac_fib *fibp;
2478 struct aac_adapter_info *ainfp;
2479 struct aac_supplement_adapter_info *sinfp;
2480 int rval;
2481
2482 (void) aac_sync_fib_slot_bind(softs, acp);
2483 acc = acp->slotp->fib_acc_handle;
2484 fibp = acp->slotp->fibp;
2485
2486 ddi_put8(acc, &fibp->data[0], 0);
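	/* RequestAdapterInfo has no payload; just clear the first byte */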
2487 if (aac_sync_fib(softs, RequestAdapterInfo,
2488 AAC_FIB_SIZEOF(struct aac_adapter_info)) != AACOK) {
2489 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed");
2490 rval = AACERR;
2491 goto finish;
2492 }
2493 ainfp = (struct aac_adapter_info *)fibp->data;
2494 if (ainfr) {
2495 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2496 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase);
2497 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture);
2498 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant);
2499 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed);
2500 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem);
2501 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem);
2502 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem);
2503 aac_fsa_rev(softs, &ainfp->KernelRevision,
2504 &ainfr->KernelRevision);
2505 aac_fsa_rev(softs, &ainfp->MonitorRevision,
2506 &ainfr->MonitorRevision);
2507 aac_fsa_rev(softs, &ainfp->HardwareRevision,
2508 &ainfr->HardwareRevision);
2509 aac_fsa_rev(softs, &ainfp->BIOSRevision,
2510 &ainfr->BIOSRevision);
2511 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled);
2512 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask);
2513 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber);
2514 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform);
2515 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2516 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant);
2517 }
2518 if (sinfr) {
2519 if (!(softs->support_opt &
2520 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) {
2521 AACDB_PRINT(softs, CE_WARN,
2522 "SupplementAdapterInfo not supported");
2523 rval = AACERR;
2524 goto finish;
2525 }
2526 ddi_put8(acc, &fibp->data[0], 0);
2527 if (aac_sync_fib(softs, RequestSupplementAdapterInfo,
2528 AAC_FIB_SIZEOF(struct aac_supplement_adapter_info))
2529 != AACOK) {
2530 AACDB_PRINT(softs, CE_WARN,
2531 "RequestSupplementAdapterInfo failed");
2532 rval = AACERR;
2533 goto finish;
2534 }
2535 sinfp = (struct aac_supplement_adapter_info *)fibp->data;
2536 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1);
2537 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2);
2538 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize);
2539 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId);
2540 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts);
2541 AAC_GET_FIELD32(acc, sinfr, sinfp, Version);
2542 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits);
2543 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber);
2544 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3);
2545 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12);
2546 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts);
2547 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo,
2548 sizeof (struct vpd_info));
2549 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision,
2550 &sinfr->FlashFirmwareRevision);
2551 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions);
2552 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision,
2553 &sinfr->FlashFirmwareBootRevision);
2554 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo,
2555 MFG_PCBA_SERIAL_NUMBER_WIDTH);
2556 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0],
2557 MFG_WWN_WIDTH);
2558 AAC_GET_FIELD32(acc, sinfr, sinfp, SupportedOptions2);
2559 AAC_GET_FIELD32(acc, sinfr, sinfp, ExpansionFlag);
2560 if (sinfr->ExpansionFlag == 1) {
2561 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits3);
2562 AAC_GET_FIELD32(acc, sinfr, sinfp,
2563 SupportedPerformanceMode);
2564 AAC_REP_GET_FIELD32(acc, sinfr, sinfp,
2565 ReservedGrowth[0], 80);
2566 }
2567 }
2568 rval = AACOK;
2569 finish:
2570 aac_sync_fib_slot_release(softs, acp);
2571 return (rval);
2572 }
2573
2574 static int
2575 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max,
2576 uint32_t *tgt_max)
2577 {
2578 struct aac_cmd *acp = &softs->sync_ac;
2579 ddi_acc_handle_t acc;
2580 struct aac_fib *fibp;
2581 struct aac_ctcfg *c_cmd;
2582 struct aac_ctcfg_resp *c_resp;
2583 uint32_t scsi_method_id;
2584 struct aac_bus_info *cmd;
2585 struct aac_bus_info_response *resp;
2586 int rval;
2587
2588 (void) aac_sync_fib_slot_bind(softs, acp);
2589 acc = acp->slotp->fib_acc_handle;
2590 fibp = acp->slotp->fibp;
2591
2592 /* Detect MethodId */
2593 c_cmd = (struct aac_ctcfg *)&fibp->data[0];
2594 ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig);
2595 ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD);
2596 ddi_put32(acc, &c_cmd->param, 0);
2597 rval = aac_sync_fib(softs, ContainerCommand,
2598 AAC_FIB_SIZEOF(struct aac_ctcfg));
2599 c_resp = (struct aac_ctcfg_resp *)&fibp->data[0];
2600 if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) {
2601 AACDB_PRINT(softs, CE_WARN,
2602 "VM_ContainerConfig command fail");
2603 rval = AACERR;
2604 goto finish;
2605 }
2606 scsi_method_id = ddi_get32(acc, &c_resp->param);
2607
2608 /* Detect phys. bus count and max. target id first */
2609 cmd = (struct aac_bus_info *)&fibp->data[0];
2610 ddi_put32(acc, &cmd->Command, VM_Ioctl);
2611 ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */
2612 ddi_put32(acc, &cmd->MethodId, scsi_method_id);
2613 ddi_put32(acc, &cmd->ObjectId, 0);
2614 ddi_put32(acc, &cmd->CtlCmd, GetBusInfo);
2615 /*
	 * For VM_Ioctl, the firmware uses the Header.Size filled in by the
	 * driver as the size to be returned. Therefore the driver has to use
2618 * sizeof (struct aac_bus_info_response) because it is greater than
2619 * sizeof (struct aac_bus_info).
2620 */
2621 rval = aac_sync_fib(softs, ContainerCommand,
2622 AAC_FIB_SIZEOF(struct aac_bus_info_response));
2623 resp = (struct aac_bus_info_response *)cmd;
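	/* (the response overlays the command in the same FIB data buffer) */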
2624
2625 /* Scan all coordinates with INQUIRY */
2626 if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) {
2627 AACDB_PRINT(softs, CE_WARN, "GetBusInfo command fail");
2628 rval = AACERR;
2629 goto finish;
2630 }
2631 *bus_max = ddi_get32(acc, &resp->BusCount);
2632 *tgt_max = ddi_get32(acc, &resp->TargetsPerBus);
2633
2634 finish:
2635 aac_sync_fib_slot_release(softs, acp);
	return (rval);
2637 }
2638
2639 /*
2640 * The following function comes from Adaptec:
2641 *
2642 * Routine to be called during initialization of communications with
2643 * the adapter to handle possible adapter configuration issues. When
2644 * the adapter first boots up, it examines attached drives, etc, and
2645 * potentially comes up with a new or revised configuration (relative to
 * what's stored in its NVRAM). Additionally it may discover problems
2647 * that make the current physical configuration unworkable (currently
2648 * applicable only to cluster configuration issues).
2649 *
 * If there are no configuration issues or the issues are considered
 * trivial by the adapter, it will set its configuration status to
 * "FSACT_CONTINUE" and execute the "commit configuration" action
 * automatically on its own.
2654 *
 * However, if there are non-trivial issues, the adapter will set its
 * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT"
2657 * and wait for some agent on the host to issue the "\ContainerCommand
2658 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the
2659 * adapter to commit the new/updated configuration and enable
2660 * un-inhibited operation. The host agent should first issue the
2661 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB
2662 * command to obtain information about config issues detected by
2663 * the adapter.
2664 *
2665 * Normally the adapter's PC BIOS will execute on the host following
 * adapter poweron and reset and will be responsible for querying the
2667 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG
2668 * command if appropriate.
2669 *
2670 * However, with the introduction of IOP reset support, the adapter may
2671 * boot up without the benefit of the adapter's PC BIOS host agent.
2672 * This routine is intended to take care of these issues in situations
2673 * where BIOS doesn't execute following adapter poweron or reset. The
2674 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so
2675 * there is no harm in doing this when it's already been done.
2676 */
2677 static int
2678 aac_handle_adapter_config_issues(struct aac_softstate *softs)
2679 {
2680 struct aac_cmd *acp = &softs->sync_ac;
2681 ddi_acc_handle_t acc;
2682 struct aac_fib *fibp;
2683 struct aac_Container *cmd;
2684 struct aac_Container_resp *resp;
2685 struct aac_cf_status_header *cfg_sts_hdr;
2686 uint32_t resp_status;
2687 uint32_t ct_status;
2688 uint32_t cfg_stat_action;
2689 int rval;
2690
2691 (void) aac_sync_fib_slot_bind(softs, acp);
2692 acc = acp->slotp->fib_acc_handle;
2693 fibp = acp->slotp->fibp;
2694
2695 /* Get adapter config status */
2696 cmd = (struct aac_Container *)&fibp->data[0];
2697
2698 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2699 ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2700 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS);
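	/* Tell the firmware how large a status header we can accept */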
2701 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE],
2702 sizeof (struct aac_cf_status_header));
2703 rval = aac_sync_fib(softs, ContainerCommand,
2704 AAC_FIB_SIZEOF(struct aac_Container));
2705 resp = (struct aac_Container_resp *)cmd;
2706 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data;
2707
2708 resp_status = ddi_get32(acc, &resp->Status);
2709 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2710 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) {
2711 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action);
2712
2713 /* Commit configuration if it's reasonable to do so. */
2714 if (cfg_stat_action <= CFACT_PAUSE) {
2715 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2716 ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2717 ddi_put32(acc, &cmd->CTCommand.command,
2718 CT_COMMIT_CONFIG);
2719 rval = aac_sync_fib(softs, ContainerCommand,
2720 AAC_FIB_SIZEOF(struct aac_Container));
2721
2722 resp_status = ddi_get32(acc, &resp->Status);
2723 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2724 if ((rval == AACOK) && (resp_status == 0) &&
2725 (ct_status == CT_OK))
2726 /* Successful completion */
2727 rval = AACMPE_OK;
2728 else
2729 /* Auto-commit aborted due to error(s). */
2730 rval = AACMPE_COMMIT_CONFIG;
2731 } else {
2732 /*
2733 * Auto-commit aborted due to adapter indicating
2734 * configuration issue(s) too dangerous to auto-commit.
2735 */
2736 rval = AACMPE_CONFIG_STATUS;
2737 }
2738 } else {
2739 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted");
2740 rval = AACMPE_CONFIG_STATUS;
2741 }
2742
2743 aac_sync_fib_slot_release(softs, acp);
2744 return (rval);
2745 }
2746
2747 /*
2748 * Hardware initialization and resource allocation
2749 */
2750 static int
2751 aac_common_attach(struct aac_softstate *softs)
2752 {
2753 uint32_t status;
2754 int i;
2755 struct aac_supplement_adapter_info sinf;
2756
2757 DBCALLED(softs, 1);
2758
2759 /*
2760 * Do a little check here to make sure there aren't any outstanding
2761 * FIBs in the message queue. At this point there should not be and
2762 * if there are they are probably left over from another instance of
2763 * the driver like when the system crashes and the crash dump driver
2764 * gets loaded.
2765 */
2766 while (AAC_OUTB_GET(softs) != 0xfffffffful)
2767 ;
2768
	/*
	 * Wait for the card to finish booting before doing anything that
	 * attempts to communicate with it.
	 */
2773 status = AAC_FWSTATUS_GET(softs);
2774 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC)
2775 goto error;
2776 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */
2777 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i);
2778 if (i == 0) {
2779 cmn_err(CE_CONT, "?Fatal error: controller not ready");
2780 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2781 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2782 goto error;
2783 }
2784
2785 /* Read and set card supported options and settings */
2786 if (aac_check_firmware(softs) == AACERR) {
2787 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2788 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2789 goto error;
2790 }
2791
2792 /* Add interrupt handlers */
2793 if (aac_register_intrs(softs) == AACERR) {
		cmn_err(CE_CONT,
		    "?Fatal error: interrupt registration failed");
2796 goto error;
2797 }
2798
2799 /* Setup communication space with the card */
2800 if (softs->comm_space_dma_handle == NULL) {
2801 if (aac_alloc_comm_space(softs) != AACOK)
2802 goto error;
2803 }
2804 if (aac_setup_comm_space(softs) != AACOK) {
2805 cmn_err(CE_CONT, "?Setup communication space failed");
2806 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2807 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2808 goto error;
2809 }
2810
2811 #ifdef DEBUG
2812 if (aac_get_fw_debug_buffer(softs) != AACOK)
2813 cmn_err(CE_CONT, "?firmware UART trace not supported");
2814 #endif
2815
2816 /* Allocate slots */
2817 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) {
		cmn_err(CE_CONT, "?Fatal error: slot allocation failed");
2819 goto error;
2820 }
2821 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots);
2822
2823 /* Allocate FIBs */
2824 if (softs->total_fibs < softs->total_slots) {
2825 aac_alloc_fibs(softs);
2826 if (softs->total_fibs == 0)
2827 goto error;
2828 AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated",
2829 softs->total_fibs);
2830 }
2831
2832 AAC_STATUS_CLR(softs, ~0); /* Clear out all interrupts */
2833 AAC_ENABLE_INTR(softs); /* Enable the interrupts we can handle */
2834
2835 if (aac_get_adapter_info(softs, NULL, &sinf) == AACOK) {
2836 softs->feature_bits = sinf.FeatureBits;
2837 softs->support_opt2 = sinf.SupportedOptions2;
2838
2839 /* Get adapter names */
2840 if (CARD_IS_UNKNOWN(softs->card)) {
2841 char *p, *p0, *p1;
2842
2843 /*
2844 * Now find the controller name in supp_adapter_info->
2845 * AdapterTypeText. Use the first word as the vendor
2846 * and the other words as the product name.
2847 */
2848 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = "
2849 "\"%s\"", sinf.AdapterTypeText);
2850 p = sinf.AdapterTypeText;
2851 p0 = p1 = NULL;
			/* Skip leading spaces */
2853 while (*p && (*p == ' ' || *p == '\t'))
2854 p++;
2855 p0 = p;
2856 while (*p && (*p != ' ' && *p != '\t'))
2857 p++;
2858 /* Remove middle spaces */
2859 while (*p && (*p == ' ' || *p == '\t'))
2860 *p++ = 0;
2861 p1 = p;
2862 /* Remove trailing spaces */
2863 p = p1 + strlen(p1) - 1;
2864 while (p > p1 && (*p == ' ' || *p == '\t'))
2865 *p-- = 0;
2866 if (*p0 && *p1) {
				(void) strncpy(softs->vendor_name, p0,
				    AAC_VENDOR_LEN);
				(void) strncpy(softs->product_name, p1,
				    AAC_PRODUCT_LEN);
2871 } else {
2872 cmn_err(CE_WARN,
2873 "?adapter name mis-formatted\n");
2874 if (*p0)
					(void) strncpy(softs->product_name,
					    p0, AAC_PRODUCT_LEN);
2877 }
2878 }
2879 } else {
2880 cmn_err(CE_CONT, "?Query adapter information failed");
2881 }
2882
2883
2884 cmn_err(CE_NOTE,
2885 "!aac driver %d.%02d.%02d-%d, found card: " \
2886 "%s %s(pci0x%x.%x.%x.%x) at 0x%x",
2887 AAC_DRIVER_MAJOR_VERSION,
2888 AAC_DRIVER_MINOR_VERSION,
2889 AAC_DRIVER_BUGFIX_LEVEL,
2890 AAC_DRIVER_BUILD,
2891 softs->vendor_name, softs->product_name,
2892 softs->vendid, softs->devid, softs->subvendid, softs->subsysid,
2893 softs->pci_mem_base_paddr);
2894
2895 /* Perform acceptance of adapter-detected config changes if possible */
2896 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) {
2897 cmn_err(CE_CONT, "?Handle adapter config issues failed");
2898 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2899 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2900 goto error;
2901 }
2902
2903 /* Setup containers (logical devices) */
2904 if (aac_probe_containers(softs) != AACOK) {
2905 cmn_err(CE_CONT, "?Fatal error: get container info error");
2906 goto error;
2907 }
2908
2909 /* Check for JBOD support. Default disable */
2910 char *data;
2911 if (softs->feature_bits & AAC_FEATURE_SUPPORTED_JBOD) {
2912 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p,
2913 0, "jbod-enable", &data) == DDI_SUCCESS)) {
2914 if (strcmp(data, "yes") == 0) {
2915 AACDB_PRINT(softs, CE_NOTE,
2916 "Enable JBOD access");
2917 softs->flags |= AAC_FLAGS_JBOD;
2918 }
2919 ddi_prop_free(data);
2920 }
2921 }
2922
2923 /* Setup phys. devices */
2924 if (softs->flags & (AAC_FLAGS_NONDASD | AAC_FLAGS_JBOD)) {
2925 uint32_t bus_max, tgt_max;
2926 uint32_t bus, tgt;
2927 int index;
2928
2929 if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) {
2930 cmn_err(CE_CONT, "?Fatal error: get bus info error");
2931 goto error;
2932 }
2933 AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d",
2934 bus_max, tgt_max);
2935 if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) {
2936 if (softs->state & AAC_STATE_RESET) {
2937 cmn_err(CE_WARN,
2938 "?Fatal error: bus map changed");
2939 goto error;
2940 }
2941 softs->bus_max = bus_max;
2942 softs->tgt_max = tgt_max;
2943 if (softs->nondasds) {
2944 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2945 sizeof (struct aac_nondasd));
2946 }
2947 softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \
2948 sizeof (struct aac_nondasd), KM_SLEEP);
2949
2950 index = 0;
2951 for (bus = 0; bus < softs->bus_max; bus++) {
2952 for (tgt = 0; tgt < softs->tgt_max; tgt++) {
2953 struct aac_nondasd *dvp =
2954 &softs->nondasds[index++];
2955 dvp->dev.type = AAC_DEV_PD;
2956 dvp->bus = bus;
2957 dvp->tid = tgt;
2958 }
2959 }
2960 }
2961 }
2962
2963 /* Check dma & acc handles allocated in attach */
2964 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) {
2965 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2966 goto error;
2967 }
2968
2969 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
2970 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2971 goto error;
2972 }
2973
2974 for (i = 0; i < softs->total_slots; i++) {
2975 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) !=
2976 DDI_SUCCESS) {
2977 ddi_fm_service_impact(softs->devinfo_p,
2978 DDI_SERVICE_LOST);
2979 goto error;
2980 }
2981 }
2982
2983 return (AACOK);
2984 error:
2985 if (softs->state & AAC_STATE_RESET)
2986 return (AACERR);
2987 if (softs->nondasds) {
2988 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2989 sizeof (struct aac_nondasd));
2990 softs->nondasds = NULL;
2991 }
2992 if (softs->total_fibs > 0)
2993 aac_destroy_fibs(softs);
2994 if (softs->total_slots > 0)
2995 aac_destroy_slots(softs);
2996 if (softs->comm_space_dma_handle)
2997 aac_free_comm_space(softs);
2998 return (AACERR);
2999 }
3000
3001 /*
3002 * Hardware shutdown and resource release
3003 */
3004 static void
3005 aac_common_detach(struct aac_softstate *softs)
3006 {
3007 DBCALLED(softs, 1);
3008
3009 aac_unregister_intrs(softs);
3010
3011 mutex_enter(&softs->io_lock);
3012 (void) aac_shutdown(softs);
3013
3014 if (softs->nondasds) {
3015 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
3016 sizeof (struct aac_nondasd));
3017 softs->nondasds = NULL;
3018 }
3019 aac_destroy_fibs(softs);
3020 aac_destroy_slots(softs);
3021 aac_free_comm_space(softs);
3022 mutex_exit(&softs->io_lock);
3023 }
3024
3025 /*
3026 * Send a synchronous command to the controller and wait for a result.
3027 * Indicate if the controller completed the command with an error status.
3028 */
3029 int
3030 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd,
3031 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3,
3032 uint32_t *statusp)
3033 {
3034 int timeout;
3035 uint32_t status;
3036
3037 if (statusp != NULL)
3038 *statusp = SRB_STATUS_SUCCESS;
3039
3040 /* Fill in mailbox */
3041 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3);
3042
3043 /* Ensure the sync command doorbell flag is cleared */
3044 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
3045
3046 /* Then set it to signal the adapter */
3047 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND);
3048
3049 /* Spin waiting for the command to complete */
3050 timeout = AAC_IMMEDIATE_TIMEOUT * 1000;
3051 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout);
3052 if (!timeout) {
3053 AACDB_PRINT(softs, CE_WARN,
3054 "Sync command timed out after %d seconds (0x%x)!",
3055 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs));
3056 return (AACERR);
3057 }
3058
3059 /* Clear the completion flag */
3060 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
3061
3062 /* Get the command status */
3063 status = AAC_MAILBOX_GET(softs, 0);
3064 if (statusp != NULL)
3065 *statusp = status;
3066 if (status != SRB_STATUS_SUCCESS) {
3067 AACDB_PRINT(softs, CE_WARN,
3068 "Sync command fail: status = 0x%x", status);
3069 return (AACERR);
3070 }
3071
3072 return (AACOK);
3073 }
3074
3075 /*
3076 * Send a synchronous FIB to the adapter and wait for its completion
3077 */
3078 static int
3079 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize)
3080 {
3081 struct aac_cmd *acp = &softs->sync_ac;
3082
3083 acp->flags = AAC_CMD_SYNC | AAC_CMD_IN_SYNC_SLOT;
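	/* Completion is via the interrupt path if enabled, else by polling */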
3084 if (softs->state & AAC_STATE_INTR)
3085 acp->flags |= AAC_CMD_NO_CB;
3086 else
3087 acp->flags |= AAC_CMD_NO_INTR;
3088
3089 acp->ac_comp = aac_sync_complete;
3090 acp->timeout = AAC_SYNC_TIMEOUT;
3091 acp->fib_size = fibsize;
3092
	/*
	 * Only the sync FIB header needs to be set up here; the caller
	 * should already have initialized the FIB data.
	 */
3097 aac_cmd_fib_header(softs, acp, cmd);
3098
3099 (void) ddi_dma_sync(acp->slotp->fib_dma_handle, 0, fibsize,
3100 DDI_DMA_SYNC_FORDEV);
3101
3102 aac_start_io(softs, acp);
3103
3104 if (softs->state & AAC_STATE_INTR)
3105 return (aac_do_sync_io(softs, acp));
3106 else
3107 return (aac_do_poll_io(softs, acp));
3108 }
3109
3110 static void
3111 aac_cmd_initq(struct aac_cmd_queue *q)
3112 {
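	/*
	 * An empty queue has q_head == NULL and q_tail pointing back at
	 * q_head (as a cmd whose first member is the next link), so
	 * enqueue can always store the new tail through q_tail->next.
	 */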
3113 q->q_head = NULL;
3114 q->q_tail = (struct aac_cmd *)&q->q_head;
3115 }
3116
3117 /*
3118 * Remove a cmd from the head of q
3119 */
3120 static struct aac_cmd *
3121 aac_cmd_dequeue(struct aac_cmd_queue *q)
3122 {
3123 struct aac_cmd *acp;
3124
3125 if ((acp = q->q_head) != NULL) {
3126 if ((q->q_head = acp->next) != NULL)
3127 acp->next = NULL;
3128 else
3129 q->q_tail = (struct aac_cmd *)&q->q_head;
3130 acp->prev = NULL;
3131 }
3132 return (acp);
3133 }
3134
3135 /*
3136 * Add a cmd to the tail of q
3137 */
3138 static void
3139 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp)
3140 {
3141 ASSERT(acp->next == NULL);
3142 acp->prev = q->q_tail;
3143 q->q_tail->next = acp;
3144 q->q_tail = acp;
3145 }
3146
3147 /*
3148 * Remove the cmd ac from q
3149 */
3150 static void
3151 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp)
3152 {
3153 if (acp->prev) {
3154 if ((acp->prev->next = acp->next) != NULL) {
3155 acp->next->prev = acp->prev;
3156 acp->next = NULL;
3157 } else {
3158 q->q_tail = acp->prev;
3159 }
3160 acp->prev = NULL;
3161 }
	/* if acp->prev is NULL, acp is not on the queue; nothing to do */
3163 }
3164
3165 /*
3166 * Atomically insert an entry into the nominated queue, returns 0 on success or
3167 * AACERR if the queue is full.
3168 *
3169 * Note: it would be more efficient to defer notifying the controller in
3170 * the case where we may be inserting several entries in rapid succession,
3171 * but implementing this usefully may be difficult (it would involve a
3172 * separate queue/notify interface).
3173 */
3174 static int
3175 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr,
3176 uint32_t fib_size)
3177 {
3178 ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3179 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3180 uint32_t pi, ci;
3181
3182 DBCALLED(softs, 2);
3183
3184 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q);
3185
3186 /* Get the producer/consumer indices */
3187 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3188 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3189 DDI_DMA_SYNC_FORCPU);
3190 if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3191 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3192 return (AACERR);
3193 }
3194
3195 pi = ddi_get32(acc,
3196 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3197 ci = ddi_get32(acc,
3198 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3199
3200 /*
3201 * Wrap the queue first before we check the queue to see
3202 * if it is full
3203 */
3204 if (pi >= aac_qinfo[queue].size)
3205 pi = 0;
3206
	/* XXX queue full: one more entry would catch the consumer */
3208 if ((pi + 1) == ci)
3209 return (AACERR);
3210
3211 /* Fill in queue entry */
3212 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size);
3213 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr);
3214 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3215 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3216 DDI_DMA_SYNC_FORDEV);
3217
3218 /* Update producer index */
3219 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX],
3220 pi + 1);
3221 (void) ddi_dma_sync(dma,
3222 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \
3223 (uintptr_t)softs->comm_space, sizeof (uint32_t),
3224 DDI_DMA_SYNC_FORDEV);
3225
3226 if (aac_qinfo[queue].notify != 0)
3227 AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3228 return (AACOK);
3229 }
3230
3231 /*
3232 * Atomically remove one entry from the nominated queue, returns 0 on
3233 * success or AACERR if the queue is empty.
3234 */
3235 static int
3236 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp)
3237 {
3238 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3239 ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3240 uint32_t pi, ci;
3241 int unfull = 0;
3242
3243 DBCALLED(softs, 2);
3244
3245 ASSERT(idxp);
3246
3247 /* Get the producer/consumer indices */
3248 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3249 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3250 DDI_DMA_SYNC_FORCPU);
3251 pi = ddi_get32(acc,
3252 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3253 ci = ddi_get32(acc,
3254 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3255
3256 /* Check for queue empty */
3257 if (ci == pi)
3258 return (AACERR);
3259
3260 if (pi >= aac_qinfo[queue].size)
3261 pi = 0;
3262
	/* Note if the queue was full so the adapter can be notified below */
3264 if (ci == pi + 1)
3265 unfull = 1;
3266
3267 /*
3268 * The controller does not wrap the queue,
3269 * so we have to do it by ourselves
3270 */
3271 if (ci >= aac_qinfo[queue].size)
3272 ci = 0;
3273
3274 /* Fetch the entry */
3275 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3276 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3277 DDI_DMA_SYNC_FORCPU);
3278 if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3279 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3280 return (AACERR);
3281 }
3282
3283 switch (queue) {
3284 case AAC_HOST_NORM_RESP_Q:
3285 case AAC_HOST_HIGH_RESP_Q:
3286 *idxp = ddi_get32(acc,
3287 &(softs->qentries[queue] + ci)->aq_fib_addr);
3288 break;
3289
3290 case AAC_HOST_NORM_CMD_Q:
3291 case AAC_HOST_HIGH_CMD_Q:
3292 *idxp = ddi_get32(acc,
3293 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE;
3294 break;
3295
3296 default:
3297 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()");
3298 return (AACERR);
3299 }
3300
3301 /* Update consumer index */
3302 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX],
3303 ci + 1);
3304 (void) ddi_dma_sync(dma,
3305 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \
3306 (uintptr_t)softs->comm_space, sizeof (uint32_t),
3307 DDI_DMA_SYNC_FORDEV);
3308
3309 if (unfull && aac_qinfo[queue].notify != 0)
3310 AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3311 return (AACOK);
3312 }
3313
3314 static struct aac_mntinforesp *
3315 aac_get_mntinfo(struct aac_softstate *softs, int cid)
3316 {
3317 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3318 struct aac_fib *fibp = softs->sync_ac.slotp->fibp;
3319 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0];
3320 struct aac_mntinforesp *mir;
3321
3322 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */
3323 (softs->flags & AAC_FLAGS_LBA_64BIT) ?
3324 VM_NameServe64 : VM_NameServe);
3325 ddi_put32(acc, &mi->MntType, FT_FILESYS);
3326 ddi_put32(acc, &mi->MntCount, cid);
3327
3328 if (aac_sync_fib(softs, ContainerCommand,
3329 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) {
		AACDB_PRINT(softs, CE_WARN, "Error probing container %d", cid);
3331 return (NULL);
3332 }
3333
3334 mir = (struct aac_mntinforesp *)&fibp->data[0];
3335 if (ddi_get32(acc, &mir->Status) == ST_OK)
3336 return (mir);
3337 return (NULL);
3338 }
3339
3340 static int
3341 aac_get_container_count(struct aac_softstate *softs, int *count)
3342 {
3343 ddi_acc_handle_t acc;
3344 struct aac_mntinforesp *mir;
3345 int rval;
3346
3347 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
3348 acc = softs->sync_ac.slotp->fib_acc_handle;
3349
3350 if ((mir = aac_get_mntinfo(softs, 0)) == NULL) {
3351 rval = AACERR;
3352 goto finish;
3353 }
3354 *count = ddi_get32(acc, &mir->MntRespCount);
3355 if (*count > AAC_MAX_LD) {
3356 AACDB_PRINT(softs, CE_CONT,
3357 "container count(%d) > AAC_MAX_LD", *count);
3358 rval = AACERR;
3359 goto finish;
3360 }
3361 rval = AACOK;
3362
3363 finish:
3364 aac_sync_fib_slot_release(softs, &softs->sync_ac);
3365 return (rval);
3366 }
3367
3368 static int
3369 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid)
3370 {
3371 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3372 struct aac_Container *ct = (struct aac_Container *) \
3373 &softs->sync_ac.slotp->fibp->data[0];
3374
3375 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE);
3376 ddi_put32(acc, &ct->Command, VM_ContainerConfig);
3377 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID);
3378 ddi_put32(acc, &ct->CTCommand.param[0], cid);
3379
3380 if (aac_sync_fib(softs, ContainerCommand,
3381 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR)
3382 return (AACERR);
3383 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK)
3384 return (AACERR);
3385
3386 *uid = ddi_get32(acc, &ct->CTCommand.param[1]);
3387 return (AACOK);
3388 }
3389
3390 /*
3391 * Request information of the container cid
3392 */
3393 static struct aac_mntinforesp *
3394 aac_get_container_info(struct aac_softstate *softs, int cid)
3395 {
3396 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3397 struct aac_mntinforesp *mir;
3398 int rval_uid;
3399 uint32_t uid;
3400
3401 /* Get container UID first so that it will not overwrite mntinfo */
3402 rval_uid = aac_get_container_uid(softs, cid, &uid);
3403
3404 /* Get container basic info */
3405 if ((mir = aac_get_mntinfo(softs, cid)) == NULL) {
3406 AACDB_PRINT(softs, CE_CONT,
3407 "query container %d info failed", cid);
3408 return (NULL);
3409 }
3410 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE)
3411 return (mir);
3412 if (rval_uid != AACOK) {
3413 AACDB_PRINT(softs, CE_CONT,
3414 "query container %d uid failed", cid);
3415 return (NULL);
3416 }
3417
3418 ddi_put32(acc, &mir->Status, uid);
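	/* uid is stashed in the consumed Status field for the caller */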
3419 return (mir);
3420 }
3421
3422 static enum aac_cfg_event
3423 aac_probe_container(struct aac_softstate *softs, uint32_t cid)
3424 {
3425 enum aac_cfg_event event = AAC_CFG_NULL_NOEXIST;
3426 struct aac_container *dvp = &softs->containers[cid];
3427 struct aac_mntinforesp *mir;
3428 ddi_acc_handle_t acc;
3429
3430 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
3431 acc = softs->sync_ac.slotp->fib_acc_handle;
3432
3433 /* Get container basic info */
3434 if ((mir = aac_get_container_info(softs, cid)) == NULL) {
3435 /* AAC_CFG_NULL_NOEXIST */
3436 goto finish;
3437 }
3438
3439 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) {
3440 if (AAC_DEV_IS_VALID(&dvp->dev)) {
3441 AACDB_PRINT(softs, CE_NOTE,
3442 ">>> Container %d deleted", cid);
3443 dvp->dev.flags &= ~AAC_DFLAG_VALID;
3444 event = AAC_CFG_DELETE;
3445 }
3446 /* AAC_CFG_NULL_NOEXIST */
3447 } else {
3448 uint64_t size;
3449 uint32_t uid;
3450
3451 event = AAC_CFG_NULL_EXIST;
3452
3453 size = AAC_MIR_SIZE(softs, acc, mir);
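		/* uid stashed in mir->Status by aac_get_container_info() */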
3454 uid = ddi_get32(acc, &mir->Status);
3455 if (AAC_DEV_IS_VALID(&dvp->dev)) {
3456 if (dvp->uid != uid) {
3457 AACDB_PRINT(softs, CE_WARN,
3458 ">>> Container %u uid changed to %d",
3459 cid, uid);
3460 dvp->uid = uid;
3461 event = AAC_CFG_CHANGE;
3462 }
3463 if (dvp->size != size) {
3464 AACDB_PRINT(softs, CE_NOTE,
3465 ">>> Container %u size changed to %"PRIu64,
3466 cid, size);
3467 dvp->size = size;
3468 event = AAC_CFG_CHANGE;
3469 }
3470 } else { /* Init new container */
3471 AACDB_PRINT(softs, CE_NOTE,
3472 ">>> Container %d added: " \
3473 "size=0x%x.%08x, type=%d, name=%s",
3474 cid,
3475 ddi_get32(acc, &mir->MntObj.CapacityHigh),
3476 ddi_get32(acc, &mir->MntObj.Capacity),
3477 ddi_get32(acc, &mir->MntObj.VolType),
3478 mir->MntObj.FileSystemName);
3479 dvp->dev.flags |= AAC_DFLAG_VALID;
3480 dvp->dev.type = AAC_DEV_LD;
3481
3482 dvp->cid = cid;
3483 dvp->uid = uid;
3484 dvp->size = size;
3485 dvp->locked = 0;
3486 dvp->deleted = 0;
3487
3488 event = AAC_CFG_ADD;
3489 }
3490 }
3491
3492 finish:
3493 aac_sync_fib_slot_release(softs, &softs->sync_ac);
3494 return (event);
3495 }
3496
3497 /*
3498  * Rescan all possible containers, update the container list with newly
3499  * online/offline containers, and prepare for autoconfiguration.
3500 */
3501 static int
3502 aac_probe_containers(struct aac_softstate *softs)
3503 {
3504 int i, count, total;
3505
3506 /* Loop over possible containers */
3507 count = softs->container_count;
3508 if (aac_get_container_count(softs, &count) == AACERR)
3509 return (AACERR);
3510
3511 for (i = total = 0; i < count; i++) {
3512 enum aac_cfg_event event = aac_probe_container(softs, i);
3513 if ((event != AAC_CFG_NULL_NOEXIST) &&
3514 (event != AAC_CFG_NULL_EXIST)) {
3515 (void) aac_handle_dr(softs, i, -1, event);
3516 total++;
3517 }
3518 }
3519
3520 if (count < softs->container_count) {
3521 struct aac_container *dvp;
3522
3523 for (dvp = &softs->containers[count];
3524 dvp < &softs->containers[softs->container_count]; dvp++) {
3525 if (!AAC_DEV_IS_VALID(&dvp->dev))
3526 continue;
3527 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted",
3528 dvp->cid);
3529 dvp->dev.flags &= ~AAC_DFLAG_VALID;
3530 (void) aac_handle_dr(softs, dvp->cid, -1,
3531 AAC_CFG_DELETE);
3532 }
3533 }
3534
3535 softs->container_count = count;
3536 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total);
3537 return (AACOK);
3538 }
3539
3540 static int
3541 aac_probe_jbod(struct aac_softstate *softs, int tgt, int event)
3542 {
3543 	struct aac_device *dvp;
3544 
3545 	ASSERT(AAC_MAX_LD <= tgt);
3546 	ASSERT(tgt < AAC_MAX_DEV(softs));
	dvp = AAC_DEV(softs, tgt);
3547
3548 switch (event) {
3549 case AAC_CFG_ADD:
3550 AACDB_PRINT(softs, CE_NOTE,
3551 ">>> Jbod %d added", tgt - AAC_MAX_LD);
3552 dvp->flags |= AAC_DFLAG_VALID;
3553 dvp->type = AAC_DEV_PD;
3554 break;
3555 case AAC_CFG_DELETE:
3556 AACDB_PRINT(softs, CE_NOTE,
3557 ">>> Jbod %d deleted", tgt - AAC_MAX_LD);
3558 dvp->flags &= ~AAC_DFLAG_VALID;
3559 break;
3560 default:
3561 return (AACERR);
3562 }
3563 (void) aac_handle_dr(softs, tgt, 0, event);
3564 return (AACOK);
3565 }
3566
3567 static int
3568 aac_alloc_comm_space(struct aac_softstate *softs)
3569 {
3570 size_t rlen;
3571 ddi_dma_cookie_t cookie;
3572 uint_t cookien;
3573
3574 /* Allocate DMA for comm. space */
3575 if (ddi_dma_alloc_handle(
3576 softs->devinfo_p,
3577 &softs->addr_dma_attr,
3578 DDI_DMA_SLEEP,
3579 NULL,
3580 &softs->comm_space_dma_handle) != DDI_SUCCESS) {
3581 AACDB_PRINT(softs, CE_WARN,
3582 "Cannot alloc dma handle for communication area");
3583 goto error;
3584 }
3585 if (ddi_dma_mem_alloc(
3586 softs->comm_space_dma_handle,
3587 sizeof (struct aac_comm_space),
3588 &softs->acc_attr,
3589 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3590 DDI_DMA_SLEEP,
3591 NULL,
3592 (caddr_t *)&softs->comm_space,
3593 &rlen,
3594 &softs->comm_space_acc_handle) != DDI_SUCCESS) {
3595 AACDB_PRINT(softs, CE_WARN,
3596 "Cannot alloc mem for communication area");
3597 goto error;
3598 }
3599 if (ddi_dma_addr_bind_handle(
3600 softs->comm_space_dma_handle,
3601 NULL,
3602 (caddr_t)softs->comm_space,
3603 sizeof (struct aac_comm_space),
3604 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3605 DDI_DMA_SLEEP,
3606 NULL,
3607 &cookie,
3608 &cookien) != DDI_DMA_MAPPED) {
3609 AACDB_PRINT(softs, CE_WARN,
3610 "DMA bind failed for communication area");
3611 goto error;
3612 }
3613 softs->comm_space_phyaddr = cookie.dmac_address;
3614
3615 return (AACOK);
3616 error:
3617 if (softs->comm_space_acc_handle) {
3618 ddi_dma_mem_free(&softs->comm_space_acc_handle);
3619 softs->comm_space_acc_handle = NULL;
3620 }
3621 if (softs->comm_space_dma_handle) {
3622 ddi_dma_free_handle(&softs->comm_space_dma_handle);
3623 softs->comm_space_dma_handle = NULL;
3624 }
3625 return (AACERR);
3626 }
3627
3628 static void
3629 aac_free_comm_space(struct aac_softstate *softs)
3630 {
3631
3632 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle);
3633 ddi_dma_mem_free(&softs->comm_space_acc_handle);
3634 softs->comm_space_acc_handle = NULL;
3635 ddi_dma_free_handle(&softs->comm_space_dma_handle);
3636 softs->comm_space_dma_handle = NULL;
3637 softs->comm_space_phyaddr = NULL;
3638 }
3639
3640 /*
3641 * Initialize the data structures that are required for the communication
3642 * interface to operate
3643 */
3644 static int
3645 aac_setup_comm_space(struct aac_softstate *softs)
3646 {
3647 ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3648 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3649 uint32_t comm_space_phyaddr;
3650 struct aac_adapter_init *initp;
3651 int qoffset;
3652
3653 comm_space_phyaddr = softs->comm_space_phyaddr;
3654
3655 /* Setup adapter init struct */
3656 initp = &softs->comm_space->init_data;
3657 bzero(initp, sizeof (struct aac_adapter_init));
3658
3659 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION);
3660 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time());
3661
3662 /* Setup new/old comm. specific data */
3663 if (softs->flags & AAC_FLAGS_RAW_IO) {
3664 uint32_t init_flags = 0;
3665
3666 if (softs->flags & AAC_FLAGS_NEW_COMM)
3667 init_flags |= AAC_INIT_FLAGS_NEW_COMM_SUPPORTED;
3668 /* AAC_SUPPORTED_POWER_MANAGEMENT */
3669 init_flags |= AAC_INIT_FLAGS_DRIVER_SUPPORTS_PM;
3670 init_flags |= AAC_INIT_FLAGS_DRIVER_USES_UTC_TIME;
3671
3672 ddi_put32(acc, &initp->InitStructRevision,
3673 AAC_INIT_STRUCT_REVISION_4);
3674 ddi_put32(acc, &initp->InitFlags, init_flags);
3675 /* Setup the preferred settings */
3676 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs);
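		/* aac_max_sectors << 9: convert 512-byte sectors to a byte count */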
3677 ddi_put32(acc, &initp->MaxIoSize,
3678 (softs->aac_max_sectors << 9));
3679 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size);
3680 } else {
3681 /*
3682 		 * Tell the adapter the physical locations of various
3683 		 * important shared data structures.
3684 */
3685 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress,
3686 comm_space_phyaddr + \
3687 offsetof(struct aac_comm_space, adapter_fibs));
3688 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0);
3689 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE);
3690 ddi_put32(acc, &initp->AdapterFibsSize,
3691 AAC_ADAPTER_FIBS * AAC_FIB_SIZE);
3692 ddi_put32(acc, &initp->PrintfBufferAddress,
3693 comm_space_phyaddr + \
3694 offsetof(struct aac_comm_space, adapter_print_buf));
3695 ddi_put32(acc, &initp->PrintfBufferSize,
3696 AAC_ADAPTER_PRINT_BUFSIZE);
3697 ddi_put32(acc, &initp->MiniPortRevision,
3698 AAC_INIT_STRUCT_MINIPORT_REVISION);
3699 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN);
3700
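		/*
		 * Pad the queue table so that it starts on an
		 * AAC_QUEUE_ALIGN boundary.  For example, if the qtable
		 * field fell 0x30 bytes past an alignment boundary and
		 * AAC_QUEUE_ALIGN were 0x100, qoffset would become 0xd0,
		 * making both qtablep and CommHeaderAddress aligned.
		 */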
3701 qoffset = (comm_space_phyaddr + \
3702 offsetof(struct aac_comm_space, qtable)) % \
3703 AAC_QUEUE_ALIGN;
3704 if (qoffset)
3705 qoffset = AAC_QUEUE_ALIGN - qoffset;
3706 softs->qtablep = (struct aac_queue_table *) \
3707 ((char *)&softs->comm_space->qtable + qoffset);
3708 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \
3709 offsetof(struct aac_comm_space, qtable) + qoffset);
3710
3711 /* Init queue table */
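		/*
		 * Each queue's producer and consumer index both start at
		 * the queue size, i.e. producer == consumer, so every
		 * queue begins empty.
		 */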
3712 ddi_put32(acc, &softs->qtablep-> \
3713 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3714 AAC_HOST_NORM_CMD_ENTRIES);
3715 ddi_put32(acc, &softs->qtablep-> \
3716 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3717 AAC_HOST_NORM_CMD_ENTRIES);
3718 ddi_put32(acc, &softs->qtablep-> \
3719 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3720 AAC_HOST_HIGH_CMD_ENTRIES);
3721 ddi_put32(acc, &softs->qtablep-> \
3722 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3723 AAC_HOST_HIGH_CMD_ENTRIES);
3724 ddi_put32(acc, &softs->qtablep-> \
3725 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3726 AAC_ADAP_NORM_CMD_ENTRIES);
3727 ddi_put32(acc, &softs->qtablep-> \
3728 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3729 AAC_ADAP_NORM_CMD_ENTRIES);
3730 ddi_put32(acc, &softs->qtablep-> \
3731 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3732 AAC_ADAP_HIGH_CMD_ENTRIES);
3733 ddi_put32(acc, &softs->qtablep-> \
3734 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3735 AAC_ADAP_HIGH_CMD_ENTRIES);
3736 ddi_put32(acc, &softs->qtablep-> \
3737 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3738 AAC_HOST_NORM_RESP_ENTRIES);
3739 ddi_put32(acc, &softs->qtablep-> \
3740 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3741 AAC_HOST_NORM_RESP_ENTRIES);
3742 ddi_put32(acc, &softs->qtablep-> \
3743 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3744 AAC_HOST_HIGH_RESP_ENTRIES);
3745 ddi_put32(acc, &softs->qtablep-> \
3746 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3747 AAC_HOST_HIGH_RESP_ENTRIES);
3748 ddi_put32(acc, &softs->qtablep-> \
3749 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3750 AAC_ADAP_NORM_RESP_ENTRIES);
3751 ddi_put32(acc, &softs->qtablep-> \
3752 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3753 AAC_ADAP_NORM_RESP_ENTRIES);
3754 ddi_put32(acc, &softs->qtablep-> \
3755 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3756 AAC_ADAP_HIGH_RESP_ENTRIES);
3757 ddi_put32(acc, &softs->qtablep-> \
3758 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3759 AAC_ADAP_HIGH_RESP_ENTRIES);
3760
3761 /* Init queue entries */
3762 softs->qentries[AAC_HOST_NORM_CMD_Q] =
3763 &softs->qtablep->qt_HostNormCmdQueue[0];
3764 softs->qentries[AAC_HOST_HIGH_CMD_Q] =
3765 &softs->qtablep->qt_HostHighCmdQueue[0];
3766 softs->qentries[AAC_ADAP_NORM_CMD_Q] =
3767 &softs->qtablep->qt_AdapNormCmdQueue[0];
3768 softs->qentries[AAC_ADAP_HIGH_CMD_Q] =
3769 &softs->qtablep->qt_AdapHighCmdQueue[0];
3770 softs->qentries[AAC_HOST_NORM_RESP_Q] =
3771 &softs->qtablep->qt_HostNormRespQueue[0];
3772 softs->qentries[AAC_HOST_HIGH_RESP_Q] =
3773 &softs->qtablep->qt_HostHighRespQueue[0];
3774 softs->qentries[AAC_ADAP_NORM_RESP_Q] =
3775 &softs->qtablep->qt_AdapNormRespQueue[0];
3776 softs->qentries[AAC_ADAP_HIGH_RESP_Q] =
3777 &softs->qtablep->qt_AdapHighRespQueue[0];
3778 }
3779 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV);
3780
3781 /* Send init structure to the card */
3782 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT,
3783 comm_space_phyaddr + \
3784 offsetof(struct aac_comm_space, init_data),
3785 0, 0, 0, NULL) == AACERR) {
3786 AACDB_PRINT(softs, CE_WARN,
3787 "Cannot send init structure to adapter");
3788 return (AACERR);
3789 }
3790
3791 return (AACOK);
3792 }
3793
3794 static uchar_t *
3795 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf)
3796 {
3797 (void) memset(buf, ' ', AAC_VENDOR_LEN);
3798 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name));
3799 return (buf + AAC_VENDOR_LEN);
3800 }
3801
3802 static uchar_t *
3803 aac_product_id(struct aac_softstate *softs, uchar_t *buf)
3804 {
3805 (void) memset(buf, ' ', AAC_PRODUCT_LEN);
3806 bcopy(softs->product_name, buf, strlen(softs->product_name));
3807 return (buf + AAC_PRODUCT_LEN);
3808 }
3809
3810 /*
3811 * Construct unit serial number from container uid
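 * For example, a uid of 0x0089abcd yields the 8 ASCII characters
 * "0089ABCD".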
3812 */
3813 static uchar_t *
3814 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf)
3815 {
3816 int i, d;
3817 uint32_t uid;
3818
3819 ASSERT(tgt >= 0 && tgt < AAC_MAX_LD);
3820
3821 uid = softs->containers[tgt].uid;
3822 for (i = 7; i >= 0; i--) {
3823 d = uid & 0xf;
3824 buf[i] = d > 9 ? 'A' + (d - 0xa) : '0' + d;
3825 uid >>= 4;
3826 }
3827 return (buf + 8);
3828 }
3829
3830 /*
3831 * SPC-3 7.5 INQUIRY command implementation
3832 */
3833 static void
3834 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt,
3835 union scsi_cdb *cdbp, struct buf *bp)
3836 {
3837 int tgt = pkt->pkt_address.a_target;
3838 char *b_addr = NULL;
3839 uchar_t page = cdbp->cdb_opaque[2];
3840
3841 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) {
3842 /* Command Support Data is not supported */
3843 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0);
3844 return;
3845 }
3846
3847 if (bp && bp->b_un.b_addr && bp->b_bcount) {
3848 if (bp->b_flags & (B_PHYS | B_PAGEIO))
3849 bp_mapin(bp);
3850 b_addr = bp->b_un.b_addr;
3851 }
3852
3853 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) {
3854 uchar_t *vpdp = (uchar_t *)b_addr;
3855 uchar_t *idp, *sp;
3856
3857 /* SPC-3 8.4 Vital product data parameters */
3858 switch (page) {
3859 case 0x00:
3860 /* Supported VPD pages */
3861 if (vpdp == NULL ||
3862 bp->b_bcount < (AAC_VPD_PAGE_DATA + 3))
3863 return;
3864 bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3865 vpdp[AAC_VPD_PAGE_CODE] = 0x00;
3866 vpdp[AAC_VPD_PAGE_LENGTH] = 3;
3867
3868 vpdp[AAC_VPD_PAGE_DATA] = 0x00;
3869 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80;
3870 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83;
3871
3872 pkt->pkt_state |= STATE_XFERRED_DATA;
3873 break;
3874
3875 case 0x80:
3876 /* Unit serial number page */
3877 if (vpdp == NULL ||
3878 bp->b_bcount < (AAC_VPD_PAGE_DATA + 8))
3879 return;
3880 bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3881 vpdp[AAC_VPD_PAGE_CODE] = 0x80;
3882 vpdp[AAC_VPD_PAGE_LENGTH] = 8;
3883
3884 sp = &vpdp[AAC_VPD_PAGE_DATA];
3885 (void) aac_lun_serialno(softs, tgt, sp);
3886
3887 pkt->pkt_state |= STATE_XFERRED_DATA;
3888 break;
3889
3890 case 0x83:
3891 /* Device identification page */
3892 if (vpdp == NULL ||
3893 bp->b_bcount < (AAC_VPD_PAGE_DATA + 32))
3894 return;
3895 bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3896 vpdp[AAC_VPD_PAGE_CODE] = 0x83;
3897
3898 idp = &vpdp[AAC_VPD_PAGE_DATA];
3899 bzero(idp, AAC_VPD_ID_LENGTH);
3900 idp[AAC_VPD_ID_CODESET] = 0x02;
3901 idp[AAC_VPD_ID_TYPE] = 0x01;
3902
3903 /*
3904 * SPC-3 Table 111 - Identifier type
3905 			 * One recommended method of constructing the remainder
3906 			 * of the identifier field is to concatenate the product
3907 * identification field from the standard INQUIRY data
3908 * field and the product serial number field from the
3909 * unit serial number page.
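			 * With the usual 8-byte vendor id and 16-byte product
			 * id plus the 8-character serial number, that gives a
			 * 32-byte identifier, matching the 32-byte length
			 * check above.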
3910 */
3911 sp = &idp[AAC_VPD_ID_DATA];
3912 sp = aac_vendor_id(softs, sp);
3913 sp = aac_product_id(softs, sp);
3914 sp = aac_lun_serialno(softs, tgt, sp);
3915 idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \
3916 (uintptr_t)&idp[AAC_VPD_ID_DATA];
3917
3918 vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \
3919 (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA];
3920 pkt->pkt_state |= STATE_XFERRED_DATA;
3921 break;
3922
3923 default:
3924 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3925 0x24, 0x00, 0);
3926 break;
3927 }
3928 } else {
3929 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr;
3930 size_t len = sizeof (struct scsi_inquiry);
3931
3932 if (page != 0) {
3933 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3934 0x24, 0x00, 0);
3935 return;
3936 }
3937 if (inqp == NULL || bp->b_bcount < len)
3938 return;
3939
3940 bzero(inqp, len);
3941 inqp->inq_len = AAC_ADDITIONAL_LEN;
3942 inqp->inq_ansi = AAC_ANSI_VER;
3943 inqp->inq_rdf = AAC_RESP_DATA_FORMAT;
3944 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid);
3945 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid);
3946 bcopy("V1.0", inqp->inq_revision, 4);
3947 inqp->inq_cmdque = 1; /* enable tagged-queuing */
3948 /*
3949 		 * For the "sd-max-xfer-size" property, which may impact
3950 		 * performance as the number of I/O threads increases.
3951 */
3952 inqp->inq_wbus32 = 1;
3953
3954 pkt->pkt_state |= STATE_XFERRED_DATA;
3955 }
3956 }
3957
3958 /*
3959 * SPC-3 7.10 MODE SENSE command implementation
3960 */
3961 static void
3962 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt,
3963 union scsi_cdb *cdbp, struct buf *bp, int capacity)
3964 {
3965 uchar_t pagecode;
3966 struct mode_header *headerp;
3967 struct mode_header_g1 *g1_headerp;
3968 unsigned int ncyl;
3969 caddr_t sense_data;
3970 caddr_t next_page;
3971 size_t sdata_size;
3972 size_t pages_size;
3973 int unsupport_page = 0;
3974
3975 ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE ||
3976 cdbp->scc_cmd == SCMD_MODE_SENSE_G1);
3977
3978 if (!(bp && bp->b_un.b_addr && bp->b_bcount))
3979 return;
3980
3981 if (bp->b_flags & (B_PHYS | B_PAGEIO))
3982 bp_mapin(bp);
3983 pkt->pkt_state |= STATE_XFERRED_DATA;
3984 pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F;
3985
3986 	/* Calculate the size of the needed buffer */
3987 if (cdbp->scc_cmd == SCMD_MODE_SENSE)
3988 sdata_size = MODE_HEADER_LENGTH;
3989 else /* must be SCMD_MODE_SENSE_G1 */
3990 sdata_size = MODE_HEADER_LENGTH_G1;
3991
3992 pages_size = 0;
3993 switch (pagecode) {
3994 case SD_MODE_SENSE_PAGE3_CODE:
3995 pages_size += sizeof (struct mode_format);
3996 break;
3997
3998 case SD_MODE_SENSE_PAGE4_CODE:
3999 pages_size += sizeof (struct mode_geometry);
4000 break;
4001
4002 case MODEPAGE_CTRL_MODE:
4003 if (softs->flags & AAC_FLAGS_LBA_64BIT) {
4004 pages_size += sizeof (struct mode_control_scsi3);
4005 } else {
4006 unsupport_page = 1;
4007 }
4008 break;
4009
4010 case MODEPAGE_ALLPAGES:
4011 if (softs->flags & AAC_FLAGS_LBA_64BIT) {
4012 pages_size += sizeof (struct mode_format) +
4013 sizeof (struct mode_geometry) +
4014 sizeof (struct mode_control_scsi3);
4015 } else {
4016 pages_size += sizeof (struct mode_format) +
4017 sizeof (struct mode_geometry);
4018 }
4019 break;
4020
4021 default:
4022 /* unsupported pages */
4023 unsupport_page = 1;
4024 }
4025
4026 	/* Allocate a buffer to build the data to be sent back */
4027 sdata_size += pages_size;
4028 sense_data = kmem_zalloc(sdata_size, KM_SLEEP);
4029
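	/*
	 * The mode data length field counts the bytes that follow it,
	 * hence the sizeof (length) subtraction in both header formats.
	 */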
4030 if (cdbp->scc_cmd == SCMD_MODE_SENSE) {
4031 headerp = (struct mode_header *)sense_data;
4032 headerp->length = MODE_HEADER_LENGTH + pages_size -
4033 sizeof (headerp->length);
4034 headerp->bdesc_length = 0;
4035 next_page = sense_data + sizeof (struct mode_header);
4036 } else {
4037 g1_headerp = (void *)sense_data;
4038 g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size -
4039 sizeof (g1_headerp->length));
4040 g1_headerp->bdesc_length = 0;
4041 next_page = sense_data + sizeof (struct mode_header_g1);
4042 }
4043
4044 if (unsupport_page)
4045 goto finish;
4046
4047 if (pagecode == SD_MODE_SENSE_PAGE3_CODE ||
4048 pagecode == MODEPAGE_ALLPAGES) {
4049 /* SBC-3 7.1.3.3 Format device page */
4050 struct mode_format *page3p;
4051
4052 page3p = (void *)next_page;
4053 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE;
4054 page3p->mode_page.length = sizeof (struct mode_format);
4055 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE);
4056 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK);
4057
4058 next_page += sizeof (struct mode_format);
4059 }
4060
4061 if (pagecode == SD_MODE_SENSE_PAGE4_CODE ||
4062 pagecode == MODEPAGE_ALLPAGES) {
4063 /* SBC-3 7.1.3.8 Rigid disk device geometry page */
4064 struct mode_geometry *page4p;
4065
4066 page4p = (void *)next_page;
4067 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE;
4068 page4p->mode_page.length = sizeof (struct mode_geometry);
4069 page4p->heads = AAC_NUMBER_OF_HEADS;
4070 page4p->rpm = BE_16(AAC_ROTATION_SPEED);
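		/*
		 * Synthesize a cylinder count from the capacity and the
		 * fixed heads/sectors-per-track geometry.
		 */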
4071 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK);
4072 page4p->cyl_lb = ncyl & 0xff;
4073 page4p->cyl_mb = (ncyl >> 8) & 0xff;
4074 page4p->cyl_ub = (ncyl >> 16) & 0xff;
4075
4076 next_page += sizeof (struct mode_geometry);
4077 }
4078
4079 if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) &&
4080 softs->flags & AAC_FLAGS_LBA_64BIT) {
4081 		/* 64-bit LBA needs large sense data */
4082 struct mode_control_scsi3 *mctl;
4083
4084 mctl = (void *)next_page;
4085 mctl->mode_page.code = MODEPAGE_CTRL_MODE;
4086 mctl->mode_page.length =
4087 sizeof (struct mode_control_scsi3) -
4088 sizeof (struct mode_page);
4089 mctl->d_sense = 1;
4090 }
4091
4092 finish:
4093 /* copyout the valid data. */
4094 bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount));
4095 kmem_free(sense_data, sdata_size);
4096 }
4097
4098 static int
4099 aac_name_node(dev_info_t *dip, char *name, int len)
4100 {
4101 int tgt, lun;
4102
4103 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4104 DDI_PROP_DONTPASS, "target", -1);
4105 if (tgt == -1)
4106 return (DDI_FAILURE);
4107 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4108 DDI_PROP_DONTPASS, "lun", -1);
4109 if (lun == -1)
4110 return (DDI_FAILURE);
4111
4112 (void) snprintf(name, len, "%x,%x", tgt, lun);
4113 return (DDI_SUCCESS);
4114 }
4115
4116 /*ARGSUSED*/
4117 static int
4118 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
4119 scsi_hba_tran_t *tran, struct scsi_device *sd)
4120 {
4121 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
4122 #ifdef DEBUG
4123 int ctl = ddi_get_instance(softs->devinfo_p);
4124 #endif
4125 uint16_t tgt = sd->sd_address.a_target;
4126 uint8_t lun = sd->sd_address.a_lun;
4127 struct aac_device *dvp;
4128
4129 DBCALLED(softs, 2);
4130
4131 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
4132 /*
4133 		 * If no persistent node exists, we don't allow a .conf node
4134 		 * to be created.
4135 */
4136 if (aac_find_child(softs, tgt, lun) != NULL) {
4137 if (ndi_merge_node(tgt_dip, aac_name_node) !=
4138 DDI_SUCCESS)
4139 /* Create this .conf node */
4140 return (DDI_SUCCESS);
4141 }
4142 return (DDI_FAILURE);
4143 }
4144
4145 /*
4146 	 * Only support containers/physical devices that have been
4147 	 * detected and are valid
4148 */
4149 mutex_enter(&softs->io_lock);
4150 if (tgt >= AAC_MAX_DEV(softs)) {
4151 AACDB_PRINT_TRAN(softs,
4152 "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun);
4153 mutex_exit(&softs->io_lock);
4154 return (DDI_FAILURE);
4155 }
4156
4157 if (tgt < AAC_MAX_LD) {
4158 dvp = (struct aac_device *)&softs->containers[tgt];
4159 if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) {
4160 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d",
4161 ctl, tgt, lun);
4162 mutex_exit(&softs->io_lock);
4163 return (DDI_FAILURE);
4164 }
4165 /*
4166 		 * Save the tgt_dip for the given target if one doesn't exist
4167 		 * already. Dips for non-existent targets will be cleared in
4168 		 * tgt_free.
4169 */
4170 if (softs->containers[tgt].dev.dip == NULL &&
4171 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0)
4172 softs->containers[tgt].dev.dip = tgt_dip;
4173 } else {
4174 dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)];
4175 /*
4176 		 * Save the tgt_dip for the given target if one doesn't exist
4177 		 * already. Dips for non-existent targets will be cleared in
4178 		 * tgt_free.
4179 */
4180
4181 if (softs->nondasds[AAC_PD(tgt)].dev.dip == NULL &&
4182 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0)
4183 softs->nondasds[AAC_PD(tgt)].dev.dip = tgt_dip;
4184 }
4185
4186 if (softs->flags & AAC_FLAGS_BRKUP) {
4187 if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
4188 "buf_break", 1) != DDI_PROP_SUCCESS) {
4189 cmn_err(CE_CONT, "unable to create "
4190 "property for t%dL%d (buf_break)", tgt, lun);
4191 }
4192 }
4193
4194 AACDB_PRINT(softs, CE_NOTE,
4195 "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun,
4196 (dvp->type == AAC_DEV_PD) ? "pd" : "ld");
4197 mutex_exit(&softs->io_lock);
4198 return (DDI_SUCCESS);
4199 }
4200
4201 static void
4202 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
4203 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
4204 {
4205 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran))
4206
4207 struct aac_softstate *softs = SD2AAC(sd);
4208 int tgt = sd->sd_address.a_target;
4209
4210 mutex_enter(&softs->io_lock);
4211 if (tgt < AAC_MAX_LD) {
4212 if (softs->containers[tgt].dev.dip == tgt_dip)
4213 softs->containers[tgt].dev.dip = NULL;
4214 } else {
4215 if (softs->nondasds[AAC_PD(tgt)].dev.dip == tgt_dip)
4216 softs->nondasds[AAC_PD(tgt)].dev.dip = NULL;
4217 softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID;
4218 }
4219 mutex_exit(&softs->io_lock);
4220 }
4221
4222 /*
4223 * Check if the firmware is Up And Running. If it is in the Kernel Panic
4224 * state, (BlinkLED code + 1) is returned.
4225 * 0 -- firmware up and running
4226 * -1 -- firmware dead
4227 * >0 -- firmware kernel panic
4228 */
4229 static int
4230 aac_check_adapter_health(struct aac_softstate *softs)
4231 {
4232 int rval;
4233
4234 rval = PCI_MEM_GET32(softs, AAC_OMR0);
4235
4236 if (rval & AAC_KERNEL_UP_AND_RUNNING) {
4237 rval = 0;
4238 } else if (rval & AAC_KERNEL_PANIC) {
4239 cmn_err(CE_WARN, "firmware panic");
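		/*
		 * The BlinkLED code is taken from bits 16..23 of OMR0;
		 * add 1 so that a code of 0 is still distinguishable from
		 * the "up and running" return value.
		 */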
4240 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */
4241 } else {
4242 cmn_err(CE_WARN, "firmware dead");
4243 rval = -1;
4244 }
4245 return (rval);
4246 }
4247
4248 static void
4249 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp,
4250 uchar_t reason)
4251 {
4252 acp->flags |= AAC_CMD_ABORT;
4253
4254 if (acp->pkt) {
4255 if (acp->slotp) { /* outstanding cmd */
4256 acp->pkt->pkt_state |= STATE_GOT_STATUS;
4257 }
4258
4259 switch (reason) {
4260 case CMD_TIMEOUT:
4261 AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p",
4262 acp);
4263 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
4264 STAT_TIMEOUT | STAT_BUS_RESET);
4265 break;
4266 case CMD_RESET:
4267 			/* aac supports only RESET_ALL */
4268 AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp);
4269 aac_set_pkt_reason(softs, acp, CMD_RESET,
4270 STAT_BUS_RESET);
4271 break;
4272 case CMD_ABORTED:
4273 AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p",
4274 acp);
4275 aac_set_pkt_reason(softs, acp, CMD_ABORTED,
4276 STAT_ABORTED);
4277 break;
4278 }
4279 }
4280 aac_end_io(softs, acp);
4281 }
4282
4283 /*
4284 * Abort all the pending commands of type iocmd or just the command pkt
4285 * corresponding to pkt
4286 */
4287 static void
4288 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt,
4289 int reason)
4290 {
4291 struct aac_cmd *ac_arg, *acp;
4292 int i;
4293
4294 if (pkt == NULL) {
4295 ac_arg = NULL;
4296 } else {
4297 ac_arg = PKT2AC(pkt);
4298 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ?
4299 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC;
4300 }
4301
4302 /*
4303 * a) outstanding commands on the controller
4304 	 * Note: outstanding commands should be aborted only after an
4305 	 * IOP reset has been done.
4306 */
4307 if (iocmd & AAC_IOCMD_OUTSTANDING) {
4308 struct aac_cmd *acp;
4309
4310 for (i = 0; i < AAC_MAX_LD; i++) {
4311 if (AAC_DEV_IS_VALID(&softs->containers[i].dev))
4312 softs->containers[i].reset = 1;
4313 }
4314 while ((acp = softs->q_busy.q_head) != NULL)
4315 aac_abort_iocmd(softs, acp, reason);
4316 }
4317
4318 /* b) commands in the waiting queues */
4319 for (i = 0; i < AAC_CMDQ_NUM; i++) {
4320 if (iocmd & (1 << i)) {
4321 if (ac_arg) {
4322 aac_abort_iocmd(softs, ac_arg, reason);
4323 } else {
4324 while ((acp = softs->q_wait[i].q_head) != NULL)
4325 aac_abort_iocmd(softs, acp, reason);
4326 }
4327 }
4328 }
4329 }
4330
4331 /*
4332 * The draining thread is shared among quiesce threads. It terminates
4333 * when the adapter is quiesced or stopped by aac_stop_drain().
4334 */
4335 static void
4336 aac_check_drain(void *arg)
4337 {
4338 struct aac_softstate *softs = arg;
4339
4340 mutex_enter(&softs->io_lock);
4341 if (softs->ndrains) {
4342 softs->drain_timeid = 0;
4343 /*
4344 * If both ASYNC and SYNC bus throttle are held,
4345 * wake up threads only when both are drained out.
4346 */
4347 if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 ||
4348 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) &&
4349 (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 ||
4350 softs->bus_ncmds[AAC_CMDQ_SYNC] == 0))
4351 cv_broadcast(&softs->drain_cv);
4352 else
4353 softs->drain_timeid = timeout(aac_check_drain, softs,
4354 AAC_QUIESCE_TICK * drv_usectohz(1000000));
4355 }
4356 mutex_exit(&softs->io_lock);
4357 }
4358
4359 /*
4360 * If not draining the outstanding cmds, drain them. Otherwise,
4361 * only update ndrains.
4362 */
4363 static void
4364 aac_start_drain(struct aac_softstate *softs)
4365 {
4366 if (softs->ndrains == 0) {
4367 ASSERT(softs->drain_timeid == 0);
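		/*
		 * drv_usectohz(1000000) is one second worth of ticks, so
		 * the drain check fires AAC_QUIESCE_TICK seconds from now.
		 */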
4368 softs->drain_timeid = timeout(aac_check_drain, softs,
4369 AAC_QUIESCE_TICK * drv_usectohz(1000000));
4370 }
4371 softs->ndrains++;
4372 }
4373
4374 /*
4375 * Stop the draining thread when no other threads use it any longer.
4376 * Side effect: io_lock may be released in the middle.
4377 */
4378 static void
4379 aac_stop_drain(struct aac_softstate *softs)
4380 {
4381 softs->ndrains--;
4382 if (softs->ndrains == 0) {
4383 if (softs->drain_timeid != 0) {
4384 timeout_id_t tid = softs->drain_timeid;
4385
4386 softs->drain_timeid = 0;
4387 mutex_exit(&softs->io_lock);
4388 (void) untimeout(tid);
4389 mutex_enter(&softs->io_lock);
4390 }
4391 }
4392 }
4393
4394 /*
4395 * The following function comes from Adaptec:
4396 *
4397  * Once an IOP reset is done, the driver basically has to re-initialize the
4398  * card as if coming up from a cold boot, and the driver is responsible for
4399  * any I/O that is outstanding to the adapter at the time of the IOP reset.
4400  * Prepare for IOP reset by making the init code modular, with the ability
4401  * to call it from multiple places.
4402 */
4403 static int
4404 aac_reset_adapter(struct aac_softstate *softs)
4405 {
4406 int health;
4407 uint32_t status;
4408 int rval = AAC_IOP_RESET_FAILED;
4409
4410 DBCALLED(softs, 1);
4411
4412 ASSERT(softs->state & AAC_STATE_RESET);
4413
4414 ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0);
4415 /* Disable interrupt */
4416 AAC_DISABLE_INTR(softs);
4417
4418 health = aac_check_adapter_health(softs);
4419 if (health == -1) {
4420 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
4421 goto finish;
4422 }
4423 if (health == 0) /* flush drives if possible */
4424 (void) aac_shutdown(softs);
4425
4426 /* Execute IOP reset */
4427 if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0,
4428 &status)) != AACOK) {
4429 ddi_acc_handle_t acc;
4430 struct aac_fib *fibp;
4431 struct aac_pause_command *pc;
4432
4433 if ((status & 0xf) == 0xf) {
4434 uint32_t wait_count;
4435
4436 /*
4437 * Sunrise Lake has dual cores and we must drag the
4438 * other core with us to reset simultaneously. There
4439 * are 2 bits in the Inbound Reset Control and Status
4440 * Register (offset 0x38) of the Sunrise Lake to reset
4441 * the chip without clearing out the PCI configuration
4442 * info (COMMAND & BARS).
4443 */
4444 PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST);
4445
4446 /*
4447 			 * We need to wait for 5 seconds before accessing the MU
4448 			 * again: 5 * 10000 iterations * 100us = 5,000,000us = 5s
4449 */
4450 wait_count = 5 * 10000;
4451 while (wait_count) {
4452 drv_usecwait(100); /* delay 100 microseconds */
4453 wait_count--;
4454 }
4455 } else {
4456 if (status == SRB_STATUS_INVALID_REQUEST)
4457 cmn_err(CE_WARN, "!IOP_RESET not supported");
4458 else /* probably timeout */
4459 cmn_err(CE_WARN, "!IOP_RESET failed");
4460
4461 /* Unwind aac_shutdown() */
4462 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
4463 acc = softs->sync_ac.slotp->fib_acc_handle;
4464
4465 fibp = softs->sync_ac.slotp->fibp;
4466 pc = (struct aac_pause_command *)&fibp->data[0];
4467
4468 bzero(pc, sizeof (*pc));
4469 ddi_put32(acc, &pc->Command, VM_ContainerConfig);
4470 ddi_put32(acc, &pc->Type, CT_PAUSE_IO);
4471 ddi_put32(acc, &pc->Timeout, 1);
4472 ddi_put32(acc, &pc->Min, 1);
4473 ddi_put32(acc, &pc->NoRescan, 1);
4474
4475 (void) aac_sync_fib(softs, ContainerCommand,
4476 AAC_FIB_SIZEOF(struct aac_pause_command));
4477 aac_sync_fib_slot_release(softs, &softs->sync_ac);
4478
4479 if (aac_check_adapter_health(softs) != 0)
4480 ddi_fm_service_impact(softs->devinfo_p,
4481 DDI_SERVICE_LOST);
4482 else
4483 /*
4484 				 * IOP reset not supported or IOP not reset
4485 */
4486 rval = AAC_IOP_RESET_ABNORMAL;
4487 goto finish;
4488 }
4489 }
4490
4491 /*
4492 * Re-read and renegotiate the FIB parameters, as one of the actions
4493 * that can result from an IOP reset is the running of a new firmware
4494 * image.
4495 */
4496 if (aac_common_attach(softs) != AACOK)
4497 goto finish;
4498
4499 rval = AAC_IOP_RESET_SUCCEED;
4500
4501 finish:
4502 AAC_ENABLE_INTR(softs);
4503 return (rval);
4504 }
4505
4506 static void
4507 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q,
4508 int throttle)
4509 {
4510 /*
4511 * If the bus is draining/quiesced, no changes to the throttles
4512 * are allowed. All throttles should have been set to 0.
4513 */
4514 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains)
4515 return;
4516 dvp->throttle[q] = throttle;
4517 }
4518
4519 static void
4520 aac_hold_bus(struct aac_softstate *softs, int iocmds)
4521 {
4522 int i, q;
4523
4524 /* Hold bus by holding every device on the bus */
4525 for (q = 0; q < AAC_CMDQ_NUM; q++) {
4526 if (iocmds & (1 << q)) {
4527 softs->bus_throttle[q] = 0;
4528 for (i = 0; i < AAC_MAX_LD; i++)
4529 aac_set_throttle(softs,
4530 &softs->containers[i].dev, q, 0);
4531 for (i = 0; i < AAC_MAX_PD(softs); i++)
4532 aac_set_throttle(softs,
4533 &softs->nondasds[i].dev, q, 0);
4534 }
4535 }
4536 }
4537
4538 static void
4539 aac_unhold_bus(struct aac_softstate *softs, int iocmds)
4540 {
4541 int i, q, max_throttle;
4542
4543 for (q = 0; q < AAC_CMDQ_NUM; q++) {
4544 if (iocmds & (1 << q)) {
4545 /*
4546 			 * Do not unhold the AAC_IOCMD_ASYNC bus if it has been
4547 			 * quiesced or is being drained, possibly by some
4548 			 * quiesce threads.
4549 */
4550 if (q == AAC_CMDQ_ASYNC && ((softs->state &
4551 AAC_STATE_QUIESCED) || softs->ndrains))
4552 continue;
4553 if (q == AAC_CMDQ_ASYNC)
4554 max_throttle = softs->total_slots -
4555 AAC_MGT_SLOT_NUM;
4556 else
4557 max_throttle = softs->total_slots - 1;
4558 softs->bus_throttle[q] = max_throttle;
4559 for (i = 0; i < AAC_MAX_LD; i++)
4560 aac_set_throttle(softs,
4561 &softs->containers[i].dev,
4562 q, max_throttle);
4563 for (i = 0; i < AAC_MAX_PD(softs); i++)
4564 aac_set_throttle(softs, &softs->nondasds[i].dev,
4565 q, max_throttle);
4566 }
4567 }
4568 }
4569
4570 static int
4571 aac_do_reset(struct aac_softstate *softs)
4572 {
4573 int health;
4574 int rval;
4575
4576 softs->state |= AAC_STATE_RESET;
4577 health = aac_check_adapter_health(softs);
4578
4579 /*
4580 	 * Hold off new I/O commands and wait for all outstanding I/O
4581 	 * commands to complete.
4582 */
4583 if (health == 0) {
4584 int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC];
4585 int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC];
4586
4587 if (sync_cmds == 0 && async_cmds == 0) {
4588 rval = AAC_IOP_RESET_SUCCEED;
4589 goto finish;
4590 }
4591 /*
4592 		 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds
4593 		 * to complete the outstanding I/O commands
4594 */
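		/* AAC_QUIESCE_TIMEOUT seconds in 100us polling intervals */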
4595 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10;
4596 int (*intr_handler)(struct aac_softstate *);
4597
4598 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4599 /*
4600 		 * Poll the adapter ourselves in case interrupts are disabled,
4601 		 * and to avoid releasing the io_lock.
4602 */
4603 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
4604 aac_process_intr_new : aac_process_intr_old;
4605 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] ||
4606 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) {
4607 drv_usecwait(100);
4608 (void) intr_handler(softs);
4609 timeout--;
4610 }
4611 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4612
4613 if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 &&
4614 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) {
4615 /* Cmds drained out */
4616 rval = AAC_IOP_RESET_SUCCEED;
4617 goto finish;
4618 } else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds ||
4619 softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) {
4620 /* Cmds not drained out, adapter overloaded */
4621 rval = AAC_IOP_RESET_ABNORMAL;
4622 goto finish;
4623 }
4624 }
4625
4626 /*
4627 	 * If even a longer wait still can't drain the outstanding I/O
4628 	 * commands, do an IOP reset.
4629 */
4630 if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED)
4631 softs->state |= AAC_STATE_DEAD;
4632
4633 finish:
4634 softs->state &= ~AAC_STATE_RESET;
4635 return (rval);
4636 }
4637
4638 static int
4639 aac_tran_reset(struct scsi_address *ap, int level)
4640 {
4641 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4642 int rval;
4643
4644 DBCALLED(softs, 1);
4645
4646 if (level != RESET_ALL) {
4647 cmn_err(CE_NOTE, "!reset target/lun not supported");
4648 return (0);
4649 }
4650
4651 mutex_enter(&softs->io_lock);
4652 switch (rval = aac_do_reset(softs)) {
4653 case AAC_IOP_RESET_SUCCEED:
4654 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC,
4655 NULL, CMD_RESET);
4656 aac_start_waiting_io(softs);
4657 break;
4658 case AAC_IOP_RESET_FAILED:
4659 /* Abort IOCTL cmds when adapter is dead */
4660 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET);
4661 break;
4662 case AAC_IOP_RESET_ABNORMAL:
4663 aac_start_waiting_io(softs);
4664 }
4665 mutex_exit(&softs->io_lock);
4666
4667 aac_drain_comp_q(softs);
4668 return (rval == 0);
4669 }
4670
4671 static int
4672 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4673 {
4674 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4675
4676 DBCALLED(softs, 1);
4677
4678 mutex_enter(&softs->io_lock);
4679 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED);
4680 mutex_exit(&softs->io_lock);
4681
4682 aac_drain_comp_q(softs);
4683 return (1);
4684 }
4685
4686 void
4687 aac_free_dmamap(struct aac_cmd *acp)
4688 {
4689 /* Free dma mapping */
4690 if (acp->flags & AAC_CMD_DMA_VALID) {
4691 ASSERT(acp->buf_dma_handle);
4692 (void) ddi_dma_unbind_handle(acp->buf_dma_handle);
4693 acp->flags &= ~AAC_CMD_DMA_VALID;
4694 }
4695
4696 if (acp->abp != NULL) { /* free non-aligned buf DMA */
4697 ASSERT(acp->buf_dma_handle);
4698 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp)
4699 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr,
4700 (uint8_t *)acp->abp, acp->bp->b_bcount,
4701 DDI_DEV_AUTOINCR);
4702 ddi_dma_mem_free(&acp->abh);
4703 acp->abp = NULL;
4704 }
4705
4706 if (acp->buf_dma_handle) {
4707 ddi_dma_free_handle(&acp->buf_dma_handle);
4708 acp->buf_dma_handle = NULL;
4709 }
4710 }
4711
4712 static void
4713 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
4714 {
4715 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported",
4716 ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd);
4717 aac_free_dmamap(acp);
4718 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0);
4719 aac_soft_callback(softs, acp);
4720 }
4721
4722 /*
4723 * Handle command to logical device
4724 */
4725 static int
4726 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp)
4727 {
4728 struct aac_container *dvp;
4729 struct scsi_pkt *pkt;
4730 union scsi_cdb *cdbp;
4731 struct buf *bp;
4732 int rval;
4733
4734 dvp = (struct aac_container *)acp->dvp;
4735 pkt = acp->pkt;
4736 cdbp = (void *)pkt->pkt_cdbp;
4737 bp = acp->bp;
4738
4739 switch (cdbp->scc_cmd) {
4740 case SCMD_INQUIRY: /* inquiry */
4741 aac_free_dmamap(acp);
4742 aac_inquiry(softs, pkt, cdbp, bp);
4743 aac_soft_callback(softs, acp);
4744 rval = TRAN_ACCEPT;
4745 break;
4746
4747 case SCMD_READ_CAPACITY: /* read capacity */
4748 if (bp && bp->b_un.b_addr && bp->b_bcount) {
4749 struct scsi_capacity cap;
4750 uint64_t last_lba;
4751
4752 /* check 64-bit LBA */
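			/*
			 * If the last LBA does not fit in 32 bits, report
			 * 0xffffffff so the initiator falls back to
			 * READ CAPACITY(16) (SCMD_SVC_ACTION_IN_G4 below).
			 */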
4753 last_lba = dvp->size - 1;
4754 if (last_lba > 0xffffffffull) {
4755 cap.capacity = 0xfffffffful;
4756 } else {
4757 cap.capacity = BE_32(last_lba);
4758 }
4759 cap.lbasize = BE_32(AAC_SECTOR_SIZE);
4760
4761 aac_free_dmamap(acp);
4762 if (bp->b_flags & (B_PHYS|B_PAGEIO))
4763 bp_mapin(bp);
4764 bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8));
4765 pkt->pkt_state |= STATE_XFERRED_DATA;
4766 }
4767 aac_soft_callback(softs, acp);
4768 rval = TRAN_ACCEPT;
4769 break;
4770
4771 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */
4772 /* Check if containers need 64-bit LBA support */
4773 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) {
4774 if (bp && bp->b_un.b_addr && bp->b_bcount) {
4775 struct scsi_capacity_16 cap16;
4776 int cap_len = sizeof (struct scsi_capacity_16);
4777
4778 bzero(&cap16, cap_len);
4779 cap16.sc_capacity = BE_64(dvp->size - 1);
4780 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE);
4781
4782 aac_free_dmamap(acp);
4783 if (bp->b_flags & (B_PHYS | B_PAGEIO))
4784 bp_mapin(bp);
4785 bcopy(&cap16, bp->b_un.b_addr,
4786 min(bp->b_bcount, cap_len));
4787 pkt->pkt_state |= STATE_XFERRED_DATA;
4788 }
4789 aac_soft_callback(softs, acp);
4790 } else {
4791 aac_unknown_scmd(softs, acp);
4792 }
4793 rval = TRAN_ACCEPT;
4794 break;
4795
4796 case SCMD_READ_G4: /* read_16 */
4797 case SCMD_WRITE_G4: /* write_16 */
4798 if (softs->flags & AAC_FLAGS_RAW_IO) {
4799 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */
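			/*
			 * Cast the low 32 bits to uint32_t so they are not
			 * sign-extended when combined with the high 32 bits
			 * of the 64-bit LBA.
			 */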
4800 acp->blkno = ((uint64_t) \
4801 GETG4ADDR(cdbp) << 32) | \
4802 (uint32_t)GETG4ADDRTL(cdbp);
4803 goto do_io;
4804 }
4805 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported");
4806 aac_unknown_scmd(softs, acp);
4807 rval = TRAN_ACCEPT;
4808 break;
4809
4810 case SCMD_READ: /* read_6 */
4811 case SCMD_WRITE: /* write_6 */
4812 acp->blkno = GETG0ADDR(cdbp);
4813 goto do_io;
4814
4815 case SCMD_READ_G5: /* read_12 */
4816 case SCMD_WRITE_G5: /* write_12 */
4817 acp->blkno = GETG5ADDR(cdbp);
4818 goto do_io;
4819
4820 case SCMD_READ_G1: /* read_10 */
4821 case SCMD_WRITE_G1: /* write_10 */
4822 acp->blkno = (uint32_t)GETG1ADDR(cdbp);
4823 do_io:
4824 if (acp->flags & AAC_CMD_DMA_VALID) {
4825 uint64_t cnt_size = dvp->size;
4826
4827 /*
4828 			 * If the LBA is beyond the array size and raw I/O
4829 			 * is used, the adapter may hang, so check it before
4830 			 * sending.
4831 * NOTE: (blkno + blkcnt) may overflow
4832 */
4833 if ((acp->blkno < cnt_size) &&
4834 ((acp->blkno + acp->bcount /
4835 AAC_BLK_SIZE) <= cnt_size)) {
4836 rval = aac_do_io(softs, acp);
4837 } else {
4838 /*
4839 * Request exceeds the capacity of disk,
4840 * set error block number to last LBA
4841 * + 1.
4842 */
4843 aac_set_arq_data(pkt,
4844 KEY_ILLEGAL_REQUEST, 0x21,
4845 0x00, cnt_size);
4846 aac_soft_callback(softs, acp);
4847 rval = TRAN_ACCEPT;
4848 }
4849 } else if (acp->bcount == 0) {
4850 /* For 0 length IO, just return ok */
4851 aac_soft_callback(softs, acp);
4852 rval = TRAN_ACCEPT;
4853 } else {
4854 rval = TRAN_BADPKT;
4855 }
4856 break;
4857
4858 case SCMD_MODE_SENSE: /* mode_sense_6 */
4859 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */
4860 int capacity;
4861
4862 aac_free_dmamap(acp);
4863 if (dvp->size > 0xffffffffull)
4864 capacity = 0xfffffffful; /* 64-bit LBA */
4865 else
4866 capacity = dvp->size;
4867 aac_mode_sense(softs, pkt, cdbp, bp, capacity);
4868 aac_soft_callback(softs, acp);
4869 rval = TRAN_ACCEPT;
4870 break;
4871 }
4872
4873 case SCMD_START_STOP:
4874 if (softs->support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
4875 acp->aac_cmd_fib = aac_cmd_fib_startstop;
4876 acp->ac_comp = aac_startstop_complete;
4877 rval = aac_do_io(softs, acp);
4878 break;
4879 }
4880 /* FALLTHRU */
4881 case SCMD_TEST_UNIT_READY:
4882 case SCMD_REQUEST_SENSE:
4883 case SCMD_FORMAT:
4884 aac_free_dmamap(acp);
4885 if (bp && bp->b_un.b_addr && bp->b_bcount) {
4886 if (acp->flags & AAC_CMD_BUF_READ) {
4887 if (bp->b_flags & (B_PHYS|B_PAGEIO))
4888 bp_mapin(bp);
4889 bzero(bp->b_un.b_addr, bp->b_bcount);
4890 }
4891 pkt->pkt_state |= STATE_XFERRED_DATA;
4892 }
4893 aac_soft_callback(softs, acp);
4894 rval = TRAN_ACCEPT;
4895 break;
4896
4897 case SCMD_SYNCHRONIZE_CACHE:
4898 acp->flags |= AAC_CMD_NTAG;
4899 acp->aac_cmd_fib = aac_cmd_fib_sync;
4900 acp->ac_comp = aac_synccache_complete;
4901 rval = aac_do_io(softs, acp);
4902 break;
4903
4904 case SCMD_DOORLOCK:
4905 aac_free_dmamap(acp);
4906 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0;
4907 aac_soft_callback(softs, acp);
4908 rval = TRAN_ACCEPT;
4909 break;
4910
4911 default: /* unknown command */
4912 aac_unknown_scmd(softs, acp);
4913 rval = TRAN_ACCEPT;
4914 break;
4915 }
4916
4917 return (rval);
4918 }
4919
4920 static int
4921 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
4922 {
4923 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4924 struct aac_cmd *acp = PKT2AC(pkt);
4925 struct aac_device *dvp = acp->dvp;
4926 int rval;
4927
4928 DBCALLED(softs, 2);
4929
4930 /*
4931 * Reinitialize some fields of ac and pkt; the packet may
4932 * have been resubmitted
4933 */
4934 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \
4935 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID;
4936 acp->timeout = acp->pkt->pkt_time;
4937 if (pkt->pkt_flags & FLAG_NOINTR)
4938 acp->flags |= AAC_CMD_NO_INTR;
4939 #ifdef DEBUG
4940 acp->fib_flags = AACDB_FLAGS_FIB_SCMD;
4941 #endif
4942 pkt->pkt_reason = CMD_CMPLT;
4943 pkt->pkt_state = 0;
4944 pkt->pkt_statistics = 0;
4945 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
4946
4947 if (acp->flags & AAC_CMD_DMA_VALID) {
4948 pkt->pkt_resid = acp->bcount;
4949 /* Consistent packets need to be sync'ed first */
4950 if ((acp->flags & AAC_CMD_CONSISTENT) &&
4951 (acp->flags & AAC_CMD_BUF_WRITE))
4952 if (aac_dma_sync_ac(acp) != AACOK) {
4953 ddi_fm_service_impact(softs->devinfo_p,
4954 DDI_SERVICE_UNAFFECTED);
4955 return (TRAN_BADPKT);
4956 }
4957 } else {
4958 pkt->pkt_resid = 0;
4959 }
4960
4961 mutex_enter(&softs->io_lock);
4962 AACDB_PRINT_SCMD(softs, acp);
4963 if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) &&
4964 !(softs->state & AAC_STATE_DEAD)) {
4965 if (dvp->type == AAC_DEV_LD) {
4966 if (ap->a_lun == 0)
4967 rval = aac_tran_start_ld(softs, acp);
4968 else
4969 goto error;
4970 } else {
4971 rval = aac_do_io(softs, acp);
4972 }
4973 } else {
4974 error:
4975 #ifdef DEBUG
4976 if (!(softs->state & AAC_STATE_DEAD)) {
4977 AACDB_PRINT_TRAN(softs,
4978 "Cannot send cmd to target t%dL%d: %s",
4979 ap->a_target, ap->a_lun,
4980 "target invalid");
4981 } else {
4982 AACDB_PRINT(softs, CE_WARN,
4983 "Cannot send cmd to target t%dL%d: %s",
4984 ap->a_target, ap->a_lun,
4985 "adapter dead");
4986 }
4987 #endif
4988 rval = TRAN_FATAL_ERROR;
4989 }
4990 mutex_exit(&softs->io_lock);
4991 return (rval);
4992 }
4993
4994 static int
4995 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom)
4996 {
4997 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4998 struct aac_device *dvp;
4999 int rval;
5000
5001 DBCALLED(softs, 2);
5002
5003 /* We don't allow inquiring about capabilities for other targets */
5004 if (cap == NULL || whom == 0) {
5005 AACDB_PRINT(softs, CE_WARN,
5006 "GetCap> %s not supported: whom=%d", cap, whom);
5007 return (-1);
5008 }
5009
5010 mutex_enter(&softs->io_lock);
5011 dvp = AAC_DEV(softs, ap->a_target);
5012 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
5013 mutex_exit(&softs->io_lock);
5014 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap",
5015 ap->a_target, ap->a_lun);
5016 return (-1);
5017 }
5018
5019 switch (scsi_hba_lookup_capstr(cap)) {
5020 case SCSI_CAP_ARQ: /* auto request sense */
5021 rval = 1;
5022 break;
5023 case SCSI_CAP_UNTAGGED_QING:
5024 case SCSI_CAP_TAGGED_QING:
5025 rval = 1;
5026 break;
5027 case SCSI_CAP_DMA_MAX:
5028 rval = softs->dma_max;
5029 break;
5030 default:
5031 rval = -1;
5032 break;
5033 }
5034 mutex_exit(&softs->io_lock);
5035
5036 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d",
5037 cap, ap->a_target, ap->a_lun, rval);
5038 return (rval);
5039 }
5040
5041 /*ARGSUSED*/
5042 static int
5043 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
5044 {
5045 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5046 struct aac_device *dvp;
5047 int rval;
5048
5049 DBCALLED(softs, 2);
5050
5051 	/* We don't allow setting capabilities for other targets */
5052 if (cap == NULL || whom == 0) {
5053 AACDB_PRINT(softs, CE_WARN,
5054 "SetCap> %s not supported: whom=%d", cap, whom);
5055 return (-1);
5056 }
5057
5058 mutex_enter(&softs->io_lock);
5059 dvp = AAC_DEV(softs, ap->a_target);
5060 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
5061 mutex_exit(&softs->io_lock);
5062 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap",
5063 ap->a_target, ap->a_lun);
5064 return (-1);
5065 }
5066
5067 switch (scsi_hba_lookup_capstr(cap)) {
5068 case SCSI_CAP_ARQ:
5069 /* Force auto request sense */
5070 rval = (value == 1) ? 1 : 0;
5071 break;
5072 case SCSI_CAP_UNTAGGED_QING:
5073 case SCSI_CAP_TAGGED_QING:
5074 rval = (value == 1) ? 1 : 0;
5075 break;
5076 default:
5077 rval = -1;
5078 break;
5079 }
5080 mutex_exit(&softs->io_lock);
5081
5082 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d",
5083 cap, ap->a_target, ap->a_lun, value, rval);
5084 return (rval);
5085 }
5086
5087 static void
5088 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5089 {
5090 struct aac_cmd *acp = PKT2AC(pkt);
5091
5092 DBCALLED(NULL, 2);
5093
5094 if (acp->sgt) {
5095 kmem_free(acp->sgt, sizeof (struct aac_sge) * \
5096 acp->left_cookien);
5097 }
5098 aac_free_dmamap(acp);
5099 ASSERT(acp->slotp == NULL);
5100 scsi_hba_pkt_free(ap, pkt);
5101 }
5102
5103 int
5104 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp,
5105 struct buf *bp, int flags, int (*cb)(), caddr_t arg)
5106 {
5107 int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
5108 uint_t oldcookiec;
5109 int bioerr;
5110 int rval;
5111
5112 oldcookiec = acp->left_cookien;
5113
5114 /* Move window to build s/g map */
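	/*
	 * With PKT_DMA_PARTIAL a bind may cover only part of the
	 * transfer; each later call advances to the next DMA window
	 * via ddi_dma_getwin() until total_nwin windows are consumed.
	 */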
5115 if (acp->total_nwin > 0) {
5116 if (++acp->cur_win < acp->total_nwin) {
5117 off_t off;
5118 size_t len;
5119
5120 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win,
5121 &off, &len, &acp->cookie, &acp->left_cookien);
5122 if (rval == DDI_SUCCESS)
5123 goto get_dma_cookies;
5124 AACDB_PRINT(softs, CE_WARN,
5125 "ddi_dma_getwin() fail %d", rval);
5126 return (AACERR);
5127 }
5128 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer");
5129 return (AACERR);
5130 }
5131
5132 /* We need to transfer data, so we alloc DMA resources for this pkt */
5133 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) {
5134 uint_t dma_flags = 0;
5135 struct aac_sge *sge;
5136
5137 /*
5138 		 * We will still use this pointer to fake some
5139 		 * information in tran_start
5140 */
5141 acp->bp = bp;
5142
5143 /* Set dma flags */
5144 if (BUF_IS_READ(bp)) {
5145 dma_flags |= DDI_DMA_READ;
5146 acp->flags |= AAC_CMD_BUF_READ;
5147 } else {
5148 dma_flags |= DDI_DMA_WRITE;
5149 acp->flags |= AAC_CMD_BUF_WRITE;
5150 }
5151 if (flags & PKT_CONSISTENT)
5152 dma_flags |= DDI_DMA_CONSISTENT;
5153 if (flags & PKT_DMA_PARTIAL)
5154 dma_flags |= DDI_DMA_PARTIAL;
5155
5156 /* Alloc buf dma handle */
5157 if (!acp->buf_dma_handle) {
5158 rval = ddi_dma_alloc_handle(softs->devinfo_p,
5159 &softs->buf_dma_attr, cb, arg,
5160 &acp->buf_dma_handle);
5161 if (rval != DDI_SUCCESS) {
5162 AACDB_PRINT(softs, CE_WARN,
5163 "Can't allocate DMA handle, errno=%d",
5164 rval);
5165 goto error_out;
5166 }
5167 }
5168
5169 /* Bind buf */
5170 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) {
5171 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle,
5172 bp, dma_flags, cb, arg, &acp->cookie,
5173 &acp->left_cookien);
5174 } else {
5175 size_t bufsz;
5176
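			/*
			 * The buffer is not suitably aligned for the
			 * controller, so bounce it: allocate an aligned DMA
			 * buffer (abp), copy data into it here for writes,
			 * and copy it back out for reads when the mapping
			 * is freed in aac_free_dmamap().
			 */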
5177 AACDB_PRINT_TRAN(softs,
5178 "non-aligned buffer: addr=0x%p, cnt=%lu",
5179 (void *)bp->b_un.b_addr, bp->b_bcount);
5180 if (bp->b_flags & (B_PAGEIO|B_PHYS))
5181 bp_mapin(bp);
5182
5183 rval = ddi_dma_mem_alloc(acp->buf_dma_handle,
5184 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN),
5185 &softs->acc_attr, DDI_DMA_STREAMING,
5186 cb, arg, &acp->abp, &bufsz, &acp->abh);
5187
5188 if (rval != DDI_SUCCESS) {
5189 AACDB_PRINT(softs, CE_NOTE,
5190 "Cannot alloc DMA to non-aligned buf");
5191 bioerr = 0;
5192 goto error_out;
5193 }
5194
5195 if (acp->flags & AAC_CMD_BUF_WRITE)
5196 ddi_rep_put8(acp->abh,
5197 (uint8_t *)bp->b_un.b_addr,
5198 (uint8_t *)acp->abp, bp->b_bcount,
5199 DDI_DEV_AUTOINCR);
5200
5201 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle,
5202 NULL, acp->abp, bufsz, dma_flags, cb, arg,
5203 &acp->cookie, &acp->left_cookien);
5204 }
5205
5206 switch (rval) {
5207 case DDI_DMA_PARTIAL_MAP:
5208 if (ddi_dma_numwin(acp->buf_dma_handle,
5209 &acp->total_nwin) == DDI_FAILURE) {
5210 AACDB_PRINT(softs, CE_WARN,
5211 "Cannot get number of DMA windows");
5212 bioerr = 0;
5213 goto error_out;
5214 }
5215 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
5216 acp->left_cookien);
5217 acp->cur_win = 0;
5218 break;
5219
5220 case DDI_DMA_MAPPED:
5221 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
5222 acp->left_cookien);
5223 acp->cur_win = 0;
5224 acp->total_nwin = 1;
5225 break;
5226
5227 case DDI_DMA_NORESOURCES:
5228 bioerr = 0;
5229 AACDB_PRINT(softs, CE_WARN,
5230 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES");
5231 goto error_out;
5232 case DDI_DMA_BADATTR:
5233 case DDI_DMA_NOMAPPING:
5234 bioerr = EFAULT;
5235 AACDB_PRINT(softs, CE_WARN,
5236 "Cannot bind buf for DMA: DDI_DMA_NOMAPPING");
5237 goto error_out;
5238 case DDI_DMA_TOOBIG:
5239 bioerr = EINVAL;
5240 AACDB_PRINT(softs, CE_WARN,
5241 "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)",
5242 bp->b_bcount);
5243 goto error_out;
5244 default:
5245 bioerr = EINVAL;
5246 AACDB_PRINT(softs, CE_WARN,
5247 "Cannot bind buf for DMA: %d", rval);
5248 goto error_out;
5249 }
5250 acp->flags |= AAC_CMD_DMA_VALID;
5251
5252 get_dma_cookies:
5253 ASSERT(acp->left_cookien > 0);
5254 if (acp->left_cookien > softs->aac_sg_tablesize) {
5255 AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d",
5256 acp->left_cookien);
5257 bioerr = EINVAL;
5258 goto error_out;
5259 }
5260 if (oldcookiec != acp->left_cookien && acp->sgt != NULL) {
5261 kmem_free(acp->sgt, sizeof (struct aac_sge) * \
5262 oldcookiec);
5263 acp->sgt = NULL;
5264 }
5265 if (acp->sgt == NULL) {
5266 acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \
5267 acp->left_cookien, kf);
5268 if (acp->sgt == NULL) {
5269 AACDB_PRINT(softs, CE_WARN,
5270 "sgt kmem_alloc fail");
5271 bioerr = ENOMEM;
5272 goto error_out;
5273 }
5274 }
5275
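		/*
		 * Translate the DMA cookies into s/g entries: each entry
		 * records the byte count and the 64-bit cookie address
		 * split into low/high 32-bit halves, while acp->bcount
		 * accumulates the total length of this window.
		 */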
5276 sge = &acp->sgt[0];
5277 sge->bcount = acp->cookie.dmac_size;
5278 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5279 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5280 acp->bcount = acp->cookie.dmac_size;
5281 for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) {
5282 ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie);
5283 sge->bcount = acp->cookie.dmac_size;
5284 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5285 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5286 acp->bcount += acp->cookie.dmac_size;
5287 }
5288
5289 /*
5290 		 * Note: The old DMA engine does not correctly handle the
5291 		 * dma_attr_maxxfer attribute, so we have to enforce it
5292 		 * ourselves.
5293 */
5294 if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) {
5295 AACDB_PRINT(softs, CE_NOTE,
5296 "large xfer size received %d\n", acp->bcount);
5297 bioerr = EINVAL;
5298 goto error_out;
5299 }
5300
5301 acp->total_xfer += acp->bcount;
5302
5303 if (acp->pkt) {
5304 /* Return remaining byte count */
5305 if (acp->total_xfer <= bp->b_bcount) {
5306 acp->pkt->pkt_resid = bp->b_bcount - \
5307 acp->total_xfer;
5308 } else {
5309 /*
5310 				 * The allocated DMA size is greater than the
5311 				 * buf size of bp. This is caused by devices
5312 				 * like tape: we have extra bytes allocated, but
5313 				 * the packet residual has to stay correct.
5314 */
5315 acp->pkt->pkt_resid = 0;
5316 }
5317 AACDB_PRINT_TRAN(softs,
5318 "bp=0x%p, xfered=%d/%d, resid=%d",
5319 (void *)bp->b_un.b_addr, (int)acp->total_xfer,
5320 (int)bp->b_bcount, (int)acp->pkt->pkt_resid);
5321 }
5322 }
5323 return (AACOK);
5324
5325 error_out:
5326 bioerror(bp, bioerr);
5327 return (AACERR);
5328 }
5329
5330 static struct scsi_pkt *
5331 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
5332 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
5333 int (*callback)(), caddr_t arg)
5334 {
5335 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5336 struct aac_cmd *acp, *new_acp;
5337
5338 DBCALLED(softs, 2);
5339
5340 /* Allocate pkt */
5341 if (pkt == NULL) {
5342 int slen;
5343
5344 /* Force auto request sense */
5345 slen = (statuslen > softs->slen) ? statuslen : softs->slen;
5346 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen,
5347 slen, tgtlen, sizeof (struct aac_cmd), callback, arg);
5348 if (pkt == NULL) {
5349 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed");
5350 return (NULL);
5351 }
5352 acp = new_acp = PKT2AC(pkt);
5353 acp->pkt = pkt;
5354 acp->cmdlen = cmdlen;
5355
5356 if (ap->a_target < AAC_MAX_LD) {
5357 acp->dvp = &softs->containers[ap->a_target].dev;
5358 acp->aac_cmd_fib = softs->aac_cmd_fib;
5359 acp->ac_comp = aac_ld_complete;
5360 } else {
5361 acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev;
5362 acp->aac_cmd_fib = softs->aac_cmd_fib_scsi;
5363 acp->ac_comp = aac_pd_complete;
5364 }
5365 } else {
5366 acp = PKT2AC(pkt);
5367 new_acp = NULL;
5368 }
5369
5370 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK)
5371 return (pkt);
5372
5373 if (new_acp)
5374 aac_tran_destroy_pkt(ap, pkt);
5375 return (NULL);
5376 }
5377
5378 /*
5379 * tran_sync_pkt(9E) - explicit DMA synchronization
5380 */
5381 /*ARGSUSED*/
5382 static void
5383 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5384 {
5385 struct aac_cmd *acp = PKT2AC(pkt);
5386
5387 DBCALLED(NULL, 2);
5388
5389 if (aac_dma_sync_ac(acp) != AACOK)
5390 ddi_fm_service_impact(
5391 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p,
5392 DDI_SERVICE_UNAFFECTED);
5393 }
5394
5395 /*
5396 * tran_dmafree(9E) - deallocate DMA resources allocated for command
5397 */
5398 /*ARGSUSED*/
5399 static void
5400 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
5401 {
5402 struct aac_cmd *acp = PKT2AC(pkt);
5403
5404 DBCALLED(NULL, 2);
5405
5406 aac_free_dmamap(acp);
5407 }
5408
5409 static int
5410 aac_do_quiesce(struct aac_softstate *softs)
5411 {
5412 aac_hold_bus(softs, AAC_IOCMD_ASYNC);
5413 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) {
5414 aac_start_drain(softs);
5415 do {
5416 if (cv_wait_sig(&softs->drain_cv,
5417 &softs->io_lock) == 0) {
5418 /* Quiesce has been interrupted */
5419 aac_stop_drain(softs);
5420 aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5421 aac_start_waiting_io(softs);
5422 return (AACERR);
5423 }
5424 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]);
5425 aac_stop_drain(softs);
5426 }
5427
5428 softs->state |= AAC_STATE_QUIESCED;
5429 return (AACOK);
5430 }
5431
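/*
 * tran_quiesce(9E) - stop activity on the bus by holding and draining
 * outstanding async commands
 */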
5432 static int
5433 aac_tran_quiesce(dev_info_t *dip)
5434 {
5435 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5436 int rval;
5437
5438 DBCALLED(softs, 1);
5439
5440 mutex_enter(&softs->io_lock);
5441 if (aac_do_quiesce(softs) == AACOK)
5442 rval = 0;
5443 else
5444 rval = 1;
5445 mutex_exit(&softs->io_lock);
5446 return (rval);
5447 }
5448
5449 static int
5450 aac_do_unquiesce(struct aac_softstate *softs)
5451 {
5452 softs->state &= ~AAC_STATE_QUIESCED;
5453 aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5454
5455 aac_start_waiting_io(softs);
5456 return (AACOK);
5457 }
5458
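/*
 * tran_unquiesce(9E) - resume I/O activity on a quiesced bus
 */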
5459 static int
5460 aac_tran_unquiesce(dev_info_t *dip)
5461 {
5462 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5463 int rval;
5464
5465 DBCALLED(softs, 1);
5466
5467 mutex_enter(&softs->io_lock);
5468 if (aac_do_unquiesce(softs) == AACOK)
5469 rval = 0;
5470 else
5471 rval = 1;
5472 mutex_exit(&softs->io_lock);
5473 return (rval);
5474 }
5475
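/*
 * Allocate the SCSA transport structure, fill in the HBA entry points and
 * attach the driver to the SCSA framework
 */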
5476 static int
5477 aac_hba_setup(struct aac_softstate *softs)
5478 {
5479 scsi_hba_tran_t *hba_tran;
5480 int rval;
5481
5482 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP);
5483 if (hba_tran == NULL)
5484 return (AACERR);
5485 hba_tran->tran_hba_private = softs;
5486 hba_tran->tran_tgt_init = aac_tran_tgt_init;
5487 hba_tran->tran_tgt_free = aac_tran_tgt_free;
5488 hba_tran->tran_tgt_probe = scsi_hba_probe;
5489 hba_tran->tran_start = aac_tran_start;
5490 hba_tran->tran_getcap = aac_tran_getcap;
5491 hba_tran->tran_setcap = aac_tran_setcap;
5492 hba_tran->tran_init_pkt = aac_tran_init_pkt;
5493 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt;
5494 hba_tran->tran_reset = aac_tran_reset;
5495 hba_tran->tran_abort = aac_tran_abort;
5496 hba_tran->tran_sync_pkt = aac_tran_sync_pkt;
5497 hba_tran->tran_dmafree = aac_tran_dmafree;
5498 hba_tran->tran_quiesce = aac_tran_quiesce;
5499 hba_tran->tran_unquiesce = aac_tran_unquiesce;
5500 hba_tran->tran_bus_config = aac_tran_bus_config;
5501 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr,
5502 hba_tran, 0);
5503 if (rval != DDI_SUCCESS) {
5504 scsi_hba_tran_free(hba_tran);
5505 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed");
5506 return (AACERR);
5507 }
5508
5509 softs->hba_tran = hba_tran;
5510 return (AACOK);
5511 }
5512
5513 /*
5514 * FIB setup operations
5515 */
5516
5517 /*
5518 * Init FIB header
5519 */
5520 static void
5521 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_cmd *acp,
5522 uint16_t cmd)
5523 {
5524 struct aac_slot *slotp = acp->slotp;
5525 ddi_acc_handle_t acc = slotp->fib_acc_handle;
5526 struct aac_fib *fibp = slotp->fibp;
5527 uint32_t xfer_state;
5528
5529 xfer_state =
5530 AAC_FIBSTATE_HOSTOWNED |
5531 AAC_FIBSTATE_INITIALISED |
5532 AAC_FIBSTATE_EMPTY |
5533 AAC_FIBSTATE_FAST_RESPONSE | /* enable fast io */
5534 AAC_FIBSTATE_FROMHOST |
5535 AAC_FIBSTATE_REXPECTED |
5536 AAC_FIBSTATE_NORM;
5537
5538 if (!(acp->flags & AAC_CMD_SYNC))
5539 xfer_state |= AAC_FIBSTATE_ASYNC;
5540
5541 ddi_put32(acc, &fibp->Header.XferState, xfer_state);
5542 ddi_put16(acc, &fibp->Header.Command, cmd);
5543 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB);
5544 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */
5545 ddi_put16(acc, &fibp->Header.Size, acp->fib_size);
5546 ddi_put16(acc, &fibp->Header.SenderSize, softs->aac_max_fib_size);
5547 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2));
5548 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5549 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */
5550 }
5551
5552 /*
5553 * Init FIB for raw IO command
5554 */
5555 static void
5556 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp)
5557 {
5558 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5559 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0];
5560 struct aac_sg_entryraw *sgp;
5561 struct aac_sge *sge;
5562
5563 /* Calculate FIB size */
5564 acp->fib_size = sizeof (struct aac_fib_header) + \
5565 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \
5566 sizeof (struct aac_sg_entryraw);
5567
5568 aac_cmd_fib_header(softs, acp, RawIo);
5569
5570 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0);
5571 ddi_put16(acc, &io->BpTotal, 0);
5572 ddi_put16(acc, &io->BpComplete, 0);
5573
5574 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno));
5575 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno));
5576 ddi_put16(acc, &io->ContainerId,
5577 ((struct aac_container *)acp->dvp)->cid);
5578
5579 /* Fill SG table */
5580 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien);
5581 ddi_put32(acc, &io->ByteCount, acp->bcount);
5582
5583 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0];
5584 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5585 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5586 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5587 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5588 sgp->Next = 0;
5589 sgp->Prev = 0;
5590 sgp->Flags = 0;
5591 }
5592 }
5593
5594 /* Init FIB for 64-bit block IO command */
5595 static void
5596 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp)
5597 {
5598 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5599 struct aac_blockread64 *br = (struct aac_blockread64 *) \
5600 &acp->slotp->fibp->data[0];
5601 struct aac_sg_entry64 *sgp;
5602 struct aac_sge *sge;
5603
5604 acp->fib_size = sizeof (struct aac_fib_header) + \
5605 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \
5606 sizeof (struct aac_sg_entry64);
5607
5608 aac_cmd_fib_header(softs, acp, ContainerCommand64);
5609
5610 /*
5611 * The definitions for aac_blockread64 and aac_blockwrite64
5612 * are the same.
5613 */
5614 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5615 ddi_put16(acc, &br->ContainerId,
5616 ((struct aac_container *)acp->dvp)->cid);
5617 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ?
5618 VM_CtHostRead64 : VM_CtHostWrite64);
5619 ddi_put16(acc, &br->Pad, 0);
5620 ddi_put16(acc, &br->Flags, 0);
5621
5622 /* Fill SG table */
5623 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien);
5624 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE);
5625
5626 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0];
5627 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5628 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5629 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5630 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5631 }
5632 }
5633
5634 /* Init FIB for block IO command */
5635 static void
5636 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp)
5637 {
5638 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5639 struct aac_blockread *br = (struct aac_blockread *) \
5640 &acp->slotp->fibp->data[0];
5641 struct aac_sg_entry *sgp;
5642 struct aac_sge *sge = &acp->sgt[0];
5643
5644 if (acp->flags & AAC_CMD_BUF_READ) {
5645 acp->fib_size = sizeof (struct aac_fib_header) + \
5646 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \
5647 sizeof (struct aac_sg_entry);
5648
5649 ddi_put32(acc, &br->Command, VM_CtBlockRead);
5650 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien);
5651 sgp = &br->SgMap.SgEntry[0];
5652 } else {
5653 struct aac_blockwrite *bw = (struct aac_blockwrite *)br;
5654
5655 acp->fib_size = sizeof (struct aac_fib_header) + \
5656 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \
5657 sizeof (struct aac_sg_entry);
5658
5659 ddi_put32(acc, &bw->Command, VM_CtBlockWrite);
5660 ddi_put32(acc, &bw->Stable, CUNSTABLE);
5661 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien);
5662 sgp = &bw->SgMap.SgEntry[0];
5663 }
5664 aac_cmd_fib_header(softs, acp, ContainerCommand);
5665
	/*
	 * aac_blockread and aac_blockwrite share the same structure
	 * header, so br is used for bw here as well.
	 */
5670 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5671 ddi_put32(acc, &br->ContainerId,
5672 ((struct aac_container *)acp->dvp)->cid);
5673 ddi_put32(acc, &br->ByteCount, acp->bcount);
5674
5675 /* Fill SG table */
5676 for (sge = &acp->sgt[0];
5677 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5678 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5679 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5680 }
5681 }
5682
5683 /*ARGSUSED*/
5684 void
5685 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp)
5686 {
5687 struct aac_slot *slotp = acp->slotp;
5688 struct aac_fib *fibp = slotp->fibp;
5689 ddi_acc_handle_t acc = slotp->fib_acc_handle;
5690
5691 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp,
5692 acp->fib_size, /* only copy data of needed length */
5693 DDI_DEV_AUTOINCR);
5694 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5695 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2);
5696 }
5697
5698 static void
5699 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp)
5700 {
5701 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5702 struct aac_synchronize_command *sync =
5703 (struct aac_synchronize_command *)&acp->slotp->fibp->data[0];
5704
5705 acp->fib_size = AAC_FIB_SIZEOF(struct aac_synchronize_command);
5706
5707 aac_cmd_fib_header(softs, acp, ContainerCommand);
5708 ddi_put32(acc, &sync->Command, VM_ContainerConfig);
5709 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE);
5710 ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid);
5711 ddi_put32(acc, &sync->Count,
5712 sizeof (((struct aac_synchronize_reply *)0)->Data));
5713 }
5714
5715 /*
5716 * Start/Stop unit (Power Management)
5717 */
5718 static void
5719 aac_cmd_fib_startstop(struct aac_softstate *softs, struct aac_cmd *acp)
5720 {
5721 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5722 struct aac_Container *cmd =
5723 (struct aac_Container *)&acp->slotp->fibp->data[0];
5724 union scsi_cdb *cdbp = (void *)acp->pkt->pkt_cdbp;
5725
5726 acp->fib_size = AAC_FIB_SIZEOF(struct aac_Container);
5727
5728 aac_cmd_fib_header(softs, acp, ContainerCommand);
5729 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
5730 ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
5731 ddi_put32(acc, &cmd->CTCommand.command, CT_PM_DRIVER_SUPPORT);
5732 ddi_put32(acc, &cmd->CTCommand.param[0], cdbp->cdb_opaque[4] & 1 ? \
5733 AAC_PM_DRIVERSUP_START_UNIT : AAC_PM_DRIVERSUP_STOP_UNIT);
5734 ddi_put32(acc, &cmd->CTCommand.param[1],
5735 ((struct aac_container *)acp->dvp)->cid);
5736 ddi_put32(acc, &cmd->CTCommand.param[2], cdbp->cdb_opaque[1] & 1);
5737 }
5738
5739 /*
5740 * Init FIB for pass-through SCMD
5741 */
5742 static void
5743 aac_cmd_fib_srb(struct aac_cmd *acp)
5744 {
5745 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5746 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5747 uint8_t *cdb;
5748
5749 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi);
5750 ddi_put32(acc, &srb->retry_limit, 0);
5751 ddi_put32(acc, &srb->cdb_size, acp->cmdlen);
5752 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */
5753 if (acp->fibp == NULL) {
5754 if (acp->flags & AAC_CMD_BUF_READ)
5755 ddi_put32(acc, &srb->flags, SRB_DataIn);
5756 else if (acp->flags & AAC_CMD_BUF_WRITE)
5757 ddi_put32(acc, &srb->flags, SRB_DataOut);
5758 ddi_put32(acc, &srb->channel,
5759 ((struct aac_nondasd *)acp->dvp)->bus);
5760 ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid);
5761 ddi_put32(acc, &srb->lun, 0);
5762 cdb = acp->pkt->pkt_cdbp;
5763 } else {
5764 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0];
5765
5766 ddi_put32(acc, &srb->flags, srb0->flags);
5767 ddi_put32(acc, &srb->channel, srb0->channel);
5768 ddi_put32(acc, &srb->id, srb0->id);
5769 ddi_put32(acc, &srb->lun, srb0->lun);
5770 cdb = srb0->cdb;
5771 }
5772 ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR);
5773 }
5774
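/*
 * Init FIB for pass-through SCMD with 32-bit SG entries
 */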
5775 static void
5776 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp)
5777 {
5778 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5779 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5780 struct aac_sg_entry *sgp;
5781 struct aac_sge *sge;
5782
5783 acp->fib_size = sizeof (struct aac_fib_header) + \
5784 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5785 acp->left_cookien * sizeof (struct aac_sg_entry);
5786
5787 /* Fill FIB and SRB headers, and copy cdb */
5788 aac_cmd_fib_header(softs, acp, ScsiPortCommand);
5789 aac_cmd_fib_srb(acp);
5790
5791 /* Fill SG table */
5792 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5793 ddi_put32(acc, &srb->count, acp->bcount);
5794
5795 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0];
5796 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5797 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5798 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5799 }
5800 }
5801
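/*
 * Init FIB for pass-through SCMD with 64-bit SG entries
 */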
5802 static void
5803 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp)
5804 {
5805 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5806 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5807 struct aac_sg_entry64 *sgp;
5808 struct aac_sge *sge;
5809
5810 acp->fib_size = sizeof (struct aac_fib_header) + \
5811 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5812 acp->left_cookien * sizeof (struct aac_sg_entry64);
5813
5814 /* Fill FIB and SRB headers, and copy cdb */
5815 aac_cmd_fib_header(softs, acp, ScsiPortCommandU64);
5816 aac_cmd_fib_srb(acp);
5817
5818 /* Fill SG table */
5819 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5820 ddi_put32(acc, &srb->count, acp->bcount);
5821
5822 for (sge = &acp->sgt[0],
5823 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0];
5824 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5825 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5826 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5827 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5828 }
5829 }
5830
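/*
 * Occupy a free slot with the command, build the command FIB in the slot
 * and sync it for the device
 */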
5831 static int
5832 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5833 {
5834 struct aac_slot *slotp;
5835
5836 if (slotp = aac_get_slot(softs)) {
5837 acp->slotp = slotp;
5838 slotp->acp = acp;
5839 acp->aac_cmd_fib(softs, acp);
5840 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0,
5841 DDI_DMA_SYNC_FORDEV);
5842 return (AACOK);
5843 }
5844 return (AACERR);
5845 }
5846
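/*
 * Bind an I/O command to a slot if the bus and per-device throttles allow
 */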
5847 static int
5848 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp)
5849 {
5850 struct aac_device *dvp = acp->dvp;
5851 int q = AAC_CMDQ(acp);
5852
5853 if (softs->bus_ncmds[q] < softs->bus_throttle[q]) {
5854 if (dvp) {
5855 if (dvp->ncmds[q] < dvp->throttle[q]) {
5856 if (!(acp->flags & AAC_CMD_NTAG) ||
5857 dvp->ncmds[q] == 0) {
5858 return (aac_cmd_slot_bind(softs, acp));
5859 }
5860 ASSERT(q == AAC_CMDQ_ASYNC);
5861 aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC,
5862 AAC_THROTTLE_DRAIN);
5863 }
5864 } else {
5865 return (aac_cmd_slot_bind(softs, acp));
5866 }
5867 }
5868 return (AACERR);
5869 }
5870
5871 static int
5872 aac_sync_fib_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5873 {
5874 struct aac_slot *slotp;
5875
5876 while (softs->sync_ac.slotp)
5877 cv_wait(&softs->sync_fib_cv, &softs->io_lock);
5878
5879 if (slotp = aac_get_slot(softs)) {
5880 ASSERT(acp->slotp == NULL);
5881
5882 acp->slotp = slotp;
5883 slotp->acp = acp;
5884 return (AACOK);
5885 }
5886 return (AACERR);
5887 }
5888
5889 static void
5890 aac_sync_fib_slot_release(struct aac_softstate *softs, struct aac_cmd *acp)
5891 {
5892 ASSERT(acp->slotp);
5893
5894 aac_release_slot(softs, acp->slotp);
5895 acp->slotp->acp = NULL;
5896 acp->slotp = NULL;
5897
5898 cv_signal(&softs->sync_fib_cv);
5899 }
5900
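/*
 * Hand a slot-bound command to the adapter and move it to the busy queue
 */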
5901 static void
5902 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp)
5903 {
5904 struct aac_slot *slotp = acp->slotp;
5905 int q = AAC_CMDQ(acp);
5906 int rval;
5907
5908 /* Set ac and pkt */
5909 if (acp->pkt) { /* ac from ioctl has no pkt */
5910 acp->pkt->pkt_state |=
5911 STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
5912 }
5913 if (acp->timeout) /* 0 indicates no timeout */
5914 acp->timeout += aac_timebase + aac_tick;
5915
5916 if (acp->dvp)
5917 acp->dvp->ncmds[q]++;
5918 softs->bus_ncmds[q]++;
5919 aac_cmd_enqueue(&softs->q_busy, acp);
5920
5921 AACDB_PRINT_FIB(softs, slotp);
5922
5923 if (softs->flags & AAC_FLAGS_NEW_COMM) {
5924 rval = aac_send_command(softs, slotp);
5925 } else {
		/*
		 * If the FIB cannot be enqueued, the adapter is in an
		 * abnormal state and will not interrupt us.
		 */
5930 rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q,
5931 slotp->fib_phyaddr, acp->fib_size);
5932 }
5933
5934 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS)
5935 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
5936
	/*
	 * NOTE: We send commands only when slots are available, so we
	 * should never reach here.
	 */
5941 if (rval != AACOK) {
5942 AACDB_PRINT(softs, CE_NOTE, "SCMD send failed");
5943 if (acp->pkt) {
5944 acp->pkt->pkt_state &= ~STATE_SENT_CMD;
5945 aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0);
5946 }
5947 aac_end_io(softs, acp);
5948 if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB)))
5949 ddi_trigger_softintr(softs->softint_id);
5950 }
5951 }
5952
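/*
 * Bind and start as many commands from the wait queue as free slots and
 * throttling allow
 */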
5953 static void
5954 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q)
5955 {
5956 struct aac_cmd *acp, *next_acp;
5957
	/* Serve as many waiting I/Os as possible */
5959 for (acp = q->q_head; acp; acp = next_acp) {
5960 next_acp = acp->next;
5961 if (aac_bind_io(softs, acp) == AACOK) {
5962 aac_cmd_delete(q, acp);
5963 aac_start_io(softs, acp);
5964 }
5965 if (softs->free_io_slot_head == NULL)
5966 break;
5967 }
5968 }
5969
5970 static void
5971 aac_start_waiting_io(struct aac_softstate *softs)
5972 {
	/*
	 * Sync FIB I/O is served before async FIB I/O so that I/O requests
	 * sent by interactive userland commands get responses as soon as
	 * possible.
	 */
5977 if (softs->q_wait[AAC_CMDQ_SYNC].q_head)
5978 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]);
5979 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head)
5980 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]);
5981 }
5982
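/*
 * Drain the completion queue: sync consistent read buffers, check FMA
 * handles, and complete each packet back to the target driver
 */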
5983 static void
5984 aac_drain_comp_q(struct aac_softstate *softs)
5985 {
5986 struct aac_cmd *acp;
5987 struct scsi_pkt *pkt;
5988
5989 /*CONSTCOND*/
5990 while (1) {
5991 mutex_enter(&softs->q_comp_mutex);
5992 acp = aac_cmd_dequeue(&softs->q_comp);
5993 mutex_exit(&softs->q_comp_mutex);
5994 if (acp != NULL) {
5995 ASSERT(acp->pkt != NULL);
5996 pkt = acp->pkt;
5997
5998 if (pkt->pkt_reason == CMD_CMPLT) {
5999 /*
6000 * Consistent packets need to be sync'ed first
6001 */
6002 if ((acp->flags & AAC_CMD_CONSISTENT) &&
6003 (acp->flags & AAC_CMD_BUF_READ)) {
6004 if (aac_dma_sync_ac(acp) != AACOK) {
6005 ddi_fm_service_impact(
6006 softs->devinfo_p,
6007 DDI_SERVICE_UNAFFECTED);
6008 pkt->pkt_reason = CMD_TRAN_ERR;
6009 pkt->pkt_statistics = 0;
6010 }
6011 }
6012 if ((aac_check_acc_handle(softs-> \
6013 comm_space_acc_handle) != DDI_SUCCESS) ||
6014 (aac_check_acc_handle(softs-> \
6015 pci_mem_handle) != DDI_SUCCESS)) {
6016 ddi_fm_service_impact(softs->devinfo_p,
6017 DDI_SERVICE_UNAFFECTED);
6018 ddi_fm_acc_err_clear(softs-> \
6019 pci_mem_handle, DDI_FME_VER0);
6020 pkt->pkt_reason = CMD_TRAN_ERR;
6021 pkt->pkt_statistics = 0;
6022 }
6023 if (aac_check_dma_handle(softs-> \
6024 comm_space_dma_handle) != DDI_SUCCESS) {
6025 ddi_fm_service_impact(softs->devinfo_p,
6026 DDI_SERVICE_UNAFFECTED);
6027 pkt->pkt_reason = CMD_TRAN_ERR;
6028 pkt->pkt_statistics = 0;
6029 }
6030 }
6031 scsi_hba_pkt_comp(pkt);
6032 } else {
6033 break;
6034 }
6035 }
6036 }
6037
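/*
 * Allocate and DMA-map the FIB area for an I/O slot
 */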
6038 static int
6039 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp)
6040 {
6041 size_t rlen;
6042 ddi_dma_cookie_t cookie;
6043 uint_t cookien;
6044
6045 /* Allocate FIB dma resource */
6046 if (ddi_dma_alloc_handle(
6047 softs->devinfo_p,
6048 &softs->addr_dma_attr,
6049 DDI_DMA_SLEEP,
6050 NULL,
6051 &slotp->fib_dma_handle) != DDI_SUCCESS) {
6052 AACDB_PRINT(softs, CE_WARN,
6053 "Cannot alloc dma handle for slot fib area");
6054 goto error;
6055 }
6056 if (ddi_dma_mem_alloc(
6057 slotp->fib_dma_handle,
6058 softs->aac_max_fib_size,
6059 &softs->acc_attr,
6060 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
6061 DDI_DMA_SLEEP,
6062 NULL,
6063 (caddr_t *)&slotp->fibp,
6064 &rlen,
6065 &slotp->fib_acc_handle) != DDI_SUCCESS) {
6066 AACDB_PRINT(softs, CE_WARN,
6067 "Cannot alloc mem for slot fib area");
6068 goto error;
6069 }
6070 if (ddi_dma_addr_bind_handle(
6071 slotp->fib_dma_handle,
6072 NULL,
6073 (caddr_t)slotp->fibp,
6074 softs->aac_max_fib_size,
6075 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
6076 DDI_DMA_SLEEP,
6077 NULL,
6078 &cookie,
6079 &cookien) != DDI_DMA_MAPPED) {
6080 AACDB_PRINT(softs, CE_WARN,
6081 "dma bind failed for slot fib area");
6082 goto error;
6083 }
6084
6085 /* Check dma handles allocated in fib attach */
6086 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) {
6087 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
6088 goto error;
6089 }
6090
6091 /* Check acc handles allocated in fib attach */
6092 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) {
6093 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
6094 goto error;
6095 }
6096
6097 slotp->fib_phyaddr = cookie.dmac_laddress;
6098 return (AACOK);
6099
6100 error:
6101 if (slotp->fib_acc_handle) {
6102 ddi_dma_mem_free(&slotp->fib_acc_handle);
6103 slotp->fib_acc_handle = NULL;
6104 }
6105 if (slotp->fib_dma_handle) {
6106 ddi_dma_free_handle(&slotp->fib_dma_handle);
6107 slotp->fib_dma_handle = NULL;
6108 }
6109 return (AACERR);
6110 }
6111
6112 static void
6113 aac_free_fib(struct aac_slot *slotp)
6114 {
6115 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle);
6116 ddi_dma_mem_free(&slotp->fib_acc_handle);
6117 slotp->fib_acc_handle = NULL;
6118 ddi_dma_free_handle(&slotp->fib_dma_handle);
6119 slotp->fib_dma_handle = NULL;
6120 slotp->fib_phyaddr = 0;
6121 }
6122
6123 static void
6124 aac_alloc_fibs(struct aac_softstate *softs)
6125 {
6126 int i;
6127 struct aac_slot *slotp;
6128
6129 for (i = 0; i < softs->total_slots &&
6130 softs->total_fibs < softs->total_slots; i++) {
6131 slotp = &(softs->io_slot[i]);
6132 if (slotp->fib_phyaddr)
6133 continue;
6134 if (aac_alloc_fib(softs, slotp) != AACOK)
6135 break;
6136
6137 /* Insert the slot to the free slot list */
6138 aac_release_slot(softs, slotp);
6139 softs->total_fibs++;
6140 }
6141 }
6142
6143 static void
6144 aac_destroy_fibs(struct aac_softstate *softs)
6145 {
6146 struct aac_slot *slotp;
6147
6148 while ((slotp = softs->free_io_slot_head) != NULL) {
6149 ASSERT(slotp->fib_phyaddr);
6150 softs->free_io_slot_head = slotp->next;
6151 aac_free_fib(slotp);
6152 ASSERT(slotp->index == (slotp - softs->io_slot));
6153 softs->total_fibs--;
6154 }
6155 ASSERT(softs->total_fibs == 0);
6156 }
6157
6158 static int
6159 aac_create_slots(struct aac_softstate *softs)
6160 {
6161 int i;
6162
6163 softs->total_slots = softs->aac_max_fibs;
6164 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \
6165 softs->total_slots, KM_SLEEP);
6166 if (softs->io_slot == NULL) {
6167 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot");
6168 return (AACERR);
6169 }
6170 for (i = 0; i < softs->total_slots; i++)
6171 softs->io_slot[i].index = i;
6172 softs->free_io_slot_head = NULL;
6173 softs->total_fibs = 0;
6174 return (AACOK);
6175 }
6176
6177 static void
6178 aac_destroy_slots(struct aac_softstate *softs)
6179 {
6180 ASSERT(softs->free_io_slot_head == NULL);
6181
6182 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \
6183 softs->total_slots);
6184 softs->io_slot = NULL;
6185 softs->total_slots = 0;
6186 }
6187
6188 struct aac_slot *
6189 aac_get_slot(struct aac_softstate *softs)
6190 {
6191 struct aac_slot *slotp;
6192
6193 if ((slotp = softs->free_io_slot_head) != NULL) {
6194 softs->free_io_slot_head = slotp->next;
6195 slotp->next = NULL;
6196 }
6197 return (slotp);
6198 }
6199
6200 static void
6201 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp)
6202 {
6203 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots));
6204 ASSERT(slotp == &softs->io_slot[slotp->index]);
6205
6206 slotp->acp = NULL;
6207 slotp->next = softs->free_io_slot_head;
6208 softs->free_io_slot_head = slotp;
6209 }
6210
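/*
 * Start an I/O command, or put it on the wait queue if it cannot be bound
 * to a slot yet; polled and sync commands are waited on until completion
 */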
6211 int
6212 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp)
6213 {
6214 if (aac_bind_io(softs, acp) == AACOK)
6215 aac_start_io(softs, acp);
6216 else
6217 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp);
6218
6219 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR)))
6220 return (TRAN_ACCEPT);
	/*
	 * Because the sync FIB is always 512 bytes and reserved for
	 * critical functions, an async FIB is used for polled I/O.
	 */
6225 if (acp->flags & AAC_CMD_NO_INTR) {
6226 if (aac_do_poll_io(softs, acp) == AACOK)
6227 return (TRAN_ACCEPT);
6228 } else {
6229 if (aac_do_sync_io(softs, acp) == AACOK)
6230 return (TRAN_ACCEPT);
6231 }
6232 return (TRAN_BADPKT);
6233 }
6234
6235 static int
6236 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp)
6237 {
6238 int (*intr_handler)(struct aac_softstate *);
6239
	/*
	 * Interrupts are disabled, so we have to poll the adapter ourselves.
	 */
6243 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
6244 aac_process_intr_new : aac_process_intr_old;
6245 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) {
6246 int i = AAC_POLL_TIME * 1000;
6247
6248 AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i);
6249 if (i == 0)
6250 aac_cmd_timeout(softs, acp);
6251 }
6252
6253 ddi_trigger_softintr(softs->softint_id);
6254
6255 if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR))
6256 return (AACOK);
6257 return (AACERR);
6258 }
6259
6260 static int
6261 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp)
6262 {
6263 ASSERT(softs && acp);
6264
6265 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT)))
6266 cv_wait(&softs->event, &softs->io_lock);
6267
6268 if (acp->flags & AAC_CMD_CMPLT)
6269 return (AACOK);
6270 return (AACERR);
6271 }
6272
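/*
 * Sync the command data buffer for DMA; when an intermediate buffer (abp)
 * is in use, copy the data between it and the original buf
 */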
6273 static int
6274 aac_dma_sync_ac(struct aac_cmd *acp)
6275 {
6276 if (acp->buf_dma_handle) {
6277 if (acp->flags & AAC_CMD_BUF_WRITE) {
6278 if (acp->abp != NULL)
6279 ddi_rep_put8(acp->abh,
6280 (uint8_t *)acp->bp->b_un.b_addr,
6281 (uint8_t *)acp->abp, acp->bp->b_bcount,
6282 DDI_DEV_AUTOINCR);
6283 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6284 DDI_DMA_SYNC_FORDEV);
6285 } else {
6286 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6287 DDI_DMA_SYNC_FORCPU);
6288 if (aac_check_dma_handle(acp->buf_dma_handle) !=
6289 DDI_SUCCESS)
6290 return (AACERR);
6291 if (acp->abp != NULL)
6292 ddi_rep_get8(acp->abh,
6293 (uint8_t *)acp->bp->b_un.b_addr,
6294 (uint8_t *)acp->abp, acp->bp->b_bcount,
6295 DDI_DEV_AUTOINCR);
6296 }
6297 }
6298 return (AACOK);
6299 }
6300
6301 /*
6302 * Copy AIF from adapter to the empty AIF slot and inform AIF threads
6303 */
6304 static void
6305 aac_save_aif(struct aac_softstate *softs, ddi_acc_handle_t acc,
6306 struct aac_fib *fibp0, int fib_size0)
6307 {
6308 struct aac_fib *fibp; /* FIB in AIF queue */
6309 int fib_size;
6310 uint16_t fib_command;
6311 int current, next;
6312
6313 /* Ignore non AIF messages */
6314 fib_command = ddi_get16(acc, &fibp0->Header.Command);
6315 if (fib_command != AifRequest) {
6316 cmn_err(CE_WARN, "!Unknown command from controller");
6317 return;
6318 }
6319
6320 mutex_enter(&softs->aifq_mutex);
6321
6322 /* Save AIF */
6323 fibp = &softs->aifq[softs->aifq_idx].d;
6324 fib_size = (fib_size0 > AAC_FIB_SIZE) ? AAC_FIB_SIZE : fib_size0;
6325 ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0, fib_size,
6326 DDI_DEV_AUTOINCR);
6327
6328 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
6329 ddi_fm_service_impact(softs->devinfo_p,
6330 DDI_SERVICE_UNAFFECTED);
6331 mutex_exit(&softs->aifq_mutex);
6332 return;
6333 }
6334
6335 AACDB_PRINT_AIF(softs, (struct aac_aif_command *)&fibp->data[0]);
6336
6337 /* Modify AIF contexts */
6338 current = softs->aifq_idx;
6339 next = (current + 1) % AAC_AIFQ_LENGTH;
6340 if (next == 0) {
6341 struct aac_fib_context *ctx_p;
6342
6343 softs->aifq_wrap = 1;
6344 for (ctx_p = softs->fibctx_p; ctx_p; ctx_p = ctx_p->next) {
6345 if (next == ctx_p->ctx_idx) {
6346 ctx_p->ctx_flags |= AAC_CTXFLAG_FILLED;
6347 } else if (current == ctx_p->ctx_idx &&
6348 (ctx_p->ctx_flags & AAC_CTXFLAG_FILLED)) {
6349 ctx_p->ctx_idx = next;
6350 ctx_p->ctx_overrun++;
6351 }
6352 }
6353 }
6354 softs->aifq_idx = next;
6355
6356 /* Wakeup AIF threads */
6357 cv_broadcast(&softs->aifq_cv);
6358 mutex_exit(&softs->aifq_mutex);
6359
6360 /* Wakeup event thread to handle aif */
6361 aac_event_disp(softs, AAC_EVENT_AIF);
6362 }
6363
6364 static int
6365 aac_return_aif_common(struct aac_softstate *softs, struct aac_fib_context *ctx,
6366 struct aac_fib **fibpp)
6367 {
6368 int current;
6369
6370 current = ctx->ctx_idx;
6371 if (current == softs->aifq_idx &&
6372 !(ctx->ctx_flags & AAC_CTXFLAG_FILLED))
6373 return (EAGAIN); /* Empty */
6374
6375 *fibpp = &softs->aifq[current].d;
6376
6377 ctx->ctx_flags &= ~AAC_CTXFLAG_FILLED;
6378 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
6379 return (0);
6380 }
6381
6382 int
6383 aac_return_aif(struct aac_softstate *softs, struct aac_fib_context *ctx,
6384 struct aac_fib **fibpp)
6385 {
6386 int rval;
6387
6388 mutex_enter(&softs->aifq_mutex);
6389 rval = aac_return_aif_common(softs, ctx, fibpp);
6390 mutex_exit(&softs->aifq_mutex);
6391 return (rval);
6392 }
6393
6394 int
6395 aac_return_aif_wait(struct aac_softstate *softs, struct aac_fib_context *ctx,
6396 struct aac_fib **fibpp)
6397 {
6398 int rval;
6399
6400 mutex_enter(&softs->aifq_mutex);
6401 rval = aac_return_aif_common(softs, ctx, fibpp);
6402 if (rval == EAGAIN) {
6403 AACDB_PRINT(softs, CE_NOTE, "Waiting for AIF");
6404 rval = cv_wait_sig(&softs->aifq_cv, &softs->aifq_mutex);
6405 }
6406 mutex_exit(&softs->aifq_mutex);
6407 return ((rval > 0) ? 0 : EINTR);
6408 }
6409
/*
 * The following function comes from Adaptec:
 *
 * When the driver sees a particular event that means containers have changed,
 * it rescans the containers. However, a change may not be complete until some
 * other event is received. For example, creating or deleting an array can
 * incur as many as six AifEnConfigChange events, which would generate six
 * container rescans. To reduce rescans, the driver sets a flag to wait for
 * another particular event, and only rescans when that event comes in.
 */
6420 static int
6421 aac_handle_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
6422 {
6423 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
6424 int en_type;
6425 int devcfg_needed;
6426 int cid;
6427 uint32_t bus_id, tgt_id;
6428 enum aac_cfg_event event = AAC_CFG_NULL_EXIST;
6429
6430 devcfg_needed = 0;
6431 en_type = LE_32((uint32_t)aif->data.EN.type);
6432
6433 switch (LE_32((uint32_t)aif->command)) {
6434 case AifCmdDriverNotify: {
6435 cid = LE_32(aif->data.EN.data.ECC.container[0]);
6436
6437 switch (en_type) {
6438 case AifDenMorphComplete:
6439 case AifDenVolumeExtendComplete:
6440 if (AAC_DEV_IS_VALID(&softs->containers[cid].dev))
6441 softs->devcfg_wait_on = AifEnConfigChange;
6442 break;
6443 }
6444 if (softs->devcfg_wait_on == en_type)
6445 devcfg_needed = 1;
6446 break;
6447 }
6448
6449 case AifCmdEventNotify:
6450 cid = LE_32(aif->data.EN.data.ECC.container[0]);
6451 switch (en_type) {
6452 case AifEnAddContainer:
6453 case AifEnDeleteContainer:
6454 softs->devcfg_wait_on = AifEnConfigChange;
6455 break;
6456 case AifEnContainerChange:
6457 if (!softs->devcfg_wait_on)
6458 softs->devcfg_wait_on = AifEnConfigChange;
6459 break;
6460 case AifEnContainerEvent:
6461 if (ddi_get32(acc, &aif-> \
6462 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE)
6463 devcfg_needed = 1;
6464 break;
6465 case AifEnAddJBOD:
6466 if (!(softs->flags & AAC_FLAGS_JBOD))
6467 return (AACERR);
6468 event = AAC_CFG_ADD;
6469 bus_id = (cid >> 24) & 0xf;
6470 tgt_id = cid & 0xffff;
6471 break;
6472 case AifEnDeleteJBOD:
6473 if (!(softs->flags & AAC_FLAGS_JBOD))
6474 return (AACERR);
6475 event = AAC_CFG_DELETE;
6476 bus_id = (cid >> 24) & 0xf;
6477 tgt_id = cid & 0xffff;
6478 break;
6479 }
6480 if (softs->devcfg_wait_on == en_type)
6481 devcfg_needed = 1;
6482 break;
6483
6484 case AifCmdJobProgress:
6485 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) {
6486 int pr_status;
6487 uint32_t pr_ftick, pr_ctick;
6488
6489 pr_status = LE_32((uint32_t)aif->data.PR[0].status);
6490 pr_ctick = LE_32(aif->data.PR[0].currentTick);
6491 pr_ftick = LE_32(aif->data.PR[0].finalTick);
6492
6493 if ((pr_ctick == pr_ftick) ||
6494 (pr_status == AifJobStsSuccess))
6495 softs->devcfg_wait_on = AifEnContainerChange;
6496 else if ((pr_ctick == 0) &&
6497 (pr_status == AifJobStsRunning))
6498 softs->devcfg_wait_on = AifEnContainerChange;
6499 }
6500 break;
6501 }
6502
6503 if (devcfg_needed) {
6504 softs->devcfg_wait_on = 0;
6505 (void) aac_probe_containers(softs);
6506 }
6507
6508 if (event != AAC_CFG_NULL_EXIST) {
6509 ASSERT(en_type == AifEnAddJBOD || en_type == AifEnDeleteJBOD);
6510 (void) aac_probe_jbod(softs,
6511 AAC_P2VTGT(softs, bus_id, tgt_id), event);
6512 }
6513 return (AACOK);
6514 }
6515
6516
6517 /*
6518 * Check and handle AIF events
6519 */
6520 static void
6521 aac_aif_event(struct aac_softstate *softs)
6522 {
6523 struct aac_fib *fibp;
6524
6525 /*CONSTCOND*/
6526 while (1) {
6527 if (aac_return_aif(softs, &softs->aifctx, &fibp) != 0)
6528 break; /* No more AIFs to handle, end loop */
6529
		/* AIF overrun, array create/delete events may have been missed. */
6531 if (softs->aifctx.ctx_overrun) {
6532 softs->aifctx.ctx_overrun = 0;
6533 }
6534
6535 /* AIF received, handle it */
6536 struct aac_aif_command *aifp =
6537 (struct aac_aif_command *)&fibp->data[0];
6538 uint32_t aif_command = LE_32((uint32_t)aifp->command);
6539
6540 if (aif_command == AifCmdDriverNotify ||
6541 aif_command == AifCmdEventNotify ||
6542 aif_command == AifCmdJobProgress)
6543 (void) aac_handle_aif(softs, aifp);
6544 }
6545 }
6546
6547 /*
6548 * Timeout recovery
6549 */
6550 /*ARGSUSED*/
6551 static void
6552 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp)
6553 {
6554 #ifdef DEBUG
6555 acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT;
6556 AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp);
6557 AACDB_PRINT_FIB(softs, acp->slotp);
6558 #endif
6559
	/*
	 * Besides firmware in an unhealthy state, an overloaded adapter
	 * may also incur packet timeouts. An adapter with a slower IOP
	 * can take longer than 60 seconds to process commands, for
	 * example when it is doing a RAID-5 build while still servicing
	 * I/O, so longer completion times should be tolerated.
	 */
6569 switch (aac_do_reset(softs)) {
6570 case AAC_IOP_RESET_SUCCEED:
6571 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET);
6572 aac_start_waiting_io(softs);
6573 break;
6574 case AAC_IOP_RESET_FAILED:
6575 /* Abort all waiting cmds when adapter is dead */
6576 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT);
6577 break;
6578 case AAC_IOP_RESET_ABNORMAL:
6579 aac_start_waiting_io(softs);
6580 }
6581 }
6582
6583 /*
6584 * The following function comes from Adaptec:
6585 *
6586 * Time sync. command added to synchronize time with firmware every 30
6587 * minutes (required for correct AIF timestamps etc.)
6588 */
6589 static void
6590 aac_sync_tick(struct aac_softstate *softs)
6591 {
6592 ddi_acc_handle_t acc;
6593 int rval;
6594
6595 mutex_enter(&softs->time_mutex);
6596 ASSERT(softs->time_sync <= softs->timebase);
6597 softs->time_sync = 0;
6598 mutex_exit(&softs->time_mutex);
6599
6600 /* Time sync. with firmware every AAC_SYNC_TICK */
6601 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
6602 acc = softs->sync_ac.slotp->fib_acc_handle;
6603
6604 ddi_put32(acc, (void *)&softs->sync_ac.slotp->fibp->data[0],
6605 ddi_get_time());
6606 rval = aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t));
6607 aac_sync_fib_slot_release(softs, &softs->sync_ac);
6608
6609 mutex_enter(&softs->time_mutex);
6610 softs->time_sync = softs->timebase;
6611 if (rval != AACOK)
6612 /* retry shortly */
6613 softs->time_sync += aac_tick << 1;
6614 else
6615 softs->time_sync += AAC_SYNC_TICK;
6616 mutex_exit(&softs->time_mutex);
6617 }
6618
6619 /*
6620 * Timeout checking and handling
6621 */
6622 static void
6623 aac_daemon(struct aac_softstate *softs)
6624 {
6625 int time_out; /* set if timeout happened */
6626 int time_adjust;
6627 uint32_t softs_timebase;
6628
6629 mutex_enter(&softs->time_mutex);
6630 ASSERT(softs->time_out <= softs->timebase);
6631 softs->time_out = 0;
6632 softs_timebase = softs->timebase;
6633 mutex_exit(&softs->time_mutex);
6634
6635 /* Check slots for timeout pkts */
6636 time_adjust = 0;
6637 do {
6638 struct aac_cmd *acp;
6639
6640 time_out = 0;
6641 for (acp = softs->q_busy.q_head; acp; acp = acp->next) {
6642 if (acp->timeout == 0)
6643 continue;
6644
			/*
			 * If a timeout happened, adjust the outstanding cmds
			 * so they are checked again later.
			 */
6649 if (time_adjust) {
6650 acp->timeout += time_adjust;
6651 continue;
6652 }
6653
6654 if (acp->timeout <= softs_timebase) {
6655 aac_cmd_timeout(softs, acp);
6656 time_out = 1;
6657 time_adjust = aac_tick * drv_usectohz(1000000);
6658 break; /* timeout happened */
6659 } else {
6660 break; /* no timeout */
6661 }
6662 }
6663 } while (time_out);
6664
6665 mutex_enter(&softs->time_mutex);
6666 softs->time_out = softs->timebase + aac_tick;
6667 mutex_exit(&softs->time_mutex);
6668 }
6669
6670 /*
6671 * The event thread handles various tasks serially for the other parts of
6672 * the driver, so that they can run fast.
6673 */
6674 static void
6675 aac_event_thread(struct aac_softstate *softs)
6676 {
6677 int run = 1;
6678
6679 DBCALLED(softs, 1);
6680
6681 mutex_enter(&softs->ev_lock);
6682 while (run) {
6683 int events;
6684
6685 if ((events = softs->events) == 0) {
6686 cv_wait(&softs->event_disp_cv, &softs->ev_lock);
6687 events = softs->events;
6688 }
6689 softs->events = 0;
6690 mutex_exit(&softs->ev_lock);
6691
6692 mutex_enter(&softs->io_lock);
6693 if ((softs->state & AAC_STATE_RUN) &&
6694 (softs->state & AAC_STATE_DEAD) == 0) {
6695 if (events & AAC_EVENT_TIMEOUT)
6696 aac_daemon(softs);
6697 if (events & AAC_EVENT_SYNCTICK)
6698 aac_sync_tick(softs);
6699 if (events & AAC_EVENT_AIF)
6700 aac_aif_event(softs);
6701 } else {
6702 run = 0;
6703 }
6704 mutex_exit(&softs->io_lock);
6705
6706 mutex_enter(&softs->ev_lock);
6707 }
6708
6709 cv_signal(&softs->event_wait_cv);
6710 mutex_exit(&softs->ev_lock);
6711 }
6712
/*
 * Internal timer. It is only responsible for time counting and reporting
 * time-related events. Event handling is done by aac_event_thread(), so
 * that the timer itself can be as precise as possible.
 */
6718 static void
6719 aac_timer(void *arg)
6720 {
6721 struct aac_softstate *softs = arg;
6722 int events = 0;
6723
6724 mutex_enter(&softs->time_mutex);
6725
6726 /* If timer is being stopped, exit */
6727 if (softs->timeout_id) {
6728 softs->timeout_id = timeout(aac_timer, (void *)softs,
6729 (aac_tick * drv_usectohz(1000000)));
6730 } else {
6731 mutex_exit(&softs->time_mutex);
6732 return;
6733 }
6734
6735 /* Time counting */
6736 softs->timebase += aac_tick;
6737
6738 /* Check time related events */
6739 if (softs->time_out && softs->time_out <= softs->timebase)
6740 events |= AAC_EVENT_TIMEOUT;
6741 if (softs->time_sync && softs->time_sync <= softs->timebase)
6742 events |= AAC_EVENT_SYNCTICK;
6743
6744 mutex_exit(&softs->time_mutex);
6745
6746 if (events)
6747 aac_event_disp(softs, events);
6748 }
6749
6750 /*
6751 * Dispatch events to daemon thread for handling
6752 */
6753 static void
6754 aac_event_disp(struct aac_softstate *softs, int events)
6755 {
6756 mutex_enter(&softs->ev_lock);
6757 softs->events |= events;
6758 cv_broadcast(&softs->event_disp_cv);
6759 mutex_exit(&softs->ev_lock);
6760 }
6761
6762 /*
6763 * Architecture dependent functions
6764 */
6765 static int
6766 aac_rx_get_fwstatus(struct aac_softstate *softs)
6767 {
6768 return (PCI_MEM_GET32(softs, AAC_OMR0));
6769 }
6770
6771 static int
6772 aac_rx_get_mailbox(struct aac_softstate *softs, int mb)
6773 {
6774 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4));
6775 }
6776
6777 static void
6778 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6779 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6780 {
6781 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd);
6782 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0);
6783 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1);
6784 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2);
6785 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3);
6786 }
6787
6788 static int
6789 aac_rkt_get_fwstatus(struct aac_softstate *softs)
6790 {
6791 return (PCI_MEM_GET32(softs, AAC_OMR0));
6792 }
6793
6794 static int
6795 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb)
6796 {
6797 return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb *4));
6798 }
6799
6800 static void
6801 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6802 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6803 {
6804 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd);
6805 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0);
6806 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1);
6807 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2);
6808 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3);
6809 }
6810
6811 /*
6812 * cb_ops functions
6813 */
6814 static int
6815 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred)
6816 {
6817 struct aac_softstate *softs;
6818 int minor0, minor;
6819 int instance;
6820
6821 DBCALLED(NULL, 2);
6822
6823 if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6824 return (EINVAL);
6825
6826 minor0 = getminor(*devp);
6827 minor = AAC_SCSA_MINOR(minor0);
6828
6829 if (AAC_IS_SCSA_NODE(minor))
6830 return (scsi_hba_open(devp, flag, otyp, cred));
6831
6832 instance = MINOR2INST(minor0);
6833 if (instance >= AAC_MAX_ADAPTERS)
6834 return (ENXIO);
6835
6836 softs = ddi_get_soft_state(aac_softstatep, instance);
6837 if (softs == NULL)
6838 return (ENXIO);
6839
6840 return (0);
6841 }
6842
6843 /*ARGSUSED*/
6844 static int
6845 aac_close(dev_t dev, int flag, int otyp, cred_t *cred)
6846 {
6847 int minor0, minor;
6848 int instance;
6849
6850 DBCALLED(NULL, 2);
6851
6852 if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6853 return (EINVAL);
6854
6855 minor0 = getminor(dev);
6856 minor = AAC_SCSA_MINOR(minor0);
6857
6858 if (AAC_IS_SCSA_NODE(minor))
6859 return (scsi_hba_close(dev, flag, otyp, cred));
6860
6861 instance = MINOR2INST(minor0);
6862 if (instance >= AAC_MAX_ADAPTERS)
6863 return (ENXIO);
6864
6865 return (0);
6866 }
6867
6868 static int
6869 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p,
6870 int *rval_p)
6871 {
6872 struct aac_softstate *softs;
6873 int minor0, minor;
6874 int instance;
6875
6876 DBCALLED(NULL, 2);
6877
6878 if (drv_priv(cred_p) != 0)
6879 return (EPERM);
6880
6881 minor0 = getminor(dev);
6882 minor = AAC_SCSA_MINOR(minor0);
6883
6884 if (AAC_IS_SCSA_NODE(minor))
6885 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p));
6886
6887 instance = MINOR2INST(minor0);
6888 if (instance < AAC_MAX_ADAPTERS) {
6889 softs = ddi_get_soft_state(aac_softstatep, instance);
6890 return (aac_do_ioctl(softs, dev, cmd, arg, flag));
6891 }
6892 return (ENXIO);
6893 }
6894
6895 /*
6896 * The IO fault service error handling callback function
6897 */
6898 /*ARGSUSED*/
6899 static int
6900 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6901 {
	/*
	 * As the driver can always deal with an error in any DMA or
	 * access handle, we can just return the fme_status value.
	 */
6906 pci_ereport_post(dip, err, NULL);
6907 return (err->fme_status);
6908 }
6909
6910 /*
6911 * aac_fm_init - initialize fma capabilities and register with IO
6912 * fault services.
6913 */
6914 static void
6915 aac_fm_init(struct aac_softstate *softs)
6916 {
6917 /*
6918 * Need to change iblock to priority for new MSI intr
6919 */
6920 ddi_iblock_cookie_t fm_ibc;
6921
6922 softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p,
6923 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
6924 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
6925 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
6926
6927 /* Only register with IO Fault Services if we have some capability */
6928 if (softs->fm_capabilities) {
6929 /* Adjust access and dma attributes for FMA */
6930 softs->reg_attr.devacc_attr_access = DDI_FLAGERR_ACC;
6931 softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6932 softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6933
6934 /*
6935 * Register capabilities with IO Fault Services.
6936 * fm_capabilities will be updated to indicate
6937 * capabilities actually supported (not requested.)
6938 */
6939 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc);
6940
6941 /*
6942 * Initialize pci ereport capabilities if ereport
6943 * capable (should always be.)
6944 */
6945 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6946 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6947 pci_ereport_setup(softs->devinfo_p);
6948 }
6949
6950 /*
6951 * Register error callback if error callback capable.
6952 */
6953 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6954 ddi_fm_handler_register(softs->devinfo_p,
6955 aac_fm_error_cb, (void *) softs);
6956 }
6957 }
6958 }
6959
6960 /*
6961 * aac_fm_fini - Releases fma capabilities and un-registers with IO
6962 * fault services.
6963 */
6964 static void
6965 aac_fm_fini(struct aac_softstate *softs)
6966 {
6967 /* Only unregister FMA capabilities if registered */
6968 if (softs->fm_capabilities) {
6969 /*
6970 * Un-register error callback if error callback capable.
6971 */
6972 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6973 ddi_fm_handler_unregister(softs->devinfo_p);
6974 }
6975
6976 /*
6977 * Release any resources allocated by pci_ereport_setup()
6978 */
6979 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6980 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6981 pci_ereport_teardown(softs->devinfo_p);
6982 }
6983
6984 /* Unregister from IO Fault Services */
6985 ddi_fm_fini(softs->devinfo_p);
6986
6987 /* Adjust access and dma attributes for FMA */
6988 softs->reg_attr.devacc_attr_access = DDI_DEFAULT_ACC;
6989 softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
6990 softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
6991 }
6992 }
6993
6994 int
6995 aac_check_acc_handle(ddi_acc_handle_t handle)
6996 {
6997 ddi_fm_error_t de;
6998
6999 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7000 return (de.fme_status);
7001 }
7002
7003 int
7004 aac_check_dma_handle(ddi_dma_handle_t handle)
7005 {
7006 ddi_fm_error_t de;
7007
7008 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7009 return (de.fme_status);
7010 }
7011
7012 void
7013 aac_fm_ereport(struct aac_softstate *softs, char *detail)
7014 {
7015 uint64_t ena;
7016 char buf[FM_MAX_CLASS];
7017
7018 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7019 ena = fm_ena_generate(0, FM_ENA_FMT1);
7020 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) {
7021 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP,
7022 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7023 }
7024 }
7025
7026 /*
7027 * Autoconfiguration support
7028 */
7029 static int
7030 aac_parse_devname(char *devnm, int *tgt, int *lun)
7031 {
7032 char devbuf[SCSI_MAXNAMELEN];
7033 char *addr;
7034 char *p, *tp, *lp;
7035 long num;
7036
7037 /* Parse dev name and address */
7038 (void) strcpy(devbuf, devnm);
7039 addr = "";
7040 for (p = devbuf; *p != '\0'; p++) {
7041 if (*p == '@') {
7042 addr = p + 1;
7043 *p = '\0';
7044 } else if (*p == ':') {
7045 *p = '\0';
7046 break;
7047 }
7048 }
7049
	/* Parse target and lun */
7051 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7052 if (*p == ',') {
7053 lp = p + 1;
7054 *p = '\0';
7055 break;
7056 }
7057 }
7058 if (tgt && tp) {
7059 if (ddi_strtol(tp, NULL, 0x10, &num))
7060 return (AACERR);
7061 *tgt = (int)num;
7062 }
7063 if (lun && lp) {
7064 if (ddi_strtol(lp, NULL, 0x10, &num))
7065 return (AACERR);
7066 *lun = (int)num;
7067 }
7068 return (AACOK);
7069 }
7070
7071 static dev_info_t *
7072 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun)
7073 {
7074 dev_info_t *child = NULL;
7075 char addr[SCSI_MAXNAMELEN];
7076 char tmp[MAXNAMELEN];
7077
7078 if (tgt < AAC_MAX_LD) {
7079 if (lun == 0) {
7080 struct aac_device *dvp = &softs->containers[tgt].dev;
7081
7082 child = dvp->dip;
7083 }
7084 } else {
7085 (void) sprintf(addr, "%x,%x", tgt, lun);
7086 for (child = ddi_get_child(softs->devinfo_p);
7087 child; child = ddi_get_next_sibling(child)) {
			/* We don't care about non-persistent nodes */
7089 if (ndi_dev_is_persistent_node(child) == 0)
7090 continue;
7091
7092 if (aac_name_node(child, tmp, MAXNAMELEN) !=
7093 DDI_SUCCESS)
7094 continue;
7095 if (strcmp(addr, tmp) == 0)
7096 break;
7097 }
7098 }
7099 return (child);
7100 }
7101
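/*
 * Create a child dev node for the probed device, set its target, lun and
 * compatible properties, and bring it online
 */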
7102 static int
7103 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd,
7104 dev_info_t **dipp)
7105 {
7106 char *nodename = NULL;
7107 char **compatible = NULL;
7108 int ncompatible = 0;
7109 char *childname;
7110 dev_info_t *ldip = NULL;
7111 int tgt = sd->sd_address.a_target;
7112 int lun = sd->sd_address.a_lun;
7113 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7114 int rval;
7115
7116 DBCALLED(softs, 2);
7117
7118 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7119 NULL, &nodename, &compatible, &ncompatible);
7120 if (nodename == NULL) {
7121 AACDB_PRINT(softs, CE_WARN,
		    "found no compatible driver for t%dL%d", tgt, lun);
7123 rval = NDI_FAILURE;
7124 goto finish;
7125 }
7126 childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename;
7127
7128 /* Create dev node */
7129 rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID,
7130 &ldip);
7131 if (rval == NDI_SUCCESS) {
7132 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt)
7133 != DDI_PROP_SUCCESS) {
7134 AACDB_PRINT(softs, CE_WARN, "unable to create "
7135 "property for t%dL%d (target)", tgt, lun);
7136 rval = NDI_FAILURE;
7137 goto finish;
7138 }
7139 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun)
7140 != DDI_PROP_SUCCESS) {
7141 AACDB_PRINT(softs, CE_WARN, "unable to create "
7142 "property for t%dL%d (lun)", tgt, lun);
7143 rval = NDI_FAILURE;
7144 goto finish;
7145 }
7146 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7147 "compatible", compatible, ncompatible)
7148 != DDI_PROP_SUCCESS) {
7149 AACDB_PRINT(softs, CE_WARN, "unable to create "
7150 "property for t%dL%d (compatible)", tgt, lun);
7151 rval = NDI_FAILURE;
7152 goto finish;
7153 }
7154
7155 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7156 if (rval != NDI_SUCCESS) {
7157 AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d",
7158 tgt, lun);
7159 ndi_prop_remove_all(ldip);
7160 (void) ndi_devi_free(ldip);
7161 }
7162 }
7163 finish:
7164 if (dipp)
7165 *dipp = ldip;
7166
7167 scsi_hba_nodename_compatible_free(nodename, compatible);
7168 return (rval);
7169 }
7170
7171 /*ARGSUSED*/
7172 static int
7173 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd)
7174 {
7175 int tgt = sd->sd_address.a_target;
7176 int lun = sd->sd_address.a_lun;
7177
7178 DBCALLED(softs, 2);
7179
7180 if (tgt < AAC_MAX_LD) {
7181 enum aac_cfg_event event;
7182
7183 if (lun == 0) {
7184 mutex_enter(&softs->io_lock);
7185 event = aac_probe_container(softs, tgt);
7186 mutex_exit(&softs->io_lock);
7187 if ((event != AAC_CFG_NULL_NOEXIST) &&
7188 (event != AAC_CFG_DELETE)) {
7189 if (scsi_hba_probe(sd, NULL) ==
7190 SCSIPROBE_EXISTS)
7191 return (NDI_SUCCESS);
7192 }
7193 }
7194 return (NDI_FAILURE);
7195 } else {
7196 int dtype;
7197 int qual; /* device qualifier */
7198
7199 if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS)
7200 return (NDI_FAILURE);
7201
7202 dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7203 qual = dtype >> 5;
7204
7205 AACDB_PRINT(softs, CE_NOTE,
7206 "Phys. device found: tgt %d dtype %d: %s",
7207 tgt, dtype, sd->sd_inq->inq_vid);
7208
		/* Only non-DASD devices and JBOD-mode DASDs are allowed to be exposed */
7210 if (dtype == DTYPE_RODIRECT /* CDROM */ ||
7211 dtype == DTYPE_SEQUENTIAL /* TAPE */ ||
7212 dtype == DTYPE_ESI /* SES */) {
7213 if (!(softs->flags & AAC_FLAGS_NONDASD))
7214 return (NDI_FAILURE);
7215 AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt);
7216
7217 } else if (dtype == DTYPE_DIRECT) {
7218 if (!(softs->flags & AAC_FLAGS_JBOD) || qual != 0)
7219 return (NDI_FAILURE);
7220 AACDB_PRINT(softs, CE_NOTE, "JBOD DASD %d found", tgt);
7221 }
7222
7223 mutex_enter(&softs->io_lock);
7224 softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID;
7225 mutex_exit(&softs->io_lock);
7226 return (NDI_SUCCESS);
7227 }
7228 }
7229
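/*
 * Configure the dev node for (tgt, lun): return the existing child if one
 * is present, otherwise probe the device and create the node
 */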
7230 static int
7231 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun,
7232 dev_info_t **ldip)
7233 {
7234 struct scsi_device sd;
7235 dev_info_t *child;
7236 int rval;
7237
7238 DBCALLED(softs, 2);
7239
7240 if ((child = aac_find_child(softs, tgt, lun)) != NULL) {
7241 if (ldip)
7242 *ldip = child;
7243 return (NDI_SUCCESS);
7244 }
7245
7246 bzero(&sd, sizeof (struct scsi_device));
7247 sd.sd_address.a_hba_tran = softs->hba_tran;
7248 sd.sd_address.a_target = (uint16_t)tgt;
7249 sd.sd_address.a_lun = (uint8_t)lun;
7250 if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS)
7251 rval = aac_config_child(softs, &sd, ldip);
7252 /* scsi_unprobe is blank now. Free buffer manually */
7253 if (sd.sd_inq) {
7254 kmem_free(sd.sd_inq, SUN_INQSIZE);
7255 sd.sd_inq = (struct scsi_inquiry *)NULL;
7256 }
7257 return (rval);
7258 }
7259
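/*
 * Issue SCMD_REPORT_LUNS to a physical target and configure a dev node for
 * each lun reported (falling back to lun 0 if the command is not supported);
 * return the number of luns configured
 */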
7260 static int
7261 aac_config_tgt(struct aac_softstate *softs, int tgt)
7262 {
7263 struct scsi_address ap;
7264 struct buf *bp = NULL;
7265 int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE;
7266 int list_len = 0;
7267 int lun_total = 0;
7268 dev_info_t *ldip;
7269 int i;
7270
7271 ap.a_hba_tran = softs->hba_tran;
7272 ap.a_target = (uint16_t)tgt;
7273 ap.a_lun = 0;
7274
7275 for (i = 0; i < 2; i++) {
7276 struct scsi_pkt *pkt;
7277 uchar_t *cdb;
7278 uchar_t *p;
7279 uint32_t data;
7280
7281 if (bp == NULL) {
7282 if ((bp = scsi_alloc_consistent_buf(&ap, NULL,
7283 buf_len, B_READ, NULL_FUNC, NULL)) == NULL)
7284 return (AACERR);
7285 }
7286 if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5,
7287 sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
7288 NULL, NULL)) == NULL) {
7289 scsi_free_consistent_buf(bp);
7290 return (AACERR);
7291 }
7292 cdb = pkt->pkt_cdbp;
7293 bzero(cdb, CDB_GROUP5);
7294 cdb[0] = SCMD_REPORT_LUNS;
7295
		/* Store the allocation length in the CDB (big-endian) */
7297 data = buf_len;
7298 for (p = &cdb[9]; p > &cdb[5]; p--) {
7299 *p = data & 0xff;
7300 data >>= 8;
7301 }
7302
7303 if (scsi_poll(pkt) < 0 ||
7304 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) {
7305 scsi_destroy_pkt(pkt);
7306 break;
7307 }
7308
		/* Convert the returned lun list length from big-endian to host order */
7310 for (p = (uchar_t *)bp->b_un.b_addr;
7311 p < (uchar_t *)bp->b_un.b_addr + 4; p++) {
7312 data <<= 8;
7313 data |= *p;
7314 }
7315 list_len = data;
7316 if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) {
7317 scsi_free_consistent_buf(bp);
7318 bp = NULL;
7319 buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE;
7320 }
7321 scsi_destroy_pkt(pkt);
7322 }
7323 if (i >= 2) {
7324 uint8_t *buf = (uint8_t *)(bp->b_un.b_addr +
7325 AAC_SCSI_RPTLUNS_HEAD_SIZE);
7326
7327 for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) {
7328 uint16_t lun;
7329
7330 /* Determine report luns addressing type */
7331 switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) {
7332 /*
7333 * Vendors in the field have been found to be
7334 * concatenating bus/target/lun to equal the
7335 * complete lun value instead of switching to
7336 * flat space addressing
7337 */
7338 case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL:
7339 case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT:
7340 case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE:
7341 lun = ((buf[0] & 0x3f) << 8) | buf[1];
7342 if (lun > UINT8_MAX) {
7343 AACDB_PRINT(softs, CE_WARN,
7344 "abnormal lun number: %d", lun);
7345 break;
7346 }
7347 if (aac_config_lun(softs, tgt, lun, &ldip) ==
7348 NDI_SUCCESS)
7349 lun_total++;
7350 break;
7351 }
7352
7353 buf += AAC_SCSI_RPTLUNS_ADDR_SIZE;
7354 }
7355 } else {
		/* The target may not support SCMD_REPORT_LUNS. */
7357 if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS)
7358 lun_total++;
7359 }
	if (bp != NULL)
		scsi_free_consistent_buf(bp);
7361 return (lun_total);
7362 }
7363
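/*
 * Mark the given target as being configured (en != 0) or as done with
 * configuration (en == 0) by setting or clearing AAC_DFLAG_CONFIGURING
 * under io_lock.
 */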
7364 static void
7365 aac_devcfg(struct aac_softstate *softs, int tgt, int en)
7366 {
7367 struct aac_device *dvp;
7368
7369 mutex_enter(&softs->io_lock);
7370 dvp = AAC_DEV(softs, tgt);
7371 if (en)
7372 dvp->flags |= AAC_DFLAG_CONFIGURING;
7373 else
7374 dvp->flags &= ~AAC_DFLAG_CONFIGURING;
7375 mutex_exit(&softs->io_lock);
7376 }
7377
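/*
 * tran_bus_config(9E) entry point: configure a single LUN for
 * BUS_CONFIG_ONE, or enumerate all logical containers and physical
 * devices for BUS_CONFIG_DRIVER/BUS_CONFIG_ALL, then let
 * ndi_busop_bus_config() complete the operation.
 */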
7378 static int
7379 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op,
7380 void *arg, dev_info_t **childp)
7381 {
7382 struct aac_softstate *softs;
7383 int circ = 0;
7384 int rval;
7385
7386 if ((softs = ddi_get_soft_state(aac_softstatep,
7387 ddi_get_instance(parent))) == NULL)
7388 return (NDI_FAILURE);
7389
7390 /* Commands for bus config should be blocked as the bus is quiesced */
7391 mutex_enter(&softs->io_lock);
7392 if (softs->state & AAC_STATE_QUIESCED) {
7393 AACDB_PRINT(softs, CE_NOTE,
		    "bus_config aborted because bus is quiesced");
7395 mutex_exit(&softs->io_lock);
7396 return (NDI_FAILURE);
7397 }
7398 mutex_exit(&softs->io_lock);
7399
7400 DBCALLED(softs, 1);
7401
7402 /* Hold the nexus across the bus_config */
7403 ndi_devi_enter(parent, &circ);
7404 switch (op) {
7405 case BUS_CONFIG_ONE: {
7406 int tgt, lun;
7407
7408 if (aac_parse_devname(arg, &tgt, &lun) != AACOK) {
7409 rval = NDI_FAILURE;
7410 break;
7411 }
7412 if (tgt >= AAC_MAX_LD) {
7413 if (tgt >= AAC_MAX_DEV(softs)) {
7414 rval = NDI_FAILURE;
7415 break;
7416 }
7417 }
7418
7419 AAC_DEVCFG_BEGIN(softs, tgt);
7420 rval = aac_config_lun(softs, tgt, lun, childp);
7421 AAC_DEVCFG_END(softs, tgt);
7422 break;
7423 }
7424
7425 case BUS_CONFIG_DRIVER:
7426 case BUS_CONFIG_ALL: {
7427 uint32_t bus, tgt;
7428 int index, total;
7429
7430 for (tgt = 0; tgt < AAC_MAX_LD; tgt++) {
7431 AAC_DEVCFG_BEGIN(softs, tgt);
7432 (void) aac_config_lun(softs, tgt, 0, NULL);
7433 AAC_DEVCFG_END(softs, tgt);
7434 }
7435
7436 /* Config the non-DASD devices connected to the card */
7437 total = 0;
7438 index = AAC_MAX_LD;
7439 for (bus = 0; bus < softs->bus_max; bus++) {
7440 AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus);
7441 for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) {
7442 AAC_DEVCFG_BEGIN(softs, index);
7443 if (aac_config_tgt(softs, index))
7444 total++;
7445 AAC_DEVCFG_END(softs, index);
7446 }
7447 }
7448 AACDB_PRINT(softs, CE_CONT,
7449 "?Total %d phys. device(s) found", total);
7450 rval = NDI_SUCCESS;
7451 break;
7452 }
	default:
		rval = NDI_FAILURE;
		break;
	}
7454
7455 if (rval == NDI_SUCCESS)
7456 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7457 ndi_devi_exit(parent, circ);
7458 return (rval);
7459 }
7460
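/*
 * Handle a dynamic reconfiguration event on a target: online the device
 * if it has become valid but has no devinfo node yet, or reset the
 * adapter and offline the node if the device has gone away. Called with
 * io_lock held; the lock is dropped while the node is (un)configured.
 */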
7461 /*ARGSUSED*/
7462 static int
7463 aac_handle_dr(struct aac_softstate *softs, int tgt, int lun, int event)
7464 {
7465 struct aac_device *dvp;
7466 dev_info_t *dip;
7467 int valid;
7468 int circ1 = 0;
7469
7470 DBCALLED(softs, 1);
7471
	/* Snapshot the device state while still holding io_lock */
7473 dvp = AAC_DEV(softs, tgt);
7474 valid = AAC_DEV_IS_VALID(dvp);
7475 dip = dvp->dip;
7476 if (!(softs->state & AAC_STATE_RUN))
7477 return (AACERR);
7478 mutex_exit(&softs->io_lock);
7479
7480 switch (event) {
7481 case AAC_CFG_ADD:
7482 case AAC_CFG_DELETE:
7483 /* Device onlined */
7484 if (dip == NULL && valid) {
7485 ndi_devi_enter(softs->devinfo_p, &circ1);
7486 (void) aac_config_lun(softs, tgt, 0, NULL);
7487 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined",
7488 softs->instance, tgt, lun);
7489 ndi_devi_exit(softs->devinfo_p, circ1);
7490 }
7491 /* Device offlined */
7492 if (dip && !valid) {
7493 mutex_enter(&softs->io_lock);
7494 (void) aac_do_reset(softs);
7495 mutex_exit(&softs->io_lock);
7496
7497 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7498 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined",
7499 softs->instance, tgt, lun);
7500 }
7501 break;
7502 }
7503
7504 mutex_enter(&softs->io_lock);
7505 return (AACOK);
7506 }
7507
7508 #ifdef DEBUG
7509
7510 /* -------------------------debug aid functions-------------------------- */
7511
7512 #define AAC_FIB_CMD_KEY_STRINGS \
7513 TestCommandResponse, "TestCommandResponse", \
7514 TestAdapterCommand, "TestAdapterCommand", \
7515 LastTestCommand, "LastTestCommand", \
7516 ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \
7517 ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \
7518 ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \
7519 ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \
7520 ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \
7521 ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \
7522 ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \
7523 ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \
7524 InterfaceShutdown, "InterfaceShutdown", \
7525 DmaCommandFib, "DmaCommandFib", \
7526 StartProfile, "StartProfile", \
7527 TermProfile, "TermProfile", \
7528 SpeedTest, "SpeedTest", \
7529 TakeABreakPt, "TakeABreakPt", \
7530 RequestPerfData, "RequestPerfData", \
7531 SetInterruptDefTimer, "SetInterruptDefTimer", \
7532 SetInterruptDefCount, "SetInterruptDefCount", \
7533 GetInterruptDefStatus, "GetInterruptDefStatus", \
7534 LastCommCommand, "LastCommCommand", \
7535 NuFileSystem, "NuFileSystem", \
7536 UFS, "UFS", \
7537 HostFileSystem, "HostFileSystem", \
7538 LastFileSystemCommand, "LastFileSystemCommand", \
7539 ContainerCommand, "ContainerCommand", \
7540 ContainerCommand64, "ContainerCommand64", \
7541 ClusterCommand, "ClusterCommand", \
7542 ScsiPortCommand, "ScsiPortCommand", \
7543 ScsiPortCommandU64, "ScsiPortCommandU64", \
7544 AifRequest, "AifRequest", \
7545 CheckRevision, "CheckRevision", \
7546 FsaHostShutdown, "FsaHostShutdown", \
7547 RequestAdapterInfo, "RequestAdapterInfo", \
7548 IsAdapterPaused, "IsAdapterPaused", \
7549 SendHostTime, "SendHostTime", \
7550 LastMiscCommand, "LastMiscCommand"
7551
7552 #define AAC_CTVM_SUBCMD_KEY_STRINGS \
7553 VM_Null, "VM_Null", \
7554 VM_NameServe, "VM_NameServe", \
7555 VM_ContainerConfig, "VM_ContainerConfig", \
7556 VM_Ioctl, "VM_Ioctl", \
7557 VM_FilesystemIoctl, "VM_FilesystemIoctl", \
7558 VM_CloseAll, "VM_CloseAll", \
7559 VM_CtBlockRead, "VM_CtBlockRead", \
7560 VM_CtBlockWrite, "VM_CtBlockWrite", \
7561 VM_SliceBlockRead, "VM_SliceBlockRead", \
7562 VM_SliceBlockWrite, "VM_SliceBlockWrite", \
7563 VM_DriveBlockRead, "VM_DriveBlockRead", \
7564 VM_DriveBlockWrite, "VM_DriveBlockWrite", \
7565 VM_EnclosureMgt, "VM_EnclosureMgt", \
7566 VM_Unused, "VM_Unused", \
7567 VM_CtBlockVerify, "VM_CtBlockVerify", \
7568 VM_CtPerf, "VM_CtPerf", \
7569 VM_CtBlockRead64, "VM_CtBlockRead64", \
7570 VM_CtBlockWrite64, "VM_CtBlockWrite64", \
7571 VM_CtBlockVerify64, "VM_CtBlockVerify64", \
7572 VM_CtHostRead64, "VM_CtHostRead64", \
7573 VM_CtHostWrite64, "VM_CtHostWrite64", \
7574 VM_NameServe64, "VM_NameServe64"
7575
7576 #define AAC_CT_SUBCMD_KEY_STRINGS \
7577 CT_Null, "CT_Null", \
7578 CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \
7579 CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \
7580 CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \
7581 CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \
7582 CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \
7583 CT_WRITE_MBR, "CT_WRITE_MBR", \
7584 CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \
7585 CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \
7586 CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \
7587 CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \
7588 CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \
7589 CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \
7590 CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \
7591 CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \
7592 CT_READ_MBR, "CT_READ_MBR", \
7593 CT_READ_PARTITION, "CT_READ_PARTITION", \
7594 CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \
7595 CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \
7596 CT_SLICE_SIZE, "CT_SLICE_SIZE", \
7597 CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \
7598 CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \
7599 CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \
7600 CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \
7601 CT_UNMIRROR, "CT_UNMIRROR", \
7602 CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \
7603 CT_GEN_MIRROR, "CT_GEN_MIRROR", \
7604 CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \
7605 CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \
7606 CT_MOVE2, "CT_MOVE2", \
7607 CT_SPLIT, "CT_SPLIT", \
7608 CT_SPLIT2, "CT_SPLIT2", \
7609 CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \
7610 CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \
7611 CT_RECONFIG, "CT_RECONFIG", \
7612 CT_BREAK2, "CT_BREAK2", \
7613 CT_BREAK, "CT_BREAK", \
7614 CT_MERGE2, "CT_MERGE2", \
7615 CT_MERGE, "CT_MERGE", \
7616 CT_FORCE_ERROR, "CT_FORCE_ERROR", \
7617 CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \
7618 CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \
7619 CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \
7620 CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \
7621 CT_VOLUME_ADD, "CT_VOLUME_ADD", \
7622 CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \
7623 CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \
7624 CT_COPY_STATUS, "CT_COPY_STATUS", \
7625 CT_COPY, "CT_COPY", \
7626 CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \
7627 CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \
7628 CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \
7629 CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \
7630 CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \
7631 CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \
7632 CT_SET, "CT_SET", \
7633 CT_GET, "CT_GET", \
7634 CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \
7635 CT_GET_DELAY, "CT_GET_DELAY", \
7636 CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \
7637 CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \
7638 CT_SCRUB, "CT_SCRUB", \
7639 CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \
7640 CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \
7641 CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \
7642 CT_PAUSE_IO, "CT_PAUSE_IO", \
7643 CT_RELEASE_IO, "CT_RELEASE_IO", \
7644 CT_SCRUB2, "CT_SCRUB2", \
7645 CT_MCHECK, "CT_MCHECK", \
7646 CT_CORRUPT, "CT_CORRUPT", \
7647 CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \
7648 CT_PROMOTE, "CT_PROMOTE", \
7649 CT_SET_DEAD, "CT_SET_DEAD", \
7650 CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \
7651 CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \
7652 CT_GET_PARAM, "CT_GET_PARAM", \
7653 CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \
7654 CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \
7655 CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \
7656 CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \
7657 CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \
7658 CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \
7659 CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \
7660 CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \
7661 CT_STOP_DATA, "CT_STOP_DATA", \
7662 CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \
7663 CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \
7664 CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \
7665 CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \
7666 CT_GET_TIME, "CT_GET_TIME", \
7667 CT_READ_DATA, "CT_READ_DATA", \
7668 CT_CTR, "CT_CTR", \
7669 CT_CTL, "CT_CTL", \
7670 CT_DRAINIO, "CT_DRAINIO", \
7671 CT_RELEASEIO, "CT_RELEASEIO", \
7672 CT_GET_NVRAM, "CT_GET_NVRAM", \
7673 CT_GET_MEMORY, "CT_GET_MEMORY", \
7674 CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \
7675 CT_ADD_LEVEL, "CT_ADD_LEVEL", \
7676 CT_NV_ZERO, "CT_NV_ZERO", \
7677 CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \
7678 CT_THROTTLE_ON, "CT_THROTTLE_ON", \
7679 CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \
7680 CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \
7681 CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \
7682 CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \
7683 CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \
7684 CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \
7685 CT_MONITOR, "CT_MONITOR", \
7686 CT_GEN_MORPH, "CT_GEN_MORPH", \
7687 CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \
7688 CT_CACHE_SET, "CT_CACHE_SET", \
7689 CT_CACHE_STAT, "CT_CACHE_STAT", \
7690 CT_TRACE_START, "CT_TRACE_START", \
7691 CT_TRACE_STOP, "CT_TRACE_STOP", \
7692 CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \
7693 CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \
7694 CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \
7695 CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \
7696 CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \
7697 CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \
7698 CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \
7699 CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \
7700 CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \
7701 CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \
7702 CT_STOP_DUMPS, "CT_STOP_DUMPS", \
7703 CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \
7704 CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \
7705 CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \
7706 CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \
7707 CT_READ_NAME, "CT_READ_NAME", \
7708 CT_WRITE_NAME, "CT_WRITE_NAME", \
7709 CT_TOSS_CACHE, "CT_TOSS_CACHE", \
7710 CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \
7711 CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \
7712 CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \
7713 CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \
7714 CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \
7715 CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \
7716 CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \
7717 CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \
7718 CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \
7719 CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \
7720 CT_FLUSH, "CT_FLUSH", \
7721 CT_REBUILD, "CT_REBUILD", \
7722 CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \
7723 CT_RESTART, "CT_RESTART", \
7724 CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \
7725 CT_TRACE_FLAG, "CT_TRACE_FLAG", \
7726 CT_RESTART_MORPH, "CT_RESTART_MORPH", \
7727 CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \
7728 CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \
7729 CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \
7730 CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \
7731 CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \
7732 CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \
7733 CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \
7734 CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \
7735 CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \
7736 CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \
7737 CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \
7738 CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \
7739 CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \
7740 CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \
7741 CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \
7742 CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \
7743 CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \
7744 CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \
7745 CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \
7746 CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \
7747 CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \
7748 CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \
7749 CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \
7750 CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \
7751 CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \
7752 CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \
7753 CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \
7754 CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \
7755 CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \
7756 CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \
7757 CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \
7758 CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \
7759 CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \
7760 CT_IS_CONTAINER_MEATADATA_STANDARD, \
7761 "CT_IS_CONTAINER_MEATADATA_STANDARD", \
7762 CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \
7763 CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \
7764 CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \
7765 CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \
7766 CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \
7767 CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \
7768 CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \
7769 CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \
7770 CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \
7771 CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \
7772 CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \
7773 CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \
7774 CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \
7775 CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \
7776 CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \
7777 CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \
7778 CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \
7779 CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \
7780 CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE"
7781
7782 #define AAC_CL_SUBCMD_KEY_STRINGS \
7783 CL_NULL, "CL_NULL", \
7784 DS_INIT, "DS_INIT", \
7785 DS_RESCAN, "DS_RESCAN", \
7786 DS_CREATE, "DS_CREATE", \
7787 DS_DELETE, "DS_DELETE", \
7788 DS_ADD_DISK, "DS_ADD_DISK", \
7789 DS_REMOVE_DISK, "DS_REMOVE_DISK", \
7790 DS_MOVE_DISK, "DS_MOVE_DISK", \
7791 DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \
7792 DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \
7793 DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \
7794 DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \
7795 DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \
7796 DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \
7797 DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \
7798 DS_GET_DRIVES, "DS_GET_DRIVES", \
7799 DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \
7800 DS_ONLINE, "DS_ONLINE", \
7801 DS_OFFLINE, "DS_OFFLINE", \
7802 DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \
7803 DS_FSAPRINT, "DS_FSAPRINT", \
7804 CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \
7805 CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \
7806 CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \
7807 CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \
7808 CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \
7809 CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \
7810 CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \
7811 CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \
7812 CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \
7813 CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \
7814 CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \
7815 CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \
7816 CC_GET_BUSINFO, "CC_GET_BUSINFO", \
7817 CC_GET_PORTINFO, "CC_GET_PORTINFO", \
7818 CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \
7819 CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \
7820 CQ_QUORUM_OP, "CQ_QUORUM_OP"
7821
7822 #define AAC_AIF_SUBCMD_KEY_STRINGS \
7823 AifCmdEventNotify, "AifCmdEventNotify", \
7824 AifCmdJobProgress, "AifCmdJobProgress", \
7825 AifCmdAPIReport, "AifCmdAPIReport", \
7826 AifCmdDriverNotify, "AifCmdDriverNotify", \
7827 AifReqJobList, "AifReqJobList", \
7828 AifReqJobsForCtr, "AifReqJobsForCtr", \
7829 AifReqJobsForScsi, "AifReqJobsForScsi", \
7830 AifReqJobReport, "AifReqJobReport", \
7831 AifReqTerminateJob, "AifReqTerminateJob", \
7832 AifReqSuspendJob, "AifReqSuspendJob", \
7833 AifReqResumeJob, "AifReqResumeJob", \
7834 AifReqSendAPIReport, "AifReqSendAPIReport", \
7835 AifReqAPIJobStart, "AifReqAPIJobStart", \
7836 AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \
7837 AifReqAPIJobFinish, "AifReqAPIJobFinish"
7838
7839 #define AAC_IOCTL_SUBCMD_KEY_STRINGS \
7840 Reserved_IOCTL, "Reserved_IOCTL", \
7841 GetDeviceHandle, "GetDeviceHandle", \
7842 BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \
7843 DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \
7844 RescanBus, "RescanBus", \
7845 GetDeviceProbeInfo, "GetDeviceProbeInfo", \
7846 GetDeviceCapacity, "GetDeviceCapacity", \
7847 GetContainerProbeInfo, "GetContainerProbeInfo", \
7848 GetRequestedMemorySize, "GetRequestedMemorySize", \
7849 GetBusInfo, "GetBusInfo", \
7850 GetVendorSpecific, "GetVendorSpecific", \
7851 EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \
7852 EnhancedGetBusInfo, "EnhancedGetBusInfo", \
7853 SetupExtendedCounters, "SetupExtendedCounters", \
7854 GetPerformanceCounters, "GetPerformanceCounters", \
7855 ResetPerformanceCounters, "ResetPerformanceCounters", \
7856 ReadModePage, "ReadModePage", \
7857 WriteModePage, "WriteModePage", \
7858 ReadDriveParameter, "ReadDriveParameter", \
7859 WriteDriveParameter, "WriteDriveParameter", \
7860 ResetAdapter, "ResetAdapter", \
7861 ResetBus, "ResetBus", \
7862 ResetBusDevice, "ResetBusDevice", \
7863 ExecuteSrb, "ExecuteSrb", \
7864 Create_IO_Task, "Create_IO_Task", \
7865 Delete_IO_Task, "Delete_IO_Task", \
7866 Get_IO_Task_Info, "Get_IO_Task_Info", \
7867 Check_Task_Progress, "Check_Task_Progress", \
7868 InjectError, "InjectError", \
7869 GetDeviceDefectCounts, "GetDeviceDefectCounts", \
7870 GetDeviceDefectInfo, "GetDeviceDefectInfo", \
7871 GetDeviceStatus, "GetDeviceStatus", \
7872 ClearDeviceStatus, "ClearDeviceStatus", \
7873 DiskSpinControl, "DiskSpinControl", \
7874 DiskSmartControl, "DiskSmartControl", \
7875 WriteSame, "WriteSame", \
7876 ReadWriteLong, "ReadWriteLong", \
7877 FormatUnit, "FormatUnit", \
7878 TargetDeviceControl, "TargetDeviceControl", \
7879 TargetChannelControl, "TargetChannelControl", \
7880 FlashNewCode, "FlashNewCode", \
7881 DiskCheck, "DiskCheck", \
7882 RequestSense, "RequestSense", \
7883 DiskPERControl, "DiskPERControl", \
7884 Read10, "Read10", \
7885 Write10, "Write10"
7886
7887 #define AAC_AIFEN_KEY_STRINGS \
7888 AifEnGeneric, "Generic", \
7889 AifEnTaskComplete, "TaskComplete", \
7890 AifEnConfigChange, "Config change", \
7891 AifEnContainerChange, "Container change", \
7892 AifEnDeviceFailure, "device failed", \
7893 AifEnMirrorFailover, "Mirror failover", \
7894 AifEnContainerEvent, "container event", \
7895 AifEnFileSystemChange, "File system changed", \
7896 AifEnConfigPause, "Container pause event", \
7897 AifEnConfigResume, "Container resume event", \
7898 AifEnFailoverChange, "Failover space assignment changed", \
7899 AifEnRAID5RebuildDone, "RAID5 rebuild finished", \
7900 AifEnEnclosureManagement, "Enclosure management event", \
7901 AifEnBatteryEvent, "battery event", \
7902 AifEnAddContainer, "Add container", \
7903 AifEnDeleteContainer, "Delete container", \
7904 AifEnSMARTEvent, "SMART Event", \
7905 AifEnBatteryNeedsRecond, "battery needs reconditioning", \
7906 AifEnClusterEvent, "cluster event", \
	AifEnDiskSetEvent, "disk set event occurred", \
7908 AifDenMorphComplete, "morph operation completed", \
7909 AifDenVolumeExtendComplete, "VolumeExtendComplete"
7910
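/*
 * Key-to-string mapping entry used by the debug code below to translate
 * FIB commands, sub-commands and AIF event types into readable names,
 * e.g.
 *	char *name = aac_cmd_name(fib_cmd, aac_fib_cmds);
 * Each lookup table is terminated by a { -1, NULL } entry.
 */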
7911 struct aac_key_strings {
7912 int key;
7913 char *message;
7914 };
7915
7916 extern struct scsi_key_strings scsi_cmds[];
7917
7918 static struct aac_key_strings aac_fib_cmds[] = {
7919 AAC_FIB_CMD_KEY_STRINGS,
7920 -1, NULL
7921 };
7922
7923 static struct aac_key_strings aac_ctvm_subcmds[] = {
7924 AAC_CTVM_SUBCMD_KEY_STRINGS,
7925 -1, NULL
7926 };
7927
7928 static struct aac_key_strings aac_ct_subcmds[] = {
7929 AAC_CT_SUBCMD_KEY_STRINGS,
7930 -1, NULL
7931 };
7932
7933 static struct aac_key_strings aac_cl_subcmds[] = {
7934 AAC_CL_SUBCMD_KEY_STRINGS,
7935 -1, NULL
7936 };
7937
7938 static struct aac_key_strings aac_aif_subcmds[] = {
7939 AAC_AIF_SUBCMD_KEY_STRINGS,
7940 -1, NULL
7941 };
7942
7943 static struct aac_key_strings aac_ioctl_subcmds[] = {
7944 AAC_IOCTL_SUBCMD_KEY_STRINGS,
7945 -1, NULL
7946 };
7947
7948 static struct aac_key_strings aac_aifens[] = {
7949 AAC_AIFEN_KEY_STRINGS,
7950 -1, NULL
7951 };
7952
7953 /*
7954 * The following function comes from Adaptec:
7955 *
 * Get the firmware print buffer parameters from the firmware;
 * if the command was successful, map in the address.
7958 */
7959 static int
7960 aac_get_fw_debug_buffer(struct aac_softstate *softs)
7961 {
7962 if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP,
7963 0, 0, 0, 0, NULL) == AACOK) {
7964 uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1);
7965 uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2);
7966 uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3);
7967 uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4);
7968
7969 if (mondrv_buf_size) {
7970 uint32_t offset = mondrv_buf_paddrl - \
7971 softs->pci_mem_base_paddr;
7972
7973 /*
7974 * See if the address is already mapped in, and
7975 * if so set it up from the base address
7976 */
7977 if ((mondrv_buf_paddrh == 0) &&
7978 (offset + mondrv_buf_size < softs->map_size)) {
7979 mutex_enter(&aac_prt_mutex);
7980 softs->debug_buf_offset = offset;
7981 softs->debug_header_size = mondrv_hdr_size;
7982 softs->debug_buf_size = mondrv_buf_size;
7983 softs->debug_fw_flags = 0;
7984 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
7985 mutex_exit(&aac_prt_mutex);
7986
7987 return (AACOK);
7988 }
7989 }
7990 }
7991 return (AACERR);
7992 }
7993
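/*
 * Return non-zero if the given debug flag is set and at least one output
 * method (firmware print buffer or kernel print) is enabled. With a NULL
 * softs the global aac_debug_flags value is consulted instead.
 */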
7994 int
7995 aac_dbflag_on(struct aac_softstate *softs, int flag)
7996 {
7997 int debug_flags = softs ? softs->debug_flags : aac_debug_flags;
7998
7999 return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \
8000 AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag));
8001 }
8002
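/*
 * Emit the formatted message in aac_prt_buf through cmn_err(9F),
 * optionally prefixed with the adapter vendor name and instance, and
 * optionally qualified with an sl character ('^', '!' or '?'). Called
 * with aac_prt_mutex held.
 */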
8003 static void
8004 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader)
8005 {
8006 if (noheader) {
8007 if (sl) {
8008 aac_fmt[0] = sl;
8009 cmn_err(lev, aac_fmt, aac_prt_buf);
8010 } else {
8011 cmn_err(lev, &aac_fmt[1], aac_prt_buf);
8012 }
8013 } else {
8014 if (sl) {
8015 aac_fmt_header[0] = sl;
8016 cmn_err(lev, aac_fmt_header,
8017 softs->vendor_name, softs->instance,
8018 aac_prt_buf);
8019 } else {
8020 cmn_err(lev, &aac_fmt_header[1],
8021 softs->vendor_name, softs->instance,
8022 aac_prt_buf);
8023 }
8024 }
8025 }
8026
8027 /*
8028 * The following function comes from Adaptec:
8029 *
8030 * Format and print out the data passed in to UART or console
8031 * as specified by debug flags.
8032 */
8033 void
8034 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...)
8035 {
8036 va_list args;
8037 char sl; /* system log character */
8038
8039 mutex_enter(&aac_prt_mutex);
8040 /* Set up parameters and call sprintf function to format the data */
8041 if (strchr("^!?", fmt[0]) == NULL) {
8042 sl = 0;
8043 } else {
8044 sl = fmt[0];
8045 fmt++;
8046 }
8047 va_start(args, fmt);
8048 (void) vsprintf(aac_prt_buf, fmt, args);
8049 va_end(args);
8050
8051 /* Make sure the softs structure has been passed in for this section */
8052 if (softs) {
8053 if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) &&
8054 /* If we are set up for a Firmware print */
8055 (softs->debug_buf_size)) {
8056 uint32_t count, i;
8057
8058 /* Make sure the string size is within boundaries */
8059 count = strlen(aac_prt_buf);
8060 if (count > softs->debug_buf_size)
8061 count = (uint16_t)softs->debug_buf_size;
8062
8063 /*
8064 * Wait for no more than AAC_PRINT_TIMEOUT for the
8065 * previous message length to clear (the handshake).
8066 */
8067 for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
8068 if (!PCI_MEM_GET32(softs,
8069 softs->debug_buf_offset + \
8070 AAC_FW_DBG_STRLEN_OFFSET))
8071 break;
8072
8073 drv_usecwait(1000);
8074 }
8075
			/*
			 * If the length is clear, copy over the message and
			 * the flags, then write the length last, since a
			 * non-zero length is the signal for the firmware to
			 * pick the message up.
			 */
8082 if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \
8083 AAC_FW_DBG_STRLEN_OFFSET)) {
8084 PCI_MEM_REP_PUT8(softs,
8085 softs->debug_buf_offset + \
8086 softs->debug_header_size,
8087 aac_prt_buf, count);
8088 PCI_MEM_PUT32(softs,
8089 softs->debug_buf_offset + \
8090 AAC_FW_DBG_FLAGS_OFFSET,
8091 softs->debug_fw_flags);
8092 PCI_MEM_PUT32(softs,
8093 softs->debug_buf_offset + \
8094 AAC_FW_DBG_STRLEN_OFFSET, count);
8095 } else {
				cmn_err(CE_WARN, "UART output failed");
8097 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
8098 }
8099 }
8100
8101 /*
8102 * If the Kernel Debug Print flag is set, send it off
8103 * to the Kernel Debugger
8104 */
8105 if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT)
8106 aac_cmn_err(softs, lev, sl,
8107 (softs->debug_flags & AACDB_FLAGS_NO_HEADERS));
8108 } else {
8109 /* Driver not initialized yet, no firmware or header output */
8110 if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT)
8111 aac_cmn_err(softs, lev, sl, 1);
8112 }
8113 mutex_exit(&aac_prt_mutex);
8114 }
8115
8116 /*
8117 * Translate command number to description string
8118 */
8119 static char *
8120 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist)
8121 {
8122 int i;
8123
8124 for (i = 0; cmdlist[i].key != -1; i++) {
8125 if (cmd == cmdlist[i].key)
8126 return (cmdlist[i].message);
8127 }
8128 return (NULL);
8129 }
8130
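/*
 * Log a SCSI command for debugging: decode the opcode, the LBA and
 * transfer count for the read/write CDB groups, and the addressed
 * controller/target/LUN ("(pd)" is appended for physical devices).
 */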
8131 static void
8132 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
8133 {
8134 struct scsi_pkt *pkt = acp->pkt;
8135 struct scsi_address *ap = &pkt->pkt_address;
8136 int is_pd = 0;
8137 int ctl = ddi_get_instance(softs->devinfo_p);
8138 int tgt = ap->a_target;
8139 int lun = ap->a_lun;
8140 union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp;
8141 uchar_t cmd = cdbp->scc_cmd;
8142 char *desc;
8143
8144 if (tgt >= AAC_MAX_LD) {
8145 is_pd = 1;
8146 ctl = ((struct aac_nondasd *)acp->dvp)->bus;
8147 tgt = ((struct aac_nondasd *)acp->dvp)->tid;
8148 lun = 0;
8149 }
8150
8151 if ((desc = aac_cmd_name(cmd,
8152 (struct aac_key_strings *)scsi_cmds)) == NULL) {
8153 aac_printf(softs, CE_NOTE,
8154 "SCMD> Unknown(0x%2x) --> c%dt%dL%d %s",
8155 cmd, ctl, tgt, lun, is_pd ? "(pd)" : "");
8156 return;
8157 }
8158
8159 switch (cmd) {
8160 case SCMD_READ:
8161 case SCMD_WRITE:
8162 aac_printf(softs, CE_NOTE,
8163 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
8164 desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp),
8165 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8166 ctl, tgt, lun, is_pd ? "(pd)" : "");
8167 break;
8168 case SCMD_READ_G1:
8169 case SCMD_WRITE_G1:
8170 aac_printf(softs, CE_NOTE,
8171 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
8172 desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp),
8173 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8174 ctl, tgt, lun, is_pd ? "(pd)" : "");
8175 break;
8176 case SCMD_READ_G4:
8177 case SCMD_WRITE_G4:
8178 aac_printf(softs, CE_NOTE,
8179 "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s",
8180 desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp),
8181 GETG4COUNT(cdbp),
8182 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8183 ctl, tgt, lun, is_pd ? "(pd)" : "");
8184 break;
8185 case SCMD_READ_G5:
8186 case SCMD_WRITE_G5:
8187 aac_printf(softs, CE_NOTE,
8188 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
8189 desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp),
8190 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8191 ctl, tgt, lun, is_pd ? "(pd)" : "");
8192 break;
8193 default:
8194 aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s",
8195 desc, ctl, tgt, lun, is_pd ? "(pd)" : "");
8196 }
8197 }
8198
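/*
 * Dump a FIB for debugging: optionally the FIB header, then the command
 * and sub-command decoded through the key-string tables above, plus the
 * scatter/gather list of block read/write and raw I/O requests.
 */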
8199 void
8200 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp)
8201 {
8202 struct aac_cmd *acp = slotp->acp;
8203 struct aac_fib *fibp = slotp->fibp;
8204 ddi_acc_handle_t acc = slotp->fib_acc_handle;
8205 uint16_t fib_size;
8206 uint32_t fib_cmd, sub_cmd;
8207 char *cmdstr, *subcmdstr;
8208 char *caller;
8209 int i;
8210
8211 if (acp) {
8212 if (!(softs->debug_fib_flags & acp->fib_flags))
8213 return;
8214 if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD)
8215 caller = "SCMD";
8216 else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL)
8217 caller = "IOCTL";
8218 else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB)
8219 caller = "SRB";
8220 else
8221 return;
8222 } else {
8223 if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC))
8224 return;
8225 caller = "SYNC";
8226 }
8227
8228 fib_cmd = ddi_get16(acc, &fibp->Header.Command);
8229 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds);
8230 sub_cmd = (uint32_t)-1;
8231 subcmdstr = NULL;
8232
8233 /* Print FIB header */
8234 if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) {
8235 aac_printf(softs, CE_NOTE, "FIB> from %s", caller);
8236 aac_printf(softs, CE_NOTE, " XferState %d",
8237 ddi_get32(acc, &fibp->Header.XferState));
8238 aac_printf(softs, CE_NOTE, " Command %d",
8239 ddi_get16(acc, &fibp->Header.Command));
8240 aac_printf(softs, CE_NOTE, " StructType %d",
8241 ddi_get8(acc, &fibp->Header.StructType));
8242 aac_printf(softs, CE_NOTE, " Flags 0x%x",
8243 ddi_get8(acc, &fibp->Header.Flags));
8244 aac_printf(softs, CE_NOTE, " Size %d",
8245 ddi_get16(acc, &fibp->Header.Size));
8246 aac_printf(softs, CE_NOTE, " SenderSize %d",
8247 ddi_get16(acc, &fibp->Header.SenderSize));
8248 aac_printf(softs, CE_NOTE, " SenderAddr 0x%x",
8249 ddi_get32(acc, &fibp->Header.SenderFibAddress));
8250 aac_printf(softs, CE_NOTE, " RcvrAddr 0x%x",
8251 ddi_get32(acc, &fibp->Header.ReceiverFibAddress));
8252 aac_printf(softs, CE_NOTE, " SenderData 0x%x",
8253 ddi_get32(acc, &fibp->Header.SenderData));
8254 }
8255
8256 /* Print FIB data */
8257 switch (fib_cmd) {
8258 case ContainerCommand:
8259 sub_cmd = ddi_get32(acc,
8260 (void *)&(((uint32_t *)(void *)&fibp->data[0])[0]));
8261 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds);
8262 if (subcmdstr == NULL)
8263 break;
8264
8265 switch (sub_cmd) {
8266 case VM_ContainerConfig: {
8267 struct aac_Container *pContainer =
8268 (struct aac_Container *)fibp->data;
8269
8270 fib_cmd = sub_cmd;
8271 cmdstr = subcmdstr;
8272 sub_cmd = (uint32_t)-1;
8273 subcmdstr = NULL;
8274
8275 sub_cmd = ddi_get32(acc,
8276 &pContainer->CTCommand.command);
8277 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds);
8278 if (subcmdstr == NULL)
8279 break;
8280 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)",
8281 subcmdstr,
8282 ddi_get32(acc, &pContainer->CTCommand.param[0]),
8283 ddi_get32(acc, &pContainer->CTCommand.param[1]),
8284 ddi_get32(acc, &pContainer->CTCommand.param[2]));
8285 return;
8286 }
8287
8288 case VM_Ioctl:
8289 fib_cmd = sub_cmd;
8290 cmdstr = subcmdstr;
8291 sub_cmd = (uint32_t)-1;
8292 subcmdstr = NULL;
8293
8294 sub_cmd = ddi_get32(acc,
8295 (void *)&(((uint32_t *)(void *)&fibp->data[0])[4]));
8296 subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds);
8297 break;
8298
8299 case VM_CtBlockRead:
8300 case VM_CtBlockWrite: {
8301 struct aac_blockread *br =
8302 (struct aac_blockread *)fibp->data;
8303 struct aac_sg_table *sg = &br->SgMap;
8304 uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8305
8306 aac_printf(softs, CE_NOTE,
8307 "FIB> %s Container %d 0x%x/%d", subcmdstr,
8308 ddi_get32(acc, &br->ContainerId),
8309 ddi_get32(acc, &br->BlockNumber),
8310 ddi_get32(acc, &br->ByteCount));
8311 for (i = 0; i < sgcount; i++)
8312 aac_printf(softs, CE_NOTE,
8313 " %d: 0x%08x/%d", i,
8314 ddi_get32(acc, &sg->SgEntry[i].SgAddress),
8315 ddi_get32(acc, &sg->SgEntry[i]. \
8316 SgByteCount));
8317 return;
8318 }
8319 }
8320 break;
8321
8322 case ContainerCommand64: {
8323 struct aac_blockread64 *br =
8324 (struct aac_blockread64 *)fibp->data;
8325 struct aac_sg_table64 *sg = &br->SgMap64;
8326 uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8327 uint64_t sgaddr;
8328
8329 sub_cmd = br->Command;
8330 subcmdstr = NULL;
8331 if (sub_cmd == VM_CtHostRead64)
8332 subcmdstr = "VM_CtHostRead64";
8333 else if (sub_cmd == VM_CtHostWrite64)
8334 subcmdstr = "VM_CtHostWrite64";
8335 else
8336 break;
8337
8338 aac_printf(softs, CE_NOTE,
8339 "FIB> %s Container %d 0x%x/%d", subcmdstr,
8340 ddi_get16(acc, &br->ContainerId),
8341 ddi_get32(acc, &br->BlockNumber),
8342 ddi_get16(acc, &br->SectorCount));
8343 for (i = 0; i < sgcount; i++) {
8344 sgaddr = ddi_get64(acc,
8345 &sg->SgEntry64[i].SgAddress);
8346 aac_printf(softs, CE_NOTE,
8347 " %d: 0x%08x.%08x/%d", i,
8348 AAC_MS32(sgaddr), AAC_LS32(sgaddr),
8349 ddi_get32(acc, &sg->SgEntry64[i]. \
8350 SgByteCount));
8351 }
8352 return;
8353 }
8354
8355 case RawIo: {
8356 struct aac_raw_io *io = (struct aac_raw_io *)fibp->data;
8357 struct aac_sg_tableraw *sg = &io->SgMapRaw;
8358 uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8359 uint64_t sgaddr;
8360
8361 aac_printf(softs, CE_NOTE,
8362 "FIB> RawIo Container %d 0x%llx/%d 0x%x",
8363 ddi_get16(acc, &io->ContainerId),
8364 ddi_get64(acc, &io->BlockNumber),
8365 ddi_get32(acc, &io->ByteCount),
8366 ddi_get16(acc, &io->Flags));
8367 for (i = 0; i < sgcount; i++) {
8368 sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress);
8369 aac_printf(softs, CE_NOTE, " %d: 0x%08x.%08x/%d", i,
8370 AAC_MS32(sgaddr), AAC_LS32(sgaddr),
8371 ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount));
8372 }
8373 return;
8374 }
8375
8376 case ClusterCommand:
8377 sub_cmd = ddi_get32(acc,
8378 (void *)&(((uint32_t *)(void *)fibp->data)[0]));
8379 subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds);
8380 break;
8381
8382 case AifRequest:
8383 sub_cmd = ddi_get32(acc,
8384 (void *)&(((uint32_t *)(void *)fibp->data)[0]));
8385 subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds);
8386 break;
8387
8388 default:
8389 break;
8390 }
8391
8392 fib_size = ddi_get16(acc, &(fibp->Header.Size));
8393 if (subcmdstr)
8394 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
8395 subcmdstr, fib_size);
8396 else if (cmdstr && sub_cmd == (uint32_t)-1)
8397 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
8398 cmdstr, fib_size);
8399 else if (cmdstr)
8400 aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d",
8401 cmdstr, sub_cmd, fib_size);
8402 else
8403 aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d",
8404 fib_cmd, fib_size);
8405 }
8406
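/*
 * Decode and log an AIF (adapter initiated FIB), such as event
 * notifications and job progress reports.
 */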
8407 static void
8408 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
8409 {
8410 int aif_command;
8411 uint32_t aif_seqnumber;
8412 int aif_en_type;
8413 char *str;
8414
8415 aif_command = LE_32(aif->command);
8416 aif_seqnumber = LE_32(aif->seqNumber);
8417 aif_en_type = LE_32(aif->data.EN.type);
8418
8419 switch (aif_command) {
8420 case AifCmdEventNotify:
8421 str = aac_cmd_name(aif_en_type, aac_aifens);
8422 if (str)
8423 aac_printf(softs, CE_NOTE, "AIF! %s", str);
8424 else
8425 aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)",
8426 aif_en_type);
8427 break;
8428
8429 case AifCmdJobProgress:
8430 switch (LE_32(aif->data.PR[0].status)) {
8431 case AifJobStsSuccess:
8432 str = "success"; break;
8433 case AifJobStsFinished:
8434 str = "finished"; break;
8435 case AifJobStsAborted:
8436 str = "aborted"; break;
8437 case AifJobStsFailed:
8438 str = "failed"; break;
8439 case AifJobStsSuspended:
8440 str = "suspended"; break;
8441 case AifJobStsRunning:
8442 str = "running"; break;
8443 default:
8444 str = "unknown"; break;
8445 }
8446 aac_printf(softs, CE_NOTE,
8447 "AIF! JobProgress (%d) - %s (%d, %d)",
8448 aif_seqnumber, str,
8449 LE_32(aif->data.PR[0].currentTick),
8450 LE_32(aif->data.PR[0].finalTick));
8451 break;
8452
8453 case AifCmdAPIReport:
8454 aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)",
8455 aif_seqnumber);
8456 break;
8457
8458 case AifCmdDriverNotify:
8459 aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)",
8460 aif_seqnumber);
8461 break;
8462
8463 default:
8464 aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)",
8465 aif_command, aif_seqnumber);
8466 break;
8467 }
8468 }
8469
8470 #endif /* DEBUG */