3117 aac forgets C isn't math
--- old/usr/src/uts/common/io/aac/aac.c
+++ new/usr/src/uts/common/io/aac/aac.c
1 1 /*
2 2 * Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
3 3 */
4 4
5 5 /*
6 6 * Copyright 2005-08 Adaptec, Inc.
7 7 * Copyright (c) 2005-08 Adaptec Inc., Achim Leubner
8 8 * Copyright (c) 2000 Michael Smith
9 9 * Copyright (c) 2001 Scott Long
10 10 * Copyright (c) 2000 BSDi
11 11 * All rights reserved.
12 12 *
13 13 * Redistribution and use in source and binary forms, with or without
14 14 * modification, are permitted provided that the following conditions
15 15 * are met:
16 16 * 1. Redistributions of source code must retain the above copyright
17 17 * notice, this list of conditions and the following disclaimer.
18 18 * 2. Redistributions in binary form must reproduce the above copyright
19 19 * notice, this list of conditions and the following disclaimer in the
20 20 * documentation and/or other materials provided with the distribution.
21 21 *
22 22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
23 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
24 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 25 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
26 26 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
27 27 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
28 28 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
29 29 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
30 30 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
31 31 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 32 * SUCH DAMAGE.
33 33 */
34 34 #include <sys/modctl.h>
35 35 #include <sys/conf.h>
36 36 #include <sys/cmn_err.h>
37 37 #include <sys/ddi.h>
38 38 #include <sys/devops.h>
39 39 #include <sys/pci.h>
40 40 #include <sys/types.h>
41 41 #include <sys/ddidmareq.h>
42 42 #include <sys/scsi/scsi.h>
43 43 #include <sys/ksynch.h>
44 44 #include <sys/sunddi.h>
45 45 #include <sys/byteorder.h>
46 46 #include "aac_regs.h"
47 47 #include "aac.h"
48 48
49 49 /*
50 50 * FMA header files
51 51 */
52 52 #include <sys/ddifm.h>
53 53 #include <sys/fm/protocol.h>
54 54 #include <sys/fm/util.h>
55 55 #include <sys/fm/io/ddi.h>
56 56
57 57 /*
58 58 * For minor nodes created by the SCSA framework, minor numbers are
59 59 * formed by left-shifting the instance by INST_MINOR_SHIFT and ORing in a
60 60 * number less than 64.
61 61 *
62 62 * To support cfgadm, we need to conform to the SCSA framework by creating
63 63 * devctl/scsi and driver-specific minor nodes in SCSA format,
64 64 * and calling scsi_hba_xxx() functions accordingly.
65 65 */
66 66
67 67 #define AAC_MINOR 32
68 68 #define INST2AAC(x) (((x) << INST_MINOR_SHIFT) | AAC_MINOR)
69 69 #define AAC_SCSA_MINOR(x) ((x) & TRAN_MINOR_MASK)
70 70 #define AAC_IS_SCSA_NODE(x) ((x) == DEVCTL_MINOR || (x) == SCSI_MINOR)
71 71
72 72 #define SD2TRAN(sd) ((sd)->sd_address.a_hba_tran)
73 73 #define AAC_TRAN2SOFTS(tran) ((struct aac_softstate *)(tran)->tran_hba_private)
74 74 #define AAC_DIP2TRAN(dip) ((scsi_hba_tran_t *)ddi_get_driver_private(dip))
75 75 #define AAC_DIP2SOFTS(dip) (AAC_TRAN2SOFTS(AAC_DIP2TRAN(dip)))
76 76 #define SD2AAC(sd) (AAC_TRAN2SOFTS(SD2TRAN(sd)))
77 77 #define AAC_PD(t) ((t) - AAC_MAX_LD)
78 78 #define AAC_DEV(softs, t) (((t) < AAC_MAX_LD) ? \
79 79 &(softs)->containers[(t)].dev : \
80 80 ((t) < AAC_MAX_DEV(softs)) ? \
81 81 &(softs)->nondasds[AAC_PD(t)].dev : NULL)
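/*
 * Worked example of the AAC_DEV() mapping above (not part of the original
 * source; AAC_MAX_LD is assumed to be 64, its usual value in aac.h):
 * target 3 resolves to &softs->containers[3].dev (a logical container),
 * target 70 resolves to &softs->nondasds[6].dev (a pass-through physical
 * device, since AAC_PD(70) == 70 - 64 == 6), and any target at or beyond
 * AAC_MAX_DEV(softs) yields NULL.
 */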
82 82 #define AAC_DEVCFG_BEGIN(softs, tgt) \
83 83 aac_devcfg((softs), (tgt), 1)
84 84 #define AAC_DEVCFG_END(softs, tgt) \
85 85 aac_devcfg((softs), (tgt), 0)
86 86 #define PKT2AC(pkt) ((struct aac_cmd *)(pkt)->pkt_ha_private)
87 87 #define AAC_BUSYWAIT(cond, timeout /* in millisecond */) { \
88 88 if (!(cond)) { \
89 89 int count = (timeout) * 10; \
90 90 while (count) { \
91 91 drv_usecwait(100); \
92 92 if (cond) \
93 93 break; \
94 94 count--; \
95 95 } \
96 96 (timeout) = (count + 9) / 10; \
97 97 } \
98 98 }
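/*
 * Illustrative usage sketch for AAC_BUSYWAIT() (not part of the original
 * source; the helper and the polled flag are hypothetical). Note that the
 * macro rewrites its timeout argument with the remaining milliseconds, so
 * a value of 0 afterwards means the condition never became true.
 */
static int
aac_busywait_example(volatile uint32_t *flagp)
{
	int timeout = 1000;	/* poll for up to 1000 milliseconds */

	AAC_BUSYWAIT(*flagp != 0, timeout);
	return ((timeout == 0) ? AACERR : AACOK);
}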
99 99
100 100 #define AAC_SENSE_DATA_DESCR_LEN \
101 101 (sizeof (struct scsi_descr_sense_hdr) + \
102 102 sizeof (struct scsi_information_sense_descr))
103 103 #define AAC_ARQ64_LENGTH \
104 104 (sizeof (struct scsi_arq_status) + \
105 105 AAC_SENSE_DATA_DESCR_LEN - SENSE_LENGTH)
106 106
107 107 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */
108 108 #define AAC_GETGXADDR(cmdlen, cdbp) \
109 109 ((cmdlen == 6) ? GETG0ADDR(cdbp) : \
110 110 (cmdlen == 10) ? (uint32_t)GETG1ADDR(cdbp) : \
111 111 ((uint64_t)GETG4ADDR(cdbp) << 32) | (uint32_t)GETG4ADDRTL(cdbp))
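/*
 * Worked example (not from the original source) of why the (uint32_t) cast
 * on GETG4ADDRTL() matters: GETG4ADDRTL() yields int32_t, and the usual
 * arithmetic conversions sign-extend it before the OR. For a 16-byte CDB
 * whose low LBA word has bit 31 set:
 *
 *	int32_t low = 0x80000000;
 *	((uint64_t)1 << 32) | low		== 0xffffffff80000000 (wrong)
 *	((uint64_t)1 << 32) | (uint32_t)low	== 0x0000000180000000 (right)
 */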
112 112
113 113 #define AAC_CDB_INQUIRY_CMDDT 0x02
114 114 #define AAC_CDB_INQUIRY_EVPD 0x01
115 115 #define AAC_VPD_PAGE_CODE 1
116 116 #define AAC_VPD_PAGE_LENGTH 3
117 117 #define AAC_VPD_PAGE_DATA 4
118 118 #define AAC_VPD_ID_CODESET 0
119 119 #define AAC_VPD_ID_TYPE 1
120 120 #define AAC_VPD_ID_LENGTH 3
121 121 #define AAC_VPD_ID_DATA 4
122 122
123 123 #define AAC_SCSI_RPTLUNS_HEAD_SIZE 0x08
124 124 #define AAC_SCSI_RPTLUNS_ADDR_SIZE 0x08
125 125 #define AAC_SCSI_RPTLUNS_ADDR_MASK 0xC0
126 126 /* 00b - peripheral device addressing method */
127 127 #define AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL 0x00
128 128 /* 01b - flat space addressing method */
129 129 #define AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE 0x40
130 130 /* 10b - logical unit addressing method */
131 131 #define AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT 0x80
132 132
133 133 /* Return the size of FIB with data part type data_type */
134 134 #define AAC_FIB_SIZEOF(data_type) \
135 135 (sizeof (struct aac_fib_header) + sizeof (data_type))
136 136 /* Return the container size defined in mir */
137 137 #define AAC_MIR_SIZE(softs, acc, mir) \
138 138 (((softs)->flags & AAC_FLAGS_LBA_64BIT) ? \
139 139 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity) + \
140 140 ((uint64_t)ddi_get32((acc), &(mir)->MntObj.CapacityHigh) << 32) : \
141 141 (uint64_t)ddi_get32((acc), &(mir)->MntObj.Capacity))
142 142
143 143 /* The last entry of aac_cards[] is for unknown cards */
144 144 #define AAC_UNKNOWN_CARD \
145 145 (sizeof (aac_cards) / sizeof (struct aac_card_type) - 1)
146 146 #define CARD_IS_UNKNOWN(i) (i == AAC_UNKNOWN_CARD)
147 147 #define BUF_IS_READ(bp) ((bp)->b_flags & B_READ)
148 148 #define AAC_IS_Q_EMPTY(q) ((q)->q_head == NULL)
149 149 #define AAC_CMDQ(acp) (!((acp)->flags & AAC_CMD_SYNC))
150 150
151 151 #define PCI_MEM_GET32(softs, off) \
152 152 ddi_get32((softs)->pci_mem_handle, \
153 153 (void *)((softs)->pci_mem_base_vaddr + (off)))
154 154 #define PCI_MEM_PUT32(softs, off, val) \
155 155 ddi_put32((softs)->pci_mem_handle, \
156 156 (void *)((softs)->pci_mem_base_vaddr + (off)), \
157 157 (uint32_t)(val))
158 158 #define PCI_MEM_GET16(softs, off) \
159 159 ddi_get16((softs)->pci_mem_handle, \
160 160 (void *)((softs)->pci_mem_base_vaddr + (off)))
161 161 #define PCI_MEM_PUT16(softs, off, val) \
162 162 ddi_put16((softs)->pci_mem_handle, \
163 163 (void *)((softs)->pci_mem_base_vaddr + (off)), (uint16_t)(val))
164 164 /* Write host data at valp to device mem[off] repeatedly count times */
165 165 #define PCI_MEM_REP_PUT8(softs, off, valp, count) \
166 166 ddi_rep_put8((softs)->pci_mem_handle, (uint8_t *)(valp), \
167 167 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
168 168 count, DDI_DEV_AUTOINCR)
169 169 /* Read device data at mem[off] to host addr valp repeatedly count times */
170 170 #define PCI_MEM_REP_GET8(softs, off, valp, count) \
171 171 ddi_rep_get8((softs)->pci_mem_handle, (uint8_t *)(valp), \
172 172 (uint8_t *)((softs)->pci_mem_base_vaddr + (off)), \
173 173 count, DDI_DEV_AUTOINCR)
174 174 #define AAC_GET_FIELD8(acc, d, s, field) \
175 175 (d)->field = ddi_get8(acc, (uint8_t *)&(s)->field)
176 176 #define AAC_GET_FIELD32(acc, d, s, field) \
177 177 (d)->field = ddi_get32(acc, (uint32_t *)&(s)->field)
178 178 #define AAC_GET_FIELD64(acc, d, s, field) \
179 179 (d)->field = ddi_get64(acc, (uint64_t *)&(s)->field)
180 180 #define AAC_REP_GET_FIELD8(acc, d, s, field, r) \
181 181 ddi_rep_get8((acc), (uint8_t *)&(d)->field, \
182 182 (uint8_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
183 183 #define AAC_REP_GET_FIELD32(acc, d, s, field, r) \
184 184 ddi_rep_get32((acc), (uint32_t *)&(d)->field, \
185 185 (uint32_t *)&(s)->field, (r), DDI_DEV_AUTOINCR)
186 186
187 187 #define AAC_ENABLE_INTR(softs) { \
188 188 if (softs->flags & AAC_FLAGS_NEW_COMM) \
189 189 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_NEW); \
190 190 else \
191 191 PCI_MEM_PUT32(softs, AAC_OIMR, ~AAC_DB_INTR_BITS); \
192 192 softs->state |= AAC_STATE_INTR; \
193 193 }
194 194
195 195 #define AAC_DISABLE_INTR(softs) { \
196 196 PCI_MEM_PUT32(softs, AAC_OIMR, ~0); \
197 197 softs->state &= ~AAC_STATE_INTR; \
198 198 }
199 199 #define AAC_STATUS_CLR(softs, mask) PCI_MEM_PUT32(softs, AAC_ODBR, mask)
200 200 #define AAC_STATUS_GET(softs) PCI_MEM_GET32(softs, AAC_ODBR)
201 201 #define AAC_NOTIFY(softs, val) PCI_MEM_PUT32(softs, AAC_IDBR, val)
202 202 #define AAC_OUTB_GET(softs) PCI_MEM_GET32(softs, AAC_OQUE)
203 203 #define AAC_OUTB_SET(softs, val) PCI_MEM_PUT32(softs, AAC_OQUE, val)
204 204 #define AAC_FWSTATUS_GET(softs) \
205 205 ((softs)->aac_if.aif_get_fwstatus(softs))
206 206 #define AAC_MAILBOX_GET(softs, mb) \
207 207 ((softs)->aac_if.aif_get_mailbox((softs), (mb)))
208 208 #define AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3) \
209 209 ((softs)->aac_if.aif_set_mailbox((softs), (cmd), \
210 210 (arg0), (arg1), (arg2), (arg3)))
211 211
212 212 #define AAC_MGT_SLOT_NUM 2
213 213 #define AAC_THROTTLE_DRAIN -1
214 214
215 215 #define AAC_QUIESCE_TICK 1 /* 1 second */
216 216 #define AAC_QUIESCE_TIMEOUT 180 /* 180 seconds */
217 217 #define AAC_DEFAULT_TICK 10 /* 10 seconds */
218 218 #define AAC_SYNC_TICK (30*60) /* 30 minutes */
219 219
220 220 /* Poll time for aac_do_poll_io() */
221 221 #define AAC_POLL_TIME 60 /* 60 seconds */
222 222
223 223 /* IOP reset */
224 224 #define AAC_IOP_RESET_SUCCEED 0 /* IOP reset succeed */
225 225 #define AAC_IOP_RESET_FAILED -1 /* IOP reset failed */
226 226 #define AAC_IOP_RESET_ABNORMAL -2 /* Reset operation abnormal */
227 227
228 228 /*
229 229 * Hardware access functions
230 230 */
231 231 static int aac_rx_get_fwstatus(struct aac_softstate *);
232 232 static int aac_rx_get_mailbox(struct aac_softstate *, int);
233 233 static void aac_rx_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
234 234 uint32_t, uint32_t, uint32_t);
235 235 static int aac_rkt_get_fwstatus(struct aac_softstate *);
236 236 static int aac_rkt_get_mailbox(struct aac_softstate *, int);
237 237 static void aac_rkt_set_mailbox(struct aac_softstate *, uint32_t, uint32_t,
238 238 uint32_t, uint32_t, uint32_t);
239 239
240 240 /*
241 241 * SCSA function prototypes
242 242 */
243 243 static int aac_attach(dev_info_t *, ddi_attach_cmd_t);
244 244 static int aac_detach(dev_info_t *, ddi_detach_cmd_t);
245 245 static int aac_reset(dev_info_t *, ddi_reset_cmd_t);
246 246 static int aac_quiesce(dev_info_t *);
247 247 static int aac_getinfo(dev_info_t *, ddi_info_cmd_t, void *, void **);
248 248
249 249 /*
250 250 * Interrupt handler functions
251 251 */
252 252 static int aac_query_intrs(struct aac_softstate *, int);
253 253 static int aac_add_intrs(struct aac_softstate *);
254 254 static void aac_remove_intrs(struct aac_softstate *);
255 255 static int aac_enable_intrs(struct aac_softstate *);
256 256 static int aac_disable_intrs(struct aac_softstate *);
257 257 static uint_t aac_intr_old(caddr_t);
258 258 static uint_t aac_intr_new(caddr_t);
259 259 static uint_t aac_softintr(caddr_t);
260 260
261 261 /*
262 262 * Internal functions in attach
263 263 */
264 264 static int aac_check_card_type(struct aac_softstate *);
265 265 static int aac_check_firmware(struct aac_softstate *);
266 266 static int aac_common_attach(struct aac_softstate *);
267 267 static void aac_common_detach(struct aac_softstate *);
268 268 static int aac_probe_containers(struct aac_softstate *);
269 269 static int aac_alloc_comm_space(struct aac_softstate *);
270 270 static int aac_setup_comm_space(struct aac_softstate *);
271 271 static void aac_free_comm_space(struct aac_softstate *);
272 272 static int aac_hba_setup(struct aac_softstate *);
273 273
274 274 /*
275 275 * Sync FIB operation functions
276 276 */
277 277 int aac_sync_mbcommand(struct aac_softstate *, uint32_t, uint32_t,
278 278 uint32_t, uint32_t, uint32_t, uint32_t *);
279 279 static int aac_sync_fib(struct aac_softstate *, uint16_t, uint16_t);
280 280
281 281 /*
282 282 * Command queue operation functions
283 283 */
284 284 static void aac_cmd_initq(struct aac_cmd_queue *);
285 285 static void aac_cmd_enqueue(struct aac_cmd_queue *, struct aac_cmd *);
286 286 static struct aac_cmd *aac_cmd_dequeue(struct aac_cmd_queue *);
287 287 static void aac_cmd_delete(struct aac_cmd_queue *, struct aac_cmd *);
288 288
289 289 /*
290 290 * FIB queue operation functions
291 291 */
292 292 static int aac_fib_enqueue(struct aac_softstate *, int, uint32_t, uint32_t);
293 293 static int aac_fib_dequeue(struct aac_softstate *, int, int *);
294 294
295 295 /*
296 296 * Slot operation functions
297 297 */
298 298 static int aac_create_slots(struct aac_softstate *);
299 299 static void aac_destroy_slots(struct aac_softstate *);
300 300 static void aac_alloc_fibs(struct aac_softstate *);
301 301 static void aac_destroy_fibs(struct aac_softstate *);
302 302 static struct aac_slot *aac_get_slot(struct aac_softstate *);
303 303 static void aac_release_slot(struct aac_softstate *, struct aac_slot *);
304 304 static int aac_alloc_fib(struct aac_softstate *, struct aac_slot *);
305 305 static void aac_free_fib(struct aac_slot *);
306 306
307 307 /*
308 308 * Internal functions
309 309 */
310 310 static void aac_cmd_fib_header(struct aac_softstate *, struct aac_cmd *,
311 311 uint16_t);
312 312 static void aac_cmd_fib_rawio(struct aac_softstate *, struct aac_cmd *);
313 313 static void aac_cmd_fib_brw64(struct aac_softstate *, struct aac_cmd *);
314 314 static void aac_cmd_fib_brw(struct aac_softstate *, struct aac_cmd *);
315 315 static void aac_cmd_fib_sync(struct aac_softstate *, struct aac_cmd *);
316 316 static void aac_cmd_fib_scsi32(struct aac_softstate *, struct aac_cmd *);
317 317 static void aac_cmd_fib_scsi64(struct aac_softstate *, struct aac_cmd *);
318 318 static void aac_cmd_fib_startstop(struct aac_softstate *, struct aac_cmd *);
319 319 static void aac_start_waiting_io(struct aac_softstate *);
320 320 static void aac_drain_comp_q(struct aac_softstate *);
321 321 int aac_do_io(struct aac_softstate *, struct aac_cmd *);
322 322 static int aac_sync_fib_slot_bind(struct aac_softstate *, struct aac_cmd *);
323 323 static void aac_sync_fib_slot_release(struct aac_softstate *, struct aac_cmd *);
324 324 static void aac_start_io(struct aac_softstate *, struct aac_cmd *);
325 325 static int aac_do_poll_io(struct aac_softstate *, struct aac_cmd *);
326 326 static int aac_do_sync_io(struct aac_softstate *, struct aac_cmd *);
327 327 static int aac_send_command(struct aac_softstate *, struct aac_slot *);
328 328 static void aac_cmd_timeout(struct aac_softstate *, struct aac_cmd *);
329 329 static int aac_dma_sync_ac(struct aac_cmd *);
330 330 static int aac_shutdown(struct aac_softstate *);
331 331 static int aac_reset_adapter(struct aac_softstate *);
332 332 static int aac_do_quiesce(struct aac_softstate *softs);
333 333 static int aac_do_unquiesce(struct aac_softstate *softs);
334 334 static void aac_unhold_bus(struct aac_softstate *, int);
335 335 static void aac_set_throttle(struct aac_softstate *, struct aac_device *,
336 336 int, int);
337 337
338 338 /*
339 339 * Adapter Initiated FIB handling function
340 340 */
341 341 static void aac_save_aif(struct aac_softstate *, ddi_acc_handle_t,
342 342 struct aac_fib *, int);
343 343 static int aac_handle_aif(struct aac_softstate *, struct aac_aif_command *);
344 344
345 345 /*
346 346 * Event handling related functions
347 347 */
348 348 static void aac_timer(void *);
349 349 static void aac_event_thread(struct aac_softstate *);
350 350 static void aac_event_disp(struct aac_softstate *, int);
351 351
352 352 /*
353 353 * IOCTL interface related functions
354 354 */
355 355 static int aac_open(dev_t *, int, int, cred_t *);
356 356 static int aac_close(dev_t, int, int, cred_t *);
357 357 static int aac_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
358 358 extern int aac_do_ioctl(struct aac_softstate *, dev_t, int, intptr_t, int);
359 359
360 360 /*
361 361 * FMA Prototypes
362 362 */
363 363 static void aac_fm_init(struct aac_softstate *);
364 364 static void aac_fm_fini(struct aac_softstate *);
365 365 static int aac_fm_error_cb(dev_info_t *, ddi_fm_error_t *, const void *);
366 366 int aac_check_acc_handle(ddi_acc_handle_t);
367 367 int aac_check_dma_handle(ddi_dma_handle_t);
368 368 void aac_fm_ereport(struct aac_softstate *, char *);
369 369
370 370 /*
371 371 * Auto enumeration functions
372 372 */
373 373 static dev_info_t *aac_find_child(struct aac_softstate *, uint16_t, uint8_t);
374 374 static int aac_tran_bus_config(dev_info_t *, uint_t, ddi_bus_config_op_t,
375 375 void *, dev_info_t **);
376 376 static int aac_handle_dr(struct aac_softstate *, int, int, int);
377 377
378 378 extern pri_t minclsyspri;
379 379
380 380 #ifdef DEBUG
381 381 /*
382 382 * UART debug output support
383 383 */
384 384
385 385 #define AAC_PRINT_BUFFER_SIZE 512
386 386 #define AAC_PRINT_TIMEOUT 250 /* 1/4 sec. = 250 msec. */
387 387
388 388 #define AAC_FW_DBG_STRLEN_OFFSET 0x00
389 389 #define AAC_FW_DBG_FLAGS_OFFSET 0x04
390 390 #define AAC_FW_DBG_BLED_OFFSET 0x08
391 391
392 392 static int aac_get_fw_debug_buffer(struct aac_softstate *);
393 393 static void aac_print_scmd(struct aac_softstate *, struct aac_cmd *);
394 394 static void aac_print_aif(struct aac_softstate *, struct aac_aif_command *);
395 395
396 396 static char aac_prt_buf[AAC_PRINT_BUFFER_SIZE];
397 397 static char aac_fmt[] = " %s";
398 398 static char aac_fmt_header[] = " %s.%d: %s";
399 399 static kmutex_t aac_prt_mutex;
400 400
401 401 /*
402 402 * Debug flags to be put into the softstate flags field
403 403 * when initialized
404 404 */
405 405 uint32_t aac_debug_flags =
406 406 /* AACDB_FLAGS_KERNEL_PRINT | */
407 407 /* AACDB_FLAGS_FW_PRINT | */
408 408 /* AACDB_FLAGS_MISC | */
409 409 /* AACDB_FLAGS_FUNC1 | */
410 410 /* AACDB_FLAGS_FUNC2 | */
411 411 /* AACDB_FLAGS_SCMD | */
412 412 /* AACDB_FLAGS_AIF | */
413 413 /* AACDB_FLAGS_FIB | */
414 414 /* AACDB_FLAGS_IOCTL | */
415 415 0;
416 416 uint32_t aac_debug_fib_flags =
417 417 /* AACDB_FLAGS_FIB_RW | */
418 418 /* AACDB_FLAGS_FIB_IOCTL | */
419 419 /* AACDB_FLAGS_FIB_SRB | */
420 420 /* AACDB_FLAGS_FIB_SYNC | */
421 421 /* AACDB_FLAGS_FIB_HEADER | */
422 422 /* AACDB_FLAGS_FIB_TIMEOUT | */
423 423 0;
424 424
425 425 #endif /* DEBUG */
426 426
427 427 static struct cb_ops aac_cb_ops = {
428 428 aac_open, /* open */
429 429 aac_close, /* close */
430 430 nodev, /* strategy */
431 431 nodev, /* print */
432 432 nodev, /* dump */
433 433 nodev, /* read */
434 434 nodev, /* write */
435 435 aac_ioctl, /* ioctl */
436 436 nodev, /* devmap */
437 437 nodev, /* mmap */
438 438 nodev, /* segmap */
439 439 nochpoll, /* poll */
440 440 ddi_prop_op, /* cb_prop_op */
441 441 NULL, /* streamtab */
442 442 D_64BIT | D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
443 443 CB_REV, /* cb_rev */
444 444 nodev, /* async I/O read entry point */
445 445 nodev /* async I/O write entry point */
446 446 };
447 447
448 448 static struct dev_ops aac_dev_ops = {
449 449 DEVO_REV,
450 450 0,
451 451 aac_getinfo,
452 452 nulldev,
453 453 nulldev,
454 454 aac_attach,
455 455 aac_detach,
456 456 aac_reset,
457 457 &aac_cb_ops,
458 458 NULL,
459 459 NULL,
460 460 aac_quiesce,
461 461 };
462 462
463 463 static struct modldrv aac_modldrv = {
464 464 &mod_driverops,
465 465 "AAC Driver " AAC_DRIVER_VERSION,
466 466 &aac_dev_ops,
467 467 };
468 468
469 469 static struct modlinkage aac_modlinkage = {
470 470 MODREV_1,
471 471 &aac_modldrv,
472 472 NULL
473 473 };
474 474
475 475 static struct aac_softstate *aac_softstatep;
476 476
477 477 /*
478 478 * Supported card list
479 479 * ordered by vendor id, subvendor id, subdevice id, and device id
480 480 */
481 481 static struct aac_card_type aac_cards[] = {
482 482 {0x1028, 0x1, 0x1028, 0x1, AAC_HWIF_I960RX,
483 483 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
484 484 "Dell", "PERC 3/Di"},
485 485 {0x1028, 0x2, 0x1028, 0x2, AAC_HWIF_I960RX,
486 486 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
487 487 "Dell", "PERC 3/Di"},
488 488 {0x1028, 0x3, 0x1028, 0x3, AAC_HWIF_I960RX,
489 489 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
490 490 "Dell", "PERC 3/Si"},
491 491 {0x1028, 0x8, 0x1028, 0xcf, AAC_HWIF_I960RX,
492 492 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
493 493 "Dell", "PERC 3/Di"},
494 494 {0x1028, 0x4, 0x1028, 0xd0, AAC_HWIF_I960RX,
495 495 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
496 496 "Dell", "PERC 3/Si"},
497 497 {0x1028, 0x2, 0x1028, 0xd1, AAC_HWIF_I960RX,
498 498 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
499 499 "Dell", "PERC 3/Di"},
500 500 {0x1028, 0x2, 0x1028, 0xd9, AAC_HWIF_I960RX,
501 501 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
502 502 "Dell", "PERC 3/Di"},
503 503 {0x1028, 0xa, 0x1028, 0x106, AAC_HWIF_I960RX,
504 504 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
505 505 "Dell", "PERC 3/Di"},
506 506 {0x1028, 0xa, 0x1028, 0x11b, AAC_HWIF_I960RX,
507 507 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
508 508 "Dell", "PERC 3/Di"},
509 509 {0x1028, 0xa, 0x1028, 0x121, AAC_HWIF_I960RX,
510 510 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG, AAC_TYPE_SCSI,
511 511 "Dell", "PERC 3/Di"},
512 512 {0x9005, 0x285, 0x1028, 0x287, AAC_HWIF_I960RX,
513 513 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
514 514 "Dell", "PERC 320/DC"},
515 515 {0x9005, 0x285, 0x1028, 0x291, AAC_HWIF_I960RX,
516 516 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Dell", "CERC SR2"},
517 517
518 518 {0x9005, 0x285, 0x1014, 0x2f2, AAC_HWIF_I960RX,
519 519 0, AAC_TYPE_SCSI, "IBM", "ServeRAID 8i"},
520 520 {0x9005, 0x285, 0x1014, 0x34d, AAC_HWIF_I960RX,
521 521 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8s"},
522 522 {0x9005, 0x286, 0x1014, 0x9580, AAC_HWIF_RKT,
523 523 0, AAC_TYPE_SAS, "IBM", "ServeRAID 8k"},
524 524
525 525 {0x9005, 0x285, 0x103c, 0x3227, AAC_HWIF_I960RX,
526 526 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
527 527 {0x9005, 0x285, 0xe11, 0x295, AAC_HWIF_I960RX,
528 528 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2610SA"},
529 529
530 530 {0x9005, 0x285, 0x9005, 0x285, AAC_HWIF_I960RX,
531 531 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
532 532 "Adaptec", "2200S"},
533 533 {0x9005, 0x285, 0x9005, 0x286, AAC_HWIF_I960RX,
534 534 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
535 535 "Adaptec", "2120S"},
536 536 {0x9005, 0x285, 0x9005, 0x287, AAC_HWIF_I960RX,
537 537 AAC_FLAGS_NO4GB | AAC_FLAGS_34SG | AAC_FLAGS_256FIBS, AAC_TYPE_SCSI,
538 538 "Adaptec", "2200S"},
539 539 {0x9005, 0x285, 0x9005, 0x288, AAC_HWIF_I960RX,
540 540 0, AAC_TYPE_SCSI, "Adaptec", "3230S"},
541 541 {0x9005, 0x285, 0x9005, 0x289, AAC_HWIF_I960RX,
542 542 0, AAC_TYPE_SCSI, "Adaptec", "3240S"},
543 543 {0x9005, 0x285, 0x9005, 0x28a, AAC_HWIF_I960RX,
544 544 0, AAC_TYPE_SCSI, "Adaptec", "2020ZCR"},
545 545 {0x9005, 0x285, 0x9005, 0x28b, AAC_HWIF_I960RX,
546 546 0, AAC_TYPE_SCSI, "Adaptec", "2025ZCR"},
547 547 {0x9005, 0x286, 0x9005, 0x28c, AAC_HWIF_RKT,
548 548 0, AAC_TYPE_SCSI, "Adaptec", "2230S"},
549 549 {0x9005, 0x286, 0x9005, 0x28d, AAC_HWIF_RKT,
550 550 0, AAC_TYPE_SCSI, "Adaptec", "2130S"},
551 551 {0x9005, 0x285, 0x9005, 0x28e, AAC_HWIF_I960RX,
552 552 0, AAC_TYPE_SATA, "Adaptec", "2020SA"},
553 553 {0x9005, 0x285, 0x9005, 0x28f, AAC_HWIF_I960RX,
554 554 0, AAC_TYPE_SATA, "Adaptec", "2025SA"},
555 555 {0x9005, 0x285, 0x9005, 0x290, AAC_HWIF_I960RX,
556 556 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2410SA"},
557 557 {0x9005, 0x285, 0x9005, 0x292, AAC_HWIF_I960RX,
558 558 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "2810SA"},
559 559 {0x9005, 0x285, 0x9005, 0x293, AAC_HWIF_I960RX,
560 560 AAC_FLAGS_17SG, AAC_TYPE_SATA, "Adaptec", "21610SA"},
561 561 {0x9005, 0x285, 0x9005, 0x294, AAC_HWIF_I960RX,
562 562 0, AAC_TYPE_SATA, "Adaptec", "2026ZCR"},
563 563 {0x9005, 0x285, 0x9005, 0x296, AAC_HWIF_I960RX,
564 564 0, AAC_TYPE_SCSI, "Adaptec", "2240S"},
565 565 {0x9005, 0x285, 0x9005, 0x297, AAC_HWIF_I960RX,
566 566 0, AAC_TYPE_SAS, "Adaptec", "4005SAS"},
567 567 {0x9005, 0x285, 0x9005, 0x298, AAC_HWIF_I960RX,
568 568 0, AAC_TYPE_SAS, "Adaptec", "RAID 4000"},
569 569 {0x9005, 0x285, 0x9005, 0x299, AAC_HWIF_I960RX,
570 570 0, AAC_TYPE_SAS, "Adaptec", "4800SAS"},
571 571 {0x9005, 0x285, 0x9005, 0x29a, AAC_HWIF_I960RX,
572 572 0, AAC_TYPE_SAS, "Adaptec", "4805SAS"},
573 573 {0x9005, 0x286, 0x9005, 0x29b, AAC_HWIF_RKT,
574 574 0, AAC_TYPE_SATA, "Adaptec", "2820SA"},
575 575 {0x9005, 0x286, 0x9005, 0x29c, AAC_HWIF_RKT,
576 576 0, AAC_TYPE_SATA, "Adaptec", "2620SA"},
577 577 {0x9005, 0x286, 0x9005, 0x29d, AAC_HWIF_RKT,
578 578 0, AAC_TYPE_SATA, "Adaptec", "2420SA"},
579 579 {0x9005, 0x286, 0x9005, 0x29e, AAC_HWIF_RKT,
580 580 0, AAC_TYPE_SATA, "ICP", "9024RO"},
581 581 {0x9005, 0x286, 0x9005, 0x29f, AAC_HWIF_RKT,
582 582 0, AAC_TYPE_SATA, "ICP", "9014RO"},
583 583 {0x9005, 0x286, 0x9005, 0x2a0, AAC_HWIF_RKT,
584 584 0, AAC_TYPE_SATA, "ICP", "9047MA"},
585 585 {0x9005, 0x286, 0x9005, 0x2a1, AAC_HWIF_RKT,
586 586 0, AAC_TYPE_SATA, "ICP", "9087MA"},
587 587 {0x9005, 0x285, 0x9005, 0x2a4, AAC_HWIF_I960RX,
588 588 0, AAC_TYPE_SAS, "ICP", "9085LI"},
589 589 {0x9005, 0x285, 0x9005, 0x2a5, AAC_HWIF_I960RX,
590 590 0, AAC_TYPE_SAS, "ICP", "5085BR"},
591 591 {0x9005, 0x286, 0x9005, 0x2a6, AAC_HWIF_RKT,
592 592 0, AAC_TYPE_SATA, "ICP", "9067MA"},
593 593 {0x9005, 0x285, 0x9005, 0x2b5, AAC_HWIF_I960RX,
594 594 0, AAC_TYPE_SAS, "Adaptec", "RAID 5445"},
595 595 {0x9005, 0x285, 0x9005, 0x2b6, AAC_HWIF_I960RX,
596 596 0, AAC_TYPE_SAS, "Adaptec", "RAID 5805"},
597 597 {0x9005, 0x285, 0x9005, 0x2b7, AAC_HWIF_I960RX,
598 598 0, AAC_TYPE_SAS, "Adaptec", "RAID 5085"},
599 599 {0x9005, 0x285, 0x9005, 0x2b8, AAC_HWIF_I960RX,
600 600 0, AAC_TYPE_SAS, "ICP", "RAID ICP5445SL"},
601 601 {0x9005, 0x285, 0x9005, 0x2b9, AAC_HWIF_I960RX,
602 602 0, AAC_TYPE_SAS, "ICP", "RAID ICP5085SL"},
603 603 {0x9005, 0x285, 0x9005, 0x2ba, AAC_HWIF_I960RX,
604 604 0, AAC_TYPE_SAS, "ICP", "RAID ICP5805SL"},
605 605
606 606 {0, 0, 0, 0, AAC_HWIF_UNKNOWN,
607 607 0, AAC_TYPE_UNKNOWN, "Unknown", "AAC card"},
608 608 };
609 609
610 610 /*
611 611 * Hardware access functions for i960 based cards
612 612 */
613 613 static struct aac_interface aac_rx_interface = {
614 614 aac_rx_get_fwstatus,
615 615 aac_rx_get_mailbox,
616 616 aac_rx_set_mailbox
617 617 };
618 618
619 619 /*
620 620 * Hardware access functions for Rocket based cards
621 621 */
622 622 static struct aac_interface aac_rkt_interface = {
623 623 aac_rkt_get_fwstatus,
624 624 aac_rkt_get_mailbox,
625 625 aac_rkt_set_mailbox
626 626 };
627 627
628 628 ddi_device_acc_attr_t aac_acc_attr = {
629 629 DDI_DEVICE_ATTR_V1,
630 630 DDI_STRUCTURE_LE_ACC,
631 631 DDI_STRICTORDER_ACC,
632 632 DDI_DEFAULT_ACC
633 633 };
634 634
635 635 static struct {
636 636 int size;
637 637 int notify;
638 638 } aac_qinfo[] = {
639 639 {AAC_HOST_NORM_CMD_ENTRIES, AAC_DB_COMMAND_NOT_FULL},
640 640 {AAC_HOST_HIGH_CMD_ENTRIES, 0},
641 641 {AAC_ADAP_NORM_CMD_ENTRIES, AAC_DB_COMMAND_READY},
642 642 {AAC_ADAP_HIGH_CMD_ENTRIES, 0},
643 643 {AAC_HOST_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_NOT_FULL},
644 644 {AAC_HOST_HIGH_RESP_ENTRIES, 0},
645 645 {AAC_ADAP_NORM_RESP_ENTRIES, AAC_DB_RESPONSE_READY},
646 646 {AAC_ADAP_HIGH_RESP_ENTRIES, 0}
647 647 };
648 648
649 649 /*
650 650 * Default aac dma attributes
651 651 */
652 652 static ddi_dma_attr_t aac_dma_attr = {
653 653 DMA_ATTR_V0,
654 654 0, /* lowest usable address */
655 655 0xffffffffull, /* high DMA address range */
656 656 0xffffffffull, /* DMA counter register */
657 657 AAC_DMA_ALIGN, /* DMA address alignment */
658 658 1, /* DMA burstsizes */
659 659 1, /* min effective DMA size */
660 660 0xffffffffull, /* max DMA xfer size */
661 661 0xffffffffull, /* segment boundary */
662 662 1, /* s/g list length */
663 663 AAC_BLK_SIZE, /* granularity of device */
664 664 0 /* DMA transfer flags */
665 665 };
666 666
667 667 static int aac_tick = AAC_DEFAULT_TICK; /* tick for the internal timer */
668 668 static uint32_t aac_timebase = 0; /* internal timer in seconds */
669 669
670 670 /*
671 671 * Warlock directives
672 672 *
673 673 * Different variables with the same types have to be protected by the
674 674 * same mutex; otherwise, warlock will complain with "variables don't
675 675 * seem to be protected consistently". For example,
676 676 * aac_softstate::{q_wait, q_comp} are of type aac_cmd_queue, and protected
677 677 * by aac_softstate::{io_lock, q_comp_mutex} respectively. We have to
678 678 * declare them as protected explicitly at aac_cmd_dequeue().
679 679 */
680 680 _NOTE(SCHEME_PROTECTS_DATA("unique per pkt", scsi_pkt scsi_cdb scsi_status \
681 681 scsi_arq_status scsi_descr_sense_hdr scsi_information_sense_descr \
682 682 mode_format mode_geometry mode_header aac_cmd))
683 683 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_cmd", aac_fib ddi_dma_cookie_t \
684 684 aac_sge))
685 685 _NOTE(SCHEME_PROTECTS_DATA("unique per aac_fib", aac_blockread aac_blockwrite \
686 686 aac_blockread64 aac_raw_io aac_sg_entry aac_sg_entry64 aac_sg_entryraw \
687 687 aac_sg_table aac_srb))
688 688 _NOTE(SCHEME_PROTECTS_DATA("unique to sync fib and cdb", scsi_inquiry))
689 689 _NOTE(SCHEME_PROTECTS_DATA("stable data", scsi_device scsi_address))
690 690 _NOTE(SCHEME_PROTECTS_DATA("unique to scsi_transport", buf))
691 691
692 692 int
693 693 _init(void)
694 694 {
695 695 int rval = 0;
696 696
697 697 #ifdef DEBUG
698 698 mutex_init(&aac_prt_mutex, NULL, MUTEX_DRIVER, NULL);
699 699 #endif
700 700 DBCALLED(NULL, 1);
701 701
702 702 if ((rval = ddi_soft_state_init((void *)&aac_softstatep,
703 703 sizeof (struct aac_softstate), 0)) != 0)
704 704 goto error;
705 705
706 706 if ((rval = scsi_hba_init(&aac_modlinkage)) != 0) {
707 707 ddi_soft_state_fini((void *)&aac_softstatep);
708 708 goto error;
709 709 }
710 710
711 711 if ((rval = mod_install(&aac_modlinkage)) != 0) {
712 712 ddi_soft_state_fini((void *)&aac_softstatep);
713 713 scsi_hba_fini(&aac_modlinkage);
714 714 goto error;
715 715 }
716 716 return (rval);
717 717
718 718 error:
719 719 AACDB_PRINT(NULL, CE_WARN, "Mod init error!");
720 720 #ifdef DEBUG
721 721 mutex_destroy(&aac_prt_mutex);
722 722 #endif
723 723 return (rval);
724 724 }
725 725
726 726 int
727 727 _info(struct modinfo *modinfop)
728 728 {
729 729 DBCALLED(NULL, 1);
730 730 return (mod_info(&aac_modlinkage, modinfop));
731 731 }
732 732
733 733 /*
734 734 * An HBA driver cannot be unloaded unless you reboot,
735 735 * so this function will be of no use.
736 736 */
737 737 int
738 738 _fini(void)
739 739 {
740 740 int rval;
741 741
742 742 DBCALLED(NULL, 1);
743 743
744 744 if ((rval = mod_remove(&aac_modlinkage)) != 0)
745 745 goto error;
746 746
747 747 scsi_hba_fini(&aac_modlinkage);
748 748 ddi_soft_state_fini((void *)&aac_softstatep);
749 749 #ifdef DEBUG
750 750 mutex_destroy(&aac_prt_mutex);
751 751 #endif
752 752 return (0);
753 753
754 754 error:
755 755 AACDB_PRINT(NULL, CE_WARN, "AAC is busy, cannot unload!");
756 756 return (rval);
757 757 }
758 758
759 759 static int
760 760 aac_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
761 761 {
762 762 int instance, i;
763 763 struct aac_softstate *softs = NULL;
764 764 int attach_state = 0;
765 765 char *data;
766 766
767 767 DBCALLED(NULL, 1);
768 768
769 769 switch (cmd) {
770 770 case DDI_ATTACH:
771 771 break;
772 772 case DDI_RESUME:
773 773 return (DDI_FAILURE);
774 774 default:
775 775 return (DDI_FAILURE);
776 776 }
777 777
778 778 instance = ddi_get_instance(dip);
779 779
780 780 /* Get soft state */
781 781 if (ddi_soft_state_zalloc(aac_softstatep, instance) != DDI_SUCCESS) {
782 782 AACDB_PRINT(softs, CE_WARN, "Cannot alloc soft state");
783 783 goto error;
784 784 }
785 785 softs = ddi_get_soft_state(aac_softstatep, instance);
786 786 attach_state |= AAC_ATTACH_SOFTSTATE_ALLOCED;
787 787
788 788 softs->instance = instance;
789 789 softs->devinfo_p = dip;
790 790 softs->buf_dma_attr = softs->addr_dma_attr = aac_dma_attr;
791 791 softs->addr_dma_attr.dma_attr_granular = 1;
792 792 softs->acc_attr = aac_acc_attr;
793 793 softs->reg_attr = aac_acc_attr;
794 794 softs->card = AAC_UNKNOWN_CARD;
795 795 #ifdef DEBUG
796 796 softs->debug_flags = aac_debug_flags;
797 797 softs->debug_fib_flags = aac_debug_fib_flags;
798 798 #endif
799 799
800 800 /* Initialize FMA */
801 801 aac_fm_init(softs);
802 802
803 803 /* Check the card type */
804 804 if (aac_check_card_type(softs) == AACERR) {
805 805 AACDB_PRINT(softs, CE_WARN, "Card not supported");
806 806 goto error;
807 807 }
808 808 /* We have found the right card and everything is OK */
809 809 attach_state |= AAC_ATTACH_CARD_DETECTED;
810 810
811 811 /* Map PCI mem space */
812 812 if (ddi_regs_map_setup(dip, 1,
813 813 (caddr_t *)&softs->pci_mem_base_vaddr, 0,
814 814 softs->map_size_min, &softs->reg_attr,
815 815 &softs->pci_mem_handle) != DDI_SUCCESS)
816 816 goto error;
817 817
818 818 softs->map_size = softs->map_size_min;
819 819 attach_state |= AAC_ATTACH_PCI_MEM_MAPPED;
820 820
821 821 AAC_DISABLE_INTR(softs);
822 822
823 823 /* Init mutexes and condvars */
824 824 mutex_init(&softs->io_lock, NULL, MUTEX_DRIVER,
825 825 DDI_INTR_PRI(softs->intr_pri));
826 826 mutex_init(&softs->q_comp_mutex, NULL, MUTEX_DRIVER,
827 827 DDI_INTR_PRI(softs->intr_pri));
828 828 mutex_init(&softs->time_mutex, NULL, MUTEX_DRIVER,
829 829 DDI_INTR_PRI(softs->intr_pri));
830 830 mutex_init(&softs->ev_lock, NULL, MUTEX_DRIVER,
831 831 DDI_INTR_PRI(softs->intr_pri));
832 832 mutex_init(&softs->aifq_mutex, NULL,
833 833 MUTEX_DRIVER, DDI_INTR_PRI(softs->intr_pri));
834 834 cv_init(&softs->event, NULL, CV_DRIVER, NULL);
835 835 cv_init(&softs->sync_fib_cv, NULL, CV_DRIVER, NULL);
836 836 cv_init(&softs->drain_cv, NULL, CV_DRIVER, NULL);
837 837 cv_init(&softs->event_wait_cv, NULL, CV_DRIVER, NULL);
838 838 cv_init(&softs->event_disp_cv, NULL, CV_DRIVER, NULL);
839 839 cv_init(&softs->aifq_cv, NULL, CV_DRIVER, NULL);
840 840 attach_state |= AAC_ATTACH_KMUTEX_INITED;
841 841
842 842 /* Init the cmd queues */
843 843 for (i = 0; i < AAC_CMDQ_NUM; i++)
844 844 aac_cmd_initq(&softs->q_wait[i]);
845 845 aac_cmd_initq(&softs->q_busy);
846 846 aac_cmd_initq(&softs->q_comp);
847 847
848 848 /* Check for legacy device naming support */
849 849 softs->legacy = 1; /* default to use legacy name */
850 850 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
851 851 "legacy-name-enable", &data) == DDI_SUCCESS)) {
852 852 if (strcmp(data, "no") == 0) {
853 853 AACDB_PRINT(softs, CE_NOTE, "legacy-name disabled");
854 854 softs->legacy = 0;
855 855 }
856 856 ddi_prop_free(data);
857 857 }
858 858
859 859 /*
860 860 * Everything has been set up by this point,
861 861 * so we can do the common attach.
862 862 */
863 863 mutex_enter(&softs->io_lock);
864 864 if (aac_common_attach(softs) == AACERR) {
865 865 mutex_exit(&softs->io_lock);
866 866 goto error;
867 867 }
868 868 mutex_exit(&softs->io_lock);
869 869 attach_state |= AAC_ATTACH_COMM_SPACE_SETUP;
870 870
871 871 /* Check for buf breakup support */
872 872 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, dip, 0,
873 873 "breakup-enable", &data) == DDI_SUCCESS)) {
874 874 if (strcmp(data, "yes") == 0) {
875 875 AACDB_PRINT(softs, CE_NOTE, "buf breakup enabled");
876 876 softs->flags |= AAC_FLAGS_BRKUP;
877 877 }
878 878 ddi_prop_free(data);
879 879 }
880 880 softs->dma_max = softs->buf_dma_attr.dma_attr_maxxfer;
881 881 if (softs->flags & AAC_FLAGS_BRKUP) {
882 882 softs->dma_max = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
883 883 DDI_PROP_DONTPASS, "dma-max", softs->dma_max);
884 884 }
885 885
886 886 if (aac_hba_setup(softs) != AACOK)
887 887 goto error;
888 888 attach_state |= AAC_ATTACH_SCSI_TRAN_SETUP;
889 889
890 890 /* Create devctl/scsi nodes for cfgadm */
891 891 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
892 892 INST2DEVCTL(instance), DDI_NT_SCSI_NEXUS, 0) != DDI_SUCCESS) {
893 893 AACDB_PRINT(softs, CE_WARN, "failed to create devctl node");
894 894 goto error;
895 895 }
896 896 attach_state |= AAC_ATTACH_CREATE_DEVCTL;
897 897
898 898 if (ddi_create_minor_node(dip, "scsi", S_IFCHR, INST2SCSI(instance),
899 899 DDI_NT_SCSI_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
900 900 AACDB_PRINT(softs, CE_WARN, "failed to create scsi node");
901 901 goto error;
902 902 }
903 903 attach_state |= AAC_ATTACH_CREATE_SCSI;
904 904
905 905 /* Create aac node for app. to issue ioctls */
906 906 if (ddi_create_minor_node(dip, "aac", S_IFCHR, INST2AAC(instance),
907 907 DDI_PSEUDO, 0) != DDI_SUCCESS) {
908 908 AACDB_PRINT(softs, CE_WARN, "failed to create aac node");
909 909 goto error;
910 910 }
911 911
912 912 /* Common attach is OK, so we are attached! */
913 913 softs->state |= AAC_STATE_RUN;
914 914
915 915 /* Create event thread */
916 916 softs->fibctx_p = &softs->aifctx;
917 917 if ((softs->event_thread = thread_create(NULL, 0, aac_event_thread,
918 918 softs, 0, &p0, TS_RUN, minclsyspri)) == NULL) {
919 919 AACDB_PRINT(softs, CE_WARN, "aif thread create failed");
920 920 softs->state &= ~AAC_STATE_RUN;
921 921 goto error;
922 922 }
923 923
924 924 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
925 925
926 926 /* Create a thread for command timeout */
927 927 softs->timeout_id = timeout(aac_timer, (void *)softs,
928 928 (aac_tick * drv_usectohz(1000000)));
929 929
930 930 /* Common attach is OK, so we are attached! */
931 931 ddi_report_dev(dip);
932 932 AACDB_PRINT(softs, CE_NOTE, "aac attached ok");
933 933 return (DDI_SUCCESS);
934 934
935 935 error:
936 936 if (attach_state & AAC_ATTACH_CREATE_SCSI)
937 937 ddi_remove_minor_node(dip, "scsi");
938 938 if (attach_state & AAC_ATTACH_CREATE_DEVCTL)
939 939 ddi_remove_minor_node(dip, "devctl");
940 940 if (attach_state & AAC_ATTACH_COMM_SPACE_SETUP)
941 941 aac_common_detach(softs);
942 942 if (attach_state & AAC_ATTACH_SCSI_TRAN_SETUP) {
943 943 (void) scsi_hba_detach(dip);
944 944 scsi_hba_tran_free(AAC_DIP2TRAN(dip));
945 945 }
946 946 if (attach_state & AAC_ATTACH_KMUTEX_INITED) {
947 947 mutex_destroy(&softs->io_lock);
948 948 mutex_destroy(&softs->q_comp_mutex);
949 949 mutex_destroy(&softs->time_mutex);
950 950 mutex_destroy(&softs->ev_lock);
951 951 mutex_destroy(&softs->aifq_mutex);
952 952 cv_destroy(&softs->event);
953 953 cv_destroy(&softs->sync_fib_cv);
954 954 cv_destroy(&softs->drain_cv);
955 955 cv_destroy(&softs->event_wait_cv);
956 956 cv_destroy(&softs->event_disp_cv);
957 957 cv_destroy(&softs->aifq_cv);
958 958 }
959 959 if (attach_state & AAC_ATTACH_PCI_MEM_MAPPED)
960 960 ddi_regs_map_free(&softs->pci_mem_handle);
961 961 aac_fm_fini(softs);
962 962 if (attach_state & AAC_ATTACH_CARD_DETECTED)
963 963 softs->card = AACERR;
964 964 if (attach_state & AAC_ATTACH_SOFTSTATE_ALLOCED)
965 965 ddi_soft_state_free(aac_softstatep, instance);
966 966 return (DDI_FAILURE);
967 967 }
968 968
969 969 static int
970 970 aac_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
971 971 {
972 972 scsi_hba_tran_t *tran = AAC_DIP2TRAN(dip);
973 973 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
974 974
975 975 DBCALLED(softs, 1);
976 976
977 977 switch (cmd) {
978 978 case DDI_DETACH:
979 979 break;
980 980 case DDI_SUSPEND:
981 981 return (DDI_FAILURE);
982 982 default:
983 983 return (DDI_FAILURE);
984 984 }
985 985
986 986 mutex_enter(&softs->io_lock);
987 987 AAC_DISABLE_INTR(softs);
988 988 softs->state = AAC_STATE_STOPPED;
989 989
990 990 ddi_remove_minor_node(dip, "aac");
991 991 ddi_remove_minor_node(dip, "scsi");
992 992 ddi_remove_minor_node(dip, "devctl");
993 993 mutex_exit(&softs->io_lock);
994 994
995 995 aac_common_detach(softs);
996 996
997 997 mutex_enter(&softs->io_lock);
998 998 (void) scsi_hba_detach(dip);
999 999 scsi_hba_tran_free(tran);
1000 1000 mutex_exit(&softs->io_lock);
1001 1001
1002 1002 /* Stop timer */
1003 1003 mutex_enter(&softs->time_mutex);
1004 1004 if (softs->timeout_id) {
1005 1005 timeout_id_t tid = softs->timeout_id;
1006 1006 softs->timeout_id = 0;
1007 1007
1008 1008 mutex_exit(&softs->time_mutex);
1009 1009 (void) untimeout(tid);
1010 1010 mutex_enter(&softs->time_mutex);
1011 1011 }
1012 1012 mutex_exit(&softs->time_mutex);
1013 1013
1014 1014 /* Destroy event thread */
1015 1015 mutex_enter(&softs->ev_lock);
1016 1016 cv_signal(&softs->event_disp_cv);
1017 1017 cv_wait(&softs->event_wait_cv, &softs->ev_lock);
1018 1018 mutex_exit(&softs->ev_lock);
1019 1019
1020 1020 cv_destroy(&softs->aifq_cv);
1021 1021 cv_destroy(&softs->event_disp_cv);
1022 1022 cv_destroy(&softs->event_wait_cv);
1023 1023 cv_destroy(&softs->drain_cv);
1024 1024 cv_destroy(&softs->sync_fib_cv);
1025 1025 cv_destroy(&softs->event);
1026 1026 mutex_destroy(&softs->aifq_mutex);
1027 1027 mutex_destroy(&softs->ev_lock);
1028 1028 mutex_destroy(&softs->time_mutex);
1029 1029 mutex_destroy(&softs->q_comp_mutex);
1030 1030 mutex_destroy(&softs->io_lock);
1031 1031
1032 1032 ddi_regs_map_free(&softs->pci_mem_handle);
1033 1033 aac_fm_fini(softs);
1034 1034 softs->hwif = AAC_HWIF_UNKNOWN;
1035 1035 softs->card = AAC_UNKNOWN_CARD;
1036 1036 ddi_soft_state_free(aac_softstatep, ddi_get_instance(dip));
1037 1037
1038 1038 return (DDI_SUCCESS);
1039 1039 }
1040 1040
1041 1041 /*ARGSUSED*/
1042 1042 static int
1043 1043 aac_reset(dev_info_t *dip, ddi_reset_cmd_t cmd)
1044 1044 {
1045 1045 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1046 1046
1047 1047 DBCALLED(softs, 1);
1048 1048
1049 1049 mutex_enter(&softs->io_lock);
1050 1050 AAC_DISABLE_INTR(softs);
1051 1051 (void) aac_shutdown(softs);
1052 1052 mutex_exit(&softs->io_lock);
1053 1053
1054 1054 return (DDI_SUCCESS);
1055 1055 }
1056 1056
1057 1057 /*
1058 1058 * quiesce(9E) entry point.
1059 1059 *
1060 1060 * This function is called when the system is single-threaded at high
1061 1061 * PIL with preemption disabled. Therefore, this function must not
1062 1062 * block.
1063 1063 *
1064 1064 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
1065 1065 * DDI_FAILURE indicates an error condition and should almost never happen.
1066 1066 */
1067 1067 static int
1068 1068 aac_quiesce(dev_info_t *dip)
1069 1069 {
1070 1070 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
1071 1071
1072 1072 if (softs == NULL)
1073 1073 return (DDI_FAILURE);
1074 1074
1075 1075 _NOTE(ASSUMING_PROTECTED(softs->state))
1076 1076 AAC_DISABLE_INTR(softs);
1077 1077
1078 1078 return (DDI_SUCCESS);
1079 1079 }
1080 1080
1081 1081 /* ARGSUSED */
1082 1082 static int
1083 1083 aac_getinfo(dev_info_t *self, ddi_info_cmd_t infocmd, void *arg,
1084 1084 void **result)
1085 1085 {
1086 1086 int error = DDI_SUCCESS;
1087 1087
1088 1088 switch (infocmd) {
1089 1089 case DDI_INFO_DEVT2INSTANCE:
1090 1090 *result = (void *)(intptr_t)(MINOR2INST(getminor((dev_t)arg)));
1091 1091 break;
1092 1092 default:
1093 1093 error = DDI_FAILURE;
1094 1094 }
1095 1095 return (error);
1096 1096 }
1097 1097
1098 1098 /*
1099 1099 * Bring the controller down to a dormant state and detach all child devices.
1100 1100 * This function is called before detach or system shutdown.
1101 1101 * Note: we can assume that the q_wait on the controller is empty, as we
1102 1102 * won't allow shutdown if any device is open.
1103 1103 */
1104 1104 static int
1105 1105 aac_shutdown(struct aac_softstate *softs)
1106 1106 {
1107 1107 ddi_acc_handle_t acc;
1108 1108 struct aac_close_command *cc;
1109 1109 int rval;
1110 1110
1111 1111 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
1112 1112 acc = softs->sync_ac.slotp->fib_acc_handle;
1113 1113
1114 1114 cc = (struct aac_close_command *)&softs->sync_ac.slotp->fibp->data[0];
1115 1115
1116 1116 ddi_put32(acc, &cc->Command, VM_CloseAll);
1117 1117 ddi_put32(acc, &cc->ContainerId, 0xfffffffful);
1118 1118
1119 1119 /* Flush all caches, set FW to write through mode */
1120 1120 rval = aac_sync_fib(softs, ContainerCommand,
1121 1121 AAC_FIB_SIZEOF(struct aac_close_command));
1122 1122 aac_sync_fib_slot_release(softs, &softs->sync_ac);
1123 1123
1124 1124 AACDB_PRINT(softs, CE_NOTE,
1125 1125 "shutting down aac %s", (rval == AACOK) ? "ok" : "fail");
1126 1126 return (rval);
1127 1127 }
1128 1128
1129 1129 static uint_t
1130 1130 aac_softintr(caddr_t arg)
1131 1131 {
1132 1132 struct aac_softstate *softs = (void *)arg;
1133 1133
1134 1134 if (!AAC_IS_Q_EMPTY(&softs->q_comp)) {
1135 1135 aac_drain_comp_q(softs);
1136 1136 }
1137 1137 return (DDI_INTR_CLAIMED);
1138 1138 }
1139 1139
1140 1140 /*
1141 1141 * Setup auto sense data for pkt
1142 1142 */
1143 1143 static void
1144 1144 aac_set_arq_data(struct scsi_pkt *pkt, uchar_t key,
1145 1145 uchar_t add_code, uchar_t qual_code, uint64_t info)
1146 1146 {
1147 1147 struct scsi_arq_status *arqstat = (void *)(pkt->pkt_scbp);
1148 1148
1149 1149 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
1150 1150 pkt->pkt_state |= STATE_ARQ_DONE;
1151 1151
1152 1152 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1153 1153 arqstat->sts_rqpkt_reason = CMD_CMPLT;
1154 1154 arqstat->sts_rqpkt_resid = 0;
1155 1155 arqstat->sts_rqpkt_state =
1156 1156 STATE_GOT_BUS |
1157 1157 STATE_GOT_TARGET |
1158 1158 STATE_SENT_CMD |
1159 1159 STATE_XFERRED_DATA;
1160 1160 arqstat->sts_rqpkt_statistics = 0;
1161 1161
1162 1162 if (info <= 0xfffffffful) {
1163 1163 arqstat->sts_sensedata.es_valid = 1;
1164 1164 arqstat->sts_sensedata.es_class = CLASS_EXTENDED_SENSE;
1165 1165 arqstat->sts_sensedata.es_code = CODE_FMT_FIXED_CURRENT;
1166 1166 arqstat->sts_sensedata.es_key = key;
1167 1167 arqstat->sts_sensedata.es_add_code = add_code;
1168 1168 arqstat->sts_sensedata.es_qual_code = qual_code;
1169 1169
1170 1170 arqstat->sts_sensedata.es_info_1 = (info >> 24) & 0xFF;
1171 1171 arqstat->sts_sensedata.es_info_2 = (info >> 16) & 0xFF;
1172 1172 arqstat->sts_sensedata.es_info_3 = (info >> 8) & 0xFF;
1173 1173 arqstat->sts_sensedata.es_info_4 = info & 0xFF;
1174 1174 } else { /* 64-bit LBA */
1175 1175 struct scsi_descr_sense_hdr *dsp;
1176 1176 struct scsi_information_sense_descr *isd;
1177 1177
1178 1178 dsp = (struct scsi_descr_sense_hdr *)&arqstat->sts_sensedata;
1179 1179 dsp->ds_class = CLASS_EXTENDED_SENSE;
1180 1180 dsp->ds_code = CODE_FMT_DESCR_CURRENT;
1181 1181 dsp->ds_key = key;
1182 1182 dsp->ds_add_code = add_code;
1183 1183 dsp->ds_qual_code = qual_code;
1184 1184 dsp->ds_addl_sense_length =
1185 1185 sizeof (struct scsi_information_sense_descr);
1186 1186
1187 1187 isd = (struct scsi_information_sense_descr *)(dsp+1);
1188 1188 isd->isd_descr_type = DESCR_INFORMATION;
1189 1189 isd->isd_valid = 1;
1190 1190 isd->isd_information[0] = (info >> 56) & 0xFF;
1191 1191 isd->isd_information[1] = (info >> 48) & 0xFF;
1192 1192 isd->isd_information[2] = (info >> 40) & 0xFF;
1193 1193 isd->isd_information[3] = (info >> 32) & 0xFF;
1194 1194 isd->isd_information[4] = (info >> 24) & 0xFF;
1195 1195 isd->isd_information[5] = (info >> 16) & 0xFF;
1196 1196 isd->isd_information[6] = (info >> 8) & 0xFF;
1197 1197 isd->isd_information[7] = (info) & 0xFF;
1198 1198 }
1199 1199 }
1200 1200
1201 1201 /*
1202 1202 * Setup auto sense data for HARDWARE ERROR
1203 1203 */
1204 1204 static void
1205 1205 aac_set_arq_data_hwerr(struct aac_cmd *acp)
1206 1206 {
1207 1207 union scsi_cdb *cdbp;
1208 1208 uint64_t err_blkno;
1209 1209
1210 1210 cdbp = (void *)acp->pkt->pkt_cdbp;
1211 1211 err_blkno = AAC_GETGXADDR(acp->cmdlen, cdbp);
1212 1212 aac_set_arq_data(acp->pkt, KEY_HARDWARE_ERROR, 0x00, 0x00, err_blkno);
1213 1213 }
1214 1214
1215 1215 /*
1216 1216 * Send a command to the adapter via the New Comm. interface
1217 1217 */
1218 1218 static int
1219 1219 aac_send_command(struct aac_softstate *softs, struct aac_slot *slotp)
1220 1220 {
1221 1221 uint32_t index, device;
1222 1222
1223 1223 index = PCI_MEM_GET32(softs, AAC_IQUE);
1224 1224 if (index == 0xffffffffUL) {
1225 1225 index = PCI_MEM_GET32(softs, AAC_IQUE);
1226 1226 if (index == 0xffffffffUL)
1227 1227 return (AACERR);
1228 1228 }
1229 1229
1230 1230 device = index;
1231 1231 PCI_MEM_PUT32(softs, device,
1232 1232 (uint32_t)(slotp->fib_phyaddr & 0xfffffffful));
1233 1233 device += 4;
1234 1234 PCI_MEM_PUT32(softs, device, (uint32_t)(slotp->fib_phyaddr >> 32));
1235 1235 device += 4;
1236 1236 PCI_MEM_PUT32(softs, device, slotp->acp->fib_size);
1237 1237 PCI_MEM_PUT32(softs, AAC_IQUE, index);
1238 1238 return (AACOK);
1239 1239 }
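/*
 * Clarifying note (not part of the original source): AAC_IQUE holds a byte
 * offset into the mapped PCI memory where the next free inbound queue entry
 * lives. 0xffffffff means no entry is currently available, so the register
 * is read a second time before giving up. The entry is then filled with the
 * FIB's 64-bit physical address and its size, and writing the offset back
 * to AAC_IQUE posts the command to the firmware.
 */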
1240 1240
1241 1241 static void
1242 1242 aac_end_io(struct aac_softstate *softs, struct aac_cmd *acp)
1243 1243 {
1244 1244 struct aac_device *dvp = acp->dvp;
1245 1245 int q = AAC_CMDQ(acp);
1246 1246
1247 1247 if (acp->slotp) { /* outstanding cmd */
1248 1248 if (!(acp->flags & AAC_CMD_IN_SYNC_SLOT)) {
1249 1249 aac_release_slot(softs, acp->slotp);
1250 1250 acp->slotp = NULL;
1251 1251 }
1252 1252 if (dvp) {
1253 1253 dvp->ncmds[q]--;
1254 1254 if (dvp->throttle[q] == AAC_THROTTLE_DRAIN &&
1255 1255 dvp->ncmds[q] == 0 && q == AAC_CMDQ_ASYNC)
1256 1256 aac_set_throttle(softs, dvp, q,
1257 1257 softs->total_slots);
1258 1258 /*
1259 1259 * Setup auto sense data for UNIT ATTENTION
1260 1260 * Each lun should generate a unit attention
1261 1261 * condition when reset.
1262 1262 * Phys. drives are treated as logical ones
1263 1263 * during error recovery.
1264 1264 */
1265 1265 if (dvp->type == AAC_DEV_LD) {
1266 1266 struct aac_container *ctp =
1267 1267 (struct aac_container *)dvp;
1268 1268 if (ctp->reset == 0)
1269 1269 goto noreset;
1270 1270
1271 1271 AACDB_PRINT(softs, CE_NOTE,
1272 1272 "Unit attention: reset");
1273 1273 ctp->reset = 0;
1274 1274 aac_set_arq_data(acp->pkt, KEY_UNIT_ATTENTION,
1275 1275 0x29, 0x02, 0);
1276 1276 }
1277 1277 }
1278 1278 noreset:
1279 1279 softs->bus_ncmds[q]--;
1280 1280 aac_cmd_delete(&softs->q_busy, acp);
1281 1281 } else { /* cmd in waiting queue */
1282 1282 aac_cmd_delete(&softs->q_wait[q], acp);
1283 1283 }
1284 1284
1285 1285 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR))) { /* async IO */
1286 1286 mutex_enter(&softs->q_comp_mutex);
1287 1287 aac_cmd_enqueue(&softs->q_comp, acp);
1288 1288 mutex_exit(&softs->q_comp_mutex);
1289 1289 } else if (acp->flags & AAC_CMD_NO_CB) { /* sync IO */
1290 1290 cv_broadcast(&softs->event);
1291 1291 }
1292 1292 }
1293 1293
1294 1294 static void
1295 1295 aac_handle_io(struct aac_softstate *softs, int index)
1296 1296 {
1297 1297 struct aac_slot *slotp;
1298 1298 struct aac_cmd *acp;
1299 1299 uint32_t fast;
1300 1300
1301 1301 fast = index & AAC_SENDERADDR_MASK_FAST_RESPONSE;
1302 1302 index >>= 2;
1303 1303
1304 1304 /* Make sure firmware reported index is valid */
1305 1305 ASSERT(index >= 0 && index < softs->total_slots);
1306 1306 slotp = &softs->io_slot[index];
1307 1307 ASSERT(slotp->index == index);
1308 1308 acp = slotp->acp;
1309 1309
1310 1310 if (acp == NULL || acp->slotp != slotp) {
1311 1311 cmn_err(CE_WARN,
1312 1312 "Firmware error: invalid slot index received from FW");
1313 1313 return;
1314 1314 }
1315 1315
1316 1316 acp->flags |= AAC_CMD_CMPLT;
1317 1317 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0, DDI_DMA_SYNC_FORCPU);
1318 1318
1319 1319 if (aac_check_dma_handle(slotp->fib_dma_handle) == DDI_SUCCESS) {
1320 1320 /*
1321 1321 * For fast response IO, the firmware does not return any FIB
1322 1322 * data, so we need to fill in the FIB status and state so that
1323 1323 * FIB users can handle it correctly.
1324 1324 */
1325 1325 if (fast) {
1326 1326 uint32_t state;
1327 1327
1328 1328 state = ddi_get32(slotp->fib_acc_handle,
1329 1329 &slotp->fibp->Header.XferState);
1330 1330 /*
1331 1331 * Update state for CPU not for device, no DMA sync
1332 1332 * needed
1333 1333 */
1334 1334 ddi_put32(slotp->fib_acc_handle,
1335 1335 &slotp->fibp->Header.XferState,
1336 1336 state | AAC_FIBSTATE_DONEADAP);
1337 1337 ddi_put32(slotp->fib_acc_handle,
1338 1338 (void *)&slotp->fibp->data[0], ST_OK);
1339 1339 }
1340 1340
1341 1341 /* Handle completed ac */
1342 1342 acp->ac_comp(softs, acp);
1343 1343 } else {
1344 1344 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1345 1345 acp->flags |= AAC_CMD_ERR;
1346 1346 if (acp->pkt) {
1347 1347 acp->pkt->pkt_reason = CMD_TRAN_ERR;
1348 1348 acp->pkt->pkt_statistics = 0;
1349 1349 }
1350 1350 }
1351 1351 aac_end_io(softs, acp);
1352 1352 }
1353 1353
1354 1354 /*
1355 1355 * Interrupt handler for New Comm. interface
1356 1356 * The New Comm. interface uses a different mechanism for interrupts: there are
1357 1357 * no explicit message queues, and the driver need only access the mapped PCI
1358 1358 * mem space to find the completed FIB or AIF.
1359 1359 */
1360 1360 static int
1361 1361 aac_process_intr_new(struct aac_softstate *softs)
1362 1362 {
1363 1363 uint32_t index;
1364 1364
1365 1365 index = AAC_OUTB_GET(softs);
1366 1366 if (index == 0xfffffffful)
1367 1367 index = AAC_OUTB_GET(softs);
1368 1368 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1369 1369 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1370 1370 return (0);
1371 1371 }
1372 1372 if (index != 0xfffffffful) {
1373 1373 do {
1374 1374 if ((index & AAC_SENDERADDR_MASK_AIF) == 0) {
1375 1375 aac_handle_io(softs, index);
1376 1376 } else if (index != 0xfffffffeul) {
1377 1377 struct aac_fib *fibp; /* FIB in AIF queue */
1378 1378 uint16_t fib_size;
1379 1379
1380 1380 /*
1381 1381 * 0xfffffffe means that the controller wants
1382 1382 * more work; ignore it for now. Otherwise, an
1383 1383 * AIF has been received.
1384 1384 */
1385 1385 index &= ~2;
1386 1386
1387 1387 fibp = (struct aac_fib *)(softs-> \
1388 1388 pci_mem_base_vaddr + index);
1389 1389 fib_size = PCI_MEM_GET16(softs, index + \
1390 1390 offsetof(struct aac_fib, Header.Size));
1391 1391
1392 1392 aac_save_aif(softs, softs->pci_mem_handle,
1393 1393 fibp, fib_size);
1394 1394
1395 1395 /*
1396 1396 * AIF memory is owned by the adapter, so let it
1397 1397 * know that we are done with it.
1398 1398 */
1399 1399 AAC_OUTB_SET(softs, index);
1400 1400 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1401 1401 }
1402 1402
1403 1403 index = AAC_OUTB_GET(softs);
1404 1404 } while (index != 0xfffffffful);
1405 1405
1406 1406 /*
1407 1407 * Process waiting cmds before starting new ones to
1408 1408 * ensure first IOs are serviced first.
1409 1409 */
1410 1410 aac_start_waiting_io(softs);
1411 1411 return (AAC_DB_COMMAND_READY);
1412 1412 } else {
1413 1413 return (0);
1414 1414 }
1415 1415 }
1416 1416
1417 1417 static uint_t
1418 1418 aac_intr_new(caddr_t arg)
1419 1419 {
1420 1420 struct aac_softstate *softs = (void *)arg;
1421 1421 uint_t rval;
1422 1422
1423 1423 mutex_enter(&softs->io_lock);
1424 1424 if (aac_process_intr_new(softs))
1425 1425 rval = DDI_INTR_CLAIMED;
1426 1426 else
1427 1427 rval = DDI_INTR_UNCLAIMED;
1428 1428 mutex_exit(&softs->io_lock);
1429 1429
1430 1430 aac_drain_comp_q(softs);
1431 1431 return (rval);
1432 1432 }
1433 1433
1434 1434 /*
1435 1435 * Interrupt handler for old interface
1436 1436 * Explicit message queues are used to send FIBs to and get completed FIBs from
1437 1437 * the adapter. The driver and adapter maintain the queues in a producer/consumer
1438 1438 * manner. The driver has to query the queues to find the completed FIBs.
1439 1439 */
1440 1440 static int
1441 1441 aac_process_intr_old(struct aac_softstate *softs)
1442 1442 {
1443 1443 uint16_t status;
1444 1444
1445 1445 status = AAC_STATUS_GET(softs);
1446 1446 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
1447 1447 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
1448 1448 return (DDI_INTR_UNCLAIMED);
1449 1449 }
1450 1450 if (status & AAC_DB_RESPONSE_READY) {
1451 1451 int slot_idx;
1452 1452
1453 1453 /* ACK the intr */
1454 1454 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_READY);
1455 1455 (void) AAC_STATUS_GET(softs);
1456 1456 while (aac_fib_dequeue(softs, AAC_HOST_NORM_RESP_Q,
1457 1457 &slot_idx) == AACOK)
1458 1458 aac_handle_io(softs, slot_idx);
1459 1459
1460 1460 /*
1461 1461 * Process waiting cmds before starting new ones to
1462 1462 * ensure first IOs are serviced first.
1463 1463 */
1464 1464 aac_start_waiting_io(softs);
1465 1465 return (AAC_DB_RESPONSE_READY);
1466 1466 } else if (status & AAC_DB_COMMAND_READY) {
1467 1467 int aif_idx;
1468 1468
1469 1469 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_READY);
1470 1470 (void) AAC_STATUS_GET(softs);
1471 1471 if (aac_fib_dequeue(softs, AAC_HOST_NORM_CMD_Q, &aif_idx) ==
1472 1472 AACOK) {
1473 1473 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
1474 1474 struct aac_fib *fibp; /* FIB in communication space */
1475 1475 uint16_t fib_size;
1476 1476 uint32_t fib_xfer_state;
1477 1477 uint32_t addr, size;
1478 1478
1479 1479 ASSERT((aif_idx >= 0) && (aif_idx < AAC_ADAPTER_FIBS));
1480 1480
1481 1481 #define AAC_SYNC_AIF(softs, aif_idx, type) \
1482 1482 { (void) ddi_dma_sync((softs)->comm_space_dma_handle, \
1483 1483 offsetof(struct aac_comm_space, \
1484 1484 adapter_fibs[(aif_idx)]), AAC_FIB_SIZE, \
1485 1485 (type)); }
1486 1486
1487 1487 /* Copy AIF from adapter to the empty AIF slot */
1488 1488 AAC_SYNC_AIF(softs, aif_idx, DDI_DMA_SYNC_FORCPU);
1489 1489 fibp = &softs->comm_space->adapter_fibs[aif_idx];
1490 1490 fib_size = ddi_get16(acc, &fibp->Header.Size);
1491 1491
1492 1492 aac_save_aif(softs, acc, fibp, fib_size);
1493 1493
1494 1494 /* Complete AIF back to adapter with good status */
1495 1495 fib_xfer_state = LE_32(fibp->Header.XferState);
1496 1496 if (fib_xfer_state & AAC_FIBSTATE_FROMADAP) {
1497 1497 ddi_put32(acc, &fibp->Header.XferState,
1498 1498 fib_xfer_state | AAC_FIBSTATE_DONEHOST);
1499 1499 ddi_put32(acc, (void *)&fibp->data[0], ST_OK);
1500 1500 if (fib_size > AAC_FIB_SIZE)
1501 1501 ddi_put16(acc, &fibp->Header.Size,
1502 1502 AAC_FIB_SIZE);
1503 1503 AAC_SYNC_AIF(softs, aif_idx,
1504 1504 DDI_DMA_SYNC_FORDEV);
1505 1505 }
1506 1506
1507 1507 /* Put the AIF response on the response queue */
1508 1508 addr = ddi_get32(acc,
1509 1509 &softs->comm_space->adapter_fibs[aif_idx]. \
1510 1510 Header.SenderFibAddress);
1511 1511 size = (uint32_t)ddi_get16(acc,
1512 1512 &softs->comm_space->adapter_fibs[aif_idx]. \
1513 1513 Header.Size);
1514 1514 ddi_put32(acc,
1515 1515 &softs->comm_space->adapter_fibs[aif_idx]. \
1516 1516 Header.ReceiverFibAddress, addr);
1517 1517 if (aac_fib_enqueue(softs, AAC_ADAP_NORM_RESP_Q,
1518 1518 addr, size) == AACERR)
1519 1519 cmn_err(CE_NOTE, "!AIF ack failed");
1520 1520 }
1521 1521 return (AAC_DB_COMMAND_READY);
1522 1522 } else if (status & AAC_DB_PRINTF_READY) {
1523 1523 /* ACK the intr */
1524 1524 AAC_STATUS_CLR(softs, AAC_DB_PRINTF_READY);
1525 1525 (void) AAC_STATUS_GET(softs);
1526 1526 (void) ddi_dma_sync(softs->comm_space_dma_handle,
1527 1527 offsetof(struct aac_comm_space, adapter_print_buf),
1528 1528 AAC_ADAPTER_PRINT_BUFSIZE, DDI_DMA_SYNC_FORCPU);
1529 1529 if (aac_check_dma_handle(softs->comm_space_dma_handle) ==
1530 1530 DDI_SUCCESS)
1531 1531 cmn_err(CE_NOTE, "MSG From Adapter: %s",
1532 1532 softs->comm_space->adapter_print_buf);
1533 1533 else
1534 1534 ddi_fm_service_impact(softs->devinfo_p,
1535 1535 DDI_SERVICE_UNAFFECTED);
1536 1536 AAC_NOTIFY(softs, AAC_DB_PRINTF_READY);
1537 1537 return (AAC_DB_PRINTF_READY);
1538 1538 } else if (status & AAC_DB_COMMAND_NOT_FULL) {
1539 1539 /*
1540 1540 * Without these two condition statements, the OS could hang
1541 1541		 * after a while, especially if there are a lot of AIFs to
1542 1542 * handle, for instance if a drive is pulled from an array
1543 1543 * under heavy load.
1544 1544 */
1545 1545 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1546 1546 return (AAC_DB_COMMAND_NOT_FULL);
1547 1547 } else if (status & AAC_DB_RESPONSE_NOT_FULL) {
1548 1548 AAC_STATUS_CLR(softs, AAC_DB_COMMAND_NOT_FULL);
1549 1549 AAC_STATUS_CLR(softs, AAC_DB_RESPONSE_NOT_FULL);
1550 1550 return (AAC_DB_RESPONSE_NOT_FULL);
1551 1551 } else {
1552 1552 return (0);
1553 1553 }
1554 1554 }
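
A minimal, user-level sketch of the producer/consumer index pair described in the
block comment above aac_process_intr_old(); the names, queue depth and entry layout
here are illustrative assumptions (memory barriers are omitted), not the driver's
actual queue structures from aac_regs.h.

	#include <stdint.h>

	/* Toy model of one host-normal response queue (illustrative only). */
	struct toy_queue {
		volatile uint32_t producer;	/* advanced by the adapter */
		volatile uint32_t consumer;	/* advanced by the host */
		uint32_t entries[8];		/* completed FIB slot indices */
	};

	/* Return 1 and the next completed slot index, or 0 if the queue is empty. */
	static int
	toy_fib_dequeue(struct toy_queue *q, uint32_t *slot_idx)
	{
		if (q->consumer == q->producer)
			return (0);		/* nothing completed yet */
		*slot_idx = q->entries[q->consumer % 8];
		q->consumer++;			/* hand the entry back to the adapter */
		return (1);
	}
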
1555 1555
1556 1556 static uint_t
1557 1557 aac_intr_old(caddr_t arg)
1558 1558 {
1559 1559 struct aac_softstate *softs = (void *)arg;
1560 1560 int rval;
1561 1561
1562 1562 mutex_enter(&softs->io_lock);
1563 1563 if (aac_process_intr_old(softs))
1564 1564 rval = DDI_INTR_CLAIMED;
1565 1565 else
1566 1566 rval = DDI_INTR_UNCLAIMED;
1567 1567 mutex_exit(&softs->io_lock);
1568 1568
1569 1569 aac_drain_comp_q(softs);
1570 1570 return (rval);
1571 1571 }
1572 1572
1573 1573 /*
1574 1574 * Query FIXED or MSI interrupts
1575 1575 */
1576 1576 static int
1577 1577 aac_query_intrs(struct aac_softstate *softs, int intr_type)
1578 1578 {
1579 1579 dev_info_t *dip = softs->devinfo_p;
1580 1580 int avail, actual, count;
1581 1581 int i, flag, ret;
1582 1582
1583 1583 AACDB_PRINT(softs, CE_NOTE,
1584 1584	    "aac_query_intrs: interrupt type 0x%x", intr_type);
1585 1585
1586 1586 /* Get number of interrupts */
1587 1587 ret = ddi_intr_get_nintrs(dip, intr_type, &count);
1588 1588 if ((ret != DDI_SUCCESS) || (count == 0)) {
1589 1589 AACDB_PRINT(softs, CE_WARN,
1590 1590 "ddi_intr_get_nintrs() failed, ret %d count %d",
1591 1591 ret, count);
1592 1592 return (DDI_FAILURE);
1593 1593 }
1594 1594
1595 1595 /* Get number of available interrupts */
1596 1596 ret = ddi_intr_get_navail(dip, intr_type, &avail);
1597 1597 if ((ret != DDI_SUCCESS) || (avail == 0)) {
1598 1598 AACDB_PRINT(softs, CE_WARN,
1599 1599 "ddi_intr_get_navail() failed, ret %d avail %d",
1600 1600 ret, avail);
1601 1601 return (DDI_FAILURE);
1602 1602 }
1603 1603
1604 1604 AACDB_PRINT(softs, CE_NOTE,
1605 1605	    "ddi_intr_get_nintrs() returned %d, navail() returned %d",
1606 1606 count, avail);
1607 1607
1608 1608 /* Allocate an array of interrupt handles */
1609 1609 softs->intr_size = count * sizeof (ddi_intr_handle_t);
1610 1610 softs->htable = kmem_alloc(softs->intr_size, KM_SLEEP);
1611 1611
1612 1612 if (intr_type == DDI_INTR_TYPE_MSI) {
1613 1613		count = 1;	/* only one vector needed for now */
1614 1614 flag = DDI_INTR_ALLOC_STRICT;
1615 1615 } else { /* must be DDI_INTR_TYPE_FIXED */
1616 1616 flag = DDI_INTR_ALLOC_NORMAL;
1617 1617 }
1618 1618
1619 1619 /* Call ddi_intr_alloc() */
1620 1620 ret = ddi_intr_alloc(dip, softs->htable, intr_type, 0,
1621 1621 count, &actual, flag);
1622 1622
1623 1623 if ((ret != DDI_SUCCESS) || (actual == 0)) {
1624 1624 AACDB_PRINT(softs, CE_WARN,
1625 1625 "ddi_intr_alloc() failed, ret = %d", ret);
1626 1626 actual = 0;
1627 1627 goto error;
1628 1628 }
1629 1629
1630 1630 if (actual < count) {
1631 1631 AACDB_PRINT(softs, CE_NOTE,
1632 1632 "Requested: %d, Received: %d", count, actual);
1633 1633 goto error;
1634 1634 }
1635 1635
1636 1636 softs->intr_cnt = actual;
1637 1637
1638 1638 /* Get priority for first msi, assume remaining are all the same */
1639 1639 if ((ret = ddi_intr_get_pri(softs->htable[0],
1640 1640 &softs->intr_pri)) != DDI_SUCCESS) {
1641 1641 AACDB_PRINT(softs, CE_WARN,
1642 1642 "ddi_intr_get_pri() failed, ret = %d", ret);
1643 1643 goto error;
1644 1644 }
1645 1645
1646 1646 /* Test for high level mutex */
1647 1647 if (softs->intr_pri >= ddi_intr_get_hilevel_pri()) {
1648 1648 AACDB_PRINT(softs, CE_WARN,
1649 1649 "aac_query_intrs: Hi level interrupt not supported");
1650 1650 goto error;
1651 1651 }
1652 1652
1653 1653 return (DDI_SUCCESS);
1654 1654
1655 1655 error:
1656 1656 /* Free already allocated intr */
1657 1657 for (i = 0; i < actual; i++)
1658 1658 (void) ddi_intr_free(softs->htable[i]);
1659 1659
1660 1660 kmem_free(softs->htable, softs->intr_size);
1661 1661 return (DDI_FAILURE);
1662 1662 }
1663 1663
1664 1664
1665 1665 /*
1666 1666 * Register FIXED or MSI interrupts, and enable them
1667 1667 */
1668 1668 static int
1669 1669 aac_add_intrs(struct aac_softstate *softs)
1670 1670 {
1671 1671 int i, ret;
1672 1672 int actual;
1673 1673 ddi_intr_handler_t *aac_intr;
1674 1674
1675 1675 actual = softs->intr_cnt;
1676 1676 aac_intr = (ddi_intr_handler_t *)((softs->flags & AAC_FLAGS_NEW_COMM) ?
1677 1677 aac_intr_new : aac_intr_old);
1678 1678
1679 1679 /* Call ddi_intr_add_handler() */
1680 1680 for (i = 0; i < actual; i++) {
1681 1681 if ((ret = ddi_intr_add_handler(softs->htable[i],
1682 1682 aac_intr, (caddr_t)softs, NULL)) != DDI_SUCCESS) {
1683 1683 cmn_err(CE_WARN,
1684 1684 "ddi_intr_add_handler() failed ret = %d", ret);
1685 1685
1686 1686 /* Free already allocated intr */
1687 1687 for (i = 0; i < actual; i++)
1688 1688 (void) ddi_intr_free(softs->htable[i]);
1689 1689
1690 1690 kmem_free(softs->htable, softs->intr_size);
1691 1691 return (DDI_FAILURE);
1692 1692 }
1693 1693 }
1694 1694
1695 1695 if ((ret = ddi_intr_get_cap(softs->htable[0], &softs->intr_cap))
1696 1696 != DDI_SUCCESS) {
1697 1697 cmn_err(CE_WARN, "ddi_intr_get_cap() failed, ret = %d", ret);
1698 1698
1699 1699 /* Free already allocated intr */
1700 1700 for (i = 0; i < actual; i++)
1701 1701 (void) ddi_intr_free(softs->htable[i]);
1702 1702
1703 1703 kmem_free(softs->htable, softs->intr_size);
1704 1704 return (DDI_FAILURE);
1705 1705 }
1706 1706
1707 1707 return (DDI_SUCCESS);
1708 1708 }
1709 1709
1710 1710 /*
1711 1711 * Unregister FIXED or MSI interrupts
1712 1712 */
1713 1713 static void
1714 1714 aac_remove_intrs(struct aac_softstate *softs)
1715 1715 {
1716 1716 int i;
1717 1717
1718 1718 /* Disable all interrupts */
1719 1719 (void) aac_disable_intrs(softs);
1720 1720 /* Call ddi_intr_remove_handler() */
1721 1721 for (i = 0; i < softs->intr_cnt; i++) {
1722 1722 (void) ddi_intr_remove_handler(softs->htable[i]);
1723 1723 (void) ddi_intr_free(softs->htable[i]);
1724 1724 }
1725 1725
1726 1726 kmem_free(softs->htable, softs->intr_size);
1727 1727 }
1728 1728
1729 1729 static int
1730 1730 aac_enable_intrs(struct aac_softstate *softs)
1731 1731 {
1732 1732 int rval = AACOK;
1733 1733
1734 1734 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1735 1735 /* for MSI block enable */
1736 1736 if (ddi_intr_block_enable(softs->htable, softs->intr_cnt) !=
1737 1737 DDI_SUCCESS)
1738 1738 rval = AACERR;
1739 1739 } else {
1740 1740 int i;
1741 1741
1742 1742 /* Call ddi_intr_enable() for legacy/MSI non block enable */
1743 1743 for (i = 0; i < softs->intr_cnt; i++) {
1744 1744 if (ddi_intr_enable(softs->htable[i]) != DDI_SUCCESS)
1745 1745 rval = AACERR;
1746 1746 }
1747 1747 }
1748 1748 return (rval);
1749 1749 }
1750 1750
1751 1751 static int
1752 1752 aac_disable_intrs(struct aac_softstate *softs)
1753 1753 {
1754 1754 int rval = AACOK;
1755 1755
1756 1756 if (softs->intr_cap & DDI_INTR_FLAG_BLOCK) {
1757 1757 /* Call ddi_intr_block_disable() */
1758 1758 if (ddi_intr_block_disable(softs->htable, softs->intr_cnt) !=
1759 1759 DDI_SUCCESS)
1760 1760 rval = AACERR;
1761 1761 } else {
1762 1762 int i;
1763 1763
1764 1764 for (i = 0; i < softs->intr_cnt; i++) {
1765 1765 if (ddi_intr_disable(softs->htable[i]) != DDI_SUCCESS)
1766 1766 rval = AACERR;
1767 1767 }
1768 1768 }
1769 1769 return (rval);
1770 1770 }
1771 1771
1772 1772 /*
1773 1773 * Set pkt_reason and OR in pkt_statistics flag
1774 1774 */
1775 1775 static void
1776 1776 aac_set_pkt_reason(struct aac_softstate *softs, struct aac_cmd *acp,
1777 1777 uchar_t reason, uint_t stat)
1778 1778 {
1779 1779 #ifndef __lock_lint
1780 1780 _NOTE(ARGUNUSED(softs))
1781 1781 #endif
1782 1782 if (acp->pkt->pkt_reason == CMD_CMPLT)
1783 1783 acp->pkt->pkt_reason = reason;
1784 1784 acp->pkt->pkt_statistics |= stat;
1785 1785 }
1786 1786
1787 1787 /*
1788 1788 * Handle a finished pkt of soft SCMD
1789 1789 */
1790 1790 static void
1791 1791 aac_soft_callback(struct aac_softstate *softs, struct aac_cmd *acp)
1792 1792 {
1793 1793 ASSERT(acp->pkt);
1794 1794
1795 1795 acp->flags |= AAC_CMD_CMPLT;
1796 1796
1797 1797 acp->pkt->pkt_state |= STATE_GOT_BUS | STATE_GOT_TARGET | \
1798 1798 STATE_SENT_CMD | STATE_GOT_STATUS;
1799 1799 if (acp->pkt->pkt_state & STATE_XFERRED_DATA)
1800 1800 acp->pkt->pkt_resid = 0;
1801 1801
1802 1802 /* AAC_CMD_NO_INTR means no complete callback */
1803 1803 if (!(acp->flags & AAC_CMD_NO_INTR)) {
1804 1804 mutex_enter(&softs->q_comp_mutex);
1805 1805 aac_cmd_enqueue(&softs->q_comp, acp);
1806 1806 mutex_exit(&softs->q_comp_mutex);
1807 1807 ddi_trigger_softintr(softs->softint_id);
1808 1808 }
1809 1809 }
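
The enqueue-plus-ddi_trigger_softintr() pattern in aac_soft_callback() has a
matching drain side (the driver's aac_drain_comp_q(), not shown in this section).
The sketch below is hypothetical -- aac_cmd_dequeue() and the pkt_comp convention
are assumptions -- and only illustrates that completion callbacks run with no
driver locks held.

	/* Hypothetical drain side of softs->q_comp -- illustration only. */
	static void
	toy_drain_comp_q(struct aac_softstate *softs)
	{
		struct aac_cmd *acp;

		mutex_enter(&softs->q_comp_mutex);
		while ((acp = aac_cmd_dequeue(&softs->q_comp)) != NULL) {
			mutex_exit(&softs->q_comp_mutex);
			if (acp->pkt->pkt_comp != NULL)
				(*acp->pkt->pkt_comp)(acp->pkt);	/* no locks held */
			mutex_enter(&softs->q_comp_mutex);
		}
		mutex_exit(&softs->q_comp_mutex);
	}
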
1810 1810
1811 1811 /*
1812 1812 * Handlers for completed IOs, common to aac_intr_new() and aac_intr_old()
1813 1813 */
1814 1814
1815 1815 /*
1816 1816 * Handle completed logical device IO command
1817 1817 */
1818 1818 /*ARGSUSED*/
1819 1819 static void
1820 1820 aac_ld_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1821 1821 {
1822 1822 struct aac_slot *slotp = acp->slotp;
1823 1823 struct aac_blockread_response *resp;
1824 1824 uint32_t status;
1825 1825
1826 1826 ASSERT(!(acp->flags & AAC_CMD_SYNC));
1827 1827 ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1828 1828
1829 1829 acp->pkt->pkt_state |= STATE_GOT_STATUS;
1830 1830
1831 1831 /*
1832 1832 * block_read/write has a similar response header, use blockread
1833 1833 * response for both.
1834 1834 */
1835 1835 resp = (struct aac_blockread_response *)&slotp->fibp->data[0];
1836 1836 status = ddi_get32(slotp->fib_acc_handle, &resp->Status);
1837 1837 if (status == ST_OK) {
1838 1838 acp->pkt->pkt_resid = 0;
1839 1839 acp->pkt->pkt_state |= STATE_XFERRED_DATA;
1840 1840 } else {
1841 1841 aac_set_arq_data_hwerr(acp);
1842 1842 }
1843 1843 }
1844 1844
1845 1845 /*
1846 1846 * Handle completed phys. device IO command
1847 1847 */
1848 1848 static void
1849 1849 aac_pd_complete(struct aac_softstate *softs, struct aac_cmd *acp)
1850 1850 {
1851 1851 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
1852 1852 struct aac_fib *fibp = acp->slotp->fibp;
1853 1853 struct scsi_pkt *pkt = acp->pkt;
1854 1854 struct aac_srb_reply *resp;
1855 1855 uint32_t resp_status;
1856 1856
1857 1857 ASSERT(!(acp->flags & AAC_CMD_SYNC));
1858 1858 ASSERT(!(acp->flags & AAC_CMD_NO_CB));
1859 1859
1860 1860 resp = (struct aac_srb_reply *)&fibp->data[0];
1861 1861 resp_status = ddi_get32(acc, &resp->status);
1862 1862
1863 1863 /* First check FIB status */
1864 1864 if (resp_status == ST_OK) {
1865 1865 uint32_t scsi_status;
1866 1866 uint32_t srb_status;
1867 1867 uint32_t data_xfer_length;
1868 1868
1869 1869 scsi_status = ddi_get32(acc, &resp->scsi_status);
1870 1870 srb_status = ddi_get32(acc, &resp->srb_status);
1871 1871 data_xfer_length = ddi_get32(acc, &resp->data_xfer_length);
1872 1872
1873 1873 *pkt->pkt_scbp = (uint8_t)scsi_status;
1874 1874 pkt->pkt_state |= STATE_GOT_STATUS;
1875 1875 if (scsi_status == STATUS_GOOD) {
1876 1876 uchar_t cmd = ((union scsi_cdb *)(void *)
1877 1877 (pkt->pkt_cdbp))->scc_cmd;
1878 1878
1879 1879 /* Next check SRB status */
1880 1880 switch (srb_status & 0x3f) {
1881 1881 case SRB_STATUS_DATA_OVERRUN:
1882 1882 AACDB_PRINT(softs, CE_NOTE, "DATA_OVERRUN: " \
1883 1883 "scmd=%d, xfer=%d, buflen=%d",
1884 1884 (uint32_t)cmd, data_xfer_length,
1885 1885 acp->bcount);
1886 1886
1887 1887 switch (cmd) {
1888 1888 case SCMD_READ:
1889 1889 case SCMD_WRITE:
1890 1890 case SCMD_READ_G1:
1891 1891 case SCMD_WRITE_G1:
1892 1892 case SCMD_READ_G4:
1893 1893 case SCMD_WRITE_G4:
1894 1894 case SCMD_READ_G5:
1895 1895 case SCMD_WRITE_G5:
1896 1896 aac_set_pkt_reason(softs, acp,
1897 1897 CMD_DATA_OVR, 0);
1898 1898 break;
1899 1899 }
1900 1900 /*FALLTHRU*/
1901 1901 case SRB_STATUS_ERROR_RECOVERY:
1902 1902 case SRB_STATUS_PENDING:
1903 1903 case SRB_STATUS_SUCCESS:
1904 1904 /*
1905 1905 * pkt_resid should only be calculated if the
1906 1906 * status is ERROR_RECOVERY/PENDING/SUCCESS/
1907 1907 * OVERRUN/UNDERRUN
1908 1908 */
1909 1909 if (data_xfer_length) {
1910 1910 pkt->pkt_state |= STATE_XFERRED_DATA;
1911 1911 pkt->pkt_resid = acp->bcount - \
1912 1912 data_xfer_length;
1913 1913 ASSERT(pkt->pkt_resid >= 0);
1914 1914 }
1915 1915 break;
1916 1916 case SRB_STATUS_ABORTED:
1917 1917 AACDB_PRINT(softs, CE_NOTE,
1918 1918 "SRB_STATUS_ABORTED, xfer=%d, resid=%d",
1919 1919 data_xfer_length, pkt->pkt_resid);
1920 1920 aac_set_pkt_reason(softs, acp, CMD_ABORTED,
1921 1921 STAT_ABORTED);
1922 1922 break;
1923 1923 case SRB_STATUS_ABORT_FAILED:
1924 1924 AACDB_PRINT(softs, CE_NOTE,
1925 1925 "SRB_STATUS_ABORT_FAILED, xfer=%d, " \
1926 1926 "resid=%d", data_xfer_length,
1927 1927 pkt->pkt_resid);
1928 1928 aac_set_pkt_reason(softs, acp, CMD_ABORT_FAIL,
1929 1929 0);
1930 1930 break;
1931 1931 case SRB_STATUS_PARITY_ERROR:
1932 1932 AACDB_PRINT(softs, CE_NOTE,
1933 1933 "SRB_STATUS_PARITY_ERROR, xfer=%d, " \
1934 1934 "resid=%d", data_xfer_length,
1935 1935 pkt->pkt_resid);
1936 1936 aac_set_pkt_reason(softs, acp, CMD_PER_FAIL, 0);
1937 1937 break;
1938 1938 case SRB_STATUS_NO_DEVICE:
1939 1939 case SRB_STATUS_INVALID_PATH_ID:
1940 1940 case SRB_STATUS_INVALID_TARGET_ID:
1941 1941 case SRB_STATUS_INVALID_LUN:
1942 1942 case SRB_STATUS_SELECTION_TIMEOUT:
1943 1943 #ifdef DEBUG
1944 1944 if (AAC_DEV_IS_VALID(acp->dvp)) {
1945 1945 AACDB_PRINT(softs, CE_NOTE,
1946 1946 "SRB_STATUS_NO_DEVICE(%d), " \
1947 1947 "xfer=%d, resid=%d ",
1948 1948 srb_status & 0x3f,
1949 1949 data_xfer_length, pkt->pkt_resid);
1950 1950 }
1951 1951 #endif
1952 1952 aac_set_pkt_reason(softs, acp, CMD_DEV_GONE, 0);
1953 1953 break;
1954 1954 case SRB_STATUS_COMMAND_TIMEOUT:
1955 1955 case SRB_STATUS_TIMEOUT:
1956 1956 AACDB_PRINT(softs, CE_NOTE,
1957 1957 "SRB_STATUS_COMMAND_TIMEOUT, xfer=%d, " \
1958 1958 "resid=%d", data_xfer_length,
1959 1959 pkt->pkt_resid);
1960 1960 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
1961 1961 STAT_TIMEOUT);
1962 1962 break;
1963 1963 case SRB_STATUS_BUS_RESET:
1964 1964 AACDB_PRINT(softs, CE_NOTE,
1965 1965 "SRB_STATUS_BUS_RESET, xfer=%d, " \
1966 1966 "resid=%d", data_xfer_length,
1967 1967 pkt->pkt_resid);
1968 1968 aac_set_pkt_reason(softs, acp, CMD_RESET,
1969 1969 STAT_BUS_RESET);
1970 1970 break;
1971 1971 default:
1972 1972 AACDB_PRINT(softs, CE_NOTE, "srb_status=%d, " \
1973 1973 "xfer=%d, resid=%d", srb_status & 0x3f,
1974 1974 data_xfer_length, pkt->pkt_resid);
1975 1975 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
1976 1976 break;
1977 1977 }
1978 1978 } else if (scsi_status == STATUS_CHECK) {
1979 1979 /* CHECK CONDITION */
1980 1980 struct scsi_arq_status *arqstat =
1981 1981 (void *)(pkt->pkt_scbp);
1982 1982 uint32_t sense_data_size;
1983 1983
1984 1984 pkt->pkt_state |= STATE_ARQ_DONE;
1985 1985
1986 1986 *(uint8_t *)&arqstat->sts_rqpkt_status = STATUS_GOOD;
1987 1987 arqstat->sts_rqpkt_reason = CMD_CMPLT;
1988 1988 arqstat->sts_rqpkt_resid = 0;
1989 1989 arqstat->sts_rqpkt_state =
1990 1990 STATE_GOT_BUS |
1991 1991 STATE_GOT_TARGET |
1992 1992 STATE_SENT_CMD |
1993 1993 STATE_XFERRED_DATA;
1994 1994 arqstat->sts_rqpkt_statistics = 0;
1995 1995
1996 1996 sense_data_size = ddi_get32(acc,
1997 1997 &resp->sense_data_size);
1998 1998 ASSERT(sense_data_size <= AAC_SENSE_BUFFERSIZE);
1999 1999 AACDB_PRINT(softs, CE_NOTE,
2000 2000 "CHECK CONDITION: sense len=%d, xfer len=%d",
2001 2001 sense_data_size, data_xfer_length);
2002 2002
2003 2003 if (sense_data_size > SENSE_LENGTH)
2004 2004 sense_data_size = SENSE_LENGTH;
2005 2005 ddi_rep_get8(acc, (uint8_t *)&arqstat->sts_sensedata,
2006 2006 (uint8_t *)resp->sense_data, sense_data_size,
2007 2007 DDI_DEV_AUTOINCR);
2008 2008 } else {
2009 2009			AACDB_PRINT(softs, CE_WARN, "invalid scsi status: " \
2010 2010 "scsi_status=%d, srb_status=%d",
2011 2011 scsi_status, srb_status);
2012 2012 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
2013 2013 }
2014 2014 } else {
2015 2015 AACDB_PRINT(softs, CE_NOTE, "SRB failed: fib status %d",
2016 2016 resp_status);
2017 2017 aac_set_pkt_reason(softs, acp, CMD_TRAN_ERR, 0);
2018 2018 }
2019 2019 }
2020 2020
2021 2021 /*
2022 2022 * Handle completed IOCTL command
2023 2023 */
2024 2024 /*ARGSUSED*/
2025 2025 void
2026 2026 aac_ioctl_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2027 2027 {
2028 2028 struct aac_slot *slotp = acp->slotp;
2029 2029
2030 2030 /*
2031 2031 * NOTE: Both aac_ioctl_send_fib() and aac_send_raw_srb()
2032 2032 * may wait on softs->event, so use cv_broadcast() instead
2033 2033 * of cv_signal().
2034 2034 */
2035 2035 ASSERT(acp->flags & AAC_CMD_SYNC);
2036 2036 ASSERT(acp->flags & AAC_CMD_NO_CB);
2037 2037
2038 2038 /* Get the size of the response FIB from its FIB.Header.Size field */
2039 2039 acp->fib_size = ddi_get16(slotp->fib_acc_handle,
2040 2040 &slotp->fibp->Header.Size);
2041 2041
2042 2042 ASSERT(acp->fib_size <= softs->aac_max_fib_size);
2043 2043 ddi_rep_get8(slotp->fib_acc_handle, (uint8_t *)acp->fibp,
2044 2044 (uint8_t *)slotp->fibp, acp->fib_size, DDI_DEV_AUTOINCR);
2045 2045 }
2046 2046
2047 2047 /*
2048 2048 * Handle completed sync fib command
2049 2049 */
2050 2050 /*ARGSUSED*/
2051 2051 void
2052 2052 aac_sync_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2053 2053 {
2054 2054 }
2055 2055
2056 2056 /*
2057 2057 * Handle completed Flush command
2058 2058 */
2059 2059 /*ARGSUSED*/
2060 2060 static void
2061 2061 aac_synccache_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2062 2062 {
2063 2063 struct aac_slot *slotp = acp->slotp;
2064 2064 ddi_acc_handle_t acc = slotp->fib_acc_handle;
2065 2065 struct aac_synchronize_reply *resp;
2066 2066 uint32_t status;
2067 2067
2068 2068 ASSERT(!(acp->flags & AAC_CMD_SYNC));
2069 2069
2070 2070 acp->pkt->pkt_state |= STATE_GOT_STATUS;
2071 2071
2072 2072 resp = (struct aac_synchronize_reply *)&slotp->fibp->data[0];
2073 2073 status = ddi_get32(acc, &resp->Status);
2074 2074 if (status != CT_OK)
2075 2075 aac_set_arq_data_hwerr(acp);
2076 2076 }
2077 2077
2078 2078 /*ARGSUSED*/
2079 2079 static void
2080 2080 aac_startstop_complete(struct aac_softstate *softs, struct aac_cmd *acp)
2081 2081 {
2082 2082 struct aac_slot *slotp = acp->slotp;
2083 2083 ddi_acc_handle_t acc = slotp->fib_acc_handle;
2084 2084 struct aac_Container_resp *resp;
2085 2085 uint32_t status;
2086 2086
2087 2087 ASSERT(!(acp->flags & AAC_CMD_SYNC));
2088 2088
2089 2089 acp->pkt->pkt_state |= STATE_GOT_STATUS;
2090 2090
2091 2091 resp = (struct aac_Container_resp *)&slotp->fibp->data[0];
2092 2092 status = ddi_get32(acc, &resp->Status);
2093 2093 if (status != 0) {
2094 2094 AACDB_PRINT(softs, CE_WARN, "Cannot start/stop a unit");
2095 2095 aac_set_arq_data_hwerr(acp);
2096 2096 }
2097 2097 }
2098 2098
2099 2099 /*
2100 2100 * Access PCI space to see if the driver can support the card
2101 2101 */
2102 2102 static int
2103 2103 aac_check_card_type(struct aac_softstate *softs)
2104 2104 {
2105 2105 ddi_acc_handle_t pci_config_handle;
2106 2106 int card_index;
2107 2107 uint32_t pci_cmd;
2108 2108
2109 2109 /* Map pci configuration space */
2110 2110 if ((pci_config_setup(softs->devinfo_p, &pci_config_handle)) !=
2111 2111 DDI_SUCCESS) {
2112 2112 AACDB_PRINT(softs, CE_WARN, "Cannot setup pci config space");
2113 2113 return (AACERR);
2114 2114 }
2115 2115
2116 2116 softs->vendid = pci_config_get16(pci_config_handle, PCI_CONF_VENID);
2117 2117 softs->devid = pci_config_get16(pci_config_handle, PCI_CONF_DEVID);
2118 2118 softs->subvendid = pci_config_get16(pci_config_handle,
2119 2119 PCI_CONF_SUBVENID);
2120 2120 softs->subsysid = pci_config_get16(pci_config_handle,
2121 2121 PCI_CONF_SUBSYSID);
2122 2122
2123 2123 card_index = 0;
2124 2124 while (!CARD_IS_UNKNOWN(card_index)) {
2125 2125 if ((aac_cards[card_index].vendor == softs->vendid) &&
2126 2126 (aac_cards[card_index].device == softs->devid) &&
2127 2127 (aac_cards[card_index].subvendor == softs->subvendid) &&
2128 2128 (aac_cards[card_index].subsys == softs->subsysid)) {
2129 2129 break;
2130 2130 }
2131 2131 card_index++;
2132 2132 }
2133 2133
2134 2134 softs->card = card_index;
2135 2135 softs->hwif = aac_cards[card_index].hwif;
2136 2136
2137 2137 /*
2138 2138 * Unknown aac card
2139 2139 * do a generic match based on the VendorID and DeviceID to
2140 2140 * support the new cards in the aac family
2141 2141 */
2142 2142 if (CARD_IS_UNKNOWN(card_index)) {
2143 2143 if (softs->vendid != 0x9005) {
2144 2144 AACDB_PRINT(softs, CE_WARN,
2145 2145 "Unknown vendor 0x%x", softs->vendid);
2146 2146 goto error;
2147 2147 }
2148 2148 switch (softs->devid) {
2149 2149 case 0x285:
2150 2150 softs->hwif = AAC_HWIF_I960RX;
2151 2151 break;
2152 2152 case 0x286:
2153 2153 softs->hwif = AAC_HWIF_RKT;
2154 2154 break;
2155 2155 default:
2156 2156 AACDB_PRINT(softs, CE_WARN,
2157 2157 "Unknown device \"pci9005,%x\"", softs->devid);
2158 2158 goto error;
2159 2159 }
2160 2160 }
2161 2161
2162 2162 /* Set hardware dependent interface */
2163 2163 switch (softs->hwif) {
2164 2164 case AAC_HWIF_I960RX:
2165 2165 softs->aac_if = aac_rx_interface;
2166 2166 softs->map_size_min = AAC_MAP_SIZE_MIN_RX;
2167 2167 break;
2168 2168 case AAC_HWIF_RKT:
2169 2169 softs->aac_if = aac_rkt_interface;
2170 2170 softs->map_size_min = AAC_MAP_SIZE_MIN_RKT;
2171 2171 break;
2172 2172 default:
2173 2173 AACDB_PRINT(softs, CE_WARN,
2174 2174 "Unknown hardware interface %d", softs->hwif);
2175 2175 goto error;
2176 2176 }
2177 2177
2178 2178 /* Set card names */
2179 2179 (void *)strncpy(softs->vendor_name, aac_cards[card_index].vid,
2180 2180 AAC_VENDOR_LEN);
2181 2181 (void *)strncpy(softs->product_name, aac_cards[card_index].desc,
2182 2182 AAC_PRODUCT_LEN);
2183 2183
2184 2184 /* Set up quirks */
2185 2185 softs->flags = aac_cards[card_index].quirks;
2186 2186
2187 2187 /* Force the busmaster enable bit on */
2188 2188 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2189 2189 if ((pci_cmd & PCI_COMM_ME) == 0) {
2190 2190 pci_cmd |= PCI_COMM_ME;
2191 2191 pci_config_put16(pci_config_handle, PCI_CONF_COMM, pci_cmd);
2192 2192 pci_cmd = pci_config_get16(pci_config_handle, PCI_CONF_COMM);
2193 2193 if ((pci_cmd & PCI_COMM_ME) == 0) {
2194 2194 cmn_err(CE_CONT, "?Cannot enable busmaster bit");
2195 2195 goto error;
2196 2196 }
2197 2197 }
2198 2198
2199 2199 /* Set memory base to map */
2200 2200 softs->pci_mem_base_paddr = 0xfffffff0UL & \
2201 2201 pci_config_get32(pci_config_handle, PCI_CONF_BASE0);
2202 2202
2203 2203 pci_config_teardown(&pci_config_handle);
2204 2204
2205 2205 return (AACOK); /* card type detected */
2206 2206 error:
2207 2207 pci_config_teardown(&pci_config_handle);
2208 2208 return (AACERR); /* no matched card found */
2209 2209 }
2210 2210
2211 2211 /*
2212 2212 * Do the usual interrupt handler setup stuff.
2213 2213 */
2214 2214 static int
2215 2215 aac_register_intrs(struct aac_softstate *softs)
2216 2216 {
2217 2217 dev_info_t *dip;
2218 2218 int intr_types;
2219 2219
2220 2220 ASSERT(softs->devinfo_p);
2221 2221 dip = softs->devinfo_p;
2222 2222
2223 2223	/* Get the type of device interrupts */
2224 2224 if (ddi_intr_get_supported_types(dip, &intr_types) != DDI_SUCCESS) {
2225 2225 AACDB_PRINT(softs, CE_WARN,
2226 2226 "ddi_intr_get_supported_types() failed");
2227 2227 return (AACERR);
2228 2228 }
2229 2229 AACDB_PRINT(softs, CE_NOTE,
2230 2230 "ddi_intr_get_supported_types() ret: 0x%x", intr_types);
2231 2231
2232 2232 /* Query interrupt, and alloc/init all needed struct */
2233 2233 if (intr_types & DDI_INTR_TYPE_MSI) {
2234 2234 if (aac_query_intrs(softs, DDI_INTR_TYPE_MSI)
2235 2235 != DDI_SUCCESS) {
2236 2236 AACDB_PRINT(softs, CE_WARN,
2237 2237 "MSI interrupt query failed");
2238 2238 return (AACERR);
2239 2239 }
2240 2240 softs->intr_type = DDI_INTR_TYPE_MSI;
2241 2241 } else if (intr_types & DDI_INTR_TYPE_FIXED) {
2242 2242 if (aac_query_intrs(softs, DDI_INTR_TYPE_FIXED)
2243 2243 != DDI_SUCCESS) {
2244 2244 AACDB_PRINT(softs, CE_WARN,
2245 2245 "FIXED interrupt query failed");
2246 2246 return (AACERR);
2247 2247 }
2248 2248 softs->intr_type = DDI_INTR_TYPE_FIXED;
2249 2249 } else {
2250 2250 AACDB_PRINT(softs, CE_WARN,
2251 2251		    "Device supports neither FIXED nor MSI interrupts");
2252 2252 return (AACERR);
2253 2253 }
2254 2254
2255 2255 /* Connect interrupt handlers */
2256 2256 if (aac_add_intrs(softs) != DDI_SUCCESS) {
2257 2257 AACDB_PRINT(softs, CE_WARN,
2258 2258 "Interrupt registration failed, intr type: %s",
2259 2259 softs->intr_type == DDI_INTR_TYPE_MSI ? "MSI" : "FIXED");
2260 2260 return (AACERR);
2261 2261 }
2262 2262 (void) aac_enable_intrs(softs);
2263 2263
2264 2264 if (ddi_add_softintr(dip, DDI_SOFTINT_LOW, &softs->softint_id,
2265 2265 NULL, NULL, aac_softintr, (caddr_t)softs) != DDI_SUCCESS) {
2266 2266 AACDB_PRINT(softs, CE_WARN,
2267 2267 "Can not setup soft interrupt handler!");
2268 2268 aac_remove_intrs(softs);
2269 2269 return (AACERR);
2270 2270 }
2271 2271
2272 2272 return (AACOK);
2273 2273 }
2274 2274
2275 2275 static void
2276 2276 aac_unregister_intrs(struct aac_softstate *softs)
2277 2277 {
2278 2278 aac_remove_intrs(softs);
2279 2279 ddi_remove_softintr(softs->softint_id);
2280 2280 }
2281 2281
2282 2282 /*
2283 2283 * Check the firmware to determine the features to support and the FIB
2284 2284 * parameters to use.
2285 2285 */
2286 2286 static int
2287 2287 aac_check_firmware(struct aac_softstate *softs)
2288 2288 {
2289 2289 uint32_t options;
2290 2290 uint32_t atu_size;
2291 2291 ddi_acc_handle_t pci_handle;
2292 2292 uint8_t *data;
2293 2293 uint32_t max_fibs;
2294 2294 uint32_t max_fib_size;
2295 2295 uint32_t sg_tablesize;
2296 2296 uint32_t max_sectors;
2297 2297 uint32_t status;
2298 2298
2299 2299 /* Get supported options */
2300 2300 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETINFO, 0, 0, 0, 0,
2301 2301 &status)) != AACOK) {
2302 2302 if (status != SRB_STATUS_INVALID_REQUEST) {
2303 2303 cmn_err(CE_CONT,
2304 2304 "?Fatal error: request adapter info error");
2305 2305 return (AACERR);
2306 2306 }
2307 2307 options = 0;
2308 2308 atu_size = 0;
2309 2309 } else {
2310 2310 options = AAC_MAILBOX_GET(softs, 1);
2311 2311 atu_size = AAC_MAILBOX_GET(softs, 2);
2312 2312 }
2313 2313
2314 2314 if (softs->state & AAC_STATE_RESET) {
2315 2315 if ((softs->support_opt == options) &&
2316 2316 (softs->atu_size == atu_size))
2317 2317 return (AACOK);
2318 2318
2319 2319 cmn_err(CE_WARN,
2320 2320 "?Fatal error: firmware changed, system needs reboot");
2321 2321 return (AACERR);
2322 2322 }
2323 2323
2324 2324 /*
2325 2325 * The following critical settings are initialized only once during
2326 2326 * driver attachment.
2327 2327 */
2328 2328 softs->support_opt = options;
2329 2329 softs->atu_size = atu_size;
2330 2330
2331 2331 /* Process supported options */
2332 2332 if ((options & AAC_SUPPORTED_4GB_WINDOW) != 0 &&
2333 2333 (softs->flags & AAC_FLAGS_NO4GB) == 0) {
2334 2334 AACDB_PRINT(softs, CE_NOTE, "!Enable FIB map 4GB window");
2335 2335 softs->flags |= AAC_FLAGS_4GB_WINDOW;
2336 2336 } else {
2337 2337 /*
2338 2338 * Quirk AAC_FLAGS_NO4GB is for FIB address and thus comm space
2339 2339 * only. IO is handled by the DMA engine which does not suffer
2340 2340 * from the ATU window programming workarounds necessary for
2341 2341 * CPU copy operations.
2342 2342 */
2343 2343 softs->addr_dma_attr.dma_attr_addr_lo = 0x2000ull;
2344 2344 softs->addr_dma_attr.dma_attr_addr_hi = 0x7fffffffull;
2345 2345 }
2346 2346
2347 2347 if ((options & AAC_SUPPORTED_SGMAP_HOST64) != 0) {
2348 2348 AACDB_PRINT(softs, CE_NOTE, "!Enable SG map 64-bit address");
2349 2349 softs->buf_dma_attr.dma_attr_addr_hi = 0xffffffffffffffffull;
2350 2350 softs->buf_dma_attr.dma_attr_seg = 0xffffffffffffffffull;
2351 2351 softs->flags |= AAC_FLAGS_SG_64BIT;
2352 2352 }
2353 2353
2354 2354 if (options & AAC_SUPPORTED_64BIT_ARRAYSIZE) {
2355 2355 softs->flags |= AAC_FLAGS_ARRAY_64BIT;
2356 2356 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array size");
2357 2357 }
2358 2358
2359 2359 if (options & AAC_SUPPORTED_NONDASD) {
2360 2360 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p, 0,
2361 2361 "nondasd-enable", (char **)&data) == DDI_SUCCESS)) {
2362 2362 if (strcmp((char *)data, "yes") == 0) {
2363 2363 AACDB_PRINT(softs, CE_NOTE,
2364 2364 "!Enable Non-DASD access");
2365 2365 softs->flags |= AAC_FLAGS_NONDASD;
2366 2366 }
2367 2367 ddi_prop_free(data);
2368 2368 }
2369 2369 }
2370 2370
2371 2371 /* Read preferred settings */
2372 2372 max_fib_size = 0;
2373 2373 if ((aac_sync_mbcommand(softs, AAC_MONKER_GETCOMMPREF,
2374 2374 0, 0, 0, 0, NULL)) == AACOK) {
2375 2375 options = AAC_MAILBOX_GET(softs, 1);
2376 2376 max_fib_size = (options & 0xffff);
2377 2377 max_sectors = (options >> 16) << 1;
2378 2378 options = AAC_MAILBOX_GET(softs, 2);
2379 2379 sg_tablesize = (options >> 16);
2380 2380 options = AAC_MAILBOX_GET(softs, 3);
2381 2381 max_fibs = (options & 0xffff);
2382 2382 }
2383 2383
2384 2384 /* Enable new comm. and rawio at the same time */
2385 2385 if ((softs->support_opt & AAC_SUPPORTED_NEW_COMM) &&
2386 2386 (max_fib_size != 0)) {
2387 2387 /* read out and save PCI MBR */
2388 2388 if ((atu_size > softs->map_size) &&
2389 2389 (ddi_regs_map_setup(softs->devinfo_p, 1,
2390 2390 (caddr_t *)&data, 0, atu_size, &softs->reg_attr,
2391 2391 &pci_handle) == DDI_SUCCESS)) {
2392 2392 ddi_regs_map_free(&softs->pci_mem_handle);
2393 2393 softs->pci_mem_handle = pci_handle;
2394 2394 softs->pci_mem_base_vaddr = data;
2395 2395 softs->map_size = atu_size;
2396 2396 }
2397 2397 if (atu_size == softs->map_size) {
2398 2398 softs->flags |= AAC_FLAGS_NEW_COMM;
2399 2399 AACDB_PRINT(softs, CE_NOTE,
2400 2400 "!Enable New Comm. interface");
2401 2401 }
2402 2402 }
2403 2403
2404 2404 /* Set FIB parameters */
2405 2405 if (softs->flags & AAC_FLAGS_NEW_COMM) {
2406 2406 softs->aac_max_fibs = max_fibs;
2407 2407 softs->aac_max_fib_size = max_fib_size;
2408 2408 softs->aac_max_sectors = max_sectors;
2409 2409 softs->aac_sg_tablesize = sg_tablesize;
2410 2410
2411 2411 softs->flags |= AAC_FLAGS_RAW_IO;
2412 2412 AACDB_PRINT(softs, CE_NOTE, "!Enable RawIO");
2413 2413 } else {
2414 2414 softs->aac_max_fibs =
2415 2415 (softs->flags & AAC_FLAGS_256FIBS) ? 256 : 512;
2416 2416 softs->aac_max_fib_size = AAC_FIB_SIZE;
2417 2417 softs->aac_max_sectors = 128; /* 64K */
2418 2418 if (softs->flags & AAC_FLAGS_17SG)
2419 2419 softs->aac_sg_tablesize = 17;
2420 2420 else if (softs->flags & AAC_FLAGS_34SG)
2421 2421 softs->aac_sg_tablesize = 34;
2422 2422 else if (softs->flags & AAC_FLAGS_SG_64BIT)
2423 2423 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2424 2424 sizeof (struct aac_blockwrite64) +
2425 2425 sizeof (struct aac_sg_entry64)) /
2426 2426 sizeof (struct aac_sg_entry64);
2427 2427 else
2428 2428 softs->aac_sg_tablesize = (AAC_FIB_DATASIZE -
2429 2429 sizeof (struct aac_blockwrite) +
2430 2430 sizeof (struct aac_sg_entry)) /
2431 2431 sizeof (struct aac_sg_entry);
2432 2432 }
2433 2433
2434 2434 if ((softs->flags & AAC_FLAGS_RAW_IO) &&
2435 2435 (softs->flags & AAC_FLAGS_ARRAY_64BIT)) {
2436 2436 softs->flags |= AAC_FLAGS_LBA_64BIT;
2437 2437 AACDB_PRINT(softs, CE_NOTE, "!Enable 64-bit array");
2438 2438 }
2439 2439 softs->buf_dma_attr.dma_attr_sgllen = softs->aac_sg_tablesize;
2440 2440 softs->buf_dma_attr.dma_attr_maxxfer = softs->aac_max_sectors << 9;
2441 2441 /*
2442 2442 * 64K maximum segment size in scatter gather list is controlled by
2443 2443 * the NEW_COMM bit in the adapter information. If not set, the card
2444 2444 * can only accept a maximum of 64K. It is not recommended to permit
2445 2445 * more than 128KB of total transfer size to the adapters because
2446 2446 * performance is negatively impacted.
2447 2447 *
2448 2448 * For new comm, segment size equals max xfer size. For old comm,
2449 2449 * we use 64K for both.
2450 2450 */
2451 2451 softs->buf_dma_attr.dma_attr_count_max =
2452 2452 softs->buf_dma_attr.dma_attr_maxxfer - 1;
2453 2453
2454 2454 /* Setup FIB operations */
2455 2455 if (softs->flags & AAC_FLAGS_RAW_IO)
2456 2456 softs->aac_cmd_fib = aac_cmd_fib_rawio;
2457 2457 else if (softs->flags & AAC_FLAGS_SG_64BIT)
2458 2458 softs->aac_cmd_fib = aac_cmd_fib_brw64;
2459 2459 else
2460 2460 softs->aac_cmd_fib = aac_cmd_fib_brw;
2461 2461 softs->aac_cmd_fib_scsi = (softs->flags & AAC_FLAGS_SG_64BIT) ? \
2462 2462 aac_cmd_fib_scsi64 : aac_cmd_fib_scsi32;
2463 2463
2464 2464 /* 64-bit LBA needs descriptor format sense data */
2465 2465 softs->slen = sizeof (struct scsi_arq_status);
2466 2466 if ((softs->flags & AAC_FLAGS_LBA_64BIT) &&
2467 2467 softs->slen < AAC_ARQ64_LENGTH)
2468 2468 softs->slen = AAC_ARQ64_LENGTH;
2469 2469
2470 2470 AACDB_PRINT(softs, CE_NOTE,
2471 2471 "!max_fibs %d max_fibsize 0x%x max_sectors %d max_sg %d",
2472 2472 softs->aac_max_fibs, softs->aac_max_fib_size,
2473 2473 softs->aac_max_sectors, softs->aac_sg_tablesize);
2474 2474
2475 2475 return (AACOK);
2476 2476 }
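
A small user-level sketch that redoes the old-comm sizing arithmetic from
aac_check_firmware() with placeholder constants; the structure sizes below are
assumptions for illustration, not the real values from aac.h.

	#include <stdio.h>

	int
	main(void)
	{
		/* Placeholder sizes -- NOT the real aac.h values. */
		unsigned fib_datasize = 480;	/* assumed AAC_FIB_DATASIZE */
		unsigned bw64 = 60;		/* assumed sizeof (struct aac_blockwrite64) */
		unsigned sge64 = 12;		/* assumed sizeof (struct aac_sg_entry64) */
		unsigned max_sectors = 128;	/* old comm default, 64K */

		/*
		 * One S/G entry is already embedded in the blockwrite struct,
		 * hence the "+ sge64" before dividing by the entry size.
		 */
		unsigned sg_tablesize = (fib_datasize - bw64 + sge64) / sge64;

		/* DMA attribute limits derived from max_sectors (512-byte sectors). */
		unsigned maxxfer = max_sectors << 9;

		printf("sg_tablesize=%u maxxfer=%u count_max=%u\n",
		    sg_tablesize, maxxfer, maxxfer - 1);
		return (0);
	}
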
2477 2477
2478 2478 static void
2479 2479 aac_fsa_rev(struct aac_softstate *softs, struct FsaRev *fsarev0,
2480 2480 struct FsaRev *fsarev1)
2481 2481 {
2482 2482 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
2483 2483
2484 2484 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.dash);
2485 2485 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.type);
2486 2486 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.minor);
2487 2487 AAC_GET_FIELD8(acc, fsarev1, fsarev0, external.comp.major);
2488 2488 AAC_GET_FIELD32(acc, fsarev1, fsarev0, buildNumber);
2489 2489 }
2490 2490
2491 2491 /*
2492 2492 * The following function comes from Adaptec:
2493 2493 *
2494 2494 * Query adapter information and supplement adapter information
2495 2495 */
2496 2496 static int
2497 2497 aac_get_adapter_info(struct aac_softstate *softs,
2498 2498 struct aac_adapter_info *ainfr, struct aac_supplement_adapter_info *sinfr)
2499 2499 {
2500 2500 struct aac_cmd *acp = &softs->sync_ac;
2501 2501 ddi_acc_handle_t acc;
2502 2502 struct aac_fib *fibp;
2503 2503 struct aac_adapter_info *ainfp;
2504 2504 struct aac_supplement_adapter_info *sinfp;
2505 2505 int rval;
2506 2506
2507 2507 (void) aac_sync_fib_slot_bind(softs, acp);
2508 2508 acc = acp->slotp->fib_acc_handle;
2509 2509 fibp = acp->slotp->fibp;
2510 2510
2511 2511 ddi_put8(acc, &fibp->data[0], 0);
2512 2512 if (aac_sync_fib(softs, RequestAdapterInfo,
2513 2513 AAC_FIB_SIZEOF(struct aac_adapter_info)) != AACOK) {
2514 2514 AACDB_PRINT(softs, CE_WARN, "RequestAdapterInfo failed");
2515 2515 rval = AACERR;
2516 2516 goto finish;
2517 2517 }
2518 2518 ainfp = (struct aac_adapter_info *)fibp->data;
2519 2519 if (ainfr) {
2520 2520 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2521 2521 AAC_GET_FIELD32(acc, ainfr, ainfp, PlatformBase);
2522 2522 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuArchitecture);
2523 2523 AAC_GET_FIELD32(acc, ainfr, ainfp, CpuVariant);
2524 2524 AAC_GET_FIELD32(acc, ainfr, ainfp, ClockSpeed);
2525 2525 AAC_GET_FIELD32(acc, ainfr, ainfp, ExecutionMem);
2526 2526 AAC_GET_FIELD32(acc, ainfr, ainfp, BufferMem);
2527 2527 AAC_GET_FIELD32(acc, ainfr, ainfp, TotalMem);
2528 2528 aac_fsa_rev(softs, &ainfp->KernelRevision,
2529 2529 &ainfr->KernelRevision);
2530 2530 aac_fsa_rev(softs, &ainfp->MonitorRevision,
2531 2531 &ainfr->MonitorRevision);
2532 2532 aac_fsa_rev(softs, &ainfp->HardwareRevision,
2533 2533 &ainfr->HardwareRevision);
2534 2534 aac_fsa_rev(softs, &ainfp->BIOSRevision,
2535 2535 &ainfr->BIOSRevision);
2536 2536 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusteringEnabled);
2537 2537 AAC_GET_FIELD32(acc, ainfr, ainfp, ClusterChannelMask);
2538 2538 AAC_GET_FIELD64(acc, ainfr, ainfp, SerialNumber);
2539 2539 AAC_GET_FIELD32(acc, ainfr, ainfp, batteryPlatform);
2540 2540 AAC_GET_FIELD32(acc, ainfr, ainfp, SupportedOptions);
2541 2541 AAC_GET_FIELD32(acc, ainfr, ainfp, OemVariant);
2542 2542 }
2543 2543 if (sinfr) {
2544 2544 if (!(softs->support_opt &
2545 2545 AAC_SUPPORTED_SUPPLEMENT_ADAPTER_INFO)) {
2546 2546 AACDB_PRINT(softs, CE_WARN,
2547 2547 "SupplementAdapterInfo not supported");
2548 2548 rval = AACERR;
2549 2549 goto finish;
2550 2550 }
2551 2551 ddi_put8(acc, &fibp->data[0], 0);
2552 2552 if (aac_sync_fib(softs, RequestSupplementAdapterInfo,
2553 2553 AAC_FIB_SIZEOF(struct aac_supplement_adapter_info))
2554 2554 != AACOK) {
2555 2555 AACDB_PRINT(softs, CE_WARN,
2556 2556 "RequestSupplementAdapterInfo failed");
2557 2557 rval = AACERR;
2558 2558 goto finish;
2559 2559 }
2560 2560 sinfp = (struct aac_supplement_adapter_info *)fibp->data;
2561 2561 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, AdapterTypeText[0], 17+1);
2562 2562 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, Pad[0], 2);
2563 2563 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashMemoryByteSize);
2564 2564 AAC_GET_FIELD32(acc, sinfr, sinfp, FlashImageId);
2565 2565 AAC_GET_FIELD32(acc, sinfr, sinfp, MaxNumberPorts);
2566 2566 AAC_GET_FIELD32(acc, sinfr, sinfp, Version);
2567 2567 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits);
2568 2568 AAC_GET_FIELD8(acc, sinfr, sinfp, SlotNumber);
2569 2569 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, ReservedPad0[0], 3);
2570 2570 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, BuildDate[0], 12);
2571 2571 AAC_GET_FIELD32(acc, sinfr, sinfp, CurrentNumberPorts);
2572 2572 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, VpdInfo,
2573 2573 sizeof (struct vpd_info));
2574 2574 aac_fsa_rev(softs, &sinfp->FlashFirmwareRevision,
2575 2575 &sinfr->FlashFirmwareRevision);
2576 2576 AAC_GET_FIELD32(acc, sinfr, sinfp, RaidTypeMorphOptions);
2577 2577 aac_fsa_rev(softs, &sinfp->FlashFirmwareBootRevision,
2578 2578 &sinfr->FlashFirmwareBootRevision);
2579 2579 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgPcbaSerialNo,
2580 2580 MFG_PCBA_SERIAL_NUMBER_WIDTH);
2581 2581 AAC_REP_GET_FIELD8(acc, sinfr, sinfp, MfgWWNName[0],
2582 2582 MFG_WWN_WIDTH);
2583 2583 AAC_GET_FIELD32(acc, sinfr, sinfp, SupportedOptions2);
2584 2584 AAC_GET_FIELD32(acc, sinfr, sinfp, ExpansionFlag);
2585 2585 if (sinfr->ExpansionFlag == 1) {
2586 2586 AAC_GET_FIELD32(acc, sinfr, sinfp, FeatureBits3);
2587 2587 AAC_GET_FIELD32(acc, sinfr, sinfp,
2588 2588 SupportedPerformanceMode);
2589 2589 AAC_REP_GET_FIELD32(acc, sinfr, sinfp,
2590 2590 ReservedGrowth[0], 80);
2591 2591 }
2592 2592 }
2593 2593 rval = AACOK;
2594 2594 finish:
2595 2595 aac_sync_fib_slot_release(softs, acp);
2596 2596 return (rval);
2597 2597 }
2598 2598
2599 2599 static int
2600 2600 aac_get_bus_info(struct aac_softstate *softs, uint32_t *bus_max,
2601 2601 uint32_t *tgt_max)
2602 2602 {
2603 2603 struct aac_cmd *acp = &softs->sync_ac;
2604 2604 ddi_acc_handle_t acc;
2605 2605 struct aac_fib *fibp;
2606 2606 struct aac_ctcfg *c_cmd;
2607 2607 struct aac_ctcfg_resp *c_resp;
2608 2608 uint32_t scsi_method_id;
2609 2609 struct aac_bus_info *cmd;
2610 2610 struct aac_bus_info_response *resp;
2611 2611 int rval;
2612 2612
2613 2613 (void) aac_sync_fib_slot_bind(softs, acp);
2614 2614 acc = acp->slotp->fib_acc_handle;
2615 2615 fibp = acp->slotp->fibp;
2616 2616
2617 2617 /* Detect MethodId */
2618 2618 c_cmd = (struct aac_ctcfg *)&fibp->data[0];
2619 2619 ddi_put32(acc, &c_cmd->Command, VM_ContainerConfig);
2620 2620 ddi_put32(acc, &c_cmd->cmd, CT_GET_SCSI_METHOD);
2621 2621 ddi_put32(acc, &c_cmd->param, 0);
2622 2622 rval = aac_sync_fib(softs, ContainerCommand,
2623 2623 AAC_FIB_SIZEOF(struct aac_ctcfg));
2624 2624 c_resp = (struct aac_ctcfg_resp *)&fibp->data[0];
2625 2625 if (rval != AACOK || ddi_get32(acc, &c_resp->Status) != 0) {
2626 2626 AACDB_PRINT(softs, CE_WARN,
2627 2627 "VM_ContainerConfig command fail");
2628 2628 rval = AACERR;
2629 2629 goto finish;
2630 2630 }
2631 2631 scsi_method_id = ddi_get32(acc, &c_resp->param);
2632 2632
2633 2633 /* Detect phys. bus count and max. target id first */
2634 2634 cmd = (struct aac_bus_info *)&fibp->data[0];
2635 2635 ddi_put32(acc, &cmd->Command, VM_Ioctl);
2636 2636 ddi_put32(acc, &cmd->ObjType, FT_DRIVE); /* physical drive */
2637 2637 ddi_put32(acc, &cmd->MethodId, scsi_method_id);
2638 2638 ddi_put32(acc, &cmd->ObjectId, 0);
2639 2639 ddi_put32(acc, &cmd->CtlCmd, GetBusInfo);
2640 2640 /*
2641 2641	 * For VM_Ioctl, the firmware uses the Header.Size filled in by the
2642 2642 * driver as the size to be returned. Therefore the driver has to use
2643 2643 * sizeof (struct aac_bus_info_response) because it is greater than
2644 2644 * sizeof (struct aac_bus_info).
2645 2645 */
2646 2646 rval = aac_sync_fib(softs, ContainerCommand,
2647 2647 AAC_FIB_SIZEOF(struct aac_bus_info_response));
2648 2648 resp = (struct aac_bus_info_response *)cmd;
2649 2649
2650 2650 /* Scan all coordinates with INQUIRY */
2651 2651 if ((rval != AACOK) || (ddi_get32(acc, &resp->Status) != 0)) {
2652 2652 AACDB_PRINT(softs, CE_WARN, "GetBusInfo command fail");
2653 2653 rval = AACERR;
2654 2654 goto finish;
2655 2655 }
2656 2656 *bus_max = ddi_get32(acc, &resp->BusCount);
2657 2657 *tgt_max = ddi_get32(acc, &resp->TargetsPerBus);
2658 2658
2659 2659 finish:
2660 2660 aac_sync_fib_slot_release(softs, acp);
2661 2661 return (AACOK);
2662 2662 }
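
The Header.Size note above aac_get_bus_info() depends on the response structure
being at least as large as the request. A minimal sketch of how that assumption
could be documented with the usual illumos compile-time assert (CTASSERT from
<sys/debug.h>); this guard is an illustration, not part of the driver.

	/* Illustrative guard only. */
	CTASSERT(sizeof (struct aac_bus_info_response) >=
	    sizeof (struct aac_bus_info));
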
2663 2663
2664 2664 /*
2665 2665 * The following function comes from Adaptec:
2666 2666 *
2667 2667 * Routine to be called during initialization of communications with
2668 2668 * the adapter to handle possible adapter configuration issues. When
2669 2669 * the adapter first boots up, it examines attached drives, etc, and
2670 2670 * potentially comes up with a new or revised configuration (relative to
2671 2671  * what's stored in its NVRAM). Additionally it may discover problems
2672 2672 * that make the current physical configuration unworkable (currently
2673 2673 * applicable only to cluster configuration issues).
2674 2674 *
2675 2675 * If there are no configuration issues or the issues are considered
2676 2676  * trivial by the adapter, it will set its configuration status to
2677 2677  * "FSACT_CONTINUE" and execute the "commit configuration" action
2678 2678  * automatically on its own.
2679 2679 *
2680 2680  * However, if there are non-trivial issues, the adapter will set its
2681 2681  * internal configuration status to "FSACT_PAUSE" or "FSACT_ABORT"
2682 2682 * and wait for some agent on the host to issue the "\ContainerCommand
2683 2683 * \VM_ContainerConfig\CT_COMMIT_CONFIG" FIB command to cause the
2684 2684 * adapter to commit the new/updated configuration and enable
2685 2685 * un-inhibited operation. The host agent should first issue the
2686 2686 * "\ContainerCommand\VM_ContainerConfig\CT_GET_CONFIG_STATUS" FIB
2687 2687 * command to obtain information about config issues detected by
2688 2688 * the adapter.
2689 2689 *
2690 2690 * Normally the adapter's PC BIOS will execute on the host following
2691 2691  * adapter poweron and reset and will be responsible for querying the
2692 2692 * adapter with CT_GET_CONFIG_STATUS and issuing the CT_COMMIT_CONFIG
2693 2693 * command if appropriate.
2694 2694 *
2695 2695 * However, with the introduction of IOP reset support, the adapter may
2696 2696 * boot up without the benefit of the adapter's PC BIOS host agent.
2697 2697 * This routine is intended to take care of these issues in situations
2698 2698 * where BIOS doesn't execute following adapter poweron or reset. The
2699 2699 * CT_COMMIT_CONFIG command is a no-op if it's already been issued, so
2700 2700 * there is no harm in doing this when it's already been done.
2701 2701 */
2702 2702 static int
2703 2703 aac_handle_adapter_config_issues(struct aac_softstate *softs)
2704 2704 {
2705 2705 struct aac_cmd *acp = &softs->sync_ac;
2706 2706 ddi_acc_handle_t acc;
2707 2707 struct aac_fib *fibp;
2708 2708 struct aac_Container *cmd;
2709 2709 struct aac_Container_resp *resp;
2710 2710 struct aac_cf_status_header *cfg_sts_hdr;
2711 2711 uint32_t resp_status;
2712 2712 uint32_t ct_status;
2713 2713 uint32_t cfg_stat_action;
2714 2714 int rval;
2715 2715
2716 2716 (void) aac_sync_fib_slot_bind(softs, acp);
2717 2717 acc = acp->slotp->fib_acc_handle;
2718 2718 fibp = acp->slotp->fibp;
2719 2719
2720 2720 /* Get adapter config status */
2721 2721 cmd = (struct aac_Container *)&fibp->data[0];
2722 2722
2723 2723 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2724 2724 ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2725 2725 ddi_put32(acc, &cmd->CTCommand.command, CT_GET_CONFIG_STATUS);
2726 2726 ddi_put32(acc, &cmd->CTCommand.param[CNT_SIZE],
2727 2727 sizeof (struct aac_cf_status_header));
2728 2728 rval = aac_sync_fib(softs, ContainerCommand,
2729 2729 AAC_FIB_SIZEOF(struct aac_Container));
2730 2730 resp = (struct aac_Container_resp *)cmd;
2731 2731 cfg_sts_hdr = (struct aac_cf_status_header *)resp->CTResponse.data;
2732 2732
2733 2733 resp_status = ddi_get32(acc, &resp->Status);
2734 2734 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2735 2735 if ((rval == AACOK) && (resp_status == 0) && (ct_status == CT_OK)) {
2736 2736 cfg_stat_action = ddi_get32(acc, &cfg_sts_hdr->action);
2737 2737
2738 2738 /* Commit configuration if it's reasonable to do so. */
2739 2739 if (cfg_stat_action <= CFACT_PAUSE) {
2740 2740 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
2741 2741 ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
2742 2742 ddi_put32(acc, &cmd->CTCommand.command,
2743 2743 CT_COMMIT_CONFIG);
2744 2744 rval = aac_sync_fib(softs, ContainerCommand,
2745 2745 AAC_FIB_SIZEOF(struct aac_Container));
2746 2746
2747 2747 resp_status = ddi_get32(acc, &resp->Status);
2748 2748 ct_status = ddi_get32(acc, &resp->CTResponse.param[0]);
2749 2749 if ((rval == AACOK) && (resp_status == 0) &&
2750 2750 (ct_status == CT_OK))
2751 2751 /* Successful completion */
2752 2752 rval = AACMPE_OK;
2753 2753 else
2754 2754 /* Auto-commit aborted due to error(s). */
2755 2755 rval = AACMPE_COMMIT_CONFIG;
2756 2756 } else {
2757 2757 /*
2758 2758 * Auto-commit aborted due to adapter indicating
2759 2759 * configuration issue(s) too dangerous to auto-commit.
2760 2760 */
2761 2761 rval = AACMPE_CONFIG_STATUS;
2762 2762 }
2763 2763 } else {
2764 2764 cmn_err(CE_WARN, "!Configuration issue, auto-commit aborted");
2765 2765 rval = AACMPE_CONFIG_STATUS;
2766 2766 }
2767 2767
2768 2768 aac_sync_fib_slot_release(softs, acp);
2769 2769 return (rval);
2770 2770 }
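
Condensed, the policy in aac_handle_adapter_config_issues() above is a single
comparison against CFACT_PAUSE. A minimal sketch of that decision, assuming only
the CFACT_* constants already used by the function:

	/*
	 * Host-agent flow in miniature:
	 *   CT_GET_CONFIG_STATUS  ->  aac_cf_status_header.action
	 *       action <= CFACT_PAUSE :  safe to issue CT_COMMIT_CONFIG
	 *       otherwise             :  leave the commit to the administrator
	 */
	static int
	toy_should_autocommit(uint32_t action)
	{
		return (action <= CFACT_PAUSE);
	}
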
2771 2771
2772 2772 /*
2773 2773 * Hardware initialization and resource allocation
2774 2774 */
2775 2775 static int
2776 2776 aac_common_attach(struct aac_softstate *softs)
2777 2777 {
2778 2778 uint32_t status;
2779 2779 int i;
2780 2780 struct aac_supplement_adapter_info sinf;
2781 2781
2782 2782 DBCALLED(softs, 1);
2783 2783
2784 2784 /*
2785 2785 * Do a little check here to make sure there aren't any outstanding
2786 2786 * FIBs in the message queue. At this point there should not be and
2787 2787	 * FIBs in the message queue. At this point there should not be any;
2788 2788	 * if there are, they are probably left over from another instance of
2789 2789	 * the driver, e.g. when the system crashes and the crash dump driver
2790 2790 */
2791 2791 while (AAC_OUTB_GET(softs) != 0xfffffffful)
2792 2792 ;
2793 2793
2794 2794 /*
2795 2795	 * Wait for the card to complete booting up before doing anything that
2796 2796 * attempts to communicate with it.
2797 2797 */
2798 2798 status = AAC_FWSTATUS_GET(softs);
2799 2799 if (status == AAC_SELF_TEST_FAILED || status == AAC_KERNEL_PANIC)
2800 2800 goto error;
2801 2801 i = AAC_FWUP_TIMEOUT * 1000; /* set timeout */
2802 2802 AAC_BUSYWAIT(AAC_FWSTATUS_GET(softs) & AAC_KERNEL_UP_AND_RUNNING, i);
2803 2803 if (i == 0) {
2804 2804 cmn_err(CE_CONT, "?Fatal error: controller not ready");
2805 2805 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2806 2806 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2807 2807 goto error;
2808 2808 }
2809 2809
2810 2810 /* Read and set card supported options and settings */
2811 2811 if (aac_check_firmware(softs) == AACERR) {
2812 2812 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2813 2813 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2814 2814 goto error;
2815 2815 }
2816 2816
2817 2817 /* Add interrupt handlers */
2818 2818 if (aac_register_intrs(softs) == AACERR) {
2819 2819 cmn_err(CE_CONT,
2820 2820 "?Fatal error: interrupts register failed");
2821 2821 goto error;
2822 2822 }
2823 2823
2824 2824 /* Setup communication space with the card */
2825 2825 if (softs->comm_space_dma_handle == NULL) {
2826 2826 if (aac_alloc_comm_space(softs) != AACOK)
2827 2827 goto error;
2828 2828 }
2829 2829 if (aac_setup_comm_space(softs) != AACOK) {
2830 2830 cmn_err(CE_CONT, "?Setup communication space failed");
2831 2831 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2832 2832 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2833 2833 goto error;
2834 2834 }
2835 2835
2836 2836 #ifdef DEBUG
2837 2837 if (aac_get_fw_debug_buffer(softs) != AACOK)
2838 2838 cmn_err(CE_CONT, "?firmware UART trace not supported");
2839 2839 #endif
2840 2840
2841 2841 /* Allocate slots */
2842 2842 if ((softs->total_slots == 0) && (aac_create_slots(softs) != AACOK)) {
2843 2843 cmn_err(CE_CONT, "?Fatal error: slots allocate failed");
2844 2844 goto error;
2845 2845 }
2846 2846 AACDB_PRINT(softs, CE_NOTE, "%d slots allocated", softs->total_slots);
2847 2847
2848 2848 /* Allocate FIBs */
2849 2849 if (softs->total_fibs < softs->total_slots) {
2850 2850 aac_alloc_fibs(softs);
2851 2851 if (softs->total_fibs == 0)
2852 2852 goto error;
2853 2853 AACDB_PRINT(softs, CE_NOTE, "%d fibs allocated",
2854 2854 softs->total_fibs);
2855 2855 }
2856 2856
2857 2857 AAC_STATUS_CLR(softs, ~0); /* Clear out all interrupts */
2858 2858 AAC_ENABLE_INTR(softs); /* Enable the interrupts we can handle */
2859 2859
2860 2860 if (aac_get_adapter_info(softs, NULL, &sinf) == AACOK) {
2861 2861 softs->feature_bits = sinf.FeatureBits;
2862 2862 softs->support_opt2 = sinf.SupportedOptions2;
2863 2863
2864 2864 /* Get adapter names */
2865 2865 if (CARD_IS_UNKNOWN(softs->card)) {
2866 2866 char *p, *p0, *p1;
2867 2867
2868 2868 /*
2869 2869 * Now find the controller name in supp_adapter_info->
2870 2870 * AdapterTypeText. Use the first word as the vendor
2871 2871 * and the other words as the product name.
2872 2872 */
2873 2873 AACDB_PRINT(softs, CE_NOTE, "sinf.AdapterTypeText = "
2874 2874 "\"%s\"", sinf.AdapterTypeText);
2875 2875 p = sinf.AdapterTypeText;
2876 2876 p0 = p1 = NULL;
2877 2877			/* Skip leading spaces */
2878 2878 while (*p && (*p == ' ' || *p == '\t'))
2879 2879 p++;
2880 2880 p0 = p;
2881 2881 while (*p && (*p != ' ' && *p != '\t'))
2882 2882 p++;
2883 2883 /* Remove middle spaces */
2884 2884 while (*p && (*p == ' ' || *p == '\t'))
2885 2885 *p++ = 0;
2886 2886 p1 = p;
2887 2887 /* Remove trailing spaces */
2888 2888 p = p1 + strlen(p1) - 1;
2889 2889 while (p > p1 && (*p == ' ' || *p == '\t'))
2890 2890 *p-- = 0;
2891 2891 if (*p0 && *p1) {
2892 2892 (void *)strncpy(softs->vendor_name, p0,
2893 2893 AAC_VENDOR_LEN);
2894 2894 (void *)strncpy(softs->product_name, p1,
2895 2895 AAC_PRODUCT_LEN);
2896 2896 } else {
2897 2897 cmn_err(CE_WARN,
2898 2898 "?adapter name mis-formatted\n");
2899 2899 if (*p0)
2900 2900 (void *)strncpy(softs->product_name,
2901 2901 p0, AAC_PRODUCT_LEN);
2902 2902 }
2903 2903 }
2904 2904 } else {
2905 2905 cmn_err(CE_CONT, "?Query adapter information failed");
2906 2906 }
2907 2907
2908 2908
2909 2909 cmn_err(CE_NOTE,
2910 2910 "!aac driver %d.%02d.%02d-%d, found card: " \
2911 2911 "%s %s(pci0x%x.%x.%x.%x) at 0x%x",
2912 2912 AAC_DRIVER_MAJOR_VERSION,
2913 2913 AAC_DRIVER_MINOR_VERSION,
2914 2914 AAC_DRIVER_BUGFIX_LEVEL,
2915 2915 AAC_DRIVER_BUILD,
2916 2916 softs->vendor_name, softs->product_name,
2917 2917 softs->vendid, softs->devid, softs->subvendid, softs->subsysid,
2918 2918 softs->pci_mem_base_paddr);
2919 2919
2920 2920 /* Perform acceptance of adapter-detected config changes if possible */
2921 2921 if (aac_handle_adapter_config_issues(softs) != AACMPE_OK) {
2922 2922 cmn_err(CE_CONT, "?Handle adapter config issues failed");
2923 2923 aac_fm_ereport(softs, DDI_FM_DEVICE_NO_RESPONSE);
2924 2924 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2925 2925 goto error;
2926 2926 }
2927 2927
2928 2928 /* Setup containers (logical devices) */
2929 2929 if (aac_probe_containers(softs) != AACOK) {
2930 2930 cmn_err(CE_CONT, "?Fatal error: get container info error");
2931 2931 goto error;
2932 2932 }
2933 2933
2934 2934	/* Check for JBOD support; disabled by default */
2935 2935 char *data;
2936 2936 if (softs->feature_bits & AAC_FEATURE_SUPPORTED_JBOD) {
2937 2937 if ((ddi_prop_lookup_string(DDI_DEV_T_ANY, softs->devinfo_p,
2938 2938 0, "jbod-enable", &data) == DDI_SUCCESS)) {
2939 2939 if (strcmp(data, "yes") == 0) {
2940 2940 AACDB_PRINT(softs, CE_NOTE,
2941 2941 "Enable JBOD access");
2942 2942 softs->flags |= AAC_FLAGS_JBOD;
2943 2943 }
2944 2944 ddi_prop_free(data);
2945 2945 }
2946 2946 }
2947 2947
2948 2948 /* Setup phys. devices */
2949 2949 if (softs->flags & (AAC_FLAGS_NONDASD | AAC_FLAGS_JBOD)) {
2950 2950 uint32_t bus_max, tgt_max;
2951 2951 uint32_t bus, tgt;
2952 2952 int index;
2953 2953
2954 2954 if (aac_get_bus_info(softs, &bus_max, &tgt_max) != AACOK) {
2955 2955 cmn_err(CE_CONT, "?Fatal error: get bus info error");
2956 2956 goto error;
2957 2957 }
2958 2958 AACDB_PRINT(softs, CE_NOTE, "bus_max=%d, tgt_max=%d",
2959 2959 bus_max, tgt_max);
2960 2960 if (bus_max != softs->bus_max || tgt_max != softs->tgt_max) {
2961 2961 if (softs->state & AAC_STATE_RESET) {
2962 2962 cmn_err(CE_WARN,
2963 2963 "?Fatal error: bus map changed");
2964 2964 goto error;
2965 2965 }
2966 2966 softs->bus_max = bus_max;
2967 2967 softs->tgt_max = tgt_max;
2968 2968 if (softs->nondasds) {
2969 2969 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
2970 2970 sizeof (struct aac_nondasd));
2971 2971 }
2972 2972 softs->nondasds = kmem_zalloc(AAC_MAX_PD(softs) * \
2973 2973 sizeof (struct aac_nondasd), KM_SLEEP);
2974 2974
2975 2975 index = 0;
2976 2976 for (bus = 0; bus < softs->bus_max; bus++) {
2977 2977 for (tgt = 0; tgt < softs->tgt_max; tgt++) {
2978 2978 struct aac_nondasd *dvp =
2979 2979 &softs->nondasds[index++];
2980 2980 dvp->dev.type = AAC_DEV_PD;
2981 2981 dvp->bus = bus;
2982 2982 dvp->tid = tgt;
2983 2983 }
2984 2984 }
2985 2985 }
2986 2986 }
2987 2987
2988 2988 /* Check dma & acc handles allocated in attach */
2989 2989 if (aac_check_dma_handle(softs->comm_space_dma_handle) != DDI_SUCCESS) {
2990 2990 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2991 2991 goto error;
2992 2992 }
2993 2993
2994 2994 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
2995 2995 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
2996 2996 goto error;
2997 2997 }
2998 2998
2999 2999 for (i = 0; i < softs->total_slots; i++) {
3000 3000 if (aac_check_dma_handle(softs->io_slot[i].fib_dma_handle) !=
3001 3001 DDI_SUCCESS) {
3002 3002 ddi_fm_service_impact(softs->devinfo_p,
3003 3003 DDI_SERVICE_LOST);
3004 3004 goto error;
3005 3005 }
3006 3006 }
3007 3007
3008 3008 return (AACOK);
3009 3009 error:
3010 3010 if (softs->state & AAC_STATE_RESET)
3011 3011 return (AACERR);
3012 3012 if (softs->nondasds) {
3013 3013 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
3014 3014 sizeof (struct aac_nondasd));
3015 3015 softs->nondasds = NULL;
3016 3016 }
3017 3017 if (softs->total_fibs > 0)
3018 3018 aac_destroy_fibs(softs);
3019 3019 if (softs->total_slots > 0)
3020 3020 aac_destroy_slots(softs);
3021 3021 if (softs->comm_space_dma_handle)
3022 3022 aac_free_comm_space(softs);
3023 3023 return (AACERR);
3024 3024 }
3025 3025
3026 3026 /*
3027 3027 * Hardware shutdown and resource release
3028 3028 */
3029 3029 static void
3030 3030 aac_common_detach(struct aac_softstate *softs)
3031 3031 {
3032 3032 DBCALLED(softs, 1);
3033 3033
3034 3034 aac_unregister_intrs(softs);
3035 3035
3036 3036 mutex_enter(&softs->io_lock);
3037 3037 (void) aac_shutdown(softs);
3038 3038
3039 3039 if (softs->nondasds) {
3040 3040 kmem_free(softs->nondasds, AAC_MAX_PD(softs) * \
3041 3041 sizeof (struct aac_nondasd));
3042 3042 softs->nondasds = NULL;
3043 3043 }
3044 3044 aac_destroy_fibs(softs);
3045 3045 aac_destroy_slots(softs);
3046 3046 aac_free_comm_space(softs);
3047 3047 mutex_exit(&softs->io_lock);
3048 3048 }
3049 3049
3050 3050 /*
3051 3051 * Send a synchronous command to the controller and wait for a result.
3052 3052 * Indicate if the controller completed the command with an error status.
3053 3053 */
3054 3054 int
3055 3055 aac_sync_mbcommand(struct aac_softstate *softs, uint32_t cmd,
3056 3056 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3,
3057 3057 uint32_t *statusp)
3058 3058 {
3059 3059 int timeout;
3060 3060 uint32_t status;
3061 3061
3062 3062 if (statusp != NULL)
3063 3063 *statusp = SRB_STATUS_SUCCESS;
3064 3064
3065 3065 /* Fill in mailbox */
3066 3066 AAC_MAILBOX_SET(softs, cmd, arg0, arg1, arg2, arg3);
3067 3067
3068 3068 /* Ensure the sync command doorbell flag is cleared */
3069 3069 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
3070 3070
3071 3071 /* Then set it to signal the adapter */
3072 3072 AAC_NOTIFY(softs, AAC_DB_SYNC_COMMAND);
3073 3073
3074 3074 /* Spin waiting for the command to complete */
3075 3075 timeout = AAC_IMMEDIATE_TIMEOUT * 1000;
3076 3076 AAC_BUSYWAIT(AAC_STATUS_GET(softs) & AAC_DB_SYNC_COMMAND, timeout);
3077 3077 if (!timeout) {
3078 3078 AACDB_PRINT(softs, CE_WARN,
3079 3079 "Sync command timed out after %d seconds (0x%x)!",
3080 3080 AAC_IMMEDIATE_TIMEOUT, AAC_FWSTATUS_GET(softs));
3081 3081 return (AACERR);
3082 3082 }
3083 3083
3084 3084 /* Clear the completion flag */
3085 3085 AAC_STATUS_CLR(softs, AAC_DB_SYNC_COMMAND);
3086 3086
3087 3087 /* Get the command status */
3088 3088 status = AAC_MAILBOX_GET(softs, 0);
3089 3089 if (statusp != NULL)
3090 3090 *statusp = status;
3091 3091 if (status != SRB_STATUS_SUCCESS) {
3092 3092 AACDB_PRINT(softs, CE_WARN,
3093 3093 "Sync command fail: status = 0x%x", status);
3094 3094 return (AACERR);
3095 3095 }
3096 3096
3097 3097 return (AACOK);
3098 3098 }
3099 3099
3100 3100 /*
3101 3101 * Send a synchronous FIB to the adapter and wait for its completion
3102 3102 */
3103 3103 static int
3104 3104 aac_sync_fib(struct aac_softstate *softs, uint16_t cmd, uint16_t fibsize)
3105 3105 {
3106 3106 struct aac_cmd *acp = &softs->sync_ac;
3107 3107
3108 3108 acp->flags = AAC_CMD_SYNC | AAC_CMD_IN_SYNC_SLOT;
3109 3109 if (softs->state & AAC_STATE_INTR)
3110 3110 acp->flags |= AAC_CMD_NO_CB;
3111 3111 else
3112 3112 acp->flags |= AAC_CMD_NO_INTR;
3113 3113
3114 3114 acp->ac_comp = aac_sync_complete;
3115 3115 acp->timeout = AAC_SYNC_TIMEOUT;
3116 3116 acp->fib_size = fibsize;
3117 3117
3118 3118 /*
3119 3119 	 * Only the sync fib header needs to be set up here; the caller
3120 3120 	 * should have initialized the fib data
3121 3121 */
3122 3122 aac_cmd_fib_header(softs, acp, cmd);
3123 3123
3124 3124 (void) ddi_dma_sync(acp->slotp->fib_dma_handle, 0, fibsize,
3125 3125 DDI_DMA_SYNC_FORDEV);
3126 3126
3127 3127 aac_start_io(softs, acp);
3128 3128
3129 3129 if (softs->state & AAC_STATE_INTR)
3130 3130 return (aac_do_sync_io(softs, acp));
3131 3131 else
3132 3132 return (aac_do_poll_io(softs, acp));
3133 3133 }
3134 3134
3135 3135 static void
3136 3136 aac_cmd_initq(struct aac_cmd_queue *q)
3137 3137 {
3138 3138 q->q_head = NULL;
3139 3139 q->q_tail = (struct aac_cmd *)&q->q_head;
3140 3140 }
3141 3141
3142 3142 /*
3143 3143 * Remove a cmd from the head of q
3144 3144 */
3145 3145 static struct aac_cmd *
3146 3146 aac_cmd_dequeue(struct aac_cmd_queue *q)
3147 3147 {
3148 3148 struct aac_cmd *acp;
3149 3149
3150 3150 _NOTE(ASSUMING_PROTECTED(*q))
3151 3151
3152 3152 if ((acp = q->q_head) != NULL) {
3153 3153 if ((q->q_head = acp->next) != NULL)
3154 3154 acp->next = NULL;
3155 3155 else
3156 3156 q->q_tail = (struct aac_cmd *)&q->q_head;
3157 3157 acp->prev = NULL;
3158 3158 }
3159 3159 return (acp);
3160 3160 }
3161 3161
3162 3162 /*
3163 3163 * Add a cmd to the tail of q
3164 3164 */
3165 3165 static void
3166 3166 aac_cmd_enqueue(struct aac_cmd_queue *q, struct aac_cmd *acp)
3167 3167 {
3168 3168 ASSERT(acp->next == NULL);
3169 3169 acp->prev = q->q_tail;
3170 3170 q->q_tail->next = acp;
3171 3171 q->q_tail = acp;
3172 3172 }
3173 3173
3174 3174 /*
3175 3175 * Remove the cmd ac from q
3176 3176 */
3177 3177 static void
3178 3178 aac_cmd_delete(struct aac_cmd_queue *q, struct aac_cmd *acp)
3179 3179 {
3180 3180 if (acp->prev) {
3181 3181 if ((acp->prev->next = acp->next) != NULL) {
3182 3182 acp->next->prev = acp->prev;
3183 3183 acp->next = NULL;
3184 3184 } else {
3185 3185 q->q_tail = acp->prev;
3186 3186 }
3187 3187 acp->prev = NULL;
3188 3188 }
3189 3189 /* ac is not in the queue */
3190 3190 }
3191 3191
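
Reviewer note, not part of the diff: aac_cmd_initq()/aac_cmd_enqueue() rely on q_tail initially pointing at &q_head cast to (struct aac_cmd *), so that enqueueing onto an empty queue writes q_head through q_tail->next and no empty-queue special case is needed; this presumes next is the first member of struct aac_cmd. A minimal, self-contained userland sketch of the same idiom, with simplified types:

#include <assert.h>
#include <stddef.h>

struct node {
	struct node *next;	/* must be the first member for the &head cast to work */
	int val;
};

struct queue {
	struct node *head;
	struct node *tail;	/* points at (struct node *)&head while empty */
};

static void
q_init(struct queue *q)
{
	q->head = NULL;
	q->tail = (struct node *)&q->head;
}

static void
q_put(struct queue *q, struct node *n)
{
	n->next = NULL;
	q->tail->next = n;	/* stores into q->head when the queue is empty */
	q->tail = n;
}

static struct node *
q_get(struct queue *q)
{
	struct node *n = q->head;

	if (n != NULL && (q->head = n->next) == NULL)
		q->tail = (struct node *)&q->head;
	return (n);
}

int
main(void)
{
	struct queue q;
	struct node a = { NULL, 1 }, b = { NULL, 2 };

	q_init(&q);
	q_put(&q, &a);
	q_put(&q, &b);
	assert(q_get(&q)->val == 1);
	assert(q_get(&q)->val == 2);
	assert(q_get(&q) == NULL);
	return (0);
}
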
3192 3192 /*
3193 3193  * Atomically insert an entry into the nominated queue, returning 0 on success or
3194 3194 * AACERR if the queue is full.
3195 3195 *
3196 3196 * Note: it would be more efficient to defer notifying the controller in
3197 3197 * the case where we may be inserting several entries in rapid succession,
3198 3198 * but implementing this usefully may be difficult (it would involve a
3199 3199 * separate queue/notify interface).
3200 3200 */
3201 3201 static int
3202 3202 aac_fib_enqueue(struct aac_softstate *softs, int queue, uint32_t fib_addr,
3203 3203 uint32_t fib_size)
3204 3204 {
3205 3205 ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3206 3206 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3207 3207 uint32_t pi, ci;
3208 3208
3209 3209 DBCALLED(softs, 2);
3210 3210
3211 3211 ASSERT(queue == AAC_ADAP_NORM_CMD_Q || queue == AAC_ADAP_NORM_RESP_Q);
3212 3212
3213 3213 /* Get the producer/consumer indices */
3214 3214 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3215 3215 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3216 3216 DDI_DMA_SYNC_FORCPU);
3217 3217 if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3218 3218 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3219 3219 return (AACERR);
3220 3220 }
3221 3221
3222 3222 pi = ddi_get32(acc,
3223 3223 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3224 3224 ci = ddi_get32(acc,
3225 3225 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3226 3226
3227 3227 /*
3228 3228 	 * Wrap the producer index first, before we check whether
3229 3229 	 * the queue is full
3230 3230 */
3231 3231 if (pi >= aac_qinfo[queue].size)
3232 3232 pi = 0;
3233 3233
3234 3234 /* XXX queue full */
3235 3235 if ((pi + 1) == ci)
3236 3236 return (AACERR);
3237 3237
3238 3238 /* Fill in queue entry */
3239 3239 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_size), fib_size);
3240 3240 ddi_put32(acc, &((softs->qentries[queue] + pi)->aq_fib_addr), fib_addr);
3241 3241 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3242 3242 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3243 3243 DDI_DMA_SYNC_FORDEV);
3244 3244
3245 3245 /* Update producer index */
3246 3246 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX],
3247 3247 pi + 1);
3248 3248 (void) ddi_dma_sync(dma,
3249 3249 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX] - \
3250 3250 (uintptr_t)softs->comm_space, sizeof (uint32_t),
3251 3251 DDI_DMA_SYNC_FORDEV);
3252 3252
3253 3253 if (aac_qinfo[queue].notify != 0)
3254 3254 AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3255 3255 return (AACOK);
3256 3256 }
3257 3257
3258 3258 /*
3259 3259  * Atomically remove one entry from the nominated queue, returning 0 on
3260 3260 * success or AACERR if the queue is empty.
3261 3261 */
3262 3262 static int
3263 3263 aac_fib_dequeue(struct aac_softstate *softs, int queue, int *idxp)
3264 3264 {
3265 3265 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3266 3266 ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3267 3267 uint32_t pi, ci;
3268 3268 int unfull = 0;
3269 3269
3270 3270 DBCALLED(softs, 2);
3271 3271
3272 3272 ASSERT(idxp);
3273 3273
3274 3274 /* Get the producer/consumer indices */
3275 3275 (void) ddi_dma_sync(dma, (uintptr_t)softs->qtablep->qt_qindex[queue] - \
3276 3276 (uintptr_t)softs->comm_space, sizeof (uint32_t) * 2,
3277 3277 DDI_DMA_SYNC_FORCPU);
3278 3278 pi = ddi_get32(acc,
3279 3279 &softs->qtablep->qt_qindex[queue][AAC_PRODUCER_INDEX]);
3280 3280 ci = ddi_get32(acc,
3281 3281 &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX]);
3282 3282
3283 3283 /* Check for queue empty */
3284 3284 if (ci == pi)
3285 3285 return (AACERR);
3286 3286
3287 3287 if (pi >= aac_qinfo[queue].size)
3288 3288 pi = 0;
3289 3289
3290 3290 /* Check for queue full */
3291 3291 if (ci == pi + 1)
3292 3292 unfull = 1;
3293 3293
3294 3294 /*
3295 3295 * The controller does not wrap the queue,
3296 3296 	 * so we have to do it ourselves
3297 3297 */
3298 3298 if (ci >= aac_qinfo[queue].size)
3299 3299 ci = 0;
3300 3300
3301 3301 /* Fetch the entry */
3302 3302 (void) ddi_dma_sync(dma, (uintptr_t)(softs->qentries[queue] + pi) - \
3303 3303 (uintptr_t)softs->comm_space, sizeof (struct aac_queue_entry),
3304 3304 DDI_DMA_SYNC_FORCPU);
3305 3305 if (aac_check_dma_handle(dma) != DDI_SUCCESS) {
3306 3306 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
3307 3307 return (AACERR);
3308 3308 }
3309 3309
3310 3310 switch (queue) {
3311 3311 case AAC_HOST_NORM_RESP_Q:
3312 3312 case AAC_HOST_HIGH_RESP_Q:
3313 3313 *idxp = ddi_get32(acc,
3314 3314 &(softs->qentries[queue] + ci)->aq_fib_addr);
3315 3315 break;
3316 3316
3317 3317 case AAC_HOST_NORM_CMD_Q:
3318 3318 case AAC_HOST_HIGH_CMD_Q:
3319 3319 *idxp = ddi_get32(acc,
3320 3320 &(softs->qentries[queue] + ci)->aq_fib_addr) / AAC_FIB_SIZE;
3321 3321 break;
3322 3322
3323 3323 default:
3324 3324 cmn_err(CE_NOTE, "!Invalid queue in aac_fib_dequeue()");
3325 3325 return (AACERR);
3326 3326 }
3327 3327
3328 3328 /* Update consumer index */
3329 3329 ddi_put32(acc, &softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX],
3330 3330 ci + 1);
3331 3331 (void) ddi_dma_sync(dma,
3332 3332 (uintptr_t)&softs->qtablep->qt_qindex[queue][AAC_CONSUMER_INDEX] - \
3333 3333 (uintptr_t)softs->comm_space, sizeof (uint32_t),
3334 3334 DDI_DMA_SYNC_FORDEV);
3335 3335
3336 3336 if (unfull && aac_qinfo[queue].notify != 0)
3337 3337 AAC_NOTIFY(softs, aac_qinfo[queue].notify);
3338 3338 return (AACOK);
3339 3339 }
3340 3340
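
Reviewer note, not part of the diff: aac_fib_enqueue()/aac_fib_dequeue() implement a producer/consumer ring in which one slot is always left unused, so pi == ci means empty and advancing pi onto ci would mean full; the driver wraps an index just before comparing rather than using a modulo. A minimal sketch of the equivalent index arithmetic (modulo form), with a hypothetical ring size:

#include <assert.h>
#include <stdint.h>

#define	RING_SIZE	8	/* hypothetical; the real sizes come from aac_qinfo[] */

static int
ring_full(uint32_t pi, uint32_t ci)
{
	return (((pi + 1) % RING_SIZE) == ci);
}

static int
ring_empty(uint32_t pi, uint32_t ci)
{
	return (pi == ci);
}

int
main(void)
{
	uint32_t pi = 0, ci = 0;
	int produced = 0, consumed = 0;

	while (!ring_full(pi, ci)) {		/* producer: fill slot pi, then advance */
		pi = (pi + 1) % RING_SIZE;
		produced++;
	}
	while (!ring_empty(pi, ci)) {		/* consumer: read slot ci, then advance */
		ci = (ci + 1) % RING_SIZE;
		consumed++;
	}
	assert(produced == RING_SIZE - 1);	/* one slot stays unused */
	assert(consumed == produced);
	return (0);
}
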
3341 3341 static struct aac_mntinforesp *
3342 3342 aac_get_mntinfo(struct aac_softstate *softs, int cid)
3343 3343 {
3344 3344 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3345 3345 struct aac_fib *fibp = softs->sync_ac.slotp->fibp;
3346 3346 struct aac_mntinfo *mi = (struct aac_mntinfo *)&fibp->data[0];
3347 3347 struct aac_mntinforesp *mir;
3348 3348
3349 3349 ddi_put32(acc, &mi->Command, /* Use 64-bit LBA if enabled */
3350 3350 (softs->flags & AAC_FLAGS_LBA_64BIT) ?
3351 3351 VM_NameServe64 : VM_NameServe);
3352 3352 ddi_put32(acc, &mi->MntType, FT_FILESYS);
3353 3353 ddi_put32(acc, &mi->MntCount, cid);
3354 3354
3355 3355 if (aac_sync_fib(softs, ContainerCommand,
3356 3356 AAC_FIB_SIZEOF(struct aac_mntinfo)) == AACERR) {
3357 3357 AACDB_PRINT(softs, CE_WARN, "Error probe container %d", cid);
3358 3358 return (NULL);
3359 3359 }
3360 3360
3361 3361 mir = (struct aac_mntinforesp *)&fibp->data[0];
3362 3362 if (ddi_get32(acc, &mir->Status) == ST_OK)
3363 3363 return (mir);
3364 3364 return (NULL);
3365 3365 }
3366 3366
3367 3367 static int
3368 3368 aac_get_container_count(struct aac_softstate *softs, int *count)
3369 3369 {
3370 3370 ddi_acc_handle_t acc;
3371 3371 struct aac_mntinforesp *mir;
3372 3372 int rval;
3373 3373
3374 3374 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
3375 3375 acc = softs->sync_ac.slotp->fib_acc_handle;
3376 3376
3377 3377 if ((mir = aac_get_mntinfo(softs, 0)) == NULL) {
3378 3378 rval = AACERR;
3379 3379 goto finish;
3380 3380 }
3381 3381 *count = ddi_get32(acc, &mir->MntRespCount);
3382 3382 if (*count > AAC_MAX_LD) {
3383 3383 AACDB_PRINT(softs, CE_CONT,
3384 3384 "container count(%d) > AAC_MAX_LD", *count);
3385 3385 rval = AACERR;
3386 3386 goto finish;
3387 3387 }
3388 3388 rval = AACOK;
3389 3389
3390 3390 finish:
3391 3391 aac_sync_fib_slot_release(softs, &softs->sync_ac);
3392 3392 return (rval);
3393 3393 }
3394 3394
3395 3395 static int
3396 3396 aac_get_container_uid(struct aac_softstate *softs, uint32_t cid, uint32_t *uid)
3397 3397 {
3398 3398 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3399 3399 struct aac_Container *ct = (struct aac_Container *) \
3400 3400 &softs->sync_ac.slotp->fibp->data[0];
3401 3401
3402 3402 bzero(ct, sizeof (*ct) - CT_PACKET_SIZE);
3403 3403 ddi_put32(acc, &ct->Command, VM_ContainerConfig);
3404 3404 ddi_put32(acc, &ct->CTCommand.command, CT_CID_TO_32BITS_UID);
3405 3405 ddi_put32(acc, &ct->CTCommand.param[0], cid);
3406 3406
3407 3407 if (aac_sync_fib(softs, ContainerCommand,
3408 3408 AAC_FIB_SIZEOF(struct aac_Container)) == AACERR)
3409 3409 return (AACERR);
3410 3410 if (ddi_get32(acc, &ct->CTCommand.param[0]) != CT_OK)
3411 3411 return (AACERR);
3412 3412
3413 3413 *uid = ddi_get32(acc, &ct->CTCommand.param[1]);
3414 3414 return (AACOK);
3415 3415 }
3416 3416
3417 3417 /*
3418 3418 * Request information of the container cid
3419 3419 */
3420 3420 static struct aac_mntinforesp *
3421 3421 aac_get_container_info(struct aac_softstate *softs, int cid)
3422 3422 {
3423 3423 ddi_acc_handle_t acc = softs->sync_ac.slotp->fib_acc_handle;
3424 3424 struct aac_mntinforesp *mir;
3425 3425 int rval_uid;
3426 3426 uint32_t uid;
3427 3427
3428 3428 /* Get container UID first so that it will not overwrite mntinfo */
3429 3429 rval_uid = aac_get_container_uid(softs, cid, &uid);
3430 3430
3431 3431 /* Get container basic info */
3432 3432 if ((mir = aac_get_mntinfo(softs, cid)) == NULL) {
3433 3433 AACDB_PRINT(softs, CE_CONT,
3434 3434 "query container %d info failed", cid);
3435 3435 return (NULL);
3436 3436 }
3437 3437 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE)
3438 3438 return (mir);
3439 3439 if (rval_uid != AACOK) {
3440 3440 AACDB_PRINT(softs, CE_CONT,
3441 3441 "query container %d uid failed", cid);
3442 3442 return (NULL);
3443 3443 }
3444 3444
3445 3445 ddi_put32(acc, &mir->Status, uid);
3446 3446 return (mir);
3447 3447 }
3448 3448
3449 3449 static enum aac_cfg_event
3450 3450 aac_probe_container(struct aac_softstate *softs, uint32_t cid)
3451 3451 {
3452 3452 enum aac_cfg_event event = AAC_CFG_NULL_NOEXIST;
3453 3453 struct aac_container *dvp = &softs->containers[cid];
3454 3454 struct aac_mntinforesp *mir;
3455 3455 ddi_acc_handle_t acc;
3456 3456
3457 3457 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
3458 3458 acc = softs->sync_ac.slotp->fib_acc_handle;
3459 3459
3460 3460 /* Get container basic info */
3461 3461 if ((mir = aac_get_container_info(softs, cid)) == NULL) {
3462 3462 /* AAC_CFG_NULL_NOEXIST */
3463 3463 goto finish;
3464 3464 }
3465 3465
3466 3466 if (ddi_get32(acc, &mir->MntObj.VolType) == CT_NONE) {
3467 3467 if (AAC_DEV_IS_VALID(&dvp->dev)) {
3468 3468 AACDB_PRINT(softs, CE_NOTE,
3469 3469 ">>> Container %d deleted", cid);
3470 3470 dvp->dev.flags &= ~AAC_DFLAG_VALID;
3471 3471 event = AAC_CFG_DELETE;
3472 3472 }
3473 3473 /* AAC_CFG_NULL_NOEXIST */
3474 3474 } else {
3475 3475 uint64_t size;
3476 3476 uint32_t uid;
3477 3477
3478 3478 event = AAC_CFG_NULL_EXIST;
3479 3479
3480 3480 size = AAC_MIR_SIZE(softs, acc, mir);
3481 3481 uid = ddi_get32(acc, &mir->Status);
3482 3482 if (AAC_DEV_IS_VALID(&dvp->dev)) {
3483 3483 if (dvp->uid != uid) {
3484 3484 AACDB_PRINT(softs, CE_WARN,
3485 3485 ">>> Container %u uid changed to %d",
3486 3486 cid, uid);
3487 3487 dvp->uid = uid;
3488 3488 event = AAC_CFG_CHANGE;
3489 3489 }
3490 3490 if (dvp->size != size) {
3491 3491 AACDB_PRINT(softs, CE_NOTE,
3492 3492 ">>> Container %u size changed to %"PRIu64,
3493 3493 cid, size);
3494 3494 dvp->size = size;
3495 3495 event = AAC_CFG_CHANGE;
3496 3496 }
3497 3497 } else { /* Init new container */
3498 3498 AACDB_PRINT(softs, CE_NOTE,
3499 3499 ">>> Container %d added: " \
3500 3500 "size=0x%x.%08x, type=%d, name=%s",
3501 3501 cid,
3502 3502 ddi_get32(acc, &mir->MntObj.CapacityHigh),
3503 3503 ddi_get32(acc, &mir->MntObj.Capacity),
3504 3504 ddi_get32(acc, &mir->MntObj.VolType),
3505 3505 mir->MntObj.FileSystemName);
3506 3506 dvp->dev.flags |= AAC_DFLAG_VALID;
3507 3507 dvp->dev.type = AAC_DEV_LD;
3508 3508
3509 3509 dvp->cid = cid;
3510 3510 dvp->uid = uid;
3511 3511 dvp->size = size;
3512 3512 dvp->locked = 0;
3513 3513 dvp->deleted = 0;
3514 3514
3515 3515 event = AAC_CFG_ADD;
3516 3516 }
3517 3517 }
3518 3518
3519 3519 finish:
3520 3520 aac_sync_fib_slot_release(softs, &softs->sync_ac);
3521 3521 return (event);
3522 3522 }
3523 3523
3524 3524 /*
3525 3525 * Do a rescan of all the possible containers and update the container list
3526 3526 * with newly online/offline containers, and prepare for autoconfiguration.
3527 3527 */
3528 3528 static int
3529 3529 aac_probe_containers(struct aac_softstate *softs)
3530 3530 {
3531 3531 int i, count, total;
3532 3532
3533 3533 /* Loop over possible containers */
3534 3534 count = softs->container_count;
3535 3535 if (aac_get_container_count(softs, &count) == AACERR)
3536 3536 return (AACERR);
3537 3537
3538 3538 for (i = total = 0; i < count; i++) {
3539 3539 enum aac_cfg_event event = aac_probe_container(softs, i);
3540 3540 if ((event != AAC_CFG_NULL_NOEXIST) &&
3541 3541 (event != AAC_CFG_NULL_EXIST)) {
3542 3542 (void) aac_handle_dr(softs, i, -1, event);
3543 3543 total++;
3544 3544 }
3545 3545 }
3546 3546
3547 3547 if (count < softs->container_count) {
3548 3548 struct aac_container *dvp;
3549 3549
3550 3550 for (dvp = &softs->containers[count];
3551 3551 dvp < &softs->containers[softs->container_count]; dvp++) {
3552 3552 if (!AAC_DEV_IS_VALID(&dvp->dev))
3553 3553 continue;
3554 3554 AACDB_PRINT(softs, CE_NOTE, ">>> Container %d deleted",
3555 3555 dvp->cid);
3556 3556 dvp->dev.flags &= ~AAC_DFLAG_VALID;
3557 3557 (void) aac_handle_dr(softs, dvp->cid, -1,
3558 3558 AAC_CFG_DELETE);
3559 3559 }
3560 3560 }
3561 3561
3562 3562 softs->container_count = count;
3563 3563 AACDB_PRINT(softs, CE_CONT, "?Total %d container(s) found", total);
3564 3564 return (AACOK);
3565 3565 }
3566 3566
3567 3567 static int
3568 3568 aac_probe_jbod(struct aac_softstate *softs, int tgt, int event)
3569 3569 {
3570 - ASSERT(AAC_MAX_LD <= tgt < AAC_MAX_DEV(softs));
3570 + ASSERT(AAC_MAX_LD <= tgt);
3571 + ASSERT(tgt < AAC_MAX_DEV(softs));
3571 3572 struct aac_device *dvp;
3572 3573 dvp = AAC_DEV(softs, tgt);
3573 3574
3574 3575 switch (event) {
3575 3576 case AAC_CFG_ADD:
3576 3577 AACDB_PRINT(softs, CE_NOTE,
3577 3578 ">>> Jbod %d added", tgt - AAC_MAX_LD);
3578 3579 dvp->flags |= AAC_DFLAG_VALID;
3579 3580 dvp->type = AAC_DEV_PD;
3580 3581 break;
3581 3582 case AAC_CFG_DELETE:
3582 3583 AACDB_PRINT(softs, CE_NOTE,
3583 3584 ">>> Jbod %d deleted", tgt - AAC_MAX_LD);
3584 3585 dvp->flags &= ~AAC_DFLAG_VALID;
3585 3586 break;
3586 3587 default:
3587 3588 return (AACERR);
3588 3589 }
3589 3590 (void) aac_handle_dr(softs, tgt, 0, event);
3590 3591 return (AACOK);
3591 3592 }
3592 3593
3593 3594 static int
3594 3595 aac_alloc_comm_space(struct aac_softstate *softs)
3595 3596 {
3596 3597 size_t rlen;
3597 3598 ddi_dma_cookie_t cookie;
3598 3599 uint_t cookien;
3599 3600
3600 3601 /* Allocate DMA for comm. space */
3601 3602 if (ddi_dma_alloc_handle(
3602 3603 softs->devinfo_p,
3603 3604 &softs->addr_dma_attr,
3604 3605 DDI_DMA_SLEEP,
3605 3606 NULL,
3606 3607 &softs->comm_space_dma_handle) != DDI_SUCCESS) {
3607 3608 AACDB_PRINT(softs, CE_WARN,
3608 3609 "Cannot alloc dma handle for communication area");
3609 3610 goto error;
3610 3611 }
3611 3612 if (ddi_dma_mem_alloc(
3612 3613 softs->comm_space_dma_handle,
3613 3614 sizeof (struct aac_comm_space),
3614 3615 &softs->acc_attr,
3615 3616 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3616 3617 DDI_DMA_SLEEP,
3617 3618 NULL,
3618 3619 (caddr_t *)&softs->comm_space,
3619 3620 &rlen,
3620 3621 &softs->comm_space_acc_handle) != DDI_SUCCESS) {
3621 3622 AACDB_PRINT(softs, CE_WARN,
3622 3623 "Cannot alloc mem for communication area");
3623 3624 goto error;
3624 3625 }
3625 3626 if (ddi_dma_addr_bind_handle(
3626 3627 softs->comm_space_dma_handle,
3627 3628 NULL,
3628 3629 (caddr_t)softs->comm_space,
3629 3630 sizeof (struct aac_comm_space),
3630 3631 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
3631 3632 DDI_DMA_SLEEP,
3632 3633 NULL,
3633 3634 &cookie,
3634 3635 &cookien) != DDI_DMA_MAPPED) {
3635 3636 AACDB_PRINT(softs, CE_WARN,
3636 3637 "DMA bind failed for communication area");
3637 3638 goto error;
3638 3639 }
3639 3640 softs->comm_space_phyaddr = cookie.dmac_address;
3640 3641
3641 3642 return (AACOK);
3642 3643 error:
3643 3644 if (softs->comm_space_acc_handle) {
3644 3645 ddi_dma_mem_free(&softs->comm_space_acc_handle);
3645 3646 softs->comm_space_acc_handle = NULL;
3646 3647 }
3647 3648 if (softs->comm_space_dma_handle) {
3648 3649 ddi_dma_free_handle(&softs->comm_space_dma_handle);
3649 3650 softs->comm_space_dma_handle = NULL;
3650 3651 }
3651 3652 return (AACERR);
3652 3653 }
3653 3654
3654 3655 static void
3655 3656 aac_free_comm_space(struct aac_softstate *softs)
3656 3657 {
3657 3658
3658 3659 (void) ddi_dma_unbind_handle(softs->comm_space_dma_handle);
3659 3660 ddi_dma_mem_free(&softs->comm_space_acc_handle);
3660 3661 softs->comm_space_acc_handle = NULL;
3661 3662 ddi_dma_free_handle(&softs->comm_space_dma_handle);
3662 3663 softs->comm_space_dma_handle = NULL;
3663 3664 softs->comm_space_phyaddr = NULL;
3664 3665 }
3665 3666
3666 3667 /*
3667 3668 * Initialize the data structures that are required for the communication
3668 3669 * interface to operate
3669 3670 */
3670 3671 static int
3671 3672 aac_setup_comm_space(struct aac_softstate *softs)
3672 3673 {
3673 3674 ddi_dma_handle_t dma = softs->comm_space_dma_handle;
3674 3675 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
3675 3676 uint32_t comm_space_phyaddr;
3676 3677 struct aac_adapter_init *initp;
3677 3678 int qoffset;
3678 3679
3679 3680 comm_space_phyaddr = softs->comm_space_phyaddr;
3680 3681
3681 3682 /* Setup adapter init struct */
3682 3683 initp = &softs->comm_space->init_data;
3683 3684 bzero(initp, sizeof (struct aac_adapter_init));
3684 3685
3685 3686 ddi_put32(acc, &initp->InitStructRevision, AAC_INIT_STRUCT_REVISION);
3686 3687 ddi_put32(acc, &initp->HostElapsedSeconds, ddi_get_time());
3687 3688
3688 3689 /* Setup new/old comm. specific data */
3689 3690 if (softs->flags & AAC_FLAGS_RAW_IO) {
3690 3691 uint32_t init_flags = 0;
3691 3692
3692 3693 if (softs->flags & AAC_FLAGS_NEW_COMM)
3693 3694 init_flags |= AAC_INIT_FLAGS_NEW_COMM_SUPPORTED;
3694 3695 /* AAC_SUPPORTED_POWER_MANAGEMENT */
3695 3696 init_flags |= AAC_INIT_FLAGS_DRIVER_SUPPORTS_PM;
3696 3697 init_flags |= AAC_INIT_FLAGS_DRIVER_USES_UTC_TIME;
3697 3698
3698 3699 ddi_put32(acc, &initp->InitStructRevision,
3699 3700 AAC_INIT_STRUCT_REVISION_4);
3700 3701 ddi_put32(acc, &initp->InitFlags, init_flags);
3701 3702 /* Setup the preferred settings */
3702 3703 ddi_put32(acc, &initp->MaxIoCommands, softs->aac_max_fibs);
3703 3704 ddi_put32(acc, &initp->MaxIoSize,
3704 3705 (softs->aac_max_sectors << 9));
3705 3706 ddi_put32(acc, &initp->MaxFibSize, softs->aac_max_fib_size);
3706 3707 } else {
3707 3708 /*
3708 3709 * Tells the adapter about the physical location of various
3709 3710 * important shared data structures
3710 3711 */
3711 3712 ddi_put32(acc, &initp->AdapterFibsPhysicalAddress,
3712 3713 comm_space_phyaddr + \
3713 3714 offsetof(struct aac_comm_space, adapter_fibs));
3714 3715 ddi_put32(acc, &initp->AdapterFibsVirtualAddress, 0);
3715 3716 ddi_put32(acc, &initp->AdapterFibAlign, AAC_FIB_SIZE);
3716 3717 ddi_put32(acc, &initp->AdapterFibsSize,
3717 3718 AAC_ADAPTER_FIBS * AAC_FIB_SIZE);
3718 3719 ddi_put32(acc, &initp->PrintfBufferAddress,
3719 3720 comm_space_phyaddr + \
3720 3721 offsetof(struct aac_comm_space, adapter_print_buf));
3721 3722 ddi_put32(acc, &initp->PrintfBufferSize,
3722 3723 AAC_ADAPTER_PRINT_BUFSIZE);
3723 3724 ddi_put32(acc, &initp->MiniPortRevision,
3724 3725 AAC_INIT_STRUCT_MINIPORT_REVISION);
3725 3726 ddi_put32(acc, &initp->HostPhysMemPages, AAC_MAX_PFN);
3726 3727
3727 3728 qoffset = (comm_space_phyaddr + \
3728 3729 offsetof(struct aac_comm_space, qtable)) % \
3729 3730 AAC_QUEUE_ALIGN;
3730 3731 if (qoffset)
3731 3732 qoffset = AAC_QUEUE_ALIGN - qoffset;
3732 3733 softs->qtablep = (struct aac_queue_table *) \
3733 3734 ((char *)&softs->comm_space->qtable + qoffset);
3734 3735 ddi_put32(acc, &initp->CommHeaderAddress, comm_space_phyaddr + \
3735 3736 offsetof(struct aac_comm_space, qtable) + qoffset);
3736 3737
3737 3738 /* Init queue table */
3738 3739 ddi_put32(acc, &softs->qtablep-> \
3739 3740 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3740 3741 AAC_HOST_NORM_CMD_ENTRIES);
3741 3742 ddi_put32(acc, &softs->qtablep-> \
3742 3743 qt_qindex[AAC_HOST_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3743 3744 AAC_HOST_NORM_CMD_ENTRIES);
3744 3745 ddi_put32(acc, &softs->qtablep-> \
3745 3746 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3746 3747 AAC_HOST_HIGH_CMD_ENTRIES);
3747 3748 ddi_put32(acc, &softs->qtablep-> \
3748 3749 qt_qindex[AAC_HOST_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3749 3750 AAC_HOST_HIGH_CMD_ENTRIES);
3750 3751 ddi_put32(acc, &softs->qtablep-> \
3751 3752 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_PRODUCER_INDEX],
3752 3753 AAC_ADAP_NORM_CMD_ENTRIES);
3753 3754 ddi_put32(acc, &softs->qtablep-> \
3754 3755 qt_qindex[AAC_ADAP_NORM_CMD_Q][AAC_CONSUMER_INDEX],
3755 3756 AAC_ADAP_NORM_CMD_ENTRIES);
3756 3757 ddi_put32(acc, &softs->qtablep-> \
3757 3758 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_PRODUCER_INDEX],
3758 3759 AAC_ADAP_HIGH_CMD_ENTRIES);
3759 3760 ddi_put32(acc, &softs->qtablep-> \
3760 3761 qt_qindex[AAC_ADAP_HIGH_CMD_Q][AAC_CONSUMER_INDEX],
3761 3762 AAC_ADAP_HIGH_CMD_ENTRIES);
3762 3763 ddi_put32(acc, &softs->qtablep-> \
3763 3764 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3764 3765 AAC_HOST_NORM_RESP_ENTRIES);
3765 3766 ddi_put32(acc, &softs->qtablep-> \
3766 3767 qt_qindex[AAC_HOST_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3767 3768 AAC_HOST_NORM_RESP_ENTRIES);
3768 3769 ddi_put32(acc, &softs->qtablep-> \
3769 3770 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3770 3771 AAC_HOST_HIGH_RESP_ENTRIES);
3771 3772 ddi_put32(acc, &softs->qtablep-> \
3772 3773 qt_qindex[AAC_HOST_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3773 3774 AAC_HOST_HIGH_RESP_ENTRIES);
3774 3775 ddi_put32(acc, &softs->qtablep-> \
3775 3776 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_PRODUCER_INDEX],
3776 3777 AAC_ADAP_NORM_RESP_ENTRIES);
3777 3778 ddi_put32(acc, &softs->qtablep-> \
3778 3779 qt_qindex[AAC_ADAP_NORM_RESP_Q][AAC_CONSUMER_INDEX],
3779 3780 AAC_ADAP_NORM_RESP_ENTRIES);
3780 3781 ddi_put32(acc, &softs->qtablep-> \
3781 3782 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_PRODUCER_INDEX],
3782 3783 AAC_ADAP_HIGH_RESP_ENTRIES);
3783 3784 ddi_put32(acc, &softs->qtablep-> \
3784 3785 qt_qindex[AAC_ADAP_HIGH_RESP_Q][AAC_CONSUMER_INDEX],
3785 3786 AAC_ADAP_HIGH_RESP_ENTRIES);
3786 3787
3787 3788 /* Init queue entries */
3788 3789 softs->qentries[AAC_HOST_NORM_CMD_Q] =
3789 3790 &softs->qtablep->qt_HostNormCmdQueue[0];
3790 3791 softs->qentries[AAC_HOST_HIGH_CMD_Q] =
3791 3792 &softs->qtablep->qt_HostHighCmdQueue[0];
3792 3793 softs->qentries[AAC_ADAP_NORM_CMD_Q] =
3793 3794 &softs->qtablep->qt_AdapNormCmdQueue[0];
3794 3795 softs->qentries[AAC_ADAP_HIGH_CMD_Q] =
3795 3796 &softs->qtablep->qt_AdapHighCmdQueue[0];
3796 3797 softs->qentries[AAC_HOST_NORM_RESP_Q] =
3797 3798 &softs->qtablep->qt_HostNormRespQueue[0];
3798 3799 softs->qentries[AAC_HOST_HIGH_RESP_Q] =
3799 3800 &softs->qtablep->qt_HostHighRespQueue[0];
3800 3801 softs->qentries[AAC_ADAP_NORM_RESP_Q] =
3801 3802 &softs->qtablep->qt_AdapNormRespQueue[0];
3802 3803 softs->qentries[AAC_ADAP_HIGH_RESP_Q] =
3803 3804 &softs->qtablep->qt_AdapHighRespQueue[0];
3804 3805 }
3805 3806 (void) ddi_dma_sync(dma, 0, 0, DDI_DMA_SYNC_FORDEV);
3806 3807
3807 3808 /* Send init structure to the card */
3808 3809 if (aac_sync_mbcommand(softs, AAC_MONKER_INITSTRUCT,
3809 3810 comm_space_phyaddr + \
3810 3811 offsetof(struct aac_comm_space, init_data),
3811 3812 0, 0, 0, NULL) == AACERR) {
3812 3813 AACDB_PRINT(softs, CE_WARN,
3813 3814 "Cannot send init structure to adapter");
3814 3815 return (AACERR);
3815 3816 }
3816 3817
3817 3818 return (AACOK);
3818 3819 }
3819 3820
3820 3821 static uchar_t *
3821 3822 aac_vendor_id(struct aac_softstate *softs, uchar_t *buf)
3822 3823 {
3823 3824 (void) memset(buf, ' ', AAC_VENDOR_LEN);
3824 3825 bcopy(softs->vendor_name, buf, strlen(softs->vendor_name));
3825 3826 return (buf + AAC_VENDOR_LEN);
3826 3827 }
3827 3828
3828 3829 static uchar_t *
3829 3830 aac_product_id(struct aac_softstate *softs, uchar_t *buf)
3830 3831 {
3831 3832 (void) memset(buf, ' ', AAC_PRODUCT_LEN);
3832 3833 bcopy(softs->product_name, buf, strlen(softs->product_name));
3833 3834 return (buf + AAC_PRODUCT_LEN);
3834 3835 }
3835 3836
3836 3837 /*
3837 3838 * Construct unit serial number from container uid
3838 3839 */
3839 3840 static uchar_t *
3840 3841 aac_lun_serialno(struct aac_softstate *softs, int tgt, uchar_t *buf)
3841 3842 {
3842 3843 int i, d;
3843 3844 uint32_t uid;
3844 3845
3845 3846 ASSERT(tgt >= 0 && tgt < AAC_MAX_LD);
3846 3847
3847 3848 uid = softs->containers[tgt].uid;
3848 3849 for (i = 7; i >= 0; i--) {
3849 3850 d = uid & 0xf;
3850 3851 buf[i] = d > 9 ? 'A' + (d - 0xa) : '0' + d;
3851 3852 uid >>= 4;
3852 3853 }
3853 3854 return (buf + 8);
3854 3855 }
3855 3856
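
Reviewer note, not part of the diff: aac_lun_serialno() simply renders the 32-bit container uid as eight upper-case hex digits, most significant nibble first, into an unterminated fixed-width INQUIRY field. The open-coded nibble loop is equivalent to this sketch (the uid value is made up):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	uint32_t uid = 0x0012ABCD;	/* hypothetical container uid */
	char buf[9];			/* the driver writes 8 bytes with no terminator */

	(void) snprintf(buf, sizeof (buf), "%08X", uid);
	assert(strcmp(buf, "0012ABCD") == 0);
	return (0);
}
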
3856 3857 /*
3857 3858 * SPC-3 7.5 INQUIRY command implementation
3858 3859 */
3859 3860 static void
3860 3861 aac_inquiry(struct aac_softstate *softs, struct scsi_pkt *pkt,
3861 3862 union scsi_cdb *cdbp, struct buf *bp)
3862 3863 {
3863 3864 int tgt = pkt->pkt_address.a_target;
3864 3865 char *b_addr = NULL;
3865 3866 uchar_t page = cdbp->cdb_opaque[2];
3866 3867
3867 3868 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_CMDDT) {
3868 3869 /* Command Support Data is not supported */
3869 3870 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST, 0x24, 0x00, 0);
3870 3871 return;
3871 3872 }
3872 3873
3873 3874 if (bp && bp->b_un.b_addr && bp->b_bcount) {
3874 3875 if (bp->b_flags & (B_PHYS | B_PAGEIO))
3875 3876 bp_mapin(bp);
3876 3877 b_addr = bp->b_un.b_addr;
3877 3878 }
3878 3879
3879 3880 if (cdbp->cdb_opaque[1] & AAC_CDB_INQUIRY_EVPD) {
3880 3881 uchar_t *vpdp = (uchar_t *)b_addr;
3881 3882 uchar_t *idp, *sp;
3882 3883
3883 3884 /* SPC-3 8.4 Vital product data parameters */
3884 3885 switch (page) {
3885 3886 case 0x00:
3886 3887 /* Supported VPD pages */
3887 3888 if (vpdp == NULL ||
3888 3889 bp->b_bcount < (AAC_VPD_PAGE_DATA + 3))
3889 3890 return;
3890 3891 bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3891 3892 vpdp[AAC_VPD_PAGE_CODE] = 0x00;
3892 3893 vpdp[AAC_VPD_PAGE_LENGTH] = 3;
3893 3894
3894 3895 vpdp[AAC_VPD_PAGE_DATA] = 0x00;
3895 3896 vpdp[AAC_VPD_PAGE_DATA + 1] = 0x80;
3896 3897 vpdp[AAC_VPD_PAGE_DATA + 2] = 0x83;
3897 3898
3898 3899 pkt->pkt_state |= STATE_XFERRED_DATA;
3899 3900 break;
3900 3901
3901 3902 case 0x80:
3902 3903 /* Unit serial number page */
3903 3904 if (vpdp == NULL ||
3904 3905 bp->b_bcount < (AAC_VPD_PAGE_DATA + 8))
3905 3906 return;
3906 3907 bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3907 3908 vpdp[AAC_VPD_PAGE_CODE] = 0x80;
3908 3909 vpdp[AAC_VPD_PAGE_LENGTH] = 8;
3909 3910
3910 3911 sp = &vpdp[AAC_VPD_PAGE_DATA];
3911 3912 (void) aac_lun_serialno(softs, tgt, sp);
3912 3913
3913 3914 pkt->pkt_state |= STATE_XFERRED_DATA;
3914 3915 break;
3915 3916
3916 3917 case 0x83:
3917 3918 /* Device identification page */
3918 3919 if (vpdp == NULL ||
3919 3920 bp->b_bcount < (AAC_VPD_PAGE_DATA + 32))
3920 3921 return;
3921 3922 bzero(vpdp, AAC_VPD_PAGE_LENGTH);
3922 3923 vpdp[AAC_VPD_PAGE_CODE] = 0x83;
3923 3924
3924 3925 idp = &vpdp[AAC_VPD_PAGE_DATA];
3925 3926 bzero(idp, AAC_VPD_ID_LENGTH);
3926 3927 idp[AAC_VPD_ID_CODESET] = 0x02;
3927 3928 idp[AAC_VPD_ID_TYPE] = 0x01;
3928 3929
3929 3930 /*
3930 3931 * SPC-3 Table 111 - Identifier type
3931 3932 			 * One recommended method of constructing the remainder
3932 3933 			 * of the identifier field is to concatenate the product
3933 3934 * identification field from the standard INQUIRY data
3934 3935 * field and the product serial number field from the
3935 3936 * unit serial number page.
3936 3937 */
3937 3938 sp = &idp[AAC_VPD_ID_DATA];
3938 3939 sp = aac_vendor_id(softs, sp);
3939 3940 sp = aac_product_id(softs, sp);
3940 3941 sp = aac_lun_serialno(softs, tgt, sp);
3941 3942 idp[AAC_VPD_ID_LENGTH] = (uintptr_t)sp - \
3942 3943 (uintptr_t)&idp[AAC_VPD_ID_DATA];
3943 3944
3944 3945 vpdp[AAC_VPD_PAGE_LENGTH] = (uintptr_t)sp - \
3945 3946 (uintptr_t)&vpdp[AAC_VPD_PAGE_DATA];
3946 3947 pkt->pkt_state |= STATE_XFERRED_DATA;
3947 3948 break;
3948 3949
3949 3950 default:
3950 3951 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3951 3952 0x24, 0x00, 0);
3952 3953 break;
3953 3954 }
3954 3955 } else {
3955 3956 struct scsi_inquiry *inqp = (struct scsi_inquiry *)b_addr;
3956 3957 size_t len = sizeof (struct scsi_inquiry);
3957 3958
3958 3959 if (page != 0) {
3959 3960 aac_set_arq_data(pkt, KEY_ILLEGAL_REQUEST,
3960 3961 0x24, 0x00, 0);
3961 3962 return;
3962 3963 }
3963 3964 if (inqp == NULL || bp->b_bcount < len)
3964 3965 return;
3965 3966
3966 3967 bzero(inqp, len);
3967 3968 inqp->inq_len = AAC_ADDITIONAL_LEN;
3968 3969 inqp->inq_ansi = AAC_ANSI_VER;
3969 3970 inqp->inq_rdf = AAC_RESP_DATA_FORMAT;
3970 3971 (void) aac_vendor_id(softs, (uchar_t *)inqp->inq_vid);
3971 3972 (void) aac_product_id(softs, (uchar_t *)inqp->inq_pid);
3972 3973 bcopy("V1.0", inqp->inq_revision, 4);
3973 3974 inqp->inq_cmdque = 1; /* enable tagged-queuing */
3974 3975 /*
3975 3976 		 * For the "sd-max-xfer-size" property, which may impact
3976 3977 		 * performance as the number of IO threads increases.
3977 3978 */
3978 3979 inqp->inq_wbus32 = 1;
3979 3980
3980 3981 pkt->pkt_state |= STATE_XFERRED_DATA;
3981 3982 }
3982 3983 }
3983 3984
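
Reviewer note, not part of the diff: the page 0x83 branch above builds a T10 vendor-ID style identifier exactly as the SPC-3 comment suggests, by concatenating the space-padded vendor and product strings with the 8-character serial number. A rough sketch of the resulting identifier bytes, assuming the usual SCSI INQUIRY widths of 8 bytes for the vendor and 16 for the product (the driver's AAC_VENDOR_LEN and AAC_PRODUCT_LEN); the strings and uid are made up:

#include <assert.h>
#include <stdio.h>
#include <string.h>

int
main(void)
{
	char id[8 + 16 + 8 + 1];

	/* vendor (space padded), product (space padded), container serial number */
	(void) snprintf(id, sizeof (id), "%-8.8s%-16.16s%08X",
	    "ADAPTEC", "Hypothetical LD", 0x0012ABCDu);
	assert(strlen(id) == 32);
	return (0);
}
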
3984 3985 /*
3985 3986 * SPC-3 7.10 MODE SENSE command implementation
3986 3987 */
3987 3988 static void
3988 3989 aac_mode_sense(struct aac_softstate *softs, struct scsi_pkt *pkt,
3989 3990 union scsi_cdb *cdbp, struct buf *bp, int capacity)
3990 3991 {
3991 3992 uchar_t pagecode;
3992 3993 struct mode_header *headerp;
3993 3994 struct mode_header_g1 *g1_headerp;
3994 3995 unsigned int ncyl;
3995 3996 caddr_t sense_data;
3996 3997 caddr_t next_page;
3997 3998 size_t sdata_size;
3998 3999 size_t pages_size;
3999 4000 int unsupport_page = 0;
4000 4001
4001 4002 ASSERT(cdbp->scc_cmd == SCMD_MODE_SENSE ||
4002 4003 cdbp->scc_cmd == SCMD_MODE_SENSE_G1);
4003 4004
4004 4005 if (!(bp && bp->b_un.b_addr && bp->b_bcount))
4005 4006 return;
4006 4007
4007 4008 if (bp->b_flags & (B_PHYS | B_PAGEIO))
4008 4009 bp_mapin(bp);
4009 4010 pkt->pkt_state |= STATE_XFERRED_DATA;
4010 4011 pagecode = cdbp->cdb_un.sg.scsi[0] & 0x3F;
4011 4012
4012 4013 	/* calculate the size of the needed buffer */
4013 4014 if (cdbp->scc_cmd == SCMD_MODE_SENSE)
4014 4015 sdata_size = MODE_HEADER_LENGTH;
4015 4016 else /* must be SCMD_MODE_SENSE_G1 */
4016 4017 sdata_size = MODE_HEADER_LENGTH_G1;
4017 4018
4018 4019 pages_size = 0;
4019 4020 switch (pagecode) {
4020 4021 case SD_MODE_SENSE_PAGE3_CODE:
4021 4022 pages_size += sizeof (struct mode_format);
4022 4023 break;
4023 4024
4024 4025 case SD_MODE_SENSE_PAGE4_CODE:
4025 4026 pages_size += sizeof (struct mode_geometry);
4026 4027 break;
4027 4028
4028 4029 case MODEPAGE_CTRL_MODE:
4029 4030 if (softs->flags & AAC_FLAGS_LBA_64BIT) {
4030 4031 pages_size += sizeof (struct mode_control_scsi3);
4031 4032 } else {
4032 4033 unsupport_page = 1;
4033 4034 }
4034 4035 break;
4035 4036
4036 4037 case MODEPAGE_ALLPAGES:
4037 4038 if (softs->flags & AAC_FLAGS_LBA_64BIT) {
4038 4039 pages_size += sizeof (struct mode_format) +
4039 4040 sizeof (struct mode_geometry) +
4040 4041 sizeof (struct mode_control_scsi3);
4041 4042 } else {
4042 4043 pages_size += sizeof (struct mode_format) +
4043 4044 sizeof (struct mode_geometry);
4044 4045 }
4045 4046 break;
4046 4047
4047 4048 default:
4048 4049 /* unsupported pages */
4049 4050 unsupport_page = 1;
4050 4051 }
4051 4052
4052 4053 	/* allocate a buffer to fill with the sense data */
4053 4054 sdata_size += pages_size;
4054 4055 sense_data = kmem_zalloc(sdata_size, KM_SLEEP);
4055 4056
4056 4057 if (cdbp->scc_cmd == SCMD_MODE_SENSE) {
4057 4058 headerp = (struct mode_header *)sense_data;
4058 4059 headerp->length = MODE_HEADER_LENGTH + pages_size -
4059 4060 sizeof (headerp->length);
4060 4061 headerp->bdesc_length = 0;
4061 4062 next_page = sense_data + sizeof (struct mode_header);
4062 4063 } else {
4063 4064 g1_headerp = (void *)sense_data;
4064 4065 g1_headerp->length = BE_16(MODE_HEADER_LENGTH_G1 + pages_size -
4065 4066 sizeof (g1_headerp->length));
4066 4067 g1_headerp->bdesc_length = 0;
4067 4068 next_page = sense_data + sizeof (struct mode_header_g1);
4068 4069 }
4069 4070
4070 4071 if (unsupport_page)
4071 4072 goto finish;
4072 4073
4073 4074 if (pagecode == SD_MODE_SENSE_PAGE3_CODE ||
4074 4075 pagecode == MODEPAGE_ALLPAGES) {
4075 4076 /* SBC-3 7.1.3.3 Format device page */
4076 4077 struct mode_format *page3p;
4077 4078
4078 4079 page3p = (void *)next_page;
4079 4080 page3p->mode_page.code = SD_MODE_SENSE_PAGE3_CODE;
4080 4081 page3p->mode_page.length = sizeof (struct mode_format);
4081 4082 page3p->data_bytes_sect = BE_16(AAC_SECTOR_SIZE);
4082 4083 page3p->sect_track = BE_16(AAC_SECTORS_PER_TRACK);
4083 4084
4084 4085 next_page += sizeof (struct mode_format);
4085 4086 }
4086 4087
4087 4088 if (pagecode == SD_MODE_SENSE_PAGE4_CODE ||
4088 4089 pagecode == MODEPAGE_ALLPAGES) {
4089 4090 /* SBC-3 7.1.3.8 Rigid disk device geometry page */
4090 4091 struct mode_geometry *page4p;
4091 4092
4092 4093 page4p = (void *)next_page;
4093 4094 page4p->mode_page.code = SD_MODE_SENSE_PAGE4_CODE;
4094 4095 page4p->mode_page.length = sizeof (struct mode_geometry);
4095 4096 page4p->heads = AAC_NUMBER_OF_HEADS;
4096 4097 page4p->rpm = BE_16(AAC_ROTATION_SPEED);
4097 4098 ncyl = capacity / (AAC_NUMBER_OF_HEADS * AAC_SECTORS_PER_TRACK);
4098 4099 page4p->cyl_lb = ncyl & 0xff;
4099 4100 page4p->cyl_mb = (ncyl >> 8) & 0xff;
4100 4101 page4p->cyl_ub = (ncyl >> 16) & 0xff;
4101 4102
4102 4103 next_page += sizeof (struct mode_geometry);
4103 4104 }
4104 4105
4105 4106 if ((pagecode == MODEPAGE_CTRL_MODE || pagecode == MODEPAGE_ALLPAGES) &&
4106 4107 softs->flags & AAC_FLAGS_LBA_64BIT) {
4107 4108 		/* 64-bit LBA needs large sense data */
4108 4109 struct mode_control_scsi3 *mctl;
4109 4110
4110 4111 mctl = (void *)next_page;
4111 4112 mctl->mode_page.code = MODEPAGE_CTRL_MODE;
4112 4113 mctl->mode_page.length =
4113 4114 sizeof (struct mode_control_scsi3) -
4114 4115 sizeof (struct mode_page);
4115 4116 mctl->d_sense = 1;
4116 4117 }
4117 4118
4118 4119 finish:
4119 4120 /* copyout the valid data. */
4120 4121 bcopy(sense_data, bp->b_un.b_addr, min(sdata_size, bp->b_bcount));
4121 4122 kmem_free(sense_data, sdata_size);
4122 4123 }
4123 4124
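
Reviewer note, not part of the diff: the page 4 geometry above is synthetic, with the cylinder count derived from the capacity and then split across three bytes. A worked example, using 255 heads and 63 sectors per track purely for illustration (the real values are the driver's AAC_NUMBER_OF_HEADS and AAC_SECTORS_PER_TRACK) and a made-up capacity:

#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint32_t capacity = 16450560;		/* blocks; hypothetical LUN size */
	uint32_t heads = 255, spt = 63;		/* illustrative values only */
	uint32_t ncyl = capacity / (heads * spt);

	assert(ncyl == 1024);
	assert((ncyl & 0xff) == 0x00);		/* cyl_lb */
	assert(((ncyl >> 8) & 0xff) == 0x04);	/* cyl_mb */
	assert(((ncyl >> 16) & 0xff) == 0x00);	/* cyl_ub */
	return (0);
}
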
4124 4125 static int
4125 4126 aac_name_node(dev_info_t *dip, char *name, int len)
4126 4127 {
4127 4128 int tgt, lun;
4128 4129
4129 4130 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4130 4131 DDI_PROP_DONTPASS, "target", -1);
4131 4132 if (tgt == -1)
4132 4133 return (DDI_FAILURE);
4133 4134 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
4134 4135 DDI_PROP_DONTPASS, "lun", -1);
4135 4136 if (lun == -1)
4136 4137 return (DDI_FAILURE);
4137 4138
4138 4139 (void) snprintf(name, len, "%x,%x", tgt, lun);
4139 4140 return (DDI_SUCCESS);
4140 4141 }
4141 4142
4142 4143 /*ARGSUSED*/
4143 4144 static int
4144 4145 aac_tran_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
4145 4146 scsi_hba_tran_t *tran, struct scsi_device *sd)
4146 4147 {
4147 4148 struct aac_softstate *softs = AAC_TRAN2SOFTS(tran);
4148 4149 #if defined(DEBUG) || defined(__lock_lint)
4149 4150 int ctl = ddi_get_instance(softs->devinfo_p);
4150 4151 #endif
4151 4152 uint16_t tgt = sd->sd_address.a_target;
4152 4153 uint8_t lun = sd->sd_address.a_lun;
4153 4154 struct aac_device *dvp;
4154 4155
4155 4156 DBCALLED(softs, 2);
4156 4157
4157 4158 if (ndi_dev_is_persistent_node(tgt_dip) == 0) {
4158 4159 /*
4159 4160 		 * If no persistent node exists, we don't allow a .conf node
4160 4161 * to be created.
4161 4162 */
4162 4163 if (aac_find_child(softs, tgt, lun) != NULL) {
4163 4164 if (ndi_merge_node(tgt_dip, aac_name_node) !=
4164 4165 DDI_SUCCESS)
4165 4166 /* Create this .conf node */
4166 4167 return (DDI_SUCCESS);
4167 4168 }
4168 4169 return (DDI_FAILURE);
4169 4170 }
4170 4171
4171 4172 /*
4172 4173 	 * Only support containers/phys. devices that have been
4173 4174 	 * detected and are valid
4174 4175 */
4175 4176 mutex_enter(&softs->io_lock);
4176 4177 if (tgt >= AAC_MAX_DEV(softs)) {
4177 4178 AACDB_PRINT_TRAN(softs,
4178 4179 "aac_tran_tgt_init: c%dt%dL%d out", ctl, tgt, lun);
4179 4180 mutex_exit(&softs->io_lock);
4180 4181 return (DDI_FAILURE);
4181 4182 }
4182 4183
4183 4184 if (tgt < AAC_MAX_LD) {
4184 4185 dvp = (struct aac_device *)&softs->containers[tgt];
4185 4186 if (lun != 0 || !AAC_DEV_IS_VALID(dvp)) {
4186 4187 AACDB_PRINT_TRAN(softs, "aac_tran_tgt_init: c%dt%dL%d",
4187 4188 ctl, tgt, lun);
4188 4189 mutex_exit(&softs->io_lock);
4189 4190 return (DDI_FAILURE);
4190 4191 }
4191 4192 /*
4192 4193 * Save the tgt_dip for the given target if one doesn't exist
4193 4194 		 * already. Dips for non-existent tgts will be cleared in
4194 4195 * tgt_free.
4195 4196 */
4196 4197 if (softs->containers[tgt].dev.dip == NULL &&
4197 4198 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0)
4198 4199 softs->containers[tgt].dev.dip = tgt_dip;
4199 4200 } else {
4200 4201 dvp = (struct aac_device *)&softs->nondasds[AAC_PD(tgt)];
4201 4202 /*
4202 4203 * Save the tgt_dip for the given target if one doesn't exist
4203 4204 		 * already. Dips for non-existent tgts will be cleared in
4204 4205 * tgt_free.
4205 4206 */
4206 4207
4207 4208 if (softs->nondasds[AAC_PD(tgt)].dev.dip == NULL &&
4208 4209 strcmp(ddi_driver_name(sd->sd_dev), "sd") == 0)
4209 4210 softs->nondasds[AAC_PD(tgt)].dev.dip = tgt_dip;
4210 4211 }
4211 4212
4212 4213 if (softs->flags & AAC_FLAGS_BRKUP) {
4213 4214 if (ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
4214 4215 "buf_break", 1) != DDI_PROP_SUCCESS) {
4215 4216 cmn_err(CE_CONT, "unable to create "
4216 4217 "property for t%dL%d (buf_break)", tgt, lun);
4217 4218 }
4218 4219 }
4219 4220
4220 4221 AACDB_PRINT(softs, CE_NOTE,
4221 4222 "aac_tran_tgt_init: c%dt%dL%d ok (%s)", ctl, tgt, lun,
4222 4223 (dvp->type == AAC_DEV_PD) ? "pd" : "ld");
4223 4224 mutex_exit(&softs->io_lock);
4224 4225 return (DDI_SUCCESS);
4225 4226 }
4226 4227
4227 4228 static void
4228 4229 aac_tran_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
4229 4230 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
4230 4231 {
4231 4232 #ifndef __lock_lint
4232 4233 _NOTE(ARGUNUSED(hba_dip, tgt_dip, hba_tran))
4233 4234 #endif
4234 4235
4235 4236 struct aac_softstate *softs = SD2AAC(sd);
4236 4237 int tgt = sd->sd_address.a_target;
4237 4238
4238 4239 mutex_enter(&softs->io_lock);
4239 4240 if (tgt < AAC_MAX_LD) {
4240 4241 if (softs->containers[tgt].dev.dip == tgt_dip)
4241 4242 softs->containers[tgt].dev.dip = NULL;
4242 4243 } else {
4243 4244 if (softs->nondasds[AAC_PD(tgt)].dev.dip == tgt_dip)
4244 4245 softs->nondasds[AAC_PD(tgt)].dev.dip = NULL;
4245 4246 softs->nondasds[AAC_PD(tgt)].dev.flags &= ~AAC_DFLAG_VALID;
4246 4247 }
4247 4248 mutex_exit(&softs->io_lock);
4248 4249 }
4249 4250
4250 4251 /*
4251 4252 * Check if the firmware is Up And Running. If it is in the Kernel Panic
4252 4253 * state, (BlinkLED code + 1) is returned.
4253 4254 * 0 -- firmware up and running
4254 4255 * -1 -- firmware dead
4255 4256 * >0 -- firmware kernel panic
4256 4257 */
4257 4258 static int
4258 4259 aac_check_adapter_health(struct aac_softstate *softs)
4259 4260 {
4260 4261 int rval;
4261 4262
4262 4263 rval = PCI_MEM_GET32(softs, AAC_OMR0);
4263 4264
4264 4265 if (rval & AAC_KERNEL_UP_AND_RUNNING) {
4265 4266 rval = 0;
4266 4267 } else if (rval & AAC_KERNEL_PANIC) {
4267 4268 cmn_err(CE_WARN, "firmware panic");
4268 4269 rval = ((rval >> 16) & 0xff) + 1; /* avoid 0 as return value */
4269 4270 } else {
4270 4271 cmn_err(CE_WARN, "firmware dead");
4271 4272 rval = -1;
4272 4273 }
4273 4274 return (rval);
4274 4275 }
4275 4276
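
Reviewer note, not part of the diff: aac_check_adapter_health() folds three outcomes into one int (0 healthy, -1 dead, BlinkLED code + 1 on firmware panic), so a caller must subtract one to recover the BlinkLED code. A tiny sketch of that decoding convention:

#include <assert.h>

/* Mirrors the return convention of aac_check_adapter_health(). */
static int
health_to_blinkled(int health)
{
	return (health > 0 ? health - 1 : -1);	/* -1: no BlinkLED code available */
}

int
main(void)
{
	assert(health_to_blinkled(0) == -1);		/* up and running */
	assert(health_to_blinkled(-1) == -1);		/* dead */
	assert(health_to_blinkled(0x9c + 1) == 0x9c);	/* hypothetical panic code */
	return (0);
}
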
4276 4277 static void
4277 4278 aac_abort_iocmd(struct aac_softstate *softs, struct aac_cmd *acp,
4278 4279 uchar_t reason)
4279 4280 {
4280 4281 acp->flags |= AAC_CMD_ABORT;
4281 4282
4282 4283 if (acp->pkt) {
4283 4284 if (acp->slotp) { /* outstanding cmd */
4284 4285 acp->pkt->pkt_state |= STATE_GOT_STATUS;
4285 4286 }
4286 4287
4287 4288 switch (reason) {
4288 4289 case CMD_TIMEOUT:
4289 4290 AACDB_PRINT(softs, CE_NOTE, "CMD_TIMEOUT: acp=0x%p",
4290 4291 acp);
4291 4292 aac_set_pkt_reason(softs, acp, CMD_TIMEOUT,
4292 4293 STAT_TIMEOUT | STAT_BUS_RESET);
4293 4294 break;
4294 4295 case CMD_RESET:
4295 4296 			/* aac supports only RESET_ALL */
4296 4297 AACDB_PRINT(softs, CE_NOTE, "CMD_RESET: acp=0x%p", acp);
4297 4298 aac_set_pkt_reason(softs, acp, CMD_RESET,
4298 4299 STAT_BUS_RESET);
4299 4300 break;
4300 4301 case CMD_ABORTED:
4301 4302 AACDB_PRINT(softs, CE_NOTE, "CMD_ABORTED: acp=0x%p",
4302 4303 acp);
4303 4304 aac_set_pkt_reason(softs, acp, CMD_ABORTED,
4304 4305 STAT_ABORTED);
4305 4306 break;
4306 4307 }
4307 4308 }
4308 4309 aac_end_io(softs, acp);
4309 4310 }
4310 4311
4311 4312 /*
4312 4313 * Abort all the pending commands of type iocmd or just the command pkt
4313 4314 * corresponding to pkt
4314 4315 */
4315 4316 static void
4316 4317 aac_abort_iocmds(struct aac_softstate *softs, int iocmd, struct scsi_pkt *pkt,
4317 4318 int reason)
4318 4319 {
4319 4320 struct aac_cmd *ac_arg, *acp;
4320 4321 int i;
4321 4322
4322 4323 if (pkt == NULL) {
4323 4324 ac_arg = NULL;
4324 4325 } else {
4325 4326 ac_arg = PKT2AC(pkt);
4326 4327 iocmd = (ac_arg->flags & AAC_CMD_SYNC) ?
4327 4328 AAC_IOCMD_SYNC : AAC_IOCMD_ASYNC;
4328 4329 }
4329 4330
4330 4331 /*
4331 4332 * a) outstanding commands on the controller
4332 4333 * Note: should abort outstanding commands only after one
4333 4334 * IOP reset has been done.
4334 4335 */
4335 4336 if (iocmd & AAC_IOCMD_OUTSTANDING) {
4336 4337 struct aac_cmd *acp;
4337 4338
4338 4339 for (i = 0; i < AAC_MAX_LD; i++) {
4339 4340 if (AAC_DEV_IS_VALID(&softs->containers[i].dev))
4340 4341 softs->containers[i].reset = 1;
4341 4342 }
4342 4343 while ((acp = softs->q_busy.q_head) != NULL)
4343 4344 aac_abort_iocmd(softs, acp, reason);
4344 4345 }
4345 4346
4346 4347 /* b) commands in the waiting queues */
4347 4348 for (i = 0; i < AAC_CMDQ_NUM; i++) {
4348 4349 if (iocmd & (1 << i)) {
4349 4350 if (ac_arg) {
4350 4351 aac_abort_iocmd(softs, ac_arg, reason);
4351 4352 } else {
4352 4353 while ((acp = softs->q_wait[i].q_head) != NULL)
4353 4354 aac_abort_iocmd(softs, acp, reason);
4354 4355 }
4355 4356 }
4356 4357 }
4357 4358 }
4358 4359
4359 4360 /*
4360 4361 * The draining thread is shared among quiesce threads. It terminates
4361 4362 * when the adapter is quiesced or stopped by aac_stop_drain().
4362 4363 */
4363 4364 static void
4364 4365 aac_check_drain(void *arg)
4365 4366 {
4366 4367 struct aac_softstate *softs = arg;
4367 4368
4368 4369 mutex_enter(&softs->io_lock);
4369 4370 if (softs->ndrains) {
4370 4371 softs->drain_timeid = 0;
4371 4372 /*
4372 4373 * If both ASYNC and SYNC bus throttle are held,
4373 4374 * wake up threads only when both are drained out.
4374 4375 */
4375 4376 if ((softs->bus_throttle[AAC_CMDQ_ASYNC] > 0 ||
4376 4377 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) &&
4377 4378 (softs->bus_throttle[AAC_CMDQ_SYNC] > 0 ||
4378 4379 softs->bus_ncmds[AAC_CMDQ_SYNC] == 0))
4379 4380 cv_broadcast(&softs->drain_cv);
4380 4381 else
4381 4382 softs->drain_timeid = timeout(aac_check_drain, softs,
4382 4383 AAC_QUIESCE_TICK * drv_usectohz(1000000));
4383 4384 }
4384 4385 mutex_exit(&softs->io_lock);
4385 4386 }
4386 4387
4387 4388 /*
4388 4389 * If not draining the outstanding cmds, drain them. Otherwise,
4389 4390 * only update ndrains.
4390 4391 */
4391 4392 static void
4392 4393 aac_start_drain(struct aac_softstate *softs)
4393 4394 {
4394 4395 if (softs->ndrains == 0) {
4395 4396 ASSERT(softs->drain_timeid == 0);
4396 4397 softs->drain_timeid = timeout(aac_check_drain, softs,
4397 4398 AAC_QUIESCE_TICK * drv_usectohz(1000000));
4398 4399 }
4399 4400 softs->ndrains++;
4400 4401 }
4401 4402
4402 4403 /*
4403 4404 * Stop the draining thread when no other threads use it any longer.
4404 4405 * Side effect: io_lock may be released in the middle.
4405 4406 */
4406 4407 static void
4407 4408 aac_stop_drain(struct aac_softstate *softs)
4408 4409 {
4409 4410 softs->ndrains--;
4410 4411 if (softs->ndrains == 0) {
4411 4412 if (softs->drain_timeid != 0) {
4412 4413 timeout_id_t tid = softs->drain_timeid;
4413 4414
4414 4415 softs->drain_timeid = 0;
4415 4416 mutex_exit(&softs->io_lock);
4416 4417 (void) untimeout(tid);
4417 4418 mutex_enter(&softs->io_lock);
4418 4419 }
4419 4420 }
4420 4421 }
4421 4422
4422 4423 /*
4423 4424 * The following function comes from Adaptec:
4424 4425 *
4425 4426  * Once an IOP reset is done, the driver basically has to re-initialize the
4426 4427  * card as if coming up from a cold boot, and the driver is responsible for
4427 4428  * any IO that was outstanding to the adapter at the time of the IOP RESET.
4428 4429  * Prepare for IOP RESET by making the init code modular, with the ability
4429 4430  * to call it from multiple places.
4430 4431 */
4431 4432 static int
4432 4433 aac_reset_adapter(struct aac_softstate *softs)
4433 4434 {
4434 4435 int health;
4435 4436 uint32_t status;
4436 4437 int rval = AAC_IOP_RESET_FAILED;
4437 4438
4438 4439 DBCALLED(softs, 1);
4439 4440
4440 4441 ASSERT(softs->state & AAC_STATE_RESET);
4441 4442
4442 4443 ddi_fm_acc_err_clear(softs->pci_mem_handle, DDI_FME_VER0);
4443 4444 /* Disable interrupt */
4444 4445 AAC_DISABLE_INTR(softs);
4445 4446
4446 4447 health = aac_check_adapter_health(softs);
4447 4448 if (health == -1) {
4448 4449 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
4449 4450 goto finish;
4450 4451 }
4451 4452 if (health == 0) /* flush drives if possible */
4452 4453 (void) aac_shutdown(softs);
4453 4454
4454 4455 /* Execute IOP reset */
4455 4456 if ((aac_sync_mbcommand(softs, AAC_IOP_RESET, 0, 0, 0, 0,
4456 4457 &status)) != AACOK) {
4457 4458 ddi_acc_handle_t acc;
4458 4459 struct aac_fib *fibp;
4459 4460 struct aac_pause_command *pc;
4460 4461
4461 4462 if ((status & 0xf) == 0xf) {
4462 4463 uint32_t wait_count;
4463 4464
4464 4465 /*
4465 4466 * Sunrise Lake has dual cores and we must drag the
4466 4467 * other core with us to reset simultaneously. There
4467 4468 * are 2 bits in the Inbound Reset Control and Status
4468 4469 * Register (offset 0x38) of the Sunrise Lake to reset
4469 4470 * the chip without clearing out the PCI configuration
4470 4471 * info (COMMAND & BARS).
4471 4472 */
4472 4473 PCI_MEM_PUT32(softs, AAC_IRCSR, AAC_IRCSR_CORES_RST);
4473 4474
4474 4475 /*
4475 4476 * We need to wait for 5 seconds before accessing the MU
4476 4477 			 * again: 5 * 10000 * 100us = 5,000,000us = 5s
4477 4478 */
4478 4479 wait_count = 5 * 10000;
4479 4480 while (wait_count) {
4480 4481 drv_usecwait(100); /* delay 100 microseconds */
4481 4482 wait_count--;
4482 4483 }
4483 4484 } else {
4484 4485 if (status == SRB_STATUS_INVALID_REQUEST)
4485 4486 cmn_err(CE_WARN, "!IOP_RESET not supported");
4486 4487 else /* probably timeout */
4487 4488 cmn_err(CE_WARN, "!IOP_RESET failed");
4488 4489
4489 4490 /* Unwind aac_shutdown() */
4490 4491 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
4491 4492 acc = softs->sync_ac.slotp->fib_acc_handle;
4492 4493
4493 4494 fibp = softs->sync_ac.slotp->fibp;
4494 4495 pc = (struct aac_pause_command *)&fibp->data[0];
4495 4496
4496 4497 bzero(pc, sizeof (*pc));
4497 4498 ddi_put32(acc, &pc->Command, VM_ContainerConfig);
4498 4499 ddi_put32(acc, &pc->Type, CT_PAUSE_IO);
4499 4500 ddi_put32(acc, &pc->Timeout, 1);
4500 4501 ddi_put32(acc, &pc->Min, 1);
4501 4502 ddi_put32(acc, &pc->NoRescan, 1);
4502 4503
4503 4504 (void) aac_sync_fib(softs, ContainerCommand,
4504 4505 AAC_FIB_SIZEOF(struct aac_pause_command));
4505 4506 aac_sync_fib_slot_release(softs, &softs->sync_ac);
4506 4507
4507 4508 if (aac_check_adapter_health(softs) != 0)
4508 4509 ddi_fm_service_impact(softs->devinfo_p,
4509 4510 DDI_SERVICE_LOST);
4510 4511 else
4511 4512 /*
4512 4513 * IOP reset not supported or IOP not reseted
4513 4514 				 * IOP reset not supported or IOP not reset
4514 4515 rval = AAC_IOP_RESET_ABNORMAL;
4515 4516 goto finish;
4516 4517 }
4517 4518 }
4518 4519
4519 4520 /*
4520 4521 * Re-read and renegotiate the FIB parameters, as one of the actions
4521 4522 * that can result from an IOP reset is the running of a new firmware
4522 4523 * image.
4523 4524 */
4524 4525 if (aac_common_attach(softs) != AACOK)
4525 4526 goto finish;
4526 4527
4527 4528 rval = AAC_IOP_RESET_SUCCEED;
4528 4529
4529 4530 finish:
4530 4531 AAC_ENABLE_INTR(softs);
4531 4532 return (rval);
4532 4533 }
4533 4534
4534 4535 static void
4535 4536 aac_set_throttle(struct aac_softstate *softs, struct aac_device *dvp, int q,
4536 4537 int throttle)
4537 4538 {
4538 4539 /*
4539 4540 * If the bus is draining/quiesced, no changes to the throttles
4540 4541 * are allowed. All throttles should have been set to 0.
4541 4542 */
4542 4543 if ((softs->state & AAC_STATE_QUIESCED) || softs->ndrains)
4543 4544 return;
4544 4545 dvp->throttle[q] = throttle;
4545 4546 }
4546 4547
4547 4548 static void
4548 4549 aac_hold_bus(struct aac_softstate *softs, int iocmds)
4549 4550 {
4550 4551 int i, q;
4551 4552
4552 4553 /* Hold bus by holding every device on the bus */
4553 4554 for (q = 0; q < AAC_CMDQ_NUM; q++) {
4554 4555 if (iocmds & (1 << q)) {
4555 4556 softs->bus_throttle[q] = 0;
4556 4557 for (i = 0; i < AAC_MAX_LD; i++)
4557 4558 aac_set_throttle(softs,
4558 4559 &softs->containers[i].dev, q, 0);
4559 4560 for (i = 0; i < AAC_MAX_PD(softs); i++)
4560 4561 aac_set_throttle(softs,
4561 4562 &softs->nondasds[i].dev, q, 0);
4562 4563 }
4563 4564 }
4564 4565 }
4565 4566
4566 4567 static void
4567 4568 aac_unhold_bus(struct aac_softstate *softs, int iocmds)
4568 4569 {
4569 4570 int i, q, max_throttle;
4570 4571
4571 4572 for (q = 0; q < AAC_CMDQ_NUM; q++) {
4572 4573 if (iocmds & (1 << q)) {
4573 4574 /*
4574 4575 			 * Do not unhold the AAC_IOCMD_ASYNC bus if it has been
4575 4576 			 * quiesced or is being drained by quiesce
4576 4577 * threads.
4577 4578 */
4578 4579 if (q == AAC_CMDQ_ASYNC && ((softs->state &
4579 4580 AAC_STATE_QUIESCED) || softs->ndrains))
4580 4581 continue;
4581 4582 if (q == AAC_CMDQ_ASYNC)
4582 4583 max_throttle = softs->total_slots -
4583 4584 AAC_MGT_SLOT_NUM;
4584 4585 else
4585 4586 max_throttle = softs->total_slots - 1;
4586 4587 softs->bus_throttle[q] = max_throttle;
4587 4588 for (i = 0; i < AAC_MAX_LD; i++)
4588 4589 aac_set_throttle(softs,
4589 4590 &softs->containers[i].dev,
4590 4591 q, max_throttle);
4591 4592 for (i = 0; i < AAC_MAX_PD(softs); i++)
4592 4593 aac_set_throttle(softs, &softs->nondasds[i].dev,
4593 4594 q, max_throttle);
4594 4595 }
4595 4596 }
4596 4597 }
4597 4598
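/*
 * Quiesce the adapter: hold off new io, poll until the outstanding io
 * commands drain, and fall back to an IOP reset if they do not.
 */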
4598 4599 static int
4599 4600 aac_do_reset(struct aac_softstate *softs)
4600 4601 {
4601 4602 int health;
4602 4603 int rval;
4603 4604
4604 4605 softs->state |= AAC_STATE_RESET;
4605 4606 health = aac_check_adapter_health(softs);
4606 4607
4607 4608 /*
4608 4609 	 * Hold off new io commands and wait for all outstanding io
4609 4610 	 * commands to complete.
4610 4611 */
4611 4612 if (health == 0) {
4612 4613 int sync_cmds = softs->bus_ncmds[AAC_CMDQ_SYNC];
4613 4614 int async_cmds = softs->bus_ncmds[AAC_CMDQ_ASYNC];
4614 4615
4615 4616 if (sync_cmds == 0 && async_cmds == 0) {
4616 4617 rval = AAC_IOP_RESET_SUCCEED;
4617 4618 goto finish;
4618 4619 }
4619 4620 /*
4620 4621 * Give the adapter up to AAC_QUIESCE_TIMEOUT more seconds
4621 4622 * to complete the outstanding io commands
4622 4623 */
4623 4624 int timeout = AAC_QUIESCE_TIMEOUT * 1000 * 10;
4624 4625 int (*intr_handler)(struct aac_softstate *);
4625 4626
4626 4627 aac_hold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4627 4628 /*
4628 4629 		 * Poll the adapter ourselves in case interrupts are disabled,
4629 4630 		 * and to avoid releasing the io_lock.
4630 4631 */
4631 4632 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
4632 4633 aac_process_intr_new : aac_process_intr_old;
4633 4634 while ((softs->bus_ncmds[AAC_CMDQ_SYNC] ||
4634 4635 softs->bus_ncmds[AAC_CMDQ_ASYNC]) && timeout) {
4635 4636 drv_usecwait(100);
4636 4637 (void) intr_handler(softs);
4637 4638 timeout--;
4638 4639 }
4639 4640 aac_unhold_bus(softs, AAC_IOCMD_SYNC | AAC_IOCMD_ASYNC);
4640 4641
4641 4642 if (softs->bus_ncmds[AAC_CMDQ_SYNC] == 0 &&
4642 4643 softs->bus_ncmds[AAC_CMDQ_ASYNC] == 0) {
4643 4644 /* Cmds drained out */
4644 4645 rval = AAC_IOP_RESET_SUCCEED;
4645 4646 goto finish;
4646 4647 } else if (softs->bus_ncmds[AAC_CMDQ_SYNC] < sync_cmds ||
4647 4648 softs->bus_ncmds[AAC_CMDQ_ASYNC] < async_cmds) {
4648 4649 /* Cmds not drained out, adapter overloaded */
4649 4650 rval = AAC_IOP_RESET_ABNORMAL;
4650 4651 goto finish;
4651 4652 }
4652 4653 }
4653 4654
4654 4655 /*
4655 4656 	 * If the extra wait still cannot drain the outstanding io
4656 4657 	 * commands, do an IOP reset.
4657 4658 */
4658 4659 if ((rval = aac_reset_adapter(softs)) == AAC_IOP_RESET_FAILED)
4659 4660 softs->state |= AAC_STATE_DEAD;
4660 4661
4661 4662 finish:
4662 4663 softs->state &= ~AAC_STATE_RESET;
4663 4664 return (rval);
4664 4665 }
4665 4666
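/*
 * tran_reset(9E) entry point. Only RESET_ALL is supported; the reset
 * drains or resets the adapter and then restarts any waiting io.
 */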
4666 4667 static int
4667 4668 aac_tran_reset(struct scsi_address *ap, int level)
4668 4669 {
4669 4670 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4670 4671 int rval;
4671 4672
4672 4673 DBCALLED(softs, 1);
4673 4674
4674 4675 if (level != RESET_ALL) {
4675 4676 cmn_err(CE_NOTE, "!reset target/lun not supported");
4676 4677 return (0);
4677 4678 }
4678 4679
4679 4680 mutex_enter(&softs->io_lock);
4680 4681 switch (rval = aac_do_reset(softs)) {
4681 4682 case AAC_IOP_RESET_SUCCEED:
4682 4683 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING | AAC_IOCMD_ASYNC,
4683 4684 NULL, CMD_RESET);
4684 4685 aac_start_waiting_io(softs);
4685 4686 break;
4686 4687 case AAC_IOP_RESET_FAILED:
4687 4688 /* Abort IOCTL cmds when adapter is dead */
4688 4689 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_RESET);
4689 4690 break;
4690 4691 case AAC_IOP_RESET_ABNORMAL:
4691 4692 aac_start_waiting_io(softs);
4692 4693 }
4693 4694 mutex_exit(&softs->io_lock);
4694 4695
4695 4696 aac_drain_comp_q(softs);
4696 4697 return (rval == 0);
4697 4698 }
4698 4699
4699 4700 static int
4700 4701 aac_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
4701 4702 {
4702 4703 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4703 4704
4704 4705 DBCALLED(softs, 1);
4705 4706
4706 4707 mutex_enter(&softs->io_lock);
4707 4708 aac_abort_iocmds(softs, 0, pkt, CMD_ABORTED);
4708 4709 mutex_exit(&softs->io_lock);
4709 4710
4710 4711 aac_drain_comp_q(softs);
4711 4712 return (1);
4712 4713 }
4713 4714
4714 4715 void
4715 4716 aac_free_dmamap(struct aac_cmd *acp)
4716 4717 {
4717 4718 /* Free dma mapping */
4718 4719 if (acp->flags & AAC_CMD_DMA_VALID) {
4719 4720 ASSERT(acp->buf_dma_handle);
4720 4721 (void) ddi_dma_unbind_handle(acp->buf_dma_handle);
4721 4722 acp->flags &= ~AAC_CMD_DMA_VALID;
4722 4723 }
4723 4724
4724 4725 if (acp->abp != NULL) { /* free non-aligned buf DMA */
4725 4726 ASSERT(acp->buf_dma_handle);
4726 4727 if ((acp->flags & AAC_CMD_BUF_WRITE) == 0 && acp->bp)
4727 4728 ddi_rep_get8(acp->abh, (uint8_t *)acp->bp->b_un.b_addr,
4728 4729 (uint8_t *)acp->abp, acp->bp->b_bcount,
4729 4730 DDI_DEV_AUTOINCR);
4730 4731 ddi_dma_mem_free(&acp->abh);
4731 4732 acp->abp = NULL;
4732 4733 }
4733 4734
4734 4735 if (acp->buf_dma_handle) {
4735 4736 ddi_dma_free_handle(&acp->buf_dma_handle);
4736 4737 acp->buf_dma_handle = NULL;
4737 4738 }
4738 4739 }
4739 4740
4740 4741 static void
4741 4742 aac_unknown_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
4742 4743 {
4743 4744 AACDB_PRINT(softs, CE_CONT, "SCMD 0x%x not supported",
4744 4745 ((union scsi_cdb *)(void *)acp->pkt->pkt_cdbp)->scc_cmd);
4745 4746 aac_free_dmamap(acp);
4746 4747 aac_set_arq_data(acp->pkt, KEY_ILLEGAL_REQUEST, 0x20, 0x00, 0);
4747 4748 aac_soft_callback(softs, acp);
4748 4749 }
4749 4750
4750 4751 /*
4751 4752 * Handle command to logical device
4752 4753 */
4753 4754 static int
4754 4755 aac_tran_start_ld(struct aac_softstate *softs, struct aac_cmd *acp)
4755 4756 {
4756 4757 struct aac_container *dvp;
4757 4758 struct scsi_pkt *pkt;
4758 4759 union scsi_cdb *cdbp;
4759 4760 struct buf *bp;
4760 4761 int rval;
4761 4762
4762 4763 dvp = (struct aac_container *)acp->dvp;
4763 4764 pkt = acp->pkt;
4764 4765 cdbp = (void *)pkt->pkt_cdbp;
4765 4766 bp = acp->bp;
4766 4767
4767 4768 switch (cdbp->scc_cmd) {
4768 4769 case SCMD_INQUIRY: /* inquiry */
4769 4770 aac_free_dmamap(acp);
4770 4771 aac_inquiry(softs, pkt, cdbp, bp);
4771 4772 aac_soft_callback(softs, acp);
4772 4773 rval = TRAN_ACCEPT;
4773 4774 break;
4774 4775
4775 4776 case SCMD_READ_CAPACITY: /* read capacity */
4776 4777 if (bp && bp->b_un.b_addr && bp->b_bcount) {
4777 4778 struct scsi_capacity cap;
4778 4779 uint64_t last_lba;
4779 4780
4780 4781 /* check 64-bit LBA */
4781 4782 last_lba = dvp->size - 1;
4782 4783 if (last_lba > 0xffffffffull) {
4783 4784 cap.capacity = 0xfffffffful;
4784 4785 } else {
4785 4786 cap.capacity = BE_32(last_lba);
4786 4787 }
4787 4788 cap.lbasize = BE_32(AAC_SECTOR_SIZE);
4788 4789
4789 4790 aac_free_dmamap(acp);
4790 4791 if (bp->b_flags & (B_PHYS|B_PAGEIO))
4791 4792 bp_mapin(bp);
4792 4793 bcopy(&cap, bp->b_un.b_addr, min(bp->b_bcount, 8));
4793 4794 pkt->pkt_state |= STATE_XFERRED_DATA;
4794 4795 }
4795 4796 aac_soft_callback(softs, acp);
4796 4797 rval = TRAN_ACCEPT;
4797 4798 break;
4798 4799
4799 4800 case SCMD_SVC_ACTION_IN_G4: /* read capacity 16 */
4800 4801 /* Check if containers need 64-bit LBA support */
4801 4802 if (cdbp->cdb_opaque[1] == SSVC_ACTION_READ_CAPACITY_G4) {
4802 4803 if (bp && bp->b_un.b_addr && bp->b_bcount) {
4803 4804 struct scsi_capacity_16 cap16;
4804 4805 int cap_len = sizeof (struct scsi_capacity_16);
4805 4806
4806 4807 bzero(&cap16, cap_len);
4807 4808 cap16.sc_capacity = BE_64(dvp->size - 1);
4808 4809 cap16.sc_lbasize = BE_32(AAC_SECTOR_SIZE);
4809 4810
4810 4811 aac_free_dmamap(acp);
4811 4812 if (bp->b_flags & (B_PHYS | B_PAGEIO))
4812 4813 bp_mapin(bp);
4813 4814 bcopy(&cap16, bp->b_un.b_addr,
4814 4815 min(bp->b_bcount, cap_len));
4815 4816 pkt->pkt_state |= STATE_XFERRED_DATA;
4816 4817 }
4817 4818 aac_soft_callback(softs, acp);
4818 4819 } else {
4819 4820 aac_unknown_scmd(softs, acp);
4820 4821 }
4821 4822 rval = TRAN_ACCEPT;
4822 4823 break;
4823 4824
4824 4825 case SCMD_READ_G4: /* read_16 */
4825 4826 case SCMD_WRITE_G4: /* write_16 */
4826 4827 if (softs->flags & AAC_FLAGS_RAW_IO) {
4827 4828 /* NOTE: GETG4ADDRTL(cdbp) is int32_t */
4828 4829 acp->blkno = ((uint64_t) \
4829 4830 GETG4ADDR(cdbp) << 32) | \
4830 4831 (uint32_t)GETG4ADDRTL(cdbp);
4831 4832 goto do_io;
4832 4833 }
4833 4834 AACDB_PRINT(softs, CE_WARN, "64-bit LBA not supported");
4834 4835 aac_unknown_scmd(softs, acp);
4835 4836 rval = TRAN_ACCEPT;
4836 4837 break;
4837 4838
4838 4839 case SCMD_READ: /* read_6 */
4839 4840 case SCMD_WRITE: /* write_6 */
4840 4841 acp->blkno = GETG0ADDR(cdbp);
4841 4842 goto do_io;
4842 4843
4843 4844 case SCMD_READ_G5: /* read_12 */
4844 4845 case SCMD_WRITE_G5: /* write_12 */
4845 4846 acp->blkno = GETG5ADDR(cdbp);
4846 4847 goto do_io;
4847 4848
4848 4849 case SCMD_READ_G1: /* read_10 */
4849 4850 case SCMD_WRITE_G1: /* write_10 */
4850 4851 acp->blkno = (uint32_t)GETG1ADDR(cdbp);
4851 4852 do_io:
4852 4853 if (acp->flags & AAC_CMD_DMA_VALID) {
4853 4854 uint64_t cnt_size = dvp->size;
4854 4855
4855 4856 /*
4856 4857 * If LBA > array size AND rawio, the
4857 4858 * adapter may hang. So check it before
4858 4859 * sending.
4859 4860 * NOTE: (blkno + blkcnt) may overflow
4860 4861 */
4861 4862 if ((acp->blkno < cnt_size) &&
4862 4863 ((acp->blkno + acp->bcount /
4863 4864 AAC_BLK_SIZE) <= cnt_size)) {
4864 4865 rval = aac_do_io(softs, acp);
4865 4866 } else {
4866 4867 /*
4867 4868 * Request exceeds the capacity of disk,
4868 4869 				 * The request exceeds the capacity of the
4869 4870 				 * disk; set the error block number to the
4870 4871 				 * last LBA + 1.
4871 4872 aac_set_arq_data(pkt,
4872 4873 KEY_ILLEGAL_REQUEST, 0x21,
4873 4874 0x00, cnt_size);
4874 4875 aac_soft_callback(softs, acp);
4875 4876 rval = TRAN_ACCEPT;
4876 4877 }
4877 4878 } else if (acp->bcount == 0) {
4878 4879 /* For 0 length IO, just return ok */
4879 4880 aac_soft_callback(softs, acp);
4880 4881 rval = TRAN_ACCEPT;
4881 4882 } else {
4882 4883 rval = TRAN_BADPKT;
4883 4884 }
4884 4885 break;
4885 4886
4886 4887 case SCMD_MODE_SENSE: /* mode_sense_6 */
4887 4888 case SCMD_MODE_SENSE_G1: { /* mode_sense_10 */
4888 4889 int capacity;
4889 4890
4890 4891 aac_free_dmamap(acp);
4891 4892 if (dvp->size > 0xffffffffull)
4892 4893 capacity = 0xfffffffful; /* 64-bit LBA */
4893 4894 else
4894 4895 capacity = dvp->size;
4895 4896 aac_mode_sense(softs, pkt, cdbp, bp, capacity);
4896 4897 aac_soft_callback(softs, acp);
4897 4898 rval = TRAN_ACCEPT;
4898 4899 break;
4899 4900 }
4900 4901
4901 4902 case SCMD_START_STOP:
4902 4903 if (softs->support_opt2 & AAC_SUPPORTED_POWER_MANAGEMENT) {
4903 4904 acp->aac_cmd_fib = aac_cmd_fib_startstop;
4904 4905 acp->ac_comp = aac_startstop_complete;
4905 4906 rval = aac_do_io(softs, acp);
4906 4907 break;
4907 4908 }
4908 4909 /* FALLTHRU */
4909 4910 case SCMD_TEST_UNIT_READY:
4910 4911 case SCMD_REQUEST_SENSE:
4911 4912 case SCMD_FORMAT:
4912 4913 aac_free_dmamap(acp);
4913 4914 if (bp && bp->b_un.b_addr && bp->b_bcount) {
4914 4915 if (acp->flags & AAC_CMD_BUF_READ) {
4915 4916 if (bp->b_flags & (B_PHYS|B_PAGEIO))
4916 4917 bp_mapin(bp);
4917 4918 bzero(bp->b_un.b_addr, bp->b_bcount);
4918 4919 }
4919 4920 pkt->pkt_state |= STATE_XFERRED_DATA;
4920 4921 }
4921 4922 aac_soft_callback(softs, acp);
4922 4923 rval = TRAN_ACCEPT;
4923 4924 break;
4924 4925
4925 4926 case SCMD_SYNCHRONIZE_CACHE:
4926 4927 acp->flags |= AAC_CMD_NTAG;
4927 4928 acp->aac_cmd_fib = aac_cmd_fib_sync;
4928 4929 acp->ac_comp = aac_synccache_complete;
4929 4930 rval = aac_do_io(softs, acp);
4930 4931 break;
4931 4932
4932 4933 case SCMD_DOORLOCK:
4933 4934 aac_free_dmamap(acp);
4934 4935 dvp->locked = (pkt->pkt_cdbp[4] & 0x01) ? 1 : 0;
4935 4936 aac_soft_callback(softs, acp);
4936 4937 rval = TRAN_ACCEPT;
4937 4938 break;
4938 4939
4939 4940 default: /* unknown command */
4940 4941 aac_unknown_scmd(softs, acp);
4941 4942 rval = TRAN_ACCEPT;
4942 4943 break;
4943 4944 }
4944 4945
4945 4946 return (rval);
4946 4947 }
4947 4948
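/*
 * tran_start(9E) entry point. Reinitialize the command and packet state
 * (the packet may be resubmitted), then route the command to the logical
 * device or pass-through path under io_lock.
 */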
4948 4949 static int
4949 4950 aac_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
4950 4951 {
4951 4952 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
4952 4953 struct aac_cmd *acp = PKT2AC(pkt);
4953 4954 struct aac_device *dvp = acp->dvp;
4954 4955 int rval;
4955 4956
4956 4957 DBCALLED(softs, 2);
4957 4958
4958 4959 /*
4959 4960 * Reinitialize some fields of ac and pkt; the packet may
4960 4961 * have been resubmitted
4961 4962 */
4962 4963 acp->flags &= AAC_CMD_CONSISTENT | AAC_CMD_DMA_PARTIAL | \
4963 4964 AAC_CMD_BUF_READ | AAC_CMD_BUF_WRITE | AAC_CMD_DMA_VALID;
4964 4965 acp->timeout = acp->pkt->pkt_time;
4965 4966 if (pkt->pkt_flags & FLAG_NOINTR)
4966 4967 acp->flags |= AAC_CMD_NO_INTR;
4967 4968 #ifdef DEBUG
4968 4969 acp->fib_flags = AACDB_FLAGS_FIB_SCMD;
4969 4970 #endif
4970 4971 pkt->pkt_reason = CMD_CMPLT;
4971 4972 pkt->pkt_state = 0;
4972 4973 pkt->pkt_statistics = 0;
4973 4974 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
4974 4975
4975 4976 if (acp->flags & AAC_CMD_DMA_VALID) {
4976 4977 pkt->pkt_resid = acp->bcount;
4977 4978 /* Consistent packets need to be sync'ed first */
4978 4979 if ((acp->flags & AAC_CMD_CONSISTENT) &&
4979 4980 (acp->flags & AAC_CMD_BUF_WRITE))
4980 4981 if (aac_dma_sync_ac(acp) != AACOK) {
4981 4982 ddi_fm_service_impact(softs->devinfo_p,
4982 4983 DDI_SERVICE_UNAFFECTED);
4983 4984 return (TRAN_BADPKT);
4984 4985 }
4985 4986 } else {
4986 4987 pkt->pkt_resid = 0;
4987 4988 }
4988 4989
4989 4990 mutex_enter(&softs->io_lock);
4990 4991 AACDB_PRINT_SCMD(softs, acp);
4991 4992 if ((dvp->flags & (AAC_DFLAG_VALID | AAC_DFLAG_CONFIGURING)) &&
4992 4993 !(softs->state & AAC_STATE_DEAD)) {
4993 4994 if (dvp->type == AAC_DEV_LD) {
4994 4995 if (ap->a_lun == 0)
4995 4996 rval = aac_tran_start_ld(softs, acp);
4996 4997 else
4997 4998 goto error;
4998 4999 } else {
4999 5000 rval = aac_do_io(softs, acp);
5000 5001 }
5001 5002 } else {
5002 5003 error:
5003 5004 #ifdef DEBUG
5004 5005 if (!(softs->state & AAC_STATE_DEAD)) {
5005 5006 AACDB_PRINT_TRAN(softs,
5006 5007 "Cannot send cmd to target t%dL%d: %s",
5007 5008 ap->a_target, ap->a_lun,
5008 5009 "target invalid");
5009 5010 } else {
5010 5011 AACDB_PRINT(softs, CE_WARN,
5011 5012 "Cannot send cmd to target t%dL%d: %s",
5012 5013 ap->a_target, ap->a_lun,
5013 5014 "adapter dead");
5014 5015 }
5015 5016 #endif
5016 5017 rval = TRAN_FATAL_ERROR;
5017 5018 }
5018 5019 mutex_exit(&softs->io_lock);
5019 5020 return (rval);
5020 5021 }
5021 5022
5022 5023 static int
5023 5024 aac_tran_getcap(struct scsi_address *ap, char *cap, int whom)
5024 5025 {
5025 5026 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5026 5027 struct aac_device *dvp;
5027 5028 int rval;
5028 5029
5029 5030 DBCALLED(softs, 2);
5030 5031
5031 5032 /* We don't allow inquiring about capabilities for other targets */
5032 5033 if (cap == NULL || whom == 0) {
5033 5034 AACDB_PRINT(softs, CE_WARN,
5034 5035 "GetCap> %s not supported: whom=%d", cap, whom);
5035 5036 return (-1);
5036 5037 }
5037 5038
5038 5039 mutex_enter(&softs->io_lock);
5039 5040 dvp = AAC_DEV(softs, ap->a_target);
5040 5041 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
5041 5042 mutex_exit(&softs->io_lock);
5042 5043 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to getcap",
5043 5044 ap->a_target, ap->a_lun);
5044 5045 return (-1);
5045 5046 }
5046 5047
5047 5048 switch (scsi_hba_lookup_capstr(cap)) {
5048 5049 case SCSI_CAP_ARQ: /* auto request sense */
5049 5050 rval = 1;
5050 5051 break;
5051 5052 case SCSI_CAP_UNTAGGED_QING:
5052 5053 case SCSI_CAP_TAGGED_QING:
5053 5054 rval = 1;
5054 5055 break;
5055 5056 case SCSI_CAP_DMA_MAX:
5056 5057 rval = softs->dma_max;
5057 5058 break;
5058 5059 default:
5059 5060 rval = -1;
5060 5061 break;
5061 5062 }
5062 5063 mutex_exit(&softs->io_lock);
5063 5064
5064 5065 AACDB_PRINT_TRAN(softs, "GetCap> %s t%dL%d: rval=%d",
5065 5066 cap, ap->a_target, ap->a_lun, rval);
5066 5067 return (rval);
5067 5068 }
5068 5069
5069 5070 /*ARGSUSED*/
5070 5071 static int
5071 5072 aac_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
5072 5073 {
5073 5074 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5074 5075 struct aac_device *dvp;
5075 5076 int rval;
5076 5077
5077 5078 DBCALLED(softs, 2);
5078 5079
5079 5080 	/* We don't allow setting capabilities for other targets */
5080 5081 if (cap == NULL || whom == 0) {
5081 5082 AACDB_PRINT(softs, CE_WARN,
5082 5083 "SetCap> %s not supported: whom=%d", cap, whom);
5083 5084 return (-1);
5084 5085 }
5085 5086
5086 5087 mutex_enter(&softs->io_lock);
5087 5088 dvp = AAC_DEV(softs, ap->a_target);
5088 5089 if (dvp == NULL || !AAC_DEV_IS_VALID(dvp)) {
5089 5090 mutex_exit(&softs->io_lock);
5090 5091 AACDB_PRINT_TRAN(softs, "Bad target t%dL%d to setcap",
5091 5092 ap->a_target, ap->a_lun);
5092 5093 return (-1);
5093 5094 }
5094 5095
5095 5096 switch (scsi_hba_lookup_capstr(cap)) {
5096 5097 case SCSI_CAP_ARQ:
5097 5098 /* Force auto request sense */
5098 5099 rval = (value == 1) ? 1 : 0;
5099 5100 break;
5100 5101 case SCSI_CAP_UNTAGGED_QING:
5101 5102 case SCSI_CAP_TAGGED_QING:
5102 5103 rval = (value == 1) ? 1 : 0;
5103 5104 break;
5104 5105 default:
5105 5106 rval = -1;
5106 5107 break;
5107 5108 }
5108 5109 mutex_exit(&softs->io_lock);
5109 5110
5110 5111 AACDB_PRINT_TRAN(softs, "SetCap> %s t%dL%d val=%d: rval=%d",
5111 5112 cap, ap->a_target, ap->a_lun, value, rval);
5112 5113 return (rval);
5113 5114 }
5114 5115
5115 5116 static void
5116 5117 aac_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5117 5118 {
5118 5119 struct aac_cmd *acp = PKT2AC(pkt);
5119 5120
5120 5121 DBCALLED(NULL, 2);
5121 5122
5122 5123 if (acp->sgt) {
5123 5124 kmem_free(acp->sgt, sizeof (struct aac_sge) * \
5124 5125 acp->left_cookien);
5125 5126 }
5126 5127 aac_free_dmamap(acp);
5127 5128 ASSERT(acp->slotp == NULL);
5128 5129 scsi_hba_pkt_free(ap, pkt);
5129 5130 }
5130 5131
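/*
 * Allocate and bind DMA resources for a command: move to the next DMA
 * window if one exists, otherwise bind the buf (bouncing non-aligned
 * buffers through an aligned copy buffer) and build the scatter/gather
 * table from the resulting cookies.
 */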
5131 5132 int
5132 5133 aac_cmd_dma_alloc(struct aac_softstate *softs, struct aac_cmd *acp,
5133 5134 struct buf *bp, int flags, int (*cb)(), caddr_t arg)
5134 5135 {
5135 5136 int kf = (cb == SLEEP_FUNC) ? KM_SLEEP : KM_NOSLEEP;
5136 5137 uint_t oldcookiec;
5137 5138 int bioerr;
5138 5139 int rval;
5139 5140
5140 5141 oldcookiec = acp->left_cookien;
5141 5142
5142 5143 /* Move window to build s/g map */
5143 5144 if (acp->total_nwin > 0) {
5144 5145 if (++acp->cur_win < acp->total_nwin) {
5145 5146 off_t off;
5146 5147 size_t len;
5147 5148
5148 5149 rval = ddi_dma_getwin(acp->buf_dma_handle, acp->cur_win,
5149 5150 &off, &len, &acp->cookie, &acp->left_cookien);
5150 5151 if (rval == DDI_SUCCESS)
5151 5152 goto get_dma_cookies;
5152 5153 AACDB_PRINT(softs, CE_WARN,
5153 5154 "ddi_dma_getwin() fail %d", rval);
5154 5155 return (AACERR);
5155 5156 }
5156 5157 AACDB_PRINT(softs, CE_WARN, "Nothing to transfer");
5157 5158 return (AACERR);
5158 5159 }
5159 5160
5160 5161 /* We need to transfer data, so we alloc DMA resources for this pkt */
5161 5162 if (bp && bp->b_bcount != 0 && !(acp->flags & AAC_CMD_DMA_VALID)) {
5162 5163 uint_t dma_flags = 0;
5163 5164 struct aac_sge *sge;
5164 5165
5165 5166 /*
5166 5167 * We will still use this point to fake some
5167 5168 		 * information in tran_start
5168 5169 */
5169 5170 acp->bp = bp;
5170 5171
5171 5172 /* Set dma flags */
5172 5173 if (BUF_IS_READ(bp)) {
5173 5174 dma_flags |= DDI_DMA_READ;
5174 5175 acp->flags |= AAC_CMD_BUF_READ;
5175 5176 } else {
5176 5177 dma_flags |= DDI_DMA_WRITE;
5177 5178 acp->flags |= AAC_CMD_BUF_WRITE;
5178 5179 }
5179 5180 if (flags & PKT_CONSISTENT)
5180 5181 dma_flags |= DDI_DMA_CONSISTENT;
5181 5182 if (flags & PKT_DMA_PARTIAL)
5182 5183 dma_flags |= DDI_DMA_PARTIAL;
5183 5184
5184 5185 /* Alloc buf dma handle */
5185 5186 if (!acp->buf_dma_handle) {
5186 5187 rval = ddi_dma_alloc_handle(softs->devinfo_p,
5187 5188 &softs->buf_dma_attr, cb, arg,
5188 5189 &acp->buf_dma_handle);
5189 5190 if (rval != DDI_SUCCESS) {
5190 5191 AACDB_PRINT(softs, CE_WARN,
5191 5192 "Can't allocate DMA handle, errno=%d",
5192 5193 rval);
5193 5194 goto error_out;
5194 5195 }
5195 5196 }
5196 5197
5197 5198 /* Bind buf */
5198 5199 if (((uintptr_t)bp->b_un.b_addr & AAC_DMA_ALIGN_MASK) == 0) {
5199 5200 rval = ddi_dma_buf_bind_handle(acp->buf_dma_handle,
5200 5201 bp, dma_flags, cb, arg, &acp->cookie,
5201 5202 &acp->left_cookien);
5202 5203 } else {
5203 5204 size_t bufsz;
5204 5205
5205 5206 AACDB_PRINT_TRAN(softs,
5206 5207 "non-aligned buffer: addr=0x%p, cnt=%lu",
5207 5208 (void *)bp->b_un.b_addr, bp->b_bcount);
5208 5209 if (bp->b_flags & (B_PAGEIO|B_PHYS))
5209 5210 bp_mapin(bp);
5210 5211
5211 5212 rval = ddi_dma_mem_alloc(acp->buf_dma_handle,
5212 5213 AAC_ROUNDUP(bp->b_bcount, AAC_DMA_ALIGN),
5213 5214 &softs->acc_attr, DDI_DMA_STREAMING,
5214 5215 cb, arg, &acp->abp, &bufsz, &acp->abh);
5215 5216
5216 5217 if (rval != DDI_SUCCESS) {
5217 5218 AACDB_PRINT(softs, CE_NOTE,
5218 5219 "Cannot alloc DMA to non-aligned buf");
5219 5220 bioerr = 0;
5220 5221 goto error_out;
5221 5222 }
5222 5223
5223 5224 if (acp->flags & AAC_CMD_BUF_WRITE)
5224 5225 ddi_rep_put8(acp->abh,
5225 5226 (uint8_t *)bp->b_un.b_addr,
5226 5227 (uint8_t *)acp->abp, bp->b_bcount,
5227 5228 DDI_DEV_AUTOINCR);
5228 5229
5229 5230 rval = ddi_dma_addr_bind_handle(acp->buf_dma_handle,
5230 5231 NULL, acp->abp, bufsz, dma_flags, cb, arg,
5231 5232 &acp->cookie, &acp->left_cookien);
5232 5233 }
5233 5234
5234 5235 switch (rval) {
5235 5236 case DDI_DMA_PARTIAL_MAP:
5236 5237 if (ddi_dma_numwin(acp->buf_dma_handle,
5237 5238 &acp->total_nwin) == DDI_FAILURE) {
5238 5239 AACDB_PRINT(softs, CE_WARN,
5239 5240 "Cannot get number of DMA windows");
5240 5241 bioerr = 0;
5241 5242 goto error_out;
5242 5243 }
5243 5244 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
5244 5245 acp->left_cookien);
5245 5246 acp->cur_win = 0;
5246 5247 break;
5247 5248
5248 5249 case DDI_DMA_MAPPED:
5249 5250 AACDB_PRINT_TRAN(softs, "buf bind, %d seg(s)",
5250 5251 acp->left_cookien);
5251 5252 acp->cur_win = 0;
5252 5253 acp->total_nwin = 1;
5253 5254 break;
5254 5255
5255 5256 case DDI_DMA_NORESOURCES:
5256 5257 bioerr = 0;
5257 5258 AACDB_PRINT(softs, CE_WARN,
5258 5259 "Cannot bind buf for DMA: DDI_DMA_NORESOURCES");
5259 5260 goto error_out;
5260 5261 case DDI_DMA_BADATTR:
5261 5262 case DDI_DMA_NOMAPPING:
5262 5263 bioerr = EFAULT;
5263 5264 AACDB_PRINT(softs, CE_WARN,
5264 5265 "Cannot bind buf for DMA: DDI_DMA_NOMAPPING");
5265 5266 goto error_out;
5266 5267 case DDI_DMA_TOOBIG:
5267 5268 bioerr = EINVAL;
5268 5269 AACDB_PRINT(softs, CE_WARN,
5269 5270 "Cannot bind buf for DMA: DDI_DMA_TOOBIG(%d)",
5270 5271 bp->b_bcount);
5271 5272 goto error_out;
5272 5273 default:
5273 5274 bioerr = EINVAL;
5274 5275 AACDB_PRINT(softs, CE_WARN,
5275 5276 "Cannot bind buf for DMA: %d", rval);
5276 5277 goto error_out;
5277 5278 }
5278 5279 acp->flags |= AAC_CMD_DMA_VALID;
5279 5280
5280 5281 get_dma_cookies:
5281 5282 ASSERT(acp->left_cookien > 0);
5282 5283 if (acp->left_cookien > softs->aac_sg_tablesize) {
5283 5284 AACDB_PRINT(softs, CE_NOTE, "large cookiec received %d",
5284 5285 acp->left_cookien);
5285 5286 bioerr = EINVAL;
5286 5287 goto error_out;
5287 5288 }
5288 5289 if (oldcookiec != acp->left_cookien && acp->sgt != NULL) {
5289 5290 kmem_free(acp->sgt, sizeof (struct aac_sge) * \
5290 5291 oldcookiec);
5291 5292 acp->sgt = NULL;
5292 5293 }
5293 5294 if (acp->sgt == NULL) {
5294 5295 acp->sgt = kmem_alloc(sizeof (struct aac_sge) * \
5295 5296 acp->left_cookien, kf);
5296 5297 if (acp->sgt == NULL) {
5297 5298 AACDB_PRINT(softs, CE_WARN,
5298 5299 "sgt kmem_alloc fail");
5299 5300 bioerr = ENOMEM;
5300 5301 goto error_out;
5301 5302 }
5302 5303 }
5303 5304
5304 5305 sge = &acp->sgt[0];
5305 5306 sge->bcount = acp->cookie.dmac_size;
5306 5307 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5307 5308 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5308 5309 acp->bcount = acp->cookie.dmac_size;
5309 5310 for (sge++; sge < &acp->sgt[acp->left_cookien]; sge++) {
5310 5311 ddi_dma_nextcookie(acp->buf_dma_handle, &acp->cookie);
5311 5312 sge->bcount = acp->cookie.dmac_size;
5312 5313 sge->addr.ad64.lo = AAC_LS32(acp->cookie.dmac_laddress);
5313 5314 sge->addr.ad64.hi = AAC_MS32(acp->cookie.dmac_laddress);
5314 5315 acp->bcount += acp->cookie.dmac_size;
5315 5316 }
5316 5317
5317 5318 /*
5318 5319 		 * Note: The old DMA engine does not correctly handle the
5319 5320 		 * dma_attr_maxxfer attribute, so we have to enforce it
5320 5321 		 * ourselves.
5321 5322 */
5322 5323 if (acp->bcount > softs->buf_dma_attr.dma_attr_maxxfer) {
5323 5324 AACDB_PRINT(softs, CE_NOTE,
5324 5325 "large xfer size received %d\n", acp->bcount);
5325 5326 bioerr = EINVAL;
5326 5327 goto error_out;
5327 5328 }
5328 5329
5329 5330 acp->total_xfer += acp->bcount;
5330 5331
5331 5332 if (acp->pkt) {
5332 5333 /* Return remaining byte count */
5333 5334 if (acp->total_xfer <= bp->b_bcount) {
5334 5335 acp->pkt->pkt_resid = bp->b_bcount - \
5335 5336 acp->total_xfer;
5336 5337 } else {
5337 5338 /*
5338 5339 * Allocated DMA size is greater than the buf
5339 5340 * size of bp. This is caused by devices like
5340 5341 			 * tape. We have extra bytes allocated, but
5341 5342 * the packet residual has to stay correct.
5342 5343 */
5343 5344 acp->pkt->pkt_resid = 0;
5344 5345 }
5345 5346 AACDB_PRINT_TRAN(softs,
5346 5347 "bp=0x%p, xfered=%d/%d, resid=%d",
5347 5348 (void *)bp->b_un.b_addr, (int)acp->total_xfer,
5348 5349 (int)bp->b_bcount, (int)acp->pkt->pkt_resid);
5349 5350 }
5350 5351 }
5351 5352 return (AACOK);
5352 5353
5353 5354 error_out:
5354 5355 bioerror(bp, bioerr);
5355 5356 return (AACERR);
5356 5357 }
5357 5358
5358 5359 static struct scsi_pkt *
5359 5360 aac_tran_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
5360 5361 struct buf *bp, int cmdlen, int statuslen, int tgtlen, int flags,
5361 5362 int (*callback)(), caddr_t arg)
5362 5363 {
5363 5364 struct aac_softstate *softs = AAC_TRAN2SOFTS(ap->a_hba_tran);
5364 5365 struct aac_cmd *acp, *new_acp;
5365 5366
5366 5367 DBCALLED(softs, 2);
5367 5368
5368 5369 /* Allocate pkt */
5369 5370 if (pkt == NULL) {
5370 5371 int slen;
5371 5372
5372 5373 /* Force auto request sense */
5373 5374 slen = (statuslen > softs->slen) ? statuslen : softs->slen;
5374 5375 pkt = scsi_hba_pkt_alloc(softs->devinfo_p, ap, cmdlen,
5375 5376 slen, tgtlen, sizeof (struct aac_cmd), callback, arg);
5376 5377 if (pkt == NULL) {
5377 5378 AACDB_PRINT(softs, CE_WARN, "Alloc scsi pkt failed");
5378 5379 return (NULL);
5379 5380 }
5380 5381 acp = new_acp = PKT2AC(pkt);
5381 5382 acp->pkt = pkt;
5382 5383 acp->cmdlen = cmdlen;
5383 5384
5384 5385 if (ap->a_target < AAC_MAX_LD) {
5385 5386 acp->dvp = &softs->containers[ap->a_target].dev;
5386 5387 acp->aac_cmd_fib = softs->aac_cmd_fib;
5387 5388 acp->ac_comp = aac_ld_complete;
5388 5389 } else {
5389 5390 _NOTE(ASSUMING_PROTECTED(softs->nondasds))
5390 5391
5391 5392 acp->dvp = &softs->nondasds[AAC_PD(ap->a_target)].dev;
5392 5393 acp->aac_cmd_fib = softs->aac_cmd_fib_scsi;
5393 5394 acp->ac_comp = aac_pd_complete;
5394 5395 }
5395 5396 } else {
5396 5397 acp = PKT2AC(pkt);
5397 5398 new_acp = NULL;
5398 5399 }
5399 5400
5400 5401 if (aac_cmd_dma_alloc(softs, acp, bp, flags, callback, arg) == AACOK)
5401 5402 return (pkt);
5402 5403
5403 5404 if (new_acp)
5404 5405 aac_tran_destroy_pkt(ap, pkt);
5405 5406 return (NULL);
5406 5407 }
5407 5408
5408 5409 /*
5409 5410 * tran_sync_pkt(9E) - explicit DMA synchronization
5410 5411 */
5411 5412 /*ARGSUSED*/
5412 5413 static void
5413 5414 aac_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
5414 5415 {
5415 5416 struct aac_cmd *acp = PKT2AC(pkt);
5416 5417
5417 5418 DBCALLED(NULL, 2);
5418 5419
5419 5420 if (aac_dma_sync_ac(acp) != AACOK)
5420 5421 ddi_fm_service_impact(
5421 5422 (AAC_TRAN2SOFTS(ap->a_hba_tran))->devinfo_p,
5422 5423 DDI_SERVICE_UNAFFECTED);
5423 5424 }
5424 5425
5425 5426 /*
5426 5427 * tran_dmafree(9E) - deallocate DMA resources allocated for command
5427 5428 */
5428 5429 /*ARGSUSED*/
5429 5430 static void
5430 5431 aac_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
5431 5432 {
5432 5433 struct aac_cmd *acp = PKT2AC(pkt);
5433 5434
5434 5435 DBCALLED(NULL, 2);
5435 5436
5436 5437 aac_free_dmamap(acp);
5437 5438 }
5438 5439
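/*
 * Hold the async queue and drain outstanding async io before marking the
 * bus quiesced; the wait is interruptible by a signal.
 */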
5439 5440 static int
5440 5441 aac_do_quiesce(struct aac_softstate *softs)
5441 5442 {
5442 5443 aac_hold_bus(softs, AAC_IOCMD_ASYNC);
5443 5444 if (softs->bus_ncmds[AAC_CMDQ_ASYNC]) {
5444 5445 aac_start_drain(softs);
5445 5446 do {
5446 5447 if (cv_wait_sig(&softs->drain_cv,
5447 5448 &softs->io_lock) == 0) {
5448 5449 /* Quiesce has been interrupted */
5449 5450 aac_stop_drain(softs);
5450 5451 aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5451 5452 aac_start_waiting_io(softs);
5452 5453 return (AACERR);
5453 5454 }
5454 5455 } while (softs->bus_ncmds[AAC_CMDQ_ASYNC]);
5455 5456 aac_stop_drain(softs);
5456 5457 }
5457 5458
5458 5459 softs->state |= AAC_STATE_QUIESCED;
5459 5460 return (AACOK);
5460 5461 }
5461 5462
5462 5463 static int
5463 5464 aac_tran_quiesce(dev_info_t *dip)
5464 5465 {
5465 5466 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5466 5467 int rval;
5467 5468
5468 5469 DBCALLED(softs, 1);
5469 5470
5470 5471 mutex_enter(&softs->io_lock);
5471 5472 if (aac_do_quiesce(softs) == AACOK)
5472 5473 rval = 0;
5473 5474 else
5474 5475 rval = 1;
5475 5476 mutex_exit(&softs->io_lock);
5476 5477 return (rval);
5477 5478 }
5478 5479
5479 5480 static int
5480 5481 aac_do_unquiesce(struct aac_softstate *softs)
5481 5482 {
5482 5483 softs->state &= ~AAC_STATE_QUIESCED;
5483 5484 aac_unhold_bus(softs, AAC_IOCMD_ASYNC);
5484 5485
5485 5486 aac_start_waiting_io(softs);
5486 5487 return (AACOK);
5487 5488 }
5488 5489
5489 5490 static int
5490 5491 aac_tran_unquiesce(dev_info_t *dip)
5491 5492 {
5492 5493 struct aac_softstate *softs = AAC_DIP2SOFTS(dip);
5493 5494 int rval;
5494 5495
5495 5496 DBCALLED(softs, 1);
5496 5497
5497 5498 mutex_enter(&softs->io_lock);
5498 5499 if (aac_do_unquiesce(softs) == AACOK)
5499 5500 rval = 0;
5500 5501 else
5501 5502 rval = 1;
5502 5503 mutex_exit(&softs->io_lock);
5503 5504 return (rval);
5504 5505 }
5505 5506
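/*
 * Allocate the SCSA transport structure, fill in the tran entry points
 * and attach this instance as an HBA.
 */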
5506 5507 static int
5507 5508 aac_hba_setup(struct aac_softstate *softs)
5508 5509 {
5509 5510 scsi_hba_tran_t *hba_tran;
5510 5511 int rval;
5511 5512
5512 5513 hba_tran = scsi_hba_tran_alloc(softs->devinfo_p, SCSI_HBA_CANSLEEP);
5513 5514 if (hba_tran == NULL)
5514 5515 return (AACERR);
5515 5516 hba_tran->tran_hba_private = softs;
5516 5517 hba_tran->tran_tgt_init = aac_tran_tgt_init;
5517 5518 hba_tran->tran_tgt_free = aac_tran_tgt_free;
5518 5519 hba_tran->tran_tgt_probe = scsi_hba_probe;
5519 5520 hba_tran->tran_start = aac_tran_start;
5520 5521 hba_tran->tran_getcap = aac_tran_getcap;
5521 5522 hba_tran->tran_setcap = aac_tran_setcap;
5522 5523 hba_tran->tran_init_pkt = aac_tran_init_pkt;
5523 5524 hba_tran->tran_destroy_pkt = aac_tran_destroy_pkt;
5524 5525 hba_tran->tran_reset = aac_tran_reset;
5525 5526 hba_tran->tran_abort = aac_tran_abort;
5526 5527 hba_tran->tran_sync_pkt = aac_tran_sync_pkt;
5527 5528 hba_tran->tran_dmafree = aac_tran_dmafree;
5528 5529 hba_tran->tran_quiesce = aac_tran_quiesce;
5529 5530 hba_tran->tran_unquiesce = aac_tran_unquiesce;
5530 5531 hba_tran->tran_bus_config = aac_tran_bus_config;
5531 5532 rval = scsi_hba_attach_setup(softs->devinfo_p, &softs->buf_dma_attr,
5532 5533 hba_tran, 0);
5533 5534 if (rval != DDI_SUCCESS) {
5534 5535 scsi_hba_tran_free(hba_tran);
5535 5536 AACDB_PRINT(softs, CE_WARN, "aac_hba_setup failed");
5536 5537 return (AACERR);
5537 5538 }
5538 5539
5539 5540 softs->hba_tran = hba_tran;
5540 5541 return (AACOK);
5541 5542 }
5542 5543
5543 5544 /*
5544 5545 * FIB setup operations
5545 5546 */
5546 5547
5547 5548 /*
5548 5549 * Init FIB header
5549 5550 */
5550 5551 static void
5551 5552 aac_cmd_fib_header(struct aac_softstate *softs, struct aac_cmd *acp,
5552 5553 uint16_t cmd)
5553 5554 {
5554 5555 struct aac_slot *slotp = acp->slotp;
5555 5556 ddi_acc_handle_t acc = slotp->fib_acc_handle;
5556 5557 struct aac_fib *fibp = slotp->fibp;
5557 5558 uint32_t xfer_state;
5558 5559
5559 5560 xfer_state =
5560 5561 AAC_FIBSTATE_HOSTOWNED |
5561 5562 AAC_FIBSTATE_INITIALISED |
5562 5563 AAC_FIBSTATE_EMPTY |
5563 5564 AAC_FIBSTATE_FAST_RESPONSE | /* enable fast io */
5564 5565 AAC_FIBSTATE_FROMHOST |
5565 5566 AAC_FIBSTATE_REXPECTED |
5566 5567 AAC_FIBSTATE_NORM;
5567 5568
5568 5569 if (!(acp->flags & AAC_CMD_SYNC))
5569 5570 xfer_state |= AAC_FIBSTATE_ASYNC;
5570 5571
5571 5572 ddi_put32(acc, &fibp->Header.XferState, xfer_state);
5572 5573 ddi_put16(acc, &fibp->Header.Command, cmd);
5573 5574 ddi_put8(acc, &fibp->Header.StructType, AAC_FIBTYPE_TFIB);
5574 5575 ddi_put8(acc, &fibp->Header.Flags, 0); /* don't care */
5575 5576 ddi_put16(acc, &fibp->Header.Size, acp->fib_size);
5576 5577 ddi_put16(acc, &fibp->Header.SenderSize, softs->aac_max_fib_size);
5577 5578 ddi_put32(acc, &fibp->Header.SenderFibAddress, (slotp->index << 2));
5578 5579 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5579 5580 ddi_put32(acc, &fibp->Header.SenderData, 0); /* don't care */
5580 5581 }
5581 5582
5582 5583 /*
5583 5584 * Init FIB for raw IO command
5584 5585 */
5585 5586 static void
5586 5587 aac_cmd_fib_rawio(struct aac_softstate *softs, struct aac_cmd *acp)
5587 5588 {
5588 5589 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5589 5590 struct aac_raw_io *io = (struct aac_raw_io *)&acp->slotp->fibp->data[0];
5590 5591 struct aac_sg_entryraw *sgp;
5591 5592 struct aac_sge *sge;
5592 5593
5593 5594 /* Calculate FIB size */
5594 5595 acp->fib_size = sizeof (struct aac_fib_header) + \
5595 5596 sizeof (struct aac_raw_io) + (acp->left_cookien - 1) * \
5596 5597 sizeof (struct aac_sg_entryraw);
5597 5598
5598 5599 aac_cmd_fib_header(softs, acp, RawIo);
5599 5600
5600 5601 ddi_put16(acc, &io->Flags, (acp->flags & AAC_CMD_BUF_READ) ? 1 : 0);
5601 5602 ddi_put16(acc, &io->BpTotal, 0);
5602 5603 ddi_put16(acc, &io->BpComplete, 0);
5603 5604
5604 5605 ddi_put32(acc, AAC_LO32(&io->BlockNumber), AAC_LS32(acp->blkno));
5605 5606 ddi_put32(acc, AAC_HI32(&io->BlockNumber), AAC_MS32(acp->blkno));
5606 5607 ddi_put16(acc, &io->ContainerId,
5607 5608 ((struct aac_container *)acp->dvp)->cid);
5608 5609
5609 5610 /* Fill SG table */
5610 5611 ddi_put32(acc, &io->SgMapRaw.SgCount, acp->left_cookien);
5611 5612 ddi_put32(acc, &io->ByteCount, acp->bcount);
5612 5613
5613 5614 for (sge = &acp->sgt[0], sgp = &io->SgMapRaw.SgEntryRaw[0];
5614 5615 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5615 5616 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5616 5617 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5617 5618 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5618 5619 sgp->Next = 0;
5619 5620 sgp->Prev = 0;
5620 5621 sgp->Flags = 0;
5621 5622 }
5622 5623 }
5623 5624
5624 5625 /* Init FIB for 64-bit block IO command */
5625 5626 static void
5626 5627 aac_cmd_fib_brw64(struct aac_softstate *softs, struct aac_cmd *acp)
5627 5628 {
5628 5629 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5629 5630 struct aac_blockread64 *br = (struct aac_blockread64 *) \
5630 5631 &acp->slotp->fibp->data[0];
5631 5632 struct aac_sg_entry64 *sgp;
5632 5633 struct aac_sge *sge;
5633 5634
5634 5635 acp->fib_size = sizeof (struct aac_fib_header) + \
5635 5636 sizeof (struct aac_blockread64) + (acp->left_cookien - 1) * \
5636 5637 sizeof (struct aac_sg_entry64);
5637 5638
5638 5639 aac_cmd_fib_header(softs, acp, ContainerCommand64);
5639 5640
5640 5641 /*
5641 5642 * The definitions for aac_blockread64 and aac_blockwrite64
5642 5643 * are the same.
5643 5644 */
5644 5645 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5645 5646 ddi_put16(acc, &br->ContainerId,
5646 5647 ((struct aac_container *)acp->dvp)->cid);
5647 5648 ddi_put32(acc, &br->Command, (acp->flags & AAC_CMD_BUF_READ) ?
5648 5649 VM_CtHostRead64 : VM_CtHostWrite64);
5649 5650 ddi_put16(acc, &br->Pad, 0);
5650 5651 ddi_put16(acc, &br->Flags, 0);
5651 5652
5652 5653 /* Fill SG table */
5653 5654 ddi_put32(acc, &br->SgMap64.SgCount, acp->left_cookien);
5654 5655 ddi_put16(acc, &br->SectorCount, acp->bcount / AAC_BLK_SIZE);
5655 5656
5656 5657 for (sge = &acp->sgt[0], sgp = &br->SgMap64.SgEntry64[0];
5657 5658 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5658 5659 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5659 5660 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5660 5661 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5661 5662 }
5662 5663 }
5663 5664
5664 5665 /* Init FIB for block IO command */
5665 5666 static void
5666 5667 aac_cmd_fib_brw(struct aac_softstate *softs, struct aac_cmd *acp)
5667 5668 {
5668 5669 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5669 5670 struct aac_blockread *br = (struct aac_blockread *) \
5670 5671 &acp->slotp->fibp->data[0];
5671 5672 struct aac_sg_entry *sgp;
5672 5673 struct aac_sge *sge = &acp->sgt[0];
5673 5674
5674 5675 if (acp->flags & AAC_CMD_BUF_READ) {
5675 5676 acp->fib_size = sizeof (struct aac_fib_header) + \
5676 5677 sizeof (struct aac_blockread) + (acp->left_cookien - 1) * \
5677 5678 sizeof (struct aac_sg_entry);
5678 5679
5679 5680 ddi_put32(acc, &br->Command, VM_CtBlockRead);
5680 5681 ddi_put32(acc, &br->SgMap.SgCount, acp->left_cookien);
5681 5682 sgp = &br->SgMap.SgEntry[0];
5682 5683 } else {
5683 5684 struct aac_blockwrite *bw = (struct aac_blockwrite *)br;
5684 5685
5685 5686 acp->fib_size = sizeof (struct aac_fib_header) + \
5686 5687 sizeof (struct aac_blockwrite) + (acp->left_cookien - 1) * \
5687 5688 sizeof (struct aac_sg_entry);
5688 5689
5689 5690 ddi_put32(acc, &bw->Command, VM_CtBlockWrite);
5690 5691 ddi_put32(acc, &bw->Stable, CUNSTABLE);
5691 5692 ddi_put32(acc, &bw->SgMap.SgCount, acp->left_cookien);
5692 5693 sgp = &bw->SgMap.SgEntry[0];
5693 5694 }
5694 5695 aac_cmd_fib_header(softs, acp, ContainerCommand);
5695 5696
5696 5697 /*
5697 5698 	 * aac_blockread and aac_blockwrite have similar structure
5698 5699 	 * headers, so use br for bw here
5699 5700 */
5700 5701 ddi_put32(acc, &br->BlockNumber, (uint32_t)acp->blkno);
5701 5702 ddi_put32(acc, &br->ContainerId,
5702 5703 ((struct aac_container *)acp->dvp)->cid);
5703 5704 ddi_put32(acc, &br->ByteCount, acp->bcount);
5704 5705
5705 5706 /* Fill SG table */
5706 5707 for (sge = &acp->sgt[0];
5707 5708 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5708 5709 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5709 5710 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5710 5711 }
5711 5712 }
5712 5713
5713 5714 /*ARGSUSED*/
5714 5715 void
5715 5716 aac_cmd_fib_copy(struct aac_softstate *softs, struct aac_cmd *acp)
5716 5717 {
5717 5718 struct aac_slot *slotp = acp->slotp;
5718 5719 struct aac_fib *fibp = slotp->fibp;
5719 5720 ddi_acc_handle_t acc = slotp->fib_acc_handle;
5720 5721
5721 5722 ddi_rep_put8(acc, (uint8_t *)acp->fibp, (uint8_t *)fibp,
5722 5723 acp->fib_size, /* only copy data of needed length */
5723 5724 DDI_DEV_AUTOINCR);
5724 5725 ddi_put32(acc, &fibp->Header.ReceiverFibAddress, slotp->fib_phyaddr);
5725 5726 ddi_put32(acc, &fibp->Header.SenderFibAddress, slotp->index << 2);
5726 5727 }
5727 5728
5728 5729 static void
5729 5730 aac_cmd_fib_sync(struct aac_softstate *softs, struct aac_cmd *acp)
5730 5731 {
5731 5732 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5732 5733 struct aac_synchronize_command *sync =
5733 5734 (struct aac_synchronize_command *)&acp->slotp->fibp->data[0];
5734 5735
5735 5736 acp->fib_size = AAC_FIB_SIZEOF(struct aac_synchronize_command);
5736 5737
5737 5738 aac_cmd_fib_header(softs, acp, ContainerCommand);
5738 5739 ddi_put32(acc, &sync->Command, VM_ContainerConfig);
5739 5740 ddi_put32(acc, &sync->Type, (uint32_t)CT_FLUSH_CACHE);
5740 5741 ddi_put32(acc, &sync->Cid, ((struct aac_container *)acp->dvp)->cid);
5741 5742 ddi_put32(acc, &sync->Count,
5742 5743 sizeof (((struct aac_synchronize_reply *)0)->Data));
5743 5744 }
5744 5745
5745 5746 /*
5746 5747 * Start/Stop unit (Power Management)
5747 5748 */
5748 5749 static void
5749 5750 aac_cmd_fib_startstop(struct aac_softstate *softs, struct aac_cmd *acp)
5750 5751 {
5751 5752 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5752 5753 struct aac_Container *cmd =
5753 5754 (struct aac_Container *)&acp->slotp->fibp->data[0];
5754 5755 union scsi_cdb *cdbp = (void *)acp->pkt->pkt_cdbp;
5755 5756
5756 5757 acp->fib_size = AAC_FIB_SIZEOF(struct aac_Container);
5757 5758
5758 5759 aac_cmd_fib_header(softs, acp, ContainerCommand);
5759 5760 bzero(cmd, sizeof (*cmd) - CT_PACKET_SIZE);
5760 5761 ddi_put32(acc, &cmd->Command, VM_ContainerConfig);
5761 5762 ddi_put32(acc, &cmd->CTCommand.command, CT_PM_DRIVER_SUPPORT);
5762 5763 ddi_put32(acc, &cmd->CTCommand.param[0], cdbp->cdb_opaque[4] & 1 ? \
5763 5764 AAC_PM_DRIVERSUP_START_UNIT : AAC_PM_DRIVERSUP_STOP_UNIT);
5764 5765 ddi_put32(acc, &cmd->CTCommand.param[1],
5765 5766 ((struct aac_container *)acp->dvp)->cid);
5766 5767 ddi_put32(acc, &cmd->CTCommand.param[2], cdbp->cdb_opaque[1] & 1);
5767 5768 }
5768 5769
5769 5770 /*
5770 5771 * Init FIB for pass-through SCMD
5771 5772 */
5772 5773 static void
5773 5774 aac_cmd_fib_srb(struct aac_cmd *acp)
5774 5775 {
5775 5776 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5776 5777 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5777 5778 uint8_t *cdb;
5778 5779
5779 5780 ddi_put32(acc, &srb->function, SRBF_ExecuteScsi);
5780 5781 ddi_put32(acc, &srb->retry_limit, 0);
5781 5782 ddi_put32(acc, &srb->cdb_size, acp->cmdlen);
5782 5783 ddi_put32(acc, &srb->timeout, 0); /* use driver timeout */
5783 5784 if (acp->fibp == NULL) {
5784 5785 if (acp->flags & AAC_CMD_BUF_READ)
5785 5786 ddi_put32(acc, &srb->flags, SRB_DataIn);
5786 5787 else if (acp->flags & AAC_CMD_BUF_WRITE)
5787 5788 ddi_put32(acc, &srb->flags, SRB_DataOut);
5788 5789 ddi_put32(acc, &srb->channel,
5789 5790 ((struct aac_nondasd *)acp->dvp)->bus);
5790 5791 ddi_put32(acc, &srb->id, ((struct aac_nondasd *)acp->dvp)->tid);
5791 5792 ddi_put32(acc, &srb->lun, 0);
5792 5793 cdb = acp->pkt->pkt_cdbp;
5793 5794 } else {
5794 5795 struct aac_srb *srb0 = (struct aac_srb *)&acp->fibp->data[0];
5795 5796
5796 5797 ddi_put32(acc, &srb->flags, srb0->flags);
5797 5798 ddi_put32(acc, &srb->channel, srb0->channel);
5798 5799 ddi_put32(acc, &srb->id, srb0->id);
5799 5800 ddi_put32(acc, &srb->lun, srb0->lun);
5800 5801 cdb = srb0->cdb;
5801 5802 }
5802 5803 ddi_rep_put8(acc, cdb, srb->cdb, acp->cmdlen, DDI_DEV_AUTOINCR);
5803 5804 }
5804 5805
5805 5806 static void
5806 5807 aac_cmd_fib_scsi32(struct aac_softstate *softs, struct aac_cmd *acp)
5807 5808 {
5808 5809 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5809 5810 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5810 5811 struct aac_sg_entry *sgp;
5811 5812 struct aac_sge *sge;
5812 5813
5813 5814 acp->fib_size = sizeof (struct aac_fib_header) + \
5814 5815 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5815 5816 acp->left_cookien * sizeof (struct aac_sg_entry);
5816 5817
5817 5818 /* Fill FIB and SRB headers, and copy cdb */
5818 5819 aac_cmd_fib_header(softs, acp, ScsiPortCommand);
5819 5820 aac_cmd_fib_srb(acp);
5820 5821
5821 5822 /* Fill SG table */
5822 5823 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5823 5824 ddi_put32(acc, &srb->count, acp->bcount);
5824 5825
5825 5826 for (sge = &acp->sgt[0], sgp = &srb->sg.SgEntry[0];
5826 5827 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5827 5828 ddi_put32(acc, &sgp->SgAddress, sge->addr.ad32);
5828 5829 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5829 5830 }
5830 5831 }
5831 5832
5832 5833 static void
5833 5834 aac_cmd_fib_scsi64(struct aac_softstate *softs, struct aac_cmd *acp)
5834 5835 {
5835 5836 ddi_acc_handle_t acc = acp->slotp->fib_acc_handle;
5836 5837 struct aac_srb *srb = (struct aac_srb *)&acp->slotp->fibp->data[0];
5837 5838 struct aac_sg_entry64 *sgp;
5838 5839 struct aac_sge *sge;
5839 5840
5840 5841 acp->fib_size = sizeof (struct aac_fib_header) + \
5841 5842 sizeof (struct aac_srb) - sizeof (struct aac_sg_entry) + \
5842 5843 acp->left_cookien * sizeof (struct aac_sg_entry64);
5843 5844
5844 5845 /* Fill FIB and SRB headers, and copy cdb */
5845 5846 aac_cmd_fib_header(softs, acp, ScsiPortCommandU64);
5846 5847 aac_cmd_fib_srb(acp);
5847 5848
5848 5849 /* Fill SG table */
5849 5850 ddi_put32(acc, &srb->sg.SgCount, acp->left_cookien);
5850 5851 ddi_put32(acc, &srb->count, acp->bcount);
5851 5852
5852 5853 for (sge = &acp->sgt[0],
5853 5854 sgp = &((struct aac_sg_table64 *)&srb->sg)->SgEntry64[0];
5854 5855 sge < &acp->sgt[acp->left_cookien]; sge++, sgp++) {
5855 5856 ddi_put32(acc, AAC_LO32(&sgp->SgAddress), sge->addr.ad64.lo);
5856 5857 ddi_put32(acc, AAC_HI32(&sgp->SgAddress), sge->addr.ad64.hi);
5857 5858 ddi_put32(acc, &sgp->SgByteCount, sge->bcount);
5858 5859 }
5859 5860 }
5860 5861
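/*
 * Bind a command to a free FIB slot, build its FIB and sync the FIB for
 * the device; fail if no slot is available.
 */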
5861 5862 static int
5862 5863 aac_cmd_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5863 5864 {
5864 5865 struct aac_slot *slotp;
5865 5866
5866 5867 if (slotp = aac_get_slot(softs)) {
5867 5868 acp->slotp = slotp;
5868 5869 slotp->acp = acp;
5869 5870 acp->aac_cmd_fib(softs, acp);
5870 5871 (void) ddi_dma_sync(slotp->fib_dma_handle, 0, 0,
5871 5872 DDI_DMA_SYNC_FORDEV);
5872 5873 return (AACOK);
5873 5874 }
5874 5875 return (AACERR);
5875 5876 }
5876 5877
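/*
 * Check the bus and per-device throttles before binding a command to a
 * slot; a non-tagged command is only bound when the device queue is
 * empty, otherwise the device throttle is set to drain.
 */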
5877 5878 static int
5878 5879 aac_bind_io(struct aac_softstate *softs, struct aac_cmd *acp)
5879 5880 {
5880 5881 struct aac_device *dvp = acp->dvp;
5881 5882 int q = AAC_CMDQ(acp);
5882 5883
5883 5884 if (softs->bus_ncmds[q] < softs->bus_throttle[q]) {
5884 5885 if (dvp) {
5885 5886 if (dvp->ncmds[q] < dvp->throttle[q]) {
5886 5887 if (!(acp->flags & AAC_CMD_NTAG) ||
5887 5888 dvp->ncmds[q] == 0) {
5888 5889 return (aac_cmd_slot_bind(softs, acp));
5889 5890 }
5890 5891 ASSERT(q == AAC_CMDQ_ASYNC);
5891 5892 aac_set_throttle(softs, dvp, AAC_CMDQ_ASYNC,
5892 5893 AAC_THROTTLE_DRAIN);
5893 5894 }
5894 5895 } else {
5895 5896 return (aac_cmd_slot_bind(softs, acp));
5896 5897 }
5897 5898 }
5898 5899 return (AACERR);
5899 5900 }
5900 5901
5901 5902 static int
5902 5903 aac_sync_fib_slot_bind(struct aac_softstate *softs, struct aac_cmd *acp)
5903 5904 {
5904 5905 struct aac_slot *slotp;
5905 5906
5906 5907 while (softs->sync_ac.slotp)
5907 5908 cv_wait(&softs->sync_fib_cv, &softs->io_lock);
5908 5909
5909 5910 if (slotp = aac_get_slot(softs)) {
5910 5911 ASSERT(acp->slotp == NULL);
5911 5912
5912 5913 acp->slotp = slotp;
5913 5914 slotp->acp = acp;
5914 5915 return (AACOK);
5915 5916 }
5916 5917 return (AACERR);
5917 5918 }
5918 5919
5919 5920 static void
5920 5921 aac_sync_fib_slot_release(struct aac_softstate *softs, struct aac_cmd *acp)
5921 5922 {
5922 5923 ASSERT(acp->slotp);
5923 5924
5924 5925 aac_release_slot(softs, acp->slotp);
5925 5926 acp->slotp->acp = NULL;
5926 5927 acp->slotp = NULL;
5927 5928
5928 5929 cv_signal(&softs->sync_fib_cv);
5929 5930 }
5930 5931
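/*
 * Send a bound command to the adapter and account for it on the busy
 * queue; on a send failure the command is completed with an error.
 */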
5931 5932 static void
5932 5933 aac_start_io(struct aac_softstate *softs, struct aac_cmd *acp)
5933 5934 {
5934 5935 struct aac_slot *slotp = acp->slotp;
5935 5936 int q = AAC_CMDQ(acp);
5936 5937 int rval;
5937 5938
5938 5939 /* Set ac and pkt */
5939 5940 if (acp->pkt) { /* ac from ioctl has no pkt */
5940 5941 acp->pkt->pkt_state |=
5941 5942 STATE_GOT_BUS | STATE_GOT_TARGET | STATE_SENT_CMD;
5942 5943 }
5943 5944 if (acp->timeout) /* 0 indicates no timeout */
5944 5945 acp->timeout += aac_timebase + aac_tick;
5945 5946
5946 5947 if (acp->dvp)
5947 5948 acp->dvp->ncmds[q]++;
5948 5949 softs->bus_ncmds[q]++;
5949 5950 aac_cmd_enqueue(&softs->q_busy, acp);
5950 5951
5951 5952 AACDB_PRINT_FIB(softs, slotp);
5952 5953
5953 5954 if (softs->flags & AAC_FLAGS_NEW_COMM) {
5954 5955 rval = aac_send_command(softs, slotp);
5955 5956 } else {
5956 5957 /*
5957 5958 		 * If the fib cannot be enqueued, the adapter is in an abnormal
5958 5959 		 * state and there will be no interrupt to us.
5959 5960 */
5960 5961 rval = aac_fib_enqueue(softs, AAC_ADAP_NORM_CMD_Q,
5961 5962 slotp->fib_phyaddr, acp->fib_size);
5962 5963 }
5963 5964
5964 5965 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS)
5965 5966 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_UNAFFECTED);
5966 5967
5967 5968 /*
5968 5969 	 * NOTE: We send commands only when slots are available, so we
5969 5970 	 * should never reach here.
5970 5971 */
5971 5972 if (rval != AACOK) {
5972 5973 AACDB_PRINT(softs, CE_NOTE, "SCMD send failed");
5973 5974 if (acp->pkt) {
5974 5975 acp->pkt->pkt_state &= ~STATE_SENT_CMD;
5975 5976 aac_set_pkt_reason(softs, acp, CMD_INCOMPLETE, 0);
5976 5977 }
5977 5978 aac_end_io(softs, acp);
5978 5979 if (!(acp->flags & (AAC_CMD_NO_INTR | AAC_CMD_NO_CB)))
5979 5980 ddi_trigger_softintr(softs->softint_id);
5980 5981 }
5981 5982 }
5982 5983
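/*
 * Start as many waiting commands from a wait queue as free slots and
 * throttles allow.
 */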
5983 5984 static void
5984 5985 aac_start_waitq(struct aac_softstate *softs, struct aac_cmd_queue *q)
5985 5986 {
5986 5987 struct aac_cmd *acp, *next_acp;
5987 5988
5988 5989 /* Serve as many waiting io's as possible */
5989 5990 for (acp = q->q_head; acp; acp = next_acp) {
5990 5991 next_acp = acp->next;
5991 5992 if (aac_bind_io(softs, acp) == AACOK) {
5992 5993 aac_cmd_delete(q, acp);
5993 5994 aac_start_io(softs, acp);
5994 5995 }
5995 5996 if (softs->free_io_slot_head == NULL)
5996 5997 break;
5997 5998 }
5998 5999 }
5999 6000
6000 6001 static void
6001 6002 aac_start_waiting_io(struct aac_softstate *softs)
6002 6003 {
6003 6004 /*
6004 6005 	 * Sync FIB io is served before async FIB io so that io requests
6005 6006 	 * sent by interactive userland commands are responded to asap.
6006 6007 */
6007 6008 if (softs->q_wait[AAC_CMDQ_SYNC].q_head)
6008 6009 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_SYNC]);
6009 6010 if (softs->q_wait[AAC_CMDQ_ASYNC].q_head)
6010 6011 aac_start_waitq(softs, &softs->q_wait[AAC_CMDQ_ASYNC]);
6011 6012 }
6012 6013
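/*
 * Complete all commands on the completion queue, syncing consistent
 * read buffers and checking the FM handles before calling back SCSA.
 */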
6013 6014 static void
6014 6015 aac_drain_comp_q(struct aac_softstate *softs)
6015 6016 {
6016 6017 struct aac_cmd *acp;
6017 6018 struct scsi_pkt *pkt;
6018 6019
6019 6020 /*CONSTCOND*/
6020 6021 while (1) {
6021 6022 mutex_enter(&softs->q_comp_mutex);
6022 6023 acp = aac_cmd_dequeue(&softs->q_comp);
6023 6024 mutex_exit(&softs->q_comp_mutex);
6024 6025 if (acp != NULL) {
6025 6026 ASSERT(acp->pkt != NULL);
6026 6027 pkt = acp->pkt;
6027 6028
6028 6029 if (pkt->pkt_reason == CMD_CMPLT) {
6029 6030 /*
6030 6031 * Consistent packets need to be sync'ed first
6031 6032 */
6032 6033 if ((acp->flags & AAC_CMD_CONSISTENT) &&
6033 6034 (acp->flags & AAC_CMD_BUF_READ)) {
6034 6035 if (aac_dma_sync_ac(acp) != AACOK) {
6035 6036 ddi_fm_service_impact(
6036 6037 softs->devinfo_p,
6037 6038 DDI_SERVICE_UNAFFECTED);
6038 6039 pkt->pkt_reason = CMD_TRAN_ERR;
6039 6040 pkt->pkt_statistics = 0;
6040 6041 }
6041 6042 }
6042 6043 if ((aac_check_acc_handle(softs-> \
6043 6044 comm_space_acc_handle) != DDI_SUCCESS) ||
6044 6045 (aac_check_acc_handle(softs-> \
6045 6046 pci_mem_handle) != DDI_SUCCESS)) {
6046 6047 ddi_fm_service_impact(softs->devinfo_p,
6047 6048 DDI_SERVICE_UNAFFECTED);
6048 6049 ddi_fm_acc_err_clear(softs-> \
6049 6050 pci_mem_handle, DDI_FME_VER0);
6050 6051 pkt->pkt_reason = CMD_TRAN_ERR;
6051 6052 pkt->pkt_statistics = 0;
6052 6053 }
6053 6054 if (aac_check_dma_handle(softs-> \
6054 6055 comm_space_dma_handle) != DDI_SUCCESS) {
6055 6056 ddi_fm_service_impact(softs->devinfo_p,
6056 6057 DDI_SERVICE_UNAFFECTED);
6057 6058 pkt->pkt_reason = CMD_TRAN_ERR;
6058 6059 pkt->pkt_statistics = 0;
6059 6060 }
6060 6061 }
6061 6062 scsi_hba_pkt_comp(pkt);
6062 6063 } else {
6063 6064 break;
6064 6065 }
6065 6066 }
6066 6067 }
6067 6068
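/*
 * Allocate, map and bind the DMA'able FIB area for one io slot.
 */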
6068 6069 static int
6069 6070 aac_alloc_fib(struct aac_softstate *softs, struct aac_slot *slotp)
6070 6071 {
6071 6072 size_t rlen;
6072 6073 ddi_dma_cookie_t cookie;
6073 6074 uint_t cookien;
6074 6075
6075 6076 /* Allocate FIB dma resource */
6076 6077 if (ddi_dma_alloc_handle(
6077 6078 softs->devinfo_p,
6078 6079 &softs->addr_dma_attr,
6079 6080 DDI_DMA_SLEEP,
6080 6081 NULL,
6081 6082 &slotp->fib_dma_handle) != DDI_SUCCESS) {
6082 6083 AACDB_PRINT(softs, CE_WARN,
6083 6084 "Cannot alloc dma handle for slot fib area");
6084 6085 goto error;
6085 6086 }
6086 6087 if (ddi_dma_mem_alloc(
6087 6088 slotp->fib_dma_handle,
6088 6089 softs->aac_max_fib_size,
6089 6090 &softs->acc_attr,
6090 6091 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
6091 6092 DDI_DMA_SLEEP,
6092 6093 NULL,
6093 6094 (caddr_t *)&slotp->fibp,
6094 6095 &rlen,
6095 6096 &slotp->fib_acc_handle) != DDI_SUCCESS) {
6096 6097 AACDB_PRINT(softs, CE_WARN,
6097 6098 "Cannot alloc mem for slot fib area");
6098 6099 goto error;
6099 6100 }
6100 6101 if (ddi_dma_addr_bind_handle(
6101 6102 slotp->fib_dma_handle,
6102 6103 NULL,
6103 6104 (caddr_t)slotp->fibp,
6104 6105 softs->aac_max_fib_size,
6105 6106 DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
6106 6107 DDI_DMA_SLEEP,
6107 6108 NULL,
6108 6109 &cookie,
6109 6110 &cookien) != DDI_DMA_MAPPED) {
6110 6111 AACDB_PRINT(softs, CE_WARN,
6111 6112 "dma bind failed for slot fib area");
6112 6113 goto error;
6113 6114 }
6114 6115
6115 6116 /* Check dma handles allocated in fib attach */
6116 6117 if (aac_check_dma_handle(slotp->fib_dma_handle) != DDI_SUCCESS) {
6117 6118 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
6118 6119 goto error;
6119 6120 }
6120 6121
6121 6122 /* Check acc handles allocated in fib attach */
6122 6123 if (aac_check_acc_handle(slotp->fib_acc_handle) != DDI_SUCCESS) {
6123 6124 ddi_fm_service_impact(softs->devinfo_p, DDI_SERVICE_LOST);
6124 6125 goto error;
6125 6126 }
6126 6127
6127 6128 slotp->fib_phyaddr = cookie.dmac_laddress;
6128 6129 return (AACOK);
6129 6130
6130 6131 error:
6131 6132 if (slotp->fib_acc_handle) {
6132 6133 ddi_dma_mem_free(&slotp->fib_acc_handle);
6133 6134 slotp->fib_acc_handle = NULL;
6134 6135 }
6135 6136 if (slotp->fib_dma_handle) {
6136 6137 ddi_dma_free_handle(&slotp->fib_dma_handle);
6137 6138 slotp->fib_dma_handle = NULL;
6138 6139 }
6139 6140 return (AACERR);
6140 6141 }
6141 6142
6142 6143 static void
6143 6144 aac_free_fib(struct aac_slot *slotp)
6144 6145 {
6145 6146 (void) ddi_dma_unbind_handle(slotp->fib_dma_handle);
6146 6147 ddi_dma_mem_free(&slotp->fib_acc_handle);
6147 6148 slotp->fib_acc_handle = NULL;
6148 6149 ddi_dma_free_handle(&slotp->fib_dma_handle);
6149 6150 slotp->fib_dma_handle = NULL;
6150 6151 slotp->fib_phyaddr = 0;
6151 6152 }
6152 6153
6153 6154 static void
6154 6155 aac_alloc_fibs(struct aac_softstate *softs)
6155 6156 {
6156 6157 int i;
6157 6158 struct aac_slot *slotp;
6158 6159
6159 6160 for (i = 0; i < softs->total_slots &&
6160 6161 softs->total_fibs < softs->total_slots; i++) {
6161 6162 slotp = &(softs->io_slot[i]);
6162 6163 if (slotp->fib_phyaddr)
6163 6164 continue;
6164 6165 if (aac_alloc_fib(softs, slotp) != AACOK)
6165 6166 break;
6166 6167
6167 6168 /* Insert the slot to the free slot list */
6168 6169 aac_release_slot(softs, slotp);
6169 6170 softs->total_fibs++;
6170 6171 }
6171 6172 }
6172 6173
6173 6174 static void
6174 6175 aac_destroy_fibs(struct aac_softstate *softs)
6175 6176 {
6176 6177 struct aac_slot *slotp;
6177 6178
6178 6179 while ((slotp = softs->free_io_slot_head) != NULL) {
6179 6180 ASSERT(slotp->fib_phyaddr);
6180 6181 softs->free_io_slot_head = slotp->next;
6181 6182 aac_free_fib(slotp);
6182 6183 ASSERT(slotp->index == (slotp - softs->io_slot));
6183 6184 softs->total_fibs--;
6184 6185 }
6185 6186 ASSERT(softs->total_fibs == 0);
6186 6187 }
6187 6188
6188 6189 static int
6189 6190 aac_create_slots(struct aac_softstate *softs)
6190 6191 {
6191 6192 int i;
6192 6193
6193 6194 softs->total_slots = softs->aac_max_fibs;
6194 6195 softs->io_slot = kmem_zalloc(sizeof (struct aac_slot) * \
6195 6196 softs->total_slots, KM_SLEEP);
6196 6197 if (softs->io_slot == NULL) {
6197 6198 AACDB_PRINT(softs, CE_WARN, "Cannot allocate slot");
6198 6199 return (AACERR);
6199 6200 }
6200 6201 for (i = 0; i < softs->total_slots; i++)
6201 6202 softs->io_slot[i].index = i;
6202 6203 softs->free_io_slot_head = NULL;
6203 6204 softs->total_fibs = 0;
6204 6205 return (AACOK);
6205 6206 }
6206 6207
6207 6208 static void
6208 6209 aac_destroy_slots(struct aac_softstate *softs)
6209 6210 {
6210 6211 ASSERT(softs->free_io_slot_head == NULL);
6211 6212
6212 6213 kmem_free(softs->io_slot, sizeof (struct aac_slot) * \
6213 6214 softs->total_slots);
6214 6215 softs->io_slot = NULL;
6215 6216 softs->total_slots = 0;
6216 6217 }
6217 6218
6218 6219 struct aac_slot *
6219 6220 aac_get_slot(struct aac_softstate *softs)
6220 6221 {
6221 6222 struct aac_slot *slotp;
6222 6223
6223 6224 if ((slotp = softs->free_io_slot_head) != NULL) {
6224 6225 softs->free_io_slot_head = slotp->next;
6225 6226 slotp->next = NULL;
6226 6227 }
6227 6228 return (slotp);
6228 6229 }
6229 6230
6230 6231 static void
6231 6232 aac_release_slot(struct aac_softstate *softs, struct aac_slot *slotp)
6232 6233 {
6233 6234 ASSERT((slotp->index >= 0) && (slotp->index < softs->total_slots));
6234 6235 ASSERT(slotp == &softs->io_slot[slotp->index]);
6235 6236
6236 6237 slotp->acp = NULL;
6237 6238 slotp->next = softs->free_io_slot_head;
6238 6239 softs->free_io_slot_head = slotp;
6239 6240 }
6240 6241
6241 6242 int
6242 6243 aac_do_io(struct aac_softstate *softs, struct aac_cmd *acp)
6243 6244 {
6244 6245 if (aac_bind_io(softs, acp) == AACOK)
6245 6246 aac_start_io(softs, acp);
6246 6247 else
6247 6248 aac_cmd_enqueue(&softs->q_wait[AAC_CMDQ(acp)], acp);
6248 6249
6249 6250 if (!(acp->flags & (AAC_CMD_NO_CB | AAC_CMD_NO_INTR)))
6250 6251 return (TRAN_ACCEPT);
6251 6252 	/*
6252 6253 	 * Because the sync FIB is always 512 bytes and reserved for
6253 6254 	 * critical functions, poll IO uses an async FIB.
6254 6255 	 */
6255 6256 if (acp->flags & AAC_CMD_NO_INTR) {
6256 6257 if (aac_do_poll_io(softs, acp) == AACOK)
6257 6258 return (TRAN_ACCEPT);
6258 6259 } else {
6259 6260 if (aac_do_sync_io(softs, acp) == AACOK)
6260 6261 return (TRAN_ACCEPT);
6261 6262 }
6262 6263 return (TRAN_BADPKT);
6263 6264 }
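/*
 * A sketch of how aac_do_io() above picks a completion model, using the
 * flag names from this driver:
 *
 *	neither AAC_CMD_NO_CB nor AAC_CMD_NO_INTR set
 *		-> interrupt driven with a completion callback;
 *		   TRAN_ACCEPT is returned immediately
 *	AAC_CMD_NO_INTR set
 *		-> aac_do_poll_io(): busy-wait on the adapter
 *	AAC_CMD_NO_CB set (interrupts enabled)
 *		-> aac_do_sync_io(): cv_wait on softs->event until done
 */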
6264 6265
6265 6266 static int
6266 6267 aac_do_poll_io(struct aac_softstate *softs, struct aac_cmd *acp)
6267 6268 {
6268 6269 int (*intr_handler)(struct aac_softstate *);
6269 6270
6270 6271 /*
6271 6272 	 * Interrupts are disabled, so we have to poll the adapter ourselves.
6272 6273 */
6273 6274 intr_handler = (softs->flags & AAC_FLAGS_NEW_COMM) ?
6274 6275 aac_process_intr_new : aac_process_intr_old;
6275 6276 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT))) {
6276 6277 int i = AAC_POLL_TIME * 1000;
6277 6278
6278 6279 AAC_BUSYWAIT((intr_handler(softs) != AAC_DB_RESPONSE_READY), i);
6279 6280 if (i == 0)
6280 6281 aac_cmd_timeout(softs, acp);
6281 6282 }
6282 6283
6283 6284 ddi_trigger_softintr(softs->softint_id);
6284 6285
6285 6286 if ((acp->flags & AAC_CMD_CMPLT) && !(acp->flags & AAC_CMD_ERR))
6286 6287 return (AACOK);
6287 6288 return (AACERR);
6288 6289 }
6289 6290
6290 6291 static int
6291 6292 aac_do_sync_io(struct aac_softstate *softs, struct aac_cmd *acp)
6292 6293 {
6293 6294 ASSERT(softs && acp);
6294 6295
6295 6296 while (!(acp->flags & (AAC_CMD_CMPLT | AAC_CMD_ABORT)))
6296 6297 cv_wait(&softs->event, &softs->io_lock);
6297 6298
6298 6299 if (acp->flags & AAC_CMD_CMPLT)
6299 6300 return (AACOK);
6300 6301 return (AACERR);
6301 6302 }
6302 6303
6303 6304 static int
6304 6305 aac_dma_sync_ac(struct aac_cmd *acp)
6305 6306 {
6306 6307 if (acp->buf_dma_handle) {
6307 6308 if (acp->flags & AAC_CMD_BUF_WRITE) {
6308 6309 if (acp->abp != NULL)
6309 6310 ddi_rep_put8(acp->abh,
6310 6311 (uint8_t *)acp->bp->b_un.b_addr,
6311 6312 (uint8_t *)acp->abp, acp->bp->b_bcount,
6312 6313 DDI_DEV_AUTOINCR);
6313 6314 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6314 6315 DDI_DMA_SYNC_FORDEV);
6315 6316 } else {
6316 6317 (void) ddi_dma_sync(acp->buf_dma_handle, 0, 0,
6317 6318 DDI_DMA_SYNC_FORCPU);
6318 6319 if (aac_check_dma_handle(acp->buf_dma_handle) !=
6319 6320 DDI_SUCCESS)
6320 6321 return (AACERR);
6321 6322 if (acp->abp != NULL)
6322 6323 ddi_rep_get8(acp->abh,
6323 6324 (uint8_t *)acp->bp->b_un.b_addr,
6324 6325 (uint8_t *)acp->abp, acp->bp->b_bcount,
6325 6326 DDI_DEV_AUTOINCR);
6326 6327 }
6327 6328 }
6328 6329 return (AACOK);
6329 6330 }
6330 6331
6331 6332 /*
6332 6333 * Copy AIF from adapter to the empty AIF slot and inform AIF threads
6333 6334 */
6334 6335 static void
6335 6336 aac_save_aif(struct aac_softstate *softs, ddi_acc_handle_t acc,
6336 6337 struct aac_fib *fibp0, int fib_size0)
6337 6338 {
6338 6339 struct aac_fib *fibp; /* FIB in AIF queue */
6339 6340 int fib_size;
6340 6341 uint16_t fib_command;
6341 6342 int current, next;
6342 6343
6343 6344 	/* Ignore non-AIF messages */
6344 6345 fib_command = ddi_get16(acc, &fibp0->Header.Command);
6345 6346 if (fib_command != AifRequest) {
6346 6347 cmn_err(CE_WARN, "!Unknown command from controller");
6347 6348 return;
6348 6349 }
6349 6350
6350 6351 mutex_enter(&softs->aifq_mutex);
6351 6352
6352 6353 /* Save AIF */
6353 6354 fibp = &softs->aifq[softs->aifq_idx].d;
6354 6355 fib_size = (fib_size0 > AAC_FIB_SIZE) ? AAC_FIB_SIZE : fib_size0;
6355 6356 ddi_rep_get8(acc, (uint8_t *)fibp, (uint8_t *)fibp0, fib_size,
6356 6357 DDI_DEV_AUTOINCR);
6357 6358
6358 6359 if (aac_check_acc_handle(softs->pci_mem_handle) != DDI_SUCCESS) {
6359 6360 ddi_fm_service_impact(softs->devinfo_p,
6360 6361 DDI_SERVICE_UNAFFECTED);
6361 6362 mutex_exit(&softs->aifq_mutex);
6362 6363 return;
6363 6364 }
6364 6365
6365 6366 AACDB_PRINT_AIF(softs, (struct aac_aif_command *)&fibp->data[0]);
6366 6367
6367 6368 /* Modify AIF contexts */
6368 6369 current = softs->aifq_idx;
6369 6370 next = (current + 1) % AAC_AIFQ_LENGTH;
6370 6371 if (next == 0) {
6371 6372 struct aac_fib_context *ctx_p;
6372 6373
6373 6374 softs->aifq_wrap = 1;
6374 6375 for (ctx_p = softs->fibctx_p; ctx_p; ctx_p = ctx_p->next) {
6375 6376 if (next == ctx_p->ctx_idx) {
6376 6377 ctx_p->ctx_flags |= AAC_CTXFLAG_FILLED;
6377 6378 } else if (current == ctx_p->ctx_idx &&
6378 6379 (ctx_p->ctx_flags & AAC_CTXFLAG_FILLED)) {
6379 6380 ctx_p->ctx_idx = next;
6380 6381 ctx_p->ctx_overrun++;
6381 6382 }
6382 6383 }
6383 6384 }
6384 6385 softs->aifq_idx = next;
6385 6386
6386 6387 /* Wakeup AIF threads */
6387 6388 cv_broadcast(&softs->aifq_cv);
6388 6389 mutex_exit(&softs->aifq_mutex);
6389 6390
6390 6391 /* Wakeup event thread to handle aif */
6391 6392 aac_event_disp(softs, AAC_EVENT_AIF);
6392 6393 }
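/*
 * A rough sketch of the wrap handling above: the AIF queue is a ring of
 * AAC_AIFQ_LENGTH FIBs written at aifq_idx, and each reader keeps its
 * own cursor in an aac_fib_context. When the write index is about to
 * wrap back to 0, a reader whose cursor equals the new write index is
 * marked AAC_CTXFLAG_FILLED (its view of the queue is full), while a
 * reader still sitting at the slot just written and already marked
 * FILLED has been lapped, so its cursor is pushed forward and its
 * ctx_overrun count is incremented.
 */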
6393 6394
6394 6395 static int
6395 6396 aac_return_aif_common(struct aac_softstate *softs, struct aac_fib_context *ctx,
6396 6397 struct aac_fib **fibpp)
6397 6398 {
6398 6399 int current;
6399 6400
6400 6401 current = ctx->ctx_idx;
6401 6402 if (current == softs->aifq_idx &&
6402 6403 !(ctx->ctx_flags & AAC_CTXFLAG_FILLED))
6403 6404 return (EAGAIN); /* Empty */
6404 6405
6405 6406 *fibpp = &softs->aifq[current].d;
6406 6407
6407 6408 ctx->ctx_flags &= ~AAC_CTXFLAG_FILLED;
6408 6409 ctx->ctx_idx = (current + 1) % AAC_AIFQ_LENGTH;
6409 6410 return (0);
6410 6411 }
6411 6412
6412 6413 int
6413 6414 aac_return_aif(struct aac_softstate *softs, struct aac_fib_context *ctx,
6414 6415 struct aac_fib **fibpp)
6415 6416 {
6416 6417 int rval;
6417 6418
6418 6419 mutex_enter(&softs->aifq_mutex);
6419 6420 rval = aac_return_aif_common(softs, ctx, fibpp);
6420 6421 mutex_exit(&softs->aifq_mutex);
6421 6422 return (rval);
6422 6423 }
6423 6424
6424 6425 int
6425 6426 aac_return_aif_wait(struct aac_softstate *softs, struct aac_fib_context *ctx,
6426 6427 struct aac_fib **fibpp)
6427 6428 {
6428 6429 int rval;
6429 6430
6430 6431 mutex_enter(&softs->aifq_mutex);
6431 6432 rval = aac_return_aif_common(softs, ctx, fibpp);
6432 6433 if (rval == EAGAIN) {
6433 6434 AACDB_PRINT(softs, CE_NOTE, "Waiting for AIF");
6434 6435 rval = cv_wait_sig(&softs->aifq_cv, &softs->aifq_mutex);
6435 6436 }
6436 6437 mutex_exit(&softs->aifq_mutex);
6437 6438 return ((rval > 0) ? 0 : EINTR);
6438 6439 }
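/*
 * Note on the wait path above: cv_wait_sig() returns 0 when the wait is
 * interrupted by a signal and a value greater than 0 when it is woken
 * normally, which is why a non-positive rval is mapped to EINTR.
 */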
6439 6440
6440 6441 /*
6441 6442 * The following function comes from Adaptec:
6442 6443 *
6443 6444  * When the driver sees a particular event indicating that containers have
6444 6445  * changed, it rescans the containers. However, a change may not be complete
6445 6446  * until some other event is received. For example, creating or deleting an
6446 6447  * array may incur as many as six AifEnConfigChange events, which would
6447 6448  * generate six container rescans. To reduce rescans, the driver sets a flag
6448 6449  * to wait for another particular event; when that event arrives, it rescans.
6449 6450 */
6450 6451 static int
6451 6452 aac_handle_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
6452 6453 {
6453 6454 ddi_acc_handle_t acc = softs->comm_space_acc_handle;
6454 6455 int en_type;
6455 6456 int devcfg_needed;
6456 6457 int cid;
6457 6458 uint32_t bus_id, tgt_id;
6458 6459 enum aac_cfg_event event = AAC_CFG_NULL_EXIST;
6459 6460
6460 6461 devcfg_needed = 0;
6461 6462 en_type = LE_32((uint32_t)aif->data.EN.type);
6462 6463
6463 6464 switch (LE_32((uint32_t)aif->command)) {
6464 6465 case AifCmdDriverNotify: {
6465 6466 cid = LE_32(aif->data.EN.data.ECC.container[0]);
6466 6467
6467 6468 switch (en_type) {
6468 6469 case AifDenMorphComplete:
6469 6470 case AifDenVolumeExtendComplete:
6470 6471 if (AAC_DEV_IS_VALID(&softs->containers[cid].dev))
6471 6472 softs->devcfg_wait_on = AifEnConfigChange;
6472 6473 break;
6473 6474 }
6474 6475 if (softs->devcfg_wait_on == en_type)
6475 6476 devcfg_needed = 1;
6476 6477 break;
6477 6478 }
6478 6479
6479 6480 case AifCmdEventNotify:
6480 6481 cid = LE_32(aif->data.EN.data.ECC.container[0]);
6481 6482 switch (en_type) {
6482 6483 case AifEnAddContainer:
6483 6484 case AifEnDeleteContainer:
6484 6485 softs->devcfg_wait_on = AifEnConfigChange;
6485 6486 break;
6486 6487 case AifEnContainerChange:
6487 6488 if (!softs->devcfg_wait_on)
6488 6489 softs->devcfg_wait_on = AifEnConfigChange;
6489 6490 break;
6490 6491 case AifEnContainerEvent:
6491 6492 if (ddi_get32(acc, &aif-> \
6492 6493 data.EN.data.ECE.eventType) == CT_PUP_MISSING_DRIVE)
6493 6494 devcfg_needed = 1;
6494 6495 break;
6495 6496 case AifEnAddJBOD:
6496 6497 if (!(softs->flags & AAC_FLAGS_JBOD))
6497 6498 return (AACERR);
6498 6499 event = AAC_CFG_ADD;
6499 6500 bus_id = (cid >> 24) & 0xf;
6500 6501 tgt_id = cid & 0xffff;
6501 6502 break;
6502 6503 case AifEnDeleteJBOD:
6503 6504 if (!(softs->flags & AAC_FLAGS_JBOD))
6504 6505 return (AACERR);
6505 6506 event = AAC_CFG_DELETE;
6506 6507 bus_id = (cid >> 24) & 0xf;
6507 6508 tgt_id = cid & 0xffff;
6508 6509 break;
6509 6510 }
6510 6511 if (softs->devcfg_wait_on == en_type)
6511 6512 devcfg_needed = 1;
6512 6513 break;
6513 6514
6514 6515 case AifCmdJobProgress:
6515 6516 if (LE_32((uint32_t)aif->data.PR[0].jd.type) == AifJobCtrZero) {
6516 6517 int pr_status;
6517 6518 uint32_t pr_ftick, pr_ctick;
6518 6519
6519 6520 pr_status = LE_32((uint32_t)aif->data.PR[0].status);
6520 6521 pr_ctick = LE_32(aif->data.PR[0].currentTick);
6521 6522 pr_ftick = LE_32(aif->data.PR[0].finalTick);
6522 6523
6523 6524 if ((pr_ctick == pr_ftick) ||
6524 6525 (pr_status == AifJobStsSuccess))
6525 6526 softs->devcfg_wait_on = AifEnContainerChange;
6526 6527 else if ((pr_ctick == 0) &&
6527 6528 (pr_status == AifJobStsRunning))
6528 6529 softs->devcfg_wait_on = AifEnContainerChange;
6529 6530 }
6530 6531 break;
6531 6532 }
6532 6533
6533 6534 if (devcfg_needed) {
6534 6535 softs->devcfg_wait_on = 0;
6535 6536 (void) aac_probe_containers(softs);
6536 6537 }
6537 6538
6538 6539 if (event != AAC_CFG_NULL_EXIST) {
6539 6540 ASSERT(en_type == AifEnAddJBOD || en_type == AifEnDeleteJBOD);
6540 6541 (void) aac_probe_jbod(softs,
6541 6542 AAC_P2VTGT(softs, bus_id, tgt_id), event);
6542 6543 }
6543 6544 return (AACOK);
6544 6545 }
6545 6546
6546 6547
6547 6548 /*
6548 6549 * Check and handle AIF events
6549 6550 */
6550 6551 static void
6551 6552 aac_aif_event(struct aac_softstate *softs)
6552 6553 {
6553 6554 struct aac_fib *fibp;
6554 6555
6555 6556 /*CONSTCOND*/
6556 6557 while (1) {
6557 6558 if (aac_return_aif(softs, &softs->aifctx, &fibp) != 0)
6558 6559 break; /* No more AIFs to handle, end loop */
6559 6560
6560 6561 		/* AIF overrun; array create/delete events may have been missed. */
6561 6562 if (softs->aifctx.ctx_overrun) {
6562 6563 softs->aifctx.ctx_overrun = 0;
6563 6564 }
6564 6565
6565 6566 /* AIF received, handle it */
6566 6567 struct aac_aif_command *aifp =
6567 6568 (struct aac_aif_command *)&fibp->data[0];
6568 6569 uint32_t aif_command = LE_32((uint32_t)aifp->command);
6569 6570
6570 6571 if (aif_command == AifCmdDriverNotify ||
6571 6572 aif_command == AifCmdEventNotify ||
6572 6573 aif_command == AifCmdJobProgress)
6573 6574 (void) aac_handle_aif(softs, aifp);
6574 6575 }
6575 6576 }
6576 6577
6577 6578 /*
6578 6579 * Timeout recovery
6579 6580 */
6580 6581 /*ARGSUSED*/
6581 6582 static void
6582 6583 aac_cmd_timeout(struct aac_softstate *softs, struct aac_cmd *acp)
6583 6584 {
6584 6585 #ifdef DEBUG
6585 6586 acp->fib_flags |= AACDB_FLAGS_FIB_TIMEOUT;
6586 6587 AACDB_PRINT(softs, CE_WARN, "acp %p timed out", acp);
6587 6588 AACDB_PRINT_FIB(softs, acp->slotp);
6588 6589 #endif
6589 6590
6590 6591 	/*
6591 6592 	 * Besides the firmware being in an unhealthy state, an
6592 6593 	 * overloaded adapter may also incur pkt timeouts.
6593 6594 	 * An adapter with a slower IOP may take longer than 60
6594 6595 	 * seconds to process commands, for example when it is doing
6595 6596 	 * a build on a RAID-5 volume while also being asked to
6596 6597 	 * perform IOs, so longer completion times should be
6597 6598 	 * tolerated.
6598 6599 	 */
6599 6600 switch (aac_do_reset(softs)) {
6600 6601 case AAC_IOP_RESET_SUCCEED:
6601 6602 aac_abort_iocmds(softs, AAC_IOCMD_OUTSTANDING, NULL, CMD_RESET);
6602 6603 aac_start_waiting_io(softs);
6603 6604 break;
6604 6605 case AAC_IOP_RESET_FAILED:
6605 6606 /* Abort all waiting cmds when adapter is dead */
6606 6607 aac_abort_iocmds(softs, AAC_IOCMD_ALL, NULL, CMD_TIMEOUT);
6607 6608 break;
6608 6609 case AAC_IOP_RESET_ABNORMAL:
6609 6610 aac_start_waiting_io(softs);
6610 6611 }
6611 6612 }
6612 6613
6613 6614 /*
6614 6615 * The following function comes from Adaptec:
6615 6616 *
6616 6617 * Time sync. command added to synchronize time with firmware every 30
6617 6618 * minutes (required for correct AIF timestamps etc.)
6618 6619 */
6619 6620 static void
6620 6621 aac_sync_tick(struct aac_softstate *softs)
6621 6622 {
6622 6623 ddi_acc_handle_t acc;
6623 6624 int rval;
6624 6625
6625 6626 mutex_enter(&softs->time_mutex);
6626 6627 ASSERT(softs->time_sync <= softs->timebase);
6627 6628 softs->time_sync = 0;
6628 6629 mutex_exit(&softs->time_mutex);
6629 6630
6630 6631 /* Time sync. with firmware every AAC_SYNC_TICK */
6631 6632 (void) aac_sync_fib_slot_bind(softs, &softs->sync_ac);
6632 6633 acc = softs->sync_ac.slotp->fib_acc_handle;
6633 6634
6634 6635 ddi_put32(acc, (void *)&softs->sync_ac.slotp->fibp->data[0],
6635 6636 ddi_get_time());
6636 6637 rval = aac_sync_fib(softs, SendHostTime, AAC_FIB_SIZEOF(uint32_t));
6637 6638 aac_sync_fib_slot_release(softs, &softs->sync_ac);
6638 6639
6639 6640 mutex_enter(&softs->time_mutex);
6640 6641 softs->time_sync = softs->timebase;
6641 6642 if (rval != AACOK)
6642 6643 /* retry shortly */
6643 6644 softs->time_sync += aac_tick << 1;
6644 6645 else
6645 6646 softs->time_sync += AAC_SYNC_TICK;
6646 6647 mutex_exit(&softs->time_mutex);
6647 6648 }
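/*
 * A small sketch of the rescheduling arithmetic above, with timebase
 * advancing by aac_tick seconds on every timer fire:
 *
 *	success:  time_sync = timebase + AAC_SYNC_TICK	  (next regular sync)
 *	failure:  time_sync = timebase + (aac_tick << 1)  (retry shortly)
 *
 * aac_timer() raises AAC_EVENT_SYNCTICK once timebase reaches time_sync.
 */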
6648 6649
6649 6650 /*
6650 6651 * Timeout checking and handling
6651 6652 */
6652 6653 static void
6653 6654 aac_daemon(struct aac_softstate *softs)
6654 6655 {
6655 6656 int time_out; /* set if timeout happened */
6656 6657 int time_adjust;
6657 6658 uint32_t softs_timebase;
6658 6659
6659 6660 mutex_enter(&softs->time_mutex);
6660 6661 ASSERT(softs->time_out <= softs->timebase);
6661 6662 softs->time_out = 0;
6662 6663 softs_timebase = softs->timebase;
6663 6664 mutex_exit(&softs->time_mutex);
6664 6665
6665 6666 /* Check slots for timeout pkts */
6666 6667 time_adjust = 0;
6667 6668 do {
6668 6669 struct aac_cmd *acp;
6669 6670
6670 6671 time_out = 0;
6671 6672 for (acp = softs->q_busy.q_head; acp; acp = acp->next) {
6672 6673 if (acp->timeout == 0)
6673 6674 continue;
6674 6675
6675 6676 			/*
6676 6677 			 * If a timeout has already happened, push the other
6677 6678 			 * outstanding cmds out to be checked again later.
6678 6679 			 */
6679 6680 if (time_adjust) {
6680 6681 acp->timeout += time_adjust;
6681 6682 continue;
6682 6683 }
6683 6684
6684 6685 if (acp->timeout <= softs_timebase) {
6685 6686 aac_cmd_timeout(softs, acp);
6686 6687 time_out = 1;
6687 6688 time_adjust = aac_tick * drv_usectohz(1000000);
6688 6689 break; /* timeout happened */
6689 6690 } else {
6690 6691 break; /* no timeout */
6691 6692 }
6692 6693 }
6693 6694 } while (time_out);
6694 6695
6695 6696 mutex_enter(&softs->time_mutex);
6696 6697 softs->time_out = softs->timebase + aac_tick;
6697 6698 mutex_exit(&softs->time_mutex);
6698 6699 }
6699 6700
6700 6701 /*
6701 6702  * The event thread handles various tasks serially on behalf of the other
6702 6703  * parts of the driver, so that those parts can run fast.
6703 6704 */
6704 6705 static void
6705 6706 aac_event_thread(struct aac_softstate *softs)
6706 6707 {
6707 6708 int run = 1;
6708 6709
6709 6710 DBCALLED(softs, 1);
6710 6711
6711 6712 mutex_enter(&softs->ev_lock);
6712 6713 while (run) {
6713 6714 int events;
6714 6715
6715 6716 if ((events = softs->events) == 0) {
6716 6717 cv_wait(&softs->event_disp_cv, &softs->ev_lock);
6717 6718 events = softs->events;
6718 6719 }
6719 6720 softs->events = 0;
6720 6721 mutex_exit(&softs->ev_lock);
6721 6722
6722 6723 mutex_enter(&softs->io_lock);
6723 6724 if ((softs->state & AAC_STATE_RUN) &&
6724 6725 (softs->state & AAC_STATE_DEAD) == 0) {
6725 6726 if (events & AAC_EVENT_TIMEOUT)
6726 6727 aac_daemon(softs);
6727 6728 if (events & AAC_EVENT_SYNCTICK)
6728 6729 aac_sync_tick(softs);
6729 6730 if (events & AAC_EVENT_AIF)
6730 6731 aac_aif_event(softs);
6731 6732 } else {
6732 6733 run = 0;
6733 6734 }
6734 6735 mutex_exit(&softs->io_lock);
6735 6736
6736 6737 mutex_enter(&softs->ev_lock);
6737 6738 }
6738 6739
6739 6740 cv_signal(&softs->event_wait_cv);
6740 6741 mutex_exit(&softs->ev_lock);
6741 6742 }
6742 6743
6743 6744 /*
6744 6745  * Internal timer. It is only responsible for counting time and reporting
6745 6746  * time-related events. Event handling is done by aac_event_thread(), so
6746 6747  * that the timer itself can be as precise as possible.
6747 6748 */
6748 6749 static void
6749 6750 aac_timer(void *arg)
6750 6751 {
6751 6752 struct aac_softstate *softs = arg;
6752 6753 int events = 0;
6753 6754
6754 6755 mutex_enter(&softs->time_mutex);
6755 6756
6756 6757 	/* If the timer is being stopped, exit; otherwise reschedule */
6757 6758 if (softs->timeout_id) {
6758 6759 softs->timeout_id = timeout(aac_timer, (void *)softs,
6759 6760 (aac_tick * drv_usectohz(1000000)));
6760 6761 } else {
6761 6762 mutex_exit(&softs->time_mutex);
6762 6763 return;
6763 6764 }
6764 6765
6765 6766 /* Time counting */
6766 6767 softs->timebase += aac_tick;
6767 6768
6768 6769 /* Check time related events */
6769 6770 if (softs->time_out && softs->time_out <= softs->timebase)
6770 6771 events |= AAC_EVENT_TIMEOUT;
6771 6772 if (softs->time_sync && softs->time_sync <= softs->timebase)
6772 6773 events |= AAC_EVENT_SYNCTICK;
6773 6774
6774 6775 mutex_exit(&softs->time_mutex);
6775 6776
6776 6777 if (events)
6777 6778 aac_event_disp(softs, events);
6778 6779 }
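/*
 * To tie the timer pieces together: aac_timer() is the only place
 * timebase advances, and deadlines are stored as absolute timebase
 * values. A minimal sketch of the pattern used for time_out/time_sync:
 *
 *	arm:	softs->time_out = softs->timebase + aac_tick;
 *	fire:	if (softs->time_out && softs->time_out <= softs->timebase)
 *			events |= AAC_EVENT_TIMEOUT;
 *
 * The event itself is handled off the timer in aac_event_thread().
 */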
6779 6780
6780 6781 /*
6781 6782 * Dispatch events to daemon thread for handling
6782 6783 */
6783 6784 static void
6784 6785 aac_event_disp(struct aac_softstate *softs, int events)
6785 6786 {
6786 6787 mutex_enter(&softs->ev_lock);
6787 6788 softs->events |= events;
6788 6789 cv_broadcast(&softs->event_disp_cv);
6789 6790 mutex_exit(&softs->ev_lock);
6790 6791 }
6791 6792
6792 6793 /*
6793 6794 * Architecture dependent functions
6794 6795 */
6795 6796 static int
6796 6797 aac_rx_get_fwstatus(struct aac_softstate *softs)
6797 6798 {
6798 6799 return (PCI_MEM_GET32(softs, AAC_OMR0));
6799 6800 }
6800 6801
6801 6802 static int
6802 6803 aac_rx_get_mailbox(struct aac_softstate *softs, int mb)
6803 6804 {
6804 6805 return (PCI_MEM_GET32(softs, AAC_RX_MAILBOX + mb * 4));
6805 6806 }
6806 6807
6807 6808 static void
6808 6809 aac_rx_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6809 6810 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6810 6811 {
6811 6812 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX, cmd);
6812 6813 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 4, arg0);
6813 6814 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 8, arg1);
6814 6815 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 12, arg2);
6815 6816 PCI_MEM_PUT32(softs, AAC_RX_MAILBOX + 16, arg3);
6816 6817 }
6817 6818
6818 6819 static int
6819 6820 aac_rkt_get_fwstatus(struct aac_softstate *softs)
6820 6821 {
6821 6822 return (PCI_MEM_GET32(softs, AAC_OMR0));
6822 6823 }
6823 6824
6824 6825 static int
6825 6826 aac_rkt_get_mailbox(struct aac_softstate *softs, int mb)
6826 6827 {
6827 6828 	return (PCI_MEM_GET32(softs, AAC_RKT_MAILBOX + mb * 4));
6828 6829 }
6829 6830
6830 6831 static void
6831 6832 aac_rkt_set_mailbox(struct aac_softstate *softs, uint32_t cmd,
6832 6833 uint32_t arg0, uint32_t arg1, uint32_t arg2, uint32_t arg3)
6833 6834 {
6834 6835 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX, cmd);
6835 6836 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 4, arg0);
6836 6837 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 8, arg1);
6837 6838 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 12, arg2);
6838 6839 PCI_MEM_PUT32(softs, AAC_RKT_MAILBOX + 16, arg3);
6839 6840 }
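/*
 * For both the Rx and Rocket interfaces the mailbox is a block of five
 * consecutive 32-bit registers, command word first; a sketch of the
 * layout assumed by the accessors above:
 *
 *	base + 0:  cmd		base + 12: arg2
 *	base + 4:  arg0		base + 16: arg3
 *	base + 8:  arg1
 *
 * which is also why aac_rx_get_mailbox()/aac_rkt_get_mailbox() read
 * word mb at base + mb * 4.
 */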
6840 6841
6841 6842 /*
6842 6843 * cb_ops functions
6843 6844 */
6844 6845 static int
6845 6846 aac_open(dev_t *devp, int flag, int otyp, cred_t *cred)
6846 6847 {
6847 6848 struct aac_softstate *softs;
6848 6849 int minor0, minor;
6849 6850 int instance;
6850 6851
6851 6852 DBCALLED(NULL, 2);
6852 6853
6853 6854 if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6854 6855 return (EINVAL);
6855 6856
6856 6857 minor0 = getminor(*devp);
6857 6858 minor = AAC_SCSA_MINOR(minor0);
6858 6859
6859 6860 if (AAC_IS_SCSA_NODE(minor))
6860 6861 return (scsi_hba_open(devp, flag, otyp, cred));
6861 6862
6862 6863 instance = MINOR2INST(minor0);
6863 6864 if (instance >= AAC_MAX_ADAPTERS)
6864 6865 return (ENXIO);
6865 6866
6866 6867 softs = ddi_get_soft_state(aac_softstatep, instance);
6867 6868 if (softs == NULL)
6868 6869 return (ENXIO);
6869 6870
6870 6871 return (0);
6871 6872 }
6872 6873
6873 6874 /*ARGSUSED*/
6874 6875 static int
6875 6876 aac_close(dev_t dev, int flag, int otyp, cred_t *cred)
6876 6877 {
6877 6878 int minor0, minor;
6878 6879 int instance;
6879 6880
6880 6881 DBCALLED(NULL, 2);
6881 6882
6882 6883 if (otyp != OTYP_BLK && otyp != OTYP_CHR)
6883 6884 return (EINVAL);
6884 6885
6885 6886 minor0 = getminor(dev);
6886 6887 minor = AAC_SCSA_MINOR(minor0);
6887 6888
6888 6889 if (AAC_IS_SCSA_NODE(minor))
6889 6890 return (scsi_hba_close(dev, flag, otyp, cred));
6890 6891
6891 6892 instance = MINOR2INST(minor0);
6892 6893 if (instance >= AAC_MAX_ADAPTERS)
6893 6894 return (ENXIO);
6894 6895
6895 6896 return (0);
6896 6897 }
6897 6898
6898 6899 static int
6899 6900 aac_ioctl(dev_t dev, int cmd, intptr_t arg, int flag, cred_t *cred_p,
6900 6901 int *rval_p)
6901 6902 {
6902 6903 struct aac_softstate *softs;
6903 6904 int minor0, minor;
6904 6905 int instance;
6905 6906
6906 6907 DBCALLED(NULL, 2);
6907 6908
6908 6909 if (drv_priv(cred_p) != 0)
6909 6910 return (EPERM);
6910 6911
6911 6912 minor0 = getminor(dev);
6912 6913 minor = AAC_SCSA_MINOR(minor0);
6913 6914
6914 6915 if (AAC_IS_SCSA_NODE(minor))
6915 6916 return (scsi_hba_ioctl(dev, cmd, arg, flag, cred_p, rval_p));
6916 6917
6917 6918 instance = MINOR2INST(minor0);
6918 6919 if (instance < AAC_MAX_ADAPTERS) {
6919 6920 softs = ddi_get_soft_state(aac_softstatep, instance);
6920 6921 return (aac_do_ioctl(softs, dev, cmd, arg, flag));
6921 6922 }
6922 6923 return (ENXIO);
6923 6924 }
6924 6925
6925 6926 /*
6926 6927 * The IO fault service error handling callback function
6927 6928 */
6928 6929 /*ARGSUSED*/
6929 6930 static int
6930 6931 aac_fm_error_cb(dev_info_t *dip, ddi_fm_error_t *err, const void *impl_data)
6931 6932 {
6932 6933 /*
6933 6934 * as the driver can always deal with an error in any dma or
6934 6935 * access handle, we can just return the fme_status value.
6935 6936 */
6936 6937 pci_ereport_post(dip, err, NULL);
6937 6938 return (err->fme_status);
6938 6939 }
6939 6940
6940 6941 /*
6941 6942 * aac_fm_init - initialize fma capabilities and register with IO
6942 6943 * fault services.
6943 6944 */
6944 6945 static void
6945 6946 aac_fm_init(struct aac_softstate *softs)
6946 6947 {
6947 6948 /*
6948 6949 * Need to change iblock to priority for new MSI intr
6949 6950 */
6950 6951 ddi_iblock_cookie_t fm_ibc;
6951 6952
6952 6953 softs->fm_capabilities = ddi_getprop(DDI_DEV_T_ANY, softs->devinfo_p,
6953 6954 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
6954 6955 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
6955 6956 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
6956 6957
6957 6958 /* Only register with IO Fault Services if we have some capability */
6958 6959 if (softs->fm_capabilities) {
6959 6960 /* Adjust access and dma attributes for FMA */
6960 6961 softs->reg_attr.devacc_attr_access = DDI_FLAGERR_ACC;
6961 6962 softs->addr_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6962 6963 softs->buf_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
6963 6964
6964 6965 /*
6965 6966 * Register capabilities with IO Fault Services.
6966 6967 * fm_capabilities will be updated to indicate
6967 6968 * capabilities actually supported (not requested.)
6968 6969 */
6969 6970 ddi_fm_init(softs->devinfo_p, &softs->fm_capabilities, &fm_ibc);
6970 6971
6971 6972 /*
6972 6973 * Initialize pci ereport capabilities if ereport
6973 6974 * capable (should always be.)
6974 6975 */
6975 6976 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
6976 6977 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6977 6978 pci_ereport_setup(softs->devinfo_p);
6978 6979 }
6979 6980
6980 6981 /*
6981 6982 * Register error callback if error callback capable.
6982 6983 */
6983 6984 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
6984 6985 ddi_fm_handler_register(softs->devinfo_p,
6985 6986 aac_fm_error_cb, (void *) softs);
6986 6987 }
6987 6988 }
6988 6989 }
6989 6990
6990 6991 /*
6991 6992 * aac_fm_fini - Releases fma capabilities and un-registers with IO
6992 6993 * fault services.
6993 6994 */
6994 6995 static void
6995 6996 aac_fm_fini(struct aac_softstate *softs)
6996 6997 {
6997 6998 /* Only unregister FMA capabilities if registered */
6998 6999 if (softs->fm_capabilities) {
6999 7000 /*
7000 7001 * Un-register error callback if error callback capable.
7001 7002 */
7002 7003 if (DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
7003 7004 ddi_fm_handler_unregister(softs->devinfo_p);
7004 7005 }
7005 7006
7006 7007 /*
7007 7008 * Release any resources allocated by pci_ereport_setup()
7008 7009 */
7009 7010 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities) ||
7010 7011 DDI_FM_ERRCB_CAP(softs->fm_capabilities)) {
7011 7012 pci_ereport_teardown(softs->devinfo_p);
7012 7013 }
7013 7014
7014 7015 /* Unregister from IO Fault Services */
7015 7016 ddi_fm_fini(softs->devinfo_p);
7016 7017
7017 7018 /* Adjust access and dma attributes for FMA */
7018 7019 softs->reg_attr.devacc_attr_access = DDI_DEFAULT_ACC;
7019 7020 softs->addr_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
7020 7021 softs->buf_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
7021 7022 }
7022 7023 }
7023 7024
7024 7025 int
7025 7026 aac_check_acc_handle(ddi_acc_handle_t handle)
7026 7027 {
7027 7028 ddi_fm_error_t de;
7028 7029
7029 7030 ddi_fm_acc_err_get(handle, &de, DDI_FME_VERSION);
7030 7031 return (de.fme_status);
7031 7032 }
7032 7033
7033 7034 int
7034 7035 aac_check_dma_handle(ddi_dma_handle_t handle)
7035 7036 {
7036 7037 ddi_fm_error_t de;
7037 7038
7038 7039 ddi_fm_dma_err_get(handle, &de, DDI_FME_VERSION);
7039 7040 return (de.fme_status);
7040 7041 }
7041 7042
7042 7043 void
7043 7044 aac_fm_ereport(struct aac_softstate *softs, char *detail)
7044 7045 {
7045 7046 uint64_t ena;
7046 7047 char buf[FM_MAX_CLASS];
7047 7048
7048 7049 (void) snprintf(buf, FM_MAX_CLASS, "%s.%s", DDI_FM_DEVICE, detail);
7049 7050 ena = fm_ena_generate(0, FM_ENA_FMT1);
7050 7051 if (DDI_FM_EREPORT_CAP(softs->fm_capabilities)) {
7051 7052 ddi_fm_ereport_post(softs->devinfo_p, buf, ena, DDI_NOSLEEP,
7052 7053 FM_VERSION, DATA_TYPE_UINT8, FM_EREPORT_VERSION, NULL);
7053 7054 }
7054 7055 }
7055 7056
7056 7057 /*
7057 7058 * Autoconfiguration support
7058 7059 */
7059 7060 static int
7060 7061 aac_parse_devname(char *devnm, int *tgt, int *lun)
7061 7062 {
7062 7063 char devbuf[SCSI_MAXNAMELEN];
7063 7064 char *addr;
7064 7065 char *p, *tp, *lp;
7065 7066 long num;
7066 7067
7067 7068 /* Parse dev name and address */
7068 7069 (void) strcpy(devbuf, devnm);
7069 7070 addr = "";
7070 7071 for (p = devbuf; *p != '\0'; p++) {
7071 7072 if (*p == '@') {
7072 7073 addr = p + 1;
7073 7074 *p = '\0';
7074 7075 } else if (*p == ':') {
7075 7076 *p = '\0';
7076 7077 break;
7077 7078 }
7078 7079 }
7079 7080
7080 7081 	/* Parse target and lun */
7081 7082 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
7082 7083 if (*p == ',') {
7083 7084 lp = p + 1;
7084 7085 *p = '\0';
7085 7086 break;
7086 7087 }
7087 7088 }
7088 7089 if (tgt && tp) {
7089 7090 if (ddi_strtol(tp, NULL, 0x10, &num))
7090 7091 return (AACERR);
7091 7092 *tgt = (int)num;
7092 7093 }
7093 7094 if (lun && lp) {
7094 7095 if (ddi_strtol(lp, NULL, 0x10, &num))
7095 7096 return (AACERR);
7096 7097 *lun = (int)num;
7097 7098 }
7098 7099 return (AACOK);
7099 7100 }
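/*
 * For illustration (a hypothetical name, not taken from this code): a
 * child name such as "sd@1,0:a" is split at '@' and ':' into the unit
 * address "1,0", whose target and lun parts are then parsed as hex by
 * ddi_strtol(), yielding tgt = 0x1 and lun = 0x0.
 */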
7100 7101
7101 7102 static dev_info_t *
7102 7103 aac_find_child(struct aac_softstate *softs, uint16_t tgt, uint8_t lun)
7103 7104 {
7104 7105 dev_info_t *child = NULL;
7105 7106 char addr[SCSI_MAXNAMELEN];
7106 7107 char tmp[MAXNAMELEN];
7107 7108
7108 7109 if (tgt < AAC_MAX_LD) {
7109 7110 if (lun == 0) {
7110 7111 struct aac_device *dvp = &softs->containers[tgt].dev;
7111 7112
7112 7113 child = dvp->dip;
7113 7114 }
7114 7115 } else {
7115 7116 (void) sprintf(addr, "%x,%x", tgt, lun);
7116 7117 for (child = ddi_get_child(softs->devinfo_p);
7117 7118 child; child = ddi_get_next_sibling(child)) {
7118 7119 			/* We don't care about non-persistent nodes */
7119 7120 if (ndi_dev_is_persistent_node(child) == 0)
7120 7121 continue;
7121 7122
7122 7123 if (aac_name_node(child, tmp, MAXNAMELEN) !=
7123 7124 DDI_SUCCESS)
7124 7125 continue;
7125 7126 if (strcmp(addr, tmp) == 0)
7126 7127 break;
7127 7128 }
7128 7129 }
7129 7130 return (child);
7130 7131 }
7131 7132
7132 7133 static int
7133 7134 aac_config_child(struct aac_softstate *softs, struct scsi_device *sd,
7134 7135 dev_info_t **dipp)
7135 7136 {
7136 7137 char *nodename = NULL;
7137 7138 char **compatible = NULL;
7138 7139 int ncompatible = 0;
7139 7140 char *childname;
7140 7141 dev_info_t *ldip = NULL;
7141 7142 int tgt = sd->sd_address.a_target;
7142 7143 int lun = sd->sd_address.a_lun;
7143 7144 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7144 7145 int rval;
7145 7146
7146 7147 DBCALLED(softs, 2);
7147 7148
7148 7149 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
7149 7150 NULL, &nodename, &compatible, &ncompatible);
7150 7151 if (nodename == NULL) {
7151 7152 AACDB_PRINT(softs, CE_WARN,
7152 7153 		    "found no compatible driver for t%dL%d", tgt, lun);
7153 7154 rval = NDI_FAILURE;
7154 7155 goto finish;
7155 7156 }
7156 7157 childname = (softs->legacy && dtype == DTYPE_DIRECT) ? "sd" : nodename;
7157 7158
7158 7159 /* Create dev node */
7159 7160 rval = ndi_devi_alloc(softs->devinfo_p, childname, DEVI_SID_NODEID,
7160 7161 &ldip);
7161 7162 if (rval == NDI_SUCCESS) {
7162 7163 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt)
7163 7164 != DDI_PROP_SUCCESS) {
7164 7165 AACDB_PRINT(softs, CE_WARN, "unable to create "
7165 7166 "property for t%dL%d (target)", tgt, lun);
7166 7167 rval = NDI_FAILURE;
7167 7168 goto finish;
7168 7169 }
7169 7170 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun)
7170 7171 != DDI_PROP_SUCCESS) {
7171 7172 AACDB_PRINT(softs, CE_WARN, "unable to create "
7172 7173 "property for t%dL%d (lun)", tgt, lun);
7173 7174 rval = NDI_FAILURE;
7174 7175 goto finish;
7175 7176 }
7176 7177 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
7177 7178 "compatible", compatible, ncompatible)
7178 7179 != DDI_PROP_SUCCESS) {
7179 7180 AACDB_PRINT(softs, CE_WARN, "unable to create "
7180 7181 "property for t%dL%d (compatible)", tgt, lun);
7181 7182 rval = NDI_FAILURE;
7182 7183 goto finish;
7183 7184 }
7184 7185
7185 7186 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
7186 7187 if (rval != NDI_SUCCESS) {
7187 7188 AACDB_PRINT(softs, CE_WARN, "unable to online t%dL%d",
7188 7189 tgt, lun);
7189 7190 ndi_prop_remove_all(ldip);
7190 7191 (void) ndi_devi_free(ldip);
7191 7192 }
7192 7193 }
7193 7194 finish:
7194 7195 if (dipp)
7195 7196 *dipp = ldip;
7196 7197
7197 7198 scsi_hba_nodename_compatible_free(nodename, compatible);
7198 7199 return (rval);
7199 7200 }
7200 7201
7201 7202 /*ARGSUSED*/
7202 7203 static int
7203 7204 aac_probe_lun(struct aac_softstate *softs, struct scsi_device *sd)
7204 7205 {
7205 7206 int tgt = sd->sd_address.a_target;
7206 7207 int lun = sd->sd_address.a_lun;
7207 7208
7208 7209 DBCALLED(softs, 2);
7209 7210
7210 7211 if (tgt < AAC_MAX_LD) {
7211 7212 enum aac_cfg_event event;
7212 7213
7213 7214 if (lun == 0) {
7214 7215 mutex_enter(&softs->io_lock);
7215 7216 event = aac_probe_container(softs, tgt);
7216 7217 mutex_exit(&softs->io_lock);
7217 7218 if ((event != AAC_CFG_NULL_NOEXIST) &&
7218 7219 (event != AAC_CFG_DELETE)) {
7219 7220 if (scsi_hba_probe(sd, NULL) ==
7220 7221 SCSIPROBE_EXISTS)
7221 7222 return (NDI_SUCCESS);
7222 7223 }
7223 7224 }
7224 7225 return (NDI_FAILURE);
7225 7226 } else {
7226 7227 int dtype;
7227 7228 int qual; /* device qualifier */
7228 7229
7229 7230 if (scsi_hba_probe(sd, NULL) != SCSIPROBE_EXISTS)
7230 7231 return (NDI_FAILURE);
7231 7232
7232 7233 dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
7233 7234 qual = dtype >> 5;
7234 7235
7235 7236 AACDB_PRINT(softs, CE_NOTE,
7236 7237 "Phys. device found: tgt %d dtype %d: %s",
7237 7238 tgt, dtype, sd->sd_inq->inq_vid);
7238 7239
7239 7240 		/* Only non-DASD devices and JBOD-mode DASDs are exposed */
7240 7241 if (dtype == DTYPE_RODIRECT /* CDROM */ ||
7241 7242 dtype == DTYPE_SEQUENTIAL /* TAPE */ ||
7242 7243 dtype == DTYPE_ESI /* SES */) {
7243 7244 if (!(softs->flags & AAC_FLAGS_NONDASD))
7244 7245 return (NDI_FAILURE);
7245 7246 AACDB_PRINT(softs, CE_NOTE, "non-DASD %d found", tgt);
7246 7247
7247 7248 } else if (dtype == DTYPE_DIRECT) {
7248 7249 if (!(softs->flags & AAC_FLAGS_JBOD) || qual != 0)
7249 7250 return (NDI_FAILURE);
7250 7251 AACDB_PRINT(softs, CE_NOTE, "JBOD DASD %d found", tgt);
7251 7252 }
7252 7253
7253 7254 mutex_enter(&softs->io_lock);
7254 7255 softs->nondasds[AAC_PD(tgt)].dev.flags |= AAC_DFLAG_VALID;
7255 7256 mutex_exit(&softs->io_lock);
7256 7257 return (NDI_SUCCESS);
7257 7258 }
7258 7259 }
7259 7260
7260 7261 static int
7261 7262 aac_config_lun(struct aac_softstate *softs, uint16_t tgt, uint8_t lun,
7262 7263 dev_info_t **ldip)
7263 7264 {
7264 7265 struct scsi_device sd;
7265 7266 dev_info_t *child;
7266 7267 int rval;
7267 7268
7268 7269 DBCALLED(softs, 2);
7269 7270
7270 7271 if ((child = aac_find_child(softs, tgt, lun)) != NULL) {
7271 7272 if (ldip)
7272 7273 *ldip = child;
7273 7274 return (NDI_SUCCESS);
7274 7275 }
7275 7276
7276 7277 bzero(&sd, sizeof (struct scsi_device));
7277 7278 sd.sd_address.a_hba_tran = softs->hba_tran;
7278 7279 sd.sd_address.a_target = (uint16_t)tgt;
7279 7280 sd.sd_address.a_lun = (uint8_t)lun;
7280 7281 if ((rval = aac_probe_lun(softs, &sd)) == NDI_SUCCESS)
7281 7282 rval = aac_config_child(softs, &sd, ldip);
7282 7283 	/* scsi_unprobe() is a no-op now; free the inquiry buffer manually */
7283 7284 if (sd.sd_inq) {
7284 7285 kmem_free(sd.sd_inq, SUN_INQSIZE);
7285 7286 sd.sd_inq = (struct scsi_inquiry *)NULL;
7286 7287 }
7287 7288 return (rval);
7288 7289 }
7289 7290
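/*
 * aac_config_tgt() below probes a physical target with SCMD_REPORT_LUNS
 * and configures a LUN for every address returned. As a sketch of the
 * approach read from the code: the command is issued twice, the first
 * pass with a minimal buffer just to learn the LUN list length, and the
 * second pass with a buffer large enough for the whole list. Both the
 * allocation length in the CDB and the returned list length are
 * big-endian, e.g. the CDB bytes are filled as
 *
 *	cdb[6] = (buf_len >> 24) & 0xff;  ...  cdb[9] = buf_len & 0xff;
 *
 * which is what the byte loops in the function implement.
 */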
7290 7291 static int
7291 7292 aac_config_tgt(struct aac_softstate *softs, int tgt)
7292 7293 {
7293 7294 struct scsi_address ap;
7294 7295 struct buf *bp = NULL;
7295 7296 int buf_len = AAC_SCSI_RPTLUNS_HEAD_SIZE + AAC_SCSI_RPTLUNS_ADDR_SIZE;
7296 7297 int list_len = 0;
7297 7298 int lun_total = 0;
7298 7299 dev_info_t *ldip;
7299 7300 int i;
7300 7301
7301 7302 ap.a_hba_tran = softs->hba_tran;
7302 7303 ap.a_target = (uint16_t)tgt;
7303 7304 ap.a_lun = 0;
7304 7305
7305 7306 for (i = 0; i < 2; i++) {
7306 7307 struct scsi_pkt *pkt;
7307 7308 uchar_t *cdb;
7308 7309 uchar_t *p;
7309 7310 uint32_t data;
7310 7311
7311 7312 if (bp == NULL) {
7312 7313 if ((bp = scsi_alloc_consistent_buf(&ap, NULL,
7313 7314 buf_len, B_READ, NULL_FUNC, NULL)) == NULL)
7314 7315 return (AACERR);
7315 7316 }
7316 7317 if ((pkt = scsi_init_pkt(&ap, NULL, bp, CDB_GROUP5,
7317 7318 sizeof (struct scsi_arq_status), 0, PKT_CONSISTENT,
7318 7319 NULL, NULL)) == NULL) {
7319 7320 scsi_free_consistent_buf(bp);
7320 7321 return (AACERR);
7321 7322 }
7322 7323 cdb = pkt->pkt_cdbp;
7323 7324 bzero(cdb, CDB_GROUP5);
7324 7325 cdb[0] = SCMD_REPORT_LUNS;
7325 7326
7326 7327 		/* Store buf_len into the CDB MSB-first (big-endian) */
7327 7328 data = buf_len;
7328 7329 for (p = &cdb[9]; p > &cdb[5]; p--) {
7329 7330 *p = data & 0xff;
7330 7331 data >>= 8;
7331 7332 }
7332 7333
7333 7334 if (scsi_poll(pkt) < 0 ||
7334 7335 ((struct scsi_status *)pkt->pkt_scbp)->sts_chk) {
7335 7336 scsi_destroy_pkt(pkt);
7336 7337 break;
7337 7338 }
7338 7339
7339 7340 		/* Decode the big-endian LUN list length from the data */
7340 7341 for (p = (uchar_t *)bp->b_un.b_addr;
7341 7342 p < (uchar_t *)bp->b_un.b_addr + 4; p++) {
7342 7343 data <<= 8;
7343 7344 data |= *p;
7344 7345 }
7345 7346 list_len = data;
7346 7347 if (buf_len < list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE) {
7347 7348 scsi_free_consistent_buf(bp);
7348 7349 bp = NULL;
7349 7350 buf_len = list_len + AAC_SCSI_RPTLUNS_HEAD_SIZE;
7350 7351 }
7351 7352 scsi_destroy_pkt(pkt);
7352 7353 }
7353 7354 if (i >= 2) {
7354 7355 uint8_t *buf = (uint8_t *)(bp->b_un.b_addr +
7355 7356 AAC_SCSI_RPTLUNS_HEAD_SIZE);
7356 7357
7357 7358 for (i = 0; i < (list_len / AAC_SCSI_RPTLUNS_ADDR_SIZE); i++) {
7358 7359 uint16_t lun;
7359 7360
7360 7361 /* Determine report luns addressing type */
7361 7362 switch (buf[0] & AAC_SCSI_RPTLUNS_ADDR_MASK) {
7362 7363 /*
7363 7364 * Vendors in the field have been found to be
7364 7365 * concatenating bus/target/lun to equal the
7365 7366 * complete lun value instead of switching to
7366 7367 * flat space addressing
7367 7368 */
7368 7369 case AAC_SCSI_RPTLUNS_ADDR_PERIPHERAL:
7369 7370 case AAC_SCSI_RPTLUNS_ADDR_LOGICAL_UNIT:
7370 7371 case AAC_SCSI_RPTLUNS_ADDR_FLAT_SPACE:
7371 7372 lun = ((buf[0] & 0x3f) << 8) | buf[1];
7372 7373 if (lun > UINT8_MAX) {
7373 7374 AACDB_PRINT(softs, CE_WARN,
7374 7375 "abnormal lun number: %d", lun);
7375 7376 break;
7376 7377 }
7377 7378 if (aac_config_lun(softs, tgt, lun, &ldip) ==
7378 7379 NDI_SUCCESS)
7379 7380 lun_total++;
7380 7381 break;
7381 7382 }
7382 7383
7383 7384 buf += AAC_SCSI_RPTLUNS_ADDR_SIZE;
7384 7385 }
7385 7386 } else {
7386 7387 		/* The target may not support SCMD_REPORT_LUNS. */
7387 7388 if (aac_config_lun(softs, tgt, 0, &ldip) == NDI_SUCCESS)
7388 7389 lun_total++;
7389 7390 }
7390 7391 scsi_free_consistent_buf(bp);
7391 7392 return (lun_total);
7392 7393 }
7393 7394
7394 7395 static void
7395 7396 aac_devcfg(struct aac_softstate *softs, int tgt, int en)
7396 7397 {
7397 7398 struct aac_device *dvp;
7398 7399
7399 7400 mutex_enter(&softs->io_lock);
7400 7401 dvp = AAC_DEV(softs, tgt);
7401 7402 if (en)
7402 7403 dvp->flags |= AAC_DFLAG_CONFIGURING;
7403 7404 else
7404 7405 dvp->flags &= ~AAC_DFLAG_CONFIGURING;
7405 7406 mutex_exit(&softs->io_lock);
7406 7407 }
7407 7408
7408 7409 static int
7409 7410 aac_tran_bus_config(dev_info_t *parent, uint_t flags, ddi_bus_config_op_t op,
7410 7411 void *arg, dev_info_t **childp)
7411 7412 {
7412 7413 struct aac_softstate *softs;
7413 7414 int circ = 0;
7414 7415 int rval;
7415 7416
7416 7417 if ((softs = ddi_get_soft_state(aac_softstatep,
7417 7418 ddi_get_instance(parent))) == NULL)
7418 7419 return (NDI_FAILURE);
7419 7420
7420 7421 	/* Bus config commands should be blocked while the bus is quiesced */
7421 7422 mutex_enter(&softs->io_lock);
7422 7423 if (softs->state & AAC_STATE_QUIESCED) {
7423 7424 AACDB_PRINT(softs, CE_NOTE,
7424 7425 		    "bus_config aborted because bus is quiesced");
7425 7426 mutex_exit(&softs->io_lock);
7426 7427 return (NDI_FAILURE);
7427 7428 }
7428 7429 mutex_exit(&softs->io_lock);
7429 7430
7430 7431 DBCALLED(softs, 1);
7431 7432
7432 7433 /* Hold the nexus across the bus_config */
7433 7434 ndi_devi_enter(parent, &circ);
7434 7435 switch (op) {
7435 7436 case BUS_CONFIG_ONE: {
7436 7437 int tgt, lun;
7437 7438
7438 7439 if (aac_parse_devname(arg, &tgt, &lun) != AACOK) {
7439 7440 rval = NDI_FAILURE;
7440 7441 break;
7441 7442 }
7442 7443 if (tgt >= AAC_MAX_LD) {
7443 7444 if (tgt >= AAC_MAX_DEV(softs)) {
7444 7445 rval = NDI_FAILURE;
7445 7446 break;
7446 7447 }
7447 7448 }
7448 7449
7449 7450 AAC_DEVCFG_BEGIN(softs, tgt);
7450 7451 rval = aac_config_lun(softs, tgt, lun, childp);
7451 7452 AAC_DEVCFG_END(softs, tgt);
7452 7453 break;
7453 7454 }
7454 7455
7455 7456 case BUS_CONFIG_DRIVER:
7456 7457 case BUS_CONFIG_ALL: {
7457 7458 uint32_t bus, tgt;
7458 7459 int index, total;
7459 7460
7460 7461 for (tgt = 0; tgt < AAC_MAX_LD; tgt++) {
7461 7462 AAC_DEVCFG_BEGIN(softs, tgt);
7462 7463 (void) aac_config_lun(softs, tgt, 0, NULL);
7463 7464 AAC_DEVCFG_END(softs, tgt);
7464 7465 }
7465 7466
7466 7467 /* Config the non-DASD devices connected to the card */
7467 7468 total = 0;
7468 7469 index = AAC_MAX_LD;
7469 7470 for (bus = 0; bus < softs->bus_max; bus++) {
7470 7471 AACDB_PRINT(softs, CE_NOTE, "bus %d:", bus);
7471 7472 for (tgt = 0; tgt < softs->tgt_max; tgt++, index++) {
7472 7473 AAC_DEVCFG_BEGIN(softs, index);
7473 7474 if (aac_config_tgt(softs, index))
7474 7475 total++;
7475 7476 AAC_DEVCFG_END(softs, index);
7476 7477 }
7477 7478 }
7478 7479 AACDB_PRINT(softs, CE_CONT,
7479 7480 "?Total %d phys. device(s) found", total);
7480 7481 rval = NDI_SUCCESS;
7481 7482 break;
7482 7483 }
7483 7484 }
7484 7485
7485 7486 if (rval == NDI_SUCCESS)
7486 7487 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
7487 7488 ndi_devi_exit(parent, circ);
7488 7489 return (rval);
7489 7490 }
7490 7491
7491 7492 /*ARGSUSED*/
7492 7493 static int
7493 7494 aac_handle_dr(struct aac_softstate *softs, int tgt, int lun, int event)
7494 7495 {
7495 7496 struct aac_device *dvp;
7496 7497 dev_info_t *dip;
7497 7498 int valid;
7498 7499 int circ1 = 0;
7499 7500
7500 7501 DBCALLED(softs, 1);
7501 7502
7502 7503 /* Hold the nexus across the bus_config */
7503 7504 dvp = AAC_DEV(softs, tgt);
7504 7505 valid = AAC_DEV_IS_VALID(dvp);
7505 7506 dip = dvp->dip;
7506 7507 if (!(softs->state & AAC_STATE_RUN))
7507 7508 return (AACERR);
7508 7509 mutex_exit(&softs->io_lock);
7509 7510
7510 7511 switch (event) {
7511 7512 case AAC_CFG_ADD:
7512 7513 case AAC_CFG_DELETE:
7513 7514 /* Device onlined */
7514 7515 if (dip == NULL && valid) {
7515 7516 ndi_devi_enter(softs->devinfo_p, &circ1);
7516 7517 (void) aac_config_lun(softs, tgt, 0, NULL);
7517 7518 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d onlined",
7518 7519 softs->instance, tgt, lun);
7519 7520 ndi_devi_exit(softs->devinfo_p, circ1);
7520 7521 }
7521 7522 /* Device offlined */
7522 7523 if (dip && !valid) {
7523 7524 mutex_enter(&softs->io_lock);
7524 7525 (void) aac_do_reset(softs);
7525 7526 mutex_exit(&softs->io_lock);
7526 7527
7527 7528 (void) ndi_devi_offline(dip, NDI_DEVI_REMOVE);
7528 7529 AACDB_PRINT(softs, CE_NOTE, "c%dt%dL%d offlined",
7529 7530 softs->instance, tgt, lun);
7530 7531 }
7531 7532 break;
7532 7533 }
7533 7534
7534 7535 mutex_enter(&softs->io_lock);
7535 7536 return (AACOK);
7536 7537 }
7537 7538
7538 7539 #ifdef DEBUG
7539 7540
7540 7541 /* -------------------------debug aid functions-------------------------- */
7541 7542
7542 7543 #define AAC_FIB_CMD_KEY_STRINGS \
7543 7544 TestCommandResponse, "TestCommandResponse", \
7544 7545 TestAdapterCommand, "TestAdapterCommand", \
7545 7546 LastTestCommand, "LastTestCommand", \
7546 7547 ReinitHostNormCommandQueue, "ReinitHostNormCommandQueue", \
7547 7548 ReinitHostHighCommandQueue, "ReinitHostHighCommandQueue", \
7548 7549 ReinitHostHighRespQueue, "ReinitHostHighRespQueue", \
7549 7550 ReinitHostNormRespQueue, "ReinitHostNormRespQueue", \
7550 7551 ReinitAdapNormCommandQueue, "ReinitAdapNormCommandQueue", \
7551 7552 ReinitAdapHighCommandQueue, "ReinitAdapHighCommandQueue", \
7552 7553 ReinitAdapHighRespQueue, "ReinitAdapHighRespQueue", \
7553 7554 ReinitAdapNormRespQueue, "ReinitAdapNormRespQueue", \
7554 7555 InterfaceShutdown, "InterfaceShutdown", \
7555 7556 DmaCommandFib, "DmaCommandFib", \
7556 7557 StartProfile, "StartProfile", \
7557 7558 TermProfile, "TermProfile", \
7558 7559 SpeedTest, "SpeedTest", \
7559 7560 TakeABreakPt, "TakeABreakPt", \
7560 7561 RequestPerfData, "RequestPerfData", \
7561 7562 SetInterruptDefTimer, "SetInterruptDefTimer", \
7562 7563 SetInterruptDefCount, "SetInterruptDefCount", \
7563 7564 GetInterruptDefStatus, "GetInterruptDefStatus", \
7564 7565 LastCommCommand, "LastCommCommand", \
7565 7566 NuFileSystem, "NuFileSystem", \
7566 7567 UFS, "UFS", \
7567 7568 HostFileSystem, "HostFileSystem", \
7568 7569 LastFileSystemCommand, "LastFileSystemCommand", \
7569 7570 ContainerCommand, "ContainerCommand", \
7570 7571 ContainerCommand64, "ContainerCommand64", \
7571 7572 ClusterCommand, "ClusterCommand", \
7572 7573 ScsiPortCommand, "ScsiPortCommand", \
7573 7574 ScsiPortCommandU64, "ScsiPortCommandU64", \
7574 7575 AifRequest, "AifRequest", \
7575 7576 CheckRevision, "CheckRevision", \
7576 7577 FsaHostShutdown, "FsaHostShutdown", \
7577 7578 RequestAdapterInfo, "RequestAdapterInfo", \
7578 7579 IsAdapterPaused, "IsAdapterPaused", \
7579 7580 SendHostTime, "SendHostTime", \
7580 7581 LastMiscCommand, "LastMiscCommand"
7581 7582
7582 7583 #define AAC_CTVM_SUBCMD_KEY_STRINGS \
7583 7584 VM_Null, "VM_Null", \
7584 7585 VM_NameServe, "VM_NameServe", \
7585 7586 VM_ContainerConfig, "VM_ContainerConfig", \
7586 7587 VM_Ioctl, "VM_Ioctl", \
7587 7588 VM_FilesystemIoctl, "VM_FilesystemIoctl", \
7588 7589 VM_CloseAll, "VM_CloseAll", \
7589 7590 VM_CtBlockRead, "VM_CtBlockRead", \
7590 7591 VM_CtBlockWrite, "VM_CtBlockWrite", \
7591 7592 VM_SliceBlockRead, "VM_SliceBlockRead", \
7592 7593 VM_SliceBlockWrite, "VM_SliceBlockWrite", \
7593 7594 VM_DriveBlockRead, "VM_DriveBlockRead", \
7594 7595 VM_DriveBlockWrite, "VM_DriveBlockWrite", \
7595 7596 VM_EnclosureMgt, "VM_EnclosureMgt", \
7596 7597 VM_Unused, "VM_Unused", \
7597 7598 VM_CtBlockVerify, "VM_CtBlockVerify", \
7598 7599 VM_CtPerf, "VM_CtPerf", \
7599 7600 VM_CtBlockRead64, "VM_CtBlockRead64", \
7600 7601 VM_CtBlockWrite64, "VM_CtBlockWrite64", \
7601 7602 VM_CtBlockVerify64, "VM_CtBlockVerify64", \
7602 7603 VM_CtHostRead64, "VM_CtHostRead64", \
7603 7604 VM_CtHostWrite64, "VM_CtHostWrite64", \
7604 7605 VM_NameServe64, "VM_NameServe64"
7605 7606
7606 7607 #define AAC_CT_SUBCMD_KEY_STRINGS \
7607 7608 CT_Null, "CT_Null", \
7608 7609 CT_GET_SLICE_COUNT, "CT_GET_SLICE_COUNT", \
7609 7610 CT_GET_PARTITION_COUNT, "CT_GET_PARTITION_COUNT", \
7610 7611 CT_GET_PARTITION_INFO, "CT_GET_PARTITION_INFO", \
7611 7612 CT_GET_CONTAINER_COUNT, "CT_GET_CONTAINER_COUNT", \
7612 7613 CT_GET_CONTAINER_INFO_OLD, "CT_GET_CONTAINER_INFO_OLD", \
7613 7614 CT_WRITE_MBR, "CT_WRITE_MBR", \
7614 7615 CT_WRITE_PARTITION, "CT_WRITE_PARTITION", \
7615 7616 CT_UPDATE_PARTITION, "CT_UPDATE_PARTITION", \
7616 7617 CT_UNLOAD_CONTAINER, "CT_UNLOAD_CONTAINER", \
7617 7618 CT_CONFIG_SINGLE_PRIMARY, "CT_CONFIG_SINGLE_PRIMARY", \
7618 7619 CT_READ_CONFIG_AGE, "CT_READ_CONFIG_AGE", \
7619 7620 CT_WRITE_CONFIG_AGE, "CT_WRITE_CONFIG_AGE", \
7620 7621 CT_READ_SERIAL_NUMBER, "CT_READ_SERIAL_NUMBER", \
7621 7622 CT_ZERO_PAR_ENTRY, "CT_ZERO_PAR_ENTRY", \
7622 7623 CT_READ_MBR, "CT_READ_MBR", \
7623 7624 CT_READ_PARTITION, "CT_READ_PARTITION", \
7624 7625 CT_DESTROY_CONTAINER, "CT_DESTROY_CONTAINER", \
7625 7626 CT_DESTROY2_CONTAINER, "CT_DESTROY2_CONTAINER", \
7626 7627 CT_SLICE_SIZE, "CT_SLICE_SIZE", \
7627 7628 CT_CHECK_CONFLICTS, "CT_CHECK_CONFLICTS", \
7628 7629 CT_MOVE_CONTAINER, "CT_MOVE_CONTAINER", \
7629 7630 CT_READ_LAST_DRIVE, "CT_READ_LAST_DRIVE", \
7630 7631 CT_WRITE_LAST_DRIVE, "CT_WRITE_LAST_DRIVE", \
7631 7632 CT_UNMIRROR, "CT_UNMIRROR", \
7632 7633 CT_MIRROR_DELAY, "CT_MIRROR_DELAY", \
7633 7634 CT_GEN_MIRROR, "CT_GEN_MIRROR", \
7634 7635 CT_GEN_MIRROR2, "CT_GEN_MIRROR2", \
7635 7636 CT_TEST_CONTAINER, "CT_TEST_CONTAINER", \
7636 7637 CT_MOVE2, "CT_MOVE2", \
7637 7638 CT_SPLIT, "CT_SPLIT", \
7638 7639 CT_SPLIT2, "CT_SPLIT2", \
7639 7640 CT_SPLIT_BROKEN, "CT_SPLIT_BROKEN", \
7640 7641 CT_SPLIT_BROKEN2, "CT_SPLIT_BROKEN2", \
7641 7642 CT_RECONFIG, "CT_RECONFIG", \
7642 7643 CT_BREAK2, "CT_BREAK2", \
7643 7644 CT_BREAK, "CT_BREAK", \
7644 7645 CT_MERGE2, "CT_MERGE2", \
7645 7646 CT_MERGE, "CT_MERGE", \
7646 7647 CT_FORCE_ERROR, "CT_FORCE_ERROR", \
7647 7648 CT_CLEAR_ERROR, "CT_CLEAR_ERROR", \
7648 7649 CT_ASSIGN_FAILOVER, "CT_ASSIGN_FAILOVER", \
7649 7650 CT_CLEAR_FAILOVER, "CT_CLEAR_FAILOVER", \
7650 7651 CT_GET_FAILOVER_DATA, "CT_GET_FAILOVER_DATA", \
7651 7652 CT_VOLUME_ADD, "CT_VOLUME_ADD", \
7652 7653 CT_VOLUME_ADD2, "CT_VOLUME_ADD2", \
7653 7654 CT_MIRROR_STATUS, "CT_MIRROR_STATUS", \
7654 7655 CT_COPY_STATUS, "CT_COPY_STATUS", \
7655 7656 CT_COPY, "CT_COPY", \
7656 7657 CT_UNLOCK_CONTAINER, "CT_UNLOCK_CONTAINER", \
7657 7658 CT_LOCK_CONTAINER, "CT_LOCK_CONTAINER", \
7658 7659 CT_MAKE_READ_ONLY, "CT_MAKE_READ_ONLY", \
7659 7660 CT_MAKE_READ_WRITE, "CT_MAKE_READ_WRITE", \
7660 7661 CT_CLEAN_DEAD, "CT_CLEAN_DEAD", \
7661 7662 CT_ABORT_MIRROR_COMMAND, "CT_ABORT_MIRROR_COMMAND", \
7662 7663 CT_SET, "CT_SET", \
7663 7664 CT_GET, "CT_GET", \
7664 7665 CT_GET_NVLOG_ENTRY, "CT_GET_NVLOG_ENTRY", \
7665 7666 CT_GET_DELAY, "CT_GET_DELAY", \
7666 7667 CT_ZERO_CONTAINER_SPACE, "CT_ZERO_CONTAINER_SPACE", \
7667 7668 CT_GET_ZERO_STATUS, "CT_GET_ZERO_STATUS", \
7668 7669 CT_SCRUB, "CT_SCRUB", \
7669 7670 CT_GET_SCRUB_STATUS, "CT_GET_SCRUB_STATUS", \
7670 7671 CT_GET_SLICE_INFO, "CT_GET_SLICE_INFO", \
7671 7672 CT_GET_SCSI_METHOD, "CT_GET_SCSI_METHOD", \
7672 7673 CT_PAUSE_IO, "CT_PAUSE_IO", \
7673 7674 CT_RELEASE_IO, "CT_RELEASE_IO", \
7674 7675 CT_SCRUB2, "CT_SCRUB2", \
7675 7676 CT_MCHECK, "CT_MCHECK", \
7676 7677 CT_CORRUPT, "CT_CORRUPT", \
7677 7678 CT_GET_TASK_COUNT, "CT_GET_TASK_COUNT", \
7678 7679 CT_PROMOTE, "CT_PROMOTE", \
7679 7680 CT_SET_DEAD, "CT_SET_DEAD", \
7680 7681 CT_CONTAINER_OPTIONS, "CT_CONTAINER_OPTIONS", \
7681 7682 CT_GET_NV_PARAM, "CT_GET_NV_PARAM", \
7682 7683 CT_GET_PARAM, "CT_GET_PARAM", \
7683 7684 CT_NV_PARAM_SIZE, "CT_NV_PARAM_SIZE", \
7684 7685 CT_COMMON_PARAM_SIZE, "CT_COMMON_PARAM_SIZE", \
7685 7686 CT_PLATFORM_PARAM_SIZE, "CT_PLATFORM_PARAM_SIZE", \
7686 7687 CT_SET_NV_PARAM, "CT_SET_NV_PARAM", \
7687 7688 CT_ABORT_SCRUB, "CT_ABORT_SCRUB", \
7688 7689 CT_GET_SCRUB_ERROR, "CT_GET_SCRUB_ERROR", \
7689 7690 CT_LABEL_CONTAINER, "CT_LABEL_CONTAINER", \
7690 7691 CT_CONTINUE_DATA, "CT_CONTINUE_DATA", \
7691 7692 CT_STOP_DATA, "CT_STOP_DATA", \
7692 7693 CT_GET_PARTITION_TABLE, "CT_GET_PARTITION_TABLE", \
7693 7694 CT_GET_DISK_PARTITIONS, "CT_GET_DISK_PARTITIONS", \
7694 7695 CT_GET_MISC_STATUS, "CT_GET_MISC_STATUS", \
7695 7696 CT_GET_CONTAINER_PERF_INFO, "CT_GET_CONTAINER_PERF_INFO", \
7696 7697 CT_GET_TIME, "CT_GET_TIME", \
7697 7698 CT_READ_DATA, "CT_READ_DATA", \
7698 7699 CT_CTR, "CT_CTR", \
7699 7700 CT_CTL, "CT_CTL", \
7700 7701 CT_DRAINIO, "CT_DRAINIO", \
7701 7702 CT_RELEASEIO, "CT_RELEASEIO", \
7702 7703 CT_GET_NVRAM, "CT_GET_NVRAM", \
7703 7704 CT_GET_MEMORY, "CT_GET_MEMORY", \
7704 7705 CT_PRINT_CT_LOG, "CT_PRINT_CT_LOG", \
7705 7706 CT_ADD_LEVEL, "CT_ADD_LEVEL", \
7706 7707 CT_NV_ZERO, "CT_NV_ZERO", \
7707 7708 CT_READ_SIGNATURE, "CT_READ_SIGNATURE", \
7708 7709 CT_THROTTLE_ON, "CT_THROTTLE_ON", \
7709 7710 CT_THROTTLE_OFF, "CT_THROTTLE_OFF", \
7710 7711 CT_GET_THROTTLE_STATS, "CT_GET_THROTTLE_STATS", \
7711 7712 CT_MAKE_SNAPSHOT, "CT_MAKE_SNAPSHOT", \
7712 7713 CT_REMOVE_SNAPSHOT, "CT_REMOVE_SNAPSHOT", \
7713 7714 CT_WRITE_USER_FLAGS, "CT_WRITE_USER_FLAGS", \
7714 7715 CT_READ_USER_FLAGS, "CT_READ_USER_FLAGS", \
7715 7716 CT_MONITOR, "CT_MONITOR", \
7716 7717 CT_GEN_MORPH, "CT_GEN_MORPH", \
7717 7718 CT_GET_SNAPSHOT_INFO, "CT_GET_SNAPSHOT_INFO", \
7718 7719 CT_CACHE_SET, "CT_CACHE_SET", \
7719 7720 CT_CACHE_STAT, "CT_CACHE_STAT", \
7720 7721 CT_TRACE_START, "CT_TRACE_START", \
7721 7722 CT_TRACE_STOP, "CT_TRACE_STOP", \
7722 7723 CT_TRACE_ENABLE, "CT_TRACE_ENABLE", \
7723 7724 CT_TRACE_DISABLE, "CT_TRACE_DISABLE", \
7724 7725 CT_FORCE_CORE_DUMP, "CT_FORCE_CORE_DUMP", \
7725 7726 CT_SET_SERIAL_NUMBER, "CT_SET_SERIAL_NUMBER", \
7726 7727 CT_RESET_SERIAL_NUMBER, "CT_RESET_SERIAL_NUMBER", \
7727 7728 CT_ENABLE_RAID5, "CT_ENABLE_RAID5", \
7728 7729 CT_CLEAR_VALID_DUMP_FLAG, "CT_CLEAR_VALID_DUMP_FLAG", \
7729 7730 CT_GET_MEM_STATS, "CT_GET_MEM_STATS", \
7730 7731 CT_GET_CORE_SIZE, "CT_GET_CORE_SIZE", \
7731 7732 CT_CREATE_CONTAINER_OLD, "CT_CREATE_CONTAINER_OLD", \
7732 7733 CT_STOP_DUMPS, "CT_STOP_DUMPS", \
7733 7734 CT_PANIC_ON_TAKE_A_BREAK, "CT_PANIC_ON_TAKE_A_BREAK", \
7734 7735 CT_GET_CACHE_STATS, "CT_GET_CACHE_STATS", \
7735 7736 CT_MOVE_PARTITION, "CT_MOVE_PARTITION", \
7736 7737 CT_FLUSH_CACHE, "CT_FLUSH_CACHE", \
7737 7738 CT_READ_NAME, "CT_READ_NAME", \
7738 7739 CT_WRITE_NAME, "CT_WRITE_NAME", \
7739 7740 CT_TOSS_CACHE, "CT_TOSS_CACHE", \
7740 7741 CT_LOCK_DRAINIO, "CT_LOCK_DRAINIO", \
7741 7742 CT_CONTAINER_OFFLINE, "CT_CONTAINER_OFFLINE", \
7742 7743 CT_SET_CACHE_SIZE, "CT_SET_CACHE_SIZE", \
7743 7744 CT_CLEAN_SHUTDOWN_STATUS, "CT_CLEAN_SHUTDOWN_STATUS", \
7744 7745 CT_CLEAR_DISKLOG_ON_DISK, "CT_CLEAR_DISKLOG_ON_DISK", \
7745 7746 CT_CLEAR_ALL_DISKLOG, "CT_CLEAR_ALL_DISKLOG", \
7746 7747 CT_CACHE_FAVOR, "CT_CACHE_FAVOR", \
7747 7748 CT_READ_PASSTHRU_MBR, "CT_READ_PASSTHRU_MBR", \
7748 7749 CT_SCRUB_NOFIX, "CT_SCRUB_NOFIX", \
7749 7750 CT_SCRUB2_NOFIX, "CT_SCRUB2_NOFIX", \
7750 7751 CT_FLUSH, "CT_FLUSH", \
7751 7752 CT_REBUILD, "CT_REBUILD", \
7752 7753 CT_FLUSH_CONTAINER, "CT_FLUSH_CONTAINER", \
7753 7754 CT_RESTART, "CT_RESTART", \
7754 7755 CT_GET_CONFIG_STATUS, "CT_GET_CONFIG_STATUS", \
7755 7756 CT_TRACE_FLAG, "CT_TRACE_FLAG", \
7756 7757 CT_RESTART_MORPH, "CT_RESTART_MORPH", \
7757 7758 CT_GET_TRACE_INFO, "CT_GET_TRACE_INFO", \
7758 7759 CT_GET_TRACE_ITEM, "CT_GET_TRACE_ITEM", \
7759 7760 CT_COMMIT_CONFIG, "CT_COMMIT_CONFIG", \
7760 7761 CT_CONTAINER_EXISTS, "CT_CONTAINER_EXISTS", \
7761 7762 CT_GET_SLICE_FROM_DEVT, "CT_GET_SLICE_FROM_DEVT", \
7762 7763 CT_OPEN_READ_WRITE, "CT_OPEN_READ_WRITE", \
7763 7764 CT_WRITE_MEMORY_BLOCK, "CT_WRITE_MEMORY_BLOCK", \
7764 7765 CT_GET_CACHE_PARAMS, "CT_GET_CACHE_PARAMS", \
7765 7766 CT_CRAZY_CACHE, "CT_CRAZY_CACHE", \
7766 7767 CT_GET_PROFILE_STRUCT, "CT_GET_PROFILE_STRUCT", \
7767 7768 CT_SET_IO_TRACE_FLAG, "CT_SET_IO_TRACE_FLAG", \
7768 7769 CT_GET_IO_TRACE_STRUCT, "CT_GET_IO_TRACE_STRUCT", \
7769 7770 CT_CID_TO_64BITS_UID, "CT_CID_TO_64BITS_UID", \
7770 7771 CT_64BITS_UID_TO_CID, "CT_64BITS_UID_TO_CID", \
7771 7772 CT_PAR_TO_64BITS_UID, "CT_PAR_TO_64BITS_UID", \
7772 7773 CT_CID_TO_32BITS_UID, "CT_CID_TO_32BITS_UID", \
7773 7774 CT_32BITS_UID_TO_CID, "CT_32BITS_UID_TO_CID", \
7774 7775 CT_PAR_TO_32BITS_UID, "CT_PAR_TO_32BITS_UID", \
7775 7776 CT_SET_FAILOVER_OPTION, "CT_SET_FAILOVER_OPTION", \
7776 7777 CT_GET_FAILOVER_OPTION, "CT_GET_FAILOVER_OPTION", \
7777 7778 CT_STRIPE_ADD2, "CT_STRIPE_ADD2", \
7778 7779 CT_CREATE_VOLUME_SET, "CT_CREATE_VOLUME_SET", \
7779 7780 CT_CREATE_STRIPE_SET, "CT_CREATE_STRIPE_SET", \
7780 7781 CT_VERIFY_CONTAINER, "CT_VERIFY_CONTAINER", \
7781 7782 CT_IS_CONTAINER_DEAD, "CT_IS_CONTAINER_DEAD", \
7782 7783 CT_GET_CONTAINER_OPTION, "CT_GET_CONTAINER_OPTION", \
7783 7784 CT_GET_SNAPSHOT_UNUSED_STRUCT, "CT_GET_SNAPSHOT_UNUSED_STRUCT", \
7784 7785 CT_CLEAR_SNAPSHOT_UNUSED_STRUCT, "CT_CLEAR_SNAPSHOT_UNUSED_STRUCT", \
7785 7786 CT_GET_CONTAINER_INFO, "CT_GET_CONTAINER_INFO", \
7786 7787 CT_CREATE_CONTAINER, "CT_CREATE_CONTAINER", \
7787 7788 CT_CHANGE_CREATIONINFO, "CT_CHANGE_CREATIONINFO", \
7788 7789 CT_CHECK_CONFLICT_UID, "CT_CHECK_CONFLICT_UID", \
7789 7790 CT_CONTAINER_UID_CHECK, "CT_CONTAINER_UID_CHECK", \
7790 7791 CT_IS_CONTAINER_MEATADATA_STANDARD, \
7791 7792 "CT_IS_CONTAINER_MEATADATA_STANDARD", \
7792 7793 CT_IS_SLICE_METADATA_STANDARD, "CT_IS_SLICE_METADATA_STANDARD", \
7793 7794 CT_GET_IMPORT_COUNT, "CT_GET_IMPORT_COUNT", \
7794 7795 CT_CANCEL_ALL_IMPORTS, "CT_CANCEL_ALL_IMPORTS", \
7795 7796 CT_GET_IMPORT_INFO, "CT_GET_IMPORT_INFO", \
7796 7797 CT_IMPORT_ARRAY, "CT_IMPORT_ARRAY", \
7797 7798 CT_GET_LOG_SIZE, "CT_GET_LOG_SIZE", \
7798 7799 CT_ALARM_GET_STATE, "CT_ALARM_GET_STATE", \
7799 7800 CT_ALARM_SET_STATE, "CT_ALARM_SET_STATE", \
7800 7801 CT_ALARM_ON_OFF, "CT_ALARM_ON_OFF", \
7801 7802 CT_GET_EE_OEM_ID, "CT_GET_EE_OEM_ID", \
7802 7803 CT_GET_PPI_HEADERS, "CT_GET_PPI_HEADERS", \
7803 7804 CT_GET_PPI_DATA, "CT_GET_PPI_DATA", \
7804 7805 CT_GET_PPI_ENTRIES, "CT_GET_PPI_ENTRIES", \
7805 7806 CT_DELETE_PPI_BUNDLE, "CT_DELETE_PPI_BUNDLE", \
7806 7807 CT_GET_PARTITION_TABLE_2, "CT_GET_PARTITION_TABLE_2", \
7807 7808 CT_GET_PARTITION_INFO_2, "CT_GET_PARTITION_INFO_2", \
7808 7809 CT_GET_DISK_PARTITIONS_2, "CT_GET_DISK_PARTITIONS_2", \
7809 7810 CT_QUIESCE_ADAPTER, "CT_QUIESCE_ADAPTER", \
7810 7811 CT_CLEAR_PPI_TABLE, "CT_CLEAR_PPI_TABLE"
7811 7812
7812 7813 #define AAC_CL_SUBCMD_KEY_STRINGS \
7813 7814 CL_NULL, "CL_NULL", \
7814 7815 DS_INIT, "DS_INIT", \
7815 7816 DS_RESCAN, "DS_RESCAN", \
7816 7817 DS_CREATE, "DS_CREATE", \
7817 7818 DS_DELETE, "DS_DELETE", \
7818 7819 DS_ADD_DISK, "DS_ADD_DISK", \
7819 7820 DS_REMOVE_DISK, "DS_REMOVE_DISK", \
7820 7821 DS_MOVE_DISK, "DS_MOVE_DISK", \
7821 7822 DS_TAKE_OWNERSHIP, "DS_TAKE_OWNERSHIP", \
7822 7823 DS_RELEASE_OWNERSHIP, "DS_RELEASE_OWNERSHIP", \
7823 7824 DS_FORCE_OWNERSHIP, "DS_FORCE_OWNERSHIP", \
7824 7825 DS_GET_DISK_SET_PARAM, "DS_GET_DISK_SET_PARAM", \
7825 7826 DS_GET_DRIVE_PARAM, "DS_GET_DRIVE_PARAM", \
7826 7827 DS_GET_SLICE_PARAM, "DS_GET_SLICE_PARAM", \
7827 7828 DS_GET_DISK_SETS, "DS_GET_DISK_SETS", \
7828 7829 DS_GET_DRIVES, "DS_GET_DRIVES", \
7829 7830 DS_SET_DISK_SET_PARAM, "DS_SET_DISK_SET_PARAM", \
7830 7831 DS_ONLINE, "DS_ONLINE", \
7831 7832 DS_OFFLINE, "DS_OFFLINE", \
7832 7833 DS_ONLINE_CONTAINERS, "DS_ONLINE_CONTAINERS", \
7833 7834 DS_FSAPRINT, "DS_FSAPRINT", \
7834 7835 CL_CFG_SET_HOST_IDS, "CL_CFG_SET_HOST_IDS", \
7835 7836 CL_CFG_SET_PARTNER_HOST_IDS, "CL_CFG_SET_PARTNER_HOST_IDS", \
7836 7837 CL_CFG_GET_CLUSTER_CONFIG, "CL_CFG_GET_CLUSTER_CONFIG", \
7837 7838 CC_CLI_CLEAR_MESSAGE_BUFFER, "CC_CLI_CLEAR_MESSAGE_BUFFER", \
7838 7839 CC_SRV_CLEAR_MESSAGE_BUFFER, "CC_SRV_CLEAR_MESSAGE_BUFFER", \
7839 7840 CC_CLI_SHOW_MESSAGE_BUFFER, "CC_CLI_SHOW_MESSAGE_BUFFER", \
7840 7841 CC_SRV_SHOW_MESSAGE_BUFFER, "CC_SRV_SHOW_MESSAGE_BUFFER", \
7841 7842 CC_CLI_SEND_MESSAGE, "CC_CLI_SEND_MESSAGE", \
7842 7843 CC_SRV_SEND_MESSAGE, "CC_SRV_SEND_MESSAGE", \
7843 7844 CC_CLI_GET_MESSAGE, "CC_CLI_GET_MESSAGE", \
7844 7845 CC_SRV_GET_MESSAGE, "CC_SRV_GET_MESSAGE", \
7845 7846 CC_SEND_TEST_MESSAGE, "CC_SEND_TEST_MESSAGE", \
7846 7847 CC_GET_BUSINFO, "CC_GET_BUSINFO", \
7847 7848 CC_GET_PORTINFO, "CC_GET_PORTINFO", \
7848 7849 CC_GET_NAMEINFO, "CC_GET_NAMEINFO", \
7849 7850 CC_GET_CONFIGINFO, "CC_GET_CONFIGINFO", \
7850 7851 CQ_QUORUM_OP, "CQ_QUORUM_OP"
7851 7852
7852 7853 #define AAC_AIF_SUBCMD_KEY_STRINGS \
7853 7854 AifCmdEventNotify, "AifCmdEventNotify", \
7854 7855 AifCmdJobProgress, "AifCmdJobProgress", \
7855 7856 AifCmdAPIReport, "AifCmdAPIReport", \
7856 7857 AifCmdDriverNotify, "AifCmdDriverNotify", \
7857 7858 AifReqJobList, "AifReqJobList", \
7858 7859 AifReqJobsForCtr, "AifReqJobsForCtr", \
7859 7860 AifReqJobsForScsi, "AifReqJobsForScsi", \
7860 7861 AifReqJobReport, "AifReqJobReport", \
7861 7862 AifReqTerminateJob, "AifReqTerminateJob", \
7862 7863 AifReqSuspendJob, "AifReqSuspendJob", \
7863 7864 AifReqResumeJob, "AifReqResumeJob", \
7864 7865 AifReqSendAPIReport, "AifReqSendAPIReport", \
7865 7866 AifReqAPIJobStart, "AifReqAPIJobStart", \
7866 7867 AifReqAPIJobUpdate, "AifReqAPIJobUpdate", \
7867 7868 AifReqAPIJobFinish, "AifReqAPIJobFinish"
7868 7869
7869 7870 #define AAC_IOCTL_SUBCMD_KEY_STRINGS \
7870 7871 Reserved_IOCTL, "Reserved_IOCTL", \
7871 7872 GetDeviceHandle, "GetDeviceHandle", \
7872 7873 BusTargetLun_to_DeviceHandle, "BusTargetLun_to_DeviceHandle", \
7873 7874 DeviceHandle_to_BusTargetLun, "DeviceHandle_to_BusTargetLun", \
7874 7875 RescanBus, "RescanBus", \
7875 7876 GetDeviceProbeInfo, "GetDeviceProbeInfo", \
7876 7877 GetDeviceCapacity, "GetDeviceCapacity", \
7877 7878 GetContainerProbeInfo, "GetContainerProbeInfo", \
7878 7879 GetRequestedMemorySize, "GetRequestedMemorySize", \
7879 7880 GetBusInfo, "GetBusInfo", \
7880 7881 GetVendorSpecific, "GetVendorSpecific", \
7881 7882 EnhancedGetDeviceProbeInfo, "EnhancedGetDeviceProbeInfo", \
7882 7883 EnhancedGetBusInfo, "EnhancedGetBusInfo", \
7883 7884 SetupExtendedCounters, "SetupExtendedCounters", \
7884 7885 GetPerformanceCounters, "GetPerformanceCounters", \
7885 7886 ResetPerformanceCounters, "ResetPerformanceCounters", \
7886 7887 ReadModePage, "ReadModePage", \
7887 7888 WriteModePage, "WriteModePage", \
7888 7889 ReadDriveParameter, "ReadDriveParameter", \
7889 7890 WriteDriveParameter, "WriteDriveParameter", \
7890 7891 ResetAdapter, "ResetAdapter", \
7891 7892 ResetBus, "ResetBus", \
7892 7893 ResetBusDevice, "ResetBusDevice", \
7893 7894 ExecuteSrb, "ExecuteSrb", \
7894 7895 Create_IO_Task, "Create_IO_Task", \
7895 7896 Delete_IO_Task, "Delete_IO_Task", \
7896 7897 Get_IO_Task_Info, "Get_IO_Task_Info", \
7897 7898 Check_Task_Progress, "Check_Task_Progress", \
7898 7899 InjectError, "InjectError", \
7899 7900 GetDeviceDefectCounts, "GetDeviceDefectCounts", \
7900 7901 GetDeviceDefectInfo, "GetDeviceDefectInfo", \
7901 7902 GetDeviceStatus, "GetDeviceStatus", \
7902 7903 ClearDeviceStatus, "ClearDeviceStatus", \
7903 7904 DiskSpinControl, "DiskSpinControl", \
7904 7905 DiskSmartControl, "DiskSmartControl", \
7905 7906 WriteSame, "WriteSame", \
7906 7907 ReadWriteLong, "ReadWriteLong", \
7907 7908 FormatUnit, "FormatUnit", \
7908 7909 TargetDeviceControl, "TargetDeviceControl", \
7909 7910 TargetChannelControl, "TargetChannelControl", \
7910 7911 FlashNewCode, "FlashNewCode", \
7911 7912 DiskCheck, "DiskCheck", \
7912 7913 RequestSense, "RequestSense", \
7913 7914 DiskPERControl, "DiskPERControl", \
7914 7915 Read10, "Read10", \
7915 7916 Write10, "Write10"
7916 7917
7917 7918 #define AAC_AIFEN_KEY_STRINGS \
7918 7919 AifEnGeneric, "Generic", \
7919 7920 AifEnTaskComplete, "TaskComplete", \
7920 7921 AifEnConfigChange, "Config change", \
7921 7922 AifEnContainerChange, "Container change", \
7922 7923 AifEnDeviceFailure, "device failed", \
7923 7924 AifEnMirrorFailover, "Mirror failover", \
7924 7925 AifEnContainerEvent, "container event", \
7925 7926 AifEnFileSystemChange, "File system changed", \
7926 7927 AifEnConfigPause, "Container pause event", \
7927 7928 AifEnConfigResume, "Container resume event", \
7928 7929 AifEnFailoverChange, "Failover space assignment changed", \
7929 7930 AifEnRAID5RebuildDone, "RAID5 rebuild finished", \
7930 7931 AifEnEnclosureManagement, "Enclosure management event", \
7931 7932 AifEnBatteryEvent, "battery event", \
7932 7933 AifEnAddContainer, "Add container", \
7933 7934 AifEnDeleteContainer, "Delete container", \
7934 7935 AifEnSMARTEvent, "SMART Event", \
7935 7936 AifEnBatteryNeedsRecond, "battery needs reconditioning", \
7936 7937 AifEnClusterEvent, "cluster event", \
7937 7938 	AifEnDiskSetEvent, "disk set event occurred", \
7938 7939 AifDenMorphComplete, "morph operation completed", \
7939 7940 AifDenVolumeExtendComplete, "VolumeExtendComplete"
7940 7941
7941 7942 struct aac_key_strings {
7942 7943 int key;
7943 7944 char *message;
7944 7945 };
7945 7946
7946 7947 extern struct scsi_key_strings scsi_cmds[];
7947 7948
7948 7949 static struct aac_key_strings aac_fib_cmds[] = {
7949 7950 AAC_FIB_CMD_KEY_STRINGS,
7950 7951 -1, NULL
7951 7952 };
7952 7953
7953 7954 static struct aac_key_strings aac_ctvm_subcmds[] = {
7954 7955 AAC_CTVM_SUBCMD_KEY_STRINGS,
7955 7956 -1, NULL
7956 7957 };
7957 7958
7958 7959 static struct aac_key_strings aac_ct_subcmds[] = {
7959 7960 AAC_CT_SUBCMD_KEY_STRINGS,
7960 7961 -1, NULL
7961 7962 };
7962 7963
7963 7964 static struct aac_key_strings aac_cl_subcmds[] = {
7964 7965 AAC_CL_SUBCMD_KEY_STRINGS,
7965 7966 -1, NULL
7966 7967 };
7967 7968
7968 7969 static struct aac_key_strings aac_aif_subcmds[] = {
7969 7970 AAC_AIF_SUBCMD_KEY_STRINGS,
7970 7971 -1, NULL
7971 7972 };
7972 7973
7973 7974 static struct aac_key_strings aac_ioctl_subcmds[] = {
7974 7975 AAC_IOCTL_SUBCMD_KEY_STRINGS,
7975 7976 -1, NULL
7976 7977 };
7977 7978
7978 7979 static struct aac_key_strings aac_aifens[] = {
7979 7980 AAC_AIFEN_KEY_STRINGS,
7980 7981 -1, NULL
7981 7982 };
7982 7983
7983 7984 /*
7984 7985 * The following function comes from Adaptec:
7985 7986 *
7986 7987  * Get the firmware print buffer parameters from the firmware;
7987 7988  * if the command was successful, map in the address.
7988 7989 */
7989 7990 static int
7990 7991 aac_get_fw_debug_buffer(struct aac_softstate *softs)
7991 7992 {
7992 7993 if (aac_sync_mbcommand(softs, AAC_MONKER_GETDRVPROP,
7993 7994 0, 0, 0, 0, NULL) == AACOK) {
7994 7995 uint32_t mondrv_buf_paddrl = AAC_MAILBOX_GET(softs, 1);
7995 7996 uint32_t mondrv_buf_paddrh = AAC_MAILBOX_GET(softs, 2);
7996 7997 uint32_t mondrv_buf_size = AAC_MAILBOX_GET(softs, 3);
7997 7998 uint32_t mondrv_hdr_size = AAC_MAILBOX_GET(softs, 4);
7998 7999
7999 8000 if (mondrv_buf_size) {
8000 8001 uint32_t offset = mondrv_buf_paddrl - \
8001 8002 softs->pci_mem_base_paddr;
8002 8003
8003 8004 /*
8004 8005 * See if the address is already mapped in, and
8005 8006 * if so set it up from the base address
8006 8007 */
8007 8008 if ((mondrv_buf_paddrh == 0) &&
8008 8009 (offset + mondrv_buf_size < softs->map_size)) {
8009 8010 mutex_enter(&aac_prt_mutex);
8010 8011 softs->debug_buf_offset = offset;
8011 8012 softs->debug_header_size = mondrv_hdr_size;
8012 8013 softs->debug_buf_size = mondrv_buf_size;
8013 8014 softs->debug_fw_flags = 0;
8014 8015 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
8015 8016 mutex_exit(&aac_prt_mutex);
8016 8017
8017 8018 return (AACOK);
8018 8019 }
8019 8020 }
8020 8021 }
8021 8022 return (AACERR);
8022 8023 }
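/*
 * A minimal illustrative sketch, not from the driver itself: the mailbox
 * replies read above describe the firmware print buffer as a 64-bit
 * physical address split into mondrv_buf_paddrh:mondrv_buf_paddrl, plus a
 * total buffer size and a header size.  aac_get_fw_debug_buffer() only
 * adopts the buffer when it already lies inside the mapped PCI BAR, which
 * reduces to roughly:
 *
 *	offset = mondrv_buf_paddrl - softs->pci_mem_base_paddr;
 *	usable = (mondrv_buf_paddrh == 0 &&
 *	    offset + mondrv_buf_size < softs->map_size);
 *
 * If that check fails, the routine simply returns AACERR rather than
 * setting up a separate mapping.
 */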
8023 8024
8024 8025 int
8025 8026 aac_dbflag_on(struct aac_softstate *softs, int flag)
8026 8027 {
8027 8028 int debug_flags = softs ? softs->debug_flags : aac_debug_flags;
8028 8029
8029 8030 return ((debug_flags & (AACDB_FLAGS_FW_PRINT | \
8030 8031 AACDB_FLAGS_KERNEL_PRINT)) && (debug_flags & flag));
8031 8032 }
8032 8033
8033 8034 static void
8034 8035 aac_cmn_err(struct aac_softstate *softs, uint_t lev, char sl, int noheader)
8035 8036 {
8036 8037 if (noheader) {
8037 8038 if (sl) {
8038 8039 aac_fmt[0] = sl;
8039 8040 cmn_err(lev, aac_fmt, aac_prt_buf);
8040 8041 } else {
8041 8042 cmn_err(lev, &aac_fmt[1], aac_prt_buf);
8042 8043 }
8043 8044 } else {
8044 8045 if (sl) {
8045 8046 aac_fmt_header[0] = sl;
8046 8047 cmn_err(lev, aac_fmt_header,
8047 8048 softs->vendor_name, softs->instance,
8048 8049 aac_prt_buf);
8049 8050 } else {
8050 8051 cmn_err(lev, &aac_fmt_header[1],
8051 8052 softs->vendor_name, softs->instance,
8052 8053 aac_prt_buf);
8053 8054 }
8054 8055 }
8055 8056 }
8056 8057
8057 8058 /*
8058 8059 * The following function comes from Adaptec:
8059 8060 *
8060 8061  * Format and print out the data passed in to the UART or console,
8061 8062  * as specified by the debug flags.
8062 8063 */
8063 8064 void
8064 8065 aac_printf(struct aac_softstate *softs, uint_t lev, const char *fmt, ...)
8065 8066 {
8066 8067 va_list args;
8067 8068 char sl; /* system log character */
8068 8069
8069 8070 mutex_enter(&aac_prt_mutex);
8070 8071 	/* Set up parameters and call vsprintf() to format the data */
8071 8072 if (strchr("^!?", fmt[0]) == NULL) {
8072 8073 sl = 0;
8073 8074 } else {
8074 8075 sl = fmt[0];
8075 8076 fmt++;
8076 8077 }
8077 8078 va_start(args, fmt);
8078 8079 (void) vsprintf(aac_prt_buf, fmt, args);
8079 8080 va_end(args);
8080 8081
8081 8082 /* Make sure the softs structure has been passed in for this section */
8082 8083 if (softs) {
8083 8084 if ((softs->debug_flags & AACDB_FLAGS_FW_PRINT) &&
8084 8085 /* If we are set up for a Firmware print */
8085 8086 (softs->debug_buf_size)) {
8086 8087 uint32_t count, i;
8087 8088
8088 8089 /* Make sure the string size is within boundaries */
8089 8090 count = strlen(aac_prt_buf);
8090 8091 if (count > softs->debug_buf_size)
8091 8092 count = (uint16_t)softs->debug_buf_size;
8092 8093
8093 8094 /*
8094 8095 * Wait for no more than AAC_PRINT_TIMEOUT for the
8095 8096 * previous message length to clear (the handshake).
8096 8097 */
8097 8098 for (i = 0; i < AAC_PRINT_TIMEOUT; i++) {
8098 8099 if (!PCI_MEM_GET32(softs,
8099 8100 softs->debug_buf_offset + \
8100 8101 AAC_FW_DBG_STRLEN_OFFSET))
8101 8102 break;
8102 8103
8103 8104 drv_usecwait(1000);
8104 8105 }
8105 8106
8106 8107 /*
8107 8108 * If the length is clear, copy over the message, the
8108 8109 			 * flags, and the length. Make sure the length is written
8109 8110 			 * last because that is the signal for the Firmware to
8110 8111 * pick it up.
8111 8112 */
8112 8113 if (!PCI_MEM_GET32(softs, softs->debug_buf_offset + \
8113 8114 AAC_FW_DBG_STRLEN_OFFSET)) {
8114 8115 PCI_MEM_REP_PUT8(softs,
8115 8116 softs->debug_buf_offset + \
8116 8117 softs->debug_header_size,
8117 8118 aac_prt_buf, count);
8118 8119 PCI_MEM_PUT32(softs,
8119 8120 softs->debug_buf_offset + \
8120 8121 AAC_FW_DBG_FLAGS_OFFSET,
8121 8122 softs->debug_fw_flags);
8122 8123 PCI_MEM_PUT32(softs,
8123 8124 softs->debug_buf_offset + \
8124 8125 AAC_FW_DBG_STRLEN_OFFSET, count);
8125 8126 } else {
8126 8127 cmn_err(CE_WARN, "UART output fail");
8127 8128 softs->debug_flags &= ~AACDB_FLAGS_FW_PRINT;
8128 8129 }
8129 8130 }
8130 8131
8131 8132 /*
8132 8133 * If the Kernel Debug Print flag is set, send it off
8133 8134 * to the Kernel Debugger
8134 8135 */
8135 8136 if (softs->debug_flags & AACDB_FLAGS_KERNEL_PRINT)
8136 8137 aac_cmn_err(softs, lev, sl,
8137 8138 (softs->debug_flags & AACDB_FLAGS_NO_HEADERS));
8138 8139 } else {
8139 8140 /* Driver not initialized yet, no firmware or header output */
8140 8141 if (aac_debug_flags & AACDB_FLAGS_KERNEL_PRINT)
8141 8142 aac_cmn_err(softs, lev, sl, 1);
8142 8143 }
8143 8144 mutex_exit(&aac_prt_mutex);
8144 8145 }
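/*
 * Illustrative summary, assuming only the offsets already used by
 * aac_printf() above: the firmware print path is a one-message handshake
 * inside the mapped buffer located at debug_buf_offset:
 *
 *	+ AAC_FW_DBG_FLAGS_OFFSET	flags word (debug_fw_flags)
 *	+ AAC_FW_DBG_STRLEN_OFFSET	message length word
 *	+ debug_header_size		message text
 *
 * The driver waits up to AAC_PRINT_TIMEOUT milliseconds for the length
 * word to read zero (firmware has consumed the previous message), copies
 * the text and flags, and writes the length last; the non-zero length is
 * what signals the firmware that a new message is ready.
 */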
8145 8146
8146 8147 /*
8147 8148 * Translate command number to description string
8148 8149 */
8149 8150 static char *
8150 8151 aac_cmd_name(int cmd, struct aac_key_strings *cmdlist)
8151 8152 {
8152 8153 int i;
8153 8154
8154 8155 for (i = 0; cmdlist[i].key != -1; i++) {
8155 8156 if (cmd == cmdlist[i].key)
8156 8157 return (cmdlist[i].message);
8157 8158 }
8158 8159 return (NULL);
8159 8160 }
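/*
 * Illustrative only (hypothetical call, not from the driver): every
 * aac_key_strings table above ends with a { -1, NULL } entry, so
 * aac_cmd_name() is a linear scan that returns NULL for an unknown key.
 * The debug printers below follow the same fallback pattern, e.g.:
 *
 *	char *s = aac_cmd_name(AifEnConfigChange, aac_aifens);
 *	if (s != NULL)
 *		aac_printf(softs, CE_NOTE, "AIF! %s", s);
 *	else
 *		aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)", aif_en_type);
 */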
8160 8161
8161 8162 static void
8162 8163 aac_print_scmd(struct aac_softstate *softs, struct aac_cmd *acp)
8163 8164 {
8164 8165 struct scsi_pkt *pkt = acp->pkt;
8165 8166 struct scsi_address *ap = &pkt->pkt_address;
8166 8167 int is_pd = 0;
8167 8168 int ctl = ddi_get_instance(softs->devinfo_p);
8168 8169 int tgt = ap->a_target;
8169 8170 int lun = ap->a_lun;
8170 8171 union scsi_cdb *cdbp = (void *)pkt->pkt_cdbp;
8171 8172 uchar_t cmd = cdbp->scc_cmd;
8172 8173 char *desc;
8173 8174
8174 8175 if (tgt >= AAC_MAX_LD) {
8175 8176 is_pd = 1;
8176 8177 ctl = ((struct aac_nondasd *)acp->dvp)->bus;
8177 8178 tgt = ((struct aac_nondasd *)acp->dvp)->tid;
8178 8179 lun = 0;
8179 8180 }
8180 8181
8181 8182 if ((desc = aac_cmd_name(cmd,
8182 8183 (struct aac_key_strings *)scsi_cmds)) == NULL) {
8183 8184 aac_printf(softs, CE_NOTE,
8184 8185 "SCMD> Unknown(0x%2x) --> c%dt%dL%d %s",
8185 8186 cmd, ctl, tgt, lun, is_pd ? "(pd)" : "");
8186 8187 return;
8187 8188 }
8188 8189
8189 8190 switch (cmd) {
8190 8191 case SCMD_READ:
8191 8192 case SCMD_WRITE:
8192 8193 aac_printf(softs, CE_NOTE,
8193 8194 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
8194 8195 desc, GETG0ADDR(cdbp), GETG0COUNT(cdbp),
8195 8196 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8196 8197 ctl, tgt, lun, is_pd ? "(pd)" : "");
8197 8198 break;
8198 8199 case SCMD_READ_G1:
8199 8200 case SCMD_WRITE_G1:
8200 8201 aac_printf(softs, CE_NOTE,
8201 8202 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
8202 8203 desc, GETG1ADDR(cdbp), GETG1COUNT(cdbp),
8203 8204 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8204 8205 ctl, tgt, lun, is_pd ? "(pd)" : "");
8205 8206 break;
8206 8207 case SCMD_READ_G4:
8207 8208 case SCMD_WRITE_G4:
8208 8209 aac_printf(softs, CE_NOTE,
8209 8210 "SCMD> %s 0x%x.%08x[%d] %s --> c%dt%dL%d %s",
8210 8211 desc, GETG4ADDR(cdbp), GETG4ADDRTL(cdbp),
8211 8212 GETG4COUNT(cdbp),
8212 8213 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8213 8214 ctl, tgt, lun, is_pd ? "(pd)" : "");
8214 8215 break;
8215 8216 case SCMD_READ_G5:
8216 8217 case SCMD_WRITE_G5:
8217 8218 aac_printf(softs, CE_NOTE,
8218 8219 "SCMD> %s 0x%x[%d] %s --> c%dt%dL%d %s",
8219 8220 desc, GETG5ADDR(cdbp), GETG5COUNT(cdbp),
8220 8221 (acp->flags & AAC_CMD_NO_INTR) ? "poll" : "intr",
8221 8222 ctl, tgt, lun, is_pd ? "(pd)" : "");
8222 8223 break;
8223 8224 default:
8224 8225 aac_printf(softs, CE_NOTE, "SCMD> %s --> c%dt%dL%d %s",
8225 8226 desc, ctl, tgt, lun, is_pd ? "(pd)" : "");
8226 8227 }
8227 8228 }
8228 8229
8229 8230 void
8230 8231 aac_print_fib(struct aac_softstate *softs, struct aac_slot *slotp)
8231 8232 {
8232 8233 struct aac_cmd *acp = slotp->acp;
8233 8234 struct aac_fib *fibp = slotp->fibp;
8234 8235 ddi_acc_handle_t acc = slotp->fib_acc_handle;
8235 8236 uint16_t fib_size;
8236 8237 uint32_t fib_cmd, sub_cmd;
8237 8238 char *cmdstr, *subcmdstr;
8238 8239 char *caller;
8239 8240 int i;
8240 8241
8241 8242 if (acp) {
8242 8243 if (!(softs->debug_fib_flags & acp->fib_flags))
8243 8244 return;
8244 8245 if (acp->fib_flags & AACDB_FLAGS_FIB_SCMD)
8245 8246 caller = "SCMD";
8246 8247 else if (acp->fib_flags & AACDB_FLAGS_FIB_IOCTL)
8247 8248 caller = "IOCTL";
8248 8249 else if (acp->fib_flags & AACDB_FLAGS_FIB_SRB)
8249 8250 caller = "SRB";
8250 8251 else
8251 8252 return;
8252 8253 } else {
8253 8254 if (!(softs->debug_fib_flags & AACDB_FLAGS_FIB_SYNC))
8254 8255 return;
8255 8256 caller = "SYNC";
8256 8257 }
8257 8258
8258 8259 fib_cmd = ddi_get16(acc, &fibp->Header.Command);
8259 8260 cmdstr = aac_cmd_name(fib_cmd, aac_fib_cmds);
8260 8261 sub_cmd = (uint32_t)-1;
8261 8262 subcmdstr = NULL;
8262 8263
8263 8264 /* Print FIB header */
8264 8265 if (softs->debug_fib_flags & AACDB_FLAGS_FIB_HEADER) {
8265 8266 aac_printf(softs, CE_NOTE, "FIB> from %s", caller);
8266 8267 aac_printf(softs, CE_NOTE, " XferState %d",
8267 8268 ddi_get32(acc, &fibp->Header.XferState));
8268 8269 aac_printf(softs, CE_NOTE, " Command %d",
8269 8270 ddi_get16(acc, &fibp->Header.Command));
8270 8271 aac_printf(softs, CE_NOTE, " StructType %d",
8271 8272 ddi_get8(acc, &fibp->Header.StructType));
8272 8273 aac_printf(softs, CE_NOTE, " Flags 0x%x",
8273 8274 ddi_get8(acc, &fibp->Header.Flags));
8274 8275 aac_printf(softs, CE_NOTE, " Size %d",
8275 8276 ddi_get16(acc, &fibp->Header.Size));
8276 8277 aac_printf(softs, CE_NOTE, " SenderSize %d",
8277 8278 ddi_get16(acc, &fibp->Header.SenderSize));
8278 8279 aac_printf(softs, CE_NOTE, " SenderAddr 0x%x",
8279 8280 ddi_get32(acc, &fibp->Header.SenderFibAddress));
8280 8281 aac_printf(softs, CE_NOTE, " RcvrAddr 0x%x",
8281 8282 ddi_get32(acc, &fibp->Header.ReceiverFibAddress));
8282 8283 aac_printf(softs, CE_NOTE, " SenderData 0x%x",
8283 8284 ddi_get32(acc, &fibp->Header.SenderData));
8284 8285 }
8285 8286
8286 8287 /* Print FIB data */
8287 8288 switch (fib_cmd) {
8288 8289 case ContainerCommand:
8289 8290 sub_cmd = ddi_get32(acc,
8290 8291 (void *)&(((uint32_t *)(void *)&fibp->data[0])[0]));
8291 8292 subcmdstr = aac_cmd_name(sub_cmd, aac_ctvm_subcmds);
8292 8293 if (subcmdstr == NULL)
8293 8294 break;
8294 8295
8295 8296 switch (sub_cmd) {
8296 8297 case VM_ContainerConfig: {
8297 8298 struct aac_Container *pContainer =
8298 8299 (struct aac_Container *)fibp->data;
8299 8300
8300 8301 fib_cmd = sub_cmd;
8301 8302 cmdstr = subcmdstr;
8302 8303 sub_cmd = (uint32_t)-1;
8303 8304 subcmdstr = NULL;
8304 8305
8305 8306 sub_cmd = ddi_get32(acc,
8306 8307 &pContainer->CTCommand.command);
8307 8308 subcmdstr = aac_cmd_name(sub_cmd, aac_ct_subcmds);
8308 8309 if (subcmdstr == NULL)
8309 8310 break;
8310 8311 aac_printf(softs, CE_NOTE, "FIB> %s (0x%x, 0x%x, 0x%x)",
8311 8312 subcmdstr,
8312 8313 ddi_get32(acc, &pContainer->CTCommand.param[0]),
8313 8314 ddi_get32(acc, &pContainer->CTCommand.param[1]),
8314 8315 ddi_get32(acc, &pContainer->CTCommand.param[2]));
8315 8316 return;
8316 8317 }
8317 8318
8318 8319 case VM_Ioctl:
8319 8320 fib_cmd = sub_cmd;
8320 8321 cmdstr = subcmdstr;
8321 8322 sub_cmd = (uint32_t)-1;
8322 8323 subcmdstr = NULL;
8323 8324
8324 8325 sub_cmd = ddi_get32(acc,
8325 8326 (void *)&(((uint32_t *)(void *)&fibp->data[0])[4]));
8326 8327 subcmdstr = aac_cmd_name(sub_cmd, aac_ioctl_subcmds);
8327 8328 break;
8328 8329
8329 8330 case VM_CtBlockRead:
8330 8331 case VM_CtBlockWrite: {
8331 8332 struct aac_blockread *br =
8332 8333 (struct aac_blockread *)fibp->data;
8333 8334 struct aac_sg_table *sg = &br->SgMap;
8334 8335 uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8335 8336
8336 8337 aac_printf(softs, CE_NOTE,
8337 8338 "FIB> %s Container %d 0x%x/%d", subcmdstr,
8338 8339 ddi_get32(acc, &br->ContainerId),
8339 8340 ddi_get32(acc, &br->BlockNumber),
8340 8341 ddi_get32(acc, &br->ByteCount));
8341 8342 for (i = 0; i < sgcount; i++)
8342 8343 aac_printf(softs, CE_NOTE,
8343 8344 " %d: 0x%08x/%d", i,
8344 8345 ddi_get32(acc, &sg->SgEntry[i].SgAddress),
8345 8346 ddi_get32(acc, &sg->SgEntry[i]. \
8346 8347 SgByteCount));
8347 8348 return;
8348 8349 }
8349 8350 }
8350 8351 break;
8351 8352
8352 8353 case ContainerCommand64: {
8353 8354 struct aac_blockread64 *br =
8354 8355 (struct aac_blockread64 *)fibp->data;
8355 8356 struct aac_sg_table64 *sg = &br->SgMap64;
8356 8357 uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8357 8358 uint64_t sgaddr;
8358 8359
8359 8360 sub_cmd = br->Command;
8360 8361 subcmdstr = NULL;
8361 8362 if (sub_cmd == VM_CtHostRead64)
8362 8363 subcmdstr = "VM_CtHostRead64";
8363 8364 else if (sub_cmd == VM_CtHostWrite64)
8364 8365 subcmdstr = "VM_CtHostWrite64";
8365 8366 else
8366 8367 break;
8367 8368
8368 8369 aac_printf(softs, CE_NOTE,
8369 8370 "FIB> %s Container %d 0x%x/%d", subcmdstr,
8370 8371 ddi_get16(acc, &br->ContainerId),
8371 8372 ddi_get32(acc, &br->BlockNumber),
8372 8373 ddi_get16(acc, &br->SectorCount));
8373 8374 for (i = 0; i < sgcount; i++) {
8374 8375 sgaddr = ddi_get64(acc,
8375 8376 &sg->SgEntry64[i].SgAddress);
8376 8377 aac_printf(softs, CE_NOTE,
8377 8378 " %d: 0x%08x.%08x/%d", i,
8378 8379 AAC_MS32(sgaddr), AAC_LS32(sgaddr),
8379 8380 ddi_get32(acc, &sg->SgEntry64[i]. \
8380 8381 SgByteCount));
8381 8382 }
8382 8383 return;
8383 8384 }
8384 8385
8385 8386 case RawIo: {
8386 8387 struct aac_raw_io *io = (struct aac_raw_io *)fibp->data;
8387 8388 struct aac_sg_tableraw *sg = &io->SgMapRaw;
8388 8389 uint32_t sgcount = ddi_get32(acc, &sg->SgCount);
8389 8390 uint64_t sgaddr;
8390 8391
8391 8392 aac_printf(softs, CE_NOTE,
8392 8393 "FIB> RawIo Container %d 0x%llx/%d 0x%x",
8393 8394 ddi_get16(acc, &io->ContainerId),
8394 8395 ddi_get64(acc, &io->BlockNumber),
8395 8396 ddi_get32(acc, &io->ByteCount),
8396 8397 ddi_get16(acc, &io->Flags));
8397 8398 for (i = 0; i < sgcount; i++) {
8398 8399 sgaddr = ddi_get64(acc, &sg->SgEntryRaw[i].SgAddress);
8399 8400 aac_printf(softs, CE_NOTE, " %d: 0x%08x.%08x/%d", i,
8400 8401 AAC_MS32(sgaddr), AAC_LS32(sgaddr),
8401 8402 ddi_get32(acc, &sg->SgEntryRaw[i].SgByteCount));
8402 8403 }
8403 8404 return;
8404 8405 }
8405 8406
8406 8407 case ClusterCommand:
8407 8408 sub_cmd = ddi_get32(acc,
8408 8409 (void *)&(((uint32_t *)(void *)fibp->data)[0]));
8409 8410 subcmdstr = aac_cmd_name(sub_cmd, aac_cl_subcmds);
8410 8411 break;
8411 8412
8412 8413 case AifRequest:
8413 8414 sub_cmd = ddi_get32(acc,
8414 8415 (void *)&(((uint32_t *)(void *)fibp->data)[0]));
8415 8416 subcmdstr = aac_cmd_name(sub_cmd, aac_aif_subcmds);
8416 8417 break;
8417 8418
8418 8419 default:
8419 8420 break;
8420 8421 }
8421 8422
8422 8423 fib_size = ddi_get16(acc, &(fibp->Header.Size));
8423 8424 if (subcmdstr)
8424 8425 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
8425 8426 subcmdstr, fib_size);
8426 8427 else if (cmdstr && sub_cmd == (uint32_t)-1)
8427 8428 aac_printf(softs, CE_NOTE, "FIB> %s, sz=%d",
8428 8429 cmdstr, fib_size);
8429 8430 else if (cmdstr)
8430 8431 aac_printf(softs, CE_NOTE, "FIB> %s: Unknown(0x%x), sz=%d",
8431 8432 cmdstr, sub_cmd, fib_size);
8432 8433 else
8433 8434 aac_printf(softs, CE_NOTE, "FIB> Unknown(0x%x), sz=%d",
8434 8435 fib_cmd, fib_size);
8435 8436 }
8436 8437
8437 8438 static void
8438 8439 aac_print_aif(struct aac_softstate *softs, struct aac_aif_command *aif)
8439 8440 {
8440 8441 int aif_command;
8441 8442 uint32_t aif_seqnumber;
8442 8443 int aif_en_type;
8443 8444 char *str;
8444 8445
8445 8446 aif_command = LE_32(aif->command);
8446 8447 aif_seqnumber = LE_32(aif->seqNumber);
8447 8448 aif_en_type = LE_32(aif->data.EN.type);
8448 8449
8449 8450 switch (aif_command) {
8450 8451 case AifCmdEventNotify:
8451 8452 str = aac_cmd_name(aif_en_type, aac_aifens);
8452 8453 if (str)
8453 8454 aac_printf(softs, CE_NOTE, "AIF! %s", str);
8454 8455 else
8455 8456 aac_printf(softs, CE_NOTE, "AIF! Unknown(0x%x)",
8456 8457 aif_en_type);
8457 8458 break;
8458 8459
8459 8460 case AifCmdJobProgress:
8460 8461 switch (LE_32(aif->data.PR[0].status)) {
8461 8462 case AifJobStsSuccess:
8462 8463 str = "success"; break;
8463 8464 case AifJobStsFinished:
8464 8465 str = "finished"; break;
8465 8466 case AifJobStsAborted:
8466 8467 str = "aborted"; break;
8467 8468 case AifJobStsFailed:
8468 8469 str = "failed"; break;
8469 8470 case AifJobStsSuspended:
8470 8471 str = "suspended"; break;
8471 8472 case AifJobStsRunning:
8472 8473 str = "running"; break;
8473 8474 default:
8474 8475 str = "unknown"; break;
8475 8476 }
8476 8477 aac_printf(softs, CE_NOTE,
8477 8478 "AIF! JobProgress (%d) - %s (%d, %d)",
8478 8479 aif_seqnumber, str,
8479 8480 LE_32(aif->data.PR[0].currentTick),
8480 8481 LE_32(aif->data.PR[0].finalTick));
8481 8482 break;
8482 8483
8483 8484 case AifCmdAPIReport:
8484 8485 aac_printf(softs, CE_NOTE, "AIF! APIReport (%d)",
8485 8486 aif_seqnumber);
8486 8487 break;
8487 8488
8488 8489 case AifCmdDriverNotify:
8489 8490 aac_printf(softs, CE_NOTE, "AIF! DriverNotify (%d)",
8490 8491 aif_seqnumber);
8491 8492 break;
8492 8493
8493 8494 default:
8494 8495 aac_printf(softs, CE_NOTE, "AIF! AIF %d (%d)",
8495 8496 aif_command, aif_seqnumber);
8496 8497 break;
8497 8498 }
8498 8499 }
8499 8500
8500 8501 #endif /* DEBUG */
(4920 lines elided)