7127 remove -Wno-missing-braces from Makefile.uts
1 /*
2 * O.S : Solaris
3 * FILE NAME : arcmsr.c
4 * BY : Erich Chen, C.L. Huang
5 * Description: SCSI RAID Device Driver for
6 * ARECA RAID Host adapter
7 *
8 * Copyright (C) 2002,2010 Areca Technology Corporation All rights reserved.
9 * Copyright (C) 2002,2010 Erich Chen
10 * Web site: www.areca.com.tw
11 * E-mail: erich@areca.com.tw; ching2048@areca.com.tw
12 *
13 * Redistribution and use in source and binary forms, with or without
14 * modification, are permitted provided that the following conditions
15 * are met:
16 * 1. Redistributions of source code must retain the above copyright
17 * notice, this list of conditions and the following disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 * 3. The party using or redistributing the source code and binary forms
22 * agrees to the disclaimer below and the terms and conditions set forth
23 * herein.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 *
37 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
38 * Use is subject to license terms.
39 *
40 */
41 /*
42 * This file and its contents are supplied under the terms of the
43 * Common Development and Distribution License ("CDDL"), version 1.0.
44 * You may only use this file in accordance with the terms of version
45 * 1.0 of the CDDL.
46 *
47 * A full copy of the text of the CDDL should have accompanied this
48 * source. A copy of the CDDL is also available via the Internet at
49 * http://www.illumos.org/license/CDDL.
50 */
51 /*
52 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
53 */
54 #include <sys/types.h>
55 #include <sys/ddidmareq.h>
56 #include <sys/scsi/scsi.h>
57 #include <sys/ddi.h>
58 #include <sys/sunddi.h>
59 #include <sys/file.h>
60 #include <sys/disp.h>
61 #include <sys/signal.h>
62 #include <sys/debug.h>
63 #include <sys/pci.h>
64 #include <sys/policy.h>
65 #include <sys/atomic.h>
66 #include "arcmsr.h"
67
68 static int arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd);
69 static int arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg,
70 int mode, cred_t *credp, int *rvalp);
71 static int arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd);
72 static int arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd);
73 static int arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
74 static int arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
75 static int arcmsr_tran_reset(struct scsi_address *ap, int level);
76 static int arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
77 static int arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
78 int whom);
79 static int arcmsr_tran_tgt_init(dev_info_t *host_dev_info,
80 dev_info_t *target_dev_info, scsi_hba_tran_t *hosttran,
81 struct scsi_device *sd);
82 static void arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
83 static void arcmsr_tran_destroy_pkt(struct scsi_address *ap,
84 struct scsi_pkt *pkt);
85 static void arcmsr_tran_sync_pkt(struct scsi_address *ap,
86 struct scsi_pkt *pkt);
87 static struct scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
88 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
89 int tgtlen, int flags, int (*callback)(), caddr_t arg);
90 static int arcmsr_config_child(struct ACB *acb, struct scsi_device *sd,
91 dev_info_t **dipp);
92
93 static int arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun,
94 dev_info_t **ldip);
95 static uint8_t arcmsr_abort_host_command(struct ACB *acb);
96 static uint8_t arcmsr_get_echo_from_iop(struct ACB *acb);
97 static uint_t arcmsr_intr_handler(caddr_t arg, caddr_t arg2);
98 static int arcmsr_initialize(struct ACB *acb);
99 static int arcmsr_dma_alloc(struct ACB *acb,
100 struct scsi_pkt *pkt, struct buf *bp, int flags, int (*callback)());
101 static int arcmsr_dma_move(struct ACB *acb,
102 struct scsi_pkt *pkt, struct buf *bp);
103 static void arcmsr_handle_iop_bus_hold(struct ACB *acb);
104 static void arcmsr_hbc_message_isr(struct ACB *acb);
105 static void arcmsr_pcidev_disattach(struct ACB *acb);
106 static void arcmsr_ccb_complete(struct CCB *ccb, int flag);
107 static void arcmsr_iop_init(struct ACB *acb);
108 static void arcmsr_iop_parking(struct ACB *acb);
109 /*PRINTFLIKE3*/
110 static void arcmsr_log(struct ACB *acb, int level, char *fmt, ...);
111 /*PRINTFLIKE2*/
112 static void arcmsr_warn(struct ACB *acb, char *fmt, ...);
113 static void arcmsr_mutex_init(struct ACB *acb);
114 static void arcmsr_remove_intr(struct ACB *acb);
115 static void arcmsr_ccbs_timeout(void* arg);
116 static void arcmsr_devMap_monitor(void* arg);
118 static void arcmsr_iop_message_read(struct ACB *acb);
119 static void arcmsr_free_ccb(struct CCB *ccb);
120 static void arcmsr_post_ioctldata2iop(struct ACB *acb);
121 static void arcmsr_report_sense_info(struct CCB *ccb);
122 static void arcmsr_init_list_head(struct list_head *list);
123 static void arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org);
124 static void arcmsr_done4abort_postqueue(struct ACB *acb);
125 static void arcmsr_list_add_tail(kmutex_t *list_lock,
126 struct list_head *new_one, struct list_head *head);
127 static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
128 static int arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt);
129 static int arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt);
130 static int arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb);
131 static int arcmsr_parse_devname(char *devnm, int *tgt, int *lun);
132 static int arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance);
133 static uint8_t arcmsr_iop_reset(struct ACB *acb);
134 static uint32_t arcmsr_disable_allintr(struct ACB *acb);
135 static uint32_t arcmsr_iop_confirm(struct ACB *acb);
136 static struct CCB *arcmsr_get_freeccb(struct ACB *acb);
137 static void arcmsr_flush_hba_cache(struct ACB *acb);
138 static void arcmsr_flush_hbb_cache(struct ACB *acb);
139 static void arcmsr_flush_hbc_cache(struct ACB *acb);
140 static void arcmsr_stop_hba_bgrb(struct ACB *acb);
141 static void arcmsr_stop_hbb_bgrb(struct ACB *acb);
142 static void arcmsr_stop_hbc_bgrb(struct ACB *acb);
143 static void arcmsr_start_hba_bgrb(struct ACB *acb);
144 static void arcmsr_start_hbb_bgrb(struct ACB *acb);
145 static void arcmsr_start_hbc_bgrb(struct ACB *acb);
146 static void arcmsr_mutex_destroy(struct ACB *acb);
147 static void arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
148 static void arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
149 static void arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
150 static void arcmsr_build_ccb(struct CCB *ccb);
151 static int arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
152 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
154 static dev_info_t *arcmsr_find_child(struct ACB *acb, uint16_t tgt,
155 uint8_t lun);
156 static struct QBUFFER *arcmsr_get_iop_rqbuffer(struct ACB *acb);
157
158 static int arcmsr_add_intr(struct ACB *, int);
159
160 static void *arcmsr_soft_state = NULL;
161
162 static ddi_dma_attr_t arcmsr_dma_attr = {
163 DMA_ATTR_V0, /* ddi_dma_attr version */
164 0, /* low DMA address range */
165 0xffffffffffffffffull, /* high DMA address range */
166 0x00ffffff, /* DMA counter register upper bound */
167 1, /* DMA address alignment requirements */
168 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* burst sizes */
169 1, /* minimum effective DMA size */
170 ARCMSR_MAX_XFER_LEN, /* maximum DMA xfer size */
171 /*
172 * The dma_attr_seg field supplies the limit of each Scatter/Gather
173 * list element's "address+length". The Intel IOP331 cannot use
174 * segments over the 4G boundary due to segment boundary restrictions
175 */
176 0xffffffff,
177 ARCMSR_MAX_SG_ENTRIES, /* scatter/gather list count */
178 1, /* device granularity */
179 DDI_DMA_FORCE_PHYSICAL /* Bus specific DMA flags */
180 };
181
182
183 static ddi_dma_attr_t arcmsr_ccb_attr = {
184 DMA_ATTR_V0, /* ddi_dma_attr version */
185 0, /* low DMA address range */
186 0xffffffff, /* high DMA address range */
187 0x00ffffff, /* DMA counter register upper bound */
188 1, /* default byte alignment */
189 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* burst sizes */
190 1, /* minimum effective DMA size */
191 0xffffffff, /* maximum DMA xfer size */
192 0x00ffffff, /* max segment size, segment boundary restrictions */
193 1, /* scatter/gather list count */
194 1, /* device granularity */
195 DDI_DMA_FORCE_PHYSICAL /* Bus specific DMA flags */
196 };
197
198
199 static struct cb_ops arcmsr_cb_ops = {
200 scsi_hba_open, /* open(9E) */
201 scsi_hba_close, /* close(9E) */
202 nodev, /* strategy(9E), returns ENXIO */
203 nodev, /* print(9E) */
204 nodev, /* dump(9E) Cannot be used as a dump device */
205 nodev, /* read(9E) */
206 nodev, /* write(9E) */
207 arcmsr_cb_ioctl, /* ioctl(9E) */
208 nodev, /* devmap(9E) */
209 nodev, /* mmap(9E) */
210 nodev, /* segmap(9E) */
211 NULL, /* chpoll(9E) returns ENXIO */
212 nodev, /* prop_op(9E) */
213 NULL, /* streamtab(9S) */
214 D_MP,
215 CB_REV,
216 nodev, /* aread(9E) */
217 nodev /* awrite(9E) */
218 };
219
220 static struct dev_ops arcmsr_ops = {
221 DEVO_REV, /* devo_rev */
222 0, /* reference count */
223 nodev, /* getinfo */
224 nulldev, /* identify */
225 nulldev, /* probe */
226 arcmsr_attach, /* attach */
227 arcmsr_detach, /* detach */
228 arcmsr_reset, /* reset, shutdown, reboot notify */
229 &arcmsr_cb_ops, /* driver operations */
230 NULL, /* bus operations */
231 NULL /* power */
232 };
233
234 static struct modldrv arcmsr_modldrv = {
235 &mod_driverops, /* Type of module. This is a driver. */
236 "ARECA RAID Controller", /* module name, from arcmsr.h */
237 &arcmsr_ops, /* driver ops */
238 };
239
240 static struct modlinkage arcmsr_modlinkage = {
241 MODREV_1,
242 { &arcmsr_modldrv, NULL }
243 };
244
245
246 int
247 _init(void)
248 {
249 int ret;
250
251 ret = ddi_soft_state_init(&arcmsr_soft_state, sizeof (struct ACB), 1);
252 if (ret != 0) {
253 return (ret);
254 }
255 if ((ret = scsi_hba_init(&arcmsr_modlinkage)) != 0) {
256 ddi_soft_state_fini(&arcmsr_soft_state);
257 return (ret);
258 }
259
260 if ((ret = mod_install(&arcmsr_modlinkage)) != 0) {
261 scsi_hba_fini(&arcmsr_modlinkage);
262 if (arcmsr_soft_state != NULL) {
263 ddi_soft_state_fini(&arcmsr_soft_state);
264 }
265 }
266 return (ret);
267 }
268
269
270 int
271 _fini(void)
272 {
273 int ret;
274
275 ret = mod_remove(&arcmsr_modlinkage);
276 if (ret == 0) {
277 /* if ret == 0, the driver can be removed */
278 scsi_hba_fini(&arcmsr_modlinkage);
279 if (arcmsr_soft_state != NULL) {
280 ddi_soft_state_fini(&arcmsr_soft_state);
281 }
282 }
283 return (ret);
284 }
285
286
287 int
288 _info(struct modinfo *modinfop)
289 {
290 return (mod_info(&arcmsr_modlinkage, modinfop));
291 }
292
293
294 /*
295 * Function: arcmsr_attach(9E)
296 * Description: Set up all device state and allocate data structures,
297 * mutexes, condition variables, etc. for device operation.
298 * Set mt_attr property for driver to indicate MT-safety.
299 * Add interrupts needed.
300 * Input: dev_info_t *dev_info, ddi_attach_cmd_t cmd
301 * Output: Return DDI_SUCCESS if device is ready,
302 * else return DDI_FAILURE
303 */
304 static int
305 arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd)
306 {
307 scsi_hba_tran_t *hba_trans;
308 struct ACB *acb;
309
310 switch (cmd) {
311 case DDI_ATTACH:
312 return (arcmsr_do_ddi_attach(dev_info,
313 ddi_get_instance(dev_info)));
314 case DDI_RESUME:
315 /*
316 * There is no hardware state to restore and no
317 * timeouts to resume, since we didn't DDI_SUSPEND with
318 * active cmds or active timeouts. We just need to
319 * unblock waiting threads and restart I/O.
320 */
321 hba_trans = ddi_get_driver_private(dev_info);
322 if (hba_trans == NULL) {
323 return (DDI_FAILURE);
324 }
325 acb = hba_trans->tran_hba_private;
326 mutex_enter(&acb->acb_mutex);
327 arcmsr_iop_init(acb);
328
329 /* restart ccbs "timeout" watchdog */
330 acb->timeout_count = 0;
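/*
 * drv_usectohz(1000000) converts one second of microseconds to
 * clock ticks, so both watchdog intervals below are in seconds
 */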
331 acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
332 (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
333 acb->timeout_sc_id = timeout(arcmsr_devMap_monitor,
334 (caddr_t)acb,
335 (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
336 mutex_exit(&acb->acb_mutex);
337 return (DDI_SUCCESS);
338
339 default:
340 return (DDI_FAILURE);
341 }
342 }
343
344 /*
345 * Function: arcmsr_detach(9E)
346 * Description: Remove all device allocation and system resources, disable
347 * device interrupt.
348 * Input: dev_info_t *dev_info
349 * ddi_detach_cmd_t cmd
350 * Output: Return DDI_SUCCESS if done,
351 * else return DDI_FAILURE
352 */
353 static int
354 arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd)
355 {
356 int instance;
357 struct ACB *acb;
358
359
360 instance = ddi_get_instance(dev_info);
361 acb = ddi_get_soft_state(arcmsr_soft_state, instance);
362 if (acb == NULL)
363 return (DDI_FAILURE);
364
365 switch (cmd) {
366 case DDI_DETACH:
367 mutex_enter(&acb->acb_mutex);
368 if (acb->timeout_id != 0) {
369 mutex_exit(&acb->acb_mutex);
370 (void) untimeout(acb->timeout_id);
371 mutex_enter(&acb->acb_mutex);
372 acb->timeout_id = 0;
373 }
374 if (acb->timeout_sc_id != 0) {
375 mutex_exit(&acb->acb_mutex);
376 (void) untimeout(acb->timeout_sc_id);
377 mutex_enter(&acb->acb_mutex);
378 acb->timeout_sc_id = 0;
379 }
380 arcmsr_pcidev_disattach(acb);
381 /* Remove interrupt set up by ddi_add_intr */
382 arcmsr_remove_intr(acb);
383 /* unbind mapping object to handle */
384 (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
385 /* Free ccb pool memory */
386 ddi_dma_mem_free(&acb->ccbs_acc_handle);
387 /* Free DMA handle */
388 ddi_dma_free_handle(&acb->ccbs_pool_handle);
389 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
390 if (scsi_hba_detach(dev_info) != DDI_SUCCESS)
391 arcmsr_warn(acb, "Unable to detach instance cleanly "
392 "(should not happen)");
393 /* free scsi_hba_transport from scsi_hba_tran_alloc */
394 scsi_hba_tran_free(acb->scsi_hba_transport);
395 ddi_taskq_destroy(acb->taskq);
396 ddi_prop_remove_all(dev_info);
397 mutex_exit(&acb->acb_mutex);
398 arcmsr_mutex_destroy(acb);
399 pci_config_teardown(&acb->pci_acc_handle);
400 ddi_set_driver_private(dev_info, NULL);
401 ddi_soft_state_free(arcmsr_soft_state, instance);
402 return (DDI_SUCCESS);
403 case DDI_SUSPEND:
404 mutex_enter(&acb->acb_mutex);
405 if (acb->timeout_id != 0) {
406 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
407 mutex_exit(&acb->acb_mutex);
408 (void) untimeout(acb->timeout_id);
410 mutex_enter(&acb->acb_mutex);
411 acb->timeout_id = 0;
412 }
413
414 if (acb->timeout_sc_id != 0) {
415 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
416 mutex_exit(&acb->acb_mutex);
417 (void) untimeout(acb->timeout_sc_id);
418 mutex_enter(&acb->acb_mutex);
419 acb->timeout_sc_id = 0;
420 }
421
422 /* disable all outbound interrupts */
423 (void) arcmsr_disable_allintr(acb);
424 /* stop adapter background rebuild */
425 switch (acb->adapter_type) {
426 case ACB_ADAPTER_TYPE_A:
427 arcmsr_stop_hba_bgrb(acb);
428 arcmsr_flush_hba_cache(acb);
429 break;
430
431 case ACB_ADAPTER_TYPE_B:
432 arcmsr_stop_hbb_bgrb(acb);
433 arcmsr_flush_hbb_cache(acb);
434 break;
435
436 case ACB_ADAPTER_TYPE_C:
437 arcmsr_stop_hbc_bgrb(acb);
438 arcmsr_flush_hbc_cache(acb);
439 break;
440 }
441 mutex_exit(&acb->acb_mutex);
442 return (DDI_SUCCESS);
443 default:
444 return (DDI_FAILURE);
445 }
446 }
447
448 static int
449 arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd)
450 {
451 struct ACB *acb;
452 scsi_hba_tran_t *scsi_hba_transport;
453 _NOTE(ARGUNUSED(cmd));
454
455 scsi_hba_transport = ddi_get_driver_private(resetdev);
456 if (scsi_hba_transport == NULL)
457 return (DDI_FAILURE);
458
459 acb = (struct ACB *)scsi_hba_transport->tran_hba_private;
460 if (!acb)
461 return (DDI_FAILURE);
462
463 arcmsr_pcidev_disattach(acb);
464
465 return (DDI_SUCCESS);
466 }
467
468 static int
469 arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg, int mode,
470 cred_t *credp, int *rvalp)
471 {
472 struct ACB *acb;
473 struct CMD_MESSAGE_FIELD *pktioctlfld;
474 int retvalue = 0;
475 int instance = MINOR2INST(getminor(dev));
476
477 if (instance < 0)
478 return (ENXIO);
479
480 if (secpolicy_sys_config(credp, B_FALSE) != 0)
481 return (EPERM);
482
483 acb = ddi_get_soft_state(arcmsr_soft_state, instance);
484 if (acb == NULL)
485 return (ENXIO);
486
487 pktioctlfld = kmem_zalloc(sizeof (struct CMD_MESSAGE_FIELD), KM_SLEEP);
488
489 mutex_enter(&acb->ioctl_mutex);
490 if (ddi_copyin((void *)arg, pktioctlfld,
491 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0) {
492 retvalue = ENXIO;
493 goto ioctl_out;
494 }
495
496 if (memcmp(pktioctlfld->cmdmessage.Signature, "ARCMSR", 6) != 0) {
497 /* validity check */
498 retvalue = ENXIO;
499 goto ioctl_out;
500 }
501
502 switch ((unsigned int)ioctl_cmd) {
503 case ARCMSR_MESSAGE_READ_RQBUFFER:
504 {
505 uint8_t *ver_addr;
506 uint8_t *pQbuffer, *ptmpQbuffer;
507 int32_t allxfer_len = 0;
508
509 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
510 ptmpQbuffer = ver_addr;
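/*
 * rqbuffer is a ring buffer: firstidx chases lastidx and both
 * wrap at ARCMSR_MAX_QBUFFER; drain it one byte at a time into
 * the staging buffer
 */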
511 while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
512 (allxfer_len < (MSGDATABUFLEN - 1))) {
513 /* copy READ QBUFFER to srb */
514 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
515 (void) memcpy(ptmpQbuffer, pQbuffer, 1);
516 acb->rqbuf_firstidx++;
517 /* wrap the index at the end of the ring */
518 acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
519 ptmpQbuffer++;
520 allxfer_len++;
521 }
522
523 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
524 struct QBUFFER *prbuffer;
525 uint8_t *pQbuffer;
526 uint8_t *iop_data;
527 int32_t iop_len;
528
529 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
530 prbuffer = arcmsr_get_iop_rqbuffer(acb);
531 iop_data = (uint8_t *)prbuffer->data;
532 iop_len = (int32_t)prbuffer->data_len;
533 /*
534 * this IOP data has no chance of overflowing the
535 * ring buffer again here, so just copy it in
536 */
537 while (iop_len > 0) {
538 pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
539 (void) memcpy(pQbuffer, iop_data, 1);
540 acb->rqbuf_lastidx++;
541 /* wrap the index at the end of the ring */
542 acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
543 iop_data++;
544 iop_len--;
545 }
546 /* let IOP know data has been read */
547 arcmsr_iop_message_read(acb);
548 }
549 (void) memcpy(pktioctlfld->messagedatabuffer,
550 ver_addr, allxfer_len);
551 pktioctlfld->cmdmessage.Length = allxfer_len;
552 pktioctlfld->cmdmessage.ReturnCode =
553 ARCMSR_MESSAGE_RETURNCODE_OK;
554
555 if (ddi_copyout(pktioctlfld, (void *)arg,
556 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
557 retvalue = ENXIO;
558
559 kmem_free(ver_addr, MSGDATABUFLEN);
560 break;
561 }
562
563 case ARCMSR_MESSAGE_WRITE_WQBUFFER:
564 {
565 uint8_t *ver_addr;
566 int32_t my_empty_len, user_len;
567 int32_t wqbuf_firstidx, wqbuf_lastidx;
568 uint8_t *pQbuffer, *ptmpuserbuffer;
569
570 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
571
572 ptmpuserbuffer = ver_addr;
573 user_len = min(pktioctlfld->cmdmessage.Length,
574 MSGDATABUFLEN);
575 (void) memcpy(ptmpuserbuffer,
576 pktioctlfld->messagedatabuffer, user_len);
577 /*
578 * check ifdata xfer length of this request will overflow
579 * my array qbuffer
580 */
581 wqbuf_lastidx = acb->wqbuf_lastidx;
582 wqbuf_firstidx = acb->wqbuf_firstidx;
583 if (wqbuf_lastidx != wqbuf_firstidx) {
584 arcmsr_post_ioctldata2iop(acb);
585 pktioctlfld->cmdmessage.ReturnCode =
586 ARCMSR_MESSAGE_RETURNCODE_ERROR;
587 } else {
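/*
 * compute the free space in the ring; this mask trick assumes
 * ARCMSR_MAX_QBUFFER is a power of two
 */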
588 my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1)
589 & (ARCMSR_MAX_QBUFFER - 1);
590 if (my_empty_len >= user_len) {
591 while (user_len > 0) {
592 /* copy srb data to wqbuffer */
593 pQbuffer =
594 &acb->wqbuffer[acb->wqbuf_lastidx];
595 (void) memcpy(pQbuffer,
596 ptmpuserbuffer, 1);
597 acb->wqbuf_lastidx++;
598 /* wrap the index at the end of the ring */
599 acb->wqbuf_lastidx %=
600 ARCMSR_MAX_QBUFFER;
601 ptmpuserbuffer++;
602 user_len--;
603 }
604 /* post first Qbuffer */
605 if (acb->acb_flags &
606 ACB_F_MESSAGE_WQBUFFER_CLEARED) {
607 acb->acb_flags &=
608 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
609 arcmsr_post_ioctldata2iop(acb);
610 }
611 pktioctlfld->cmdmessage.ReturnCode =
612 ARCMSR_MESSAGE_RETURNCODE_OK;
613 } else {
614 pktioctlfld->cmdmessage.ReturnCode =
615 ARCMSR_MESSAGE_RETURNCODE_ERROR;
616 }
617 }
618 if (ddi_copyout(pktioctlfld, (void *)arg,
619 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
620 retvalue = ENXIO;
621
622 kmem_free(ver_addr, MSGDATABUFLEN);
623 break;
624 }
625
626 case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
627 {
628 uint8_t *pQbuffer = acb->rqbuffer;
629
630 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
631 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
632 arcmsr_iop_message_read(acb);
633 }
634 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
635 acb->rqbuf_firstidx = 0;
636 acb->rqbuf_lastidx = 0;
637 bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
638 /* report success */
639 pktioctlfld->cmdmessage.ReturnCode =
640 ARCMSR_MESSAGE_RETURNCODE_OK;
641
642 if (ddi_copyout(pktioctlfld, (void *)arg,
643 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
644 retvalue = ENXIO;
645 break;
646 }
647
648 case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
649 {
650 uint8_t *pQbuffer = acb->wqbuffer;
651
652 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
653 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
654 arcmsr_iop_message_read(acb);
655 }
656 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
657 ACB_F_MESSAGE_WQBUFFER_READ);
658 acb->wqbuf_firstidx = 0;
659 acb->wqbuf_lastidx = 0;
660 bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
661 /* report success */
662 pktioctlfld->cmdmessage.ReturnCode =
663 ARCMSR_MESSAGE_RETURNCODE_OK;
664
665 if (ddi_copyout(pktioctlfld, (void *)arg,
666 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
667 retvalue = ENXIO;
668 break;
669 }
670
671 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
672 {
673 uint8_t *pQbuffer;
674
675 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
676 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
677 arcmsr_iop_message_read(acb);
678 }
679 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
680 ACB_F_MESSAGE_RQBUFFER_CLEARED |
681 ACB_F_MESSAGE_WQBUFFER_READ);
682 acb->rqbuf_firstidx = 0;
683 acb->rqbuf_lastidx = 0;
684 acb->wqbuf_firstidx = 0;
685 acb->wqbuf_lastidx = 0;
686 pQbuffer = acb->rqbuffer;
687 bzero(pQbuffer, sizeof (struct QBUFFER));
688 pQbuffer = acb->wqbuffer;
689 bzero(pQbuffer, sizeof (struct QBUFFER));
690 /* report success */
691 pktioctlfld->cmdmessage.ReturnCode =
692 ARCMSR_MESSAGE_RETURNCODE_OK;
693 if (ddi_copyout(pktioctlfld, (void *)arg,
694 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
695 retvalue = ENXIO;
696 break;
697 }
698
699 case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
700 pktioctlfld->cmdmessage.ReturnCode =
701 ARCMSR_MESSAGE_RETURNCODE_3F;
702 if (ddi_copyout(pktioctlfld, (void *)arg,
703 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
704 retvalue = ENXIO;
705 break;
706
707 /* Not supported: ARCMSR_MESSAGE_SAY_HELLO */
708 case ARCMSR_MESSAGE_SAY_GOODBYE:
709 arcmsr_iop_parking(acb);
710 break;
711
712 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
713 switch (acb->adapter_type) {
714 case ACB_ADAPTER_TYPE_A:
715 arcmsr_flush_hba_cache(acb);
716 break;
717 case ACB_ADAPTER_TYPE_B:
718 arcmsr_flush_hbb_cache(acb);
719 break;
720 case ACB_ADAPTER_TYPE_C:
721 arcmsr_flush_hbc_cache(acb);
722 break;
723 }
724 break;
725
726 default:
727 mutex_exit(&acb->ioctl_mutex);
728 kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
729 return (scsi_hba_ioctl(dev, ioctl_cmd, arg, mode, credp,
730 rvalp));
731 }
732
733 ioctl_out:
734 kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
735 mutex_exit(&acb->ioctl_mutex);
736
737 return (retvalue);
738 }
739
740
741 /*
742 * Function: arcmsr_tran_tgt_init
743 * Description: Called when initializing a target device instance. If
744 * no per-target initialization is required, the HBA
745 * may leave tran_tgt_init set to NULL
746 * Input:
747 * dev_info_t *host_dev_info,
748 * dev_info_t *target_dev_info,
749 * scsi_hba_tran_t *tran,
750 * struct scsi_device *sd
751 *
752 * Return: DDI_SUCCESS if success, else return DDI_FAILURE
753 *
754 * This entry point enables the HBA to allocate and/or initialize any per-
755 * target resources.
756 * It also enables the HBA to qualify the device's address as valid and
757 * supportable for that particular HBA.
758 * By returning DDI_FAILURE, the instance of the target driver for that
759 * device will not be probed or attached.
760 * This entry point is not required, and if none is supplied,
761 * the framework will attempt to probe and attach all possible instances
762 * of the appropriate target drivers.
763 */
764 static int
765 arcmsr_tran_tgt_init(dev_info_t *host_dev_info, dev_info_t *target_dev_info,
766 scsi_hba_tran_t *tran, struct scsi_device *sd)
767 {
768 uint16_t target;
769 uint8_t lun;
770 struct ACB *acb = tran->tran_hba_private;
771
772 _NOTE(ARGUNUSED(host_dev_info))
773
774 target = sd->sd_address.a_target;
775 lun = sd->sd_address.a_lun;
776 if ((target >= ARCMSR_MAX_TARGETID) || (lun >= ARCMSR_MAX_TARGETLUN)) {
777 return (DDI_FAILURE);
778 }
779
780
781 if (ndi_dev_is_persistent_node(target_dev_info) == 0) {
782 /*
783 * If no persistent node exists, we don't allow a .conf node
784 * to be created.
785 */
786 if (arcmsr_find_child(acb, target, lun) != NULL) {
787 if ((ndi_merge_node(target_dev_info,
788 arcmsr_name_node) != DDI_SUCCESS)) {
789 return (DDI_SUCCESS);
790 }
791 }
792 return (DDI_FAILURE);
793 }
794
795 return (DDI_SUCCESS);
796 }
797
798 /*
799 * Function: arcmsr_tran_getcap(9E)
800 * Description: Get the capability named, and return its value.
801 * Return Values: current value of capability, if defined
802 * -1 if capability is not defined
803 * ------------------------------------------------------
804 * Common Capability Strings Array
805 * ------------------------------------------------------
806 * #define SCSI_CAP_DMA_MAX 0
807 * #define SCSI_CAP_MSG_OUT 1
808 * #define SCSI_CAP_DISCONNECT 2
809 * #define SCSI_CAP_SYNCHRONOUS 3
810 * #define SCSI_CAP_WIDE_XFER 4
811 * #define SCSI_CAP_PARITY 5
812 * #define SCSI_CAP_INITIATOR_ID 6
813 * #define SCSI_CAP_UNTAGGED_QING 7
814 * #define SCSI_CAP_TAGGED_QING 8
815 * #define SCSI_CAP_ARQ 9
816 * #define SCSI_CAP_LINKED_CMDS 10 a
817 * #define SCSI_CAP_SECTOR_SIZE 11 b
818 * #define SCSI_CAP_TOTAL_SECTORS 12 c
819 * #define SCSI_CAP_GEOMETRY 13 d
820 * #define SCSI_CAP_RESET_NOTIFICATION 14 e
821 * #define SCSI_CAP_QFULL_RETRIES 15 f
822 * #define SCSI_CAP_QFULL_RETRY_INTERVAL 16 10
823 * #define SCSI_CAP_SCSI_VERSION 17 11
824 * #define SCSI_CAP_INTERCONNECT_TYPE 18 12
825 * #define SCSI_CAP_LUN_RESET 19 13
826 */
827 static int
828 arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
829 {
830 int capability = 0;
831 struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
832
833 if (cap == NULL || whom == 0) {
834 return (DDI_FAILURE);
835 }
836
837 mutex_enter(&acb->acb_mutex);
838 if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
839 mutex_exit(&acb->acb_mutex);
840 return (-1);
841 }
842 switch (scsi_hba_lookup_capstr(cap)) {
843 case SCSI_CAP_MSG_OUT:
844 case SCSI_CAP_DISCONNECT:
845 case SCSI_CAP_WIDE_XFER:
846 case SCSI_CAP_TAGGED_QING:
847 case SCSI_CAP_UNTAGGED_QING:
848 case SCSI_CAP_PARITY:
849 case SCSI_CAP_ARQ:
850 capability = 1;
851 break;
852 case SCSI_CAP_SECTOR_SIZE:
853 capability = ARCMSR_DEV_SECTOR_SIZE;
854 break;
855 case SCSI_CAP_DMA_MAX:
856 /* Limit to 16MB max transfer */
857 capability = ARCMSR_MAX_XFER_LEN;
858 break;
859 case SCSI_CAP_INITIATOR_ID:
860 capability = ARCMSR_SCSI_INITIATOR_ID;
861 break;
862 case SCSI_CAP_GEOMETRY:
863 /* 255 heads, 63 sectors per track */
864 capability = (255 << 16) | 63;
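/*
 * a target driver decodes this packed value as, e.g.:
 *   heads = (geometry >> 16) & 0xffff;
 *   sectors = geometry & 0xffff;
 */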
865 break;
866 default:
867 capability = -1;
868 break;
869 }
870 mutex_exit(&acb->acb_mutex);
871 return (capability);
872 }
873
874 /*
875 * Function: arcmsr_tran_setcap(9E)
876 * Description: Set the specific capability.
877 * Return Values: 1 - capability exists and can be set to new value
878 * 0 - capability could not be set to new value
879 * -1 - no such capability
880 */
881 static int
882 arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
883 {
885
886 int supported = 0;
887 struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
888
889 if (cap == NULL || whom == 0) {
890 return (-1);
891 }
892
893 mutex_enter(&acb->acb_mutex);
894 if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
895 mutex_exit(&acb->acb_mutex);
896 return (-1);
897 }
898 switch (supported = scsi_hba_lookup_capstr(cap)) {
899 case SCSI_CAP_ARQ: /* 9 auto request sense */
900 case SCSI_CAP_UNTAGGED_QING: /* 7 */
901 case SCSI_CAP_TAGGED_QING: /* 8 */
902 /* these are always on, and cannot be turned off */
903 supported = (value == 1) ? 1 : 0;
904 break;
905 case SCSI_CAP_TOTAL_SECTORS: /* c */
906 supported = 1;
907 break;
908 case SCSI_CAP_DISCONNECT: /* 2 */
909 case SCSI_CAP_WIDE_XFER: /* 4 */
910 case SCSI_CAP_INITIATOR_ID: /* 6 */
911 case SCSI_CAP_DMA_MAX: /* 0 */
912 case SCSI_CAP_MSG_OUT: /* 1 */
913 case SCSI_CAP_PARITY: /* 5 */
914 case SCSI_CAP_LINKED_CMDS: /* a */
915 case SCSI_CAP_RESET_NOTIFICATION: /* e */
916 case SCSI_CAP_SECTOR_SIZE: /* b */
917 /* these are not settable */
918 supported = 0;
919 break;
920 default:
921 supported = -1;
922 break;
923 }
924 mutex_exit(&acb->acb_mutex);
925 return (supported);
926 }
927
928
929 /*
930 * Function: arcmsr_tran_init_pkt
931 * Return Values: pointer to scsi_pkt, or NULL
932 * Description: simultaneously allocate both a scsi_pkt(9S) structure and
933 * DMA resources for that pkt.
934 * Called by kernel on behalf of a target driver
935 * calling scsi_init_pkt(9F).
936 * Refer to tran_init_pkt(9E) man page
937 * Context: Can be called from different kernel process threads.
938 * Can be called by interrupt thread.
939 * Allocates SCSI packet and DMA resources
940 */
941 static struct scsi_pkt *
942 arcmsr_tran_init_pkt(struct scsi_address *ap,
943 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
944 int tgtlen, int flags, int (*callback)(), caddr_t arg)
945 {
946 struct CCB *ccb;
947 struct ARCMSR_CDB *arcmsr_cdb;
948 struct ACB *acb;
949 int old_pkt_flag;
950
951 acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
952
953 if (acb->acb_flags & ACB_F_BUS_RESET) {
954 return (NULL);
955 }
956 if (pkt == NULL) {
957 /* get free CCB */
958 (void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
959 DDI_DMA_SYNC_FORKERNEL);
960 ccb = arcmsr_get_freeccb(acb);
961 if (ccb == (struct CCB *)NULL) {
962 return (NULL);
963 }
964
965 if (statuslen < sizeof (struct scsi_arq_status)) {
966 statuslen = sizeof (struct scsi_arq_status);
967 }
968 pkt = scsi_hba_pkt_alloc(acb->dev_info, ap, cmdlen,
969 statuslen, tgtlen, sizeof (void *), callback, arg);
970 if (pkt == NULL) {
971 arcmsr_warn(acb, "scsi pkt allocation failed");
972 arcmsr_free_ccb(ccb);
973 return (NULL);
974 }
975 /* Initialize CCB */
976 ccb->pkt = pkt;
977 ccb->pkt_dma_handle = NULL;
978 /* record how many sg are needed to xfer on this pkt */
979 ccb->pkt_ncookies = 0;
980 /* record how many sg we got from this window */
981 ccb->pkt_cookie = 0;
982 /* record how many windows have partial dma map set */
983 ccb->pkt_nwin = 0;
984 /* record current sg window position */
985 ccb->pkt_curwin = 0;
986 ccb->pkt_dma_len = 0;
987 ccb->pkt_dma_offset = 0;
988 ccb->resid_dmacookie.dmac_size = 0;
989
990 /*
991 * keep this buf pointer; arcmsr_tran_start() uses it to fake
992 * up responses for the virtual device at target 16
993 */
994 ccb->bp = bp;
995
996 /* Initialize arcmsr_cdb */
997 arcmsr_cdb = &ccb->arcmsr_cdb;
998 bzero(arcmsr_cdb, sizeof (struct ARCMSR_CDB));
999 arcmsr_cdb->Bus = 0;
1000 arcmsr_cdb->Function = 1;
1001 arcmsr_cdb->LUN = ap->a_lun;
1002 arcmsr_cdb->TargetID = ap->a_target;
1003 arcmsr_cdb->CdbLength = (uint8_t)cmdlen;
1004 arcmsr_cdb->Context = (uintptr_t)arcmsr_cdb;
1005
1006 /* Fill in the rest of the structure */
1007 pkt->pkt_ha_private = ccb;
1008 pkt->pkt_address = *ap;
1009 pkt->pkt_comp = NULL;
1010 pkt->pkt_flags = 0;
1011 pkt->pkt_time = 0;
1012 pkt->pkt_resid = 0;
1013 pkt->pkt_statistics = 0;
1014 pkt->pkt_reason = 0;
1015 old_pkt_flag = 0;
1016 } else {
1017 ccb = pkt->pkt_ha_private;
1018 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
1019 if (!(ccb->ccb_state & ARCMSR_CCB_BACK)) {
1020 return (NULL);
1021 }
1022 }
1023
1024 /*
1025 * you cannot update CdbLength with cmdlen here, it would
1026 * cause a data compare error
1027 */
1028 ccb->ccb_state = ARCMSR_CCB_UNBUILD;
1029 old_pkt_flag = 1;
1030 }
1031
1032 /* Second step : dma allocation/move */
1033 if (bp && bp->b_bcount != 0) {
1034 /*
1035 * The system may have a large run of data to transfer,
1036 * anywhere from 20 bytes to 819200 bytes.
1037 * arcmsr_dma_alloc() sets up pkt_dma_handle (non-NULL)
1038 * for the whole run; the transfer is then carried out
1039 * by a series of READ or WRITE SCSI commands until the
1040 * entire run has been moved.
1041 * arcmsr_dma_move() repeats the action, reusing the
1042 * same ccb, until the whole transfer is complete.
1043 * After arcmsr_tran_init_pkt() returns, the Solaris
1044 * kernel uses pkt_resid and b_bcount to decide which
1045 * SCSI command descriptor (and what data length) the
1046 * following arcmsr_tran_start() CDB should use.
1047 *
1048 * Each transfer should be aligned on a 512 byte boundary
1049 */
1050 if (ccb->pkt_dma_handle == NULL) {
1051 if (arcmsr_dma_alloc(acb, pkt, bp, flags, callback) ==
1052 DDI_FAILURE) {
1053 /*
1054 * the HBA driver is unable to allocate DMA
1055 * resources, it must free the allocated
1056 * scsi_pkt(9S) before returning
1057 */
1058 arcmsr_warn(acb, "dma allocation failure");
1059 if (old_pkt_flag == 0) {
1060 arcmsr_warn(acb, "dma "
1061 "allocation failed to free "
1062 "scsi hba pkt");
1063 arcmsr_free_ccb(ccb);
1064 scsi_hba_pkt_free(ap, pkt);
1065 }
1066 return (NULL);
1067 }
1068 } else {
1069 /* DMA resources to next DMA window, for old pkt */
1070 if (arcmsr_dma_move(acb, pkt, bp) == DDI_FAILURE) {
1071 arcmsr_warn(acb, "dma move failed");
1072 return (NULL);
1073 }
1074 }
1075 } else {
1076 pkt->pkt_resid = 0;
1077 }
1078 return (pkt);
1079 }
1080
1081 /*
1082 * Function: arcmsr_tran_start(9E)
1083 * Description: Transport the command in pktp to the target device.
1084 * The command is not finished when this returns, only
1085 * sent to the target; arcmsr_intr_handler will call
1086 * scsi_hba_pkt_comp(pktp) when the target device is done.
1087 *
1088 * Input: struct scsi_address *ap, struct scsi_pkt *pktp
1089 * Output: TRAN_ACCEPT if pkt is OK and the driver is not busy
1090 * TRAN_BUSY if the driver is busy
1091 * TRAN_BADPKT if pkt is invalid
1092 */
1093 static int
1094 arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1095 {
1096 struct ACB *acb;
1097 struct CCB *ccb;
1098 int target = ap->a_target;
1099 int lun = ap->a_lun;
1100
1101 acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
1102 ccb = pkt->pkt_ha_private;
1103 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1104
1105 if ((ccb->ccb_flags & CCB_FLAG_DMAVALID) &&
1106 (ccb->ccb_flags & CCB_FLAG_DMACONSISTENT))
1107 (void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1108 DDI_DMA_SYNC_FORDEV);
1109
1110 if (ccb->ccb_state == ARCMSR_CCB_UNBUILD)
1111 arcmsr_build_ccb(ccb);
1112
1113 if (acb->acb_flags & ACB_F_BUS_RESET) {
1114 pkt->pkt_reason = CMD_RESET;
1115 pkt->pkt_statistics |= STAT_BUS_RESET;
1116 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1117 STATE_SENT_CMD | STATE_GOT_STATUS);
1118 if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1119 (pkt->pkt_state & STATE_XFERRED_DATA))
1120 (void) ddi_dma_sync(ccb->pkt_dma_handle,
1121 0, 0, DDI_DMA_SYNC_FORCPU);
1122
1123 scsi_hba_pkt_comp(pkt);
1124 return (TRAN_ACCEPT);
1125 }
1126
1127 /* IMPORTANT: Target 16 is a virtual device for iop message transfer */
1128 if (target == 16) {
1129
1130 struct buf *bp = ccb->bp;
1131 uint8_t scsicmd = pkt->pkt_cdbp[0];
1132
1133 switch (scsicmd) {
1134 case SCMD_INQUIRY: {
1135 if (lun != 0) {
1136 ccb->pkt->pkt_reason = CMD_TIMEOUT;
1137 ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1138 arcmsr_ccb_complete(ccb, 0);
1139 return (TRAN_ACCEPT);
1140 }
1141
1142 if (bp && bp->b_un.b_addr && bp->b_bcount) {
1143 uint8_t inqdata[36] = {0};
1144
1145 /* EVPD and page codes are not supported */
1146 if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[2]) {
1147 inqdata[1] = 0xFF;
1148 inqdata[2] = 0x00;
1149 } else {
1150 /* Periph Qualifier & Periph Dev Type */
1151 inqdata[0] = DTYPE_PROCESSOR;
1152 /* rem media bit & Dev Type Modifier */
1153 inqdata[1] = 0;
1154 /* ISO, ECMA, & ANSI versions */
1155 inqdata[2] = 0;
1156 inqdata[3] = 0;
1157 /* length of additional data */
1158 inqdata[4] = 31;
1159 /* Vendor Identification */
1160 bcopy("Areca ", &inqdata[8], VIDLEN);
1161 /* Product Identification */
1162 bcopy("RAID controller ", &inqdata[16],
1163 PIDLEN);
1164 /* Product Revision */
1165 bcopy("R001", &inqdata[32], REVLEN);
1166 if (bp->b_flags & (B_PHYS | B_PAGEIO))
1167 bp_mapin(bp);
1168
1169 (void) memcpy(bp->b_un.b_addr,
1170 inqdata, sizeof (inqdata));
1171 }
1172 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1173 }
1174 arcmsr_ccb_complete(ccb, 0);
1175 return (TRAN_ACCEPT);
1176 }
1177 case SCMD_WRITE_BUFFER:
1178 case SCMD_READ_BUFFER: {
1179 if (arcmsr_iop_message_xfer(acb, pkt)) {
1180 /* flag a transport error so the command is retried */
1181 ccb->pkt->pkt_reason = CMD_TRAN_ERR;
1182 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
1183 }
1184 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1185 arcmsr_ccb_complete(ccb, 0);
1186 return (TRAN_ACCEPT);
1187 }
1188 default:
1189 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1190 arcmsr_ccb_complete(ccb, 0);
1191 return (TRAN_ACCEPT);
1192 }
1193 }
1194
1195 if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1196 uint8_t block_cmd;
1197
1198 block_cmd = pkt->pkt_cdbp[0] & 0x0f;
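/*
 * the low opcode nibble 0x8/0xa matches the READ and WRITE
 * command families, e.g. READ(6)/(10)/(16) = 0x08/0x28/0x88
 * and WRITE(6)/(10)/(16) = 0x0a/0x2a/0x8a
 */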
1199 if (block_cmd == 0x08 || block_cmd == 0x0a) {
1200 pkt->pkt_reason = CMD_TIMEOUT;
1201 pkt->pkt_statistics |= STAT_TIMEOUT;
1202 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1203 STATE_SENT_CMD | STATE_GOT_STATUS);
1204 if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1205 (pkt->pkt_state & STATE_XFERRED_DATA)) {
1206 (void) ddi_dma_sync(ccb->pkt_dma_handle,
1207 ccb->pkt_dma_offset,
1208 ccb->pkt_dma_len, DDI_DMA_SYNC_FORCPU);
1209 }
1210 scsi_hba_pkt_comp(pkt);
1211 return (TRAN_ACCEPT);
1212 }
1213 }
1214 mutex_enter(&acb->postq_mutex);
1215 if (acb->ccboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
1216 ccb->ccb_state = ARCMSR_CCB_RETRY;
1217 mutex_exit(&acb->postq_mutex);
1218 return (TRAN_BUSY);
1219 } else if (arcmsr_post_ccb(acb, ccb) == DDI_FAILURE) {
1220 arcmsr_warn(acb, "post ccb failure, ccboutstandingcount = %d",
1221 acb->ccboutstandingcount);
1222 mutex_exit(&acb->postq_mutex);
1223 return (TRAN_FATAL_ERROR);
1224 }
1225 mutex_exit(&acb->postq_mutex);
1226 return (TRAN_ACCEPT);
1227 }
1228
1229 /*
1230 * Function name: arcmsr_tran_destroy_pkt
1231 * Return Values: none
1232 * Description: Called by kernel on behalf of a target driver
1233 * calling scsi_destroy_pkt(9F).
1234 * Refer to tran_destroy_pkt(9E) man page
1235 * Context: Can be called from different kernel process threads.
1236 * Can be called by interrupt thread.
1237 */
1238 static void
1239 arcmsr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1240 {
1241 struct CCB *ccb = pkt->pkt_ha_private;
1242 ddi_dma_handle_t pkt_dma_handle;
1243 
1244 /* validate the ccb before dereferencing it */
1245 if ((ccb == NULL) || (ccb->pkt != pkt)) {
1246 return;
1247 }
1248 pkt_dma_handle = ccb->pkt_dma_handle;
1249 
1250 if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1251 ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1252 if (pkt_dma_handle) {
1253 (void) ddi_dma_unbind_handle(ccb->pkt_dma_handle);
1254 }
1255 }
1256 if (pkt_dma_handle) {
1257 (void) ddi_dma_free_handle(&pkt_dma_handle);
1258 }
1259 pkt->pkt_ha_private = NULL;
1260 if (ccb) {
1261 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
1262 if (ccb->ccb_state & ARCMSR_CCB_BACK) {
1263 arcmsr_free_ccb(ccb);
1264 } else {
1265 ccb->ccb_state |= ARCMSR_CCB_WAIT4_FREE;
1266 }
1267 } else {
1268 arcmsr_free_ccb(ccb);
1269 }
1270 }
1271 scsi_hba_pkt_free(ap, pkt);
1272 }
1273
1274 /*
1275 * Function name: arcmsr_tran_dmafree()
1276 * Return Values: none
1277 * Description: free dvma resources
1278 * Context: Can be called from different kernel process threads.
1279 * Can be called by interrupt thread.
1280 */
1281 static void
1282 arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1283 {
1284 struct CCB *ccb = pkt->pkt_ha_private;
1285
1286 if ((ccb == NULL) || (ccb->pkt != pkt)) {
1287 return;
1288 }
1289 if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1290 ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1291 if (ddi_dma_unbind_handle(ccb->pkt_dma_handle) != DDI_SUCCESS) {
1292 arcmsr_warn(ccb->acb, "ddi_dma_unbind_handle() failed "
1293 "(target %d lun %d)", ap->a_target, ap->a_lun);
1294 }
1295 ddi_dma_free_handle(&ccb->pkt_dma_handle);
1296 ccb->pkt_dma_handle = NULL;
1297 }
1298 }
1299
1300 /*
1301 * Function name: arcmsr_tran_sync_pkt()
1302 * Return Values: none
1303 * Description: sync dma
1304 * Context: Can be called from different kernel process threads.
1305 * Can be called by interrupt thread.
1306 */
1307 static void
1308 arcmsr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1309 {
1310 struct CCB *ccb;
1311
1312 ccb = pkt->pkt_ha_private;
1313 if ((ccb == NULL) || (ccb->pkt != pkt)) {
1314 return;
1315 }
1316 if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1317 if (ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1318 (ccb->ccb_flags & CCB_FLAG_DMAWRITE) ?
1319 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1320 DDI_SUCCESS) {
1321 arcmsr_warn(ccb->acb,
1322 "sync pkt failed for target %d lun %d",
1323 ap->a_target, ap->a_lun);
1324 }
1325 }
1326 }
1327
1328
1329 /*
1330 * Function: arcmsr_tran_abort(9E)
1331 * SCSA interface routine to abort pkt(s) in progress.
1332 * Aborts the pkt specified. If NULL pkt, aborts ALL pkts.
1333 * Output: Return 1 if success
1334 * Return 0 if failure
1335 */
1336 static int
1337 arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *abortpkt)
1338 {
1339 struct ACB *acb;
1340 int return_code;
1341
1342 acb = ap->a_hba_tran->tran_hba_private;
1343
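/* wait for all outstanding commands to drain, polling in 10 ms steps */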
1344 while (acb->ccboutstandingcount != 0) {
1345 drv_usecwait(10000);
1346 }
1347
1348 mutex_enter(&acb->isr_mutex);
1349 return_code = arcmsr_seek_cmd2abort(acb, abortpkt);
1350 mutex_exit(&acb->isr_mutex);
1351
1352 if (return_code != DDI_SUCCESS) {
1353 arcmsr_warn(acb, "abort command failed for target %d lun %d",
1354 ap->a_target, ap->a_lun);
1355 return (0);
1356 }
1357 return (1);
1358 }
1359
1360 /*
1361 * Function: arcmsr_tran_reset(9E)
1362 * SCSA interface routine to perform scsi resets on either
1363 * a specified target or the bus (default).
1364 * Output: Return 1 if success
1365 * Return 0 if failure
1366 */
1367 static int
1368 arcmsr_tran_reset(struct scsi_address *ap, int level)
1369 {
1370 struct ACB *acb;
1371 int return_code = 1;
1372 int target = ap->a_target;
1373 int lun = ap->a_lun;
1374
1375 /* Are we in the middle of dumping core? */
1376 if (ddi_in_panic())
1377 return (return_code);
1378
1379 acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
1380 mutex_enter(&acb->isr_mutex);
1381 switch (level) {
1382 case RESET_ALL: /* 0 */
1383 acb->num_resets++;
1384 acb->acb_flags |= ACB_F_BUS_RESET;
1385 if (acb->timeout_count) {
1386 if (arcmsr_iop_reset(acb) != 0) {
1387 arcmsr_handle_iop_bus_hold(acb);
1388 acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1389 }
1390 }
1391 acb->acb_flags &= ~ACB_F_BUS_RESET;
1392 break;
1393 case RESET_TARGET: /* 1 */
1394 if (acb->devstate[target][lun] == ARECA_RAID_GONE)
1395 return_code = 0;
1396 break;
1397 case RESET_BUS: /* 2 */
1398 return_code = 0;
1399 break;
1400 case RESET_LUN: /* 3 */
1401 return_code = 0;
1402 break;
1403 default:
1404 return_code = 0;
1405 }
1406 mutex_exit(&acb->isr_mutex);
1407 return (return_code);
1408 }
1409
1410 static int
1411 arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
1412 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
1413 {
1414 struct ACB *acb;
1415 int circ = 0;
1416 int rval = NDI_FAILURE; /* fail unmatched cases cleanly */
1417 int tgt, lun;
1418
1419 if ((acb = ddi_get_soft_state(arcmsr_soft_state,
1420 ddi_get_instance(parent))) == NULL)
1421 return (NDI_FAILURE);
1422
1423 ndi_devi_enter(parent, &circ);
1424 switch (op) {
1425 case BUS_CONFIG_ONE:
1426 if (arcmsr_parse_devname(arg, &tgt, &lun) != 0) {
1427 rval = NDI_FAILURE;
1428 break;
1429 }
1430 if (acb->device_map[tgt] & 1 << lun) {
1431 acb->devstate[tgt][lun] = ARECA_RAID_GOOD;
1432 rval = arcmsr_config_lun(acb, tgt, lun, childp);
1433 }
1434 break;
1435
1436 case BUS_CONFIG_DRIVER:
1437 case BUS_CONFIG_ALL:
1438 for (tgt = 0; tgt < ARCMSR_MAX_TARGETID; tgt++)
1439 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1440 if (acb->device_map[tgt] & 1 << lun) {
1441 acb->devstate[tgt][lun] =
1442 ARECA_RAID_GOOD;
1443 (void) arcmsr_config_lun(acb, tgt,
1444 lun, NULL);
1445 }
1446
1447 rval = NDI_SUCCESS;
1448 break;
1449 }
1450 if (rval == NDI_SUCCESS)
1451 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
1452 ndi_devi_exit(parent, circ);
1453 return (rval);
1454 }
1455
1456 /*
1457 * Function name: arcmsr_dma_alloc
1458 * Return Values: DDI_SUCCESS if successful, DDI_FAILURE on failure
1459 * Description: allocate DMA resources
1460 * Context: Can only be called from arcmsr_tran_init_pkt()
1461 * register struct scsi_address *ap = &((pkt)->pkt_address);
1462 */
1463 static int
1464 arcmsr_dma_alloc(struct ACB *acb, struct scsi_pkt *pkt,
1465 struct buf *bp, int flags, int (*callback)())
1466 {
1467 struct CCB *ccb = pkt->pkt_ha_private;
1468 int alloc_result, map_method, dma_flags;
1469 int resid = 0;
1470 int total_ccb_xferlen = 0;
1471 int (*cb)(caddr_t);
1472 uint8_t i;
1473
1474 /*
1475 * at this point the PKT SCSI CDB is empty, and dma xfer length
1476 * is bp->b_bcount
1477 */
1478
1479 if (bp->b_flags & B_READ) {
1480 ccb->ccb_flags &= ~CCB_FLAG_DMAWRITE;
1481 dma_flags = DDI_DMA_READ;
1482 } else {
1483 ccb->ccb_flags |= CCB_FLAG_DMAWRITE;
1484 dma_flags = DDI_DMA_WRITE;
1485 }
1486
1487 if (flags & PKT_CONSISTENT) {
1488 ccb->ccb_flags |= CCB_FLAG_DMACONSISTENT;
1489 dma_flags |= DDI_DMA_CONSISTENT;
1490 }
1491 if (flags & PKT_DMA_PARTIAL) {
1492 dma_flags |= DDI_DMA_PARTIAL;
1493 }
1494
1495 dma_flags |= DDI_DMA_REDZONE;
1496 cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
1497
1498 alloc_result = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_dma_attr,
1499 cb, 0, &ccb->pkt_dma_handle);
1500 if (alloc_result != DDI_SUCCESS) {
1501 arcmsr_warn(acb, "dma allocate failed (%x)", alloc_result);
1502 return (DDI_FAILURE);
1503 }
1504
1505 map_method = ddi_dma_buf_bind_handle(ccb->pkt_dma_handle,
1506 bp, dma_flags, cb, 0,
1507 &ccb->pkt_dmacookies[0], /* SG List pointer */
1508 &ccb->pkt_ncookies); /* number of sgl cookies */
1509
1510 switch (map_method) {
1511 case DDI_DMA_PARTIAL_MAP:
1512 /*
1513 * When main memory is larger than 4GB,
1514 * DDI_DMA_PARTIAL_MAP can be returned.
1515 *
1516 * We've already set DDI_DMA_PARTIAL in dma_flags,
1517 * so if it's now missing, there's something screwy
1518 * happening. We plow on....
1519 */
1520
1521 if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
1522 arcmsr_warn(acb,
1523 "dma partial mapping lost ...impossible case!");
1524 }
1525 if (ddi_dma_numwin(ccb->pkt_dma_handle, &ccb->pkt_nwin) ==
1526 DDI_FAILURE) {
1527 arcmsr_warn(acb, "ddi_dma_numwin() failed");
1528 }
1529
1530 if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
1531 &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
1532 &ccb->pkt_dmacookies[0], &ccb->pkt_ncookies) ==
1533 DDI_FAILURE) {
1534 arcmsr_warn(acb, "ddi_dma_getwin failed");
1535 }
1536
1537 i = 0;
1538 /* first cookie is accessed from ccb->pkt_dmacookies[0] */
1539 total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
1540 for (;;) {
1541 i++;
1542 if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1543 (i == ccb->pkt_ncookies) ||
1544 (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1545 break;
1546 }
1547 /*
1548 * next cookie will be retrieved from
1549 * ccb->pkt_dmacookies[i]
1550 */
1551 ddi_dma_nextcookie(ccb->pkt_dma_handle,
1552 &ccb->pkt_dmacookies[i]);
1553 total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1554 }
1555 ccb->pkt_cookie = i;
1556 ccb->arcmsr_cdb.sgcount = i;
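/*
 * trim the mapped length to a multiple of 512 bytes; the
 * trimmed tail is saved in resid_dmacookie so that
 * arcmsr_dma_move() can resume the transfer from there
 */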
1557 if (total_ccb_xferlen > 512) {
1558 resid = total_ccb_xferlen % 512;
1559 if (resid != 0) {
1560 i--;
1561 total_ccb_xferlen -= resid;
1562 /* modify last sg length */
1563 ccb->pkt_dmacookies[i].dmac_size =
1564 ccb->pkt_dmacookies[i].dmac_size - resid;
1565 ccb->resid_dmacookie.dmac_size = resid;
1566 ccb->resid_dmacookie.dmac_laddress =
1567 ccb->pkt_dmacookies[i].dmac_laddress +
1568 ccb->pkt_dmacookies[i].dmac_size;
1569 }
1570 }
1571 ccb->total_dmac_size = total_ccb_xferlen;
1572 ccb->ccb_flags |= CCB_FLAG_DMAVALID;
1573 pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1574
1575 return (DDI_SUCCESS);
1576
1577 case DDI_DMA_MAPPED:
1578 ccb->pkt_nwin = 1; /* all mapped, so only one window */
1579 ccb->pkt_dma_len = 0;
1580 ccb->pkt_dma_offset = 0;
1581 i = 0;
1582 /* first cookie is accessed from ccb->pkt_dmacookies[0] */
1583 total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
1584 for (;;) {
1585 i++;
1586 if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1587 (i == ccb->pkt_ncookies) ||
1588 (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1589 break;
1590 }
1591 /*
1592 * next cookie will be retrieved from
1593 * ccb->pkt_dmacookies[i]
1594 */
1595 ddi_dma_nextcookie(ccb->pkt_dma_handle,
1596 &ccb->pkt_dmacookies[i]);
1597 total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1598 }
1599 ccb->pkt_cookie = i;
1600 ccb->arcmsr_cdb.sgcount = i;
1601 if (total_ccb_xferlen > 512) {
1602 resid = total_ccb_xferlen % 512;
1603 if (resid != 0) {
1604 i--;
1605 total_ccb_xferlen -= resid;
1606 /* modify last sg length */
1607 ccb->pkt_dmacookies[i].dmac_size =
1608 ccb->pkt_dmacookies[i].dmac_size - resid;
1609 ccb->resid_dmacookie.dmac_size = resid;
1610 ccb->resid_dmacookie.dmac_laddress =
1611 ccb->pkt_dmacookies[i].dmac_laddress +
1612 ccb->pkt_dmacookies[i].dmac_size;
1613 }
1614 }
1615 ccb->total_dmac_size = total_ccb_xferlen;
1616 ccb->ccb_flags |= CCB_FLAG_DMAVALID;
1617 pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1618 return (DDI_SUCCESS);
1619
1620 case DDI_DMA_NORESOURCES:
1621 arcmsr_warn(acb, "dma map got 'no resources'");
1622 bioerror(bp, ENOMEM);
1623 break;
1624
1625 case DDI_DMA_NOMAPPING:
1626 arcmsr_warn(acb, "dma map got 'no mapping'");
1627 bioerror(bp, EFAULT);
1628 break;
1629
1630 case DDI_DMA_TOOBIG:
1631 arcmsr_warn(acb, "dma map got 'too big'");
1632 bioerror(bp, EINVAL);
1633 break;
1634
1635 case DDI_DMA_INUSE:
1636 arcmsr_warn(acb, "dma map got 'in use' "
1637 "(should not happen)");
1638 break;
1639 default:
1640 arcmsr_warn(acb, "dma map failed (0x%x)", map_method);
1641 break;
1642 }
1643
1644 ddi_dma_free_handle(&ccb->pkt_dma_handle);
1645 ccb->pkt_dma_handle = NULL;
1646 ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1647 return (DDI_FAILURE);
1648 }
1649
1650
1651 /*
1652 * Function name: arcmsr_dma_move
1653 * Return Values: DDI_SUCCESS if successful, DDI_FAILURE on failure
1654 * Description: move DMA resources to next DMA window
1655 * Context: Can only be called from arcmsr_tran_init_pkt()
1656 */
1657 static int
1658 arcmsr_dma_move(struct ACB *acb, struct scsi_pkt *pkt, struct buf *bp)
1659 {
1660 struct CCB *ccb = pkt->pkt_ha_private;
1661 uint8_t i = 0;
1662 int resid = 0;
1663 int total_ccb_xferlen = 0;
1664
1665 if (ccb->resid_dmacookie.dmac_size != 0) {
1666 total_ccb_xferlen += ccb->resid_dmacookie.dmac_size;
1667 ccb->pkt_dmacookies[i].dmac_size =
1668 ccb->resid_dmacookie.dmac_size;
1669 ccb->pkt_dmacookies[i].dmac_laddress =
1670 ccb->resid_dmacookie.dmac_laddress;
1671 i++;
1672 ccb->resid_dmacookie.dmac_size = 0;
1673 }
1674 /*
1675 * If there are no more cookies remaining in this window,
1676 * move to the next window.
1677 */
1678 if (ccb->pkt_cookie == ccb->pkt_ncookies) {
1679 /*
1680 * only a "partial" dma mapping arrives here
1681 */
1682 if ((ccb->pkt_curwin == ccb->pkt_nwin) &&
1683 (ccb->pkt_nwin == 1)) {
1684 return (DDI_SUCCESS);
1685 }
1686
1687 /* At last window, cannot move */
1688 if (++ccb->pkt_curwin >= ccb->pkt_nwin) {
1689 arcmsr_warn(acb, "dma partial set, numwin exceeded");
1690 return (DDI_FAILURE);
1691 }
1692 if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
1693 &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
1694 &ccb->pkt_dmacookies[i], &ccb->pkt_ncookies) ==
1695 DDI_FAILURE) {
1696 arcmsr_warn(acb, "ddi_dma_getwin failed");
1697 return (DDI_FAILURE);
1698 }
1699 /* reset cookie pointer */
1700 ccb->pkt_cookie = 0;
1701 } else {
1702 /*
1703 * only a fully-mapped ("all") dma transfer arrives here.
1704 * We still have more cookies in this window, so get
1705 * the next one, accessing the remaining cookie records
1706 * of pkt_dma_handle through the ccb->pkt_dmacookies
1707 * array
1708 */
1709 ddi_dma_nextcookie(ccb->pkt_dma_handle,
1710 &ccb->pkt_dmacookies[i]);
1711 }
1712
1713 /* Get remaining cookies in this window, up to our maximum */
1714 total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1715
1716 /* retrieve and store cookies, continuing at ccb->pkt_dmacookies[i] */
1717 for (;;) {
1718 i++;
1719 /* handled cookies count level indicator */
1720 ccb->pkt_cookie++;
1721 if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1722 (ccb->pkt_cookie == ccb->pkt_ncookies) ||
1723 (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1724 break;
1725 }
1726 ddi_dma_nextcookie(ccb->pkt_dma_handle,
1727 &ccb->pkt_dmacookies[i]);
1728 total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1729 }
1730
1731 ccb->arcmsr_cdb.sgcount = i;
1732 if (total_ccb_xferlen > 512) {
1733 resid = total_ccb_xferlen % 512;
1734 if (resid != 0) {
1735 i--;
1736 total_ccb_xferlen -= resid;
1737 /* modify last sg length */
1738 ccb->pkt_dmacookies[i].dmac_size =
1739 ccb->pkt_dmacookies[i].dmac_size - resid;
1740 ccb->resid_dmacookie.dmac_size = resid;
1741 ccb->resid_dmacookie.dmac_laddress =
1742 ccb->pkt_dmacookies[i].dmac_laddress +
1743 ccb->pkt_dmacookies[i].dmac_size;
1744 }
1745 }
1746 ccb->total_dmac_size += total_ccb_xferlen;
1747 pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1748
1749 return (DDI_SUCCESS);
1750 }
1751
1752
1753 /*ARGSUSED*/
1754 static void
1755 arcmsr_build_ccb(struct CCB *ccb)
1756 {
1757 struct scsi_pkt *pkt = ccb->pkt;
1758 struct ARCMSR_CDB *arcmsr_cdb;
1759 char *psge;
1760 uint32_t address_lo, address_hi;
1761 int arccdbsize = 0x30;
1762 uint8_t sgcount;
1763
1764 arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1765 psge = (char *)&arcmsr_cdb->sgu;
1766
1767 bcopy((caddr_t)pkt->pkt_cdbp, arcmsr_cdb->Cdb, arcmsr_cdb->CdbLength);
1768 sgcount = ccb->arcmsr_cdb.sgcount;
1769
1770 if (sgcount != 0) {
1771 int length, i;
1772 int cdb_sgcount = 0;
1773 int total_xfer_length = 0;
1774
1775 /* map the DMA cookie list to our IOP SG list */
1776 for (i = 0; i < sgcount; i++) {
1777 /* Get physaddr of the current data pointer */
1778 length = ccb->pkt_dmacookies[i].dmac_size;
1779 total_xfer_length += length;
1780 address_lo =
1781 dma_addr_lo32(ccb->pkt_dmacookies[i].dmac_laddress);
1782 address_hi =
1783 dma_addr_hi32(ccb->pkt_dmacookies[i].dmac_laddress);
1784
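/*
 * use a 32-bit SG entry when the segment sits below 4GB,
 * otherwise a 64-bit entry tagged with IS_SG64_ADDR
 */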
1785 if (address_hi == 0) {
1786 struct SG32ENTRY *dma_sg;
1787
1788 dma_sg = (struct SG32ENTRY *)(intptr_t)psge;
1789 dma_sg->address = address_lo;
1790 dma_sg->length = length;
1791 psge += sizeof (struct SG32ENTRY);
1792 arccdbsize += sizeof (struct SG32ENTRY);
1793 } else {
1794 struct SG64ENTRY *dma_sg;
1795
1796 dma_sg = (struct SG64ENTRY *)(intptr_t)psge;
1797 dma_sg->addresshigh = address_hi;
1798 dma_sg->address = address_lo;
1799 dma_sg->length = length | IS_SG64_ADDR;
1800 psge += sizeof (struct SG64ENTRY);
1801 arccdbsize += sizeof (struct SG64ENTRY);
1802 }
1803 cdb_sgcount++;
1804 }
1805 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
1806 arcmsr_cdb->DataLength = total_xfer_length;
1807 if (arccdbsize > 256) {
1808 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1809 }
1810 } else {
1811 arcmsr_cdb->DataLength = 0;
1812 }
1813
1814 if (ccb->ccb_flags & CCB_FLAG_DMAWRITE)
1815 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1816 ccb->arc_cdb_size = arccdbsize;
1817 }
1818
1819 /*
1820  * arcmsr_post_ccb - Send a protocol-specific ARC send postcard to an AIOC.
1821  *
1822  * handle: Handle of registered ARC protocol driver
1823  * adapter_id: AIOC unique identifier (integer)
1824  * pPOSTCARD_SEND: Pointer to ARC send postcard
1825  *
1826  * This routine posts an ARC send postcard to the request post FIFO of a
1827 * specific ARC adapter.
1828 */
1829 static int
1830 arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb)
1831 {
1832 uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
1833 struct scsi_pkt *pkt = ccb->pkt;
1834 struct ARCMSR_CDB *arcmsr_cdb;
1835 uint_t pkt_flags = pkt->pkt_flags;
1836
1837 arcmsr_cdb = &ccb->arcmsr_cdb;
1838
1839 /* TODO: Use correct offset and size for syncing? */
1840 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0, DDI_DMA_SYNC_FORDEV) ==
1841 DDI_FAILURE)
1842 return (DDI_FAILURE);
1843
1844 atomic_inc_32(&acb->ccboutstandingcount);
1845 ccb->ccb_time = (time_t)(ddi_get_time() + pkt->pkt_time);
1846
1847 ccb->ccb_state = ARCMSR_CCB_START;
1848 switch (acb->adapter_type) {
1849 case ACB_ADAPTER_TYPE_A:
1850 {
1851 struct HBA_msgUnit *phbamu;
1852
1853 phbamu = (struct HBA_msgUnit *)acb->pmu;
1854 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1855 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1856 &phbamu->inbound_queueport,
1857 cdb_phyaddr_pattern |
1858 ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1859 } else {
1860 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1861 &phbamu->inbound_queueport, cdb_phyaddr_pattern);
1862 }
1863 if (pkt_flags & FLAG_NOINTR)
1864 arcmsr_polling_hba_ccbdone(acb, ccb);
1865 break;
1866 }
1867
1868 case ACB_ADAPTER_TYPE_B:
1869 {
1870 struct HBB_msgUnit *phbbmu;
1871 int ending_index, index;
1872
1873 phbbmu = (struct HBB_msgUnit *)acb->pmu;
1874 index = phbbmu->postq_index;
1875 ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
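/*
 * Zero the slot after this one first; a zero entry apparently
 * marks the current tail of the circular post queue to the IOP.
 */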
1876 phbbmu->post_qbuffer[ending_index] = 0;
1877 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1878 phbbmu->post_qbuffer[index] =
1879 (cdb_phyaddr_pattern|ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1880 } else {
1881 phbbmu->post_qbuffer[index] = cdb_phyaddr_pattern;
1882 }
1883 index++;
1884 /* wrap the post queue index to 0 at the end of the queue */
1885 index %= ARCMSR_MAX_HBB_POSTQUEUE;
1886 phbbmu->postq_index = index;
1887 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1888 &phbbmu->hbb_doorbell->drv2iop_doorbell,
1889 ARCMSR_DRV2IOP_CDB_POSTED);
1890
1891 if (pkt_flags & FLAG_NOINTR)
1892 arcmsr_polling_hbb_ccbdone(acb, ccb);
1893 break;
1894 }
1895
1896 case ACB_ADAPTER_TYPE_C:
1897 {
1898 struct HBC_msgUnit *phbcmu;
1899 uint32_t ccb_post_stamp, arc_cdb_size;
1900
1901 phbcmu = (struct HBC_msgUnit *)acb->pmu;
1902 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 :
1903 ccb->arc_cdb_size;
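/*
 * Fold the CDB size into the low bits of the posted address;
 * those bits are free because CCBs are 32-byte aligned. Bit 0 is
 * presumably what tells the IOP the entry is valid.
 */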
1904 ccb_post_stamp = (cdb_phyaddr_pattern |
1905 ((arc_cdb_size - 1) >> 6) | 1);
1906 if (acb->cdb_phyaddr_hi32) {
1907 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1908 &phbcmu->inbound_queueport_high,
1909 acb->cdb_phyaddr_hi32);
1910 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1911 &phbcmu->inbound_queueport_low, ccb_post_stamp);
1912 } else {
1913 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1914 &phbcmu->inbound_queueport_low, ccb_post_stamp);
1915 }
1916 if (pkt_flags & FLAG_NOINTR)
1917 arcmsr_polling_hbc_ccbdone(acb, ccb);
1918 break;
1919 }
1920
1921 }
1922 return (DDI_SUCCESS);
1923 }
1924
1925
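/*
 * Hand a finished CCB back to SCSA. A flag of 1 means the command
 * was counted in ccboutstandingcount, so the count is dropped here
 * after the target driver's completion callback runs.
 */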
1926 static void
1927 arcmsr_ccb_complete(struct CCB *ccb, int flag)
1928 {
1929 struct ACB *acb = ccb->acb;
1930 struct scsi_pkt *pkt = ccb->pkt;
1931
1932 if (pkt == NULL) {
1933 return;
1934 }
1935 ccb->ccb_state |= ARCMSR_CCB_DONE;
1936 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1937 STATE_SENT_CMD | STATE_GOT_STATUS);
1938
1939 if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1940 (pkt->pkt_state & STATE_XFERRED_DATA)) {
1941 (void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1942 DDI_DMA_SYNC_FORCPU);
1943 }
1944 /*
1945 * TODO: This represents a potential race condition, and is
1946 * ultimately a poor design decision. Revisit this code
1947 * and solve the mutex ownership issue correctly.
1948 */
1949 if (mutex_owned(&acb->isr_mutex)) {
1950 mutex_exit(&acb->isr_mutex);
1951 scsi_hba_pkt_comp(pkt);
1952 mutex_enter(&acb->isr_mutex);
1953 } else {
1954 scsi_hba_pkt_comp(pkt);
1955 }
1956 if (flag == 1) {
1957 atomic_dec_32(&acb->ccboutstandingcount);
1958 }
1959 }
1960
1961 static void
1962 arcmsr_report_ccb_state(struct ACB *acb, struct CCB *ccb, boolean_t error)
1963 {
1964 int id, lun;
1965
1966 ccb->ccb_state |= ARCMSR_CCB_DONE;
1967 id = ccb->pkt->pkt_address.a_target;
1968 lun = ccb->pkt->pkt_address.a_lun;
1969
1970 if (!error) {
1971 if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
1972 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1973 }
1974 ccb->pkt->pkt_reason = CMD_CMPLT;
1975 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1976 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1977 &ccb->complete_queue_pointer, &acb->ccb_complete_list);
1978
1979 } else {
1980 switch (ccb->arcmsr_cdb.DeviceStatus) {
1981 case ARCMSR_DEV_SELECT_TIMEOUT:
1982 if (acb->devstate[id][lun] == ARECA_RAID_GOOD) {
1983 arcmsr_warn(acb,
1984 "target %d lun %d selection "
1985 "timeout", id, lun);
1986 }
1987 acb->devstate[id][lun] = ARECA_RAID_GONE;
1988 ccb->pkt->pkt_reason = CMD_TIMEOUT; /* CMD_DEV_GONE; */
1989 ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1990 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1991 &ccb->complete_queue_pointer,
1992 &acb->ccb_complete_list);
1993 break;
1994 case ARCMSR_DEV_ABORTED:
1995 case ARCMSR_DEV_INIT_FAIL:
1996 arcmsr_warn(acb, "isr got 'ARCMSR_DEV_ABORTED'"
1997 " or 'ARCMSR_DEV_INIT_FAIL'");
1998 arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
1999 acb->devstate[id][lun] = ARECA_RAID_GONE;
2000 ccb->pkt->pkt_reason = CMD_DEV_GONE;
2001 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2002 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2003 &ccb->complete_queue_pointer,
2004 &acb->ccb_complete_list);
2005 break;
2006 case SCSISTAT_CHECK_CONDITION:
2007 acb->devstate[id][lun] = ARECA_RAID_GOOD;
2008 arcmsr_report_sense_info(ccb);
2009 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2010 &ccb->complete_queue_pointer,
2011 &acb->ccb_complete_list);
2012 break;
2013 default:
2014 arcmsr_warn(acb,
2015 "target %d lun %d isr received CMD_DONE"
2016 " with unknown DeviceStatus (0x%x)",
2017 id, lun, ccb->arcmsr_cdb.DeviceStatus);
2018 arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
2019 acb->devstate[id][lun] = ARECA_RAID_GONE;
2020 /* unknown or crc error; flag the command for retry */
2021 ccb->pkt->pkt_reason = CMD_TRAN_ERR;
2022 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2023 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2024 &ccb->complete_queue_pointer,
2025 &acb->ccb_complete_list);
2026 break;
2027 }
2028 }
2029 }
2030
2031
2032 static void
2033 arcmsr_drain_donequeue(struct ACB *acb, struct CCB *ccb, boolean_t error)
2034 {
2035 uint16_t ccb_state;
2036
2037 if (ccb->acb != acb) {
2038 return;
2039 }
2040 if (ccb->ccb_state != ARCMSR_CCB_START) {
2041 switch (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
2042 case ARCMSR_CCB_TIMEOUT:
2043 case ARCMSR_CCB_ABORTED:
2044 case ARCMSR_CCB_RESET:
2045 ccb_state = ccb->ccb_state;
2046 if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2047 arcmsr_free_ccb(ccb);
2048 else
2049 ccb->ccb_state |= ARCMSR_CCB_BACK;
2050 return;
2064 default:
2065 return;
2066 }
2067 }
2068 arcmsr_report_ccb_state(acb, ccb, error);
2069 }
2070
2071 static void
2072 arcmsr_report_sense_info(struct CCB *ccb)
2073 {
2074 struct SENSE_DATA *cdb_sensedata;
2075 struct scsi_pkt *pkt = ccb->pkt;
2076 struct scsi_arq_status *arq_status;
2077 union scsi_cdb *cdbp;
2078 uint64_t err_blkno;
2079
2080 cdbp = (void *)pkt->pkt_cdbp;
2081 err_blkno = ARCMSR_GETGXADDR(ccb->arcmsr_cdb.CdbLength, cdbp);
2082
2083 arq_status = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
2084 bzero((caddr_t)arq_status, sizeof (struct scsi_arq_status));
2085 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
2086 arq_status->sts_rqpkt_reason = CMD_CMPLT;
2087 arq_status->sts_rqpkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET |
2088 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS);
2089 arq_status->sts_rqpkt_statistics = 0;
2090 arq_status->sts_rqpkt_resid = 0;
2091
2092 pkt->pkt_reason = CMD_CMPLT;
2093 /* auto rqsense took place */
2094 pkt->pkt_state |= STATE_ARQ_DONE;
2095
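/*
 * Sense data is returned in fixed format when the error block
 * address fits in 32 bits, and in descriptor format otherwise,
 * because the fixed-format information field is only four bytes.
 */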
2096 cdb_sensedata = (struct SENSE_DATA *)ccb->arcmsr_cdb.SenseData;
2098 if (err_blkno <= 0xfffffffful) {
2099 struct scsi_extended_sense *sts_sensedata;
2100
2101 sts_sensedata = &arq_status->sts_sensedata;
2102 sts_sensedata->es_code = cdb_sensedata->ErrorCode;
2103 /* must eq CLASS_EXTENDED_SENSE (0x07) */
2104 sts_sensedata->es_class = cdb_sensedata->ErrorClass;
2105 sts_sensedata->es_valid = cdb_sensedata->Valid;
2106 sts_sensedata->es_segnum = cdb_sensedata->SegmentNumber;
2107 sts_sensedata->es_key = cdb_sensedata->SenseKey;
2108 sts_sensedata->es_ili = cdb_sensedata->IncorrectLength;
2109 sts_sensedata->es_eom = cdb_sensedata->EndOfMedia;
2110 sts_sensedata->es_filmk = cdb_sensedata->FileMark;
2111 sts_sensedata->es_info_1 = (err_blkno >> 24) & 0xFF;
2112 sts_sensedata->es_info_2 = (err_blkno >> 16) & 0xFF;
2113 sts_sensedata->es_info_3 = (err_blkno >> 8) & 0xFF;
2114 sts_sensedata->es_info_4 = err_blkno & 0xFF;
2115 sts_sensedata->es_add_len =
2116 cdb_sensedata->AdditionalSenseLength;
2117 sts_sensedata->es_cmd_info[0] =
2118 cdb_sensedata->CommandSpecificInformation[0];
2119 sts_sensedata->es_cmd_info[1] =
2120 cdb_sensedata->CommandSpecificInformation[1];
2121 sts_sensedata->es_cmd_info[2] =
2122 cdb_sensedata->CommandSpecificInformation[2];
2123 sts_sensedata->es_cmd_info[3] =
2124 cdb_sensedata->CommandSpecificInformation[3];
2125 sts_sensedata->es_add_code =
2126 cdb_sensedata->AdditionalSenseCode;
2127 sts_sensedata->es_qual_code =
2128 cdb_sensedata->AdditionalSenseCodeQualifier;
2129 sts_sensedata->es_fru_code =
2130 cdb_sensedata->FieldReplaceableUnitCode;
2131 } else { /* 64-bit LBA */
2132 struct scsi_descr_sense_hdr *dsp;
2133 struct scsi_information_sense_descr *isd;
2134
2135 dsp = (struct scsi_descr_sense_hdr *)
2136 &arq_status->sts_sensedata;
2137 dsp->ds_class = CLASS_EXTENDED_SENSE;
2138 dsp->ds_code = CODE_FMT_DESCR_CURRENT;
2139 dsp->ds_key = cdb_sensedata->SenseKey;
2140 dsp->ds_add_code = cdb_sensedata->AdditionalSenseCode;
2141 dsp->ds_qual_code =
2142 cdb_sensedata->AdditionalSenseCodeQualifier;
2143 dsp->ds_addl_sense_length =
2144 sizeof (struct scsi_information_sense_descr);
2145
2146 isd = (struct scsi_information_sense_descr *)(dsp+1);
2147 isd->isd_descr_type = DESCR_INFORMATION;
2148 isd->isd_valid = 1;
2149 isd->isd_information[0] = (err_blkno >> 56) & 0xFF;
2150 isd->isd_information[1] = (err_blkno >> 48) & 0xFF;
2151 isd->isd_information[2] = (err_blkno >> 40) & 0xFF;
2152 isd->isd_information[3] = (err_blkno >> 32) & 0xFF;
2153 isd->isd_information[4] = (err_blkno >> 24) & 0xFF;
2154 isd->isd_information[5] = (err_blkno >> 16) & 0xFF;
2155 isd->isd_information[6] = (err_blkno >> 8) & 0xFF;
2156 isd->isd_information[7] = (err_blkno) & 0xFF;
2157 }
2159 }
2160
2161
2162 static int
2163 arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt)
2164 {
2165 struct CCB *ccb;
2166 uint32_t intmask_org = 0;
2167 int i = 0;
2168
2169 acb->num_aborts++;
2170
2171 if (abortpkt != NULL) {
2172 /*
2173 * We don't support abort of a single packet. All
2174 * callers in our kernel always do a global abort, so
2175 * there is no point in having code to support it
2176 * here.
2177 */
2178 return (DDI_FAILURE);
2179 }
2180
2181 /*
2182 * if abortpkt is NULL, the upper layer needs us
2183 * to abort all commands
2184 */
2185 if (acb->ccboutstandingcount != 0) {
2186 /* disable all outbound interrupt */
2187 intmask_org = arcmsr_disable_allintr(acb);
2188 /* clear and abort all outbound posted Q */
2189 arcmsr_done4abort_postqueue(acb);
2190 /* tell the iop 331 to abort all outstanding commands */
2191 (void) arcmsr_abort_host_command(acb);
2192
2193 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2194 ccb = acb->pccb_pool[i];
2195 if (ccb->ccb_state == ARCMSR_CCB_START) {
2196 /*
2197 * this ccb will complete at
2198 * hwinterrupt
2199 */
2200 /* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
2201 ccb->pkt->pkt_reason = CMD_ABORTED;
2202 ccb->pkt->pkt_statistics |= STAT_ABORTED;
2203 arcmsr_ccb_complete(ccb, 1);
2204 }
2205 }
2206 /*
2207 * enable outbound Post Queue, outbound
2208 * doorbell Interrupt
2209 */
2210 arcmsr_enable_allintr(acb, intmask_org);
2211 }
2212 return (DDI_SUCCESS);
2213 }
2214
2215
2216 /*
2217 * Autoconfiguration support
2218 */
2219 static int
2220 arcmsr_parse_devname(char *devnm, int *tgt, int *lun)
2221 {
2222 char devbuf[SCSI_MAXNAMELEN];
2223 char *addr;
2224 char *p, *tp, *lp;
2225 long num;
2226
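/*
 * For example, a devnm of "sd@2,3:a" parses to tgt == 0x2 and
 * lun == 0x3; both fields are read as hexadecimal.
 */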
2227 /* Parse dev name and address */
2228 (void) strlcpy(devbuf, devnm, sizeof (devbuf));
2229 addr = "";
2230 for (p = devbuf; *p != '\0'; p++) {
2231 if (*p == '@') {
2232 addr = p + 1;
2233 *p = '\0';
2234 } else if (*p == ':') {
2235 *p = '\0';
2236 break;
2237 }
2238 }
2239
2240 /* Parse target and lun */
2241 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
2242 if (*p == ',') {
2243 lp = p + 1;
2244 *p = '\0';
2245 break;
2246 }
2247 }
2248 if ((tgt != NULL) && (tp != NULL)) {
2249 if (ddi_strtol(tp, NULL, 0x10, &num) != 0)
2250 return (-1);
2251 *tgt = (int)num;
2252 }
2253 if ((lun != NULL) && (lp != NULL)) {
2254 if (ddi_strtol(lp, NULL, 0x10, &num) != 0)
2255 return (-1);
2256 *lun = (int)num;
2257 }
2258 return (0);
2259 }
2260
2261 static int
2262 arcmsr_name_node(dev_info_t *dip, char *name, int len)
2263 {
2264 int tgt, lun;
2265
2266 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "target",
2267 -1);
2268 if (tgt == -1)
2269 return (DDI_FAILURE);
2270 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "lun",
2271 -1);
2272 if (lun == -1)
2273 return (DDI_FAILURE);
2274 (void) snprintf(name, len, "%x,%x", tgt, lun);
2275 return (DDI_SUCCESS);
2276 }
2277
2278 static dev_info_t *
2279 arcmsr_find_child(struct ACB *acb, uint16_t tgt, uint8_t lun)
2280 {
2281 dev_info_t *child = NULL;
2282 char addr[SCSI_MAXNAMELEN];
2283 char tmp[SCSI_MAXNAMELEN];
2284
2285 (void) snprintf(addr, sizeof (addr), "%x,%x", tgt, lun);
2286
2287 for (child = ddi_get_child(acb->dev_info);
2288 child;
2289 child = ddi_get_next_sibling(child)) {
2290 /* We don't care about non-persistent nodes */
2291 if (ndi_dev_is_persistent_node(child) == 0)
2292 continue;
2293 if (arcmsr_name_node(child, tmp, SCSI_MAXNAMELEN) !=
2294 DDI_SUCCESS)
2295 continue;
2296 if (strcmp(addr, tmp) == 0)
2297 break;
2298 }
2299 return (child);
2300 }
2301
2302 static int
2303 arcmsr_config_child(struct ACB *acb, struct scsi_device *sd, dev_info_t **dipp)
2304 {
2305 char *nodename = NULL;
2306 char **compatible = NULL;
2307 int ncompatible = 0;
2308 dev_info_t *ldip = NULL;
2309 int tgt = sd->sd_address.a_target;
2310 int lun = sd->sd_address.a_lun;
2311 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
2312 int rval;
2313
2314 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
2315 NULL, &nodename, &compatible, &ncompatible);
2316 if (nodename == NULL) {
2317 arcmsr_warn(acb, "found no compatible driver for T%dL%d",
2318 tgt, lun);
2319 rval = NDI_FAILURE;
2320 goto finish;
2321 }
2322 /* Create dev node */
2323 rval = ndi_devi_alloc(acb->dev_info, nodename, DEVI_SID_NODEID, &ldip);
2324 if (rval == NDI_SUCCESS) {
2325 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
2326 DDI_PROP_SUCCESS) {
2327 arcmsr_warn(acb,
2328 "unable to create target property for T%dL%d",
2329 tgt, lun);
2330 rval = NDI_FAILURE;
2331 goto finish;
2332 }
2333 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
2334 DDI_PROP_SUCCESS) {
2335 arcmsr_warn(acb,
2336 "unable to create lun property for T%dL%d",
2337 tgt, lun);
2338 rval = NDI_FAILURE;
2339 goto finish;
2340 }
2341 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
2342 "compatible", compatible, ncompatible) !=
2343 DDI_PROP_SUCCESS) {
2344 arcmsr_warn(acb,
2345 "unable to create compatible property for T%dL%d",
2346 tgt, lun);
2347 rval = NDI_FAILURE;
2348 goto finish;
2349 }
2350 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
2351 if (rval != NDI_SUCCESS) {
2352 arcmsr_warn(acb, "unable to online T%dL%d", tgt, lun);
2353 ndi_prop_remove_all(ldip);
2354 (void) ndi_devi_free(ldip);
2355 } else {
2356 arcmsr_log(acb, CE_NOTE, "T%dL%d onlined", tgt, lun);
2357 }
2358 }
2359 finish:
2360 if (dipp)
2361 *dipp = ldip;
2362
2363 scsi_hba_nodename_compatible_free(nodename, compatible);
2364 return (rval);
2365 }
2366
2367 static int
2368 arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun, dev_info_t **ldip)
2369 {
2370 struct scsi_device sd;
2371 dev_info_t *child;
2372 int rval;
2373
2374 if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
2375 if (ldip) {
2376 *ldip = child;
2377 }
2378 return (NDI_SUCCESS);
2379 }
2380 bzero(&sd, sizeof (struct scsi_device));
2381 sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
2382 sd.sd_address.a_target = tgt;
2383 sd.sd_address.a_lun = lun;
2384
2385 rval = scsi_hba_probe(&sd, NULL);
2386 if (rval == SCSIPROBE_EXISTS)
2387 rval = arcmsr_config_child(acb, &sd, ldip);
2388 scsi_unprobe(&sd);
2389 return (rval);
2390 }
2391
2392
2393 static int
2394 arcmsr_add_intr(struct ACB *acb, int intr_type)
2395 {
2396 int rc, count;
2397 dev_info_t *dev_info;
2398 const char *type_str;
2399
2400 switch (intr_type) {
2401 case DDI_INTR_TYPE_MSI:
2402 type_str = "MSI";
2403 break;
2404 case DDI_INTR_TYPE_MSIX:
2405 type_str = "MSIX";
2406 break;
2407 case DDI_INTR_TYPE_FIXED:
2408 type_str = "FIXED";
2409 break;
2410 default:
2411 type_str = "unknown";
2412 break;
2413 }
2414
2415 dev_info = acb->dev_info;
2416 /* Determine number of supported interrupts */
2417 rc = ddi_intr_get_nintrs(dev_info, intr_type, &count);
2418 if ((rc != DDI_SUCCESS) || (count == 0)) {
2419 arcmsr_warn(acb,
2420 "no interrupts of type %s, rc=0x%x, count=%d",
2421 type_str, rc, count);
2422 return (DDI_FAILURE);
2423 }
2424 acb->intr_size = sizeof (ddi_intr_handle_t) * count;
2425 acb->phandle = kmem_zalloc(acb->intr_size, KM_SLEEP);
2426 rc = ddi_intr_alloc(dev_info, acb->phandle, intr_type, 0,
2427 count, &acb->intr_count, DDI_INTR_ALLOC_NORMAL);
2428 if ((rc != DDI_SUCCESS) || (acb->intr_count == 0)) {
2429 arcmsr_warn(acb, "ddi_intr_alloc(%s) failed 0x%x",
2430 type_str, rc);
2431 return (DDI_FAILURE);
2432 }
2433 if (acb->intr_count < count) {
2434 arcmsr_log(acb, CE_NOTE, "Got %d interrupts, but requested %d",
2435 acb->intr_count, count);
2436 }
2437 /*
2438 * Get priority for first msi, assume remaining are all the same
2439 */
2440 if (ddi_intr_get_pri(acb->phandle[0], &acb->intr_pri) != DDI_SUCCESS) {
2441 arcmsr_warn(acb, "ddi_intr_get_pri failed");
2442 return (DDI_FAILURE);
2443 }
2444 if (acb->intr_pri >= ddi_intr_get_hilevel_pri()) {
2445 arcmsr_warn(acb, "high level interrupt not supported");
2446 return (DDI_FAILURE);
2447 }
2448
2449 for (int x = 0; x < acb->intr_count; x++) {
2450 if (ddi_intr_add_handler(acb->phandle[x], arcmsr_intr_handler,
2451 (caddr_t)acb, NULL) != DDI_SUCCESS) {
2452 arcmsr_warn(acb, "ddi_intr_add_handler(%s) failed",
2453 type_str);
2454 return (DDI_FAILURE);
2455 }
2456 }
2457 (void) ddi_intr_get_cap(acb->phandle[0], &acb->intr_cap);
2458 if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2459 /* Call ddi_intr_block_enable() for MSI */
2460 (void) ddi_intr_block_enable(acb->phandle, acb->intr_count);
2461 } else {
2462 /* Call ddi_intr_enable() for MSI non block enable */
2463 for (int x = 0; x < acb->intr_count; x++) {
2464 (void) ddi_intr_enable(acb->phandle[x]);
2465 }
2466 }
2467 return (DDI_SUCCESS);
2468 }
2469
2470 static void
2471 arcmsr_remove_intr(struct ACB *acb)
2472 {
2473 int x;
2474
2475 if (acb->phandle == NULL)
2476 return;
2477
2478 /* Disable all interrupts */
2479 if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2480 /* Call ddi_intr_block_disable() */
2481 (void) ddi_intr_block_disable(acb->phandle, acb->intr_count);
2482 } else {
2483 for (x = 0; x < acb->intr_count; x++) {
2484 (void) ddi_intr_disable(acb->phandle[x]);
2485 }
2486 }
2487 /* Call ddi_intr_remove_handler() */
2488 for (x = 0; x < acb->intr_count; x++) {
2489 (void) ddi_intr_remove_handler(acb->phandle[x]);
2490 (void) ddi_intr_free(acb->phandle[x]);
2491 }
2492 kmem_free(acb->phandle, acb->intr_size);
2493 acb->phandle = NULL;
2494 }
2495
2496 static void
2497 arcmsr_mutex_init(struct ACB *acb)
2498 {
2499 mutex_init(&acb->isr_mutex, NULL, MUTEX_DRIVER, NULL);
2500 mutex_init(&acb->acb_mutex, NULL, MUTEX_DRIVER, NULL);
2501 mutex_init(&acb->postq_mutex, NULL, MUTEX_DRIVER, NULL);
2502 mutex_init(&acb->workingQ_mutex, NULL, MUTEX_DRIVER, NULL);
2503 mutex_init(&acb->ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
2504 }
2505
2506 static void
2507 arcmsr_mutex_destroy(struct ACB *acb)
2508 {
2509 mutex_destroy(&acb->isr_mutex);
2510 mutex_destroy(&acb->acb_mutex);
2511 mutex_destroy(&acb->postq_mutex);
2512 mutex_destroy(&acb->workingQ_mutex);
2513 mutex_destroy(&acb->ioctl_mutex);
2514 }
2515
2516 static int
2517 arcmsr_initialize(struct ACB *acb)
2518 {
2519 struct CCB *pccb_tmp;
2520 size_t allocated_length;
2521 uint16_t wval;
2522 uint_t intmask_org, count;
2523 caddr_t arcmsr_ccbs_area;
2524 uint32_t wlval, cdb_phyaddr, offset, realccb_size;
2525 int32_t dma_sync_size;
2526 int i, id, lun, instance;
2527
2528 instance = ddi_get_instance(acb->dev_info);
2529 wlval = pci_config_get32(acb->pci_acc_handle, 0);
2530 wval = (uint16_t)((wlval >> 16) & 0xffff);
2531 realccb_size = P2ROUNDUP(sizeof (struct CCB), 32);
2532 switch (wval) {
2533 case PCI_DEVICE_ID_ARECA_1880:
2534 case PCI_DEVICE_ID_ARECA_1882:
2535 {
2536 uint32_t *iop_mu_regs_map0;
2537
2538 acb->adapter_type = ACB_ADAPTER_TYPE_C; /* lsi */
2539 dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2540 if (ddi_regs_map_setup(acb->dev_info, 2,
2541 (caddr_t *)&iop_mu_regs_map0, 0,
2542 sizeof (struct HBC_msgUnit), &acb->dev_acc_attr,
2543 &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2544 arcmsr_warn(acb, "unable to map registers");
2545 return (DDI_FAILURE);
2546 }
2547
2548 if (ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2549 DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle) !=
2550 DDI_SUCCESS) {
2551 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2552 arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2553 return (DDI_FAILURE);
2554 }
2555
2556 if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2557 &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2558 DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2559 &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2560 arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2561 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2562 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2563 return (DDI_FAILURE);
2564 }
2565
2566 if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2567 (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2568 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2569 &count) != DDI_DMA_MAPPED) {
2570 arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2571 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2572 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2573 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2574 return (DDI_FAILURE);
2575 }
2576 bzero(arcmsr_ccbs_area, dma_sync_size);
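/*
 * CCBs must start on a 32-byte boundary, since their physical
 * addresses are posted with the low bits shifted out or reused;
 * dma_sync_size reserved an extra 0x20 bytes so the pool base
 * can be slid up to the next boundary here.
 */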
2577 offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2578 - PtrToNum(arcmsr_ccbs_area));
2579 arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2580 /* ioport base */
2581 acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2582 break;
2583 }
2584
2585 case PCI_DEVICE_ID_ARECA_1201:
2586 {
2587 uint32_t *iop_mu_regs_map0;
2588 uint32_t *iop_mu_regs_map1;
2589 struct HBB_msgUnit *phbbmu;
2590
2591 acb->adapter_type = ACB_ADAPTER_TYPE_B; /* marvell */
2592 dma_sync_size =
2593 (ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20) +
2594 sizeof (struct HBB_msgUnit);
2595 /* Allocate memory for the ccb */
2596 if (ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2597 DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle) !=
2598 DDI_SUCCESS) {
2599 arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2600 return (DDI_FAILURE);
2601 }
2602
2603 if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2604 &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2605 DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2606 &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2607 arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2608 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2609 return (DDI_FAILURE);
2610 }
2611
2612 if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2613 (caddr_t)arcmsr_ccbs_area, dma_sync_size,
2614 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2615 NULL, &acb->ccb_cookie, &count) != DDI_DMA_MAPPED) {
2616 arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2617 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2618 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2619 return (DDI_FAILURE);
2620 }
2621 bzero(arcmsr_ccbs_area, dma_sync_size);
2622 offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2623 - PtrToNum(arcmsr_ccbs_area));
2624 arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2625 acb->pmu = (struct msgUnit *)
2626 NumToPtr(PtrToNum(arcmsr_ccbs_area) +
2627 (realccb_size*ARCMSR_MAX_FREECCB_NUM));
2628 phbbmu = (struct HBB_msgUnit *)acb->pmu;
2629
2630 /* setup device register */
2631 if (ddi_regs_map_setup(acb->dev_info, 1,
2632 (caddr_t *)&iop_mu_regs_map0, 0,
2633 sizeof (struct HBB_DOORBELL), &acb->dev_acc_attr,
2634 &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2635 arcmsr_warn(acb, "unable to map base0 registers");
2636 (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2637 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2638 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2639 return (DDI_FAILURE);
2640 }
2641
2642 /* ARCMSR_DRV2IOP_DOORBELL */
2643 phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)iop_mu_regs_map0;
2644 if (ddi_regs_map_setup(acb->dev_info, 2,
2645 (caddr_t *)&iop_mu_regs_map1, 0,
2646 sizeof (struct HBB_RWBUFFER), &acb->dev_acc_attr,
2647 &acb->reg_mu_acc_handle1) != DDI_SUCCESS) {
2648 arcmsr_warn(acb, "unable to map base1 registers");
2649 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2650 (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2651 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2652 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2653 return (DDI_FAILURE);
2654 }
2655
2656 /* ARCMSR_MSGCODE_RWBUFFER */
2657 phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)iop_mu_regs_map1;
2658 break;
2659 }
2660
2661 case PCI_DEVICE_ID_ARECA_1110:
2662 case PCI_DEVICE_ID_ARECA_1120:
2663 case PCI_DEVICE_ID_ARECA_1130:
2664 case PCI_DEVICE_ID_ARECA_1160:
2665 case PCI_DEVICE_ID_ARECA_1170:
2666 case PCI_DEVICE_ID_ARECA_1210:
2667 case PCI_DEVICE_ID_ARECA_1220:
2668 case PCI_DEVICE_ID_ARECA_1230:
2669 case PCI_DEVICE_ID_ARECA_1231:
2670 case PCI_DEVICE_ID_ARECA_1260:
2671 case PCI_DEVICE_ID_ARECA_1261:
2672 case PCI_DEVICE_ID_ARECA_1270:
2673 case PCI_DEVICE_ID_ARECA_1280:
2674 case PCI_DEVICE_ID_ARECA_1212:
2675 case PCI_DEVICE_ID_ARECA_1222:
2676 case PCI_DEVICE_ID_ARECA_1380:
2677 case PCI_DEVICE_ID_ARECA_1381:
2678 case PCI_DEVICE_ID_ARECA_1680:
2679 case PCI_DEVICE_ID_ARECA_1681:
2680 {
2681 uint32_t *iop_mu_regs_map0;
2682
2683 acb->adapter_type = ACB_ADAPTER_TYPE_A; /* intel */
2684 dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2685 if (ddi_regs_map_setup(acb->dev_info, 1,
2686 (caddr_t *)&iop_mu_regs_map0, 0,
2687 sizeof (struct HBA_msgUnit), &acb->dev_acc_attr,
2688 &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2689 arcmsr_warn(acb, "unable to map registers");
2690 return (DDI_FAILURE);
2691 }
2692
2693 if (ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2694 DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle) !=
2695 DDI_SUCCESS) {
2696 arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2697 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2698 return (DDI_FAILURE);
2699 }
2700
2701 if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2702 &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2703 DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2704 &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2705 arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2706 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2707 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2708 return (DDI_FAILURE);
2709 }
2710
2711 if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2712 (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2713 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2714 &count) != DDI_DMA_MAPPED) {
2715 arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2716 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2717 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2718 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2719 return (DDI_FAILURE);
2720 }
2721 bzero(arcmsr_ccbs_area, dma_sync_size);
2722 offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2723 - PtrToNum(arcmsr_ccbs_area));
2724 arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2725 /* ioport base */
2726 acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2727 break;
2728 }
2729
2730 default:
2731 arcmsr_warn(acb, "Unknown RAID adapter type!");
2732 return (DDI_FAILURE);
2733 }
2734 arcmsr_init_list_head(&acb->ccb_complete_list);
2735 /* from here on, we cannot access the pci configuration again */
2736 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2737 ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ);
2738 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2739 /* physical address of acb->pccb_pool */
2740 cdb_phyaddr = acb->ccb_cookie.dmac_address + offset;
2741
2742 pccb_tmp = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
2743
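/*
 * Carve the pool into CCBs. Type C adapters post the full low
 * 32 bits of the physical address; the older types post it
 * shifted right by 5, which is why CCBs are 32-byte aligned.
 */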
2744 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2745 pccb_tmp->cdb_phyaddr_pattern =
2746 (acb->adapter_type == ACB_ADAPTER_TYPE_C) ?
2747 cdb_phyaddr : (cdb_phyaddr >> 5);
2748 pccb_tmp->acb = acb;
2749 acb->ccbworkingQ[i] = acb->pccb_pool[i] = pccb_tmp;
2750 cdb_phyaddr = cdb_phyaddr + realccb_size;
2751 pccb_tmp = (struct CCB *)NumToPtr(PtrToNum(pccb_tmp) +
2752 realccb_size);
2753 }
2754 acb->vir2phy_offset = PtrToNum(pccb_tmp) - cdb_phyaddr;
2755
2756 /* disable all outbound interrupt */
2757 intmask_org = arcmsr_disable_allintr(acb);
2758
2759 if (!arcmsr_iop_confirm(acb)) {
2760 arcmsr_warn(acb, "arcmsr_iop_confirm error", instance);
2761 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2762 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2763 return (DDI_FAILURE);
2764 }
2765
2766 for (id = 0; id < ARCMSR_MAX_TARGETID; id++) {
2767 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
2768 acb->devstate[id][lun] = ARECA_RAID_GONE;
2769 }
2770 }
2771
2772 /* enable outbound Post Queue, outbound doorbell Interrupt */
2773 arcmsr_enable_allintr(acb, intmask_org);
2774
2775 return (DDI_SUCCESS);
2776 }
2777
2778 static int
2779 arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance)
2780 {
2781 scsi_hba_tran_t *hba_trans;
2782 ddi_device_acc_attr_t dev_acc_attr;
2783 struct ACB *acb;
2784 uint16_t wval;
2785 int raid6 = 1;
2786 char *type;
2787 int intr_types;
2788
2789
2790 /*
2791 * Soft State Structure
2792 * The driver should allocate the per-device-instance
2793 * soft state structure, being careful to clean up properly if
2794 * an error occurs. Allocate data structure.
2795 */
2796 if (ddi_soft_state_zalloc(arcmsr_soft_state, instance) != DDI_SUCCESS) {
2797 arcmsr_warn(NULL, "ddi_soft_state_zalloc failed");
2798 return (DDI_FAILURE);
2799 }
2800
2801 acb = ddi_get_soft_state(arcmsr_soft_state, instance);
2802 ASSERT(acb);
2803
2804 arcmsr_mutex_init(acb);
2805
2806 /* acb is already zalloc()d so we don't need to bzero() it */
2807 dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2808 dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2809 dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
2810
2811 acb->dev_info = dev_info;
2812 acb->dev_acc_attr = dev_acc_attr;
2813
2814 /*
2815 * The driver, if providing DMA, should also check that its hardware is
2816 * installed in a DMA-capable slot
2817 */
2818 if (ddi_slaveonly(dev_info) == DDI_SUCCESS) {
2819 arcmsr_warn(acb, "hardware is not installed in"
2820 " a DMA-capable slot");
2821 goto error_level_0;
2822 }
2823 if (pci_config_setup(dev_info, &acb->pci_acc_handle) != DDI_SUCCESS) {
2824 arcmsr_warn(acb, "pci_config_setup() failed, attach failed");
2825 goto error_level_0;
2826 }
2827
2828 wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_VENID);
2829 if (wval != PCI_VENDOR_ID_ARECA) {
2830 arcmsr_warn(acb,
2831 "'vendorid (0x%04x) does not match 0x%04x "
2832 "(PCI_VENDOR_ID_ARECA)",
2833 wval, PCI_VENDOR_ID_ARECA);
2834 goto error_level_0;
2835 }
2836
2837 wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID);
2838 switch (wval) {
2839 case PCI_DEVICE_ID_ARECA_1110:
2840 case PCI_DEVICE_ID_ARECA_1210:
2841 case PCI_DEVICE_ID_ARECA_1201:
2842 raid6 = 0;
2843 /*FALLTHRU*/
2844 case PCI_DEVICE_ID_ARECA_1120:
2845 case PCI_DEVICE_ID_ARECA_1130:
2846 case PCI_DEVICE_ID_ARECA_1160:
2847 case PCI_DEVICE_ID_ARECA_1170:
2848 case PCI_DEVICE_ID_ARECA_1220:
2849 case PCI_DEVICE_ID_ARECA_1230:
2850 case PCI_DEVICE_ID_ARECA_1260:
2851 case PCI_DEVICE_ID_ARECA_1270:
2852 case PCI_DEVICE_ID_ARECA_1280:
2853 type = "SATA 3G";
2854 break;
2855 case PCI_DEVICE_ID_ARECA_1380:
2856 case PCI_DEVICE_ID_ARECA_1381:
2857 case PCI_DEVICE_ID_ARECA_1680:
2858 case PCI_DEVICE_ID_ARECA_1681:
2859 type = "SAS 3G";
2860 break;
2861 case PCI_DEVICE_ID_ARECA_1880:
2862 type = "SAS 6G";
2863 break;
2864 default:
2865 type = "X-TYPE";
2866 arcmsr_warn(acb, "Unknown Host Adapter RAID Controller!");
2867 goto error_level_0;
2868 }
2869
2870 arcmsr_log(acb, CE_CONT, "Areca %s Host Adapter RAID Controller%s\n",
2871 type, raid6 ? " (RAID6 capable)" : "");
2872
2873 /* we disable iop interrupt here */
2874 if (arcmsr_initialize(acb) == DDI_FAILURE) {
2875 arcmsr_warn(acb, "arcmsr_initialize failed");
2876 goto error_level_1;
2877 }
2878
2879 /* Allocate a transport structure */
2880 hba_trans = scsi_hba_tran_alloc(dev_info, SCSI_HBA_CANSLEEP);
2881 if (hba_trans == NULL) {
2882 arcmsr_warn(acb, "scsi_hba_tran_alloc failed");
2883 goto error_level_2;
2884 }
2885 acb->scsi_hba_transport = hba_trans;
2886 acb->dev_info = dev_info;
2887 /* init scsi host adapter transport entry */
2888 hba_trans->tran_hba_private = acb;
2889 hba_trans->tran_tgt_private = NULL;
2890 /*
2891 * If no per-target initialization is required, the HBA can leave
2892 * tran_tgt_init set to NULL.
2893 */
2894 hba_trans->tran_tgt_init = arcmsr_tran_tgt_init;
2895 hba_trans->tran_tgt_probe = scsi_hba_probe;
2896 hba_trans->tran_tgt_free = NULL;
2897 hba_trans->tran_start = arcmsr_tran_start;
2898 hba_trans->tran_abort = arcmsr_tran_abort;
2899 hba_trans->tran_reset = arcmsr_tran_reset;
2900 hba_trans->tran_getcap = arcmsr_tran_getcap;
2901 hba_trans->tran_setcap = arcmsr_tran_setcap;
2902 hba_trans->tran_init_pkt = arcmsr_tran_init_pkt;
2903 hba_trans->tran_destroy_pkt = arcmsr_tran_destroy_pkt;
2904 hba_trans->tran_dmafree = arcmsr_tran_dmafree;
2905 hba_trans->tran_sync_pkt = arcmsr_tran_sync_pkt;
2906
2907 hba_trans->tran_reset_notify = NULL;
2908 hba_trans->tran_get_bus_addr = NULL;
2909 hba_trans->tran_get_name = NULL;
2910 hba_trans->tran_quiesce = NULL;
2911 hba_trans->tran_unquiesce = NULL;
2912 hba_trans->tran_bus_reset = NULL;
2913 hba_trans->tran_bus_config = arcmsr_tran_bus_config;
2914 hba_trans->tran_add_eventcall = NULL;
2915 hba_trans->tran_get_eventcookie = NULL;
2916 hba_trans->tran_post_event = NULL;
2917 hba_trans->tran_remove_eventcall = NULL;
2918
2919 /* iop init and enable interrupt here */
2920 arcmsr_iop_init(acb);
2921
2922 /* Get supported interrupt types */
2923 if (ddi_intr_get_supported_types(dev_info, &intr_types) !=
2924 DDI_SUCCESS) {
2925 arcmsr_warn(acb, "ddi_intr_get_supported_types failed");
2926 goto error_level_3;
2927 }
2928 if (intr_types & DDI_INTR_TYPE_FIXED) {
2929 if (arcmsr_add_intr(acb, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS)
2930 goto error_level_5;
2931 } else if (intr_types & DDI_INTR_TYPE_MSI) {
2932 if (arcmsr_add_intr(acb, DDI_INTR_TYPE_MSI) != DDI_SUCCESS)
2933 goto error_level_5;
2934 }
2935
2936 /*
2937 * The driver should attach this instance of the device, and
2938 * perform error cleanup if necessary
2939 */
2940 if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
2941 hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
2942 arcmsr_warn(acb, "scsi_hba_attach_setup failed");
2943 goto error_level_5;
2944 }
2945
2946 /* Create a taskq for dealing with dr events */
2947 if ((acb->taskq = ddi_taskq_create(dev_info, "arcmsr_dr_taskq", 1,
2948 TASKQ_DEFAULTPRI, 0)) == NULL) {
2949 arcmsr_warn(acb, "ddi_taskq_create failed");
2950 goto error_level_8;
2951 }
2952
2953 acb->timeout_count = 0;
2954 /* active ccbs "timeout" watchdog */
2955 acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
2956 (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
2957 acb->timeout_sc_id = timeout(arcmsr_devMap_monitor, (caddr_t)acb,
2958 (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
2959
2960 /* report device info */
2961 ddi_report_dev(dev_info);
2962
2963 return (DDI_SUCCESS);
2964
2965 error_level_8:
2966
2967 error_level_7:
2968 error_level_6:
2969 (void) scsi_hba_detach(dev_info);
2970
2971 error_level_5:
2972 arcmsr_remove_intr(acb);
2973
2974 error_level_4:
2975 error_level_3:
2976 if (acb->scsi_hba_transport)
2977 scsi_hba_tran_free(acb->scsi_hba_transport);
2978
2979 error_level_2:
2980 if (acb->ccbs_acc_handle)
2981 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2982 if (acb->ccbs_pool_handle)
2983 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2984
2985 error_level_1:
2986 if (acb->pci_acc_handle)
2987 pci_config_teardown(&acb->pci_acc_handle);
2988 arcmsr_mutex_destroy(acb);
2989 ddi_soft_state_free(arcmsr_soft_state, instance);
2990
2991 error_level_0:
2992 return (DDI_FAILURE);
2993 }
2994
2995
2996 static void
2997 arcmsr_vlog(struct ACB *acb, int level, char *fmt, va_list ap)
2998 {
2999 char buf[256];
3000
3001 if (acb != NULL) {
3002 (void) snprintf(buf, sizeof (buf), "%s%d: %s",
3003 ddi_driver_name(acb->dev_info),
3004 ddi_get_instance(acb->dev_info), fmt);
3005 fmt = buf;
3006 }
3007 vcmn_err(level, fmt, ap);
3008 }
3009
3010 static void
3011 arcmsr_log(struct ACB *acb, int level, char *fmt, ...)
3012 {
3013 va_list ap;
3014
3015 va_start(ap, fmt);
3016 arcmsr_vlog(acb, level, fmt, ap);
3017 va_end(ap);
3018 }
3019
3020 static void
3021 arcmsr_warn(struct ACB *acb, char *fmt, ...)
3022 {
3023 va_list ap;
3024
3025 va_start(ap, fmt);
3026 arcmsr_vlog(acb, CE_WARN, fmt, ap);
3027 va_end(ap);
3028 }
3029
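/*
 * Minimal circular doubly-linked list primitives in the style of
 * the Linux list_head: an empty list points back at itself.
 */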
3030 static void
3031 arcmsr_init_list_head(struct list_head *list)
3032 {
3033 list->next = list;
3034 list->prev = list;
3035 }
3036
3037 static void
3038 arcmsr_x_list_del(struct list_head *prev, struct list_head *next)
3039 {
3040 next->prev = prev;
3041 prev->next = next;
3042 }
3043
3044 static void
3045 arcmsr_x_list_add(struct list_head *new_one, struct list_head *prev,
3046 struct list_head *next)
3047 {
3048 next->prev = new_one;
3049 new_one->next = next;
3050 new_one->prev = prev;
3051 prev->next = new_one;
3052 }
3053
3054 static void
3055 arcmsr_list_add_tail(kmutex_t *list_lock, struct list_head *new_one,
3056 struct list_head *head)
3057 {
3058 mutex_enter(list_lock);
3059 arcmsr_x_list_add(new_one, head->prev, head);
3060 mutex_exit(list_lock);
3061 }
3062
3063 static struct list_head *
3064 arcmsr_list_get_first(kmutex_t *list_lock, struct list_head *head)
3065 {
3066 struct list_head *one = NULL;
3067
3068 mutex_enter(list_lock);
3069 if (head->next == head) {
3070 mutex_exit(list_lock);
3071 return (NULL);
3072 }
3073 one = head->next;
3074 arcmsr_x_list_del(one->prev, one->next);
3075 arcmsr_init_list_head(one);
3076 mutex_exit(list_lock);
3077 return (one);
3078 }
3079
3080 static struct CCB *
3081 arcmsr_get_complete_ccb_from_list(struct ACB *acb)
3082 {
3083 struct list_head *first_complete_ccb_list = NULL;
3084 struct CCB *ccb;
3085
3086 first_complete_ccb_list =
3087 arcmsr_list_get_first(&acb->ccb_complete_list_mutex,
3088 &acb->ccb_complete_list);
3089 if (first_complete_ccb_list == NULL) {
3090 return (NULL);
3091 }
3092 ccb = (void *)((caddr_t)(first_complete_ccb_list) -
3093 offsetof(struct CCB, complete_queue_pointer));
3094 return (ccb);
3095 }
3096
3097 static struct CCB *
3098 arcmsr_get_freeccb(struct ACB *acb)
3099 {
3100 struct CCB *ccb;
3101 int ccb_get_index, ccb_put_index;
3102
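/*
 * ccbworkingQ is a ring of free CCBs. The ring is empty when the
 * get index would catch up with the put index, in which case no
 * CCB is handed out and the caller gets NULL.
 */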
3103 mutex_enter(&acb->workingQ_mutex);
3104 ccb_put_index = acb->ccb_put_index;
3105 ccb_get_index = acb->ccb_get_index;
3106 ccb = acb->ccbworkingQ[ccb_get_index];
3107 ccb_get_index++;
3108 if (ccb_get_index >= ARCMSR_MAX_FREECCB_NUM)
3109 ccb_get_index = ccb_get_index - ARCMSR_MAX_FREECCB_NUM;
3110 if (ccb_put_index != ccb_get_index) {
3111 acb->ccb_get_index = ccb_get_index;
3112 arcmsr_init_list_head(&ccb->complete_queue_pointer);
3113 ccb->ccb_state = ARCMSR_CCB_UNBUILD;
3114 } else {
3115 ccb = NULL;
3116 }
3117 mutex_exit(&acb->workingQ_mutex);
3118 return (ccb);
3119 }
3120
3121
3122 static void
3123 arcmsr_free_ccb(struct CCB *ccb)
3124 {
3125 struct ACB *acb = ccb->acb;
3126
3127 if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3128 return;
3129 }
3130 mutex_enter(&acb->workingQ_mutex);
3131 ccb->ccb_state = ARCMSR_CCB_FREE;
3132 ccb->pkt = NULL;
3133 ccb->pkt_dma_handle = NULL;
3134 ccb->ccb_flags = 0;
3135 acb->ccbworkingQ[acb->ccb_put_index] = ccb;
3136 acb->ccb_put_index++;
3137 if (acb->ccb_put_index >= ARCMSR_MAX_FREECCB_NUM)
3138 acb->ccb_put_index =
3139 acb->ccb_put_index - ARCMSR_MAX_FREECCB_NUM;
3140 mutex_exit(&acb->workingQ_mutex);
3141 }
3142
3143
3144 static void
3145 arcmsr_ccbs_timeout(void *arg)
3146 {
3147 struct ACB *acb = (struct ACB *)arg;
3148 struct CCB *ccb;
3149 int i, timeout_count = 0;
3150 uint32_t intmask_org;
3151 time_t current_time = ddi_get_time();
3152
3153 intmask_org = arcmsr_disable_allintr(acb);
3154 mutex_enter(&acb->isr_mutex);
3155 if (acb->ccboutstandingcount != 0) {
3156 /* check each ccb */
3157 i = ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
3158 DDI_DMA_SYNC_FORKERNEL);
3159 if (i != DDI_SUCCESS) {
3160 if ((acb->timeout_id != 0) &&
3161 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3162 /* do pkt timeout check each 60 secs */
3163 acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3164 (void*)acb, (ARCMSR_TIMEOUT_WATCH *
3165 drv_usectohz(1000000)));
3166 }
3167 mutex_exit(&acb->isr_mutex);
3168 arcmsr_enable_allintr(acb, intmask_org);
3169 return;
3170 }
3172 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3173 ccb = acb->pccb_pool[i];
3174 if (ccb->acb != acb) {
3175 break;
3176 }
3177 if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3178 continue;
3179 }
3180 if (ccb->pkt == NULL) {
3181 continue;
3182 }
3183 if (ccb->pkt->pkt_time == 0) {
3184 continue;
3185 }
3186 if (ccb->ccb_time >= current_time) {
3187 continue;
3188 }
3189 int id = ccb->pkt->pkt_address.a_target;
3190 int lun = ccb->pkt->pkt_address.a_lun;
3191 if (ccb->ccb_state == ARCMSR_CCB_START) {
3192 uint8_t *cdb = (uint8_t *)&ccb->arcmsr_cdb.Cdb;
3193
3194 timeout_count++;
3195 arcmsr_warn(acb,
3196 "scsi target %d lun %d cmd=0x%x "
3197 "command timeout, ccb=0x%p",
3198 id, lun, *cdb, (void *)ccb);
3199 ccb->ccb_state = ARCMSR_CCB_TIMEOUT;
3200 ccb->pkt->pkt_reason = CMD_TIMEOUT;
3201 ccb->pkt->pkt_statistics = STAT_TIMEOUT;
3202 /* acb->devstate[id][lun] = ARECA_RAID_GONE; */
3203 arcmsr_ccb_complete(ccb, 1);
3204 continue;
3205 } else if ((ccb->ccb_state & ARCMSR_CCB_CAN_BE_FREE) ==
3206 ARCMSR_CCB_CAN_BE_FREE) {
3207 arcmsr_free_ccb(ccb);
3208 }
3209 }
3210 }
3211 if ((acb->timeout_id != 0) &&
3212 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3213 /* do pkt timeout check each 60 secs */
3214 acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3215 (void*)acb, (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
3216 }
3217 mutex_exit(&acb->isr_mutex);
3218 arcmsr_enable_allintr(acb, intmask_org);
3219 }
3220
3221 static void
3222 arcmsr_abort_dr_ccbs(struct ACB *acb, uint16_t target, uint8_t lun)
3223 {
3224 struct CCB *ccb;
3225 uint32_t intmask_org;
3226 int i;
3227
3228 /* disable all outbound interrupts */
3229 intmask_org = arcmsr_disable_allintr(acb);
3230 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3231 ccb = acb->pccb_pool[i];
3232 if (ccb->ccb_state == ARCMSR_CCB_START) {
3233 if ((target == ccb->pkt->pkt_address.a_target) &&
3234 (lun == ccb->pkt->pkt_address.a_lun)) {
3235 ccb->ccb_state = ARCMSR_CCB_ABORTED;
3236 ccb->pkt->pkt_reason = CMD_ABORTED;
3237 ccb->pkt->pkt_statistics |= STAT_ABORTED;
3238 arcmsr_ccb_complete(ccb, 1);
3239 arcmsr_log(acb, CE_NOTE,
3240 "abort T%dL%d ccb", target, lun);
3241 }
3242 }
3243 }
3244 /* enable outbound Post Queue, outbound doorbell Interrupt */
3245 arcmsr_enable_allintr(acb, intmask_org);
3246 }
3247
3248 static int
3249 arcmsr_scsi_device_probe(struct ACB *acb, uint16_t tgt, uint8_t lun)
3250 {
3251 struct scsi_device sd;
3252 dev_info_t *child;
3253 int rval;
3254
3255 bzero(&sd, sizeof (struct scsi_device));
3256 sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
3257 sd.sd_address.a_target = (uint16_t)tgt;
3258 sd.sd_address.a_lun = (uint8_t)lun;
3259 if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
3260 rval = scsi_hba_probe(&sd, NULL);
3261 if (rval == SCSIPROBE_EXISTS) {
3262 rval = ndi_devi_online(child, NDI_ONLINE_ATTACH);
3263 if (rval != NDI_SUCCESS) {
3264 arcmsr_warn(acb, "unable to online T%dL%d",
3265 tgt, lun);
3266 } else {
3267 arcmsr_log(acb, CE_NOTE, "T%dL%d onlined",
3268 tgt, lun);
3269 }
3270 }
3271 } else {
3272 rval = scsi_hba_probe(&sd, NULL);
3273 if (rval == SCSIPROBE_EXISTS)
3274 rval = arcmsr_config_child(acb, &sd, NULL);
3275 }
3276 scsi_unprobe(&sd);
3277 return (rval);
3278 }
3279
3280 static void
3281 arcmsr_dr_handle(struct ACB *acb)
3282 {
3283 char *acb_dev_map = (char *)acb->device_map;
3284 char *devicemap;
3285 char temp;
3286 uint16_t target;
3287 uint8_t lun;
3288 char diff;
3289 int circ = 0;
3290 dev_info_t *dip;
3291 ddi_acc_handle_t reg;
3292
3293 switch (acb->adapter_type) {
3294 case ACB_ADAPTER_TYPE_A:
3295 {
3296 struct HBA_msgUnit *phbamu;
3297
3298 phbamu = (struct HBA_msgUnit *)acb->pmu;
3299 devicemap = (char *)&phbamu->msgcode_rwbuffer[21];
3300 reg = acb->reg_mu_acc_handle0;
3301 break;
3302 }
3303
3304 case ACB_ADAPTER_TYPE_B:
3305 {
3306 struct HBB_msgUnit *phbbmu;
3307
3308 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3309 devicemap = (char *)
3310 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[21];
3311 reg = acb->reg_mu_acc_handle1;
3312 break;
3313 }
3314
3315 case ACB_ADAPTER_TYPE_C:
3316 {
3317 struct HBC_msgUnit *phbcmu;
3318
3319 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3320 devicemap = (char *)&phbcmu->msgcode_rwbuffer[21];
3321 reg = acb->reg_mu_acc_handle0;
3322 break;
3323 }
3324
3325 }
3326
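/*
 * Each byte of the firmware device map is a bitmap of the LUNs
 * present behind one target. XOR against our cached copy to find
 * LUNs that have appeared or vanished since the last poll.
 */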
3327 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
3328 temp = CHIP_REG_READ8(reg, devicemap);
3329 diff = (*acb_dev_map) ^ temp;
3330 if (diff != 0) {
3331 *acb_dev_map = temp;
3332 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
3333 if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
3334 ndi_devi_enter(acb->dev_info, &circ);
3335 acb->devstate[target][lun] =
3336 ARECA_RAID_GOOD;
3337 (void) arcmsr_scsi_device_probe(acb,
3338 target, lun);
3339 ndi_devi_exit(acb->dev_info, circ);
3340 arcmsr_log(acb, CE_NOTE,
3341 "T%dL%d on-line", target, lun);
3342 } else if ((temp & 0x01) == 0 &&
3343 (diff & 0x01) == 1) {
3344 dip = arcmsr_find_child(acb, target,
3345 lun);
3346 if (dip != NULL) {
3347 acb->devstate[target][lun] =
3348 ARECA_RAID_GONE;
3349 if (mutex_owned(&acb->
3350 isr_mutex)) {
3351 arcmsr_abort_dr_ccbs(
3352 acb, target, lun);
3353 (void)
3354 ndi_devi_offline(
3355 dip,
3356 NDI_DEVI_REMOVE |
3357 NDI_DEVI_OFFLINE);
3358 } else {
3359 mutex_enter(&acb->
3360 isr_mutex);
3361 arcmsr_abort_dr_ccbs(
3362 acb, target, lun);
3363 (void)
3364 ndi_devi_offline(
3365 dip,
3366 NDI_DEVI_REMOVE |
3367 NDI_DEVI_OFFLINE);
3368 mutex_exit(&acb->
3369 isr_mutex);
3370 }
3371 }
3372 arcmsr_log(acb, CE_NOTE,
3373 "T%dL%d off-line", target, lun);
3374 }
3375 temp >>= 1;
3376 diff >>= 1;
3377 }
3378 }
3379 devicemap++;
3380 acb_dev_map++;
3381 }
3382 }
3383
3384
3385 static void
3386 arcmsr_devMap_monitor(void *arg)
3387 {
3388
3389 struct ACB *acb = (struct ACB *)arg;
3390 switch (acb->adapter_type) {
3391 case ACB_ADAPTER_TYPE_A:
3392 {
3393 struct HBA_msgUnit *phbamu;
3394
3395 phbamu = (struct HBA_msgUnit *)acb->pmu;
3396 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3397 &phbamu->inbound_msgaddr0,
3398 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3399 break;
3400 }
3401
3402 case ACB_ADAPTER_TYPE_B:
3403 {
3404 struct HBB_msgUnit *phbbmu;
3405
3406 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3407 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3408 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3409 ARCMSR_MESSAGE_GET_CONFIG);
3410 break;
3411 }
3412
3413 case ACB_ADAPTER_TYPE_C:
3414 {
3415 struct HBC_msgUnit *phbcmu;
3416
3417 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3418 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3419 &phbcmu->inbound_msgaddr0,
3420 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3421 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3422 &phbcmu->inbound_doorbell,
3423 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3424 break;
3425 }
3426
3427 }
3428
3429 if ((acb->timeout_id != 0) &&
3430 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3431 /* recheck the device map in another 5 secs */
3432 acb->timeout_sc_id = timeout(arcmsr_devMap_monitor, (void *)acb,
3433 (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
3434 }
3435 }
3436
3437
3438 static uint32_t
3439 arcmsr_disable_allintr(struct ACB *acb)
3440 {
3441 uint32_t intmask_org;
3442
3443 switch (acb->adapter_type) {
3444 case ACB_ADAPTER_TYPE_A:
3445 {
3446 struct HBA_msgUnit *phbamu;
3447
3448 phbamu = (struct HBA_msgUnit *)acb->pmu;
3449 /* disable all outbound interrupt */
3450 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3451 &phbamu->outbound_intmask);
3452 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3453 &phbamu->outbound_intmask,
3454 intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
3455 break;
3456 }
3457
3458 case ACB_ADAPTER_TYPE_B:
3459 {
3460 struct HBB_msgUnit *phbbmu;
3461
3462 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3463 /* disable all outbound interrupt */
3464 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3465 &phbbmu->hbb_doorbell->iop2drv_doorbell_mask);
3466 /* disable all interrupts */
3467 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3468 &phbbmu->hbb_doorbell->iop2drv_doorbell_mask, 0);
3469 break;
3470 }
3471
3472 case ACB_ADAPTER_TYPE_C:
3473 {
3474 struct HBC_msgUnit *phbcmu;
3475
3476 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3477 /* disable all outbound interrupt */
3478 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3479 &phbcmu->host_int_mask); /* disable outbound message0 int */
3480 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3481 &phbcmu->host_int_mask,
3482 intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
3483 break;
3484 }
3485
3486 }
3487 return (intmask_org);
3488 }
3489
3490
3491 static void
3492 arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org)
3493 {
3494 int mask;
3495
3496 switch (acb->adapter_type) {
3497 case ACB_ADAPTER_TYPE_A:
3498 {
3499 struct HBA_msgUnit *phbamu;
3500
3501 phbamu = (struct HBA_msgUnit *)acb->pmu;
3502 /*
3503 * enable outbound Post Queue, outbound doorbell message0
3504 * Interrupt
3505 */
3506 mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
3507 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
3508 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
3509 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3510 &phbamu->outbound_intmask, intmask_org & mask);
3511 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
3512 break;
3513 }
3514
3515 case ACB_ADAPTER_TYPE_B:
3516 {
3517 struct HBB_msgUnit *phbbmu;
3518
3519 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3520 mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK |
3521 ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE |
3522 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
3523 /* 1=interrupt enable, 0=interrupt disable */
3524 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3525 &phbbmu->hbb_doorbell->iop2drv_doorbell_mask,
3526 intmask_org | mask);
3527 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
3528 break;
3529 }
3530
3531 case ACB_ADAPTER_TYPE_C:
3532 {
3533 struct HBC_msgUnit *phbcmu;
3534
3535 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3536 /* enable outbound Post Queue,outbound doorbell Interrupt */
3537 mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK |
3538 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK |
3539 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
3540 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3541 &phbcmu->host_int_mask, intmask_org & mask);
3542 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
3543 break;
3544 }
3545
3546 }
3547 }
3548
3549
3550 static void
3551 arcmsr_iop_parking(struct ACB *acb)
3552 {
3553 /* stop adapter background rebuild */
3554 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
3555 uint32_t intmask_org;
3556
3557 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
3558 /* disable all outbound interrupt */
3559 intmask_org = arcmsr_disable_allintr(acb);
3560 switch (acb->adapter_type) {
3561 case ACB_ADAPTER_TYPE_A:
3562 arcmsr_stop_hba_bgrb(acb);
3563 arcmsr_flush_hba_cache(acb);
3564 break;
3565
3566 case ACB_ADAPTER_TYPE_B:
3567 arcmsr_stop_hbb_bgrb(acb);
3568 arcmsr_flush_hbb_cache(acb);
3569 break;
3570
3571 case ACB_ADAPTER_TYPE_C:
3572 arcmsr_stop_hbc_bgrb(acb);
3573 arcmsr_flush_hbc_cache(acb);
3574 break;
3575 }
3576 /*
3577 * enable outbound Post Queue
3578 * enable outbound doorbell Interrupt
3579 */
3580 arcmsr_enable_allintr(acb, intmask_org);
3581 }
3582 }
3583
3584
3585 static uint8_t
3586 arcmsr_hba_wait_msgint_ready(struct ACB *acb)
3587 {
3588 uint32_t i;
3589 uint8_t retries = 0x00;
3590 struct HBA_msgUnit *phbamu;
3591
3592
3593 phbamu = (struct HBA_msgUnit *)acb->pmu;
3594
3595 do {
3596 for (i = 0; i < 100; i++) {
3597 if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3598 &phbamu->outbound_intstatus) &
3599 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
3600 /* clear interrupt */
3601 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3602 &phbamu->outbound_intstatus,
3603 ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
3604 return (TRUE);
3605 }
3606 drv_usecwait(10000);
3607 if (ddi_in_panic()) {
3608 /* clear interrupts */
3609 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3610 &phbamu->outbound_intstatus,
3611 ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
3612 return (TRUE);
3613 }
3614 } /* max 1 second */
3615 } while (retries++ < 20); /* max 20 seconds */
3616 return (FALSE);
3617 }
3618
3619
3620 static uint8_t
3621 arcmsr_hbb_wait_msgint_ready(struct ACB *acb)
3622 {
3623 struct HBB_msgUnit *phbbmu;
3624 uint32_t i;
3625 uint8_t retries = 0x00;
3626
3627 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3628
3629 do {
3630 for (i = 0; i < 100; i++) {
3631 if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3632 &phbbmu->hbb_doorbell->iop2drv_doorbell) &
3633 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
3634 /* clear interrupt */
3635 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3636 &phbbmu->hbb_doorbell->iop2drv_doorbell,
3637 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
3638 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3639 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3640 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3641 return (TRUE);
3642 }
3643 drv_usecwait(10000);
3644 if (ddi_in_panic()) {
3645 /* clear interrupts */
3646 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3647 &phbbmu->hbb_doorbell->iop2drv_doorbell,
3648 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
3649 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3650 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3651 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3652 return (TRUE);
3653 }
3654 } /* max 1 second */
3655 } while (retries++ < 20); /* max 20 seconds */
3656
3657 return (FALSE);
3658 }
3659
3660
3661 static uint8_t
3662 arcmsr_hbc_wait_msgint_ready(struct ACB *acb)
3663 {
3664 uint32_t i;
3665 uint8_t retries = 0x00;
3666 struct HBC_msgUnit *phbcmu;
3667 uint32_t c = ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR;
3668
3669
3670 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3671
3672 do {
3673 for (i = 0; i < 100; i++) {
3674 if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3675 &phbcmu->outbound_doorbell) &
3676 ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
3677 /* clear interrupt */
3678 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3679 &phbcmu->outbound_doorbell_clear, c);
3680 return (TRUE);
3681 }
3682 drv_usecwait(10000);
3683 if (ddi_in_panic()) {
3684 /* clear interrupts */
3685 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3686 &phbcmu->outbound_doorbell_clear, c);
3687 return (TRUE);
3688 }
3689 } /* max 1 second */
3690 } while (retries++ < 20); /* max 20 seconds */
3691 return (FALSE);
3692 }
3693
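/*
 * The flush_cache routines ask the IOP to commit its write cache to
 * disk.  A flush of a large array can take a long time, so the
 * message is retried up to 30 times; with the 20 second message
 * timeout above, that allows roughly 10 minutes.
 */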
3694 static void
3695 arcmsr_flush_hba_cache(struct ACB *acb)
3696 {
3697 struct HBA_msgUnit *phbamu;
3698 int retry_count = 30;
3699
3700 /* allow up to 10 minutes (30 retries x 20s) for the cache flush */
3701
3702 phbamu = (struct HBA_msgUnit *)acb->pmu;
3703
3704 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3705 ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3706 do {
3707 if (arcmsr_hba_wait_msgint_ready(acb)) {
3708 break;
3709 } else {
3710 retry_count--;
3711 }
3712 } while (retry_count != 0);
3713 }
3714
3715
3716
3717 static void
3718 arcmsr_flush_hbb_cache(struct ACB *acb)
3719 {
3720 struct HBB_msgUnit *phbbmu;
3721 int retry_count = 30;
3722
3723 /* allow up to 10 minutes (30 retries x 20s) for the cache flush */
3724
3725 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3726 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3727 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3728 ARCMSR_MESSAGE_FLUSH_CACHE);
3729 do {
3730 if (arcmsr_hbb_wait_msgint_ready(acb)) {
3731 break;
3732 } else {
3733 retry_count--;
3734 }
3735 } while (retry_count != 0);
3736 }
3737
3738
3739 static void
3740 arcmsr_flush_hbc_cache(struct ACB *acb)
3741 {
3742 struct HBC_msgUnit *phbcmu;
3743 int retry_count = 30;
3744
3745 /* allow up to 10 minutes (30 retries x 20s) for the cache flush */
3746
3747 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3748
3749 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3750 ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3751 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3752 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3753 do {
3754 if (arcmsr_hbc_wait_msgint_ready(acb)) {
3755 break;
3756 } else {
3757 retry_count--;
3758 }
3759 } while (retry_count != 0);
3760 }
3761
3762
3763
3764 static uint8_t
3765 arcmsr_abort_hba_allcmd(struct ACB *acb)
3766 {
3767 struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
3768
3769 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3770 ARCMSR_INBOUND_MESG0_ABORT_CMD);
3771
3772 if (!arcmsr_hba_wait_msgint_ready(acb)) {
3773 arcmsr_warn(acb,
3774 "timeout while waiting for 'abort all "
3775 "outstanding commands'");
3776 return (0xff);
3777 }
3778 return (0x00);
3779 }
3780
3781
3782
3783 static uint8_t
3784 arcmsr_abort_hbb_allcmd(struct ACB *acb)
3785 {
3786 struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
3787
3788 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3789 &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
3790
3791 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
3792 arcmsr_warn(acb,
3793 "timeout while waiting for 'abort all "
3794 "outstanding commands'");
3795 return (0xff);
3796 }
3797 return (0x00);
3798 }
3799
3800
3801 static uint8_t
3802 arcmsr_abort_hbc_allcmd(struct ACB *acb)
3803 {
3804 struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
3805
3806 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3807 ARCMSR_INBOUND_MESG0_ABORT_CMD);
3808 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3809 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3810
3811 if (!arcmsr_hbc_wait_msgint_ready(acb)) {
3812 arcmsr_warn(acb,
3813 "timeout while waiting for 'abort all "
3814 "outstanding commands'");
3815 return (0xff);
3816 }
3817 return (0x00);
3818 }
3819
3820
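/*
 * arcmsr_done4abort_postqueue() drains whatever the IOP had already
 * completed when an abort was issued: every reply still sitting in
 * the outbound post queue is popped, translated back into its CCB
 * via acb->vir2phy_offset, and passed to arcmsr_drain_donequeue()
 * along with its error state.
 */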
3821 static void
3822 arcmsr_done4abort_postqueue(struct ACB *acb)
3823 {
3824
3825 struct CCB *ccb;
3826 uint32_t flag_ccb;
3827 int i = 0;
3828 boolean_t error;
3829
3830 switch (acb->adapter_type) {
3831 case ACB_ADAPTER_TYPE_A:
3832 {
3833 struct HBA_msgUnit *phbamu;
3834 uint32_t outbound_intstatus;
3835
3836 phbamu = (struct HBA_msgUnit *)acb->pmu;
3837 /* clear and abort all outbound posted Q */
3838 outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3839 &phbamu->outbound_intstatus) & acb->outbound_int_enable;
3840 /* clear interrupt */
3841 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3842 &phbamu->outbound_intstatus, outbound_intstatus);
3843 while (((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3844 &phbamu->outbound_queueport)) != 0xFFFFFFFF) &&
3845 (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
3846 /* frame must be 32 bytes aligned */
3847 /* the CDB is the first field of the CCB */
3848 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
3849 /* check if command done with no error */
3850 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
3851 B_TRUE : B_FALSE;
3852 arcmsr_drain_donequeue(acb, ccb, error);
3853 }
3854 break;
3855 }
3856
3857 case ACB_ADAPTER_TYPE_B:
3858 {
3859 struct HBB_msgUnit *phbbmu;
3860
3861 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3862 /* clear all outbound posted Q */
3863 /* clear doorbell interrupt */
3864 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3865 &phbbmu->hbb_doorbell->iop2drv_doorbell,
3866 ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
3867 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
3868 if ((flag_ccb = phbbmu->done_qbuffer[i]) != 0) {
3869 phbbmu->done_qbuffer[i] = 0;
3870 /* frame must be 32 bytes aligned */
3871 ccb = NumToPtr((acb->vir2phy_offset +
3872 (flag_ccb << 5)));
3873 /* check if command done with no error */
3874 error =
3875 (flag_ccb &
3876 ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
3877 B_TRUE : B_FALSE;
3878 arcmsr_drain_donequeue(acb, ccb, error);
3879 }
3880 phbbmu->post_qbuffer[i] = 0;
3881 } /* drain reply FIFO */
3882 phbbmu->doneq_index = 0;
3883 phbbmu->postq_index = 0;
3884 break;
3885 }
3886
3887 case ACB_ADAPTER_TYPE_C:
3888 {
3889 struct HBC_msgUnit *phbcmu;
3890 uint32_t ccb_cdb_phy;
3891
3892 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3893 while ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3894 &phbcmu->host_int_status) &
3895 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) &&
3896 (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
3897 /* pop the completed frame from the outbound post queue */
3898 flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3899 &phbcmu->outbound_queueport_low);
3900 /* frame must be 32 bytes aligned */
3901 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3902 ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
3903 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)?
3904 B_TRUE : B_FALSE;
3905 arcmsr_drain_donequeue(acb, ccb, error);
3906 }
3907 break;
3908 }
3909
3910 }
3911 }
3912 /*
3913 * Routine Description: try to get an echo from the IOP.
3914 * Arguments: acb
3915 * Return Value: 0 on success, 0xFF if the IOP fails to respond.
3916 */
3917 static uint8_t
3918 arcmsr_get_echo_from_iop(struct ACB *acb)
3919 {
3920 uint32_t intmask_org;
3921 uint8_t rtnval = 0;
3922
3923 if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3924 struct HBA_msgUnit *phbamu;
3925
3926 phbamu = (struct HBA_msgUnit *)acb->pmu;
3927 intmask_org = arcmsr_disable_allintr(acb);
3928 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3929 &phbamu->inbound_msgaddr0,
3930 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3931 if (!arcmsr_hba_wait_msgint_ready(acb)) {
3932 arcmsr_warn(acb, "timeout while waiting for an "
3933 "echo from the iop");
3934 acb->acb_flags |= ACB_F_BUS_HANG_ON;
3935 rtnval = 0xFF;
3936 }
3937 /* enable all outbound interrupt */
3938 arcmsr_enable_allintr(acb, intmask_org);
3939 }
3940 return (rtnval);
3941 }
3942
3943 /*
3944 * Routine Description: reset the 80331 IOP.
3945 * Arguments: acb
3946 * Return Value: 0 on success, 0xFF on failure.
3947 */
3948 static uint8_t
3949 arcmsr_iop_reset(struct ACB *acb)
3950 {
3951 struct CCB *ccb;
3952 uint32_t intmask_org;
3953 uint8_t rtnval = 0;
3954 int i = 0;
3955
3956 if (acb->ccboutstandingcount > 0) {
3957 /* disable all outbound interrupt */
3958 intmask_org = arcmsr_disable_allintr(acb);
3959 /* clear and abort all outbound posted Q */
3960 arcmsr_done4abort_postqueue(acb);
3961 /* tell the iop 331 that the outstanding commands were aborted */
3962 rtnval = (acb->acb_flags & ACB_F_BUS_HANG_ON) ?
3963 0xFF : arcmsr_abort_host_command(acb);
3964
3965 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3966 ccb = acb->pccb_pool[i];
3967 if (ccb->ccb_state == ARCMSR_CCB_START) {
3968 /* ccb->ccb_state = ARCMSR_CCB_RESET; */
3969 ccb->pkt->pkt_reason = CMD_RESET;
3970 ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
3971 arcmsr_ccb_complete(ccb, 1);
3972 }
3973 }
3974 atomic_and_32(&acb->ccboutstandingcount, 0);
3975 /* enable all outbound interrupt */
3976 arcmsr_enable_allintr(acb, intmask_org);
3977 } else {
3978 rtnval = arcmsr_get_echo_from_iop(acb);
3979 }
3980 return (rtnval);
3981 }
3982
3983
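/*
 * arcmsr_get_iop_rqbuffer()/arcmsr_get_iop_wqbuffer() return the
 * chip-specific message-unit queue buffers used to exchange ioctl
 * data with the IOP: the rqbuffer carries data from the IOP to the
 * driver, the wqbuffer data from the driver to the IOP.
 */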
3984 static struct QBUFFER *
3985 arcmsr_get_iop_rqbuffer(struct ACB *acb)
3986 {
3987 struct QBUFFER *qb;
3988
3989 switch (acb->adapter_type) {
3990 case ACB_ADAPTER_TYPE_A:
3991 {
3992 struct HBA_msgUnit *phbamu;
3993
3994 phbamu = (struct HBA_msgUnit *)acb->pmu;
3995 qb = (struct QBUFFER *)&phbamu->message_rbuffer;
3996 break;
3997 }
3998
3999 case ACB_ADAPTER_TYPE_B:
4000 {
4001 struct HBB_msgUnit *phbbmu;
4002
4003 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4004 qb = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
4005 break;
4006 }
4007
4008 case ACB_ADAPTER_TYPE_C:
4009 {
4010 struct HBC_msgUnit *phbcmu;
4011
4012 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4013 qb = (struct QBUFFER *)&phbcmu->message_rbuffer;
4014 break;
4015 }
4016
4017 }
4018 return (qb);
4019 }
4020
4021
4022 static struct QBUFFER *
4023 arcmsr_get_iop_wqbuffer(struct ACB *acb)
4024 {
4025 struct QBUFFER *qbuffer = NULL;
4026
4027 switch (acb->adapter_type) {
4028 case ACB_ADAPTER_TYPE_A:
4029 {
4030 struct HBA_msgUnit *phbamu;
4031
4032 phbamu = (struct HBA_msgUnit *)acb->pmu;
4033 qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
4034 break;
4035 }
4036
4037 case ACB_ADAPTER_TYPE_B:
4038 {
4039 struct HBB_msgUnit *phbbmu;
4040
4041 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4042 qbuffer = (struct QBUFFER *)
4043 &phbbmu->hbb_rwbuffer->message_wbuffer;
4044 break;
4045 }
4046
4047 case ACB_ADAPTER_TYPE_C:
4048 {
4049 struct HBC_msgUnit *phbcmu;
4050
4051 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4052 qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
4053 break;
4054 }
4055
4056 }
4057 return (qbuffer);
4058 }
4059
4060
4061
4062 static void
4063 arcmsr_iop_message_read(struct ACB *acb)
4064 {
4065 switch (acb->adapter_type) {
4066 case ACB_ADAPTER_TYPE_A:
4067 {
4068 struct HBA_msgUnit *phbamu;
4069
4070 phbamu = (struct HBA_msgUnit *)acb->pmu;
4071 /* let IOP know the data has been read */
4072 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4073 &phbamu->inbound_doorbell,
4074 ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
4075 break;
4076 }
4077
4078 case ACB_ADAPTER_TYPE_B:
4079 {
4080 struct HBB_msgUnit *phbbmu;
4081
4082 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4083 /* let IOP know the data has been read */
4084 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4085 &phbbmu->hbb_doorbell->drv2iop_doorbell,
4086 ARCMSR_DRV2IOP_DATA_READ_OK);
4087 break;
4088 }
4089
4090 case ACB_ADAPTER_TYPE_C:
4091 {
4092 struct HBC_msgUnit *phbcmu;
4093
4094 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4095 /* let IOP know data has been read */
4096 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4097 &phbcmu->inbound_doorbell,
4098 ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
4099 break;
4100 }
4101
4102 }
4103 }
4104
4105
4106
4107 static void
4108 arcmsr_iop_message_wrote(struct ACB *acb)
4109 {
4110 switch (acb->adapter_type) {
4111 case ACB_ADAPTER_TYPE_A: {
4112 struct HBA_msgUnit *phbamu;
4113
4114 phbamu = (struct HBA_msgUnit *)acb->pmu;
4115 /*
4116 * push the inbound doorbell to tell the IOP the driver data was
4117 * written; await the reply interrupt before the next Qbuffer post
4118 */
4119 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4120 &phbamu->inbound_doorbell,
4121 ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
4122 break;
4123 }
4124
4125 case ACB_ADAPTER_TYPE_B:
4126 {
4127 struct HBB_msgUnit *phbbmu;
4128
4129 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4130 /*
4131 * push the inbound doorbell to tell the IOP the driver data was
4132 * written successfully, then await the reply on the next hw
4133 * interrupt before the next Qbuffer post
4134 */
4135 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4136 &phbbmu->hbb_doorbell->drv2iop_doorbell,
4137 ARCMSR_DRV2IOP_DATA_WRITE_OK);
4138 break;
4139 }
4140
4141 case ACB_ADAPTER_TYPE_C:
4142 {
4143 struct HBC_msgUnit *phbcmu;
4144
4145 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4146 /*
4147 * push the inbound doorbell to tell the IOP the driver data was
4148 * written; await the reply interrupt before the next Qbuffer post
4149 */
4150 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4151 &phbcmu->inbound_doorbell,
4152 ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
4153 break;
4154 }
4155
4156 }
4157 }
4158
4159
4160
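/*
 * arcmsr_post_ioctldata2iop() copies pending ioctl data from the
 * driver's circular wqbuffer into the IOP's write queue buffer (at
 * most 124 bytes per post) and rings the inbound doorbell; the next
 * hardware interrupt signals that the IOP has consumed the data.
 */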
4161 static void
4162 arcmsr_post_ioctldata2iop(struct ACB *acb)
4163 {
4164 uint8_t *pQbuffer;
4165 struct QBUFFER *pwbuffer;
4166 uint8_t *iop_data;
4167 int32_t allxfer_len = 0;
4168
4169 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
4170 iop_data = (uint8_t *)pwbuffer->data;
4171 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
4172 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
4173 while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
4174 (allxfer_len < 124)) {
4175 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
4176 (void) memcpy(iop_data, pQbuffer, 1);
4177 acb->wqbuf_firstidx++;
4178 /* wrap the index to 0 at the end of the ring */
4179 acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4180 iop_data++;
4181 allxfer_len++;
4182 }
4183 pwbuffer->data_len = allxfer_len;
4184 /*
4185 * push inbound doorbell and wait reply at hwinterrupt
4186 * routine for next Qbuffer post
4187 */
4188 arcmsr_iop_message_wrote(acb);
4189 }
4190 }
4191
4192
4193
4194 static void
4195 arcmsr_stop_hba_bgrb(struct ACB *acb)
4196 {
4197 struct HBA_msgUnit *phbamu;
4198
4199 phbamu = (struct HBA_msgUnit *)acb->pmu;
4200
4201 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4202 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4203 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4204 if (!arcmsr_hba_wait_msgint_ready(acb))
4205 arcmsr_warn(acb,
4206 "timeout while waiting for background rebuild completion");
4207 }
4208
4209
4210 static void
4211 arcmsr_stop_hbb_bgrb(struct ACB *acb)
4212 {
4213 struct HBB_msgUnit *phbbmu;
4214
4215 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4216
4217 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4218 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4219 &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
4220
4221 if (!arcmsr_hbb_wait_msgint_ready(acb))
4222 arcmsr_warn(acb,
4223 "timeout while waiting for background rebuild completion");
4224 }
4225
4226
4227 static void
4228 arcmsr_stop_hbc_bgrb(struct ACB *acb)
4229 {
4230 struct HBC_msgUnit *phbcmu;
4231
4232 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4233
4234 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4235 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4236 &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4237 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4238 &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4239 if (!arcmsr_hbc_wait_msgint_ready(acb))
4240 arcmsr_warn(acb,
4241 "timeout while waiting for background rebuild completion");
4242 }
4243
4244
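/*
 * arcmsr_iop_message_xfer() implements the Areca pass-through
 * interface carried in a vendor-specific CDB: bytes 5-8 of the CDB
 * select the control code, and the packet's data buffer holds a
 * CMD_MESSAGE_FIELD with the message payload.  Read and write
 * requests are staged through the driver's circular rqbuffer and
 * wqbuffer rings rather than handed to the IOP directly.
 */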
4245 static int
4246 arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt)
4247 {
4248 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
4249 struct CCB *ccb = pkt->pkt_ha_private;
4250 struct buf *bp = ccb->bp;
4251 uint8_t *pQbuffer;
4252 int retvalue = 0, transfer_len = 0;
4253 char *buffer;
4254 uint32_t controlcode;
4255
4256
4257 /* 4 bytes: Areca io control code */
4258 controlcode =
4259 (uint32_t)pkt->pkt_cdbp[5] << 24 |
4260 (uint32_t)pkt->pkt_cdbp[6] << 16 |
4261 (uint32_t)pkt->pkt_cdbp[7] << 8 |
4262 (uint32_t)pkt->pkt_cdbp[8];
4263
4264 if (bp->b_flags & (B_PHYS | B_PAGEIO))
4265 bp_mapin(bp);
4266
4267 buffer = bp->b_un.b_addr;
4268 transfer_len = bp->b_bcount;
4269 if (transfer_len > sizeof (struct CMD_MESSAGE_FIELD)) {
4270 retvalue = ARCMSR_MESSAGE_FAIL;
4271 goto message_out;
4272 }
4273
4274 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)(intptr_t)buffer;
4275 switch (controlcode) {
4276 case ARCMSR_MESSAGE_READ_RQBUFFER:
4277 {
4278 uint8_t *ver_addr;
4279 uint8_t *ptmpQbuffer;
4280 int32_t allxfer_len = 0;
4281
4282 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
4283
4284 ptmpQbuffer = ver_addr;
4285 while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
4286 (allxfer_len < (MSGDATABUFLEN - 1))) {
4287 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
4288 (void) memcpy(ptmpQbuffer, pQbuffer, 1);
4289 acb->rqbuf_firstidx++;
4290 acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4291 ptmpQbuffer++;
4292 allxfer_len++;
4293 }
4294
4295 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4296 struct QBUFFER *prbuffer;
4297 uint8_t *iop_data;
4298 int32_t iop_len;
4299
4300 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4301 prbuffer = arcmsr_get_iop_rqbuffer(acb);
4302 iop_data = (uint8_t *)prbuffer->data;
4303 iop_len = (int32_t)prbuffer->data_len;
4304
4305 while (iop_len > 0) {
4306 pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
4307 (void) memcpy(pQbuffer, iop_data, 1);
4308 acb->rqbuf_lastidx++;
4309 acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
4310 iop_data++;
4311 iop_len--;
4312 }
4313 arcmsr_iop_message_read(acb);
4314 }
4315
4316 (void) memcpy(pcmdmessagefld->messagedatabuffer,
4317 ver_addr, allxfer_len);
4318 pcmdmessagefld->cmdmessage.Length = allxfer_len;
4319 pcmdmessagefld->cmdmessage.ReturnCode =
4320 ARCMSR_MESSAGE_RETURNCODE_OK;
4321 kmem_free(ver_addr, MSGDATABUFLEN);
4322 break;
4323 }
4324
4325 case ARCMSR_MESSAGE_WRITE_WQBUFFER:
4326 {
4327 uint8_t *ver_addr;
4328 int32_t my_empty_len, user_len, wqbuf_firstidx,
4329 wqbuf_lastidx;
4330 uint8_t *ptmpuserbuffer;
4331
4332 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
4333
4334 ptmpuserbuffer = ver_addr;
4335 user_len = min(pcmdmessagefld->cmdmessage.Length,
4336 MSGDATABUFLEN);
4337 (void) memcpy(ptmpuserbuffer,
4338 pcmdmessagefld->messagedatabuffer, user_len);
4339 wqbuf_lastidx = acb->wqbuf_lastidx;
4340 wqbuf_firstidx = acb->wqbuf_firstidx;
4341 if (wqbuf_lastidx != wqbuf_firstidx) {
4342 struct scsi_arq_status *arq_status;
4343
4344 arcmsr_post_ioctldata2iop(acb);
4345 arq_status = (struct scsi_arq_status *)
4346 (intptr_t)(pkt->pkt_scbp);
4347 bzero((caddr_t)arq_status,
4348 sizeof (struct scsi_arq_status));
4349 arq_status->sts_rqpkt_reason = CMD_CMPLT;
4350 arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
4351 STATE_GOT_TARGET | STATE_SENT_CMD |
4352 STATE_XFERRED_DATA | STATE_GOT_STATUS);
4353
4354 arq_status->sts_rqpkt_statistics =
4355 pkt->pkt_statistics;
4356 arq_status->sts_rqpkt_resid = 0;
4357 { /* fill in the sense data embedded in the arq status */
4358 struct scsi_extended_sense *sts_sensedata;
4359
4360 sts_sensedata = &arq_status->sts_sensedata;
4361
4362 /* has error report sensedata */
4363 sts_sensedata->es_code = 0x0;
4364 sts_sensedata->es_valid = 0x01;
4365 sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
4366 /* AdditionalSenseLength */
4367 sts_sensedata->es_add_len = 0x0A;
4368 /* AdditionalSenseCode */
4369 sts_sensedata->es_add_code = 0x20;
4370 }
4371 retvalue = ARCMSR_MESSAGE_FAIL;
4372 } else {
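/*
 * Free space in the circular wqbuffer, holding one slot back so that
 * firstidx == lastidx always means "empty".  The mask works because
 * ARCMSR_MAX_QBUFFER is a power of two (which the mask arithmetic
 * requires); e.g. for a 4096-entry ring, firstidx = 10 and
 * lastidx = 20 give (10 - 20 - 1) & 4095 = 4085 free slots.
 */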
4373 my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1) &
4374 (ARCMSR_MAX_QBUFFER - 1);
4375 if (my_empty_len >= user_len) {
4376 while (user_len > 0) {
4377 pQbuffer = &acb->wqbuffer[
4378 acb->wqbuf_lastidx];
4379 (void) memcpy(pQbuffer,
4380 ptmpuserbuffer, 1);
4381 acb->wqbuf_lastidx++;
4382 acb->wqbuf_lastidx %=
4383 ARCMSR_MAX_QBUFFER;
4384 ptmpuserbuffer++;
4385 user_len--;
4386 }
4387 if (acb->acb_flags &
4388 ACB_F_MESSAGE_WQBUFFER_CLEARED) {
4389 acb->acb_flags &=
4390 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
4391 arcmsr_post_ioctldata2iop(acb);
4392 }
4393 } else {
4394 struct scsi_arq_status *arq_status;
4395
4396 /* has error report sensedata */
4397 arq_status = (struct scsi_arq_status *)
4398 (intptr_t)(pkt->pkt_scbp);
4399 bzero((caddr_t)arq_status,
4400 sizeof (struct scsi_arq_status));
4401 arq_status->sts_rqpkt_reason = CMD_CMPLT;
4402 arq_status->sts_rqpkt_state =
4403 (STATE_GOT_BUS |
4404 STATE_GOT_TARGET | STATE_SENT_CMD |
4405 STATE_XFERRED_DATA | STATE_GOT_STATUS);
4406 arq_status->sts_rqpkt_statistics =
4407 pkt->pkt_statistics;
4408 arq_status->sts_rqpkt_resid = 0;
4409 { /* fill in the sense data embedded in the arq status */
4410 struct scsi_extended_sense *
4411 sts_sensedata;
4412
4413 sts_sensedata =
4414 &arq_status->sts_sensedata;
4415
4416 /* has error report sensedata */
4417 sts_sensedata->es_code = 0x0;
4418 sts_sensedata->es_valid = 0x01;
4419 sts_sensedata->es_key =
4420 KEY_ILLEGAL_REQUEST;
4421 /* AdditionalSenseLength */
4422 sts_sensedata->es_add_len = 0x0A;
4423 /* AdditionalSenseCode */
4424 sts_sensedata->es_add_code = 0x20;
4425 }
4426 retvalue = ARCMSR_MESSAGE_FAIL;
4427 }
4428 }
4429 kmem_free(ver_addr, MSGDATABUFLEN);
4430 break;
4431 }
4432
4433 case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
4434 pQbuffer = acb->rqbuffer;
4435
4436 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4437 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4438 arcmsr_iop_message_read(acb);
4439 }
4440 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
4441 acb->rqbuf_firstidx = 0;
4442 acb->rqbuf_lastidx = 0;
4443 (void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
4444 pcmdmessagefld->cmdmessage.ReturnCode =
4445 ARCMSR_MESSAGE_RETURNCODE_OK;
4446 break;
4447 case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
4448 pQbuffer = acb->wqbuffer;
4449
4450 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4451 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4452 arcmsr_iop_message_read(acb);
4453 }
4454 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4455 ACB_F_MESSAGE_WQBUFFER_READ);
4456 acb->wqbuf_firstidx = 0;
4457 acb->wqbuf_lastidx = 0;
4458 (void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
4459 pcmdmessagefld->cmdmessage.ReturnCode =
4460 ARCMSR_MESSAGE_RETURNCODE_OK;
4461 break;
4462 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
4463
4464 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4465 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4466 arcmsr_iop_message_read(acb);
4467 }
4468 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4469 ACB_F_MESSAGE_RQBUFFER_CLEARED |
4470 ACB_F_MESSAGE_WQBUFFER_READ);
4471 acb->rqbuf_firstidx = 0;
4472 acb->rqbuf_lastidx = 0;
4473 acb->wqbuf_firstidx = 0;
4474 acb->wqbuf_lastidx = 0;
4475 pQbuffer = acb->rqbuffer;
4476 (void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
4477 pQbuffer = acb->wqbuffer;
4478 (void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
4479 pcmdmessagefld->cmdmessage.ReturnCode =
4480 ARCMSR_MESSAGE_RETURNCODE_OK;
4481 break;
4482
4483 case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
4484 pcmdmessagefld->cmdmessage.ReturnCode =
4485 ARCMSR_MESSAGE_RETURNCODE_3F;
4486 break;
4487 /*
4488 * Not supported - ARCMSR_MESSAGE_SAY_HELLO
4489 */
4490 case ARCMSR_MESSAGE_SAY_GOODBYE:
4491 arcmsr_iop_parking(acb);
4492 break;
4493 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
4494 switch (acb->adapter_type) {
4495 case ACB_ADAPTER_TYPE_A:
4496 arcmsr_flush_hba_cache(acb);
4497 break;
4498 case ACB_ADAPTER_TYPE_B:
4499 arcmsr_flush_hbb_cache(acb);
4500 break;
4501 case ACB_ADAPTER_TYPE_C:
4502 arcmsr_flush_hbc_cache(acb);
4503 break;
4504 }
4505 break;
4506 default:
4507 retvalue = ARCMSR_MESSAGE_FAIL;
4508 }
4509
4510 message_out:
4511
4512 return (retvalue);
4513 }
4514
4515
4516
4517
4518 static void
4519 arcmsr_pcidev_disattach(struct ACB *acb)
4520 {
4521 struct CCB *ccb;
4522 int i = 0;
4523
4524 /* disable all outbound interrupts */
4525 (void) arcmsr_disable_allintr(acb);
4526 /* stop adapter background rebuild */
4527 switch (acb->adapter_type) {
4528 case ACB_ADAPTER_TYPE_A:
4529 arcmsr_stop_hba_bgrb(acb);
4530 arcmsr_flush_hba_cache(acb);
4531 break;
4532 case ACB_ADAPTER_TYPE_B:
4533 arcmsr_stop_hbb_bgrb(acb);
4534 arcmsr_flush_hbb_cache(acb);
4535 break;
4536 case ACB_ADAPTER_TYPE_C:
4537 arcmsr_stop_hbc_bgrb(acb);
4538 arcmsr_flush_hbc_cache(acb);
4539 break;
4540 }
4541 /* abort all outstanding commands */
4542 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
4543 acb->acb_flags &= ~ACB_F_IOP_INITED;
4544
4545 if (acb->ccboutstandingcount != 0) {
4546 /* clear and abort all outbound posted Q */
4547 arcmsr_done4abort_postqueue(acb);
4548 /* tell the iop that the outstanding commands were aborted */
4549 (void) arcmsr_abort_host_command(acb);
4550
4551 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
4552 ccb = acb->pccb_pool[i];
4553 if (ccb->ccb_state == ARCMSR_CCB_START) {
4554 /* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
4555 ccb->pkt->pkt_reason = CMD_ABORTED;
4556 ccb->pkt->pkt_statistics |= STAT_ABORTED;
4557 arcmsr_ccb_complete(ccb, 1);
4558 }
4559 }
4560 }
4561 }
4562
4563 /* get firmware miscellaneous data */
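/*
 * The GET_CONFIG reply is laid out as 32-bit words in
 * msgcode_rwbuffer: word 1 holds the firmware request length, word 2
 * the queue depth, word 3 the SDRAM size and word 4 the channel
 * count, while the model name (word 15), firmware version (word 17)
 * and device map (word 21) are copied out byte by byte below.
 */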
4564 static void
4565 arcmsr_get_hba_config(struct ACB *acb)
4566 {
4567 struct HBA_msgUnit *phbamu;
4568
4569 char *acb_firm_model;
4570 char *acb_firm_version;
4571 char *acb_device_map;
4572 char *iop_firm_model;
4573 char *iop_firm_version;
4574 char *iop_device_map;
4575 int count;
4576
4577 phbamu = (struct HBA_msgUnit *)acb->pmu;
4578 acb_firm_model = acb->firm_model;
4579 acb_firm_version = acb->firm_version;
4580 acb_device_map = acb->device_map;
4581 /* firm_model, 15 */
4582 iop_firm_model =
4583 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4584 /* firm_version, 17 */
4585 iop_firm_version =
4586 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4587
4588 /* device_map, 21 */
4589 iop_device_map =
4590 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4591
4592 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4593 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4594
4595 if (!arcmsr_hba_wait_msgint_ready(acb))
4596 arcmsr_warn(acb,
4597 "timeout while waiting for adapter firmware "
4598 "miscellaneous data");
4599
4600 count = 8;
4601 while (count) {
4602 *acb_firm_model = CHIP_REG_READ8(acb->reg_mu_acc_handle0,
4603 iop_firm_model);
4604 acb_firm_model++;
4605 iop_firm_model++;
4606 count--;
4607 }
4608
4609 count = 16;
4610 while (count) {
4611 *acb_firm_version =
4612 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4613 acb_firm_version++;
4614 iop_firm_version++;
4615 count--;
4616 }
4617
4618 count = 16;
4619 while (count) {
4620 *acb_device_map =
4621 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4622 acb_device_map++;
4623 iop_device_map++;
4624 count--;
4625 }
4626
4627 arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4628 acb->firm_version);
4629
4630 /* firm_request_len, 1 */
4631 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4632 &phbamu->msgcode_rwbuffer[1]);
4633 /* firm_numbers_queue, 2 */
4634 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4635 &phbamu->msgcode_rwbuffer[2]);
4636 /* firm_sdram_size, 3 */
4637 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4638 &phbamu->msgcode_rwbuffer[3]);
4639 /* firm_ide_channels, 4 */
4640 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4641 &phbamu->msgcode_rwbuffer[4]);
4642 }
4643
4644 /* get firmware miscellaneous data */
4645 static void
4646 arcmsr_get_hbb_config(struct ACB *acb)
4647 {
4648 struct HBB_msgUnit *phbbmu;
4649 char *acb_firm_model;
4650 char *acb_firm_version;
4651 char *acb_device_map;
4652 char *iop_firm_model;
4653 char *iop_firm_version;
4654 char *iop_device_map;
4655 int count;
4656
4657 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4658 acb_firm_model = acb->firm_model;
4659 acb_firm_version = acb->firm_version;
4660 acb_device_map = acb->device_map;
4661 /* firm_model, 15 */
4662 iop_firm_model = (char *)
4663 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4664 /* firm_version, 17 */
4665 iop_firm_version = (char *)
4666 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4667 /* device_map, 21 */
4668 iop_device_map = (char *)
4669 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4670
4671 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4672 &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
4673
4674 if (!arcmsr_hbb_wait_msgint_ready(acb))
4675 arcmsr_warn(acb,
4676 "timeout while waiting for adapter firmware "
4677 "miscellaneous data");
4678
4679 count = 8;
4680 while (count) {
4681 *acb_firm_model =
4682 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_model);
4683 acb_firm_model++;
4684 iop_firm_model++;
4685 count--;
4686 }
4687 count = 16;
4688 while (count) {
4689 *acb_firm_version =
4690 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_version);
4691 acb_firm_version++;
4692 iop_firm_version++;
4693 count--;
4694 }
4695 count = 16;
4696 while (count) {
4697 *acb_device_map =
4698 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_device_map);
4699 acb_device_map++;
4700 iop_device_map++;
4701 count--;
4702 }
4703
4704 arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4705 acb->firm_version);
4706
4707 /* firm_request_len, 1 */
4708 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4709 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1]);
4710 /* firm_numbers_queue, 2 */
4711 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4712 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2]);
4713 /* firm_sdram_size, 3 */
4714 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4715 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3]);
4716 /* firm_ide_channels, 4 */
4717 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4718 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4]);
4719 }
4720
4721
4722 /* get firmware miscellaneous data */
4723 static void
4724 arcmsr_get_hbc_config(struct ACB *acb)
4725 {
4726 struct HBC_msgUnit *phbcmu;
4727
4728 char *acb_firm_model;
4729 char *acb_firm_version;
4730 char *acb_device_map;
4731 char *iop_firm_model;
4732 char *iop_firm_version;
4733 char *iop_device_map;
4734 int count;
4735
4736 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4737 acb_firm_model = acb->firm_model;
4738 acb_firm_version = acb->firm_version;
4739 acb_device_map = acb->device_map;
4740 /* firm_model, 15 */
4741 iop_firm_model =
4742 (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4743 /* firm_version, 17 */
4744 iop_firm_version =
4745 (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4746 /* device_map, 21 */
4747 iop_device_map =
4748 (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4749 /* post "get config" instruction */
4750 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4751 &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4752 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4753 &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4754 if (!arcmsr_hbc_wait_msgint_ready(acb))
4755 arcmsr_warn(acb,
4756 "timeout while waiting for adapter firmware "
4757 "miscellaneous data");
4758 count = 8;
4759 while (count) {
4760 *acb_firm_model =
4761 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_model);
4762 acb_firm_model++;
4763 iop_firm_model++;
4764 count--;
4765 }
4766
4767 count = 16;
4768 while (count) {
4769 *acb_firm_version =
4770 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4771 acb_firm_version++;
4772 iop_firm_version++;
4773 count--;
4774 }
4775
4776 count = 16;
4777 while (count) {
4778 *acb_device_map =
4779 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4780 acb_device_map++;
4781 iop_device_map++;
4782 count--;
4783 }
4784
4785 arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4786 acb->firm_version);
4787
4788 /* firm_request_len, 1, 04-07 */
4789 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4790 &phbcmu->msgcode_rwbuffer[1]);
4791 /* firm_numbers_queue, 2, 08-11 */
4792 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4793 &phbcmu->msgcode_rwbuffer[2]);
4794 /* firm_sdram_size, 3, 12-15 */
4795 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4796 &phbcmu->msgcode_rwbuffer[3]);
4797 /* firm_ide_channels, 4, 16-19 */
4798 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4799 &phbcmu->msgcode_rwbuffer[4]);
4800 /* firm_cfg_version, 25, 100-103 */
4801 acb->firm_cfg_version = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4802 &phbcmu->msgcode_rwbuffer[25]);
4803 }
4804
4805
4806 /* start background rebuild */
4807 static void
4808 arcmsr_start_hba_bgrb(struct ACB *acb)
4809 {
4810 struct HBA_msgUnit *phbamu;
4811
4812 phbamu = (struct HBA_msgUnit *)acb->pmu;
4813
4814 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4815 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4816 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4817
4818 if (!arcmsr_hba_wait_msgint_ready(acb))
4819 arcmsr_warn(acb,
4820 "timeout while waiting for background rebuild to start");
4821 }
4822
4823
4824 static void
4825 arcmsr_start_hbb_bgrb(struct ACB *acb)
4826 {
4827 struct HBB_msgUnit *phbbmu;
4828
4829 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4830
4831 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4832 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4833 &phbbmu->hbb_doorbell->drv2iop_doorbell,
4834 ARCMSR_MESSAGE_START_BGRB);
4835
4836 if (!arcmsr_hbb_wait_msgint_ready(acb))
4837 arcmsr_warn(acb,
4838 "timeout while waiting for background rebuild to start");
4839 }
4840
4841
4842 static void
4843 arcmsr_start_hbc_bgrb(struct ACB *acb)
4844 {
4845 struct HBC_msgUnit *phbcmu;
4846
4847 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4848
4849 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4850 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4851 &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4852 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4853 &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4854 if (!arcmsr_hbc_wait_msgint_ready(acb))
4855 arcmsr_warn(acb,
4856 "timeout while waiting for background rebuild to start");
4857 }
4858
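/*
 * The polling_ccbdone() routines below reap completions without
 * interrupt service, for use while interrupts are disabled (during
 * reset or abort processing).  They spin on the reply FIFO with all
 * outbound interrupts masked, giving up after about 2.5 seconds
 * (100 polls of 25ms) when a specific poll_ccb is being waited for.
 */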
4859 static void
4860 arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4861 {
4862 struct HBA_msgUnit *phbamu;
4863 struct CCB *ccb;
4864 boolean_t error;
4865 uint32_t flag_ccb, outbound_intstatus, intmask_org;
4866 boolean_t poll_ccb_done = B_FALSE;
4867 uint32_t poll_count = 0;
4868
4869
4870 phbamu = (struct HBA_msgUnit *)acb->pmu;
4871
4872 polling_ccb_retry:
4873 /* TODO: Use correct offset and size for syncing? */
4874 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4875 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4876 return;
4877 intmask_org = arcmsr_disable_allintr(acb);
4878
4879 for (;;) {
4880 if ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4881 &phbamu->outbound_queueport)) == 0xFFFFFFFF) {
4882 if (poll_ccb_done) {
4883 /* no completed ccbs remain in the chip FIFO */
4884 break;
4885 } else {
4886 drv_usecwait(25000);
4887 if ((poll_count > 100) && (poll_ccb != NULL)) {
4888 break;
4889 }
4890 if (acb->ccboutstandingcount == 0) {
4891 break;
4892 }
4893 poll_count++;
4894 outbound_intstatus =
4895 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4896 &phbamu->outbound_intstatus) &
4897 acb->outbound_int_enable;
4898
4899 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4900 &phbamu->outbound_intstatus,
4901 outbound_intstatus); /* clear interrupt */
4902 }
4903 }
4904
4905 /* frame must be 32 bytes aligned */
4906 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4907
4908 /* check if command done with no error */
4909 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4910 B_TRUE : B_FALSE;
4911 if (poll_ccb != NULL)
4912 poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4913
4914 if (ccb->acb != acb) {
4915 arcmsr_warn(acb, "ccb got a wrong acb!");
4916 continue;
4917 }
4918 if (ccb->ccb_state != ARCMSR_CCB_START) {
4919 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
4920 ccb->ccb_state |= ARCMSR_CCB_BACK;
4921 ccb->pkt->pkt_reason = CMD_ABORTED;
4922 ccb->pkt->pkt_statistics |= STAT_ABORTED;
4923 arcmsr_ccb_complete(ccb, 1);
4924 continue;
4925 }
4926 arcmsr_report_ccb_state(acb, ccb, error);
4927 arcmsr_warn(acb,
4928 "polling op got unexpected ccb command done");
4929 continue;
4930 }
4931 arcmsr_report_ccb_state(acb, ccb, error);
4932 } /* drain reply FIFO */
4933 arcmsr_enable_allintr(acb, intmask_org);
4934 }
4935
4936
4937 static void
4938 arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4939 {
4940 struct HBB_msgUnit *phbbmu;
4941 struct CCB *ccb;
4942 uint32_t flag_ccb, intmask_org;
4943 boolean_t error;
4944 uint32_t poll_count = 0;
4945 int index;
4946 boolean_t poll_ccb_done = B_FALSE;
4947
4948
4949 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4950
4951
4952 polling_ccb_retry:
4953 /* Use correct offset and size for syncing */
4954 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4955 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4956 return;
4957
4958 intmask_org = arcmsr_disable_allintr(acb);
4959
4960 for (;;) {
4961 index = phbbmu->doneq_index;
4962 if ((flag_ccb = phbbmu->done_qbuffer[index]) == 0) {
4963 if (poll_ccb_done) {
4964 /* no completed ccbs remain in the chip FIFO */
4965 break;
4966 } else {
4967 drv_usecwait(25000);
4968 if ((poll_count > 100) && (poll_ccb != NULL))
4969 break;
4970 if (acb->ccboutstandingcount == 0)
4971 break;
4972 poll_count++;
4973 /* clear doorbell interrupt */
4974 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4975 &phbbmu->hbb_doorbell->iop2drv_doorbell,
4976 ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
4977 }
4978 }
4979
4980 phbbmu->done_qbuffer[index] = 0;
4981 index++;
4982 /* wrap the index to 0 at the end of the ring */
4983 index %= ARCMSR_MAX_HBB_POSTQUEUE;
4984 phbbmu->doneq_index = index;
4985 /* frame must be 32 bytes aligned */
4986 /* the CDB is the first field of the CCB */
4987 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4988
4989 /* check if command done with no error */
4990 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4991 B_TRUE : B_FALSE;
4992
4993 if (poll_ccb != NULL)
4994 poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4995 if (ccb->acb != acb) {
4996 arcmsr_warn(acb, "ccb got a wrong acb!");
4997 continue;
4998 }
4999 if (ccb->ccb_state != ARCMSR_CCB_START) {
5000 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
5001 ccb->ccb_state |= ARCMSR_CCB_BACK;
5002 ccb->pkt->pkt_reason = CMD_ABORTED;
5003 ccb->pkt->pkt_statistics |= STAT_ABORTED;
5004 arcmsr_ccb_complete(ccb, 1);
5005 continue;
5006 }
5007 arcmsr_report_ccb_state(acb, ccb, error);
5008 arcmsr_warn(acb,
5009 "polling op got unexpect ccb command done");
5010 continue;
5011 }
5012 arcmsr_report_ccb_state(acb, ccb, error);
5013 } /* drain reply FIFO */
5014 arcmsr_enable_allintr(acb, intmask_org);
5015 }
5016
5017
5018 static void
5019 arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
5020 {
5021
5022 struct HBC_msgUnit *phbcmu;
5023 struct CCB *ccb;
5024 boolean_t error;
5025 uint32_t ccb_cdb_phy;
5026 uint32_t flag_ccb, intmask_org;
5027 boolean_t poll_ccb_done = B_FALSE;
5028 uint32_t poll_count = 0;
5029
5030
5031 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5032
5033 polling_ccb_retry:
5034
5035 /* Use correct offset and size for syncing */
5036 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5037 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5038 return;
5039
5040 intmask_org = arcmsr_disable_allintr(acb);
5041
5042 for (;;) {
5043 if (!(CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5044 &phbcmu->host_int_status) &
5045 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
5046
5047 if (poll_ccb_done) {
5048 /* no completed ccbs remain in the chip FIFO */
5049 break;
5050 } else {
5051 drv_usecwait(25000);
5052 if ((poll_count > 100) && (poll_ccb != NULL)) {
5053 break;
5054 }
5055 if (acb->ccboutstandingcount == 0) {
5056 break;
5057 }
5058 poll_count++;
5059 }
5060 }
5061 flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5062 &phbcmu->outbound_queueport_low);
5063 /* frame must be 32 bytes aligned */
5064 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
5065 /* the CDB is the first field of the CCB */
5066 ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
5067
5068 /* check if command done with no error */
5069 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
5070 B_TRUE : B_FALSE;
5071 if (poll_ccb != NULL)
5072 poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
5073
5074 if (ccb->acb != acb) {
5075 arcmsr_warn(acb, "ccb got a wrong acb!");
5076 continue;
5077 }
5078 if (ccb->ccb_state != ARCMSR_CCB_START) {
5079 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
5080 ccb->ccb_state |= ARCMSR_CCB_BACK;
5081 ccb->pkt->pkt_reason = CMD_ABORTED;
5082 ccb->pkt->pkt_statistics |= STAT_ABORTED;
5083 arcmsr_ccb_complete(ccb, 1);
5084 continue;
5085 }
5086 arcmsr_report_ccb_state(acb, ccb, error);
5087 arcmsr_warn(acb,
5088 "polling op got unexpected ccb command done");
5089 continue;
5090 }
5091 arcmsr_report_ccb_state(acb, ccb, error);
5092 } /* drain reply FIFO */
5093 arcmsr_enable_allintr(acb, intmask_org);
5094 }
5095
5096
5097 /*
5098 * Function: arcmsr_hba_hardware_reset()
5099 * Works around an Intel IOP bug that can hang the firmware
5100 * and panic the kernel.
5101 */
5102 static void
5103 arcmsr_hba_hardware_reset(struct ACB *acb)
5104 {
5105 struct HBA_msgUnit *phbamu;
5106 uint8_t value[64];
5107 int i;
5108
5109 phbamu = (struct HBA_msgUnit *)acb->pmu;
5110 /* backup pci config data */
5111 for (i = 0; i < 64; i++) {
5112 value[i] = pci_config_get8(acb->pci_acc_handle, i);
5113 }
5114 /* hardware reset signal */
5115 if ((PCI_DEVICE_ID_ARECA_1680 ==
5116 pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID))) {
5117 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5118 &phbamu->reserved1[0], 0x00000003);
5119 } else {
5120 pci_config_put8(acb->pci_acc_handle, 0x84, 0x20);
5121 }
5122 drv_usecwait(1000000);
5123 /* write back pci config data */
5124 for (i = 0; i < 64; i++) {
5125 pci_config_put8(acb->pci_acc_handle, i, value[i]);
5126 }
5127 drv_usecwait(1000000);
5128 }
5129
5130 /*
5131 * Function: arcmsr_abort_host_command
5132 */
5133 static uint8_t
5134 arcmsr_abort_host_command(struct ACB *acb)
5135 {
5136 uint8_t rtnval = 0;
5137
5138 switch (acb->adapter_type) {
5139 case ACB_ADAPTER_TYPE_A:
5140 rtnval = arcmsr_abort_hba_allcmd(acb);
5141 break;
5142 case ACB_ADAPTER_TYPE_B:
5143 rtnval = arcmsr_abort_hbb_allcmd(acb);
5144 break;
5145 case ACB_ADAPTER_TYPE_C:
5146 rtnval = arcmsr_abort_hbc_allcmd(acb);
5147 break;
5148 }
5149 return (rtnval);
5150 }
5151
5152 /*
5153 * Function: arcmsr_handle_iop_bus_hold
5154 */
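/*
 * Called when the IOP appears to be hung.  For type A adapters the
 * chip is hard-reset, after which outbound_msgaddr1 is polled once a
 * second for up to a minute, waiting for the firmware to raise
 * ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK before the adapter is
 * re-initialized.
 */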
5155 static void
5156 arcmsr_handle_iop_bus_hold(struct ACB *acb)
5157 {
5158
5159 switch (acb->adapter_type) {
5160 case ACB_ADAPTER_TYPE_A:
5161 {
5162 struct HBA_msgUnit *phbamu;
5163 int retry_count = 0;
5164
5165 acb->timeout_count = 0;
5166 phbamu = (struct HBA_msgUnit *)acb->pmu;
5167 arcmsr_hba_hardware_reset(acb);
5168 acb->acb_flags &= ~ACB_F_IOP_INITED;
5169 sleep_again:
5170 drv_usecwait(1000000);
5171 if ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5172 &phbamu->outbound_msgaddr1) &
5173 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
5174 if (retry_count > 60) {
5175 arcmsr_warn(acb,
5176 "waiting for hardware"
5177 "bus reset return, RETRY TERMINATED!!");
5178 return;
5179 }
5180 retry_count++;
5181 goto sleep_again;
5182 }
5183 arcmsr_iop_init(acb);
5184 break;
5185 }
5186
5187 }
5188 }
5189
5190 static void
5191 arcmsr_iop2drv_data_wrote_handle(struct ACB *acb)
5192 {
5193 struct QBUFFER *prbuffer;
5194 uint8_t *pQbuffer;
5195 uint8_t *iop_data;
5196 int my_empty_len, iop_len;
5197 int rqbuf_firstidx, rqbuf_lastidx;
5198
5199 /* check whether this iop data would overflow our rqbuffer */
5200 rqbuf_lastidx = acb->rqbuf_lastidx;
5201 rqbuf_firstidx = acb->rqbuf_firstidx;
5202 prbuffer = arcmsr_get_iop_rqbuffer(acb);
5203 iop_data = (uint8_t *)prbuffer->data;
5204 iop_len = prbuffer->data_len;
5205 my_empty_len = (rqbuf_firstidx - rqbuf_lastidx - 1) &
5206 (ARCMSR_MAX_QBUFFER - 1);
5207
5208 if (my_empty_len >= iop_len) {
5209 while (iop_len > 0) {
5210 pQbuffer = &acb->rqbuffer[rqbuf_lastidx];
5211 (void) memcpy(pQbuffer, iop_data, 1);
5212 rqbuf_lastidx++;
5213 /* wrap the index to 0 at the end of the ring */
5214 rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
5215 iop_data++;
5216 iop_len--;
5217 }
5218 acb->rqbuf_lastidx = rqbuf_lastidx;
5219 arcmsr_iop_message_read(acb);
5220 /* signature, let IOP know data has been read */
5221 } else {
5222 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
5223 }
5224 }
5225
5226
5227
5228 static void
5229 arcmsr_iop2drv_data_read_handle(struct ACB *acb)
5230 {
5231 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
5232 /*
5233 * check whether any mail from the user space program is waiting
5234 * in our post bag; now is the time to send it to Areca's firmware
5235 */
5236
5237 if (acb->wqbuf_firstidx != acb->wqbuf_lastidx) {
5238
5239 uint8_t *pQbuffer;
5240 struct QBUFFER *pwbuffer;
5241 uint8_t *iop_data;
5242 int allxfer_len = 0;
5243
5244 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
5245 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
5246 iop_data = (uint8_t *)pwbuffer->data;
5247
5248 while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
5249 (allxfer_len < 124)) {
5250 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
5251 (void) memcpy(iop_data, pQbuffer, 1);
5252 acb->wqbuf_firstidx++;
5253 /* wrap the index to 0 at the end of the ring */
5254 acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
5255 iop_data++;
5256 allxfer_len++;
5257 }
5258 pwbuffer->data_len = allxfer_len;
5259 /*
5260 * push the inbound doorbell to tell the IOP the driver data was
5261 * written; await the reply interrupt before the next Qbuffer post
5262 */
5263 arcmsr_iop_message_wrote(acb);
5264 }
5265
5266 if (acb->wqbuf_firstidx == acb->wqbuf_lastidx)
5267 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
5268 }
5269
5270
5271 static void
5272 arcmsr_hba_doorbell_isr(struct ACB *acb)
5273 {
5274 uint32_t outbound_doorbell;
5275 struct HBA_msgUnit *phbamu;
5276
5277 phbamu = (struct HBA_msgUnit *)acb->pmu;
5278
5279 /*
5280 * We may need to check here whether wrqbuffer_lock is held.
5281 * DOORBELL: ding! dong!
5282 * check whether there is any mail to collect from the firmware
5283 */
5284
5285 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5286 &phbamu->outbound_doorbell);
5287 /* clear doorbell interrupt */
5288 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5289 &phbamu->outbound_doorbell, outbound_doorbell);
5290
5291 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
5292 arcmsr_iop2drv_data_wrote_handle(acb);
5293
5294
5295 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
5296 arcmsr_iop2drv_data_read_handle(acb);
5297 }
5298
5299
5300
5301 static void
5302 arcmsr_hbc_doorbell_isr(struct ACB *acb)
5303 {
5304 uint32_t outbound_doorbell;
5305 struct HBC_msgUnit *phbcmu;
5306
5307 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5308
5309 /*
5310 * We may need to check here whether wrqbuffer_lock is held.
5311 * DOORBELL: ding! dong!
5312 * check whether there is any mail to pick up from the firmware
5313 */
5314
5315 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5316 &phbcmu->outbound_doorbell);
5317 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5318 &phbcmu->outbound_doorbell_clear,
5319 outbound_doorbell); /* clear interrupt */
5320 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
5321 arcmsr_iop2drv_data_wrote_handle(acb);
5322 }
5323 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
5324 arcmsr_iop2drv_data_read_handle(acb);
5325 }
5326 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
5327 /* messenger of "driver to iop commands" */
5328 arcmsr_hbc_message_isr(acb);
5329 }
5330 }
5331
5332
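/*
 * The message_isr routines acknowledge a message interrupt and, when
 * the firmware has posted ARCMSR_SIGNATURE_GET_CONFIG, dispatch
 * arcmsr_dr_handle() on the taskq to process the changed device map
 * (dynamic reconfiguration of volumes).
 */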
5333 static void
5334 arcmsr_hba_message_isr(struct ACB *acb)
5335 {
5336 struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
5337 uint32_t *signature = (&phbamu->msgcode_rwbuffer[0]);
5338 uint32_t outbound_message;
5339
5340 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5341 &phbamu->outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
5342
5343 outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5344 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5345 if ((ddi_taskq_dispatch(acb->taskq,
5346 (void (*)(void *))arcmsr_dr_handle,
5347 acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5348 arcmsr_warn(acb, "DR task start failed");
5349 }
5350 }
5351
5352 static void
5353 arcmsr_hbb_message_isr(struct ACB *acb)
5354 {
5355 struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
5356 uint32_t *signature = (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0]);
5357 uint32_t outbound_message;
5358
5359 /* clear interrupts */
5360 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5361 &phbbmu->hbb_doorbell->iop2drv_doorbell,
5362 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
5363 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5364 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5365 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5366
5367 outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5368 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5369 if ((ddi_taskq_dispatch(acb->taskq,
5370 (void (*)(void *))arcmsr_dr_handle,
5371 acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5372 arcmsr_warn(acb, "DR task start failed");
5373 }
5374 }
5375
5376 static void
5377 arcmsr_hbc_message_isr(struct ACB *acb)
5378 {
5379 struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
5380 uint32_t *signature = (&phbcmu->msgcode_rwbuffer[0]);
5381 uint32_t outbound_message;
5382
5383 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5384 &phbcmu->outbound_doorbell_clear,
5385 ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
5386
5387 outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5388 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5389 if ((ddi_taskq_dispatch(acb->taskq,
5390 (void (*)(void *))arcmsr_dr_handle,
5391 acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5392 arcmsr_warn(acb, "DR task start failed");
5393 }
5394 }
5395
5396
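/*
 * The postqueue_isr routines translate completed request frames back
 * into CCB pointers.  The IOP reports a 32-byte-aligned frame address
 * (shifted right by 5 bits in type A/B replies; type C replies carry
 * flags in the low nibble instead), so adding acb->vir2phy_offset
 * recovers the kernel virtual address of the CCB, whose CDB is its
 * first field.
 */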
5397 static void
5398 arcmsr_hba_postqueue_isr(struct ACB *acb)
5399 {
5400
5401 struct HBA_msgUnit *phbamu;
5402 struct CCB *ccb;
5403 uint32_t flag_ccb;
5404 boolean_t error;
5405
5406 phbamu = (struct HBA_msgUnit *)acb->pmu;
5407
5408 /* areca cdb command done */
5409 /* Use correct offset and size for syncing */
5410 (void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5411 DDI_DMA_SYNC_FORKERNEL);
5412
5413 while ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5414 &phbamu->outbound_queueport)) != 0xFFFFFFFF) {
5415 /* frame must be 32 bytes aligned */
5416 ccb = NumToPtr((acb->vir2phy_offset+(flag_ccb << 5)));
5417 /* check if command done with no error */
5418 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5419 B_TRUE : B_FALSE;
5420 arcmsr_drain_donequeue(acb, ccb, error);
5421 } /* drain reply FIFO */
5422 }
5423
5424
5425 static void
5426 arcmsr_hbb_postqueue_isr(struct ACB *acb)
5427 {
5428 struct HBB_msgUnit *phbbmu;
5429 struct CCB *ccb;
5430 uint32_t flag_ccb;
5431 boolean_t error;
5432 int index;
5433
5434 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5435
5436 /* areca cdb command done */
5437 index = phbbmu->doneq_index;
5438 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5439 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5440 return;
5441 while ((flag_ccb = phbbmu->done_qbuffer[index]) != 0) {
5442 phbbmu->done_qbuffer[index] = 0;
5443 /* frame must be 32 bytes aligned */
5444
5445 /* the CDB is the first field of the CCB */
5446 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
5447
5448 /* check if command done with no error */
5449 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5450 B_TRUE : B_FALSE;
5451 arcmsr_drain_donequeue(acb, ccb, error);
5452 index++;
5453 /* wrap the index to 0 at the end of the ring */
5454 index %= ARCMSR_MAX_HBB_POSTQUEUE;
5455 phbbmu->doneq_index = index;
5456 } /* drain reply FIFO */
5457 }
5458
5459
5460 static void
5461 arcmsr_hbc_postqueue_isr(struct ACB *acb)
5462 {
5463
5464 struct HBC_msgUnit *phbcmu;
5465 struct CCB *ccb;
5466 uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
5467 boolean_t error;
5468
5469 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5470 /* areca cdb command done */
5471 /* Use correct offset and size for syncing */
5472 (void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5473 DDI_DMA_SYNC_FORKERNEL);
5474
5475 while (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5476 &phbcmu->host_int_status) &
5477 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
5478 /* pop the completed frame from the outbound queue */
5479 flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5480 &phbcmu->outbound_queueport_low);
5481 /* frame must be 32 bytes aligned */
5482 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
5483
5484 /* the CDB is the first field of the CCB */
5485 ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
5486
5487 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
5488 B_TRUE : B_FALSE;
5489 /* check if command done with no error */
5490 arcmsr_drain_donequeue(acb, ccb, error);
5491 if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
5492 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5493 &phbcmu->inbound_doorbell,
5494 ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
5495 break;
5496 }
5497 throttling++;
5498 } /* drain reply FIFO */
5499 }
5500
5501
5502 static uint_t
5503 arcmsr_handle_hba_isr(struct ACB *acb)
5504 {
5505 uint32_t outbound_intstatus;
5506 struct HBA_msgUnit *phbamu;
5507
5508 phbamu = (struct HBA_msgUnit *)acb->pmu;
5509
5510 outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5511 &phbamu->outbound_intstatus) & acb->outbound_int_enable;
5512
5513 if (outbound_intstatus == 0) /* it must be a shared irq */
5514 return (DDI_INTR_UNCLAIMED);
5515
5516 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
5517 outbound_intstatus); /* clear interrupt */
5518
5519 /* MU doorbell interrupts */
5520
5521 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
5522 arcmsr_hba_doorbell_isr(acb);
5523
5524 /* MU post queue interrupts */
5525 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
5526 arcmsr_hba_postqueue_isr(acb);
5527
5528 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
5529 arcmsr_hba_message_isr(acb);
5530 }
5531
5532 return (DDI_INTR_CLAIMED);
5533 }
5534
5535
5536 static uint_t
5537 arcmsr_handle_hbb_isr(struct ACB *acb)
5538 {
5539 uint32_t outbound_doorbell;
5540 struct HBB_msgUnit *phbbmu;
5541
5542
5543 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5544
5545 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5546 &phbbmu->hbb_doorbell->iop2drv_doorbell) & acb->outbound_int_enable;
5547
5548 if (outbound_doorbell == 0) /* it must be a shared irq */
5549 return (DDI_INTR_UNCLAIMED);
5550
5551 /* clear doorbell interrupt */
5552 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5553 &phbbmu->hbb_doorbell->iop2drv_doorbell, ~outbound_doorbell);
5554 /* wait a cycle */
5555 (void) CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5556 &phbbmu->hbb_doorbell->iop2drv_doorbell);
5557 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5558 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5559 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5560
5561 /* MU ioctl transfer doorbell interrupts */
5562 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
5563 arcmsr_iop2drv_data_wrote_handle(acb);
5564
5565 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
5566 arcmsr_iop2drv_data_read_handle(acb);
5567
5568 /* MU post queue interrupts */
5569 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
5570 arcmsr_hbb_postqueue_isr(acb);
5571
5572 /* MU message interrupt */
5573
5574 if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
5575 arcmsr_hbb_message_isr(acb);
5576 }
5577
5578 return (DDI_INTR_CLAIMED);
5579 }
5580
5581 static uint_t
5582 arcmsr_handle_hbc_isr(struct ACB *acb)
5583 {
5584 uint32_t host_interrupt_status;
5585 struct HBC_msgUnit *phbcmu;
5586
5587 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5588 /* check outbound intstatus */
5589 host_interrupt_status =
5590 CHIP_REG_READ32(acb->reg_mu_acc_handle0, &phbcmu->host_int_status);
5591 if (host_interrupt_status == 0) /* it must be a shared irq */
5592 return (DDI_INTR_UNCLAIMED);
5593 /* MU ioctl transfer doorbell interrupts */
5594 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
5595 /* messenger of "ioctl message read write" */
5596 arcmsr_hbc_doorbell_isr(acb);
5597 }
5598 /* MU post queue interrupts */
5599 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
5600 /* messenger of "scsi commands" */
5601 arcmsr_hbc_postqueue_isr(acb);
5602 }
5603 return (DDI_INTR_CLAIMED);
5604 }
5605
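/*
 * arcmsr_intr_handler() is the common interrupt entry point: it
 * dispatches to the chip-specific ISR while holding isr_mutex, then
 * completes any CCBs queued on the complete list only after the mutex
 * has been dropped, so packet completion callbacks never run with the
 * ISR lock held.
 */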
5606 static uint_t
5607 arcmsr_intr_handler(caddr_t arg, caddr_t arg2)
5608 {
5609 struct ACB *acb = (void *)arg;
5610 struct CCB *ccb;
5611 uint_t retrn = DDI_INTR_UNCLAIMED;
5612 _NOTE(ARGUNUSED(arg2))
5613
5614 mutex_enter(&acb->isr_mutex);
5615 switch (acb->adapter_type) {
5616 case ACB_ADAPTER_TYPE_A:
5617 retrn = arcmsr_handle_hba_isr(acb);
5618 break;
5619
5620 case ACB_ADAPTER_TYPE_B:
5621 retrn = arcmsr_handle_hbb_isr(acb);
5622 break;
5623
5624 case ACB_ADAPTER_TYPE_C:
5625 retrn = arcmsr_handle_hbc_isr(acb);
5626 break;
5627
5628 default:
5629 /* We should never be here */
5630 ASSERT(0);
5631 break;
5632 }
5633 mutex_exit(&acb->isr_mutex);
5634 while ((ccb = arcmsr_get_complete_ccb_from_list(acb)) != NULL) {
5635 arcmsr_ccb_complete(ccb, 1);
5636 }
5637 return (retrn);
5638 }
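/*
 * Note (editorial): completed CCBs are drained and finished only
 * after isr_mutex has been released. Packet completion callbacks may
 * re-enter the driver (for instance to start another command), so
 * running them under isr_mutex would risk recursive locking and
 * would lengthen the time the interrupt path holds the lock; the
 * per-chip handlers therefore only queue finished CCBs, and this
 * wrapper completes them outside the mutex.
 */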
5639
5640
5641 static void
5642 arcmsr_wait_firmware_ready(struct ACB *acb)
5643 {
5644 uint32_t firmware_state;
5645
5646 firmware_state = 0;
5647
5648 switch (acb->adapter_type) {
5649 case ACB_ADAPTER_TYPE_A:
5650 {
5651 struct HBA_msgUnit *phbamu;
5652 phbamu = (struct HBA_msgUnit *)acb->pmu;
5653 do {
5654 firmware_state =
5655 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5656 &phbamu->outbound_msgaddr1);
5657 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
5658 == 0);
5659 break;
5660 }
5661
5662 case ACB_ADAPTER_TYPE_B:
5663 {
5664 struct HBB_msgUnit *phbbmu;
5665 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5666 do {
5667 firmware_state =
5668 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5669 &phbbmu->hbb_doorbell->iop2drv_doorbell);
5670 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
5671 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5672 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5673 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5674 break;
5675 }
5676
5677 case ACB_ADAPTER_TYPE_C:
5678 {
5679 struct HBC_msgUnit *phbcmu;
5680 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5681 do {
5682 firmware_state =
5683 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5684 &phbcmu->outbound_msgaddr1);
5685 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK)
5686 == 0);
5687 break;
5688 }
5689
5690 }
5691 }
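/*
 * Note (editorial): the polling loops above spin until the firmware
 * ready bit comes up and never time out, so an adapter that fails to
 * boot would hang initialization. A bounded variant for the type A
 * register layout, with a hypothetical max_tries budget and a 1ms
 * pause between polls, might look like this:
 */
#if 0	/* illustrative sketch only, not compiled */
static boolean_t
arcmsr_wait_firmware_ready_bounded(struct ACB *acb, int max_tries)
{
	struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;

	while (max_tries-- > 0) {
		if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
		    &phbamu->outbound_msgaddr1) &
		    ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
			return (B_TRUE);
		drv_usecwait(1000);	/* wait 1ms before re-reading */
	}
	return (B_FALSE);		/* firmware never became ready */
}
#endif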
5692
5693 static void
5694 arcmsr_clear_doorbell_queue_buffer(struct ACB *acb)
5695 {
5696 switch (acb->adapter_type) {
5697 case ACB_ADAPTER_TYPE_A: {
5698 struct HBA_msgUnit *phbamu;
5699 uint32_t outbound_doorbell;
5700
5701 phbamu = (struct HBA_msgUnit *)acb->pmu;
5702 /* empty the doorbell Qbuffer if the doorbell rang */
5703 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5704 &phbamu->outbound_doorbell);
5705 /* clear doorbell interrupt */
5706 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5707 &phbamu->outbound_doorbell, outbound_doorbell);
5708 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5709 &phbamu->inbound_doorbell,
5710 ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
5711 break;
5712 }
5713
5714 case ACB_ADAPTER_TYPE_B: {
5715 struct HBB_msgUnit *phbbmu;
5716
5717 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5718 /* clear interrupt and message state */
5719 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5720 &phbbmu->hbb_doorbell->iop2drv_doorbell,
5721 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
5722 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5723 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5724 ARCMSR_DRV2IOP_DATA_READ_OK);
5725 /* let IOP know data has been read */
5726 break;
5727 }
5728
5729 case ACB_ADAPTER_TYPE_C: {
5730 struct HBC_msgUnit *phbcmu;
5731 uint32_t outbound_doorbell;
5732
5733 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5734 /* empty the doorbell Qbuffer if the doorbell rang */
5735 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5736 &phbcmu->outbound_doorbell);
5737 /* clear the outbound doorbell interrupt */
5738 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5739 &phbcmu->outbound_doorbell_clear, outbound_doorbell);
5740 /* let IOP know data has been read */
5741 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5742 &phbcmu->inbound_doorbell,
5743 ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
5744 break;
5745 }
5746
5747 }
5748 }
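/*
 * Note (editorial): each variant performs the same two-step
 * handshake: first read and acknowledge whatever the IOP has already
 * posted, so stale doorbell state cannot raise a spurious interrupt
 * once interrupts are enabled, and then signal DATA_READ_OK back so
 * the IOP knows its buffer has been consumed and may post again.
 */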
5749
5750
5751 static uint32_t
5752 arcmsr_iop_confirm(struct ACB *acb)
5753 {
5754 uint64_t cdb_phyaddr;
5755 uint32_t cdb_phyaddr_hi32;
5756
5757 /*
5758 * Tell IOP 331 the high 32 bits (HighPart) of our free CCB
5759 * area's physical address, but only if that HighPart is non-zero.
5760 */
5761 cdb_phyaddr = acb->ccb_cookie.dmac_laddress;
5762 cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
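/*
 * Note (editorial): the double 16-bit shift is the portable idiom
 * for extracting the high 32 bits: a single ">> 32" would be
 * undefined behavior if the address type were ever only 32 bits
 * wide, while two 16-bit shifts are well defined for both 32- and
 * 64-bit types.
 */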
5763 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
5764 switch (acb->adapter_type) {
5765 case ACB_ADAPTER_TYPE_A:
5766 if (cdb_phyaddr_hi32 != 0) {
5767 struct HBA_msgUnit *phbamu;
5768
5769 phbamu = (struct HBA_msgUnit *)acb->pmu;
5770 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5771 &phbamu->msgcode_rwbuffer[0],
5772 ARCMSR_SIGNATURE_SET_CONFIG);
5773 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5774 &phbamu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
5775 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5776 &phbamu->inbound_msgaddr0,
5777 ARCMSR_INBOUND_MESG0_SET_CONFIG);
5778 if (!arcmsr_hba_wait_msgint_ready(acb)) {
5779 arcmsr_warn(acb,
5780 "timeout setting ccb "
5781 "high physical address");
5782 return (FALSE);
5783 }
5784 }
5785 break;
5786
5787 /* if adapter is type B, set window of "post command queue" */
5788 case ACB_ADAPTER_TYPE_B: {
5789 uint32_t post_queue_phyaddr;
5790 struct HBB_msgUnit *phbbmu;
5791
5792 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5793 phbbmu->postq_index = 0;
5794 phbbmu->doneq_index = 0;
5795 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5796 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5797 ARCMSR_MESSAGE_SET_POST_WINDOW);
5798
5799 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5800 arcmsr_warn(acb, "timeout setting post command "
5801 "queue window");
5802 return (FALSE);
5803 }
5804
5805 post_queue_phyaddr = (uint32_t)cdb_phyaddr +
5806 ARCMSR_MAX_FREECCB_NUM * P2ROUNDUP(sizeof (struct CCB), 32)
5807 + offsetof(struct HBB_msgUnit, post_qbuffer);
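/*
 * Note (editorial): the post queue lives in the same DMA area as the
 * CCB pool; its physical address is the pool base plus room for
 * ARCMSR_MAX_FREECCB_NUM CCBs (each rounded up to a 32-byte boundary)
 * plus the offset of post_qbuffer inside the message unit. Each queue
 * is (256 + 8) slots of 4 bytes = 1056 bytes, which is why the done
 * queue base below is post_queue_phyaddr + 1056.
 */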
5808 /* driver "set config" signature */
5809 CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5810 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0],
5811 ARCMSR_SIGNATURE_SET_CONFIG);
5812 /* high 32 bits of the CCB area; normally zero */
5813 CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5814 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1],
5815 cdb_phyaddr_hi32);
5816 /* post queue base address */
5817 CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5818 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2],
5819 post_queue_phyaddr);
5820 /* done queue base address: postQ base + 1056 */
5821 CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5822 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3],
5823 post_queue_phyaddr + 1056);
5824 /* queue size in bytes: (256 + 8) * 4 = 1056 */
5825 CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5826 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4], 1056);
5827 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5828 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5829 ARCMSR_MESSAGE_SET_CONFIG);
5830
5831 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5832 arcmsr_warn(acb,
5833 "timeout setting command queue window");
5834 return (FALSE);
5835 }
5836 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5837 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5838 ARCMSR_MESSAGE_START_DRIVER_MODE);
5839
5840 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5841 arcmsr_warn(acb, "timeout in 'start driver mode'");
5842 return (FALSE);
5843 }
5844 break;
5845 }
5846
5847 case ACB_ADAPTER_TYPE_C:
5848 if (cdb_phyaddr_hi32 != 0) {
5849 struct HBC_msgUnit *phbcmu;
5850
5851 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5852 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5853 &phbcmu->msgcode_rwbuffer[0],
5854 ARCMSR_SIGNATURE_SET_CONFIG);
5855 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5856 &phbcmu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
5857 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5858 &phbcmu->inbound_msgaddr0,
5859 ARCMSR_INBOUND_MESG0_SET_CONFIG);
5860 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5861 &phbcmu->inbound_doorbell,
5862 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
5863 if (!arcmsr_hbc_wait_msgint_ready(acb)) {
5864 arcmsr_warn(acb, "'set ccb "
5865 "high part physical address' timeout");
5866 return (FALSE);
5867 }
5868 }
5869 break;
5870 }
5871 return (TRUE);
5872 }
5873
5874
5875 /*
5876 * ONLY used for Adapter type B
5877 */
5878 static void
5879 arcmsr_enable_eoi_mode(struct ACB *acb)
5880 {
5881 struct HBB_msgUnit *phbbmu;
5882
5883 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5884
5885 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5886 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5887 ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
5888
5889 if (!arcmsr_hbb_wait_msgint_ready(acb))
5890 arcmsr_warn(acb, "'iop enable eoi mode' timeout");
5891 }
5892
5893 /* initialize the IOP and start background rebuild */
5894 static void
5895 arcmsr_iop_init(struct ACB *acb)
5896 {
5897 uint32_t intmask_org;
5898
5899 /* disable all outbound interrupts */
5900 intmask_org = arcmsr_disable_allintr(acb);
5901 arcmsr_wait_firmware_ready(acb);
5902 (void) arcmsr_iop_confirm(acb);
5903
5904 /* start background rebuild */
5905 switch (acb->adapter_type) {
5906 case ACB_ADAPTER_TYPE_A:
5907 arcmsr_get_hba_config(acb);
5908 arcmsr_start_hba_bgrb(acb);
5909 break;
5910 case ACB_ADAPTER_TYPE_B:
5911 arcmsr_get_hbb_config(acb);
5912 arcmsr_start_hbb_bgrb(acb);
5913 break;
5914 case ACB_ADAPTER_TYPE_C:
5915 arcmsr_get_hbc_config(acb);
5916 arcmsr_start_hbc_bgrb(acb);
5917 break;
5918 }
5919 /* empty the doorbell Qbuffer if the doorbell rang */
5920 arcmsr_clear_doorbell_queue_buffer(acb);
5921
5922 if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
5923 arcmsr_enable_eoi_mode(acb);
5924
5925 /* enable outbound post queue and doorbell interrupts */
5926 arcmsr_enable_allintr(acb, intmask_org);
5927 acb->acb_flags |= ACB_F_IOP_INITED;
5928 }
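/*
 * Note (editorial): ACB_F_IOP_INITED is the flag other code paths can
 * test before talking to the adapter. A hypothetical guard in a
 * command submission path (arcmsr_post_ccb is assumed here to be the
 * driver's post routine) might look like:
 */
#if 0	/* illustrative sketch only, not compiled */
static int
arcmsr_submit_guarded(struct ACB *acb, struct CCB *ccb)
{
	if ((acb->acb_flags & ACB_F_IOP_INITED) == 0)
		return (DDI_FAILURE);	/* adapter not initialized */
	arcmsr_post_ccb(acb, ccb);	/* assumed post routine */
	return (DDI_SUCCESS);
}
#endif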
--- EOF ---