7127 remove -Wno-missing-braces from Makefile.uts
--- old/usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c
+++ new/usr/src/uts/intel/io/scsi/adapters/arcmsr/arcmsr.c
1 1 /*
2 2 * O.S : Solaris
3 3 * FILE NAME : arcmsr.c
4 4 * BY : Erich Chen, C.L. Huang
5 5 * Description: SCSI RAID Device Driver for
6 6 * ARECA RAID Host adapter
7 7 *
8 8 * Copyright (C) 2002,2010 Areca Technology Corporation All rights reserved.
9 9 * Copyright (C) 2002,2010 Erich Chen
10 10 * Web site: www.areca.com.tw
11 11 * E-mail: erich@areca.com.tw; ching2048@areca.com.tw
12 12 *
13 13 * Redistribution and use in source and binary forms, with or without
14 14 * modification, are permitted provided that the following conditions
15 15 * are met:
16 16 * 1. Redistributions of source code must retain the above copyright
17 17 * notice, this list of conditions and the following disclaimer.
18 18 * 2. Redistributions in binary form must reproduce the above copyright
19 19 * notice, this list of conditions and the following disclaimer in the
20 20 * documentation and/or other materials provided with the distribution.
21 21 * 3. The party using or redistributing the source code and binary forms
22 22 * agrees to the disclaimer below and the terms and conditions set forth
23 23 * herein.
24 24 *
25 25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26 26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29 29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 35 * SUCH DAMAGE.
36 36 *
37 37 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
38 38 * Use is subject to license terms.
39 39 *
40 40 */
41 41 /*
42 42 * This file and its contents are supplied under the terms of the
43 43 * Common Development and Distribution License ("CDDL"), version 1.0.
44 44 * You may only use this file in accordance with the terms of version
45 45 * 1.0 of the CDDL.
46 46 *
47 47 * A full copy of the text of the CDDL should have accompanied this
48 48 * source. A copy of the CDDL is also available via the Internet at
49 49 * http://www.illumos.org/license/CDDL.
50 50 */
51 51 /*
52 52 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
53 53 */
54 54 #include <sys/types.h>
55 55 #include <sys/ddidmareq.h>
56 56 #include <sys/scsi/scsi.h>
57 57 #include <sys/ddi.h>
58 58 #include <sys/sunddi.h>
59 59 #include <sys/file.h>
60 60 #include <sys/disp.h>
61 61 #include <sys/signal.h>
62 62 #include <sys/debug.h>
63 63 #include <sys/pci.h>
64 64 #include <sys/policy.h>
65 65 #include <sys/atomic.h>
66 66 #include "arcmsr.h"
67 67
68 68 static int arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd);
69 69 static int arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg,
70 70 int mode, cred_t *credp, int *rvalp);
71 71 static int arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd);
72 72 static int arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd);
73 73 static int arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt);
74 74 static int arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
75 75 static int arcmsr_tran_reset(struct scsi_address *ap, int level);
76 76 static int arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom);
77 77 static int arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value,
78 78 int whom);
79 79 static int arcmsr_tran_tgt_init(dev_info_t *host_dev_info,
80 80 dev_info_t *target_dev_info, scsi_hba_tran_t *hosttran,
81 81 struct scsi_device *sd);
82 82 static void arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt);
83 83 static void arcmsr_tran_destroy_pkt(struct scsi_address *ap,
84 84 struct scsi_pkt *pkt);
85 85 static void arcmsr_tran_sync_pkt(struct scsi_address *ap,
86 86 struct scsi_pkt *pkt);
87 87 static struct scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
88 88 struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
89 89 int tgtlen, int flags, int (*callback)(), caddr_t arg);
90 90 static int arcmsr_config_child(struct ACB *acb, struct scsi_device *sd,
91 91 dev_info_t **dipp);
92 92
93 93 static int arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun,
94 94 dev_info_t **ldip);
95 95 static uint8_t arcmsr_abort_host_command(struct ACB *acb);
96 96 static uint8_t arcmsr_get_echo_from_iop(struct ACB *acb);
97 97 static uint_t arcmsr_intr_handler(caddr_t arg, caddr_t arg2);
98 98 static int arcmsr_initialize(struct ACB *acb);
99 99 static int arcmsr_dma_alloc(struct ACB *acb,
100 100 struct scsi_pkt *pkt, struct buf *bp, int flags, int (*callback)());
101 101 static int arcmsr_dma_move(struct ACB *acb,
102 102 struct scsi_pkt *pkt, struct buf *bp);
103 103 static void arcmsr_handle_iop_bus_hold(struct ACB *acb);
104 104 static void arcmsr_hbc_message_isr(struct ACB *acb);
105 105 static void arcmsr_pcidev_disattach(struct ACB *acb);
106 106 static void arcmsr_ccb_complete(struct CCB *ccb, int flag);
107 107 static void arcmsr_iop_init(struct ACB *acb);
108 108 static void arcmsr_iop_parking(struct ACB *acb);
109 109 /*PRINTFLIKE3*/
110 110 static void arcmsr_log(struct ACB *acb, int level, char *fmt, ...);
111 111 /*PRINTFLIKE2*/
112 112 static void arcmsr_warn(struct ACB *acb, char *fmt, ...);
113 113 static void arcmsr_mutex_init(struct ACB *acb);
114 114 static void arcmsr_remove_intr(struct ACB *acb);
115 115 static void arcmsr_ccbs_timeout(void* arg);
116 116 static void arcmsr_devMap_monitor(void* arg);
117 117 static void arcmsr_pcidev_disattach(struct ACB *acb);
118 118 static void arcmsr_iop_message_read(struct ACB *acb);
119 119 static void arcmsr_free_ccb(struct CCB *ccb);
120 120 static void arcmsr_post_ioctldata2iop(struct ACB *acb);
121 121 static void arcmsr_report_sense_info(struct CCB *ccb);
122 122 static void arcmsr_init_list_head(struct list_head *list);
123 123 static void arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org);
124 124 static void arcmsr_done4abort_postqueue(struct ACB *acb);
125 125 static void arcmsr_list_add_tail(kmutex_t *list_lock,
126 126 struct list_head *new_one, struct list_head *head);
127 127 static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
128 128 static int arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt);
129 129 static int arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt);
130 130 static int arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb);
131 131 static int arcmsr_parse_devname(char *devnm, int *tgt, int *lun);
132 132 static int arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance);
133 133 static uint8_t arcmsr_iop_reset(struct ACB *acb);
134 134 static uint32_t arcmsr_disable_allintr(struct ACB *acb);
135 135 static uint32_t arcmsr_iop_confirm(struct ACB *acb);
136 136 static struct CCB *arcmsr_get_freeccb(struct ACB *acb);
137 137 static void arcmsr_flush_hba_cache(struct ACB *acb);
138 138 static void arcmsr_flush_hbb_cache(struct ACB *acb);
139 139 static void arcmsr_flush_hbc_cache(struct ACB *acb);
140 140 static void arcmsr_stop_hba_bgrb(struct ACB *acb);
141 141 static void arcmsr_stop_hbb_bgrb(struct ACB *acb);
142 142 static void arcmsr_stop_hbc_bgrb(struct ACB *acb);
143 143 static void arcmsr_start_hba_bgrb(struct ACB *acb);
144 144 static void arcmsr_start_hbb_bgrb(struct ACB *acb);
145 145 static void arcmsr_start_hbc_bgrb(struct ACB *acb);
146 146 static void arcmsr_mutex_destroy(struct ACB *acb);
147 147 static void arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
148 148 static void arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
149 149 static void arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb);
150 150 static void arcmsr_build_ccb(struct CCB *ccb);
151 151 static int arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
152 152 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
153 153 static int arcmsr_name_node(dev_info_t *dip, char *name, int len);
154 154 static dev_info_t *arcmsr_find_child(struct ACB *acb, uint16_t tgt,
155 155 uint8_t lun);
156 156 static struct QBUFFER *arcmsr_get_iop_rqbuffer(struct ACB *acb);
157 157
158 158 static int arcmsr_add_intr(struct ACB *, int);
159 159
160 160 static void *arcmsr_soft_state = NULL;
161 161
162 162 static ddi_dma_attr_t arcmsr_dma_attr = {
163 163 DMA_ATTR_V0, /* ddi_dma_attr version */
164 164 0, /* low DMA address range */
165 165 0xffffffffffffffffull, /* high DMA address range */
166 166 0x00ffffff, /* DMA counter register upper bound */
167 167 1, /* DMA address alignment requirements */
168 168 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* burst sizes */
169 169 1, /* minimum effective DMA size */
170 170 ARCMSR_MAX_XFER_LEN, /* maximum DMA xfer size */
171 171 /*
172 172 * The dma_attr_seg field supplies the limit of each Scatter/Gather
173 173 * list element's "address+length". The Intel IOP331 can not use
174 174 * segments over the 4G boundary due to segment boundary restrictions
175 175 */
176 176 0xffffffff,
177 177 ARCMSR_MAX_SG_ENTRIES, /* scatter/gather list count */
178 178 1, /* device granularity */
179 179 DDI_DMA_FORCE_PHYSICAL /* Bus specific DMA flags */
180 180 };
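
A note on the dma_attr_seg value above (0xffffffff): it asks the DDI framework to keep every scatter/gather element inside a single 4GB region, matching the IOP331 restriction described in the comment. A minimal sketch of the invariant (illustrative only, not driver code):

	#include <sys/types.h>

	/* true if [addr, addr + size - 1] stays within one (seg + 1) region */
	static boolean_t
	cookie_obeys_seg(uint64_t addr, uint64_t size, uint64_t seg)
	{
		return ((addr & ~seg) == ((addr + size - 1) & ~seg));
	}
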
181 181
182 182
183 183 static ddi_dma_attr_t arcmsr_ccb_attr = {
184 184 DMA_ATTR_V0, /* ddi_dma_attr version */
185 185 0, /* low DMA address range */
186 186 0xffffffff, /* high DMA address range */
187 187 0x00ffffff, /* DMA counter register upper bound */
188 188 1, /* default byte alignment */
189 189 DEFAULT_BURSTSIZE | BURST32 | BURST64, /* burst sizes */
190 190 1, /* minimum effective DMA size */
191 191 0xffffffff, /* maximum DMA xfer size */
192 192 0x00ffffff, /* max segment size, segment boundary restrictions */
193 193 1, /* scatter/gather list count */
194 194 1, /* device granularity */
195 195 DDI_DMA_FORCE_PHYSICAL /* Bus specific DMA flags */
196 196 };
197 197
198 198
199 199 static struct cb_ops arcmsr_cb_ops = {
200 200 scsi_hba_open, /* open(9E) */
201 201 scsi_hba_close, /* close(9E) */
202 202 nodev, /* strategy(9E), returns ENXIO */
203 203 nodev, /* print(9E) */
204 204 nodev, /* dump(9E) Cannot be used as a dump device */
205 205 nodev, /* read(9E) */
206 206 nodev, /* write(9E) */
207 207 arcmsr_cb_ioctl, /* ioctl(9E) */
208 208 nodev, /* devmap(9E) */
209 209 nodev, /* mmap(9E) */
210 210 nodev, /* segmap(9E) */
211 211 NULL, /* chpoll(9E) returns ENXIO */
212 212 nodev, /* prop_op(9E) */
213 213 NULL, /* streamtab(9S) */
214 214 D_MP,
215 215 CB_REV,
216 216 nodev, /* aread(9E) */
217 217 nodev /* awrite(9E) */
218 218 };
219 219
220 220 static struct dev_ops arcmsr_ops = {
221 221 DEVO_REV, /* devo_rev */
222 222 0, /* reference count */
223 223 nodev, /* getinfo */
224 224 nulldev, /* identify */
225 225 nulldev, /* probe */
226 226 arcmsr_attach, /* attach */
227 227 arcmsr_detach, /* detach */
228 228 arcmsr_reset, /* reset, shutdown, reboot notify */
229 229 &arcmsr_cb_ops, /* driver operations */
230 230 NULL, /* bus operations */
231 231 NULL /* power */
232 232 };
233 233
234 234 static struct modldrv arcmsr_modldrv = {
235 235 &mod_driverops, /* Type of module. This is a driver. */
236 236 "ARECA RAID Controller", /* module name, from arcmsr.h */
237 237 &arcmsr_ops, /* driver ops */
238 238 };
239 239
240 240 static struct modlinkage arcmsr_modlinkage = {
241 241 MODREV_1,
242 - &arcmsr_modldrv,
243 - NULL
242 + { &arcmsr_modldrv, NULL }
244 243 };
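
This initializer change is the point of the webrev: once -Wno-missing-braces is dropped from Makefile.uts, GCC's -Wmissing-braces warning fires on the old form, because ml_linkage is an array member of struct modlinkage. A minimal sketch of both forms (illustrative only; both compile against <sys/modctl.h>):

	static struct modlinkage flagged = {
		MODREV_1,
		&arcmsr_modldrv, NULL		/* brace elision: -Wmissing-braces */
	};

	static struct modlinkage clean = {
		MODREV_1,
		{ &arcmsr_modldrv, NULL }	/* nesting made explicit: quiet */
	};
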
245 244
246 245
247 246 int
248 247 _init(void)
249 248 {
250 249 int ret;
251 250
252 251 ret = ddi_soft_state_init(&arcmsr_soft_state, sizeof (struct ACB), 1);
253 252 if (ret != 0) {
254 253 return (ret);
255 254 }
256 255 if ((ret = scsi_hba_init(&arcmsr_modlinkage)) != 0) {
257 256 ddi_soft_state_fini(&arcmsr_soft_state);
258 257 return (ret);
259 258 }
260 259
261 260 if ((ret = mod_install(&arcmsr_modlinkage)) != 0) {
262 261 scsi_hba_fini(&arcmsr_modlinkage);
263 262 if (arcmsr_soft_state != NULL) {
264 263 ddi_soft_state_fini(&arcmsr_soft_state);
265 264 }
266 265 }
267 266 return (ret);
268 267 }
269 268
270 269
271 270 int
272 271 _fini(void)
273 272 {
274 273 int ret;
275 274
276 275 ret = mod_remove(&arcmsr_modlinkage);
277 276 if (ret == 0) {
278 277 /* if ret == 0, the driver can be removed */
279 278 scsi_hba_fini(&arcmsr_modlinkage);
280 279 if (arcmsr_soft_state != NULL) {
281 280 ddi_soft_state_fini(&arcmsr_soft_state);
282 281 }
283 282 }
284 283 return (ret);
285 284 }
286 285
287 286
288 287 int
289 288 _info(struct modinfo *modinfop)
290 289 {
291 290 return (mod_info(&arcmsr_modlinkage, modinfop));
292 291 }
293 292
294 293
295 294 /*
296 295 * Function: arcmsr_attach(9E)
297 296 * Description: Set up all device state and allocate data structures,
298 297 * mutexes, condition variables, etc. for device operation.
299 298 * Set mt_attr property for driver to indicate MT-safety.
300 299 * Add interrupts needed.
301 300 * Input: dev_info_t *dev_info, ddi_attach_cmd_t cmd
302 301 * Output: Return DDI_SUCCESS if device is ready,
303 302 * else return DDI_FAILURE
304 303 */
305 304 static int
306 305 arcmsr_attach(dev_info_t *dev_info, ddi_attach_cmd_t cmd)
307 306 {
308 307 scsi_hba_tran_t *hba_trans;
309 308 struct ACB *acb;
310 309
311 310 switch (cmd) {
312 311 case DDI_ATTACH:
313 312 return (arcmsr_do_ddi_attach(dev_info,
314 313 ddi_get_instance(dev_info)));
315 314 case DDI_RESUME:
316 315 /*
317 316 * There is no hardware state to restore and no
318 317 * timeouts to restart, since we didn't DDI_SUSPEND with
319 318 * active cmds or active timeouts. We just need to
320 319 * unblock waiting threads and restart I/O.
321 320 */
322 321 hba_trans = ddi_get_driver_private(dev_info);
323 322 if (hba_trans == NULL) {
324 323 return (DDI_FAILURE);
325 324 }
326 325 acb = hba_trans->tran_hba_private;
327 326 mutex_enter(&acb->acb_mutex);
328 327 arcmsr_iop_init(acb);
329 328
330 329 /* restart ccbs "timeout" watchdog */
331 330 acb->timeout_count = 0;
332 331 acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
333 332 (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
334 333 acb->timeout_sc_id = timeout(arcmsr_devMap_monitor,
335 334 (caddr_t)acb,
336 335 (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
337 336 mutex_exit(&acb->acb_mutex);
338 337 return (DDI_SUCCESS);
339 338
340 339 default:
341 340 return (DDI_FAILURE);
342 341 }
343 342 }
344 343
345 344 /*
346 345 * Function: arcmsr_detach(9E)
347 346 * Description: Remove all device allocation and system resources, disable
348 347 * device interrupt.
349 348 * Input: dev_info_t *dev_info
350 349 * ddi_detach_cmd_t cmd
351 350 * Output: Return DDI_SUCCESS if done,
352 351 * else return DDI_FAILURE
353 352 */
354 353 static int
355 354 arcmsr_detach(dev_info_t *dev_info, ddi_detach_cmd_t cmd) {
356 355
357 356 int instance;
358 357 struct ACB *acb;
359 358
360 359
361 360 instance = ddi_get_instance(dev_info);
362 361 acb = ddi_get_soft_state(arcmsr_soft_state, instance);
363 362 if (acb == NULL)
364 363 return (DDI_FAILURE);
365 364
366 365 switch (cmd) {
367 366 case DDI_DETACH:
368 367 mutex_enter(&acb->acb_mutex);
369 368 if (acb->timeout_id != 0) {
370 369 mutex_exit(&acb->acb_mutex);
371 370 (void) untimeout(acb->timeout_id);
372 371 mutex_enter(&acb->acb_mutex);
373 372 acb->timeout_id = 0;
374 373 }
375 374 if (acb->timeout_sc_id != 0) {
376 375 mutex_exit(&acb->acb_mutex);
377 376 (void) untimeout(acb->timeout_sc_id);
378 377 mutex_enter(&acb->acb_mutex);
379 378 acb->timeout_sc_id = 0;
380 379 }
381 380 arcmsr_pcidev_disattach(acb);
382 381 /* Remove interrupt set up by ddi_add_intr */
383 382 arcmsr_remove_intr(acb);
384 383 /* unbind mapping object to handle */
385 384 (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
386 385 /* Free ccb pool memory */
387 386 ddi_dma_mem_free(&acb->ccbs_acc_handle);
388 387 /* Free DMA handle */
389 388 ddi_dma_free_handle(&acb->ccbs_pool_handle);
390 389 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
391 390 if (scsi_hba_detach(dev_info) != DDI_SUCCESS)
392 391 arcmsr_warn(acb, "Unable to detach instance cleanly "
393 392 "(should not happen)");
394 393 /* free scsi_hba_transport from scsi_hba_tran_alloc */
395 394 scsi_hba_tran_free(acb->scsi_hba_transport);
396 395 ddi_taskq_destroy(acb->taskq);
397 396 ddi_prop_remove_all(dev_info);
398 397 mutex_exit(&acb->acb_mutex);
399 398 arcmsr_mutex_destroy(acb);
400 399 pci_config_teardown(&acb->pci_acc_handle);
401 400 ddi_set_driver_private(dev_info, NULL);
402 401 ddi_soft_state_free(arcmsr_soft_state, instance);
403 402 return (DDI_SUCCESS);
404 403 case DDI_SUSPEND:
405 404 mutex_enter(&acb->acb_mutex);
406 405 if (acb->timeout_id != 0) {
407 406 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
408 407 mutex_exit(&acb->acb_mutex);
409 408 (void) untimeout(acb->timeout_id);
410 409 (void) untimeout(acb->timeout_sc_id);
411 410 mutex_enter(&acb->acb_mutex);
412 411 acb->timeout_id = 0;
413 412 }
414 413
415 414 if (acb->timeout_sc_id != 0) {
416 415 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
417 416 mutex_exit(&acb->acb_mutex);
418 417 (void) untimeout(acb->timeout_sc_id);
419 418 mutex_enter(&acb->acb_mutex);
420 419 acb->timeout_sc_id = 0;
421 420 }
422 421
423 422 /* disable all outbound interrupt */
424 423 (void) arcmsr_disable_allintr(acb);
425 424 /* stop adapter background rebuild */
426 425 switch (acb->adapter_type) {
427 426 case ACB_ADAPTER_TYPE_A:
428 427 arcmsr_stop_hba_bgrb(acb);
429 428 arcmsr_flush_hba_cache(acb);
430 429 break;
431 430
432 431 case ACB_ADAPTER_TYPE_B:
433 432 arcmsr_stop_hbb_bgrb(acb);
434 433 arcmsr_flush_hbb_cache(acb);
435 434 break;
436 435
437 436 case ACB_ADAPTER_TYPE_C:
438 437 arcmsr_stop_hbc_bgrb(acb);
439 438 arcmsr_flush_hbc_cache(acb);
440 439 break;
441 440 }
442 441 mutex_exit(&acb->acb_mutex);
443 442 return (DDI_SUCCESS);
444 443 default:
445 444 return (DDI_FAILURE);
446 445 }
447 446 }
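
The DDI_DETACH and DDI_SUSPEND paths above both drop acb_mutex around untimeout(9F). That is deliberate: untimeout() waits for an in-flight handler to finish, so holding a mutex the handler itself takes (here assumed to be acb_mutex, since arcmsr_ccbs_timeout and arcmsr_devMap_monitor are watchdogs on acb state) could deadlock. A minimal sketch of one common variant of the pattern:

	#include <sys/ksynch.h>
	#include <sys/sunddi.h>

	static void
	cancel_watchdog(struct ACB *acb)
	{
		timeout_id_t tid;

		mutex_enter(&acb->acb_mutex);
		tid = acb->timeout_id;
		acb->timeout_id = 0;
		mutex_exit(&acb->acb_mutex);	/* drop before waiting */
		if (tid != 0)
			(void) untimeout(tid);	/* may wait for the handler */
	}
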
448 447
449 448 static int
450 449 arcmsr_reset(dev_info_t *resetdev, ddi_reset_cmd_t cmd)
451 450 {
452 451 struct ACB *acb;
453 452 scsi_hba_tran_t *scsi_hba_transport;
454 453 _NOTE(ARGUNUSED(cmd));
455 454
456 455 scsi_hba_transport = ddi_get_driver_private(resetdev);
457 456 if (scsi_hba_transport == NULL)
458 457 return (DDI_FAILURE);
459 458
460 459 acb = (struct ACB *)scsi_hba_transport->tran_hba_private;
461 460 if (!acb)
462 461 return (DDI_FAILURE);
463 462
464 463 arcmsr_pcidev_disattach(acb);
465 464
466 465 return (DDI_SUCCESS);
467 466 }
468 467
469 468 static int
470 469 arcmsr_cb_ioctl(dev_t dev, int ioctl_cmd, intptr_t arg, int mode,
471 470 cred_t *credp, int *rvalp)
472 471 {
473 472 struct ACB *acb;
474 473 struct CMD_MESSAGE_FIELD *pktioctlfld;
475 474 int retvalue = 0;
476 475 int instance = MINOR2INST(getminor(dev));
477 476
478 477 if (instance < 0)
479 478 return (ENXIO);
480 479
481 480 if (secpolicy_sys_config(credp, B_FALSE) != 0)
482 481 return (EPERM);
483 482
484 483 acb = ddi_get_soft_state(arcmsr_soft_state, instance);
485 484 if (acb == NULL)
486 485 return (ENXIO);
487 486
488 487 pktioctlfld = kmem_zalloc(sizeof (struct CMD_MESSAGE_FIELD), KM_SLEEP);
489 488
490 489 mutex_enter(&acb->ioctl_mutex);
491 490 if (ddi_copyin((void *)arg, pktioctlfld,
492 491 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0) {
493 492 retvalue = ENXIO;
494 493 goto ioctl_out;
495 494 }
496 495
497 496 if (memcmp(pktioctlfld->cmdmessage.Signature, "ARCMSR", 6) != 0) {
498 497 /* validity check */
499 498 retvalue = ENXIO;
500 499 goto ioctl_out;
501 500 }
502 501
503 502 switch ((unsigned int)ioctl_cmd) {
504 503 case ARCMSR_MESSAGE_READ_RQBUFFER:
505 504 {
506 505 uint8_t *ver_addr;
507 506 uint8_t *pQbuffer, *ptmpQbuffer;
508 507 int32_t allxfer_len = 0;
509 508
510 509 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
511 510 ptmpQbuffer = ver_addr;
512 511 while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
513 512 (allxfer_len < (MSGDATABUFLEN - 1))) {
514 513 /* copy READ QBUFFER to srb */
515 514 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
516 515 (void) memcpy(ptmpQbuffer, pQbuffer, 1);
517 516 acb->rqbuf_firstidx++;
518 517 /* wrap the index to 0 past the last slot */
519 518 acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
520 519 ptmpQbuffer++;
521 520 allxfer_len++;
522 521 }
523 522
524 523 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
525 524 struct QBUFFER *prbuffer;
526 525 uint8_t *pQbuffer;
527 526 uint8_t *iop_data;
528 527 int32_t iop_len;
529 528
530 529 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
531 530 prbuffer = arcmsr_get_iop_rqbuffer(acb);
532 531 iop_data = (uint8_t *)prbuffer->data;
533 532 iop_len = (int32_t)prbuffer->data_len;
534 533 /*
535 534 * this iop data has no chance to overflow the buffer
536 535 * again here, so just copy it
537 536 */
538 537 while (iop_len > 0) {
539 538 pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
540 539 (void) memcpy(pQbuffer, iop_data, 1);
541 540 acb->rqbuf_lastidx++;
543 542 /* wrap the index to 0 past the last slot */
543 542 acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
544 543 iop_data++;
545 544 iop_len--;
546 545 }
547 546 /* let IOP know data has been read */
548 547 arcmsr_iop_message_read(acb);
549 548 }
550 549 (void) memcpy(pktioctlfld->messagedatabuffer,
551 550 ver_addr, allxfer_len);
552 551 pktioctlfld->cmdmessage.Length = allxfer_len;
553 552 pktioctlfld->cmdmessage.ReturnCode =
554 553 ARCMSR_MESSAGE_RETURNCODE_OK;
555 554
556 555 if (ddi_copyout(pktioctlfld, (void *)arg,
557 556 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
558 557 retvalue = ENXIO;
559 558
560 559 kmem_free(ver_addr, MSGDATABUFLEN);
561 560 break;
562 561 }
563 562
564 563 case ARCMSR_MESSAGE_WRITE_WQBUFFER:
565 564 {
566 565 uint8_t *ver_addr;
567 566 int32_t my_empty_len, user_len;
568 567 int32_t wqbuf_firstidx, wqbuf_lastidx;
569 568 uint8_t *pQbuffer, *ptmpuserbuffer;
570 569
571 570 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
572 571
573 572 ptmpuserbuffer = ver_addr;
574 573 user_len = min(pktioctlfld->cmdmessage.Length,
575 574 MSGDATABUFLEN);
576 575 (void) memcpy(ptmpuserbuffer,
577 576 pktioctlfld->messagedatabuffer, user_len);
578 577 /*
579 578 * check if the data xfer length of this request would
580 579 * overflow the qbuffer array
581 580 */
582 581 wqbuf_lastidx = acb->wqbuf_lastidx;
583 582 wqbuf_firstidx = acb->wqbuf_firstidx;
584 583 if (wqbuf_lastidx != wqbuf_firstidx) {
585 584 arcmsr_post_ioctldata2iop(acb);
586 585 pktioctlfld->cmdmessage.ReturnCode =
587 586 ARCMSR_MESSAGE_RETURNCODE_ERROR;
588 587 } else {
589 588 my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1)
590 589 & (ARCMSR_MAX_QBUFFER - 1);
591 590 if (my_empty_len >= user_len) {
592 591 while (user_len > 0) {
593 592 /* copy srb data to wqbuffer */
594 593 pQbuffer =
595 594 &acb->wqbuffer[acb->wqbuf_lastidx];
596 595 (void) memcpy(pQbuffer,
597 596 ptmpuserbuffer, 1);
598 597 acb->wqbuf_lastidx++;
599 598 /* wrap the index to 0 past the last slot */
600 599 acb->wqbuf_lastidx %=
601 600 ARCMSR_MAX_QBUFFER;
602 601 ptmpuserbuffer++;
603 602 user_len--;
604 603 }
605 604 /* post first Qbuffer */
606 605 if (acb->acb_flags &
607 606 ACB_F_MESSAGE_WQBUFFER_CLEARED) {
608 607 acb->acb_flags &=
609 608 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
610 609 arcmsr_post_ioctldata2iop(acb);
611 610 }
612 611 pktioctlfld->cmdmessage.ReturnCode =
613 612 ARCMSR_MESSAGE_RETURNCODE_OK;
614 613 } else {
615 614 pktioctlfld->cmdmessage.ReturnCode =
616 615 ARCMSR_MESSAGE_RETURNCODE_ERROR;
617 616 }
618 617 }
619 618 if (ddi_copyout(pktioctlfld, (void *)arg,
620 619 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
621 620 retvalue = ENXIO;
622 621
623 622 kmem_free(ver_addr, MSGDATABUFLEN);
624 623 break;
625 624 }
626 625
627 626 case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
628 627 {
629 628 uint8_t *pQbuffer = acb->rqbuffer;
630 629
631 630 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
632 631 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
633 632 arcmsr_iop_message_read(acb);
634 633 }
635 634 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
636 635 acb->rqbuf_firstidx = 0;
637 636 acb->rqbuf_lastidx = 0;
638 637 bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
639 638 /* report success */
640 639 pktioctlfld->cmdmessage.ReturnCode =
641 640 ARCMSR_MESSAGE_RETURNCODE_OK;
642 641
643 642 if (ddi_copyout(pktioctlfld, (void *)arg,
644 643 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
645 644 retvalue = ENXIO;
646 645 break;
647 646 }
648 647
649 648 case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
650 649 {
651 650 uint8_t *pQbuffer = acb->wqbuffer;
652 651
653 652 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
654 653 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
655 654 arcmsr_iop_message_read(acb);
656 655 }
657 656 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
658 657 ACB_F_MESSAGE_WQBUFFER_READ);
659 658 acb->wqbuf_firstidx = 0;
660 659 acb->wqbuf_lastidx = 0;
661 660 bzero(pQbuffer, ARCMSR_MAX_QBUFFER);
662 661 /* report success */
663 662 pktioctlfld->cmdmessage.ReturnCode =
664 663 ARCMSR_MESSAGE_RETURNCODE_OK;
665 664
666 665 if (ddi_copyout(pktioctlfld, (void *)arg,
667 666 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
668 667 retvalue = ENXIO;
669 668 break;
670 669 }
671 670
672 671 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
673 672 {
674 673 uint8_t *pQbuffer;
675 674
676 675 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
677 676 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
678 677 arcmsr_iop_message_read(acb);
679 678 }
680 679 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
681 680 ACB_F_MESSAGE_RQBUFFER_CLEARED |
682 681 ACB_F_MESSAGE_WQBUFFER_READ);
683 682 acb->rqbuf_firstidx = 0;
684 683 acb->rqbuf_lastidx = 0;
685 684 acb->wqbuf_firstidx = 0;
686 685 acb->wqbuf_lastidx = 0;
687 686 pQbuffer = acb->rqbuffer;
688 687 bzero(pQbuffer, sizeof (struct QBUFFER));
689 688 pQbuffer = acb->wqbuffer;
690 689 bzero(pQbuffer, sizeof (struct QBUFFER));
691 690 /* report success */
692 691 pktioctlfld->cmdmessage.ReturnCode =
693 692 ARCMSR_MESSAGE_RETURNCODE_OK;
694 693 if (ddi_copyout(pktioctlfld, (void *)arg,
695 694 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
696 695 retvalue = ENXIO;
697 696 break;
698 697 }
699 698
700 699 case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
701 700 pktioctlfld->cmdmessage.ReturnCode =
702 701 ARCMSR_MESSAGE_RETURNCODE_3F;
703 702 if (ddi_copyout(pktioctlfld, (void *)arg,
704 703 sizeof (struct CMD_MESSAGE_FIELD), mode) != 0)
705 704 retvalue = ENXIO;
706 705 break;
707 706
708 707 /* Not supported: ARCMSR_MESSAGE_SAY_HELLO */
709 708 case ARCMSR_MESSAGE_SAY_GOODBYE:
710 709 arcmsr_iop_parking(acb);
711 710 break;
712 711
713 712 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
714 713 switch (acb->adapter_type) {
715 714 case ACB_ADAPTER_TYPE_A:
716 715 arcmsr_flush_hba_cache(acb);
717 716 break;
718 717 case ACB_ADAPTER_TYPE_B:
719 718 arcmsr_flush_hbb_cache(acb);
720 719 break;
721 720 case ACB_ADAPTER_TYPE_C:
722 721 arcmsr_flush_hbc_cache(acb);
723 722 break;
724 723 }
725 724 break;
726 725
727 726 default:
728 727 mutex_exit(&acb->ioctl_mutex);
729 728 kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
730 729 return (scsi_hba_ioctl(dev, ioctl_cmd, arg, mode, credp,
731 730 rvalp));
732 731 }
733 732
734 733 ioctl_out:
735 734 kmem_free(pktioctlfld, sizeof (struct CMD_MESSAGE_FIELD));
736 735 mutex_exit(&acb->ioctl_mutex);
737 736
738 737 return (retvalue);
739 738 }
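
The WRITE_WQBUFFER case above computes free space in the wqbuffer ring as (first - last - 1) & (ARCMSR_MAX_QBUFFER - 1), which assumes ARCMSR_MAX_QBUFFER is a power of two. A worked sketch (a qsize of 4096 is assumed for illustration): with first == last, the empty ring reports 4095 free bytes, never 4096, sacrificing one slot so that first == last always means "empty" rather than "full".

	#include <sys/types.h>

	static int32_t
	ring_empty_len(int32_t first, int32_t last, int32_t qsize)
	{
		/* e.g. first == last yields qsize - 1, never qsize */
		return ((first - last - 1) & (qsize - 1));
	}
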
740 739
741 740
742 741 /*
743 742 * Function: arcmsr_tran_tgt_init
744 743 * Description: Called when initializing a target device instance. If
745 744 * no per-target initialization is required, the HBA
746 745 * may leave tran_tgt_init to NULL
747 746 * Input:
748 747 * dev_info_t *host_dev_info,
749 748 * dev_info_t *target_dev_info,
750 749 * scsi_hba_tran_t *tran,
751 750 * struct scsi_device *sd
752 751 *
753 752 * Return: DDI_SUCCESS if success, else return DDI_FAILURE
754 753 *
755 754 * entry point enables the HBA to allocate and/or initialize any per-
756 755 * target resources.
757 756 * It also enables the HBA to qualify the device's address as valid and
758 757 * supportable for that particular HBA.
759 758 * By returning DDI_FAILURE, the instance of the target driver for that
760 759 * device will not be probed or attached.
761 760 * This entry point is not required, and if none is supplied,
762 761 * the framework will attempt to probe and attach all possible instances
763 762 * of the appropriate target drivers.
764 763 */
765 764 static int
766 765 arcmsr_tran_tgt_init(dev_info_t *host_dev_info, dev_info_t *target_dev_info,
767 766 scsi_hba_tran_t *tran, struct scsi_device *sd)
768 767 {
769 768 uint16_t target;
770 769 uint8_t lun;
771 770 struct ACB *acb = tran->tran_hba_private;
772 771
773 772 _NOTE(ARGUNUSED(tran, target_dev_info, host_dev_info))
774 773
775 774 target = sd->sd_address.a_target;
776 775 lun = sd->sd_address.a_lun;
777 776 if ((target >= ARCMSR_MAX_TARGETID) || (lun >= ARCMSR_MAX_TARGETLUN)) {
778 777 return (DDI_FAILURE);
779 778 }
780 779
781 780
782 781 if (ndi_dev_is_persistent_node(target_dev_info) == 0) {
783 782 /*
784 783 * If no persistent node exists, we don't allow a .conf node
785 784 * to be created.
786 785 */
787 786 if (arcmsr_find_child(acb, target, lun) != NULL) {
788 787 if ((ndi_merge_node(target_dev_info,
789 788 arcmsr_name_node) != DDI_SUCCESS)) {
790 789 return (DDI_SUCCESS);
791 790 }
792 791 }
793 792 return (DDI_FAILURE);
794 793 }
795 794
796 795 return (DDI_SUCCESS);
797 796 }
798 797
799 798 /*
800 799 * Function: arcmsr_tran_getcap(9E)
800 799 * Description: Get the named capability and return its value.
801 800 * Return Values: current value of capability, if defined
802 801 * -1 if capability is not defined
804 803 * ------------------------------------------------------
805 804 * Common Capability Strings Array
806 805 * ------------------------------------------------------
807 806 * #define SCSI_CAP_DMA_MAX 0
808 807 * #define SCSI_CAP_MSG_OUT 1
809 808 * #define SCSI_CAP_DISCONNECT 2
810 809 * #define SCSI_CAP_SYNCHRONOUS 3
811 810 * #define SCSI_CAP_WIDE_XFER 4
812 811 * #define SCSI_CAP_PARITY 5
813 812 * #define SCSI_CAP_INITIATOR_ID 6
814 813 * #define SCSI_CAP_UNTAGGED_QING 7
815 814 * #define SCSI_CAP_TAGGED_QING 8
816 815 * #define SCSI_CAP_ARQ 9
817 816 * #define SCSI_CAP_LINKED_CMDS 10 a
818 817 * #define SCSI_CAP_SECTOR_SIZE 11 b
819 818 * #define SCSI_CAP_TOTAL_SECTORS 12 c
820 819 * #define SCSI_CAP_GEOMETRY 13 d
821 820 * #define SCSI_CAP_RESET_NOTIFICATION 14 e
822 821 * #define SCSI_CAP_QFULL_RETRIES 15 f
823 822 * #define SCSI_CAP_QFULL_RETRY_INTERVAL 16 10
824 823 * #define SCSI_CAP_SCSI_VERSION 17 11
825 824 * #define SCSI_CAP_INTERCONNECT_TYPE 18 12
826 825 * #define SCSI_CAP_LUN_RESET 19 13
827 826 */
828 827 static int
829 828 arcmsr_tran_getcap(struct scsi_address *ap, char *cap, int whom)
830 829 {
831 830 int capability = 0;
832 831 struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
833 832
834 833 if (cap == NULL || whom == 0) {
835 834 return (DDI_FAILURE);
836 835 }
837 836
838 837 mutex_enter(&acb->acb_mutex);
839 838 if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
840 839 mutex_exit(&acb->acb_mutex);
841 840 return (-1);
842 841 }
843 842 switch (scsi_hba_lookup_capstr(cap)) {
844 843 case SCSI_CAP_MSG_OUT:
845 844 case SCSI_CAP_DISCONNECT:
846 845 case SCSI_CAP_WIDE_XFER:
847 846 case SCSI_CAP_TAGGED_QING:
848 847 case SCSI_CAP_UNTAGGED_QING:
849 848 case SCSI_CAP_PARITY:
850 849 case SCSI_CAP_ARQ:
851 850 capability = 1;
852 851 break;
853 852 case SCSI_CAP_SECTOR_SIZE:
854 853 capability = ARCMSR_DEV_SECTOR_SIZE;
855 854 break;
856 855 case SCSI_CAP_DMA_MAX:
857 856 /* Limit to 16MB max transfer */
858 857 capability = ARCMSR_MAX_XFER_LEN;
859 858 break;
860 859 case SCSI_CAP_INITIATOR_ID:
861 860 capability = ARCMSR_SCSI_INITIATOR_ID;
862 861 break;
863 862 case SCSI_CAP_GEOMETRY:
864 863 /* heads (upper 16 bits), sectors per track (lower 16) */
865 864 capability = (255 << 16) | 63;
866 865 break;
867 866 default:
868 867 capability = -1;
869 868 break;
870 869 }
871 870 mutex_exit(&acb->acb_mutex);
872 871 return (capability);
873 872 }
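
The SCSI_CAP_GEOMETRY answer packs heads into the upper 16 bits and sectors per track into the lower 16, so (255 << 16) | 63 advertises the conventional 255-head, 63-sector translation; the target driver derives cylinders from capacity. A minimal restatement of the encoding:

	static int
	pack_geometry(int heads, int sectors_per_track)
	{
		return ((heads << 16) | sectors_per_track);
	}
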
874 873
875 874 /*
876 875 * Function: arcmsr_tran_setcap(9E)
877 876 * Description: Set the specific capability.
878 877 * Return Values: 1 - capability exists and can be set to new value
879 878 * 0 - capability could not be set to new value
880 879 * -1 - no such capability
881 880 */
882 881 static int
883 882 arcmsr_tran_setcap(struct scsi_address *ap, char *cap, int value, int whom)
884 883 {
885 884 _NOTE(ARGUNUSED(value))
886 885
887 886 int supported = 0;
888 887 struct ACB *acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
889 888
890 889 if (cap == NULL || whom == 0) {
891 890 return (-1);
892 891 }
893 892
894 893 mutex_enter(&acb->acb_mutex);
895 894 if (acb->devstate[ap->a_target][ap->a_lun] == ARECA_RAID_GONE) {
896 895 mutex_exit(&acb->acb_mutex);
897 896 return (-1);
898 897 }
899 898 switch (supported = scsi_hba_lookup_capstr(cap)) {
900 899 case SCSI_CAP_ARQ: /* 9 auto request sense */
901 900 case SCSI_CAP_UNTAGGED_QING: /* 7 */
902 901 case SCSI_CAP_TAGGED_QING: /* 8 */
903 902 /* these are always on, and cannot be turned off */
904 903 supported = (value == 1) ? 1 : 0;
905 904 break;
906 905 case SCSI_CAP_TOTAL_SECTORS: /* c */
907 906 supported = 1;
908 907 break;
909 908 case SCSI_CAP_DISCONNECT: /* 2 */
910 909 case SCSI_CAP_WIDE_XFER: /* 4 */
911 910 case SCSI_CAP_INITIATOR_ID: /* 6 */
912 911 case SCSI_CAP_DMA_MAX: /* 0 */
913 912 case SCSI_CAP_MSG_OUT: /* 1 */
914 913 case SCSI_CAP_PARITY: /* 5 */
915 914 case SCSI_CAP_LINKED_CMDS: /* a */
916 915 case SCSI_CAP_RESET_NOTIFICATION: /* e */
917 916 case SCSI_CAP_SECTOR_SIZE: /* b */
918 917 /* these are not settable */
919 918 supported = 0;
920 919 break;
921 920 default:
922 921 supported = -1;
923 922 break;
924 923 }
925 924 mutex_exit(&acb->acb_mutex);
926 925 return (supported);
927 926 }
928 927
929 928
930 929 /*
931 930 * Function: arcmsr_tran_init_pkt
932 931 * Return Values: pointer to scsi_pkt, or NULL
933 932 * Description: simultaneously allocate both a scsi_pkt(9S) structure and
934 933 * DMA resources for that pkt.
935 934 * Called by kernel on behalf of a target driver
936 935 * calling scsi_init_pkt(9F).
937 936 * Refer to tran_init_pkt(9E) man page
938 937 * Context: Can be called from different kernel process threads.
939 938 * Can be called by interrupt thread.
940 939 * Allocates SCSI packet and DMA resources
941 940 */
942 941 static struct
943 942 scsi_pkt *arcmsr_tran_init_pkt(struct scsi_address *ap,
944 943 register struct scsi_pkt *pkt, struct buf *bp, int cmdlen, int statuslen,
945 944 int tgtlen, int flags, int (*callback)(), caddr_t arg)
946 945 {
947 946 struct CCB *ccb;
948 947 struct ARCMSR_CDB *arcmsr_cdb;
949 948 struct ACB *acb;
950 949 int old_pkt_flag;
951 950
952 951 acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
953 952
954 953 if (acb->acb_flags & ACB_F_BUS_RESET) {
955 954 return (NULL);
956 955 }
957 956 if (pkt == NULL) {
958 957 /* get free CCB */
959 958 (void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
960 959 DDI_DMA_SYNC_FORKERNEL);
961 960 ccb = arcmsr_get_freeccb(acb);
962 961 if (ccb == (struct CCB *)NULL) {
963 962 return (NULL);
964 963 }
965 964
966 965 if (statuslen < sizeof (struct scsi_arq_status)) {
967 966 statuslen = sizeof (struct scsi_arq_status);
968 967 }
969 968 pkt = scsi_hba_pkt_alloc(acb->dev_info, ap, cmdlen,
970 969 statuslen, tgtlen, sizeof (void *), callback, arg);
971 970 if (pkt == NULL) {
972 971 arcmsr_warn(acb, "scsi pkt allocation failed");
973 972 arcmsr_free_ccb(ccb);
974 973 return (NULL);
975 974 }
976 975 /* Initialize CCB */
977 976 ccb->pkt = pkt;
978 977 ccb->pkt_dma_handle = NULL;
979 978 /* record how many sg are needed to xfer on this pkt */
980 979 ccb->pkt_ncookies = 0;
981 980 /* record how many sg we got from this window */
982 981 ccb->pkt_cookie = 0;
983 982 /* record how many windows have partial dma map set */
984 983 ccb->pkt_nwin = 0;
985 984 /* record current sg window position */
986 985 ccb->pkt_curwin = 0;
987 986 ccb->pkt_dma_len = 0;
988 987 ccb->pkt_dma_offset = 0;
989 988 ccb->resid_dmacookie.dmac_size = 0;
990 989
991 990 /*
992 991 * we keep this pointer because we want to fake some
993 992 * information in tran_start
994 993 */
995 994 ccb->bp = bp;
996 995
997 996 /* Initialize arcmsr_cdb */
998 997 arcmsr_cdb = &ccb->arcmsr_cdb;
999 998 bzero(arcmsr_cdb, sizeof (struct ARCMSR_CDB));
1000 999 arcmsr_cdb->Bus = 0;
1001 1000 arcmsr_cdb->Function = 1;
1002 1001 arcmsr_cdb->LUN = ap->a_lun;
1003 1002 arcmsr_cdb->TargetID = ap->a_target;
1004 1003 arcmsr_cdb->CdbLength = (uint8_t)cmdlen;
1005 1004 arcmsr_cdb->Context = (uintptr_t)arcmsr_cdb;
1006 1005
1007 1006 /* Fill in the rest of the structure */
1008 1007 pkt->pkt_ha_private = ccb;
1009 1008 pkt->pkt_address = *ap;
1010 1009 pkt->pkt_comp = NULL;
1011 1010 pkt->pkt_flags = 0;
1012 1011 pkt->pkt_time = 0;
1013 1012 pkt->pkt_resid = 0;
1014 1013 pkt->pkt_statistics = 0;
1015 1014 pkt->pkt_reason = 0;
1016 1015 old_pkt_flag = 0;
1017 1016 } else {
1018 1017 ccb = pkt->pkt_ha_private;
1019 1018 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
1020 1019 if (!(ccb->ccb_state & ARCMSR_CCB_BACK)) {
1021 1020 return (NULL);
1022 1021 }
1023 1022 }
1024 1023
1025 1024 /*
1026 1025 * you cannot update CdbLength with cmdlen here, it would
1027 1026 * cause a data compare error
1028 1027 */
1029 1028 ccb->ccb_state = ARCMSR_CCB_UNBUILD;
1030 1029 old_pkt_flag = 1;
1031 1030 }
1032 1031
1033 1032 /* Second step: dma allocation/move */
1034 1033 if (bp && bp->b_bcount != 0) {
1035 1034 /*
1036 1035 * The system may have a large data transfer to perform,
1037 1036 * anywhere from 20 bytes up to 819200 bytes.
1038 1037 * arcmsr_dma_alloc() sets up pkt_dma_handle (non-NULL) for
1039 1038 * the first DMA window; the remainder of the transfer is
1040 1039 * then completed by a series of continued READ or WRITE
1041 1040 * scsi commands, until all of the data has been moved.
1042 1041 * arcmsr_dma_move() repeats the binding for each further
1043 1042 * window, reusing the same ccb until the transfer completes.
1044 1043 * After arcmsr_tran_init_pkt() returns, the Solaris kernel
1045 1044 * uses pkt_resid and the buf's b_bcount to decide which scsi
1046 1045 * command descriptor to build and what data length the
1047 1046 * following arcmsr_tran_start scsi cdb should carry.
1048 1047 *
1049 1048 * Each transfer should be aligned on a 512 byte boundary
1050 1049 */
1051 1050 if (ccb->pkt_dma_handle == NULL) {
1052 1051 if (arcmsr_dma_alloc(acb, pkt, bp, flags, callback) ==
1053 1052 DDI_FAILURE) {
1054 1053 /*
1055 1054 * the HBA driver is unable to allocate DMA
1056 1055 * resources, it must free the allocated
1057 1056 * scsi_pkt(9S) before returning
1058 1057 */
1059 1058 arcmsr_warn(acb, "dma allocation failure");
1060 1059 if (old_pkt_flag == 0) {
1061 1060 arcmsr_warn(acb, "dma "
1062 1061 "allocation failed to free "
1063 1062 "scsi hba pkt");
1064 1063 arcmsr_free_ccb(ccb);
1065 1064 scsi_hba_pkt_free(ap, pkt);
1066 1065 }
1067 1066 return (NULL);
1068 1067 }
1069 1068 } else {
1070 1069 /* DMA resources to next DMA window, for old pkt */
1071 1070 if (arcmsr_dma_move(acb, pkt, bp) == DDI_FAILURE) {
1072 1071 arcmsr_warn(acb, "dma move failed");
1073 1072 return (NULL);
1074 1073 }
1075 1074 }
1076 1075 } else {
1077 1076 pkt->pkt_resid = 0;
1078 1077 }
1079 1078 return (pkt);
1080 1079 }
1081 1080
1082 1081 /*
1083 1082 * Function: arcmsr_tran_start(9E)
1084 1083 * Description: Transport the command in pktp to the target device.
1085 1084 * The command is not finished when this returns, only
1086 1085 * sent to the target; arcmsr_intr_handler will call
1087 1086 * scsi_hba_pkt_comp(pktp) when the target device is done.
1088 1087 *
1089 1088 * Input: struct scsi_address *ap, struct scsi_pkt *pktp
1090 1089 * Output: TRAN_ACCEPT if pkt is OK and the driver is not busy
1091 1090 * TRAN_BUSY if the driver is busy
1092 1091 * TRAN_BADPKT if pkt is invalid
1093 1092 */
1094 1093 static int
1095 1094 arcmsr_tran_start(struct scsi_address *ap, struct scsi_pkt *pkt)
1096 1095 {
1097 1096 struct ACB *acb;
1098 1097 struct CCB *ccb;
1099 1098 int target = ap->a_target;
1100 1099 int lun = ap->a_lun;
1101 1100
1102 1101 acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
1103 1102 ccb = pkt->pkt_ha_private;
1104 1103 *pkt->pkt_scbp = STATUS_GOOD; /* clear arq scsi_status */
1105 1104
1106 1105 if ((ccb->ccb_flags & CCB_FLAG_DMAVALID) &&
1107 1106 (ccb->ccb_flags & DDI_DMA_CONSISTENT))
1108 1107 (void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1109 1108 DDI_DMA_SYNC_FORDEV);
1110 1109
1111 1110 if (ccb->ccb_state == ARCMSR_CCB_UNBUILD)
1112 1111 arcmsr_build_ccb(ccb);
1113 1112
1114 1113 if (acb->acb_flags & ACB_F_BUS_RESET) {
1115 1114 pkt->pkt_reason = CMD_RESET;
1116 1115 pkt->pkt_statistics |= STAT_BUS_RESET;
1117 1116 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1118 1117 STATE_SENT_CMD | STATE_GOT_STATUS);
1119 1118 if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1120 1119 (pkt->pkt_state & STATE_XFERRED_DATA))
1121 1120 (void) ddi_dma_sync(ccb->pkt_dma_handle,
1122 1121 0, 0, DDI_DMA_SYNC_FORCPU);
1123 1122
1124 1123 scsi_hba_pkt_comp(pkt);
1125 1124 return (TRAN_ACCEPT);
1126 1125 }
1127 1126
1128 1127 /* IMPORTANT: Target 16 is a virtual device for iop message transfer */
1129 1128 if (target == 16) {
1130 1129
1131 1130 struct buf *bp = ccb->bp;
1132 1131 uint8_t scsicmd = pkt->pkt_cdbp[0];
1133 1132
1134 1133 switch (scsicmd) {
1135 1134 case SCMD_INQUIRY: {
1136 1135 if (lun != 0) {
1137 1136 ccb->pkt->pkt_reason = CMD_TIMEOUT;
1138 1137 ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1139 1138 arcmsr_ccb_complete(ccb, 0);
1140 1139 return (TRAN_ACCEPT);
1141 1140 }
1142 1141
1143 1142 if (bp && bp->b_un.b_addr && bp->b_bcount) {
1144 1143 uint8_t inqdata[36];
1145 1144
1146 1145 /* The EVPD bit and page code are not supported */
1147 1146 if (pkt->pkt_cdbp[1] || pkt->pkt_cdbp[2]) {
1148 1147 inqdata[1] = 0xFF;
1149 1148 inqdata[2] = 0x00;
1150 1149 } else {
1151 1150 /* Periph Qualifier & Periph Dev Type */
1152 1151 inqdata[0] = DTYPE_PROCESSOR;
1153 1152 /* rem media bit & Dev Type Modifier */
1154 1153 inqdata[1] = 0;
1155 1154 /* ISO, ECMA, & ANSI versions */
1156 1155 inqdata[2] = 0;
1157 1156 inqdata[3] = 0;
1158 1157 /* length of additional data */
1159 1158 inqdata[4] = 31;
1160 1159 /* Vendor Identification */
1161 1160 bcopy("Areca ", &inqdata[8], VIDLEN);
1162 1161 /* Product Identification */
1163 1162 bcopy("RAID controller ", &inqdata[16],
1164 1163 PIDLEN);
1165 1164 /* Product Revision */
1166 1165 bcopy("R001", &inqdata[32], REVLEN);
1167 1166 if (bp->b_flags & (B_PHYS | B_PAGEIO))
1168 1167 bp_mapin(bp);
1169 1168
1170 1169 (void) memcpy(bp->b_un.b_addr,
1171 1170 inqdata, sizeof (inqdata));
1172 1171 }
1173 1172 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1174 1173 }
1175 1174 arcmsr_ccb_complete(ccb, 0);
1176 1175 return (TRAN_ACCEPT);
1177 1176 }
1178 1177 case SCMD_WRITE_BUFFER:
1179 1178 case SCMD_READ_BUFFER: {
1180 1179 if (arcmsr_iop_message_xfer(acb, pkt)) {
1181 1180 /* error just for retry */
1182 1181 ccb->pkt->pkt_reason = CMD_TRAN_ERR;
1183 1182 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
1184 1183 }
1185 1184 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1186 1185 arcmsr_ccb_complete(ccb, 0);
1187 1186 return (TRAN_ACCEPT);
1188 1187 }
1189 1188 default:
1190 1189 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1191 1190 arcmsr_ccb_complete(ccb, 0);
1192 1191 return (TRAN_ACCEPT);
1193 1192 }
1194 1193 }
1195 1194
1196 1195 if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
1197 1196 uint8_t block_cmd;
1198 1197
1199 1198 block_cmd = pkt->pkt_cdbp[0] & 0x0f;
1200 1199 if (block_cmd == 0x08 || block_cmd == 0x0a) {
1201 1200 pkt->pkt_reason = CMD_TIMEOUT;
1202 1201 pkt->pkt_statistics |= STAT_TIMEOUT;
1203 1202 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1204 1203 STATE_SENT_CMD | STATE_GOT_STATUS);
1205 1204 if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1206 1205 (pkt->pkt_state & STATE_XFERRED_DATA)) {
1207 1206 (void) ddi_dma_sync(ccb->pkt_dma_handle,
1208 1207 ccb->pkt_dma_offset,
1209 1208 ccb->pkt_dma_len, DDI_DMA_SYNC_FORCPU);
1210 1209 }
1211 1210 scsi_hba_pkt_comp(pkt);
1212 1211 return (TRAN_ACCEPT);
1213 1212 }
1214 1213 }
1215 1214 mutex_enter(&acb->postq_mutex);
1216 1215 if (acb->ccboutstandingcount >= ARCMSR_MAX_OUTSTANDING_CMD) {
1217 1216 ccb->ccb_state = ARCMSR_CCB_RETRY;
1218 1217 mutex_exit(&acb->postq_mutex);
1219 1218 return (TRAN_BUSY);
1220 1219 } else if (arcmsr_post_ccb(acb, ccb) == DDI_FAILURE) {
1221 1220 arcmsr_warn(acb, "post ccb failure, ccboutstandingcount = %d",
1222 1221 acb->ccboutstandingcount);
1223 1222 mutex_exit(&acb->postq_mutex);
1224 1223 return (TRAN_FATAL_ERROR);
1225 1224 }
1226 1225 mutex_exit(&acb->postq_mutex);
1227 1226 return (TRAN_ACCEPT);
1228 1227 }
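
A note on the "block_cmd = pkt->pkt_cdbp[0] & 0x0f" test above: the SCSI block READ opcodes (0x08, 0x28, 0xa8, 0x88) all carry 0x8 in the low nibble and the block WRITEs (0x0a, 0x2a, 0xaa, 0x8a) all carry 0xa, so the mask reduces every READ(6/10/12/16) to 0x08 and every WRITE to 0x0a. A minimal restatement (the mask can also match unrelated opcodes, so treat it as a heuristic, not a parser):

	#include <sys/types.h>

	static boolean_t
	is_block_io(uint8_t opcode)
	{
		uint8_t nib = opcode & 0x0f;

		return (nib == 0x08 || nib == 0x0a);	/* READ(x) / WRITE(x) */
	}
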
1229 1228
1230 1229 /*
1231 1230 * Function name: arcmsr_tran_destroy_pkt
1232 1231 * Return Values: none
1233 1232 * Description: Called by kernel on behalf of a target driver
1234 1233 * calling scsi_destroy_pkt(9F).
1235 1234 * Refer to tran_destroy_pkt(9E) man page
1236 1235 * Context: Can be called from different kernel process threads.
1237 1236 * Can be called by interrupt thread.
1238 1237 */
1239 1238 static void
1240 1239 arcmsr_tran_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1241 1240 {
1242 1241 struct CCB *ccb = pkt->pkt_ha_private;
1243 1242 ddi_dma_handle_t pkt_dma_handle;
1244 1243
1245 1244 if ((ccb == NULL) || (ccb->pkt != pkt)) {
1246 1245 return;
1247 1246 }
1248 1247 /* only dereference ccb once the checks above have passed */
1249 1248 pkt_dma_handle = ccb->pkt_dma_handle;
1250 1249
1251 1250 if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1252 1251 ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1253 1252 if (pkt_dma_handle) {
1254 1253 (void) ddi_dma_unbind_handle(ccb->pkt_dma_handle);
1255 1254 }
1256 1255 }
1257 1256 if (pkt_dma_handle) {
1258 1257 (void) ddi_dma_free_handle(&pkt_dma_handle);
1259 1258 }
1260 1259 pkt->pkt_ha_private = NULL;
1261 1260 if (ccb) {
1262 1261 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
1263 1262 if (ccb->ccb_state & ARCMSR_CCB_BACK) {
1264 1263 arcmsr_free_ccb(ccb);
1265 1264 } else {
1266 1265 ccb->ccb_state |= ARCMSR_CCB_WAIT4_FREE;
1267 1266 }
1268 1267 } else {
1269 1268 arcmsr_free_ccb(ccb);
1270 1269 }
1271 1270 }
1272 1271 scsi_hba_pkt_free(ap, pkt);
1273 1272 }
1274 1273
1275 1274 /*
1276 1275 * Function name: arcmsr_tran_dmafree()
1277 1276 * Return Values: none
1278 1277 * Description: free dvma resources
1279 1278 * Context: Can be called from different kernel process threads.
1280 1279 * Can be called by interrupt thread.
1281 1280 */
1282 1281 static void
1283 1282 arcmsr_tran_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
1284 1283 {
1285 1284 struct CCB *ccb = pkt->pkt_ha_private;
1286 1285
1287 1286 if ((ccb == NULL) || (ccb->pkt != pkt)) {
1288 1287 return;
1289 1288 }
1290 1289 if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1291 1290 ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1292 1291 if (ddi_dma_unbind_handle(ccb->pkt_dma_handle) != DDI_SUCCESS) {
1293 1292 arcmsr_warn(ccb->acb, "ddi_dma_unbind_handle() failed "
1294 1293 "(target %d lun %d)", ap->a_target, ap->a_lun);
1295 1294 }
1296 1295 ddi_dma_free_handle(&ccb->pkt_dma_handle);
1297 1296 ccb->pkt_dma_handle = NULL;
1298 1297 }
1299 1298 }
1300 1299
1301 1300 /*
1302 1301 * Function name: arcmsr_tran_sync_pkt()
1303 1302 * Return Values: none
1304 1303 * Description: sync dma
1305 1304 * Context: Can be called from different kernel process threads.
1306 1305 * Can be called by interrupt thread.
1307 1306 */
1308 1307 static void
1309 1308 arcmsr_tran_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
1310 1309 {
1311 1310 struct CCB *ccb;
1312 1311
1313 1312 ccb = pkt->pkt_ha_private;
1314 1313 if ((ccb == NULL) || (ccb->pkt != pkt)) {
1315 1314 return;
1316 1315 }
1317 1316 if (ccb->ccb_flags & CCB_FLAG_DMAVALID) {
1318 1317 if (ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1319 1318 (ccb->ccb_flags & CCB_FLAG_DMAWRITE) ?
1320 1319 DDI_DMA_SYNC_FORDEV : DDI_DMA_SYNC_FORCPU) !=
1321 1320 DDI_SUCCESS) {
1322 1321 arcmsr_warn(ccb->acb,
1323 1322 "sync pkt failed for target %d lun %d",
1324 1323 ap->a_target, ap->a_lun);
1325 1324 }
1326 1325 }
1327 1326 }
1328 1327
1329 1328
1330 1329 /*
1331 1330 * Function: arcmsr_tran_abort(9E)
1332 1331 * SCSA interface routine to abort pkt(s) in progress.
1333 1332 * Aborts the pkt specified. If NULL pkt, aborts ALL pkts.
1334 1333 * Output: Return 1 if success
1335 1334 * Return 0 if failure
1336 1335 */
1337 1336 static int
1338 1337 arcmsr_tran_abort(struct scsi_address *ap, struct scsi_pkt *abortpkt)
1339 1338 {
1340 1339 struct ACB *acb;
1341 1340 int return_code;
1342 1341
1343 1342 acb = ap->a_hba_tran->tran_hba_private;
1344 1343
1345 1344 while (acb->ccboutstandingcount != 0) {
1346 1345 drv_usecwait(10000);
1347 1346 }
1348 1347
1349 1348 mutex_enter(&acb->isr_mutex);
1350 1349 return_code = arcmsr_seek_cmd2abort(acb, abortpkt);
1351 1350 mutex_exit(&acb->isr_mutex);
1352 1351
1353 1352 if (return_code != DDI_SUCCESS) {
1354 1353 arcmsr_warn(acb, "abort command failed for target %d lun %d",
1355 1354 ap->a_target, ap->a_lun);
1356 1355 return (0);
1357 1356 }
1358 1357 return (1);
1359 1358 }
1360 1359
1361 1360 /*
1362 1361 * Function: arcmsr_tran_reset(9E)
1363 1362 * SCSA interface routine to perform scsi resets on either
1364 1363 * a specified target or the bus (default).
1365 1364 * Output: Return 1 if success
1366 1365 * Return 0 if failure
1367 1366 */
1368 1367 static int
1369 1368 arcmsr_tran_reset(struct scsi_address *ap, int level) {
1370 1369
1371 1370 struct ACB *acb;
1372 1371 int return_code = 1;
1373 1372 int target = ap->a_target;
1374 1373 int lun = ap->a_lun;
1375 1374
1376 1375 /* Are we in the middle of dumping core? */
1377 1376 if (ddi_in_panic())
1378 1377 return (return_code);
1379 1378
1380 1379 acb = (struct ACB *)ap->a_hba_tran->tran_hba_private;
1381 1380 mutex_enter(&acb->isr_mutex);
1382 1381 switch (level) {
1383 1382 case RESET_ALL: /* 0 */
1384 1383 acb->num_resets++;
1385 1384 acb->acb_flags |= ACB_F_BUS_RESET;
1386 1385 if (acb->timeout_count) {
1387 1386 if (arcmsr_iop_reset(acb) != 0) {
1388 1387 arcmsr_handle_iop_bus_hold(acb);
1389 1388 acb->acb_flags &= ~ACB_F_BUS_HANG_ON;
1390 1389 }
1391 1390 }
1392 1391 acb->acb_flags &= ~ACB_F_BUS_RESET;
1393 1392 break;
1394 1393 case RESET_TARGET: /* 1 */
1395 1394 if (acb->devstate[target][lun] == ARECA_RAID_GONE)
1396 1395 return_code = 0;
1397 1396 break;
1398 1397 case RESET_BUS: /* 2 */
1399 1398 return_code = 0;
1400 1399 break;
1401 1400 case RESET_LUN: /* 3 */
1402 1401 return_code = 0;
1403 1402 break;
1404 1403 default:
1405 1404 return_code = 0;
1406 1405 }
1407 1406 mutex_exit(&acb->isr_mutex);
1408 1407 return (return_code);
1409 1408 }
1410 1409
1411 1410 static int
1412 1411 arcmsr_tran_bus_config(dev_info_t *parent, uint_t flags,
1413 1412 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
1414 1413 {
1415 1414 struct ACB *acb;
1416 1415 int circ = 0;
1417 1416 int rval = NDI_FAILURE;
1418 1417 int tgt, lun;
1419 1418
1420 1419 if ((acb = ddi_get_soft_state(arcmsr_soft_state,
1421 1420 ddi_get_instance(parent))) == NULL)
1422 1421 return (NDI_FAILURE);
1423 1422
1424 1423 ndi_devi_enter(parent, &circ);
1425 1424 switch (op) {
1426 1425 case BUS_CONFIG_ONE:
1427 1426 if (arcmsr_parse_devname(arg, &tgt, &lun) != 0) {
1428 1427 rval = NDI_FAILURE;
1429 1428 break;
1430 1429 }
1431 1430 if (acb->device_map[tgt] & 1 << lun) {
1432 1431 acb->devstate[tgt][lun] = ARECA_RAID_GOOD;
1433 1432 rval = arcmsr_config_lun(acb, tgt, lun, childp);
1434 1433 }
1435 1434 break;
1436 1435
1437 1436 case BUS_CONFIG_DRIVER:
1438 1437 case BUS_CONFIG_ALL:
1439 1438 for (tgt = 0; tgt < ARCMSR_MAX_TARGETID; tgt++)
1440 1439 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++)
1441 1440 if (acb->device_map[tgt] & 1 << lun) {
1442 1441 acb->devstate[tgt][lun] =
1443 1442 ARECA_RAID_GOOD;
1444 1443 (void) arcmsr_config_lun(acb, tgt,
1445 1444 lun, NULL);
1446 1445 }
1447 1446
1448 1447 rval = NDI_SUCCESS;
1449 1448 break;
1450 1449 }
1451 1450 if (rval == NDI_SUCCESS)
1452 1451 rval = ndi_busop_bus_config(parent, flags, op, arg, childp, 0);
1453 1452 ndi_devi_exit(parent, circ);
1454 1453 return (rval);
1455 1454 }
1456 1455
1457 1456 /*
1458 1457 * Function name: arcmsr_dma_alloc
1459 1458 * Return Values: 0 if successful, -1 if failure
1460 1459 * Description: allocate DMA resources
1461 1460 * Context: Can only be called from arcmsr_tran_init_pkt()
1462 1461 * register struct scsi_address *ap = &((pkt)->pkt_address);
1463 1462 */
1464 1463 static int
1465 1464 arcmsr_dma_alloc(struct ACB *acb, struct scsi_pkt *pkt,
1466 1465 struct buf *bp, int flags, int (*callback)())
1467 1466 {
1468 1467 struct CCB *ccb = pkt->pkt_ha_private;
1469 1468 int alloc_result, map_method, dma_flags;
1470 1469 int resid = 0;
1471 1470 int total_ccb_xferlen = 0;
1472 1471 int (*cb)(caddr_t);
1473 1472 uint8_t i;
1474 1473
1475 1474 /*
1476 1475 * at this point the PKT SCSI CDB is empty, and dma xfer length
1477 1476 * is bp->b_bcount
1478 1477 */
1479 1478
1480 1479 if (bp->b_flags & B_READ) {
1481 1480 ccb->ccb_flags &= ~CCB_FLAG_DMAWRITE;
1482 1481 dma_flags = DDI_DMA_READ;
1483 1482 } else {
1484 1483 ccb->ccb_flags |= CCB_FLAG_DMAWRITE;
1485 1484 dma_flags = DDI_DMA_WRITE;
1486 1485 }
1487 1486
1488 1487 if (flags & PKT_CONSISTENT) {
1489 1488 ccb->ccb_flags |= CCB_FLAG_DMACONSISTENT;
1490 1489 dma_flags |= DDI_DMA_CONSISTENT;
1491 1490 }
1492 1491 if (flags & PKT_DMA_PARTIAL) {
1493 1492 dma_flags |= DDI_DMA_PARTIAL;
1494 1493 }
1495 1494
1496 1495 dma_flags |= DDI_DMA_REDZONE;
1497 1496 cb = (callback == NULL_FUNC) ? DDI_DMA_DONTWAIT : DDI_DMA_SLEEP;
1498 1497
1499 1498 alloc_result = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_dma_attr,
1500 1499 cb, 0, &ccb->pkt_dma_handle);
1501 1500 if (alloc_result != DDI_SUCCESS) {
1502 1501 arcmsr_warn(acb, "dma allocate failed (%x)", alloc_result);
1503 1502 return (DDI_FAILURE);
1504 1503 }
1505 1504
1506 1505 map_method = ddi_dma_buf_bind_handle(ccb->pkt_dma_handle,
1507 1506 bp, dma_flags, cb, 0,
1508 1507 &ccb->pkt_dmacookies[0], /* SG List pointer */
1509 1508 &ccb->pkt_ncookies); /* number of sgl cookies */
1510 1509
1511 1510 switch (map_method) {
1512 1511 case DDI_DMA_PARTIAL_MAP:
1513 1512 /*
1514 1513 * When main memory is larger than 4G,
1515 1514 * DDI_DMA_PARTIAL_MAP can be returned here.
1516 1515 *
1517 1516 * We've already set DDI_DMA_PARTIAL in dma_flags,
1518 1517 * so if it's now missing, there's something screwy
1519 1518 * happening. We plow on....
1520 1519 */
1521 1520
1522 1521 if ((dma_flags & DDI_DMA_PARTIAL) == 0) {
1523 1522 arcmsr_warn(acb,
1524 1523 "dma partial mapping lost ...impossible case!");
1525 1524 }
1526 1525 if (ddi_dma_numwin(ccb->pkt_dma_handle, &ccb->pkt_nwin) ==
1527 1526 DDI_FAILURE) {
1528 1527 arcmsr_warn(acb, "ddi_dma_numwin() failed");
1529 1528 }
1530 1529
1531 1530 if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
1532 1531 &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
1533 1532 &ccb->pkt_dmacookies[0], &ccb->pkt_ncookies) ==
1534 1533 DDI_FAILURE) {
1535 1534 arcmsr_warn(acb, "ddi_dma_getwin failed");
1536 1535 }
1537 1536
1538 1537 i = 0;
1539 1538 /* first cookie is accessed from ccb->pkt_dmacookies[0] */
1540 1539 total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
1541 1540 for (;;) {
1542 1541 i++;
1543 1542 if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1544 1543 (i == ccb->pkt_ncookies) ||
1545 1544 (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1546 1545 break;
1547 1546 }
1548 1547 /*
1549 1548 * next cookie will be retrieved from
1550 1549 * ccb->pkt_dmacookies[i]
1551 1550 */
1552 1551 ddi_dma_nextcookie(ccb->pkt_dma_handle,
1553 1552 &ccb->pkt_dmacookies[i]);
1554 1553 total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1555 1554 }
1556 1555 ccb->pkt_cookie = i;
1557 1556 ccb->arcmsr_cdb.sgcount = i;
1558 1557 if (total_ccb_xferlen > 512) {
1559 1558 resid = total_ccb_xferlen % 512;
1560 1559 if (resid != 0) {
1561 1560 i--;
1562 1561 total_ccb_xferlen -= resid;
1563 1562 /* modify last sg length */
1564 1563 ccb->pkt_dmacookies[i].dmac_size =
1565 1564 ccb->pkt_dmacookies[i].dmac_size - resid;
1566 1565 ccb->resid_dmacookie.dmac_size = resid;
1567 1566 ccb->resid_dmacookie.dmac_laddress =
1568 1567 ccb->pkt_dmacookies[i].dmac_laddress +
1569 1568 ccb->pkt_dmacookies[i].dmac_size;
1570 1569 }
1571 1570 }
1572 1571 ccb->total_dmac_size = total_ccb_xferlen;
1573 1572 ccb->ccb_flags |= CCB_FLAG_DMAVALID;
1574 1573 pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1575 1574
1576 1575 return (DDI_SUCCESS);
1577 1576
1578 1577 case DDI_DMA_MAPPED:
1579 1578 ccb->pkt_nwin = 1; /* all mapped, so only one window */
1580 1579 ccb->pkt_dma_len = 0;
1581 1580 ccb->pkt_dma_offset = 0;
1582 1581 i = 0;
1583 1582 /* first cookie is accessed from ccb->pkt_dmacookies[0] */
1584 1583 total_ccb_xferlen = ccb->pkt_dmacookies[0].dmac_size;
1585 1584 for (;;) {
1586 1585 i++;
1587 1586 if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1588 1587 (i == ccb->pkt_ncookies) ||
1589 1588 (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1590 1589 break;
1591 1590 }
1592 1591 /*
1593 1592 * next cookie will be retrieved from
1594 1593 * ccb->pkt_dmacookies[i]
1595 1594 */
1596 1595 ddi_dma_nextcookie(ccb->pkt_dma_handle,
1597 1596 &ccb->pkt_dmacookies[i]);
1598 1597 total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1599 1598 }
1600 1599 ccb->pkt_cookie = i;
1601 1600 ccb->arcmsr_cdb.sgcount = i;
1602 1601 if (total_ccb_xferlen > 512) {
1603 1602 resid = total_ccb_xferlen % 512;
1604 1603 if (resid != 0) {
1605 1604 i--;
1606 1605 total_ccb_xferlen -= resid;
1607 1606 /* modify last sg length */
1608 1607 ccb->pkt_dmacookies[i].dmac_size =
1609 1608 ccb->pkt_dmacookies[i].dmac_size - resid;
1610 1609 ccb->resid_dmacookie.dmac_size = resid;
1611 1610 ccb->resid_dmacookie.dmac_laddress =
1612 1611 ccb->pkt_dmacookies[i].dmac_laddress +
1613 1612 ccb->pkt_dmacookies[i].dmac_size;
1614 1613 }
1615 1614 }
1616 1615 ccb->total_dmac_size = total_ccb_xferlen;
1617 1616 ccb->ccb_flags |= CCB_FLAG_DMAVALID;
1618 1617 pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1619 1618 return (DDI_SUCCESS);
1620 1619
1621 1620 case DDI_DMA_NORESOURCES:
1622 1621 arcmsr_warn(acb, "dma map got 'no resources'");
1623 1622 bioerror(bp, ENOMEM);
1624 1623 break;
1625 1624
1626 1625 case DDI_DMA_NOMAPPING:
1627 1626 arcmsr_warn(acb, "dma map got 'no mapping'");
1628 1627 bioerror(bp, EFAULT);
1629 1628 break;
1630 1629
1631 1630 case DDI_DMA_TOOBIG:
1632 1631 arcmsr_warn(acb, "dma map got 'too big'");
1633 1632 bioerror(bp, EINVAL);
1634 1633 break;
1635 1634
1636 1635 case DDI_DMA_INUSE:
1637 1636 arcmsr_warn(acb, "dma map got 'in use' "
1638 1637 "(should not happen)");
1639 1638 break;
1640 1639 default:
1641 1640		arcmsr_warn(acb, "dma map failed (0x%x)", map_method);
1642 1641 break;
1643 1642 }
1644 1643
1645 1644 ddi_dma_free_handle(&ccb->pkt_dma_handle);
1646 1645 ccb->pkt_dma_handle = NULL;
1647 1646 ccb->ccb_flags &= ~CCB_FLAG_DMAVALID;
1648 1647 return (DDI_FAILURE);
1649 1648 }
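
The 512-byte trimming above repeats verbatim in the DDI_DMA_PARTIAL_MAP and
DDI_DMA_MAPPED cases: the total transfer is clipped to a whole number of
sectors and the cut-off tail is remembered in resid_dmacookie so that
arcmsr_dma_move() can resume there. A minimal user-level sketch of that
arithmetic, using simplified stand-in types rather than the driver's real
CCB fields:

	#include <stdio.h>
	#include <stdint.h>

	/* simplified stand-in for one SG cookie */
	struct cookie {
		uint64_t laddr;		/* DMA address */
		uint32_t size;		/* length in bytes */
	};

	int
	main(void)
	{
		struct cookie last = { 0x10000000ULL, 1836 };	/* last SG entry */
		struct cookie resid = { 0, 0 };
		uint32_t total = 5932;	/* mapped so far; not a 512 multiple */

		if (total > 512) {
			uint32_t r = total % 512;	/* 300 here */

			if (r != 0) {
				total -= r;		/* whole sectors only */
				last.size -= r;		/* shorten the last entry */
				resid.size = r;		/* stash the tail ... */
				resid.laddr = last.laddr + last.size;	/* ... and its start */
			}
		}
		(void) printf("total=%u last=%u resid=%u@0x%llx\n",
		    total, last.size, resid.size,
		    (unsigned long long)resid.laddr);	/* 5632 1536 300@0x10000600 */
		return (0);
	}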
1650 1649
1651 1650
1652 1651 /*
1653 1652 * Function name: arcmsr_dma_move
1654 1653 * Return Values: 0 if successful, -1 if failure
1655 1654 * Description: move DMA resources to next DMA window
1656 1655 * Context: Can only be called from arcmsr_tran_init_pkt()
1657 1656 */
1658 1657 static int
1659 1658 arcmsr_dma_move(struct ACB *acb, struct scsi_pkt *pkt, struct buf *bp)
1660 1659 {
1661 1660 struct CCB *ccb = pkt->pkt_ha_private;
1662 1661 uint8_t i = 0;
1663 1662 int resid = 0;
1664 1663 int total_ccb_xferlen = 0;
1665 1664
1666 1665 if (ccb->resid_dmacookie.dmac_size != 0) {
1667 1666 total_ccb_xferlen += ccb->resid_dmacookie.dmac_size;
1668 1667 ccb->pkt_dmacookies[i].dmac_size =
1669 1668 ccb->resid_dmacookie.dmac_size;
1670 1669 ccb->pkt_dmacookies[i].dmac_laddress =
1671 1670 ccb->resid_dmacookie.dmac_laddress;
1672 1671 i++;
1673 1672 ccb->resid_dmacookie.dmac_size = 0;
1674 1673 }
1675 1674 /*
1676 1675 * If there are no more cookies remaining in this window,
1677 1676 * move to the next window.
1678 1677 */
1679 1678 if (ccb->pkt_cookie == ccb->pkt_ncookies) {
1680 1679 /*
1681 1680		 * only a "partial" dma mapping arrives here
1682 1681 */
1683 1682 if ((ccb->pkt_curwin == ccb->pkt_nwin) &&
1684 1683 (ccb->pkt_nwin == 1)) {
1685 1684 return (DDI_SUCCESS);
1686 1685 }
1687 1686
1688 1687 /* At last window, cannot move */
1689 1688 if (++ccb->pkt_curwin >= ccb->pkt_nwin) {
1690 1689 arcmsr_warn(acb, "dma partial set, numwin exceeded");
1691 1690 return (DDI_FAILURE);
1692 1691 }
1693 1692 if (ddi_dma_getwin(ccb->pkt_dma_handle, ccb->pkt_curwin,
1694 1693 &ccb->pkt_dma_offset, &ccb->pkt_dma_len,
1695 1694 &ccb->pkt_dmacookies[i], &ccb->pkt_ncookies) ==
1696 1695 DDI_FAILURE) {
1697 1696 arcmsr_warn(acb, "ddi_dma_getwin failed");
1698 1697 return (DDI_FAILURE);
1699 1698 }
1700 1699 /* reset cookie pointer */
1701 1700 ccb->pkt_cookie = 0;
1702 1701 } else {
1703 1702 /*
1704 1703		 * only a fully-mapped ("all") dma transfer arrives
1705 1704		 * here.
1706 1705		 * We still have more cookies in this window; fetch
1707 1706		 * the next one from pkt_dma_handle into the
1708 1707		 * ccb->pkt_dmacookies array.
1709 1708 */
1710 1709 ddi_dma_nextcookie(ccb->pkt_dma_handle,
1711 1710 &ccb->pkt_dmacookies[i]);
1712 1711 }
1713 1712
1714 1713 /* Get remaining cookies in this window, up to our maximum */
1715 1714 total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1716 1715
1717 1716	/* retrieve and store the remaining cookies, from ccb->pkt_dmacookies[i] */
1718 1717 for (;;) {
1719 1718 i++;
1720 1719		/* count of cookies consumed from this window */
1721 1720 ccb->pkt_cookie++;
1722 1721 if ((i == ARCMSR_MAX_SG_ENTRIES) ||
1723 1722 (ccb->pkt_cookie == ccb->pkt_ncookies) ||
1724 1723 (total_ccb_xferlen == ARCMSR_MAX_XFER_LEN)) {
1725 1724 break;
1726 1725 }
1727 1726 ddi_dma_nextcookie(ccb->pkt_dma_handle,
1728 1727 &ccb->pkt_dmacookies[i]);
1729 1728 total_ccb_xferlen += ccb->pkt_dmacookies[i].dmac_size;
1730 1729 }
1731 1730
1732 1731 ccb->arcmsr_cdb.sgcount = i;
1733 1732 if (total_ccb_xferlen > 512) {
1734 1733 resid = total_ccb_xferlen % 512;
1735 1734 if (resid != 0) {
1736 1735 i--;
1737 1736 total_ccb_xferlen -= resid;
1738 1737 /* modify last sg length */
1739 1738 ccb->pkt_dmacookies[i].dmac_size =
1740 1739 ccb->pkt_dmacookies[i].dmac_size - resid;
1741 1740 ccb->resid_dmacookie.dmac_size = resid;
1742 1741 ccb->resid_dmacookie.dmac_laddress =
1743 1742 ccb->pkt_dmacookies[i].dmac_laddress +
1744 1743 ccb->pkt_dmacookies[i].dmac_size;
1745 1744 }
1746 1745 }
1747 1746 ccb->total_dmac_size += total_ccb_xferlen;
1748 1747 pkt->pkt_resid = bp->b_bcount - ccb->total_dmac_size;
1749 1748
1750 1749 return (DDI_SUCCESS);
1751 1750 }
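
How the window/cookie bookkeeping above plays out is easier to see in a toy
simulation. The sketch below models pkt_curwin/pkt_nwin and
pkt_cookie/pkt_ncookies with plain integers and hypothetical sizes; the
quoted comments mark where the driver would call ddi_dma_getwin() and
ddi_dma_nextcookie():

	#include <stdio.h>

	#define	NWIN		3	/* hypothetical windows in the bind */
	#define	NCOOKIES	4	/* hypothetical cookies per window */
	#define	MAXSG		2	/* request limit, like ARCMSR_MAX_SG_ENTRIES */

	int
	main(void)
	{
		int curwin = 0, cookie = 0;	/* mirror pkt_curwin / pkt_cookie */

		for (;;) {
			if (cookie == NCOOKIES) {	/* window exhausted */
				if (curwin + 1 >= NWIN)	/* at last window: stop */
					break;
				curwin++;	/* "ddi_dma_getwin(..., curwin, ...)" */
				cookie = 0;	/* new window restarts its cookies */
			}
			int n = NCOOKIES - cookie;	/* take up to MAXSG cookies */
			if (n > MAXSG)
				n = MAXSG;
			(void) printf("request: window %d, cookies %d..%d\n",
			    curwin, cookie, cookie + n - 1);
			cookie += n;	/* "ddi_dma_nextcookie()" n times */
		}
		return (0);
	}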
1752 1751
1753 1752
1754 1753 /*ARGSUSED*/
1755 1754 static void
1756 1755 arcmsr_build_ccb(struct CCB *ccb)
1757 1756 {
1758 1757 struct scsi_pkt *pkt = ccb->pkt;
1759 1758 struct ARCMSR_CDB *arcmsr_cdb;
1760 1759 char *psge;
1761 1760 uint32_t address_lo, address_hi;
1762 1761 int arccdbsize = 0x30;
1763 1762 uint8_t sgcount;
1764 1763
1765 1764 arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
1766 1765 psge = (char *)&arcmsr_cdb->sgu;
1767 1766
1768 1767 bcopy((caddr_t)pkt->pkt_cdbp, arcmsr_cdb->Cdb, arcmsr_cdb->CdbLength);
1769 1768 sgcount = ccb->arcmsr_cdb.sgcount;
1770 1769
1771 1770 if (sgcount != 0) {
1772 1771 int length, i;
1773 1772 int cdb_sgcount = 0;
1774 1773 int total_xfer_length = 0;
1775 1774
1776 1775		/* map the bound DMA cookie list to our iop SG list. */
1777 1776 for (i = 0; i < sgcount; i++) {
1778 1777 /* Get physaddr of the current data pointer */
1779 1778 length = ccb->pkt_dmacookies[i].dmac_size;
1780 1779 total_xfer_length += length;
1781 1780 address_lo =
1782 1781 dma_addr_lo32(ccb->pkt_dmacookies[i].dmac_laddress);
1783 1782 address_hi =
1784 1783 dma_addr_hi32(ccb->pkt_dmacookies[i].dmac_laddress);
1785 1784
1786 1785 if (address_hi == 0) {
1787 1786 struct SG32ENTRY *dma_sg;
1788 1787
1789 1788 dma_sg = (struct SG32ENTRY *)(intptr_t)psge;
1790 1789 dma_sg->address = address_lo;
1791 1790 dma_sg->length = length;
1792 1791 psge += sizeof (struct SG32ENTRY);
1793 1792 arccdbsize += sizeof (struct SG32ENTRY);
1794 1793 } else {
1795 1794 struct SG64ENTRY *dma_sg;
1796 1795
1797 1796 dma_sg = (struct SG64ENTRY *)(intptr_t)psge;
1798 1797 dma_sg->addresshigh = address_hi;
1799 1798 dma_sg->address = address_lo;
1800 1799 dma_sg->length = length | IS_SG64_ADDR;
1801 1800 psge += sizeof (struct SG64ENTRY);
1802 1801 arccdbsize += sizeof (struct SG64ENTRY);
1803 1802 }
1804 1803 cdb_sgcount++;
1805 1804 }
1806 1805 arcmsr_cdb->sgcount = (uint8_t)cdb_sgcount;
1807 1806 arcmsr_cdb->DataLength = total_xfer_length;
1808 1807 if (arccdbsize > 256) {
1809 1808 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
1810 1809 }
1811 1810 } else {
1812 1811 arcmsr_cdb->DataLength = 0;
1813 1812 }
1814 1813
1815 1814 if (ccb->ccb_flags & CCB_FLAG_DMAWRITE)
1816 1815 arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
1817 1816 ccb->arc_cdb_size = arccdbsize;
1818 1817 }
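
The SG32/SG64 split above keys solely off the high 32 bits of each cookie
address. A standalone sketch of that decision; the IS_SG64_ADDR value and
segment length here are hypothetical (the real flag is defined in arcmsr.h):

	#include <stdio.h>
	#include <stdint.h>

	#define	IS_SG64_ADDR	0x01000000	/* hypothetical; see arcmsr.h */

	int
	main(void)
	{
		uint64_t laddr[] = { 0x0000000012345000ULL, 0x00000001deadb000ULL };

		for (int i = 0; i < 2; i++) {
			uint32_t lo = (uint32_t)(laddr[i] & 0xffffffffULL);
			uint32_t hi = (uint32_t)(laddr[i] >> 32);
			uint32_t len = 0x1000;	/* hypothetical segment length */

			if (hi == 0) {	/* fits in 32 bits: emit an SG32ENTRY */
				(void) printf("SG32 addr=0x%08x len=0x%x\n", lo, len);
			} else {	/* needs 64 bits: SG64ENTRY, flag the length */
				(void) printf("SG64 hi=0x%08x lo=0x%08x len=0x%x\n",
				    hi, lo, len | IS_SG64_ADDR);
			}
		}
		return (0);
	}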
1819 1818
1820 1819 /*
1821 1820  * arcmsr_post_ccb - send a protocol-specific ARC "send postcard" to an AIOC.
1822 1821  *
1823 1822  * acb:	adapter control block of the ARC adapter to post to
1824 1823  * ccb:	command control block holding the prepared ARC CDB
1825 1824  *
1826 1825  * This routine posts an ARC send postcard to the request post FIFO of a
1827 1826  * specific ARC adapter.
1829 1828 */
1830 1829 static int
1831 1830 arcmsr_post_ccb(struct ACB *acb, struct CCB *ccb)
1832 1831 {
1833 1832 uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
1834 1833 struct scsi_pkt *pkt = ccb->pkt;
1835 1834 struct ARCMSR_CDB *arcmsr_cdb;
1836 1835 uint_t pkt_flags = pkt->pkt_flags;
1837 1836
1838 1837 arcmsr_cdb = &ccb->arcmsr_cdb;
1839 1838
1840 1839 /* TODO: Use correct offset and size for syncing? */
1841 1840 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0, DDI_DMA_SYNC_FORDEV) ==
1842 1841 DDI_FAILURE)
1843 1842 return (DDI_FAILURE);
1844 1843
1845 1844 atomic_inc_32(&acb->ccboutstandingcount);
1846 1845 ccb->ccb_time = (time_t)(ddi_get_time() + pkt->pkt_time);
1847 1846
1848 1847 ccb->ccb_state = ARCMSR_CCB_START;
1849 1848 switch (acb->adapter_type) {
1850 1849 case ACB_ADAPTER_TYPE_A:
1851 1850 {
1852 1851 struct HBA_msgUnit *phbamu;
1853 1852
1854 1853 phbamu = (struct HBA_msgUnit *)acb->pmu;
1855 1854 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1856 1855 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1857 1856 &phbamu->inbound_queueport,
1858 1857 cdb_phyaddr_pattern |
1859 1858 ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1860 1859 } else {
1861 1860 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1862 1861 &phbamu->inbound_queueport, cdb_phyaddr_pattern);
1863 1862 }
1864 1863 if (pkt_flags & FLAG_NOINTR)
1865 1864 arcmsr_polling_hba_ccbdone(acb, ccb);
1866 1865 break;
1867 1866 }
1868 1867
1869 1868 case ACB_ADAPTER_TYPE_B:
1870 1869 {
1871 1870 struct HBB_msgUnit *phbbmu;
1872 1871 int ending_index, index;
1873 1872
1874 1873 phbbmu = (struct HBB_msgUnit *)acb->pmu;
1875 1874 index = phbbmu->postq_index;
1876 1875		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
1877 1876 phbbmu->post_qbuffer[ending_index] = 0;
1878 1877 if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
1879 1878 phbbmu->post_qbuffer[index] =
1880 1879 (cdb_phyaddr_pattern|ARCMSR_CCBPOST_FLAG_SGL_BSIZE);
1881 1880 } else {
1882 1881 phbbmu->post_qbuffer[index] = cdb_phyaddr_pattern;
1883 1882 }
1884 1883 index++;
1885 1884		/* wrap the index to 0 when it passes the end of the queue */
1886 1885 index %= ARCMSR_MAX_HBB_POSTQUEUE;
1887 1886 phbbmu->postq_index = index;
1888 1887 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1889 1888 &phbbmu->hbb_doorbell->drv2iop_doorbell,
1890 1889 ARCMSR_DRV2IOP_CDB_POSTED);
1891 1890
1892 1891 if (pkt_flags & FLAG_NOINTR)
1893 1892 arcmsr_polling_hbb_ccbdone(acb, ccb);
1894 1893 break;
1895 1894 }
1896 1895
1897 1896 case ACB_ADAPTER_TYPE_C:
1898 1897 {
1899 1898 struct HBC_msgUnit *phbcmu;
1900 1899 uint32_t ccb_post_stamp, arc_cdb_size;
1901 1900
1902 1901 phbcmu = (struct HBC_msgUnit *)acb->pmu;
1903 1902 arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 :
1904 1903 ccb->arc_cdb_size;
1905 1904 ccb_post_stamp = (cdb_phyaddr_pattern |
1906 1905		    ((arc_cdb_size - 1) >> 6) | 1);
1907 1906 if (acb->cdb_phyaddr_hi32) {
1908 1907 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1909 1908 &phbcmu->inbound_queueport_high,
1910 1909 acb->cdb_phyaddr_hi32);
1911 1910 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1912 1911 &phbcmu->inbound_queueport_low, ccb_post_stamp);
1913 1912 } else {
1914 1913 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
1915 1914 &phbcmu->inbound_queueport_low, ccb_post_stamp);
1916 1915 }
1917 1916 if (pkt_flags & FLAG_NOINTR)
1918 1917 arcmsr_polling_hbc_ccbdone(acb, ccb);
1919 1918 break;
1920 1919 }
1921 1920
1922 1921 }
1923 1922 return (DDI_SUCCESS);
1924 1923 }
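
For the type-C (HBC) post above, the low bits of the posted word encode the
CDB size in 64-byte units and bit 0 is always set; this works because
cdb_phyaddr_pattern is suitably aligned. A worked example with hypothetical
values:

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		/* hypothetical values, chosen only for illustration */
		uint32_t cdb_phyaddr_pattern = 0x1fe0cc00;	/* aligned CCB address */
		uint32_t arc_cdb_size = 0x60;			/* prepared CDB bytes */

		if (arc_cdb_size > 0x300)	/* clamp, as the type-C case does */
			arc_cdb_size = 0x300;
		/* low bits carry the CDB size in 64-byte units; bit 0 is set */
		uint32_t stamp = cdb_phyaddr_pattern |
		    ((arc_cdb_size - 1) >> 6) | 1;

		(void) printf("ccb_post_stamp = 0x%08x\n", stamp);	/* 0x1fe0cc01 */
		return (0);
	}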
1925 1924
1926 1925
1927 1926 static void
1928 1927 arcmsr_ccb_complete(struct CCB *ccb, int flag)
1929 1928 {
1930 1929 struct ACB *acb = ccb->acb;
1931 1930 struct scsi_pkt *pkt = ccb->pkt;
1932 1931
1933 1932 if (pkt == NULL) {
1934 1933 return;
1935 1934 }
1936 1935 ccb->ccb_state |= ARCMSR_CCB_DONE;
1937 1936 pkt->pkt_state |= (STATE_GOT_BUS | STATE_GOT_TARGET |
1938 1937 STATE_SENT_CMD | STATE_GOT_STATUS);
1939 1938
1940 1939 if ((ccb->ccb_flags & CCB_FLAG_DMACONSISTENT) &&
1941 1940 (pkt->pkt_state & STATE_XFERRED_DATA)) {
1942 1941 (void) ddi_dma_sync(ccb->pkt_dma_handle, 0, 0,
1943 1942 DDI_DMA_SYNC_FORCPU);
1944 1943 }
1945 1944 /*
1946 1945 * TODO: This represents a potential race condition, and is
1947 1946 * ultimately a poor design decision. Revisit this code
1948 1947 * and solve the mutex ownership issue correctly.
1949 1948 */
1950 1949 if (mutex_owned(&acb->isr_mutex)) {
1951 1950 mutex_exit(&acb->isr_mutex);
1952 1951 scsi_hba_pkt_comp(pkt);
1953 1952 mutex_enter(&acb->isr_mutex);
1954 1953 } else {
1955 1954 scsi_hba_pkt_comp(pkt);
1956 1955 }
1957 1956 if (flag == 1) {
1958 1957 atomic_dec_32(&acb->ccboutstandingcount);
1959 1958 }
1960 1959 }
1961 1960
1962 1961 static void
1963 1962 arcmsr_report_ccb_state(struct ACB *acb, struct CCB *ccb, boolean_t error)
1964 1963 {
1965 1964 int id, lun;
1966 1965
1967 1966 ccb->ccb_state |= ARCMSR_CCB_DONE;
1968 1967 id = ccb->pkt->pkt_address.a_target;
1969 1968 lun = ccb->pkt->pkt_address.a_lun;
1970 1969
1971 1970 if (!error) {
1972 1971 if (acb->devstate[id][lun] == ARECA_RAID_GONE) {
1973 1972 acb->devstate[id][lun] = ARECA_RAID_GOOD;
1974 1973 }
1975 1974 ccb->pkt->pkt_reason = CMD_CMPLT;
1976 1975 ccb->pkt->pkt_state |= STATE_XFERRED_DATA;
1977 1976 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1978 1977 &ccb->complete_queue_pointer, &acb->ccb_complete_list);
1979 1978
1980 1979 } else {
1981 1980 switch (ccb->arcmsr_cdb.DeviceStatus) {
1982 1981 case ARCMSR_DEV_SELECT_TIMEOUT:
1983 1982 if (acb->devstate[id][lun] == ARECA_RAID_GOOD) {
1984 1983 arcmsr_warn(acb,
1985 1984 "target %d lun %d selection "
1986 1985 "timeout", id, lun);
1987 1986 }
1988 1987 acb->devstate[id][lun] = ARECA_RAID_GONE;
1989 1988 ccb->pkt->pkt_reason = CMD_TIMEOUT; /* CMD_DEV_GONE; */
1990 1989 ccb->pkt->pkt_statistics |= STAT_TIMEOUT;
1991 1990 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
1992 1991 &ccb->complete_queue_pointer,
1993 1992 &acb->ccb_complete_list);
1994 1993 break;
1995 1994 case ARCMSR_DEV_ABORTED:
1996 1995 case ARCMSR_DEV_INIT_FAIL:
1997 1996 arcmsr_warn(acb, "isr got 'ARCMSR_DEV_ABORTED'"
1998 1997			    " or 'ARCMSR_DEV_INIT_FAIL'");
1999 1998 arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
2000 1999 acb->devstate[id][lun] = ARECA_RAID_GONE;
2001 2000 ccb->pkt->pkt_reason = CMD_DEV_GONE;
2002 2001 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2003 2002 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2004 2003 &ccb->complete_queue_pointer,
2005 2004 &acb->ccb_complete_list);
2006 2005 break;
2007 2006 case SCSISTAT_CHECK_CONDITION:
2008 2007 acb->devstate[id][lun] = ARECA_RAID_GOOD;
2009 2008 arcmsr_report_sense_info(ccb);
2010 2009 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2011 2010 &ccb->complete_queue_pointer,
2012 2011 &acb->ccb_complete_list);
2013 2012 break;
2014 2013 default:
2015 2014 arcmsr_warn(acb,
2016 2015 "target %d lun %d isr received CMD_DONE"
2017 2016 " with unknown DeviceStatus (0x%x)",
2018 2017 id, lun, ccb->arcmsr_cdb.DeviceStatus);
2019 2018 arcmsr_log(acb, CE_NOTE, "raid volume was kicked out");
2020 2019 acb->devstate[id][lun] = ARECA_RAID_GONE;
2021 2020			/* unknown or CRC error; complete as CMD_TRAN_ERR for retry */
2022 2021 ccb->pkt->pkt_reason = CMD_TRAN_ERR;
2023 2022 ccb->pkt->pkt_statistics |= STAT_TERMINATED;
2024 2023 arcmsr_list_add_tail(&acb->ccb_complete_list_mutex,
2025 2024 &ccb->complete_queue_pointer,
2026 2025 &acb->ccb_complete_list);
2027 2026 break;
2028 2027 }
2029 2028 }
2030 2029 }
2031 2030
2032 2031
2033 2032 static void
2034 2033 arcmsr_drain_donequeue(struct ACB *acb, struct CCB *ccb, boolean_t error)
2035 2034 {
2036 2035 uint16_t ccb_state;
2037 2036
2038 2037 if (ccb->acb != acb) {
2039 2038 return;
2040 2039 }
2041 2040 if (ccb->ccb_state != ARCMSR_CCB_START) {
2042 2041 switch (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
2043 2042 case ARCMSR_CCB_TIMEOUT:
2044 2043 ccb_state = ccb->ccb_state;
2045 2044 if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2046 2045 arcmsr_free_ccb(ccb);
2047 2046 else
2048 2047 ccb->ccb_state |= ARCMSR_CCB_BACK;
2049 2048 return;
2050 2049
2051 2050 case ARCMSR_CCB_ABORTED:
2052 2051 ccb_state = ccb->ccb_state;
2053 2052 if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2054 2053 arcmsr_free_ccb(ccb);
2055 2054 else
2056 2055 ccb->ccb_state |= ARCMSR_CCB_BACK;
2057 2056 return;
2058 2057 case ARCMSR_CCB_RESET:
2059 2058 ccb_state = ccb->ccb_state;
2060 2059 if (ccb_state & ARCMSR_CCB_WAIT4_FREE)
2061 2060 arcmsr_free_ccb(ccb);
2062 2061 else
2063 2062 ccb->ccb_state |= ARCMSR_CCB_BACK;
2064 2063 return;
2065 2064 default:
2066 2065 return;
2067 2066 }
2068 2067 }
2069 2068 arcmsr_report_ccb_state(acb, ccb, error);
2070 2069 }
2071 2070
2072 2071 static void
2073 2072 arcmsr_report_sense_info(struct CCB *ccb)
2074 2073 {
2075 2074 struct SENSE_DATA *cdb_sensedata;
2076 2075 struct scsi_pkt *pkt = ccb->pkt;
2077 2076 struct scsi_arq_status *arq_status;
2078 2077 union scsi_cdb *cdbp;
2079 2078 uint64_t err_blkno;
2080 2079
2081 2080 cdbp = (void *)pkt->pkt_cdbp;
2082 2081 err_blkno = ARCMSR_GETGXADDR(ccb->arcmsr_cdb.CdbLength, cdbp);
2083 2082
2084 2083 arq_status = (struct scsi_arq_status *)(intptr_t)(pkt->pkt_scbp);
2085 2084 bzero((caddr_t)arq_status, sizeof (struct scsi_arq_status));
2086 2085 *pkt->pkt_scbp = STATUS_CHECK; /* CHECK CONDITION */
2087 2086 arq_status->sts_rqpkt_reason = CMD_CMPLT;
2088 2087 arq_status->sts_rqpkt_state = (STATE_GOT_BUS | STATE_GOT_TARGET |
2089 2088 STATE_SENT_CMD | STATE_XFERRED_DATA | STATE_GOT_STATUS);
2090 2089 arq_status->sts_rqpkt_statistics = 0;
2091 2090 arq_status->sts_rqpkt_resid = 0;
2092 2091
2093 2092 pkt->pkt_reason = CMD_CMPLT;
2094 2093 /* auto rqsense took place */
2095 2094 pkt->pkt_state |= STATE_ARQ_DONE;
2096 2095
2097 2096 cdb_sensedata = (struct SENSE_DATA *)ccb->arcmsr_cdb.SenseData;
2098 2097	/* note: sts_sensedata is embedded in arq_status, so this is always true */
2098 2097	if (&arq_status->sts_sensedata != NULL) {
2099 2098 if (err_blkno <= 0xfffffffful) {
2100 2099 struct scsi_extended_sense *sts_sensedata;
2101 2100
2102 2101 sts_sensedata = &arq_status->sts_sensedata;
2103 2102 sts_sensedata->es_code = cdb_sensedata->ErrorCode;
2104 2103			/* must equal CLASS_EXTENDED_SENSE (0x07) */
2105 2104 sts_sensedata->es_class = cdb_sensedata->ErrorClass;
2106 2105 sts_sensedata->es_valid = cdb_sensedata->Valid;
2107 2106 sts_sensedata->es_segnum = cdb_sensedata->SegmentNumber;
2108 2107 sts_sensedata->es_key = cdb_sensedata->SenseKey;
2109 2108 sts_sensedata->es_ili = cdb_sensedata->IncorrectLength;
2110 2109 sts_sensedata->es_eom = cdb_sensedata->EndOfMedia;
2111 2110 sts_sensedata->es_filmk = cdb_sensedata->FileMark;
2112 2111 sts_sensedata->es_info_1 = (err_blkno >> 24) & 0xFF;
2113 2112 sts_sensedata->es_info_2 = (err_blkno >> 16) & 0xFF;
2114 2113 sts_sensedata->es_info_3 = (err_blkno >> 8) & 0xFF;
2115 2114 sts_sensedata->es_info_4 = err_blkno & 0xFF;
2116 2115 sts_sensedata->es_add_len =
2117 2116 cdb_sensedata->AdditionalSenseLength;
2118 2117 sts_sensedata->es_cmd_info[0] =
2119 2118 cdb_sensedata->CommandSpecificInformation[0];
2120 2119 sts_sensedata->es_cmd_info[1] =
2121 2120 cdb_sensedata->CommandSpecificInformation[1];
2122 2121 sts_sensedata->es_cmd_info[2] =
2123 2122 cdb_sensedata->CommandSpecificInformation[2];
2124 2123 sts_sensedata->es_cmd_info[3] =
2125 2124 cdb_sensedata->CommandSpecificInformation[3];
2126 2125 sts_sensedata->es_add_code =
2127 2126 cdb_sensedata->AdditionalSenseCode;
2128 2127 sts_sensedata->es_qual_code =
2129 2128 cdb_sensedata->AdditionalSenseCodeQualifier;
2130 2129 sts_sensedata->es_fru_code =
2131 2130 cdb_sensedata->FieldReplaceableUnitCode;
2132 2131 } else { /* 64-bit LBA */
2133 2132 struct scsi_descr_sense_hdr *dsp;
2134 2133 struct scsi_information_sense_descr *isd;
2135 2134
2136 2135 dsp = (struct scsi_descr_sense_hdr *)
2137 2136 &arq_status->sts_sensedata;
2138 2137 dsp->ds_class = CLASS_EXTENDED_SENSE;
2139 2138 dsp->ds_code = CODE_FMT_DESCR_CURRENT;
2140 2139 dsp->ds_key = cdb_sensedata->SenseKey;
2141 2140 dsp->ds_add_code = cdb_sensedata->AdditionalSenseCode;
2142 2141 dsp->ds_qual_code =
2143 2142 cdb_sensedata->AdditionalSenseCodeQualifier;
2144 2143 dsp->ds_addl_sense_length =
2145 2144 sizeof (struct scsi_information_sense_descr);
2146 2145
2147 2146 isd = (struct scsi_information_sense_descr *)(dsp+1);
2148 2147 isd->isd_descr_type = DESCR_INFORMATION;
2149 2148 isd->isd_valid = 1;
2150 2149 isd->isd_information[0] = (err_blkno >> 56) & 0xFF;
2151 2150 isd->isd_information[1] = (err_blkno >> 48) & 0xFF;
2152 2151 isd->isd_information[2] = (err_blkno >> 40) & 0xFF;
2153 2152 isd->isd_information[3] = (err_blkno >> 32) & 0xFF;
2154 2153 isd->isd_information[4] = (err_blkno >> 24) & 0xFF;
2155 2154 isd->isd_information[5] = (err_blkno >> 16) & 0xFF;
2156 2155 isd->isd_information[6] = (err_blkno >> 8) & 0xFF;
2157 2156 isd->isd_information[7] = (err_blkno) & 0xFF;
2158 2157 }
2159 2158 }
2160 2159 }
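
The 64-bit branch above serializes err_blkno big-endian into the information
descriptor one byte at a time. The same packing, as a runnable loop instead
of eight explicit stores:

	#include <stdio.h>
	#include <stdint.h>

	int
	main(void)
	{
		uint64_t err_blkno = 0x123456789aULL;	/* > 32 bits: descriptor format */
		uint8_t info[8];

		/* big-endian split, matching the isd_information[0..7] stores */
		for (int i = 0; i < 8; i++)
			info[i] = (err_blkno >> (56 - 8 * i)) & 0xFF;

		for (int i = 0; i < 8; i++)
			(void) printf("%02x ", info[i]);	/* 00 00 00 12 34 56 78 9a */
		(void) printf("\n");
		return (0);
	}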
2161 2160
2162 2161
2163 2162 static int
2164 2163 arcmsr_seek_cmd2abort(struct ACB *acb, struct scsi_pkt *abortpkt)
2165 2164 {
2166 2165 struct CCB *ccb;
2167 2166 uint32_t intmask_org = 0;
2168 2167 int i = 0;
2169 2168
2170 2169 acb->num_aborts++;
2171 2170
2172 2171 if (abortpkt != NULL) {
2173 2172 /*
2174 2173 * We don't support abort of a single packet. All
2175 2174 * callers in our kernel always do a global abort, so
2176 2175 * there is no point in having code to support it
2177 2176 * here.
2178 2177 */
2179 2178 return (DDI_FAILURE);
2180 2179 }
2181 2180
2182 2181 /*
2183 2182 * if abortpkt is NULL, the upper layer needs us
2184 2183 * to abort all commands
2185 2184 */
2186 2185 if (acb->ccboutstandingcount != 0) {
2187 2186 /* disable all outbound interrupt */
2188 2187 intmask_org = arcmsr_disable_allintr(acb);
2189 2188 /* clear and abort all outbound posted Q */
2190 2189 arcmsr_done4abort_postqueue(acb);
2191 2190		/* tell the iop (e.g. IOP331) that outstanding commands are aborted */
2192 2191 (void) arcmsr_abort_host_command(acb);
2193 2192
2194 2193 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2195 2194 ccb = acb->pccb_pool[i];
2196 2195 if (ccb->ccb_state == ARCMSR_CCB_START) {
2197 2196 /*
2198 2197 * this ccb will complete at
2199 2198 * hwinterrupt
2200 2199 */
2201 2200 /* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
2202 2201 ccb->pkt->pkt_reason = CMD_ABORTED;
2203 2202 ccb->pkt->pkt_statistics |= STAT_ABORTED;
2204 2203 arcmsr_ccb_complete(ccb, 1);
2205 2204 }
2206 2205 }
2207 2206 /*
2208 2207 * enable outbound Post Queue, outbound
2209 2208 * doorbell Interrupt
2210 2209 */
2211 2210 arcmsr_enable_allintr(acb, intmask_org);
2212 2211 }
2213 2212 return (DDI_SUCCESS);
2214 2213 }
2215 2214
2216 2215
2217 2216 /*
2218 2217 * Autoconfiguration support
2219 2218 */
2220 2219 static int
2221 2220 arcmsr_parse_devname(char *devnm, int *tgt, int *lun)
2222 2221 {
2223 2222 char devbuf[SCSI_MAXNAMELEN];
2224 2223 char *addr;
2225 2224 char *p, *tp, *lp;
2226 2225 long num;
2227 2226
2228 2227 /* Parse dev name and address */
2229 2228 (void) strlcpy(devbuf, devnm, sizeof (devbuf));
2230 2229 addr = "";
2231 2230 for (p = devbuf; *p != '\0'; p++) {
2232 2231 if (*p == '@') {
2233 2232 addr = p + 1;
2234 2233 *p = '\0';
2235 2234 } else if (*p == ':') {
2236 2235 *p = '\0';
2237 2236 break;
2238 2237 }
2239 2238 }
2240 2239
2241 2240 /* Parse target and lun */
2242 2241 for (p = tp = addr, lp = NULL; *p != '\0'; p++) {
2243 2242 if (*p == ',') {
2244 2243 lp = p + 1;
2245 2244 *p = '\0';
2246 2245 break;
2247 2246 }
2248 2247 }
2249 2248 if ((tgt != NULL) && (tp != NULL)) {
2250 2249 if (ddi_strtol(tp, NULL, 0x10, &num) != 0)
2251 2250 return (-1);
2252 2251 *tgt = (int)num;
2253 2252 }
2254 2253 if ((lun != NULL) && (lp != NULL)) {
2255 2254 if (ddi_strtol(lp, NULL, 0x10, &num) != 0)
2256 2255 return (-1);
2257 2256 *lun = (int)num;
2258 2257 }
2259 2258 return (0);
2260 2259 }
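
arcmsr_parse_devname() splits a name of the form "node@target,lun:minor",
reading target and lun as hex. A user-level sketch of the same two-pass
scan, with strtol standing in for ddi_strtol and a hypothetical input:

	#include <stdio.h>
	#include <stdlib.h>

	int
	main(void)
	{
		char devbuf[] = "sd@2,1:a";	/* hypothetical "node@target,lun:minor" */
		char *addr = "", *lp = NULL, *p;

		for (p = devbuf; *p != '\0'; p++) {
			if (*p == '@') {
				addr = p + 1;	/* unit address starts after '@' */
				*p = '\0';
			} else if (*p == ':') {
				*p = '\0';	/* cut off the minor-name suffix */
				break;
			}
		}
		for (p = addr; *p != '\0'; p++) {
			if (*p == ',') {
				lp = p + 1;	/* lun starts after ',' */
				*p = '\0';
				break;
			}
		}
		(void) printf("node=%s tgt=%ld lun=%ld\n", devbuf,
		    strtol(addr, NULL, 16),
		    lp != NULL ? strtol(lp, NULL, 16) : 0L);	/* sd 2 1 */
		return (0);
	}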
2261 2260
2262 2261 static int
2263 2262 arcmsr_name_node(dev_info_t *dip, char *name, int len)
2264 2263 {
2265 2264 int tgt, lun;
2266 2265
2267 2266 tgt = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "target",
2268 2267 -1);
2269 2268 if (tgt == -1)
2270 2269 return (DDI_FAILURE);
2271 2270 lun = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS, "lun",
2272 2271 -1);
2273 2272 if (lun == -1)
2274 2273 return (DDI_FAILURE);
2275 2274 (void) snprintf(name, len, "%x,%x", tgt, lun);
2276 2275 return (DDI_SUCCESS);
2277 2276 }
2278 2277
2279 2278 static dev_info_t *
2280 2279 arcmsr_find_child(struct ACB *acb, uint16_t tgt, uint8_t lun)
2281 2280 {
2282 2281 dev_info_t *child = NULL;
2283 2282 char addr[SCSI_MAXNAMELEN];
2284 2283 char tmp[SCSI_MAXNAMELEN];
2285 2284
2286 2285 (void) sprintf(addr, "%x,%x", tgt, lun);
2287 2286
2288 2287 for (child = ddi_get_child(acb->dev_info);
2289 2288 child;
2290 2289 child = ddi_get_next_sibling(child)) {
2291 2290		/* We don't care about non-persistent nodes */
2292 2291 if (ndi_dev_is_persistent_node(child) == 0)
2293 2292 continue;
2294 2293 if (arcmsr_name_node(child, tmp, SCSI_MAXNAMELEN) !=
2295 2294 DDI_SUCCESS)
2296 2295 continue;
2297 2296 if (strcmp(addr, tmp) == 0)
2298 2297 break;
2299 2298 }
2300 2299 return (child);
2301 2300 }
2302 2301
2303 2302 static int
2304 2303 arcmsr_config_child(struct ACB *acb, struct scsi_device *sd, dev_info_t **dipp)
2305 2304 {
2306 2305 char *nodename = NULL;
2307 2306 char **compatible = NULL;
2308 2307 int ncompatible = 0;
2309 2308 dev_info_t *ldip = NULL;
2310 2309 int tgt = sd->sd_address.a_target;
2311 2310 int lun = sd->sd_address.a_lun;
2312 2311 int dtype = sd->sd_inq->inq_dtype & DTYPE_MASK;
2313 2312 int rval;
2314 2313
2315 2314 scsi_hba_nodename_compatible_get(sd->sd_inq, NULL, dtype,
2316 2315 NULL, &nodename, &compatible, &ncompatible);
2317 2316 if (nodename == NULL) {
2318 2317		arcmsr_warn(acb, "found no compatible driver for T%dL%d",
2319 2318 tgt, lun);
2320 2319 rval = NDI_FAILURE;
2321 2320 goto finish;
2322 2321 }
2323 2322 /* Create dev node */
2324 2323 rval = ndi_devi_alloc(acb->dev_info, nodename, DEVI_SID_NODEID, &ldip);
2325 2324 if (rval == NDI_SUCCESS) {
2326 2325 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "target", tgt) !=
2327 2326 DDI_PROP_SUCCESS) {
2328 2327 arcmsr_warn(acb,
2329 2328 "unable to create target property for T%dL%d",
2330 2329 tgt, lun);
2331 2330 rval = NDI_FAILURE;
2332 2331 goto finish;
2333 2332 }
2334 2333 if (ndi_prop_update_int(DDI_DEV_T_NONE, ldip, "lun", lun) !=
2335 2334 DDI_PROP_SUCCESS) {
2336 2335 arcmsr_warn(acb,
2337 2336 "unable to create lun property for T%dL%d",
2338 2337 tgt, lun);
2339 2338 rval = NDI_FAILURE;
2340 2339 goto finish;
2341 2340 }
2342 2341 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, ldip,
2343 2342 "compatible", compatible, ncompatible) !=
2344 2343 DDI_PROP_SUCCESS) {
2345 2344 arcmsr_warn(acb,
2346 2345 "unable to create compatible property for T%dL%d",
2347 2346 tgt, lun);
2348 2347 rval = NDI_FAILURE;
2349 2348 goto finish;
2350 2349 }
2351 2350 rval = ndi_devi_online(ldip, NDI_ONLINE_ATTACH);
2352 2351 if (rval != NDI_SUCCESS) {
2353 2352 arcmsr_warn(acb, "unable to online T%dL%d", tgt, lun);
2354 2353 ndi_prop_remove_all(ldip);
2355 2354 (void) ndi_devi_free(ldip);
2356 2355 } else {
2357 2356 arcmsr_log(acb, CE_NOTE, "T%dL%d onlined", tgt, lun);
2358 2357 }
2359 2358 }
2360 2359 finish:
2361 2360 if (dipp)
2362 2361 *dipp = ldip;
2363 2362
2364 2363 scsi_hba_nodename_compatible_free(nodename, compatible);
2365 2364 return (rval);
2366 2365 }
2367 2366
2368 2367 static int
2369 2368 arcmsr_config_lun(struct ACB *acb, uint16_t tgt, uint8_t lun, dev_info_t **ldip)
2370 2369 {
2371 2370 struct scsi_device sd;
2372 2371 dev_info_t *child;
2373 2372 int rval;
2374 2373
2375 2374 if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
2376 2375 if (ldip) {
2377 2376 *ldip = child;
2378 2377 }
2379 2378 return (NDI_SUCCESS);
2380 2379 }
2381 2380 bzero(&sd, sizeof (struct scsi_device));
2382 2381 sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
2383 2382 sd.sd_address.a_target = tgt;
2384 2383 sd.sd_address.a_lun = lun;
2385 2384
2386 2385 rval = scsi_hba_probe(&sd, NULL);
2387 2386 if (rval == SCSIPROBE_EXISTS)
2388 2387 rval = arcmsr_config_child(acb, &sd, ldip);
2389 2388 scsi_unprobe(&sd);
2390 2389 return (rval);
2391 2390 }
2392 2391
2393 2392
2394 2393 static int
2395 2394 arcmsr_add_intr(struct ACB *acb, int intr_type)
2396 2395 {
2397 2396 int rc, count;
2398 2397 dev_info_t *dev_info;
2399 2398 const char *type_str;
2400 2399
2401 2400 switch (intr_type) {
2402 2401 case DDI_INTR_TYPE_MSI:
2403 2402 type_str = "MSI";
2404 2403 break;
2405 2404 case DDI_INTR_TYPE_MSIX:
2406 2405 type_str = "MSIX";
2407 2406 break;
2408 2407 case DDI_INTR_TYPE_FIXED:
2409 2408 type_str = "FIXED";
2410 2409 break;
2411 2410 default:
2412 2411 type_str = "unknown";
2413 2412 break;
2414 2413 }
2415 2414
2416 2415 dev_info = acb->dev_info;
2417 2416 /* Determine number of supported interrupts */
2418 2417 rc = ddi_intr_get_nintrs(dev_info, intr_type, &count);
2419 2418 if ((rc != DDI_SUCCESS) || (count == 0)) {
2420 2419 arcmsr_warn(acb,
2421 2420 "no interrupts of type %s, rc=0x%x, count=%d",
2422 2421 type_str, rc, count);
2423 2422 return (DDI_FAILURE);
2424 2423 }
2425 2424 acb->intr_size = sizeof (ddi_intr_handle_t) * count;
2426 2425 acb->phandle = kmem_zalloc(acb->intr_size, KM_SLEEP);
2427 2426 rc = ddi_intr_alloc(dev_info, acb->phandle, intr_type, 0,
2428 2427 count, &acb->intr_count, DDI_INTR_ALLOC_NORMAL);
2429 2428 if ((rc != DDI_SUCCESS) || (acb->intr_count == 0)) {
2430 2429 arcmsr_warn(acb, "ddi_intr_alloc(%s) failed 0x%x",
2431 2430 type_str, rc);
2432 2431 return (DDI_FAILURE);
2433 2432 }
2434 2433 if (acb->intr_count < count) {
2435 2434 arcmsr_log(acb, CE_NOTE, "Got %d interrupts, but requested %d",
2436 2435 acb->intr_count, count);
2437 2436 }
2438 2437 /*
2439 2438 * Get priority for first msi, assume remaining are all the same
2440 2439 */
2441 2440 if (ddi_intr_get_pri(acb->phandle[0], &acb->intr_pri) != DDI_SUCCESS) {
2442 2441 arcmsr_warn(acb, "ddi_intr_get_pri failed");
2443 2442 return (DDI_FAILURE);
2444 2443 }
2445 2444 if (acb->intr_pri >= ddi_intr_get_hilevel_pri()) {
2446 2445 arcmsr_warn(acb, "high level interrupt not supported");
2447 2446 return (DDI_FAILURE);
2448 2447 }
2449 2448
2450 2449 for (int x = 0; x < acb->intr_count; x++) {
2451 2450 if (ddi_intr_add_handler(acb->phandle[x], arcmsr_intr_handler,
2452 2451 (caddr_t)acb, NULL) != DDI_SUCCESS) {
2453 2452 arcmsr_warn(acb, "ddi_intr_add_handler(%s) failed",
2454 2453 type_str);
2455 2454 return (DDI_FAILURE);
2456 2455 }
2457 2456 }
2458 2457 (void) ddi_intr_get_cap(acb->phandle[0], &acb->intr_cap);
2459 2458 if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2460 2459 /* Call ddi_intr_block_enable() for MSI */
2461 2460 (void) ddi_intr_block_enable(acb->phandle, acb->intr_count);
2462 2461 } else {
2463 2462 /* Call ddi_intr_enable() for MSI non block enable */
2464 2463 for (int x = 0; x < acb->intr_count; x++) {
2465 2464 (void) ddi_intr_enable(acb->phandle[x]);
2466 2465 }
2467 2466 }
2468 2467 return (DDI_SUCCESS);
2469 2468 }
2470 2469
2471 2470 static void
2472 2471 arcmsr_remove_intr(struct ACB *acb)
2473 2472 {
2474 2473 int x;
2475 2474
2476 2475 if (acb->phandle == NULL)
2477 2476 return;
2478 2477
2479 2478 /* Disable all interrupts */
2480 2479 if (acb->intr_cap & DDI_INTR_FLAG_BLOCK) {
2481 2480 /* Call ddi_intr_block_disable() */
2482 2481 (void) ddi_intr_block_disable(acb->phandle, acb->intr_count);
2483 2482 } else {
2484 2483 for (x = 0; x < acb->intr_count; x++) {
2485 2484 (void) ddi_intr_disable(acb->phandle[x]);
2486 2485 }
2487 2486 }
2488 2487 /* Call ddi_intr_remove_handler() */
2489 2488 for (x = 0; x < acb->intr_count; x++) {
2490 2489 (void) ddi_intr_remove_handler(acb->phandle[x]);
2491 2490 (void) ddi_intr_free(acb->phandle[x]);
2492 2491 }
2493 2492 kmem_free(acb->phandle, acb->intr_size);
2494 2493 acb->phandle = NULL;
2495 2494 }
2496 2495
2497 2496 static void
2498 2497 arcmsr_mutex_init(struct ACB *acb)
2499 2498 {
2500 2499 mutex_init(&acb->isr_mutex, NULL, MUTEX_DRIVER, NULL);
2501 2500 mutex_init(&acb->acb_mutex, NULL, MUTEX_DRIVER, NULL);
2502 2501 mutex_init(&acb->postq_mutex, NULL, MUTEX_DRIVER, NULL);
2503 2502 mutex_init(&acb->workingQ_mutex, NULL, MUTEX_DRIVER, NULL);
2504 2503 mutex_init(&acb->ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
2505 2504 }
2506 2505
2507 2506 static void
2508 2507 arcmsr_mutex_destroy(struct ACB *acb)
2509 2508 {
2510 2509 mutex_destroy(&acb->isr_mutex);
2511 2510 mutex_destroy(&acb->acb_mutex);
2512 2511 mutex_destroy(&acb->postq_mutex);
2513 2512 mutex_destroy(&acb->workingQ_mutex);
2514 2513 mutex_destroy(&acb->ioctl_mutex);
2515 2514 }
2516 2515
2517 2516 static int
2518 2517 arcmsr_initialize(struct ACB *acb)
2519 2518 {
2520 2519 struct CCB *pccb_tmp;
2521 2520 size_t allocated_length;
2522 2521 uint16_t wval;
2523 2522 uint_t intmask_org, count;
2524 2523 caddr_t arcmsr_ccbs_area;
2525 2524 uint32_t wlval, cdb_phyaddr, offset, realccb_size;
2526 2525 int32_t dma_sync_size;
2527 2526	int i, id, lun;
2528 2527
2530 2529 wlval = pci_config_get32(acb->pci_acc_handle, 0);
2531 2530 wval = (uint16_t)((wlval >> 16) & 0xffff);
2532 2531 realccb_size = P2ROUNDUP(sizeof (struct CCB), 32);
2533 2532 switch (wval) {
2534 2533 case PCI_DEVICE_ID_ARECA_1880:
2535 2534 case PCI_DEVICE_ID_ARECA_1882:
2536 2535 {
2537 2536 uint32_t *iop_mu_regs_map0;
2538 2537
2539 2538 acb->adapter_type = ACB_ADAPTER_TYPE_C; /* lsi */
2540 2539 dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2541 2540 if (ddi_regs_map_setup(acb->dev_info, 2,
2542 2541 (caddr_t *)&iop_mu_regs_map0, 0,
2543 2542 sizeof (struct HBC_msgUnit), &acb->dev_acc_attr,
2544 2543 &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2545 2544 arcmsr_warn(acb, "unable to map registers");
2546 2545 return (DDI_FAILURE);
2547 2546 }
2548 2547
2549 2548 if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2550 2549 DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2551 2550 DDI_SUCCESS) {
2552 2551 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2553 2552 arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2554 2553 return (DDI_FAILURE);
2555 2554 }
2556 2555
2557 2556 if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2558 2557 &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2559 2558 DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2560 2559 &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2561 2560 arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2562 2561 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2563 2562 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2564 2563 return (DDI_FAILURE);
2565 2564 }
2566 2565
2567 2566 if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2568 2567 (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2569 2568 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2570 2569 &count) != DDI_DMA_MAPPED) {
2571 2570 arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2572 2571 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2573 2572 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2574 2573 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2575 2574 return (DDI_FAILURE);
2576 2575 }
2577 2576 bzero(arcmsr_ccbs_area, dma_sync_size);
2578 2577 offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2579 2578 - PtrToNum(arcmsr_ccbs_area));
2580 2579 arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2581 2580 /* ioport base */
2582 2581 acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2583 2582 break;
2584 2583 }
2585 2584
2586 2585 case PCI_DEVICE_ID_ARECA_1201:
2587 2586 {
2588 2587 uint32_t *iop_mu_regs_map0;
2589 2588 uint32_t *iop_mu_regs_map1;
2590 2589 struct HBB_msgUnit *phbbmu;
2591 2590
2592 2591 acb->adapter_type = ACB_ADAPTER_TYPE_B; /* marvell */
2593 2592 dma_sync_size =
2594 2593 (ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20) +
2595 2594 sizeof (struct HBB_msgUnit);
2596 2595 /* Allocate memory for the ccb */
2597 2596 if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2598 2597 DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2599 2598 DDI_SUCCESS) {
2600 2599 arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2601 2600 return (DDI_FAILURE);
2602 2601 }
2603 2602
2604 2603 if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2605 2604 &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2606 2605 DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2607 2606 &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2608 2607 arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2609 2608 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2610 2609 return (DDI_FAILURE);
2611 2610 }
2612 2611
2613 2612 if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2614 2613 (caddr_t)arcmsr_ccbs_area, dma_sync_size,
2615 2614 DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2616 2615 NULL, &acb->ccb_cookie, &count) != DDI_DMA_MAPPED) {
2617 2616 arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2618 2617 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2619 2618 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2620 2619 return (DDI_FAILURE);
2621 2620 }
2622 2621 bzero(arcmsr_ccbs_area, dma_sync_size);
2623 2622 offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2624 2623 - PtrToNum(arcmsr_ccbs_area));
2625 2624 arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2626 2625 acb->pmu = (struct msgUnit *)
2627 2626 NumToPtr(PtrToNum(arcmsr_ccbs_area) +
2628 2627 (realccb_size*ARCMSR_MAX_FREECCB_NUM));
2629 2628 phbbmu = (struct HBB_msgUnit *)acb->pmu;
2630 2629
2631 2630 /* setup device register */
2632 2631 if (ddi_regs_map_setup(acb->dev_info, 1,
2633 2632 (caddr_t *)&iop_mu_regs_map0, 0,
2634 2633 sizeof (struct HBB_DOORBELL), &acb->dev_acc_attr,
2635 2634 &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2636 2635 arcmsr_warn(acb, "unable to map base0 registers");
2637 2636 (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2638 2637 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2639 2638 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2640 2639 return (DDI_FAILURE);
2641 2640 }
2642 2641
2643 2642 /* ARCMSR_DRV2IOP_DOORBELL */
2644 2643 phbbmu->hbb_doorbell = (struct HBB_DOORBELL *)iop_mu_regs_map0;
2645 2644 if (ddi_regs_map_setup(acb->dev_info, 2,
2646 2645 (caddr_t *)&iop_mu_regs_map1, 0,
2647 2646 sizeof (struct HBB_RWBUFFER), &acb->dev_acc_attr,
2648 2647 &acb->reg_mu_acc_handle1) != DDI_SUCCESS) {
2649 2648 arcmsr_warn(acb, "unable to map base1 registers");
2650 2649 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2651 2650 (void) ddi_dma_unbind_handle(acb->ccbs_pool_handle);
2652 2651 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2653 2652 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2654 2653 return (DDI_FAILURE);
2655 2654 }
2656 2655
2657 2656 /* ARCMSR_MSGCODE_RWBUFFER */
2658 2657 phbbmu->hbb_rwbuffer = (struct HBB_RWBUFFER *)iop_mu_regs_map1;
2659 2658 break;
2660 2659 }
2661 2660
2662 2661 case PCI_DEVICE_ID_ARECA_1110:
2663 2662 case PCI_DEVICE_ID_ARECA_1120:
2664 2663 case PCI_DEVICE_ID_ARECA_1130:
2665 2664 case PCI_DEVICE_ID_ARECA_1160:
2666 2665 case PCI_DEVICE_ID_ARECA_1170:
2667 2666 case PCI_DEVICE_ID_ARECA_1210:
2668 2667 case PCI_DEVICE_ID_ARECA_1220:
2669 2668 case PCI_DEVICE_ID_ARECA_1230:
2670 2669 case PCI_DEVICE_ID_ARECA_1231:
2671 2670 case PCI_DEVICE_ID_ARECA_1260:
2672 2671 case PCI_DEVICE_ID_ARECA_1261:
2673 2672 case PCI_DEVICE_ID_ARECA_1270:
2674 2673 case PCI_DEVICE_ID_ARECA_1280:
2675 2674 case PCI_DEVICE_ID_ARECA_1212:
2676 2675 case PCI_DEVICE_ID_ARECA_1222:
2677 2676 case PCI_DEVICE_ID_ARECA_1380:
2678 2677 case PCI_DEVICE_ID_ARECA_1381:
2679 2678 case PCI_DEVICE_ID_ARECA_1680:
2680 2679 case PCI_DEVICE_ID_ARECA_1681:
2681 2680 {
2682 2681 uint32_t *iop_mu_regs_map0;
2683 2682
2684 2683 acb->adapter_type = ACB_ADAPTER_TYPE_A; /* intel */
2685 2684 dma_sync_size = ARCMSR_MAX_FREECCB_NUM * realccb_size + 0x20;
2686 2685 if (ddi_regs_map_setup(acb->dev_info, 1,
2687 2686 (caddr_t *)&iop_mu_regs_map0, 0,
2688 2687 sizeof (struct HBA_msgUnit), &acb->dev_acc_attr,
2689 2688 &acb->reg_mu_acc_handle0) != DDI_SUCCESS) {
2690 2689 arcmsr_warn(acb, "unable to map registers");
2691 2690 return (DDI_FAILURE);
2692 2691 }
2693 2692
2694 2693 if ((i = ddi_dma_alloc_handle(acb->dev_info, &arcmsr_ccb_attr,
2695 2694 DDI_DMA_SLEEP, NULL, &acb->ccbs_pool_handle)) !=
2696 2695 DDI_SUCCESS) {
2697 2696 arcmsr_warn(acb, "ddi_dma_alloc_handle failed");
2698 2697 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2699 2698 return (DDI_FAILURE);
2700 2699 }
2701 2700
2702 2701 if (ddi_dma_mem_alloc(acb->ccbs_pool_handle, dma_sync_size,
2703 2702 &acb->dev_acc_attr, DDI_DMA_RDWR | DDI_DMA_CONSISTENT,
2704 2703 DDI_DMA_SLEEP, NULL, (caddr_t *)&arcmsr_ccbs_area,
2705 2704 &allocated_length, &acb->ccbs_acc_handle) != DDI_SUCCESS) {
2706 2705			arcmsr_warn(acb, "ddi_dma_mem_alloc failed");
2707 2706 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2708 2707 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2709 2708 return (DDI_FAILURE);
2710 2709 }
2711 2710
2712 2711 if (ddi_dma_addr_bind_handle(acb->ccbs_pool_handle, NULL,
2713 2712 (caddr_t)arcmsr_ccbs_area, dma_sync_size, DDI_DMA_RDWR |
2714 2713 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &acb->ccb_cookie,
2715 2714 &count) != DDI_DMA_MAPPED) {
2716 2715 arcmsr_warn(acb, "ddi_dma_addr_bind_handle failed");
2717 2716 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2718 2717 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2719 2718 ddi_regs_map_free(&acb->reg_mu_acc_handle0);
2720 2719 return (DDI_FAILURE);
2721 2720 }
2722 2721 bzero(arcmsr_ccbs_area, dma_sync_size);
2723 2722 offset = (uint32_t)(P2ROUNDUP(PtrToNum(arcmsr_ccbs_area), 32)
2724 2723 - PtrToNum(arcmsr_ccbs_area));
2725 2724 arcmsr_ccbs_area = arcmsr_ccbs_area + offset;
2726 2725 /* ioport base */
2727 2726 acb->pmu = (struct msgUnit *)(intptr_t)iop_mu_regs_map0;
2728 2727 break;
2729 2728 }
2730 2729
2731 2730 default:
2732 2731 arcmsr_warn(acb, "Unknown RAID adapter type!");
2733 2732 return (DDI_FAILURE);
2734 2733 }
2735 2734 arcmsr_init_list_head(&acb->ccb_complete_list);
2736 2735	/* beyond this point we must not access pci config space again */
2737 2736 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
2738 2737 ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READ);
2739 2738 acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
2740 2739 /* physical address of acb->pccb_pool */
2741 2740 cdb_phyaddr = acb->ccb_cookie.dmac_address + offset;
2742 2741
2743 2742 pccb_tmp = (struct CCB *)(intptr_t)arcmsr_ccbs_area;
2744 2743
2745 2744 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2746 2745 pccb_tmp->cdb_phyaddr_pattern =
2747 2746 (acb->adapter_type == ACB_ADAPTER_TYPE_C) ?
2748 2747 cdb_phyaddr : (cdb_phyaddr >> 5);
2749 2748 pccb_tmp->acb = acb;
2750 2749 acb->ccbworkingQ[i] = acb->pccb_pool[i] = pccb_tmp;
2751 2750 cdb_phyaddr = cdb_phyaddr + realccb_size;
2752 2751 pccb_tmp = (struct CCB *)NumToPtr(PtrToNum(pccb_tmp) +
2753 2752 realccb_size);
2754 2753 }
2755 2754 acb->vir2phy_offset = PtrToNum(pccb_tmp) - cdb_phyaddr;
2756 2755
2757 2756 /* disable all outbound interrupt */
2758 2757 intmask_org = arcmsr_disable_allintr(acb);
2759 2758
2760 2759 if (!arcmsr_iop_confirm(acb)) {
2761 2760		arcmsr_warn(acb, "arcmsr_iop_confirm error");
2762 2761 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2763 2762 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2764 2763 return (DDI_FAILURE);
2765 2764 }
2766 2765
2767 2766 for (id = 0; id < ARCMSR_MAX_TARGETID; id++) {
2768 2767 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
2769 2768 acb->devstate[id][lun] = ARECA_RAID_GONE;
2770 2769 }
2771 2770 }
2772 2771
2773 2772 /* enable outbound Post Queue, outbound doorbell Interrupt */
2774 2773 arcmsr_enable_allintr(acb, intmask_org);
2775 2774
2776 2775 return (0);
2777 2776 }
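
The CCB pool carving above relies on P2ROUNDUP() to push both the kernel
virtual address and the bound DMA address to the next 32-byte boundary by
the same offset; vir2phy_offset is then a constant difference usable for
later address translation. A small sketch of that arithmetic with
hypothetical addresses:

	#include <stdio.h>
	#include <stdint.h>

	/* same rounding the driver gets from P2ROUNDUP(x, 32) */
	#define	P2ROUNDUP(x, a)	(-(-(x) & -(a)))

	int
	main(void)
	{
		uintptr_t area = 0x100c;	/* hypothetical kernel VA of the pool */
		uintptr_t dmac = 0x7f00100c;	/* hypothetical matching DMA address */

		/* both addresses advance by the same amount to a 32-byte boundary */
		uintptr_t offset = P2ROUNDUP(area, (uintptr_t)32) - area;	/* 0x14 */
		uintptr_t ccb_va = area + offset;
		uintptr_t ccb_pa = dmac + offset;

		(void) printf("offset=0x%lx va=0x%lx pa=0x%lx va-pa=0x%lx\n",
		    (unsigned long)offset, (unsigned long)ccb_va,
		    (unsigned long)ccb_pa, (unsigned long)(ccb_va - ccb_pa));
		return (0);
	}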
2778 2777
2779 2778 static int
2780 2779 arcmsr_do_ddi_attach(dev_info_t *dev_info, int instance)
2781 2780 {
2782 2781 scsi_hba_tran_t *hba_trans;
2783 2782 ddi_device_acc_attr_t dev_acc_attr;
2784 2783 struct ACB *acb;
2785 2784 uint16_t wval;
2786 2785 int raid6 = 1;
2787 2786 char *type;
2788 2787 int intr_types;
2789 2788
2790 2789
2791 2790 /*
2792 2791 * Soft State Structure
2793 2792 * The driver should allocate the per-device-instance
2794 2793 * soft state structure, being careful to clean up properly if
2795 2794 * an error occurs. Allocate data structure.
2796 2795 */
2797 2796 if (ddi_soft_state_zalloc(arcmsr_soft_state, instance) != DDI_SUCCESS) {
2798 2797 arcmsr_warn(NULL, "ddi_soft_state_zalloc failed");
2799 2798 return (DDI_FAILURE);
2800 2799 }
2801 2800
2802 2801 acb = ddi_get_soft_state(arcmsr_soft_state, instance);
2803 2802 ASSERT(acb);
2804 2803
2805 2804 arcmsr_mutex_init(acb);
2806 2805
2807 2806 /* acb is already zalloc()d so we don't need to bzero() it */
2808 2807 dev_acc_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2809 2808 dev_acc_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2810 2809 dev_acc_attr.devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC;
2811 2810
2812 2811 acb->dev_info = dev_info;
2813 2812 acb->dev_acc_attr = dev_acc_attr;
2814 2813
2815 2814 /*
2816 2815 * The driver, if providing DMA, should also check that its hardware is
2817 2816 * installed in a DMA-capable slot
2818 2817 */
2819 2818 if (ddi_slaveonly(dev_info) == DDI_SUCCESS) {
2820 2819 arcmsr_warn(acb, "hardware is not installed in"
2821 2820 " a DMA-capable slot");
2822 2821 goto error_level_0;
2823 2822 }
2824 2823 if (pci_config_setup(dev_info, &acb->pci_acc_handle) != DDI_SUCCESS) {
2825 2824 arcmsr_warn(acb, "pci_config_setup() failed, attach failed");
2826 2825 goto error_level_0;
2827 2826 }
2828 2827
2829 2828 wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_VENID);
2830 2829 if (wval != PCI_VENDOR_ID_ARECA) {
2831 2830 arcmsr_warn(acb,
2832 2831		    "vendorid (0x%04x) does not match 0x%04x "
2833 2832 "(PCI_VENDOR_ID_ARECA)",
2834 2833 wval, PCI_VENDOR_ID_ARECA);
2835 2834 goto error_level_0;
2836 2835 }
2837 2836
2838 2837 wval = pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID);
2839 2838 switch (wval) {
2840 2839 case PCI_DEVICE_ID_ARECA_1110:
2841 2840 case PCI_DEVICE_ID_ARECA_1210:
2842 2841 case PCI_DEVICE_ID_ARECA_1201:
2843 2842 raid6 = 0;
2844 2843 /*FALLTHRU*/
2845 2844 case PCI_DEVICE_ID_ARECA_1120:
2846 2845 case PCI_DEVICE_ID_ARECA_1130:
2847 2846 case PCI_DEVICE_ID_ARECA_1160:
2848 2847 case PCI_DEVICE_ID_ARECA_1170:
2849 2848 case PCI_DEVICE_ID_ARECA_1220:
2850 2849 case PCI_DEVICE_ID_ARECA_1230:
2851 2850 case PCI_DEVICE_ID_ARECA_1260:
2852 2851 case PCI_DEVICE_ID_ARECA_1270:
2853 2852 case PCI_DEVICE_ID_ARECA_1280:
2854 2853 type = "SATA 3G";
2855 2854 break;
2856 2855 case PCI_DEVICE_ID_ARECA_1380:
2857 2856 case PCI_DEVICE_ID_ARECA_1381:
2858 2857 case PCI_DEVICE_ID_ARECA_1680:
2859 2858 case PCI_DEVICE_ID_ARECA_1681:
2860 2859 type = "SAS 3G";
2861 2860 break;
2862 2861 case PCI_DEVICE_ID_ARECA_1880:
2863 2862 type = "SAS 6G";
2864 2863 break;
2865 2864 default:
2866 2865 type = "X-TYPE";
2867 2866 arcmsr_warn(acb, "Unknown Host Adapter RAID Controller!");
2868 2867 goto error_level_0;
2869 2868 }
2870 2869
2871 2870 arcmsr_log(acb, CE_CONT, "Areca %s Host Adapter RAID Controller%s\n",
2872 2871 type, raid6 ? " (RAID6 capable)" : "");
2873 2872
2874 2873 /* we disable iop interrupt here */
2875 2874 if (arcmsr_initialize(acb) == DDI_FAILURE) {
2876 2875 arcmsr_warn(acb, "arcmsr_initialize failed");
2877 2876 goto error_level_1;
2878 2877 }
2879 2878
2880 2879 /* Allocate a transport structure */
2881 2880 hba_trans = scsi_hba_tran_alloc(dev_info, SCSI_HBA_CANSLEEP);
2882 2881 if (hba_trans == NULL) {
2883 2882 arcmsr_warn(acb, "scsi_hba_tran_alloc failed");
2884 2883 goto error_level_2;
2885 2884 }
2886 2885 acb->scsi_hba_transport = hba_trans;
2887 2886 acb->dev_info = dev_info;
2888 2887 /* init scsi host adapter transport entry */
2889 2888 hba_trans->tran_hba_private = acb;
2890 2889 hba_trans->tran_tgt_private = NULL;
2891 2890 /*
2892 2891 * If no per-target initialization is required, the HBA can leave
2893 2892 * tran_tgt_init set to NULL.
2894 2893 */
2895 2894 hba_trans->tran_tgt_init = arcmsr_tran_tgt_init;
2896 2895 hba_trans->tran_tgt_probe = scsi_hba_probe;
2897 2896 hba_trans->tran_tgt_free = NULL;
2898 2897 hba_trans->tran_start = arcmsr_tran_start;
2899 2898 hba_trans->tran_abort = arcmsr_tran_abort;
2900 2899 hba_trans->tran_reset = arcmsr_tran_reset;
2901 2900 hba_trans->tran_getcap = arcmsr_tran_getcap;
2902 2901 hba_trans->tran_setcap = arcmsr_tran_setcap;
2903 2902 hba_trans->tran_init_pkt = arcmsr_tran_init_pkt;
2904 2903 hba_trans->tran_destroy_pkt = arcmsr_tran_destroy_pkt;
2905 2904 hba_trans->tran_dmafree = arcmsr_tran_dmafree;
2906 2905 hba_trans->tran_sync_pkt = arcmsr_tran_sync_pkt;
2907 2906
2908 2907 hba_trans->tran_reset_notify = NULL;
2909 2908 hba_trans->tran_get_bus_addr = NULL;
2910 2909 hba_trans->tran_get_name = NULL;
2911 2910 hba_trans->tran_quiesce = NULL;
2912 2911 hba_trans->tran_unquiesce = NULL;
2913 2912 hba_trans->tran_bus_reset = NULL;
2914 2913 hba_trans->tran_bus_config = arcmsr_tran_bus_config;
2915 2914 hba_trans->tran_add_eventcall = NULL;
2916 2915 hba_trans->tran_get_eventcookie = NULL;
2917 2916 hba_trans->tran_post_event = NULL;
2918 2917 hba_trans->tran_remove_eventcall = NULL;
2919 2918
2920 2919 /* iop init and enable interrupt here */
2921 2920 arcmsr_iop_init(acb);
2922 2921
2923 2922 /* Get supported interrupt types */
2924 2923 if (ddi_intr_get_supported_types(dev_info, &intr_types) !=
2925 2924 DDI_SUCCESS) {
2926 2925 arcmsr_warn(acb, "ddi_intr_get_supported_types failed");
2927 2926 goto error_level_3;
2928 2927 }
2929 2928 if (intr_types & DDI_INTR_TYPE_FIXED) {
2930 2929 if (arcmsr_add_intr(acb, DDI_INTR_TYPE_FIXED) != DDI_SUCCESS)
2931 2930 goto error_level_5;
2932 2931 } else if (intr_types & DDI_INTR_TYPE_MSI) {
2933 2932		if (arcmsr_add_intr(acb, DDI_INTR_TYPE_MSI) != DDI_SUCCESS)
2934 2933 goto error_level_5;
2935 2934 }
2936 2935
2937 2936 /*
2938 2937 * The driver should attach this instance of the device, and
2939 2938 * perform error cleanup if necessary
2940 2939 */
2941 2940 if (scsi_hba_attach_setup(dev_info, &arcmsr_dma_attr,
2942 2941 hba_trans, SCSI_HBA_TRAN_CLONE) != DDI_SUCCESS) {
2943 2942 arcmsr_warn(acb, "scsi_hba_attach_setup failed");
2944 2943 goto error_level_5;
2945 2944 }
2946 2945
2947 2946 /* Create a taskq for dealing with dr events */
2948 2947 if ((acb->taskq = ddi_taskq_create(dev_info, "arcmsr_dr_taskq", 1,
2949 2948 TASKQ_DEFAULTPRI, 0)) == NULL) {
2950 2949 arcmsr_warn(acb, "ddi_taskq_create failed");
2951 2950 goto error_level_8;
2952 2951 }
2953 2952
2954 2953 acb->timeout_count = 0;
2955 2954 /* active ccbs "timeout" watchdog */
2956 2955 acb->timeout_id = timeout(arcmsr_ccbs_timeout, (caddr_t)acb,
2957 2956 (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
2958 2957 acb->timeout_sc_id = timeout(arcmsr_devMap_monitor, (caddr_t)acb,
2959 2958 (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
2960 2959
2961 2960 /* report device info */
2962 2961 ddi_report_dev(dev_info);
2963 2962
2964 2963 return (DDI_SUCCESS);
2965 2964
2966 2965 error_level_8:
2967 2966
2968 2967 error_level_7:
2969 2968 error_level_6:
2970 2969 (void) scsi_hba_detach(dev_info);
2971 2970
2972 2971 error_level_5:
2973 2972 arcmsr_remove_intr(acb);
2974 2973
2975 2974 error_level_3:
2976 2975 error_level_4:
2977 2976 if (acb->scsi_hba_transport)
2978 2977 scsi_hba_tran_free(acb->scsi_hba_transport);
2979 2978
2980 2979 error_level_2:
2981 2980 if (acb->ccbs_acc_handle)
2982 2981 ddi_dma_mem_free(&acb->ccbs_acc_handle);
2983 2982 if (acb->ccbs_pool_handle)
2984 2983 ddi_dma_free_handle(&acb->ccbs_pool_handle);
2985 2984
2986 2985 error_level_1:
2987 2986 if (acb->pci_acc_handle)
2988 2987 pci_config_teardown(&acb->pci_acc_handle);
2989 2988 arcmsr_mutex_destroy(acb);
2990 2989 ddi_soft_state_free(arcmsr_soft_state, instance);
2991 2990
2992 2991 error_level_0:
2993 2992 return (DDI_FAILURE);
2994 2993 }
2995 2994
2996 2995
2997 2996 static void
2998 2997 arcmsr_vlog(struct ACB *acb, int level, char *fmt, va_list ap)
2999 2998 {
3000 2999 char buf[256];
3001 3000
3002 3001 if (acb != NULL) {
3003 3002 (void) snprintf(buf, sizeof (buf), "%s%d: %s",
3004 3003 ddi_driver_name(acb->dev_info),
3005 3004 ddi_get_instance(acb->dev_info), fmt);
3006 3005 fmt = buf;
3007 3006 }
3008 3007 vcmn_err(level, fmt, ap);
3009 3008 }
3010 3009
3011 3010 static void
3012 3011 arcmsr_log(struct ACB *acb, int level, char *fmt, ...)
3013 3012 {
3014 3013 va_list ap;
3015 3014
3016 3015 va_start(ap, fmt);
3017 3016 arcmsr_vlog(acb, level, fmt, ap);
3018 3017 va_end(ap);
3019 3018 }
3020 3019
3021 3020 static void
3022 3021 arcmsr_warn(struct ACB *acb, char *fmt, ...)
3023 3022 {
3024 3023 va_list ap;
3025 3024
3026 3025 va_start(ap, fmt);
3027 3026 arcmsr_vlog(acb, CE_WARN, fmt, ap);
3028 3027 va_end(ap);
3029 3028 }
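
arcmsr_vlog() above prepends "driver name + instance" to the caller's format
string before handing it to vcmn_err(), which is why the call sites never
pass the instance number themselves. The same pattern at user level, with
vfprintf standing in for vcmn_err:

	#include <stdio.h>
	#include <stdarg.h>

	/* same prefix-then-forward shape as arcmsr_vlog(), at user level */
	static void
	vlog(const char *drv, int inst, const char *fmt, va_list ap)
	{
		char buf[256];

		(void) snprintf(buf, sizeof (buf), "%s%d: %s\n", drv, inst, fmt);
		(void) vfprintf(stderr, buf, ap);
	}

	static void
	warn(const char *fmt, ...)
	{
		va_list ap;

		va_start(ap, fmt);
		vlog("arcmsr", 0, fmt, ap);
		va_end(ap);
	}

	int
	main(void)
	{
		warn("dma map failed (0x%x)", 0x2);	/* "arcmsr0: dma map failed (0x2)" */
		return (0);
	}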
3030 3029
3031 3030 static void
3032 3031 arcmsr_init_list_head(struct list_head *list)
3033 3032 {
3034 3033 list->next = list;
3035 3034 list->prev = list;
3036 3035 }
3037 3036
3038 3037 static void
3039 3038 arcmsr_x_list_del(struct list_head *prev, struct list_head *next)
3040 3039 {
3041 3040 next->prev = prev;
3042 3041 prev->next = next;
3043 3042 }
3044 3043
3045 3044 static void
3046 3045 arcmsr_x_list_add(struct list_head *new_one, struct list_head *prev,
3047 3046 struct list_head *next)
3048 3047 {
3049 3048 next->prev = new_one;
3050 3049 new_one->next = next;
3051 3050 new_one->prev = prev;
3052 3051 prev->next = new_one;
3053 3052 }
3054 3053
3055 3054 static void
3056 3055 arcmsr_list_add_tail(kmutex_t *list_lock, struct list_head *new_one,
3057 3056 struct list_head *head)
3058 3057 {
3059 3058 mutex_enter(list_lock);
3060 3059 arcmsr_x_list_add(new_one, head->prev, head);
3061 3060 mutex_exit(list_lock);
3062 3061 }
3063 3062
3064 3063 static struct list_head *
3065 3064 arcmsr_list_get_first(kmutex_t *list_lock, struct list_head *head)
3066 3065 {
3067 3066 struct list_head *one = NULL;
3068 3067
3069 3068 mutex_enter(list_lock);
3070 3069 if (head->next == head) {
3071 3070 mutex_exit(list_lock);
3072 3071 return (NULL);
3073 3072 }
3074 3073 one = head->next;
3075 3074 arcmsr_x_list_del(one->prev, one->next);
3076 3075 arcmsr_init_list_head(one);
3077 3076 mutex_exit(list_lock);
3078 3077 return (one);
3079 3078 }
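
The list helpers above implement a circular doubly-linked list with the head
as sentinel; arcmsr_get_complete_ccb_from_list() then recovers the CCB from
its embedded link via offsetof. A self-contained, mutex-free sketch of both
halves:

	#include <stdio.h>
	#include <stddef.h>

	struct list_head {
		struct list_head *next, *prev;
	};

	static void
	init_list_head(struct list_head *l)
	{
		l->next = l->prev = l;
	}

	/* equivalent to arcmsr_x_list_add(n, head->prev, head) */
	static void
	list_add_tail(struct list_head *n, struct list_head *head)
	{
		n->next = head;
		n->prev = head->prev;
		head->prev->next = n;
		head->prev = n;
	}

	struct item {
		int id;
		struct list_head link;
	};

	int
	main(void)
	{
		struct list_head head;
		struct item a = { 1 }, b = { 2 };

		init_list_head(&head);
		list_add_tail(&a.link, &head);
		list_add_tail(&b.link, &head);
		/* recover each item from its embedded link, offsetof-style */
		for (struct list_head *p = head.next; p != &head; p = p->next) {
			struct item *it = (struct item *)
			    ((char *)p - offsetof(struct item, link));
			(void) printf("item %d\n", it->id);	/* 1, then 2 */
		}
		return (0);
	}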
3080 3079
3081 3080 static struct CCB *
3082 3081 arcmsr_get_complete_ccb_from_list(struct ACB *acb)
3083 3082 {
3084 3083 struct list_head *first_complete_ccb_list = NULL;
3085 3084 struct CCB *ccb;
3086 3085
3087 3086 first_complete_ccb_list =
3088 3087 arcmsr_list_get_first(&acb->ccb_complete_list_mutex,
3089 3088 &acb->ccb_complete_list);
3090 3089 if (first_complete_ccb_list == NULL) {
3091 3090 return (NULL);
3092 3091 }
3093 3092 ccb = (void *)((caddr_t)(first_complete_ccb_list) -
3094 3093 offsetof(struct CCB, complete_queue_pointer));
3095 3094 return (ccb);
3096 3095 }
3097 3096
3098 3097 static struct CCB *
3099 3098 arcmsr_get_freeccb(struct ACB *acb)
3100 3099 {
3101 3100 struct CCB *ccb;
3102 3101 int ccb_get_index, ccb_put_index;
3103 3102
3104 3103 mutex_enter(&acb->workingQ_mutex);
3105 3104 ccb_put_index = acb->ccb_put_index;
3106 3105 ccb_get_index = acb->ccb_get_index;
3107 3106 ccb = acb->ccbworkingQ[ccb_get_index];
3108 3107 ccb_get_index++;
3109 3108 if (ccb_get_index >= ARCMSR_MAX_FREECCB_NUM)
3110 3109 ccb_get_index = ccb_get_index - ARCMSR_MAX_FREECCB_NUM;
3111 3110 if (ccb_put_index != ccb_get_index) {
3112 3111 acb->ccb_get_index = ccb_get_index;
3113 3112 arcmsr_init_list_head(&ccb->complete_queue_pointer);
3114 3113 ccb->ccb_state = ARCMSR_CCB_UNBUILD;
3115 3114 } else {
3116 3115 ccb = NULL;
3117 3116 }
3118 3117 mutex_exit(&acb->workingQ_mutex);
3119 3118 return (ccb);
3120 3119 }
3121 3120
3122 3121
3123 3122 static void
3124 3123 arcmsr_free_ccb(struct CCB *ccb)
3125 3124 {
3126 3125 struct ACB *acb = ccb->acb;
3127 3126
3128 3127 if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3129 3128 return;
3130 3129 }
3131 3130 mutex_enter(&acb->workingQ_mutex);
3132 3131 ccb->ccb_state = ARCMSR_CCB_FREE;
3133 3132 ccb->pkt = NULL;
3134 3133 ccb->pkt_dma_handle = NULL;
3135 3134 ccb->ccb_flags = 0;
3136 3135 acb->ccbworkingQ[acb->ccb_put_index] = ccb;
3137 3136 acb->ccb_put_index++;
3138 3137 if (acb->ccb_put_index >= ARCMSR_MAX_FREECCB_NUM)
3139 3138 acb->ccb_put_index =
3140 3139 acb->ccb_put_index - ARCMSR_MAX_FREECCB_NUM;
3141 3140 mutex_exit(&acb->workingQ_mutex);
3142 3141 }
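
arcmsr_get_freeccb() and arcmsr_free_ccb() manage the free-CCB pool as a ring
of pointers indexed by ccb_get_index/ccb_put_index, wrapping by subtraction
and declaring the ring empty once the advanced get index meets the put index.
A self-contained model of that index arithmetic; QSZ and the int payloads
stand in for ARCMSR_MAX_FREECCB_NUM and the CCB pointers:

    #include <stdio.h>

    #define	QSZ	8

    static int q[QSZ];
    static int get_idx, put_idx;

    static int
    ring_get(int *out)
    {
    	int next = get_idx + 1;

    	if (next >= QSZ)
    		next -= QSZ;		/* wrap by subtraction, as above */
    	if (put_idx == next)
    		return (-1);		/* treated as empty */
    	*out = q[get_idx];
    	get_idx = next;
    	return (0);
    }

    static void
    ring_put(int v)
    {
    	q[put_idx++] = v;
    	if (put_idx >= QSZ)
    		put_idx -= QSZ;
    }

    int
    main(void)
    {
    	int v;

    	ring_put(7);
    	ring_put(9);
    	ring_put(11);
    	while (ring_get(&v) == 0)
    		printf("got %d\n", v);
    	/* prints 7 then 9; like the driver's test, the last queued
    	   element stays behind until another put advances put_idx */
    	return (0);
    }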
3143 3142
3144 3143
3145 3144 static void
3146 3145 arcmsr_ccbs_timeout(void *arg)
3147 3146 {
3148 3147 struct ACB *acb = (struct ACB *)arg;
3149 3148 struct CCB *ccb;
3150 3149 	int i, timeout_count = 0;
3151 3150 uint32_t intmask_org;
3152 3151 time_t current_time = ddi_get_time();
3153 3152
3154 3153 intmask_org = arcmsr_disable_allintr(acb);
3155 3154 mutex_enter(&acb->isr_mutex);
3156 3155 if (acb->ccboutstandingcount != 0) {
3157 3156 /* check each ccb */
3158 3157 i = ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
3159 3158 DDI_DMA_SYNC_FORKERNEL);
3160 3159 if (i != DDI_SUCCESS) {
3161 3160 if ((acb->timeout_id != 0) &&
3162 3161 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3163 3162 /* do pkt timeout check each 60 secs */
3164 3163 acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3165 3164 (void*)acb, (ARCMSR_TIMEOUT_WATCH *
3166 3165 drv_usectohz(1000000)));
3167 3166 }
3168 3167 mutex_exit(&acb->isr_mutex);
3169 3168 arcmsr_enable_allintr(acb, intmask_org);
3170 3169 return;
3171 3170 }
3173 3172 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3174 3173 ccb = acb->pccb_pool[i];
3175 3174 if (ccb->acb != acb) {
3176 3175 break;
3177 3176 }
3178 3177 if (ccb->ccb_state == ARCMSR_CCB_FREE) {
3179 3178 continue;
3180 3179 }
3181 3180 if (ccb->pkt == NULL) {
3182 3181 continue;
3183 3182 }
3184 3183 if (ccb->pkt->pkt_time == 0) {
3185 3184 continue;
3186 3185 }
3187 3186 if (ccb->ccb_time >= current_time) {
3188 3187 continue;
3189 3188 }
3190 3189 int id = ccb->pkt->pkt_address.a_target;
3191 3190 int lun = ccb->pkt->pkt_address.a_lun;
3192 3191 if (ccb->ccb_state == ARCMSR_CCB_START) {
3193 3192 uint8_t *cdb = (uint8_t *)&ccb->arcmsr_cdb.Cdb;
3194 3193
3195 3194 timeout_count++;
3196 3195 			arcmsr_warn(acb,
3197 3196 			    "scsi target %d lun %d cmd=0x%x "
3198 3197 			    "command timeout, ccb=0x%p",
3199 3198 			    id, lun, *cdb, (void *)ccb);
3200 3199 ccb->ccb_state = ARCMSR_CCB_TIMEOUT;
3201 3200 ccb->pkt->pkt_reason = CMD_TIMEOUT;
3202 3201 ccb->pkt->pkt_statistics = STAT_TIMEOUT;
3203 3202 /* acb->devstate[id][lun] = ARECA_RAID_GONE; */
3204 3203 arcmsr_ccb_complete(ccb, 1);
3205 3204 continue;
3206 3205 } else if ((ccb->ccb_state & ARCMSR_CCB_CAN_BE_FREE) ==
3207 3206 ARCMSR_CCB_CAN_BE_FREE) {
3208 3207 arcmsr_free_ccb(ccb);
3209 3208 }
3210 3209 }
3211 3210 }
3212 3211 if ((acb->timeout_id != 0) &&
3213 3212 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3214 3213 /* do pkt timeout check each 60 secs */
3215 3214 acb->timeout_id = timeout(arcmsr_ccbs_timeout,
3216 3215 (void*)acb, (ARCMSR_TIMEOUT_WATCH * drv_usectohz(1000000)));
3217 3216 }
3218 3217 mutex_exit(&acb->isr_mutex);
3219 3218 arcmsr_enable_allintr(acb, intmask_org);
3220 3219 }
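
Both watchdogs in this file (arcmsr_ccbs_timeout() above and
arcmsr_devMap_monitor() below) re-arm themselves from inside their own
callback via timeout(9F) and stop re-arming once ACB_F_SCSISTOPADAPTER is
set. A compact model of that self-re-arming pattern; schedule() is a
hypothetical synchronous stand-in for timeout(9F), capped so the demo ends:

    #include <stdio.h>

    static int stop_flag;	/* models ACB_F_SCSISTOPADAPTER */
    static int ticks;

    static void watchdog(void *arg);

    static void
    schedule(void (*fn)(void *), void *arg)
    {
    	/* stand-in for timeout(9F); here we just invoke synchronously */
    	if (ticks++ < 3)
    		fn(arg);
    }

    static void
    watchdog(void *arg)
    {
    	printf("tick %d: scan outstanding commands\n", ticks);
    	if (!stop_flag)
    		schedule(watchdog, arg);	/* re-arm, as the driver does */
    }

    int
    main(void)
    {
    	schedule(watchdog, NULL);	/* prints ticks 1 through 3 */
    	return (0);
    }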
3221 3220
3222 3221 static void
3223 3222 arcmsr_abort_dr_ccbs(struct ACB *acb, uint16_t target, uint8_t lun)
3224 3223 {
3225 3224 struct CCB *ccb;
3226 3225 uint32_t intmask_org;
3227 3226 int i;
3228 3227
3229 3228 /* disable all outbound interrupts */
3230 3229 intmask_org = arcmsr_disable_allintr(acb);
3231 3230 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3232 3231 ccb = acb->pccb_pool[i];
3233 3232 if (ccb->ccb_state == ARCMSR_CCB_START) {
3234 3233 if ((target == ccb->pkt->pkt_address.a_target) &&
3235 3234 (lun == ccb->pkt->pkt_address.a_lun)) {
3236 3235 ccb->ccb_state = ARCMSR_CCB_ABORTED;
3237 3236 ccb->pkt->pkt_reason = CMD_ABORTED;
3238 3237 ccb->pkt->pkt_statistics |= STAT_ABORTED;
3239 3238 arcmsr_ccb_complete(ccb, 1);
3240 3239 arcmsr_log(acb, CE_NOTE,
3241 3240 "abort T%dL%d ccb", target, lun);
3242 3241 }
3243 3242 }
3244 3243 }
3245 3244 /* enable outbound Post Queue, outbound doorbell Interrupt */
3246 3245 arcmsr_enable_allintr(acb, intmask_org);
3247 3246 }
3248 3247
3249 3248 static int
3250 3249 arcmsr_scsi_device_probe(struct ACB *acb, uint16_t tgt, uint8_t lun)
3251 3250 {
3252 3251 struct scsi_device sd;
3253 3252 dev_info_t *child;
3254 3253 int rval;
3255 3254
3256 3255 bzero(&sd, sizeof (struct scsi_device));
3257 3256 sd.sd_address.a_hba_tran = acb->scsi_hba_transport;
3258 3257 sd.sd_address.a_target = (uint16_t)tgt;
3259 3258 sd.sd_address.a_lun = (uint8_t)lun;
3260 3259 if ((child = arcmsr_find_child(acb, tgt, lun)) != NULL) {
3261 3260 rval = scsi_hba_probe(&sd, NULL);
3262 3261 if (rval == SCSIPROBE_EXISTS) {
3263 3262 rval = ndi_devi_online(child, NDI_ONLINE_ATTACH);
3264 3263 if (rval != NDI_SUCCESS) {
3265 3264 arcmsr_warn(acb, "unable to online T%dL%d",
3266 3265 tgt, lun);
3267 3266 } else {
3268 3267 arcmsr_log(acb, CE_NOTE, "T%dL%d onlined",
3269 3268 tgt, lun);
3270 3269 }
3271 3270 }
3272 3271 } else {
3273 3272 rval = scsi_hba_probe(&sd, NULL);
3274 3273 if (rval == SCSIPROBE_EXISTS)
3275 3274 rval = arcmsr_config_child(acb, &sd, NULL);
3276 3275 }
3277 3276 scsi_unprobe(&sd);
3278 3277 return (rval);
3279 3278 }
3280 3279
3281 3280 static void
3282 3281 arcmsr_dr_handle(struct ACB *acb)
3283 3282 {
3284 3283 char *acb_dev_map = (char *)acb->device_map;
3285 3284 char *devicemap;
3286 3285 char temp;
3287 3286 uint16_t target;
3288 3287 uint8_t lun;
3289 3288 char diff;
3290 3289 int circ = 0;
3291 3290 dev_info_t *dip;
3292 3291 ddi_acc_handle_t reg;
3293 3292
3294 3293 switch (acb->adapter_type) {
3295 3294 case ACB_ADAPTER_TYPE_A:
3296 3295 {
3297 3296 struct HBA_msgUnit *phbamu;
3298 3297
3299 3298 phbamu = (struct HBA_msgUnit *)acb->pmu;
3300 3299 devicemap = (char *)&phbamu->msgcode_rwbuffer[21];
3301 3300 reg = acb->reg_mu_acc_handle0;
3302 3301 break;
3303 3302 }
3304 3303
3305 3304 case ACB_ADAPTER_TYPE_B:
3306 3305 {
3307 3306 struct HBB_msgUnit *phbbmu;
3308 3307
3309 3308 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3310 3309 devicemap = (char *)
3311 3310 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[21];
3312 3311 reg = acb->reg_mu_acc_handle1;
3313 3312 break;
3314 3313 }
3315 3314
3316 3315 case ACB_ADAPTER_TYPE_C:
3317 3316 {
3318 3317 struct HBC_msgUnit *phbcmu;
3319 3318
3320 3319 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3321 3320 devicemap = (char *)&phbcmu->msgcode_rwbuffer[21];
3322 3321 reg = acb->reg_mu_acc_handle0;
3323 3322 break;
3324 3323 }
3325 3324
3326 3325 }
3327 3326
3328 3327 for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
3329 3328 temp = CHIP_REG_READ8(reg, devicemap);
3330 3329 		diff = (*acb_dev_map) ^ temp;
3331 3330 if (diff != 0) {
3332 3331 *acb_dev_map = temp;
3333 3332 for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
3334 3333 if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
3335 3334 ndi_devi_enter(acb->dev_info, &circ);
3336 3335 acb->devstate[target][lun] =
3337 3336 ARECA_RAID_GOOD;
3338 3337 (void) arcmsr_scsi_device_probe(acb,
3339 3338 target, lun);
3340 3339 ndi_devi_exit(acb->dev_info, circ);
3341 3340 arcmsr_log(acb, CE_NOTE,
3342 3341 "T%dL%d on-line", target, lun);
3343 3342 } else if ((temp & 0x01) == 0 &&
3344 3343 (diff & 0x01) == 1) {
3345 3344 dip = arcmsr_find_child(acb, target,
3346 3345 lun);
3347 3346 if (dip != NULL) {
3348 3347 acb->devstate[target][lun] =
3349 3348 ARECA_RAID_GONE;
3350 3349 if (mutex_owned(&acb->
3351 3350 isr_mutex)) {
3352 3351 arcmsr_abort_dr_ccbs(
3353 3352 acb, target, lun);
3354 3353 (void)
3355 3354 ndi_devi_offline(
3356 3355 dip,
3357 3356 NDI_DEVI_REMOVE |
3358 3357 NDI_DEVI_OFFLINE);
3359 3358 } else {
3360 3359 mutex_enter(&acb->
3361 3360 isr_mutex);
3362 3361 arcmsr_abort_dr_ccbs(
3363 3362 acb, target, lun);
3364 3363 (void)
3365 3364 ndi_devi_offline(
3366 3365 dip,
3367 3366 NDI_DEVI_REMOVE |
3368 3367 NDI_DEVI_OFFLINE);
3369 3368 mutex_exit(&acb->
3370 3369 isr_mutex);
3371 3370 }
3372 3371 }
3373 3372 arcmsr_log(acb, CE_NOTE,
3374 3373 "T%dL%d off-line", target, lun);
3375 3374 }
3376 3375 temp >>= 1;
3377 3376 diff >>= 1;
3378 3377 }
3379 3378 }
3380 3379 devicemap++;
3381 3380 acb_dev_map++;
3382 3381 }
3383 3382 }
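
arcmsr_dr_handle() diffs each byte of the firmware device map against the
cached copy with XOR, so the set bits of the difference are exactly the LUNs
that changed state, and the corresponding bit of the fresh byte tells an
arrival from a removal. The same walk in isolation, with made-up map bytes:

    #include <stdio.h>

    int
    main(void)
    {
    	unsigned char cached = 0x05;	/* LUNs 0 and 2 were present */
    	unsigned char fresh = 0x03;	/* LUNs 0 and 1 are present now */
    	unsigned char diff = cached ^ fresh;
    	int lun;

    	for (lun = 0; diff != 0; lun++, fresh >>= 1, diff >>= 1) {
    		if ((diff & 0x01) == 0)
    			continue;	/* no change for this LUN */
    		if (fresh & 0x01)
    			printf("LUN %d came on-line\n", lun);
    		else
    			printf("LUN %d went off-line\n", lun);
    	}
    	/* prints: LUN 1 came on-line, LUN 2 went off-line */
    	return (0);
    }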
3384 3383
3385 3384
3386 3385 static void
3387 3386 arcmsr_devMap_monitor(void *arg)
3388 3387 {
3389 3388 	struct ACB *acb = (struct ACB *)arg;
3390 3389 
3391 3390 	switch (acb->adapter_type) {
3392 3391 case ACB_ADAPTER_TYPE_A:
3393 3392 {
3394 3393 struct HBA_msgUnit *phbamu;
3395 3394
3396 3395 phbamu = (struct HBA_msgUnit *)acb->pmu;
3397 3396 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3398 3397 &phbamu->inbound_msgaddr0,
3399 3398 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3400 3399 break;
3401 3400 }
3402 3401
3403 3402 case ACB_ADAPTER_TYPE_B:
3404 3403 {
3405 3404 struct HBB_msgUnit *phbbmu;
3406 3405
3407 3406 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3408 3407 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3409 3408 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3410 3409 ARCMSR_MESSAGE_GET_CONFIG);
3411 3410 break;
3412 3411 }
3413 3412
3414 3413 case ACB_ADAPTER_TYPE_C:
3415 3414 {
3416 3415 struct HBC_msgUnit *phbcmu;
3417 3416
3418 3417 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3419 3418 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3420 3419 &phbcmu->inbound_msgaddr0,
3421 3420 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3422 3421 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3423 3422 &phbcmu->inbound_doorbell,
3424 3423 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3425 3424 break;
3426 3425 }
3427 3426
3428 3427 }
3429 3428
3430 3429 if ((acb->timeout_id != 0) &&
3431 3430 ((acb->acb_flags & ACB_F_SCSISTOPADAPTER) == 0)) {
3432 3431 		/* poll the adapter device map every 5 secs */
3433 3432 acb->timeout_id = timeout(arcmsr_devMap_monitor, (void*)acb,
3434 3433 (ARCMSR_DEV_MAP_WATCH * drv_usectohz(1000000)));
3435 3434 }
3436 3435 }
3437 3436
3438 3437
3439 3438 static uint32_t
3440 3439 arcmsr_disable_allintr(struct ACB *acb)
3441 3440 {
3442 3441 uint32_t intmask_org;
3443 3442
3444 3443 switch (acb->adapter_type) {
3445 3444 case ACB_ADAPTER_TYPE_A:
3446 3445 {
3447 3446 struct HBA_msgUnit *phbamu;
3448 3447
3449 3448 phbamu = (struct HBA_msgUnit *)acb->pmu;
3450 3449 /* disable all outbound interrupt */
3451 3450 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3452 3451 &phbamu->outbound_intmask);
3453 3452 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3454 3453 &phbamu->outbound_intmask,
3455 3454 intmask_org|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE);
3456 3455 break;
3457 3456 }
3458 3457
3459 3458 case ACB_ADAPTER_TYPE_B:
3460 3459 {
3461 3460 struct HBB_msgUnit *phbbmu;
3462 3461
3463 3462 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3464 3463 /* disable all outbound interrupt */
3465 3464 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3466 3465 &phbbmu->hbb_doorbell->iop2drv_doorbell_mask);
3467 3466 /* disable all interrupts */
3468 3467 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3469 3468 &phbbmu->hbb_doorbell->iop2drv_doorbell_mask, 0);
3470 3469 break;
3471 3470 }
3472 3471
3473 3472 case ACB_ADAPTER_TYPE_C:
3474 3473 {
3475 3474 struct HBC_msgUnit *phbcmu;
3476 3475
3477 3476 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3478 3477 /* disable all outbound interrupt */
3479 3478 intmask_org = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3480 3479 &phbcmu->host_int_mask); /* disable outbound message0 int */
3481 3480 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3482 3481 &phbcmu->host_int_mask,
3483 3482 intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE);
3484 3483 break;
3485 3484 }
3486 3485
3487 3486 }
3488 3487 return (intmask_org);
3489 3488 }
3490 3489
3491 3490
3492 3491 static void
3493 3492 arcmsr_enable_allintr(struct ACB *acb, uint32_t intmask_org)
3494 3493 {
3495 3494 int mask;
3496 3495
3497 3496 switch (acb->adapter_type) {
3498 3497 case ACB_ADAPTER_TYPE_A:
3499 3498 {
3500 3499 struct HBA_msgUnit *phbamu;
3501 3500
3502 3501 phbamu = (struct HBA_msgUnit *)acb->pmu;
3503 3502 /*
3504 3503 * enable outbound Post Queue, outbound doorbell message0
3505 3504 * Interrupt
3506 3505 */
3507 3506 mask = ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
3508 3507 ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
3509 3508 ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
3510 3509 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3511 3510 &phbamu->outbound_intmask, intmask_org & mask);
3512 3511 acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
3513 3512 break;
3514 3513 }
3515 3514
3516 3515 case ACB_ADAPTER_TYPE_B:
3517 3516 {
3518 3517 struct HBB_msgUnit *phbbmu;
3519 3518
3520 3519 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3521 3520 mask = (ARCMSR_IOP2DRV_DATA_WRITE_OK |
3522 3521 ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE |
3523 3522 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
3524 3523 /* 1=interrupt enable, 0=interrupt disable */
3525 3524 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3526 3525 &phbbmu->hbb_doorbell->iop2drv_doorbell_mask,
3527 3526 intmask_org | mask);
3528 3527 acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
3529 3528 break;
3530 3529 }
3531 3530
3532 3531 case ACB_ADAPTER_TYPE_C:
3533 3532 {
3534 3533 struct HBC_msgUnit *phbcmu;
3535 3534
3536 3535 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3537 3536 /* enable outbound Post Queue,outbound doorbell Interrupt */
3538 3537 mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK |
3539 3538 ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK |
3540 3539 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
3541 3540 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3542 3541 &phbcmu->host_int_mask, intmask_org & mask);
3543 3542 acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
3544 3543 break;
3545 3544 }
3546 3545
3547 3546 }
3548 3547 }
3549 3548
3550 3549
3551 3550 static void
3552 3551 arcmsr_iop_parking(struct ACB *acb)
3553 3552 {
3554 3553 /* stop adapter background rebuild */
3555 3554 if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
3556 3555 uint32_t intmask_org;
3557 3556
3558 3557 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
3559 3558 /* disable all outbound interrupt */
3560 3559 intmask_org = arcmsr_disable_allintr(acb);
3561 3560 switch (acb->adapter_type) {
3562 3561 case ACB_ADAPTER_TYPE_A:
3563 3562 arcmsr_stop_hba_bgrb(acb);
3564 3563 arcmsr_flush_hba_cache(acb);
3565 3564 break;
3566 3565
3567 3566 case ACB_ADAPTER_TYPE_B:
3568 3567 arcmsr_stop_hbb_bgrb(acb);
3569 3568 arcmsr_flush_hbb_cache(acb);
3570 3569 break;
3571 3570
3572 3571 case ACB_ADAPTER_TYPE_C:
3573 3572 arcmsr_stop_hbc_bgrb(acb);
3574 3573 arcmsr_flush_hbc_cache(acb);
3575 3574 break;
3576 3575 }
3577 3576 /*
3578 3577 * enable outbound Post Queue
3579 3578 * enable outbound doorbell Interrupt
3580 3579 */
3581 3580 arcmsr_enable_allintr(acb, intmask_org);
3582 3581 }
3583 3582 }
3584 3583
3585 3584
3586 3585 static uint8_t
3587 3586 arcmsr_hba_wait_msgint_ready(struct ACB *acb)
3588 3587 {
3589 3588 uint32_t i;
3590 3589 uint8_t retries = 0x00;
3591 3590 struct HBA_msgUnit *phbamu;
3592 3591
3593 3592
3594 3593 phbamu = (struct HBA_msgUnit *)acb->pmu;
3595 3594
3596 3595 do {
3597 3596 for (i = 0; i < 100; i++) {
3598 3597 if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3599 3598 &phbamu->outbound_intstatus) &
3600 3599 ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
3601 3600 /* clear interrupt */
3602 3601 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3603 3602 &phbamu->outbound_intstatus,
3604 3603 ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
3605 3604 return (TRUE);
3606 3605 }
3607 3606 drv_usecwait(10000);
3608 3607 if (ddi_in_panic()) {
3609 3608 /* clear interrupts */
3610 3609 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3611 3610 &phbamu->outbound_intstatus,
3612 3611 ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
3613 3612 return (TRUE);
3614 3613 }
3615 3614 } /* max 1 second */
3616 3615 } while (retries++ < 20); /* max 20 seconds */
3617 3616 return (FALSE);
3618 3617 }
3619 3618
3620 3619
3621 3620 static uint8_t
3622 3621 arcmsr_hbb_wait_msgint_ready(struct ACB *acb)
3623 3622 {
3624 3623 struct HBB_msgUnit *phbbmu;
3625 3624 uint32_t i;
3626 3625 uint8_t retries = 0x00;
3627 3626
3628 3627 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3629 3628
3630 3629 do {
3631 3630 for (i = 0; i < 100; i++) {
3632 3631 if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3633 3632 &phbbmu->hbb_doorbell->iop2drv_doorbell) &
3634 3633 ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
3635 3634 /* clear interrupt */
3636 3635 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3637 3636 &phbbmu->hbb_doorbell->iop2drv_doorbell,
3638 3637 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
3639 3638 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3640 3639 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3641 3640 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3642 3641 return (TRUE);
3643 3642 }
3644 3643 drv_usecwait(10000);
3645 3644 if (ddi_in_panic()) {
3646 3645 /* clear interrupts */
3647 3646 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3648 3647 &phbbmu->hbb_doorbell->iop2drv_doorbell,
3649 3648 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
3650 3649 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3651 3650 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3652 3651 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
3653 3652 return (TRUE);
3654 3653 }
3655 3654 } /* max 1 second */
3656 3655 } while (retries++ < 20); /* max 20 seconds */
3657 3656
3658 3657 return (FALSE);
3659 3658 }
3660 3659
3661 3660
3662 3661 static uint8_t
3663 3662 arcmsr_hbc_wait_msgint_ready(struct ACB *acb)
3664 3663 {
3665 3664 uint32_t i;
3666 3665 uint8_t retries = 0x00;
3667 3666 struct HBC_msgUnit *phbcmu;
3668 3667 uint32_t c = ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR;
3669 3668
3670 3669
3671 3670 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3672 3671
3673 3672 do {
3674 3673 for (i = 0; i < 100; i++) {
3675 3674 if (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3676 3675 &phbcmu->outbound_doorbell) &
3677 3676 ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
3678 3677 /* clear interrupt */
3679 3678 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3680 3679 &phbcmu->outbound_doorbell_clear, c);
3681 3680 return (TRUE);
3682 3681 }
3683 3682 drv_usecwait(10000);
3684 3683 if (ddi_in_panic()) {
3685 3684 /* clear interrupts */
3686 3685 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3687 3686 &phbcmu->outbound_doorbell_clear, c);
3688 3687 return (TRUE);
3689 3688 }
3690 3689 } /* max 1 second */
3691 3690 } while (retries++ < 20); /* max 20 seconds */
3692 3691 return (FALSE);
3693 3692 }
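
All three wait_msgint_ready routines share one shape: poll the doorbell
register in 10 ms steps for up to a second, retry that second up to 20 times,
and acknowledge the interrupt on success. The skeleton reduced to userland;
poll_done() and usleep() are stand-ins for the register read and
drv_usecwait(), with poll_done() faked to succeed on the fifth poll:

    #include <stdio.h>
    #include <unistd.h>

    static int calls;

    /* stand-in for the CHIP_REG_READ32 doorbell test */
    static int
    poll_done(void)
    {
    	return (++calls >= 5);
    }

    static int
    wait_ready(void)
    {
    	int retries = 0;

    	do {
    		int i;

    		for (i = 0; i < 100; i++) {
    			if (poll_done())
    				return (1);		/* ready */
    			(void) usleep(10000);		/* 10 ms; ~1 s per pass */
    		}
    	} while (retries++ < 20);			/* ~20 s overall */
    	return (0);					/* timed out */
    }

    int
    main(void)
    {
    	(void) printf("ready = %d\n", wait_ready());	/* prints "ready = 1" */
    	return (0);
    }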
3694 3693
3695 3694 static void
3696 3695 arcmsr_flush_hba_cache(struct ACB *acb)
3697 3696 {
3698 3697 struct HBA_msgUnit *phbamu;
3699 3698 int retry_count = 30;
3700 3699
3701 3700 	/* wait up to 10 minutes for the adapter to flush its cache */
3702 3701
3703 3702 phbamu = (struct HBA_msgUnit *)acb->pmu;
3704 3703
3705 3704 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3706 3705 ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3707 3706 do {
3708 3707 if (arcmsr_hba_wait_msgint_ready(acb)) {
3709 3708 break;
3710 3709 } else {
3711 3710 retry_count--;
3712 3711 }
3713 3712 } while (retry_count != 0);
3714 3713 }
3715 3714
3716 3715
3717 3716
3718 3717 static void
3719 3718 arcmsr_flush_hbb_cache(struct ACB *acb)
3720 3719 {
3721 3720 struct HBB_msgUnit *phbbmu;
3722 3721 int retry_count = 30;
3723 3722
3724 3723 	/* wait up to 10 minutes for the adapter to flush its cache */
3725 3724
3726 3725 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3727 3726 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3728 3727 &phbbmu->hbb_doorbell->drv2iop_doorbell,
3729 3728 ARCMSR_MESSAGE_FLUSH_CACHE);
3730 3729 do {
3731 3730 if (arcmsr_hbb_wait_msgint_ready(acb)) {
3732 3731 break;
3733 3732 } else {
3734 3733 retry_count--;
3735 3734 }
3736 3735 } while (retry_count != 0);
3737 3736 }
3738 3737
3739 3738
3740 3739 static void
3741 3740 arcmsr_flush_hbc_cache(struct ACB *acb)
3742 3741 {
3743 3742 struct HBC_msgUnit *phbcmu;
3744 3743 int retry_count = 30;
3745 3744
3746 3745 	/* wait up to 10 minutes for the adapter to flush its cache */
3747 3746
3748 3747 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3749 3748
3750 3749 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3751 3750 ARCMSR_INBOUND_MESG0_FLUSH_CACHE);
3752 3751 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3753 3752 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3754 3753 do {
3755 3754 if (arcmsr_hbc_wait_msgint_ready(acb)) {
3756 3755 break;
3757 3756 } else {
3758 3757 retry_count--;
3759 3758 }
3760 3759 } while (retry_count != 0);
3761 3760 }
3762 3761
3763 3762
3764 3763
3765 3764 static uint8_t
3766 3765 arcmsr_abort_hba_allcmd(struct ACB *acb)
3767 3766 {
3768 3767 struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
3769 3768
3770 3769 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->inbound_msgaddr0,
3771 3770 ARCMSR_INBOUND_MESG0_ABORT_CMD);
3772 3771
3773 3772 if (!arcmsr_hba_wait_msgint_ready(acb)) {
3774 3773 arcmsr_warn(acb,
3775 3774 "timeout while waiting for 'abort all "
3776 3775 "outstanding commands'");
3777 3776 return (0xff);
3778 3777 }
3779 3778 return (0x00);
3780 3779 }
3781 3780
3782 3781
3783 3782
3784 3783 static uint8_t
3785 3784 arcmsr_abort_hbb_allcmd(struct ACB *acb)
3786 3785 {
3787 3786 struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
3788 3787
3789 3788 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3790 3789 &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_ABORT_CMD);
3791 3790
3792 3791 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
3793 3792 arcmsr_warn(acb,
3794 3793 "timeout while waiting for 'abort all "
3795 3794 "outstanding commands'");
3796 3795 		return (0xff);
3797 3796 }
3798 3797 return (0x00);
3799 3798 }
3800 3799
3801 3800
3802 3801 static uint8_t
3803 3802 arcmsr_abort_hbc_allcmd(struct ACB *acb)
3804 3803 {
3805 3804 struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
3806 3805
3807 3806 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_msgaddr0,
3808 3807 ARCMSR_INBOUND_MESG0_ABORT_CMD);
3809 3808 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbcmu->inbound_doorbell,
3810 3809 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
3811 3810
3812 3811 if (!arcmsr_hbc_wait_msgint_ready(acb)) {
3813 3812 arcmsr_warn(acb,
3814 3813 "timeout while waiting for 'abort all "
3815 3814 "outstanding commands'");
3816 3815 return (0xff);
3817 3816 }
3818 3817 return (0x00);
3819 3818 }
3820 3819
3821 3820
3822 3821 static void
3823 3822 arcmsr_done4abort_postqueue(struct ACB *acb)
3824 3823 {
3825 3824
3826 3825 struct CCB *ccb;
3827 3826 uint32_t flag_ccb;
3828 3827 int i = 0;
3829 3828 boolean_t error;
3830 3829
3831 3830 switch (acb->adapter_type) {
3832 3831 case ACB_ADAPTER_TYPE_A:
3833 3832 {
3834 3833 struct HBA_msgUnit *phbamu;
3835 3834 uint32_t outbound_intstatus;
3836 3835
3837 3836 phbamu = (struct HBA_msgUnit *)acb->pmu;
3838 3837 /* clear and abort all outbound posted Q */
3839 3838 outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3840 3839 &phbamu->outbound_intstatus) & acb->outbound_int_enable;
3841 3840 /* clear interrupt */
3842 3841 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3843 3842 &phbamu->outbound_intstatus, outbound_intstatus);
3844 3843 while (((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3845 3844 &phbamu->outbound_queueport)) != 0xFFFFFFFF) &&
3846 3845 (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
3847 3846 /* frame must be 32 bytes aligned */
3848 3847 /* the CDB is the first field of the CCB */
3849 3848 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
3850 3849 /* check if command done with no error */
3851 3850 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
3852 3851 B_TRUE : B_FALSE;
3853 3852 arcmsr_drain_donequeue(acb, ccb, error);
3854 3853 }
3855 3854 break;
3856 3855 }
3857 3856
3858 3857 case ACB_ADAPTER_TYPE_B:
3859 3858 {
3860 3859 struct HBB_msgUnit *phbbmu;
3861 3860
3862 3861 phbbmu = (struct HBB_msgUnit *)acb->pmu;
3863 3862 /* clear all outbound posted Q */
3864 3863 /* clear doorbell interrupt */
3865 3864 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3866 3865 &phbbmu->hbb_doorbell->iop2drv_doorbell,
3867 3866 ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
3868 3867 for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
3869 3868 if ((flag_ccb = phbbmu->done_qbuffer[i]) != 0) {
3870 3869 phbbmu->done_qbuffer[i] = 0;
3871 3870 /* frame must be 32 bytes aligned */
3872 3871 ccb = NumToPtr((acb->vir2phy_offset +
3873 3872 (flag_ccb << 5)));
3874 3873 /* check if command done with no error */
3875 3874 error =
3876 3875 (flag_ccb &
3877 3876 ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
3878 3877 B_TRUE : B_FALSE;
3879 3878 arcmsr_drain_donequeue(acb, ccb, error);
3880 3879 }
3881 3880 phbbmu->post_qbuffer[i] = 0;
3882 3881 } /* drain reply FIFO */
3883 3882 phbbmu->doneq_index = 0;
3884 3883 phbbmu->postq_index = 0;
3885 3884 break;
3886 3885 }
3887 3886
3888 3887 case ACB_ADAPTER_TYPE_C:
3889 3888 {
3890 3889 struct HBC_msgUnit *phbcmu;
3891 3890 uint32_t ccb_cdb_phy;
3892 3891
3893 3892 phbcmu = (struct HBC_msgUnit *)acb->pmu;
3894 3893 while ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3895 3894 &phbcmu->host_int_status) &
3896 3895 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) &&
3897 3896 (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
3898 3897 			/* pop the next completed ccb reference */
3899 3898 flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
3900 3899 &phbcmu->outbound_queueport_low);
3901 3900 /* frame must be 32 bytes aligned */
3902 3901 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
3903 3902 ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
3904 3903 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)?
3905 3904 B_TRUE : B_FALSE;
3906 3905 arcmsr_drain_donequeue(acb, ccb, error);
3907 3906 }
3908 3907 break;
3909 3908 }
3910 3909
3911 3910 }
3912 3911 }
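
The outbound queue hands back each CCB's DMA address shifted right by five
(the frames are 32-byte aligned), and the handlers shift back and add the
cached vir2phy_offset to land on the kernel virtual CCB, the NumToPtr() step
above. A standalone check of that arithmetic with made-up base addresses:

    #include <stdio.h>
    #include <stdint.h>

    int
    main(void)
    {
    	uintptr_t virt_base = 0x40000000;	/* illustrative addresses */
    	uintptr_t phys_base = 0x01000000;
    	uintptr_t vir2phy_offset = virt_base - phys_base;
    	uintptr_t ccb_phys = phys_base + 3 * 32;	/* 4th 32-byte frame */
    	uint32_t flag_ccb = (uint32_t)(ccb_phys >> 5);	/* what the HW posts */
    	uintptr_t ccb_virt = vir2phy_offset + ((uintptr_t)flag_ccb << 5);

    	/* prints 0x40000060, i.e. virt_base + 3 * 32 */
    	printf("virt = 0x%lx\n", (unsigned long)ccb_virt);
    	return (0);
    }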
3913 3912 /*
3914 3913  * Routine Description: try to get an echo from the iop.
3915 3914  * Arguments: acb - the adapter control block.
3916 3915  * Return Value: 0 on success, 0xFF if the iop did not respond in time.
3917 3916  */
3918 3917 static uint8_t
3919 3918 arcmsr_get_echo_from_iop(struct ACB *acb)
3920 3919 {
3921 3920 uint32_t intmask_org;
3922 3921 uint8_t rtnval = 0;
3923 3922
3924 3923 if (acb->adapter_type == ACB_ADAPTER_TYPE_A) {
3925 3924 struct HBA_msgUnit *phbamu;
3926 3925
3927 3926 phbamu = (struct HBA_msgUnit *)acb->pmu;
3928 3927 intmask_org = arcmsr_disable_allintr(acb);
3929 3928 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
3930 3929 &phbamu->inbound_msgaddr0,
3931 3930 ARCMSR_INBOUND_MESG0_GET_CONFIG);
3932 3931 if (!arcmsr_hba_wait_msgint_ready(acb)) {
3933 3932 			arcmsr_warn(acb,
3934 3933 			    "timeout while waiting for echo from iop");
3935 3934 acb->acb_flags |= ACB_F_BUS_HANG_ON;
3936 3935 rtnval = 0xFF;
3937 3936 }
3938 3937 /* enable all outbound interrupt */
3939 3938 arcmsr_enable_allintr(acb, intmask_org);
3940 3939 }
3941 3940 return (rtnval);
3942 3941 }
3943 3942
3944 3943 /*
3945 3944 * Routine Description: Reset 80331 iop.
3946 3945  * Arguments: acb - the adapter control block.
3947 3946  * Return Value: 0 on success, 0xFF on failure.
3948 3947 */
3949 3948 static uint8_t
3950 3949 arcmsr_iop_reset(struct ACB *acb)
3951 3950 {
3952 3951 struct CCB *ccb;
3953 3952 uint32_t intmask_org;
3954 3953 uint8_t rtnval = 0;
3955 3954 int i = 0;
3956 3955
3957 3956 if (acb->ccboutstandingcount > 0) {
3958 3957 /* disable all outbound interrupt */
3959 3958 intmask_org = arcmsr_disable_allintr(acb);
3960 3959 /* clear and abort all outbound posted Q */
3961 3960 arcmsr_done4abort_postqueue(acb);
3962 3961 /* talk to iop 331 outstanding command aborted */
3963 3962 rtnval = (acb->acb_flags & ACB_F_BUS_HANG_ON) ?
3964 3963 0xFF : arcmsr_abort_host_command(acb);
3965 3964
3966 3965 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3967 3966 ccb = acb->pccb_pool[i];
3968 3967 if (ccb->ccb_state == ARCMSR_CCB_START) {
3969 3968 /* ccb->ccb_state = ARCMSR_CCB_RESET; */
3970 3969 ccb->pkt->pkt_reason = CMD_RESET;
3971 3970 ccb->pkt->pkt_statistics |= STAT_BUS_RESET;
3972 3971 arcmsr_ccb_complete(ccb, 1);
3973 3972 }
3974 3973 }
3975 3974 atomic_and_32(&acb->ccboutstandingcount, 0);
3976 3975 /* enable all outbound interrupt */
3977 3976 arcmsr_enable_allintr(acb, intmask_org);
3978 3977 } else {
3979 3978 rtnval = arcmsr_get_echo_from_iop(acb);
3980 3979 }
3981 3980 return (rtnval);
3982 3981 }
3983 3982
3984 3983
3985 3984 static struct QBUFFER *
3986 3985 arcmsr_get_iop_rqbuffer(struct ACB *acb)
3987 3986 {
3988 3987 struct QBUFFER *qb;
3989 3988
3990 3989 switch (acb->adapter_type) {
3991 3990 case ACB_ADAPTER_TYPE_A:
3992 3991 {
3993 3992 struct HBA_msgUnit *phbamu;
3994 3993
3995 3994 phbamu = (struct HBA_msgUnit *)acb->pmu;
3996 3995 qb = (struct QBUFFER *)&phbamu->message_rbuffer;
3997 3996 break;
3998 3997 }
3999 3998
4000 3999 case ACB_ADAPTER_TYPE_B:
4001 4000 {
4002 4001 struct HBB_msgUnit *phbbmu;
4003 4002
4004 4003 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4005 4004 qb = (struct QBUFFER *)&phbbmu->hbb_rwbuffer->message_rbuffer;
4006 4005 break;
4007 4006 }
4008 4007
4009 4008 case ACB_ADAPTER_TYPE_C:
4010 4009 {
4011 4010 struct HBC_msgUnit *phbcmu;
4012 4011
4013 4012 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4014 4013 qb = (struct QBUFFER *)&phbcmu->message_rbuffer;
4015 4014 break;
4016 4015 }
4017 4016
4018 4017 }
4019 4018 return (qb);
4020 4019 }
4021 4020
4022 4021
4023 4022 static struct QBUFFER *
4024 4023 arcmsr_get_iop_wqbuffer(struct ACB *acb)
4025 4024 {
4026 4025 struct QBUFFER *qbuffer = NULL;
4027 4026
4028 4027 switch (acb->adapter_type) {
4029 4028 case ACB_ADAPTER_TYPE_A:
4030 4029 {
4031 4030 struct HBA_msgUnit *phbamu;
4032 4031
4033 4032 phbamu = (struct HBA_msgUnit *)acb->pmu;
4034 4033 qbuffer = (struct QBUFFER *)&phbamu->message_wbuffer;
4035 4034 break;
4036 4035 }
4037 4036
4038 4037 case ACB_ADAPTER_TYPE_B:
4039 4038 {
4040 4039 struct HBB_msgUnit *phbbmu;
4041 4040
4042 4041 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4043 4042 qbuffer = (struct QBUFFER *)
4044 4043 &phbbmu->hbb_rwbuffer->message_wbuffer;
4045 4044 break;
4046 4045 }
4047 4046
4048 4047 case ACB_ADAPTER_TYPE_C:
4049 4048 {
4050 4049 struct HBC_msgUnit *phbcmu;
4051 4050
4052 4051 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4053 4052 qbuffer = (struct QBUFFER *)&phbcmu->message_wbuffer;
4054 4053 break;
4055 4054 }
4056 4055
4057 4056 }
4058 4057 return (qbuffer);
4059 4058 }
4060 4059
4061 4060
4062 4061
4063 4062 static void
4064 4063 arcmsr_iop_message_read(struct ACB *acb)
4065 4064 {
4066 4065 switch (acb->adapter_type) {
4067 4066 case ACB_ADAPTER_TYPE_A:
4068 4067 {
4069 4068 struct HBA_msgUnit *phbamu;
4070 4069
4071 4070 phbamu = (struct HBA_msgUnit *)acb->pmu;
4072 4071 /* let IOP know the data has been read */
4073 4072 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4074 4073 &phbamu->inbound_doorbell,
4075 4074 ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
4076 4075 break;
4077 4076 }
4078 4077
4079 4078 case ACB_ADAPTER_TYPE_B:
4080 4079 {
4081 4080 struct HBB_msgUnit *phbbmu;
4082 4081
4083 4082 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4084 4083 /* let IOP know the data has been read */
4085 4084 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4086 4085 &phbbmu->hbb_doorbell->drv2iop_doorbell,
4087 4086 ARCMSR_DRV2IOP_DATA_READ_OK);
4088 4087 break;
4089 4088 }
4090 4089
4091 4090 case ACB_ADAPTER_TYPE_C:
4092 4091 {
4093 4092 struct HBC_msgUnit *phbcmu;
4094 4093
4095 4094 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4096 4095 /* let IOP know data has been read */
4097 4096 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4098 4097 &phbcmu->inbound_doorbell,
4099 4098 ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
4100 4099 break;
4101 4100 }
4102 4101
4103 4102 }
4104 4103 }
4105 4104
4106 4105
4107 4106
4108 4107 static void
4109 4108 arcmsr_iop_message_wrote(struct ACB *acb)
4110 4109 {
4111 4110 switch (acb->adapter_type) {
4112 4111 case ACB_ADAPTER_TYPE_A: {
4113 4112 struct HBA_msgUnit *phbamu;
4114 4113
4115 4114 phbamu = (struct HBA_msgUnit *)acb->pmu;
4116 4115 /*
4117 4116 * push inbound doorbell tell iop, driver data write ok
4118 4117 * and wait reply on next hwinterrupt for next Qbuffer post
4119 4118 */
4120 4119 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4121 4120 &phbamu->inbound_doorbell,
4122 4121 ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK);
4123 4122 break;
4124 4123 }
4125 4124
4126 4125 case ACB_ADAPTER_TYPE_B:
4127 4126 {
4128 4127 struct HBB_msgUnit *phbbmu;
4129 4128
4130 4129 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4131 4130 /*
4132 4131 		 * push inbound doorbell to tell iop the driver data was written
4133 4132 * successfully, then await reply on next hwinterrupt for
4134 4133 * next Qbuffer post
4135 4134 */
4136 4135 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4137 4136 &phbbmu->hbb_doorbell->drv2iop_doorbell,
4138 4137 ARCMSR_DRV2IOP_DATA_WRITE_OK);
4139 4138 break;
4140 4139 }
4141 4140
4142 4141 case ACB_ADAPTER_TYPE_C:
4143 4142 {
4144 4143 struct HBC_msgUnit *phbcmu;
4145 4144
4146 4145 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4147 4146 /*
4148 4147 * push inbound doorbell tell iop, driver data write ok
4149 4148 * and wait reply on next hwinterrupt for next Qbuffer post
4150 4149 */
4151 4150 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4152 4151 &phbcmu->inbound_doorbell,
4153 4152 ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK);
4154 4153 break;
4155 4154 }
4156 4155
4157 4156 }
4158 4157 }
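
arcmsr_iop_message_read() and arcmsr_iop_message_wrote() are the two halves
of a doorbell handshake around the shared message buffers: copy into the
buffer, ring the doorbell, and wait for the peer's acknowledgment before
posting more. A toy model with plain flags standing in for the memory-mapped
doorbell registers; all names here are illustrative, not driver symbols:

    #include <stdio.h>
    #include <string.h>

    static char shared_buf[124];			/* models QBUFFER data */
    static volatile int drv2iop_data_write_ok;	/* driver -> iop doorbell */
    static volatile int iop2drv_data_read_ok;	/* iop -> driver doorbell */

    static void
    driver_write(const char *msg)
    {
    	(void) strncpy(shared_buf, msg, sizeof (shared_buf) - 1);
    	drv2iop_data_write_ok = 1;		/* ring the doorbell */
    }

    static void
    iop_service(void)
    {
    	if (drv2iop_data_write_ok) {
    		drv2iop_data_write_ok = 0;
    		printf("iop consumed: %s\n", shared_buf);
    		iop2drv_data_read_ok = 1;	/* ack back to the driver */
    	}
    }

    int
    main(void)
    {
    	driver_write("hello iop");
    	iop_service();
    	if (iop2drv_data_read_ok)
    		printf("driver may post the next buffer\n");
    	return (0);
    }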
4159 4158
4160 4159
4161 4160
4162 4161 static void
4163 4162 arcmsr_post_ioctldata2iop(struct ACB *acb)
4164 4163 {
4165 4164 uint8_t *pQbuffer;
4166 4165 struct QBUFFER *pwbuffer;
4167 4166 uint8_t *iop_data;
4168 4167 int32_t allxfer_len = 0;
4169 4168
4170 4169 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
4171 4170 iop_data = (uint8_t *)pwbuffer->data;
4172 4171 if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READ) {
4173 4172 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
4174 4173 while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
4175 4174 (allxfer_len < 124)) {
4176 4175 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
4177 4176 (void) memcpy(iop_data, pQbuffer, 1);
4178 4177 acb->wqbuf_firstidx++;
4179 4178 /* if last index number set it to 0 */
4180 4179 acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4181 4180 iop_data++;
4182 4181 allxfer_len++;
4183 4182 }
4184 4183 pwbuffer->data_len = allxfer_len;
4185 4184 /*
4186 4185 * push inbound doorbell and wait reply at hwinterrupt
4187 4186 * routine for next Qbuffer post
4188 4187 */
4189 4188 arcmsr_iop_message_wrote(acb);
4190 4189 }
4191 4190 }
4192 4191
4193 4192
4194 4193
4195 4194 static void
4196 4195 arcmsr_stop_hba_bgrb(struct ACB *acb)
4197 4196 {
4198 4197 struct HBA_msgUnit *phbamu;
4199 4198
4200 4199 phbamu = (struct HBA_msgUnit *)acb->pmu;
4201 4200
4202 4201 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4203 4202 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4204 4203 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4205 4204 if (!arcmsr_hba_wait_msgint_ready(acb))
4206 4205 arcmsr_warn(acb,
4207 4206 "timeout while waiting for background rebuild completion");
4208 4207 }
4209 4208
4210 4209
4211 4210 static void
4212 4211 arcmsr_stop_hbb_bgrb(struct ACB *acb)
4213 4212 {
4214 4213 struct HBB_msgUnit *phbbmu;
4215 4214
4216 4215 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4217 4216
4218 4217 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4219 4218 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4220 4219 &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_STOP_BGRB);
4221 4220
4222 4221 if (!arcmsr_hbb_wait_msgint_ready(acb))
4223 4222 arcmsr_warn(acb,
4224 4223 "timeout while waiting for background rebuild completion");
4225 4224 }
4226 4225
4227 4226
4228 4227 static void
4229 4228 arcmsr_stop_hbc_bgrb(struct ACB *acb)
4230 4229 {
4231 4230 struct HBC_msgUnit *phbcmu;
4232 4231
4233 4232 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4234 4233
4235 4234 acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
4236 4235 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4237 4236 &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_STOP_BGRB);
4238 4237 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4239 4238 &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4240 4239 if (!arcmsr_hbc_wait_msgint_ready(acb))
4241 4240 arcmsr_warn(acb,
4242 4241 "timeout while waiting for background rebuild completion");
4243 4242 }
4244 4243
4245 4244
4246 4245 static int
4247 4246 arcmsr_iop_message_xfer(struct ACB *acb, struct scsi_pkt *pkt)
4248 4247 {
4249 4248 struct CMD_MESSAGE_FIELD *pcmdmessagefld;
4250 4249 struct CCB *ccb = pkt->pkt_ha_private;
4251 4250 struct buf *bp = ccb->bp;
4252 4251 uint8_t *pQbuffer;
4253 4252 int retvalue = 0, transfer_len = 0;
4254 4253 char *buffer;
4255 4254 uint32_t controlcode;
4256 4255
4257 4256
4258 4257 /* 4 bytes: Areca io control code */
4259 4258 controlcode =
4260 4259 (uint32_t)pkt->pkt_cdbp[5] << 24 |
4261 4260 (uint32_t)pkt->pkt_cdbp[6] << 16 |
4262 4261 (uint32_t)pkt->pkt_cdbp[7] << 8 |
4263 4262 (uint32_t)pkt->pkt_cdbp[8];
4264 4263
4265 4264 if (bp->b_flags & (B_PHYS | B_PAGEIO))
4266 4265 bp_mapin(bp);
4267 4266
4268 4267 buffer = bp->b_un.b_addr;
4269 4268 transfer_len = bp->b_bcount;
4270 4269 if (transfer_len > sizeof (struct CMD_MESSAGE_FIELD)) {
4271 4270 retvalue = ARCMSR_MESSAGE_FAIL;
4272 4271 goto message_out;
4273 4272 }
4274 4273
4275 4274 pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)(intptr_t)buffer;
4276 4275 switch (controlcode) {
4277 4276 case ARCMSR_MESSAGE_READ_RQBUFFER:
4278 4277 {
4279 4278 unsigned long *ver_addr;
4280 4279 uint8_t *ptmpQbuffer;
4281 4280 int32_t allxfer_len = 0;
4282 4281
4283 4282 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
4284 4283
4285 4284 ptmpQbuffer = (uint8_t *)ver_addr;
4286 4285 while ((acb->rqbuf_firstidx != acb->rqbuf_lastidx) &&
4287 4286 (allxfer_len < (MSGDATABUFLEN - 1))) {
4288 4287 pQbuffer = &acb->rqbuffer[acb->rqbuf_firstidx];
4289 4288 (void) memcpy(ptmpQbuffer, pQbuffer, 1);
4290 4289 acb->rqbuf_firstidx++;
4291 4290 acb->rqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
4292 4291 ptmpQbuffer++;
4293 4292 allxfer_len++;
4294 4293 }
4295 4294
4296 4295 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4297 4296 struct QBUFFER *prbuffer;
4298 4297 uint8_t *iop_data;
4299 4298 int32_t iop_len;
4300 4299
4301 4300 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4302 4301 prbuffer = arcmsr_get_iop_rqbuffer(acb);
4303 4302 iop_data = (uint8_t *)prbuffer->data;
4304 4303 iop_len = (int32_t)prbuffer->data_len;
4305 4304
4306 4305 while (iop_len > 0) {
4307 4306 pQbuffer = &acb->rqbuffer[acb->rqbuf_lastidx];
4308 4307 (void) memcpy(pQbuffer, iop_data, 1);
4309 4308 acb->rqbuf_lastidx++;
4310 4309 acb->rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
4311 4310 iop_data++;
4312 4311 iop_len--;
4313 4312 }
4314 4313 arcmsr_iop_message_read(acb);
4315 4314 }
4316 4315
4317 4316 (void) memcpy(pcmdmessagefld->messagedatabuffer,
4318 4317 (uint8_t *)ver_addr, allxfer_len);
4319 4318 pcmdmessagefld->cmdmessage.Length = allxfer_len;
4320 4319 pcmdmessagefld->cmdmessage.ReturnCode =
4321 4320 ARCMSR_MESSAGE_RETURNCODE_OK;
4322 4321 kmem_free(ver_addr, MSGDATABUFLEN);
4323 4322 break;
4324 4323 }
4325 4324
4326 4325 case ARCMSR_MESSAGE_WRITE_WQBUFFER:
4327 4326 {
4328 4327 uint8_t *ver_addr;
4329 4328 int32_t my_empty_len, user_len, wqbuf_firstidx,
4330 4329 wqbuf_lastidx;
4331 4330 uint8_t *ptmpuserbuffer;
4332 4331
4333 4332 ver_addr = kmem_zalloc(MSGDATABUFLEN, KM_SLEEP);
4334 4333
4335 4334 ptmpuserbuffer = ver_addr;
4336 4335 user_len = min(pcmdmessagefld->cmdmessage.Length,
4337 4336 MSGDATABUFLEN);
4338 4337 (void) memcpy(ptmpuserbuffer,
4339 4338 pcmdmessagefld->messagedatabuffer, user_len);
4340 4339 wqbuf_lastidx = acb->wqbuf_lastidx;
4341 4340 wqbuf_firstidx = acb->wqbuf_firstidx;
4342 4341 if (wqbuf_lastidx != wqbuf_firstidx) {
4343 4342 struct scsi_arq_status *arq_status;
4344 4343
4345 4344 arcmsr_post_ioctldata2iop(acb);
4346 4345 arq_status = (struct scsi_arq_status *)
4347 4346 (intptr_t)(pkt->pkt_scbp);
4348 4347 bzero((caddr_t)arq_status,
4349 4348 sizeof (struct scsi_arq_status));
4350 4349 arq_status->sts_rqpkt_reason = CMD_CMPLT;
4351 4350 arq_status->sts_rqpkt_state = (STATE_GOT_BUS |
4352 4351 STATE_GOT_TARGET | STATE_SENT_CMD |
4353 4352 STATE_XFERRED_DATA | STATE_GOT_STATUS);
4354 4353
4355 4354 arq_status->sts_rqpkt_statistics =
4356 4355 pkt->pkt_statistics;
4357 4356 arq_status->sts_rqpkt_resid = 0;
4358 4357 			{ /* &sts_sensedata is never NULL; fill it in unconditionally */
4359 4358 struct scsi_extended_sense *sts_sensedata;
4360 4359
4361 4360 sts_sensedata = &arq_status->sts_sensedata;
4362 4361
4363 4362 /* has error report sensedata */
4364 4363 sts_sensedata->es_code = 0x0;
4365 4364 sts_sensedata->es_valid = 0x01;
4366 4365 sts_sensedata->es_key = KEY_ILLEGAL_REQUEST;
4367 4366 /* AdditionalSenseLength */
4368 4367 sts_sensedata->es_add_len = 0x0A;
4369 4368 /* AdditionalSenseCode */
4370 4369 sts_sensedata->es_add_code = 0x20;
4371 4370 }
4372 4371 retvalue = ARCMSR_MESSAGE_FAIL;
4373 4372 } else {
4374 4373 			my_empty_len = (wqbuf_firstidx - wqbuf_lastidx - 1) &
4375 4374 (ARCMSR_MAX_QBUFFER - 1);
4376 4375 if (my_empty_len >= user_len) {
4377 4376 while (user_len > 0) {
4378 4377 pQbuffer = &acb->wqbuffer[
4379 4378 acb->wqbuf_lastidx];
4380 4379 (void) memcpy(pQbuffer,
4381 4380 ptmpuserbuffer, 1);
4382 4381 acb->wqbuf_lastidx++;
4383 4382 acb->wqbuf_lastidx %=
4384 4383 ARCMSR_MAX_QBUFFER;
4385 4384 ptmpuserbuffer++;
4386 4385 user_len--;
4387 4386 }
4388 4387 if (acb->acb_flags &
4389 4388 ACB_F_MESSAGE_WQBUFFER_CLEARED) {
4390 4389 acb->acb_flags &=
4391 4390 ~ACB_F_MESSAGE_WQBUFFER_CLEARED;
4392 4391 arcmsr_post_ioctldata2iop(acb);
4393 4392 }
4394 4393 } else {
4395 4394 struct scsi_arq_status *arq_status;
4396 4395
4397 4396 /* has error report sensedata */
4398 4397 arq_status = (struct scsi_arq_status *)
4399 4398 (intptr_t)(pkt->pkt_scbp);
4400 4399 bzero((caddr_t)arq_status,
4401 4400 sizeof (struct scsi_arq_status));
4402 4401 arq_status->sts_rqpkt_reason = CMD_CMPLT;
4403 4402 arq_status->sts_rqpkt_state =
4404 4403 (STATE_GOT_BUS |
4405 4404 				    STATE_GOT_TARGET | STATE_SENT_CMD |
4406 4405 STATE_XFERRED_DATA | STATE_GOT_STATUS);
4407 4406 arq_status->sts_rqpkt_statistics =
4408 4407 pkt->pkt_statistics;
4409 4408 arq_status->sts_rqpkt_resid = 0;
4410 4409 				{ /* &sts_sensedata is never NULL; fill it in unconditionally */
4411 4410 struct scsi_extended_sense *
4412 4411 sts_sensedata;
4413 4412
4414 4413 sts_sensedata =
4415 4414 &arq_status->sts_sensedata;
4416 4415
4417 4416 /* has error report sensedata */
4418 4417 sts_sensedata->es_code = 0x0;
4419 4418 sts_sensedata->es_valid = 0x01;
4420 4419 sts_sensedata->es_key =
4421 4420 KEY_ILLEGAL_REQUEST;
4422 4421 /* AdditionalSenseLength */
4423 4422 sts_sensedata->es_add_len = 0x0A;
4424 4423 /* AdditionalSenseCode */
4425 4424 sts_sensedata->es_add_code = 0x20;
4426 4425 }
4427 4426 retvalue = ARCMSR_MESSAGE_FAIL;
4428 4427 }
4429 4428 }
4430 4429 kmem_free(ver_addr, MSGDATABUFLEN);
4431 4430 break;
4432 4431 }
4433 4432
4434 4433 case ARCMSR_MESSAGE_CLEAR_RQBUFFER:
4435 4434 pQbuffer = acb->rqbuffer;
4436 4435
4437 4436 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4438 4437 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4439 4438 arcmsr_iop_message_read(acb);
4440 4439 }
4441 4440 acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
4442 4441 acb->rqbuf_firstidx = 0;
4443 4442 acb->rqbuf_lastidx = 0;
4444 4443 (void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
4445 4444 pcmdmessagefld->cmdmessage.ReturnCode =
4446 4445 ARCMSR_MESSAGE_RETURNCODE_OK;
4447 4446 break;
4448 4447 case ARCMSR_MESSAGE_CLEAR_WQBUFFER:
4449 4448 pQbuffer = acb->wqbuffer;
4450 4449
4451 4450 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4452 4451 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4453 4452 arcmsr_iop_message_read(acb);
4454 4453 }
4455 4454 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4456 4455 ACB_F_MESSAGE_WQBUFFER_READ);
4457 4456 acb->wqbuf_firstidx = 0;
4458 4457 acb->wqbuf_lastidx = 0;
4459 4458 (void) memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
4460 4459 pcmdmessagefld->cmdmessage.ReturnCode =
4461 4460 ARCMSR_MESSAGE_RETURNCODE_OK;
4462 4461 break;
4463 4462 case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER:
4464 4463
4465 4464 if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
4466 4465 acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
4467 4466 arcmsr_iop_message_read(acb);
4468 4467 }
4469 4468 acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
4470 4469 ACB_F_MESSAGE_RQBUFFER_CLEARED |
4471 4470 ACB_F_MESSAGE_WQBUFFER_READ);
4472 4471 acb->rqbuf_firstidx = 0;
4473 4472 acb->rqbuf_lastidx = 0;
4474 4473 acb->wqbuf_firstidx = 0;
4475 4474 acb->wqbuf_lastidx = 0;
4476 4475 pQbuffer = acb->rqbuffer;
4477 4476 (void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
4478 4477 pQbuffer = acb->wqbuffer;
4479 4478 (void) memset(pQbuffer, 0, sizeof (struct QBUFFER));
4480 4479 pcmdmessagefld->cmdmessage.ReturnCode =
4481 4480 ARCMSR_MESSAGE_RETURNCODE_OK;
4482 4481 break;
4483 4482
4484 4483 case ARCMSR_MESSAGE_REQUEST_RETURN_CODE_3F:
4485 4484 pcmdmessagefld->cmdmessage.ReturnCode =
4486 4485 ARCMSR_MESSAGE_RETURNCODE_3F;
4487 4486 break;
4488 4487 /*
4489 4488 * Not supported - ARCMSR_MESSAGE_SAY_HELLO
4490 4489 */
4491 4490 case ARCMSR_MESSAGE_SAY_GOODBYE:
4492 4491 arcmsr_iop_parking(acb);
4493 4492 break;
4494 4493 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
4495 4494 switch (acb->adapter_type) {
4496 4495 case ACB_ADAPTER_TYPE_A:
4497 4496 arcmsr_flush_hba_cache(acb);
4498 4497 break;
4499 4498 case ACB_ADAPTER_TYPE_B:
4500 4499 arcmsr_flush_hbb_cache(acb);
4501 4500 break;
4502 4501 case ACB_ADAPTER_TYPE_C:
4503 4502 arcmsr_flush_hbc_cache(acb);
4504 4503 break;
4505 4504 }
4506 4505 break;
4507 4506 default:
4508 4507 retvalue = ARCMSR_MESSAGE_FAIL;
4509 4508 }
4510 4509
4511 4510 message_out:
4512 4511
4513 4512 return (retvalue);
4514 4513 }
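
The WRITE_WQBUFFER branch above sizes the ring's free space as
(wqbuf_firstidx - wqbuf_lastidx - 1) & (ARCMSR_MAX_QBUFFER - 1). Assuming
ARCMSR_MAX_QBUFFER is a power of two, this is the usual ring formula: the
mask folds a negative difference back into range, and the -1 keeps one slot
in reserve so a full ring is never mistaken for an empty one. A quick check
of the formula with a small power-of-two size:

    #include <stdio.h>

    #define	RING	32	/* stands in for ARCMSR_MAX_QBUFFER */

    int
    main(void)
    {
    	int first = 5, last = 9;	/* consumer at 5, producer at 9 */
    	int empty = (first - last - 1) & (RING - 1);

    	/* 4 bytes queued, 1 slot reserved: prints "free = 27" */
    	printf("free = %d\n", empty);
    	return (0);
    }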
4515 4514
4516 4515
4517 4516
4518 4517
4519 4518 static void
4520 4519 arcmsr_pcidev_disattach(struct ACB *acb)
4521 4520 {
4522 4521 struct CCB *ccb;
4523 4522 int i = 0;
4524 4523
4525 4524 /* disable all outbound interrupts */
4526 4525 (void) arcmsr_disable_allintr(acb);
4527 4526 /* stop adapter background rebuild */
4528 4527 switch (acb->adapter_type) {
4529 4528 case ACB_ADAPTER_TYPE_A:
4530 4529 arcmsr_stop_hba_bgrb(acb);
4531 4530 arcmsr_flush_hba_cache(acb);
4532 4531 break;
4533 4532 case ACB_ADAPTER_TYPE_B:
4534 4533 arcmsr_stop_hbb_bgrb(acb);
4535 4534 arcmsr_flush_hbb_cache(acb);
4536 4535 break;
4537 4536 case ACB_ADAPTER_TYPE_C:
4538 4537 arcmsr_stop_hbc_bgrb(acb);
4539 4538 arcmsr_flush_hbc_cache(acb);
4540 4539 break;
4541 4540 }
4542 4541 /* abort all outstanding commands */
4543 4542 acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
4544 4543 acb->acb_flags &= ~ACB_F_IOP_INITED;
4545 4544
4546 4545 if (acb->ccboutstandingcount != 0) {
4547 4546 /* clear and abort all outbound posted Q */
4548 4547 arcmsr_done4abort_postqueue(acb);
4549 4548 /* talk to iop outstanding command aborted */
4550 4549 (void) arcmsr_abort_host_command(acb);
4551 4550
4552 4551 for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
4553 4552 ccb = acb->pccb_pool[i];
4554 4553 if (ccb->ccb_state == ARCMSR_CCB_START) {
4555 4554 /* ccb->ccb_state = ARCMSR_CCB_ABORTED; */
4556 4555 ccb->pkt->pkt_reason = CMD_ABORTED;
4557 4556 ccb->pkt->pkt_statistics |= STAT_ABORTED;
4558 4557 arcmsr_ccb_complete(ccb, 1);
4559 4558 }
4560 4559 }
4561 4560 }
4562 4561 }
4563 4562
4564 4563 /* get firmware miscellaneous data */
4565 4564 static void
4566 4565 arcmsr_get_hba_config(struct ACB *acb)
4567 4566 {
4568 4567 struct HBA_msgUnit *phbamu;
4569 4568
4570 4569 char *acb_firm_model;
4571 4570 char *acb_firm_version;
4572 4571 char *acb_device_map;
4573 4572 char *iop_firm_model;
4574 4573 char *iop_firm_version;
4575 4574 char *iop_device_map;
4576 4575 int count;
4577 4576
4578 4577 phbamu = (struct HBA_msgUnit *)acb->pmu;
4579 4578 acb_firm_model = acb->firm_model;
4580 4579 acb_firm_version = acb->firm_version;
4581 4580 acb_device_map = acb->device_map;
4582 4581 /* firm_model, 15 */
4583 4582 iop_firm_model =
4584 4583 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4585 4584 /* firm_version, 17 */
4586 4585 iop_firm_version =
4587 4586 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4588 4587
4589 4588 /* device_map, 21 */
4590 4589 iop_device_map =
4591 4590 (char *)(&phbamu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4592 4591
4593 4592 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4594 4593 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4595 4594
4596 4595 if (!arcmsr_hba_wait_msgint_ready(acb))
4597 4596 arcmsr_warn(acb,
4598 4597 "timeout while waiting for adapter firmware "
4599 4598 "miscellaneous data");
4600 4599
4601 4600 count = 8;
4602 4601 while (count) {
4603 4602 *acb_firm_model = CHIP_REG_READ8(acb->reg_mu_acc_handle0,
4604 4603 iop_firm_model);
4605 4604 acb_firm_model++;
4606 4605 iop_firm_model++;
4607 4606 count--;
4608 4607 }
4609 4608
4610 4609 count = 16;
4611 4610 while (count) {
4612 4611 *acb_firm_version =
4613 4612 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4614 4613 acb_firm_version++;
4615 4614 iop_firm_version++;
4616 4615 count--;
4617 4616 }
4618 4617
4619 4618 count = 16;
4620 4619 while (count) {
4621 4620 *acb_device_map =
4622 4621 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4623 4622 acb_device_map++;
4624 4623 iop_device_map++;
4625 4624 count--;
4626 4625 }
4627 4626
4628 4627 arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4629 4628 acb->firm_version);
4630 4629
4631 4630 /* firm_request_len, 1 */
4632 4631 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4633 4632 &phbamu->msgcode_rwbuffer[1]);
4634 4633 /* firm_numbers_queue, 2 */
4635 4634 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4636 4635 &phbamu->msgcode_rwbuffer[2]);
4637 4636 /* firm_sdram_size, 3 */
4638 4637 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4639 4638 &phbamu->msgcode_rwbuffer[3]);
4640 4639 /* firm_ide_channels, 4 */
4641 4640 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4642 4641 &phbamu->msgcode_rwbuffer[4]);
4643 4642 }
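
The three while (count) loops in each get_*_config routine copy fixed-width
firmware strings out of the message-unit window one CHIP_REG_READ8() at a
time, because the source is device memory that must be read through a DDI
access handle rather than plain memcpy. The pattern as a tiny helper;
reg_read8() and the "ARC-1880" string are illustrative stand-ins:

    #include <stdio.h>
    #include <string.h>

    /* stand-in for CHIP_REG_READ8(handle, addr) */
    static unsigned char
    reg_read8(const volatile unsigned char *src)
    {
    	return (*src);
    }

    static void
    copy_from_mu(char *dst, const volatile unsigned char *src, size_t len)
    {
    	while (len-- > 0)
    		*dst++ = (char)reg_read8(src++);	/* one access per byte */
    }

    int
    main(void)
    {
    	unsigned char mu_window[8];	/* models msgcode_rwbuffer bytes */
    	char firm_model[9] = { 0 };

    	(void) memcpy(mu_window, "ARC-1880", 8);
    	copy_from_mu(firm_model, mu_window, 8);
    	(void) printf("model = %s\n", firm_model);	/* "model = ARC-1880" */
    	return (0);
    }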
4644 4643
4645 4644 /* get firmware miscellaneous data */
4646 4645 static void
4647 4646 arcmsr_get_hbb_config(struct ACB *acb)
4648 4647 {
4649 4648 struct HBB_msgUnit *phbbmu;
4650 4649 char *acb_firm_model;
4651 4650 char *acb_firm_version;
4652 4651 char *acb_device_map;
4653 4652 char *iop_firm_model;
4654 4653 char *iop_firm_version;
4655 4654 char *iop_device_map;
4656 4655 int count;
4657 4656
4658 4657 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4659 4658 acb_firm_model = acb->firm_model;
4660 4659 acb_firm_version = acb->firm_version;
4661 4660 acb_device_map = acb->device_map;
4662 4661 /* firm_model, 15 */
4663 4662 iop_firm_model = (char *)
4664 4663 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4665 4664 /* firm_version, 17 */
4666 4665 iop_firm_version = (char *)
4667 4666 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4668 4667 /* device_map, 21 */
4669 4668 iop_device_map = (char *)
4670 4669 (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4671 4670
4672 4671 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4673 4672 &phbbmu->hbb_doorbell->drv2iop_doorbell, ARCMSR_MESSAGE_GET_CONFIG);
4674 4673
4675 4674 if (!arcmsr_hbb_wait_msgint_ready(acb))
4676 4675 arcmsr_warn(acb,
4677 4676 "timeout while waiting for adapter firmware "
4678 4677 "miscellaneous data");
4679 4678
4680 4679 count = 8;
4681 4680 while (count) {
4682 4681 *acb_firm_model =
4683 4682 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_model);
4684 4683 acb_firm_model++;
4685 4684 iop_firm_model++;
4686 4685 count--;
4687 4686 }
4688 4687 count = 16;
4689 4688 while (count) {
4690 4689 *acb_firm_version =
4691 4690 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_firm_version);
4692 4691 acb_firm_version++;
4693 4692 iop_firm_version++;
4694 4693 count--;
4695 4694 }
4696 4695 count = 16;
4697 4696 while (count) {
4698 4697 *acb_device_map =
4699 4698 CHIP_REG_READ8(acb->reg_mu_acc_handle1, iop_device_map);
4700 4699 acb_device_map++;
4701 4700 iop_device_map++;
4702 4701 count--;
4703 4702 }
4704 4703
4705 4704 arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4706 4705 acb->firm_version);
4707 4706
4708 4707 /* firm_request_len, 1 */
4709 4708 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4710 4709 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1]);
4711 4710 /* firm_numbers_queue, 2 */
4712 4711 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4713 4712 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2]);
4714 4713 /* firm_sdram_size, 3 */
4715 4714 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4716 4715 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3]);
4717 4716 /* firm_ide_channels, 4 */
4718 4717 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle1,
4719 4718 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4]);
4720 4719 }
4721 4720
4722 4721
4723 4722 /* get firmware miscellaneous data */
4724 4723 static void
4725 4724 arcmsr_get_hbc_config(struct ACB *acb)
4726 4725 {
4727 4726 struct HBC_msgUnit *phbcmu;
4728 4727
4729 4728 char *acb_firm_model;
4730 4729 char *acb_firm_version;
4731 4730 char *acb_device_map;
4732 4731 char *iop_firm_model;
4733 4732 char *iop_firm_version;
4734 4733 char *iop_device_map;
4735 4734 int count;
4736 4735
4737 4736 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4738 4737 acb_firm_model = acb->firm_model;
4739 4738 acb_firm_version = acb->firm_version;
4740 4739 acb_device_map = acb->device_map;
4741 4740 /* firm_model, 15 */
4742 4741 iop_firm_model =
4743 4742 (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MODEL_OFFSET]);
4744 4743 /* firm_version, 17 */
4745 4744 iop_firm_version =
4746 4745 (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_VERS_OFFSET]);
4747 4746 /* device_map, 21 */
4748 4747 iop_device_map =
4749 4748 (char *)(&phbcmu->msgcode_rwbuffer[ARCMSR_FW_MAP_OFFSET]);
4750 4749 /* post "get config" instruction */
4751 4750 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4752 4751 &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_GET_CONFIG);
4753 4752 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4754 4753 &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4755 4754 if (!arcmsr_hbc_wait_msgint_ready(acb))
4756 4755 arcmsr_warn(acb,
4757 4756 "timeout while waiting for adapter firmware "
4758 4757 "miscellaneous data");
4759 4758 count = 8;
4760 4759 while (count) {
4761 4760 *acb_firm_model =
4762 4761 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_model);
4763 4762 acb_firm_model++;
4764 4763 iop_firm_model++;
4765 4764 count--;
4766 4765 }
4767 4766
4768 4767 count = 16;
4769 4768 while (count) {
4770 4769 *acb_firm_version =
4771 4770 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_firm_version);
4772 4771 acb_firm_version++;
4773 4772 iop_firm_version++;
4774 4773 count--;
4775 4774 }
4776 4775
4777 4776 count = 16;
4778 4777 while (count) {
4779 4778 *acb_device_map =
4780 4779 CHIP_REG_READ8(acb->reg_mu_acc_handle0, iop_device_map);
4781 4780 acb_device_map++;
4782 4781 iop_device_map++;
4783 4782 count--;
4784 4783 }
4785 4784
4786 4785 arcmsr_log(acb, CE_CONT, "ARECA RAID FIRMWARE VERSION %s\n",
4787 4786 acb->firm_version);
4788 4787
4789 4788 /* firm_request_len, 1, 04-07 */
4790 4789 acb->firm_request_len = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4791 4790 &phbcmu->msgcode_rwbuffer[1]);
4792 4791 /* firm_numbers_queue, 2, 08-11 */
4793 4792 acb->firm_numbers_queue = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4794 4793 &phbcmu->msgcode_rwbuffer[2]);
4795 4794 /* firm_sdram_size, 3, 12-15 */
4796 4795 acb->firm_sdram_size = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4797 4796 &phbcmu->msgcode_rwbuffer[3]);
4798 4797 /* firm_ide_channels, 4, 16-19 */
4799 4798 acb->firm_ide_channels = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4800 4799 &phbcmu->msgcode_rwbuffer[4]);
4801 4800 /* firm_cfg_version, 25, 100-103 */
4802 4801 acb->firm_cfg_version = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4803 4802 &phbcmu->msgcode_rwbuffer[25]);
4804 4803 }
4805 4804
4806 4805
4807 4806 /* start background rebuild */
4808 4807 static void
4809 4808 arcmsr_start_hba_bgrb(struct ACB *acb)
4810 4809 {
4811 4810 struct HBA_msgUnit *phbamu;
4812 4811
4813 4812 phbamu = (struct HBA_msgUnit *)acb->pmu;
4814 4813
4815 4814 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4816 4815 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4817 4816 &phbamu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4818 4817
4819 4818 if (!arcmsr_hba_wait_msgint_ready(acb))
4820 4819 arcmsr_warn(acb,
4821 4820 "timeout while waiting for background rebuild to start");
4822 4821 }
4823 4822
4824 4823
4825 4824 static void
4826 4825 arcmsr_start_hbb_bgrb(struct ACB *acb)
4827 4826 {
4828 4827 struct HBB_msgUnit *phbbmu;
4829 4828
4830 4829 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4831 4830
4832 4831 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4833 4832 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4834 4833 &phbbmu->hbb_doorbell->drv2iop_doorbell,
4835 4834 ARCMSR_MESSAGE_START_BGRB);
4836 4835
4837 4836 if (!arcmsr_hbb_wait_msgint_ready(acb))
4838 4837 arcmsr_warn(acb,
4839 4838 "timeout while waiting for background rebuild to start");
4840 4839 }
4841 4840
4842 4841
4843 4842 static void
4844 4843 arcmsr_start_hbc_bgrb(struct ACB *acb)
4845 4844 {
4846 4845 struct HBC_msgUnit *phbcmu;
4847 4846
4848 4847 phbcmu = (struct HBC_msgUnit *)acb->pmu;
4849 4848
4850 4849 acb->acb_flags |= ACB_F_MSG_START_BGRB;
4851 4850 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4852 4851 &phbcmu->inbound_msgaddr0, ARCMSR_INBOUND_MESG0_START_BGRB);
4853 4852 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4854 4853 &phbcmu->inbound_doorbell, ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
4855 4854 if (!arcmsr_hbc_wait_msgint_ready(acb))
4856 4855 arcmsr_warn(acb,
4857 4856 "timeout while waiting for background rebuild to start");
4858 4857 }
4859 4858
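/*
 * Poll the type A outbound reply FIFO with interrupts masked, completing
 * every CCB the firmware has finished.  Each reply word is a CCB frame
 * tag; adding acb->vir2phy_offset to (tag << 5) (frames are 32 bytes)
 * recovers the kernel virtual address of the CCB.  When waiting on a
 * specific poll_ccb, give up after roughly 100 x 25 ms waits.
 */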
4860 4859 static void
4861 4860 arcmsr_polling_hba_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4862 4861 {
4863 4862 struct HBA_msgUnit *phbamu;
4864 4863 struct CCB *ccb;
4865 4864 boolean_t error;
4866 4865 uint32_t flag_ccb, outbound_intstatus, intmask_org;
4867 4866 boolean_t poll_ccb_done = B_FALSE;
4868 4867 uint32_t poll_count = 0;
4869 4868
4870 4869
4871 4870 phbamu = (struct HBA_msgUnit *)acb->pmu;
4872 4871
4873 4872 polling_ccb_retry:
4874 4873 /* TODO: Use correct offset and size for syncing? */
4875 4874 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4876 4875 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4877 4876 return;
4878 4877 intmask_org = arcmsr_disable_allintr(acb);
4879 4878
4880 4879 for (;;) {
4881 4880 if ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4882 4881 &phbamu->outbound_queueport)) == 0xFFFFFFFF) {
4883 4882 if (poll_ccb_done) {
4884 4883 /* no CCB left in the chip FIFO to complete */
4885 4884 break;
4886 4885 } else {
4887 4886 drv_usecwait(25000);
4888 4887 if ((poll_count > 100) && (poll_ccb != NULL)) {
4889 4888 break;
4890 4889 }
4891 4890 if (acb->ccboutstandingcount == 0) {
4892 4891 break;
4893 4892 }
4894 4893 poll_count++;
4895 4894 outbound_intstatus =
4896 4895 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
4897 4896 &phbamu->outbound_intstatus) &
4898 4897 acb->outbound_int_enable;
4899 4898
4900 4899 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4901 4900 &phbamu->outbound_intstatus,
4902 4901 outbound_intstatus); /* clear interrupt */
4903 4902 }
4904 4903 }
4905 4904
4906 4905 /* frame must be 32 bytes aligned */
4907 4906 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4908 4907
4909 4908 /* check if command done with no error */
4910 4909 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4911 4910 B_TRUE : B_FALSE;
4912 4911 if (poll_ccb != NULL)
4913 4912 poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4914 4913
4915 4914 if (ccb->acb != acb) {
4916 4915 arcmsr_warn(acb, "ccb got the wrong acb!");
4917 4916 continue;
4918 4917 }
4919 4918 if (ccb->ccb_state != ARCMSR_CCB_START) {
4920 4919 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
4921 4920 ccb->ccb_state |= ARCMSR_CCB_BACK;
4922 4921 ccb->pkt->pkt_reason = CMD_ABORTED;
4923 4922 ccb->pkt->pkt_statistics |= STAT_ABORTED;
4924 4923 arcmsr_ccb_complete(ccb, 1);
4925 4924 continue;
4926 4925 }
4927 4926 arcmsr_report_ccb_state(acb, ccb, error);
4928 4927 arcmsr_warn(acb,
4929 4928 "polling op got unexpected ccb command done");
4930 4929 continue;
4931 4930 }
4932 4931 arcmsr_report_ccb_state(acb, ccb, error);
4933 4932 } /* drain reply FIFO */
4934 4933 arcmsr_enable_allintr(acb, intmask_org);
4935 4934 }
4936 4935
4937 4936
4938 4937 static void
4939 4938 arcmsr_polling_hbb_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
4940 4939 {
4941 4940 struct HBB_msgUnit *phbbmu;
4942 4941 struct CCB *ccb;
4943 4942 uint32_t flag_ccb, intmask_org;
4944 4943 boolean_t error;
4945 4944 uint32_t poll_count = 0;
4946 4945 int index;
4947 4946 boolean_t poll_ccb_done = B_FALSE;
4948 4947
4949 4948
4950 4949 phbbmu = (struct HBB_msgUnit *)acb->pmu;
4951 4950
4952 4951
4953 4952 polling_ccb_retry:
4954 4953 /* Use correct offset and size for syncing */
4955 4954 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
4956 4955 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
4957 4956 return;
4958 4957
4959 4958 intmask_org = arcmsr_disable_allintr(acb);
4960 4959
4961 4960 for (;;) {
4962 4961 index = phbbmu->doneq_index;
4963 4962 if ((flag_ccb = phbbmu->done_qbuffer[index]) == 0) {
4964 4963 if (poll_ccb_done) {
4965 4964 /* no CCB left in the chip FIFO to complete */
4966 4965 break;
4967 4966 } else {
4968 4967 drv_usecwait(25000);
4969 4968 if ((poll_count > 100) && (poll_ccb != NULL))
4970 4969 break;
4971 4970 if (acb->ccboutstandingcount == 0)
4972 4971 break;
4973 4972 poll_count++;
4974 4973 /* clear doorbell interrupt */
4975 4974 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
4976 4975 &phbbmu->hbb_doorbell->iop2drv_doorbell,
4977 4976 ARCMSR_DOORBELL_INT_CLEAR_PATTERN);
4978 4977 }
4979 4978 }
4980 4979
4981 4980 phbbmu->done_qbuffer[index] = 0;
4982 4981 index++;
4983 4982 /* if last index number set it to 0 */
4984 4983 index %= ARCMSR_MAX_HBB_POSTQUEUE;
4985 4984 phbbmu->doneq_index = index;
4987 4986 /* frame must be 32 bytes aligned */
4988 4987 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
4989 4988
4990 4989 /* check if command done with no error */
4991 4990 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
4992 4991 B_TRUE : B_FALSE;
4993 4992
4994 4993 if (poll_ccb != NULL)
4995 4994 poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
4996 4995 if (ccb->acb != acb) {
4997 4996 arcmsr_warn(acb, "ccb got the wrong acb!");
4998 4997 continue;
4999 4998 }
5000 4999 if (ccb->ccb_state != ARCMSR_CCB_START) {
5001 5000 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
5002 5001 ccb->ccb_state |= ARCMSR_CCB_BACK;
5003 5002 ccb->pkt->pkt_reason = CMD_ABORTED;
5004 5003 ccb->pkt->pkt_statistics |= STAT_ABORTED;
5005 5004 arcmsr_ccb_complete(ccb, 1);
5006 5005 continue;
5007 5006 }
5008 5007 arcmsr_report_ccb_state(acb, ccb, error);
5009 5008 arcmsr_warn(acb,
5010 5009 "polling op got unexpect ccb command done");
5011 5010 continue;
5012 5011 }
5013 5012 arcmsr_report_ccb_state(acb, ccb, error);
5014 5013 } /* drain reply FIFO */
5015 5014 arcmsr_enable_allintr(acb, intmask_org);
5016 5015 }
5017 5016
5018 5017
5019 5018 static void
5020 5019 arcmsr_polling_hbc_ccbdone(struct ACB *acb, struct CCB *poll_ccb)
5021 5020 {
5022 5021
5023 5022 struct HBC_msgUnit *phbcmu;
5024 5023 struct CCB *ccb;
5025 5024 boolean_t error;
5026 5025 uint32_t ccb_cdb_phy;
5027 5026 uint32_t flag_ccb, intmask_org;
5028 5027 boolean_t poll_ccb_done = B_FALSE;
5029 5028 uint32_t poll_count = 0;
5030 5029
5031 5030
5032 5031 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5033 5032
5034 5033 polling_ccb_retry:
5035 5034
5036 5035 /* Use correct offset and size for syncing */
5037 5036 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5038 5037 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5039 5038 return;
5040 5039
5041 5040 intmask_org = arcmsr_disable_allintr(acb);
5042 5041
5043 5042 for (;;) {
5044 5043 if (!(CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5045 5044 &phbcmu->host_int_status) &
5046 5045 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)) {
5047 5046
5048 5047 if (poll_ccb_done) {
5049 5048 /* no CCB left in the chip FIFO to complete */
5050 5049 break;
5051 5050 } else {
5052 5051 drv_usecwait(25000);
5053 5052 if ((poll_count > 100) && (poll_ccb != NULL)) {
5054 5053 break;
5055 5054 }
5056 5055 if (acb->ccboutstandingcount == 0) {
5057 5056 break;
5058 5057 }
5059 5058 poll_count++;
5060 5059 }
5061 5060 }
5062 5061 flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5063 5062 &phbcmu->outbound_queueport_low);
5064 5063 /* frame must be 32 bytes aligned */
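/*
 * A reasonable reading of the 0xFFFFFFF0 mask: on type C chips the
 * reply word carries the CCB's low 32-bit bus address with status
 * flags in the low four bits, so the address is recovered by masking
 * rather than by shifting a frame tag as on types A and B.
 */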
5065 5064 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
5066 5065 /* the CDB is the first field of the CCB */
5067 5066 ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
5068 5067
5069 5068 /* check if command done with no error */
5070 5069 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
5071 5070 B_TRUE : B_FALSE;
5072 5071 if (poll_ccb != NULL)
5073 5072 poll_ccb_done = (ccb == poll_ccb) ? B_TRUE : B_FALSE;
5074 5073
5075 5074 if (ccb->acb != acb) {
5076 5075 arcmsr_warn(acb, "ccb got the wrong acb!");
5077 5076 continue;
5078 5077 }
5079 5078 if (ccb->ccb_state != ARCMSR_CCB_START) {
5080 5079 if (ccb->ccb_state & ARCMSR_ABNORMAL_MASK) {
5081 5080 ccb->ccb_state |= ARCMSR_CCB_BACK;
5082 5081 ccb->pkt->pkt_reason = CMD_ABORTED;
5083 5082 ccb->pkt->pkt_statistics |= STAT_ABORTED;
5084 5083 arcmsr_ccb_complete(ccb, 1);
5085 5084 continue;
5086 5085 }
5087 5086 arcmsr_report_ccb_state(acb, ccb, error);
5088 5087 arcmsr_warn(acb,
5089 5088 "polling op got unexpected ccb command done");
5090 5089 continue;
5091 5090 }
5092 5091 arcmsr_report_ccb_state(acb, ccb, error);
5093 5092 } /* drain reply FIFO */
5094 5093 arcmsr_enable_allintr(acb, intmask_org);
5095 5094 }
5096 5095
5097 5096
5098 5097 /*
5099 5098 * Function: arcmsr_hba_hardware_reset()
5100 5099 * Bug fix for Intel IOP chips that could otherwise hang the
5101 5100 * firmware and panic the kernel.
5102 5101 */
5103 5102 static void
5104 5103 arcmsr_hba_hardware_reset(struct ACB *acb)
5105 5104 {
5106 5105 struct HBA_msgUnit *phbamu;
5107 5106 uint8_t value[64];
5108 5107 int i;
5109 5108
5110 5109 phbamu = (struct HBA_msgUnit *)acb->pmu;
5111 5110 /* backup pci config data */
5112 5111 for (i = 0; i < 64; i++) {
5113 5112 value[i] = pci_config_get8(acb->pci_acc_handle, i);
5114 5113 }
5115 5114 /* hardware reset signal */
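/*
 * The reset method is device-specific: the ARC-1680 is kicked through
 * a chip register, while other models are reset through PCI config
 * offset 0x84, presumably a vendor-specific reset register.
 */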
5116 5115 if ((PCI_DEVICE_ID_ARECA_1680 ==
5117 5116 pci_config_get16(acb->pci_acc_handle, PCI_CONF_DEVID))) {
5118 5117 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5119 5118 &phbamu->reserved1[0], 0x00000003);
5120 5119 } else {
5121 5120 pci_config_put8(acb->pci_acc_handle, 0x84, 0x20);
5122 5121 }
5123 5122 drv_usecwait(1000000);
5124 5123 /* write back pci config data */
5125 5124 for (i = 0; i < 64; i++) {
5126 5125 pci_config_put8(acb->pci_acc_handle, i, value[i]);
5127 5126 }
5128 5127 drv_usecwait(1000000);
5129 5128 }
5130 5129
5131 5130 /*
5132 5131 * Function: arcmsr_abort_host_command
5133 5132 */
5134 5133 static uint8_t
5135 5134 arcmsr_abort_host_command(struct ACB *acb)
5136 5135 {
5137 5136 uint8_t rtnval = 0;
5138 5137
5139 5138 switch (acb->adapter_type) {
5140 5139 case ACB_ADAPTER_TYPE_A:
5141 5140 rtnval = arcmsr_abort_hba_allcmd(acb);
5142 5141 break;
5143 5142 case ACB_ADAPTER_TYPE_B:
5144 5143 rtnval = arcmsr_abort_hbb_allcmd(acb);
5145 5144 break;
5146 5145 case ACB_ADAPTER_TYPE_C:
5147 5146 rtnval = arcmsr_abort_hbc_allcmd(acb);
5148 5147 break;
5149 5148 }
5150 5149 return (rtnval);
5151 5150 }
5152 5151
5153 5152 /*
5154 5153 * Function: arcmsr_handle_iop_bus_hold
5155 5154 */
5156 5155 static void
5157 5156 arcmsr_handle_iop_bus_hold(struct ACB *acb)
5158 5157 {
5159 5158
5160 5159 switch (acb->adapter_type) {
5161 5160 case ACB_ADAPTER_TYPE_A:
5162 5161 {
5163 5162 struct HBA_msgUnit *phbamu;
5164 5163 int retry_count = 0;
5165 5164
5166 5165 acb->timeout_count = 0;
5167 5166 phbamu = (struct HBA_msgUnit *)acb->pmu;
5168 5167 arcmsr_hba_hardware_reset(acb);
5169 5168 acb->acb_flags &= ~ACB_F_IOP_INITED;
5170 5169 sleep_again:
5171 5170 drv_usecwait(1000000);
5172 5171 if ((CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5173 5172 &phbamu->outbound_msgaddr1) &
5174 5173 ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
5175 5174 if (retry_count > 60) {
5176 5175 arcmsr_warn(acb,
5177 5176 "waiting for hardware"
5178 5177 "bus reset return, RETRY TERMINATED!!");
5179 5178 return;
5180 5179 }
5181 5180 retry_count++;
5182 5181 goto sleep_again;
5183 5182 }
5184 5183 arcmsr_iop_init(acb);
5185 5184 break;
5186 5185 }
5187 5186
5188 5187 }
5189 5188 }
5190 5189
5191 5190 static void
5192 5191 arcmsr_iop2drv_data_wrote_handle(struct ACB *acb) {
5193 5192
5194 5193 struct QBUFFER *prbuffer;
5195 5194 uint8_t *pQbuffer;
5196 5195 uint8_t *iop_data;
5197 5196 int my_empty_len, iop_len;
5198 5197 int rqbuf_firstidx, rqbuf_lastidx;
5199 5198
5200 5199 /* check whether this IOP data would overflow my rqbuffer */
5201 5200 rqbuf_lastidx = acb->rqbuf_lastidx;
5202 5201 rqbuf_firstidx = acb->rqbuf_firstidx;
5203 5202 prbuffer = arcmsr_get_iop_rqbuffer(acb);
5204 5203 iop_data = (uint8_t *)prbuffer->data;
5205 5204 iop_len = prbuffer->data_len;
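/*
 * Free space in the power-of-two ring buffer, keeping one slot unused
 * so that a full buffer is distinguishable from an empty one; e.g.
 * with firstidx == lastidx (empty) this yields ARCMSR_MAX_QBUFFER - 1.
 */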
5206 5205 my_empty_len = (rqbuf_firstidx-rqbuf_lastidx - 1) &
5207 5206 (ARCMSR_MAX_QBUFFER - 1);
5208 5207
5209 5208 if (my_empty_len >= iop_len) {
5210 5209 while (iop_len > 0) {
5211 5210 pQbuffer = &acb->rqbuffer[rqbuf_lastidx];
5212 5211 (void) memcpy(pQbuffer, iop_data, 1);
5213 5212 rqbuf_lastidx++;
5214 5213 /* if last index number set it to 0 */
5215 5214 rqbuf_lastidx %= ARCMSR_MAX_QBUFFER;
5216 5215 iop_data++;
5217 5216 iop_len--;
5218 5217 }
5219 5218 acb->rqbuf_lastidx = rqbuf_lastidx;
5220 5219 arcmsr_iop_message_read(acb);
5221 5220 /* signature, let IOP know data has been read */
5222 5221 } else {
5223 5222 acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
5224 5223 }
5225 5224 }
5226 5225
5227 5226
5228 5227
5229 5228 static void
5230 5229 arcmsr_iop2drv_data_read_handle(struct ACB *acb) {
5231 5230
5232 5231 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READ;
5233 5232 /*
5234 5233 * check if there are any mail packages from the user-space program
5235 5234 * in my post bag; now is the time to send them to Areca's firmware
5236 5235 */
5237 5236
5238 5237 if (acb->wqbuf_firstidx != acb->wqbuf_lastidx) {
5239 5238
5240 5239 uint8_t *pQbuffer;
5241 5240 struct QBUFFER *pwbuffer;
5242 5241 uint8_t *iop_data;
5243 5242 int allxfer_len = 0;
5244 5243
5245 5244 acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READ);
5246 5245 pwbuffer = arcmsr_get_iop_wqbuffer(acb);
5247 5246 iop_data = (uint8_t *)pwbuffer->data;
5248 5247
5249 5248 while ((acb->wqbuf_firstidx != acb->wqbuf_lastidx) &&
5250 5249 (allxfer_len < 124)) {
5251 5250 pQbuffer = &acb->wqbuffer[acb->wqbuf_firstidx];
5252 5251 (void) memcpy(iop_data, pQbuffer, 1);
5253 5252 acb->wqbuf_firstidx++;
5254 5253 /* if last index number set it to 0 */
5255 5254 acb->wqbuf_firstidx %= ARCMSR_MAX_QBUFFER;
5256 5255 iop_data++;
5257 5256 allxfer_len++;
5258 5257 }
5259 5258 pwbuffer->data_len = allxfer_len;
5260 5259 /*
5261 5260 * push inbound doorbell, tell iop driver data write ok
5262 5261 * await reply on next hwinterrupt for next Qbuffer post
5263 5262 */
5264 5263 arcmsr_iop_message_wrote(acb);
5265 5264 }
5266 5265
5267 5266 if (acb->wqbuf_firstidx == acb->wqbuf_lastidx)
5268 5267 acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
5269 5268 }
5270 5269
5271 5270
5272 5271 static void
5273 5272 arcmsr_hba_doorbell_isr(struct ACB *acb)
5274 5273 {
5275 5274 uint32_t outbound_doorbell;
5276 5275 struct HBA_msgUnit *phbamu;
5277 5276
5278 5277 phbamu = (struct HBA_msgUnit *)acb->pmu;
5279 5278
5280 5279 /*
5281 5280 * Maybe we need to check whether wrqbuffer_lock is held here.
5282 5281 * DOORBELL: ding! dong!
5283 5282 * Check if there is any mail to pick up from the firmware.
5284 5283 */
5285 5284
5286 5285 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5287 5286 &phbamu->outbound_doorbell);
5288 5287 /* clear doorbell interrupt */
5289 5288 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5290 5289 &phbamu->outbound_doorbell, outbound_doorbell);
5291 5290
5292 5291 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
5293 5292 arcmsr_iop2drv_data_wrote_handle(acb);
5294 5293
5295 5294
5296 5295 if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
5297 5296 arcmsr_iop2drv_data_read_handle(acb);
5298 5297 }
5299 5298
5300 5299
5301 5300
5302 5301 static void
5303 5302 arcmsr_hbc_doorbell_isr(struct ACB *acb)
5304 5303 {
5305 5304 uint32_t outbound_doorbell;
5306 5305 struct HBC_msgUnit *phbcmu;
5307 5306
5308 5307 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5309 5308
5310 5309 /*
5311 5310 * Maybe we need to check whether wrqbuffer_lock is held here.
5312 5311 * DOORBELL: ding! dong!
5313 5312 * Check if there is any mail to pick up from the firmware.
5314 5313 */
5315 5314
5316 5315 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5317 5316 &phbcmu->outbound_doorbell);
5318 5317 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5319 5318 &phbcmu->outbound_doorbell_clear,
5320 5319 outbound_doorbell); /* clear interrupt */
5321 5320 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
5322 5321 arcmsr_iop2drv_data_wrote_handle(acb);
5323 5322 }
5324 5323 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
5325 5324 arcmsr_iop2drv_data_read_handle(acb);
5326 5325 }
5327 5326 if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
5328 5327 /* messenger of "driver to iop commands" */
5329 5328 arcmsr_hbc_message_isr(acb);
5330 5329 }
5331 5330 }
5332 5331
5333 5332
5334 5333 static void
5335 5334 arcmsr_hba_message_isr(struct ACB *acb)
5336 5335 {
5337 5336 struct HBA_msgUnit *phbamu = (struct HBA_msgUnit *)acb->pmu;
5338 5337 uint32_t *signature = (&phbamu->msgcode_rwbuffer[0]);
5339 5338 uint32_t outbound_message;
5340 5339
5341 5340 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5342 5341 &phbamu->outbound_intstatus, ARCMSR_MU_OUTBOUND_MESSAGE0_INT);
5343 5342
5344 5343 outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5345 5344 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5346 5345 if ((ddi_taskq_dispatch(acb->taskq,
5347 5346 (void (*)(void *))arcmsr_dr_handle,
5348 5347 acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5349 5348 arcmsr_warn(acb, "DR task start failed");
5350 5349 }
5351 5350 }
5352 5351
5353 5352 static void
5354 5353 arcmsr_hbb_message_isr(struct ACB *acb)
5355 5354 {
5356 5355 struct HBB_msgUnit *phbbmu = (struct HBB_msgUnit *)acb->pmu;
5357 5356 uint32_t *signature = (&phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0]);
5358 5357 uint32_t outbound_message;
5359 5358
5360 5359 /* clear interrupts */
5361 5360 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5362 5361 &phbbmu->hbb_doorbell->iop2drv_doorbell,
5363 5362 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
5364 5363 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5365 5364 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5366 5365 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5367 5366
5368 5367 outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5369 5368 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5370 5369 if ((ddi_taskq_dispatch(acb->taskq,
5371 5370 (void (*)(void *))arcmsr_dr_handle,
5372 5371 acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5373 5372 arcmsr_warn(acb, "DR task start failed");
5374 5373 }
5375 5374 }
5376 5375
5377 5376 static void
5378 5377 arcmsr_hbc_message_isr(struct ACB *acb)
5379 5378 {
5380 5379 struct HBC_msgUnit *phbcmu = (struct HBC_msgUnit *)acb->pmu;
5381 5380 uint32_t *signature = (&phbcmu->msgcode_rwbuffer[0]);
5382 5381 uint32_t outbound_message;
5383 5382
5384 5383 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5385 5384 &phbcmu->outbound_doorbell_clear,
5386 5385 ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR);
5387 5386
5388 5387 outbound_message = CHIP_REG_READ32(acb->reg_mu_acc_handle0, signature);
5389 5388 if (outbound_message == ARCMSR_SIGNATURE_GET_CONFIG)
5390 5389 if ((ddi_taskq_dispatch(acb->taskq,
5391 5390 (void (*)(void *))arcmsr_dr_handle,
5392 5391 acb, DDI_NOSLEEP)) != DDI_SUCCESS) {
5393 5392 arcmsr_warn(acb, "DR task start failed");
5394 5393 }
5395 5394 }
5396 5395
5397 5396
5398 5397 static void
5399 5398 arcmsr_hba_postqueue_isr(struct ACB *acb)
5400 5399 {
5401 5400
5402 5401 struct HBA_msgUnit *phbamu;
5403 5402 struct CCB *ccb;
5404 5403 uint32_t flag_ccb;
5405 5404 boolean_t error;
5406 5405
5407 5406 phbamu = (struct HBA_msgUnit *)acb->pmu;
5408 5407
5409 5408 /* areca cdb command done */
5410 5409 /* Use correct offset and size for syncing */
5411 5410 (void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5412 5411 DDI_DMA_SYNC_FORKERNEL);
5413 5412
5414 5413 while ((flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5415 5414 &phbamu->outbound_queueport)) != 0xFFFFFFFF) {
5416 5415 /* frame must be 32 bytes aligned */
5417 5416 ccb = NumToPtr((acb->vir2phy_offset+(flag_ccb << 5)));
5418 5417 /* check if command done with no error */
5419 5418 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5420 5419 B_TRUE : B_FALSE;
5421 5420 arcmsr_drain_donequeue(acb, ccb, error);
5422 5421 } /* drain reply FIFO */
5423 5422 }
5424 5423
5425 5424
5426 5425 static void
5427 5426 arcmsr_hbb_postqueue_isr(struct ACB *acb)
5428 5427 {
5429 5428 struct HBB_msgUnit *phbbmu;
5430 5429 struct CCB *ccb;
5431 5430 uint32_t flag_ccb;
5432 5431 boolean_t error;
5433 5432 int index;
5434 5433
5435 5434 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5436 5435
5437 5436 /* areca cdb command done */
5438 5437 index = phbbmu->doneq_index;
5439 5438 if (ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5440 5439 DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
5441 5440 return;
5442 5441 while ((flag_ccb = phbbmu->done_qbuffer[index]) != 0) {
5443 5442 phbbmu->done_qbuffer[index] = 0;
5444 5443 /* frame must be 32 bytes aligned */
5445 5444
5446 5445 /* the CDB is the first field of the CCB */
5447 5446 ccb = NumToPtr((acb->vir2phy_offset + (flag_ccb << 5)));
5448 5447
5449 5448 /* check if command done with no error */
5450 5449 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
5451 5450 B_TRUE : B_FALSE;
5452 5451 arcmsr_drain_donequeue(acb, ccb, error);
5453 5452 index++;
5454 5453 /* if last index number set it to 0 */
5455 5454 index %= ARCMSR_MAX_HBB_POSTQUEUE;
5456 5455 phbbmu->doneq_index = index;
5457 5456 } /* drain reply FIFO */
5458 5457 }
5459 5458
5460 5459
5461 5460 static void
5462 5461 arcmsr_hbc_postqueue_isr(struct ACB *acb)
5463 5462 {
5464 5463
5465 5464 struct HBC_msgUnit *phbcmu;
5466 5465 struct CCB *ccb;
5467 5466 uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
5468 5467 boolean_t error;
5469 5468
5470 5469 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5471 5470 /* areca cdb command done */
5472 5471 /* Use correct offset and size for syncing */
5473 5472 (void) ddi_dma_sync(acb->ccbs_pool_handle, 0, 0,
5474 5473 DDI_DMA_SYNC_FORKERNEL);
5475 5474
5476 5475 while (CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5477 5476 &phbcmu->host_int_status) &
5478 5477 ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
5479 5478 /* check if command done with no error */
5480 5479 flag_ccb = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5481 5480 &phbcmu->outbound_queueport_low);
5482 5481 /* frame must be 32 bytes aligned */
5483 5482 ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
5484 5483
5485 5484 /* the CDB is the first field of the CCB */
5486 5485 ccb = NumToPtr((acb->vir2phy_offset + ccb_cdb_phy));
5487 5486
5488 5487 error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
5489 5488 B_TRUE : B_FALSE;
5490 5489 /* check if command done with no error */
5491 5490 arcmsr_drain_donequeue(acb, ccb, error);
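/*
 * Bound the time spent in the ISR: after ARCMSR_HBC_ISR_THROTTLING_LEVEL
 * completions, tell the IOP to throttle the post queue and bail out;
 * any remaining replies are presumably handled on a later interrupt.
 */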
5492 5491 if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
5493 5492 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5494 5493 &phbcmu->inbound_doorbell,
5495 5494 ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING);
5496 5495 break;
5497 5496 }
5498 5497 throttling++;
5499 5498 } /* drain reply FIFO */
5500 5499 }
5501 5500
5502 5501
5503 5502 static uint_t
5504 5503 arcmsr_handle_hba_isr(struct ACB *acb) {
5505 5504
5506 5505 uint32_t outbound_intstatus;
5507 5506 struct HBA_msgUnit *phbamu;
5508 5507
5509 5508 phbamu = (struct HBA_msgUnit *)acb->pmu;
5510 5509
5511 5510 outbound_intstatus = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5512 5511 &phbamu->outbound_intstatus) & acb->outbound_int_enable;
5513 5512
5514 5513 if (outbound_intstatus == 0) /* it must be a shared irq */
5515 5514 return (DDI_INTR_UNCLAIMED);
5516 5515
5517 5516 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0, &phbamu->outbound_intstatus,
5518 5517 outbound_intstatus); /* clear interrupt */
5519 5518
5520 5519 /* MU doorbell interrupts */
5521 5520
5522 5521 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
5523 5522 arcmsr_hba_doorbell_isr(acb);
5524 5523
5525 5524 /* MU post queue interrupts */
5526 5525 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
5527 5526 arcmsr_hba_postqueue_isr(acb);
5528 5527
5529 5528 if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
5530 5529 arcmsr_hba_message_isr(acb);
5531 5530 }
5532 5531
5533 5532 return (DDI_INTR_CLAIMED);
5534 5533 }
5535 5534
5536 5535
5537 5536 static uint_t
5538 5537 arcmsr_handle_hbb_isr(struct ACB *acb) {
5539 5538
5540 5539 uint32_t outbound_doorbell;
5541 5540 struct HBB_msgUnit *phbbmu;
5542 5541
5543 5542
5544 5543 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5545 5544
5546 5545 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5547 5546 &phbbmu->hbb_doorbell->iop2drv_doorbell) & acb->outbound_int_enable;
5548 5547
5549 5548 if (outbound_doorbell == 0) /* it must be a shared irq */
5550 5549 return (DDI_INTR_UNCLAIMED);
5551 5550
5552 5551 /* clear doorbell interrupt */
5553 5552 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5554 5553 &phbbmu->hbb_doorbell->iop2drv_doorbell, ~outbound_doorbell);
5555 5554 /* read back to flush the doorbell write (wait a cycle) */
5556 5555 (void) CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5557 5556 &phbbmu->hbb_doorbell->iop2drv_doorbell);
5558 5557 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5559 5558 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5560 5559 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5561 5560
5562 5561 /* MU ioctl transfer doorbell interrupts */
5563 5562 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
5564 5563 arcmsr_iop2drv_data_wrote_handle(acb);
5565 5564
5566 5565 if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
5567 5566 arcmsr_iop2drv_data_read_handle(acb);
5568 5567
5569 5568 /* MU post queue interrupts */
5570 5569 if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
5571 5570 arcmsr_hbb_postqueue_isr(acb);
5572 5571
5573 5572 /* MU message interrupt */
5574 5573
5575 5574 if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
5576 5575 arcmsr_hbb_message_isr(acb);
5577 5576 }
5578 5577
5579 5578 return (DDI_INTR_CLAIMED);
5580 5579 }
5581 5580
5582 5581 static uint_t
5583 5582 arcmsr_handle_hbc_isr(struct ACB *acb)
5584 5583 {
5585 5584 uint32_t host_interrupt_status;
5586 5585 struct HBC_msgUnit *phbcmu;
5587 5586
5588 5587 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5589 5588 /* check outbound intstatus */
5590 5589 host_interrupt_status =
5591 5590 CHIP_REG_READ32(acb->reg_mu_acc_handle0, &phbcmu->host_int_status);
5592 5591 if (host_interrupt_status == 0) /* it must be a shared irq */
5593 5592 return (DDI_INTR_UNCLAIMED);
5594 5593 /* MU ioctl transfer doorbell interrupts */
5595 5594 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
5596 5595 /* messenger of "ioctl message read write" */
5597 5596 arcmsr_hbc_doorbell_isr(acb);
5598 5597 }
5599 5598 /* MU post queue interrupts */
5600 5599 if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
5601 5600 /* messenger of "scsi commands" */
5602 5601 arcmsr_hbc_postqueue_isr(acb);
5603 5602 }
5604 5603 return (DDI_INTR_CLAIMED);
5605 5604 }
5606 5605
5607 5606 static uint_t
5608 5607 arcmsr_intr_handler(caddr_t arg, caddr_t arg2)
5609 5608 {
5610 5609 struct ACB *acb = (void *)arg;
5611 5610 struct CCB *ccb;
5612 5611 uint_t retrn = DDI_INTR_UNCLAIMED;
5613 5612 _NOTE(ARGUNUSED(arg2))
5614 5613
5615 5614 mutex_enter(&acb->isr_mutex);
5616 5615 switch (acb->adapter_type) {
5617 5616 case ACB_ADAPTER_TYPE_A:
5618 5617 retrn = arcmsr_handle_hba_isr(acb);
5619 5618 break;
5620 5619
5621 5620 case ACB_ADAPTER_TYPE_B:
5622 5621 retrn = arcmsr_handle_hbb_isr(acb);
5623 5622 break;
5624 5623
5625 5624 case ACB_ADAPTER_TYPE_C:
5626 5625 retrn = arcmsr_handle_hbc_isr(acb);
5627 5626 break;
5628 5627
5629 5628 default:
5630 5629 /* We should never be here */
5631 5630 ASSERT(0);
5632 5631 break;
5633 5632 }
5634 5633 mutex_exit(&acb->isr_mutex);
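/*
 * Complete finished CCBs only after dropping isr_mutex, since the
 * completion callbacks may re-enter the driver and must not block
 * the interrupt path.
 */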
5635 5634 while ((ccb = arcmsr_get_complete_ccb_from_list(acb)) != NULL) {
5636 5635 arcmsr_ccb_complete(ccb, 1);
5637 5636 }
5638 5637 return (retrn);
5639 5638 }
5640 5639
5641 5640
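/*
 * Busy-poll the per-chip "firmware OK" flag.  Note that there is no
 * timeout here: arcmsr_iop_init() calls this with all outbound
 * interrupts masked and relies on the firmware eventually coming ready.
 */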
5642 5641 static void
5643 5642 arcmsr_wait_firmware_ready(struct ACB *acb) {
5644 5643
5645 5644 uint32_t firmware_state;
5646 5645
5647 5646 firmware_state = 0;
5648 5647
5649 5648 switch (acb->adapter_type) {
5650 5649 case ACB_ADAPTER_TYPE_A:
5651 5650 {
5652 5651 struct HBA_msgUnit *phbamu;
5653 5652 phbamu = (struct HBA_msgUnit *)acb->pmu;
5654 5653 do {
5655 5654 firmware_state =
5656 5655 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5657 5656 &phbamu->outbound_msgaddr1);
5658 5657 } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK)
5659 5658 == 0);
5660 5659 break;
5661 5660 }
5662 5661
5663 5662 case ACB_ADAPTER_TYPE_B:
5664 5663 {
5665 5664 struct HBB_msgUnit *phbbmu;
5666 5665 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5667 5666 do {
5668 5667 firmware_state =
5669 5668 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5670 5669 &phbbmu->hbb_doorbell->iop2drv_doorbell);
5671 5670 } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
5672 5671 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5673 5672 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5674 5673 ARCMSR_DRV2IOP_END_OF_INTERRUPT);
5675 5674 break;
5676 5675 }
5677 5676
5678 5677 case ACB_ADAPTER_TYPE_C:
5679 5678 {
5680 5679 struct HBC_msgUnit *phbcmu;
5681 5680 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5682 5681 do {
5683 5682 firmware_state =
5684 5683 CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5685 5684 &phbcmu->outbound_msgaddr1);
5686 5685 } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK)
5687 5686 == 0);
5688 5687 break;
5689 5688 }
5690 5689
5691 5690 }
5692 5691 }
5693 5692
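/*
 * Drain any stale doorbell state so the ioctl message handshake starts
 * clean, then ring the per-chip "driver data read OK" doorbell so the
 * IOP may post fresh data.
 */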
5694 5693 static void
5695 5694 arcmsr_clear_doorbell_queue_buffer(struct ACB *acb)
5696 5695 {
5697 5696 switch (acb->adapter_type) {
5698 5697 case ACB_ADAPTER_TYPE_A: {
5699 5698 struct HBA_msgUnit *phbamu;
5700 5699 uint32_t outbound_doorbell;
5701 5700
5702 5701 phbamu = (struct HBA_msgUnit *)acb->pmu;
5703 5702 /* empty doorbell Qbuffer if the doorbell rang */
5704 5703 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5705 5704 &phbamu->outbound_doorbell);
5706 5705 /* clear doorbell interrupt */
5707 5706 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5708 5707 &phbamu->outbound_doorbell, outbound_doorbell);
5709 5708 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5710 5709 &phbamu->inbound_doorbell,
5711 5710 ARCMSR_INBOUND_DRIVER_DATA_READ_OK);
5712 5711 break;
5713 5712 }
5714 5713
5715 5714 case ACB_ADAPTER_TYPE_B: {
5716 5715 struct HBB_msgUnit *phbbmu;
5717 5716
5718 5717 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5719 5718 /* clear interrupt and message state */
5720 5719 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5721 5720 &phbbmu->hbb_doorbell->iop2drv_doorbell,
5722 5721 ARCMSR_MESSAGE_INT_CLEAR_PATTERN);
5723 5722 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5724 5723 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5725 5724 ARCMSR_DRV2IOP_DATA_READ_OK);
5726 5725 /* let IOP know data has been read */
5727 5726 break;
5728 5727 }
5729 5728
5730 5729 case ACB_ADAPTER_TYPE_C: {
5731 5730 struct HBC_msgUnit *phbcmu;
5732 5731 uint32_t outbound_doorbell;
5733 5732
5734 5733 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5735 5734 /* empty doorbell Qbuffer if the doorbell rang */
5736 5735 outbound_doorbell = CHIP_REG_READ32(acb->reg_mu_acc_handle0,
5737 5736 &phbcmu->outbound_doorbell);
5738 5737 /* clear outbound doorbell isr */
5739 5738 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5740 5739 &phbcmu->outbound_doorbell_clear, outbound_doorbell);
5741 5740 /* let IOP know data has been read */
5742 5741 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5743 5742 &phbcmu->inbound_doorbell,
5744 5743 ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK);
5745 5744 break;
5746 5745 }
5747 5746
5748 5747 }
5749 5748 }
5750 5749
5751 5750
5752 5751 static uint32_t
5753 5752 arcmsr_iop_confirm(struct ACB *acb) {
5754 5753
5755 5754 uint64_t cdb_phyaddr;
5756 5755 uint32_t cdb_phyaddr_hi32;
5757 5756
5758 5757 /*
5759 5758 * Here we need to tell the IOP 331 the high 32 bits of our
5760 5759 * CCB area (freeccb.HighPart), if they are non-zero.
5761 5760 */
5762 5761 cdb_phyaddr = acb->ccb_cookie.dmac_laddress;
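/*
 * Extract the high 32 bits of the CCB pool address; the split
 * ">> 16 >> 16" form presumably keeps the shift well-defined even on
 * builds where the cookie address is only 32 bits wide.
 */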
5763 5762 cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
5764 5763 acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
5765 5764 switch (acb->adapter_type) {
5766 5765 case ACB_ADAPTER_TYPE_A:
5767 5766 if (cdb_phyaddr_hi32 != 0) {
5768 5767 struct HBA_msgUnit *phbamu;
5769 5768
5770 5769 phbamu = (struct HBA_msgUnit *)acb->pmu;
5771 5770 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5772 5771 &phbamu->msgcode_rwbuffer[0],
5773 5772 ARCMSR_SIGNATURE_SET_CONFIG);
5774 5773 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5775 5774 &phbamu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
5776 5775 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5777 5776 &phbamu->inbound_msgaddr0,
5778 5777 ARCMSR_INBOUND_MESG0_SET_CONFIG);
5779 5778 if (!arcmsr_hba_wait_msgint_ready(acb)) {
5780 5779 arcmsr_warn(acb,
5781 5780 "timeout setting ccb "
5782 5781 "high physical address");
5783 5782 return (FALSE);
5784 5783 }
5785 5784 }
5786 5785 break;
5787 5786
5788 5787 /* if adapter is type B, set window of "post command queue" */
5789 5788 case ACB_ADAPTER_TYPE_B: {
5790 5789 uint32_t post_queue_phyaddr;
5791 5790 struct HBB_msgUnit *phbbmu;
5792 5791
5793 5792 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5794 5793 phbbmu->postq_index = 0;
5795 5794 phbbmu->doneq_index = 0;
5796 5795 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5797 5796 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5798 5797 ARCMSR_MESSAGE_SET_POST_WINDOW);
5799 5798
5800 5799 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5801 5800 arcmsr_warn(acb, "timeout setting post command "
5802 5801 "queue window");
5803 5802 return (FALSE);
5804 5803 }
5805 5804
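/*
 * The post queue address works out to the CCB pool base plus the pool
 * size plus offsetof(post_qbuffer), i.e. the HBB_msgUnit presumably
 * sits in the same DMA area directly after the CCB pool.  The done
 * queue follows 1056 bytes later, 1056 being the (256+8)*4 queue size
 * noted in the comments below.
 */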
5806 5805 post_queue_phyaddr = (uint32_t)cdb_phyaddr +
5807 5806 ARCMSR_MAX_FREECCB_NUM * P2ROUNDUP(sizeof (struct CCB), 32)
5808 5807 + offsetof(struct HBB_msgUnit, post_qbuffer);
5809 5808 /* driver "set config" signature */
5810 5809 CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5811 5810 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[0],
5812 5811 ARCMSR_SIGNATURE_SET_CONFIG);
5813 5812 /* normal should be zero */
5814 5813 CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5815 5814 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[1],
5816 5815 cdb_phyaddr_hi32);
5817 5816 /* postQ size (256+8)*4 */
5818 5817 CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5819 5818 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[2],
5820 5819 post_queue_phyaddr);
5821 5820 /* doneQ size (256+8)*4 */
5822 5821 CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5823 5822 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[3],
5824 5823 post_queue_phyaddr+1056);
5825 5824 /* ccb maxQ size must be (256+8)*4 = 1056 */
5826 5825 CHIP_REG_WRITE32(acb->reg_mu_acc_handle1,
5827 5826 &phbbmu->hbb_rwbuffer->msgcode_rwbuffer[4], 1056);
5828 5827 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5829 5828 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5830 5829 ARCMSR_MESSAGE_SET_CONFIG);
5831 5830
5832 5831 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5833 5832 arcmsr_warn(acb,
5834 5833 "timeout setting command queue window");
5835 5834 return (FALSE);
5836 5835 }
5837 5836 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5838 5837 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5839 5838 ARCMSR_MESSAGE_START_DRIVER_MODE);
5840 5839
5841 5840 if (!arcmsr_hbb_wait_msgint_ready(acb)) {
5842 5841 arcmsr_warn(acb, "timeout in 'start driver mode'");
5843 5842 return (FALSE);
5844 5843 }
5845 5844 break;
5846 5845 }
5847 5846
5848 5847 case ACB_ADAPTER_TYPE_C:
5849 5848 if (cdb_phyaddr_hi32 != 0) {
5850 5849 struct HBC_msgUnit *phbcmu;
5851 5850
5852 5851 phbcmu = (struct HBC_msgUnit *)acb->pmu;
5853 5852 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5854 5853 &phbcmu->msgcode_rwbuffer[0],
5855 5854 ARCMSR_SIGNATURE_SET_CONFIG);
5856 5855 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5857 5856 &phbcmu->msgcode_rwbuffer[1], cdb_phyaddr_hi32);
5858 5857 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5859 5858 &phbcmu->inbound_msgaddr0,
5860 5859 ARCMSR_INBOUND_MESG0_SET_CONFIG);
5861 5860 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5862 5861 &phbcmu->inbound_doorbell,
5863 5862 ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE);
5864 5863 if (!arcmsr_hbc_wait_msgint_ready(acb)) {
5865 5864 arcmsr_warn(acb, "'set ccb "
5866 5865 "high part physical address' timeout");
5867 5866 return (FALSE);
5868 5867 }
5869 5868 }
5870 5869 break;
5871 5870 }
5872 5871 return (TRUE);
5873 5872 }
5874 5873
5875 5874
5876 5875 /*
5877 5876 * ONLY used for Adapter type B
5878 5877 */
5879 5878 static void
5880 5879 arcmsr_enable_eoi_mode(struct ACB *acb)
5881 5880 {
5882 5881 struct HBB_msgUnit *phbbmu;
5883 5882
5884 5883 phbbmu = (struct HBB_msgUnit *)acb->pmu;
5885 5884
5886 5885 CHIP_REG_WRITE32(acb->reg_mu_acc_handle0,
5887 5886 &phbbmu->hbb_doorbell->drv2iop_doorbell,
5888 5887 ARCMSR_MESSAGE_ACTIVE_EOI_MODE);
5889 5888
5890 5889 if (!arcmsr_hbb_wait_msgint_ready(acb))
5891 5890 arcmsr_warn(acb, "'iop enable eoi mode' timeout");
5892 5891 }
5893 5892
5894 5893 /* initialize the IOP and start background rebuild */
5895 5894 static void
5896 5895 arcmsr_iop_init(struct ACB *acb)
5897 5896 {
5898 5897 uint32_t intmask_org;
5899 5898
5900 5899 /* disable all outbound interrupt */
5901 5900 intmask_org = arcmsr_disable_allintr(acb);
5902 5901 arcmsr_wait_firmware_ready(acb);
5903 5902 (void) arcmsr_iop_confirm(acb);
5904 5903
5905 5904 /* start background rebuild */
5906 5905 switch (acb->adapter_type) {
5907 5906 case ACB_ADAPTER_TYPE_A:
5908 5907 arcmsr_get_hba_config(acb);
5909 5908 arcmsr_start_hba_bgrb(acb);
5910 5909 break;
5911 5910 case ACB_ADAPTER_TYPE_B:
5912 5911 arcmsr_get_hbb_config(acb);
5913 5912 arcmsr_start_hbb_bgrb(acb);
5914 5913 break;
5915 5914 case ACB_ADAPTER_TYPE_C:
5916 5915 arcmsr_get_hbc_config(acb);
5917 5916 arcmsr_start_hbc_bgrb(acb);
5918 5917 break;
5919 5918 }
5920 5919 /* empty doorbell Qbuffer if the doorbell rang */
5921 5920 arcmsr_clear_doorbell_queue_buffer(acb);
5922 5921
5923 5922 if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
5924 5923 arcmsr_enable_eoi_mode(acb);
5925 5924
5926 5925 /* enable outbound Post Queue, outbound doorbell Interrupt */
5927 5926 arcmsr_enable_allintr(acb, intmask_org);
5928 5927 acb->acb_flags |= ACB_F_IOP_INITED;
5929 5928 }
[ 5676 lines elided ]