1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /*
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 */
26
27 /*
28 *
29 * nv_sata is a combo SATA HBA driver for CK804/MCP04 (ck804) and
30 * MCP55/MCP51/MCP61 (mcp5x) based chipsets.
31 *
32 * NCQ
33 * ---
34 *
35 * A portion of the NCQ is in place, but is incomplete. NCQ is disabled
36 * and is likely to be revisited in the future.
37 *
38 *
39 * Power Management
40 * ----------------
41 *
42 * Normally power management would be responsible for ensuring the device
43 * is quiescent and then changing power states to the device, such as
44 * powering down parts or all of the device. mcp5x/ck804 is unique in
45 * that it is only available as part of a larger southbridge chipset, so
46 * removing power to the device isn't possible. Switches to control
47 * power management states D0/D3 in the PCI configuration space appear to
 * be supported but changes to these states are apparently ignored.
49 * The only further PM that the driver _could_ do is shut down the PHY,
50 * but in order to deliver the first rev of the driver sooner than later,
51 * that will be deferred until some future phase.
52 *
53 * Since the driver currently will not directly change any power state to
54 * the device, no power() entry point will be required. However, it is
55 * possible that in ACPI power state S3, aka suspend to RAM, that power
56 * can be removed to the device, and the driver cannot rely on BIOS to
 * have reset any state. For the time being, there are no known
 * non-default configurations that need to be programmed. This judgement
59 * is based on the port of the legacy ata driver not having any such
60 * functionality and based on conversations with the PM team. If such a
61 * restoration is later deemed necessary it can be incorporated into the
62 * DDI_RESUME processing.
63 *
64 */
65
66 #include <sys/scsi/scsi.h>
67 #include <sys/pci.h>
68 #include <sys/byteorder.h>
69 #include <sys/sunddi.h>
70 #include <sys/sata/sata_hba.h>
71 #ifdef SGPIO_SUPPORT
72 #include <sys/sata/adapters/nv_sata/nv_sgpio.h>
73 #include <sys/devctl.h>
74 #include <sys/sdt.h>
75 #endif
76 #include <sys/sata/adapters/nv_sata/nv_sata.h>
77 #include <sys/disp.h>
78 #include <sys/note.h>
79 #include <sys/promif.h>
80
81
82 /*
83 * Function prototypes for driver entry points
84 */
85 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
86 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
87 static int nv_quiesce(dev_info_t *dip);
88 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
89 void *arg, void **result);
90
91 /*
92 * Function prototypes for entry points from sata service module
93 * These functions are distinguished from other local functions
94 * by the prefix "nv_sata_"
95 */
96 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
97 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
98 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
99 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
100 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
101
102 /*
103 * Local function prototypes
104 */
105 static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
106 static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
107 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
108 #ifdef NV_MSI_SUPPORTED
109 static int nv_add_msi_intrs(nv_ctl_t *nvc);
110 #endif
111 static void nv_rem_intrs(nv_ctl_t *nvc);
112 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
113 static int nv_start_nodata(nv_port_t *nvp, int slot);
114 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
115 static int nv_start_pio_in(nv_port_t *nvp, int slot);
116 static int nv_start_pio_out(nv_port_t *nvp, int slot);
117 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
118 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
119 static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
120 static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
121 static int nv_start_dma(nv_port_t *nvp, int slot);
122 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
123 static void nv_uninit_ctl(nv_ctl_t *nvc);
124 static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
125 static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
126 static void nv_uninit_port(nv_port_t *nvp);
127 static void nv_init_port(nv_port_t *nvp);
128 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
129 static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
130 #ifdef NCQ
131 static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
132 #endif
133 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
134 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
135 int state);
136 static void nv_common_reg_init(nv_ctl_t *nvc);
137 static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
138 static void nv_reset(nv_port_t *nvp, char *reason);
139 static void nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot);
140 static void nv_timeout(void *);
141 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
142 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
143 static void nv_read_signature(nv_port_t *nvp);
144 static void mcp5x_set_intr(nv_port_t *nvp, int flag);
145 static void ck804_set_intr(nv_port_t *nvp, int flag);
146 static void nv_resume(nv_port_t *nvp);
147 static void nv_suspend(nv_port_t *nvp);
148 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
149 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
150 boolean_t reset);
151 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
152 sata_pkt_t *spkt);
153 static void nv_link_event(nv_port_t *nvp, int flags);
154 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
155 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
156 uchar_t failure_onbits2, uchar_t failure_offbits2,
157 uchar_t failure_onbits3, uchar_t failure_offbits3,
158 uint_t timeout_usec, int type_wait);
159 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
160 uint_t timeout_usec, int type_wait);
161 static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
162 static void nv_setup_timeout(nv_port_t *nvp, clock_t microseconds);
163 static clock_t nv_monitor_reset(nv_port_t *nvp);
164 static int nv_bm_status_clear(nv_port_t *nvp);
165 static void nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...);
166
167 #ifdef SGPIO_SUPPORT
168 static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
169 static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
170 static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
171 cred_t *credp, int *rvalp);
172
173 static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
174 static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
175 uint32_t *cbpp);
176 static int nv_sgp_init(nv_ctl_t *nvc);
177 static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
178 static int nv_sgp_csr_read(nv_ctl_t *nvc);
179 static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
180 static int nv_sgp_write_data(nv_ctl_t *nvc);
181 static void nv_sgp_activity_led_ctl(void *arg);
182 static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
183 static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
184 static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
185 static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
186 static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
187 static void nv_sgp_cleanup(nv_ctl_t *nvc);
188 #endif
189
190
191 /*
192 * DMA attributes for the data buffer for x86. dma_attr_burstsizes is unused.
193 * Verify if needed if ported to other ISA.
194 */
/*
 * Default (32-bit addressing) data buffer DMA attributes.  Cookies may
 * not cross a 64K boundary (bus master PRD restriction) and each is
 * limited to the 64K count below.
 */
static ddi_dma_attr_t buffer_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo: lowest bus address */
	0xffffffffull,		/* dma_attr_addr_hi: 32-bit address limit */
	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
	4,			/* dma_attr_align */
	1,			/* dma_attr_burstsizes. */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
	0xffffffffull,		/* dma_attr_seg */
	NV_DMA_NSEGS,		/* dma_attr_sgllen */
	512,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};
/*
 * 40-bit addressing variant of the data buffer DMA attributes, used when
 * nv_sata_40bit_dma is enabled and the hardware supports it; only
 * dma_attr_addr_hi differs from buffer_dma_attr.
 */
static ddi_dma_attr_t buffer_dma_40bit_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo: lowest bus address */
	0xffffffffffull,	/* dma_attr_addr_hi: 40-bit address limit */
	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max i.e for one cookie */
	4,			/* dma_attr_align */
	1,			/* dma_attr_burstsizes. */
	1,			/* dma_attr_minxfer */
	0xffffffffull,		/* dma_attr_maxxfer including all cookies */
	0xffffffffull,		/* dma_attr_seg */
	NV_DMA_NSEGS,		/* dma_attr_sgllen */
	512,			/* dma_attr_granular */
	0,			/* dma_attr_flags */
};
223
224
225 /*
226 * DMA attributes for PRD tables
227 */
/*
 * DMA attributes for PRD tables: a PRD table is a single cookie
 * (sgllen 1) that must fit within, and not cross, a 64K boundary.
 */
ddi_dma_attr_t nv_prd_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* dma_attr_addr_lo */
	0xffffffffull,		/* dma_attr_addr_hi */
	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_count_max */
	4,			/* dma_attr_align */
	1,			/* dma_attr_burstsizes */
	1,			/* dma_attr_minxfer */
	NV_BM_64K_BOUNDARY,	/* dma_attr_maxxfer */
	NV_BM_64K_BOUNDARY - 1,	/* dma_attr_seg */
	1,			/* dma_attr_sgllen */
	1,			/* dma_attr_granular */
	0			/* dma_attr_flags */
};
242
243 /*
244 * Device access attributes
245 */
/*
 * Device access attributes: little-endian structural access with strict
 * ordering, used for all BAR register mappings.
 */
static ddi_device_acc_attr_t accattr = {
	DDI_DEVICE_ATTR_V0,
	DDI_STRUCTURE_LE_ACC,
	DDI_STRICTORDER_ACC
};
251
252
253 #ifdef SGPIO_SUPPORT
/*
 * Character device entry points; only present to expose the SGPIO LED
 * control ioctls (DEVCTL_SET_LED et al.) -- no block/stream support.
 */
static struct cb_ops nv_cb_ops = {
	nv_open,		/* open */
	nv_close,		/* close */
	nodev,			/* strategy (block) */
	nodev,			/* print (block) */
	nodev,			/* dump (block) */
	nodev,			/* read */
	nodev,			/* write */
	nv_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* prop_op */
	NULL,			/* streams */
	D_NEW | D_MP |
	D_64BIT | D_HOTPLUG,	/* flags */
	CB_REV			/* rev */
};
273 #endif /* SGPIO_SUPPORT */
274
275
/*
 * Driver autoconfiguration entry points.  cb_ops are only provided when
 * SGPIO support is built in; no power() entry point (see the Power
 * Management notes at the top of this file).
 */
static struct dev_ops nv_dev_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt  */
	nv_getinfo,		/* info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	nv_attach,		/* attach */
	nv_detach,		/* detach */
	nodev,			/* no reset */
#ifdef SGPIO_SUPPORT
	&nv_cb_ops,		/* driver operations */
#else
	(struct cb_ops *)0,	/* driver operations */
#endif
	NULL,			/* bus operations */
	NULL,			/* power */
	nv_quiesce		/* quiesce */
};
294
295
296 /*
297 * Request Sense CDB for ATAPI
298 */
/*
 * Request Sense CDB for ATAPI, issued internally to retrieve sense data
 * after an ATAPI packet command failure; padded to the maximum CDB length.
 */
static const uint8_t nv_rqsense_cdb[16] = {
	SCMD_REQUEST_SENSE,
	0,
	0,
	0,
	SATA_ATAPI_MIN_RQSENSE_LEN,
	0,
	0, 0, 0, 0, 0, 0, 0, 0, 0, 0	/* pad out to max CDB length */
};
308
309
310 static sata_tran_hotplug_ops_t nv_hotplug_ops;
311
312 extern struct mod_ops mod_driverops;
313
/*
 * Module linkage: loadable driver description for mod_install(9F).
 */
static struct modldrv modldrv = {
	&mod_driverops,	/* driverops */
	"NVIDIA CK804/MCP04/MCP51/MCP55/MCP61 HBA",
	&nv_dev_ops,	/* driver ops */
};
319
/*
 * Module linkage list: just the single driver module above.
 */
static struct modlinkage modlinkage = {
	MODREV_1,
	{ &modldrv, NULL }
};
324
325 /*
326 * Maximum number of consecutive interrupts processed in the loop in the
327 * single invocation of the port interrupt routine.
328 */
329 int nv_max_intr_loops = NV_MAX_INTR_PER_DEV;
330
331 /*
332 * wait between checks of reg status
333 */
334 int nv_usec_delay = NV_WAIT_REG_CHECK;
335
336 /*
337 * The following used for nv_vcmn_err() and nv_log()
338 */
339
340 /*
341 * temp buffer to save from wasting limited stack space
342 */
343 static char nv_log_buf[NV_LOGBUF_LEN];
344
345 /*
346 * protects nv_log_buf
347 */
348 static kmutex_t nv_log_mutex;
349
350 /*
351 * these on-by-default flags were chosen so that the driver
352 * logs as much non-usual run-time information as possible
353 * without overflowing the ring with useless information or
354 * causing any significant performance penalty.
355 */
356 int nv_debug_flags =
357 NVDBG_HOT|NVDBG_RESET|NVDBG_ALWAYS|NVDBG_TIMEOUT|NVDBG_EVENT;
358
359 /*
360 * normally debug information is not logged to the console
361 * but this allows it to be enabled.
362 */
363 int nv_log_to_console = B_FALSE;
364
365 /*
366 * normally debug information is not logged to cmn_err but
367 * in some cases it may be desired.
368 */
369 int nv_log_to_cmn_err = B_FALSE;
370
371 /*
372 * using prom print avoids using cmn_err/syslog and goes right
373 * to the console which may be desirable in some situations, but
374 * it may be synchronous, which would change timings and
375 * impact performance. Use with caution.
376 */
377 int nv_prom_print = B_FALSE;
378
379 /*
380 * Opaque state pointer to be initialized by ddi_soft_state_init()
381 */
382 static void *nv_statep = NULL;
383
384 /*
385 * Map from CBP to shared space
386 *
 * When an MCP55/IO55 part supports SGPIO, there is a single CBP (SGPIO
388 * Control Block Pointer as well as the corresponding Control Block) that
389 * is shared across all driver instances associated with that part. The
390 * Control Block is used to update and query the LED state for the devices
391 * on the controllers associated with those instances. There is also some
392 * driver state (called the 'common' area here) associated with each SGPIO
 * Control Block. The nv_sgp_cbp2cmn array is used to map a given CBP to
394 * control area.
395 *
396 * The driver can also use this mapping array to determine whether the
397 * common area for a given CBP has been initialized, and, if it isn't
398 * initialized, initialize it.
399 *
400 * When a driver instance with a CBP value that is already in the array is
401 * initialized, it will use the pointer to the previously initialized common
402 * area associated with that SGPIO CBP value, rather than initialize it
403 * itself.
404 *
405 * nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
406 */
407 #ifdef SGPIO_SUPPORT
408 static kmutex_t nv_sgp_c2c_mutex;
409 static struct nv_sgp_cbp2cmn nv_sgp_cbp2cmn[NV_MAX_CBPS];
410 #endif
411
412 /*
413 * control whether 40bit DMA is used or not
414 */
415 int nv_sata_40bit_dma = B_TRUE;
416
/*
 * Hotplug operations vector handed to the SATA framework; backs the
 * cfgadm -c connect/disconnect operations for a port.
 */
static sata_tran_hotplug_ops_t nv_hotplug_ops = {
	SATA_TRAN_HOTPLUG_OPS_REV_1,	/* structure version */
	nv_sata_activate,	/* activate port. cfgadm -c connect */
	nv_sata_deactivate	/* deactivate port. cfgadm -c disconnect */
};
422
423
424 /*
425 * nv module initialization
426 */
427 int
428 _init(void)
429 {
430 int error;
431 #ifdef SGPIO_SUPPORT
432 int i;
433 #endif
434
435 error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
436
437 if (error != 0) {
438
439 return (error);
440 }
441
442 mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
443 #ifdef SGPIO_SUPPORT
444 mutex_init(&nv_sgp_c2c_mutex, NULL, MUTEX_DRIVER, NULL);
445
446 for (i = 0; i < NV_MAX_CBPS; i++) {
447 nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
448 nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
449 }
450 #endif
451
452 if ((error = sata_hba_init(&modlinkage)) != 0) {
453 ddi_soft_state_fini(&nv_statep);
454 mutex_destroy(&nv_log_mutex);
455
456 return (error);
457 }
458
459 error = mod_install(&modlinkage);
460 if (error != 0) {
461 sata_hba_fini(&modlinkage);
462 ddi_soft_state_fini(&nv_statep);
463 mutex_destroy(&nv_log_mutex);
464
465 return (error);
466 }
467
468 return (error);
469 }
470
471
472 /*
473 * nv module uninitialize
474 */
475 int
476 _fini(void)
477 {
478 int error;
479
480 error = mod_remove(&modlinkage);
481
482 if (error != 0) {
483 return (error);
484 }
485
486 /*
487 * remove the resources allocated in _init()
488 */
489 mutex_destroy(&nv_log_mutex);
490 #ifdef SGPIO_SUPPORT
491 mutex_destroy(&nv_sgp_c2c_mutex);
492 #endif
493 sata_hba_fini(&modlinkage);
494 ddi_soft_state_fini(&nv_statep);
495
496 return (error);
497 }
498
499
/*
 * nv _info entry point: report module information through the module
 * framework (modinfo(8)).
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}
508
509
510 /*
511 * these wrappers for ddi_{get,put}8 are for observability
512 * with dtrace
513 */
514 #ifdef DEBUG
515
/* DEBUG wrapper: gives dtrace fbt a probe point for 8-bit register writes */
static void
nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
{
	ddi_put8(handle, dev_addr, value);
}
521
/* DEBUG wrapper: gives dtrace fbt a probe point for 32-bit register writes */
static void
nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
{
	ddi_put32(handle, dev_addr, value);
}
527
/* DEBUG wrapper: gives dtrace fbt a probe point for 32-bit register reads */
static uint32_t
nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
{
	return (ddi_get32(handle, dev_addr));
}
533
/* DEBUG wrapper: gives dtrace fbt a probe point for 16-bit register writes */
static void
nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
{
	ddi_put16(handle, dev_addr, value);
}
539
/* DEBUG wrapper: gives dtrace fbt a probe point for 16-bit register reads */
static uint16_t
nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
{
	return (ddi_get16(handle, dev_addr));
}
545
/* DEBUG wrapper: gives dtrace fbt a probe point for 8-bit register reads */
static uint8_t
nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
{
	return (ddi_get8(handle, dev_addr));
}
551
552 #else
553
554 #define nv_put8 ddi_put8
555 #define nv_put32 ddi_put32
556 #define nv_get32 ddi_get32
557 #define nv_put16 ddi_put16
558 #define nv_get16 ddi_get16
559 #define nv_get8 ddi_get8
560
561 #endif
562
563
564 /*
565 * Driver attach
566 */
567 static int
568 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
569 {
570 int status, attach_state, intr_types, bar, i, j, command;
571 int inst = ddi_get_instance(dip);
572 ddi_acc_handle_t pci_conf_handle;
573 nv_ctl_t *nvc;
574 uint8_t subclass;
575 uint32_t reg32;
576 #ifdef SGPIO_SUPPORT
577 pci_regspec_t *regs;
578 int rlen;
579 #endif
580
581 switch (cmd) {
582
583 case DDI_ATTACH:
584
585 attach_state = ATTACH_PROGRESS_NONE;
586
587 status = ddi_soft_state_zalloc(nv_statep, inst);
588
589 if (status != DDI_SUCCESS) {
590 break;
591 }
592
593 nvc = ddi_get_soft_state(nv_statep, inst);
594
595 nvc->nvc_dip = dip;
596
597 NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach(): DDI_ATTACH", NULL);
598
599 attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
600
601 if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
602 nvc->nvc_devid = pci_config_get16(pci_conf_handle,
603 PCI_CONF_DEVID);
604 nvc->nvc_revid = pci_config_get8(pci_conf_handle,
605 PCI_CONF_REVID);
606 NVLOG(NVDBG_INIT, nvc, NULL,
607 "inst %d: devid is %x silicon revid is %x"
608 " nv_debug_flags=%x", inst, nvc->nvc_devid,
609 nvc->nvc_revid, nv_debug_flags);
610 } else {
611 break;
612 }
613
614 attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
615
616 /*
617 * Set the PCI command register: enable IO/MEM/Master.
618 */
619 command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
620 pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
621 command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
622
623 subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
624
625 if (subclass & PCI_MASS_RAID) {
626 cmn_err(CE_WARN,
627 "attach failed: RAID mode not supported");
628
629 break;
630 }
631
632 /*
633 * the 6 bars of the controller are:
634 * 0: port 0 task file
635 * 1: port 0 status
636 * 2: port 1 task file
637 * 3: port 1 status
638 * 4: bus master for both ports
639 * 5: extended registers for SATA features
640 */
641 for (bar = 0; bar < 6; bar++) {
642 status = ddi_regs_map_setup(dip, bar + 1,
643 (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
644 &nvc->nvc_bar_hdl[bar]);
645
646 if (status != DDI_SUCCESS) {
647 NVLOG(NVDBG_INIT, nvc, NULL,
648 "ddi_regs_map_setup failure for bar"
649 " %d status = %d", bar, status);
650 break;
651 }
652 }
653
654 attach_state |= ATTACH_PROGRESS_BARS;
655
656 /*
657 * initialize controller structures
658 */
659 status = nv_init_ctl(nvc, pci_conf_handle);
660
661 if (status == NV_FAILURE) {
662 NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl failed",
663 NULL);
664
665 break;
666 }
667
668 attach_state |= ATTACH_PROGRESS_CTL_SETUP;
669
670 /*
671 * initialize mutexes
672 */
673 mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
674 DDI_INTR_PRI(nvc->nvc_intr_pri));
675
676 attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
677
678 /*
679 * get supported interrupt types
680 */
681 if (ddi_intr_get_supported_types(dip, &intr_types) !=
682 DDI_SUCCESS) {
683 nv_cmn_err(CE_WARN, nvc, NULL,
684 "ddi_intr_get_supported_types failed");
685
686 break;
687 }
688
689 NVLOG(NVDBG_INIT, nvc, NULL,
690 "ddi_intr_get_supported_types() returned: 0x%x",
691 intr_types);
692
693 #ifdef NV_MSI_SUPPORTED
694 if (intr_types & DDI_INTR_TYPE_MSI) {
695 NVLOG(NVDBG_INIT, nvc, NULL,
696 "using MSI interrupt type", NULL);
697
698 /*
699 * Try MSI first, but fall back to legacy if MSI
700 * attach fails
701 */
702 if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
703 nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
704 attach_state |= ATTACH_PROGRESS_INTR_ADDED;
705 NVLOG(NVDBG_INIT, nvc, NULL,
706 "MSI interrupt setup done", NULL);
707 } else {
708 nv_cmn_err(CE_CONT, nvc, NULL,
709 "MSI registration failed "
710 "will try Legacy interrupts");
711 }
712 }
713 #endif
714
715 /*
716 * Either the MSI interrupt setup has failed or only
717 * the fixed interrupts are available on the system.
718 */
719 if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
720 (intr_types & DDI_INTR_TYPE_FIXED)) {
721
722 NVLOG(NVDBG_INIT, nvc, NULL,
723 "using Legacy interrupt type", NULL);
724
725 if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
726 nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
727 attach_state |= ATTACH_PROGRESS_INTR_ADDED;
728 NVLOG(NVDBG_INIT, nvc, NULL,
729 "Legacy interrupt setup done", NULL);
730 } else {
731 nv_cmn_err(CE_WARN, nvc, NULL,
732 "legacy interrupt setup failed");
733 NVLOG(NVDBG_INIT, nvc, NULL,
734 "legacy interrupt setup failed", NULL);
735 break;
736 }
737 }
738
739 if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
740 NVLOG(NVDBG_INIT, nvc, NULL,
741 "no interrupts registered", NULL);
742 break;
743 }
744
745 #ifdef SGPIO_SUPPORT
746 /*
747 * save off the controller number
748 */
749 (void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
750 "reg", (caddr_t)®s, &rlen);
751 nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
752 kmem_free(regs, rlen);
753
754 /*
755 * initialize SGPIO
756 */
757 nv_sgp_led_init(nvc, pci_conf_handle);
758 #endif /* SGPIO_SUPPORT */
759
760 /*
761 * Do initial reset so that signature can be gathered
762 */
763 for (j = 0; j < NV_NUM_PORTS; j++) {
764 ddi_acc_handle_t bar5_hdl;
765 uint32_t sstatus;
766 nv_port_t *nvp;
767
768 nvp = &(nvc->nvc_port[j]);
769 bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
770 sstatus = ddi_get32(bar5_hdl, nvp->nvp_sstatus);
771
772 if (SSTATUS_GET_DET(sstatus) ==
773 SSTATUS_DET_DEVPRE_PHYCOM) {
774
775 nvp->nvp_state |= NV_ATTACH;
776 nvp->nvp_type = SATA_DTYPE_UNKNOWN;
777 mutex_enter(&nvp->nvp_mutex);
778 nv_reset(nvp, "attach");
779
780 while (nvp->nvp_state & NV_RESET) {
781 cv_wait(&nvp->nvp_reset_cv,
782 &nvp->nvp_mutex);
783 }
784
785 mutex_exit(&nvp->nvp_mutex);
786 }
787 }
788
789 /*
790 * attach to sata module
791 */
792 if (sata_hba_attach(nvc->nvc_dip,
793 &nvc->nvc_sata_hba_tran,
794 DDI_ATTACH) != DDI_SUCCESS) {
795 attach_state |= ATTACH_PROGRESS_SATA_MODULE;
796
797 break;
798 }
799
800 pci_config_teardown(&pci_conf_handle);
801
802 NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS", NULL);
803
804 return (DDI_SUCCESS);
805
806 case DDI_RESUME:
807
808 nvc = ddi_get_soft_state(nv_statep, inst);
809
810 NVLOG(NVDBG_INIT, nvc, NULL,
811 "nv_attach(): DDI_RESUME inst %d", inst);
812
813 if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
814 return (DDI_FAILURE);
815 }
816
817 /*
818 * Set the PCI command register: enable IO/MEM/Master.
819 */
820 command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
821 pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
822 command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
823
824 /*
825 * Need to set bit 2 to 1 at config offset 0x50
826 * to enable access to the bar5 registers.
827 */
828 reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
829
830 if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
831 pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
832 reg32 | NV_BAR5_SPACE_EN);
833 }
834
835 nvc->nvc_state &= ~NV_CTRL_SUSPEND;
836
837 for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
838 nv_resume(&(nvc->nvc_port[i]));
839 }
840
841 pci_config_teardown(&pci_conf_handle);
842
843 return (DDI_SUCCESS);
844
845 default:
846 return (DDI_FAILURE);
847 }
848
849
850 /*
851 * DDI_ATTACH failure path starts here
852 */
853
854 if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
855 nv_rem_intrs(nvc);
856 }
857
858 if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
859 /*
860 * Remove timers
861 */
862 int port = 0;
863 nv_port_t *nvp;
864
865 for (; port < NV_MAX_PORTS(nvc); port++) {
866 nvp = &(nvc->nvc_port[port]);
867 if (nvp->nvp_timeout_id != 0) {
868 (void) untimeout(nvp->nvp_timeout_id);
869 }
870 }
871 }
872
873 if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
874 mutex_destroy(&nvc->nvc_mutex);
875 }
876
877 if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
878 nv_uninit_ctl(nvc);
879 }
880
881 if (attach_state & ATTACH_PROGRESS_BARS) {
882 while (--bar >= 0) {
883 ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
884 }
885 }
886
887 if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
888 ddi_soft_state_free(nv_statep, inst);
889 }
890
891 if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
892 pci_config_teardown(&pci_conf_handle);
893 }
894
895 cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
896
897 return (DDI_FAILURE);
898 }
899
900
/*
 * Driver detach entry point (detach(9E)).
 *
 * DDI_DETACH: releases in order the interrupts, per-port timers, BAR
 * register mappings, the controller mutex, controller structures, SGPIO
 * resources (when built in), the SATA framework registration and
 * finally the per-instance soft state.
 *
 * NOTE(review): register mappings and controller structures are freed
 * before sata_hba_detach(); this mirrors the original ordering --
 * presumably safe because the framework quiesces activity first, but
 * worth confirming before reordering anything here.
 *
 * DDI_SUSPEND: suspends each port and marks the controller suspended;
 * state is restored by nv_attach(DDI_RESUME).
 */
static int
nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int i, port, inst = ddi_get_instance(dip);
	nv_ctl_t *nvc;
	nv_port_t *nvp;

	nvc = ddi_get_soft_state(nv_statep, inst);

	switch (cmd) {

	case DDI_DETACH:

		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH", NULL);

		/*
		 * Remove interrupts
		 */
		nv_rem_intrs(nvc);

		/*
		 * Remove timers
		 */
		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
			nvp = &(nvc->nvc_port[port]);
			if (nvp->nvp_timeout_id != 0) {
				(void) untimeout(nvp->nvp_timeout_id);
			}
		}

		/*
		 * Remove maps
		 */
		for (i = 0; i < 6; i++) {
			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
		}

		/*
		 * Destroy mutexes
		 */
		mutex_destroy(&nvc->nvc_mutex);

		/*
		 * Uninitialize the controller structures
		 */
		nv_uninit_ctl(nvc);

#ifdef SGPIO_SUPPORT
		/*
		 * release SGPIO resources
		 */
		nv_sgp_cleanup(nvc);
#endif

		/*
		 * unregister from the sata module
		 */
		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);

		/*
		 * Free soft state
		 */
		ddi_soft_state_free(nv_statep, inst);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:

		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND", NULL);

		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
			nv_suspend(&(nvc->nvc_port[i]));
		}

		nvc->nvc_state |= NV_CTRL_SUSPEND;

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
983
984
985 /*ARGSUSED*/
986 static int
987 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
988 {
989 nv_ctl_t *nvc;
990 int instance;
991 dev_t dev;
992
993 dev = (dev_t)arg;
994 instance = getminor(dev);
995
996 switch (infocmd) {
997 case DDI_INFO_DEVT2DEVINFO:
998 nvc = ddi_get_soft_state(nv_statep, instance);
999 if (nvc != NULL) {
1000 *result = nvc->nvc_dip;
1001 return (DDI_SUCCESS);
1002 } else {
1003 *result = NULL;
1004 return (DDI_FAILURE);
1005 }
1006 case DDI_INFO_DEVT2INSTANCE:
1007 *(int *)result = instance;
1008 break;
1009 default:
1010 break;
1011 }
1012 return (DDI_SUCCESS);
1013 }
1014
1015
1016 #ifdef SGPIO_SUPPORT
1017 /* ARGSUSED */
1018 static int
1019 nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1020 {
1021 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
1022
1023 if (nvc == NULL) {
1024 return (ENXIO);
1025 }
1026
1027 return (0);
1028 }
1029
1030
/*
 * close(9E) entry point: nothing to release since open keeps no state.
 */
/* ARGSUSED */
static int
nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	return (0);
}
1037
1038
/*
 * ioctl(9E) entry point: SGPIO LED control.
 *
 * Supports DEVCTL_SET_LED (turn the error/locate LED for a drive on or
 * off), DEVCTL_GET_LED (report the current LED state as last set via
 * this interface) and DEVCTL_NUM_LEDS.  Requires that SGPIO resources
 * were successfully set up during attach (nvc_sgp_cbp/nvc_sgp_cmn).
 */
/* ARGSUSED */
static int
nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	nv_ctl_t *nvc;
	int inst;
	int status;
	int ctlr, port;
	int drive;
	uint8_t curr_led;
	struct dc_led_ctl led;

	inst = getminor(dev);
	if (inst == -1) {
		return (EBADF);
	}

	nvc = ddi_get_soft_state(nv_statep, inst);
	if (nvc == NULL) {
		return (EBADF);
	}

	/* SGPIO was not initialized for this instance */
	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
		return (EIO);
	}

	switch (cmd) {
	case DEVCTL_SET_LED:
		status = ddi_copyin((void *)arg, &led,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		/*
		 * Since only the first two controllers currently support
		 * SGPIO (as per NVIDIA docs), this code will as well.
		 * Note that this validates the port value within led_state
		 * as well.
		 */

		ctlr = SGP_DRV_TO_CTLR(led.led_number);
		if ((ctlr != 0) && (ctlr != 1))
			return (ENXIO);

		/* blinking is not supported by this hardware */
		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
			return (EINVAL);
		}

		drive = led.led_number;

		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
		    (led.led_state == DCL_STATE_OFF)) {

			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
			} else {
				return (ENXIO);
			}

			/*
			 * NOTE(review): this ORs the type bit in even when
			 * turning the LED off, which matches the ON path
			 * below; presumably nvp_sgp_ioctl_mod records "LED
			 * under ioctl control" rather than "LED on" --
			 * confirm before changing to a bit-clear here.
			 */
			port = SGP_DRV_TO_PORT(led.led_number);
			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
		}

		if (led.led_ctl_active == DCL_CNTRL_ON) {
			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
			} else {
				return (ENXIO);
			}

			port = SGP_DRV_TO_PORT(led.led_number);
			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
		}

		break;

	case DEVCTL_GET_LED:
		status = ddi_copyin((void *)arg, &led,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		/*
		 * Since only the first two controllers currently support
		 * SGPIO (as per NVIDIA docs), this code will as well.
		 * Note that this validates the port value within led_state
		 * as well.
		 */

		ctlr = SGP_DRV_TO_CTLR(led.led_number);
		if ((ctlr != 0) && (ctlr != 1))
			return (ENXIO);

		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
		    led.led_number);

		port = SGP_DRV_TO_PORT(led.led_number);
		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
			led.led_ctl_active = DCL_CNTRL_ON;

			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
					led.led_state = DCL_STATE_OFF;
				else
					led.led_state = DCL_STATE_ON;
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
					led.led_state = DCL_STATE_OFF;
				else
					led.led_state = DCL_STATE_ON;
			} else {
				return (ENXIO);
			}
		} else {
			led.led_ctl_active = DCL_CNTRL_OFF;
			/*
			 * Not really off, but never set and no constant for
			 * tri-state
			 */
			led.led_state = DCL_STATE_OFF;
		}

		status = ddi_copyout(&led, (void *)arg,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		break;

	case DEVCTL_NUM_LEDS:
		led.led_number = SGPIO_DRV_CNT_VALUE;
		led.led_ctl_active = 1;
		led.led_type = 3;

		/*
		 * According to documentation, NVIDIA SGPIO is supposed to
		 * support blinking, but it does not seem to work in practice.
		 */
		led.led_state = DCL_STATE_ON;

		status = ddi_copyout(&led, (void *)arg,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		break;

	default:
		return (EINVAL);
	}

	return (0);
}
1197 #endif /* SGPIO_SUPPORT */
1198
1199
1200 /*
1201 * Called by sata module to probe a port. Port and device state
1202 * are not changed here... only reported back to the sata module.
1203 *
1204 */
static int
nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
{
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	uint8_t cport = sd->satadev_addr.cport;
	uint8_t pmport = sd->satadev_addr.pmport;
	uint8_t qual = sd->satadev_addr.qual;
	uint8_t det;

	nv_port_t *nvp;

	/*
	 * Reject out-of-range port numbers before touching any per-port
	 * state; this is the only path that returns SATA_FAILURE.
	 */
	if (cport >= NV_MAX_PORTS(nvc)) {
		sd->satadev_type = SATA_DTYPE_NONE;
		sd->satadev_state = SATA_STATE_UNKNOWN;

		return (SATA_FAILURE);
	}

	ASSERT(nvc->nvc_port != NULL);
	nvp = &(nvc->nvc_port[cport]);
	ASSERT(nvp != NULL);

	NVLOG(NVDBG_ENTRY, nvc, nvp,
	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
	    "qual: 0x%x", cport, pmport, qual);

	mutex_enter(&nvp->nvp_mutex);

	/*
	 * This check seems to be done in the SATA module.
	 * It may not be required here
	 */
	if (nvp->nvp_state & NV_DEACTIVATED) {
		nv_cmn_err(CE_WARN, nvc, nvp,
		    "port inactive. Use cfgadm to activate");
		sd->satadev_type = SATA_DTYPE_UNKNOWN;
		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/*
	 * A failed port is still reported successfully (probe is only a
	 * state report); the failed state is conveyed via satadev_state.
	 */
	if (nvp->nvp_state & NV_FAILED) {
		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
		    "probe: port failed", NULL);
		sd->satadev_type = nvp->nvp_type;
		sd->satadev_state = SATA_PSTATE_FAILED;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/*
	 * This controller has no port multiplier support, so a PM port
	 * address can never have a device behind it.
	 */
	if (qual == SATA_ADDR_PMPORT) {
		sd->satadev_type = SATA_DTYPE_NONE;
		sd->satadev_state = SATA_STATE_UNKNOWN;
		mutex_exit(&nvp->nvp_mutex);
		nv_cmn_err(CE_WARN, nvc, nvp,
		    "controller does not support port multiplier");

		return (SATA_SUCCESS);
	}

	sd->satadev_state = SATA_PSTATE_PWRON;

	nv_copy_registers(nvp, sd, NULL);

	if (nvp->nvp_state & (NV_RESET|NV_LINK_EVENT)) {
		/*
		 * during a reset or link event, fake the status
		 * as it may be changing as a result of the reset
		 * or link event.
		 */
		DTRACE_PROBE(state_reset_link_event_faking_status_p);
		DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);

		SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
		    SSTATUS_IPM_ACTIVE);
		SSTATUS_SET_DET(sd->satadev_scr.sstatus,
		    SSTATUS_DET_DEVPRE_PHYCOM);
		sd->satadev_type = nvp->nvp_type;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	det = SSTATUS_GET_DET(sd->satadev_scr.sstatus);

	/*
	 * determine link status
	 */
	if (det != SSTATUS_DET_DEVPRE_PHYCOM) {
		switch (det) {

		case SSTATUS_DET_NODEV:
		case SSTATUS_DET_PHYOFFLINE:
			sd->satadev_type = SATA_DTYPE_NONE;
			break;

		default:
			sd->satadev_type = SATA_DTYPE_UNKNOWN;
			break;
		}

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/*
	 * Just report the current port state
	 */
	sd->satadev_type = nvp->nvp_type;
	DTRACE_PROBE1(nvp_type_h, int, nvp->nvp_type);

	mutex_exit(&nvp->nvp_mutex);

	return (SATA_SUCCESS);
}
1323
1324
1325 /*
1326 * Called by sata module to start a new command.
1327 */
static int
nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
{
	int cport = spkt->satapkt_device.satadev_addr.cport;
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	nv_port_t *nvp = &(nvc->nvc_port[cport]);
	int ret;

	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg);

	mutex_enter(&nvp->nvp_mutex);

	/*
	 * The following sequence of checks gates command delivery on the
	 * port state, most fatal conditions first: deactivated/failed
	 * ports reject with a port error, transient reset/link-event
	 * states reject with busy so the sata module will retry.
	 */
	if (nvp->nvp_state & NV_DEACTIVATED) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_DEACTIVATED", NULL);
		DTRACE_PROBE(nvp_state_inactive_p);

		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	if (nvp->nvp_state & NV_FAILED) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_FAILED state", NULL);
		DTRACE_PROBE(nvp_state_failed_p);

		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	if (nvp->nvp_state & NV_RESET) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "still waiting for reset completion", NULL);
		DTRACE_PROBE(nvp_state_reset_p);

		spkt->satapkt_reason = SATA_PKT_BUSY;

		/*
		 * If in panic, timeouts do not occur, so invoke
		 * reset handling directly so that the signature
		 * can be acquired to complete the reset handling.
		 */
		if (ddi_in_panic()) {
			NVLOG(NVDBG_ERRS, nvc, nvp,
			    "nv_sata_start: calling nv_monitor_reset "
			    "synchronously", NULL);

			(void) nv_monitor_reset(nvp);
		}

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}

	if (nvp->nvp_state & NV_LINK_EVENT) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start(): link event ret bsy", NULL);
		DTRACE_PROBE(nvp_state_link_event_p);

		spkt->satapkt_reason = SATA_PKT_BUSY;

		/* as above: timeouts don't fire during panic, run inline */
		if (ddi_in_panic()) {
			NVLOG(NVDBG_ERRS, nvc, nvp,
			    "nv_sata_start: calling nv_timeout "
			    "synchronously", NULL);

			nv_timeout(nvp);
		}

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}


	/* no usable device attached: reject with a port error */
	if ((nvp->nvp_type == SATA_DTYPE_NONE) ||
	    (nvp->nvp_type == SATA_DTYPE_UNKNOWN)) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: nvp_type 0x%x", nvp->nvp_type);
		DTRACE_PROBE1(not_ready_nvp_type_h, int, nvp->nvp_type);

		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {

		nv_cmn_err(CE_WARN, nvc, nvp,
		    "port multiplier not supported by controller");

		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_CMD_UNSUPPORTED);
	}

	/*
	 * after a device reset, and then when sata module restore processing
	 * is complete, the sata module will set sata_clear_dev_reset which
	 * indicates that restore processing has completed and normal
	 * non-restore related commands should be processed.
	 */
	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {

		NVLOG(NVDBG_RESET, nvc, nvp,
		    "nv_sata_start: clearing NV_RESTORE", NULL);
		DTRACE_PROBE(clearing_restore_p);
		DTRACE_PROBE1(nvp_state_before_clear_h, int, nvp->nvp_state);

		nvp->nvp_state &= ~NV_RESTORE;
	}

	/*
	 * if the device was recently reset as indicated by NV_RESTORE,
	 * only allow commands which restore device state. The sata module
	 * marks such commands with sata_ignore_dev_reset.
	 *
	 * during coredump, nv_reset is called but the restore isn't
	 * processed, so ignore the wait for restore if the system
	 * is panicing.
	 */
	if ((nvp->nvp_state & NV_RESTORE) &&
	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
	    (ddi_in_panic() == 0)) {

		NVLOG(NVDBG_RESET, nvc, nvp,
		    "nv_sata_start: waiting for restore ", NULL);
		DTRACE_PROBE1(restore_no_ignore_reset_nvp_state_h,
		    int, nvp->nvp_state);

		spkt->satapkt_reason = SATA_PKT_BUSY;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}

	if (nvp->nvp_state & NV_ABORTING) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_ABORTING", NULL);
		DTRACE_PROBE1(aborting_nvp_state_h, int, nvp->nvp_state);

		spkt->satapkt_reason = SATA_PKT_BUSY;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}

	/*
	 * record command sequence for debugging.
	 */
	nvp->nvp_seq++;

	DTRACE_PROBE2(command_start, int *, nvp, int,
	    spkt->satapkt_cmd.satacmd_cmd_reg);

	/*
	 * clear SError to be able to check errors after the command failure
	 */
	nv_put32(nvp->nvp_ctlp->nvc_bar_hdl[5], nvp->nvp_serror, 0xffffffff);

	/* POLLING implies SYNCH; both are handled by the sync path */
	if (spkt->satapkt_op_mode &
	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {

		ret = nv_start_sync(nvp, spkt);

		mutex_exit(&nvp->nvp_mutex);

		return (ret);
	}

	/*
	 * start command asynchronous command
	 */
	ret = nv_start_async(nvp, spkt);

	mutex_exit(&nvp->nvp_mutex);

	return (ret);
}
1525
1526
1527 /*
1528 * SATA_OPMODE_POLLING implies the driver is in a
1529 * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1530 * If only SATA_OPMODE_SYNCH is set, the driver can use
1531 * interrupts and sleep wait on a cv.
1532 *
1533 * If SATA_OPMODE_POLLING is set, the driver can't use
1534 * interrupts and must busy wait and simulate the
1535 * interrupts by waiting for BSY to be cleared.
1536 *
1537 * Synchronous mode has to return BUSY if there are
1538 * any other commands already on the drive.
1539 */
1540 static int
1541 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1542 {
1543 nv_ctl_t *nvc = nvp->nvp_ctlp;
1544 int ret;
1545
1546 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry",
1547 NULL);
1548
1549 if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1550 spkt->satapkt_reason = SATA_PKT_BUSY;
1551 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1552 "nv_sata_satapkt_sync: device is busy, sync cmd rejected"
1553 "ncq_run: %d non_ncq_run: %d spkt: %p",
1554 nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1555 (&(nvp->nvp_slot[0]))->nvslot_spkt);
1556
1557 return (SATA_TRAN_BUSY);
1558 }
1559
1560 /*
1561 * if SYNC but not POLL, verify that this is not on interrupt thread.
1562 */
1563 if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1564 servicing_interrupt()) {
1565 spkt->satapkt_reason = SATA_PKT_BUSY;
1566 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1567 "SYNC mode not allowed during interrupt", NULL);
1568
1569 return (SATA_TRAN_BUSY);
1570
1571 }
1572
1573 /*
1574 * disable interrupt generation if in polled mode
1575 */
1576 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1577 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1578 }
1579
1580 /*
1581 * overload the satapkt_reason with BUSY so code below
1582 * will know when it's done
1583 */
1584 spkt->satapkt_reason = SATA_PKT_BUSY;
1585
1586 if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1587 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1588 (*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1589 }
1590
1591 return (ret);
1592 }
1593
1594 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1595 mutex_exit(&nvp->nvp_mutex);
1596 ret = nv_poll_wait(nvp, spkt);
1597 mutex_enter(&nvp->nvp_mutex);
1598
1599 (*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1600
1601 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1602 " done % reason %d", ret);
1603
1604 return (ret);
1605 }
1606
1607 /*
1608 * non-polling synchronous mode handling. The interrupt will signal
1609 * when device IO is completed.
1610 */
1611 while (spkt->satapkt_reason == SATA_PKT_BUSY) {
1612 cv_wait(&nvp->nvp_sync_cv, &nvp->nvp_mutex);
1613 }
1614
1615
1616 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1617 " done % reason %d", spkt->satapkt_reason);
1618
1619 return (SATA_TRAN_ACCEPTED);
1620 }
1621
1622
static int
nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
{
	int ret;
	nv_ctl_t *nvc = nvp->nvp_ctlp;
#if ! defined(__lock_lint)
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
#endif

	NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter", NULL);

	/*
	 * Busy-wait loop for polled mode: interrupts are disabled by the
	 * caller, so completion is detected by spinning on BSY and then
	 * invoking the interrupt handler by hand. Called without
	 * nvp_mutex held; the mutex is re-acquired around completion
	 * processing on each error path.
	 */
	for (;;) {

		NV_DELAY_NSEC(400);

		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait",
		    NULL);
		/*
		 * If BSY does not clear within the packet's own timeout,
		 * fail the packet with a timeout and reset the port.
		 * The packet is still completed and SATA_TRAN_ACCEPTED is
		 * returned; the failure is conveyed via satapkt_reason.
		 */
		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
			mutex_enter(&nvp->nvp_mutex);
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			nv_reset(nvp, "poll_wait");
			nv_complete_io(nvp, spkt, 0);
			mutex_exit(&nvp->nvp_mutex);
			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
			    "SATA_STATUS_BSY", NULL);

			return (SATA_TRAN_ACCEPTED);
		}

		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr",
		    NULL);

		/*
		 * Simulate interrupt.
		 */
		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr",
		    NULL);

		/*
		 * An unclaimed "interrupt" here means BSY cleared but the
		 * handler found nothing to service — treat as a timeout
		 * and reset, same disposition as above.
		 */
		if (ret != DDI_INTR_CLAIMED) {
			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
			    " unclaimed -- resetting", NULL);
			mutex_enter(&nvp->nvp_mutex);
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			nv_reset(nvp, "poll_wait intr not claimed");
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_complete_io(nvp, spkt, 0);
			mutex_exit(&nvp->nvp_mutex);

			return (SATA_TRAN_ACCEPTED);
		}

#if ! defined(__lock_lint)
		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
			/*
			 * packet is complete
			 */
			return (SATA_TRAN_ACCEPTED);
		}
#endif
	}
	/*NOTREACHED*/
}
1688
1689
1690 /*
1691 * Called by sata module to abort outstanding packets.
1692 */
1693 /*ARGSUSED*/
1694 static int
1695 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1696 {
1697 int cport = spkt->satapkt_device.satadev_addr.cport;
1698 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1699 nv_port_t *nvp = &(nvc->nvc_port[cport]);
1700 int c_a, ret;
1701
1702 ASSERT(cport < NV_MAX_PORTS(nvc));
1703 NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt);
1704
1705 mutex_enter(&nvp->nvp_mutex);
1706
1707 if (nvp->nvp_state & NV_DEACTIVATED) {
1708 mutex_exit(&nvp->nvp_mutex);
1709 nv_cmn_err(CE_WARN, nvc, nvp,
1710 "abort request failed: port inactive");
1711
1712 return (SATA_FAILURE);
1713 }
1714
1715 /*
1716 * spkt == NULL then abort all commands
1717 */
1718 c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED, B_TRUE);
1719
1720 if (c_a) {
1721 NVLOG(NVDBG_ENTRY, nvc, nvp,
1722 "packets aborted running=%d", c_a);
1723 ret = SATA_SUCCESS;
1724 } else {
1725 if (spkt == NULL) {
1726 NVLOG(NVDBG_ENTRY, nvc, nvp, "no spkts to abort", NULL);
1727 } else {
1728 NVLOG(NVDBG_ENTRY, nvc, nvp,
1729 "can't find spkt to abort", NULL);
1730 }
1731 ret = SATA_FAILURE;
1732 }
1733
1734 mutex_exit(&nvp->nvp_mutex);
1735
1736 return (ret);
1737 }
1738
1739
1740 /*
1741 * if spkt == NULL abort all pkts running, otherwise
1742 * abort the requested packet. must be called with nv_mutex
1743 * held and returns with it held. Not NCQ aware.
1744 */
static int
nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
    boolean_t reset)
{
	int aborted = 0, i, reset_once = B_FALSE;
	struct nv_slot *nv_slotp;
	sata_pkt_t *spkt_slot;

	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));

	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active", NULL);

	/*
	 * Flag the port as aborting so concurrent command delivery
	 * (nv_sata_start) backs off with busy while slots are torn down.
	 */
	nvp->nvp_state |= NV_ABORTING;

	/*
	 * Scan every slot; each occupied slot that matches (or every
	 * occupied slot when spkt == NULL) is completed with
	 * abort_reason.  The return value is the number of packets
	 * aborted.
	 */
	for (i = 0; i < nvp->nvp_queue_depth; i++) {

		nv_slotp = &(nvp->nvp_slot[i]);
		spkt_slot = nv_slotp->nvslot_spkt;

		/*
		 * skip if not active command in slot
		 */
		if (spkt_slot == NULL) {
			continue;
		}

		/*
		 * if a specific packet was requested, skip if
		 * this is not a match
		 */
		if ((spkt != NULL) && (spkt != spkt_slot)) {
			continue;
		}

		/*
		 * stop the hardware. This could need reworking
		 * when NCQ is enabled in the driver.
		 */
		if (reset_once == B_FALSE) {
			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;

			/*
			 * stop DMA engine
			 */
			nv_put8(bmhdl, nvp->nvp_bmicx, 0);

			/*
			 * Reset only if explicitly specified by the arg reset
			 */
			if (reset == B_TRUE) {
				reset_once = B_TRUE;
				nv_reset(nvp, "abort_active");
			}
		}

		spkt_slot->satapkt_reason = abort_reason;
		nv_complete_io(nvp, spkt_slot, i);
		aborted++;
	}

	nvp->nvp_state &= ~NV_ABORTING;

	return (aborted);
}
1809
1810
1811 /*
1812 * Called by sata module to reset a port, device, or the controller.
1813 */
static int
nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
{
	int cport = sd->satadev_addr.cport;
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	nv_port_t *nvp = &(nvc->nvc_port[cport]);
	int ret = SATA_FAILURE;

	ASSERT(cport < NV_MAX_PORTS(nvc));

	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_reset", NULL);

	mutex_enter(&nvp->nvp_mutex);

	switch (sd->satadev_addr.qual) {

	case SATA_ADDR_CPORT:
		/*FALLTHROUGH*/
	case SATA_ADDR_DCPORT:

		/* only port/device resets can succeed on this hardware */
		ret = SATA_SUCCESS;

		/*
		 * If a reset is already in progress, don't disturb it
		 */
		if ((nvp->nvp_state & (NV_RESET|NV_RESTORE)) &&
		    (ddi_in_panic() == 0)) {
			NVLOG(NVDBG_RESET, nvc, nvp,
			    "nv_sata_reset: reset already in progress", NULL);
			DTRACE_PROBE(reset_already_in_progress_p);

			break;
		}

		/*
		 * log the pre-reset state of the driver because dumping the
		 * blocks will disturb it.
		 */
		if (ddi_in_panic() == 1) {
			NVLOG(NVDBG_RESET, nvc, nvp, "in_panic. nvp_state: "
			    "0x%x nvp_reset_time: %d nvp_last_cmd: 0x%x "
			    "nvp_previous_cmd: 0x%x nvp_reset_count: %d "
			    "nvp_first_reset_reason: %s "
			    "nvp_reset_reason: %s nvp_seq: %d "
			    "in_interrupt: %d", nvp->nvp_state,
			    nvp->nvp_reset_time, nvp->nvp_last_cmd,
			    nvp->nvp_previous_cmd, nvp->nvp_reset_count,
			    nvp->nvp_first_reset_reason,
			    nvp->nvp_reset_reason, nvp->nvp_seq,
			    servicing_interrupt());
		}

		nv_reset(nvp, "sata_reset");

		/* reset was already issued above, so don't reset again here */
		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET, B_FALSE);

		/*
		 * If the port is inactive, do a quiet reset and don't attempt
		 * to wait for reset completion or do any post reset processing
		 *
		 */
		if (nvp->nvp_state & NV_DEACTIVATED) {
			nvp->nvp_state &= ~NV_RESET;
			nvp->nvp_reset_time = 0;

			break;
		}

		/*
		 * clear the port failed flag. It will get set again
		 * if the port is still not functioning.
		 */
		nvp->nvp_state &= ~NV_FAILED;

		/*
		 * timeouts are not available while the system is
		 * dropping core, so call nv_monitor_reset() directly
		 */
		if (ddi_in_panic() != 0) {
			while (nvp->nvp_state & NV_RESET) {
				drv_usecwait(1000);
				(void) nv_monitor_reset(nvp);
			}

			break;
		}

		break;
	case SATA_ADDR_CNTRL:
		NVLOG(NVDBG_ENTRY, nvc, nvp,
		    "nv_sata_reset: controller reset not supported", NULL);

		break;
	case SATA_ADDR_PMPORT:
	case SATA_ADDR_DPMPORT:
		NVLOG(NVDBG_ENTRY, nvc, nvp,
		    "nv_sata_reset: port multipliers not supported", NULL);
		/*FALLTHROUGH*/
	default:
		/*
		 * unsupported case
		 */
		break;
	}

	mutex_exit(&nvp->nvp_mutex);

	return (ret);
}
1923
1924
1925 /*
1926 * Sata entry point to handle port activation. cfgadm -c connect
1927 */
1928 static int
1929 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1930 {
1931 int cport = sd->satadev_addr.cport;
1932 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1933 nv_port_t *nvp = &(nvc->nvc_port[cport]);
1934 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1935 uint32_t sstatus;
1936
1937 ASSERT(cport < NV_MAX_PORTS(nvc));
1938 NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_activate", NULL);
1939
1940 mutex_enter(&nvp->nvp_mutex);
1941
1942 sd->satadev_state = SATA_STATE_READY;
1943
1944 nv_copy_registers(nvp, sd, NULL);
1945
1946 (*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1947
1948 /*
1949 * initiate link probing and device signature acquisition
1950 */
1951
1952 bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1953
1954 sstatus = ddi_get32(bar5_hdl, nvp->nvp_sstatus);
1955
1956 nvp->nvp_type = SATA_DTYPE_NONE;
1957 nvp->nvp_signature = NV_NO_SIG;
1958 nvp->nvp_state &= ~NV_DEACTIVATED;
1959
1960 if (SSTATUS_GET_DET(sstatus) ==
1961 SSTATUS_DET_DEVPRE_PHYCOM) {
1962
1963 nvp->nvp_state |= NV_ATTACH;
1964 nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1965 nv_reset(nvp, "sata_activate");
1966
1967 while (nvp->nvp_state & NV_RESET) {
1968 cv_wait(&nvp->nvp_reset_cv, &nvp->nvp_mutex);
1969 }
1970
1971 }
1972
1973 mutex_exit(&nvp->nvp_mutex);
1974
1975 return (SATA_SUCCESS);
1976 }
1977
1978
1979 /*
1980 * Sata entry point to handle port deactivation. cfgadm -c disconnect
1981 */
1982 static int
1983 nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
1984 {
1985 int cport = sd->satadev_addr.cport;
1986 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1987 nv_port_t *nvp = &(nvc->nvc_port[cport]);
1988
1989 ASSERT(cport < NV_MAX_PORTS(nvc));
1990 NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate", NULL);
1991
1992 mutex_enter(&nvp->nvp_mutex);
1993
1994 (void) nv_abort_active(nvp, NULL, SATA_PKT_ABORTED, B_FALSE);
1995
1996 /*
1997 * make the device inaccessible
1998 */
1999 nvp->nvp_state |= NV_DEACTIVATED;
2000
2001 /*
2002 * disable the interrupts on port
2003 */
2004 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
2005
2006 sd->satadev_state = SATA_PSTATE_SHUTDOWN;
2007 nv_copy_registers(nvp, sd, NULL);
2008
2009 mutex_exit(&nvp->nvp_mutex);
2010
2011 return (SATA_SUCCESS);
2012 }
2013
2014
2015 /*
2016 * find an empty slot in the driver's queue, increment counters,
2017 * and then invoke the appropriate PIO or DMA start routine.
2018 */
static int
nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
{
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	nv_slot_t *nv_slotp;
	boolean_t dma_cmd;

	NVLOG(NVDBG_DELIVER, nvc, nvp, "nv_start_common entered: cmd: 0x%x",
	    sata_cmdp->satacmd_cmd_reg);

	/*
	 * NCQ commands claim a free slot from SActive; everything else
	 * uses slot 0 (the driver's non-NCQ path is single-command).
	 */
	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
		nvp->nvp_ncq_run++;
		/*
		 * search for an empty NCQ slot. by the time, it's already
		 * been determined by the caller that there is room on the
		 * queue.
		 */
		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
		    on_bit <<= 1) {
			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
				break;
			}
		}

		/*
		 * the first empty slot found, should not exceed the queue
		 * depth of the drive. if it does it's an error.
		 */
		ASSERT(slot != nvp->nvp_queue_depth);

		sactive = nv_get32(nvc->nvc_bar_hdl[5],
		    nvp->nvp_sactive);
		ASSERT((sactive & on_bit) == 0);
		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
		NVLOG(NVDBG_DELIVER, nvc, nvp, "setting SACTIVE onbit: %X",
		    on_bit);
		nvp->nvp_sactive_cache |= on_bit;

		ncq = NVSLOT_NCQ;

	} else {
		nvp->nvp_non_ncq_run++;
		slot = 0;
	}

	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];

	ASSERT(nv_slotp->nvslot_spkt == NULL);

	nv_slotp->nvslot_spkt = spkt;
	nv_slotp->nvslot_flags = ncq;

	/*
	 * the sata module doesn't indicate which commands utilize the
	 * DMA engine, so find out using this switch table.
	 */
	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
	case SATAC_READ_DMA_EXT:
	case SATAC_WRITE_DMA_EXT:
	case SATAC_WRITE_DMA:
	case SATAC_READ_DMA:
	case SATAC_READ_DMA_QUEUED:
	case SATAC_READ_DMA_QUEUED_EXT:
	case SATAC_WRITE_DMA_QUEUED:
	case SATAC_WRITE_DMA_QUEUED_EXT:
	case SATAC_READ_FPDMA_QUEUED:
	case SATAC_WRITE_FPDMA_QUEUED:
	case SATAC_DSM:
		dma_cmd = B_TRUE;
		break;
	default:
		dma_cmd = B_FALSE;
	}

	/*
	 * Select the start/interrupt handler pair for the slot based on
	 * the transfer type: DMA, ATAPI packet, non-data, PIO in, PIO out.
	 */
	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "DMA command", NULL);
		nv_slotp->nvslot_start = nv_start_dma;
		nv_slotp->nvslot_intr = nv_intr_dma;
	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "packet command", NULL);
		nv_slotp->nvslot_start = nv_start_pkt_pio;
		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
		if ((direction == SATA_DIR_READ) ||
		    (direction == SATA_DIR_WRITE)) {
			nv_slotp->nvslot_byte_count =
			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
			nv_slotp->nvslot_v_addr =
			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
			/*
			 * Freeing DMA resources allocated by the sata common
			 * module to avoid buffer overwrite (dma sync) problems
			 * when the buffer is released at command completion.
			 * Primarily an issue on systems with more than
			 * 4GB of memory.
			 */
			sata_free_dma_resources(spkt);
		}
	} else if (direction == SATA_DIR_NODATA_XFER) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "non-data command", NULL);
		nv_slotp->nvslot_start = nv_start_nodata;
		nv_slotp->nvslot_intr = nv_intr_nodata;
	} else if (direction == SATA_DIR_READ) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio in command", NULL);
		nv_slotp->nvslot_start = nv_start_pio_in;
		nv_slotp->nvslot_intr = nv_intr_pio_in;
		nv_slotp->nvslot_byte_count =
		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
		nv_slotp->nvslot_v_addr =
		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
		/*
		 * Freeing DMA resources allocated by the sata common module to
		 * avoid buffer overwrite (dma sync) problems when the buffer
		 * is released at command completion. This is not an issue
		 * for write because write does not update the buffer.
		 * Primarily an issue on systems with more than 4GB of memory.
		 */
		sata_free_dma_resources(spkt);
	} else if (direction == SATA_DIR_WRITE) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio out command", NULL);
		nv_slotp->nvslot_start = nv_start_pio_out;
		nv_slotp->nvslot_intr = nv_intr_pio_out;
		nv_slotp->nvslot_byte_count =
		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
		nv_slotp->nvslot_v_addr =
		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
	} else {
		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
		    " %d cookies %d cmd %x",
		    sata_cmdp->satacmd_flags.sata_data_direction,
		    sata_cmdp->satacmd_num_dma_cookies, cmd);
		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
		ret = SATA_TRAN_CMD_UNSUPPORTED;

		goto fail;
	}

	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
	    SATA_TRAN_ACCEPTED) {
#ifdef SGPIO_SUPPORT
		nv_sgp_drive_active(nvp->nvp_ctlp,
		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
#endif
		nv_slotp->nvslot_stime = ddi_get_lbolt();

		/*
		 * start timer if it's not already running and this packet
		 * is not requesting polled mode.
		 */
		if ((nvp->nvp_timeout_id == 0) &&
		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
			nv_setup_timeout(nvp, NV_ONE_SEC);
		}

		nvp->nvp_previous_cmd = nvp->nvp_last_cmd;
		nvp->nvp_last_cmd = spkt->satapkt_cmd.satacmd_cmd_reg;

		return (SATA_TRAN_ACCEPTED);
	}

	fail:

	/*
	 * The start routine rejected the command: unwind all of the
	 * counters and slot bookkeeping set up above so the slot is
	 * free for the next delivery.
	 */
	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;

	if (ncq == NVSLOT_NCQ) {
		nvp->nvp_ncq_run--;
		nvp->nvp_sactive_cache &= ~on_bit;
	} else {
		nvp->nvp_non_ncq_run--;
	}
	nv_slotp->nvslot_spkt = NULL;
	nv_slotp->nvslot_flags = 0;

	return (ret);
}
2198
2199
2200 /*
2201 * Check if the signature is ready and if non-zero translate
2202 * it into a solaris sata defined type.
2203 */
static void
nv_read_signature(nv_port_t *nvp)
{
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int retry_count = 0;

	retry:

	/*
	 * Assemble the 32-bit device signature from the four task file
	 * registers: count (bits 0-7), sector (8-15), cylinder low
	 * (16-23), cylinder high (24-31).
	 */
	nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
	nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);

	NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
	    "nv_read_signature: 0x%x ", nvp->nvp_signature);

	/* map the raw signature to a sata module device type */
	switch (nvp->nvp_signature) {

	case NV_DISK_SIG:
		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp, "drive is a disk", NULL);
		DTRACE_PROBE(signature_is_disk_device_p)
		nvp->nvp_type = SATA_DTYPE_ATADISK;

		break;
	case NV_ATAPI_SIG:
		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
		    "drive is an optical device", NULL);
		DTRACE_PROBE(signature_is_optical_device_p)
		nvp->nvp_type = SATA_DTYPE_ATAPICD;
		break;
	case NV_PM_SIG:
		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
		    "device is a port multiplier", NULL);
		DTRACE_PROBE(signature_is_port_multiplier_p)
		nvp->nvp_type = SATA_DTYPE_PMULT;
		break;
	case NV_NO_SIG:
		NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
		    "signature not available", NULL);
		DTRACE_PROBE(sig_not_available_p);
		nvp->nvp_type = SATA_DTYPE_UNKNOWN;
		break;
	default:
		if (retry_count++ == 0) {
			/*
			 * this is a rare corner case where the controller
			 * is updating the task file registers as the driver
			 * is reading them. If this happens, wait a bit and
			 * retry once.
			 */
			NV_DELAY_NSEC(1000000);
			NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
			    "invalid signature 0x%x retry once",
			    nvp->nvp_signature);
			DTRACE_PROBE1(signature_invalid_retry_once_h,
			    int, nvp->nvp_signature);

			goto retry;
		}

		/* still invalid after the retry: report and fall back */
		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp,
		    "invalid signature 0x%x", nvp->nvp_signature);
		nvp->nvp_type = SATA_DTYPE_UNKNOWN;

		break;
	}
}
2271
2272
2273 /*
2274 * Set up a new timeout or complete a timeout in microseconds.
2275 * If microseconds is zero, no new timeout is scheduled. Must be
2276 * called at the end of the timeout routine.
2277 */
static void
nv_setup_timeout(nv_port_t *nvp, clock_t microseconds)
{
	clock_t old_duration = nvp->nvp_timeout_duration;

	/* microseconds == 0 means "don't reschedule" */
	if (microseconds == 0) {

		return;
	}

	if (nvp->nvp_timeout_id != 0 && nvp->nvp_timeout_duration == 0) {
		/*
		 * Since we are dropping the mutex for untimeout,
		 * the timeout may be executed while we are trying to
		 * untimeout and setting up a new timeout.
		 * If nvp_timeout_duration is 0, then this function
		 * was re-entered. Just exit.
		 */
		cmn_err(CE_WARN, "nv_setup_timeout re-entered");

		return;
	}

	/*
	 * Zero the duration as the re-entry marker (see check above)
	 * before any window where the mutex may be dropped.
	 */
	nvp->nvp_timeout_duration = 0;

	if (nvp->nvp_timeout_id == 0) {
		/*
		 * start new timer
		 */
		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
		    drv_usectohz(microseconds));
	} else {
		/*
		 * If the currently running timeout is due later than the
		 * requested one, restart it with a new expiration.
		 * Our timeouts do not need to be accurate - we would be just
		 * checking that the specified time was exceeded.
		 */
		if (old_duration > microseconds) {
			mutex_exit(&nvp->nvp_mutex);
			(void) untimeout(nvp->nvp_timeout_id);
			mutex_enter(&nvp->nvp_mutex);
			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
			    drv_usectohz(microseconds));
		}
	}

	nvp->nvp_timeout_duration = microseconds;
}
2327
2328
2329
2330 int nv_reset_length = NV_RESET_LENGTH;
2331
2332 /*
2333 * Reset the port
2334 */
2335 static void
2336 nv_reset(nv_port_t *nvp, char *reason)
2337 {
2338 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
2339 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2340 nv_ctl_t *nvc = nvp->nvp_ctlp;
2341 uint32_t sctrl, serr, sstatus;
2342 uint8_t bmicx;
2343 int i, j;
2344 boolean_t reset_success = B_FALSE;
2345
2346 ASSERT(mutex_owned(&nvp->nvp_mutex));
2347
2348 /*
2349 * If the port is reset right after the controller receives
2350 * the DMA activate command (or possibly any other FIS),
2351 * controller operation freezes without any known recovery
2352 * procedure. Until Nvidia advises on a recovery mechanism,
2353 * avoid the situation by waiting sufficiently long to
2354 * ensure the link is not actively transmitting any FIS.
2355 * 100ms was empirically determined to be large enough to
2356 * ensure no transaction was left in flight but not too long
2357 * as to cause any significant thread delay.
2358 */
2359 drv_usecwait(100000);
2360
2361 serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2362 DTRACE_PROBE1(serror_h, int, serr);
2363
2364 /*
2365 * stop DMA engine.
2366 */
2367 bmicx = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmicx);
2368 nv_put8(nvp->nvp_bm_hdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);
2369
2370 /*
2371 * the current setting of the NV_RESET in nvp_state indicates whether
2372 * this is the first reset attempt or a retry.
2373 */
2374 if (nvp->nvp_state & NV_RESET) {
2375 nvp->nvp_reset_retry_count++;
2376
2377 NVLOG(NVDBG_RESET, nvc, nvp, "npv_reset_retry_count: %d",
2378 nvp->nvp_reset_retry_count);
2379
2380 } else {
2381 nvp->nvp_reset_retry_count = 0;
2382 nvp->nvp_reset_count++;
2383 nvp->nvp_state |= NV_RESET;
2384
2385 NVLOG(NVDBG_RESET, nvc, nvp, "nvp_reset_count: %d reason: %s "
2386 "serror: 0x%x seq: %d run: %d cmd: 0x%x",
2387 nvp->nvp_reset_count, reason, serr, nvp->nvp_seq,
2388 nvp->nvp_non_ncq_run, nvp->nvp_last_cmd);
2389 }
2390
2391 /*
2392 * a link event could have occurred slightly before excessive
2393 * interrupt processing invokes a reset. Reset handling overrides
2394 * link event processing so it's safe to clear it here.
2395 */
2396 nvp->nvp_state &= ~(NV_RESTORE|NV_LINK_EVENT);
2397
2398 nvp->nvp_reset_time = ddi_get_lbolt();
2399
2400 if ((nvp->nvp_state & (NV_ATTACH|NV_HOTPLUG)) == 0) {
2401 nv_cmn_err(CE_NOTE, nvc, nvp, "nv_reset: reason: %s serr 0x%x"
2402 " nvp_state: 0x%x", reason, serr, nvp->nvp_state);
2403 /*
2404 * keep a record of why the first reset occurred, for debugging
2405 */
2406 if (nvp->nvp_first_reset_reason[0] == '\0') {
2407 (void) strncpy(nvp->nvp_first_reset_reason,
2408 reason, NV_REASON_LEN);
2409 nvp->nvp_first_reset_reason[NV_REASON_LEN - 1] = '\0';
2410 }
2411 }
2412
2413 (void) strncpy(nvp->nvp_reset_reason, reason, NV_REASON_LEN);
2414
2415 /*
2416 * ensure there is terminating NULL
2417 */
2418 nvp->nvp_reset_reason[NV_REASON_LEN - 1] = '\0';
2419
2420 /*
2421 * Issue hardware reset; retry if necessary.
2422 */
2423 for (i = 0; i < NV_COMRESET_ATTEMPTS; i++) {
2424
2425 /*
2426 * clear signature registers and the error register too
2427 */
2428 nv_put8(cmdhdl, nvp->nvp_sect, 0);
2429 nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
2430 nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
2431 nv_put8(cmdhdl, nvp->nvp_count, 0);
2432
2433 nv_put8(nvp->nvp_cmd_hdl, nvp->nvp_error, 0);
2434
2435 /*
2436 * assert reset in PHY by writing a 1 to bit 0 scontrol
2437 */
2438 sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2439
2440 nv_put32(bar5_hdl, nvp->nvp_sctrl,
2441 sctrl | SCONTROL_DET_COMRESET);
2442
2443 /* Wait at least 1ms, as required by the spec */
2444 drv_usecwait(nv_reset_length);
2445
2446 serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2447 DTRACE_PROBE1(aftercomreset_serror_h, int, serr);
2448
2449 /* Reset all accumulated error bits */
2450 nv_put32(bar5_hdl, nvp->nvp_serror, 0xffffffff);
2451
2452
2453 sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2454 sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
2455 NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset: applied (%d); "
2456 "sctrl 0x%x, sstatus 0x%x", i, sctrl, sstatus);
2457
2458 /* de-assert reset in PHY */
2459 nv_put32(bar5_hdl, nvp->nvp_sctrl,
2460 sctrl & ~SCONTROL_DET_COMRESET);
2461
2462 /*
2463 * Wait up to 10ms for COMINIT to arrive, indicating that
2464 * the device recognized COMRESET.
2465 */
2466 for (j = 0; j < 10; j++) {
2467 drv_usecwait(NV_ONE_MSEC);
2468 sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
2469 if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
2470 (SSTATUS_GET_DET(sstatus) ==
2471 SSTATUS_DET_DEVPRE_PHYCOM)) {
2472 reset_success = B_TRUE;
2473 break;
2474 }
2475 }
2476
2477 if (reset_success == B_TRUE)
2478 break;
2479 }
2480
2481
2482 serr = nv_get32(bar5_hdl, nvp->nvp_serror);
2483 DTRACE_PROBE1(last_serror_h, int, serr);
2484
2485 if (reset_success == B_FALSE) {
2486 NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset not succeeded "
2487 "after %d attempts. serr: 0x%x", i, serr);
2488 } else {
2489 NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset succeeded"
2490 " after %dms. serr: 0x%x", TICK_TO_MSEC(ddi_get_lbolt() -
2491 nvp->nvp_reset_time), serr);
2492 }
2493
2494 nvp->nvp_wait_sig = NV_WAIT_SIG;
2495 nv_setup_timeout(nvp, nvp->nvp_wait_sig);
2496 }
2497
2498
2499 /*
2500 * Initialize register handling specific to mcp51/mcp55/mcp61
2501 */
2502 /* ARGSUSED */
2503 static void
2504 mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2505 {
2506 nv_port_t *nvp;
2507 uchar_t *bar5 = nvc->nvc_bar_addr[5];
2508 uint8_t off, port;
2509
2510 nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
2511 nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);
2512
2513 for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
2514 nvp = &(nvc->nvc_port[port]);
2515 nvp->nvp_mcp5x_int_status =
2516 (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
2517 nvp->nvp_mcp5x_int_ctl =
2518 (uint16_t *)(bar5 + MCP5X_INT_CTL + off);
2519
2520 /*
2521 * clear any previous interrupts asserted
2522 */
2523 nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
2524 MCP5X_INT_CLEAR);
2525
2526 /*
2527 * These are the interrupts to accept for now. The spec
2528 * says these are enable bits, but nvidia has indicated
2529 * these are masking bits. Even though they may be masked
2530 * out to prevent asserting the main interrupt, they can
2531 * still be asserted while reading the interrupt status
2532 * register, so that needs to be considered in the interrupt
2533 * handler.
2534 */
2535 nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
2536 ~(MCP5X_INT_IGNORE));
2537 }
2538
2539 /*
2540 * Allow the driver to program the BM on the first command instead
2541 * of waiting for an interrupt.
2542 */
2543 #ifdef NCQ
2544 flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
2545 nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
2546 flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
2547 nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
2548 #endif
2549
2550 /*
2551 * mcp55 rev A03 and above supports 40-bit physical addressing.
2552 * Enable DMA to take advantage of that.
2553 *
2554 */
2555 if ((nvc->nvc_devid > 0x37f) ||
2556 ((nvc->nvc_devid == 0x37f) && (nvc->nvc_revid >= 0xa3))) {
2557 if (nv_sata_40bit_dma == B_TRUE) {
2558 uint32_t reg32;
2559 NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2560 "devid is %X revid is %X. 40-bit DMA"
2561 " addressing enabled", nvc->nvc_devid,
2562 nvc->nvc_revid);
2563 nvc->dma_40bit = B_TRUE;
2564
2565 reg32 = pci_config_get32(pci_conf_handle,
2566 NV_SATA_CFG_20);
2567 pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2568 reg32 | NV_40BIT_PRD);
2569
2570 /*
2571 * CFG_23 bits 0-7 contain the top 8 bits (of 40
2572 * bits) for the primary PRD table, and bits 8-15
2573 * contain the top 8 bits for the secondary. Set
2574 * to zero because the DMA attribute table for PRD
2575 * allocation forces it into 32 bit address space
2576 * anyway.
2577 */
2578 reg32 = pci_config_get32(pci_conf_handle,
2579 NV_SATA_CFG_23);
2580 pci_config_put32(pci_conf_handle, NV_SATA_CFG_23,
2581 reg32 & 0xffff0000);
2582 } else {
2583 NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2584 "40-bit DMA disabled by nv_sata_40bit_dma", NULL);
2585 }
2586 } else {
2587 nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "devid is %X revid is"
2588 " %X. Not capable of 40-bit DMA addressing",
2589 nvc->nvc_devid, nvc->nvc_revid);
2590 }
2591 }
2592
2593
2594 /*
2595 * Initialize register handling specific to ck804
2596 */
2597 static void
2598 ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2599 {
2600 uchar_t *bar5 = nvc->nvc_bar_addr[5];
2601 uint32_t reg32;
2602 uint16_t reg16;
2603 nv_port_t *nvp;
2604 int j;
2605
2606 /*
2607 * delay hotplug interrupts until PHYRDY.
2608 */
2609 reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2610 pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2611 reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2612
2613 /*
2614 * enable hot plug interrupts for channel x and y
2615 */
2616 reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2617 (uint16_t *)(bar5 + NV_ADMACTL_X));
2618 nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2619 NV_HIRQ_EN | reg16);
2620
2621
2622 reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2623 (uint16_t *)(bar5 + NV_ADMACTL_Y));
2624 nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2625 NV_HIRQ_EN | reg16);
2626
2627 nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2628
2629 /*
2630 * clear any existing interrupt pending then enable
2631 */
2632 for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2633 nvp = &(nvc->nvc_port[j]);
2634 mutex_enter(&nvp->nvp_mutex);
2635 (*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2636 NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2637 mutex_exit(&nvp->nvp_mutex);
2638 }
2639 }
2640
2641
2642 /*
2643 * Initialize the controller and set up driver data structures.
2644 * determine if ck804 or mcp5x class.
2645 */
2646 static int
2647 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2648 {
2649 struct sata_hba_tran stran;
2650 nv_port_t *nvp;
2651 int j;
2652 uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2653 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2654 uchar_t *bar5 = nvc->nvc_bar_addr[5];
2655 uint32_t reg32;
2656 uint8_t reg8, reg8_save;
2657
2658 NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl entered", NULL);
2659
2660 nvc->nvc_mcp5x_flag = B_FALSE;
2661
2662 /*
2663 * Need to set bit 2 to 1 at config offset 0x50
2664 * to enable access to the bar5 registers.
2665 */
2666 reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2667 if (!(reg32 & NV_BAR5_SPACE_EN)) {
2668 pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2669 reg32 | NV_BAR5_SPACE_EN);
2670 }
2671
2672 /*
2673 * Determine if this is ck804 or mcp5x. ck804 will map in the
2674 * task file registers into bar5 while mcp5x won't. The offset of
2675 * the task file registers in mcp5x's space is unused, so it will
2676 * return zero. So check one of the task file registers to see if it is
2677 * writable and reads back what was written. If it's mcp5x it will
2678 * return back 0xff whereas ck804 will return the value written.
2679 */
2680 reg8_save = nv_get8(bar5_hdl,
2681 (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2682
2683
2684 for (j = 1; j < 3; j++) {
2685
2686 nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2687 reg8 = nv_get8(bar5_hdl,
2688 (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2689
2690 if (reg8 != j) {
2691 nvc->nvc_mcp5x_flag = B_TRUE;
2692 break;
2693 }
2694 }
2695
2696 nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2697
2698 if (nvc->nvc_mcp5x_flag == B_FALSE) {
2699 NVLOG(NVDBG_INIT, nvc, NULL, "controller is CK804/MCP04",
2700 NULL);
2701 nvc->nvc_interrupt = ck804_intr;
2702 nvc->nvc_reg_init = ck804_reg_init;
2703 nvc->nvc_set_intr = ck804_set_intr;
2704 } else {
2705 NVLOG(NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55/MCP61",
2706 NULL);
2707 nvc->nvc_interrupt = mcp5x_intr;
2708 nvc->nvc_reg_init = mcp5x_reg_init;
2709 nvc->nvc_set_intr = mcp5x_set_intr;
2710 }
2711
2712
2713 stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2714 stran.sata_tran_hba_dip = nvc->nvc_dip;
2715 stran.sata_tran_hba_num_cports = NV_NUM_PORTS;
2716 stran.sata_tran_hba_features_support =
2717 SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2718 stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2719 stran.sata_tran_probe_port = nv_sata_probe;
2720 stran.sata_tran_start = nv_sata_start;
2721 stran.sata_tran_abort = nv_sata_abort;
2722 stran.sata_tran_reset_dport = nv_sata_reset;
2723 stran.sata_tran_selftest = NULL;
2724 stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2725 stran.sata_tran_pwrmgt_ops = NULL;
2726 stran.sata_tran_ioctl = NULL;
2727 nvc->nvc_sata_hba_tran = stran;
2728
2729 nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2730 KM_SLEEP);
2731
2732 /*
2733 * initialize registers common to all chipsets
2734 */
2735 nv_common_reg_init(nvc);
2736
2737 for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2738 nvp = &(nvc->nvc_port[j]);
2739
2740 cmd_addr = nvp->nvp_cmd_addr;
2741 ctl_addr = nvp->nvp_ctl_addr;
2742 bm_addr = nvp->nvp_bm_addr;
2743
2744 mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2745 DDI_INTR_PRI(nvc->nvc_intr_pri));
2746
2747 cv_init(&nvp->nvp_sync_cv, NULL, CV_DRIVER, NULL);
2748 cv_init(&nvp->nvp_reset_cv, NULL, CV_DRIVER, NULL);
2749
2750 nvp->nvp_data = cmd_addr + NV_DATA;
2751 nvp->nvp_error = cmd_addr + NV_ERROR;
2752 nvp->nvp_feature = cmd_addr + NV_FEATURE;
2753 nvp->nvp_count = cmd_addr + NV_COUNT;
2754 nvp->nvp_sect = cmd_addr + NV_SECT;
2755 nvp->nvp_lcyl = cmd_addr + NV_LCYL;
2756 nvp->nvp_hcyl = cmd_addr + NV_HCYL;
2757 nvp->nvp_drvhd = cmd_addr + NV_DRVHD;
2758 nvp->nvp_status = cmd_addr + NV_STATUS;
2759 nvp->nvp_cmd = cmd_addr + NV_CMD;
2760 nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2761 nvp->nvp_devctl = ctl_addr + NV_DEVCTL;
2762
2763 nvp->nvp_bmicx = bm_addr + BMICX_REG;
2764 nvp->nvp_bmisx = bm_addr + BMISX_REG;
2765 nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2766
2767 nvp->nvp_state = 0;
2768
2769 /*
2770 * Initialize dma handles, etc.
2771 * If it fails, the port is in inactive state.
2772 */
2773 nv_init_port(nvp);
2774 }
2775
2776 /*
2777 * initialize register by calling chip specific reg initialization
2778 */
2779 (*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2780
2781 /* initialize the hba dma attribute */
2782 if (nvc->dma_40bit == B_TRUE)
2783 nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2784 &buffer_dma_40bit_attr;
2785 else
2786 nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2787 &buffer_dma_attr;
2788
2789 return (NV_SUCCESS);
2790 }
2791
2792
2793 /*
2794 * Initialize data structures with enough slots to handle queuing, if
2795 * enabled. NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2796 * NCQ support is built into the driver and enabled. It might have been
2797 * better to derive the true size from the drive itself, but the sata
2798 * module only sends down that information on the first NCQ command,
2799 * which means possibly re-sizing the structures on an interrupt stack,
2800 * making error handling more messy. The easy way is to just allocate
2801 * all 32 slots, which is what most drives support anyway.
2802 */
2803 static void
2804 nv_init_port(nv_port_t *nvp)
2805 {
2806 nv_ctl_t *nvc = nvp->nvp_ctlp;
2807 size_t prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
2808 dev_info_t *dip = nvc->nvc_dip;
2809 ddi_device_acc_attr_t dev_attr;
2810 size_t buf_size;
2811 ddi_dma_cookie_t cookie;
2812 uint_t count;
2813 int rc, i;
2814
2815 dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
2816 dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
2817 dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
2818
2819 nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
2820 NV_QUEUE_SLOTS, KM_SLEEP);
2821
2822 nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
2823 NV_QUEUE_SLOTS, KM_SLEEP);
2824
2825 nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
2826 NV_QUEUE_SLOTS, KM_SLEEP);
2827
2828 nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
2829 NV_QUEUE_SLOTS, KM_SLEEP);
2830
2831 nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
2832 KM_SLEEP);
2833
2834 for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2835
2836 rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
2837 DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));
2838
2839 if (rc != DDI_SUCCESS) {
2840 nv_uninit_port(nvp);
2841
2842 return;
2843 }
2844
2845 rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
2846 &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
2847 NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
2848 &(nvp->nvp_sg_acc_hdl[i]));
2849
2850 if (rc != DDI_SUCCESS) {
2851 nv_uninit_port(nvp);
2852
2853 return;
2854 }
2855
2856 rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
2857 nvp->nvp_sg_addr[i], buf_size,
2858 DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
2859 DDI_DMA_SLEEP, NULL, &cookie, &count);
2860
2861 if (rc != DDI_DMA_MAPPED) {
2862 nv_uninit_port(nvp);
2863
2864 return;
2865 }
2866
2867 ASSERT(count == 1);
2868 ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);
2869
2870 ASSERT(cookie.dmac_laddress <= UINT32_MAX);
2871
2872 nvp->nvp_sg_paddr[i] = cookie.dmac_address;
2873 }
2874
2875 /*
2876 * nvp_queue_depth represents the actual drive queue depth, not the
2877 * number of slots allocated in the structures (which may be more).
2878 * Actual queue depth is only learned after the first NCQ command, so
2879 * initialize it to 1 for now.
2880 */
2881 nvp->nvp_queue_depth = 1;
2882
2883 /*
2884 * Port is initialized whether the device is attached or not.
2885 * Link processing and device identification will be started later,
2886 * after interrupts are initialized.
2887 */
2888 nvp->nvp_type = SATA_DTYPE_NONE;
2889 }
2890
2891
2892 /*
2893 * Free dynamically allocated structures for port.
2894 */
2895 static void
2896 nv_uninit_port(nv_port_t *nvp)
2897 {
2898 int i;
2899
2900 NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
2901 "nv_uninit_port uninitializing", NULL);
2902
2903 #ifdef SGPIO_SUPPORT
2904 if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
2905 nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
2906 nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
2907 }
2908 #endif
2909
2910 nvp->nvp_type = SATA_DTYPE_NONE;
2911
2912 for (i = 0; i < NV_QUEUE_SLOTS; i++) {
2913 if (nvp->nvp_sg_paddr[i]) {
2914 (void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
2915 }
2916
2917 if (nvp->nvp_sg_acc_hdl[i] != NULL) {
2918 ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
2919 }
2920
2921 if (nvp->nvp_sg_dma_hdl[i] != NULL) {
2922 ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
2923 }
2924 }
2925
2926 kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
2927 nvp->nvp_slot = NULL;
2928
2929 kmem_free(nvp->nvp_sg_dma_hdl,
2930 sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
2931 nvp->nvp_sg_dma_hdl = NULL;
2932
2933 kmem_free(nvp->nvp_sg_acc_hdl,
2934 sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
2935 nvp->nvp_sg_acc_hdl = NULL;
2936
2937 kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
2938 nvp->nvp_sg_addr = NULL;
2939
2940 kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
2941 nvp->nvp_sg_paddr = NULL;
2942 }
2943
2944
2945 /*
2946 * Cache register offsets and access handles to frequently accessed registers
2947 * which are common to either chipset.
2948 */
2949 static void
2950 nv_common_reg_init(nv_ctl_t *nvc)
2951 {
2952 uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2953 uchar_t *bm_addr_offset, *sreg_offset;
2954 uint8_t bar, port;
2955 nv_port_t *nvp;
2956
2957 for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2958 if (port == 0) {
2959 bar = NV_BAR_0;
2960 bm_addr_offset = 0;
2961 sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2962 } else {
2963 bar = NV_BAR_2;
2964 bm_addr_offset = (uchar_t *)8;
2965 sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2966 }
2967
2968 nvp = &(nvc->nvc_port[port]);
2969 nvp->nvp_ctlp = nvc;
2970 nvp->nvp_port_num = port;
2971 NVLOG(NVDBG_INIT, nvc, nvp, "setting up port mappings", NULL);
2972
2973 nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2974 nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2975 nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2976 nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2977 nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2978 nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2979 (long)bm_addr_offset;
2980
2981 nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2982 nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2983 nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2984 nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2985 }
2986 }
2987
2988
2989 static void
2990 nv_uninit_ctl(nv_ctl_t *nvc)
2991 {
2992 int port;
2993 nv_port_t *nvp;
2994
2995 NVLOG(NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered", NULL);
2996
2997 for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2998 nvp = &(nvc->nvc_port[port]);
2999 mutex_enter(&nvp->nvp_mutex);
3000 NVLOG(NVDBG_INIT, nvc, nvp, "uninitializing port", NULL);
3001 nv_uninit_port(nvp);
3002 mutex_exit(&nvp->nvp_mutex);
3003 mutex_destroy(&nvp->nvp_mutex);
3004 cv_destroy(&nvp->nvp_sync_cv);
3005 cv_destroy(&nvp->nvp_reset_cv);
3006 }
3007
3008 kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
3009 nvc->nvc_port = NULL;
3010 }
3011
3012
3013 /*
3014 * ck804 interrupt. This is a wrapper around ck804_intr_process so
3015 * that interrupts from other devices can be disregarded while dtracing.
3016 */
3017 /* ARGSUSED */
3018 static uint_t
3019 ck804_intr(caddr_t arg1, caddr_t arg2)
3020 {
3021 nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3022 uint8_t intr_status;
3023 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3024
3025 if (nvc->nvc_state & NV_CTRL_SUSPEND)
3026 return (DDI_INTR_UNCLAIMED);
3027
3028 intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3029
3030 if (intr_status == 0) {
3031
3032 return (DDI_INTR_UNCLAIMED);
3033 }
3034
3035 ck804_intr_process(nvc, intr_status);
3036
3037 return (DDI_INTR_CLAIMED);
3038 }
3039
3040
3041 /*
3042 * Main interrupt handler for ck804. handles normal device
3043 * interrupts and hot plug and remove interrupts.
3044 *
3045 */
3046 static void
3047 ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
3048 {
3049
3050 int port, i;
3051 nv_port_t *nvp;
3052 nv_slot_t *nv_slotp;
3053 uchar_t status;
3054 sata_pkt_t *spkt;
3055 uint8_t bmstatus, clear_bits;
3056 ddi_acc_handle_t bmhdl;
3057 int nvcleared = 0;
3058 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3059 uint32_t sstatus;
3060 int port_mask_hot[] = {
3061 CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
3062 };
3063 int port_mask_pm[] = {
3064 CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
3065 };
3066
3067 NVLOG(NVDBG_INTR, nvc, NULL,
3068 "ck804_intr_process entered intr_status=%x", intr_status);
3069
3070 /*
3071 * For command completion interrupt, explicit clear is not required.
3072 * however, for the error cases explicit clear is performed.
3073 */
3074 for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3075
3076 int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};
3077
3078 if ((port_mask[port] & intr_status) == 0) {
3079
3080 continue;
3081 }
3082
3083 NVLOG(NVDBG_INTR, nvc, NULL,
3084 "ck804_intr_process interrupt on port %d", port);
3085
3086 nvp = &(nvc->nvc_port[port]);
3087
3088 mutex_enter(&nvp->nvp_mutex);
3089
3090 /*
3091 * this case might be encountered when the other port
3092 * is active
3093 */
3094 if (nvp->nvp_state & NV_DEACTIVATED) {
3095
3096 /*
3097 * clear interrupt bits
3098 */
3099 nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3100 port_mask[port]);
3101
3102 mutex_exit(&nvp->nvp_mutex);
3103
3104 continue;
3105 }
3106
3107
3108 if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL) {
3109 status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
3110 NVLOG(NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
3111 " no command in progress status=%x", status);
3112 mutex_exit(&nvp->nvp_mutex);
3113
3114 /*
3115 * clear interrupt bits
3116 */
3117 nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3118 port_mask[port]);
3119
3120 continue;
3121 }
3122
3123 bmhdl = nvp->nvp_bm_hdl;
3124 bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3125
3126 if (!(bmstatus & BMISX_IDEINTS)) {
3127 mutex_exit(&nvp->nvp_mutex);
3128
3129 continue;
3130 }
3131
3132 status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3133
3134 if (status & SATA_STATUS_BSY) {
3135 mutex_exit(&nvp->nvp_mutex);
3136
3137 continue;
3138 }
3139
3140 nv_slotp = &(nvp->nvp_slot[0]);
3141
3142 ASSERT(nv_slotp);
3143
3144 spkt = nv_slotp->nvslot_spkt;
3145
3146 if (spkt == NULL) {
3147 mutex_exit(&nvp->nvp_mutex);
3148
3149 continue;
3150 }
3151
3152 (*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3153
3154 nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3155
3156 if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3157
3158 nv_complete_io(nvp, spkt, 0);
3159 }
3160
3161 mutex_exit(&nvp->nvp_mutex);
3162 }
3163
3164 /*
3165 * ck804 often doesn't correctly distinguish hot add/remove
3166 * interrupts. Frequently both the ADD and the REMOVE bits
3167 * are asserted, whether it was a remove or add. Use sstatus
3168 * to distinguish hot add from hot remove.
3169 */
3170
3171 for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
3172 clear_bits = 0;
3173
3174 nvp = &(nvc->nvc_port[port]);
3175 mutex_enter(&nvp->nvp_mutex);
3176
3177 if ((port_mask_pm[port] & intr_status) != 0) {
3178 clear_bits = port_mask_pm[port];
3179 NVLOG(NVDBG_HOT, nvc, nvp,
3180 "clearing PM interrupt bit: %x",
3181 intr_status & port_mask_pm[port]);
3182 }
3183
3184 if ((port_mask_hot[port] & intr_status) == 0) {
3185 if (clear_bits != 0) {
3186 goto clear;
3187 } else {
3188 mutex_exit(&nvp->nvp_mutex);
3189 continue;
3190 }
3191 }
3192
3193 /*
3194 * reaching here means there was a hot add or remove.
3195 */
3196 clear_bits |= port_mask_hot[port];
3197
3198 ASSERT(nvc->nvc_port[port].nvp_sstatus);
3199
3200 sstatus = nv_get32(bar5_hdl,
3201 nvc->nvc_port[port].nvp_sstatus);
3202
3203 if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
3204 SSTATUS_DET_DEVPRE_PHYCOM) {
3205 nv_link_event(nvp, NV_REM_DEV);
3206 } else {
3207 nv_link_event(nvp, NV_ADD_DEV);
3208 }
3209 clear:
3210 /*
3211 * clear interrupt bits. explicit interrupt clear is
3212 * required for hotplug interrupts.
3213 */
3214 nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);
3215
3216 /*
3217 * make sure it's flushed and cleared. If not try
3218 * again. Sometimes it has been observed to not clear
3219 * on the first try.
3220 */
3221 intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3222
3223 /*
3224 * make 10 additional attempts to clear the interrupt
3225 */
3226 for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
3227 NVLOG(NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
3228 "still not clear try=%d", intr_status,
3229 ++nvcleared);
3230 nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
3231 clear_bits);
3232 intr_status = nv_get8(bar5_hdl,
3233 nvc->nvc_ck804_int_status);
3234 }
3235
3236 /*
3237 * if still not clear, log a message and disable the
3238 * port. highly unlikely that this path is taken, but it
3239 * gives protection against a wedged interrupt.
3240 */
3241 if (intr_status & clear_bits) {
3242 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3243 nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
3244 SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
3245 nvp->nvp_state |= NV_FAILED;
3246 (void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3247 B_TRUE);
3248 nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
3249 "interrupt. disabling port intr_status=%X",
3250 intr_status);
3251 }
3252
3253 mutex_exit(&nvp->nvp_mutex);
3254 }
3255 }
3256
3257
3258 /*
3259 * Interrupt handler for mcp5x. It is invoked by the wrapper for each port
3260 * on the controller, to handle completion and hot plug and remove events.
3261 */
3262 static uint_t
3263 mcp5x_intr_port(nv_port_t *nvp)
3264 {
3265 nv_ctl_t *nvc = nvp->nvp_ctlp;
3266 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3267 uint8_t clear = 0, intr_cycles = 0;
3268 int ret = DDI_INTR_UNCLAIMED;
3269 uint16_t int_status;
3270 clock_t intr_time;
3271 int loop_cnt = 0;
3272
3273 nvp->intr_start_time = ddi_get_lbolt();
3274
3275 NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered", NULL);
3276
3277 do {
3278 /*
3279 * read current interrupt status
3280 */
3281 int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);
3282
3283 /*
3284 * if the port is deactivated, just clear the interrupt and
3285 * return. can get here even if interrupts were disabled
3286 * on this port but enabled on the other.
3287 */
3288 if (nvp->nvp_state & NV_DEACTIVATED) {
3289 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3290 int_status);
3291
3292 return (DDI_INTR_CLAIMED);
3293 }
3294
3295 NVLOG(NVDBG_INTR, nvc, nvp, "int_status = %x", int_status);
3296
3297 DTRACE_PROBE1(int_status_before_h, int, int_status);
3298
3299 /*
3300 * MCP5X_INT_IGNORE interrupts will show up in the status,
3301 * but are masked out from causing an interrupt to be generated
3302 * to the processor. Ignore them here by masking them out.
3303 */
3304 int_status &= ~(MCP5X_INT_IGNORE);
3305
3306 DTRACE_PROBE1(int_status_after_h, int, int_status);
3307
3308 /*
3309 * exit the loop when no more interrupts to process
3310 */
3311 if (int_status == 0) {
3312
3313 break;
3314 }
3315
3316 if (int_status & MCP5X_INT_COMPLETE) {
3317 NVLOG(NVDBG_INTR, nvc, nvp,
3318 "mcp5x_packet_complete_intr", NULL);
3319 /*
3320 * since int_status was set, return DDI_INTR_CLAIMED
3321 * from the DDI's perspective even though the packet
3322 * completion may not have succeeded. If it fails,
3323 * need to manually clear the interrupt, otherwise
3324 * clearing is implicit as a result of reading the
3325 * task file status register.
3326 */
3327 ret = DDI_INTR_CLAIMED;
3328 if (mcp5x_packet_complete_intr(nvc, nvp) ==
3329 NV_FAILURE) {
3330 clear |= MCP5X_INT_COMPLETE;
3331 } else {
3332 intr_cycles = 0;
3333 }
3334 }
3335
3336 if (int_status & MCP5X_INT_DMA_SETUP) {
3337 NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr",
3338 NULL);
3339
3340 /*
3341 * Needs to be cleared before starting the BM, so do it
3342 * now. make sure this is still working.
3343 */
3344 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
3345 MCP5X_INT_DMA_SETUP);
3346 #ifdef NCQ
3347 ret = mcp5x_dma_setup_intr(nvc, nvp);
3348 #endif
3349 }
3350
3351 if (int_status & MCP5X_INT_REM) {
3352 clear |= MCP5X_INT_REM;
3353 ret = DDI_INTR_CLAIMED;
3354
3355 mutex_enter(&nvp->nvp_mutex);
3356 nv_link_event(nvp, NV_REM_DEV);
3357 mutex_exit(&nvp->nvp_mutex);
3358
3359 } else if (int_status & MCP5X_INT_ADD) {
3360 clear |= MCP5X_INT_ADD;
3361 ret = DDI_INTR_CLAIMED;
3362
3363 mutex_enter(&nvp->nvp_mutex);
3364 nv_link_event(nvp, NV_ADD_DEV);
3365 mutex_exit(&nvp->nvp_mutex);
3366 }
3367 if (clear) {
3368 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
3369 clear = 0;
3370 }
3371
3372 /*
3373 * protect against a stuck interrupt
3374 */
3375 if (intr_cycles++ == NV_MAX_INTR_LOOP) {
3376
3377 NVLOG(NVDBG_INTR, nvc, nvp, "excessive interrupt "
3378 "processing. Disabling interrupts int_status=%X"
3379 " clear=%X", int_status, clear);
3380 DTRACE_PROBE(excessive_interrupts_f);
3381
3382 mutex_enter(&nvp->nvp_mutex);
3383 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
3384 /*
3385 * reset the device. If it remains inaccessible
3386 * after a reset it will be failed then.
3387 */
3388 (void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
3389 B_TRUE);
3390 mutex_exit(&nvp->nvp_mutex);
3391 }
3392
3393 } while (loop_cnt++ < nv_max_intr_loops);
3394
3395 if (loop_cnt > nvp->intr_loop_cnt) {
3396 NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp,
3397 "Exiting with multiple intr loop count %d", loop_cnt);
3398 nvp->intr_loop_cnt = loop_cnt;
3399 }
3400
3401 if ((nv_debug_flags & (NVDBG_INTR | NVDBG_VERBOSE)) ==
3402 (NVDBG_INTR | NVDBG_VERBOSE)) {
3403 uint8_t status, bmstatus;
3404 uint16_t int_status2;
3405
3406 if (int_status & MCP5X_INT_COMPLETE) {
3407 status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3408 bmstatus = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmisx);
3409 int_status2 = nv_get16(nvp->nvp_ctlp->nvc_bar_hdl[5],
3410 nvp->nvp_mcp5x_int_status);
3411 NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
3412 "mcp55_intr_port: Exiting with altstatus %x, "
3413 "bmicx %x, int_status2 %X, int_status %X, ret %x,"
3414 " loop_cnt %d ", status, bmstatus, int_status2,
3415 int_status, ret, loop_cnt);
3416 }
3417 }
3418
3419 NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret);
3420
3421 /*
3422 * To facilitate debugging, keep track of the length of time spent in
3423 * the port interrupt routine.
3424 */
3425 intr_time = ddi_get_lbolt() - nvp->intr_start_time;
3426 if (intr_time > nvp->intr_duration)
3427 nvp->intr_duration = intr_time;
3428
3429 return (ret);
3430 }
3431
3432
3433 /* ARGSUSED */
3434 static uint_t
3435 mcp5x_intr(caddr_t arg1, caddr_t arg2)
3436 {
3437 nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3438 int ret;
3439
3440 if (nvc->nvc_state & NV_CTRL_SUSPEND)
3441 return (DDI_INTR_UNCLAIMED);
3442
3443 ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3444 ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3445
3446 return (ret);
3447 }
3448
3449
#ifdef NCQ
/*
 * with software driven NCQ on mcp5x, an interrupt occurs right
 * before the drive is ready to do a DMA transfer. At this point,
 * the PRD table needs to be programmed and the DMA engine enabled
 * and ready to go.
 *
 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
 * -- clear bit 0 of master command reg
 * -- program PRD
 * -- clear the interrupt status bit for the DMA Setup FIS
 * -- set bit 0 of the bus master command register
 */
static int
mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
{
	int slot;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	uint8_t bmicx;
	int port = nvp->nvp_port_num;
	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};

	nv_cmn_err(CE_PANIC, nvc, nvp,
	    "this should not be executed at all until NCQ");

	mutex_enter(&nvp->nvp_mutex);

	/*
	 * determine which slot (tag) just became ready for its DMA
	 * transfer, using the per-port tag field in the NCQ register.
	 */
	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);

	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;

	NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache);

	/*
	 * halt the DMA engine. This step is necessary according to
	 * the mcp5x spec, probably since there may have been a "first" packet
	 * that already programmed the DMA engine, but may not turn out to
	 * be the first one processed.
	 */
	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);

	if (bmicx & BMICX_SSBM) {
		NVLOG(NVDBG_INTR, nvc, nvp, "BM was already enabled for "
		    "another packet. Cancelling and reprogramming", NULL);
	}

	/*
	 * clear SSBM to stop the bus master; a single unconditional
	 * write suffices whether or not the engine was running.
	 */
	nv_put8(bmhdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);

	nv_start_dma_engine(nvp, slot);

	mutex_exit(&nvp->nvp_mutex);

	return (DDI_INTR_CLAIMED);
}
#endif /* NCQ */
3508
3509
3510 /*
3511 * packet completion interrupt. If the packet is complete, invoke
3512 * the packet completion callback.
3513 */
3514 static int
3515 mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
3516 {
3517 uint8_t status, bmstatus;
3518 ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
3519 int sactive;
3520 int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
3521 sata_pkt_t *spkt;
3522 nv_slot_t *nv_slotp;
3523
3524 mutex_enter(&nvp->nvp_mutex);
3525
3526 bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);
3527
3528 if (!(bmstatus & (BMISX_IDEINTS | BMISX_IDERR))) {
3529 DTRACE_PROBE1(bmstatus_h, int, bmstatus);
3530 NVLOG(NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set %x",
3531 bmstatus);
3532 mutex_exit(&nvp->nvp_mutex);
3533
3534 return (NV_FAILURE);
3535 }
3536
3537 /*
3538 * Commands may have been processed by abort or timeout before
3539 * interrupt processing acquired the mutex. So we may be processing
3540 * an interrupt for packets that were already removed.
3541 * For functioning NCQ processing all slots may be checked, but
3542 * with NCQ disabled (current code), relying on *_run flags is OK.
3543 */
3544 if (nvp->nvp_non_ncq_run) {
3545 /*
3546 * If the just completed item is a non-ncq command, the busy
3547 * bit should not be set
3548 */
3549 status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
3550 if (status & SATA_STATUS_BSY) {
3551 nv_cmn_err(CE_WARN, nvc, nvp,
3552 "unexpected SATA_STATUS_BSY set");
3553 DTRACE_PROBE(unexpected_status_bsy_p);
3554 mutex_exit(&nvp->nvp_mutex);
3555 /*
3556 * calling function will clear interrupt. then
3557 * the real interrupt will either arrive or the
3558 * packet timeout handling will take over and
3559 * reset.
3560 */
3561 return (NV_FAILURE);
3562 }
3563 ASSERT(nvp->nvp_ncq_run == 0);
3564 } else {
3565 ASSERT(nvp->nvp_non_ncq_run == 0);
3566 /*
3567 * Pre-NCQ code!
3568 * Nothing to do. The packet for the command that just
3569 * completed is already gone. Just clear the interrupt.
3570 */
3571 (void) nv_bm_status_clear(nvp);
3572 (void) nv_get8(nvp->nvp_cmd_hdl, nvp->nvp_status);
3573 mutex_exit(&nvp->nvp_mutex);
3574 return (NV_SUCCESS);
3575
3576 /*
3577 * NCQ check for BSY here and wait if still bsy before
3578 * continuing. Rather than wait for it to be cleared
3579 * when starting a packet and wasting CPU time, the starting
3580 * thread can exit immediate, but might have to spin here
3581 * for a bit possibly. Needs more work and experimentation.
3582 *
3583 */
3584 }
3585
3586 /*
3587 * active_pkt_bit will represent the bitmap of the single completed
3588 * packet. Because of the nature of sw assisted NCQ, only one
3589 * command will complete per interrupt.
3590 */
3591
3592 if (ncq_command == B_FALSE) {
3593 active_pkt = 0;
3594 } else {
3595 /*
3596 * NCQ: determine which command just completed, by examining
3597 * which bit cleared in the register since last written.
3598 */
3599 sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);
3600
3601 active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;
3602
3603 ASSERT(active_pkt_bit);
3604
3605
3606 /*
3607 * this failure path needs more work to handle the
3608 * error condition and recovery.
3609 */
3610 if (active_pkt_bit == 0) {
3611 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
3612
3613 nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X "
3614 "nvp->nvp_sactive %X", sactive,
3615 nvp->nvp_sactive_cache);
3616
3617 (void) nv_get8(cmdhdl, nvp->nvp_status);
3618
3619 mutex_exit(&nvp->nvp_mutex);
3620
3621 return (NV_FAILURE);
3622 }
3623
3624 for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
3625 active_pkt++, active_pkt_bit >>= 1) {
3626 }
3627
3628 /*
3629 * make sure only one bit is ever turned on
3630 */
3631 ASSERT(active_pkt_bit == 1);
3632
3633 nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
3634 }
3635
3636 nv_slotp = &(nvp->nvp_slot[active_pkt]);
3637
3638 spkt = nv_slotp->nvslot_spkt;
3639
3640 ASSERT(spkt != NULL);
3641
3642 (*nv_slotp->nvslot_intr)(nvp, nv_slotp);
3643
3644 nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
3645
3646 if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
3647
3648 nv_complete_io(nvp, spkt, active_pkt);
3649 }
3650
3651 mutex_exit(&nvp->nvp_mutex);
3652
3653 return (NV_SUCCESS);
3654 }
3655
3656
3657 static void
3658 nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
3659 {
3660
3661 ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
3662
3663 if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
3664 nvp->nvp_ncq_run--;
3665 } else {
3666 nvp->nvp_non_ncq_run--;
3667 }
3668
3669 /*
3670 * mark the packet slot idle so it can be reused. Do this before
3671 * calling satapkt_comp so the slot can be reused.
3672 */
3673 (&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;
3674
3675 if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
3676 /*
3677 * If this is not timed polled mode cmd, which has an
3678 * active thread monitoring for completion, then need
3679 * to signal the sleeping thread that the cmd is complete.
3680 */
3681 if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
3682 cv_signal(&nvp->nvp_sync_cv);
3683 }
3684
3685 return;
3686 }
3687
3688 if (spkt->satapkt_comp != NULL) {
3689 mutex_exit(&nvp->nvp_mutex);
3690 (*spkt->satapkt_comp)(spkt);
3691 mutex_enter(&nvp->nvp_mutex);
3692 }
3693 }
3694
3695
3696 /*
3697 * check whether packet is ncq command or not. for ncq command,
3698 * start it if there is still room on queue. for non-ncq command only
3699 * start if no other command is running.
3700 */
3701 static int
3702 nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
3703 {
3704 uint8_t cmd, ncq;
3705
3706 NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry", NULL);
3707
3708 cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
3709
3710 ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
3711 (cmd == SATAC_READ_FPDMA_QUEUED));
3712
3713 if (ncq == B_FALSE) {
3714
3715 if ((nvp->nvp_non_ncq_run == 1) ||
3716 (nvp->nvp_ncq_run > 0)) {
3717 /*
3718 * next command is non-ncq which can't run
3719 * concurrently. exit and return queue full.
3720 */
3721 spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3722
3723 return (SATA_TRAN_QUEUE_FULL);
3724 }
3725
3726 return (nv_start_common(nvp, spkt));
3727 }
3728
3729 /*
3730 * ncq == B_TRUE
3731 */
3732 if (nvp->nvp_non_ncq_run == 1) {
3733 /*
3734 * cannot start any NCQ commands when there
3735 * is a non-NCQ command running.
3736 */
3737 spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3738
3739 return (SATA_TRAN_QUEUE_FULL);
3740 }
3741
3742 #ifdef NCQ
3743 /*
3744 * this is not compiled for now as satapkt_device.satadev_qdepth
3745 * is being pulled out until NCQ support is later addressed
3746 *
3747 * nvp_queue_depth is initialized by the first NCQ command
3748 * received.
3749 */
3750 if (nvp->nvp_queue_depth == 1) {
3751 nvp->nvp_queue_depth =
3752 spkt->satapkt_device.satadev_qdepth;
3753
3754 ASSERT(nvp->nvp_queue_depth > 1);
3755
3756 NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
3757 "nv_process_queue: nvp_queue_depth set to %d",
3758 nvp->nvp_queue_depth);
3759 }
3760 #endif
3761
3762 if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
3763 /*
3764 * max number of NCQ commands already active
3765 */
3766 spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;
3767
3768 return (SATA_TRAN_QUEUE_FULL);
3769 }
3770
3771 return (nv_start_common(nvp, spkt));
3772 }
3773
3774
3775 /*
3776 * configure INTx and legacy interrupts
3777 */
3778 static int
3779 nv_add_legacy_intrs(nv_ctl_t *nvc)
3780 {
3781 dev_info_t *devinfo = nvc->nvc_dip;
3782 int actual, count = 0;
3783 int x, y, rc, inum = 0;
3784
3785 NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_legacy_intrs", NULL);
3786
3787 /*
3788 * get number of interrupts
3789 */
3790 rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
3791 if ((rc != DDI_SUCCESS) || (count == 0)) {
3792 NVLOG(NVDBG_INIT, nvc, NULL,
3793 "ddi_intr_get_nintrs() failed, "
3794 "rc %d count %d", rc, count);
3795
3796 return (DDI_FAILURE);
3797 }
3798
3799 /*
3800 * allocate an array of interrupt handles
3801 */
3802 nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
3803 nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);
3804
3805 /*
3806 * call ddi_intr_alloc()
3807 */
3808 rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
3809 inum, count, &actual, DDI_INTR_ALLOC_STRICT);
3810
3811 if ((rc != DDI_SUCCESS) || (actual == 0)) {
3812 nv_cmn_err(CE_WARN, nvc, NULL,
3813 "ddi_intr_alloc() failed, rc %d", rc);
3814 kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3815
3816 return (DDI_FAILURE);
3817 }
3818
3819 if (actual < count) {
3820 nv_cmn_err(CE_WARN, nvc, NULL,
3821 "ddi_intr_alloc: requested: %d, received: %d",
3822 count, actual);
3823
3824 goto failure;
3825 }
3826
3827 nvc->nvc_intr_cnt = actual;
3828
3829 /*
3830 * get intr priority
3831 */
3832 if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
3833 DDI_SUCCESS) {
3834 nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");
3835
3836 goto failure;
3837 }
3838
3839 /*
3840 * Test for high level mutex
3841 */
3842 if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
3843 nv_cmn_err(CE_WARN, nvc, NULL,
3844 "nv_add_legacy_intrs: high level intr not supported");
3845
3846 goto failure;
3847 }
3848
3849 for (x = 0; x < actual; x++) {
3850 if (ddi_intr_add_handler(nvc->nvc_htable[x],
3851 nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
3852 nv_cmn_err(CE_WARN, nvc, NULL,
3853 "ddi_intr_add_handler() failed");
3854
3855 goto failure;
3856 }
3857 }
3858
3859 /*
3860 * call ddi_intr_enable() for legacy interrupts
3861 */
3862 for (x = 0; x < nvc->nvc_intr_cnt; x++) {
3863 (void) ddi_intr_enable(nvc->nvc_htable[x]);
3864 }
3865
3866 return (DDI_SUCCESS);
3867
3868 failure:
3869 /*
3870 * free allocated intr and nvc_htable
3871 */
3872 for (y = 0; y < actual; y++) {
3873 (void) ddi_intr_free(nvc->nvc_htable[y]);
3874 }
3875
3876 kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
3877
3878 return (DDI_FAILURE);
3879 }
3880
#ifdef NV_MSI_SUPPORTED
/*
 * configure MSI interrupts
 *
 * Allocates MSI vectors, installs the controller's interrupt handler
 * on each, and enables them (block enable when supported). On
 * failure, any handlers already installed are removed before the
 * vectors are freed (ddi_intr_free() fails if a handler is still
 * attached).
 */
static int
nv_add_msi_intrs(nv_ctl_t *nvc)
{
	dev_info_t *devinfo = nvc->nvc_dip;
	int count, avail, actual;
	int x, y, rc, inum = 0;
	int nhandlers = 0;	/* handlers successfully installed */

	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_msi_intrs", NULL);

	/*
	 * get number of interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
	if ((rc != DDI_SUCCESS) || (count == 0)) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_nintrs() failed, "
		    "rc %d count %d", rc, count);

		return (DDI_FAILURE);
	}

	/*
	 * get number of available interrupts
	 */
	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
	if ((rc != DDI_SUCCESS) || (avail == 0)) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_navail() failed, "
		    "rc %d avail %d", rc, avail);

		return (DDI_FAILURE);
	}

	if (avail < count) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
		    avail, count);
	}

	/*
	 * allocate an array of interrupt handles. Use kmem_zalloc()
	 * (as the legacy interrupt path does) so unused entries are
	 * NULL rather than uninitialized.
	 */
	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);

	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);

	if ((rc != DDI_SUCCESS) || (actual == 0)) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_alloc() failed, rc %d", rc);
		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);

		return (DDI_FAILURE);
	}

	/*
	 * Use interrupt count returned or abort?
	 */
	if (actual < count) {
		NVLOG(NVDBG_INIT, nvc, NULL,
		    "Requested: %d, Received: %d", count, actual);
	}

	nvc->nvc_intr_cnt = actual;

	/*
	 * get priority for first msi, assume remaining are all the same
	 */
	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
	    DDI_SUCCESS) {
		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");

		goto failure;
	}

	/*
	 * test for high level mutex
	 */
	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "nv_add_msi_intrs: high level intr not supported");

		goto failure;
	}

	/*
	 * install the handler on each vector, tracking how many were
	 * installed so failure cleanup can remove exactly those.
	 */
	for (x = 0; x < actual; x++) {
		if (ddi_intr_add_handler(nvc->nvc_htable[x],
		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
			nv_cmn_err(CE_WARN, nvc, NULL,
			    "ddi_intr_add_handler() failed");

			goto failure;
		}
		nhandlers++;
	}

	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);

	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_enable(nvc->nvc_htable,
		    nvc->nvc_intr_cnt);
	} else {
		/*
		 * Call ddi_intr_enable() for MSI non block enable
		 */
		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
			(void) ddi_intr_enable(nvc->nvc_htable[x]);
		}
	}

	return (DDI_SUCCESS);

failure:
	/*
	 * remove any handlers that were installed, then free the
	 * allocated intrs and nvc_htable.
	 */
	for (y = 0; y < nhandlers; y++) {
		(void) ddi_intr_remove_handler(nvc->nvc_htable[y]);
	}

	for (y = 0; y < actual; y++) {
		(void) ddi_intr_free(nvc->nvc_htable[y]);
	}

	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);

	return (DDI_FAILURE);
}
#endif
4013
4014
4015 static void
4016 nv_rem_intrs(nv_ctl_t *nvc)
4017 {
4018 int x, i;
4019 nv_port_t *nvp;
4020
4021 NVLOG(NVDBG_INIT, nvc, NULL, "nv_rem_intrs", NULL);
4022
4023 /*
4024 * prevent controller from generating interrupts by
4025 * masking them out. This is an extra precaution.
4026 */
4027 for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
4028 nvp = (&nvc->nvc_port[i]);
4029 mutex_enter(&nvp->nvp_mutex);
4030 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
4031 mutex_exit(&nvp->nvp_mutex);
4032 }
4033
4034 /*
4035 * disable all interrupts
4036 */
4037 if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
4038 (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
4039 (void) ddi_intr_block_disable(nvc->nvc_htable,
4040 nvc->nvc_intr_cnt);
4041 } else {
4042 for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4043 (void) ddi_intr_disable(nvc->nvc_htable[x]);
4044 }
4045 }
4046
4047 for (x = 0; x < nvc->nvc_intr_cnt; x++) {
4048 (void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
4049 (void) ddi_intr_free(nvc->nvc_htable[x]);
4050 }
4051
4052 kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
4053 }
4054
4055
4056 /*
4057 * variable argument wrapper for cmn_err. prefixes the instance and port
4058 * number if possible
4059 */
4060 static void
4061 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, va_list ap,
4062 boolean_t log_to_sata_ring)
4063 {
4064 char port[NV_STR_LEN];
4065 char inst[NV_STR_LEN];
4066 dev_info_t *dip;
4067
4068 if (nvc) {
4069 (void) snprintf(inst, NV_STR_LEN, "inst%d ",
4070 ddi_get_instance(nvc->nvc_dip));
4071 dip = nvc->nvc_dip;
4072 } else {
4073 inst[0] = '\0';
4074 }
4075
4076 if (nvp) {
4077 (void) snprintf(port, NV_STR_LEN, "port%d",
4078 nvp->nvp_port_num);
4079 dip = nvp->nvp_ctlp->nvc_dip;
4080 } else {
4081 port[0] = '\0';
4082 }
4083
4084 mutex_enter(&nv_log_mutex);
4085
4086 (void) sprintf(nv_log_buf, "%s%s%s", inst, port,
4087 (inst[0]|port[0] ? ": " :""));
4088
4089 (void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4090 NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4091
4092 /*
4093 * Log to console or log to file, depending on
4094 * nv_log_to_console setting.
4095 */
4096 if (nv_log_to_console) {
4097 if (nv_prom_print) {
4098 prom_printf("%s\n", nv_log_buf);
4099 } else {
4100 cmn_err(ce, "%s\n", nv_log_buf);
4101 }
4102 } else {
4103 cmn_err(ce, "!%s", nv_log_buf);
4104 }
4105
4106 if (log_to_sata_ring == B_TRUE) {
4107 (void) sprintf(nv_log_buf, "%s%s", port, (port[0] ? ": " :""));
4108
4109 (void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4110 NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4111
4112 sata_trace_debug(dip, nv_log_buf);
4113 }
4114
4115 mutex_exit(&nv_log_mutex);
4116 }
4117
4118
4119 /*
4120 * wrapper for cmn_err
4121 */
4122 static void
4123 nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
4124 {
4125 va_list ap;
4126
4127 va_start(ap, fmt);
4128 nv_vcmn_err(ce, nvc, nvp, fmt, ap, B_TRUE);
4129 va_end(ap);
4130 }
4131
4132
/*
 * Debug logging entry point. Optionally mirrors the message to
 * cmn_err() (when nv_log_to_cmn_err is set) and always sends it to
 * the sata module trace ring, prefixing the port number when a port
 * is supplied.
 */
static void
nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...)
{
	va_list ap;

	if (nv_log_to_cmn_err == B_TRUE) {
		/*
		 * separate va_start/va_end pair: ap cannot be reused
		 * after nv_vcmn_err() has consumed it.
		 */
		va_start(ap, fmt);
		nv_vcmn_err(CE_CONT, nvc, nvp, fmt, ap, B_FALSE);
		va_end(ap);

	}

	va_start(ap, fmt);

	if (nvp == NULL && nvc == NULL) {
		/* no instance context available at all */
		sata_vtrace_debug(NULL, fmt, ap);
		va_end(ap);

		return;
	}

	if (nvp == NULL && nvc != NULL) {
		/* controller context only; no port prefix */
		sata_vtrace_debug(nvc->nvc_dip, fmt, ap);
		va_end(ap);

		return;
	}

	/*
	 * nvp is not NULL, but nvc might be. Reference nvp for both
	 * port and dip, to get the port number prefixed on the
	 * message.
	 */
	mutex_enter(&nv_log_mutex);

	/* nv_log_buf is shared; nv_log_mutex serializes its use */
	(void) snprintf(nv_log_buf, NV_LOGBUF_LEN, "port%d: %s",
	    nvp->nvp_port_num, fmt);

	sata_vtrace_debug(nvp->nvp_ctlp->nvc_dip, nv_log_buf, ap);

	mutex_exit(&nv_log_mutex);

	va_end(ap);
}
4177
4178
4179 /*
4180 * program registers which are common to all commands
4181 */
4182 static void
4183 nv_program_taskfile_regs(nv_port_t *nvp, int slot)
4184 {
4185 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4186 sata_pkt_t *spkt;
4187 sata_cmd_t *satacmd;
4188 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4189 uint8_t cmd, ncq = B_FALSE;
4190
4191 spkt = nv_slotp->nvslot_spkt;
4192 satacmd = &spkt->satapkt_cmd;
4193 cmd = satacmd->satacmd_cmd_reg;
4194
4195 ASSERT(nvp->nvp_slot);
4196
4197 if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4198 (cmd == SATAC_READ_FPDMA_QUEUED)) {
4199 ncq = B_TRUE;
4200 }
4201
4202 /*
4203 * select the drive
4204 */
4205 nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);
4206
4207 /*
4208 * make certain the drive selected
4209 */
4210 if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
4211 NV_SEC2USEC(5), 0) == B_FALSE) {
4212
4213 return;
4214 }
4215
4216 switch (spkt->satapkt_cmd.satacmd_addr_type) {
4217
4218 case ATA_ADDR_LBA:
4219 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode",
4220 NULL);
4221
4222 nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4223 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4224 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4225 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4226 nv_put8(cmdhdl, nvp->nvp_feature,
4227 satacmd->satacmd_features_reg);
4228
4229
4230 break;
4231
4232 case ATA_ADDR_LBA28:
4233 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4234 "ATA_ADDR_LBA28 mode", NULL);
4235 /*
4236 * NCQ only uses 48-bit addressing
4237 */
4238 ASSERT(ncq != B_TRUE);
4239
4240 nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4241 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4242 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4243 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4244 nv_put8(cmdhdl, nvp->nvp_feature,
4245 satacmd->satacmd_features_reg);
4246
4247 break;
4248
4249 case ATA_ADDR_LBA48:
4250 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4251 "ATA_ADDR_LBA48 mode", NULL);
4252
4253 /*
4254 * for NCQ, tag goes into count register and real sector count
4255 * into features register. The sata module does the translation
4256 * in the satacmd.
4257 */
4258 if (ncq == B_TRUE) {
4259 nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
4260 } else {
4261 nv_put8(cmdhdl, nvp->nvp_count,
4262 satacmd->satacmd_sec_count_msb);
4263 nv_put8(cmdhdl, nvp->nvp_count,
4264 satacmd->satacmd_sec_count_lsb);
4265 }
4266
4267 nv_put8(cmdhdl, nvp->nvp_feature,
4268 satacmd->satacmd_features_reg_ext);
4269 nv_put8(cmdhdl, nvp->nvp_feature,
4270 satacmd->satacmd_features_reg);
4271
4272 /*
4273 * send the high-order half first
4274 */
4275 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
4276 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
4277 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);
4278
4279 /*
4280 * Send the low-order half
4281 */
4282 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4283 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4284 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4285
4286 break;
4287
4288 case 0:
4289 /*
4290 * non-media access commands such as identify and features
4291 * take this path.
4292 */
4293 nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
4294 nv_put8(cmdhdl, nvp->nvp_feature,
4295 satacmd->satacmd_features_reg);
4296 nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
4297 nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
4298 nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
4299
4300 break;
4301
4302 default:
4303 break;
4304 }
4305
4306 ASSERT(nvp->nvp_slot);
4307 }
4308
4309
4310 /*
4311 * start a command that involves no media access
4312 */
4313 static int
4314 nv_start_nodata(nv_port_t *nvp, int slot)
4315 {
4316 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4317 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4318 sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4319 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4320
4321 nv_program_taskfile_regs(nvp, slot);
4322
4323 /*
4324 * This next one sets the controller in motion
4325 */
4326 nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
4327
4328 return (SATA_TRAN_ACCEPTED);
4329 }
4330
4331
4332 static int
4333 nv_bm_status_clear(nv_port_t *nvp)
4334 {
4335 ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4336 uchar_t status, ret;
4337
4338 /*
4339 * Get the current BM status
4340 */
4341 ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
4342
4343 status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
4344
4345 /*
4346 * Clear the latches (and preserve the other bits)
4347 */
4348 nv_put8(bmhdl, nvp->nvp_bmisx, status);
4349
4350 return (ret);
4351 }
4352
4353
4354 /*
4355 * program the bus master DMA engine with the PRD address for
4356 * the active slot command, and start the DMA engine.
4357 */
4358 static void
4359 nv_start_dma_engine(nv_port_t *nvp, int slot)
4360 {
4361 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4362 ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4363 uchar_t direction;
4364
4365 ASSERT(nv_slotp->nvslot_spkt != NULL);
4366
4367 if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
4368 == SATA_DIR_READ) {
4369 direction = BMICX_RWCON_WRITE_TO_MEMORY;
4370 } else {
4371 direction = BMICX_RWCON_READ_FROM_MEMORY;
4372 }
4373
4374 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4375 "nv_start_dma_engine entered", NULL);
4376
4377 #if NOT_USED
4378 /*
4379 * NOT NEEDED. Left here of historical reason.
4380 * Reset the controller's interrupt and error status bits.
4381 */
4382 (void) nv_bm_status_clear(nvp);
4383 #endif
4384 /*
4385 * program the PRD table physical start address
4386 */
4387 nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
4388
4389 /*
4390 * set the direction control and start the DMA controller
4391 */
4392 nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
4393 }
4394
4395 /*
4396 * start dma command, either in or out
4397 */
4398 static int
4399 nv_start_dma(nv_port_t *nvp, int slot)
4400 {
4401 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4402 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4403 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4404 sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4405 uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
4406 #ifdef NCQ
4407 uint8_t ncq = B_FALSE;
4408 #endif
4409 ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
4410 uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
4411 int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
4412 ddi_dma_cookie_t *srcp = sata_cmdp->satacmd_dma_cookie_list;
4413
4414 ASSERT(sg_count != 0);
4415
4416 if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
4417 nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
4418 " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
4419 sata_cmdp->satacmd_num_dma_cookies);
4420
4421 return (NV_FAILURE);
4422 }
4423
4424 nv_program_taskfile_regs(nvp, slot);
4425
4426 /*
4427 * start the drive in motion
4428 */
4429 nv_put8(cmdhdl, nvp->nvp_cmd, cmd);
4430
4431 /*
4432 * the drive starts processing the transaction when the cmd register
4433 * is written. This is done here before programming the DMA engine to
4434 * parallelize and save some time. In the event that the drive is ready
4435 * before DMA, it will wait.
4436 */
4437 #ifdef NCQ
4438 if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
4439 (cmd == SATAC_READ_FPDMA_QUEUED)) {
4440 ncq = B_TRUE;
4441 }
4442 #endif
4443
4444 /*
4445 * copy the PRD list to PRD table in DMA accessible memory
4446 * so that the controller can access it.
4447 */
4448 for (idx = 0; idx < sg_count; idx++, srcp++) {
4449 uint32_t size;
4450
4451 nv_put32(sghdl, dstp++, srcp->dmac_address);
4452
4453 /* Set the number of bytes to transfer, 0 implies 64KB */
4454 size = srcp->dmac_size;
4455 if (size == 0x10000)
4456 size = 0;
4457
4458 /*
4459 * If this is a 40-bit address, copy bits 32-40 of the
4460 * physical address to bits 16-24 of the PRD count.
4461 */
4462 if (srcp->dmac_laddress > UINT32_MAX) {
4463 size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
4464 }
4465
4466 /*
4467 * set the end of table flag for the last entry
4468 */
4469 if (idx == (sg_count - 1)) {
4470 size |= PRDE_EOT;
4471 }
4472
4473 nv_put32(sghdl, dstp++, size);
4474 }
4475
4476 (void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
4477 sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);
4478
4479 nv_start_dma_engine(nvp, slot);
4480
4481 #ifdef NCQ
4482 /*
4483 * optimization: for SWNCQ, start DMA engine if this is the only
4484 * command running. Preliminary NCQ efforts indicated this needs
4485 * more debugging.
4486 *
4487 * if (nvp->nvp_ncq_run <= 1)
4488 */
4489
4490 if (ncq == B_FALSE) {
4491 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4492 "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
4493 " cmd = %X", non_ncq_commands++, cmd);
4494 nv_start_dma_engine(nvp, slot);
4495 } else {
4496 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "NCQ, so program "
4497 "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd);
4498 }
4499 #endif /* NCQ */
4500
4501 return (SATA_TRAN_ACCEPTED);
4502 }
4503
4504
4505 /*
4506 * start a PIO data-in ATA command
4507 */
4508 static int
4509 nv_start_pio_in(nv_port_t *nvp, int slot)
4510 {
4511
4512 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4513 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4514 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4515
4516 nv_program_taskfile_regs(nvp, slot);
4517
4518 /*
4519 * This next one sets the drive in motion
4520 */
4521 nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4522
4523 return (SATA_TRAN_ACCEPTED);
4524 }
4525
4526
4527 /*
4528 * start a PIO data-out ATA command
4529 */
4530 static int
4531 nv_start_pio_out(nv_port_t *nvp, int slot)
4532 {
4533 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4534 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4535 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4536
4537 nv_program_taskfile_regs(nvp, slot);
4538
4539 /*
4540 * this next one sets the drive in motion
4541 */
4542 nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4543
4544 /*
4545 * wait for the busy bit to settle
4546 */
4547 NV_DELAY_NSEC(400);
4548
4549 /*
4550 * wait for the drive to assert DRQ to send the first chunk
4551 * of data. Have to busy wait because there's no interrupt for
4552 * the first chunk. This is bad... uses a lot of cycles if the
4553 * drive responds too slowly or if the wait loop granularity
4554 * is too large. It's even worse if the drive is defective and
4555 * the loop times out.
4556 */
4557 if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
4558 SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
4559 SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
4560 4000000, 0) == B_FALSE) {
4561 spkt->satapkt_reason = SATA_PKT_TIMEOUT;
4562
4563 goto error;
4564 }
4565
4566 /*
4567 * send the first block.
4568 */
4569 nv_intr_pio_out(nvp, nv_slotp);
4570
4571 /*
4572 * If nvslot_flags is not set to COMPLETE yet, then processing
4573 * is OK so far, so return. Otherwise, fall into error handling
4574 * below.
4575 */
4576 if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {
4577
4578 return (SATA_TRAN_ACCEPTED);
4579 }
4580
4581 error:
4582 /*
4583 * there was an error so reset the device and complete the packet.
4584 */
4585 nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
4586 nv_complete_io(nvp, spkt, 0);
4587 nv_reset(nvp, "pio_out");
4588
4589 return (SATA_TRAN_PORT_ERROR);
4590 }
4591
4592
4593 /*
4594 * start a ATAPI Packet command (PIO data in or out)
4595 */
static int
nv_start_pkt_pio(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	sata_cmd_t *satacmd = &spkt->satapkt_cmd;

	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_pkt_pio: start", NULL);

	/*
	 * Write the PACKET command to the command register. Normally
	 * this would be done through nv_program_taskfile_regs(). It
	 * is done here because some values need to be overridden.
	 */

	/* select the drive */
	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);

	/* make certain the drive selected */
	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
	    NV_SEC2USEC(5), 0) == B_FALSE) {
		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_start_pkt_pio: drive select failed", NULL);
		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * The command is always sent via PIO, despite whatever the SATA
	 * common module sets in the command. Overwrite the DMA bit to do this.
	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
	 */
	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */

	/* set appropriately by the sata common module */
	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);

	/* initiate the command by writing the command register last */
	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);

	/* Give the host controller time to do its thing */
	NV_DELAY_NSEC(400);

	/*
	 * Wait for the device to indicate that it is ready for the command
	 * ATAPI protocol state - HP0: Check_Status_A
	 */

	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
	    4000000, 0) == B_FALSE) {
		/*
		 * Either an error or device fault occurred or the wait
		 * timed out. According to the ATAPI protocol, command
		 * completion is also possible. Other implementations of
		 * this protocol don't handle this last case, so neither
		 * does this code.
		 */

		if (nv_get8(cmdhdl, nvp->nvp_status) &
		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_pkt_pio: device error (HP0)", NULL);
		} else {
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_pkt_pio: timeout (HP0)", NULL);
		}

		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_complete_io(nvp, spkt, 0);
		nv_reset(nvp, "start_pkt_pio");

		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * Put the ATAPI command in the data register
	 * ATAPI protocol state - HP1: Send_Packet
	 *
	 * NOTE(review): the 16-bit writes assume satacmd_acdb_len is
	 * even (ATAPI CDBs are 12 or 16 bytes, so this holds).
	 */

	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
	    (ushort_t *)nvp->nvp_data,
	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);

	/*
	 * See you in nv_intr_pkt_pio.
	 * ATAPI protocol state - HP3: INTRQ_wait
	 */

	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_pkt_pio: exiting into HP3", NULL);

	return (SATA_TRAN_ACCEPTED);
}
4699
4700
4701 /*
4702 * Interrupt processing for a non-data ATA command.
4703 */
4704 static void
4705 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4706 {
4707 uchar_t status;
4708 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4709 sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4710 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4711 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4712
4713 NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered", NULL);
4714
4715 status = nv_get8(cmdhdl, nvp->nvp_status);
4716
4717 /*
4718 * check for errors
4719 */
4720 if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4721 spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4722 sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4723 nvp->nvp_altstatus);
4724 sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4725 } else {
4726 spkt->satapkt_reason = SATA_PKT_COMPLETED;
4727 }
4728
4729 nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4730 }
4731
4732
4733 /*
4734 * ATA command, PIO data in
4735 */
static void
nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int count;

	/*
	 * reading the status register also clears the interrupt
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	/*
	 * BSY at interrupt time is unexpected; treat it as a timeout,
	 * capture the register state and reset the port.
	 */
	if (status & SATA_STATUS_BSY) {
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
		nv_reset(nvp, "intr_pio_in");

		return;
	}

	/*
	 * check for errors.  For a healthy data-in phase DRQ must be
	 * set and DF/ERR must both be clear.
	 */
	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		return;
	}

	/*
	 * read the next chunk of data (if any), at most one sector's
	 * worth per interrupt
	 */
	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);

	/*
	 * read count bytes
	 */
	ASSERT(count != 0);

	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);

	nv_slotp->nvslot_v_addr += count;
	nv_slotp->nvslot_byte_count -= count;


	if (nv_slotp->nvslot_byte_count != 0) {
		/*
		 * more to transfer. Wait for next interrupt.
		 */
		return;
	}

	/*
	 * transfer is complete. wait for the busy bit to settle.
	 */
	NV_DELAY_NSEC(400);

	spkt->satapkt_reason = SATA_PKT_COMPLETED;
	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
}
4803
4804
4805 /*
4806 * ATA command PIO data out
4807 */
static void
nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	uchar_t status;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int count;

	/*
	 * clear the IRQ (reading the status register has this side
	 * effect)
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	if (status & SATA_STATUS_BSY) {
		/*
		 * this should not happen
		 */
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);

		return;
	}

	/*
	 * check for errors
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		return;
	}

	/*
	 * this is the condition which signals the drive is
	 * no longer ready to transfer. Likely that the transfer
	 * completed successfully, but check that byte_count is
	 * zero.
	 */
	if ((status & SATA_STATUS_DRQ) == 0) {

		if (nv_slotp->nvslot_byte_count == 0) {
			/*
			 * complete; successful transfer
			 */
			spkt->satapkt_reason = SATA_PKT_COMPLETED;
		} else {
			/*
			 * error condition, incomplete transfer
			 */
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		}
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

		return;
	}

	/*
	 * write the next chunk of data, at most one sector's worth
	 */
	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);

	/*
	 * read or write count bytes
	 */

	ASSERT(count != 0);

	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);

	nv_slotp->nvslot_v_addr += count;
	nv_slotp->nvslot_byte_count -= count;
}
4889
4890
4891 /*
4892 * ATAPI PACKET command, PIO in/out interrupt
4893 *
4894 * Under normal circumstances, one of four different interrupt scenarios
4895 * will result in this function being called:
4896 *
4897 * 1. Packet command data transfer
4898 * 2. Packet command completion
4899 * 3. Request sense data transfer
4900 * 4. Request sense command completion
4901 */
static void
nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	uint16_t ctlr_count;
	int count;

	/* ATAPI protocol state - HP2: Check_Status_B */

	/*
	 * reading the status register also clears the interrupt
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);
	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_intr_pkt_pio: status 0x%x", status);

	if (status & SATA_STATUS_BSY) {
		/*
		 * BSY at interrupt time is unexpected.  Only reset the
		 * port when this was a normal command; if it happened
		 * during a REQUEST SENSE just fail the packet.
		 */
		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		} else {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_reset(nvp, "intr_pkt_pio");
		}

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: busy - status 0x%x", status);

		return;
	}

	if ((status & SATA_STATUS_DF) != 0) {
		/*
		 * On device fault, just clean up and bail.  Request sense
		 * will just default to its NO SENSE initialized value.
		 */

		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		}

		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
		    nvp->nvp_error);

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: device fault", NULL);

		return;
	}

	if ((status & SATA_STATUS_ERR) != 0) {
		/*
		 * On command error, figure out whether we are processing a
		 * request sense.  If so, clean up and bail.  Otherwise,
		 * do a REQUEST SENSE.
		 */

		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
			    NV_FAILURE) {
				nv_copy_registers(nvp, &spkt->satapkt_device,
				    spkt);
				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
			}

			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
			    nvp->nvp_altstatus);
			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
			    nvp->nvp_error);
		} else {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		}

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: error (status 0x%x)", status);

		return;
	}

	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
		/*
		 * REQUEST SENSE command processing
		 */

		if ((status & (SATA_STATUS_DRQ)) != 0) {
			/* ATAPI state - HP4: Transfer_Data */

			/*
			 * read the byte count from the controller.  The
			 * device reports the transfer length in the
			 * cylinder high/low registers (high byte first).
			 */
			ctlr_count =
			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: ctlr byte count - %d",
			    ctlr_count);

			if (ctlr_count == 0) {
				/* no data to transfer - some devices do this */

				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
				    "nv_intr_pkt_pio: done (no data)", NULL);

				return;
			}

			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);

			/* transfer the data */
			ddi_rep_get16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);

			/*
			 * consume residual bytes - the device must see
			 * its entire advertised byte count drained before
			 * it will move on.
			 */
			ctlr_count -= count;

			if (ctlr_count > 0) {
				for (; ctlr_count > 0; ctlr_count -= 2)
					(void) ddi_get16(cmdhdl,
					    (ushort_t *)nvp->nvp_data);
			}

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: transition to HP2", NULL);
		} else {
			/* still in ATAPI state - HP2 */

			/*
			 * In order to avoid clobbering the rqsense data
			 * set by the SATA common module, the sense data read
			 * from the device is put in a separate buffer and
			 * copied into the packet after the request sense
			 * command successfully completes.
			 */
			bcopy(nv_slotp->nvslot_rqsense_buff,
			    spkt->satapkt_cmd.satacmd_rqsense,
			    SATA_ATAPI_RQSENSE_LEN);

			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: request sense done", NULL);
		}

		return;
	}

	/*
	 * Normal command processing
	 */

	if ((status & (SATA_STATUS_DRQ)) != 0) {
		/* ATAPI protocol state - HP4: Transfer_Data */

		/* read the byte count from the controller */
		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);

		if (ctlr_count == 0) {
			/* no data to transfer - some devices do this */

			spkt->satapkt_reason = SATA_PKT_COMPLETED;
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: done (no data)", NULL);

			return;
		}

		/*
		 * never transfer more than the remaining buffer space
		 */
		count = min(ctlr_count, nv_slotp->nvslot_byte_count);

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count);

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: byte_count 0x%x",
		    nv_slotp->nvslot_byte_count);

		/* transfer the data */

		if (direction == SATA_DIR_READ) {
			ddi_rep_get16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_v_addr,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);

			ctlr_count -= count;

			if (ctlr_count > 0) {
				/* consume remaining bytes */

				for (; ctlr_count > 0;
				    ctlr_count -= 2)
					(void) ddi_get16(cmdhdl,
					    (ushort_t *)nvp->nvp_data);

				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
				    "nv_intr_pkt_pio: bytes remained", NULL);
			}
		} else {
			ddi_rep_put16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_v_addr,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);
		}

		nv_slotp->nvslot_v_addr += count;
		nv_slotp->nvslot_byte_count -= count;

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: transition to HP2", NULL);
	} else {
		/* still in ATAPI state - HP2 */

		spkt->satapkt_reason = SATA_PKT_COMPLETED;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: done", NULL);
	}
}
5141
5142
5143 /*
5144 * ATA command, DMA data in/out
5145 */
static void
nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	uchar_t bmicx;
	uchar_t bmstatus;

	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

	/*
	 * stop DMA engine.  This is done before touching the drive's
	 * status register by clearing the start/stop bit (BMICX_SSBM)
	 * in the bus master command register.
	 */
	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
	nv_put8(bmhdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);

	/*
	 * get the status and clear the IRQ, and check for DMA error
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	/*
	 * check for drive errors
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		/* still clear the bus master status before returning */
		(void) nv_bm_status_clear(nvp);

		return;
	}

	bmstatus = nv_bm_status_clear(nvp);

	/*
	 * check for bus master errors
	 */

	if (bmstatus & BMISX_IDERR) {
		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
		nv_reset(nvp, "intr_dma");

		return;
	}

	spkt->satapkt_reason = SATA_PKT_COMPLETED;
}
5200
5201
5202 /*
5203 * Wait for a register of a controller to achieve a specific state.
5204 * To return normally, all the bits in the first sub-mask must be ON,
5205 * all the bits in the second sub-mask must be OFF.
 * If timeout_usec microseconds pass without the controller achieving
 * the desired bit configuration, return B_FALSE, else B_TRUE.
5208 *
5209 * hybrid waiting algorithm: if not in interrupt context, busy looping will
5210 * occur for the first 250 us, then switch over to a sleeping wait.
5211 *
5212 */
5213 int
5214 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
5215 int type_wait)
5216 {
5217 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5218 hrtime_t end, cur, start_sleep, start;
5219 int first_time = B_TRUE;
5220 ushort_t val;
5221
5222 for (;;) {
5223 val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5224
5225 if ((val & onbits) == onbits && (val & offbits) == 0) {
5226
5227 return (B_TRUE);
5228 }
5229
5230 cur = gethrtime();
5231
5232 /*
5233 * store the start time and calculate the end
5234 * time. also calculate "start_sleep" which is
5235 * the point after which the driver will stop busy
5236 * waiting and change to sleep waiting.
5237 */
5238 if (first_time) {
5239 first_time = B_FALSE;
5240 /*
5241 * start and end are in nanoseconds
5242 */
5243 start = cur;
5244 end = start + timeout_usec * 1000;
5245 /*
5246 * add 1 ms to start
5247 */
5248 start_sleep = start + 250000;
5249
5250 if (servicing_interrupt()) {
5251 type_wait = NV_NOSLEEP;
5252 }
5253 }
5254
5255 if (cur > end) {
5256
5257 break;
5258 }
5259
5260 if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5261 #if ! defined(__lock_lint)
5262 delay(1);
5263 #endif
5264 } else {
5265 drv_usecwait(nv_usec_delay);
5266 }
5267 }
5268
5269 return (B_FALSE);
5270 }
5271
5272
5273 /*
5274 * This is a slightly more complicated version that checks
5275 * for error conditions and bails-out rather than looping
5276 * until the timeout is exceeded.
5277 *
5278 * hybrid waiting algorithm: if not in interrupt context, busy looping will
5279 * occur for the first 250 us, then switch over to a sleeping wait.
5280 */
5281 int
5282 nv_wait3(
5283 nv_port_t *nvp,
5284 uchar_t onbits1,
5285 uchar_t offbits1,
5286 uchar_t failure_onbits2,
5287 uchar_t failure_offbits2,
5288 uchar_t failure_onbits3,
5289 uchar_t failure_offbits3,
5290 uint_t timeout_usec,
5291 int type_wait)
5292 {
5293 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5294 hrtime_t end, cur, start_sleep, start;
5295 int first_time = B_TRUE;
5296 ushort_t val;
5297
5298 for (;;) {
5299 val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5300
5301 /*
5302 * check for expected condition
5303 */
5304 if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
5305
5306 return (B_TRUE);
5307 }
5308
5309 /*
5310 * check for error conditions
5311 */
5312 if ((val & failure_onbits2) == failure_onbits2 &&
5313 (val & failure_offbits2) == 0) {
5314
5315 return (B_FALSE);
5316 }
5317
5318 if ((val & failure_onbits3) == failure_onbits3 &&
5319 (val & failure_offbits3) == 0) {
5320
5321 return (B_FALSE);
5322 }
5323
5324 /*
5325 * store the start time and calculate the end
5326 * time. also calculate "start_sleep" which is
5327 * the point after which the driver will stop busy
5328 * waiting and change to sleep waiting.
5329 */
5330 if (first_time) {
5331 first_time = B_FALSE;
5332 /*
5333 * start and end are in nanoseconds
5334 */
5335 cur = start = gethrtime();
5336 end = start + timeout_usec * 1000;
5337 /*
5338 * add 1 ms to start
5339 */
5340 start_sleep = start + 250000;
5341
5342 if (servicing_interrupt()) {
5343 type_wait = NV_NOSLEEP;
5344 }
5345 } else {
5346 cur = gethrtime();
5347 }
5348
5349 if (cur > end) {
5350
5351 break;
5352 }
5353
5354 if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5355 #if ! defined(__lock_lint)
5356 delay(1);
5357 #endif
5358 } else {
5359 drv_usecwait(nv_usec_delay);
5360 }
5361 }
5362
5363 return (B_FALSE);
5364 }
5365
5366
5367 /*
5368 * nv_port_state_change() reports the state of the port to the
5369 * sata module by calling sata_hba_event_notify(). This
5370 * function is called any time the state of the port is changed
5371 */
5372 static void
5373 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
5374 {
5375 sata_device_t sd;
5376
5377 NVLOG(NVDBG_EVENT, nvp->nvp_ctlp, nvp,
5378 "nv_port_state_change: event 0x%x type 0x%x state 0x%x "
5379 "lbolt %ld (ticks)", event, addr_type, state, ddi_get_lbolt());
5380
5381 if (ddi_in_panic() != 0) {
5382
5383 return;
5384 }
5385
5386 bzero((void *)&sd, sizeof (sata_device_t));
5387 sd.satadev_rev = SATA_DEVICE_REV;
5388 nv_copy_registers(nvp, &sd, NULL);
5389
5390 /*
5391 * When NCQ is implemented sactive and snotific field need to be
5392 * updated.
5393 */
5394 sd.satadev_addr.cport = nvp->nvp_port_num;
5395 sd.satadev_addr.qual = addr_type;
5396 sd.satadev_state = state;
5397
5398 sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
5399 }
5400
5401
5402 /*
5403 * Monitor reset progress and signature gathering.
5404 */
static clock_t
nv_monitor_reset(nv_port_t *nvp)
{
	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
	uint32_t sstatus;

	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));

	sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);

	/*
	 * Check the link status. The link needs to be active before
	 * checking the link's status.
	 */
	if ((SSTATUS_GET_IPM(sstatus) != SSTATUS_IPM_ACTIVE) ||
	    (SSTATUS_GET_DET(sstatus) != SSTATUS_DET_DEVPRE_PHYCOM)) {
		/*
		 * Either link is not active or there is no device
		 * If the link remains down for more than NV_LINK_EVENT_DOWN
		 * (milliseconds), abort signature acquisition and complete
		 * reset processing.  The link will go down when COMRESET is
		 * sent by nv_reset().
		 */

		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
		    NV_LINK_EVENT_DOWN) {

			nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
			    "nv_monitor_reset: no link - ending signature "
			    "acquisition; time after reset %ldms",
			    TICK_TO_MSEC(ddi_get_lbolt() -
			    nvp->nvp_reset_time));

			DTRACE_PROBE(no_link_reset_giving_up_f);

			/*
			 * If the drive was previously present and configured
			 * and then subsequently removed, then send a removal
			 * event to sata common module.
			 */
			if (nvp->nvp_type != SATA_DTYPE_NONE) {
				nv_port_state_change(nvp,
				    SATA_EVNT_DEVICE_DETACHED,
				    SATA_ADDR_CPORT, 0);
			}

			/* forget the device; port is now empty */
			nvp->nvp_type = SATA_DTYPE_NONE;
			nvp->nvp_signature = NV_NO_SIG;
			nvp->nvp_state &= ~(NV_DEACTIVATED);

#ifdef SGPIO_SUPPORT
			nv_sgp_drive_disconnect(nvp->nvp_ctlp,
			    SGP_CTLR_PORT_TO_DRV(
			    nvp->nvp_ctlp->nvc_ctlr_num,
			    nvp->nvp_port_num));
#endif

			/* wake any thread blocked waiting for the reset */
			cv_signal(&nvp->nvp_reset_cv);

			/* returning 0 stops further reset monitoring */
			return (0);
		}

		DTRACE_PROBE(link_lost_reset_keep_trying_p);

		/* link still settling; check again after nvp_wait_sig */
		return (nvp->nvp_wait_sig);
	}

	NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
	    "nv_monitor_reset: link up. time since reset %ldms",
	    TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time));

	nv_read_signature(nvp);


	if (nvp->nvp_signature != NV_NO_SIG) {
		/*
		 * signature has been acquired, send the appropriate
		 * event to the sata common module.
		 */
		if (nvp->nvp_state & (NV_ATTACH|NV_HOTPLUG)) {
			char *source;

			if (nvp->nvp_state & NV_HOTPLUG) {

				source = "hotplugged";
				nv_port_state_change(nvp,
				    SATA_EVNT_DEVICE_ATTACHED,
				    SATA_ADDR_CPORT, SATA_DSTATE_PWR_ACTIVE);
				DTRACE_PROBE1(got_sig_for_hotplugged_device_h,
				    int, nvp->nvp_state);

			} else {
				source = "activated or attached";
				DTRACE_PROBE1(got_sig_for_existing_device_h,
				    int, nvp->nvp_state);
			}

			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
			    "signature acquired for %s device. sig:"
			    " 0x%x state: 0x%x nvp_type: 0x%x", source,
			    nvp->nvp_signature, nvp->nvp_state, nvp->nvp_type);


			/* reset/attach/hotplug processing is finished */
			nvp->nvp_state &= ~(NV_RESET|NV_ATTACH|NV_HOTPLUG);

#ifdef SGPIO_SUPPORT
			if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
				nv_sgp_drive_connect(nvp->nvp_ctlp,
				    SGP_CTLR_PORT_TO_DRV(
				    nvp->nvp_ctlp->nvc_ctlr_num,
				    nvp->nvp_port_num));
			} else {
				nv_sgp_drive_disconnect(nvp->nvp_ctlp,
				    SGP_CTLR_PORT_TO_DRV(
				    nvp->nvp_ctlp->nvc_ctlr_num,
				    nvp->nvp_port_num));
			}
#endif

			cv_signal(&nvp->nvp_reset_cv);

			return (0);
		}

		/*
		 * Since this was not an attach, it was a reset of an
		 * existing device
		 */
		nvp->nvp_state &= ~NV_RESET;
		nvp->nvp_state |= NV_RESTORE;



		DTRACE_PROBE(got_signature_reset_complete_p);
		DTRACE_PROBE1(nvp_signature_h, int, nvp->nvp_signature);
		DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);

		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
		    "signature acquired reset complete. sig: 0x%x"
		    " state: 0x%x", nvp->nvp_signature, nvp->nvp_state);

		/*
		 * interrupts may have been disabled so just make sure
		 * they are cleared and re-enabled.
		 */

		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);

		nv_port_state_change(nvp, SATA_EVNT_DEVICE_RESET,
		    SATA_ADDR_DCPORT,
		    SATA_DSTATE_RESET | SATA_DSTATE_PWR_ACTIVE);

		return (0);
	}


	/*
	 * no signature yet.  If the per-attempt time budget has been
	 * exceeded, either retry the reset or give up and fail the port.
	 */
	if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >
	    NV_RETRY_RESET_SIG) {


		if (nvp->nvp_reset_retry_count >= NV_MAX_RESET_RETRY) {

			nvp->nvp_state |= NV_FAILED;
			nvp->nvp_state &= ~(NV_RESET|NV_ATTACH|NV_HOTPLUG);

			DTRACE_PROBE(reset_exceeded_waiting_for_sig_p);
			DTRACE_PROBE(reset_exceeded_waiting_for_sig_f);
			DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);
			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
			    "reset time exceeded waiting for sig nvp_state %x",
			    nvp->nvp_state);

			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
			    SATA_ADDR_CPORT, 0);

			cv_signal(&nvp->nvp_reset_cv);

			return (0);
		}

		nv_reset(nvp, "retry");

		return (nvp->nvp_wait_sig);
	}

	/*
	 * signature not received, keep trying
	 */
	DTRACE_PROBE(no_sig_keep_waiting_p);

	/*
	 * double the wait time for sig since the last try but cap it off at
	 * 1 second.
	 */
	nvp->nvp_wait_sig = nvp->nvp_wait_sig * 2;

	return (nvp->nvp_wait_sig > NV_ONE_SEC ? NV_ONE_SEC :
	    nvp->nvp_wait_sig);
}
5605
5606
5607 /*
5608 * timeout processing:
5609 *
5610 * Check if any packets have crossed a timeout threshold. If so,
5611 * abort the packet. This function is not NCQ-aware.
5612 *
5613 * If reset is in progress, call reset monitoring function.
5614 *
5615 * Timeout frequency may be lower for checking packet timeout
5616 * and higher for reset monitoring.
5617 *
5618 */
5619 static void
5620 nv_timeout(void *arg)
5621 {
5622 nv_port_t *nvp = arg;
5623 nv_slot_t *nv_slotp;
5624 clock_t next_timeout_us = NV_ONE_SEC;
5625 uint16_t int_status;
5626 uint8_t status, bmstatus;
5627 static int intr_warn_once = 0;
5628 uint32_t serror;
5629
5630
5631 ASSERT(nvp != NULL);
5632
5633 mutex_enter(&nvp->nvp_mutex);
5634 nvp->nvp_timeout_id = 0;
5635
5636 if (nvp->nvp_state & (NV_DEACTIVATED|NV_FAILED)) {
5637 next_timeout_us = 0;
5638
5639 goto finished;
5640 }
5641
5642 if (nvp->nvp_state & NV_RESET) {
5643 next_timeout_us = nv_monitor_reset(nvp);
5644
5645 goto finished;
5646 }
5647
5648 if (nvp->nvp_state & NV_LINK_EVENT) {
5649 boolean_t device_present = B_FALSE;
5650 uint32_t sstatus;
5651 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
5652
5653 if (TICK_TO_USEC(ddi_get_lbolt() -
5654 nvp->nvp_link_event_time) < NV_LINK_EVENT_SETTLE) {
5655
5656 next_timeout_us = 10 * NV_ONE_MSEC;
5657
5658 DTRACE_PROBE(link_event_set_no_timeout_keep_waiting_p);
5659
5660 goto finished;
5661 }
5662
5663 DTRACE_PROBE(link_event_settled_now_process_p);
5664
5665 nvp->nvp_state &= ~NV_LINK_EVENT;
5666
5667 /*
5668 * ck804 routinely reports the wrong hotplug/unplug event,
5669 * and it's been seen on mcp55 when there are signal integrity
5670 * issues. Therefore need to infer the event from the
5671 * current link status.
5672 */
5673
5674 sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
5675
5676 if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
5677 (SSTATUS_GET_DET(sstatus) ==
5678 SSTATUS_DET_DEVPRE_PHYCOM)) {
5679 device_present = B_TRUE;
5680 }
5681
5682 if ((nvp->nvp_signature != NV_NO_SIG) &&
5683 (device_present == B_FALSE)) {
5684
5685 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5686 "nv_timeout: device detached", NULL);
5687
5688 DTRACE_PROBE(device_detached_p);
5689
5690 (void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
5691 B_FALSE);
5692
5693 nv_port_state_change(nvp, SATA_EVNT_DEVICE_DETACHED,
5694 SATA_ADDR_CPORT, 0);
5695
5696 nvp->nvp_signature = NV_NO_SIG;
5697 nvp->nvp_rem_time = ddi_get_lbolt();
5698 nvp->nvp_type = SATA_DTYPE_NONE;
5699 next_timeout_us = 0;
5700
5701 #ifdef SGPIO_SUPPORT
5702 nv_sgp_drive_disconnect(nvp->nvp_ctlp,
5703 SGP_CTLR_PORT_TO_DRV(nvp->nvp_ctlp->nvc_ctlr_num,
5704 nvp->nvp_port_num));
5705 #endif
5706
5707 goto finished;
5708 }
5709
5710 /*
5711 * if the device was already present, and it's still present,
5712 * then abort any outstanding command and issue a reset.
5713 * This may result from transient link errors.
5714 */
5715
5716 if ((nvp->nvp_signature != NV_NO_SIG) &&
5717 (device_present == B_TRUE)) {
5718
5719 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5720 "nv_timeout: spurious link event", NULL);
5721 DTRACE_PROBE(spurious_link_event_p);
5722
5723 (void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
5724 B_FALSE);
5725
5726 nvp->nvp_signature = NV_NO_SIG;
5727 nvp->nvp_trans_link_time = ddi_get_lbolt();
5728 nvp->nvp_trans_link_count++;
5729 next_timeout_us = 0;
5730
5731 nv_reset(nvp, "transient link event");
5732
5733 goto finished;
5734 }
5735
5736
5737 /*
5738 * a new device has been inserted
5739 */
5740 if ((nvp->nvp_signature == NV_NO_SIG) &&
5741 (device_present == B_TRUE)) {
5742 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5743 "nv_timeout: device attached", NULL);
5744
5745 DTRACE_PROBE(device_attached_p);
5746 nvp->nvp_add_time = ddi_get_lbolt();
5747 next_timeout_us = 0;
5748 nvp->nvp_reset_count = 0;
5749 nvp->nvp_state = NV_HOTPLUG;
5750 nvp->nvp_type = SATA_DTYPE_UNKNOWN;
5751 nv_reset(nvp, "hotplug");
5752
5753 goto finished;
5754 }
5755
5756 /*
5757 * no link, and no prior device. Nothing to do, but
5758 * log this.
5759 */
5760 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
5761 "nv_timeout: delayed hot processing no link no prior"
5762 " device", NULL);
5763 DTRACE_PROBE(delayed_hotplug_no_link_no_prior_device_p);
5764
5765 nvp->nvp_trans_link_time = ddi_get_lbolt();
5766 nvp->nvp_trans_link_count++;
5767 next_timeout_us = 0;
5768
5769 goto finished;
5770 }
5771
5772 /*
5773 * Not yet NCQ-aware - there is only one command active.
5774 */
5775 nv_slotp = &(nvp->nvp_slot[0]);
5776
5777 /*
5778 * perform timeout checking and processing only if there is an
5779 * active packet on the port
5780 */
5781 if (nv_slotp != NULL && nv_slotp->nvslot_spkt != NULL) {
5782 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
5783 sata_cmd_t *satacmd = &spkt->satapkt_cmd;
5784 uint8_t cmd = satacmd->satacmd_cmd_reg;
5785 uint64_t lba;
5786
5787 #if ! defined(__lock_lint) && defined(DEBUG)
5788
5789 lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
5790 ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
5791 ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
5792 ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
5793 ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
5794 ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
5795 #endif
5796
5797 /*
5798 * timeout not needed if there is a polling thread
5799 */
5800 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
5801 next_timeout_us = 0;
5802
5803 goto finished;
5804 }
5805
5806 if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
5807 spkt->satapkt_time) {
5808
5809 serror = nv_get32(nvp->nvp_ctlp->nvc_bar_hdl[5],
5810 nvp->nvp_serror);
5811 status = nv_get8(nvp->nvp_ctl_hdl,
5812 nvp->nvp_altstatus);
5813 bmstatus = nv_get8(nvp->nvp_bm_hdl,
5814 nvp->nvp_bmisx);
5815
5816 nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
5817 "nv_timeout: aborting: "
5818 "nvslot_stime: %ld max ticks till timeout: %ld "
5819 "cur_time: %ld cmd = 0x%x lba = %d seq = %d",
5820 nv_slotp->nvslot_stime,
5821 drv_usectohz(MICROSEC *
5822 spkt->satapkt_time), ddi_get_lbolt(),
5823 cmd, lba, nvp->nvp_seq);
5824
5825 NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5826 "nv_timeout: altstatus = 0x%x bmicx = 0x%x "
5827 "serror = 0x%x previous_cmd = "
5828 "0x%x", status, bmstatus, serror,
5829 nvp->nvp_previous_cmd);
5830
5831
5832 DTRACE_PROBE1(nv_timeout_packet_p, int, nvp);
5833
5834 if (nvp->nvp_mcp5x_int_status != NULL) {
5835
5836 int_status = nv_get16(
5837 nvp->nvp_ctlp->nvc_bar_hdl[5],
5838 nvp->nvp_mcp5x_int_status);
5839 NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
5840 "int_status = 0x%x", int_status);
5841
5842 if (int_status & MCP5X_INT_COMPLETE) {
5843 /*
5844 * Completion interrupt was missed.
5845 * Issue warning message once.
5846 */
5847 if (!intr_warn_once) {
5848
5849 nv_cmn_err(CE_WARN,
5850 nvp->nvp_ctlp,
5851 nvp,
5852 "nv_sata: missing command "
5853 "completion interrupt");
5854 intr_warn_once = 1;
5855
5856 }
5857
5858 NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp,
5859 nvp, "timeout detected with "
5860 "interrupt ready - calling "
5861 "int directly", NULL);
5862
5863 mutex_exit(&nvp->nvp_mutex);
5864 (void) mcp5x_intr_port(nvp);
5865 mutex_enter(&nvp->nvp_mutex);
5866
5867 } else {
5868 /*
5869 * True timeout and not a missing
5870 * interrupt.
5871 */
5872 DTRACE_PROBE1(timeout_abort_active_p,
5873 int *, nvp);
5874 (void) nv_abort_active(nvp, spkt,
5875 SATA_PKT_TIMEOUT, B_TRUE);
5876 }
5877 } else {
5878 (void) nv_abort_active(nvp, spkt,
5879 SATA_PKT_TIMEOUT, B_TRUE);
5880 }
5881
5882 } else {
5883 NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5884 "nv_timeout:"
5885 " still in use so restarting timeout",
5886 NULL);
5887
5888 next_timeout_us = NV_ONE_SEC;
5889 }
5890 } else {
5891 /*
5892 * there was no active packet, so do not re-enable timeout
5893 */
5894 next_timeout_us = 0;
5895 NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
5896 "nv_timeout: no active packet so not re-arming "
5897 "timeout", NULL);
5898 }
5899
5900 finished:
5901
5902 nv_setup_timeout(nvp, next_timeout_us);
5903
5904 mutex_exit(&nvp->nvp_mutex);
5905 }
5906
5907
5908 /*
5909 * enable or disable the 3 interrupt types the driver is
5910 * interested in: completion, add and remove.
5911 */
5912 static void
5913 ck804_set_intr(nv_port_t *nvp, int flag)
5914 {
5915 nv_ctl_t *nvc = nvp->nvp_ctlp;
5916 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5917 uchar_t *bar5 = nvc->nvc_bar_addr[5];
5918 uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
5919 CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
5920 uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
5921 uint8_t int_en, port = nvp->nvp_port_num, intr_status;
5922
5923 if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5924 int_en = nv_get8(bar5_hdl,
5925 (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5926 int_en &= ~intr_bits[port];
5927 nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5928 int_en);
5929 return;
5930 }
5931
5932 ASSERT(mutex_owned(&nvp->nvp_mutex));
5933
5934 /*
5935 * controller level lock also required since access to an 8-bit
5936 * interrupt register is shared between both channels.
5937 */
5938 mutex_enter(&nvc->nvc_mutex);
5939
5940 if (flag & NV_INTR_CLEAR_ALL) {
5941 NVLOG(NVDBG_INTR, nvc, nvp,
5942 "ck804_set_intr: NV_INTR_CLEAR_ALL", NULL);
5943
5944 intr_status = nv_get8(nvc->nvc_bar_hdl[5],
5945 (uint8_t *)(nvc->nvc_ck804_int_status));
5946
5947 if (intr_status & clear_all_bits[port]) {
5948
5949 nv_put8(nvc->nvc_bar_hdl[5],
5950 (uint8_t *)(nvc->nvc_ck804_int_status),
5951 clear_all_bits[port]);
5952
5953 NVLOG(NVDBG_INTR, nvc, nvp,
5954 "interrupt bits cleared %x",
5955 intr_status & clear_all_bits[port]);
5956 }
5957 }
5958
5959 if (flag & NV_INTR_DISABLE) {
5960 NVLOG(NVDBG_INTR, nvc, nvp,
5961 "ck804_set_intr: NV_INTR_DISABLE", NULL);
5962 int_en = nv_get8(bar5_hdl,
5963 (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5964 int_en &= ~intr_bits[port];
5965 nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5966 int_en);
5967 }
5968
5969 if (flag & NV_INTR_ENABLE) {
5970 NVLOG(NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE",
5971 NULL);
5972 int_en = nv_get8(bar5_hdl,
5973 (uint8_t *)(bar5 + CK804_SATA_INT_EN));
5974 int_en |= intr_bits[port];
5975 nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
5976 int_en);
5977 }
5978
5979 mutex_exit(&nvc->nvc_mutex);
5980 }
5981
5982
5983 /*
5984 * enable or disable the 3 interrupts the driver is interested in:
5985 * completion interrupt, hot add, and hot remove interrupt.
5986 */
5987 static void
5988 mcp5x_set_intr(nv_port_t *nvp, int flag)
5989 {
5990 nv_ctl_t *nvc = nvp->nvp_ctlp;
5991 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5992 uint16_t intr_bits =
5993 MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5994 uint16_t int_en;
5995
5996 if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5997 int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5998 int_en &= ~intr_bits;
5999 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6000 return;
6001 }
6002
6003 ASSERT(mutex_owned(&nvp->nvp_mutex));
6004
6005 NVLOG(NVDBG_INTR, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag);
6006
6007 if (flag & NV_INTR_CLEAR_ALL) {
6008 NVLOG(NVDBG_INTR, nvc, nvp,
6009 "mcp5x_set_intr: NV_INTR_CLEAR_ALL", NULL);
6010 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
6011 }
6012
6013 if (flag & NV_INTR_ENABLE) {
6014 NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE",
6015 NULL);
6016 int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
6017 int_en |= intr_bits;
6018 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6019 }
6020
6021 if (flag & NV_INTR_DISABLE) {
6022 NVLOG(NVDBG_INTR, nvc, nvp,
6023 "mcp5x_set_intr: NV_INTR_DISABLE", NULL);
6024 int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
6025 int_en &= ~intr_bits;
6026 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6027 }
6028 }
6029
6030
6031 static void
6032 nv_resume(nv_port_t *nvp)
6033 {
6034 NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()", NULL);
6035
6036 mutex_enter(&nvp->nvp_mutex);
6037
6038 if (nvp->nvp_state & NV_DEACTIVATED) {
6039 mutex_exit(&nvp->nvp_mutex);
6040
6041 return;
6042 }
6043
6044 /* Enable interrupt */
6045 (*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
6046
6047 /*
6048 * Power may have been removed to the port and the
6049 * drive, and/or a drive may have been added or removed.
6050 * Force a reset which will cause a probe and re-establish
6051 * any state needed on the drive.
6052 */
6053 nv_reset(nvp, "resume");
6054
6055 mutex_exit(&nvp->nvp_mutex);
6056 }
6057
6058
6059 static void
6060 nv_suspend(nv_port_t *nvp)
6061 {
6062 NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()", NULL);
6063
6064 mutex_enter(&nvp->nvp_mutex);
6065
6066 #ifdef SGPIO_SUPPORT
6067 if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
6068 nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
6069 nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
6070 }
6071 #endif
6072
6073 if (nvp->nvp_state & NV_DEACTIVATED) {
6074 mutex_exit(&nvp->nvp_mutex);
6075
6076 return;
6077 }
6078
6079 /*
6080 * Stop the timeout handler.
6081 * (It will be restarted in nv_reset() during nv_resume().)
6082 */
6083 if (nvp->nvp_timeout_id) {
6084 (void) untimeout(nvp->nvp_timeout_id);
6085 nvp->nvp_timeout_id = 0;
6086 }
6087
6088 /* Disable interrupt */
6089 (*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
6090 NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
6091
6092 mutex_exit(&nvp->nvp_mutex);
6093 }
6094
6095
6096 static void
6097 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
6098 {
6099 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6100 sata_cmd_t *scmd = &spkt->satapkt_cmd;
6101 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
6102 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6103 uchar_t status;
6104 struct sata_cmd_flags flags;
6105
6106 sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6107 sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
6108 sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6109
6110 if (spkt == NULL) {
6111
6112 return;
6113 }
6114
6115 /*
6116 * in the error case, implicitly set the return of regs needed
6117 * for error handling.
6118 */
6119 status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
6120 nvp->nvp_altstatus);
6121
6122 flags = scmd->satacmd_flags;
6123
6124 if (status & SATA_STATUS_ERR) {
6125 flags.sata_copy_out_lba_low_msb = B_TRUE;
6126 flags.sata_copy_out_lba_mid_msb = B_TRUE;
6127 flags.sata_copy_out_lba_high_msb = B_TRUE;
6128 flags.sata_copy_out_lba_low_lsb = B_TRUE;
6129 flags.sata_copy_out_lba_mid_lsb = B_TRUE;
6130 flags.sata_copy_out_lba_high_lsb = B_TRUE;
6131 flags.sata_copy_out_error_reg = B_TRUE;
6132 flags.sata_copy_out_sec_count_msb = B_TRUE;
6133 flags.sata_copy_out_sec_count_lsb = B_TRUE;
6134 scmd->satacmd_status_reg = status;
6135 }
6136
6137 if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
6138
6139 /*
6140 * set HOB so that high byte will be read
6141 */
6142 nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
6143
6144 /*
6145 * get the requested high bytes
6146 */
6147 if (flags.sata_copy_out_sec_count_msb) {
6148 scmd->satacmd_sec_count_msb =
6149 nv_get8(cmdhdl, nvp->nvp_count);
6150 }
6151
6152 if (flags.sata_copy_out_lba_low_msb) {
6153 scmd->satacmd_lba_low_msb =
6154 nv_get8(cmdhdl, nvp->nvp_sect);
6155 }
6156
6157 if (flags.sata_copy_out_lba_mid_msb) {
6158 scmd->satacmd_lba_mid_msb =
6159 nv_get8(cmdhdl, nvp->nvp_lcyl);
6160 }
6161
6162 if (flags.sata_copy_out_lba_high_msb) {
6163 scmd->satacmd_lba_high_msb =
6164 nv_get8(cmdhdl, nvp->nvp_hcyl);
6165 }
6166 }
6167
6168 /*
6169 * disable HOB so that low byte is read
6170 */
6171 nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
6172
6173 /*
6174 * get the requested low bytes
6175 */
6176 if (flags.sata_copy_out_sec_count_lsb) {
6177 scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
6178 }
6179
6180 if (flags.sata_copy_out_lba_low_lsb) {
6181 scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
6182 }
6183
6184 if (flags.sata_copy_out_lba_mid_lsb) {
6185 scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
6186 }
6187
6188 if (flags.sata_copy_out_lba_high_lsb) {
6189 scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
6190 }
6191
6192 /*
6193 * get the device register if requested
6194 */
6195 if (flags.sata_copy_out_device_reg) {
6196 scmd->satacmd_device_reg = nv_get8(cmdhdl, nvp->nvp_drvhd);
6197 }
6198
6199 /*
6200 * get the error register if requested
6201 */
6202 if (flags.sata_copy_out_error_reg) {
6203 scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
6204 }
6205 }
6206
6207
6208 /*
6209 * hot plug and remove interrupts can occur when the device is reset.
6210 * Masking the interrupt doesn't always work well because if a
6211 * different interrupt arrives on the other port, the driver can still
6212 * end up checking the state of the other port and discover the hot
6213 * interrupt flag is set even though it was masked. Also, when there are
6214 * errors on the link there can be transient link events which need to be
6215 * masked and eliminated as well.
6216 */
6217 static void
6218 nv_link_event(nv_port_t *nvp, int flag)
6219 {
6220
6221 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_link_event: flag: %s",
6222 flag ? "add" : "remove");
6223
6224 ASSERT(MUTEX_HELD(&nvp->nvp_mutex));
6225
6226 nvp->nvp_link_event_time = ddi_get_lbolt();
6227
6228 /*
6229 * if a port has been deactivated, ignore all link events
6230 */
6231 if (nvp->nvp_state & NV_DEACTIVATED) {
6232 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "ignoring link event"
6233 " port deactivated", NULL);
6234 DTRACE_PROBE(ignoring_link_port_deactivated_p);
6235
6236 return;
6237 }
6238
6239 /*
6240 * if the drive has been reset, ignore any transient events. If it's
6241 * a real removal event, nv_monitor_reset() will handle it.
6242 */
6243 if (nvp->nvp_state & NV_RESET) {
6244 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "ignoring link event"
6245 " during reset", NULL);
6246 DTRACE_PROBE(ignoring_link_event_during_reset_p);
6247
6248 return;
6249 }
6250
6251 /*
6252 * if link event processing is already enabled, nothing to
6253 * do.
6254 */
6255 if (nvp->nvp_state & NV_LINK_EVENT) {
6256
6257 NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
6258 "received link event while processing already in "
6259 "progress", NULL);
6260 DTRACE_PROBE(nv_link_event_already_set_p);
6261
6262 return;
6263 }
6264
6265 DTRACE_PROBE1(link_event_p, int, nvp);
6266
6267 nvp->nvp_state |= NV_LINK_EVENT;
6268
6269 nv_setup_timeout(nvp, NV_LINK_EVENT_SETTLE);
6270 }
6271
6272
6273 /*
6274 * Get request sense data and stuff it the command's sense buffer.
6275 * Start a request sense command in order to get sense data to insert
6276 * in the sata packet's rqsense buffer. The command completion
6277 * processing is in nv_intr_pkt_pio.
6278 *
6279 * The sata common module provides a function to allocate and set-up a
6280 * request sense packet command. The reasons it is not being used here is:
6281 * a) it cannot be called in an interrupt context and this function is
6282 * called in an interrupt context.
6283 * b) it allocates DMA resources that are not used here because this is
6284 * implemented using PIO.
6285 *
6286 * If, in the future, this is changed to use DMA, the sata common module
6287 * should be used to allocate and set-up the error retrieval (request sense)
6288 * command.
6289 */
static int
nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;

	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_rqsense_pio: start", NULL);

	/* clear the local request sense buffer before starting the command */
	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);

	/* Write the request sense PACKET command */

	/* select the drive */
	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);

	/* make certain the drive selected (DRDY set, BSY clear, 5s limit) */
	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
	    NV_SEC2USEC(5), 0) == B_FALSE) {
		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_start_rqsense_pio: drive select failed", NULL);
		return (NV_FAILURE);
	}

	/*
	 * set up the command: byte-count limit goes in the cylinder
	 * registers (high/low bytes of SATA_ATAPI_MAX_BYTES_PER_DRQ)
	 */
	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
	nv_put8(cmdhdl, nvp->nvp_sect, 0);
	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */

	/* initiate the command by writing the command register last */
	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);

	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
	NV_DELAY_NSEC(400);

	/*
	 * Wait for the device to indicate that it is ready for the command
	 * ATAPI protocol state - HP0: Check_Status_A
	 * (4000000 timeout units — presumably usec, ~4s; matches nv_wait
	 * usage elsewhere — TODO confirm against nv_wait3's contract)
	 */

	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
	    4000000, 0) == B_FALSE) {
		/* distinguish device-reported error from a plain timeout */
		if (nv_get8(cmdhdl, nvp->nvp_status) &
		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_rqsense_pio: rqsense dev error (HP0)",
			    NULL);
		} else {
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_rqsense_pio: rqsense timeout (HP0)",
			    NULL);
		}

		/* fail the packet and reset the port to recover */
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_complete_io(nvp, spkt, 0);
		nv_reset(nvp, "rqsense_pio");

		return (NV_FAILURE);
	}

	/*
	 * Put the ATAPI command in the data register
	 * ATAPI protocol state - HP1: Send_Packet
	 * (the CDB is transferred as 16-bit words, hence cdb_len >> 1)
	 */

	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
	    (ushort_t *)nvp->nvp_data,
	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);

	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_rqsense_pio: exiting into HP3", NULL);

	/* completion is handled by nv_intr_pkt_pio (see header comment) */
	return (NV_SUCCESS);
}
6373
6374 /*
6375 * quiesce(9E) entry point.
6376 *
6377 * This function is called when the system is single-threaded at high
6378 * PIL with preemption disabled. Therefore, this function must not be
6379 * blocked.
6380 *
6381 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6382 * DDI_FAILURE indicates an error condition and should almost never happen.
6383 */
6384 static int
6385 nv_quiesce(dev_info_t *dip)
6386 {
6387 int port, instance = ddi_get_instance(dip);
6388 nv_ctl_t *nvc;
6389
6390 if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
6391 return (DDI_FAILURE);
6392
6393 for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
6394 nv_port_t *nvp = &(nvc->nvc_port[port]);
6395 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6396 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6397 uint32_t sctrl;
6398
6399 /*
6400 * Stop the controllers from generating interrupts.
6401 */
6402 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
6403
6404 /*
6405 * clear signature registers
6406 */
6407 nv_put8(cmdhdl, nvp->nvp_sect, 0);
6408 nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
6409 nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
6410 nv_put8(cmdhdl, nvp->nvp_count, 0);
6411
6412 nvp->nvp_signature = NV_NO_SIG;
6413 nvp->nvp_type = SATA_DTYPE_NONE;
6414 nvp->nvp_state |= NV_RESET;
6415 nvp->nvp_reset_time = ddi_get_lbolt();
6416
6417 /*
6418 * assert reset in PHY by writing a 1 to bit 0 scontrol
6419 */
6420 sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6421
6422 nv_put32(bar5_hdl, nvp->nvp_sctrl,
6423 sctrl | SCONTROL_DET_COMRESET);
6424
6425 /*
6426 * wait 1ms
6427 */
6428 drv_usecwait(1000);
6429
6430 /*
6431 * de-assert reset in PHY
6432 */
6433 nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
6434 }
6435
6436 return (DDI_SUCCESS);
6437 }
6438
6439
6440 #ifdef SGPIO_SUPPORT
6441 /*
6442 * NVIDIA specific SGPIO LED support
6443 * Please refer to the NVIDIA documentation for additional details
6444 */
6445
6446 /*
6447 * nv_sgp_led_init
6448 * Detect SGPIO support. If present, initialize.
6449 */
6450 static void
6451 nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
6452 {
6453 uint16_t csrp; /* SGPIO_CSRP from PCI config space */
6454 uint32_t cbp; /* SGPIO_CBP from PCI config space */
6455 nv_sgp_cmn_t *cmn; /* shared data structure */
6456 int i;
6457 char tqname[SGPIO_TQ_NAME_LEN];
6458 extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
6459
6460 /*
6461 * Initialize with appropriately invalid values in case this function
6462 * exits without initializing SGPIO (for example, there is no SGPIO
6463 * support).
6464 */
6465 nvc->nvc_sgp_csr = 0;
6466 nvc->nvc_sgp_cbp = NULL;
6467 nvc->nvc_sgp_cmn = NULL;
6468
6469 /*
6470 * Only try to initialize SGPIO LED support if this property
6471 * indicates it should be.
6472 */
6473 if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
6474 "enable-sgpio-leds", 0) != 1)
6475 return;
6476
6477 /*
6478 * CK804 can pass the sgpio_detect test even though it does not support
6479 * SGPIO, so don't even look at a CK804.
6480 */
6481 if (nvc->nvc_mcp5x_flag != B_TRUE)
6482 return;
6483
6484 /*
6485 * The NVIDIA SGPIO support can nominally handle 6 drives.
6486 * However, the current implementation only supports 4 drives.
6487 * With two drives per controller, that means only look at the
6488 * first two controllers.
6489 */
6490 if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
6491 return;
6492
6493 /* confirm that the SGPIO registers are there */
6494 if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
6495 NVLOG(NVDBG_INIT, nvc, NULL,
6496 "SGPIO registers not detected", NULL);
6497 return;
6498 }
6499
6500 /* save off the SGPIO_CSR I/O address */
6501 nvc->nvc_sgp_csr = csrp;
6502
6503 /* map in Control Block */
6504 nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
6505 sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
6506
6507 /* initialize the SGPIO h/w */
6508 if (nv_sgp_init(nvc) == NV_FAILURE) {
6509 nv_cmn_err(CE_WARN, nvc, NULL,
6510 "Unable to initialize SGPIO");
6511 }
6512
6513 /*
6514 * Initialize the shared space for this instance. This could
6515 * involve allocating the space, saving a pointer to the space
6516 * and starting the taskq that actually turns the LEDs on and off.
6517 * Or, it could involve just getting the pointer to the already
6518 * allocated space.
6519 */
6520
6521 mutex_enter(&nv_sgp_c2c_mutex);
6522
6523 /* try and find our CBP in the mapping table */
6524 cmn = NULL;
6525 for (i = 0; i < NV_MAX_CBPS; i++) {
6526 if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp) {
6527 cmn = nv_sgp_cbp2cmn[i].c2cm_cmn;
6528 break;
6529 }
6530
6531 if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
6532 break;
6533 }
6534
6535 if (i >= NV_MAX_CBPS) {
6536 /*
6537 * CBP to shared space mapping table is full
6538 */
6539 nvc->nvc_sgp_cmn = NULL;
6540 nv_cmn_err(CE_WARN, nvc, NULL,
6541 "LED handling not initialized - too many controllers");
6542 } else if (cmn == NULL) {
6543 /*
6544 * Allocate the shared space, point the SGPIO scratch register
6545 * at it and start the led update taskq.
6546 */
6547
6548 /* allocate shared space */
6549 cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
6550 KM_SLEEP);
6551 if (cmn == NULL) {
6552 nv_cmn_err(CE_WARN, nvc, NULL,
6553 "Failed to allocate shared data");
6554 return;
6555 }
6556
6557 nvc->nvc_sgp_cmn = cmn;
6558
6559 /* initialize the shared data structure */
6560 cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
6561 cmn->nvs_connected = 0;
6562 cmn->nvs_activity = 0;
6563 cmn->nvs_cbp = cbp;
6564
6565 mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
6566 mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
6567 cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
6568
6569 /* put the address in the SGPIO scratch register */
6570 #if defined(__amd64)
6571 nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
6572 #else
6573 nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
6574 #endif
6575
6576 /* add an entry to the cbp to cmn mapping table */
6577
6578 /* i should be the next available table position */
6579 nv_sgp_cbp2cmn[i].c2cm_cbp = cbp;
6580 nv_sgp_cbp2cmn[i].c2cm_cmn = cmn;
6581
6582 /* start the activity LED taskq */
6583
6584 /*
6585 * The taskq name should be unique and the time
6586 */
6587 (void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
6588 "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
6589 cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
6590 TASKQ_DEFAULTPRI, 0);
6591 if (cmn->nvs_taskq == NULL) {
6592 cmn->nvs_taskq_delay = 0;
6593 nv_cmn_err(CE_WARN, nvc, NULL,
6594 "Failed to start activity LED taskq");
6595 } else {
6596 cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
6597 (void) ddi_taskq_dispatch(cmn->nvs_taskq,
6598 nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
6599 }
6600 } else {
6601 nvc->nvc_sgp_cmn = cmn;
6602 cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6603 }
6604
6605 mutex_exit(&nv_sgp_c2c_mutex);
6606 }
6607
6608 /*
6609 * nv_sgp_detect
6610 * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
6611 * report back whether both were readable.
6612 */
6613 static int
6614 nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
6615 uint32_t *cbpp)
6616 {
6617 /* get the SGPIO_CSRP */
6618 *csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
6619 if (*csrpp == 0) {
6620 return (NV_FAILURE);
6621 }
6622
6623 /* SGPIO_CSRP is good, get the SGPIO_CBP */
6624 *cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
6625 if (*cbpp == 0) {
6626 return (NV_FAILURE);
6627 }
6628
6629 /* SGPIO_CBP is good, so we must support SGPIO */
6630 return (NV_SUCCESS);
6631 }
6632
6633 /*
6634 * nv_sgp_init
6635 * Initialize SGPIO.
6636 * The initialization process is described by NVIDIA, but the hardware does
6637 * not always behave as documented, so several steps have been changed and/or
6638 * omitted.
6639 */
static int
nv_sgp_init(nv_ctl_t *nvc)
{
	int seq;
	int rval = NV_SUCCESS;
	hrtime_t start, end;
	uint32_t cmd;
	uint32_t status;
	int drive_count;

	status = nv_sgp_csr_read(nvc);
	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
		/* SGPIO logic is in reset state and requires initialization */

		/*
		 * noting the Sequence field value; it should change once
		 * the READ_PARAMS command below has been processed
		 */
		seq = SGPIO_CSR_SEQ(status);

		/* issue SGPIO_CMD_READ_PARAMS command */
		cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
		nv_sgp_csr_write(nvc, cmd);

		DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);

		/* poll for command completion */
		start = gethrtime();
		end = start + NV_SGP_CMD_TIMEOUT;
		for (;;) {
			status = nv_sgp_csr_read(nvc);

			/* break on error */
			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) {
				NVLOG(NVDBG_VERBOSE, nvc, NULL,
				    "Command error during initialization",
				    NULL);
				rval = NV_FAILURE;
				break;
			}

			/* command processing is taking place */
			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) {
				/* an unchanged sequence number is suspicious */
				if (SGPIO_CSR_SEQ(status) != seq) {
					NVLOG(NVDBG_VERBOSE, nvc, NULL,
					    "Sequence number change error",
					    NULL);
				}

				break;
			}

			/* give up if NV_SGP_CMD_TIMEOUT has elapsed */

			if (gethrtime() > end)
				break;

			/* wait 400 ns before checking again */
			NV_DELAY_NSEC(400);
		}
	}

	if (rval == NV_FAILURE)
		return (rval);

	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
		NVLOG(NVDBG_VERBOSE, nvc, NULL,
		    "SGPIO logic not operational after init - state %d",
		    SGPIO_CSR_SSTAT(status));
		/*
		 * Should return (NV_FAILURE) but the hardware can be
		 * operational even if the SGPIO Status does not indicate
		 * this.
		 */
	}

	/*
	 * NVIDIA recommends reading the supported drive count even
	 * though they also indicate that it is always 4 at this time.
	 */
	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
	if (drive_count != SGPIO_DRV_CNT_VALUE) {
		/* unexpected count is logged but not treated as fatal */
		NVLOG(NVDBG_INIT, nvc, NULL,
		    "SGPIO reported undocumented drive count - %d",
		    drive_count);
	}

	NVLOG(NVDBG_INIT, nvc, NULL,
	    "initialized ctlr: %d csr: 0x%08x",
	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr);

	return (rval);
}
6730
6731 static int
6732 nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6733 {
6734 nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6735
6736 if (cmn == NULL)
6737 return (NV_FAILURE);
6738
6739 mutex_enter(&cmn->nvs_slock);
6740 cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6741 mutex_exit(&cmn->nvs_slock);
6742
6743 return (NV_SUCCESS);
6744 }
6745
6746 /*
6747 * nv_sgp_csr_read
6748 * This is just a 32-bit port read from the value that was obtained from the
6749 * PCI config space.
6750 *
6751 * XXX It was advised to use the in[bwl] function for this, even though they
6752 * are obsolete interfaces.
6753 */
6754 static int
6755 nv_sgp_csr_read(nv_ctl_t *nvc)
6756 {
6757 return (inl(nvc->nvc_sgp_csr));
6758 }
6759
6760 /*
6761 * nv_sgp_csr_write
6762 * This is just a 32-bit I/O port write. The port number was obtained from
6763 * the PCI config space.
6764 *
6765 * XXX It was advised to use the out[bwl] function for this, even though they
6766 * are obsolete interfaces.
6767 */
6768 static void
6769 nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
6770 {
6771 outl(nvc->nvc_sgp_csr, val);
6772 }
6773
6774 /*
6775 * nv_sgp_write_data
6776 * Cause SGPIO to send Control Block data
6777 */
6778 static int
6779 nv_sgp_write_data(nv_ctl_t *nvc)
6780 {
6781 hrtime_t start, end;
6782 uint32_t status;
6783 uint32_t cmd;
6784
6785 /* issue command */
6786 cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
6787 nv_sgp_csr_write(nvc, cmd);
6788
6789 /* poll for completion */
6790 start = gethrtime();
6791 end = start + NV_SGP_CMD_TIMEOUT;
6792 for (;;) {
6793 status = nv_sgp_csr_read(nvc);
6794
6795 /* break on error completion */
6796 if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
6797 break;
6798
6799 /* break on successful completion */
6800 if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6801 break;
6802
6803 /* Wait 400 ns and try again */
6804 NV_DELAY_NSEC(400);
6805
6806 if (gethrtime() > end)
6807 break;
6808 }
6809
6810 if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
6811 return (NV_SUCCESS);
6812
6813 return (NV_FAILURE);
6814 }
6815
/*
 * nv_sgp_activity_led_ctl
 * This is run as a taskq.  It wakes up at a fixed interval and checks to
 * see if any of the activity LEDs need to be changed.
 *
 * On each pass, for every drive in the SGPIO Control Block:
 * - not connected: activity LED is forced off
 * - connected but idle: activity LED is left on (solid)
 * - connected and active: the activity bit is toggled, so sustained
 *   activity blinks the LED at the loop interval rate
 * The loop ends when nvs_taskq_delay is set to 0 (done by nv_sgp_cleanup()
 * during detach), which makes ticks == 0 and falls out of the do/while.
 */
static void
nv_sgp_activity_led_ctl(void *arg)
{
	nv_ctl_t *nvc = (nv_ctl_t *)arg;
	nv_sgp_cmn_t *cmn;
	volatile nv_sgp_cb_t *cbp;
	clock_t ticks;
	uint8_t drv_leds;	/* per-drive LED bits being recomputed */
	uint32_t old_leds;	/* snapshot of the whole transmit register */
	uint32_t new_led_state;	/* accumulated replacement LED state */
	int i;

	cmn = nvc->nvc_sgp_cmn;
	cbp = nvc->nvc_sgp_cbp;

	do {
		/*
		 * save off the old state of all of the LEDs
		 * NOTE(review): read without holding nvs_slock -- presumably
		 * acceptable as a snapshot since the final update below is
		 * done under the lock; confirm against concurrent
		 * nv_sgp_locate()/nv_sgp_error() writers.
		 */
		old_leds = cbp->sgpio0_tr;

		DTRACE_PROBE3(sgpio__activity__state,
		    int, cmn->nvs_connected, int, cmn->nvs_activity,
		    int, old_leds);

		new_led_state = 0;

		/* for each drive */
		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {

			/* get the current state of the LEDs for the drive */
			drv_leds = SGPIO0_TR_DRV(old_leds, i);

			if ((cmn->nvs_connected & (1 << i)) == 0) {
				/* if not connected, turn off activity */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);

				continue;
			}

			if ((cmn->nvs_activity & (1 << i)) == 0) {
				/* connected, but not active: LED on solid */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);

				continue;
			}

			/* connected and active: toggle to produce a blink */
			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
				/* was enabled, so disable */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |=
				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);
			} else {
				/* was disabled, so enable */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);
			}

			/*
			 * clear the activity bit
			 * if there is drive activity again within the
			 * loop interval (now 1/16 second), nvs_activity
			 * will be reset and the "connected and active"
			 * condition above will cause the LED to blink
			 * off and on at the loop interval rate.  The
			 * rate may be increased (interval shortened) as
			 * long as it is not more than 1/30 second.
			 */
			mutex_enter(&cmn->nvs_slock);
			cmn->nvs_activity &= ~(1 << i);
			mutex_exit(&cmn->nvs_slock);
		}

		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);

		/*
		 * write out LED values: only the ACTIVE bits are replaced;
		 * Locate/Error bits set elsewhere are left untouched
		 */

		mutex_enter(&cmn->nvs_slock);
		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
		cbp->sgpio0_tr |= new_led_state;
		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
		mutex_exit(&cmn->nvs_slock);

		/* best-effort: a failed hardware update is only logged */
		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
			NVLOG(NVDBG_VERBOSE, nvc, NULL,
			    "nv_sgp_write_data failure updating active LED",
			    NULL);
		}

		/*
		 * now rest for the interval; nvs_taskq_delay == 0 (set at
		 * cleanup) yields ticks == 0, skipping the wait and
		 * terminating the loop below
		 */
		mutex_enter(&cmn->nvs_tlock);
		ticks = drv_usectohz(cmn->nvs_taskq_delay);
		if (ticks > 0)
			(void) cv_reltimedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
			    ticks, TR_CLOCK_TICK);
		mutex_exit(&cmn->nvs_tlock);
	} while (ticks > 0);
}
6936
6937 /*
6938 * nv_sgp_drive_connect
6939 * Set the flag used to indicate that the drive is attached to the HBA.
6940 * Used to let the taskq know that it should turn the Activity LED on.
6941 */
6942 static void
6943 nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6944 {
6945 nv_sgp_cmn_t *cmn;
6946
6947 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6948 return;
6949 cmn = nvc->nvc_sgp_cmn;
6950
6951 mutex_enter(&cmn->nvs_slock);
6952 cmn->nvs_connected |= (1 << drive);
6953 mutex_exit(&cmn->nvs_slock);
6954 }
6955
6956 /*
6957 * nv_sgp_drive_disconnect
6958 * Clears the flag used to indicate that the drive is no longer attached
6959 * to the HBA. Used to let the taskq know that it should turn the
6960 * Activity LED off. The flag that indicates that the drive is in use is
6961 * also cleared.
6962 */
6963 static void
6964 nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6965 {
6966 nv_sgp_cmn_t *cmn;
6967
6968 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6969 return;
6970 cmn = nvc->nvc_sgp_cmn;
6971
6972 mutex_enter(&cmn->nvs_slock);
6973 cmn->nvs_connected &= ~(1 << drive);
6974 cmn->nvs_activity &= ~(1 << drive);
6975 mutex_exit(&cmn->nvs_slock);
6976 }
6977
6978 /*
6979 * nv_sgp_drive_active
6980 * Sets the flag used to indicate that the drive has been accessed and the
6981 * LED should be flicked off, then on. It is cleared at a fixed time
6982 * interval by the LED taskq and set by the sata command start.
6983 */
6984 static void
6985 nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6986 {
6987 nv_sgp_cmn_t *cmn;
6988
6989 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6990 return;
6991 cmn = nvc->nvc_sgp_cmn;
6992
6993 DTRACE_PROBE1(sgpio__active, int, drive);
6994
6995 mutex_enter(&cmn->nvs_slock);
6996 cmn->nvs_activity |= (1 << drive);
6997 mutex_exit(&cmn->nvs_slock);
6998 }
6999
7000
7001 /*
7002 * nv_sgp_locate
7003 * Turns the Locate/OK2RM LED off or on for a particular drive. State is
7004 * maintained in the SGPIO Control Block.
7005 */
7006 static void
7007 nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
7008 {
7009 uint8_t leds;
7010 volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7011 nv_sgp_cmn_t *cmn;
7012
7013 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7014 return;
7015 cmn = nvc->nvc_sgp_cmn;
7016
7017 if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7018 return;
7019
7020 DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
7021
7022 mutex_enter(&cmn->nvs_slock);
7023
7024 leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7025
7026 leds &= ~TR_LOCATE_MASK;
7027 leds |= TR_LOCATE_SET(value);
7028
7029 cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7030 cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7031
7032 cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7033
7034 mutex_exit(&cmn->nvs_slock);
7035
7036 if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7037 nv_cmn_err(CE_WARN, nvc, NULL,
7038 "nv_sgp_write_data failure updating OK2RM/Locate LED");
7039 }
7040 }
7041
7042 /*
7043 * nv_sgp_error
7044 * Turns the Error/Failure LED off or on for a particular drive. State is
7045 * maintained in the SGPIO Control Block.
7046 */
7047 static void
7048 nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
7049 {
7050 uint8_t leds;
7051 volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7052 nv_sgp_cmn_t *cmn;
7053
7054 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7055 return;
7056 cmn = nvc->nvc_sgp_cmn;
7057
7058 if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7059 return;
7060
7061 DTRACE_PROBE2(sgpio__error, int, drive, int, value);
7062
7063 mutex_enter(&cmn->nvs_slock);
7064
7065 leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7066
7067 leds &= ~TR_ERROR_MASK;
7068 leds |= TR_ERROR_SET(value);
7069
7070 cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7071 cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7072
7073 cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7074
7075 mutex_exit(&cmn->nvs_slock);
7076
7077 if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7078 nv_cmn_err(CE_WARN, nvc, NULL,
7079 "nv_sgp_write_data failure updating Fail/Error LED");
7080 }
7081 }
7082
/*
 * nv_sgp_cleanup
 * Tear down this controller's SGPIO state: turn off the controller's two
 * activity LEDs, release its "in use" claim on the shared data, and -- if
 * this was the last controller using the shared state -- stop the LED
 * taskq, turn off all LEDs, and free the shared structure.  Finally unmap
 * the SGPIO Control Block for this instance.
 */
static void
nv_sgp_cleanup(nv_ctl_t *nvc)
{
	int drive, i;
	uint8_t drv_leds;
	uint32_t led_state;
	volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
	nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
	extern void psm_unmap_phys(caddr_t, size_t);

	/*
	 * If the SGPIO Control Block isn't mapped or the shared data
	 * structure isn't present in this instance, there isn't much that
	 * can be cleaned up.
	 */
	if ((cb == NULL) || (cmn == NULL))
		return;

	/* turn off activity LEDs for this controller */
	drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

	/* get the existing LED state */
	led_state = cb->sgpio0_tr;

	/* turn off port 0 */
	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
	led_state &= SGPIO0_TR_DRV_CLR(drive);
	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);

	/* turn off port 1 */
	drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
	led_state &= SGPIO0_TR_DRV_CLR(drive);
	led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);

	/*
	 * set the new led state, which should turn off this ctrl's LEDs
	 * NOTE(review): led_state is computed above but never written back
	 * to cb->sgpio0_tr before the hardware update -- looks like a
	 * dropped store; confirm intended behavior before changing.
	 */
	cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
	(void) nv_sgp_write_data(nvc);

	/* clear the controller's in use bit */
	mutex_enter(&cmn->nvs_slock);
	cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
	mutex_exit(&cmn->nvs_slock);

	/*
	 * NOTE(review): nvs_in_use is re-read here without nvs_slock;
	 * presumably detach of sibling controllers is serialized so the
	 * race is benign -- confirm against the framework's detach model.
	 */
	if (cmn->nvs_in_use == 0) {
		/* if all "in use" bits cleared, take everything down */

		if (cmn->nvs_taskq != NULL) {
			/* allow activity taskq to exit */
			cmn->nvs_taskq_delay = 0;
			cv_broadcast(&cmn->nvs_cv);

			/* then destroy it */
			ddi_taskq_destroy(cmn->nvs_taskq);
		}

		/* turn off all of the LEDs */
		cb->sgpio0_tr = 0;
		cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
		(void) nv_sgp_write_data(nvc);

		/*
		 * NOTE(review): NULL assigned to sgpio_sr -- if sgpio_sr is
		 * an integer register field (not a pointer) this relies on
		 * NULL being plain 0; verify the field's type.
		 */
		cb->sgpio_sr = NULL;

		/* zero out the CBP to cmn mapping */
		for (i = 0; i < NV_MAX_CBPS; i++) {
			if (nv_sgp_cbp2cmn[i].c2cm_cbp == cmn->nvs_cbp) {
				nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
				break;
			}

			if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
				break;
		}

		/* free resources */
		cv_destroy(&cmn->nvs_cv);
		mutex_destroy(&cmn->nvs_tlock);
		mutex_destroy(&cmn->nvs_slock);

		kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
	}

	/* this instance no longer references the shared data */
	nvc->nvc_sgp_cmn = NULL;

	/* unmap the SGPIO Control Block */
	psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
}
7169 #endif /* SGPIO_SUPPORT */