Print this page
7127 remove -Wno-missing-braces from Makefile.uts
Split |
Close |
Expand all |
Collapse all |
--- old/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c
+++ new/usr/src/uts/common/io/sata/adapters/nv_sata/nv_sata.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 24 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26
27 27 /*
28 28 *
29 29 * nv_sata is a combo SATA HBA driver for CK804/MCP04 (ck804) and
30 30 * MCP55/MCP51/MCP61 (mcp5x) based chipsets.
31 31 *
32 32 * NCQ
33 33 * ---
34 34 *
35 35 * A portion of the NCQ is in place, but is incomplete. NCQ is disabled
36 36 * and is likely to be revisited in the future.
37 37 *
38 38 *
39 39 * Power Management
40 40 * ----------------
41 41 *
42 42 * Normally power management would be responsible for ensuring the device
43 43 * is quiescent and then changing power states to the device, such as
44 44 * powering down parts or all of the device. mcp5x/ck804 is unique in
45 45 * that it is only available as part of a larger southbridge chipset, so
46 46 * removing power to the device isn't possible. Switches to control
47 47 * power management states D0/D3 in the PCI configuration space appear to
48 48 * be supported but changes to these states are apparently are ignored.
49 49 * The only further PM that the driver _could_ do is shut down the PHY,
50 50 * but in order to deliver the first rev of the driver sooner than later,
51 51 * that will be deferred until some future phase.
52 52 *
53 53 * Since the driver currently will not directly change any power state to
54 54 * the device, no power() entry point will be required. However, it is
55 55 * possible that in ACPI power state S3, aka suspend to RAM, that power
56 56 * can be removed to the device, and the driver cannot rely on BIOS to
57 57 * have reset any state. For the time being, there is no known
58 58 * non-default configurations that need to be programmed. This judgement
59 59 * is based on the port of the legacy ata driver not having any such
60 60 * functionality and based on conversations with the PM team. If such a
61 61 * restoration is later deemed necessary it can be incorporated into the
62 62 * DDI_RESUME processing.
63 63 *
64 64 */
65 65
66 66 #include <sys/scsi/scsi.h>
67 67 #include <sys/pci.h>
68 68 #include <sys/byteorder.h>
69 69 #include <sys/sunddi.h>
70 70 #include <sys/sata/sata_hba.h>
71 71 #ifdef SGPIO_SUPPORT
72 72 #include <sys/sata/adapters/nv_sata/nv_sgpio.h>
73 73 #include <sys/devctl.h>
74 74 #include <sys/sdt.h>
75 75 #endif
76 76 #include <sys/sata/adapters/nv_sata/nv_sata.h>
77 77 #include <sys/disp.h>
78 78 #include <sys/note.h>
79 79 #include <sys/promif.h>
80 80
81 81
82 82 /*
83 83 * Function prototypes for driver entry points
84 84 */
85 85 static int nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
86 86 static int nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
87 87 static int nv_quiesce(dev_info_t *dip);
88 88 static int nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
89 89 void *arg, void **result);
90 90
91 91 /*
92 92 * Function prototypes for entry points from sata service module
93 93 * These functions are distinguished from other local functions
94 94 * by the prefix "nv_sata_"
95 95 */
96 96 static int nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt);
97 97 static int nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int);
98 98 static int nv_sata_reset(dev_info_t *dip, sata_device_t *sd);
99 99 static int nv_sata_activate(dev_info_t *dip, sata_device_t *sd);
100 100 static int nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd);
101 101
102 102 /*
103 103 * Local function prototypes
104 104 */
105 105 static uint_t mcp5x_intr(caddr_t arg1, caddr_t arg2);
106 106 static uint_t ck804_intr(caddr_t arg1, caddr_t arg2);
107 107 static int nv_add_legacy_intrs(nv_ctl_t *nvc);
108 108 #ifdef NV_MSI_SUPPORTED
109 109 static int nv_add_msi_intrs(nv_ctl_t *nvc);
110 110 #endif
111 111 static void nv_rem_intrs(nv_ctl_t *nvc);
112 112 static int nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt);
113 113 static int nv_start_nodata(nv_port_t *nvp, int slot);
114 114 static void nv_intr_nodata(nv_port_t *nvp, nv_slot_t *spkt);
115 115 static int nv_start_pio_in(nv_port_t *nvp, int slot);
116 116 static int nv_start_pio_out(nv_port_t *nvp, int slot);
117 117 static void nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *spkt);
118 118 static void nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *spkt);
119 119 static int nv_start_pkt_pio(nv_port_t *nvp, int slot);
120 120 static void nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
121 121 static int nv_start_dma(nv_port_t *nvp, int slot);
122 122 static void nv_intr_dma(nv_port_t *nvp, struct nv_slot *spkt);
123 123 static void nv_uninit_ctl(nv_ctl_t *nvc);
124 124 static void mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
125 125 static void ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
126 126 static void nv_uninit_port(nv_port_t *nvp);
127 127 static void nv_init_port(nv_port_t *nvp);
128 128 static int nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
129 129 static int mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp);
130 130 #ifdef NCQ
131 131 static int mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp);
132 132 #endif
133 133 static void nv_start_dma_engine(nv_port_t *nvp, int slot);
134 134 static void nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type,
135 135 int state);
136 136 static void nv_common_reg_init(nv_ctl_t *nvc);
137 137 static void ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status);
138 138 static void nv_reset(nv_port_t *nvp, char *reason);
139 139 static void nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot);
140 140 static void nv_timeout(void *);
141 141 static int nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt);
142 142 static void nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...);
143 143 static void nv_read_signature(nv_port_t *nvp);
144 144 static void mcp5x_set_intr(nv_port_t *nvp, int flag);
145 145 static void ck804_set_intr(nv_port_t *nvp, int flag);
146 146 static void nv_resume(nv_port_t *nvp);
147 147 static void nv_suspend(nv_port_t *nvp);
148 148 static int nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt);
149 149 static int nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
150 150 boolean_t reset);
151 151 static void nv_copy_registers(nv_port_t *nvp, sata_device_t *sd,
152 152 sata_pkt_t *spkt);
153 153 static void nv_link_event(nv_port_t *nvp, int flags);
154 154 static int nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt);
155 155 static int nv_wait3(nv_port_t *nvp, uchar_t onbits1, uchar_t offbits1,
156 156 uchar_t failure_onbits2, uchar_t failure_offbits2,
157 157 uchar_t failure_onbits3, uchar_t failure_offbits3,
158 158 uint_t timeout_usec, int type_wait);
159 159 static int nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits,
160 160 uint_t timeout_usec, int type_wait);
161 161 static int nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp);
162 162 static void nv_setup_timeout(nv_port_t *nvp, clock_t microseconds);
163 163 static clock_t nv_monitor_reset(nv_port_t *nvp);
164 164 static int nv_bm_status_clear(nv_port_t *nvp);
165 165 static void nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...);
166 166
167 167 #ifdef SGPIO_SUPPORT
168 168 static int nv_open(dev_t *devp, int flag, int otyp, cred_t *credp);
169 169 static int nv_close(dev_t dev, int flag, int otyp, cred_t *credp);
170 170 static int nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode,
171 171 cred_t *credp, int *rvalp);
172 172
173 173 static void nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle);
174 174 static int nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
175 175 uint32_t *cbpp);
176 176 static int nv_sgp_init(nv_ctl_t *nvc);
177 177 static int nv_sgp_check_set_cmn(nv_ctl_t *nvc);
178 178 static int nv_sgp_csr_read(nv_ctl_t *nvc);
179 179 static void nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val);
180 180 static int nv_sgp_write_data(nv_ctl_t *nvc);
181 181 static void nv_sgp_activity_led_ctl(void *arg);
182 182 static void nv_sgp_drive_connect(nv_ctl_t *nvc, int drive);
183 183 static void nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive);
184 184 static void nv_sgp_drive_active(nv_ctl_t *nvc, int drive);
185 185 static void nv_sgp_locate(nv_ctl_t *nvc, int drive, int value);
186 186 static void nv_sgp_error(nv_ctl_t *nvc, int drive, int value);
187 187 static void nv_sgp_cleanup(nv_ctl_t *nvc);
188 188 #endif
189 189
190 190
191 191 /*
192 192 * DMA attributes for the data buffer for x86. dma_attr_burstsizes is unused.
193 193 * Verify if needed if ported to other ISA.
194 194 */
195 195 static ddi_dma_attr_t buffer_dma_attr = {
196 196 DMA_ATTR_V0, /* dma_attr_version */
197 197 0, /* dma_attr_addr_lo: lowest bus address */
198 198 0xffffffffull, /* dma_attr_addr_hi: */
199 199 NV_BM_64K_BOUNDARY - 1, /* dma_attr_count_max i.e for one cookie */
200 200 4, /* dma_attr_align */
201 201 1, /* dma_attr_burstsizes. */
202 202 1, /* dma_attr_minxfer */
203 203 0xffffffffull, /* dma_attr_maxxfer including all cookies */
204 204 0xffffffffull, /* dma_attr_seg */
205 205 NV_DMA_NSEGS, /* dma_attr_sgllen */
206 206 512, /* dma_attr_granular */
207 207 0, /* dma_attr_flags */
208 208 };
209 209 static ddi_dma_attr_t buffer_dma_40bit_attr = {
210 210 DMA_ATTR_V0, /* dma_attr_version */
211 211 0, /* dma_attr_addr_lo: lowest bus address */
212 212 0xffffffffffull, /* dma_attr_addr_hi: */
213 213 NV_BM_64K_BOUNDARY - 1, /* dma_attr_count_max i.e for one cookie */
214 214 4, /* dma_attr_align */
215 215 1, /* dma_attr_burstsizes. */
216 216 1, /* dma_attr_minxfer */
217 217 0xffffffffull, /* dma_attr_maxxfer including all cookies */
218 218 0xffffffffull, /* dma_attr_seg */
219 219 NV_DMA_NSEGS, /* dma_attr_sgllen */
220 220 512, /* dma_attr_granular */
221 221 0, /* dma_attr_flags */
222 222 };
223 223
224 224
225 225 /*
226 226 * DMA attributes for PRD tables
227 227 */
228 228 ddi_dma_attr_t nv_prd_dma_attr = {
229 229 DMA_ATTR_V0, /* dma_attr_version */
230 230 0, /* dma_attr_addr_lo */
231 231 0xffffffffull, /* dma_attr_addr_hi */
232 232 NV_BM_64K_BOUNDARY - 1, /* dma_attr_count_max */
233 233 4, /* dma_attr_align */
234 234 1, /* dma_attr_burstsizes */
235 235 1, /* dma_attr_minxfer */
236 236 NV_BM_64K_BOUNDARY, /* dma_attr_maxxfer */
237 237 NV_BM_64K_BOUNDARY - 1, /* dma_attr_seg */
238 238 1, /* dma_attr_sgllen */
239 239 1, /* dma_attr_granular */
240 240 0 /* dma_attr_flags */
241 241 };
242 242
243 243 /*
244 244 * Device access attributes
245 245 */
246 246 static ddi_device_acc_attr_t accattr = {
247 247 DDI_DEVICE_ATTR_V0,
248 248 DDI_STRUCTURE_LE_ACC,
249 249 DDI_STRICTORDER_ACC
250 250 };
251 251
252 252
#ifdef SGPIO_SUPPORT
/*
 * Character device entry points; only open/close/ioctl are provided, for
 * the SGPIO LED control interface.  Block and STREAMS entries are stubbed.
 */
static struct cb_ops nv_cb_ops = {
	nv_open,		/* open */
	nv_close,		/* close */
	nodev,			/* strategy (block) */
	nodev,			/* print (block) */
	nodev,			/* dump (block) */
	nodev,			/* read */
	nodev,			/* write */
	nv_ioctl,		/* ioctl */
	nodev,			/* devmap */
	nodev,			/* mmap */
	nodev,			/* segmap */
	nochpoll,		/* chpoll */
	ddi_prop_op,		/* prop_op */
	NULL,			/* streams */
	D_NEW | D_MP |
	D_64BIT | D_HOTPLUG,	/* flags */
	CB_REV			/* rev */
};
#endif /* SGPIO_SUPPORT */
274 274
275 275
276 276 static struct dev_ops nv_dev_ops = {
277 277 DEVO_REV, /* devo_rev */
278 278 0, /* refcnt */
279 279 nv_getinfo, /* info */
280 280 nulldev, /* identify */
281 281 nulldev, /* probe */
282 282 nv_attach, /* attach */
283 283 nv_detach, /* detach */
284 284 nodev, /* no reset */
285 285 #ifdef SGPIO_SUPPORT
286 286 &nv_cb_ops, /* driver operations */
287 287 #else
288 288 (struct cb_ops *)0, /* driver operations */
289 289 #endif
290 290 NULL, /* bus operations */
291 291 NULL, /* power */
292 292 nv_quiesce /* quiesce */
293 293 };
294 294
295 295
296 296 /*
297 297 * Request Sense CDB for ATAPI
298 298 */
299 299 static const uint8_t nv_rqsense_cdb[16] = {
300 300 SCMD_REQUEST_SENSE,
301 301 0,
302 302 0,
303 303 0,
304 304 SATA_ATAPI_MIN_RQSENSE_LEN,
305 305 0,
306 306 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 /* pad out to max CDB length */
307 307 };
308 308
309 309
310 310 static sata_tran_hotplug_ops_t nv_hotplug_ops;
311 311
↓ open down ↓ |
311 lines elided |
↑ open up ↑ |
312 312 extern struct mod_ops mod_driverops;
313 313
314 314 static struct modldrv modldrv = {
315 315 &mod_driverops, /* driverops */
316 316 "NVIDIA CK804/MCP04/MCP51/MCP55/MCP61 HBA",
317 317 &nv_dev_ops, /* driver ops */
318 318 };
319 319
320 320 static struct modlinkage modlinkage = {
321 321 MODREV_1,
322 - &modldrv,
323 - NULL
322 + { &modldrv, NULL }
324 323 };
325 324
326 325 /*
327 326 * Maximum number of consecutive interrupts processed in the loop in the
328 327 * single invocation of the port interrupt routine.
329 328 */
330 329 int nv_max_intr_loops = NV_MAX_INTR_PER_DEV;
331 330
332 331 /*
333 332 * wait between checks of reg status
334 333 */
335 334 int nv_usec_delay = NV_WAIT_REG_CHECK;
336 335
337 336 /*
338 337 * The following used for nv_vcmn_err() and nv_log()
339 338 */
340 339
341 340 /*
342 341 * temp buffer to save from wasting limited stack space
343 342 */
344 343 static char nv_log_buf[NV_LOGBUF_LEN];
345 344
346 345 /*
347 346 * protects nv_log_buf
348 347 */
349 348 static kmutex_t nv_log_mutex;
350 349
351 350 /*
352 351 * these on-by-default flags were chosen so that the driver
353 352 * logs as much non-usual run-time information as possible
354 353 * without overflowing the ring with useless information or
355 354 * causing any significant performance penalty.
356 355 */
357 356 int nv_debug_flags =
358 357 NVDBG_HOT|NVDBG_RESET|NVDBG_ALWAYS|NVDBG_TIMEOUT|NVDBG_EVENT;
359 358
360 359 /*
361 360 * normally debug information is not logged to the console
362 361 * but this allows it to be enabled.
363 362 */
364 363 int nv_log_to_console = B_FALSE;
365 364
366 365 /*
367 366 * normally debug information is not logged to cmn_err but
368 367 * in some cases it may be desired.
369 368 */
370 369 int nv_log_to_cmn_err = B_FALSE;
371 370
372 371 /*
373 372 * using prom print avoids using cmn_err/syslog and goes right
374 373 * to the console which may be desirable in some situations, but
375 374 * it may be synchronous, which would change timings and
376 375 * impact performance. Use with caution.
377 376 */
378 377 int nv_prom_print = B_FALSE;
379 378
380 379 /*
381 380 * Opaque state pointer to be initialized by ddi_soft_state_init()
382 381 */
383 382 static void *nv_statep = NULL;
384 383
385 384 /*
386 385 * Map from CBP to shared space
387 386 *
388 387 * When a MCP55/IO55 parts supports SGPIO, there is a single CBP (SGPIO
389 388 * Control Block Pointer as well as the corresponding Control Block) that
390 389 * is shared across all driver instances associated with that part. The
391 390 * Control Block is used to update and query the LED state for the devices
392 391 * on the controllers associated with those instances. There is also some
393 392 * driver state (called the 'common' area here) associated with each SGPIO
394 393 * Control Block. The nv_sgp_cpb2cmn is used to map a given CBP to its
395 394 * control area.
396 395 *
397 396 * The driver can also use this mapping array to determine whether the
398 397 * common area for a given CBP has been initialized, and, if it isn't
399 398 * initialized, initialize it.
400 399 *
401 400 * When a driver instance with a CBP value that is already in the array is
402 401 * initialized, it will use the pointer to the previously initialized common
403 402 * area associated with that SGPIO CBP value, rather than initialize it
404 403 * itself.
405 404 *
406 405 * nv_sgp_c2c_mutex is used to synchronize access to this mapping array.
407 406 */
408 407 #ifdef SGPIO_SUPPORT
409 408 static kmutex_t nv_sgp_c2c_mutex;
410 409 static struct nv_sgp_cbp2cmn nv_sgp_cbp2cmn[NV_MAX_CBPS];
411 410 #endif
412 411
413 412 /*
414 413 * control whether 40bit DMA is used or not
415 414 */
416 415 int nv_sata_40bit_dma = B_TRUE;
417 416
418 417 static sata_tran_hotplug_ops_t nv_hotplug_ops = {
419 418 SATA_TRAN_HOTPLUG_OPS_REV_1, /* structure version */
420 419 nv_sata_activate, /* activate port. cfgadm -c connect */
421 420 nv_sata_deactivate /* deactivate port. cfgadm -c disconnect */
422 421 };
423 422
424 423
425 424 /*
426 425 * nv module initialization
427 426 */
428 427 int
429 428 _init(void)
430 429 {
431 430 int error;
432 431 #ifdef SGPIO_SUPPORT
433 432 int i;
434 433 #endif
435 434
436 435 error = ddi_soft_state_init(&nv_statep, sizeof (nv_ctl_t), 0);
437 436
438 437 if (error != 0) {
439 438
440 439 return (error);
441 440 }
442 441
443 442 mutex_init(&nv_log_mutex, NULL, MUTEX_DRIVER, NULL);
444 443 #ifdef SGPIO_SUPPORT
445 444 mutex_init(&nv_sgp_c2c_mutex, NULL, MUTEX_DRIVER, NULL);
446 445
447 446 for (i = 0; i < NV_MAX_CBPS; i++) {
448 447 nv_sgp_cbp2cmn[i].c2cm_cbp = 0;
449 448 nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
450 449 }
451 450 #endif
452 451
453 452 if ((error = sata_hba_init(&modlinkage)) != 0) {
454 453 ddi_soft_state_fini(&nv_statep);
455 454 mutex_destroy(&nv_log_mutex);
456 455
457 456 return (error);
458 457 }
459 458
460 459 error = mod_install(&modlinkage);
461 460 if (error != 0) {
462 461 sata_hba_fini(&modlinkage);
463 462 ddi_soft_state_fini(&nv_statep);
464 463 mutex_destroy(&nv_log_mutex);
465 464
466 465 return (error);
467 466 }
468 467
469 468 return (error);
470 469 }
471 470
472 471
473 472 /*
474 473 * nv module uninitialize
475 474 */
476 475 int
477 476 _fini(void)
478 477 {
479 478 int error;
480 479
481 480 error = mod_remove(&modlinkage);
482 481
483 482 if (error != 0) {
484 483 return (error);
485 484 }
486 485
487 486 /*
488 487 * remove the resources allocated in _init()
489 488 */
490 489 mutex_destroy(&nv_log_mutex);
491 490 #ifdef SGPIO_SUPPORT
492 491 mutex_destroy(&nv_sgp_c2c_mutex);
493 492 #endif
494 493 sata_hba_fini(&modlinkage);
495 494 ddi_soft_state_fini(&nv_statep);
496 495
497 496 return (error);
498 497 }
499 498
500 499
501 500 /*
502 501 * nv _info entry point
503 502 */
504 503 int
505 504 _info(struct modinfo *modinfop)
506 505 {
507 506 return (mod_info(&modlinkage, modinfop));
508 507 }
509 508
510 509
/*
 * Wrappers around ddi_{get,put}{8,16,32} so register traffic can be
 * observed with dtrace in DEBUG builds; in non-DEBUG builds they collapse
 * to the ddi routines via #define with zero overhead.
 */
#ifdef DEBUG

static void
nv_put8(ddi_acc_handle_t handle, uint8_t *dev_addr, uint8_t value)
{
	ddi_put8(handle, dev_addr, value);
}

static void
nv_put32(ddi_acc_handle_t handle, uint32_t *dev_addr, uint32_t value)
{
	ddi_put32(handle, dev_addr, value);
}

static uint32_t
nv_get32(ddi_acc_handle_t handle, uint32_t *dev_addr)
{
	return (ddi_get32(handle, dev_addr));
}

static void
nv_put16(ddi_acc_handle_t handle, uint16_t *dev_addr, uint16_t value)
{
	ddi_put16(handle, dev_addr, value);
}

static uint16_t
nv_get16(ddi_acc_handle_t handle, uint16_t *dev_addr)
{
	return (ddi_get16(handle, dev_addr));
}

static uint8_t
nv_get8(ddi_acc_handle_t handle, uint8_t *dev_addr)
{
	return (ddi_get8(handle, dev_addr));
}

#else

#define	nv_put8 ddi_put8
#define	nv_put32 ddi_put32
#define	nv_get32 ddi_get32
#define	nv_put16 ddi_put16
#define	nv_get16 ddi_get16
#define	nv_get8 ddi_get8

#endif
563 562
564 563
565 564 /*
566 565 * Driver attach
567 566 */
568 567 static int
569 568 nv_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
570 569 {
571 570 int status, attach_state, intr_types, bar, i, j, command;
572 571 int inst = ddi_get_instance(dip);
573 572 ddi_acc_handle_t pci_conf_handle;
574 573 nv_ctl_t *nvc;
575 574 uint8_t subclass;
576 575 uint32_t reg32;
577 576 #ifdef SGPIO_SUPPORT
578 577 pci_regspec_t *regs;
579 578 int rlen;
580 579 #endif
581 580
582 581 switch (cmd) {
583 582
584 583 case DDI_ATTACH:
585 584
586 585 attach_state = ATTACH_PROGRESS_NONE;
587 586
588 587 status = ddi_soft_state_zalloc(nv_statep, inst);
589 588
590 589 if (status != DDI_SUCCESS) {
591 590 break;
592 591 }
593 592
594 593 nvc = ddi_get_soft_state(nv_statep, inst);
595 594
596 595 nvc->nvc_dip = dip;
597 596
598 597 NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach(): DDI_ATTACH", NULL);
599 598
600 599 attach_state |= ATTACH_PROGRESS_STATEP_ALLOC;
601 600
602 601 if (pci_config_setup(dip, &pci_conf_handle) == DDI_SUCCESS) {
603 602 nvc->nvc_devid = pci_config_get16(pci_conf_handle,
604 603 PCI_CONF_DEVID);
605 604 nvc->nvc_revid = pci_config_get8(pci_conf_handle,
606 605 PCI_CONF_REVID);
607 606 NVLOG(NVDBG_INIT, nvc, NULL,
608 607 "inst %d: devid is %x silicon revid is %x"
609 608 " nv_debug_flags=%x", inst, nvc->nvc_devid,
610 609 nvc->nvc_revid, nv_debug_flags);
611 610 } else {
612 611 break;
613 612 }
614 613
615 614 attach_state |= ATTACH_PROGRESS_CONF_HANDLE;
616 615
617 616 /*
618 617 * Set the PCI command register: enable IO/MEM/Master.
619 618 */
620 619 command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
621 620 pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
622 621 command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
623 622
624 623 subclass = pci_config_get8(pci_conf_handle, PCI_CONF_SUBCLASS);
625 624
626 625 if (subclass & PCI_MASS_RAID) {
627 626 cmn_err(CE_WARN,
628 627 "attach failed: RAID mode not supported");
629 628
630 629 break;
631 630 }
632 631
633 632 /*
634 633 * the 6 bars of the controller are:
635 634 * 0: port 0 task file
636 635 * 1: port 0 status
637 636 * 2: port 1 task file
638 637 * 3: port 1 status
639 638 * 4: bus master for both ports
640 639 * 5: extended registers for SATA features
641 640 */
642 641 for (bar = 0; bar < 6; bar++) {
643 642 status = ddi_regs_map_setup(dip, bar + 1,
644 643 (caddr_t *)&nvc->nvc_bar_addr[bar], 0, 0, &accattr,
645 644 &nvc->nvc_bar_hdl[bar]);
646 645
647 646 if (status != DDI_SUCCESS) {
648 647 NVLOG(NVDBG_INIT, nvc, NULL,
649 648 "ddi_regs_map_setup failure for bar"
650 649 " %d status = %d", bar, status);
651 650 break;
652 651 }
653 652 }
654 653
655 654 attach_state |= ATTACH_PROGRESS_BARS;
656 655
657 656 /*
658 657 * initialize controller structures
659 658 */
660 659 status = nv_init_ctl(nvc, pci_conf_handle);
661 660
662 661 if (status == NV_FAILURE) {
663 662 NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl failed",
664 663 NULL);
665 664
666 665 break;
667 666 }
668 667
669 668 attach_state |= ATTACH_PROGRESS_CTL_SETUP;
670 669
671 670 /*
672 671 * initialize mutexes
673 672 */
674 673 mutex_init(&nvc->nvc_mutex, NULL, MUTEX_DRIVER,
675 674 DDI_INTR_PRI(nvc->nvc_intr_pri));
676 675
677 676 attach_state |= ATTACH_PROGRESS_MUTEX_INIT;
678 677
679 678 /*
680 679 * get supported interrupt types
681 680 */
682 681 if (ddi_intr_get_supported_types(dip, &intr_types) !=
683 682 DDI_SUCCESS) {
684 683 nv_cmn_err(CE_WARN, nvc, NULL,
685 684 "ddi_intr_get_supported_types failed");
686 685
687 686 break;
688 687 }
689 688
690 689 NVLOG(NVDBG_INIT, nvc, NULL,
691 690 "ddi_intr_get_supported_types() returned: 0x%x",
692 691 intr_types);
693 692
694 693 #ifdef NV_MSI_SUPPORTED
695 694 if (intr_types & DDI_INTR_TYPE_MSI) {
696 695 NVLOG(NVDBG_INIT, nvc, NULL,
697 696 "using MSI interrupt type", NULL);
698 697
699 698 /*
700 699 * Try MSI first, but fall back to legacy if MSI
701 700 * attach fails
702 701 */
703 702 if (nv_add_msi_intrs(nvc) == DDI_SUCCESS) {
704 703 nvc->nvc_intr_type = DDI_INTR_TYPE_MSI;
705 704 attach_state |= ATTACH_PROGRESS_INTR_ADDED;
706 705 NVLOG(NVDBG_INIT, nvc, NULL,
707 706 "MSI interrupt setup done", NULL);
708 707 } else {
709 708 nv_cmn_err(CE_CONT, nvc, NULL,
710 709 "MSI registration failed "
711 710 "will try Legacy interrupts");
712 711 }
713 712 }
714 713 #endif
715 714
716 715 /*
717 716 * Either the MSI interrupt setup has failed or only
718 717 * the fixed interrupts are available on the system.
719 718 */
720 719 if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED) &&
721 720 (intr_types & DDI_INTR_TYPE_FIXED)) {
722 721
723 722 NVLOG(NVDBG_INIT, nvc, NULL,
724 723 "using Legacy interrupt type", NULL);
725 724
726 725 if (nv_add_legacy_intrs(nvc) == DDI_SUCCESS) {
727 726 nvc->nvc_intr_type = DDI_INTR_TYPE_FIXED;
728 727 attach_state |= ATTACH_PROGRESS_INTR_ADDED;
729 728 NVLOG(NVDBG_INIT, nvc, NULL,
730 729 "Legacy interrupt setup done", NULL);
731 730 } else {
732 731 nv_cmn_err(CE_WARN, nvc, NULL,
733 732 "legacy interrupt setup failed");
734 733 NVLOG(NVDBG_INIT, nvc, NULL,
735 734 "legacy interrupt setup failed", NULL);
736 735 break;
737 736 }
738 737 }
739 738
740 739 if (!(attach_state & ATTACH_PROGRESS_INTR_ADDED)) {
741 740 NVLOG(NVDBG_INIT, nvc, NULL,
742 741 "no interrupts registered", NULL);
743 742 break;
744 743 }
745 744
746 745 #ifdef SGPIO_SUPPORT
747 746 /*
748 747 * save off the controller number
749 748 */
750 749 (void) ddi_getlongprop(DDI_DEV_T_NONE, dip, DDI_PROP_DONTPASS,
751 750 "reg", (caddr_t)®s, &rlen);
752 751 nvc->nvc_ctlr_num = PCI_REG_FUNC_G(regs->pci_phys_hi);
753 752 kmem_free(regs, rlen);
754 753
755 754 /*
756 755 * initialize SGPIO
757 756 */
758 757 nv_sgp_led_init(nvc, pci_conf_handle);
759 758 #endif /* SGPIO_SUPPORT */
760 759
761 760 /*
762 761 * Do initial reset so that signature can be gathered
763 762 */
764 763 for (j = 0; j < NV_NUM_PORTS; j++) {
765 764 ddi_acc_handle_t bar5_hdl;
766 765 uint32_t sstatus;
767 766 nv_port_t *nvp;
768 767
769 768 nvp = &(nvc->nvc_port[j]);
770 769 bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
771 770 sstatus = ddi_get32(bar5_hdl, nvp->nvp_sstatus);
772 771
773 772 if (SSTATUS_GET_DET(sstatus) ==
774 773 SSTATUS_DET_DEVPRE_PHYCOM) {
775 774
776 775 nvp->nvp_state |= NV_ATTACH;
777 776 nvp->nvp_type = SATA_DTYPE_UNKNOWN;
778 777 mutex_enter(&nvp->nvp_mutex);
779 778 nv_reset(nvp, "attach");
780 779
781 780 while (nvp->nvp_state & NV_RESET) {
782 781 cv_wait(&nvp->nvp_reset_cv,
783 782 &nvp->nvp_mutex);
784 783 }
785 784
786 785 mutex_exit(&nvp->nvp_mutex);
787 786 }
788 787 }
789 788
790 789 /*
791 790 * attach to sata module
792 791 */
793 792 if (sata_hba_attach(nvc->nvc_dip,
794 793 &nvc->nvc_sata_hba_tran,
795 794 DDI_ATTACH) != DDI_SUCCESS) {
796 795 attach_state |= ATTACH_PROGRESS_SATA_MODULE;
797 796
798 797 break;
799 798 }
800 799
801 800 pci_config_teardown(&pci_conf_handle);
802 801
803 802 NVLOG(NVDBG_INIT, nvc, NULL, "nv_attach DDI_SUCCESS", NULL);
804 803
805 804 return (DDI_SUCCESS);
806 805
807 806 case DDI_RESUME:
808 807
809 808 nvc = ddi_get_soft_state(nv_statep, inst);
810 809
811 810 NVLOG(NVDBG_INIT, nvc, NULL,
812 811 "nv_attach(): DDI_RESUME inst %d", inst);
813 812
814 813 if (pci_config_setup(dip, &pci_conf_handle) != DDI_SUCCESS) {
815 814 return (DDI_FAILURE);
816 815 }
817 816
818 817 /*
819 818 * Set the PCI command register: enable IO/MEM/Master.
820 819 */
821 820 command = pci_config_get16(pci_conf_handle, PCI_CONF_COMM);
822 821 pci_config_put16(pci_conf_handle, PCI_CONF_COMM,
823 822 command|PCI_COMM_IO|PCI_COMM_MAE|PCI_COMM_ME);
824 823
825 824 /*
826 825 * Need to set bit 2 to 1 at config offset 0x50
827 826 * to enable access to the bar5 registers.
828 827 */
829 828 reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
830 829
831 830 if ((reg32 & NV_BAR5_SPACE_EN) != NV_BAR5_SPACE_EN) {
832 831 pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
833 832 reg32 | NV_BAR5_SPACE_EN);
834 833 }
835 834
836 835 nvc->nvc_state &= ~NV_CTRL_SUSPEND;
837 836
838 837 for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
839 838 nv_resume(&(nvc->nvc_port[i]));
840 839 }
841 840
842 841 pci_config_teardown(&pci_conf_handle);
843 842
844 843 return (DDI_SUCCESS);
845 844
846 845 default:
847 846 return (DDI_FAILURE);
848 847 }
849 848
850 849
851 850 /*
852 851 * DDI_ATTACH failure path starts here
853 852 */
854 853
855 854 if (attach_state & ATTACH_PROGRESS_INTR_ADDED) {
856 855 nv_rem_intrs(nvc);
857 856 }
858 857
859 858 if (attach_state & ATTACH_PROGRESS_SATA_MODULE) {
860 859 /*
861 860 * Remove timers
862 861 */
863 862 int port = 0;
864 863 nv_port_t *nvp;
865 864
866 865 for (; port < NV_MAX_PORTS(nvc); port++) {
867 866 nvp = &(nvc->nvc_port[port]);
868 867 if (nvp->nvp_timeout_id != 0) {
869 868 (void) untimeout(nvp->nvp_timeout_id);
870 869 }
871 870 }
872 871 }
873 872
874 873 if (attach_state & ATTACH_PROGRESS_MUTEX_INIT) {
875 874 mutex_destroy(&nvc->nvc_mutex);
876 875 }
877 876
878 877 if (attach_state & ATTACH_PROGRESS_CTL_SETUP) {
879 878 nv_uninit_ctl(nvc);
880 879 }
881 880
882 881 if (attach_state & ATTACH_PROGRESS_BARS) {
883 882 while (--bar >= 0) {
884 883 ddi_regs_map_free(&nvc->nvc_bar_hdl[bar]);
885 884 }
886 885 }
887 886
888 887 if (attach_state & ATTACH_PROGRESS_STATEP_ALLOC) {
889 888 ddi_soft_state_free(nv_statep, inst);
890 889 }
891 890
892 891 if (attach_state & ATTACH_PROGRESS_CONF_HANDLE) {
893 892 pci_config_teardown(&pci_conf_handle);
894 893 }
895 894
896 895 cmn_err(CE_WARN, "nv_sata%d attach failed", inst);
897 896
898 897 return (DDI_FAILURE);
899 898 }
900 899
901 900
/*
 * DDI detach(9E) entry point.
 *
 * DDI_DETACH tears down everything nv_attach() set up, in reverse:
 * interrupts, per-port timeouts, the six BAR register mappings, the
 * controller mutex, the controller/port structures, SGPIO resources
 * (when compiled in), the sata framework registration, and finally
 * the per-instance soft state.
 *
 * DDI_SUSPEND quiesces each port via nv_suspend() and marks the
 * controller suspended.
 *
 * Returns DDI_SUCCESS, or DDI_FAILURE for unsupported commands.
 */
static int
nv_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int i, port, inst = ddi_get_instance(dip);
	nv_ctl_t *nvc;
	nv_port_t *nvp;

	nvc = ddi_get_soft_state(nv_statep, inst);

	switch (cmd) {

	case DDI_DETACH:

		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_DETACH", NULL);

		/*
		 * Remove interrupts
		 */
		nv_rem_intrs(nvc);

		/*
		 * Remove timers. untimeout() waits for an in-flight
		 * timeout handler, so none can run after this loop.
		 */
		for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
			nvp = &(nvc->nvc_port[port]);
			if (nvp->nvp_timeout_id != 0) {
				(void) untimeout(nvp->nvp_timeout_id);
			}
		}

		/*
		 * Remove maps: all six PCI BAR register mappings that
		 * attach established in nvc_bar_hdl[].
		 */
		for (i = 0; i < 6; i++) {
			ddi_regs_map_free(&nvc->nvc_bar_hdl[i]);
		}

		/*
		 * Destroy mutexes
		 */
		mutex_destroy(&nvc->nvc_mutex);

		/*
		 * Uninitialize the controller structures
		 */
		nv_uninit_ctl(nvc);

#ifdef SGPIO_SUPPORT
		/*
		 * release SGPIO resources
		 */
		nv_sgp_cleanup(nvc);
#endif

		/*
		 * unregister from the sata module
		 */
		(void) sata_hba_detach(nvc->nvc_dip, DDI_DETACH);

		/*
		 * Free soft state
		 */
		ddi_soft_state_free(nv_statep, inst);

		return (DDI_SUCCESS);

	case DDI_SUSPEND:

		NVLOG(NVDBG_INIT, nvc, NULL, "nv_detach: DDI_SUSPEND", NULL);

		/*
		 * Quiesce each port.  NOTE(review): presumably
		 * nv_suspend() also stops the per-port timeout --
		 * confirm against its definition.
		 */
		for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
			nv_suspend(&(nvc->nvc_port[i]));
		}

		nvc->nvc_state |= NV_CTRL_SUSPEND;

		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
984 983
985 984
986 985 /*ARGSUSED*/
987 986 static int
988 987 nv_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd, void *arg, void **result)
989 988 {
990 989 nv_ctl_t *nvc;
991 990 int instance;
992 991 dev_t dev;
993 992
994 993 dev = (dev_t)arg;
995 994 instance = getminor(dev);
996 995
997 996 switch (infocmd) {
998 997 case DDI_INFO_DEVT2DEVINFO:
999 998 nvc = ddi_get_soft_state(nv_statep, instance);
1000 999 if (nvc != NULL) {
1001 1000 *result = nvc->nvc_dip;
1002 1001 return (DDI_SUCCESS);
1003 1002 } else {
1004 1003 *result = NULL;
1005 1004 return (DDI_FAILURE);
1006 1005 }
1007 1006 case DDI_INFO_DEVT2INSTANCE:
1008 1007 *(int *)result = instance;
1009 1008 break;
1010 1009 default:
1011 1010 break;
1012 1011 }
1013 1012 return (DDI_SUCCESS);
1014 1013 }
1015 1014
1016 1015
1017 1016 #ifdef SGPIO_SUPPORT
1018 1017 /* ARGSUSED */
1019 1018 static int
1020 1019 nv_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1021 1020 {
1022 1021 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, getminor(*devp));
1023 1022
1024 1023 if (nvc == NULL) {
1025 1024 return (ENXIO);
1026 1025 }
1027 1026
1028 1027 return (0);
1029 1028 }
1030 1029
1031 1030
1032 1031 /* ARGSUSED */
1033 1032 static int
1034 1033 nv_close(dev_t dev, int flag, int otyp, cred_t *credp)
1035 1034 {
1036 1035 return (0);
1037 1036 }
1038 1037
1039 1038
/*
 * ioctl(9E) entry point implementing the SGPIO LED devctl interface:
 * DEVCTL_SET_LED, DEVCTL_GET_LED and DEVCTL_NUM_LEDS, each operating
 * on a struct dc_led_ctl copied in/out of userland.
 *
 * Returns 0 on success; EBADF for a bad minor, EIO when SGPIO is not
 * set up on this controller, EFAULT on copyin/copyout failure, ENXIO
 * for an out-of-range controller/LED type, EINVAL for unsupported
 * commands or blink states.
 */
/* ARGSUSED */
static int
nv_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp, int *rvalp)
{
	nv_ctl_t *nvc;
	int inst;
	int status;
	int ctlr, port;
	int drive;
	uint8_t curr_led;
	struct dc_led_ctl led;

	inst = getminor(dev);
	if (inst == -1) {
		return (EBADF);
	}

	nvc = ddi_get_soft_state(nv_statep, inst);
	if (nvc == NULL) {
		return (EBADF);
	}

	/* fail if SGPIO resources were never set up for this controller */
	if ((nvc->nvc_sgp_cbp == NULL) || (nvc->nvc_sgp_cmn == NULL)) {
		return (EIO);
	}

	switch (cmd) {
	case DEVCTL_SET_LED:
		status = ddi_copyin((void *)arg, &led,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		/*
		 * Since only the first two controller currently support
		 * SGPIO (as per NVIDIA docs), this code will as well.
		 * Note that this validate the port value within led_state
		 * as well.
		 */

		ctlr = SGP_DRV_TO_CTLR(led.led_number);
		if ((ctlr != 0) && (ctlr != 1))
			return (ENXIO);

		/* blinking is not supported by this hardware (see below) */
		if ((led.led_state & DCL_STATE_FAST_BLNK) ||
		    (led.led_state & DCL_STATE_SLOW_BLNK)) {
			return (EINVAL);
		}

		drive = led.led_number;

		/* deassert the requested LED (error or locate) */
		if ((led.led_ctl_active == DCL_CNTRL_OFF) ||
		    (led.led_state == DCL_STATE_OFF)) {

			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				nv_sgp_error(nvc, drive, TR_ERROR_DISABLE);
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				nv_sgp_locate(nvc, drive, TR_LOCATE_DISABLE);
			} else {
				return (ENXIO);
			}

			/* remember the type was explicitly set via ioctl */
			port = SGP_DRV_TO_PORT(led.led_number);
			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
		}

		/* assert the requested LED (error or locate) */
		if (led.led_ctl_active == DCL_CNTRL_ON) {
			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				nv_sgp_error(nvc, drive, TR_ERROR_ENABLE);
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				nv_sgp_locate(nvc, drive, TR_LOCATE_ENABLE);
			} else {
				return (ENXIO);
			}

			port = SGP_DRV_TO_PORT(led.led_number);
			nvc->nvc_port[port].nvp_sgp_ioctl_mod |= led.led_type;
		}

		break;

	case DEVCTL_GET_LED:
		status = ddi_copyin((void *)arg, &led,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		/*
		 * Since only the first two controller currently support
		 * SGPIO (as per NVIDIA docs), this code will as well.
		 * Note that this validate the port value within led_state
		 * as well.
		 */

		ctlr = SGP_DRV_TO_CTLR(led.led_number);
		if ((ctlr != 0) && (ctlr != 1))
			return (ENXIO);

		/* read the drive's current transmit-register LED bits */
		curr_led = SGPIO0_TR_DRV(nvc->nvc_sgp_cbp->sgpio0_tr,
		    led.led_number);

		port = SGP_DRV_TO_PORT(led.led_number);
		if (nvc->nvc_port[port].nvp_sgp_ioctl_mod & led.led_type) {
			/* this LED type was set through the ioctl path */
			led.led_ctl_active = DCL_CNTRL_ON;

			if (led.led_type == DCL_TYPE_DEVICE_FAIL) {
				if (TR_ERROR(curr_led) == TR_ERROR_DISABLE)
					led.led_state = DCL_STATE_OFF;
				else
					led.led_state = DCL_STATE_ON;
			} else if (led.led_type == DCL_TYPE_DEVICE_OK2RM) {
				if (TR_LOCATE(curr_led) == TR_LOCATE_DISABLE)
					led.led_state = DCL_STATE_OFF;
				else
					led.led_state = DCL_STATE_ON;
			} else {
				return (ENXIO);
			}
		} else {
			led.led_ctl_active = DCL_CNTRL_OFF;
			/*
			 * Not really off, but never set and no constant for
			 * tri-state
			 */
			led.led_state = DCL_STATE_OFF;
		}

		status = ddi_copyout(&led, (void *)arg,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		break;

	case DEVCTL_NUM_LEDS:
		led.led_number = SGPIO_DRV_CNT_VALUE;
		led.led_ctl_active = 1;
		led.led_type = 3;

		/*
		 * According to documentation, NVIDIA SGPIO is supposed to
		 * support blinking, but it does not seem to work in practice.
		 */
		led.led_state = DCL_STATE_ON;

		status = ddi_copyout(&led, (void *)arg,
		    sizeof (struct dc_led_ctl), mode);
		if (status != 0)
			return (EFAULT);

		break;

	default:
		return (EINVAL);
	}

	return (0);
}
1198 1197 #endif /* SGPIO_SUPPORT */
1199 1198
1200 1199
/*
 * Called by sata module to probe a port. Port and device state
 * are not changed here... only reported back to the sata module.
 *
 * Reports SATA_FAILURE only for an out-of-range cport; otherwise
 * always SATA_SUCCESS with satadev_type/satadev_state filled in to
 * reflect the port's current condition (deactivated, failed, reset
 * in progress, link down, or device present).
 */
static int
nv_sata_probe(dev_info_t *dip, sata_device_t *sd)
{
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	uint8_t cport = sd->satadev_addr.cport;
	uint8_t pmport = sd->satadev_addr.pmport;
	uint8_t qual = sd->satadev_addr.qual;
	uint8_t det;

	nv_port_t *nvp;

	/* reject an out-of-range controller port */
	if (cport >= NV_MAX_PORTS(nvc)) {
		sd->satadev_type = SATA_DTYPE_NONE;
		sd->satadev_state = SATA_STATE_UNKNOWN;

		return (SATA_FAILURE);
	}

	ASSERT(nvc->nvc_port != NULL);
	nvp = &(nvc->nvc_port[cport]);
	ASSERT(nvp != NULL);

	NVLOG(NVDBG_ENTRY, nvc, nvp,
	    "nv_sata_probe: enter cport: 0x%x, pmport: 0x%x, "
	    "qual: 0x%x", cport, pmport, qual);

	mutex_enter(&nvp->nvp_mutex);

	/*
	 * This check seems to be done in the SATA module.
	 * It may not be required here
	 */
	if (nvp->nvp_state & NV_DEACTIVATED) {
		nv_cmn_err(CE_WARN, nvc, nvp,
		    "port inactive. Use cfgadm to activate");
		sd->satadev_type = SATA_DTYPE_UNKNOWN;
		sd->satadev_state = SATA_PSTATE_SHUTDOWN;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/* a failed port reports its last known device type */
	if (nvp->nvp_state & NV_FAILED) {
		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
		    "probe: port failed", NULL);
		sd->satadev_type = nvp->nvp_type;
		sd->satadev_state = SATA_PSTATE_FAILED;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/* this hardware has no port multiplier support */
	if (qual == SATA_ADDR_PMPORT) {
		sd->satadev_type = SATA_DTYPE_NONE;
		sd->satadev_state = SATA_STATE_UNKNOWN;
		mutex_exit(&nvp->nvp_mutex);
		nv_cmn_err(CE_WARN, nvc, nvp,
		    "controller does not support port multiplier");

		return (SATA_SUCCESS);
	}

	sd->satadev_state = SATA_PSTATE_PWRON;

	nv_copy_registers(nvp, sd, NULL);

	if (nvp->nvp_state & (NV_RESET|NV_LINK_EVENT)) {
		/*
		 * during a reset or link event, fake the status
		 * as it may be changing as a result of the reset
		 * or link event.
		 */
		DTRACE_PROBE(state_reset_link_event_faking_status_p);
		DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);

		SSTATUS_SET_IPM(sd->satadev_scr.sstatus,
		    SSTATUS_IPM_ACTIVE);
		SSTATUS_SET_DET(sd->satadev_scr.sstatus,
		    SSTATUS_DET_DEVPRE_PHYCOM);
		sd->satadev_type = nvp->nvp_type;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	det = SSTATUS_GET_DET(sd->satadev_scr.sstatus);

	/*
	 * determine link status
	 */
	if (det != SSTATUS_DET_DEVPRE_PHYCOM) {
		switch (det) {

		case SSTATUS_DET_NODEV:
		case SSTATUS_DET_PHYOFFLINE:
			sd->satadev_type = SATA_DTYPE_NONE;
			break;

		default:
			sd->satadev_type = SATA_DTYPE_UNKNOWN;
			break;
		}

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_SUCCESS);
	}

	/*
	 * Just report the current port state
	 */
	sd->satadev_type = nvp->nvp_type;
	DTRACE_PROBE1(nvp_type_h, int, nvp->nvp_type);

	mutex_exit(&nvp->nvp_mutex);

	return (SATA_SUCCESS);
}
1324 1323
1325 1324
/*
 * Called by sata module to start a new command.
 *
 * Walks the port's state gates in order (deactivated, failed, reset
 * in progress, link event pending, no/unknown device, port
 * multiplier, restore-after-reset pending, aborting) and rejects the
 * packet with the matching SATA_TRAN_* status when one applies.
 * Otherwise clears SError and dispatches the packet synchronously
 * (POLLING/SYNCH op modes) via nv_start_sync() or asynchronously via
 * nv_start_async().
 *
 * During panic dumps, timeouts never fire, so reset/link-event
 * processing is driven inline from here.
 */
static int
nv_sata_start(dev_info_t *dip, sata_pkt_t *spkt)
{
	int cport = spkt->satapkt_device.satadev_addr.cport;
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	nv_port_t *nvp = &(nvc->nvc_port[cport]);
	int ret;

	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_start: opmode: 0x%x cmd=%x",
	    spkt->satapkt_op_mode, spkt->satapkt_cmd.satacmd_cmd_reg);

	mutex_enter(&nvp->nvp_mutex);

	/* port has been disconnected via cfgadm */
	if (nvp->nvp_state & NV_DEACTIVATED) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_DEACTIVATED", NULL);
		DTRACE_PROBE(nvp_state_inactive_p);

		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	/* port was marked failed by earlier error handling */
	if (nvp->nvp_state & NV_FAILED) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_FAILED state", NULL);
		DTRACE_PROBE(nvp_state_failed_p);

		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	/* a reset is still being processed; caller must retry */
	if (nvp->nvp_state & NV_RESET) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "still waiting for reset completion", NULL);
		DTRACE_PROBE(nvp_state_reset_p);

		spkt->satapkt_reason = SATA_PKT_BUSY;

		/*
		 * If in panic, timeouts do not occur, so invoke
		 * reset handling directly so that the signature
		 * can be acquired to complete the reset handling.
		 */
		if (ddi_in_panic()) {
			NVLOG(NVDBG_ERRS, nvc, nvp,
			    "nv_sata_start: calling nv_monitor_reset "
			    "synchronously", NULL);

			(void) nv_monitor_reset(nvp);
		}

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}

	/* a link event is pending; caller must retry */
	if (nvp->nvp_state & NV_LINK_EVENT) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start(): link event ret bsy", NULL);
		DTRACE_PROBE(nvp_state_link_event_p);

		spkt->satapkt_reason = SATA_PKT_BUSY;

		/* in panic, drive event processing inline (no timeouts) */
		if (ddi_in_panic()) {
			NVLOG(NVDBG_ERRS, nvc, nvp,
			    "nv_sata_start: calling nv_timeout "
			    "synchronously", NULL);

			nv_timeout(nvp);
		}

		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}


	/* no usable device on this port */
	if ((nvp->nvp_type == SATA_DTYPE_NONE) ||
	    (nvp->nvp_type == SATA_DTYPE_UNKNOWN)) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: nvp_type 0x%x", nvp->nvp_type);
		DTRACE_PROBE1(not_ready_nvp_type_h, int, nvp->nvp_type);

		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		nv_copy_registers(nvp, &spkt->satapkt_device, NULL);
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_PORT_ERROR);
	}

	if (spkt->satapkt_device.satadev_type == SATA_DTYPE_PMULT) {

		nv_cmn_err(CE_WARN, nvc, nvp,
		    "port multiplier not supported by controller");

		ASSERT(nvp->nvp_type == SATA_DTYPE_PMULT);
		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_CMD_UNSUPPORTED);
	}

	/*
	 * after a device reset, and then when sata module restore processing
	 * is complete, the sata module will set sata_clear_dev_reset which
	 * indicates that restore processing has completed and normal
	 * non-restore related commands should be processed.
	 */
	if (spkt->satapkt_cmd.satacmd_flags.sata_clear_dev_reset) {

		NVLOG(NVDBG_RESET, nvc, nvp,
		    "nv_sata_start: clearing NV_RESTORE", NULL);
		DTRACE_PROBE(clearing_restore_p);
		DTRACE_PROBE1(nvp_state_before_clear_h, int, nvp->nvp_state);

		nvp->nvp_state &= ~NV_RESTORE;
	}

	/*
	 * if the device was recently reset as indicated by NV_RESTORE,
	 * only allow commands which restore device state. The sata module
	 * marks such commands with sata_ignore_dev_reset.
	 *
	 * during coredump, nv_reset is called but the restore isn't
	 * processed, so ignore the wait for restore if the system
	 * is panicing.
	 */
	if ((nvp->nvp_state & NV_RESTORE) &&
	    !(spkt->satapkt_cmd.satacmd_flags.sata_ignore_dev_reset) &&
	    (ddi_in_panic() == 0)) {

		NVLOG(NVDBG_RESET, nvc, nvp,
		    "nv_sata_start: waiting for restore ", NULL);
		DTRACE_PROBE1(restore_no_ignore_reset_nvp_state_h,
		    int, nvp->nvp_state);

		spkt->satapkt_reason = SATA_PKT_BUSY;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}

	/* an abort is being processed on this port; caller must retry */
	if (nvp->nvp_state & NV_ABORTING) {

		NVLOG(NVDBG_ERRS, nvc, nvp,
		    "nv_sata_start: NV_ABORTING", NULL);
		DTRACE_PROBE1(aborting_nvp_state_h, int, nvp->nvp_state);

		spkt->satapkt_reason = SATA_PKT_BUSY;
		mutex_exit(&nvp->nvp_mutex);

		return (SATA_TRAN_BUSY);
	}

	/*
	 * record command sequence for debugging.
	 */
	nvp->nvp_seq++;

	DTRACE_PROBE2(command_start, int *, nvp, int,
	    spkt->satapkt_cmd.satacmd_cmd_reg);

	/*
	 * clear SError to be able to check errors after the command failure
	 */
	nv_put32(nvp->nvp_ctlp->nvc_bar_hdl[5], nvp->nvp_serror, 0xffffffff);

	if (spkt->satapkt_op_mode &
	    (SATA_OPMODE_POLLING|SATA_OPMODE_SYNCH)) {

		ret = nv_start_sync(nvp, spkt);

		mutex_exit(&nvp->nvp_mutex);

		return (ret);
	}

	/*
	 * start command asynchronous command
	 */
	ret = nv_start_async(nvp, spkt);

	mutex_exit(&nvp->nvp_mutex);

	return (ret);
}
1526 1525
1527 1526
1528 1527 /*
1529 1528 * SATA_OPMODE_POLLING implies the driver is in a
1530 1529 * synchronous mode, and SATA_OPMODE_SYNCH is also set.
1531 1530 * If only SATA_OPMODE_SYNCH is set, the driver can use
1532 1531 * interrupts and sleep wait on a cv.
1533 1532 *
1534 1533 * If SATA_OPMODE_POLLING is set, the driver can't use
1535 1534 * interrupts and must busy wait and simulate the
1536 1535 * interrupts by waiting for BSY to be cleared.
1537 1536 *
1538 1537 * Synchronous mode has to return BUSY if there are
1539 1538 * any other commands already on the drive.
1540 1539 */
1541 1540 static int
1542 1541 nv_start_sync(nv_port_t *nvp, sata_pkt_t *spkt)
1543 1542 {
1544 1543 nv_ctl_t *nvc = nvp->nvp_ctlp;
1545 1544 int ret;
1546 1545
1547 1546 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync: entry",
1548 1547 NULL);
1549 1548
1550 1549 if (nvp->nvp_ncq_run != 0 || nvp->nvp_non_ncq_run != 0) {
1551 1550 spkt->satapkt_reason = SATA_PKT_BUSY;
1552 1551 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1553 1552 "nv_sata_satapkt_sync: device is busy, sync cmd rejected"
1554 1553 "ncq_run: %d non_ncq_run: %d spkt: %p",
1555 1554 nvp->nvp_ncq_run, nvp->nvp_non_ncq_run,
1556 1555 (&(nvp->nvp_slot[0]))->nvslot_spkt);
1557 1556
1558 1557 return (SATA_TRAN_BUSY);
1559 1558 }
1560 1559
1561 1560 /*
1562 1561 * if SYNC but not POLL, verify that this is not on interrupt thread.
1563 1562 */
1564 1563 if (!(spkt->satapkt_op_mode & SATA_OPMODE_POLLING) &&
1565 1564 servicing_interrupt()) {
1566 1565 spkt->satapkt_reason = SATA_PKT_BUSY;
1567 1566 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp,
1568 1567 "SYNC mode not allowed during interrupt", NULL);
1569 1568
1570 1569 return (SATA_TRAN_BUSY);
1571 1570
1572 1571 }
1573 1572
1574 1573 /*
1575 1574 * disable interrupt generation if in polled mode
1576 1575 */
1577 1576 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1578 1577 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
1579 1578 }
1580 1579
1581 1580 /*
1582 1581 * overload the satapkt_reason with BUSY so code below
1583 1582 * will know when it's done
1584 1583 */
1585 1584 spkt->satapkt_reason = SATA_PKT_BUSY;
1586 1585
1587 1586 if ((ret = nv_start_common(nvp, spkt)) != SATA_TRAN_ACCEPTED) {
1588 1587 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1589 1588 (*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1590 1589 }
1591 1590
1592 1591 return (ret);
1593 1592 }
1594 1593
1595 1594 if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
1596 1595 mutex_exit(&nvp->nvp_mutex);
1597 1596 ret = nv_poll_wait(nvp, spkt);
1598 1597 mutex_enter(&nvp->nvp_mutex);
1599 1598
1600 1599 (*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1601 1600
1602 1601 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1603 1602 " done % reason %d", ret);
1604 1603
1605 1604 return (ret);
1606 1605 }
1607 1606
1608 1607 /*
1609 1608 * non-polling synchronous mode handling. The interrupt will signal
1610 1609 * when device IO is completed.
1611 1610 */
1612 1611 while (spkt->satapkt_reason == SATA_PKT_BUSY) {
1613 1612 cv_wait(&nvp->nvp_sync_cv, &nvp->nvp_mutex);
1614 1613 }
1615 1614
1616 1615
1617 1616 NVLOG(NVDBG_SYNC, nvp->nvp_ctlp, nvp, "nv_sata_satapkt_sync:"
1618 1617 " done % reason %d", spkt->satapkt_reason);
1619 1618
1620 1619 return (SATA_TRAN_ACCEPTED);
1621 1620 }
1622 1621
1623 1622
/*
 * Busy-wait completion path for SATA_OPMODE_POLLING packets.
 *
 * Loops waiting for BSY to clear, then invokes the controller's
 * interrupt handler directly to simulate the interrupt that polled
 * mode has disabled.  On BSY timeout or an unclaimed "interrupt" the
 * port is reset and the packet completed with SATA_PKT_TIMEOUT.
 *
 * Called without nvp_mutex held (nv_start_sync drops it first);
 * always returns SATA_TRAN_ACCEPTED -- the packet's satapkt_reason
 * carries the actual outcome.
 */
static int
nv_poll_wait(nv_port_t *nvp, sata_pkt_t *spkt)
{
	int ret;
	nv_ctl_t *nvc = nvp->nvp_ctlp;
#if ! defined(__lock_lint)
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[0]); /* not NCQ aware */
#endif

	NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: enter", NULL);

	for (;;) {

		NV_DELAY_NSEC(400);

		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nv_wait",
		    NULL);
		/* wait for BSY to clear, up to the packet's timeout */
		if (nv_wait(nvp, 0, SATA_STATUS_BSY,
		    NV_SEC2USEC(spkt->satapkt_time), NV_NOSLEEP) == B_FALSE) {
			mutex_enter(&nvp->nvp_mutex);
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			nv_reset(nvp, "poll_wait");
			nv_complete_io(nvp, spkt, 0);
			mutex_exit(&nvp->nvp_mutex);
			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: "
			    "SATA_STATUS_BSY", NULL);

			return (SATA_TRAN_ACCEPTED);
		}

		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: before nvc_intr",
		    NULL);

		/*
		 * Simulate interrupt.
		 */
		ret = (*(nvc->nvc_interrupt))((caddr_t)nvc, NULL);
		NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait: after nvc_intr",
		    NULL);

		/* BSY cleared but nothing claimed -- treat as a failure */
		if (ret != DDI_INTR_CLAIMED) {
			NVLOG(NVDBG_SYNC, nvc, nvp, "nv_poll_wait:"
			    " unclaimed -- resetting", NULL);
			mutex_enter(&nvp->nvp_mutex);
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			nv_reset(nvp, "poll_wait intr not claimed");
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_complete_io(nvp, spkt, 0);
			mutex_exit(&nvp->nvp_mutex);

			return (SATA_TRAN_ACCEPTED);
		}

#if ! defined(__lock_lint)
		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {
			/*
			 * packet is complete
			 */
			return (SATA_TRAN_ACCEPTED);
		}
#endif
	}
	/*NOTREACHED*/
}
1689 1688
1690 1689
1691 1690 /*
1692 1691 * Called by sata module to abort outstanding packets.
1693 1692 */
1694 1693 /*ARGSUSED*/
1695 1694 static int
1696 1695 nv_sata_abort(dev_info_t *dip, sata_pkt_t *spkt, int flag)
1697 1696 {
1698 1697 int cport = spkt->satapkt_device.satadev_addr.cport;
1699 1698 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1700 1699 nv_port_t *nvp = &(nvc->nvc_port[cport]);
1701 1700 int c_a, ret;
1702 1701
1703 1702 ASSERT(cport < NV_MAX_PORTS(nvc));
1704 1703 NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_abort %d %p", flag, spkt);
1705 1704
1706 1705 mutex_enter(&nvp->nvp_mutex);
1707 1706
1708 1707 if (nvp->nvp_state & NV_DEACTIVATED) {
1709 1708 mutex_exit(&nvp->nvp_mutex);
1710 1709 nv_cmn_err(CE_WARN, nvc, nvp,
1711 1710 "abort request failed: port inactive");
1712 1711
1713 1712 return (SATA_FAILURE);
1714 1713 }
1715 1714
1716 1715 /*
1717 1716 * spkt == NULL then abort all commands
1718 1717 */
1719 1718 c_a = nv_abort_active(nvp, spkt, SATA_PKT_ABORTED, B_TRUE);
1720 1719
1721 1720 if (c_a) {
1722 1721 NVLOG(NVDBG_ENTRY, nvc, nvp,
1723 1722 "packets aborted running=%d", c_a);
1724 1723 ret = SATA_SUCCESS;
1725 1724 } else {
1726 1725 if (spkt == NULL) {
1727 1726 NVLOG(NVDBG_ENTRY, nvc, nvp, "no spkts to abort", NULL);
1728 1727 } else {
1729 1728 NVLOG(NVDBG_ENTRY, nvc, nvp,
1730 1729 "can't find spkt to abort", NULL);
1731 1730 }
1732 1731 ret = SATA_FAILURE;
1733 1732 }
1734 1733
1735 1734 mutex_exit(&nvp->nvp_mutex);
1736 1735
1737 1736 return (ret);
1738 1737 }
1739 1738
1740 1739
/*
 * if spkt == NULL abort all pkts running, otherwise
 * abort the requested packet. must be called with nv_mutex
 * held and returns with it held. Not NCQ aware.
 *
 * On the first matching packet the DMA engine is stopped and, when
 * the reset argument is B_TRUE, the port is reset (once only).
 * Each aborted packet is completed with abort_reason.  Returns the
 * number of packets aborted.
 */
static int
nv_abort_active(nv_port_t *nvp, sata_pkt_t *spkt, int abort_reason,
    boolean_t reset)
{
	int aborted = 0, i, reset_once = B_FALSE;
	struct nv_slot *nv_slotp;
	sata_pkt_t *spkt_slot;

	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));

	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_abort_active", NULL);

	/* block new command submission while aborting */
	nvp->nvp_state |= NV_ABORTING;

	for (i = 0; i < nvp->nvp_queue_depth; i++) {

		nv_slotp = &(nvp->nvp_slot[i]);
		spkt_slot = nv_slotp->nvslot_spkt;

		/*
		 * skip if not active command in slot
		 */
		if (spkt_slot == NULL) {
			continue;
		}

		/*
		 * if a specific packet was requested, skip if
		 * this is not a match
		 */
		if ((spkt != NULL) && (spkt != spkt_slot)) {
			continue;
		}

		/*
		 * stop the hardware. This could need reworking
		 * when NCQ is enabled in the driver.
		 */
		if (reset_once == B_FALSE) {
			ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;

			/*
			 * stop DMA engine
			 */
			nv_put8(bmhdl, nvp->nvp_bmicx, 0);

			/*
			 * Reset only if explicitly specified by the arg reset
			 */
			if (reset == B_TRUE) {
				reset_once = B_TRUE;
				nv_reset(nvp, "abort_active");
			}
		}

		spkt_slot->satapkt_reason = abort_reason;
		nv_complete_io(nvp, spkt_slot, i);
		aborted++;
	}

	nvp->nvp_state &= ~NV_ABORTING;

	return (aborted);
}
1810 1809
1811 1810
/*
 * Called by sata module to reset a port, device, or the controller.
 *
 * Only per-(device-)port resets (SATA_ADDR_CPORT/DCPORT) are
 * implemented, returning SATA_SUCCESS; controller and port-multiplier
 * resets are unsupported and return SATA_FAILURE.  In panic context
 * the reset is driven to completion synchronously via
 * nv_monitor_reset() since timeouts never fire.
 */
static int
nv_sata_reset(dev_info_t *dip, sata_device_t *sd)
{
	int cport = sd->satadev_addr.cport;
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	nv_port_t *nvp = &(nvc->nvc_port[cport]);
	int ret = SATA_FAILURE;

	ASSERT(cport < NV_MAX_PORTS(nvc));

	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_reset", NULL);

	mutex_enter(&nvp->nvp_mutex);

	switch (sd->satadev_addr.qual) {

	case SATA_ADDR_CPORT:
		/*FALLTHROUGH*/
	case SATA_ADDR_DCPORT:

		ret = SATA_SUCCESS;

		/*
		 * If a reset is already in progress, don't disturb it
		 */
		if ((nvp->nvp_state & (NV_RESET|NV_RESTORE)) &&
		    (ddi_in_panic() == 0)) {
			NVLOG(NVDBG_RESET, nvc, nvp,
			    "nv_sata_reset: reset already in progress", NULL);
			DTRACE_PROBE(reset_already_in_progress_p);

			break;
		}

		/*
		 * log the pre-reset state of the driver because dumping the
		 * blocks will disturb it.
		 */
		if (ddi_in_panic() == 1) {
			NVLOG(NVDBG_RESET, nvc, nvp, "in_panic. nvp_state: "
			    "0x%x nvp_reset_time: %d nvp_last_cmd: 0x%x "
			    "nvp_previous_cmd: 0x%x nvp_reset_count: %d "
			    "nvp_first_reset_reason: %s "
			    "nvp_reset_reason: %s nvp_seq: %d "
			    "in_interrupt: %d", nvp->nvp_state,
			    nvp->nvp_reset_time, nvp->nvp_last_cmd,
			    nvp->nvp_previous_cmd, nvp->nvp_reset_count,
			    nvp->nvp_first_reset_reason,
			    nvp->nvp_reset_reason, nvp->nvp_seq,
			    servicing_interrupt());
		}

		nv_reset(nvp, "sata_reset");

		/* fail outstanding packets; reset already issued above */
		(void) nv_abort_active(nvp, NULL, SATA_PKT_RESET, B_FALSE);

		/*
		 * If the port is inactive, do a quiet reset and don't attempt
		 * to wait for reset completion or do any post reset processing
		 *
		 */
		if (nvp->nvp_state & NV_DEACTIVATED) {
			nvp->nvp_state &= ~NV_RESET;
			nvp->nvp_reset_time = 0;

			break;
		}

		/*
		 * clear the port failed flag. It will get set again
		 * if the port is still not functioning.
		 */
		nvp->nvp_state &= ~NV_FAILED;

		/*
		 * timeouts are not available while the system is
		 * dropping core, so call nv_monitor_reset() directly
		 */
		if (ddi_in_panic() != 0) {
			while (nvp->nvp_state & NV_RESET) {
				drv_usecwait(1000);
				(void) nv_monitor_reset(nvp);
			}

			/* NOTE: this break is redundant with the one below */
			break;
		}

		break;
	case SATA_ADDR_CNTRL:
		NVLOG(NVDBG_ENTRY, nvc, nvp,
		    "nv_sata_reset: controller reset not supported", NULL);

		break;
	case SATA_ADDR_PMPORT:
	case SATA_ADDR_DPMPORT:
		NVLOG(NVDBG_ENTRY, nvc, nvp,
		    "nv_sata_reset: port multipliers not supported", NULL);
		/*FALLTHROUGH*/
	default:
		/*
		 * unsupported case
		 */
		break;
	}

	mutex_exit(&nvp->nvp_mutex);

	return (ret);
}
1924 1923
1925 1924
1926 1925 /*
1927 1926 * Sata entry point to handle port activation. cfgadm -c connect
1928 1927 */
1929 1928 static int
1930 1929 nv_sata_activate(dev_info_t *dip, sata_device_t *sd)
1931 1930 {
1932 1931 int cport = sd->satadev_addr.cport;
1933 1932 nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
1934 1933 nv_port_t *nvp = &(nvc->nvc_port[cport]);
1935 1934 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1936 1935 uint32_t sstatus;
1937 1936
1938 1937 ASSERT(cport < NV_MAX_PORTS(nvc));
1939 1938 NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_activate", NULL);
1940 1939
1941 1940 mutex_enter(&nvp->nvp_mutex);
1942 1941
1943 1942 sd->satadev_state = SATA_STATE_READY;
1944 1943
1945 1944 nv_copy_registers(nvp, sd, NULL);
1946 1945
1947 1946 (*(nvc->nvc_set_intr))(nvp, NV_INTR_ENABLE);
1948 1947
1949 1948 /*
1950 1949 * initiate link probing and device signature acquisition
1951 1950 */
1952 1951
1953 1952 bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
1954 1953
1955 1954 sstatus = ddi_get32(bar5_hdl, nvp->nvp_sstatus);
1956 1955
1957 1956 nvp->nvp_type = SATA_DTYPE_NONE;
1958 1957 nvp->nvp_signature = NV_NO_SIG;
1959 1958 nvp->nvp_state &= ~NV_DEACTIVATED;
1960 1959
1961 1960 if (SSTATUS_GET_DET(sstatus) ==
1962 1961 SSTATUS_DET_DEVPRE_PHYCOM) {
1963 1962
1964 1963 nvp->nvp_state |= NV_ATTACH;
1965 1964 nvp->nvp_type = SATA_DTYPE_UNKNOWN;
1966 1965 nv_reset(nvp, "sata_activate");
1967 1966
1968 1967 while (nvp->nvp_state & NV_RESET) {
1969 1968 cv_wait(&nvp->nvp_reset_cv, &nvp->nvp_mutex);
1970 1969 }
1971 1970
1972 1971 }
1973 1972
1974 1973 mutex_exit(&nvp->nvp_mutex);
1975 1974
1976 1975 return (SATA_SUCCESS);
1977 1976 }
1978 1977
1979 1978
/*
 * Sata entry point to handle port deactivation. cfgadm -c disconnect
 *
 * Aborts all active packets, marks the port NV_DEACTIVATED so new
 * commands are rejected, disables the port's interrupts, and reports
 * the port as shut down to the sata framework.
 *
 * Always returns SATA_SUCCESS.
 */
static int
nv_sata_deactivate(dev_info_t *dip, sata_device_t *sd)
{
	int cport = sd->satadev_addr.cport;
	nv_ctl_t *nvc = ddi_get_soft_state(nv_statep, ddi_get_instance(dip));
	nv_port_t *nvp = &(nvc->nvc_port[cport]);

	ASSERT(cport < NV_MAX_PORTS(nvc));
	NVLOG(NVDBG_ENTRY, nvc, nvp, "nv_sata_deactivate", NULL);

	mutex_enter(&nvp->nvp_mutex);

	/* abort everything outstanding; no reset on a disconnect */
	(void) nv_abort_active(nvp, NULL, SATA_PKT_ABORTED, B_FALSE);

	/*
	 * make the device inaccessible
	 */
	nvp->nvp_state |= NV_DEACTIVATED;

	/*
	 * disable the interrupts on port
	 */
	(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);

	sd->satadev_state = SATA_PSTATE_SHUTDOWN;
	nv_copy_registers(nvp, sd, NULL);

	mutex_exit(&nvp->nvp_mutex);

	return (SATA_SUCCESS);
}
2014 2013
2015 2014
2016 2015 /*
2017 2016 * find an empty slot in the driver's queue, increment counters,
2018 2017 * and then invoke the appropriate PIO or DMA start routine.
2019 2018 */
static int
nv_start_common(nv_port_t *nvp, sata_pkt_t *spkt)
{
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	int on_bit = 0x01, slot, sactive, ret, ncq = 0;
	uint8_t cmd = spkt->satapkt_cmd.satacmd_cmd_reg;
	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	nv_slot_t *nv_slotp;
	boolean_t dma_cmd;

	NVLOG(NVDBG_DELIVER, nvc, nvp, "nv_start_common entered: cmd: 0x%x",
	    sata_cmdp->satacmd_cmd_reg);

	/*
	 * NCQ (FPDMA queued) commands get a free slot from the SACTIVE
	 * cache; everything else always uses slot 0.
	 */
	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
		nvp->nvp_ncq_run++;
		/*
		 * search for an empty NCQ slot. by the time, it's already
		 * been determined by the caller that there is room on the
		 * queue.
		 */
		for (slot = 0; slot < nvp->nvp_queue_depth; slot++,
		    on_bit <<= 1) {
			if ((nvp->nvp_sactive_cache & on_bit) == 0) {
				break;
			}
		}

		/*
		 * the first empty slot found, should not exceed the queue
		 * depth of the drive. if it does it's an error.
		 */
		ASSERT(slot != nvp->nvp_queue_depth);

		/*
		 * cross-check the hardware SACTIVE register against the
		 * driver's cached copy before claiming the slot
		 */
		sactive = nv_get32(nvc->nvc_bar_hdl[5],
		    nvp->nvp_sactive);
		ASSERT((sactive & on_bit) == 0);
		nv_put32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive, on_bit);
		NVLOG(NVDBG_DELIVER, nvc, nvp, "setting SACTIVE onbit: %X",
		    on_bit);
		nvp->nvp_sactive_cache |= on_bit;

		ncq = NVSLOT_NCQ;

	} else {
		nvp->nvp_non_ncq_run++;
		slot = 0;
	}

	nv_slotp = (nv_slot_t *)&nvp->nvp_slot[slot];

	ASSERT(nv_slotp->nvslot_spkt == NULL);

	nv_slotp->nvslot_spkt = spkt;
	nv_slotp->nvslot_flags = ncq;

	/*
	 * the sata module doesn't indicate which commands utilize the
	 * DMA engine, so find out using this switch table.
	 */
	switch (spkt->satapkt_cmd.satacmd_cmd_reg) {
	case SATAC_READ_DMA_EXT:
	case SATAC_WRITE_DMA_EXT:
	case SATAC_WRITE_DMA:
	case SATAC_READ_DMA:
	case SATAC_READ_DMA_QUEUED:
	case SATAC_READ_DMA_QUEUED_EXT:
	case SATAC_WRITE_DMA_QUEUED:
	case SATAC_WRITE_DMA_QUEUED_EXT:
	case SATAC_READ_FPDMA_QUEUED:
	case SATAC_WRITE_FPDMA_QUEUED:
	case SATAC_DSM:
		dma_cmd = B_TRUE;
		break;
	default:
		dma_cmd = B_FALSE;
	}

	/*
	 * select the start and interrupt handlers for the slot based on
	 * whether the command is DMA, ATAPI packet, non-data, or PIO
	 */
	if (sata_cmdp->satacmd_num_dma_cookies != 0 && dma_cmd == B_TRUE) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "DMA command", NULL);
		nv_slotp->nvslot_start = nv_start_dma;
		nv_slotp->nvslot_intr = nv_intr_dma;
	} else if (spkt->satapkt_cmd.satacmd_cmd_reg == SATAC_PACKET) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "packet command", NULL);
		nv_slotp->nvslot_start = nv_start_pkt_pio;
		nv_slotp->nvslot_intr = nv_intr_pkt_pio;
		if ((direction == SATA_DIR_READ) ||
		    (direction == SATA_DIR_WRITE)) {
			nv_slotp->nvslot_byte_count =
			    spkt->satapkt_cmd.satacmd_bp->b_bcount;
			nv_slotp->nvslot_v_addr =
			    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
			/*
			 * Freeing DMA resources allocated by the sata common
			 * module to avoid buffer overwrite (dma sync) problems
			 * when the buffer is released at command completion.
			 * Primarily an issue on systems with more than
			 * 4GB of memory.
			 */
			sata_free_dma_resources(spkt);
		}
	} else if (direction == SATA_DIR_NODATA_XFER) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "non-data command", NULL);
		nv_slotp->nvslot_start = nv_start_nodata;
		nv_slotp->nvslot_intr = nv_intr_nodata;
	} else if (direction == SATA_DIR_READ) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio in command", NULL);
		nv_slotp->nvslot_start = nv_start_pio_in;
		nv_slotp->nvslot_intr = nv_intr_pio_in;
		nv_slotp->nvslot_byte_count =
		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
		nv_slotp->nvslot_v_addr =
		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
		/*
		 * Freeing DMA resources allocated by the sata common module to
		 * avoid buffer overwrite (dma sync) problems when the buffer
		 * is released at command completion. This is not an issue
		 * for write because write does not update the buffer.
		 * Primarily an issue on systems with more than 4GB of memory.
		 */
		sata_free_dma_resources(spkt);
	} else if (direction == SATA_DIR_WRITE) {
		NVLOG(NVDBG_DELIVER, nvc, nvp, "pio out command", NULL);
		nv_slotp->nvslot_start = nv_start_pio_out;
		nv_slotp->nvslot_intr = nv_intr_pio_out;
		nv_slotp->nvslot_byte_count =
		    spkt->satapkt_cmd.satacmd_bp->b_bcount;
		nv_slotp->nvslot_v_addr =
		    spkt->satapkt_cmd.satacmd_bp->b_un.b_addr;
	} else {
		/* no handler matches: reject the packet as unsupported */
		nv_cmn_err(CE_WARN, nvc, nvp, "malformed command: direction"
		    " %d cookies %d cmd %x",
		    sata_cmdp->satacmd_flags.sata_data_direction,
		    sata_cmdp->satacmd_num_dma_cookies, cmd);
		spkt->satapkt_reason = SATA_PKT_CMD_UNSUPPORTED;
		ret = SATA_TRAN_CMD_UNSUPPORTED;

		goto fail;
	}

	if ((ret = (*nv_slotp->nvslot_start)(nvp, slot)) ==
	    SATA_TRAN_ACCEPTED) {
#ifdef SGPIO_SUPPORT
		nv_sgp_drive_active(nvp->nvp_ctlp,
		    (nvp->nvp_ctlp->nvc_ctlr_num * 2) + nvp->nvp_port_num);
#endif
		nv_slotp->nvslot_stime = ddi_get_lbolt();

		/*
		 * start timer if it's not already running and this packet
		 * is not requesting polled mode.
		 */
		if ((nvp->nvp_timeout_id == 0) &&
		    ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0)) {
			nv_setup_timeout(nvp, NV_ONE_SEC);
		}

		nvp->nvp_previous_cmd = nvp->nvp_last_cmd;
		nvp->nvp_last_cmd = spkt->satapkt_cmd.satacmd_cmd_reg;

		return (SATA_TRAN_ACCEPTED);
	}

	fail:

	/*
	 * undo the slot claim and counters so the slot can be reused
	 */
	spkt->satapkt_reason = SATA_TRAN_PORT_ERROR;

	if (ncq == NVSLOT_NCQ) {
		nvp->nvp_ncq_run--;
		nvp->nvp_sactive_cache &= ~on_bit;
	} else {
		nvp->nvp_non_ncq_run--;
	}
	nv_slotp->nvslot_spkt = NULL;
	nv_slotp->nvslot_flags = 0;

	return (ret);
}
2199 2198
2200 2199
2201 2200 /*
2202 2201 * Check if the signature is ready and if non-zero translate
2203 2202 * it into a solaris sata defined type.
2204 2203 */
2205 2204 static void
2206 2205 nv_read_signature(nv_port_t *nvp)
2207 2206 {
2208 2207 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
2209 2208 int retry_count = 0;
2210 2209
2211 2210 retry:
2212 2211
2213 2212 nvp->nvp_signature = nv_get8(cmdhdl, nvp->nvp_count);
2214 2213 nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_sect) << 8);
2215 2214 nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_lcyl) << 16);
2216 2215 nvp->nvp_signature |= (nv_get8(cmdhdl, nvp->nvp_hcyl) << 24);
2217 2216
2218 2217 NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2219 2218 "nv_read_signature: 0x%x ", nvp->nvp_signature);
2220 2219
2221 2220 switch (nvp->nvp_signature) {
2222 2221
2223 2222 case NV_DISK_SIG:
2224 2223 NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp, "drive is a disk", NULL);
2225 2224 DTRACE_PROBE(signature_is_disk_device_p)
2226 2225 nvp->nvp_type = SATA_DTYPE_ATADISK;
2227 2226
2228 2227 break;
2229 2228 case NV_ATAPI_SIG:
2230 2229 NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2231 2230 "drive is an optical device", NULL);
2232 2231 DTRACE_PROBE(signature_is_optical_device_p)
2233 2232 nvp->nvp_type = SATA_DTYPE_ATAPICD;
2234 2233 break;
2235 2234 case NV_PM_SIG:
2236 2235 NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
2237 2236 "device is a port multiplier", NULL);
2238 2237 DTRACE_PROBE(signature_is_port_multiplier_p)
2239 2238 nvp->nvp_type = SATA_DTYPE_PMULT;
2240 2239 break;
2241 2240 case NV_NO_SIG:
2242 2241 NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2243 2242 "signature not available", NULL);
2244 2243 DTRACE_PROBE(sig_not_available_p);
2245 2244 nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2246 2245 break;
2247 2246 default:
2248 2247 if (retry_count++ == 0) {
2249 2248 /*
2250 2249 * this is a rare corner case where the controller
2251 2250 * is updating the task file registers as the driver
2252 2251 * is reading them. If this happens, wait a bit and
2253 2252 * retry once.
2254 2253 */
2255 2254 NV_DELAY_NSEC(1000000);
2256 2255 NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
2257 2256 "invalid signature 0x%x retry once",
2258 2257 nvp->nvp_signature);
2259 2258 DTRACE_PROBE1(signature_invalid_retry_once_h,
2260 2259 int, nvp->nvp_signature);
2261 2260
2262 2261 goto retry;
2263 2262 }
2264 2263
2265 2264 nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp,
2266 2265 "invalid signature 0x%x", nvp->nvp_signature);
2267 2266 nvp->nvp_type = SATA_DTYPE_UNKNOWN;
2268 2267
2269 2268 break;
2270 2269 }
2271 2270 }
2272 2271
2273 2272
2274 2273 /*
2275 2274 * Set up a new timeout or complete a timeout in microseconds.
2276 2275 * If microseconds is zero, no new timeout is scheduled. Must be
2277 2276 * called at the end of the timeout routine.
2278 2277 */
static void
nv_setup_timeout(nv_port_t *nvp, clock_t microseconds)
{
	/* duration of the timeout currently scheduled, if any */
	clock_t old_duration = nvp->nvp_timeout_duration;

	if (microseconds == 0) {

		return;
	}

	if (nvp->nvp_timeout_id != 0 && nvp->nvp_timeout_duration == 0) {
		/*
		 * Since we are dropping the mutex for untimeout,
		 * the timeout may be executed while we are trying to
		 * untimeout and setting up a new timeout.
		 * If nvp_timeout_duration is 0, then this function
		 * was re-entered. Just exit.
		 */
		cmn_err(CE_WARN, "nv_setup_timeout re-entered");

		return;
	}

	/*
	 * zero the duration while (re)scheduling; this is the marker the
	 * re-entrancy check above keys off of
	 */
	nvp->nvp_timeout_duration = 0;

	if (nvp->nvp_timeout_id == 0) {
		/*
		 * start new timer
		 */
		nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
		    drv_usectohz(microseconds));
	} else {
		/*
		 * If the currently running timeout is due later than the
		 * requested one, restart it with a new expiration.
		 * Our timeouts do not need to be accurate - we would be just
		 * checking that the specified time was exceeded.
		 */
		if (old_duration > microseconds) {
			/*
			 * nvp_mutex must be dropped around untimeout() since
			 * the timeout handler takes the same mutex; see the
			 * re-entrancy guard above for the consequence
			 */
			mutex_exit(&nvp->nvp_mutex);
			(void) untimeout(nvp->nvp_timeout_id);
			mutex_enter(&nvp->nvp_mutex);
			nvp->nvp_timeout_id = timeout(nv_timeout, (void *)nvp,
			    drv_usectohz(microseconds));
		}
	}

	nvp->nvp_timeout_duration = microseconds;
}
2328 2327

/*
 * COMRESET assertion hold time in microseconds, passed to drv_usecwait()
 * in nv_reset(). Left as a non-static global, presumably so it can be
 * tuned from a debugger without rebuilding the driver -- TODO confirm.
 */
int nv_reset_length = NV_RESET_LENGTH;
2332 2331
2333 2332 /*
2334 2333 * Reset the port
2335 2334 */
static void
nv_reset(nv_port_t *nvp, char *reason)
{
	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	uint32_t sctrl, serr, sstatus;
	uint8_t bmicx;
	int i, j;
	boolean_t reset_success = B_FALSE;

	ASSERT(mutex_owned(&nvp->nvp_mutex));

	/*
	 * If the port is reset right after the controller receives
	 * the DMA activate command (or possibly any other FIS),
	 * controller operation freezes without any known recovery
	 * procedure. Until Nvidia advises on a recovery mechanism,
	 * avoid the situation by waiting sufficiently long to
	 * ensure the link is not actively transmitting any FIS.
	 * 100ms was empirically determined to be large enough to
	 * ensure no transaction was left in flight but not too long
	 * as to cause any significant thread delay.
	 */
	drv_usecwait(100000);

	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
	DTRACE_PROBE1(serror_h, int, serr);

	/*
	 * stop DMA engine.
	 */
	bmicx = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmicx);
	nv_put8(nvp->nvp_bm_hdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);

	/*
	 * the current setting of the NV_RESET in nvp_state indicates whether
	 * this is the first reset attempt or a retry.
	 */
	if (nvp->nvp_state & NV_RESET) {
		nvp->nvp_reset_retry_count++;

		NVLOG(NVDBG_RESET, nvc, nvp, "npv_reset_retry_count: %d",
		    nvp->nvp_reset_retry_count);

	} else {
		nvp->nvp_reset_retry_count = 0;
		nvp->nvp_reset_count++;
		nvp->nvp_state |= NV_RESET;

		NVLOG(NVDBG_RESET, nvc, nvp, "nvp_reset_count: %d reason: %s "
		    "serror: 0x%x seq: %d run: %d cmd: 0x%x",
		    nvp->nvp_reset_count, reason, serr, nvp->nvp_seq,
		    nvp->nvp_non_ncq_run, nvp->nvp_last_cmd);
	}

	/*
	 * a link event could have occurred slightly before excessive
	 * interrupt processing invokes a reset. Reset handling overrides
	 * link event processing so it's safe to clear it here.
	 */
	nvp->nvp_state &= ~(NV_RESTORE|NV_LINK_EVENT);

	nvp->nvp_reset_time = ddi_get_lbolt();

	if ((nvp->nvp_state & (NV_ATTACH|NV_HOTPLUG)) == 0) {
		/* unexpected reset during normal operation: log loudly */
		nv_cmn_err(CE_NOTE, nvc, nvp, "nv_reset: reason: %s serr 0x%x"
		    " nvp_state: 0x%x", reason, serr, nvp->nvp_state);
		/*
		 * keep a record of why the first reset occurred, for debugging
		 */
		if (nvp->nvp_first_reset_reason[0] == '\0') {
			(void) strncpy(nvp->nvp_first_reset_reason,
			    reason, NV_REASON_LEN);
			nvp->nvp_first_reset_reason[NV_REASON_LEN - 1] = '\0';
		}
	}

	(void) strncpy(nvp->nvp_reset_reason, reason, NV_REASON_LEN);

	/*
	 * ensure there is terminating NULL
	 */
	nvp->nvp_reset_reason[NV_REASON_LEN - 1] = '\0';

	/*
	 * Issue hardware reset; retry if necessary.
	 */
	for (i = 0; i < NV_COMRESET_ATTEMPTS; i++) {

		/*
		 * clear signature registers and the error register too
		 */
		nv_put8(cmdhdl, nvp->nvp_sect, 0);
		nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
		nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
		nv_put8(cmdhdl, nvp->nvp_count, 0);

		nv_put8(nvp->nvp_cmd_hdl, nvp->nvp_error, 0);

		/*
		 * assert reset in PHY by writing a 1 to bit 0 scontrol
		 */
		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);

		nv_put32(bar5_hdl, nvp->nvp_sctrl,
		    sctrl | SCONTROL_DET_COMRESET);

		/* Wait at least 1ms, as required by the spec */
		drv_usecwait(nv_reset_length);

		serr = nv_get32(bar5_hdl, nvp->nvp_serror);
		DTRACE_PROBE1(aftercomreset_serror_h, int, serr);

		/* Reset all accumulated error bits */
		nv_put32(bar5_hdl, nvp->nvp_serror, 0xffffffff);


		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
		sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset: applied (%d); "
		    "sctrl 0x%x, sstatus 0x%x", i, sctrl, sstatus);

		/* de-assert reset in PHY */
		nv_put32(bar5_hdl, nvp->nvp_sctrl,
		    sctrl & ~SCONTROL_DET_COMRESET);

		/*
		 * Wait up to 10ms for COMINIT to arrive, indicating that
		 * the device recognized COMRESET.
		 */
		for (j = 0; j < 10; j++) {
			drv_usecwait(NV_ONE_MSEC);
			sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
			if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
			    (SSTATUS_GET_DET(sstatus) ==
			    SSTATUS_DET_DEVPRE_PHYCOM)) {
				reset_success = B_TRUE;
				break;
			}
		}

		if (reset_success == B_TRUE)
			break;
	}


	serr = nv_get32(bar5_hdl, nvp->nvp_serror);
	DTRACE_PROBE1(last_serror_h, int, serr);

	if (reset_success == B_FALSE) {
		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset not succeeded "
		    "after %d attempts. serr: 0x%x", i, serr);
	} else {
		NVLOG(NVDBG_RESET, nvc, nvp, "nv_reset succeeded"
		    " after %dms. serr: 0x%x", TICK_TO_MSEC(ddi_get_lbolt() -
		    nvp->nvp_reset_time), serr);
	}

	/*
	 * schedule the timeout that will poll for the post-reset device
	 * signature; completion is handled asynchronously from here
	 */
	nvp->nvp_wait_sig = NV_WAIT_SIG;
	nv_setup_timeout(nvp, nvp->nvp_wait_sig);
}
2498 2497
2499 2498
2500 2499 /*
2501 2500 * Initialize register handling specific to mcp51/mcp55/mcp61
2502 2501 */
/* ARGSUSED */
static void
mcp5x_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
{
	nv_port_t *nvp;
	uchar_t *bar5 = nvc->nvc_bar_addr[5];
	uint8_t off, port;

	/* mcp5x-specific control and NCQ registers live in bar5 */
	nvc->nvc_mcp5x_ctl = (uint32_t *)(bar5 + MCP5X_CTL);
	nvc->nvc_mcp5x_ncq = (uint32_t *)(bar5 + MCP5X_NCQ);

	/* per-port interrupt registers are 2 bytes apart in bar5 */
	for (port = 0, off = 0; port < NV_MAX_PORTS(nvc); port++, off += 2) {
		nvp = &(nvc->nvc_port[port]);
		nvp->nvp_mcp5x_int_status =
		    (uint16_t *)(bar5 + MCP5X_INT_STATUS + off);
		nvp->nvp_mcp5x_int_ctl =
		    (uint16_t *)(bar5 + MCP5X_INT_CTL + off);

		/*
		 * clear any previous interrupts asserted
		 */
		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_status,
		    MCP5X_INT_CLEAR);

		/*
		 * These are the interrupts to accept for now. The spec
		 * says these are enable bits, but nvidia has indicated
		 * these are masking bits. Even though they may be masked
		 * out to prevent asserting the main interrupt, they can
		 * still be asserted while reading the interrupt status
		 * register, so that needs to be considered in the interrupt
		 * handler.
		 */
		nv_put16(nvc->nvc_bar_hdl[5], nvp->nvp_mcp5x_int_ctl,
		    ~(MCP5X_INT_IGNORE));
	}

	/*
	 * Allow the driver to program the BM on the first command instead
	 * of waiting for an interrupt.
	 */
#ifdef NCQ
	/*
	 * NOTE(review): no declaration of `flags` is visible in this
	 * function; this branch presumably only compiles when NCQ is
	 * defined along with supporting code -- confirm before enabling.
	 */
	flags = MCP_SATA_AE_NCQ_PDEV_FIRST_CMD | MCP_SATA_AE_NCQ_SDEV_FIRST_CMD;
	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq, flags);
	flags = MCP_SATA_AE_CTL_PRI_SWNCQ | MCP_SATA_AE_CTL_SEC_SWNCQ;
	nv_put32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ctl, flags);
#endif

	/*
	 * mcp55 rev A03 and above supports 40-bit physical addressing.
	 * Enable DMA to take advantage of that.
	 *
	 * NOTE(review): from here on, `nvp` still points at the last port
	 * from the loop above; it is only used as a logging context.
	 */
	if ((nvc->nvc_devid > 0x37f) ||
	    ((nvc->nvc_devid == 0x37f) && (nvc->nvc_revid >= 0xa3))) {
		if (nv_sata_40bit_dma == B_TRUE) {
			uint32_t reg32;
			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
			    "devid is %X revid is %X. 40-bit DMA"
			    " addressing enabled", nvc->nvc_devid,
			    nvc->nvc_revid);
			nvc->dma_40bit = B_TRUE;

			/* turn on 40-bit PRD addressing in config space */
			reg32 = pci_config_get32(pci_conf_handle,
			    NV_SATA_CFG_20);
			pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
			    reg32 | NV_40BIT_PRD);

			/*
			 * CFG_23 bits 0-7 contain the top 8 bits (of 40
			 * bits) for the primary PRD table, and bits 8-15
			 * contain the top 8 bits for the secondary. Set
			 * to zero because the DMA attribute table for PRD
			 * allocation forces it into 32 bit address space
			 * anyway.
			 */
			reg32 = pci_config_get32(pci_conf_handle,
			    NV_SATA_CFG_23);
			pci_config_put32(pci_conf_handle, NV_SATA_CFG_23,
			    reg32 & 0xffff0000);
		} else {
			NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
			    "40-bit DMA disabled by nv_sata_40bit_dma", NULL);
		}
	} else {
		nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp, "devid is %X revid is"
		    " %X. Not capable of 40-bit DMA addressing",
		    nvc->nvc_devid, nvc->nvc_revid);
	}
}
2593 2592
2594 2593
2595 2594 /*
2596 2595 * Initialize register handling specific to ck804
2597 2596 */
2598 2597 static void
2599 2598 ck804_reg_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2600 2599 {
2601 2600 uchar_t *bar5 = nvc->nvc_bar_addr[5];
2602 2601 uint32_t reg32;
2603 2602 uint16_t reg16;
2604 2603 nv_port_t *nvp;
2605 2604 int j;
2606 2605
2607 2606 /*
2608 2607 * delay hotplug interrupts until PHYRDY.
2609 2608 */
2610 2609 reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_42);
2611 2610 pci_config_put32(pci_conf_handle, NV_SATA_CFG_42,
2612 2611 reg32 | CK804_CFG_DELAY_HOTPLUG_INTR);
2613 2612
2614 2613 /*
2615 2614 * enable hot plug interrupts for channel x and y
2616 2615 */
2617 2616 reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2618 2617 (uint16_t *)(bar5 + NV_ADMACTL_X));
2619 2618 nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_X),
2620 2619 NV_HIRQ_EN | reg16);
2621 2620
2622 2621
2623 2622 reg16 = nv_get16(nvc->nvc_bar_hdl[5],
2624 2623 (uint16_t *)(bar5 + NV_ADMACTL_Y));
2625 2624 nv_put16(nvc->nvc_bar_hdl[5], (uint16_t *)(bar5 + NV_ADMACTL_Y),
2626 2625 NV_HIRQ_EN | reg16);
2627 2626
2628 2627 nvc->nvc_ck804_int_status = (uint8_t *)(bar5 + CK804_SATA_INT_STATUS);
2629 2628
2630 2629 /*
2631 2630 * clear any existing interrupt pending then enable
2632 2631 */
2633 2632 for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2634 2633 nvp = &(nvc->nvc_port[j]);
2635 2634 mutex_enter(&nvp->nvp_mutex);
2636 2635 (*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
2637 2636 NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
2638 2637 mutex_exit(&nvp->nvp_mutex);
2639 2638 }
2640 2639 }
2641 2640
2642 2641
2643 2642 /*
2644 2643 * Initialize the controller and set up driver data structures.
2645 2644 * determine if ck804 or mcp5x class.
2646 2645 */
2647 2646 static int
2648 2647 nv_init_ctl(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
2649 2648 {
2650 2649 struct sata_hba_tran stran;
2651 2650 nv_port_t *nvp;
2652 2651 int j;
2653 2652 uchar_t *cmd_addr, *ctl_addr, *bm_addr;
2654 2653 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
2655 2654 uchar_t *bar5 = nvc->nvc_bar_addr[5];
2656 2655 uint32_t reg32;
2657 2656 uint8_t reg8, reg8_save;
2658 2657
2659 2658 NVLOG(NVDBG_INIT, nvc, NULL, "nv_init_ctl entered", NULL);
2660 2659
2661 2660 nvc->nvc_mcp5x_flag = B_FALSE;
2662 2661
2663 2662 /*
2664 2663 * Need to set bit 2 to 1 at config offset 0x50
2665 2664 * to enable access to the bar5 registers.
2666 2665 */
2667 2666 reg32 = pci_config_get32(pci_conf_handle, NV_SATA_CFG_20);
2668 2667 if (!(reg32 & NV_BAR5_SPACE_EN)) {
2669 2668 pci_config_put32(pci_conf_handle, NV_SATA_CFG_20,
2670 2669 reg32 | NV_BAR5_SPACE_EN);
2671 2670 }
2672 2671
2673 2672 /*
2674 2673 * Determine if this is ck804 or mcp5x. ck804 will map in the
2675 2674 * task file registers into bar5 while mcp5x won't. The offset of
2676 2675 * the task file registers in mcp5x's space is unused, so it will
2677 2676 * return zero. So check one of the task file registers to see if it is
2678 2677 * writable and reads back what was written. If it's mcp5x it will
2679 2678 * return back 0xff whereas ck804 will return the value written.
2680 2679 */
2681 2680 reg8_save = nv_get8(bar5_hdl,
2682 2681 (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2683 2682
2684 2683
2685 2684 for (j = 1; j < 3; j++) {
2686 2685
2687 2686 nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), j);
2688 2687 reg8 = nv_get8(bar5_hdl,
2689 2688 (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X));
2690 2689
2691 2690 if (reg8 != j) {
2692 2691 nvc->nvc_mcp5x_flag = B_TRUE;
2693 2692 break;
2694 2693 }
2695 2694 }
2696 2695
2697 2696 nv_put8(bar5_hdl, (uint8_t *)(bar5 + NV_BAR5_TRAN_LEN_CH_X), reg8_save);
2698 2697
2699 2698 if (nvc->nvc_mcp5x_flag == B_FALSE) {
2700 2699 NVLOG(NVDBG_INIT, nvc, NULL, "controller is CK804/MCP04",
2701 2700 NULL);
2702 2701 nvc->nvc_interrupt = ck804_intr;
2703 2702 nvc->nvc_reg_init = ck804_reg_init;
2704 2703 nvc->nvc_set_intr = ck804_set_intr;
2705 2704 } else {
2706 2705 NVLOG(NVDBG_INIT, nvc, NULL, "controller is MCP51/MCP55/MCP61",
2707 2706 NULL);
2708 2707 nvc->nvc_interrupt = mcp5x_intr;
2709 2708 nvc->nvc_reg_init = mcp5x_reg_init;
2710 2709 nvc->nvc_set_intr = mcp5x_set_intr;
2711 2710 }
2712 2711
2713 2712
2714 2713 stran.sata_tran_hba_rev = SATA_TRAN_HBA_REV;
2715 2714 stran.sata_tran_hba_dip = nvc->nvc_dip;
2716 2715 stran.sata_tran_hba_num_cports = NV_NUM_PORTS;
2717 2716 stran.sata_tran_hba_features_support =
2718 2717 SATA_CTLF_HOTPLUG | SATA_CTLF_ASN | SATA_CTLF_ATAPI;
2719 2718 stran.sata_tran_hba_qdepth = NV_QUEUE_SLOTS;
2720 2719 stran.sata_tran_probe_port = nv_sata_probe;
2721 2720 stran.sata_tran_start = nv_sata_start;
2722 2721 stran.sata_tran_abort = nv_sata_abort;
2723 2722 stran.sata_tran_reset_dport = nv_sata_reset;
2724 2723 stran.sata_tran_selftest = NULL;
2725 2724 stran.sata_tran_hotplug_ops = &nv_hotplug_ops;
2726 2725 stran.sata_tran_pwrmgt_ops = NULL;
2727 2726 stran.sata_tran_ioctl = NULL;
2728 2727 nvc->nvc_sata_hba_tran = stran;
2729 2728
2730 2729 nvc->nvc_port = kmem_zalloc(sizeof (nv_port_t) * NV_MAX_PORTS(nvc),
2731 2730 KM_SLEEP);
2732 2731
2733 2732 /*
2734 2733 * initialize registers common to all chipsets
2735 2734 */
2736 2735 nv_common_reg_init(nvc);
2737 2736
2738 2737 for (j = 0; j < NV_MAX_PORTS(nvc); j++) {
2739 2738 nvp = &(nvc->nvc_port[j]);
2740 2739
2741 2740 cmd_addr = nvp->nvp_cmd_addr;
2742 2741 ctl_addr = nvp->nvp_ctl_addr;
2743 2742 bm_addr = nvp->nvp_bm_addr;
2744 2743
2745 2744 mutex_init(&nvp->nvp_mutex, NULL, MUTEX_DRIVER,
2746 2745 DDI_INTR_PRI(nvc->nvc_intr_pri));
2747 2746
2748 2747 cv_init(&nvp->nvp_sync_cv, NULL, CV_DRIVER, NULL);
2749 2748 cv_init(&nvp->nvp_reset_cv, NULL, CV_DRIVER, NULL);
2750 2749
2751 2750 nvp->nvp_data = cmd_addr + NV_DATA;
2752 2751 nvp->nvp_error = cmd_addr + NV_ERROR;
2753 2752 nvp->nvp_feature = cmd_addr + NV_FEATURE;
2754 2753 nvp->nvp_count = cmd_addr + NV_COUNT;
2755 2754 nvp->nvp_sect = cmd_addr + NV_SECT;
2756 2755 nvp->nvp_lcyl = cmd_addr + NV_LCYL;
2757 2756 nvp->nvp_hcyl = cmd_addr + NV_HCYL;
2758 2757 nvp->nvp_drvhd = cmd_addr + NV_DRVHD;
2759 2758 nvp->nvp_status = cmd_addr + NV_STATUS;
2760 2759 nvp->nvp_cmd = cmd_addr + NV_CMD;
2761 2760 nvp->nvp_altstatus = ctl_addr + NV_ALTSTATUS;
2762 2761 nvp->nvp_devctl = ctl_addr + NV_DEVCTL;
2763 2762
2764 2763 nvp->nvp_bmicx = bm_addr + BMICX_REG;
2765 2764 nvp->nvp_bmisx = bm_addr + BMISX_REG;
2766 2765 nvp->nvp_bmidtpx = (uint32_t *)(bm_addr + BMIDTPX_REG);
2767 2766
2768 2767 nvp->nvp_state = 0;
2769 2768
2770 2769 /*
2771 2770 * Initialize dma handles, etc.
2772 2771 * If it fails, the port is in inactive state.
2773 2772 */
2774 2773 nv_init_port(nvp);
2775 2774 }
2776 2775
2777 2776 /*
2778 2777 * initialize register by calling chip specific reg initialization
2779 2778 */
2780 2779 (*(nvc->nvc_reg_init))(nvc, pci_conf_handle);
2781 2780
2782 2781 /* initialize the hba dma attribute */
2783 2782 if (nvc->dma_40bit == B_TRUE)
2784 2783 nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2785 2784 &buffer_dma_40bit_attr;
2786 2785 else
2787 2786 nvc->nvc_sata_hba_tran.sata_tran_hba_dma_attr =
2788 2787 &buffer_dma_attr;
2789 2788
2790 2789 return (NV_SUCCESS);
2791 2790 }
2792 2791
2793 2792
2794 2793 /*
2795 2794 * Initialize data structures with enough slots to handle queuing, if
2796 2795 * enabled. NV_QUEUE_SLOTS will be set to 1 or 32, depending on whether
2797 2796 * NCQ support is built into the driver and enabled. It might have been
2798 2797 * better to derive the true size from the drive itself, but the sata
2799 2798 * module only sends down that information on the first NCQ command,
2800 2799 * which means possibly re-sizing the structures on an interrupt stack,
2801 2800 * making error handling more messy. The easy way is to just allocate
2802 2801 * all 32 slots, which is what most drives support anyway.
2803 2802 */
static void
nv_init_port(nv_port_t *nvp)
{
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	size_t prd_size = sizeof (prde_t) * NV_DMA_NSEGS;
	dev_info_t *dip = nvc->nvc_dip;
	ddi_device_acc_attr_t dev_attr;
	size_t buf_size;
	ddi_dma_cookie_t cookie;
	uint_t count;
	int rc, i;

	dev_attr.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	dev_attr.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	dev_attr.devacc_attr_dataorder = DDI_STRICTORDER_ACC;

	/*
	 * per-slot arrays for the PRD (scatter/gather) tables: DMA
	 * handles, access handles, kernel virtual and physical addresses
	 */
	nvp->nvp_sg_dma_hdl = kmem_zalloc(sizeof (ddi_dma_handle_t) *
	    NV_QUEUE_SLOTS, KM_SLEEP);

	nvp->nvp_sg_acc_hdl = kmem_zalloc(sizeof (ddi_acc_handle_t) *
	    NV_QUEUE_SLOTS, KM_SLEEP);

	nvp->nvp_sg_addr = kmem_zalloc(sizeof (caddr_t) *
	    NV_QUEUE_SLOTS, KM_SLEEP);

	nvp->nvp_sg_paddr = kmem_zalloc(sizeof (uint32_t) *
	    NV_QUEUE_SLOTS, KM_SLEEP);

	nvp->nvp_slot = kmem_zalloc(sizeof (nv_slot_t) * NV_QUEUE_SLOTS,
	    KM_SLEEP);

	/*
	 * allocate and bind a DMA-able PRD table per slot; on any failure
	 * nv_uninit_port() tears down everything allocated so far and the
	 * port is left inactive
	 */
	for (i = 0; i < NV_QUEUE_SLOTS; i++) {

		rc = ddi_dma_alloc_handle(dip, &nv_prd_dma_attr,
		    DDI_DMA_SLEEP, NULL, &(nvp->nvp_sg_dma_hdl[i]));

		if (rc != DDI_SUCCESS) {
			nv_uninit_port(nvp);

			return;
		}

		rc = ddi_dma_mem_alloc(nvp->nvp_sg_dma_hdl[i], prd_size,
		    &dev_attr, DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
		    NULL, &(nvp->nvp_sg_addr[i]), &buf_size,
		    &(nvp->nvp_sg_acc_hdl[i]));

		if (rc != DDI_SUCCESS) {
			nv_uninit_port(nvp);

			return;
		}

		rc = ddi_dma_addr_bind_handle(nvp->nvp_sg_dma_hdl[i], NULL,
		    nvp->nvp_sg_addr[i], buf_size,
		    DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
		    DDI_DMA_SLEEP, NULL, &cookie, &count);

		if (rc != DDI_DMA_MAPPED) {
			nv_uninit_port(nvp);

			return;
		}

		/*
		 * nv_prd_dma_attr must guarantee a single, int-aligned,
		 * 32-bit-addressable cookie for the table
		 */
		ASSERT(count == 1);
		ASSERT((cookie.dmac_address & (sizeof (int) - 1)) == 0);

		ASSERT(cookie.dmac_laddress <= UINT32_MAX);

		nvp->nvp_sg_paddr[i] = cookie.dmac_address;
	}

	/*
	 * nvp_queue_depth represents the actual drive queue depth, not the
	 * number of slots allocated in the structures (which may be more).
	 * Actual queue depth is only learned after the first NCQ command, so
	 * initialize it to 1 for now.
	 */
	nvp->nvp_queue_depth = 1;

	/*
	 * Port is initialized whether the device is attached or not.
	 * Link processing and device identification will be started later,
	 * after interrupts are initialized.
	 */
	nvp->nvp_type = SATA_DTYPE_NONE;
}
2891 2890
2892 2891
/*
 * Free the dynamically allocated per-port structures: the per-slot
 * PRD scatter/gather DMA buffers, their handle arrays, and the slot
 * array itself.
 *
 * This is also invoked from nv_init_port() failure paths, so it must
 * tolerate partially initialized state: each slot's paddr/handle is
 * checked before it is torn down.
 */
static void
nv_uninit_port(nv_port_t *nvp)
{
	int i;

	NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp,
	    "nv_uninit_port uninitializing", NULL);

#ifdef SGPIO_SUPPORT
	/* detach the drive from activity LED processing if it was a disk */
	if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
		nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
		    nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
	}
#endif

	nvp->nvp_type = SATA_DTYPE_NONE;

	/*
	 * Tear down each slot's DMA resources in the reverse order of
	 * allocation: unbind the handle, free the DMA memory, then free
	 * the DMA handle itself.  A zero paddr / NULL handle means that
	 * stage was never reached for this slot.
	 */
	for (i = 0; i < NV_QUEUE_SLOTS; i++) {
		if (nvp->nvp_sg_paddr[i]) {
			(void) ddi_dma_unbind_handle(nvp->nvp_sg_dma_hdl[i]);
		}

		if (nvp->nvp_sg_acc_hdl[i] != NULL) {
			ddi_dma_mem_free(&(nvp->nvp_sg_acc_hdl[i]));
		}

		if (nvp->nvp_sg_dma_hdl[i] != NULL) {
			ddi_dma_free_handle(&(nvp->nvp_sg_dma_hdl[i]));
		}
	}

	/* free the arrays themselves and NULL the pointers for safe reuse */
	kmem_free(nvp->nvp_slot, sizeof (nv_slot_t) * NV_QUEUE_SLOTS);
	nvp->nvp_slot = NULL;

	kmem_free(nvp->nvp_sg_dma_hdl,
	    sizeof (ddi_dma_handle_t) * NV_QUEUE_SLOTS);
	nvp->nvp_sg_dma_hdl = NULL;

	kmem_free(nvp->nvp_sg_acc_hdl,
	    sizeof (ddi_acc_handle_t) * NV_QUEUE_SLOTS);
	nvp->nvp_sg_acc_hdl = NULL;

	kmem_free(nvp->nvp_sg_addr, sizeof (caddr_t) * NV_QUEUE_SLOTS);
	nvp->nvp_sg_addr = NULL;

	kmem_free(nvp->nvp_sg_paddr, sizeof (uint32_t) * NV_QUEUE_SLOTS);
	nvp->nvp_sg_paddr = NULL;
}
2944 2943
2945 2944
2946 2945 /*
2947 2946 * Cache register offsets and access handles to frequently accessed registers
2948 2947 * which are common to either chipset.
2949 2948 */
2950 2949 static void
2951 2950 nv_common_reg_init(nv_ctl_t *nvc)
2952 2951 {
2953 2952 uchar_t *bar5_addr = nvc->nvc_bar_addr[5];
2954 2953 uchar_t *bm_addr_offset, *sreg_offset;
2955 2954 uint8_t bar, port;
2956 2955 nv_port_t *nvp;
2957 2956
2958 2957 for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
2959 2958 if (port == 0) {
2960 2959 bar = NV_BAR_0;
2961 2960 bm_addr_offset = 0;
2962 2961 sreg_offset = (uchar_t *)(CH0_SREG_OFFSET + bar5_addr);
2963 2962 } else {
2964 2963 bar = NV_BAR_2;
2965 2964 bm_addr_offset = (uchar_t *)8;
2966 2965 sreg_offset = (uchar_t *)(CH1_SREG_OFFSET + bar5_addr);
2967 2966 }
2968 2967
2969 2968 nvp = &(nvc->nvc_port[port]);
2970 2969 nvp->nvp_ctlp = nvc;
2971 2970 nvp->nvp_port_num = port;
2972 2971 NVLOG(NVDBG_INIT, nvc, nvp, "setting up port mappings", NULL);
2973 2972
2974 2973 nvp->nvp_cmd_hdl = nvc->nvc_bar_hdl[bar];
2975 2974 nvp->nvp_cmd_addr = nvc->nvc_bar_addr[bar];
2976 2975 nvp->nvp_ctl_hdl = nvc->nvc_bar_hdl[bar + 1];
2977 2976 nvp->nvp_ctl_addr = nvc->nvc_bar_addr[bar + 1];
2978 2977 nvp->nvp_bm_hdl = nvc->nvc_bar_hdl[NV_BAR_4];
2979 2978 nvp->nvp_bm_addr = nvc->nvc_bar_addr[NV_BAR_4] +
2980 2979 (long)bm_addr_offset;
2981 2980
2982 2981 nvp->nvp_sstatus = (uint32_t *)(sreg_offset + NV_SSTATUS);
2983 2982 nvp->nvp_serror = (uint32_t *)(sreg_offset + NV_SERROR);
2984 2983 nvp->nvp_sactive = (uint32_t *)(sreg_offset + NV_SACTIVE);
2985 2984 nvp->nvp_sctrl = (uint32_t *)(sreg_offset + NV_SCTRL);
2986 2985 }
2987 2986 }
2988 2987
2989 2988
/*
 * Uninitialize every port on the controller: free its per-port
 * resources, destroy its lock and condition variables, and finally
 * free the port array itself.
 */
static void
nv_uninit_ctl(nv_ctl_t *nvc)
{
	int port;
	nv_port_t *nvp;

	NVLOG(NVDBG_INIT, nvc, NULL, "nv_uninit_ctl entered", NULL);

	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
		nvp = &(nvc->nvc_port[port]);
		mutex_enter(&nvp->nvp_mutex);
		NVLOG(NVDBG_INIT, nvc, nvp, "uninitializing port", NULL);
		nv_uninit_port(nvp);
		mutex_exit(&nvp->nvp_mutex);
		/*
		 * no further users of this port at this point, so it is
		 * safe to destroy its synchronization objects
		 */
		mutex_destroy(&nvp->nvp_mutex);
		cv_destroy(&nvp->nvp_sync_cv);
		cv_destroy(&nvp->nvp_reset_cv);
	}

	kmem_free(nvc->nvc_port, NV_MAX_PORTS(nvc) * sizeof (nv_port_t));
	nvc->nvc_port = NULL;
}
3012 3011
3013 3012
3014 3013 /*
3015 3014 * ck804 interrupt. This is a wrapper around ck804_intr_process so
3016 3015 * that interrupts from other devices can be disregarded while dtracing.
3017 3016 */
3018 3017 /* ARGSUSED */
3019 3018 static uint_t
3020 3019 ck804_intr(caddr_t arg1, caddr_t arg2)
3021 3020 {
3022 3021 nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3023 3022 uint8_t intr_status;
3024 3023 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
3025 3024
3026 3025 if (nvc->nvc_state & NV_CTRL_SUSPEND)
3027 3026 return (DDI_INTR_UNCLAIMED);
3028 3027
3029 3028 intr_status = ddi_get8(bar5_hdl, nvc->nvc_ck804_int_status);
3030 3029
3031 3030 if (intr_status == 0) {
3032 3031
3033 3032 return (DDI_INTR_UNCLAIMED);
3034 3033 }
3035 3034
3036 3035 ck804_intr_process(nvc, intr_status);
3037 3036
3038 3037 return (DDI_INTR_CLAIMED);
3039 3038 }
3040 3039
3041 3040
/*
 * Main interrupt processing for ck804.  First services normal command
 * completion interrupts on each port, then hot plug/remove (and power
 * management) interrupts, which require explicit clearing.
 */
static void
ck804_intr_process(nv_ctl_t *nvc, uint8_t intr_status)
{

	int port, i;
	nv_port_t *nvp;
	nv_slot_t *nv_slotp;
	uchar_t status;
	sata_pkt_t *spkt;
	uint8_t bmstatus, clear_bits;
	ddi_acc_handle_t bmhdl;
	int nvcleared = 0;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uint32_t sstatus;
	/* per-port status bits: index 0 = primary device, 1 = secondary */
	int port_mask_hot[] = {
		CK804_INT_PDEV_HOT, CK804_INT_SDEV_HOT,
	};
	int port_mask_pm[] = {
		CK804_INT_PDEV_PM, CK804_INT_SDEV_PM,
	};

	NVLOG(NVDBG_INTR, nvc, NULL,
	    "ck804_intr_process entered intr_status=%x", intr_status);

	/*
	 * For command completion interrupt, explicit clear is not required.
	 * however, for the error cases explicit clear is performed.
	 */
	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {

		int port_mask[] = {CK804_INT_PDEV_INT, CK804_INT_SDEV_INT};

		if ((port_mask[port] & intr_status) == 0) {

			continue;
		}

		NVLOG(NVDBG_INTR, nvc, NULL,
		    "ck804_intr_process interrupt on port %d", port);

		nvp = &(nvc->nvc_port[port]);

		mutex_enter(&nvp->nvp_mutex);

		/*
		 * this case might be encountered when the other port
		 * is active
		 */
		if (nvp->nvp_state & NV_DEACTIVATED) {

			/*
			 * clear interrupt bits
			 */
			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
			    port_mask[port]);

			mutex_exit(&nvp->nvp_mutex);

			continue;
		}

		/*
		 * spurious interrupt: no packet outstanding in slot 0
		 * (only slot 0 is used with NCQ disabled)
		 */
		if ((&(nvp->nvp_slot[0]))->nvslot_spkt == NULL) {
			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_status);
			NVLOG(NVDBG_ALWAYS, nvc, nvp, "spurious interrupt "
			    " no command in progress status=%x", status);
			mutex_exit(&nvp->nvp_mutex);

			/*
			 * clear interrupt bits
			 */
			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
			    port_mask[port]);

			continue;
		}

		bmhdl = nvp->nvp_bm_hdl;
		bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);

		/* bus master did not flag a device interrupt for this port */
		if (!(bmstatus & BMISX_IDEINTS)) {
			mutex_exit(&nvp->nvp_mutex);

			continue;
		}

		/* altstatus read does not clear the interrupt condition */
		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);

		/* device still busy; the real completion will come later */
		if (status & SATA_STATUS_BSY) {
			mutex_exit(&nvp->nvp_mutex);

			continue;
		}

		nv_slotp = &(nvp->nvp_slot[0]);

		ASSERT(nv_slotp);

		spkt = nv_slotp->nvslot_spkt;

		if (spkt == NULL) {
			mutex_exit(&nvp->nvp_mutex);

			continue;
		}

		/* per-command completion handler set when it was started */
		(*nv_slotp->nvslot_intr)(nvp, nv_slotp);

		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);

		if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {

			nv_complete_io(nvp, spkt, 0);
		}

		mutex_exit(&nvp->nvp_mutex);
	}

	/*
	 * ck804 often doesn't correctly distinguish hot add/remove
	 * interrupts.  Frequently both the ADD and the REMOVE bits
	 * are asserted, whether it was a remove or add.  Use sstatus
	 * to distinguish hot add from hot remove.
	 */

	for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
		clear_bits = 0;

		nvp = &(nvc->nvc_port[port]);
		mutex_enter(&nvp->nvp_mutex);

		if ((port_mask_pm[port] & intr_status) != 0) {
			clear_bits = port_mask_pm[port];
			NVLOG(NVDBG_HOT, nvc, nvp,
			    "clearing PM interrupt bit: %x",
			    intr_status & port_mask_pm[port]);
		}

		if ((port_mask_hot[port] & intr_status) == 0) {
			if (clear_bits != 0) {
				goto clear;
			} else {
				mutex_exit(&nvp->nvp_mutex);
				continue;
			}
		}

		/*
		 * reaching here means there was a hot add or remove.
		 */
		clear_bits |= port_mask_hot[port];

		ASSERT(nvc->nvc_port[port].nvp_sstatus);

		sstatus = nv_get32(bar5_hdl,
		    nvc->nvc_port[port].nvp_sstatus);

		/*
		 * device-present-and-communicating means the event was
		 * actually a removal notification; otherwise it was an add
		 */
		if ((sstatus & SSTATUS_DET_DEVPRE_PHYCOM) ==
		    SSTATUS_DET_DEVPRE_PHYCOM) {
			nv_link_event(nvp, NV_REM_DEV);
		} else {
			nv_link_event(nvp, NV_ADD_DEV);
		}
	clear:
		/*
		 * clear interrupt bits.  explicit interrupt clear is
		 * required for hotplug interrupts.
		 */
		nv_put8(bar5_hdl, nvc->nvc_ck804_int_status, clear_bits);

		/*
		 * make sure it's flushed and cleared.  If not try
		 * again.  Sometimes it has been observed to not clear
		 * on the first try.
		 */
		intr_status = nv_get8(bar5_hdl, nvc->nvc_ck804_int_status);

		/*
		 * make 10 additional attempts to clear the interrupt
		 */
		for (i = 0; (intr_status & clear_bits) && (i < 10); i++) {
			NVLOG(NVDBG_ALWAYS, nvc, nvp, "inst_status=%x "
			    "still not clear try=%d", intr_status,
			    ++nvcleared);
			nv_put8(bar5_hdl, nvc->nvc_ck804_int_status,
			    clear_bits);
			intr_status = nv_get8(bar5_hdl,
			    nvc->nvc_ck804_int_status);
		}

		/*
		 * if still not clear, log a message and disable the
		 * port. highly unlikely that this path is taken, but it
		 * gives protection against a wedged interrupt.
		 */
		if (intr_status & clear_bits) {
			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
			    SATA_ADDR_CPORT, SATA_PSTATE_FAILED);
			nvp->nvp_state |= NV_FAILED;
			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
			    B_TRUE);
			nv_cmn_err(CE_WARN, nvc, nvp, "unable to clear "
			    "interrupt. disabling port intr_status=%X",
			    intr_status);
		}

		mutex_exit(&nvp->nvp_mutex);
	}
}
3257 3256
3258 3257
/*
 * Interrupt handler for mcp5x.  It is invoked by the wrapper for each port
 * on the controller, to handle completion and hot plug and remove events.
 * Returns DDI_INTR_CLAIMED if any interrupt condition for this port was
 * observed, DDI_INTR_UNCLAIMED otherwise.
 */
static uint_t
mcp5x_intr_port(nv_port_t *nvp)
{
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uint8_t clear = 0, intr_cycles = 0;
	int ret = DDI_INTR_UNCLAIMED;
	uint16_t int_status;
	clock_t intr_time;
	int loop_cnt = 0;

	/* timestamp for the debug duration tracking done at the bottom */
	nvp->intr_start_time = ddi_get_lbolt();

	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port entered", NULL);

	do {
		/*
		 * read current interrupt status
		 */
		int_status = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_status);

		/*
		 * if the port is deactivated, just clear the interrupt and
		 * return.  can get here even if interrupts were disabled
		 * on this port but enabled on the other.
		 */
		if (nvp->nvp_state & NV_DEACTIVATED) {
			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
			    int_status);

			return (DDI_INTR_CLAIMED);
		}

		NVLOG(NVDBG_INTR, nvc, nvp, "int_status = %x", int_status);

		DTRACE_PROBE1(int_status_before_h, int, int_status);

		/*
		 * MCP5X_INT_IGNORE interrupts will show up in the status,
		 * but are masked out from causing an interrupt to be generated
		 * to the processor.  Ignore them here by masking them out.
		 */
		int_status &= ~(MCP5X_INT_IGNORE);

		DTRACE_PROBE1(int_status_after_h, int, int_status);

		/*
		 * exit the loop when no more interrupts to process
		 */
		if (int_status == 0) {

			break;
		}

		if (int_status & MCP5X_INT_COMPLETE) {
			NVLOG(NVDBG_INTR, nvc, nvp,
			    "mcp5x_packet_complete_intr", NULL);
			/*
			 * since int_status was set, return DDI_INTR_CLAIMED
			 * from the DDI's perspective even though the packet
			 * completion may not have succeeded.  If it fails,
			 * need to manually clear the interrupt, otherwise
			 * clearing is implicit as a result of reading the
			 * task file status register.
			 */
			ret = DDI_INTR_CLAIMED;
			if (mcp5x_packet_complete_intr(nvc, nvp) ==
			    NV_FAILURE) {
				clear |= MCP5X_INT_COMPLETE;
			} else {
				/* real progress; reset the stuck-intr guard */
				intr_cycles = 0;
			}
		}

		if (int_status & MCP5X_INT_DMA_SETUP) {
			NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr",
			    NULL);

			/*
			 * Needs to be cleared before starting the BM, so do it
			 * now.  make sure this is still working.
			 */
			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status,
			    MCP5X_INT_DMA_SETUP);
#ifdef NCQ
			ret = mcp5x_dma_setup_intr(nvc, nvp);
#endif
		}

		/* hot remove / hot add notifications */
		if (int_status & MCP5X_INT_REM) {
			clear |= MCP5X_INT_REM;
			ret = DDI_INTR_CLAIMED;

			mutex_enter(&nvp->nvp_mutex);
			nv_link_event(nvp, NV_REM_DEV);
			mutex_exit(&nvp->nvp_mutex);

		} else if (int_status & MCP5X_INT_ADD) {
			clear |= MCP5X_INT_ADD;
			ret = DDI_INTR_CLAIMED;

			mutex_enter(&nvp->nvp_mutex);
			nv_link_event(nvp, NV_ADD_DEV);
			mutex_exit(&nvp->nvp_mutex);
		}
		if (clear) {
			nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, clear);
			clear = 0;
		}

		/*
		 * protect against a stuck interrupt
		 */
		if (intr_cycles++ == NV_MAX_INTR_LOOP) {

			NVLOG(NVDBG_INTR, nvc, nvp, "excessive interrupt "
			    "processing. Disabling interrupts int_status=%X"
			    " clear=%X", int_status, clear);
			DTRACE_PROBE(excessive_interrupts_f);

			mutex_enter(&nvp->nvp_mutex);
			(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
			/*
			 * reset the device.  If it remains inaccessible
			 * after a reset it will be failed then.
			 */
			(void) nv_abort_active(nvp, NULL, SATA_PKT_DEV_ERROR,
			    B_TRUE);
			mutex_exit(&nvp->nvp_mutex);
		}

	} while (loop_cnt++ < nv_max_intr_loops);

	/* record the high-water mark of loop iterations for debugging */
	if (loop_cnt > nvp->intr_loop_cnt) {
		NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp,
		    "Exiting with multiple intr loop count %d", loop_cnt);
		nvp->intr_loop_cnt = loop_cnt;
	}

	if ((nv_debug_flags & (NVDBG_INTR | NVDBG_VERBOSE)) ==
	    (NVDBG_INTR | NVDBG_VERBOSE)) {
		uint8_t status, bmstatus;
		uint16_t int_status2;

		if (int_status & MCP5X_INT_COMPLETE) {
			status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
			bmstatus = nv_get8(nvp->nvp_bm_hdl, nvp->nvp_bmisx);
			int_status2 = nv_get16(nvp->nvp_ctlp->nvc_bar_hdl[5],
			    nvp->nvp_mcp5x_int_status);
			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
			    "mcp55_intr_port: Exiting with altstatus %x, "
			    "bmicx %x, int_status2 %X, int_status %X, ret %x,"
			    " loop_cnt %d ", status, bmstatus, int_status2,
			    int_status, ret, loop_cnt);
		}
	}

	NVLOG(NVDBG_INTR, nvc, nvp, "mcp55_intr_port: finished ret=%d", ret);

	/*
	 * To facilitate debugging, keep track of the length of time spent in
	 * the port interrupt routine.
	 */
	intr_time = ddi_get_lbolt() - nvp->intr_start_time;
	if (intr_time > nvp->intr_duration)
		nvp->intr_duration = intr_time;

	return (ret);
}
3432 3431
3433 3432
3434 3433 /* ARGSUSED */
3435 3434 static uint_t
3436 3435 mcp5x_intr(caddr_t arg1, caddr_t arg2)
3437 3436 {
3438 3437 nv_ctl_t *nvc = (nv_ctl_t *)arg1;
3439 3438 int ret;
3440 3439
3441 3440 if (nvc->nvc_state & NV_CTRL_SUSPEND)
3442 3441 return (DDI_INTR_UNCLAIMED);
3443 3442
3444 3443 ret = mcp5x_intr_port(&(nvc->nvc_port[0]));
3445 3444 ret |= mcp5x_intr_port(&(nvc->nvc_port[1]));
3446 3445
3447 3446 return (ret);
3448 3447 }
3449 3448
3450 3449
#ifdef NCQ
/*
 * with software driven NCQ on mcp5x, an interrupt occurs right
 * before the drive is ready to do a DMA transfer.  At this point,
 * the PRD table needs to be programmed and the DMA engine enabled
 * and ready to go.
 *
 * -- MCP_SATA_AE_INT_STATUS_SDEV_DMA_SETUP indicates the interrupt
 * -- MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG shows which command is ready
 * -- clear bit 0 of master command reg
 * -- program PRD
 * -- clear the interrupt status bit for the DMA Setup FIS
 * -- set bit 0 of the bus master command register
 */
static int
mcp5x_dma_setup_intr(nv_ctl_t *nvc, nv_port_t *nvp)
{
	int slot;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	uint8_t bmicx;
	int port = nvp->nvp_port_num;
	/* tag field shift differs between the primary and secondary port */
	uint8_t tag_shift[] = {MCP_SATA_AE_NCQ_PDEV_DMA_SETUP_TAG_SHIFT,
	    MCP_SATA_AE_NCQ_SDEV_DMA_SETUP_TAG_SHIFT};

	/* guard: NCQ support is incomplete; this path must not run yet */
	nv_cmn_err(CE_PANIC, nvc, nvp,
	    "this is should not be executed at all until NCQ");

	mutex_enter(&nvp->nvp_mutex);

	/* extract the tag of the command that is ready for DMA */
	slot = nv_get32(nvc->nvc_bar_hdl[5], nvc->nvc_mcp5x_ncq);

	slot = (slot >> tag_shift[port]) & MCP_SATA_AE_NCQ_DMA_SETUP_TAG_MASK;

	NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_dma_setup_intr slot %d"
	    " nvp_slot_sactive %X", slot, nvp->nvp_sactive_cache);

	/*
	 * halt the DMA engine.  This step is necessary according to
	 * the mcp5x spec, probably since there may have been a "first" packet
	 * that already programmed the DMA engine, but may not turn out to
	 * be the first one processed.
	 */
	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);

	if (bmicx & BMICX_SSBM) {
		NVLOG(NVDBG_INTR, nvc, nvp, "BM was already enabled for "
		    "another packet. Cancelling and reprogramming", NULL);
		nv_put8(bmhdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);
	}
	/*
	 * NOTE(review): this unconditional write repeats the conditional
	 * one above when SSBM was set -- harmless but redundant; confirm
	 * whether both writes are intentional before simplifying.
	 */
	nv_put8(bmhdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);

	nv_start_dma_engine(nvp, slot);

	mutex_exit(&nvp->nvp_mutex);

	return (DDI_INTR_CLAIMED);
}
#endif /* NCQ */
3509 3508
3510 3509
/*
 * packet completion interrupt.  If the packet is complete, invoke
 * the packet completion callback.
 *
 * Returns NV_SUCCESS when completion was processed (interrupt clearing
 * is then implicit via the task file status read), or NV_FAILURE when
 * the caller must clear the completion interrupt bit itself.
 */
static int
mcp5x_packet_complete_intr(nv_ctl_t *nvc, nv_port_t *nvp)
{
	uint8_t status, bmstatus;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	int sactive;
	int active_pkt_bit = 0, active_pkt = 0, ncq_command = B_FALSE;
	sata_pkt_t *spkt;
	nv_slot_t *nv_slotp;

	mutex_enter(&nvp->nvp_mutex);

	bmstatus = nv_get8(bmhdl, nvp->nvp_bmisx);

	/* neither a device interrupt nor a bus master error is pending */
	if (!(bmstatus & (BMISX_IDEINTS | BMISX_IDERR))) {
		DTRACE_PROBE1(bmstatus_h, int, bmstatus);
		NVLOG(NVDBG_INTR, nvc, nvp, "BMISX_IDEINTS not set %x",
		    bmstatus);
		mutex_exit(&nvp->nvp_mutex);

		return (NV_FAILURE);
	}

	/*
	 * Commands may have been processed by abort or timeout before
	 * interrupt processing acquired the mutex.  So we may be processing
	 * an interrupt for packets that were already removed.
	 * For functioning NCQ processing all slots may be checked, but
	 * with NCQ disabled (current code), relying on *_run flags is OK.
	 */
	if (nvp->nvp_non_ncq_run) {
		/*
		 * If the just completed item is a non-ncq command, the busy
		 * bit should not be set
		 */
		status = nv_get8(nvp->nvp_ctl_hdl, nvp->nvp_altstatus);
		if (status & SATA_STATUS_BSY) {
			nv_cmn_err(CE_WARN, nvc, nvp,
			    "unexpected SATA_STATUS_BSY set");
			DTRACE_PROBE(unexpected_status_bsy_p);
			mutex_exit(&nvp->nvp_mutex);
			/*
			 * calling function will clear interrupt.  then
			 * the real interrupt will either arrive or the
			 * packet timeout handling will take over and
			 * reset.
			 */
			return (NV_FAILURE);
		}
		ASSERT(nvp->nvp_ncq_run == 0);
	} else {
		ASSERT(nvp->nvp_non_ncq_run == 0);
		/*
		 * Pre-NCQ code!
		 * Nothing to do.  The packet for the command that just
		 * completed is already gone.  Just clear the interrupt.
		 */
		(void) nv_bm_status_clear(nvp);
		(void) nv_get8(nvp->nvp_cmd_hdl, nvp->nvp_status);
		mutex_exit(&nvp->nvp_mutex);
		return (NV_SUCCESS);

		/*
		 * NCQ check for BSY here and wait if still bsy before
		 * continuing.  Rather than wait for it to be cleared
		 * when starting a packet and wasting CPU time, the starting
		 * thread can exit immediate, but might have to spin here
		 * for a bit possibly.  Needs more work and experimentation.
		 *
		 */
	}

	/*
	 * active_pkt_bit will represent the bitmap of the single completed
	 * packet.  Because of the nature of sw assisted NCQ, only one
	 * command will complete per interrupt.
	 */

	if (ncq_command == B_FALSE) {
		active_pkt = 0;
	} else {
		/*
		 * NCQ: determine which command just completed, by examining
		 * which bit cleared in the register since last written.
		 */
		sactive = nv_get32(nvc->nvc_bar_hdl[5], nvp->nvp_sactive);

		active_pkt_bit = ~sactive & nvp->nvp_sactive_cache;

		ASSERT(active_pkt_bit);


		/*
		 * this failure path needs more work to handle the
		 * error condition and recovery.
		 */
		if (active_pkt_bit == 0) {
			ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;

			nv_cmn_err(CE_CONT, nvc, nvp, "ERROR sactive = %X "
			    "nvp->nvp_sactive %X", sactive,
			    nvp->nvp_sactive_cache);

			(void) nv_get8(cmdhdl, nvp->nvp_status);

			mutex_exit(&nvp->nvp_mutex);

			return (NV_FAILURE);
		}

		/* convert the single set bit into its slot index */
		for (active_pkt = 0; (active_pkt_bit & 0x1) != 0x1;
		    active_pkt++, active_pkt_bit >>= 1) {
		}

		/*
		 * make sure only one bit is ever turned on
		 */
		ASSERT(active_pkt_bit == 1);

		nvp->nvp_sactive_cache &= ~(0x01 << active_pkt);
	}

	nv_slotp = &(nvp->nvp_slot[active_pkt]);

	spkt = nv_slotp->nvslot_spkt;

	ASSERT(spkt != NULL);

	/* per-command completion handler set when the packet was started */
	(*nv_slotp->nvslot_intr)(nvp, nv_slotp);

	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);

	if (nv_slotp->nvslot_flags == NVSLOT_COMPLETE) {

		nv_complete_io(nvp, spkt, active_pkt);
	}

	mutex_exit(&nvp->nvp_mutex);

	return (NV_SUCCESS);
}
3656 3655
3657 3656
/*
 * Finish processing a completed packet: release its slot, then either
 * wake the synchronous waiter or invoke the asynchronous completion
 * callback.  Must be called with nvp_mutex held; the mutex is dropped
 * around the callback invocation and reacquired afterwards.
 */
static void
nv_complete_io(nv_port_t *nvp, sata_pkt_t *spkt, int slot)
{

	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));

	/* maintain the running-command counters by command class */
	if ((&(nvp->nvp_slot[slot]))->nvslot_flags & NVSLOT_NCQ) {
		nvp->nvp_ncq_run--;
	} else {
		nvp->nvp_non_ncq_run--;
	}

	/*
	 * mark the packet slot idle so it can be reused.  Do this before
	 * calling satapkt_comp so the slot can be reused.
	 */
	(&(nvp->nvp_slot[slot]))->nvslot_spkt = NULL;

	if (spkt->satapkt_op_mode & SATA_OPMODE_SYNCH) {
		/*
		 * If this is not timed polled mode cmd, which has an
		 * active thread monitoring for completion, then need
		 * to signal the sleeping thread that the cmd is complete.
		 */
		if ((spkt->satapkt_op_mode & SATA_OPMODE_POLLING) == 0) {
			cv_signal(&nvp->nvp_sync_cv);
		}

		return;
	}

	/* async completion: drop the lock while running the callback */
	if (spkt->satapkt_comp != NULL) {
		mutex_exit(&nvp->nvp_mutex);
		(*spkt->satapkt_comp)(spkt);
		mutex_enter(&nvp->nvp_mutex);
	}
}
3695 3694
3696 3695
/*
 * check whether packet is ncq command or not.  for ncq command,
 * start it if there is still room on queue.  for non-ncq command only
 * start if no other command is running.
 *
 * Returns SATA_TRAN_QUEUE_FULL (with satapkt_reason set) when the
 * packet cannot be admitted now, otherwise the result of
 * nv_start_common().
 */
static int
nv_start_async(nv_port_t *nvp, sata_pkt_t *spkt)
{
	uint8_t cmd, ncq;

	NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp, "nv_start_async: entry", NULL);

	cmd = spkt->satapkt_cmd.satacmd_cmd_reg;

	/* FPDMA queued reads/writes are the NCQ commands */
	ncq = ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
	    (cmd == SATAC_READ_FPDMA_QUEUED));

	if (ncq == B_FALSE) {

		if ((nvp->nvp_non_ncq_run == 1) ||
		    (nvp->nvp_ncq_run > 0)) {
			/*
			 * next command is non-ncq which can't run
			 * concurrently.  exit and return queue full.
			 */
			spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;

			return (SATA_TRAN_QUEUE_FULL);
		}

		return (nv_start_common(nvp, spkt));
	}

	/*
	 * ncq == B_TRUE
	 */
	if (nvp->nvp_non_ncq_run == 1) {
		/*
		 * cannot start any NCQ commands when there
		 * is a non-NCQ command running.
		 */
		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;

		return (SATA_TRAN_QUEUE_FULL);
	}

#ifdef NCQ
	/*
	 * this is not compiled for now as satapkt_device.satadev_qdepth
	 * is being pulled out until NCQ support is later addressed
	 *
	 * nvp_queue_depth is initialized by the first NCQ command
	 * received.
	 */
	if (nvp->nvp_queue_depth == 1) {
		nvp->nvp_queue_depth =
		    spkt->satapkt_device.satadev_qdepth;

		ASSERT(nvp->nvp_queue_depth > 1);

		NVLOG(NVDBG_ENTRY, nvp->nvp_ctlp, nvp,
		    "nv_process_queue: nvp_queue_depth set to %d",
		    nvp->nvp_queue_depth);
	}
#endif

	if (nvp->nvp_ncq_run >= nvp->nvp_queue_depth) {
		/*
		 * max number of NCQ commands already active
		 */
		spkt->satapkt_reason = SATA_PKT_QUEUE_FULL;

		return (SATA_TRAN_QUEUE_FULL);
	}

	return (nv_start_common(nvp, spkt));
}
3774 3773
3775 3774
/*
 * configure INTx and legacy interrupts: allocate fixed interrupt
 * handles, validate the priority, attach the handler, and enable.
 * Returns DDI_SUCCESS or DDI_FAILURE; on failure all partially
 * allocated interrupt resources are released.
 */
static int
nv_add_legacy_intrs(nv_ctl_t *nvc)
{
	dev_info_t *devinfo = nvc->nvc_dip;
	int actual, count = 0;
	int x, y, rc, inum = 0;

	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_legacy_intrs", NULL);

	/*
	 * get number of interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_FIXED, &count);
	if ((rc != DDI_SUCCESS) || (count == 0)) {
		NVLOG(NVDBG_INIT, nvc, NULL,
		    "ddi_intr_get_nintrs() failed, "
		    "rc %d count %d", rc, count);

		return (DDI_FAILURE);
	}

	/*
	 * allocate an array of interrupt handles
	 */
	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
	nvc->nvc_htable = kmem_zalloc(nvc->nvc_intr_size, KM_SLEEP);

	/*
	 * call ddi_intr_alloc()
	 */
	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_FIXED,
	    inum, count, &actual, DDI_INTR_ALLOC_STRICT);

	if ((rc != DDI_SUCCESS) || (actual == 0)) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_alloc() failed, rc %d", rc);
		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);

		return (DDI_FAILURE);
	}

	if (actual < count) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_alloc: requested: %d, received: %d",
		    count, actual);

		goto failure;
	}

	nvc->nvc_intr_cnt = actual;

	/*
	 * get intr priority
	 */
	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
	    DDI_SUCCESS) {
		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");

		goto failure;
	}

	/*
	 * Test for high level mutex
	 */
	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "nv_add_legacy_intrs: high level intr not supported");

		goto failure;
	}

	/* attach the chipset-specific handler to each allocated interrupt */
	for (x = 0; x < actual; x++) {
		if (ddi_intr_add_handler(nvc->nvc_htable[x],
		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
			nv_cmn_err(CE_WARN, nvc, NULL,
			    "ddi_intr_add_handler() failed");

			goto failure;
		}
	}

	/*
	 * call ddi_intr_enable() for legacy interrupts
	 */
	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
		(void) ddi_intr_enable(nvc->nvc_htable[x]);
	}

	return (DDI_SUCCESS);

failure:
	/*
	 * free allocated intr and nvc_htable
	 */
	for (y = 0; y < actual; y++) {
		(void) ddi_intr_free(nvc->nvc_htable[y]);
	}

	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);

	return (DDI_FAILURE);
}
3881 3880
3882 3881 #ifdef NV_MSI_SUPPORTED
3883 3882 /*
3884 3883 * configure MSI interrupts
3885 3884 */
/*
 * configure MSI interrupts
 *
 * Queries the number of supported and available MSI vectors, allocates
 * handles for all of them (accepting fewer via DDI_INTR_ALLOC_NORMAL),
 * attaches nvc_interrupt as the handler on each, and enables them --
 * using block enable when the DDI_INTR_FLAG_BLOCK capability is present.
 *
 * On success returns DDI_SUCCESS with nvc_htable, nvc_intr_cnt,
 * nvc_intr_size, nvc_intr_pri and nvc_intr_cap filled in.  On failure
 * returns DDI_FAILURE with all partially allocated vectors freed and
 * the handle array released.
 */
static int
nv_add_msi_intrs(nv_ctl_t *nvc)
{
	dev_info_t *devinfo = nvc->nvc_dip;
	int count, avail, actual;
	int x, y, rc, inum = 0;

	NVLOG(NVDBG_INIT, nvc, NULL, "nv_add_msi_intrs", NULL);

	/*
	 * get number of interrupts
	 */
	rc = ddi_intr_get_nintrs(devinfo, DDI_INTR_TYPE_MSI, &count);
	if ((rc != DDI_SUCCESS) || (count == 0)) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_nintrs() failed, "
		    "rc %d count %d", rc, count);

		return (DDI_FAILURE);
	}

	/*
	 * get number of available interrupts
	 */
	rc = ddi_intr_get_navail(devinfo, DDI_INTR_TYPE_MSI, &avail);
	if ((rc != DDI_SUCCESS) || (avail == 0)) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_navail() failed, "
		    "rc %d avail %d", rc, avail);

		return (DDI_FAILURE);
	}

	/* fewer available than supported is not fatal, just noteworthy */
	if (avail < count) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_get_nvail returned %d ddi_intr_get_nintrs: %d",
		    avail, count);
	}

	/*
	 * allocate an array of interrupt handles
	 */
	nvc->nvc_intr_size = count * sizeof (ddi_intr_handle_t);
	nvc->nvc_htable = kmem_alloc(nvc->nvc_intr_size, KM_SLEEP);

	/* ALLOC_NORMAL: proceed even if fewer than count vectors granted */
	rc = ddi_intr_alloc(devinfo, nvc->nvc_htable, DDI_INTR_TYPE_MSI,
	    inum, count, &actual, DDI_INTR_ALLOC_NORMAL);

	if ((rc != DDI_SUCCESS) || (actual == 0)) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "ddi_intr_alloc() failed, rc %d", rc);
		kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);

		return (DDI_FAILURE);
	}

	/*
	 * Use interrupt count returned or abort?
	 */
	if (actual < count) {
		NVLOG(NVDBG_INIT, nvc, NULL,
		    "Requested: %d, Received: %d", count, actual);
	}

	nvc->nvc_intr_cnt = actual;

	/*
	 * get priority for first msi, assume remaining are all the same
	 */
	if (ddi_intr_get_pri(nvc->nvc_htable[0], &nvc->nvc_intr_pri) !=
	    DDI_SUCCESS) {
		nv_cmn_err(CE_WARN, nvc, NULL, "ddi_intr_get_pri() failed");

		goto failure;
	}

	/*
	 * test for high level mutex; this driver's handlers cannot run
	 * above the scheduler's lock level
	 */
	if (nvc->nvc_intr_pri >= ddi_intr_get_hilevel_pri()) {
		nv_cmn_err(CE_WARN, nvc, NULL,
		    "nv_add_msi_intrs: high level intr not supported");

		goto failure;
	}

	/*
	 * Call ddi_intr_add_handler()
	 */
	for (x = 0; x < actual; x++) {
		if (ddi_intr_add_handler(nvc->nvc_htable[x],
		    nvc->nvc_interrupt, (caddr_t)nvc, NULL) != DDI_SUCCESS) {
			nv_cmn_err(CE_WARN, nvc, NULL,
			    "ddi_intr_add_handler() failed");

			goto failure;
		}
	}

	(void) ddi_intr_get_cap(nvc->nvc_htable[0], &nvc->nvc_intr_cap);

	if (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK) {
		(void) ddi_intr_block_enable(nvc->nvc_htable,
		    nvc->nvc_intr_cnt);
	} else {
		/*
		 * Call ddi_intr_enable() for MSI non block enable
		 */
		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
			(void) ddi_intr_enable(nvc->nvc_htable[x]);
		}
	}

	return (DDI_SUCCESS);

failure:
	/*
	 * free allocated intr and nvc_htable
	 */
	for (y = 0; y < actual; y++) {
		(void) ddi_intr_free(nvc->nvc_htable[y]);
	}

	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);

	return (DDI_FAILURE);
}
4013 4012 #endif
4014 4013
4015 4014
/*
 * Tear down all interrupt handling for the controller.
 *
 * Order matters here: first mask interrupt generation at each port
 * (belt-and-suspenders against spurious delivery), then disable the
 * vectors (block-disable for MSI when the capability was used), then
 * remove the handlers and free the handles, and finally release the
 * handle array allocated by nv_add_*_intrs().
 */
static void
nv_rem_intrs(nv_ctl_t *nvc)
{
	int x, i;
	nv_port_t *nvp;

	NVLOG(NVDBG_INIT, nvc, NULL, "nv_rem_intrs", NULL);

	/*
	 * prevent controller from generating interrupts by
	 * masking them out.  This is an extra precaution.
	 */
	for (i = 0; i < NV_MAX_PORTS(nvc); i++) {
		nvp = (&nvc->nvc_port[i]);
		mutex_enter(&nvp->nvp_mutex);
		(*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE);
		mutex_exit(&nvp->nvp_mutex);
	}

	/*
	 * disable all interrupts
	 */
	if ((nvc->nvc_intr_type == DDI_INTR_TYPE_MSI) &&
	    (nvc->nvc_intr_cap & DDI_INTR_FLAG_BLOCK)) {
		(void) ddi_intr_block_disable(nvc->nvc_htable,
		    nvc->nvc_intr_cnt);
	} else {
		for (x = 0; x < nvc->nvc_intr_cnt; x++) {
			(void) ddi_intr_disable(nvc->nvc_htable[x]);
		}
	}

	for (x = 0; x < nvc->nvc_intr_cnt; x++) {
		(void) ddi_intr_remove_handler(nvc->nvc_htable[x]);
		(void) ddi_intr_free(nvc->nvc_htable[x]);
	}

	kmem_free(nvc->nvc_htable, nvc->nvc_intr_size);
}
4055 4054
4056 4055
4057 4056 /*
4058 4057 * variable argument wrapper for cmn_err. prefixes the instance and port
4059 4058 * number if possible
4060 4059 */
4061 4060 static void
4062 4061 nv_vcmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, va_list ap,
4063 4062 boolean_t log_to_sata_ring)
4064 4063 {
4065 4064 char port[NV_STR_LEN];
4066 4065 char inst[NV_STR_LEN];
4067 4066 dev_info_t *dip;
4068 4067
4069 4068 if (nvc) {
4070 4069 (void) snprintf(inst, NV_STR_LEN, "inst%d ",
4071 4070 ddi_get_instance(nvc->nvc_dip));
4072 4071 dip = nvc->nvc_dip;
4073 4072 } else {
4074 4073 inst[0] = '\0';
4075 4074 }
4076 4075
4077 4076 if (nvp) {
4078 4077 (void) snprintf(port, NV_STR_LEN, "port%d",
4079 4078 nvp->nvp_port_num);
4080 4079 dip = nvp->nvp_ctlp->nvc_dip;
4081 4080 } else {
4082 4081 port[0] = '\0';
4083 4082 }
4084 4083
4085 4084 mutex_enter(&nv_log_mutex);
4086 4085
4087 4086 (void) sprintf(nv_log_buf, "%s%s%s", inst, port,
4088 4087 (inst[0]|port[0] ? ": " :""));
4089 4088
4090 4089 (void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4091 4090 NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4092 4091
4093 4092 /*
4094 4093 * Log to console or log to file, depending on
4095 4094 * nv_log_to_console setting.
4096 4095 */
4097 4096 if (nv_log_to_console) {
4098 4097 if (nv_prom_print) {
4099 4098 prom_printf("%s\n", nv_log_buf);
4100 4099 } else {
4101 4100 cmn_err(ce, "%s\n", nv_log_buf);
4102 4101 }
4103 4102 } else {
4104 4103 cmn_err(ce, "!%s", nv_log_buf);
4105 4104 }
4106 4105
4107 4106 if (log_to_sata_ring == B_TRUE) {
4108 4107 (void) sprintf(nv_log_buf, "%s%s", port, (port[0] ? ": " :""));
4109 4108
4110 4109 (void) vsnprintf(&nv_log_buf[strlen(nv_log_buf)],
4111 4110 NV_LOGBUF_LEN - strlen(nv_log_buf), fmt, ap);
4112 4111
4113 4112 sata_trace_debug(dip, nv_log_buf);
4114 4113 }
4115 4114
4116 4115 mutex_exit(&nv_log_mutex);
4117 4116 }
4118 4117
4119 4118
4120 4119 /*
4121 4120 * wrapper for cmn_err
4122 4121 */
/*
 * wrapper for cmn_err.  The instance/port prefixing and routing to
 * console vs. system log is done by nv_vcmn_err(); B_TRUE asks it to
 * also copy the message into the sata trace ring buffer.
 */
static void
nv_cmn_err(int ce, nv_ctl_t *nvc, nv_port_t *nvp, char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	nv_vcmn_err(ce, nvc, nvp, fmt, ap, B_TRUE);
	va_end(ap);
}
4132 4131
4133 4132
/*
 * Debug logging into the sata trace ring buffer, optionally mirrored
 * to cmn_err when nv_log_to_cmn_err is set.  The message is prefixed
 * with the port number when a port is supplied.
 *
 * Each formatting pass gets its own va_start()/va_end() pair since a
 * va_list cannot be reused once consumed.
 */
static void
nv_log(nv_ctl_t *nvc, nv_port_t *nvp, const char *fmt, ...)
{
	va_list ap;

	if (nv_log_to_cmn_err == B_TRUE) {
		va_start(ap, fmt);
		nv_vcmn_err(CE_CONT, nvc, nvp, fmt, ap, B_FALSE);
		va_end(ap);

	}

	va_start(ap, fmt);

	if (nvp == NULL && nvc == NULL) {
		sata_vtrace_debug(NULL, fmt, ap);
		va_end(ap);

		return;
	}

	if (nvp == NULL && nvc != NULL) {
		sata_vtrace_debug(nvc->nvc_dip, fmt, ap);
		va_end(ap);

		return;
	}

	/*
	 * nvp is not NULL, but nvc might be.  Reference nvp for both
	 * port and dip, to get the port number prefixed on the
	 * message.
	 */
	mutex_enter(&nv_log_mutex);

	/* nv_log_buf is shared; build the prefixed format under the lock */
	(void) snprintf(nv_log_buf, NV_LOGBUF_LEN, "port%d: %s",
	    nvp->nvp_port_num, fmt);

	sata_vtrace_debug(nvp->nvp_ctlp->nvc_dip, nv_log_buf, ap);

	mutex_exit(&nv_log_mutex);

	va_end(ap);
}
4178 4177
4179 4178
4180 4179 /*
4181 4180 * program registers which are common to all commands
4182 4181 */
/*
 * program registers which are common to all commands
 *
 * Selects the drive, waits for it to become ready, then writes the
 * taskfile registers appropriate to the command's addressing mode.
 * The command register itself is NOT written here; the caller does
 * that last to start the command.
 *
 * NOTE(review): if the drive never asserts DRDY within 5 seconds this
 * returns silently and the caller goes on to write the command
 * register anyway -- presumably the subsequent timeout handling picks
 * this up; verify against callers.
 */
static void
nv_program_taskfile_regs(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	sata_pkt_t *spkt;
	sata_cmd_t *satacmd;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	uint8_t cmd, ncq = B_FALSE;

	spkt = nv_slotp->nvslot_spkt;
	satacmd = &spkt->satapkt_cmd;
	cmd = satacmd->satacmd_cmd_reg;

	ASSERT(nvp->nvp_slot);

	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
		ncq = B_TRUE;
	}

	/*
	 * select the drive
	 */
	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);

	/*
	 * make certain the drive selected
	 */
	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
	    NV_SEC2USEC(5), 0) == B_FALSE) {

		return;
	}

	switch (spkt->satapkt_cmd.satacmd_addr_type) {

	case ATA_ADDR_LBA:
		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "ATA_ADDR_LBA mode",
		    NULL);

		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
		nv_put8(cmdhdl, nvp->nvp_feature,
		    satacmd->satacmd_features_reg);


		break;

	case ATA_ADDR_LBA28:
		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
		    "ATA_ADDR_LBA28 mode", NULL);
		/*
		 * NCQ only uses 48-bit addressing
		 */
		ASSERT(ncq != B_TRUE);

		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
		nv_put8(cmdhdl, nvp->nvp_feature,
		    satacmd->satacmd_features_reg);

		break;

	case ATA_ADDR_LBA48:
		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
		    "ATA_ADDR_LBA48 mode", NULL);

		/*
		 * for NCQ, tag goes into count register and real sector count
		 * into features register.  The sata module does the translation
		 * in the satacmd.
		 */
		if (ncq == B_TRUE) {
			nv_put8(cmdhdl, nvp->nvp_count, slot << 3);
		} else {
			/*
			 * 48-bit registers are written twice: msb then
			 * lsb land in the device's "previous"/"current"
			 * latches
			 */
			nv_put8(cmdhdl, nvp->nvp_count,
			    satacmd->satacmd_sec_count_msb);
			nv_put8(cmdhdl, nvp->nvp_count,
			    satacmd->satacmd_sec_count_lsb);
		}

		nv_put8(cmdhdl, nvp->nvp_feature,
		    satacmd->satacmd_features_reg_ext);
		nv_put8(cmdhdl, nvp->nvp_feature,
		    satacmd->satacmd_features_reg);

		/*
		 * send the high-order half first
		 */
		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_msb);
		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_msb);
		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_msb);

		/*
		 * Send the low-order half
		 */
		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);

		break;

	case 0:
		/*
		 * non-media access commands such as identify and features
		 * take this path.
		 */
		nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);
		nv_put8(cmdhdl, nvp->nvp_feature,
		    satacmd->satacmd_features_reg);
		nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
		nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
		nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);

		break;

	default:
		break;
	}

	ASSERT(nvp->nvp_slot);
}
4309 4308
4310 4309
4311 4310 /*
4312 4311 * start a command that involves no media access
4313 4312 */
4314 4313 static int
4315 4314 nv_start_nodata(nv_port_t *nvp, int slot)
4316 4315 {
4317 4316 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4318 4317 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4319 4318 sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4320 4319 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4321 4320
4322 4321 nv_program_taskfile_regs(nvp, slot);
4323 4322
4324 4323 /*
4325 4324 * This next one sets the controller in motion
4326 4325 */
4327 4326 nv_put8(cmdhdl, nvp->nvp_cmd, sata_cmdp->satacmd_cmd_reg);
4328 4327
4329 4328 return (SATA_TRAN_ACCEPTED);
4330 4329 }
4331 4330
4332 4331
4333 4332 static int
4334 4333 nv_bm_status_clear(nv_port_t *nvp)
4335 4334 {
4336 4335 ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4337 4336 uchar_t status, ret;
4338 4337
4339 4338 /*
4340 4339 * Get the current BM status
4341 4340 */
4342 4341 ret = status = nv_get8(bmhdl, nvp->nvp_bmisx);
4343 4342
4344 4343 status = (status & BMISX_MASK) | BMISX_IDERR | BMISX_IDEINTS;
4345 4344
4346 4345 /*
4347 4346 * Clear the latches (and preserve the other bits)
4348 4347 */
4349 4348 nv_put8(bmhdl, nvp->nvp_bmisx, status);
4350 4349
4351 4350 return (ret);
4352 4351 }
4353 4352
4354 4353
4355 4354 /*
4356 4355 * program the bus master DMA engine with the PRD address for
4357 4356 * the active slot command, and start the DMA engine.
4358 4357 */
4359 4358 static void
4360 4359 nv_start_dma_engine(nv_port_t *nvp, int slot)
4361 4360 {
4362 4361 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4363 4362 ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
4364 4363 uchar_t direction;
4365 4364
4366 4365 ASSERT(nv_slotp->nvslot_spkt != NULL);
4367 4366
4368 4367 if (nv_slotp->nvslot_spkt->satapkt_cmd.satacmd_flags.sata_data_direction
4369 4368 == SATA_DIR_READ) {
4370 4369 direction = BMICX_RWCON_WRITE_TO_MEMORY;
4371 4370 } else {
4372 4371 direction = BMICX_RWCON_READ_FROM_MEMORY;
4373 4372 }
4374 4373
4375 4374 NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
4376 4375 "nv_start_dma_engine entered", NULL);
4377 4376
4378 4377 #if NOT_USED
4379 4378 /*
4380 4379 * NOT NEEDED. Left here of historical reason.
4381 4380 * Reset the controller's interrupt and error status bits.
4382 4381 */
4383 4382 (void) nv_bm_status_clear(nvp);
4384 4383 #endif
4385 4384 /*
4386 4385 * program the PRD table physical start address
4387 4386 */
4388 4387 nv_put32(bmhdl, nvp->nvp_bmidtpx, nvp->nvp_sg_paddr[slot]);
4389 4388
4390 4389 /*
4391 4390 * set the direction control and start the DMA controller
4392 4391 */
4393 4392 nv_put8(bmhdl, nvp->nvp_bmicx, direction | BMICX_SSBM);
4394 4393 }
4395 4394
4396 4395 /*
4397 4396 * start dma command, either in or out
4398 4397 */
/*
 * start dma command, either in or out
 *
 * Programs the taskfile, writes the command register (starting the
 * drive early to overlap with PRD setup), copies the cookie list into
 * the controller-visible PRD table, syncs it for the device and starts
 * the bus master DMA engine.
 *
 * Returns SATA_TRAN_ACCEPTED, or NV_FAILURE if the cookie count
 * exceeds the PRD table capacity (NV_DMA_NSEGS).
 */
static int
nv_start_dma(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	uint8_t cmd = sata_cmdp->satacmd_cmd_reg;
#ifdef NCQ
	uint8_t ncq = B_FALSE;
#endif
	ddi_acc_handle_t sghdl = nvp->nvp_sg_acc_hdl[slot];
	uint_t *dstp = (uint_t *)nvp->nvp_sg_addr[slot];
	int sg_count = sata_cmdp->satacmd_num_dma_cookies, idx;
	ddi_dma_cookie_t *srcp = sata_cmdp->satacmd_dma_cookie_list;

	ASSERT(sg_count != 0);

	/* the PRD table only has room for NV_DMA_NSEGS entries */
	if (sata_cmdp->satacmd_num_dma_cookies > NV_DMA_NSEGS) {
		nv_cmn_err(CE_WARN, nvp->nvp_ctlp, nvp, "NV_DMA_NSEGS=%d <"
		    " satacmd_num_dma_cookies=%d", NV_DMA_NSEGS,
		    sata_cmdp->satacmd_num_dma_cookies);

		return (NV_FAILURE);
	}

	nv_program_taskfile_regs(nvp, slot);

	/*
	 * start the drive in motion
	 */
	nv_put8(cmdhdl, nvp->nvp_cmd, cmd);

	/*
	 * the drive starts processing the transaction when the cmd register
	 * is written.  This is done here before programming the DMA engine to
	 * parallelize and save some time.  In the event that the drive is ready
	 * before DMA, it will wait.
	 */
#ifdef NCQ
	if ((cmd == SATAC_WRITE_FPDMA_QUEUED) ||
	    (cmd == SATAC_READ_FPDMA_QUEUED)) {
		ncq = B_TRUE;
	}
#endif

	/*
	 * copy the PRD list to PRD table in DMA accessible memory
	 * so that the controller can access it.  Each PRD entry is an
	 * address word followed by a count/flags word.
	 */
	for (idx = 0; idx < sg_count; idx++, srcp++) {
		uint32_t size;

		nv_put32(sghdl, dstp++, srcp->dmac_address);

		/* Set the number of bytes to transfer, 0 implies 64KB */
		size = srcp->dmac_size;
		if (size == 0x10000)
			size = 0;

		/*
		 * If this is a 40-bit address, copy bits 32-40 of the
		 * physical address to bits 16-24 of the PRD count.
		 */
		if (srcp->dmac_laddress > UINT32_MAX) {
			size |= ((srcp->dmac_laddress & 0xff00000000) >> 16);
		}

		/*
		 * set the end of table flag for the last entry
		 */
		if (idx == (sg_count - 1)) {
			size |= PRDE_EOT;
		}

		nv_put32(sghdl, dstp++, size);
	}

	/* make the PRD table visible to the device before starting DMA */
	(void) ddi_dma_sync(nvp->nvp_sg_dma_hdl[slot], 0,
	    sizeof (prde_t) * NV_DMA_NSEGS, DDI_DMA_SYNC_FORDEV);

	nv_start_dma_engine(nvp, slot);

#ifdef NCQ
	/*
	 * optimization: for SWNCQ, start DMA engine if this is the only
	 * command running.  Preliminary NCQ efforts indicated this needs
	 * more debugging.
	 *
	 * if (nvp->nvp_ncq_run <= 1)
	 */

	if (ncq == B_FALSE) {
		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp,
		    "NOT NCQ so starting DMA NOW non_ncq_commands=%d"
		    " cmd = %X", non_ncq_commands++, cmd);
		nv_start_dma_engine(nvp, slot);
	} else {
		NVLOG(NVDBG_DELIVER, nvp->nvp_ctlp, nvp, "NCQ, so program "
		    "DMA later ncq_commands=%d cmd = %X", ncq_commands++, cmd);
	}
#endif /* NCQ */

	return (SATA_TRAN_ACCEPTED);
}
4504 4503
4505 4504
4506 4505 /*
4507 4506 * start a PIO data-in ATA command
4508 4507 */
4509 4508 static int
4510 4509 nv_start_pio_in(nv_port_t *nvp, int slot)
4511 4510 {
4512 4511
4513 4512 nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
4514 4513 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4515 4514 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4516 4515
4517 4516 nv_program_taskfile_regs(nvp, slot);
4518 4517
4519 4518 /*
4520 4519 * This next one sets the drive in motion
4521 4520 */
4522 4521 nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);
4523 4522
4524 4523 return (SATA_TRAN_ACCEPTED);
4525 4524 }
4526 4525
4527 4526
4528 4527 /*
4529 4528 * start a PIO data-out ATA command
4530 4529 */
/*
 * start a PIO data-out ATA command
 *
 * Unlike data-in, the first data chunk must be pushed by the host with
 * no interrupt to prompt it: after issuing the command we busy-wait for
 * DRQ and then send the first block inline via nv_intr_pio_out().
 * Subsequent chunks are interrupt driven.
 *
 * Returns SATA_TRAN_ACCEPTED on success; on timeout or device error the
 * packet is completed, the port is reset and SATA_TRAN_PORT_ERROR is
 * returned.
 */
static int
nv_start_pio_out(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;

	nv_program_taskfile_regs(nvp, slot);

	/*
	 * this next one sets the drive in motion
	 */
	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);

	/*
	 * wait for the busy bit to settle
	 */
	NV_DELAY_NSEC(400);

	/*
	 * wait for the drive to assert DRQ to send the first chunk
	 * of data.  Have to busy wait because there's no interrupt for
	 * the first chunk.  This is bad... uses a lot of cycles if the
	 * drive responds too slowly or if the wait loop granularity
	 * is too large.  It's even worse if the drive is defective and
	 * the loop times out.
	 */
	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
	    4000000, 0) == B_FALSE) {
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;

		goto error;
	}

	/*
	 * send the first block.
	 */
	nv_intr_pio_out(nvp, nv_slotp);

	/*
	 * If nvslot_flags is not set to COMPLETE yet, then processing
	 * is OK so far, so return.  Otherwise, fall into error handling
	 * below.
	 */
	if (nv_slotp->nvslot_flags != NVSLOT_COMPLETE) {

		return (SATA_TRAN_ACCEPTED);
	}

error:
	/*
	 * there was an error so reset the device and complete the packet.
	 */
	nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
	nv_complete_io(nvp, spkt, 0);
	nv_reset(nvp, "pio_out");

	return (SATA_TRAN_PORT_ERROR);
}
4592 4591
4593 4592
4594 4593 /*
4595 4594 * start a ATAPI Packet command (PIO data in or out)
4596 4595 */
/*
 * start a ATAPI Packet command (PIO data in or out)
 *
 * Walks the beginning of the ATAPI protocol state machine:
 * HP0 (Check_Status_A) -- issue PACKET and wait for DRQ, then
 * HP1 (Send_Packet) -- write the CDB into the data register, and
 * exit in HP3 (INTRQ_wait), where nv_intr_pkt_pio() takes over.
 *
 * Returns SATA_TRAN_ACCEPTED, or SATA_TRAN_PORT_ERROR if drive
 * selection fails or the HP0 wait ends in error/timeout (in which
 * case the packet is completed and the port reset).
 */
static int
nv_start_pkt_pio(nv_port_t *nvp, int slot)
{
	nv_slot_t *nv_slotp = &(nvp->nvp_slot[slot]);
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	sata_cmd_t *satacmd = &spkt->satapkt_cmd;

	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_pkt_pio: start", NULL);

	/*
	 * Write the PACKET command to the command register.  Normally
	 * this would be done through nv_program_taskfile_regs().  It
	 * is done here because some values need to be overridden.
	 */

	/* select the drive */
	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);

	/* make certain the drive selected */
	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
	    NV_SEC2USEC(5), 0) == B_FALSE) {
		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_start_pkt_pio: drive select failed", NULL);
		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * The command is always sent via PIO, despite whatever the SATA
	 * common module sets in the command.  Overwrite the DMA bit to do this.
	 * Also, overwrite the overlay bit to be safe (it shouldn't be set).
	 */
	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */

	/* set appropriately by the sata common module */
	nv_put8(cmdhdl, nvp->nvp_hcyl, satacmd->satacmd_lba_high_lsb);
	nv_put8(cmdhdl, nvp->nvp_lcyl, satacmd->satacmd_lba_mid_lsb);
	nv_put8(cmdhdl, nvp->nvp_sect, satacmd->satacmd_lba_low_lsb);
	nv_put8(cmdhdl, nvp->nvp_count, satacmd->satacmd_sec_count_lsb);

	/* initiate the command by writing the command register last */
	nv_put8(cmdhdl, nvp->nvp_cmd, spkt->satapkt_cmd.satacmd_cmd_reg);

	/* Give the host controller time to do its thing */
	NV_DELAY_NSEC(400);

	/*
	 * Wait for the device to indicate that it is ready for the command
	 * ATAPI protocol state - HP0: Check_Status_A
	 */

	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
	    4000000, 0) == B_FALSE) {
		/*
		 * Either an error or device fault occurred or the wait
		 * timed out.  According to the ATAPI protocol, command
		 * completion is also possible.  Other implementations of
		 * this protocol don't handle this last case, so neither
		 * does this code.
		 */

		if (nv_get8(cmdhdl, nvp->nvp_status) &
		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_pkt_pio: device error (HP0)", NULL);
		} else {
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_pkt_pio: timeout (HP0)", NULL);
		}

		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_complete_io(nvp, spkt, 0);
		nv_reset(nvp, "start_pkt_pio");

		return (SATA_TRAN_PORT_ERROR);
	}

	/*
	 * Put the ATAPI command in the data register
	 * ATAPI protocol state - HP1: Send_Packet
	 */

	ddi_rep_put16(cmdhdl, (ushort_t *)spkt->satapkt_cmd.satacmd_acdb,
	    (ushort_t *)nvp->nvp_data,
	    (spkt->satapkt_cmd.satacmd_acdb_len >> 1), DDI_DEV_NO_AUTOINCR);

	/*
	 * See you in nv_intr_pkt_pio.
	 * ATAPI protocol state - HP3: INTRQ_wait
	 */

	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_pkt_pio: exiting into HP3", NULL);

	return (SATA_TRAN_ACCEPTED);
}
4700 4699
4701 4700
4702 4701 /*
4703 4702 * Interrupt processing for a non-data ATA command.
4704 4703 */
4705 4704 static void
4706 4705 nv_intr_nodata(nv_port_t *nvp, nv_slot_t *nv_slotp)
4707 4706 {
4708 4707 uchar_t status;
4709 4708 sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
4710 4709 sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
4711 4710 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
4712 4711 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
4713 4712
4714 4713 NVLOG(NVDBG_INTR, nvp->nvp_ctlp, nvp, "nv_intr_nodata entered", NULL);
4715 4714
4716 4715 status = nv_get8(cmdhdl, nvp->nvp_status);
4717 4716
4718 4717 /*
4719 4718 * check for errors
4720 4719 */
4721 4720 if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
4722 4721 spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
4723 4722 sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
4724 4723 nvp->nvp_altstatus);
4725 4724 sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
4726 4725 } else {
4727 4726 spkt->satapkt_reason = SATA_PKT_COMPLETED;
4728 4727 }
4729 4728
4730 4729 nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
4731 4730 }
4732 4731
4733 4732
4734 4733 /*
4735 4734 * ATA command, PIO data in
4736 4735 */
/*
 * ATA command, PIO data in
 *
 * Called per interrupt; reads the next chunk from the data register
 * into the slot's buffer.  Marks the slot complete on error, timeout,
 * or when the final chunk has been read; otherwise returns and waits
 * for the next interrupt.
 */
static void
nv_intr_pio_in(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int count;

	status = nv_get8(cmdhdl, nvp->nvp_status);

	/*
	 * device still busy at interrupt time is treated as a timeout;
	 * capture the registers and reset the port
	 */
	if (status & SATA_STATUS_BSY) {
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
		nv_reset(nvp, "intr_pio_in");

		return;
	}

	/*
	 * check for errors: DRQ must be set and DF/ERR clear to proceed
	 */
	if ((status & (SATA_STATUS_DRQ | SATA_STATUS_DF |
	    SATA_STATUS_ERR)) != SATA_STATUS_DRQ) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		return;
	}

	/*
	 * read the next chunk of data (if any)
	 */
	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);

	/*
	 * read count bytes
	 */
	ASSERT(count != 0);

	ddi_rep_get16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);

	nv_slotp->nvslot_v_addr += count;
	nv_slotp->nvslot_byte_count -= count;


	if (nv_slotp->nvslot_byte_count != 0) {
		/*
		 * more to transfer.  Wait for next interrupt.
		 */
		return;
	}

	/*
	 * transfer is complete. wait for the busy bit to settle.
	 */
	NV_DELAY_NSEC(400);

	spkt->satapkt_reason = SATA_PKT_COMPLETED;
	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
}
4804 4803
4805 4804
4806 4805 /*
4807 4806 * ATA command PIO data out
4808 4807 */
/*
 * ATA command PIO data out
 *
 * Called both inline from nv_start_pio_out() for the first chunk and
 * per interrupt thereafter.  Writes the next chunk of the slot's
 * buffer to the data register.  DRQ deasserted signals end of
 * transfer: success if byte_count reached zero, device error
 * otherwise.  The caller detects failure by checking whether
 * nvslot_flags was set to NVSLOT_COMPLETE.
 */
static void
nv_intr_pio_out(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	uchar_t status;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	int count;

	/*
	 * clear the IRQ
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	if (status & SATA_STATUS_BSY) {
		/*
		 * this should not happen
		 */
		spkt->satapkt_reason = SATA_PKT_TIMEOUT;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);

		return;
	}

	/*
	 * check for errors
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		return;
	}

	/*
	 * this is the condition which signals the drive is
	 * no longer ready to transfer.  Likely that the transfer
	 * completed successfully, but check that byte_count is
	 * zero.
	 */
	if ((status & SATA_STATUS_DRQ) == 0) {

		if (nv_slotp->nvslot_byte_count == 0) {
			/*
			 * complete; successful transfer
			 */
			spkt->satapkt_reason = SATA_PKT_COMPLETED;
		} else {
			/*
			 * error condition, incomplete transfer
			 */
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		}
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

		return;
	}

	/*
	 * write the next chunk of data
	 */
	count = min(nv_slotp->nvslot_byte_count, NV_BYTES_PER_SEC);

	/*
	 * read or write count bytes
	 */

	ASSERT(count != 0);

	ddi_rep_put16(cmdhdl, (ushort_t *)nv_slotp->nvslot_v_addr,
	    (ushort_t *)nvp->nvp_data, (count >> 1), DDI_DEV_NO_AUTOINCR);

	nv_slotp->nvslot_v_addr += count;
	nv_slotp->nvslot_byte_count -= count;
}
4890 4889
4891 4890
/*
 * ATAPI PACKET command, PIO in/out interrupt
 *
 * Under normal circumstances, one of four different interrupt scenarios
 * will result in this function being called:
 *
 * 1. Packet command data transfer
 * 2. Packet command completion
 * 3. Request sense data transfer
 * 4. Request sense command completion
 *
 * The "HPn" notations in the comments below refer to the host protocol
 * states of the ATAPI PACKET protocol state machine.  Whether this is
 * a request-sense phase is tracked via the NVSLOT_RQSENSE flag in
 * nvslot_flags.
 */
static void
nv_intr_pkt_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	int direction = sata_cmdp->satacmd_flags.sata_data_direction;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	uint16_t ctlr_count;
	int count;

	/* ATAPI protocol state - HP2: Check_Status_B */

	/* reading the status register also clears the interrupt */
	status = nv_get8(cmdhdl, nvp->nvp_status);
	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_intr_pkt_pio: status 0x%x", status);

	if (status & SATA_STATUS_BSY) {
		/*
		 * Device is busy despite interrupting.  If this was a
		 * request sense, just fail the packet; otherwise time the
		 * packet out and reset the port to recover the device.
		 */
		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		} else {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			nv_reset(nvp, "intr_pkt_pio");
		}

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: busy - status 0x%x", status);

		return;
	}

	if ((status & SATA_STATUS_DF) != 0) {
		/*
		 * On device fault, just clean up and bail.  Request sense
		 * will just default to its NO SENSE initialized value.
		 */

		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		}

		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
		    nvp->nvp_error);

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: device fault", NULL);

		return;
	}

	if ((status & SATA_STATUS_ERR) != 0) {
		/*
		 * On command error, figure out whether we are processing a
		 * request sense.  If so, clean up and bail.  Otherwise,
		 * do a REQUEST SENSE.
		 */

		if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) == 0) {
			nv_slotp->nvslot_flags |= NVSLOT_RQSENSE;
			if (nv_start_rqsense_pio(nvp, nv_slotp) ==
			    NV_FAILURE) {
				nv_copy_registers(nvp, &spkt->satapkt_device,
				    spkt);
				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
			}

			sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
			    nvp->nvp_altstatus);
			sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl,
			    nvp->nvp_error);
		} else {
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		}

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: error (status 0x%x)", status);

		return;
	}

	if ((nv_slotp->nvslot_flags & NVSLOT_RQSENSE) != 0) {
		/*
		 * REQUEST SENSE command processing
		 */

		if ((status & (SATA_STATUS_DRQ)) != 0) {
			/* ATAPI state - HP4: Transfer_Data */

			/*
			 * read the byte count from the controller
			 * (cylinder high/low taskfile registers)
			 */
			ctlr_count =
			    (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
			ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: ctlr byte count - %d",
			    ctlr_count);

			if (ctlr_count == 0) {
				/* no data to transfer - some devices do this */

				spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
				nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
				    "nv_intr_pkt_pio: done (no data)", NULL);

				return;
			}

			count = min(ctlr_count, SATA_ATAPI_RQSENSE_LEN);

			/* transfer the data */
			ddi_rep_get16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_rqsense_buff,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);

			/* consume residual bytes */
			ctlr_count -= count;

			if (ctlr_count > 0) {
				/* drain what the device offered beyond buf */
				for (; ctlr_count > 0; ctlr_count -= 2)
					(void) ddi_get16(cmdhdl,
					    (ushort_t *)nvp->nvp_data);
			}

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: transition to HP2", NULL);
		} else {
			/* still in ATAPI state - HP2 */

			/*
			 * In order to avoid clobbering the rqsense data
			 * set by the SATA common module, the sense data read
			 * from the device is put in a separate buffer and
			 * copied into the packet after the request sense
			 * command successfully completes.
			 */
			bcopy(nv_slotp->nvslot_rqsense_buff,
			    spkt->satapkt_cmd.satacmd_rqsense,
			    SATA_ATAPI_RQSENSE_LEN);

			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: request sense done", NULL);
		}

		return;
	}

	/*
	 * Normal command processing
	 */

	if ((status & (SATA_STATUS_DRQ)) != 0) {
		/* ATAPI protocol state - HP4: Transfer_Data */

		/* read the byte count from the controller */
		ctlr_count = (uint16_t)nv_get8(cmdhdl, nvp->nvp_hcyl) << 8;
		ctlr_count |= nv_get8(cmdhdl, nvp->nvp_lcyl);

		if (ctlr_count == 0) {
			/* no data to transfer - some devices do this */

			spkt->satapkt_reason = SATA_PKT_COMPLETED;
			nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_intr_pkt_pio: done (no data)", NULL);

			return;
		}

		/* never transfer more than the caller's buffer holds */
		count = min(ctlr_count, nv_slotp->nvslot_byte_count);

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: drive_bytes 0x%x", ctlr_count);

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: byte_count 0x%x",
		    nv_slotp->nvslot_byte_count);

		/* transfer the data */

		if (direction == SATA_DIR_READ) {
			ddi_rep_get16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_v_addr,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);

			ctlr_count -= count;

			if (ctlr_count > 0) {
				/* consume remaining bytes */

				for (; ctlr_count > 0;
				    ctlr_count -= 2)
					(void) ddi_get16(cmdhdl,
					    (ushort_t *)nvp->nvp_data);

				NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
				    "nv_intr_pkt_pio: bytes remained", NULL);
			}
		} else {
			ddi_rep_put16(cmdhdl,
			    (ushort_t *)nv_slotp->nvslot_v_addr,
			    (ushort_t *)nvp->nvp_data, (count >> 1),
			    DDI_DEV_NO_AUTOINCR);
		}

		nv_slotp->nvslot_v_addr += count;
		nv_slotp->nvslot_byte_count -= count;

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: transition to HP2", NULL);
	} else {
		/* still in ATAPI state - HP2 */

		/* no DRQ and no error: the command has completed */
		spkt->satapkt_reason = SATA_PKT_COMPLETED;
		nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_intr_pkt_pio: done", NULL);
	}
}
5142 5141
5143 5142
/*
 * ATA command, DMA data in/out interrupt handler.
 *
 * Stops the bus master DMA engine, reads the taskfile status (clearing
 * the IRQ), and completes the active packet, checking first for drive
 * errors and then for bus master errors.  On a bus master error the
 * port is reset.  The slot is always marked NVSLOT_COMPLETE.
 */
static void
nv_intr_dma(nv_port_t *nvp, struct nv_slot *nv_slotp)
{
	uchar_t status;
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *sata_cmdp = &spkt->satapkt_cmd;
	ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	ddi_acc_handle_t bmhdl = nvp->nvp_bm_hdl;
	uchar_t bmicx;
	uchar_t bm_status;

	nv_slotp->nvslot_flags = NVSLOT_COMPLETE;

	/*
	 * stop DMA engine by clearing the start/stop bit (BMICX_SSBM)
	 */
	bmicx = nv_get8(bmhdl, nvp->nvp_bmicx);
	nv_put8(bmhdl, nvp->nvp_bmicx, bmicx & ~BMICX_SSBM);

	/*
	 * get the status and clear the IRQ, and check for DMA error
	 */
	status = nv_get8(cmdhdl, nvp->nvp_status);

	/*
	 * check for drive errors
	 */
	if (status & (SATA_STATUS_DF | SATA_STATUS_ERR)) {
		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
		/* still clear bus master status, but discard it */
		(void) nv_bm_status_clear(nvp);

		return;
	}

	bm_status = nv_bm_status_clear(nvp);

	/*
	 * check for bus master errors
	 */

	if (bm_status & BMISX_IDERR) {
		spkt->satapkt_reason = SATA_PKT_PORT_ERROR;
		sata_cmdp->satacmd_status_reg = nv_get8(ctlhdl,
		    nvp->nvp_altstatus);
		sata_cmdp->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
		nv_reset(nvp, "intr_dma");

		return;
	}

	spkt->satapkt_reason = SATA_PKT_COMPLETED;
}
5201 5200
5202 5201
5203 5202 /*
5204 5203 * Wait for a register of a controller to achieve a specific state.
5205 5204 * To return normally, all the bits in the first sub-mask must be ON,
5206 5205 * all the bits in the second sub-mask must be OFF.
5207 5206 * If timeout_usec microseconds pass without the controller achieving
5208 5207 * the desired bit configuration, return TRUE, else FALSE.
5209 5208 *
5210 5209 * hybrid waiting algorithm: if not in interrupt context, busy looping will
5211 5210 * occur for the first 250 us, then switch over to a sleeping wait.
5212 5211 *
5213 5212 */
5214 5213 int
5215 5214 nv_wait(nv_port_t *nvp, uchar_t onbits, uchar_t offbits, uint_t timeout_usec,
5216 5215 int type_wait)
5217 5216 {
5218 5217 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5219 5218 hrtime_t end, cur, start_sleep, start;
5220 5219 int first_time = B_TRUE;
5221 5220 ushort_t val;
5222 5221
5223 5222 for (;;) {
5224 5223 val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5225 5224
5226 5225 if ((val & onbits) == onbits && (val & offbits) == 0) {
5227 5226
5228 5227 return (B_TRUE);
5229 5228 }
5230 5229
5231 5230 cur = gethrtime();
5232 5231
5233 5232 /*
5234 5233 * store the start time and calculate the end
5235 5234 * time. also calculate "start_sleep" which is
5236 5235 * the point after which the driver will stop busy
5237 5236 * waiting and change to sleep waiting.
5238 5237 */
5239 5238 if (first_time) {
5240 5239 first_time = B_FALSE;
5241 5240 /*
5242 5241 * start and end are in nanoseconds
5243 5242 */
5244 5243 start = cur;
5245 5244 end = start + timeout_usec * 1000;
5246 5245 /*
5247 5246 * add 1 ms to start
5248 5247 */
5249 5248 start_sleep = start + 250000;
5250 5249
5251 5250 if (servicing_interrupt()) {
5252 5251 type_wait = NV_NOSLEEP;
5253 5252 }
5254 5253 }
5255 5254
5256 5255 if (cur > end) {
5257 5256
5258 5257 break;
5259 5258 }
5260 5259
5261 5260 if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5262 5261 #if ! defined(__lock_lint)
5263 5262 delay(1);
5264 5263 #endif
5265 5264 } else {
5266 5265 drv_usecwait(nv_usec_delay);
5267 5266 }
5268 5267 }
5269 5268
5270 5269 return (B_FALSE);
5271 5270 }
5272 5271
5273 5272
5274 5273 /*
5275 5274 * This is a slightly more complicated version that checks
5276 5275 * for error conditions and bails-out rather than looping
5277 5276 * until the timeout is exceeded.
5278 5277 *
5279 5278 * hybrid waiting algorithm: if not in interrupt context, busy looping will
5280 5279 * occur for the first 250 us, then switch over to a sleeping wait.
5281 5280 */
5282 5281 int
5283 5282 nv_wait3(
5284 5283 nv_port_t *nvp,
5285 5284 uchar_t onbits1,
5286 5285 uchar_t offbits1,
5287 5286 uchar_t failure_onbits2,
5288 5287 uchar_t failure_offbits2,
5289 5288 uchar_t failure_onbits3,
5290 5289 uchar_t failure_offbits3,
5291 5290 uint_t timeout_usec,
5292 5291 int type_wait)
5293 5292 {
5294 5293 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
5295 5294 hrtime_t end, cur, start_sleep, start;
5296 5295 int first_time = B_TRUE;
5297 5296 ushort_t val;
5298 5297
5299 5298 for (;;) {
5300 5299 val = nv_get8(ctlhdl, nvp->nvp_altstatus);
5301 5300
5302 5301 /*
5303 5302 * check for expected condition
5304 5303 */
5305 5304 if ((val & onbits1) == onbits1 && (val & offbits1) == 0) {
5306 5305
5307 5306 return (B_TRUE);
5308 5307 }
5309 5308
5310 5309 /*
5311 5310 * check for error conditions
5312 5311 */
5313 5312 if ((val & failure_onbits2) == failure_onbits2 &&
5314 5313 (val & failure_offbits2) == 0) {
5315 5314
5316 5315 return (B_FALSE);
5317 5316 }
5318 5317
5319 5318 if ((val & failure_onbits3) == failure_onbits3 &&
5320 5319 (val & failure_offbits3) == 0) {
5321 5320
5322 5321 return (B_FALSE);
5323 5322 }
5324 5323
5325 5324 /*
5326 5325 * store the start time and calculate the end
5327 5326 * time. also calculate "start_sleep" which is
5328 5327 * the point after which the driver will stop busy
5329 5328 * waiting and change to sleep waiting.
5330 5329 */
5331 5330 if (first_time) {
5332 5331 first_time = B_FALSE;
5333 5332 /*
5334 5333 * start and end are in nanoseconds
5335 5334 */
5336 5335 cur = start = gethrtime();
5337 5336 end = start + timeout_usec * 1000;
5338 5337 /*
5339 5338 * add 1 ms to start
5340 5339 */
5341 5340 start_sleep = start + 250000;
5342 5341
5343 5342 if (servicing_interrupt()) {
5344 5343 type_wait = NV_NOSLEEP;
5345 5344 }
5346 5345 } else {
5347 5346 cur = gethrtime();
5348 5347 }
5349 5348
5350 5349 if (cur > end) {
5351 5350
5352 5351 break;
5353 5352 }
5354 5353
5355 5354 if ((type_wait != NV_NOSLEEP) && (cur > start_sleep)) {
5356 5355 #if ! defined(__lock_lint)
5357 5356 delay(1);
5358 5357 #endif
5359 5358 } else {
5360 5359 drv_usecwait(nv_usec_delay);
5361 5360 }
5362 5361 }
5363 5362
5364 5363 return (B_FALSE);
5365 5364 }
5366 5365
5367 5366
5368 5367 /*
5369 5368 * nv_port_state_change() reports the state of the port to the
5370 5369 * sata module by calling sata_hba_event_notify(). This
5371 5370 * function is called any time the state of the port is changed
5372 5371 */
5373 5372 static void
5374 5373 nv_port_state_change(nv_port_t *nvp, int event, uint8_t addr_type, int state)
5375 5374 {
5376 5375 sata_device_t sd;
5377 5376
5378 5377 NVLOG(NVDBG_EVENT, nvp->nvp_ctlp, nvp,
5379 5378 "nv_port_state_change: event 0x%x type 0x%x state 0x%x "
5380 5379 "lbolt %ld (ticks)", event, addr_type, state, ddi_get_lbolt());
5381 5380
5382 5381 if (ddi_in_panic() != 0) {
5383 5382
5384 5383 return;
5385 5384 }
5386 5385
5387 5386 bzero((void *)&sd, sizeof (sata_device_t));
5388 5387 sd.satadev_rev = SATA_DEVICE_REV;
5389 5388 nv_copy_registers(nvp, &sd, NULL);
5390 5389
5391 5390 /*
5392 5391 * When NCQ is implemented sactive and snotific field need to be
5393 5392 * updated.
5394 5393 */
5395 5394 sd.satadev_addr.cport = nvp->nvp_port_num;
5396 5395 sd.satadev_addr.qual = addr_type;
5397 5396 sd.satadev_state = state;
5398 5397
5399 5398 sata_hba_event_notify(nvp->nvp_ctlp->nvc_dip, &sd, event);
5400 5399 }
5401 5400
5402 5401
/*
 * Monitor reset progress and signature gathering.
 *
 * Called from nv_timeout() while NV_RESET is set in nvp_state, with
 * nvp_mutex held.  Returns the number of microseconds after which
 * nv_timeout() should call back to continue monitoring, or 0 when
 * reset processing has finished (success, device gone, or port
 * failed) and no further monitoring is needed.  Threads blocked in
 * nv_reset() are woken via nvp_reset_cv when processing completes.
 */
static clock_t
nv_monitor_reset(nv_port_t *nvp)
{
	ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
	uint32_t sstatus;

	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));

	sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);

	/*
	 * Check the link status.  The link needs to be active before
	 * checking the link's status.
	 */
	if ((SSTATUS_GET_IPM(sstatus) != SSTATUS_IPM_ACTIVE) ||
	    (SSTATUS_GET_DET(sstatus) != SSTATUS_DET_DEVPRE_PHYCOM)) {
		/*
		 * Either link is not active or there is no device
		 * If the link remains down for more than NV_LINK_EVENT_DOWN
		 * (milliseconds), abort signature acquisition and complete
		 * reset processing.  The link will go down when COMRESET is
		 * sent by nv_reset().
		 */

		if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >=
		    NV_LINK_EVENT_DOWN) {

			nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
			    "nv_monitor_reset: no link - ending signature "
			    "acquisition; time after reset %ldms",
			    TICK_TO_MSEC(ddi_get_lbolt() -
			    nvp->nvp_reset_time));

			DTRACE_PROBE(no_link_reset_giving_up_f);

			/*
			 * If the drive was previously present and configured
			 * and then subsequently removed, then send a removal
			 * event to sata common module.
			 */
			if (nvp->nvp_type != SATA_DTYPE_NONE) {
				nv_port_state_change(nvp,
				    SATA_EVNT_DEVICE_DETACHED,
				    SATA_ADDR_CPORT, 0);
			}

			/* forget the device; port is now empty */
			nvp->nvp_type = SATA_DTYPE_NONE;
			nvp->nvp_signature = NV_NO_SIG;
			nvp->nvp_state &= ~(NV_DEACTIVATED);

#ifdef SGPIO_SUPPORT
			nv_sgp_drive_disconnect(nvp->nvp_ctlp,
			    SGP_CTLR_PORT_TO_DRV(
			    nvp->nvp_ctlp->nvc_ctlr_num,
			    nvp->nvp_port_num));
#endif

			cv_signal(&nvp->nvp_reset_cv);

			return (0);
		}

		DTRACE_PROBE(link_lost_reset_keep_trying_p);

		/* link still settling; check again after nvp_wait_sig us */
		return (nvp->nvp_wait_sig);
	}

	NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
	    "nv_monitor_reset: link up. time since reset %ldms",
	    TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time));

	nv_read_signature(nvp);


	if (nvp->nvp_signature != NV_NO_SIG) {
		/*
		 * signature has been acquired, send the appropriate
		 * event to the sata common module.
		 */
		if (nvp->nvp_state & (NV_ATTACH|NV_HOTPLUG)) {
			char *source;

			if (nvp->nvp_state & NV_HOTPLUG) {

				source = "hotplugged";
				nv_port_state_change(nvp,
				    SATA_EVNT_DEVICE_ATTACHED,
				    SATA_ADDR_CPORT, SATA_DSTATE_PWR_ACTIVE);
				DTRACE_PROBE1(got_sig_for_hotplugged_device_h,
				    int, nvp->nvp_state);

			} else {
				source = "activated or attached";
				DTRACE_PROBE1(got_sig_for_existing_device_h,
				    int, nvp->nvp_state);
			}

			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
			    "signature acquired for %s device. sig:"
			    " 0x%x state: 0x%x nvp_type: 0x%x", source,
			    nvp->nvp_signature, nvp->nvp_state, nvp->nvp_type);


			nvp->nvp_state &= ~(NV_RESET|NV_ATTACH|NV_HOTPLUG);

#ifdef SGPIO_SUPPORT
			/* update activity LED state for the new device */
			if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
				nv_sgp_drive_connect(nvp->nvp_ctlp,
				    SGP_CTLR_PORT_TO_DRV(
				    nvp->nvp_ctlp->nvc_ctlr_num,
				    nvp->nvp_port_num));
			} else {
				nv_sgp_drive_disconnect(nvp->nvp_ctlp,
				    SGP_CTLR_PORT_TO_DRV(
				    nvp->nvp_ctlp->nvc_ctlr_num,
				    nvp->nvp_port_num));
			}
#endif

			cv_signal(&nvp->nvp_reset_cv);

			return (0);
		}

		/*
		 * Since this was not an attach, it was a reset of an
		 * existing device
		 */
		nvp->nvp_state &= ~NV_RESET;
		nvp->nvp_state |= NV_RESTORE;



		DTRACE_PROBE(got_signature_reset_complete_p);
		DTRACE_PROBE1(nvp_signature_h, int, nvp->nvp_signature);
		DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);

		NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
		    "signature acquired reset complete. sig: 0x%x"
		    " state: 0x%x", nvp->nvp_signature, nvp->nvp_state);

		/*
		 * interrupts may have been disabled so just make sure
		 * they are cleared and re-enabled.
		 */

		(*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
		    NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);

		nv_port_state_change(nvp, SATA_EVNT_DEVICE_RESET,
		    SATA_ADDR_DCPORT,
		    SATA_DSTATE_RESET | SATA_DSTATE_PWR_ACTIVE);

		return (0);
	}


	/*
	 * No signature yet.  If we have been waiting too long, either
	 * retry the reset or, after NV_MAX_RESET_RETRY attempts, fail
	 * the port.
	 */
	if (TICK_TO_MSEC(ddi_get_lbolt() - nvp->nvp_reset_time) >
	    NV_RETRY_RESET_SIG) {


		if (nvp->nvp_reset_retry_count >= NV_MAX_RESET_RETRY) {

			nvp->nvp_state |= NV_FAILED;
			nvp->nvp_state &= ~(NV_RESET|NV_ATTACH|NV_HOTPLUG);

			DTRACE_PROBE(reset_exceeded_waiting_for_sig_p);
			DTRACE_PROBE(reset_exceeded_waiting_for_sig_f);
			DTRACE_PROBE1(nvp_state_h, int, nvp->nvp_state);
			NVLOG(NVDBG_RESET, nvp->nvp_ctlp, nvp,
			    "reset time exceeded waiting for sig nvp_state %x",
			    nvp->nvp_state);

			nv_port_state_change(nvp, SATA_EVNT_PORT_FAILED,
			    SATA_ADDR_CPORT, 0);

			cv_signal(&nvp->nvp_reset_cv);

			return (0);
		}

		nv_reset(nvp, "retry");

		return (nvp->nvp_wait_sig);
	}

	/*
	 * signature not received, keep trying
	 */
	DTRACE_PROBE(no_sig_keep_waiting_p);

	/*
	 * double the wait time for sig since the last try but cap it off at
	 * 1 second.
	 */
	nvp->nvp_wait_sig = nvp->nvp_wait_sig * 2;

	return (nvp->nvp_wait_sig > NV_ONE_SEC ? NV_ONE_SEC :
	    nvp->nvp_wait_sig);
}
5606 5605
5607 5606
/*
 * timeout processing:
 *
 * Check if any packets have crossed a timeout threshold.  If so,
 * abort the packet.  This function is not NCQ-aware.
 *
 * If reset is in progress, call reset monitoring function.
 *
 * Timeout frequency may be lower for checking packet timeout
 * and higher for reset monitoring.
 *
 * This is the timeout(9F)-style callback for a port; it re-arms itself
 * via nv_setup_timeout() with the interval computed in next_timeout_us
 * (0 means do not re-arm).  It also performs delayed hotplug/unplug
 * processing when NV_LINK_EVENT is set.
 */
static void
nv_timeout(void *arg)
{
	nv_port_t *nvp = arg;
	nv_slot_t *nv_slotp;
	clock_t next_timeout_us = NV_ONE_SEC;
	uint16_t int_status;
	uint8_t status, bmstatus;
	/*
	 * shared across all ports; only used to rate-limit a warning,
	 * so the unsynchronized access is benign
	 */
	static int intr_warn_once = 0;
	uint32_t serror;


	ASSERT(nvp != NULL);

	mutex_enter(&nvp->nvp_mutex);
	nvp->nvp_timeout_id = 0;

	if (nvp->nvp_state & (NV_DEACTIVATED|NV_FAILED)) {
		/* port is out of service; stop the timeout */
		next_timeout_us = 0;

		goto finished;
	}

	if (nvp->nvp_state & NV_RESET) {
		/* reset in progress; let the monitor pick the interval */
		next_timeout_us = nv_monitor_reset(nvp);

		goto finished;
	}

	if (nvp->nvp_state & NV_LINK_EVENT) {
		boolean_t device_present = B_FALSE;
		uint32_t sstatus;
		ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];

		/* wait for the link to settle before acting on the event */
		if (TICK_TO_USEC(ddi_get_lbolt() -
		    nvp->nvp_link_event_time) < NV_LINK_EVENT_SETTLE) {

			next_timeout_us = 10 * NV_ONE_MSEC;

			DTRACE_PROBE(link_event_set_no_timeout_keep_waiting_p);

			goto finished;
		}

		DTRACE_PROBE(link_event_settled_now_process_p);

		nvp->nvp_state &= ~NV_LINK_EVENT;

		/*
		 * ck804 routinely reports the wrong hotplug/unplug event,
		 * and it's been seen on mcp55 when there are signal integrity
		 * issues.  Therefore need to infer the event from the
		 * current link status.
		 */

		sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);

		if ((SSTATUS_GET_IPM(sstatus) == SSTATUS_IPM_ACTIVE) &&
		    (SSTATUS_GET_DET(sstatus) ==
		    SSTATUS_DET_DEVPRE_PHYCOM)) {
			device_present = B_TRUE;
		}

		/* had a device, link now down: treat as removal */
		if ((nvp->nvp_signature != NV_NO_SIG) &&
		    (device_present == B_FALSE)) {

			NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
			    "nv_timeout: device detached", NULL);

			DTRACE_PROBE(device_detached_p);

			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
			    B_FALSE);

			nv_port_state_change(nvp, SATA_EVNT_DEVICE_DETACHED,
			    SATA_ADDR_CPORT, 0);

			nvp->nvp_signature = NV_NO_SIG;
			nvp->nvp_rem_time = ddi_get_lbolt();
			nvp->nvp_type = SATA_DTYPE_NONE;
			next_timeout_us = 0;

#ifdef SGPIO_SUPPORT
			nv_sgp_drive_disconnect(nvp->nvp_ctlp,
			    SGP_CTLR_PORT_TO_DRV(nvp->nvp_ctlp->nvc_ctlr_num,
			    nvp->nvp_port_num));
#endif

			goto finished;
		}

		/*
		 * if the device was already present, and it's still present,
		 * then abort any outstanding command and issue a reset.
		 * This may result from transient link errors.
		 */

		if ((nvp->nvp_signature != NV_NO_SIG) &&
		    (device_present == B_TRUE)) {

			NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
			    "nv_timeout: spurious link event", NULL);
			DTRACE_PROBE(spurious_link_event_p);

			(void) nv_abort_active(nvp, NULL, SATA_PKT_PORT_ERROR,
			    B_FALSE);

			nvp->nvp_signature = NV_NO_SIG;
			nvp->nvp_trans_link_time = ddi_get_lbolt();
			nvp->nvp_trans_link_count++;
			next_timeout_us = 0;

			nv_reset(nvp, "transient link event");

			goto finished;
		}


		/*
		 * a new device has been inserted
		 */
		if ((nvp->nvp_signature == NV_NO_SIG) &&
		    (device_present == B_TRUE)) {
			NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
			    "nv_timeout: device attached", NULL);

			DTRACE_PROBE(device_attached_p);
			nvp->nvp_add_time = ddi_get_lbolt();
			next_timeout_us = 0;
			nvp->nvp_reset_count = 0;
			nvp->nvp_state = NV_HOTPLUG;
			nvp->nvp_type = SATA_DTYPE_UNKNOWN;
			nv_reset(nvp, "hotplug");

			goto finished;
		}

		/*
		 * no link, and no prior device.  Nothing to do, but
		 * log this.
		 */
		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
		    "nv_timeout: delayed hot processing no link no prior"
		    " device", NULL);
		DTRACE_PROBE(delayed_hotplug_no_link_no_prior_device_p);

		nvp->nvp_trans_link_time = ddi_get_lbolt();
		nvp->nvp_trans_link_count++;
		next_timeout_us = 0;

		goto finished;
	}

	/*
	 * Not yet NCQ-aware - there is only one command active.
	 */
	nv_slotp = &(nvp->nvp_slot[0]);

	/*
	 * perform timeout checking and processing only if there is an
	 * active packet on the port
	 */
	if (nv_slotp != NULL && nv_slotp->nvslot_spkt != NULL) {
		sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
		sata_cmd_t *satacmd = &spkt->satapkt_cmd;
		uint8_t cmd = satacmd->satacmd_cmd_reg;
		uint64_t lba;

#if ! defined(__lock_lint) && defined(DEBUG)

		lba = (uint64_t)satacmd->satacmd_lba_low_lsb |
		    ((uint64_t)satacmd->satacmd_lba_mid_lsb << 8) |
		    ((uint64_t)satacmd->satacmd_lba_high_lsb << 16) |
		    ((uint64_t)satacmd->satacmd_lba_low_msb << 24) |
		    ((uint64_t)satacmd->satacmd_lba_mid_msb << 32) |
		    ((uint64_t)satacmd->satacmd_lba_high_msb << 40);
#endif
		/*
		 * NOTE(review): lba is only assigned under DEBUG but is
		 * passed to nv_cmn_err() below (with a %d conversion for
		 * a uint64_t) -- verify this is intended for non-DEBUG
		 * builds.
		 */

		/*
		 * timeout not needed if there is a polling thread
		 */
		if (spkt->satapkt_op_mode & SATA_OPMODE_POLLING) {
			next_timeout_us = 0;

			goto finished;
		}

		if (TICK_TO_SEC(ddi_get_lbolt() - nv_slotp->nvslot_stime) >
		    spkt->satapkt_time) {

			/* gather diagnostic register state for the messages */
			serror = nv_get32(nvp->nvp_ctlp->nvc_bar_hdl[5],
			    nvp->nvp_serror);
			status = nv_get8(nvp->nvp_ctl_hdl,
			    nvp->nvp_altstatus);
			bmstatus = nv_get8(nvp->nvp_bm_hdl,
			    nvp->nvp_bmisx);

			nv_cmn_err(CE_NOTE, nvp->nvp_ctlp, nvp,
			    "nv_timeout: aborting: "
			    "nvslot_stime: %ld max ticks till timeout: %ld "
			    "cur_time: %ld cmd = 0x%x lba = %d seq = %d",
			    nv_slotp->nvslot_stime,
			    drv_usectohz(MICROSEC *
			    spkt->satapkt_time), ddi_get_lbolt(),
			    cmd, lba, nvp->nvp_seq);

			NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
			    "nv_timeout: altstatus = 0x%x bmicx = 0x%x "
			    "serror = 0x%x previous_cmd = "
			    "0x%x", status, bmstatus, serror,
			    nvp->nvp_previous_cmd);


			DTRACE_PROBE1(nv_timeout_packet_p, int, nvp);

			if (nvp->nvp_mcp5x_int_status != NULL) {

				int_status = nv_get16(
				    nvp->nvp_ctlp->nvc_bar_hdl[5],
				    nvp->nvp_mcp5x_int_status);
				NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp, nvp,
				    "int_status = 0x%x", int_status);

				if (int_status & MCP5X_INT_COMPLETE) {
					/*
					 * Completion interrupt was missed.
					 * Issue warning message once.
					 */
					if (!intr_warn_once) {

						nv_cmn_err(CE_WARN,
						    nvp->nvp_ctlp,
						    nvp,
						    "nv_sata: missing command "
						    "completion interrupt");
						intr_warn_once = 1;

					}

					NVLOG(NVDBG_TIMEOUT, nvp->nvp_ctlp,
					    nvp, "timeout detected with "
					    "interrupt ready - calling "
					    "int directly", NULL);

					/*
					 * drop the mutex: the interrupt
					 * handler acquires it itself
					 */
					mutex_exit(&nvp->nvp_mutex);
					(void) mcp5x_intr_port(nvp);
					mutex_enter(&nvp->nvp_mutex);

				} else {
					/*
					 * True timeout and not a missing
					 * interrupt.
					 */
					DTRACE_PROBE1(timeout_abort_active_p,
					    int *, nvp);
					(void) nv_abort_active(nvp, spkt,
					    SATA_PKT_TIMEOUT, B_TRUE);
				}
			} else {
				/* ck804: no int status reg to cross-check */
				(void) nv_abort_active(nvp, spkt,
				    SATA_PKT_TIMEOUT, B_TRUE);
			}

		} else {
			NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
			    "nv_timeout:"
			    " still in use so restarting timeout",
			    NULL);

			next_timeout_us = NV_ONE_SEC;
		}
	} else {
		/*
		 * there was no active packet, so do not re-enable timeout
		 */
		next_timeout_us = 0;
		NVLOG(NVDBG_VERBOSE, nvp->nvp_ctlp, nvp,
		    "nv_timeout: no active packet so not re-arming "
		    "timeout", NULL);
	}

finished:

	nv_setup_timeout(nvp, next_timeout_us);

	mutex_exit(&nvp->nvp_mutex);
}
5907 5906
5908 5907
5909 5908 /*
5910 5909 * enable or disable the 3 interrupt types the driver is
5911 5910 * interested in: completion, add and remove.
5912 5911 */
static void
ck804_set_intr(nv_port_t *nvp, int flag)
{
	nv_ctl_t *nvc = nvp->nvp_ctlp;
	ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
	uchar_t *bar5 = nvc->nvc_bar_addr[5];
	/* per-channel enable bits: index 0 = primary, 1 = secondary device */
	uint8_t intr_bits[] = { CK804_INT_PDEV_HOT|CK804_INT_PDEV_INT,
	    CK804_INT_SDEV_HOT|CK804_INT_SDEV_INT };
	uint8_t clear_all_bits[] = { CK804_INT_PDEV_ALL, CK804_INT_SDEV_ALL };
	uint8_t int_en, port = nvp->nvp_port_num, intr_status;

	/*
	 * Non-blocking disable: read-modify-write of the enable register
	 * with no mutexes taken, for callers that must not block (e.g.
	 * quiesce).  Note this deliberately skips the nvc_mutex taken
	 * below, so a concurrent update of the shared 8-bit register on
	 * the other channel could theoretically race -- acceptable for
	 * the contexts this is called from.
	 */
	if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
		int_en = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
		int_en &= ~intr_bits[port];
		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
		    int_en);
		return;
	}

	ASSERT(mutex_owned(&nvp->nvp_mutex));

	/*
	 * controller level lock also required since access to an 8-bit
	 * interrupt register is shared between both channels.
	 */
	mutex_enter(&nvc->nvc_mutex);

	if (flag & NV_INTR_CLEAR_ALL) {
		NVLOG(NVDBG_INTR, nvc, nvp,
		    "ck804_set_intr: NV_INTR_CLEAR_ALL", NULL);

		intr_status = nv_get8(nvc->nvc_bar_hdl[5],
		    (uint8_t *)(nvc->nvc_ck804_int_status));

		if (intr_status & clear_all_bits[port]) {

			/*
			 * Write back only this channel's bits so pending
			 * status on the other channel is not clobbered.
			 */
			nv_put8(nvc->nvc_bar_hdl[5],
			    (uint8_t *)(nvc->nvc_ck804_int_status),
			    clear_all_bits[port]);

			NVLOG(NVDBG_INTR, nvc, nvp,
			    "interrupt bits cleared %x",
			    intr_status & clear_all_bits[port]);
		}
	}

	if (flag & NV_INTR_DISABLE) {
		NVLOG(NVDBG_INTR, nvc, nvp,
		    "ck804_set_intr: NV_INTR_DISABLE", NULL);
		int_en = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
		int_en &= ~intr_bits[port];
		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
		    int_en);
	}

	if (flag & NV_INTR_ENABLE) {
		NVLOG(NVDBG_INTR, nvc, nvp, "ck804_set_intr: NV_INTR_ENABLE",
		    NULL);
		int_en = nv_get8(bar5_hdl,
		    (uint8_t *)(bar5 + CK804_SATA_INT_EN));
		int_en |= intr_bits[port];
		nv_put8(bar5_hdl, (uint8_t *)(bar5 + CK804_SATA_INT_EN),
		    int_en);
	}

	mutex_exit(&nvc->nvc_mutex);
}
5982 5981
5983 5982
5984 5983 /*
5985 5984 * enable or disable the 3 interrupts the driver is interested in:
5986 5985 * completion interrupt, hot add, and hot remove interrupt.
5987 5986 */
5988 5987 static void
5989 5988 mcp5x_set_intr(nv_port_t *nvp, int flag)
5990 5989 {
5991 5990 nv_ctl_t *nvc = nvp->nvp_ctlp;
5992 5991 ddi_acc_handle_t bar5_hdl = nvc->nvc_bar_hdl[5];
5993 5992 uint16_t intr_bits =
5994 5993 MCP5X_INT_ADD|MCP5X_INT_REM|MCP5X_INT_COMPLETE;
5995 5994 uint16_t int_en;
5996 5995
5997 5996 if (flag & NV_INTR_DISABLE_NON_BLOCKING) {
5998 5997 int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
5999 5998 int_en &= ~intr_bits;
6000 5999 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6001 6000 return;
6002 6001 }
6003 6002
6004 6003 ASSERT(mutex_owned(&nvp->nvp_mutex));
6005 6004
6006 6005 NVLOG(NVDBG_INTR, nvc, nvp, "mcp055_set_intr: enter flag: %d", flag);
6007 6006
6008 6007 if (flag & NV_INTR_CLEAR_ALL) {
6009 6008 NVLOG(NVDBG_INTR, nvc, nvp,
6010 6009 "mcp5x_set_intr: NV_INTR_CLEAR_ALL", NULL);
6011 6010 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_status, MCP5X_INT_CLEAR);
6012 6011 }
6013 6012
6014 6013 if (flag & NV_INTR_ENABLE) {
6015 6014 NVLOG(NVDBG_INTR, nvc, nvp, "mcp5x_set_intr: NV_INTR_ENABLE",
6016 6015 NULL);
6017 6016 int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
6018 6017 int_en |= intr_bits;
6019 6018 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6020 6019 }
6021 6020
6022 6021 if (flag & NV_INTR_DISABLE) {
6023 6022 NVLOG(NVDBG_INTR, nvc, nvp,
6024 6023 "mcp5x_set_intr: NV_INTR_DISABLE", NULL);
6025 6024 int_en = nv_get16(bar5_hdl, nvp->nvp_mcp5x_int_ctl);
6026 6025 int_en &= ~intr_bits;
6027 6026 nv_put16(bar5_hdl, nvp->nvp_mcp5x_int_ctl, int_en);
6028 6027 }
6029 6028 }
6030 6029
6031 6030
6032 6031 static void
6033 6032 nv_resume(nv_port_t *nvp)
6034 6033 {
6035 6034 NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_resume()", NULL);
6036 6035
6037 6036 mutex_enter(&nvp->nvp_mutex);
6038 6037
6039 6038 if (nvp->nvp_state & NV_DEACTIVATED) {
6040 6039 mutex_exit(&nvp->nvp_mutex);
6041 6040
6042 6041 return;
6043 6042 }
6044 6043
6045 6044 /* Enable interrupt */
6046 6045 (*(nvp->nvp_ctlp->nvc_set_intr))(nvp, NV_INTR_CLEAR_ALL|NV_INTR_ENABLE);
6047 6046
6048 6047 /*
6049 6048 * Power may have been removed to the port and the
6050 6049 * drive, and/or a drive may have been added or removed.
6051 6050 * Force a reset which will cause a probe and re-establish
6052 6051 * any state needed on the drive.
6053 6052 */
6054 6053 nv_reset(nvp, "resume");
6055 6054
6056 6055 mutex_exit(&nvp->nvp_mutex);
6057 6056 }
6058 6057
6059 6058
6060 6059 static void
6061 6060 nv_suspend(nv_port_t *nvp)
6062 6061 {
6063 6062 NVLOG(NVDBG_INIT, nvp->nvp_ctlp, nvp, "nv_suspend()", NULL);
6064 6063
6065 6064 mutex_enter(&nvp->nvp_mutex);
6066 6065
6067 6066 #ifdef SGPIO_SUPPORT
6068 6067 if (nvp->nvp_type == SATA_DTYPE_ATADISK) {
6069 6068 nv_sgp_drive_disconnect(nvp->nvp_ctlp, SGP_CTLR_PORT_TO_DRV(
6070 6069 nvp->nvp_ctlp->nvc_ctlr_num, nvp->nvp_port_num));
6071 6070 }
6072 6071 #endif
6073 6072
6074 6073 if (nvp->nvp_state & NV_DEACTIVATED) {
6075 6074 mutex_exit(&nvp->nvp_mutex);
6076 6075
6077 6076 return;
6078 6077 }
6079 6078
6080 6079 /*
6081 6080 * Stop the timeout handler.
6082 6081 * (It will be restarted in nv_reset() during nv_resume().)
6083 6082 */
6084 6083 if (nvp->nvp_timeout_id) {
6085 6084 (void) untimeout(nvp->nvp_timeout_id);
6086 6085 nvp->nvp_timeout_id = 0;
6087 6086 }
6088 6087
6089 6088 /* Disable interrupt */
6090 6089 (*(nvp->nvp_ctlp->nvc_set_intr))(nvp,
6091 6090 NV_INTR_CLEAR_ALL|NV_INTR_DISABLE);
6092 6091
6093 6092 mutex_exit(&nvp->nvp_mutex);
6094 6093 }
6095 6094
6096 6095
6097 6096 static void
6098 6097 nv_copy_registers(nv_port_t *nvp, sata_device_t *sd, sata_pkt_t *spkt)
6099 6098 {
6100 6099 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6101 6100 sata_cmd_t *scmd = &spkt->satapkt_cmd;
6102 6101 ddi_acc_handle_t ctlhdl = nvp->nvp_ctl_hdl;
6103 6102 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6104 6103 uchar_t status;
6105 6104 struct sata_cmd_flags flags;
6106 6105
6107 6106 sd->satadev_scr.sstatus = nv_get32(bar5_hdl, nvp->nvp_sstatus);
6108 6107 sd->satadev_scr.serror = nv_get32(bar5_hdl, nvp->nvp_serror);
6109 6108 sd->satadev_scr.scontrol = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6110 6109
6111 6110 if (spkt == NULL) {
6112 6111
6113 6112 return;
6114 6113 }
6115 6114
6116 6115 /*
6117 6116 * in the error case, implicitly set the return of regs needed
6118 6117 * for error handling.
6119 6118 */
6120 6119 status = scmd->satacmd_status_reg = nv_get8(ctlhdl,
6121 6120 nvp->nvp_altstatus);
6122 6121
6123 6122 flags = scmd->satacmd_flags;
6124 6123
6125 6124 if (status & SATA_STATUS_ERR) {
6126 6125 flags.sata_copy_out_lba_low_msb = B_TRUE;
6127 6126 flags.sata_copy_out_lba_mid_msb = B_TRUE;
6128 6127 flags.sata_copy_out_lba_high_msb = B_TRUE;
6129 6128 flags.sata_copy_out_lba_low_lsb = B_TRUE;
6130 6129 flags.sata_copy_out_lba_mid_lsb = B_TRUE;
6131 6130 flags.sata_copy_out_lba_high_lsb = B_TRUE;
6132 6131 flags.sata_copy_out_error_reg = B_TRUE;
6133 6132 flags.sata_copy_out_sec_count_msb = B_TRUE;
6134 6133 flags.sata_copy_out_sec_count_lsb = B_TRUE;
6135 6134 scmd->satacmd_status_reg = status;
6136 6135 }
6137 6136
6138 6137 if (scmd->satacmd_addr_type & ATA_ADDR_LBA48) {
6139 6138
6140 6139 /*
6141 6140 * set HOB so that high byte will be read
6142 6141 */
6143 6142 nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_HOB|ATDC_D3);
6144 6143
6145 6144 /*
6146 6145 * get the requested high bytes
6147 6146 */
6148 6147 if (flags.sata_copy_out_sec_count_msb) {
6149 6148 scmd->satacmd_sec_count_msb =
6150 6149 nv_get8(cmdhdl, nvp->nvp_count);
6151 6150 }
6152 6151
6153 6152 if (flags.sata_copy_out_lba_low_msb) {
6154 6153 scmd->satacmd_lba_low_msb =
6155 6154 nv_get8(cmdhdl, nvp->nvp_sect);
6156 6155 }
6157 6156
6158 6157 if (flags.sata_copy_out_lba_mid_msb) {
6159 6158 scmd->satacmd_lba_mid_msb =
6160 6159 nv_get8(cmdhdl, nvp->nvp_lcyl);
6161 6160 }
6162 6161
6163 6162 if (flags.sata_copy_out_lba_high_msb) {
6164 6163 scmd->satacmd_lba_high_msb =
6165 6164 nv_get8(cmdhdl, nvp->nvp_hcyl);
6166 6165 }
6167 6166 }
6168 6167
6169 6168 /*
6170 6169 * disable HOB so that low byte is read
6171 6170 */
6172 6171 nv_put8(ctlhdl, nvp->nvp_devctl, ATDC_D3);
6173 6172
6174 6173 /*
6175 6174 * get the requested low bytes
6176 6175 */
6177 6176 if (flags.sata_copy_out_sec_count_lsb) {
6178 6177 scmd->satacmd_sec_count_lsb = nv_get8(cmdhdl, nvp->nvp_count);
6179 6178 }
6180 6179
6181 6180 if (flags.sata_copy_out_lba_low_lsb) {
6182 6181 scmd->satacmd_lba_low_lsb = nv_get8(cmdhdl, nvp->nvp_sect);
6183 6182 }
6184 6183
6185 6184 if (flags.sata_copy_out_lba_mid_lsb) {
6186 6185 scmd->satacmd_lba_mid_lsb = nv_get8(cmdhdl, nvp->nvp_lcyl);
6187 6186 }
6188 6187
6189 6188 if (flags.sata_copy_out_lba_high_lsb) {
6190 6189 scmd->satacmd_lba_high_lsb = nv_get8(cmdhdl, nvp->nvp_hcyl);
6191 6190 }
6192 6191
6193 6192 /*
6194 6193 * get the device register if requested
6195 6194 */
6196 6195 if (flags.sata_copy_out_device_reg) {
6197 6196 scmd->satacmd_device_reg = nv_get8(cmdhdl, nvp->nvp_drvhd);
6198 6197 }
6199 6198
6200 6199 /*
6201 6200 * get the error register if requested
6202 6201 */
6203 6202 if (flags.sata_copy_out_error_reg) {
6204 6203 scmd->satacmd_error_reg = nv_get8(cmdhdl, nvp->nvp_error);
6205 6204 }
6206 6205 }
6207 6206
6208 6207
6209 6208 /*
6210 6209 * hot plug and remove interrupts can occur when the device is reset.
6211 6210 * Masking the interrupt doesn't always work well because if a
6212 6211 * different interrupt arrives on the other port, the driver can still
6213 6212 * end up checking the state of the other port and discover the hot
6214 6213 * interrupt flag is set even though it was masked. Also, when there are
6215 6214 * errors on the link there can be transient link events which need to be
6216 6215 * masked and eliminated as well.
6217 6216 */
static void
nv_link_event(nv_port_t *nvp, int flag)
{

	NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "nv_link_event: flag: %s",
	    flag ? "add" : "remove");

	ASSERT(MUTEX_HELD(&nvp->nvp_mutex));

	/* record when the link last changed; used to debounce events */
	nvp->nvp_link_event_time = ddi_get_lbolt();

	/*
	 * if a port has been deactivated, ignore all link events
	 */
	if (nvp->nvp_state & NV_DEACTIVATED) {
		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "ignoring link event"
		    " port deactivated", NULL);
		DTRACE_PROBE(ignoring_link_port_deactivated_p);

		return;
	}

	/*
	 * if the drive has been reset, ignore any transient events. If it's
	 * a real removal event, nv_monitor_reset() will handle it.
	 */
	if (nvp->nvp_state & NV_RESET) {
		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp, "ignoring link event"
		    " during reset", NULL);
		DTRACE_PROBE(ignoring_link_event_during_reset_p);

		return;
	}

	/*
	 * if link event processing is already enabled, nothing to
	 * do.
	 */
	if (nvp->nvp_state & NV_LINK_EVENT) {

		NVLOG(NVDBG_HOT, nvp->nvp_ctlp, nvp,
		    "received link event while processing already in "
		    "progress", NULL);
		DTRACE_PROBE(nv_link_event_already_set_p);

		return;
	}

	DTRACE_PROBE1(link_event_p, int, nvp);

	/*
	 * Mark link-event processing pending and arm the timeout so the
	 * link has a chance to settle before the event is acted upon.
	 */
	nvp->nvp_state |= NV_LINK_EVENT;

	nv_setup_timeout(nvp, NV_LINK_EVENT_SETTLE);
}
6272 6271
6273 6272
6274 6273 /*
6275 6274 * Get request sense data and stuff it the command's sense buffer.
6276 6275 * Start a request sense command in order to get sense data to insert
6277 6276 * in the sata packet's rqsense buffer. The command completion
6278 6277 * processing is in nv_intr_pkt_pio.
6279 6278 *
6280 6279 * The sata common module provides a function to allocate and set-up a
6281 6280 * request sense packet command. The reasons it is not being used here is:
6282 6281 * a) it cannot be called in an interrupt context and this function is
6283 6282 * called in an interrupt context.
6284 6283 * b) it allocates DMA resources that are not used here because this is
6285 6284 * implemented using PIO.
6286 6285 *
6287 6286 * If, in the future, this is changed to use DMA, the sata common module
6288 6287 * should be used to allocate and set-up the error retrieval (request sense)
6289 6288 * command.
6290 6289 */
static int
nv_start_rqsense_pio(nv_port_t *nvp, nv_slot_t *nv_slotp)
{
	sata_pkt_t *spkt = nv_slotp->nvslot_spkt;
	sata_cmd_t *satacmd = &spkt->satapkt_cmd;
	ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
	/* CDB is transferred as 16-bit words below; assumes even length */
	int cdb_len = spkt->satapkt_cmd.satacmd_acdb_len;

	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_rqsense_pio: start", NULL);

	/* clear the local request sense buffer before starting the command */
	bzero(nv_slotp->nvslot_rqsense_buff, SATA_ATAPI_RQSENSE_LEN);

	/* Write the request sense PACKET command */

	/* select the drive */
	nv_put8(cmdhdl, nvp->nvp_drvhd, satacmd->satacmd_device_reg);

	/* make certain the drive selected (up to 5 seconds for DRDY) */
	if (nv_wait(nvp, SATA_STATUS_DRDY, SATA_STATUS_BSY,
	    NV_SEC2USEC(5), 0) == B_FALSE) {
		NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
		    "nv_start_rqsense_pio: drive select failed", NULL);
		return (NV_FAILURE);
	}

	/* set up the command */
	nv_put8(cmdhdl, nvp->nvp_feature, 0);	/* deassert DMA and OVL */
	nv_put8(cmdhdl, nvp->nvp_hcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ >> 8);
	nv_put8(cmdhdl, nvp->nvp_lcyl, SATA_ATAPI_MAX_BYTES_PER_DRQ & 0xff);
	nv_put8(cmdhdl, nvp->nvp_sect, 0);
	nv_put8(cmdhdl, nvp->nvp_count, 0);	/* no tag */

	/* initiate the command by writing the command register last */
	nv_put8(cmdhdl, nvp->nvp_cmd, SATAC_PACKET);

	/* Give the host ctlr time to do its thing, according to ATA/ATAPI */
	NV_DELAY_NSEC(400);

	/*
	 * Wait for the device to indicate that it is ready for the command
	 * ATAPI protocol state - HP0: Check_Status_A
	 */

	if (nv_wait3(nvp, SATA_STATUS_DRQ, SATA_STATUS_BSY, /* okay */
	    SATA_STATUS_ERR, SATA_STATUS_BSY, /* cmd failed */
	    SATA_STATUS_DF, SATA_STATUS_BSY, /* drive failed */
	    4000000, 0) == B_FALSE) {
		/*
		 * Failure: distinguish a device-reported error from a
		 * plain timeout, complete the packet, and reset the port.
		 */
		if (nv_get8(cmdhdl, nvp->nvp_status) &
		    (SATA_STATUS_ERR | SATA_STATUS_DF)) {
			spkt->satapkt_reason = SATA_PKT_DEV_ERROR;
			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_rqsense_pio: rqsense dev error (HP0)",
			    NULL);
		} else {
			spkt->satapkt_reason = SATA_PKT_TIMEOUT;
			NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
			    "nv_start_rqsense_pio: rqsense timeout (HP0)",
			    NULL);
		}

		nv_copy_registers(nvp, &spkt->satapkt_device, spkt);
		nv_complete_io(nvp, spkt, 0);
		nv_reset(nvp, "rqsense_pio");

		return (NV_FAILURE);
	}

	/*
	 * Put the ATAPI command in the data register
	 * ATAPI protocol state - HP1: Send_Packet
	 */

	ddi_rep_put16(cmdhdl, (ushort_t *)nv_rqsense_cdb,
	    (ushort_t *)nvp->nvp_data,
	    (cdb_len >> 1), DDI_DEV_NO_AUTOINCR);

	/* data transfer completion is handled by nv_intr_pkt_pio (HP3) */
	NVLOG(NVDBG_ATAPI, nvp->nvp_ctlp, nvp,
	    "nv_start_rqsense_pio: exiting into HP3", NULL);

	return (NV_SUCCESS);
}
6374 6373
6375 6374 /*
6376 6375 * quiesce(9E) entry point.
6377 6376 *
6378 6377 * This function is called when the system is single-threaded at high
6379 6378 * PIL with preemption disabled. Therefore, this function must not be
6380 6379 * blocked.
6381 6380 *
6382 6381 * This function returns DDI_SUCCESS on success, or DDI_FAILURE on failure.
6383 6382 * DDI_FAILURE indicates an error condition and should almost never happen.
6384 6383 */
6385 6384 static int
6386 6385 nv_quiesce(dev_info_t *dip)
6387 6386 {
6388 6387 int port, instance = ddi_get_instance(dip);
6389 6388 nv_ctl_t *nvc;
6390 6389
6391 6390 if ((nvc = (nv_ctl_t *)ddi_get_soft_state(nv_statep, instance)) == NULL)
6392 6391 return (DDI_FAILURE);
6393 6392
6394 6393 for (port = 0; port < NV_MAX_PORTS(nvc); port++) {
6395 6394 nv_port_t *nvp = &(nvc->nvc_port[port]);
6396 6395 ddi_acc_handle_t cmdhdl = nvp->nvp_cmd_hdl;
6397 6396 ddi_acc_handle_t bar5_hdl = nvp->nvp_ctlp->nvc_bar_hdl[5];
6398 6397 uint32_t sctrl;
6399 6398
6400 6399 /*
6401 6400 * Stop the controllers from generating interrupts.
6402 6401 */
6403 6402 (*(nvc->nvc_set_intr))(nvp, NV_INTR_DISABLE_NON_BLOCKING);
6404 6403
6405 6404 /*
6406 6405 * clear signature registers
6407 6406 */
6408 6407 nv_put8(cmdhdl, nvp->nvp_sect, 0);
6409 6408 nv_put8(cmdhdl, nvp->nvp_lcyl, 0);
6410 6409 nv_put8(cmdhdl, nvp->nvp_hcyl, 0);
6411 6410 nv_put8(cmdhdl, nvp->nvp_count, 0);
6412 6411
6413 6412 nvp->nvp_signature = NV_NO_SIG;
6414 6413 nvp->nvp_type = SATA_DTYPE_NONE;
6415 6414 nvp->nvp_state |= NV_RESET;
6416 6415 nvp->nvp_reset_time = ddi_get_lbolt();
6417 6416
6418 6417 /*
6419 6418 * assert reset in PHY by writing a 1 to bit 0 scontrol
6420 6419 */
6421 6420 sctrl = nv_get32(bar5_hdl, nvp->nvp_sctrl);
6422 6421
6423 6422 nv_put32(bar5_hdl, nvp->nvp_sctrl,
6424 6423 sctrl | SCONTROL_DET_COMRESET);
6425 6424
6426 6425 /*
6427 6426 * wait 1ms
6428 6427 */
6429 6428 drv_usecwait(1000);
6430 6429
6431 6430 /*
6432 6431 * de-assert reset in PHY
6433 6432 */
6434 6433 nv_put32(bar5_hdl, nvp->nvp_sctrl, sctrl);
6435 6434 }
6436 6435
6437 6436 return (DDI_SUCCESS);
6438 6437 }
6439 6438
6440 6439
6441 6440 #ifdef SGPIO_SUPPORT
6442 6441 /*
6443 6442 * NVIDIA specific SGPIO LED support
6444 6443 * Please refer to the NVIDIA documentation for additional details
6445 6444 */
6446 6445
6447 6446 /*
6448 6447 * nv_sgp_led_init
6449 6448 * Detect SGPIO support. If present, initialize.
6450 6449 */
6451 6450 static void
6452 6451 nv_sgp_led_init(nv_ctl_t *nvc, ddi_acc_handle_t pci_conf_handle)
6453 6452 {
6454 6453 uint16_t csrp; /* SGPIO_CSRP from PCI config space */
6455 6454 uint32_t cbp; /* SGPIO_CBP from PCI config space */
6456 6455 nv_sgp_cmn_t *cmn; /* shared data structure */
6457 6456 int i;
6458 6457 char tqname[SGPIO_TQ_NAME_LEN];
6459 6458 extern caddr_t psm_map_phys_new(paddr_t, size_t, int);
6460 6459
6461 6460 /*
6462 6461 * Initialize with appropriately invalid values in case this function
6463 6462 * exits without initializing SGPIO (for example, there is no SGPIO
6464 6463 * support).
6465 6464 */
6466 6465 nvc->nvc_sgp_csr = 0;
6467 6466 nvc->nvc_sgp_cbp = NULL;
6468 6467 nvc->nvc_sgp_cmn = NULL;
6469 6468
6470 6469 /*
6471 6470 * Only try to initialize SGPIO LED support if this property
6472 6471 * indicates it should be.
6473 6472 */
6474 6473 if (ddi_getprop(DDI_DEV_T_ANY, nvc->nvc_dip, DDI_PROP_DONTPASS,
6475 6474 "enable-sgpio-leds", 0) != 1)
6476 6475 return;
6477 6476
6478 6477 /*
6479 6478 * CK804 can pass the sgpio_detect test even though it does not support
6480 6479 * SGPIO, so don't even look at a CK804.
6481 6480 */
6482 6481 if (nvc->nvc_mcp5x_flag != B_TRUE)
6483 6482 return;
6484 6483
6485 6484 /*
6486 6485 * The NVIDIA SGPIO support can nominally handle 6 drives.
6487 6486 * However, the current implementation only supports 4 drives.
6488 6487 * With two drives per controller, that means only look at the
6489 6488 * first two controllers.
6490 6489 */
6491 6490 if ((nvc->nvc_ctlr_num != 0) && (nvc->nvc_ctlr_num != 1))
6492 6491 return;
6493 6492
6494 6493 /* confirm that the SGPIO registers are there */
6495 6494 if (nv_sgp_detect(pci_conf_handle, &csrp, &cbp) != NV_SUCCESS) {
6496 6495 NVLOG(NVDBG_INIT, nvc, NULL,
6497 6496 "SGPIO registers not detected", NULL);
6498 6497 return;
6499 6498 }
6500 6499
6501 6500 /* save off the SGPIO_CSR I/O address */
6502 6501 nvc->nvc_sgp_csr = csrp;
6503 6502
6504 6503 /* map in Control Block */
6505 6504 nvc->nvc_sgp_cbp = (nv_sgp_cb_t *)psm_map_phys_new(cbp,
6506 6505 sizeof (nv_sgp_cb_t), PROT_READ | PROT_WRITE);
6507 6506
6508 6507 /* initialize the SGPIO h/w */
6509 6508 if (nv_sgp_init(nvc) == NV_FAILURE) {
6510 6509 nv_cmn_err(CE_WARN, nvc, NULL,
6511 6510 "Unable to initialize SGPIO");
6512 6511 }
6513 6512
6514 6513 /*
6515 6514 * Initialize the shared space for this instance. This could
6516 6515 * involve allocating the space, saving a pointer to the space
6517 6516 * and starting the taskq that actually turns the LEDs on and off.
6518 6517 * Or, it could involve just getting the pointer to the already
6519 6518 * allocated space.
6520 6519 */
6521 6520
6522 6521 mutex_enter(&nv_sgp_c2c_mutex);
6523 6522
6524 6523 /* try and find our CBP in the mapping table */
6525 6524 cmn = NULL;
6526 6525 for (i = 0; i < NV_MAX_CBPS; i++) {
6527 6526 if (nv_sgp_cbp2cmn[i].c2cm_cbp == cbp) {
6528 6527 cmn = nv_sgp_cbp2cmn[i].c2cm_cmn;
6529 6528 break;
6530 6529 }
6531 6530
6532 6531 if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
6533 6532 break;
6534 6533 }
6535 6534
6536 6535 if (i >= NV_MAX_CBPS) {
6537 6536 /*
6538 6537 * CBP to shared space mapping table is full
6539 6538 */
6540 6539 nvc->nvc_sgp_cmn = NULL;
6541 6540 nv_cmn_err(CE_WARN, nvc, NULL,
6542 6541 "LED handling not initialized - too many controllers");
6543 6542 } else if (cmn == NULL) {
6544 6543 /*
6545 6544 * Allocate the shared space, point the SGPIO scratch register
6546 6545 * at it and start the led update taskq.
6547 6546 */
6548 6547
6549 6548 /* allocate shared space */
6550 6549 cmn = (nv_sgp_cmn_t *)kmem_zalloc(sizeof (nv_sgp_cmn_t),
6551 6550 KM_SLEEP);
6552 6551 if (cmn == NULL) {
6553 6552 nv_cmn_err(CE_WARN, nvc, NULL,
6554 6553 "Failed to allocate shared data");
6555 6554 return;
6556 6555 }
6557 6556
6558 6557 nvc->nvc_sgp_cmn = cmn;
6559 6558
6560 6559 /* initialize the shared data structure */
6561 6560 cmn->nvs_in_use = (1 << nvc->nvc_ctlr_num);
6562 6561 cmn->nvs_connected = 0;
6563 6562 cmn->nvs_activity = 0;
6564 6563 cmn->nvs_cbp = cbp;
6565 6564
6566 6565 mutex_init(&cmn->nvs_slock, NULL, MUTEX_DRIVER, NULL);
6567 6566 mutex_init(&cmn->nvs_tlock, NULL, MUTEX_DRIVER, NULL);
6568 6567 cv_init(&cmn->nvs_cv, NULL, CV_DRIVER, NULL);
6569 6568
6570 6569 /* put the address in the SGPIO scratch register */
6571 6570 #if defined(__amd64)
6572 6571 nvc->nvc_sgp_cbp->sgpio_sr = (uint64_t)cmn;
6573 6572 #else
6574 6573 nvc->nvc_sgp_cbp->sgpio_sr = (uint32_t)cmn;
6575 6574 #endif
6576 6575
6577 6576 /* add an entry to the cbp to cmn mapping table */
6578 6577
6579 6578 /* i should be the next available table position */
6580 6579 nv_sgp_cbp2cmn[i].c2cm_cbp = cbp;
6581 6580 nv_sgp_cbp2cmn[i].c2cm_cmn = cmn;
6582 6581
6583 6582 /* start the activity LED taskq */
6584 6583
6585 6584 /*
6586 6585 * The taskq name should be unique and the time
6587 6586 */
6588 6587 (void) snprintf(tqname, SGPIO_TQ_NAME_LEN,
6589 6588 "nvSataLed%x", (short)(ddi_get_lbolt() & 0xffff));
6590 6589 cmn->nvs_taskq = ddi_taskq_create(nvc->nvc_dip, tqname, 1,
6591 6590 TASKQ_DEFAULTPRI, 0);
6592 6591 if (cmn->nvs_taskq == NULL) {
6593 6592 cmn->nvs_taskq_delay = 0;
6594 6593 nv_cmn_err(CE_WARN, nvc, NULL,
6595 6594 "Failed to start activity LED taskq");
6596 6595 } else {
6597 6596 cmn->nvs_taskq_delay = SGPIO_LOOP_WAIT_USECS;
6598 6597 (void) ddi_taskq_dispatch(cmn->nvs_taskq,
6599 6598 nv_sgp_activity_led_ctl, nvc, DDI_SLEEP);
6600 6599 }
6601 6600 } else {
6602 6601 nvc->nvc_sgp_cmn = cmn;
6603 6602 cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6604 6603 }
6605 6604
6606 6605 mutex_exit(&nv_sgp_c2c_mutex);
6607 6606 }
6608 6607
6609 6608 /*
6610 6609 * nv_sgp_detect
6611 6610 * Read the SGPIO_CSR and SGPIO_CBP values from PCI config space and
6612 6611 * report back whether both were readable.
6613 6612 */
6614 6613 static int
6615 6614 nv_sgp_detect(ddi_acc_handle_t pci_conf_handle, uint16_t *csrpp,
6616 6615 uint32_t *cbpp)
6617 6616 {
6618 6617 /* get the SGPIO_CSRP */
6619 6618 *csrpp = pci_config_get16(pci_conf_handle, SGPIO_CSRP);
6620 6619 if (*csrpp == 0) {
6621 6620 return (NV_FAILURE);
6622 6621 }
6623 6622
6624 6623 /* SGPIO_CSRP is good, get the SGPIO_CBP */
6625 6624 *cbpp = pci_config_get32(pci_conf_handle, SGPIO_CBP);
6626 6625 if (*cbpp == 0) {
6627 6626 return (NV_FAILURE);
6628 6627 }
6629 6628
6630 6629 /* SGPIO_CBP is good, so we must support SGPIO */
6631 6630 return (NV_SUCCESS);
6632 6631 }
6633 6632
6634 6633 /*
6635 6634 * nv_sgp_init
6636 6635 * Initialize SGPIO.
6637 6636 * The initialization process is described by NVIDIA, but the hardware does
6638 6637 * not always behave as documented, so several steps have been changed and/or
6639 6638 * omitted.
6640 6639 */
static int
nv_sgp_init(nv_ctl_t *nvc)
{
	int seq;
	int rval = NV_SUCCESS;
	hrtime_t start, end;
	uint32_t cmd;
	uint32_t status;
	int drive_count;

	status = nv_sgp_csr_read(nvc);
	if (SGPIO_CSR_SSTAT(status) == SGPIO_STATE_RESET) {
		/* SGPIO logic is in reset state and requires initialization */

		/* noting the Sequence field value */
		seq = SGPIO_CSR_SEQ(status);

		/* issue SGPIO_CMD_READ_PARAMS command */
		cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_READ_PARAMS);
		nv_sgp_csr_write(nvc, cmd);

		DTRACE_PROBE2(sgpio__cmd, int, cmd, int, status);

		/* poll for command completion */
		start = gethrtime();
		end = start + NV_SGP_CMD_TIMEOUT;
		for (;;) {
			status = nv_sgp_csr_read(nvc);

			/* break on error */
			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR) {
				NVLOG(NVDBG_VERBOSE, nvc, NULL,
				    "Command error during initialization",
				    NULL);
				rval = NV_FAILURE;
				break;
			}

			/* command processing is taking place */
			if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK) {
				/*
				 * The sequence field is expected to advance
				 * on completion; log (but tolerate) a
				 * mismatch.
				 */
				if (SGPIO_CSR_SEQ(status) != seq) {
					NVLOG(NVDBG_VERBOSE, nvc, NULL,
					    "Sequence number change error",
					    NULL);
				}

				break;
			}

			/* if completion not detected in 2000ms ... */

			if (gethrtime() > end)
				break;

			/* wait 400 ns before checking again */
			NV_DELAY_NSEC(400);
		}
	}

	if (rval == NV_FAILURE)
		return (rval);

	if (SGPIO_CSR_SSTAT(status) != SGPIO_STATE_OPERATIONAL) {
		NVLOG(NVDBG_VERBOSE, nvc, NULL,
		    "SGPIO logic not operational after init - state %d",
		    SGPIO_CSR_SSTAT(status));
		/*
		 * Should return (NV_FAILURE) but the hardware can be
		 * operational even if the SGPIO Status does not indicate
		 * this.
		 */
	}

	/*
	 * NVIDIA recommends reading the supported drive count even
	 * though they also indicate that it is always 4 at this time.
	 */
	drive_count = SGP_CR0_DRV_CNT(nvc->nvc_sgp_cbp->sgpio_cr0);
	if (drive_count != SGPIO_DRV_CNT_VALUE) {
		/* unexpected but not fatal; just note it */
		NVLOG(NVDBG_INIT, nvc, NULL,
		    "SGPIO reported undocumented drive count - %d",
		    drive_count);
	}

	NVLOG(NVDBG_INIT, nvc, NULL,
	    "initialized ctlr: %d csr: 0x%08x",
	    nvc->nvc_ctlr_num, nvc->nvc_sgp_csr);

	return (rval);
}
6731 6730
6732 6731 static int
6733 6732 nv_sgp_check_set_cmn(nv_ctl_t *nvc)
6734 6733 {
6735 6734 nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
6736 6735
6737 6736 if (cmn == NULL)
6738 6737 return (NV_FAILURE);
6739 6738
6740 6739 mutex_enter(&cmn->nvs_slock);
6741 6740 cmn->nvs_in_use |= (1 << nvc->nvc_ctlr_num);
6742 6741 mutex_exit(&cmn->nvs_slock);
6743 6742
6744 6743 return (NV_SUCCESS);
6745 6744 }
6746 6745
6747 6746 /*
6748 6747 * nv_sgp_csr_read
6749 6748 * This is just a 32-bit port read from the value that was obtained from the
6750 6749 * PCI config space.
6751 6750 *
6752 6751 * XXX It was advised to use the in[bwl] function for this, even though they
6753 6752 * are obsolete interfaces.
6754 6753 */
6755 6754 static int
6756 6755 nv_sgp_csr_read(nv_ctl_t *nvc)
6757 6756 {
6758 6757 return (inl(nvc->nvc_sgp_csr));
6759 6758 }
6760 6759
6761 6760 /*
6762 6761 * nv_sgp_csr_write
6763 6762 * This is just a 32-bit I/O port write. The port number was obtained from
6764 6763 * the PCI config space.
6765 6764 *
6766 6765 * XXX It was advised to use the out[bwl] function for this, even though they
6767 6766 * are obsolete interfaces.
6768 6767 */
static void
nv_sgp_csr_write(nv_ctl_t *nvc, uint32_t val)
{
	/* 32-bit I/O port write to the SGPIO CSR saved at init time */
	outl(nvc->nvc_sgp_csr, val);
}
6774 6773
6775 6774 /*
6776 6775 * nv_sgp_write_data
6777 6776 * Cause SGPIO to send Control Block data
6778 6777 */
static int
nv_sgp_write_data(nv_ctl_t *nvc)
{
	hrtime_t start, end;
	uint32_t status;
	uint32_t cmd;

	/* issue command */
	cmd = SGPIO_CSR_CMD_SET(SGPIO_CMD_WRITE_DATA);
	nv_sgp_csr_write(nvc, cmd);

	/* poll for completion */
	start = gethrtime();
	end = start + NV_SGP_CMD_TIMEOUT;
	for (;;) {
		status = nv_sgp_csr_read(nvc);

		/* break on error completion */
		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_ERROR)
			break;

		/* break on successful completion */
		if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
			break;

		/* Wait 400 ns and try again */
		NV_DELAY_NSEC(400);

		if (gethrtime() > end)
			break;
	}

	/*
	 * status holds the last CSR value read; on timeout it will not
	 * show SGPIO_CMD_OK, so a timeout reports NV_FAILURE here.
	 */
	if (SGPIO_CSR_CSTAT(status) == SGPIO_CMD_OK)
		return (NV_SUCCESS);

	return (NV_FAILURE);
}
6816 6815
/*
 * nv_sgp_activity_led_ctl
 * This is run as a taskq.  It wakes up at a fixed interval and checks to
 * see if any of the activity LEDs need to be changed.  The loop exits
 * when nvs_taskq_delay is set to 0 (done by nv_sgp_cleanup()), which
 * makes ticks 0 and terminates the do/while.
 */
static void
nv_sgp_activity_led_ctl(void *arg)
{
	nv_ctl_t *nvc = (nv_ctl_t *)arg;
	nv_sgp_cmn_t *cmn;
	volatile nv_sgp_cb_t *cbp;
	clock_t ticks;
	uint8_t drv_leds;
	uint32_t old_leds;
	uint32_t new_led_state;
	int i;

	cmn = nvc->nvc_sgp_cmn;
	cbp = nvc->nvc_sgp_cbp;

	do {
		/*
		 * save off the old state of all of the LEDs
		 * NOTE(review): this snapshot is taken without holding
		 * nvs_slock; presumably a stale read is tolerable since
		 * the loop runs again shortly — confirm.
		 */
		old_leds = cbp->sgpio0_tr;

		DTRACE_PROBE3(sgpio__activity__state,
		    int, cmn->nvs_connected, int, cmn->nvs_activity,
		    int, old_leds);

		new_led_state = 0;

		/* for each drive */
		for (i = 0; i < SGPIO_DRV_CNT_VALUE; i++) {

			/* get the current state of the LEDs for the drive */
			drv_leds = SGPIO0_TR_DRV(old_leds, i);

			if ((cmn->nvs_connected & (1 << i)) == 0) {
				/* if not connected, turn off activity */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);

				continue;
			}

			if ((cmn->nvs_activity & (1 << i)) == 0) {
				/* connected, but not active */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);

				continue;
			}

			/*
			 * connected and active: toggle the LED each pass
			 * so that activity shows as a blink
			 */
			if (TR_ACTIVE(drv_leds) == TR_ACTIVE_ENABLE) {
				/* was enabled, so disable */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |=
				    TR_ACTIVE_SET(TR_ACTIVE_DISABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);
			} else {
				/* was disabled, so enable */
				drv_leds &= ~TR_ACTIVE_MASK;
				drv_leds |= TR_ACTIVE_SET(TR_ACTIVE_ENABLE);

				new_led_state &= SGPIO0_TR_DRV_CLR(i);
				new_led_state |=
				    SGPIO0_TR_DRV_SET(drv_leds, i);
			}

			/*
			 * clear the activity bit
			 * if there is drive activity again within the
			 * loop interval (now 1/16 second), nvs_activity
			 * will be reset and the "connected and active"
			 * condition above will cause the LED to blink
			 * off and on at the loop interval rate.  The
			 * rate may be increased (interval shortened) as
			 * long as it is not more than 1/30 second.
			 */
			mutex_enter(&cmn->nvs_slock);
			cmn->nvs_activity &= ~(1 << i);
			mutex_exit(&cmn->nvs_slock);
		}

		DTRACE_PROBE1(sgpio__new__led__state, int, new_led_state);

		/* write out LED values */

		mutex_enter(&cmn->nvs_slock);
		cbp->sgpio0_tr &= ~TR_ACTIVE_MASK_ALL;
		cbp->sgpio0_tr |= new_led_state;
		cbp->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
		mutex_exit(&cmn->nvs_slock);

		if (nv_sgp_write_data(nvc) == NV_FAILURE) {
			NVLOG(NVDBG_VERBOSE, nvc, NULL,
			    "nv_sgp_write_data failure updating active LED",
			    NULL);
		}

		/*
		 * now rest for the interval; nv_sgp_cleanup() wakes us
		 * via cv_broadcast after zeroing nvs_taskq_delay
		 */
		mutex_enter(&cmn->nvs_tlock);
		ticks = drv_usectohz(cmn->nvs_taskq_delay);
		if (ticks > 0)
			(void) cv_reltimedwait(&cmn->nvs_cv, &cmn->nvs_tlock,
			    ticks, TR_CLOCK_TICK);
		mutex_exit(&cmn->nvs_tlock);
	} while (ticks > 0);
}
6937 6936
6938 6937 /*
6939 6938 * nv_sgp_drive_connect
6940 6939 * Set the flag used to indicate that the drive is attached to the HBA.
6941 6940 * Used to let the taskq know that it should turn the Activity LED on.
6942 6941 */
6943 6942 static void
6944 6943 nv_sgp_drive_connect(nv_ctl_t *nvc, int drive)
6945 6944 {
6946 6945 nv_sgp_cmn_t *cmn;
6947 6946
6948 6947 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6949 6948 return;
6950 6949 cmn = nvc->nvc_sgp_cmn;
6951 6950
6952 6951 mutex_enter(&cmn->nvs_slock);
6953 6952 cmn->nvs_connected |= (1 << drive);
6954 6953 mutex_exit(&cmn->nvs_slock);
6955 6954 }
6956 6955
6957 6956 /*
6958 6957 * nv_sgp_drive_disconnect
6959 6958 * Clears the flag used to indicate that the drive is no longer attached
6960 6959 * to the HBA. Used to let the taskq know that it should turn the
6961 6960 * Activity LED off. The flag that indicates that the drive is in use is
6962 6961 * also cleared.
6963 6962 */
6964 6963 static void
6965 6964 nv_sgp_drive_disconnect(nv_ctl_t *nvc, int drive)
6966 6965 {
6967 6966 nv_sgp_cmn_t *cmn;
6968 6967
6969 6968 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6970 6969 return;
6971 6970 cmn = nvc->nvc_sgp_cmn;
6972 6971
6973 6972 mutex_enter(&cmn->nvs_slock);
6974 6973 cmn->nvs_connected &= ~(1 << drive);
6975 6974 cmn->nvs_activity &= ~(1 << drive);
6976 6975 mutex_exit(&cmn->nvs_slock);
6977 6976 }
6978 6977
6979 6978 /*
6980 6979 * nv_sgp_drive_active
6981 6980 * Sets the flag used to indicate that the drive has been accessed and the
6982 6981 * LED should be flicked off, then on. It is cleared at a fixed time
6983 6982 * interval by the LED taskq and set by the sata command start.
6984 6983 */
6985 6984 static void
6986 6985 nv_sgp_drive_active(nv_ctl_t *nvc, int drive)
6987 6986 {
6988 6987 nv_sgp_cmn_t *cmn;
6989 6988
6990 6989 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
6991 6990 return;
6992 6991 cmn = nvc->nvc_sgp_cmn;
6993 6992
6994 6993 DTRACE_PROBE1(sgpio__active, int, drive);
6995 6994
6996 6995 mutex_enter(&cmn->nvs_slock);
6997 6996 cmn->nvs_activity |= (1 << drive);
6998 6997 mutex_exit(&cmn->nvs_slock);
6999 6998 }
7000 6999
7001 7000
7002 7001 /*
7003 7002 * nv_sgp_locate
7004 7003 * Turns the Locate/OK2RM LED off or on for a particular drive. State is
7005 7004 * maintained in the SGPIO Control Block.
7006 7005 */
7007 7006 static void
7008 7007 nv_sgp_locate(nv_ctl_t *nvc, int drive, int value)
7009 7008 {
7010 7009 uint8_t leds;
7011 7010 volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7012 7011 nv_sgp_cmn_t *cmn;
7013 7012
7014 7013 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7015 7014 return;
7016 7015 cmn = nvc->nvc_sgp_cmn;
7017 7016
7018 7017 if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7019 7018 return;
7020 7019
7021 7020 DTRACE_PROBE2(sgpio__locate, int, drive, int, value);
7022 7021
7023 7022 mutex_enter(&cmn->nvs_slock);
7024 7023
7025 7024 leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7026 7025
7027 7026 leds &= ~TR_LOCATE_MASK;
7028 7027 leds |= TR_LOCATE_SET(value);
7029 7028
7030 7029 cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7031 7030 cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7032 7031
7033 7032 cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7034 7033
7035 7034 mutex_exit(&cmn->nvs_slock);
7036 7035
7037 7036 if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7038 7037 nv_cmn_err(CE_WARN, nvc, NULL,
7039 7038 "nv_sgp_write_data failure updating OK2RM/Locate LED");
7040 7039 }
7041 7040 }
7042 7041
7043 7042 /*
7044 7043 * nv_sgp_error
7045 7044 * Turns the Error/Failure LED off or on for a particular drive. State is
7046 7045 * maintained in the SGPIO Control Block.
7047 7046 */
7048 7047 static void
7049 7048 nv_sgp_error(nv_ctl_t *nvc, int drive, int value)
7050 7049 {
7051 7050 uint8_t leds;
7052 7051 volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7053 7052 nv_sgp_cmn_t *cmn;
7054 7053
7055 7054 if (nv_sgp_check_set_cmn(nvc) == NV_FAILURE)
7056 7055 return;
7057 7056 cmn = nvc->nvc_sgp_cmn;
7058 7057
7059 7058 if ((drive < 0) || (drive >= SGPIO_DRV_CNT_VALUE))
7060 7059 return;
7061 7060
7062 7061 DTRACE_PROBE2(sgpio__error, int, drive, int, value);
7063 7062
7064 7063 mutex_enter(&cmn->nvs_slock);
7065 7064
7066 7065 leds = SGPIO0_TR_DRV(cb->sgpio0_tr, drive);
7067 7066
7068 7067 leds &= ~TR_ERROR_MASK;
7069 7068 leds |= TR_ERROR_SET(value);
7070 7069
7071 7070 cb->sgpio0_tr &= SGPIO0_TR_DRV_CLR(drive);
7072 7071 cb->sgpio0_tr |= SGPIO0_TR_DRV_SET(leds, drive);
7073 7072
7074 7073 cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7075 7074
7076 7075 mutex_exit(&cmn->nvs_slock);
7077 7076
7078 7077 if (nv_sgp_write_data(nvc) == NV_FAILURE) {
7079 7078 nv_cmn_err(CE_WARN, nvc, NULL,
7080 7079 "nv_sgp_write_data failure updating Fail/Error LED");
7081 7080 }
7082 7081 }
7083 7082
7084 7083 static void
7085 7084 nv_sgp_cleanup(nv_ctl_t *nvc)
7086 7085 {
7087 7086 int drive, i;
7088 7087 uint8_t drv_leds;
7089 7088 uint32_t led_state;
7090 7089 volatile nv_sgp_cb_t *cb = nvc->nvc_sgp_cbp;
7091 7090 nv_sgp_cmn_t *cmn = nvc->nvc_sgp_cmn;
7092 7091 extern void psm_unmap_phys(caddr_t, size_t);
7093 7092
7094 7093 /*
7095 7094 * If the SGPIO Control Block isn't mapped or the shared data
7096 7095 * structure isn't present in this instance, there isn't much that
7097 7096 * can be cleaned up.
7098 7097 */
7099 7098 if ((cb == NULL) || (cmn == NULL))
7100 7099 return;
7101 7100
7102 7101 /* turn off activity LEDs for this controller */
7103 7102 drv_leds = TR_ACTIVE_SET(TR_ACTIVE_DISABLE);
7104 7103
7105 7104 /* get the existing LED state */
7106 7105 led_state = cb->sgpio0_tr;
7107 7106
7108 7107 /* turn off port 0 */
7109 7108 drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 0);
7110 7109 led_state &= SGPIO0_TR_DRV_CLR(drive);
7111 7110 led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7112 7111
7113 7112 /* turn off port 1 */
7114 7113 drive = SGP_CTLR_PORT_TO_DRV(nvc->nvc_ctlr_num, 1);
7115 7114 led_state &= SGPIO0_TR_DRV_CLR(drive);
7116 7115 led_state |= SGPIO0_TR_DRV_SET(drv_leds, drive);
7117 7116
7118 7117 /* set the new led state, which should turn off this ctrl's LEDs */
7119 7118 cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7120 7119 (void) nv_sgp_write_data(nvc);
7121 7120
7122 7121 /* clear the controller's in use bit */
7123 7122 mutex_enter(&cmn->nvs_slock);
7124 7123 cmn->nvs_in_use &= ~(1 << nvc->nvc_ctlr_num);
7125 7124 mutex_exit(&cmn->nvs_slock);
7126 7125
7127 7126 if (cmn->nvs_in_use == 0) {
7128 7127 /* if all "in use" bits cleared, take everything down */
7129 7128
7130 7129 if (cmn->nvs_taskq != NULL) {
7131 7130 /* allow activity taskq to exit */
7132 7131 cmn->nvs_taskq_delay = 0;
7133 7132 cv_broadcast(&cmn->nvs_cv);
7134 7133
7135 7134 /* then destroy it */
7136 7135 ddi_taskq_destroy(cmn->nvs_taskq);
7137 7136 }
7138 7137
7139 7138 /* turn off all of the LEDs */
7140 7139 cb->sgpio0_tr = 0;
7141 7140 cb->sgpio_cr0 = SGP_CR0_ENABLE_MASK;
7142 7141 (void) nv_sgp_write_data(nvc);
7143 7142
7144 7143 cb->sgpio_sr = NULL;
7145 7144
7146 7145 /* zero out the CBP to cmn mapping */
7147 7146 for (i = 0; i < NV_MAX_CBPS; i++) {
7148 7147 if (nv_sgp_cbp2cmn[i].c2cm_cbp == cmn->nvs_cbp) {
7149 7148 nv_sgp_cbp2cmn[i].c2cm_cmn = NULL;
7150 7149 break;
7151 7150 }
7152 7151
7153 7152 if (nv_sgp_cbp2cmn[i].c2cm_cbp == 0)
7154 7153 break;
7155 7154 }
7156 7155
7157 7156 /* free resources */
7158 7157 cv_destroy(&cmn->nvs_cv);
7159 7158 mutex_destroy(&cmn->nvs_tlock);
7160 7159 mutex_destroy(&cmn->nvs_slock);
7161 7160
7162 7161 kmem_free(nvc->nvc_sgp_cmn, sizeof (nv_sgp_cmn_t));
7163 7162 }
7164 7163
7165 7164 nvc->nvc_sgp_cmn = NULL;
7166 7165
7167 7166 /* unmap the SGPIO Control Block */
7168 7167 psm_unmap_phys((caddr_t)nvc->nvc_sgp_cbp, sizeof (nv_sgp_cb_t));
7169 7168 }
7170 7169 #endif /* SGPIO_SUPPORT */
↓ open down ↓ |
6837 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX